[![Build Status](https://travis-ci.org/antirez/disque.svg)](https://travis-ci.org/antirez/disque)

Disque, an in-memory, distributed job queue
===

Disque is an ongoing experiment to build a distributed, in-memory message broker. Its goal is to capture the essence of the "Redis as a jobs queue" use case, which is usually implemented using blocking list operations, and move it into an ad-hoc, self-contained, scalable, and fault tolerant design, with simple to understand properties and guarantees, but still resembling Redis in terms of simplicity, performance and implementation as a C non-blocking networked server.

Currently (2 Jan 2016) the project is in release candidate state.

People are encouraged to start evaluating it, and to report bugs and experiences.

**WARNING: This is beta code and may not be suitable for production usage. The API is considered stable except for details that may change in the next release candidates, however it's new code, so handle with care!**

What is a message queue?
---

*Hint: skip this section if you are familiar with message queues.*

You know how humans use text messages to communicate, right? I could text my wife "please get the milk at the store", and maybe she will reply "Ok message received, I'll get two bottles on my way home".

A message queue is the same as human text messages, but for computer programs. For example a web application, when a user subscribes, may send to another process that handles sending emails the message "please send the confirmation email to tom@example.net".

Message systems like Disque allow communication between processes using different queues. So a process can send a message into a queue with a given name, and only processes fetching messages from this queue will receive them. Moreover multiple processes can listen for messages in a given queue, and multiple processes can send messages to the same queue.

The important part of a message queue is to provide guarantees so that messages are eventually delivered even in the face of failures. So even if in theory implementing a message queue is very easy, writing a robust and scalable one is harder than it may appear.

Give me the details!
---

Disque is a distributed and fault tolerant message broker, so it works as a middle layer among processes that want to exchange messages.

Producers add messages that are served to consumers. Since message queues are often used in order to process delayed jobs, Disque often uses the term "job" in the API and in the documentation, however jobs are actually just messages in the form of strings, so Disque can be used for other use cases. In this documentation "jobs" and "messages" are used interchangeably.

Job queues with a producer-consumer model are pretty common, so the devil is in the details. A few details about Disque are:

Disque is a **synchronously replicated job queue**. By default when a new job is added, it is replicated to W nodes before the client gets an acknowledgement about the job being added. W-1 nodes can fail and the message will still be delivered.

Disque supports both **at-least-once and at-most-once** delivery semantics. At-least-once delivery semantics is where most effort was spent in the design and implementation, while at-most-once semantics is a trivial result of using a *retry time* set to 0 (which means, never re-queue the message again) and a replication factor of 1 for the message (not strictly needed, but it is useless to have multiple copies of a message around if it will be delivered at most one time). You can have, at the same time, both at-least-once and at-most-once jobs in the same queues and nodes, since this is a per message setting.

Disque at-least-once delivery is designed to **approximate single delivery** when possible, even during certain kinds of failures. This means that while Disque can only guarantee a number of deliveries equal to or greater than one, it will try hard to avoid multiple deliveries whenever possible.

Disque is a distributed system where **all nodes have the same role** (aka, it is multi-master).

Producers and consumers can attach to whatever node they like, and there is no need for producers and consumers of the same queue to stay connected to the same node. Nodes will automatically exchange messages based on load and client requests.

Disque is Available (it is an eventually consistent AP system in CAP terms): producers and consumers can make progress as long as a single node is reachable.

Disque supports **optional asynchronous commands** that are low latency for the client but provide fewer guarantees. For example a producer can add a job to a queue with a replication factor of 3, but may want to run away before knowing if the contacted node was really able to replicate it to the specified number of nodes or not. The node will replicate the message in the background in a best effort way.

Disque **automatically re-queues messages that are not acknowledged** as already processed by consumers, after a message-specific retry time. There is no need for consumers to re-queue a message if it was not processed.

Disque uses **explicit acknowledges** in order for a consumer to signal a message as delivered (or, using a different terminology, to signal a job as already processed).

Disque queues only provide **best effort ordering**. Each queue sorts messages based on the job creation time, which is obtained using the *wall clock* of the local node where the message was created (plus an incremental counter for messages created in the same millisecond), so messages created in the same node are normally delivered in the same order they were created. This is not causal ordering since correct ordering is violated in different cases: when messages are re-issued because they were not acknowledged, because of local clock drifts between nodes, and when messages are moved to other nodes for load balancing and federation (in this case you end up with queues having jobs originated in different nodes with different wall clocks). However all this also means that normally messages are not delivered in random order, and usually messages created first are delivered first.

Note that since Disque does not provide strict FIFO semantics, technically speaking it should not be called a *message queue*, and it would be better identified as a message broker. However I believe that at this point in the IT industry the term *message queue* is often used more loosely to identify a generic broker that may or may not be able to guarantee order in all the cases. Given that we document the semantics very clearly, I grant myself the right to call Disque a message queue anyway.

Disque provides the user with fine-grained control for each job, **using three time related parameters** and one replication parameter. For each job, the user can control:

1. The replication factor (how many nodes have a copy).
2. The delay time (the minimum time Disque will wait before putting the message in a queue, making the message deliverable).
3. The retry time (how much time should elapse, since the last time the job was queued and without an acknowledge about the job delivery, before the job is re-queued again for delivery).
4. The expire time (how much time should elapse for the job to be deleted regardless of whether it was successfully delivered, i.e. acknowledged, or not).
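
For example (an illustrative session; the `ADDJOB` command and its options are documented in the Main API section below, and queue names, bodies and returned IDs are made up), the first job below is an at-least-once job replicated to three nodes, deliverable after 5 seconds, re-queued every 30 seconds if not acknowledged, and expiring after one hour, while the second one is an at-most-once job:

    127.0.0.1:7711> ADDJOB myqueue myjob 0 REPLICATE 3 DELAY 5 RETRY 30 TTL 3600
    D-dcb833cf-0a1QpO7xhXAWrUb5HOx1IoAx-003d
    127.0.0.1:7711> ADDJOB myqueue myjob 0 RETRY 0 REPLICATE 1
    D-dcb833cf-Mq0GJHhZ2F2vcm2SGHir9U6F-05a0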

Finally, Disque supports optional disk persistence, which is not enabled by default, but that can be handy in single data center setups and during restarts.

Other minor features are:

* Ability to block queues.
* A few statistics about queues activity.
* Stateless iterators for queues and jobs.
* Commands to control the visibility of single jobs.
* Easy resize of the cluster (adding nodes is trivial).
* Graceful removal of nodes without losing job replicas.

ACKs and retries
---

Disque's implementation of *at least once* delivery semantics is designed in order to avoid multiple deliveries during certain classes of failures. It is not able to guarantee that no multiple deliveries will occur. However there are many *at least once* workloads where duplicated deliveries are acceptable (or explicitly handled), but not desirable. A trivial example is sending emails to users (it is not terrible if a user gets a duplicated email, but it is important to avoid it when possible), or doing idempotent operations that are expensive (all the cases where it is critical for performance to avoid multiple deliveries).

In order to avoid multiple deliveries when possible, Disque uses client ACKs. When a consumer processes a message correctly, it should acknowledge this fact to Disque. ACKs are replicated to multiple nodes, and are garbage collected as soon as the system believes it is unlikely that more nodes in the cluster have the job (the ACK refers to) still active. Under memory pressure or under certain failure scenarios, ACKs are eventually discarded.

More explicitly:

1. A job is replicated to multiple nodes, but usually only *queued* in a single node. There is a difference between having a job in memory, and queueing it for delivery.
2. Nodes having a copy of a message, if a certain amount of time has elapsed without getting the ACK for the message, will re-queue it. Nodes will run a best-effort protocol to avoid re-queueing the message multiple times.
3. ACKs are replicated and garbage collected across the cluster so that eventually processed messages are evicted (this happens ASAP if there are no failures nor network partitions).

For example, if a node having a copy of a job gets partitioned away during the time the job gets acknowledged by the consumer, it is likely that when it returns back (in a reasonable amount of time, that is, before the retry time is reached) it will be informed about the ACK and will avoid re-queueing the message. Similarly jobs can be acknowledged during a partition to just a single available node, and when the partition heals the ACK will be propagated to other nodes that may still have a copy of the message.

So an ACK is just a **proof of delivery** that is replicated and retained for some time in order to make multiple deliveries less likely to happen in practice.

As already mentioned, in order to control replication and retries, a Disque job has the following associated properties: number of replicas, delay, retry and expire.

If a job has a retry time set to 0, it will get queued exactly once (and in this case a replication factor greater than 1 is useless, and signaled as an error to the user), so it will get delivered either a single time or will never get delivered. While jobs can be persisted on disk for safety, queues aren't, so this behavior is guaranteed even when nodes restart after a crash, whatever the persistence configuration is. However when nodes are manually restarted by the sysadmin, for example for upgrades, queues are persisted correctly and reloaded at startup, since the store/load operation is atomic in this case, and there are no race conditions possible (it is not possible that a job was delivered to a client and is persisted on disk as queued at the same time).
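
To make the ACK flow concrete, here is a minimal consumer session sketch (queue name, body, job ID and the exact replies shown are illustrative; `GETJOB` and `ACKJOB` are documented in the Main API section below). The worker fetches a job, processes it, and only then acknowledges it, so that Disque will not re-queue it after the retry time:

    127.0.0.1:7711> GETJOB FROM myqueue
    1) 1) "myqueue"
       2) "D-dcb833cf-8YL1NT17e9+wsA/09NqxscQI-05a1"
       3) "myjob"
    127.0.0.1:7711> ACKJOB D-dcb833cf-8YL1NT17e9+wsA/09NqxscQI-05a1
    (integer) 1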

Fast acknowledges
---

Disque supports a faster way to acknowledge processed messages, via the `FASTACK` command. The normal acknowledge is very expensive from the point of view of messages exchanged between nodes. This is what happens during a normal acknowledge:

1. The client sends ACKJOB to one node.
2. The node sends a SETACK message to everybody it believes to have a copy.
3. The receivers of SETACK reply with GOTACK to confirm.
4. The node finally sends DELJOB to all the nodes.

*Note: actual garbage collection is more complex in case of failures and is explained in the state machine later. The above is what happens 99% of times.*

If a message is replicated to 3 nodes, acknowledging requires 1+2+2+2 messages, for the sake of retaining the ack if some nodes cannot be reached when the message is acknowledged. This makes the probability of multiple deliveries of this message less likely.

However the alternative **fast ack**, while less reliable, is much faster and involves exchanging fewer messages. This is how a fast acknowledge works:

1. The client sends `FASTACK` to one node.
2. The node evicts the job and sends a best effort DELJOB to all the nodes that may have a copy, or to the whole cluster if the node was not aware of the job.

If during a fast acknowledge a node having a copy of the message is not reachable, for example because of a network partition, the node will deliver the message again, since it has a non acknowledged copy of the message and there is nobody able to inform it, when it returns back available, that the message was acknowledged.

If the network you are using is pretty reliable, you are very concerned with performance, and multiple deliveries in the context of your application are a non-issue, then `FASTACK` is probably the way to go.
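
From the client's point of view a fast acknowledge looks just like a normal one, it simply trades reliability for fewer cluster messages (an illustrative session; the job ID is the example one used throughout this document):

    127.0.0.1:7711> FASTACK D-dcb833cf-8YL1NT17e9+wsA/09NqxscQI-05a1
    (integer) 1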

Dead letter queue
---

Many message queues implement a feature called *dead letter queue*. It is a special queue used in order to accumulate messages that cannot be processed for some reason. Common causes could be:

1. The message was delivered too many times but never correctly processed.
2. The message time to live reached zero before it was processed.
3. Some worker explicitly asked the system to flag the message as having issues.

The idea is that the administrator of the system checks (usually via automatic systems) if there is something in the dead letter queue in order to understand if there is some software error or other kind of error preventing messages from being processed as expected.

Since Disque is an in-memory system, the message time to live is an important property. When it is reached, we want messages to go away, since the TTL should be chosen so that after such a time it is no longer meaningful to process the message. In such a system, using memory to create a queue in response to an error or to messages timing out looks like a suboptimal idea. Moreover, because of the distributed nature of Disque, dead letters could end up spanning multiple nodes and having duplicated entries inside.

So Disque uses a different approach. Each node's message representation has two counters: a **nacks counter** and an **additional deliveries** counter. The counters are not consistent among nodes having a copy of the same message, they are just best effort counters that may not increment in some node during network partitions.

The idea of these two counters is that one is incremented every time a worker uses the `NACK` command to tell the queue the message was not processed correctly and should be put back on the queue ASAP. The other is incremented for every other condition (different than the `NACK` call) that requires a message to be put back on the queue again. This includes messages that get lost and are enqueued again, or messages that are enqueued on one side of a partition while the message was already processed on the other side, and so forth.

Using the `GETJOB` command with the `WITHCOUNTERS` option, or using the `SHOW` command to inspect a job, it is possible to retrieve these two counters together with the other job information, so if a worker, before processing a message, sees the counters having too high values, it can notify operations people in multiple ways:

1. It may send an email.
2. Set a flag in a monitoring system.
3. Put the message in a special queue (simulating the dead letter feature).
4. Attempt to process the message and report the stack trace of the error if any.

Basically the exact handling of the feature is up to the application using Disque. Note that the counters don't need to be consistent in the face of failures or network partitions: the idea is that eventually, if a message has issues, the counters will get incremented enough times to reach the limit selected by the application as a warning threshold.

The reason for having two distinct counters is that applications may want to handle the case of explicit negative acknowledges via `NACK` differently than multiple deliveries because of timeouts or messages lost.
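
As a sketch of how an application could implement this check (the reply layout shown here is indicative; `GETJOB` and `WITHCOUNTERS` are documented in the Main API section below, and the threshold is application-specific):

    127.0.0.1:7711> GETJOB WITHCOUNTERS FROM myqueue
    1) 1) "myqueue"
       2) "D-dcb833cf-8YL1NT17e9+wsA/09NqxscQI-05a1"
       3) "myjob"
       4) "nacks"
       5) (integer) 5
       6) "additional-deliveries"
       7) (integer) 2

A worker seeing `nacks` or `additional-deliveries` above its own threshold can then alert, or route the job to a special queue, as described in the list above.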

Disque and disk persistence
---

Disque can be operated in-memory only, using synchronous replication as the durability guarantee, or can be operated using the Append Only File where job creations and evictions are logged on disk (with configurable fsync policies) and reloaded at restart. AOF is recommended especially if you run in a single availability zone where a mass reboot of all your nodes is possible.

Normally Disque only reloads job data in memory, without populating queues, since non-acknowledged jobs are eventually re-queued anyway. Moreover, reloading queue data is not safe in the case of at-most-once jobs having the retry value set to 0. However a special option is provided in order to reload the full state from the AOF. This is used together with an option that allows shutting down the server just after the AOF is generated from scratch, in order to make it safe to reload even jobs with retry set to 0, since the AOF is generated while the server no longer accepts commands from clients, so no race condition is possible.

Even when running memory-only, Disque is able to dump its memory on disk and reload from disk on controlled restarts, for example in order to upgrade the software. This is how to perform a controlled restart, that works whether AOF is enabled or not:

1. CONFIG SET aof-enqueue-jobs-once yes
2. CONFIG REWRITE
3. SHUTDOWN REWRITE-AOF

At this point we have a freshly generated AOF on disk, and the server is configured to load the full state only at the next restart (the `aof-enqueue-jobs-once` option is automatically turned off after the restart). We can just restart the server with the new software, or on a new server, and it will restart with the full state. Note that `aof-enqueue-jobs-once` implies loading the AOF even if AOF support is switched off, so there is no need to enable AOF just for the upgrade of an in-memory only server.

Job IDs
---

Disque jobs are uniquely identified by an ID like the following:

    D-dcb833cf-8YL1NT17e9+wsA/09NqxscQI-05a1

Job IDs always start with "D-", end with the encoded job TTL, and are always composed of exactly 40 characters.

We can split an ID into multiple parts:

    D- | dcb833cf | 8YL1NT17e9+wsA/09NqxscQI | 05a1

1. "D-" is the prefix.
2. dcb833cf is the first 8 bytes of the node ID where the message was generated.
3. 8YL1NT17e9+wsA/09NqxscQI is the 144 bit pseudo-random part of the ID, encoded in base 64.
4. 05a1 is the job TTL in minutes (in hexadecimal). Because of it, message IDs can be expired safely even without having the job representation.

IDs are returned by ADDJOB when a job is successfully created, are part of the GETJOB output, and are used in order to acknowledge that a job was correctly processed by a worker.

Part of the node ID is included in the message so that a worker processing messages for a given queue can easily guess what are the nodes where jobs are created, and move directly to these nodes to increase efficiency, instead of listening for messages in a node that will have to fetch messages from other nodes.

Only 32 bits of the original node ID is included in the message, however in a cluster with 100 Disque nodes, the probability of two nodes having identical 32 bit ID prefixes is given by the birthday paradox:

    P(100,2^32) = .000001164

In case of collisions, the workers may just make a non-efficient choice.

Collisions in the 144 bits random part are believed to be impossible, since it is computed as follows:

    144 bit ID = HIGH_144_BITS_OF_SHA1(seed || counter)

Where:

* **seed** is a seed generated via `/dev/urandom` at startup.
* **counter** is a 64 bit counter incremented at every ID generation.

So there are 22300745198530623141535718272648361505980416 possible IDs, selected in a uniform way. While the probability of a collision is non-zero mathematically, in practice each ID can be regarded as unique.

The encoded TTL in minutes has a special property: it is always even for at-most-once jobs (job retry value set to 0), and is always odd otherwise. This changes the encoded TTL precision to 2 minutes, but allows telling from the ID alone whether a job has delivery guarantees or not. Note that this fact does not mean that Disque job TTLs have a precision of two minutes. The TTL field is only used to expire job IDs of jobs a given node does not actually have a copy of; search "dummy ACK" in this documentation for more information.
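
As a small worked example of this property, the ID shown above ends with `05a1`, that is 0x5a1 = 1441 minutes, roughly the default TTL of one day; 1441 is odd, so this is an at-least-once job. A job created with `RETRY 0` and the same TTL would carry an even value (1440, encoded as `05a0`) and would be recognizable as at-most-once from the ID alone.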

Setup
===

To play with Disque please do the following:

1. Compile Disque: if you can compile Redis, you can compile Disque, it's the usual no-external-deps thing. Just type `make`. Binaries (`disque` and `disque-server`) will end up in the `src` directory.
2. Run a few Disque nodes on different ports. Create different `disque.conf` files following the example `disque.conf` in the source distribution.
3. After you have them running, you need to join the cluster. Just select a random node among the nodes you are running, and send the command `CLUSTER MEET <ip> <port>` for every other node in the cluster.

**Please note that you need to open two TCP ports on each node**: the base port of the Disque instance, for example 7711, plus the cluster bus port, which is always at a fixed offset, obtained by adding 10000 to the base port, so in the above example you need to open both 7711 and 17711. Disque uses the base port to communicate with clients and the cluster bus port to communicate with other Disque processes.

To run a node, just call `./disque-server`.

For example, if you are running three Disque servers on ports 7711, 7712 and 7713, in order to join the cluster you should use the `disque` command line tool and run the following commands:

    ./disque -p 7711 cluster meet 127.0.0.1 7712
    ./disque -p 7711 cluster meet 127.0.0.1 7713

Your cluster should now be ready. You can try to add a job and fetch it back in order to test if everything is working:

    ./disque -p 7711
    127.0.0.1:7711> ADDJOB queue body 0
    D-dcb833cf-8YL1NT17e9+wsA/09NqxscQI-05a1
    127.0.0.1:7711> GETJOB FROM queue
    1) 1) "queue"
       2) "D-dcb833cf-8YL1NT17e9+wsA/09NqxscQI-05a1"
       3) "body"

Remember that you can add and get jobs from different nodes, as Disque is multi-master. Also remember that you need to acknowledge jobs, otherwise they'll never go away from the server memory (unless the time to live is reached).

Main API
===

The Disque API is composed of a small set of commands, since the system solves a single very specific problem. The three main commands are:

#### `ADDJOB queue_name job <ms-timeout> [REPLICATE <count>] [DELAY <sec>] [RETRY <sec>] [TTL <sec>] [MAXLEN <count>] [ASYNC]`

Adds a job to the specified queue. Arguments are as follows:

* *queue_name* is the name of the queue, any string, basically. You don't need to create queues: if a queue does not exist, it gets created automatically. If it no longer has jobs, it gets removed.
* *job* is a string representing the job. Disque is job meaning agnostic: for it a job is just a message to deliver. Job max size is 4GB.
* *ms-timeout* is the command timeout in milliseconds. If ASYNC is not specified, and the specified replication level is not reached in the specified number of milliseconds, the command returns with an error, and the node does a best-effort cleanup, that is, it will try to delete copies of the job across the cluster. However the job may still be delivered later. Note that the actual timeout resolution is 1/10 of a second or worse with the default server hz.
* *REPLICATE count* is the number of nodes the job should be replicated to.
* *DELAY sec* is the number of seconds that should elapse before the job is queued by any server. By default there is no delay.
* *RETRY sec* is the period after which, if no ACK is received, the job is put again into the queue for delivery. If RETRY is 0, the job has at-most-once delivery semantics. The default retry time is 5 minutes, with the exception of jobs having a TTL so small that 10% of TTL is less than 5 minutes. In this case the default RETRY is set to TTL/10 (with a minimum value of 1 second).
* *TTL sec* is the max job life in seconds. After this time, the job is deleted even if it was not successfully delivered. If not specified, the default TTL is one day.
* *MAXLEN count* specifies that if there are already *count* messages queued for the specified queue name, the message is refused and an error reported to the client.
* *ASYNC* asks the server to let the command return ASAP and replicate the job to other nodes in the background. The job gets queued ASAP, while normally the job is put into the queue only when the client gets a positive reply.

The command returns the Job ID of the added job, assuming ASYNC is specified, or if the job was replicated correctly to the specified number of nodes. Otherwise an error is returned.

#### `GETJOB [NOHANG] [TIMEOUT <ms-timeout>] [COUNT <count>] [WITHCOUNTERS] FROM queue1 queue2 ... queueN`

Return jobs available in one of the specified queues, or return NULL if the timeout is reached. A single job per call is returned unless a count greater than 1 is specified. Jobs are returned as a three-element array containing the queue name, the Job ID, and the job body itself. If jobs are available in multiple queues, queues are processed left to right.

If there are no jobs for the specified queues the command blocks, and messages are exchanged with other nodes, in order to move messages about these queues to this node, so that the client can be served.

Options:

* **NOHANG**: Ask the command not to block even if there are no jobs in all the specified queues. This way the caller can just check if there are available jobs without blocking at all.
* **WITHCOUNTERS**: Return the best-effort count of NACKs (negative acknowledges) received by this job, and the number of additional deliveries performed for this job. See the *Dead letter queue* section for more information.
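
For example (an illustrative session), polling two queues without blocking, asking for up to five jobs:

    127.0.0.1:7711> GETJOB NOHANG COUNT 5 FROM critical normal
    (nil)

A NULL reply here just means neither `critical` nor `normal` currently has jobs available; without `NOHANG`, the same call would block until a job arrives or the timeout is reached.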

#### `ACKJOB jobid1 jobid2 ... jobidN`

Acknowledges the execution of one or more jobs via job IDs. The node receiving the ACK will replicate it to multiple nodes and will try to garbage collect both the job and the ACKs from the cluster so that memory can be freed.

A node receiving an ACKJOB command about a job ID it does not know will create a special empty job, with the state set to "acknowledged", called a "dummy ACK". The dummy ACK is used in order to retain the acknowledge during a netsplit, if the ACKJOB is sent to a node that does not have a copy of the job. When the partition heals, a job garbage collection will be attempted. However, since the job ID encodes information about the job being an "at most once" or an "at least once" job, the dummy ACK is only created for at-least-once jobs.

#### `FASTACK jobid1 jobid2 ... jobidN`

Performs a best effort cluster wide deletion of the specified job IDs. When the network is well connected and there are no node failures, this is equivalent to `ACKJOB` but much faster (fewer messages exchanged), however during failures it is more likely that fast acknowledges will result in multiple deliveries of the same messages.

#### `WORKING jobid`

Claims to be still working on the specified job, and asks Disque to postpone the next time it will deliver the job again. The next delivery is postponed for the job retry time, however the command works in a **best effort** way since there is no way to guarantee during failures that another node in a different network partition is not performing a delivery of the same job.

Another limitation of the `WORKING` command is that it cannot be sent to nodes not knowing about this particular job. In such a case the command replies with a `NOJOB` error. Similarly, if the job is already acknowledged an error is returned.

Note that the `WORKING` command is refused by Disque nodes if 50% of the job time to live has already elapsed. This limitation makes Disque safer since usually the *retry* time is much smaller than the time to live of a job, so it can't happen that a set of broken workers monopolize a job with `WORKING` and never process it. After 50% of the TTL has elapsed, the job will be delivered to other workers anyway.

Note that `WORKING` returns the number of seconds you (likely) postponed the message visibility for other workers (the command basically returns the *retry* time of the job), so the worker should make sure to send the next `WORKING` command before this time elapses. Moreover, a worker that wants to use such an interface may fetch the retry value with the `SHOW` command when starting to process a message, or may simply send a `WORKING` command ASAP, like in the following example (in pseudo code):

    retry = WORKING(jobid)
    RESET timer
    WHILE ... work with the job still not finished ...
        IF timer reached 80% of the retry time
            WORKING(jobid)
            RESET timer
        END
    END

#### `NACK <job-id> ... <job-id>`

The `NACK` command tells Disque to put back the job in the queue ASAP. It is very similar to `ENQUEUE` but it increments the job `nacks` counter instead of the `additional-deliveries` counter. The command should be used when the worker was not able to process a message and wants the message to be put back into the queue in order to be processed again.

Other commands
===

#### `INFO`

Generic server information / stats.

#### `HELLO`

Returns hello format version, this node ID, all the node IDs, IP addresses, ports, and priority (lower is better, means node more available). Clients should use this as a handshake command when connecting with a Disque node.

#### `QLEN <queue-name>`

Return the length of the queue.

#### `QSTAT <queue-name>`

Show information about a queue as an array of key value pairs. Below is an example of the output, however implementations should not rely on the order of the fields nor on the existence of the fields listed in the example. They may be (unlikely) removed or more can be (likely) added in the future.

If a queue does not exist, NULL is returned. Note that queues are automatically evicted after some time if empty and without clients blocked waiting for jobs, even if there are active jobs for the queue. So the non-existence of a queue does not mean there are no jobs in the node or in the whole cluster for this queue. The queue will be immediately created again when needed to serve requests.

Example output:

```
QSTAT foo
 1) "name"
 2) "foo"
 3) "len"
 4) (integer) 56520
 5) "age"
 6) (integer) 601
 7) "idle"
 8) (integer) 3
 9) "blocked"
10) (integer) 50
11) "import-from"
12) 1) "dcb833cf8f42fbb7924d92335ff6d67d3cea6e3d"
    2) "4377bdf656040a18d8caf4d9f409746f1f9e6396"
13) "import-rate"
14) (integer) 19243
15) "jobs-in"
16) (integer) 3462847
17) "jobs-out"
18) (integer) 3389522
19) "pause"
20) "none"
```

Most fields should be obvious. The `import-from` field shows a list of node IDs this node is importing jobs from, for this queue, in order to serve client requests. The `import-rate` is the instantaneous rate of jobs/sec we import in order to handle our outgoing traffic (GETJOB commands). `blocked` is the number of clients blocked on this queue right now. `age` and `idle` are reported in seconds. The jobs-in and jobs-out counters are incremented every time a job is enqueued or dequeued for any reason.

#### `QPEEK <queue-name> <count>`

Return, without consuming from the queue, *count* jobs. If *count* is positive the specified number of jobs is returned from the oldest to the newest (in the same best-effort FIFO order as GETJOB). If *count* is negative the command changes behavior and shows the *count* newest jobs, from the newest to the oldest.

#### `ENQUEUE <job-id> ... <job-id>`

Queue jobs if not already queued.

#### `DEQUEUE <job-id> ... <job-id>`

Remove the jobs from the queue.

#### `DELJOB <job-id> ... <job-id>`

Completely delete a job from a node. Note that this is similar to `FASTACK`, but limited to a single node since no `DELJOB` cluster bus message is sent to other nodes.

#### `SHOW <job-id>`

Describe the job.
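
For example (an illustrative session; the reply layout of QPEEK shown here mirrors GETJOB and is indicative), using some of the commands above to inspect a queue without consuming anything:

    127.0.0.1:7711> QLEN myqueue
    (integer) 3
    127.0.0.1:7711> QPEEK myqueue 1
    1) 1) "myqueue"
       2) "D-dcb833cf-8YL1NT17e9+wsA/09NqxscQI-05a1"
       3) "oldest-job"
    127.0.0.1:7711> QPEEK myqueue -1
    1) 1) "myqueue"
       2) "D-dcb833cf-Mq0GJHhZ2F2vcm2SGHir9U6F-05a1"
       3) "newest-job"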

#### `QSCAN [<cursor>] [COUNT <count>] [BUSYLOOP] [MINLEN <len>] [MAXLEN <len>] [IMPORTRATE <rate>]`

The command provides an interface to iterate all the existing queues in the local node, providing a cursor in the form of an integer that is passed to the next command invocation. During the first call the cursor must be 0, in the next calls the cursor returned by the previous call is used. The iterator guarantees to return all the elements but may return duplicated elements.

Options:

* `COUNT <count>` A hint about how much work to do per iteration.
* `BUSYLOOP` Block and return all the elements in a busy loop.
* `MINLEN <len>` Don't return elements with less than *count* jobs queued.
* `MAXLEN <len>` Don't return elements with more than *count* jobs queued.
* `IMPORTRATE <rate>` Only return elements with a job import rate (from other nodes) `>=` *rate*.

The cursor argument can be in any place: the first non-matching option that has the form of a valid unsigned number will be used as the cursor.

#### `JSCAN [<cursor>] [COUNT <count>] [BUSYLOOP] [QUEUE <queue>] [STATE <state1> STATE <state2> ... STATE <stateN>] [REPLY all|id]`

The command provides an interface to iterate all the existing jobs in the local node, providing a cursor in the form of an integer that is passed to the next command invocation. During the first call the cursor must be 0, in the next calls the cursor returned by the previous call is used. The iterator guarantees to return all the elements but may return duplicated elements.

Options:

* `COUNT <count>` A hint about how much work to do per iteration.
* `BUSYLOOP` Block and return all the elements in a busy loop.
* `QUEUE <queue>` Return only jobs in the specified queue.
* `STATE <state>` Return jobs in the specified state. Can be used multiple times for a logical OR.
* `REPLY <type>` Job reply type. Type can be `all` or `id`. Default is to report just the job ID. If `all` is specified the full job state is returned like for the SHOW command.

The cursor argument can be in any place: the first non-matching option that has the form of a valid unsigned number will be used as the cursor.
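
Both scans follow the same cursor protocol. For example (illustrative output), iterating the queues that have at least 1000 queued jobs with QSCAN:

    127.0.0.1:7711> QSCAN 0 COUNT 100 MINLEN 1000
    1) "288"
    2) 1) "queue1"
       2) "queue2"
    127.0.0.1:7711> QSCAN 288 COUNT 100 MINLEN 1000
    1) "0"
    2) 1) "queue3"

The first element of the reply is the cursor to pass to the next call; as in Redis SCAN-style iterators, a returned cursor of 0 means the iteration is complete.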

#### `PAUSE <queue-name> option1 [option2 ... optionN]`

Control the paused state of a queue, possibly broadcasting the command to other nodes in the cluster. Disque queues can be paused in input, in output, or in both directions. Pausing a queue makes it unavailable for input or output operations. Specifically:

A queue paused in input will change behavior in the following ways:

1. ADDJOB returns a `-PAUSED` error for queues paused in input.
2. The node where the queue is paused no longer accepts replicating jobs for this queue when requested by other nodes. Since ADDJOB by default uses synchronous replication, this means that if the queue is paused in enough nodes, adding jobs with a specified level of replication may fail. In general the node where the queue is paused will not create new jobs for this queue in the local node.
3. The node no longer accepts ENQUEUE messages from other nodes. Those messages are usually used by nodes in out of memory conditions that replicate jobs externally (not holding a copy), in order to put the job in the queue of some random node among the nodes having a copy of the job.
4. Active jobs that reach their retry time are not put back into the queue. Instead their retry timer is updated and the node will try again later.

Basically a queue paused in input never creates new jobs for this queue, and never puts active jobs (jobs for which the node has a copy but which are not currently queued) back in the queue, for all the time the queue is paused.

A queue paused in output instead will behave in the following way:

1. GETJOB will block even if there are jobs available in the specified queue, instead of serving the jobs. But GETJOB will unblock if the queue output pause is cleared later.
2. The node will not provide jobs to other nodes in the context of node federation, for paused queues.

So a queue paused in output will stop acting as a source of messages for both local and non-local clients.

The paused state can be set for each queue using the PAUSE command followed by options to specify how to change the paused state. Possible options are:

* **in**: pause the queue in input.
* **out**: pause the queue in output.
* **all**: pause the queue in input and output (same as specifying both the **in** and **out** options).
* **none**: clear the paused state in input and output.
* **state**: just report the current queue state.
* **bcast**: send a PAUSE command to all the reachable nodes of the cluster to set the same queue in the other nodes to the same state.

The command always returns the state of the queue after the execution of the specified options, so the return value is one of **in**, **out**, **all**, **none**.

Queues paused in input or output are never evicted to reclaim memory, even if they are empty and inactive for a long time, since otherwise the paused state would be forgotten.

For example, in order to block output for the queue `myqueue` in all the currently reachable nodes, the following command should be sent to a single node:

    PAUSE myqueue out bcast

To specify all is the same as specifying both in and out, so the two following forms are equivalent:

    PAUSE myqueue in out
    PAUSE myqueue all

To just get the current state use:

    PAUSE myqueue state
    "none"

Special handling of messages with RETRY set to 0
===

In order to provide a coherent API, messages with at-most-once delivery semantics are still retained after being delivered a first time, and should be acknowledged like any other message. Of course the acknowledge is not mandatory, since the message may be lost and there is no way for the receiver to get the same message again, given that the message has a retry value of 0.

In order to avoid non-acknowledged messages with retry set to 0 leaking into Disque and eating all the memory, when the Disque server memory is full and starts to evict, it does not just evict acknowledged messages, but can also evict non-acknowledged messages having, at the same time, the following two properties:

1. Their retry is set to 0.
2. The job was already delivered.

In theory acknowledging a job that will never be retried is a waste of time and resources, however this design has hidden advantages:

1. The API is exactly the same for all kinds of jobs.
2. After the job is delivered, it is still possible to examine it. Observability is a very good property of messaging systems.

However, not acknowledging such jobs does not result in big issues, since they are eventually evicted during memory pressure.

Adding and removing nodes at runtime
===

Adding nodes is trivial: it just consists of starting a new node and sending it a `CLUSTER MEET` command. Assuming the node you just started is located at address 192.168.1.10 port 7714, and a random (you can use any) node of the existing cluster is located at 192.168.1.9 port 7711, all you need to do is:

    ./disque -h 192.168.1.10 -p 7714 cluster meet 192.168.1.9 7711

Note that you can invert the source and destination arguments and the new node will still join the cluster.

It does not matter whether it is the old node that meets the new one, or the other way around.

In order to remove a node, it is possible to use the crude way of just shutting it down, and then using `CLUSTER FORGET <node-id>` in all the other nodes in order to remove references to it from the configuration of the other nodes. However this means that, for example, messages that had a replication factor of 3, with one of the replicas on the node you are shutting down, are suddenly left with just 2 replicas even if no *actual* failure happened. Moreover, if the node you are removing had messages in queue, you'll need to wait for the retry time before the messages are queued again.

For all these reasons, Disque has a better way to remove nodes, which is described in the next section.

Graceful removal of nodes
===

In order to empty a node of its content before removing it, it is possible to use a feature that puts a node in *leaving* state. To enable this feature just contact the node to remove, and use the following command:

    CLUSTER LEAVING yes

The node will start advertising itself as *leaving*, so in a matter of seconds the whole cluster will know it (if there are partitions, all the nodes will eventually be informed when the partition heals), and this is what happens when the node is in this state:

1. When the node receives `ADDJOB` commands, it performs external replication, like when a node is near the memory limits. This means that it will make sure to create the number of replicas of the message in the cluster **without using itself** as a replica. So no new messages are created on a node which is leaving.
2. The node starts to send `-LEAVING` messages to all the clients that use `GETJOB` but would block waiting for jobs. The `-LEAVING` error means the client should connect to another node. Clients that were already blocked waiting for messages will be unblocked and a `-LEAVING` error will be sent to them as well.
3. The node no longer sends `NEEDJOBS` messages in the context of Disque federation, so it will never ask other nodes to transfer messages to it.
4. The node and all the other nodes will advertise it with a bad priority in the `HELLO` command output, so that clients will select a different node.
5. The node will no longer create *dummy acks* in response to an `ACKJOB` command about a job it does not know.

All these behavior changes result in the node participating only as a source of messages, so eventually its message count will drop to zero (it is possible to check for this condition using `INFO jobs`). When this happens the node can be stopped and removed from the other nodes' tables using `CLUSTER FORGET` as described in the section above.

Client libraries
===

Disque uses the same protocol as Redis itself. Adapting Redis clients, or using them directly, should be pretty easy. However note that Disque's default port is 7711 and not 6379.

While a vanilla Redis client may work well with Disque, clients should optionally use the following protocol in order to connect with a Disque cluster:

1. The client should be given a number of IP addresses and ports where nodes are located. The client should select random nodes and should try to connect until an available one is found.
2. On a successful connection the `HELLO` command should be used in order to retrieve the Node ID and other potentially useful information (server version, number of nodes).
3. If a consumer sees a high message rate received from foreign nodes, it may optionally have logic in order to retrieve messages directly from the nodes where producers are producing the messages for a given topic. The consumer can easily check the source of the messages by checking the Node ID prefix in the message IDs.
4. The `GETJOB` command, or other commands, may return a `-LEAVING` error instead of blocking. This error should be considered by the client library as a request to connect to a different node, since the node it is connected to is not able to serve the request, as it is leaving the cluster. Nodes in this state have a very high *priority* number published via `HELLO`, so they will unlikely be picked at the next connection attempt.

This way producers and consumers will eventually try to minimize node message exchanges whenever possible.
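
For reference, a `HELLO` reply looks something like the following (an illustrative layout: the hello format version, this node's ID, then one entry per known node with its ID, IP address, port and priority; node IDs are the ones used in the QSTAT example above):

    127.0.0.1:7711> HELLO
    1) (integer) 1
    2) "dcb833cf8f42fbb7924d92335ff6d67d3cea6e3d"
    3) 1) "dcb833cf8f42fbb7924d92335ff6d67d3cea6e3d"
       2) "127.0.0.1"
       3) "7711"
       4) "1"
    4) 1) "4377bdf656040a18d8caf4d9f409746f1f9e6396"
       2) "127.0.0.1"
       3) "7712"
       4) "1"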

So basically you could perform basic usage using just a Redis client, however there are already specialized client libraries implementing a more specialized API on top of Disque:

*C++*

- [disque C++ client](https://github.com/zhengshuxin/acl/tree/master/lib_acl_cpp/samples/disque)

*Common Lisp*

- [cl-disque](https://github.com/CodyReichert/cl-disque)

*Elixir*

- [exdisque](https://github.com/mosic/exdisque)

*Erlang*

- [edisque](https://github.com/nacmartin/edisque)

*Go*

- [disque-go](https://github.com/zencoder/disque-go)
- [go-disque](https://github.com/EverythingMe/go-disque)
- [disque](https://github.com/goware/disque)

*Java*

- [jedisque](https://github.com/xetorthio/jedisque)
- [spinach](https://github.com/mp911de/spinach)

*Node.js*

- [disque.js](https://www.npmjs.com/package/disque.js)
- [thunk-disque](https://github.com/thunks/thunk-disque)
- [disqueue-node](https://www.npmjs.com/package/disqueue-node)

*Perl*

- [perl-disque](https://github.com/lovelle/perl-disque)

*PHP*

- [phpque](https://github.com/s12v/phpque) (PHP/HHVM)
- [disque-php](https://github.com/mariano/disque-php) ([Composer/Packagist](https://packagist.org/packages/mariano/disque-php))
- [disque-client-php](https://github.com/mavimo/disque-client-php) ([Composer/Packagist](https://packagist.org/packages/mavimo/disque-client))
- [phloppy](https://github.com/0x20h/phloppy) ([Composer/Packagist](https://packagist.org/packages/0x20h/phloppy))

*Python*

- [disq](https://github.com/ryansb/disq) ([PyPi](https://pypi.python.org/pypi/disq))
- [pydisque](https://github.com/ybrs/pydisque) ([PyPi](https://pypi.python.org/pypi/pydisque))
- [django-q](https://github.com/koed00/django-q) ([PyPi](https://pypi.python.org/pypi/django-q))

*Ruby*

- [disque-rb](https://github.com/soveran/disque-rb)
- [disque_jockey](https://github.com/DevinRiley/disque_jockey)

*Rust*

- [disque-rs](https://github.com/seppo0010/disque-rs)

*.NET*

- [Disque.Net](https://github.com/ziyasal/Disque.Net)

Implementation details
===

Jobs replication strategy
---

1. Disque tries to replicate to W-1 (or W during out of memory) reachable nodes, shuffled.
2. The cluster REPLJOB message is used to replicate a job to multiple nodes; the job is sent together with the list of nodes that may have a copy.
3. If the required replication is not reached promptly, the job is sent to one additional node every 50 milliseconds. When this happens, a new REPLJOB message is also re-sent to each node that may already have a copy, in order to refresh the list of nodes that have a copy.
4. If the specified synchronous replication timeout is reached, the node that originally received the ADDJOB command from the client gives up and returns an error to the client. When this happens the node performs a best-effort procedure to delete the job from the nodes that may have already received a copy of the job.

Cluster topology
---

Disque is a full mesh, with each node connected to every other node. Disque performs distributed failure detection via gossip, only in order to adjust the replication strategy (try reachable nodes first when trying to replicate a message), and in order to inform clients about non-reachable nodes when they want the list of nodes they can connect to. Since Disque is multi-master, the event of nodes failing is not handled in any special way.

Cluster messages
---

Nodes communicate via a set of messages, using the node-to-node message bus. A few of the messages are used in order to check that other nodes are reachable and to mark nodes as failing. Those messages are PING, PONG and FAIL. Since failure detection is only used to adjust the replication strategy (talk with reachable nodes first in order to improve latency), the details are not yet described.

Other messages are more important since they are used in order to replicate jobs, re-issue jobs while trying to minimize multiple deliveries, and in order to auto-federate to serve consumers when messages are produced in different nodes compared to where consumers are.

The following is a list of messages and what they do, split by category. Note that this is just an informal description: the next sections describing the Disque state machine contain a more detailed description of the behavior caused by message receptions, and in what cases they are generated.

Cluster messages related to jobs replication and queueing
---

* REPLJOB: ask the receiver to replicate a job, that is, to add a copy of the job among the registered jobs in the target node. When a job is accepted, the receiver replies with GOTJOB to the sender. A job may not be accepted if the receiving node is near out of memory. In this case GOTJOB is not sent and the message is discarded.
* GOTJOB: the reply to REPLJOB to confirm the job was replicated.
* ENQUEUE: ask a node to put a given job into its queue. This message is used when a job is created by a node that does not want to take a copy, so it asks another node (among the ones that acknowledged the job replication) to queue it for the first time. If this message is lost, after the retry time some node will try to re-queue the message, unless retry is set to zero.
* WILLQUEUE: this message is sent 500 milliseconds before a job is re-queued, to all the nodes that may have a copy of the message according to the sender table. If some of the receivers already have the job queued, they'll reply with QUEUED in order to prevent the sender from queueing the job again (avoiding multiple delivery when possible).
* QUEUED: when a node re-queues a job, it sends QUEUED to all the nodes that may have a copy of the message, so that the other nodes will update the time at which they'll retry to queue the job. Moreover, every node that already has the same job in queue, but with a node ID which is lexicographically smaller than the sending node, will de-queue the message in order to best-effort de-dup messages that may be queued into multiple nodes at the same time.

Cluster messages related to ACKs propagation and garbage collection
---

* SETACK: this message is sent to force a node to mark a job as successfully delivered (acknowledged by the worker): the job will no longer be considered active, and will never be re-queued by the receiving node. Also SETACK is sent to the sender if the receiver of a QUEUED or WILLQUEUE message already has the same job marked as acknowledged (successfully delivered).
* GOTACK: this message is sent in order to acknowledge a SETACK message. The receiver can mark a given node that may have a copy of a job as informed about the fact that the job was acknowledged by the worker. Nodes delete (garbage collect) a message cluster wide when they believe all the nodes that may have a copy are informed about the fact the job was acknowledged.
* DELJOB: ask the receiver to remove a job. It is only sent in order to perform garbage collection of jobs by nodes that are sure the job was already delivered correctly. Usually the node sending DELJOB only does that when it is sure that all the nodes that may have a copy of the message already marked the message as delivered, however after some time the job GC may be performed anyway, in order to reclaim memory, and in that case an otherwise avoidable multiple delivery of a job may happen. The DELJOB message is also used in order to implement *fast acknowledges*.

Cluster messages related to nodes federation
---

* NEEDJOBS(queue,count): the sender asks the receiver to obtain messages for a given queue, possibly *count* messages, but this is only a hint for congestion control and message optimization; the receiver is free to reply with whatever number of messages. NEEDJOBS messages are delivered in two ways: broadcast to every node in the cluster from time to time, in order to discover new source nodes for a given queue, or more often, to a set of nodes that recently replied with jobs for a given queue. This latter mechanism is called an *ad hoc* delivery, and is possible since every node remembers for some time the set of nodes that were recent providers of messages for a given queue. In both cases, NEEDJOBS messages are delivered with exponential delays, with the exception of queues that drop to zero messages and have a positive recent import rate; in this case an ad hoc NEEDJOBS delivery is performed regardless of the last time the message was delivered, in order to allow a continuous stream of messages under load.
* YOURJOBS(array of messages): the reply to NEEDJOBS. An array of serialized jobs, usually all about the same queue (but future optimizations may allow sending jobs from different queues). Jobs in YOURJOBS replies are extracted from the local queue, and queued at the receiver node's queue with the same name. So even messages with a retry set to 0 (at most once delivery) still guarantee the safety rule, since a given message may be in the source node, on the wire, or already received in the destination node. If a YOURJOBS message is lost, at-least-once jobs will be re-queued later when the retry time is reached.

Disque state machine
---

This section shows the most interesting (as in less obvious) parts of the state machine each Disque node implements. While practically it is a single state machine, it is split into sections.

The state machine description uses a convention that is not standard but should look familiar, since it is event driven, made of actions performed upon: message receptions in the form of commands received from clients, messages received from other cluster nodes, timers, and procedure calls.

Note that `job` is a job object with the following fields:

1. `job.delivered`: a list of nodes that may have this message. This list does not need to be complete, it is used for best-effort algorithms.
2. `job.confirmed`: a list of nodes that confirmed reception, by replying with a GOTJOB message (for job replication) or a GOTACK message (for ACK propagation).
3. `job.id`: the job 40 chars ID.
4. `job.state`: the job state among: `wait-repl`, `active`, `queued`, `acknowledged`.
5. `job.replicate`: the replication factor for this job.
6. `job.qtime`: the time at which we need to re-queue the job.

List fields such as `.delivered` and `.confirmed` support methods like `.size` to get the number of elements.

States are as follows:

1. `wait-repl`: the job is waiting to be synchronously replicated.
2. `active`: the job is active, either it reached the replication factor in the originating node, or it was created because the node received a `REPLJOB` message from another node.
3. `queued`: the job is active and is also pending in a queue in this node.
4. `acknowledged`: the job is no longer active since a client confirmed the reception using the `ACKJOB` command, or another Disque node sent a `SETACK` message for the job.

Generic functions
---

PROCEDURE `LOOKUP-JOB(string job-id)`:

1. If a job with the specified id is found, return the corresponding job object.
2. Otherwise return NULL.

PROCEDURE `UNREGISTER(object job)`:

1. Delete the job from memory, and if queued, from the queue.

PROCEDURE `ENQUEUE(job)`:

1. If `job.state == queued` return ASAP.
2. Add `job` into `job.queue`.
3. Change `job.state` to `queued`.

PROCEDURE `DEQUEUE(job)`:

1. If `job.state != queued` return ASAP.
2. Remove `job` from `job.queue`.
3. Change `job.state` to `active`.

ON RECV cluster message `DELJOB(string job-id)`:

1. job = Call `LOOKUP-JOB(job-id)`.
2. IF `job != NULL` THEN call `UNREGISTER(job)`.

Job replication state machine
---

This part of the state machine documents how clients add jobs to the cluster and how the cluster replicates jobs across different Disque nodes.

ON RECV client command `ADDJOB(string queue-name, string body, integer replicate, integer retry, integer ttl, ...)`:

1. Create a job object in `wait-repl` state, having as body, ttl, retry and queue name the specified values.
2. Send REPLJOB(job.serialized) cluster message to `replicate-1` nodes.
3. Block the client without replying.

Step 3: we'll reply to the client in step 4 of the `GOTJOB` message processing.

ON RECV cluster message `REPLJOB(object serialized-job)`:

1. job = Call `LOOKUP-JOB(serialized-job.id)`.
2. IF `job != NULL` THEN: job.delivered = UNION(job.delivered,serialized-job.delivered). Return ASAP, since we have the job.
3. Create a job from the serialized-job information.
4. job.state = `active`.
5. Reply to the sender with `GOTJOB(job.id)`.

Step 1: we may already have the job, since REPLJOB may be duplicated.

Step 2: if we already have the same job, we update the list of nodes that may have a copy of this job, performing the union of the list of nodes we have with the list of nodes in the serialized job.

ON RECV cluster message `GOTJOB(object serialized-job)`:

1. job = Call `LOOKUP-JOB(serialized-job.id)`.
2. IF `job == NULL` OR `job.state != wait-repl` return ASAP.
3. Add the sender node to `job.confirmed`.
Job replication state machine
---

This part of the state machine documents how clients add jobs to the cluster and how the cluster replicates jobs across different Disque nodes.

ON RECV client command `ADDJOB(string queue-name, string body, integer replicate, integer retry, integer ttl, ...)`:

1. Create a job object in `wait-repl` state, having as body, ttl, retry and queue name the specified values.
2. Send REPLJOB(job.serialized) cluster message to `replicate-1` nodes.
3. Block the client without replying.

Step 3: We'll reply to the client in step 4 of the `GOTJOB` message processing.

ON RECV cluster message `REPLJOB(object serialized-job)`:

1. job = Call `LOOKUP-JOB(serialized-job.id)`.
2. IF `job != NULL` THEN: job.delivered = UNION(job.delivered,serialized-job.delivered). Return ASAP, since we have the job.
3. Create a job from the serialized-job information.
4. job.state = `active`.
5. Reply to the sender with `GOTJOB(job.id)`.

Step 1: We may already have the job, since REPLJOB may be duplicated.

Step 2: If we already have the same job, we update the list of nodes that may have a copy of this job, performing the union of the list of nodes we have with the list of nodes in the serialized job.

ON RECV cluster message `GOTJOB(object serialized-job)`:

1. job = Call `LOOKUP-JOB(serialized-job.id)`.
2. IF `job == NULL` OR `job.state != wait-repl` Return ASAP.
3. Add the sender node to `job.confirmed`.
4. IF `job.confirmed.size == job.replicate` THEN change `job.state` to `active`, call ENQUEUE(job), and reply to the blocked client with `job.id`.

Step 4: As we receive enough confirmations via `GOTJOB` messages, we finally reach the replication factor required by the user and consider the message active.

TIMER, firing every 50 milliseconds while a job has still not reached the expected replication factor:

1. Select an additional node not already listed in `job.delivered`, call it `node`.
2. Add `node` to `job.delivered`.
3. Send a REPLJOB(job.serialized) cluster message to each node in `job.delivered`.

Step 3: We send the message to every node again, so that each node will have a chance to update `job.delivered` with the new nodes. It is not required for each node to know the full list of nodes that may have a copy, but doing so improves our approximation of single delivery whenever possible.

Job re-queueing state machine
---

This part of the state machine documents how Disque nodes put a given job back into the queue after the specified retry time elapsed without the job being acknowledged.

TIMER, firing 500 milliseconds before the retry time elapses:

1. Send `WILLQUEUE(job.id)` to every node in `job.delivered`.

TIMER, firing when `job.qtime` time is reached:

1. If `job.retry == 0` THEN return ASAP.
2. Call ENQUEUE(job).
3. Update `job.qtime` to NOW + job.retry.
4. Send a `QUEUED(job.id)` message to each node in `job.delivered`.

Step 1: At-most-once jobs never get enqueued again.

Step 3: We'll retry again after the retry period.

ON RECV cluster message `WILLQUEUE(string job-id)`:

1. job = Call `LOOKUP-JOB(job-id)`.
2. IF `job == NULL` THEN return ASAP.
3. IF `job.state == queued` SEND `QUEUED(job.id)` to `job.delivered`.
4. IF `job.state == acknowledged` SEND `SETACK(job.id)` to the sender.

Step 3: We broadcast the message since likely the other nodes are going to retry as well.

Step 4: SETACK processing is documented below in the acknowledges section of the state machine description.

ON RECV cluster message `QUEUED(string job-id)`:

1. job = Call `LOOKUP-JOB(job-id)`.
2. IF `job == NULL` THEN return ASAP.
3. IF `job.state == acknowledged` THEN return ASAP.
4. IF `job.state == queued` THEN, if the sender node ID is greater than my node ID, call DEQUEUE(job).
5. Update `job.qtime` setting it to NOW + job.retry.

Step 4: If multiple nodes re-queue the job at about the same time, because of race conditions or network partitions that make `WILLQUEUE` not effective, then `QUEUED` forces the receiving nodes to dequeue the message if the sender has a greater node ID, lowering the probability of unwanted multiple deliveries (a C sketch of this tie-break is shown after these notes).

Step 5: Now the message is already queued somewhere else, but the node will retry again after the retry time.
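A minimal sketch of the `QUEUED` handler in C, under stated assumptions: `lookupJob` and `dequeueJob` are hypothetical stand-ins for the LOOKUP-JOB and DEQUEUE procedures, node IDs are assumed to be 40 character hex strings as in the Redis Cluster bus, and `retry` is assumed to be expressed in seconds while times are in milliseconds.

```c
#include <string.h>
#include <stdint.h>

typedef uint64_t mstime_t;
#define NODE_ID_LEN 40 /* Hex node IDs, as in the Redis Cluster bus. */

enum { JOB_STATE_ACTIVE, JOB_STATE_QUEUED, JOB_STATE_ACKNOWLEDGED };

typedef struct job {
    int state;        /* One of the job states above. */
    mstime_t qtime;   /* Next re-queue time, in milliseconds. */
    int retry;        /* Retry period, in seconds. */
} job;

/* Hypothetical helpers mirroring LOOKUP-JOB and DEQUEUE. */
job *lookupJob(const char *job_id);
void dequeueJob(job *j);

extern char myNodeId[NODE_ID_LEN];

/* Handler for the QUEUED cluster message: sender_id identifies the node
 * that just re-queued the job with the given ID. */
void clusterHandleQueued(const char *sender_id, const char *job_id,
                         mstime_t now)
{
    job *j = lookupJob(job_id);
    if (j == NULL) return;                          /* Step 2. */
    if (j->state == JOB_STATE_ACKNOWLEDGED) return; /* Step 3. */
    /* Step 4: if we also have the job queued, the node with the greater
     * node ID keeps it queued, and the other side dequeues its copy. */
    if (j->state == JOB_STATE_QUEUED &&
        memcmp(sender_id, myNodeId, NODE_ID_LEN) > 0)
        dequeueJob(j);
    j->qtime = now + (mstime_t)j->retry*1000;       /* Step 5. */
}
```

Using the node ID as a deterministic tie-breaker means all parties agree on which copy stays queued without any extra round trip.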
Acknowledged jobs garbage collection state machine
---

This part of the state machine is used in order to garbage collect acknowledged jobs, when a job finally gets acknowledged by a client.

PROCEDURE `ACK-JOB(job)`:

1. If the job state is already `acknowledged`, do nothing and return ASAP.
2. Change the job state to `acknowledged`, dequeue the job if queued, schedule the first call to TIMER.

PROCEDURE `START-GC(job)`:

1. Send `SETACK(job.delivered.size)` to each node that is listed in `job.delivered` but is not listed in `job.confirmed`.
2. IF `job.delivered.size == 0`, THEN send `SETACK(0)` to every node in the cluster.

Step 2: this is an ACK about a job we don't know. In that case, we can just broadcast the acknowledgement, hoping somebody knows about the job and replies.

ON RECV client command `ACKJOB(string job-id)`:

1. job = Call `LOOKUP-JOB(job-id)`.
2. IF job is `NULL`, ignore the message and return.
3. Call `ACK-JOB(job)`.
4. Call `START-GC(job)`.

ON RECV cluster message `SETACK(string job-id, integer may-have)`:

1. job = Call `LOOKUP-JOB(job-id)`.
2. Call ACK-JOB(job) IF job is not `NULL`.
3. Reply with GOTACK IF `job == NULL OR job.delivered.size <= may-have`.
4. IF `job != NULL` AND `job.delivered.size > may-have` THEN call `START-GC(job)`.
5. IF `may-have == 0 AND job != NULL`, reply with `GOTACK(1)` and call `START-GC(job)`.

Steps 3 and 4 make sure that, among the reachable nodes that may have a message, garbage collection will be performed by the node that is aware of more nodes that may have a copy.

Step 5 instead is used in order to start a GC attempt if we received a SETACK message from a node just holding a dummy ACK (an acknowledgement about a job it was not aware of).

ON RECV cluster message `GOTACK(string job-id, bool known)`:

1. job = Call `LOOKUP-JOB(job-id)`. Return ASAP IF `job == NULL`.
2. Call `ACK-JOB(job)`.
3. IF `known == true AND job.delivered.size > 0` THEN add the sender node to `job.delivered`.
4. IF `(known == true) OR (known == false AND job.delivered.size > 0) OR (known == false AND sender is an element of job.delivered)` THEN add the sender node to `job.confirmed`.
5. IF `job.delivered.size > 0 AND job.delivered.size == job.confirmed.size`, THEN send `DELJOB(job.id)` to every node in the `job.delivered` list and call `UNREGISTER(job)`.
6. IF `job.delivered.size == 0 AND known == true`, THEN call `UNREGISTER(job)`.
7. IF `job.delivered.size == 0 AND job.confirmed.size == cluster.size` THEN call `UNREGISTER(job)`.

Step 3: If `job.delivered.size` is zero, it means that the node just holds a *dummy ack* for the job: the node has an acknowledged job it created on the fly because a client acknowledged (via the ACKJOB command) a job it was not aware of.

Step 6: we don't have to hold a dummy acknowledged job if there are nodes that have the job already acknowledged.

Step 7: this happens when nobody knows about a job, for example when a client acknowledged a wrong job ID.

TIMER, from time to time (exponential backoff with random error), for every acknowledged job in memory:

1. Call `START-GC(job)`.
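From a client's point of view, all of the above reduces to three commands: add a job, fetch it, acknowledge it. Since Disque speaks the Redis protocol, the bundled hiredis library can be used directly. The sketch below is illustrative only: it assumes the `ADDJOB <queue> <body> <ms-timeout>` and `GETJOB FROM <queue>` wire syntax with options such as REPLICATE, RETRY and TTL omitted, and assumes a node listening on the default Disque port 7711; check the command documentation for the full forms.

```c
#include <stdio.h>
#include <stdlib.h>
#include "hiredis.h"

int main(void) {
    /* Disque speaks the Redis protocol; 7711 is assumed as default port. */
    redisContext *c = redisConnect("127.0.0.1", 7711);
    if (c == NULL || c->err) {
        fprintf(stderr, "Connection error\n");
        exit(1);
    }

    /* ADDJOB <queue> <body> <ms-timeout>: blocks up to the timeout while
     * the job is synchronously replicated, then returns the job ID. */
    redisReply *reply = redisCommand(c, "ADDJOB myqueue %s 0", "hello");
    if (reply && (reply->type == REDIS_REPLY_STATUS ||
                  reply->type == REDIS_REPLY_STRING))
        printf("Added job %s\n", reply->str);
    freeReplyObject(reply);

    /* GETJOB FROM <queue>: returns an array of (queue, id, body)
     * triplets; with a single queue and no COUNT we expect one job. */
    reply = redisCommand(c, "GETJOB FROM myqueue");
    if (reply && reply->type == REDIS_REPLY_ARRAY && reply->elements == 1) {
        redisReply *job = reply->element[0];
        const char *id = job->element[1]->str;
        const char *body = job->element[2]->str;
        printf("Got job %s: %s\n", id, body);

        /* ACKJOB <id>: triggers the garbage collection state machine
         * described above, eventually deleting the job cluster-wide. */
        redisReply *ack = redisCommand(c, "ACKJOB %s", id);
        freeReplyObject(ack);
    }
    freeReplyObject(reply);
    redisFree(c);
    return 0;
}
```

Note that with RETRY set to 0 at ADDJOB time, failing to call ACKJOB simply drops the message (at-most-once), while with a positive retry time the job would be re-queued as described in the re-queueing state machine.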
Limitations
===

* Disque is new code, not tested, and will require quite some time to reach production quality. It is likely very buggy and may contain wrong assumptions or tradeoffs.
* As long as the software is not stable, the API may change in random ways without prior notification.
* It is possible that Disque spends too much effort in approximating single delivery during failures. The **fast acknowledge** concept and command lets the user opt out of this effort, but I may still change the Disque implementation and internals in the future if I see that the user base really does not care about multiple deliveries during partitions.
* There is probably still a lot of dead Redis code inside that could be removed.
* Disque was designed a bit in *astronaut mode*, not triggered by an actual use case of mine, but more in response to what I was seeing people doing with Redis as a message queue and with other message queues. However I'm not an expert; if I succeeded in shipping something useful for most users, this is kind of an accomplishment. Otherwise it may just be that Disque is pretty useless.
* Like Redis, Disque is single threaded. While in Redis there are stronger reasons to do so, in Disque there is no manipulation of complex data structures, so maybe in the future it should be moved into a threaded server. We need to see what happens in real use cases in order to understand if it's worth it or not.
* The number of jobs in a Disque process is limited to the amount of memory available. Again, while this makes sense in Redis (IMHO), in Disque there are definitely simple ways to circumvent this limitation, like logging messages on disk when the server is out of memory and consuming back the messages when memory pressure is acceptable again. However in general, like in Redis, manipulating data structures in memory is a big advantage from the point of view of the implementation simplicity and the functionality we can provide to users.
* Disque is completely not optimized for speed, and was never profiled so far. I'm currently not aware of whether it is slow, fast, or average, compared to other messaging solutions. For sure it is not going to have Redis-like numbers, because it does a lot more work at each command. For example when a job is added, it is serialized and transmitted to other `N` servers. There is a lot more message passing between nodes involved, and so forth. The good news is that, being totally unoptimized, there is room for improvements.
* The ability of the federation to handle low and high loads well, without incurring congestion or high latency, was not tested enough. The algorithm is reasonable but may fall short under many load patterns.
* The amount of tested code paths and possible states is not enough.

FAQ
===

Is Disque part of Redis?
---

No, it is a standalone project; however a big part of the Redis networking source code, nodes message bus, libraries, and the client protocol were reused in this new project. In theory it was possible to extract the common code and release it as a framework to write distributed systems in C. However this is not a perfect solution either, since the projects are expected to diverge more and more in the future, and relying on a common foundation was hard. Moreover the initial effort to turn Redis into two different layers, an abstract server with networking stack and cluster bus on one side and the actual Redis implementation on the other, was a huge effort, way bigger than writing Disque itself. However, while it is a separate project, conceptually Disque is related to Redis, since it tries to solve a Redis use case in a vertical, ad-hoc way.

Who created Disque?
---

Disque is a side project of Salvatore Sanfilippo, aka @antirez.

Are there chances for this project to be actively developed?
---

Currently I consider this just a public alpha: if I see people happy to use it for the right reasons (i.e. it is better in some use cases compared to other message queues) I'll continue the development. Otherwise it was cool to develop anyway, I had much fun, and I definitely learned new things.

What happens when a node runs out of memory?
---

1. The maxmemory setting is mandatory in Disque, and defaults to 1GB.
2. When 75% of maxmemory is reached, Disque starts to replicate new jobs only to external nodes, without taking a local copy, so basically if there is free RAM in other nodes, adding still works.
3. When 95% of maxmemory is reached, Disque starts to evict data that does not violate the safety guarantees: for instance acknowledged jobs and inactive queues.
4. When 100% of maxmemory is reached, commands that may result in more memory being used are not processed at all, and the client is informed with an error.
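A condensed sketch of that policy in C. The names (`memoryPolicy`, `currentMemoryPolicy`) are made up for illustration and do not match the actual source; only the thresholds come from the list above.

```c
#include <stddef.h>

typedef enum {
    MEM_POLICY_NORMAL,    /* Below 75%: business as usual. */
    MEM_POLICY_NO_LOCAL,  /* >= 75%: replicate new jobs externally only. */
    MEM_POLICY_EVICT,     /* >= 95%: also evict acked jobs, idle queues. */
    MEM_POLICY_REJECT     /* >= 100%: refuse memory-growing commands. */
} memoryPolicy;

memoryPolicy currentMemoryPolicy(size_t used, size_t maxmemory) {
    if (used >= maxmemory) return MEM_POLICY_REJECT;
    if (used >= maxmemory / 100 * 95) return MEM_POLICY_EVICT;
    if (used >= maxmemory / 100 * 75) return MEM_POLICY_NO_LOCAL;
    return MEM_POLICY_NORMAL;
}
```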
Are there plans to add the ability to hold more jobs than the physical memory of a single node can handle?
---

Yes. In Disque it should be relatively simple to use the disk when memory is not available, since jobs are immutable and don't need to necessarily exist in memory at a given time. There are multiple strategies available. The current idea is that when an instance is out of memory, jobs are stored into a log file instead of memory. As more free memory becomes available in the instance, on-disk jobs are loaded back. However, in order to implement this, strong evidence of its general usefulness for the user base needs to be observed first.

When I consume and produce from different nodes, sometimes there is a delay in order for the jobs to reach the consumer, why?
---

Disque routing is not static: the cluster automatically tries to provide messages to nodes where consumers are attached. When there is high enough traffic (even one message per second is enough) nodes remember other nodes that were recently sources of jobs for a given queue, so it is possible to aggressively send messages asking for more jobs every time there are consumers waiting for more messages and the local queue is empty.

However, when the traffic is very low, information about recent sources of messages is discarded, and nodes rely on a more generic mechanism in order to discover other nodes that may have messages in the queues we are interested in (which is also used in high traffic conditions, in order to discover new sources of messages for a given queue).

For example, imagine a setup with two nodes, A and B.

1. A client attaches to node A and asks for jobs in the queue `myqueue`. Node A has no jobs enqueued, so the client is blocked.
2. After a few seconds another client produces messages into `myqueue`, but sending them to node B.

During step `1`, if there was no recent traffic of imported messages for this queue, node A has no idea about who may have messages for the queue `myqueue`. Any other node may have them, or none may. So it starts to broadcast `NEEDJOBS` messages to the whole cluster. However we can't spam the cluster with messages, so if no reply is received after the first broadcast, the next will be sent with a larger delay, and so forth. The delay is exponential, with a maximum value of 30 seconds (this parameter will likely be configurable in the future).

When there is some traffic instead, nodes send `NEEDJOBS` messages ASAP to other nodes that were recent sources of messages. Even when no reply is received, the next `NEEDJOBS` messages will be sent more aggressively to the subset of nodes that had messages in the past, with a delay that starts at 25 milliseconds and has a maximum value of two seconds.

In order to minimize the latency, `NEEDJOBS` messages are not throttled at all when:

1. A client consumed the last message from a given queue. Source nodes are informed immediately in order to receive messages before the node asks for more.
2. Blocked clients are served the last message available in the queue.

For more information, please refer to the file `queue.c`, especially the function `needJobsForQueue` and its callers.
Are messages re-enqueued in the queue tail or head or what?
---

Messages are put into the queue according to their *creation time* attribute. This means that they are enqueued in a best-effort order in the local node queue. Messages that need to be put back into the queue again because their delivery failed are usually (but not always) older than the messages already in the queue, so they'll likely be among the first to be delivered to workers.

What does Disque mean?
---

"DIStributed QUEue", but it is also a joke with "dis" as the negation (like in *dis*order) of the strict concept of queue, since Disque is not able to guarantee the strict ordering you expect from something called a *queue*. And because of this tradeoff it gains many other interesting things.

Community: how to get help and how to help
===

Get in touch with us in one of the following ways:

1. Post on [Stack Overflow](http://stackoverflow.com) using the `disque` tag. This is the preferred method to get general help about Disque: other users will easily find previous questions so we can incrementally build a knowledge base.
2. Join the `#disque` IRC channel at **irc.freenode.net**.
3. Create an Issue or Pull request if your question or issue is about the Disque implementation itself.

Thanks
===

I would like to say thank you to the following persons and companies.

* Pivotal, for allowing me to work on Disque, mostly in my spare time, but sometimes during work hours. Moreover Pivotal agreed to leave the copyright of the code to me. This is very generous. Thanks Pivotal!
* Michel Martens and Damian Janowski for providing early feedback about Disque while the project was still private.
* Everybody who is already writing client libraries, sending pull requests, or creating issues in order to move this forward from alpha to something actually usable.

disque-1.0-rc1/deps/Makefile:

# Redis dependency Makefile uname_S:= $(shell sh -c 'uname -s 2>/dev/null || echo not') CCCOLOR="\033[34m" LINKCOLOR="\033[34;1m" SRCCOLOR="\033[33m" BINCOLOR="\033[37;1m" MAKECOLOR="\033[32;1m" ENDCOLOR="\033[0m" default: @echo "Explicit target required" .PHONY: default # Prerequisites target .make-prerequisites: @touch $@ # Clean everything when CFLAGS is different ifneq ($(shell sh -c '[ -f .make-cflags ] && cat .make-cflags || echo none'), $(CFLAGS)) .make-cflags: distclean -(echo "$(CFLAGS)" > .make-cflags) .make-prerequisites: .make-cflags endif # Clean everything when LDFLAGS is different ifneq ($(shell sh -c '[ -f .make-ldflags ] && cat .make-ldflags || echo none'), $(LDFLAGS)) .make-ldflags: distclean -(echo "$(LDFLAGS)" > .make-ldflags) .make-prerequisites: .make-ldflags endif distclean: -(cd hiredis && $(MAKE) clean) > /dev/null || true -(cd linenoise && $(MAKE) clean) > /dev/null || true -(cd jemalloc && [ -f Makefile ] && $(MAKE) distclean) > /dev/null || true -(rm -f .make-*) .PHONY: distclean hiredis: .make-prerequisites @printf '%b %b\n' $(MAKECOLOR)MAKE$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR) cd hiredis && $(MAKE) static .PHONY: hiredis linenoise: .make-prerequisites @printf '%b %b\n' $(MAKECOLOR)MAKE$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR) cd linenoise && $(MAKE) .PHONY: linenoise JEMALLOC_CFLAGS= -std=gnu99 -Wall -pipe -g3 -O3 -funroll-loops $(CFLAGS) JEMALLOC_LDFLAGS= $(LDFLAGS) jemalloc: .make-prerequisites @printf '%b %b\n' $(MAKECOLOR)MAKE$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR) cd jemalloc && ./configure --with-jemalloc-prefix=je_ --enable-cc-silence CFLAGS="$(JEMALLOC_CFLAGS)" LDFLAGS="$(JEMALLOC_LDFLAGS)" cd jemalloc && $(MAKE) CFLAGS="$(JEMALLOC_CFLAGS)" LDFLAGS="$(JEMALLOC_LDFLAGS)" lib/libjemalloc.a .PHONY: jemalloc
disque-1.0-rc1/deps/README.md:

This directory contains all Redis dependencies, except for the libc that should be provided by the operating system.

* **Jemalloc** is our memory allocator, used as replacement for libc malloc on Linux by default. It has good performance and excellent fragmentation behavior. This component is upgraded from time to time.
* **geohash-int** is inside the dependencies directory but is actually part of the Redis project, since it is our private fork (heavily modified) of a library initially developed for Ardb, which is in turn a fork of Redis.
* **hiredis** is the official C client library for Redis. It is used by redis-cli, redis-benchmark and Redis Sentinel. It is part of the Redis official ecosystem but is developed externally from the Redis repository, so we just upgrade it as needed.
* **linenoise** is a readline replacement. It is developed by the same authors of Redis but is managed as a separate project and updated as needed.
* **lua** is Lua 5.1 with minor changes for security and additional libraries.

How to upgrade the above dependencies
===

Jemalloc
---

Jemalloc is unmodified. We only change settings via the `configure` script of Jemalloc, using the `--with-lg-quantum` option and setting it to the value of 3 instead of 4. This provides us with more size classes that better suit the Redis data structures, in order to gain memory efficiency. So in order to upgrade jemalloc:

1. Remove the jemalloc directory.
2. Substitute it with the new jemalloc source tree.

Geohash
---

This is never upgraded since it's part of the Redis project. If there are changes to merge from Ardb there is the need to manually check differences, but at this point the source code is pretty different.

Hiredis
---

Hiredis uses the SDS string library, which must be the same version used inside Redis itself. Hiredis is also very critical for Sentinel. Historically Redis often used forked versions of hiredis in one way or the other. In order to upgrade, it is advised to take a lot of care:

1. Check with diff whether the hiredis API changed and what impact it could have on Redis.
2. Make sure that the SDS library inside Hiredis and inside Redis are compatible.
3. After the upgrade, run the Redis Sentinel test.
4. Check manually that redis-cli and redis-benchmark behave as expected, since we have no tests for CLI utilities currently.

Linenoise
---

Linenoise is rarely upgraded, and only as needed. The upgrade process is trivial since Redis uses a non-modified version of linenoise, so to upgrade just do the following:

1. Remove the linenoise directory.
2. Substitute it with the new linenoise source tree.

Lua
---

We use Lua 5.1 and no upgrade is planned currently, since we don't want to break Lua scripts for new Lua features: in the context of Redis Lua scripts the capabilities of 5.1 are usually more than enough, the release is rock solid, and we definitely don't want to break old scripts.

So upgrading of Lua is up to the Redis project maintainers and should be a manual procedure performed by taking a diff between the different versions.

Currently we have at least the following differences between official Lua 5.1 and our version:

1. Makefile is modified to allow a different compiler than GCC.
2. We have the implementation source code, and directly link to the following external libraries: `lua_cjson.o`, `lua_struct.o`, `lua_cmsgpack.o` and `lua_bit.o`.
3. There is a security fix in `ldo.c`, line 498: the check for `LUA_SIGNATURE[0]` is removed in order to avoid direct bytecode execution.

disque-1.0-rc1/deps/hiredis/.gitignore:

/hiredis-test /examples/hiredis-example* /*.o /*.so /*.dylib /*.a

disque-1.0-rc1/deps/hiredis/.travis.yml:

language: c compiler: - gcc - clang script: make && make check

disque-1.0-rc1/deps/hiredis/CHANGELOG.md:

### 0.11.0 * Increase the maximum multi-bulk reply depth to 7. * Increase the read buffer size from 2k to 16k. * Use poll(2) instead of select(2) to support large fds (>= 1024). ### 0.10.1 * Makefile overhaul. Important to check out if you override one or more variables using environment variables or via arguments to the "make" tool. * Issue #45: Fix potential memory leak for a multi bulk reply with 0 elements being created by the default reply object functions. * Issue #43: Don't crash in an asynchronous context when Redis returns an error reply after the connection has been made (this happens when the maximum number of connections is reached). ### 0.10.0 * See commit log.

disque-1.0-rc1/deps/hiredis/COPYING:

Copyright (c) 2009-2011, Salvatore Sanfilippo Copyright (c) 2010-2011, Pieter Noordhuis All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Redis nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
disque-1.0-rc1/deps/hiredis/Makefile000066400000000000000000000120561264176561100173660ustar00rootroot00000000000000# Hiredis Makefile # Copyright (C) 2010-2011 Salvatore Sanfilippo # Copyright (C) 2010-2011 Pieter Noordhuis # This file is released under the BSD license, see the COPYING file OBJ=net.o hiredis.o sds.o async.o EXAMPLES=hiredis-example hiredis-example-libevent hiredis-example-libev TESTS=hiredis-test LIBNAME=libhiredis HIREDIS_MAJOR=0 HIREDIS_MINOR=11 # redis-server configuration used for testing REDIS_PORT=56379 REDIS_SERVER=redis-server define REDIS_TEST_CONFIG daemonize yes pidfile /tmp/hiredis-test-redis.pid port $(REDIS_PORT) bind 127.0.0.1 unixsocket /tmp/hiredis-test-redis.sock endef export REDIS_TEST_CONFIG # Fallback to gcc when $CC is not in $PATH. CC:=$(shell sh -c 'type $(CC) >/dev/null 2>/dev/null && echo $(CC) || echo gcc') OPTIMIZATION?=-O3 WARNINGS=-Wall -W -Wstrict-prototypes -Wwrite-strings DEBUG?= -g -ggdb REAL_CFLAGS=$(OPTIMIZATION) -fPIC $(CFLAGS) $(WARNINGS) $(DEBUG) $(ARCH) REAL_LDFLAGS=$(LDFLAGS) $(ARCH) DYLIBSUFFIX=so STLIBSUFFIX=a DYLIB_MINOR_NAME=$(LIBNAME).$(DYLIBSUFFIX).$(HIREDIS_MAJOR).$(HIREDIS_MINOR) DYLIB_MAJOR_NAME=$(LIBNAME).$(DYLIBSUFFIX).$(HIREDIS_MAJOR) DYLIBNAME=$(LIBNAME).$(DYLIBSUFFIX) DYLIB_MAKE_CMD=$(CC) -shared -Wl,-soname,$(DYLIB_MINOR_NAME) -o $(DYLIBNAME) $(LDFLAGS) STLIBNAME=$(LIBNAME).$(STLIBSUFFIX) STLIB_MAKE_CMD=ar rcs $(STLIBNAME) # Platform-specific overrides uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not') ifeq ($(uname_S),SunOS) REAL_LDFLAGS+= -ldl -lnsl -lsocket DYLIB_MAKE_CMD=$(CC) -G -o $(DYLIBNAME) -h $(DYLIB_MINOR_NAME) $(LDFLAGS) INSTALL= cp -r endif ifeq ($(uname_S),Darwin) DYLIBSUFFIX=dylib DYLIB_MINOR_NAME=$(LIBNAME).$(HIREDIS_MAJOR).$(HIREDIS_MINOR).$(DYLIBSUFFIX) DYLIB_MAJOR_NAME=$(LIBNAME).$(HIREDIS_MAJOR).$(DYLIBSUFFIX) DYLIB_MAKE_CMD=$(CC) -shared -Wl,-install_name,$(DYLIB_MINOR_NAME) -o $(DYLIBNAME) $(LDFLAGS) endif all: $(DYLIBNAME) # Deps (use make dep to generate this) net.o: net.c fmacros.h net.h hiredis.h async.o: async.c async.h hiredis.h sds.h dict.c dict.h hiredis.o: hiredis.c fmacros.h hiredis.h net.h sds.h sds.o: sds.c sds.h test.o: test.c hiredis.h $(DYLIBNAME): $(OBJ) $(DYLIB_MAKE_CMD) $(OBJ) $(STLIBNAME): $(OBJ) $(STLIB_MAKE_CMD) $(OBJ) dynamic: $(DYLIBNAME) static: $(STLIBNAME) # Binaries: hiredis-example-libevent: examples/example-libevent.c adapters/libevent.h $(STLIBNAME) $(CC) -o examples/$@ $(REAL_CFLAGS) $(REAL_LDFLAGS) -I. $< -levent $(STLIBNAME) hiredis-example-libev: examples/example-libev.c adapters/libev.h $(STLIBNAME) $(CC) -o examples/$@ $(REAL_CFLAGS) $(REAL_LDFLAGS) -I. $< -lev $(STLIBNAME) ifndef AE_DIR hiredis-example-ae: @echo "Please specify AE_DIR (e.g. /src)" @false else hiredis-example-ae: examples/example-ae.c adapters/ae.h $(STLIBNAME) $(CC) -o examples/$@ $(REAL_CFLAGS) $(REAL_LDFLAGS) -I. -I$(AE_DIR) $< $(AE_DIR)/ae.o $(AE_DIR)/zmalloc.o $(AE_DIR)/../deps/jemalloc/lib/libjemalloc.a -pthread $(STLIBNAME) endif ifndef LIBUV_DIR hiredis-example-libuv: @echo "Please specify LIBUV_DIR (e.g. ../libuv/)" @false else hiredis-example-libuv: examples/example-libuv.c adapters/libuv.h $(STLIBNAME) $(CC) -o examples/$@ $(REAL_CFLAGS) $(REAL_LDFLAGS) -I. -I$(LIBUV_DIR)/include $< $(LIBUV_DIR)/.libs/libuv.a -lpthread $(STLIBNAME) endif hiredis-example: examples/example.c $(STLIBNAME) $(CC) -o examples/$@ $(REAL_CFLAGS) $(REAL_LDFLAGS) -I. 
$< $(STLIBNAME) examples: $(EXAMPLES) hiredis-test: test.o $(STLIBNAME) $(CC) -o $@ $(REAL_LDFLAGS) $< $(STLIBNAME) test: hiredis-test ./hiredis-test check: hiredis-test @echo "$$REDIS_TEST_CONFIG" | $(REDIS_SERVER) - ./hiredis-test -h 127.0.0.1 -p $(REDIS_PORT) -s /tmp/hiredis-test-redis.sock || \ ( kill `cat /tmp/hiredis-test-redis.pid` && false ) kill `cat /tmp/hiredis-test-redis.pid` .c.o: $(CC) -std=c99 -pedantic -c $(REAL_CFLAGS) $< clean: rm -rf $(DYLIBNAME) $(STLIBNAME) $(TESTS) examples/hiredis-example* *.o *.gcda *.gcno *.gcov dep: $(CC) -MM *.c # Installation related variables and target PREFIX?=/usr/local INSTALL_INCLUDE_PATH= $(PREFIX)/include/hiredis INSTALL_LIBRARY_PATH= $(PREFIX)/lib ifeq ($(uname_S),SunOS) INSTALL?= cp -r endif INSTALL?= cp -a install: $(DYLIBNAME) $(STLIBNAME) mkdir -p $(INSTALL_INCLUDE_PATH) $(INSTALL_LIBRARY_PATH) $(INSTALL) hiredis.h async.h adapters $(INSTALL_INCLUDE_PATH) $(INSTALL) $(DYLIBNAME) $(INSTALL_LIBRARY_PATH)/$(DYLIB_MINOR_NAME) cd $(INSTALL_LIBRARY_PATH) && ln -sf $(DYLIB_MINOR_NAME) $(DYLIB_MAJOR_NAME) cd $(INSTALL_LIBRARY_PATH) && ln -sf $(DYLIB_MAJOR_NAME) $(DYLIBNAME) $(INSTALL) $(STLIBNAME) $(INSTALL_LIBRARY_PATH) 32bit: @echo "" @echo "WARNING: if this fails under Linux you probably need to install libc6-dev-i386" @echo "" $(MAKE) CFLAGS="-m32" LDFLAGS="-m32" gprof: $(MAKE) CFLAGS="-pg" LDFLAGS="-pg" gcov: $(MAKE) CFLAGS="-fprofile-arcs -ftest-coverage" LDFLAGS="-fprofile-arcs" coverage: gcov make check mkdir -p tmp/lcov lcov -d . -c -o tmp/lcov/hiredis.info genhtml --legend -o tmp/lcov/report tmp/lcov/hiredis.info noopt: $(MAKE) OPTIMIZATION="" .PHONY: all test check clean dep install 32bit gprof gcov noopt disque-1.0-rc1/deps/hiredis/README.md000066400000000000000000000420351264176561100172050ustar00rootroot00000000000000[![Build Status](https://travis-ci.org/redis/hiredis.png)](https://travis-ci.org/redis/hiredis) # HIREDIS Hiredis is a minimalistic C client library for the [Redis](http://redis.io/) database. It is minimalistic because it just adds minimal support for the protocol, but at the same time it uses an high level printf-alike API in order to make it much higher level than otherwise suggested by its minimal code base and the lack of explicit bindings for every Redis command. Apart from supporting sending commands and receiving replies, it comes with a reply parser that is decoupled from the I/O layer. It is a stream parser designed for easy reusability, which can for instance be used in higher level language bindings for efficient reply parsing. Hiredis only supports the binary-safe Redis protocol, so you can use it with any Redis version >= 1.2.0. The library comes with multiple APIs. There is the *synchronous API*, the *asynchronous API* and the *reply parsing API*. ## UPGRADING Version 0.9.0 is a major overhaul of hiredis in every aspect. However, upgrading existing code using hiredis should not be a big pain. The key thing to keep in mind when upgrading is that hiredis >= 0.9.0 uses a `redisContext*` to keep state, in contrast to the stateless 0.0.1 that only has a file descriptor to work with. ## Synchronous API To consume the synchronous API, there are only a few function calls that need to be introduced: redisContext *redisConnect(const char *ip, int port); void *redisCommand(redisContext *c, const char *format, ...); void freeReplyObject(void *reply); ### Connecting The function `redisConnect` is used to create a so-called `redisContext`. The context is where Hiredis holds state for a connection. 
The `redisContext` struct has an integer `err` field that is non-zero when an the connection is in an error state. The field `errstr` will contain a string with a description of the error. More information on errors can be found in the **Errors** section. After trying to connect to Redis using `redisConnect` you should check the `err` field to see if establishing the connection was successful: redisContext *c = redisConnect("127.0.0.1", 6379); if (c != NULL && c->err) { printf("Error: %s\n", c->errstr); // handle error } ### Sending commands There are several ways to issue commands to Redis. The first that will be introduced is `redisCommand`. This function takes a format similar to printf. In the simplest form, it is used like this: reply = redisCommand(context, "SET foo bar"); The specifier `%s` interpolates a string in the command, and uses `strlen` to determine the length of the string: reply = redisCommand(context, "SET foo %s", value); When you need to pass binary safe strings in a command, the `%b` specifier can be used. Together with a pointer to the string, it requires a `size_t` length argument of the string: reply = redisCommand(context, "SET foo %b", value, (size_t) valuelen); Internally, Hiredis splits the command in different arguments and will convert it to the protocol used to communicate with Redis. One or more spaces separates arguments, so you can use the specifiers anywhere in an argument: reply = redisCommand(context, "SET key:%s %s", myid, value); ### Using replies The return value of `redisCommand` holds a reply when the command was successfully executed. When an error occurs, the return value is `NULL` and the `err` field in the context will be set (see section on **Errors**). Once an error is returned the context cannot be reused and you should set up a new connection. The standard replies that `redisCommand` are of the type `redisReply`. The `type` field in the `redisReply` should be used to test what kind of reply was received: * **`REDIS_REPLY_STATUS`**: * The command replied with a status reply. The status string can be accessed using `reply->str`. The length of this string can be accessed using `reply->len`. * **`REDIS_REPLY_ERROR`**: * The command replied with an error. The error string can be accessed identical to `REDIS_REPLY_STATUS`. * **`REDIS_REPLY_INTEGER`**: * The command replied with an integer. The integer value can be accessed using the `reply->integer` field of type `long long`. * **`REDIS_REPLY_NIL`**: * The command replied with a **nil** object. There is no data to access. * **`REDIS_REPLY_STRING`**: * A bulk (string) reply. The value of the reply can be accessed using `reply->str`. The length of this string can be accessed using `reply->len`. * **`REDIS_REPLY_ARRAY`**: * A multi bulk reply. The number of elements in the multi bulk reply is stored in `reply->elements`. Every element in the multi bulk reply is a `redisReply` object as well and can be accessed via `reply->element[..index..]`. Redis may reply with nested arrays but this is fully supported. Replies should be freed using the `freeReplyObject()` function. Note that this function will take care of freeing sub-replies objects contained in arrays and nested arrays, so there is no need for the user to free the sub replies (it is actually harmful and will corrupt the memory). **Important:** the current version of hiredis (0.10.0) free's replies when the asynchronous API is used. This means you should not call `freeReplyObject` when you use this API. 
The reply is cleaned up by hiredis _after_ the callback returns. This behavior will probably change in future releases, so make sure to keep an eye on the changelog when upgrading (see issue #39). ### Cleaning up To disconnect and free the context the following function can be used: void redisFree(redisContext *c); This function immediately closes the socket and then free's the allocations done in creating the context. ### Sending commands (cont'd) Together with `redisCommand`, the function `redisCommandArgv` can be used to issue commands. It has the following prototype: void *redisCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen); It takes the number of arguments `argc`, an array of strings `argv` and the lengths of the arguments `argvlen`. For convenience, `argvlen` may be set to `NULL` and the function will use `strlen(3)` on every argument to determine its length. Obviously, when any of the arguments need to be binary safe, the entire array of lengths `argvlen` should be provided. The return value has the same semantic as `redisCommand`. ### Pipelining To explain how Hiredis supports pipelining in a blocking connection, there needs to be understanding of the internal execution flow. When any of the functions in the `redisCommand` family is called, Hiredis first formats the command according to the Redis protocol. The formatted command is then put in the output buffer of the context. This output buffer is dynamic, so it can hold any number of commands. After the command is put in the output buffer, `redisGetReply` is called. This function has the following two execution paths: 1. The input buffer is non-empty: * Try to parse a single reply from the input buffer and return it * If no reply could be parsed, continue at *2* 2. The input buffer is empty: * Write the **entire** output buffer to the socket * Read from the socket until a single reply could be parsed The function `redisGetReply` is exported as part of the Hiredis API and can be used when a reply is expected on the socket. To pipeline commands, the only things that needs to be done is filling up the output buffer. For this cause, two commands can be used that are identical to the `redisCommand` family, apart from not returning a reply: void redisAppendCommand(redisContext *c, const char *format, ...); void redisAppendCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen); After calling either function one or more times, `redisGetReply` can be used to receive the subsequent replies. The return value for this function is either `REDIS_OK` or `REDIS_ERR`, where the latter means an error occurred while reading a reply. Just as with the other commands, the `err` field in the context can be used to find out what the cause of this error is. The following examples shows a simple pipeline (resulting in only a single call to `write(2)` and a single call to `read(2)`): redisReply *reply; redisAppendCommand(context,"SET foo bar"); redisAppendCommand(context,"GET foo"); redisGetReply(context,&reply); // reply for SET freeReplyObject(reply); redisGetReply(context,&reply); // reply for GET freeReplyObject(reply); This API can also be used to implement a blocking subscriber: reply = redisCommand(context,"SUBSCRIBE foo"); freeReplyObject(reply); while(redisGetReply(context,&reply) == REDIS_OK) { // consume message freeReplyObject(reply); } ### Errors When a function call is not successful, depending on the function either `NULL` or `REDIS_ERR` is returned. 
The `err` field inside the context will be non-zero and set to one of the following constants: * **`REDIS_ERR_IO`**: There was an I/O error while creating the connection, trying to write to the socket or read from the socket. If you included `errno.h` in your application, you can use the global `errno` variable to find out what is wrong. * **`REDIS_ERR_EOF`**: The server closed the connection which resulted in an empty read. * **`REDIS_ERR_PROTOCOL`**: There was an error while parsing the protocol. * **`REDIS_ERR_OTHER`**: Any other error. Currently, it is only used when a specified hostname to connect to cannot be resolved. In every case, the `errstr` field in the context will be set to hold a string representation of the error. ## Asynchronous API Hiredis comes with an asynchronous API that works easily with any event library. Examples are bundled that show using Hiredis with [libev](http://software.schmorp.de/pkg/libev.html) and [libevent](http://monkey.org/~provos/libevent/). ### Connecting The function `redisAsyncConnect` can be used to establish a non-blocking connection to Redis. It returns a pointer to the newly created `redisAsyncContext` struct. The `err` field should be checked after creation to see if there were errors creating the connection. Because the connection that will be created is non-blocking, the kernel is not able to instantly return if the specified host and port is able to accept a connection. redisAsyncContext *c = redisAsyncConnect("127.0.0.1", 6379); if (c->err) { printf("Error: %s\n", c->errstr); // handle error } The asynchronous context can hold a disconnect callback function that is called when the connection is disconnected (either because of an error or per user request). This function should have the following prototype: void(const redisAsyncContext *c, int status); On a disconnect, the `status` argument is set to `REDIS_OK` when disconnection was initiated by the user, or `REDIS_ERR` when the disconnection was caused by an error. When it is `REDIS_ERR`, the `err` field in the context can be accessed to find out the cause of the error. The context object is always free'd after the disconnect callback fired. When a reconnect is needed, the disconnect callback is a good point to do so. Setting the disconnect callback can only be done once per context. For subsequent calls it will return `REDIS_ERR`. The function to set the disconnect callback has the following prototype: int redisAsyncSetDisconnectCallback(redisAsyncContext *ac, redisDisconnectCallback *fn); ### Sending commands and their callbacks In an asynchronous context, commands are automatically pipelined due to the nature of an event loop. Therefore, unlike the synchronous API, there is only a single way to send commands. Because commands are sent to Redis asynchronously, issuing a command requires a callback function that is called when the reply is received. Reply callbacks should have the following prototype: void(redisAsyncContext *c, void *reply, void *privdata); The `privdata` argument can be used to curry arbitrary data to the callback from the point where the command is initially queued for execution. The functions that can be used to issue commands in an asynchronous context are: int redisAsyncCommand( redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *format, ...); int redisAsyncCommandArgv( redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, int argc, const char **argv, const size_t *argvlen); Both functions work like their blocking counterparts. 
The return value is `REDIS_OK` when the command was successfully added to the output buffer and `REDIS_ERR` otherwise. Example: when the connection is being disconnected per user-request, no new commands may be added to the output buffer and `REDIS_ERR` is returned on calls to the `redisAsyncCommand` family. If the reply for a command with a `NULL` callback is read, it is immediately free'd. When the callback for a command is non-`NULL`, the memory is free'd immediately following the callback: the reply is only valid for the duration of the callback. All pending callbacks are called with a `NULL` reply when the context encountered an error. ### Disconnecting An asynchronous connection can be terminated using: void redisAsyncDisconnect(redisAsyncContext *ac); When this function is called, the connection is **not** immediately terminated. Instead, new commands are no longer accepted and the connection is only terminated when all pending commands have been written to the socket, their respective replies have been read and their respective callbacks have been executed. After this, the disconnection callback is executed with the `REDIS_OK` status and the context object is free'd. ### Hooking it up to event library *X* There are a few hooks that need to be set on the context object after it is created. See the `adapters/` directory for bindings to *libev* and *libevent*. ## Reply parsing API Hiredis comes with a reply parsing API that makes it easy for writing higher level language bindings. The reply parsing API consists of the following functions: redisReader *redisReaderCreate(void); void redisReaderFree(redisReader *reader); int redisReaderFeed(redisReader *reader, const char *buf, size_t len); int redisReaderGetReply(redisReader *reader, void **reply); The same set of functions are used internally by hiredis when creating a normal Redis context, the above API just exposes it to the user for a direct usage. ### Usage The function `redisReaderCreate` creates a `redisReader` structure that holds a buffer with unparsed data and state for the protocol parser. Incoming data -- most likely from a socket -- can be placed in the internal buffer of the `redisReader` using `redisReaderFeed`. This function will make a copy of the buffer pointed to by `buf` for `len` bytes. This data is parsed when `redisReaderGetReply` is called. This function returns an integer status and a reply object (as described above) via `void **reply`. The returned status can be either `REDIS_OK` or `REDIS_ERR`, where the latter means something went wrong (either a protocol error, or an out of memory error). The parser limits the level of nesting for multi bulk payloads to 7. If the multi bulk nesting level is higher than this, the parser returns an error. ### Customizing replies The function `redisReaderGetReply` creates `redisReply` and makes the function argument `reply` point to the created `redisReply` variable. For instance, if the response of type `REDIS_REPLY_STATUS` then the `str` field of `redisReply` will hold the status as a vanilla C string. However, the functions that are responsible for creating instances of the `redisReply` can be customized by setting the `fn` field on the `redisReader` struct. This should be done immediately after creating the `redisReader`. For example, [hiredis-rb](https://github.com/pietern/hiredis-rb/blob/master/ext/hiredis_ext/reader.c) uses customized reply object functions to create Ruby objects. 
### Reader max buffer Both when using the Reader API directly or when using it indirectly via a normal Redis context, the redisReader structure uses a buffer in order to accumulate data from the server. Usually this buffer is destroyed when it is empty and is larger than 16 kb in order to avoid wasting memory in unused buffers However when working with very big payloads destroying the buffer may slow down performances considerably, so it is possible to modify the max size of an idle buffer changing the value of the `maxbuf` field of the reader structure to the desired value. The special value of 0 means that there is no maximum value for an idle buffer, so the buffer will never get freed. For instance if you have a normal Redis context you can set the maximum idle buffer to zero (unlimited) just with: context->reader->maxbuf = 0; This should be done only in order to maximize performances when working with large payloads. The context should be set back to `REDIS_READER_MAX_BUF` again as soon as possible in order to prevent allocation of useless memory. ## AUTHORS Hiredis was written by Salvatore Sanfilippo (antirez at gmail) and Pieter Noordhuis (pcnoordhuis at gmail) and is released under the BSD license. disque-1.0-rc1/deps/hiredis/adapters/000077500000000000000000000000001264176561100175255ustar00rootroot00000000000000disque-1.0-rc1/deps/hiredis/adapters/ae.h000066400000000000000000000101731264176561100202650ustar00rootroot00000000000000/* * Copyright (c) 2010-2011, Pieter Noordhuis * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef __HIREDIS_AE_H__ #define __HIREDIS_AE_H__ #include #include #include "../hiredis.h" #include "../async.h" typedef struct redisAeEvents { redisAsyncContext *context; aeEventLoop *loop; int fd; int reading, writing; } redisAeEvents; static void redisAeReadEvent(aeEventLoop *el, int fd, void *privdata, int mask) { ((void)el); ((void)fd); ((void)mask); redisAeEvents *e = (redisAeEvents*)privdata; redisAsyncHandleRead(e->context); } static void redisAeWriteEvent(aeEventLoop *el, int fd, void *privdata, int mask) { ((void)el); ((void)fd); ((void)mask); redisAeEvents *e = (redisAeEvents*)privdata; redisAsyncHandleWrite(e->context); } static void redisAeAddRead(void *privdata) { redisAeEvents *e = (redisAeEvents*)privdata; aeEventLoop *loop = e->loop; if (!e->reading) { e->reading = 1; aeCreateFileEvent(loop,e->fd,AE_READABLE,redisAeReadEvent,e); } } static void redisAeDelRead(void *privdata) { redisAeEvents *e = (redisAeEvents*)privdata; aeEventLoop *loop = e->loop; if (e->reading) { e->reading = 0; aeDeleteFileEvent(loop,e->fd,AE_READABLE); } } static void redisAeAddWrite(void *privdata) { redisAeEvents *e = (redisAeEvents*)privdata; aeEventLoop *loop = e->loop; if (!e->writing) { e->writing = 1; aeCreateFileEvent(loop,e->fd,AE_WRITABLE,redisAeWriteEvent,e); } } static void redisAeDelWrite(void *privdata) { redisAeEvents *e = (redisAeEvents*)privdata; aeEventLoop *loop = e->loop; if (e->writing) { e->writing = 0; aeDeleteFileEvent(loop,e->fd,AE_WRITABLE); } } static void redisAeCleanup(void *privdata) { redisAeEvents *e = (redisAeEvents*)privdata; redisAeDelRead(privdata); redisAeDelWrite(privdata); free(e); } static int redisAeAttach(aeEventLoop *loop, redisAsyncContext *ac) { redisContext *c = &(ac->c); redisAeEvents *e; /* Nothing should be attached when something is already attached */ if (ac->ev.data != NULL) return REDIS_ERR; /* Create container for context and r/w events */ e = (redisAeEvents*)malloc(sizeof(*e)); e->context = ac; e->loop = loop; e->fd = c->fd; e->reading = e->writing = 0; /* Register functions to start/stop listening for events */ ac->ev.addRead = redisAeAddRead; ac->ev.delRead = redisAeDelRead; ac->ev.addWrite = redisAeAddWrite; ac->ev.delWrite = redisAeDelWrite; ac->ev.cleanup = redisAeCleanup; ac->ev.data = e; return REDIS_OK; } #endif disque-1.0-rc1/deps/hiredis/adapters/libev.h000066400000000000000000000107531264176561100210050ustar00rootroot00000000000000/* * Copyright (c) 2010-2011, Pieter Noordhuis * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef __HIREDIS_LIBEV_H__ #define __HIREDIS_LIBEV_H__ #include #include #include #include "../hiredis.h" #include "../async.h" typedef struct redisLibevEvents { redisAsyncContext *context; struct ev_loop *loop; int reading, writing; ev_io rev, wev; } redisLibevEvents; static void redisLibevReadEvent(EV_P_ ev_io *watcher, int revents) { #if EV_MULTIPLICITY ((void)loop); #endif ((void)revents); redisLibevEvents *e = (redisLibevEvents*)watcher->data; redisAsyncHandleRead(e->context); } static void redisLibevWriteEvent(EV_P_ ev_io *watcher, int revents) { #if EV_MULTIPLICITY ((void)loop); #endif ((void)revents); redisLibevEvents *e = (redisLibevEvents*)watcher->data; redisAsyncHandleWrite(e->context); } static void redisLibevAddRead(void *privdata) { redisLibevEvents *e = (redisLibevEvents*)privdata; struct ev_loop *loop = e->loop; ((void)loop); if (!e->reading) { e->reading = 1; ev_io_start(EV_A_ &e->rev); } } static void redisLibevDelRead(void *privdata) { redisLibevEvents *e = (redisLibevEvents*)privdata; struct ev_loop *loop = e->loop; ((void)loop); if (e->reading) { e->reading = 0; ev_io_stop(EV_A_ &e->rev); } } static void redisLibevAddWrite(void *privdata) { redisLibevEvents *e = (redisLibevEvents*)privdata; struct ev_loop *loop = e->loop; ((void)loop); if (!e->writing) { e->writing = 1; ev_io_start(EV_A_ &e->wev); } } static void redisLibevDelWrite(void *privdata) { redisLibevEvents *e = (redisLibevEvents*)privdata; struct ev_loop *loop = e->loop; ((void)loop); if (e->writing) { e->writing = 0; ev_io_stop(EV_A_ &e->wev); } } static void redisLibevCleanup(void *privdata) { redisLibevEvents *e = (redisLibevEvents*)privdata; redisLibevDelRead(privdata); redisLibevDelWrite(privdata); free(e); } static int redisLibevAttach(EV_P_ redisAsyncContext *ac) { redisContext *c = &(ac->c); redisLibevEvents *e; /* Nothing should be attached when something is already attached */ if (ac->ev.data != NULL) return REDIS_ERR; /* Create container for context and r/w events */ e = (redisLibevEvents*)malloc(sizeof(*e)); e->context = ac; #if EV_MULTIPLICITY e->loop = loop; #else e->loop = NULL; #endif e->reading = e->writing = 0; e->rev.data = e; e->wev.data = e; /* Register functions to start/stop listening for events */ ac->ev.addRead = redisLibevAddRead; ac->ev.delRead = redisLibevDelRead; ac->ev.addWrite = redisLibevAddWrite; ac->ev.delWrite = redisLibevDelWrite; ac->ev.cleanup = redisLibevCleanup; ac->ev.data = e; /* Initialize read/write events */ ev_io_init(&e->rev,redisLibevReadEvent,c->fd,EV_READ); ev_io_init(&e->wev,redisLibevWriteEvent,c->fd,EV_WRITE); return REDIS_OK; } #endif disque-1.0-rc1/deps/hiredis/adapters/libevent.h000066400000000000000000000076141264176561100215160ustar00rootroot00000000000000/* * Copyright (c) 2010-2011, Pieter Noordhuis * * All rights reserved. 
disque-1.0-rc1/deps/hiredis/adapters/libevent.h
/*
 * Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of Redis nor the names of its contributors may be used
 *     to endorse or promote products derived from this software without
 *     specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __HIREDIS_LIBEVENT_H__
#define __HIREDIS_LIBEVENT_H__
#include <event.h>
#include "../hiredis.h"
#include "../async.h"

typedef struct redisLibeventEvents {
    redisAsyncContext *context;
    struct event rev, wev;
} redisLibeventEvents;

static void redisLibeventReadEvent(int fd, short event, void *arg) {
    ((void)fd); ((void)event);
    redisLibeventEvents *e = (redisLibeventEvents*)arg;
    redisAsyncHandleRead(e->context);
}

static void redisLibeventWriteEvent(int fd, short event, void *arg) {
    ((void)fd); ((void)event);
    redisLibeventEvents *e = (redisLibeventEvents*)arg;
    redisAsyncHandleWrite(e->context);
}

static void redisLibeventAddRead(void *privdata) {
    redisLibeventEvents *e = (redisLibeventEvents*)privdata;
    event_add(&e->rev,NULL);
}

static void redisLibeventDelRead(void *privdata) {
    redisLibeventEvents *e = (redisLibeventEvents*)privdata;
    event_del(&e->rev);
}

static void redisLibeventAddWrite(void *privdata) {
    redisLibeventEvents *e = (redisLibeventEvents*)privdata;
    event_add(&e->wev,NULL);
}

static void redisLibeventDelWrite(void *privdata) {
    redisLibeventEvents *e = (redisLibeventEvents*)privdata;
    event_del(&e->wev);
}

static void redisLibeventCleanup(void *privdata) {
    redisLibeventEvents *e = (redisLibeventEvents*)privdata;
    event_del(&e->rev);
    event_del(&e->wev);
    free(e);
}

static int redisLibeventAttach(redisAsyncContext *ac, struct event_base *base) {
    redisContext *c = &(ac->c);
    redisLibeventEvents *e;

    /* Nothing should be attached when something is already attached */
    if (ac->ev.data != NULL)
        return REDIS_ERR;

    /* Create container for context and r/w events */
    e = (redisLibeventEvents*)malloc(sizeof(*e));
    e->context = ac;

    /* Register functions to start/stop listening for events */
    ac->ev.addRead = redisLibeventAddRead;
    ac->ev.delRead = redisLibeventDelRead;
    ac->ev.addWrite = redisLibeventAddWrite;
    ac->ev.delWrite = redisLibeventDelWrite;
    ac->ev.cleanup = redisLibeventCleanup;
    ac->ev.data = e;

    /* Initialize and install read/write events */
    event_set(&e->rev,c->fd,EV_READ,redisLibeventReadEvent,e);
    event_set(&e->wev,c->fd,EV_WRITE,redisLibeventWriteEvent,e);
    event_base_set(base,&e->rev);
    event_base_set(base,&e->wev);
    return REDIS_OK;
}
#endif

disque-1.0-rc1/deps/hiredis/adapters/libuv.h
#ifndef __HIREDIS_LIBUV_H__
#define __HIREDIS_LIBUV_H__
#include <uv.h>
#include "../hiredis.h"
#include "../async.h"
#include <string.h>

typedef struct redisLibuvEvents {
    redisAsyncContext* context;
    uv_poll_t handle;
    int events;
} redisLibuvEvents;

int redisLibuvAttach(redisAsyncContext*, uv_loop_t*);

static void redisLibuvPoll(uv_poll_t* handle, int status, int events) {
    redisLibuvEvents* p = (redisLibuvEvents*)handle->data;

    if (status != 0) {
        return;
    }

    if (events & UV_READABLE) {
        redisAsyncHandleRead(p->context);
    }
    if (events & UV_WRITABLE) {
        redisAsyncHandleWrite(p->context);
    }
}

static void redisLibuvAddRead(void *privdata) {
    redisLibuvEvents* p = (redisLibuvEvents*)privdata;

    p->events |= UV_READABLE;
    uv_poll_start(&p->handle, p->events, redisLibuvPoll);
}

static void redisLibuvDelRead(void *privdata) {
    redisLibuvEvents* p = (redisLibuvEvents*)privdata;

    p->events &= ~UV_READABLE;
    if (p->events) {
        uv_poll_start(&p->handle, p->events, redisLibuvPoll);
    } else {
        uv_poll_stop(&p->handle);
    }
}

static void redisLibuvAddWrite(void *privdata) {
    redisLibuvEvents* p = (redisLibuvEvents*)privdata;

    p->events |= UV_WRITABLE;
    uv_poll_start(&p->handle, p->events, redisLibuvPoll);
}

static void redisLibuvDelWrite(void *privdata) {
    redisLibuvEvents* p = (redisLibuvEvents*)privdata;

    p->events &= ~UV_WRITABLE;
    if (p->events) {
        uv_poll_start(&p->handle, p->events, redisLibuvPoll);
    } else {
        uv_poll_stop(&p->handle);
    }
}

static void on_close(uv_handle_t* handle) {
    redisLibuvEvents* p = (redisLibuvEvents*)handle->data;
    free(p);
}

static void redisLibuvCleanup(void *privdata) {
    redisLibuvEvents* p = (redisLibuvEvents*)privdata;
    uv_close((uv_handle_t*)&p->handle, on_close);
}

static int redisLibuvAttach(redisAsyncContext* ac, uv_loop_t* loop) {
    redisContext *c = &(ac->c);

    if (ac->ev.data != NULL) {
        return REDIS_ERR;
    }

    ac->ev.addRead  = redisLibuvAddRead;
    ac->ev.delRead  = redisLibuvDelRead;
    ac->ev.addWrite = redisLibuvAddWrite;
    ac->ev.delWrite = redisLibuvDelWrite;
    ac->ev.cleanup  = redisLibuvCleanup;

    redisLibuvEvents* p = (redisLibuvEvents*)malloc(sizeof(*p));
    if (!p) {
        return REDIS_ERR;
    }
    memset(p, 0, sizeof(*p));

    if (uv_poll_init(loop, &p->handle, c->fd) != 0) {
        return REDIS_ERR;
    }

    ac->ev.data    = p;
    p->handle.data = p;
    p->context     = ac;

    return REDIS_OK;
}
#endif
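All three adapters above implement the same five-hook contract that the async API in async.c calls into. The skeleton below is a hedged sketch of that contract, not part of hiredis; the names myAdapterEvents/myAdapterAttach are hypothetical, and the event-library plumbing is elided.

/* Minimal skeleton of the adapter contract (hypothetical, for illustration). */
#include <stdlib.h>
#include "../hiredis.h"
#include "../async.h"

typedef struct myAdapterEvents {
    redisAsyncContext *context;
    /* ... event-library watchers for c->fd would go here; their read/write
     * handlers must call redisAsyncHandleRead/redisAsyncHandleWrite(context). */
} myAdapterEvents;

static void myAdapterAddRead(void *privdata)  { (void)privdata; /* arm read watcher */ }
static void myAdapterDelRead(void *privdata)  { (void)privdata; /* disarm read watcher */ }
static void myAdapterAddWrite(void *privdata) { (void)privdata; /* arm write watcher */ }
static void myAdapterDelWrite(void *privdata) { (void)privdata; /* disarm write watcher */ }
static void myAdapterCleanup(void *privdata)  { free(privdata); }

static int myAdapterAttach(redisAsyncContext *ac) {
    /* Only one adapter may be attached to a context at a time. */
    if (ac->ev.data != NULL) return REDIS_ERR;
    myAdapterEvents *e = malloc(sizeof(*e));
    if (e == NULL) return REDIS_ERR;
    e->context = ac;
    /* hiredis invokes these hooks whenever it wants to start or stop
     * listening for readability/writability on the socket. */
    ac->ev.addRead  = myAdapterAddRead;
    ac->ev.delRead  = myAdapterDelRead;
    ac->ev.addWrite = myAdapterAddWrite;
    ac->ev.delWrite = myAdapterDelWrite;
    ac->ev.cleanup  = myAdapterCleanup;
    ac->ev.data = e;
    return REDIS_OK;
}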
disque-1.0-rc1/deps/hiredis/async.c
/*
 * Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
 * Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of Redis nor the names of its contributors may be used
 *     to endorse or promote products derived from this software without
 *     specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "fmacros.h"
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include "async.h"
#include "net.h"
#include "dict.c"
#include "sds.h"

#define _EL_ADD_READ(ctx) do { \
        if ((ctx)->ev.addRead) (ctx)->ev.addRead((ctx)->ev.data); \
    } while(0)
#define _EL_DEL_READ(ctx) do { \
        if ((ctx)->ev.delRead) (ctx)->ev.delRead((ctx)->ev.data); \
    } while(0)
#define _EL_ADD_WRITE(ctx) do { \
        if ((ctx)->ev.addWrite) (ctx)->ev.addWrite((ctx)->ev.data); \
    } while(0)
#define _EL_DEL_WRITE(ctx) do { \
        if ((ctx)->ev.delWrite) (ctx)->ev.delWrite((ctx)->ev.data); \
    } while(0)
#define _EL_CLEANUP(ctx) do { \
        if ((ctx)->ev.cleanup) (ctx)->ev.cleanup((ctx)->ev.data); \
    } while(0);

/* Forward declaration of function in hiredis.c */
void __redisAppendCommand(redisContext *c, char *cmd, size_t len);

/* Functions managing dictionary of callbacks for pub/sub. */
static unsigned int callbackHash(const void *key) {
    return dictGenHashFunction((const unsigned char *)key,
                               sdslen((const sds)key));
}

static void *callbackValDup(void *privdata, const void *src) {
    ((void) privdata);
    redisCallback *dup = malloc(sizeof(*dup));
    memcpy(dup,src,sizeof(*dup));
    return dup;
}

static int callbackKeyCompare(void *privdata, const void *key1, const void *key2) {
    int l1, l2;
    ((void) privdata);

    l1 = sdslen((const sds)key1);
    l2 = sdslen((const sds)key2);
    if (l1 != l2) return 0;
    return memcmp(key1,key2,l1) == 0;
}

static void callbackKeyDestructor(void *privdata, void *key) {
    ((void) privdata);
    sdsfree((sds)key);
}

static void callbackValDestructor(void *privdata, void *val) {
    ((void) privdata);
    free(val);
}

static dictType callbackDict = {
    callbackHash,
    NULL,
    callbackValDup,
    callbackKeyCompare,
    callbackKeyDestructor,
    callbackValDestructor
};

static redisAsyncContext *redisAsyncInitialize(redisContext *c) {
    redisAsyncContext *ac;

    ac = realloc(c,sizeof(redisAsyncContext));
    if (ac == NULL)
        return NULL;

    c = &(ac->c);

    /* The regular connect functions will always set the flag REDIS_CONNECTED.
     * For the async API, we want to wait until the first write event is
     * received before setting this flag, so reset it here. */
    c->flags &= ~REDIS_CONNECTED;

    ac->err = 0;
    ac->errstr = NULL;
    ac->data = NULL;

    ac->ev.data = NULL;
    ac->ev.addRead = NULL;
    ac->ev.delRead = NULL;
    ac->ev.addWrite = NULL;
    ac->ev.delWrite = NULL;
    ac->ev.cleanup = NULL;

    ac->onConnect = NULL;
    ac->onDisconnect = NULL;

    ac->replies.head = NULL;
    ac->replies.tail = NULL;
    ac->sub.invalid.head = NULL;
    ac->sub.invalid.tail = NULL;
    ac->sub.channels = dictCreate(&callbackDict,NULL);
    ac->sub.patterns = dictCreate(&callbackDict,NULL);
    return ac;
}

/* We want the error field to be accessible directly instead of requiring
 * an indirection to the redisContext struct. */
static void __redisAsyncCopyError(redisAsyncContext *ac) {
    redisContext *c = &(ac->c);
    ac->err = c->err;
    ac->errstr = c->errstr;
}

redisAsyncContext *redisAsyncConnect(const char *ip, int port) {
    redisContext *c;
    redisAsyncContext *ac;

    c = redisConnectNonBlock(ip,port);
    if (c == NULL)
        return NULL;

    ac = redisAsyncInitialize(c);
    if (ac == NULL) {
        redisFree(c);
        return NULL;
    }

    __redisAsyncCopyError(ac);
    return ac;
}

redisAsyncContext *redisAsyncConnectBind(const char *ip, int port,
                                         const char *source_addr) {
    redisContext *c = redisConnectBindNonBlock(ip,port,source_addr);
    redisAsyncContext *ac = redisAsyncInitialize(c);
    __redisAsyncCopyError(ac);
    return ac;
}

redisAsyncContext *redisAsyncConnectUnix(const char *path) {
    redisContext *c;
    redisAsyncContext *ac;

    c = redisConnectUnixNonBlock(path);
    if (c == NULL)
        return NULL;

    ac = redisAsyncInitialize(c);
    if (ac == NULL) {
        redisFree(c);
        return NULL;
    }

    __redisAsyncCopyError(ac);
    return ac;
}

int redisAsyncSetConnectCallback(redisAsyncContext *ac, redisConnectCallback *fn) {
    if (ac->onConnect == NULL) {
        ac->onConnect = fn;

        /* The common way to detect an established connection is to wait for
         * the first write event to be fired. This assumes the related event
         * library functions are already set. */
        _EL_ADD_WRITE(ac);
        return REDIS_OK;
    }
    return REDIS_ERR;
}

int redisAsyncSetDisconnectCallback(redisAsyncContext *ac, redisDisconnectCallback *fn) {
    if (ac->onDisconnect == NULL) {
        ac->onDisconnect = fn;
        return REDIS_OK;
    }
    return REDIS_ERR;
}

/* Helper functions to push/shift callbacks */
static int __redisPushCallback(redisCallbackList *list, redisCallback *source) {
    redisCallback *cb;

    /* Copy callback from stack to heap */
    cb = malloc(sizeof(*cb));
    if (cb == NULL)
        return REDIS_ERR_OOM;

    if (source != NULL) {
        memcpy(cb,source,sizeof(*cb));
        cb->next = NULL;
    }

    /* Store callback in list */
    if (list->head == NULL)
        list->head = cb;
    if (list->tail != NULL)
        list->tail->next = cb;
    list->tail = cb;
    return REDIS_OK;
}

static int __redisShiftCallback(redisCallbackList *list, redisCallback *target) {
    redisCallback *cb = list->head;
    if (cb != NULL) {
        list->head = cb->next;
        if (cb == list->tail)
            list->tail = NULL;

        /* Copy callback from heap to stack */
        if (target != NULL)
            memcpy(target,cb,sizeof(*cb));
        free(cb);
        return REDIS_OK;
    }
    return REDIS_ERR;
}

static void __redisRunCallback(redisAsyncContext *ac, redisCallback *cb, redisReply *reply) {
    redisContext *c = &(ac->c);
    if (cb->fn != NULL) {
        c->flags |= REDIS_IN_CALLBACK;
        cb->fn(ac,reply,cb->privdata);
        c->flags &= ~REDIS_IN_CALLBACK;
    }
}

/* Helper function to free the context. */
static void __redisAsyncFree(redisAsyncContext *ac) {
    redisContext *c = &(ac->c);
    redisCallback cb;
    dictIterator *it;
    dictEntry *de;

    /* Execute pending callbacks with NULL reply. */
    while (__redisShiftCallback(&ac->replies,&cb) == REDIS_OK)
        __redisRunCallback(ac,&cb,NULL);

    /* Execute callbacks for invalid commands */
    while (__redisShiftCallback(&ac->sub.invalid,&cb) == REDIS_OK)
        __redisRunCallback(ac,&cb,NULL);

    /* Run subscription callbacks with NULL reply */
    it = dictGetIterator(ac->sub.channels);
    while ((de = dictNext(it)) != NULL)
        __redisRunCallback(ac,dictGetEntryVal(de),NULL);
    dictReleaseIterator(it);
    dictRelease(ac->sub.channels);

    it = dictGetIterator(ac->sub.patterns);
    while ((de = dictNext(it)) != NULL)
        __redisRunCallback(ac,dictGetEntryVal(de),NULL);
    dictReleaseIterator(it);
    dictRelease(ac->sub.patterns);

    /* Signal event lib to clean up */
    _EL_CLEANUP(ac);

    /* Execute disconnect callback. When redisAsyncFree() initiated destroying
     * this context, the status will always be REDIS_OK. */
    if (ac->onDisconnect && (c->flags & REDIS_CONNECTED)) {
        if (c->flags & REDIS_FREEING) {
            ac->onDisconnect(ac,REDIS_OK);
        } else {
            ac->onDisconnect(ac,(ac->err == 0) ? REDIS_OK : REDIS_ERR);
        }
    }

    /* Cleanup self */
    redisFree(c);
}

/* Free the async context. When this function is called from a callback,
 * control needs to be returned to redisProcessCallbacks() before actual
 * free'ing. To do so, a flag is set on the context which is picked up by
 * redisProcessCallbacks(). Otherwise, the context is immediately free'd. */
void redisAsyncFree(redisAsyncContext *ac) {
    redisContext *c = &(ac->c);
    c->flags |= REDIS_FREEING;
    if (!(c->flags & REDIS_IN_CALLBACK))
        __redisAsyncFree(ac);
}

/* Helper function to make the disconnect happen and clean up. */
static void __redisAsyncDisconnect(redisAsyncContext *ac) {
    redisContext *c = &(ac->c);

    /* Make sure error is accessible if there is any */
    __redisAsyncCopyError(ac);

    if (ac->err == 0) {
        /* For clean disconnects, there should be no pending callbacks. */
        assert(__redisShiftCallback(&ac->replies,NULL) == REDIS_ERR);
    } else {
        /* Disconnection is caused by an error, make sure that pending
         * callbacks cannot call new commands. */
        c->flags |= REDIS_DISCONNECTING;
    }

    /* For non-clean disconnects, __redisAsyncFree() will execute pending
     * callbacks with a NULL-reply. */
    __redisAsyncFree(ac);
}

/* Tries to do a clean disconnect from Redis, meaning it stops new commands
 * from being issued, but tries to flush the output buffer and execute
 * callbacks for all remaining replies. When this function is called from a
 * callback, there might be more replies and we can safely defer disconnecting
 * to redisProcessCallbacks(). Otherwise, we can only disconnect immediately
 * when there are no pending callbacks. */
void redisAsyncDisconnect(redisAsyncContext *ac) {
    redisContext *c = &(ac->c);
    c->flags |= REDIS_DISCONNECTING;
    if (!(c->flags & REDIS_IN_CALLBACK) && ac->replies.head == NULL)
        __redisAsyncDisconnect(ac);
}

static int __redisGetSubscribeCallback(redisAsyncContext *ac, redisReply *reply, redisCallback *dstcb) {
    redisContext *c = &(ac->c);
    dict *callbacks;
    dictEntry *de;
    int pvariant;
    char *stype;
    sds sname;

    /* Custom reply functions are not supported for pub/sub. This will fail
     * very hard when they are used... */
    if (reply->type == REDIS_REPLY_ARRAY) {
        assert(reply->elements >= 2);
        assert(reply->element[0]->type == REDIS_REPLY_STRING);
        stype = reply->element[0]->str;
        pvariant = (tolower(stype[0]) == 'p') ? 1 : 0;

        if (pvariant)
            callbacks = ac->sub.patterns;
        else
            callbacks = ac->sub.channels;

        /* Locate the right callback */
        assert(reply->element[1]->type == REDIS_REPLY_STRING);
        sname = sdsnewlen(reply->element[1]->str,reply->element[1]->len);
        de = dictFind(callbacks,sname);
        if (de != NULL) {
            memcpy(dstcb,dictGetEntryVal(de),sizeof(*dstcb));

            /* If this is an unsubscribe message, remove it. */
            if (strcasecmp(stype+pvariant,"unsubscribe") == 0) {
                dictDelete(callbacks,sname);

                /* If this was the last unsubscribe message, revert to
                 * non-subscribe mode. */
                assert(reply->element[2]->type == REDIS_REPLY_INTEGER);
                if (reply->element[2]->integer == 0)
                    c->flags &= ~REDIS_SUBSCRIBED;
            }
        }
        sdsfree(sname);
    } else {
        /* Shift callback for invalid commands. */
        __redisShiftCallback(&ac->sub.invalid,dstcb);
    }
    return REDIS_OK;
}

void redisProcessCallbacks(redisAsyncContext *ac) {
    redisContext *c = &(ac->c);
    redisCallback cb = {NULL, NULL, NULL};
    void *reply = NULL;
    int status;

    while((status = redisGetReply(c,&reply)) == REDIS_OK) {
        if (reply == NULL) {
            /* When the connection is being disconnected and there are
             * no more replies, this is the cue to really disconnect. */
            if (c->flags & REDIS_DISCONNECTING && sdslen(c->obuf) == 0) {
                __redisAsyncDisconnect(ac);
                return;
            }

            /* If monitor mode, repush callback */
            if(c->flags & REDIS_MONITORING) {
                __redisPushCallback(&ac->replies,&cb);
            }

            /* When the connection is not being disconnected, simply stop
             * trying to get replies and wait for the next loop tick. */
            break;
        }

        /* Even if the context is subscribed, pending regular callbacks will
         * get a reply before pub/sub messages arrive. */
        if (__redisShiftCallback(&ac->replies,&cb) != REDIS_OK) {
            /*
             * A spontaneous reply in a not-subscribed context can be the error
             * reply that is sent when a new connection exceeds the maximum
             * number of allowed connections on the server side.
             *
             * This is seen as an error instead of a regular reply because the
             * server closes the connection after sending it.
             *
             * To prevent the error from being overwritten by an EOF error the
             * connection is closed here. See issue #43.
             *
             * Another possibility is that the server is loading its dataset.
             * In this case we also want to close the connection, and have the
             * user wait until the server is ready to take our request.
             */
            if (((redisReply*)reply)->type == REDIS_REPLY_ERROR) {
                c->err = REDIS_ERR_OTHER;
                snprintf(c->errstr,sizeof(c->errstr),"%s",((redisReply*)reply)->str);
                __redisAsyncDisconnect(ac);
                return;
            }
            /* No more regular callbacks and no errors, the context *must* be
             * subscribed or monitoring. */
            assert((c->flags & REDIS_SUBSCRIBED || c->flags & REDIS_MONITORING));
            if(c->flags & REDIS_SUBSCRIBED)
                __redisGetSubscribeCallback(ac,reply,&cb);
        }

        if (cb.fn != NULL) {
            __redisRunCallback(ac,&cb,reply);
            c->reader->fn->freeObject(reply);

            /* Proceed with free'ing when redisAsyncFree() was called. */
            if (c->flags & REDIS_FREEING) {
                __redisAsyncFree(ac);
                return;
            }
        } else {
            /* No callback for this reply. This can either be a NULL callback,
             * or there were no callbacks to begin with. Either way, don't
             * abort with an error, but simply ignore it because the client
             * doesn't know what the server will spit out over the wire. */
            c->reader->fn->freeObject(reply);
        }
    }

    /* Disconnect when there was an error reading the reply */
    if (status != REDIS_OK)
        __redisAsyncDisconnect(ac);
}

/* Internal helper function to detect socket status the first time a read or
 * write event fires. When connecting was not successful, the connect callback
 * is called with a REDIS_ERR status and the context is free'd. */
static int __redisAsyncHandleConnect(redisAsyncContext *ac) {
    redisContext *c = &(ac->c);

    if (redisCheckSocketError(c) == REDIS_ERR) {
        /* Try again later when connect(2) is still in progress. */
        if (errno == EINPROGRESS)
            return REDIS_OK;

        if (ac->onConnect) ac->onConnect(ac,REDIS_ERR);
        __redisAsyncDisconnect(ac);
        return REDIS_ERR;
    }

    /* Mark context as connected. */
    c->flags |= REDIS_CONNECTED;
    if (ac->onConnect) ac->onConnect(ac,REDIS_OK);
    return REDIS_OK;
}

/* This function should be called when the socket is readable.
 * It processes all replies that can be read and executes their callbacks. */
void redisAsyncHandleRead(redisAsyncContext *ac) {
    redisContext *c = &(ac->c);

    if (!(c->flags & REDIS_CONNECTED)) {
        /* Abort if the connect was not successful. */
        if (__redisAsyncHandleConnect(ac) != REDIS_OK)
            return;
        /* Try again later when the context is still not connected. */
        if (!(c->flags & REDIS_CONNECTED))
            return;
    }

    if (redisBufferRead(c) == REDIS_ERR) {
        __redisAsyncDisconnect(ac);
    } else {
        /* Always re-schedule reads */
        _EL_ADD_READ(ac);
        redisProcessCallbacks(ac);
    }
}

void redisAsyncHandleWrite(redisAsyncContext *ac) {
    redisContext *c = &(ac->c);
    int done = 0;

    if (!(c->flags & REDIS_CONNECTED)) {
        /* Abort if the connect was not successful. */
        if (__redisAsyncHandleConnect(ac) != REDIS_OK)
            return;
        /* Try again later when the context is still not connected. */
        if (!(c->flags & REDIS_CONNECTED))
            return;
    }

    if (redisBufferWrite(c,&done) == REDIS_ERR) {
        __redisAsyncDisconnect(ac);
    } else {
        /* Continue writing when not done, stop writing otherwise */
        if (!done)
            _EL_ADD_WRITE(ac);
        else
            _EL_DEL_WRITE(ac);

        /* Always schedule reads after writes */
        _EL_ADD_READ(ac);
    }
}

/* Sets a pointer to the first argument and its length starting at p. Returns
 * the number of bytes to skip to get to the following argument. */
static char *nextArgument(char *start, char **str, size_t *len) {
    char *p = start;
    if (p[0] != '$') {
        p = strchr(p,'$');
        if (p == NULL) return NULL;
    }

    *len = (int)strtol(p+1,NULL,10);
    p = strchr(p,'\r');
    assert(p);
    *str = p+2;
    return p+2+(*len)+2;
}

/* Helper function for the redisAsyncCommand* family of functions. Writes a
 * formatted command to the output buffer and registers the provided callback
 * function with the context. */
static int __redisAsyncCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, char *cmd, size_t len) {
    redisContext *c = &(ac->c);
    redisCallback cb;
    int pvariant, hasnext;
    char *cstr, *astr;
    size_t clen, alen;
    char *p;
    sds sname;

    /* Don't accept new commands when the connection is about to be closed. */
    if (c->flags & (REDIS_DISCONNECTING | REDIS_FREEING))
        return REDIS_ERR;

    /* Setup callback */
    cb.fn = fn;
    cb.privdata = privdata;

    /* Find out which command will be appended. */
    p = nextArgument(cmd,&cstr,&clen);
    assert(p != NULL);
    hasnext = (p[0] == '$');
    pvariant = (tolower(cstr[0]) == 'p') ? 1 : 0;
    cstr += pvariant;
    clen -= pvariant;

    if (hasnext && strncasecmp(cstr,"subscribe\r\n",11) == 0) {
        c->flags |= REDIS_SUBSCRIBED;

        /* Add every channel/pattern to the list of subscription callbacks. */
        while ((p = nextArgument(p,&astr,&alen)) != NULL) {
            sname = sdsnewlen(astr,alen);
            if (pvariant)
                dictReplace(ac->sub.patterns,sname,&cb);
            else
                dictReplace(ac->sub.channels,sname,&cb);
        }
    } else if (strncasecmp(cstr,"unsubscribe\r\n",13) == 0) {
        /* It is only useful to call (P)UNSUBSCRIBE when the context is
         * subscribed to one or more channels or patterns. */
        if (!(c->flags & REDIS_SUBSCRIBED)) return REDIS_ERR;

        /* (P)UNSUBSCRIBE does not have its own response: every channel or
         * pattern that is unsubscribed will receive a message. This means we
         * should not append a callback function for this command. */
    } else if(strncasecmp(cstr,"monitor\r\n",9) == 0) {
        /* Set monitor flag and push callback */
        c->flags |= REDIS_MONITORING;
        __redisPushCallback(&ac->replies,&cb);
    } else {
        if (c->flags & REDIS_SUBSCRIBED)
            /* This will likely result in an error reply, but it needs to be
             * received and passed to the callback. */
            __redisPushCallback(&ac->sub.invalid,&cb);
        else
            __redisPushCallback(&ac->replies,&cb);
    }

    __redisAppendCommand(c,cmd,len);

    /* Always schedule a write when the write buffer is non-empty */
    _EL_ADD_WRITE(ac);

    return REDIS_OK;
}

int redisvAsyncCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *format, va_list ap) {
    char *cmd;
    int len;
    int status;
    len = redisvFormatCommand(&cmd,format,ap);
    status = __redisAsyncCommand(ac,fn,privdata,cmd,len);
    free(cmd);
    return status;
}

int redisAsyncCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *format, ...) {
    va_list ap;
    int status;
    va_start(ap,format);
    status = redisvAsyncCommand(ac,fn,privdata,format,ap);
    va_end(ap);
    return status;
}

int redisAsyncCommandArgv(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, int argc, const char **argv, const size_t *argvlen) {
    char *cmd;
    int len;
    int status;
    len = redisFormatCommandArgv(&cmd,argc,argv,argvlen);
    status = __redisAsyncCommand(ac,fn,privdata,cmd,len);
    free(cmd);
    return status;
}
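A hedged usage sketch of the pub/sub routing implemented above (not part of hiredis): SUBSCRIBE stores the given callback in ac->sub.channels under the channel name, and __redisGetSubscribeCallback() then re-uses it for every message on that channel, unlike regular commands whose callbacks fire exactly once.

/* Sketch only; assumes an event-loop adapter has already been attached. */
#include <stdio.h>
#include "hiredis.h"
#include "async.h"

static void onMessage(redisAsyncContext *ac, void *r, void *privdata) {
    redisReply *reply = r;
    (void)ac; (void)privdata;
    if (reply == NULL || reply->type != REDIS_REPLY_ARRAY || reply->elements < 3)
        return;
    /* element[0] = "subscribe"/"message"/"unsubscribe",
     * element[1] = channel name; for "message", element[2] = payload. */
    printf("%s on channel %s\n", reply->element[0]->str, reply->element[1]->str);
}

/* Usage:
 *   redisAsyncCommand(ac, onMessage, NULL, "SUBSCRIBE mychannel");
 * The same onMessage callback fires for the subscribe confirmation and for
 * every later message, until an UNSUBSCRIBE removes it from the dict. */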
disque-1.0-rc1/deps/hiredis/async.h
/*
 * Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
 * Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of Redis nor the names of its contributors may be used
 *     to endorse or promote products derived from this software without
 *     specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __HIREDIS_ASYNC_H
#define __HIREDIS_ASYNC_H
#include "hiredis.h"

#ifdef __cplusplus
extern "C" {
#endif

struct redisAsyncContext; /* need forward declaration of redisAsyncContext */
struct dict; /* dictionary header is included in async.c */

/* Reply callback prototype and container */
typedef void (redisCallbackFn)(struct redisAsyncContext*, void*, void*);
typedef struct redisCallback {
    struct redisCallback *next; /* simple singly linked list */
    redisCallbackFn *fn;
    void *privdata;
} redisCallback;

/* List of callbacks for either regular replies or pub/sub */
typedef struct redisCallbackList {
    redisCallback *head, *tail;
} redisCallbackList;

/* Connection callback prototypes */
typedef void (redisDisconnectCallback)(const struct redisAsyncContext*, int status);
typedef void (redisConnectCallback)(const struct redisAsyncContext*, int status);

/* Context for an async connection to Redis */
typedef struct redisAsyncContext {
    /* Hold the regular context, so it can be realloc'ed. */
    redisContext c;

    /* Setup error flags so they can be used directly. */
    int err;
    char *errstr;

    /* Not used by hiredis */
    void *data;

    /* Event library data and hooks */
    struct {
        void *data;

        /* Hooks that are called when the library expects to start
         * reading/writing. These functions should be idempotent. */
        void (*addRead)(void *privdata);
        void (*delRead)(void *privdata);
        void (*addWrite)(void *privdata);
        void (*delWrite)(void *privdata);
        void (*cleanup)(void *privdata);
    } ev;

    /* Called when either the connection is terminated due to an error or per
     * user request. The status is set accordingly (REDIS_OK, REDIS_ERR). */
    redisDisconnectCallback *onDisconnect;

    /* Called when the first write event was received. */
    redisConnectCallback *onConnect;

    /* Regular command callbacks */
    redisCallbackList replies;

    /* Subscription callbacks */
    struct {
        redisCallbackList invalid;
        struct dict *channels;
        struct dict *patterns;
    } sub;
} redisAsyncContext;

/* Functions that proxy to hiredis */
redisAsyncContext *redisAsyncConnect(const char *ip, int port);
redisAsyncContext *redisAsyncConnectBind(const char *ip, int port, const char *source_addr);
redisAsyncContext *redisAsyncConnectUnix(const char *path);
int redisAsyncSetConnectCallback(redisAsyncContext *ac, redisConnectCallback *fn);
int redisAsyncSetDisconnectCallback(redisAsyncContext *ac, redisDisconnectCallback *fn);
void redisAsyncDisconnect(redisAsyncContext *ac);
void redisAsyncFree(redisAsyncContext *ac);

/* Handle read/write events */
void redisAsyncHandleRead(redisAsyncContext *ac);
void redisAsyncHandleWrite(redisAsyncContext *ac);

/* Command functions for an async context. Write the command to the
 * output buffer and register the provided callback. */
int redisvAsyncCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *format, va_list ap);
int redisAsyncCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *format, ...);
int redisAsyncCommandArgv(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, int argc, const char **argv, const size_t *argvlen);

#ifdef __cplusplus
}
#endif

#endif
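Why redisAsyncInitialize() in async.c can simply realloc() a redisContext into the larger redisAsyncContext: the sync context is the first member of the struct above, so both share a starting address. A minimal C11 sketch of that invariant (illustrative, not part of hiredis):

/* Sketch: (redisContext*)ac and &ac->c are the same pointer for any
 * redisAsyncContext *ac, which is what makes the realloc-upcast legal. */
#include <stddef.h>
#include "async.h"

_Static_assert(offsetof(redisAsyncContext, c) == 0,
               "the async context must embed the sync context at offset 0");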
disque-1.0-rc1/deps/hiredis/dict.c
/* Hash table implementation.
 *
 * This file implements in-memory hash tables with insert/del/replace/find/
 * get-random-element operations. Hash tables will auto-resize if needed;
 * tables of power-of-two size are used, and collisions are handled by
 * chaining. See the source code for more information... :)
 *
 * Copyright (c) 2006-2010, Salvatore Sanfilippo <antirez at gmail dot com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of Redis nor the names of its contributors may be used
 *     to endorse or promote products derived from this software without
 *     specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "fmacros.h"
#include <stdlib.h>
#include <assert.h>
#include <limits.h>
#include "dict.h"

/* -------------------------- private prototypes ---------------------------- */

static int _dictExpandIfNeeded(dict *ht);
static unsigned long _dictNextPower(unsigned long size);
static int _dictKeyIndex(dict *ht, const void *key);
static int _dictInit(dict *ht, dictType *type, void *privDataPtr);

/* -------------------------- hash functions -------------------------------- */

/* Generic hash function (a popular one from Bernstein).
 * I tested a few and this was the best. */
static unsigned int dictGenHashFunction(const unsigned char *buf, int len) {
    unsigned int hash = 5381;

    while (len--)
        hash = ((hash << 5) + hash) + (*buf++); /* hash * 33 + c */
    return hash;
}

/* ----------------------------- API implementation ------------------------- */

/* Reset a hashtable already initialized with ht_init().
 * NOTE: This function should only be called by ht_destroy(). */
static void _dictReset(dict *ht) {
    ht->table = NULL;
    ht->size = 0;
    ht->sizemask = 0;
    ht->used = 0;
}

/* Create a new hash table */
static dict *dictCreate(dictType *type, void *privDataPtr) {
    dict *ht = malloc(sizeof(*ht));
    _dictInit(ht,type,privDataPtr);
    return ht;
}

/* Initialize the hash table */
static int _dictInit(dict *ht, dictType *type, void *privDataPtr) {
    _dictReset(ht);
    ht->type = type;
    ht->privdata = privDataPtr;
    return DICT_OK;
}

/* Expand or create the hashtable */
static int dictExpand(dict *ht, unsigned long size) {
    dict n; /* the new hashtable */
    unsigned long realsize = _dictNextPower(size), i;

    /* the size is invalid if it is smaller than the number of
     * elements already inside the hashtable */
    if (ht->used > size)
        return DICT_ERR;

    _dictInit(&n, ht->type, ht->privdata);
    n.size = realsize;
    n.sizemask = realsize-1;
    n.table = calloc(realsize,sizeof(dictEntry*));

    /* Copy all the elements from the old to the new table:
     * note that if the old hash table is empty ht->size is zero,
     * so dictExpand just creates a hash table. */
    n.used = ht->used;
    for (i = 0; i < ht->size && ht->used > 0; i++) {
        dictEntry *he, *nextHe;

        if (ht->table[i] == NULL) continue;

        /* For each hash entry on this slot... */
        he = ht->table[i];
        while(he) {
            unsigned int h;

            nextHe = he->next;
            /* Get the new element index */
            h = dictHashKey(ht, he->key) & n.sizemask;
            he->next = n.table[h];
            n.table[h] = he;
            ht->used--;
            /* Pass to the next element */
            he = nextHe;
        }
    }
    assert(ht->used == 0);
    free(ht->table);

    /* Remap the new hashtable in the old */
    *ht = n;
    return DICT_OK;
}

/* Add an element to the target hash table */
static int dictAdd(dict *ht, void *key, void *val) {
    int index;
    dictEntry *entry;

    /* Get the index of the new element, or -1 if
     * the element already exists. */
    if ((index = _dictKeyIndex(ht, key)) == -1)
        return DICT_ERR;

    /* Allocates the memory and stores key */
    entry = malloc(sizeof(*entry));
    entry->next = ht->table[index];
    ht->table[index] = entry;

    /* Set the hash entry fields. */
    dictSetHashKey(ht, entry, key);
    dictSetHashVal(ht, entry, val);
    ht->used++;
    return DICT_OK;
}

/* Add an element, discarding the old if the key already exists.
 * Return 1 if the key was added from scratch, 0 if there was already an
 * element with such key and dictReplace() just performed a value update
 * operation. */
static int dictReplace(dict *ht, void *key, void *val) {
    dictEntry *entry, auxentry;

    /* Try to add the element. If the key
     * does not exist dictAdd will succeed. */
    if (dictAdd(ht, key, val) == DICT_OK)
        return 1;
    /* It already exists, get the entry */
    entry = dictFind(ht, key);
    /* Set the new value and free the old one. Note that it is important
     * to do that in this order, as the value may just be exactly the same
     * as the previous one. In this context, think to reference counting,
     * you want to increment (set), and then decrement (free), and not the
     * reverse. */
    auxentry = *entry;
    dictSetHashVal(ht, entry, val);
    dictFreeEntryVal(ht, &auxentry);
    return 0;
}

/* Search and remove an element */
static int dictDelete(dict *ht, const void *key) {
    unsigned int h;
    dictEntry *de, *prevde;

    if (ht->size == 0)
        return DICT_ERR;
    h = dictHashKey(ht, key) & ht->sizemask;
    de = ht->table[h];

    prevde = NULL;
    while(de) {
        if (dictCompareHashKeys(ht,key,de->key)) {
            /* Unlink the element from the list */
            if (prevde)
                prevde->next = de->next;
            else
                ht->table[h] = de->next;

            dictFreeEntryKey(ht,de);
            dictFreeEntryVal(ht,de);
            free(de);
            ht->used--;
            return DICT_OK;
        }
        prevde = de;
        de = de->next;
    }
    return DICT_ERR; /* not found */
}

/* Destroy an entire hash table */
static int _dictClear(dict *ht) {
    unsigned long i;

    /* Free all the elements */
    for (i = 0; i < ht->size && ht->used > 0; i++) {
        dictEntry *he, *nextHe;

        if ((he = ht->table[i]) == NULL) continue;
        while(he) {
            nextHe = he->next;
            dictFreeEntryKey(ht, he);
            dictFreeEntryVal(ht, he);
            free(he);
            ht->used--;
            he = nextHe;
        }
    }
    /* Free the table and the allocated cache structure */
    free(ht->table);
    /* Re-initialize the table */
    _dictReset(ht);
    return DICT_OK; /* never fails */
}

/* Clear & Release the hash table */
static void dictRelease(dict *ht) {
    _dictClear(ht);
    free(ht);
}

static dictEntry *dictFind(dict *ht, const void *key) {
    dictEntry *he;
    unsigned int h;

    if (ht->size == 0) return NULL;
    h = dictHashKey(ht, key) & ht->sizemask;
    he = ht->table[h];
    while(he) {
        if (dictCompareHashKeys(ht, key, he->key))
            return he;
        he = he->next;
    }
    return NULL;
}

static dictIterator *dictGetIterator(dict *ht) {
    dictIterator *iter = malloc(sizeof(*iter));

    iter->ht = ht;
    iter->index = -1;
    iter->entry = NULL;
    iter->nextEntry = NULL;
    return iter;
}

static dictEntry *dictNext(dictIterator *iter) {
    while (1) {
        if (iter->entry == NULL) {
            iter->index++;
            if (iter->index >= (signed)iter->ht->size) break;
            iter->entry = iter->ht->table[iter->index];
        } else {
            iter->entry = iter->nextEntry;
        }
        if (iter->entry) {
            /* We need to save the 'next' here, the iterator user
             * may delete the entry we are returning. */
            iter->nextEntry = iter->entry->next;
            return iter->entry;
        }
    }
    return NULL;
}

static void dictReleaseIterator(dictIterator *iter) {
    free(iter);
}

/* ------------------------- private functions ------------------------------ */

/* Expand the hash table if needed */
static int _dictExpandIfNeeded(dict *ht) {
    /* If the hash table is empty expand it to the initial size,
     * if the table is "full" double its size. */
    if (ht->size == 0)
        return dictExpand(ht, DICT_HT_INITIAL_SIZE);
    if (ht->used == ht->size)
        return dictExpand(ht, ht->size*2);
    return DICT_OK;
}

/* Our hash table capability is a power of two */
static unsigned long _dictNextPower(unsigned long size) {
    unsigned long i = DICT_HT_INITIAL_SIZE;

    if (size >= LONG_MAX) return LONG_MAX;
    while(1) {
        if (i >= size)
            return i;
        i *= 2;
    }
}

/* Returns the index of a free slot that can be populated with
 * a hash entry for the given 'key'.
 * If the key already exists, -1 is returned. */
static int _dictKeyIndex(dict *ht, const void *key) {
    unsigned int h;
    dictEntry *he;

    /* Expand the hashtable if needed */
    if (_dictExpandIfNeeded(ht) == DICT_ERR)
        return -1;
    /* Compute the key hash value */
    h = dictHashKey(ht, key) & ht->sizemask;
    /* Search if this slot does not already contain the given key */
    he = ht->table[h];
    while(he) {
        if (dictCompareHashKeys(ht, key, he->key))
            return -1;
        he = he->next;
    }
    return h;
}
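A hedged usage sketch of this dict (hypothetical test program, not part of hiredis): the functions are all static and meant to be textually included, which is exactly what async.c does with '#include "dict.c"'. With all dictType hooks left NULL, keys compare by pointer identity, so a real caller supplies at least a hash and compare function as below. String literals are used so no dup/destructor hooks are needed; unused-function warnings are expected for a sketch like this.

/* Hypothetical standalone test, compiled next to dict.c/dict.h. */
#include <stdio.h>
#include <string.h>
#include "dict.c"

static unsigned int strHash(const void *key) {
    return dictGenHashFunction((const unsigned char*)key,
                               (int)strlen((const char*)key));
}
static int strCompare(void *privdata, const void *a, const void *b) {
    (void)privdata;
    return strcmp((const char*)a, (const char*)b) == 0;
}
static dictType strDictType = { strHash, NULL, NULL, strCompare, NULL, NULL };

int main(void) {
    dict *d = dictCreate(&strDictType, NULL);
    dictAdd(d, (void*)"port", (void*)"7711");   /* literals: nothing to free */
    dictEntry *de = dictFind(d, "port");
    if (de) printf("port=%s\n", (char*)dictGetEntryVal(de));
    dictRelease(d);
    return 0;
}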
disque-1.0-rc1/deps/hiredis/dict.h
/* Hash table implementation.
 *
 * This file implements in-memory hash tables with insert/del/replace/find/
 * get-random-element operations. Hash tables will auto-resize if needed;
 * tables of power-of-two size are used, and collisions are handled by
 * chaining. See the source code for more information... :)
 *
 * Copyright (c) 2006-2010, Salvatore Sanfilippo <antirez at gmail dot com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of Redis nor the names of its contributors may be used
 *     to endorse or promote products derived from this software without
 *     specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __DICT_H
#define __DICT_H

#define DICT_OK 0
#define DICT_ERR 1

/* Unused arguments generate annoying warnings... */
#define DICT_NOTUSED(V) ((void) V)

typedef struct dictEntry {
    void *key;
    void *val;
    struct dictEntry *next;
} dictEntry;

typedef struct dictType {
    unsigned int (*hashFunction)(const void *key);
    void *(*keyDup)(void *privdata, const void *key);
    void *(*valDup)(void *privdata, const void *obj);
    int (*keyCompare)(void *privdata, const void *key1, const void *key2);
    void (*keyDestructor)(void *privdata, void *key);
    void (*valDestructor)(void *privdata, void *obj);
} dictType;

typedef struct dict {
    dictEntry **table;
    dictType *type;
    unsigned long size;
    unsigned long sizemask;
    unsigned long used;
    void *privdata;
} dict;

typedef struct dictIterator {
    dict *ht;
    int index;
    dictEntry *entry, *nextEntry;
} dictIterator;

/* This is the initial size of every hash table */
#define DICT_HT_INITIAL_SIZE 4

/* ------------------------------- Macros ------------------------------------*/
#define dictFreeEntryVal(ht, entry) \
    if ((ht)->type->valDestructor) \
        (ht)->type->valDestructor((ht)->privdata, (entry)->val)

#define dictSetHashVal(ht, entry, _val_) do { \
    if ((ht)->type->valDup) \
        entry->val = (ht)->type->valDup((ht)->privdata, _val_); \
    else \
        entry->val = (_val_); \
} while(0)

#define dictFreeEntryKey(ht, entry) \
    if ((ht)->type->keyDestructor) \
        (ht)->type->keyDestructor((ht)->privdata, (entry)->key)

#define dictSetHashKey(ht, entry, _key_) do { \
    if ((ht)->type->keyDup) \
        entry->key = (ht)->type->keyDup((ht)->privdata, _key_); \
    else \
        entry->key = (_key_); \
} while(0)

#define dictCompareHashKeys(ht, key1, key2) \
    (((ht)->type->keyCompare) ? \
        (ht)->type->keyCompare((ht)->privdata, key1, key2) : \
        (key1) == (key2))

#define dictHashKey(ht, key) (ht)->type->hashFunction(key)

#define dictGetEntryKey(he) ((he)->key)
#define dictGetEntryVal(he) ((he)->val)
#define dictSlots(ht) ((ht)->size)
#define dictSize(ht) ((ht)->used)

/* API */
static unsigned int dictGenHashFunction(const unsigned char *buf, int len);
static dict *dictCreate(dictType *type, void *privDataPtr);
static int dictExpand(dict *ht, unsigned long size);
static int dictAdd(dict *ht, void *key, void *val);
static int dictReplace(dict *ht, void *key, void *val);
static int dictDelete(dict *ht, const void *key);
static void dictRelease(dict *ht);
static dictEntry * dictFind(dict *ht, const void *key);
static dictIterator *dictGetIterator(dict *ht);
static dictEntry *dictNext(dictIterator *iter);
static void dictReleaseIterator(dictIterator *iter);

#endif /* __DICT_H */
disque-1.0-rc1/deps/hiredis/examples/
disque-1.0-rc1/deps/hiredis/examples/example-ae.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <hiredis.h>
#include <async.h>
#include <adapters/ae.h>

/* Put event loop in the global scope, so it can be explicitly stopped */
static aeEventLoop *loop;

void getCallback(redisAsyncContext *c, void *r, void *privdata) {
    redisReply *reply = r;
    if (reply == NULL) return;
    printf("argv[%s]: %s\n", (char*)privdata, reply->str);

    /* Disconnect after receiving the reply to GET */
    redisAsyncDisconnect(c);
}

void connectCallback(const redisAsyncContext *c, int status) {
    if (status != REDIS_OK) {
        printf("Error: %s\n", c->errstr);
        aeStop(loop);
        return;
    }
    printf("Connected...\n");
}

void disconnectCallback(const redisAsyncContext *c, int status) {
    if (status != REDIS_OK) {
        printf("Error: %s\n", c->errstr);
        aeStop(loop);
        return;
    }
    printf("Disconnected...\n");
    aeStop(loop);
}

int main (int argc, char **argv) {
    signal(SIGPIPE, SIG_IGN);

    redisAsyncContext *c = redisAsyncConnect("127.0.0.1", 6379);
    if (c->err) {
        /* Let *c leak for now... */
        printf("Error: %s\n", c->errstr);
        return 1;
    }

    loop = aeCreateEventLoop(64);
    redisAeAttach(loop, c);
    redisAsyncSetConnectCallback(c,connectCallback);
    redisAsyncSetDisconnectCallback(c,disconnectCallback);
    redisAsyncCommand(c, NULL, NULL, "SET key %b", argv[argc-1], strlen(argv[argc-1]));
    redisAsyncCommand(c, getCallback, (char*)"end-1", "GET key");
    aeMain(loop);
    return 0;
}

disque-1.0-rc1/deps/hiredis/examples/example-libev.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <hiredis.h>
#include <async.h>
#include <adapters/libev.h>

void getCallback(redisAsyncContext *c, void *r, void *privdata) {
    redisReply *reply = r;
    if (reply == NULL) return;
    printf("argv[%s]: %s\n", (char*)privdata, reply->str);

    /* Disconnect after receiving the reply to GET */
    redisAsyncDisconnect(c);
}

void connectCallback(const redisAsyncContext *c, int status) {
    if (status != REDIS_OK) {
        printf("Error: %s\n", c->errstr);
        return;
    }
    printf("Connected...\n");
}

void disconnectCallback(const redisAsyncContext *c, int status) {
    if (status != REDIS_OK) {
        printf("Error: %s\n", c->errstr);
        return;
    }
    printf("Disconnected...\n");
}

int main (int argc, char **argv) {
    signal(SIGPIPE, SIG_IGN);

    redisAsyncContext *c = redisAsyncConnect("127.0.0.1", 6379);
    if (c->err) {
        /* Let *c leak for now... */
        printf("Error: %s\n", c->errstr);
        return 1;
    }

    redisLibevAttach(EV_DEFAULT_ c);
    redisAsyncSetConnectCallback(c,connectCallback);
    redisAsyncSetDisconnectCallback(c,disconnectCallback);
    redisAsyncCommand(c, NULL, NULL, "SET key %b", argv[argc-1], strlen(argv[argc-1]));
    redisAsyncCommand(c, getCallback, (char*)"end-1", "GET key");
    ev_loop(EV_DEFAULT_ 0);
    return 0;
}
*/ printf("Error: %s\n", c->errstr); return 1; } redisLibeventAttach(c,base); redisAsyncSetConnectCallback(c,connectCallback); redisAsyncSetDisconnectCallback(c,disconnectCallback); redisAsyncCommand(c, NULL, NULL, "SET key %b", argv[argc-1], strlen(argv[argc-1])); redisAsyncCommand(c, getCallback, (char*)"end-1", "GET key"); event_base_dispatch(base); return 0; } disque-1.0-rc1/deps/hiredis/examples/example-libuv.c000066400000000000000000000026451264176561100224650ustar00rootroot00000000000000#include #include #include #include #include #include #include void getCallback(redisAsyncContext *c, void *r, void *privdata) { redisReply *reply = r; if (reply == NULL) return; printf("argv[%s]: %s\n", (char*)privdata, reply->str); /* Disconnect after receiving the reply to GET */ redisAsyncDisconnect(c); } void connectCallback(const redisAsyncContext *c, int status) { if (status != REDIS_OK) { printf("Error: %s\n", c->errstr); return; } printf("Connected...\n"); } void disconnectCallback(const redisAsyncContext *c, int status) { if (status != REDIS_OK) { printf("Error: %s\n", c->errstr); return; } printf("Disconnected...\n"); } int main (int argc, char **argv) { signal(SIGPIPE, SIG_IGN); uv_loop_t* loop = uv_default_loop(); redisAsyncContext *c = redisAsyncConnect("127.0.0.1", 6379); if (c->err) { /* Let *c leak for now... */ printf("Error: %s\n", c->errstr); return 1; } redisLibuvAttach(c,loop); redisAsyncSetConnectCallback(c,connectCallback); redisAsyncSetDisconnectCallback(c,disconnectCallback); redisAsyncCommand(c, NULL, NULL, "SET key %b", argv[argc-1], strlen(argv[argc-1])); redisAsyncCommand(c, getCallback, (char*)"end-1", "GET key"); uv_run(loop, UV_RUN_DEFAULT); return 0; } disque-1.0-rc1/deps/hiredis/examples/example.c000066400000000000000000000042741264176561100213460ustar00rootroot00000000000000#include #include #include #include int main(int argc, char **argv) { unsigned int j; redisContext *c; redisReply *reply; const char *hostname = (argc > 1) ? argv[1] : "127.0.0.1"; int port = (argc > 2) ? atoi(argv[2]) : 6379; struct timeval timeout = { 1, 500000 }; // 1.5 seconds c = redisConnectWithTimeout(hostname, port, timeout); if (c == NULL || c->err) { if (c) { printf("Connection error: %s\n", c->errstr); redisFree(c); } else { printf("Connection error: can't allocate redis context\n"); } exit(1); } /* PING server */ reply = redisCommand(c,"PING"); printf("PING: %s\n", reply->str); freeReplyObject(reply); /* Set a key */ reply = redisCommand(c,"SET %s %s", "foo", "hello world"); printf("SET: %s\n", reply->str); freeReplyObject(reply); /* Set a key using binary safe API */ reply = redisCommand(c,"SET %b %b", "bar", (size_t) 3, "hello", (size_t) 5); printf("SET (binary API): %s\n", reply->str); freeReplyObject(reply); /* Try a GET and two INCR */ reply = redisCommand(c,"GET foo"); printf("GET foo: %s\n", reply->str); freeReplyObject(reply); reply = redisCommand(c,"INCR counter"); printf("INCR counter: %lld\n", reply->integer); freeReplyObject(reply); /* again ... 
*/ reply = redisCommand(c,"INCR counter"); printf("INCR counter: %lld\n", reply->integer); freeReplyObject(reply); /* Create a list of numbers, from 0 to 9 */ reply = redisCommand(c,"DEL mylist"); freeReplyObject(reply); for (j = 0; j < 10; j++) { char buf[64]; snprintf(buf,64,"%u",j); reply = redisCommand(c,"LPUSH mylist element-%s", buf); freeReplyObject(reply); } /* Let's check what we have inside the list */ reply = redisCommand(c,"LRANGE mylist 0 -1"); if (reply->type == REDIS_REPLY_ARRAY) { for (j = 0; j < reply->elements; j++) { printf("%u) %s\n", j, reply->element[j]->str); } } freeReplyObject(reply); /* Disconnects and frees the context */ redisFree(c); return 0; } disque-1.0-rc1/deps/hiredis/fmacros.h000066400000000000000000000006441264176561100175310ustar00rootroot00000000000000#ifndef __HIREDIS_FMACRO_H #define __HIREDIS_FMACRO_H #if !defined(_BSD_SOURCE) #define _BSD_SOURCE #define _DEFAULT_SOURCE #endif #if defined(_AIX) #define _ALL_SOURCE #endif #if defined(__sun__) #define _POSIX_C_SOURCE 200112L #elif defined(__linux__) || defined(__OpenBSD__) || defined(__NetBSD__) #define _XOPEN_SOURCE 600 #else #define _XOPEN_SOURCE #endif #if __APPLE__ && __MACH__ #define _OSX #endif #endif disque-1.0-rc1/deps/hiredis/hiredis.c000066400000000000000000001122501264176561100175160ustar00rootroot00000000000000/* * Copyright (c) 2009-2011, Salvatore Sanfilippo * Copyright (c) 2010-2011, Pieter Noordhuis * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "fmacros.h" #include #include #include #include #include #include #include "hiredis.h" #include "net.h" #include "sds.h" static redisReply *createReplyObject(int type); static void *createStringObject(const redisReadTask *task, char *str, size_t len); static void *createArrayObject(const redisReadTask *task, int elements); static void *createIntegerObject(const redisReadTask *task, long long value); static void *createNilObject(const redisReadTask *task); /* Default set of functions to build the reply. Keep in mind that such a * function returning NULL is interpreted as OOM. 
disque-1.0-rc1/deps/hiredis/hiredis.c
/*
 * Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
 * Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of Redis nor the names of its contributors may be used
 *     to endorse or promote products derived from this software without
 *     specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "fmacros.h"
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <ctype.h>
#include "hiredis.h"
#include "net.h"
#include "sds.h"

static redisReply *createReplyObject(int type);
static void *createStringObject(const redisReadTask *task, char *str, size_t len);
static void *createArrayObject(const redisReadTask *task, int elements);
static void *createIntegerObject(const redisReadTask *task, long long value);
static void *createNilObject(const redisReadTask *task);

/* Default set of functions to build the reply. Keep in mind that such a
 * function returning NULL is interpreted as OOM. */
static redisReplyObjectFunctions defaultFunctions = {
    createStringObject,
    createArrayObject,
    createIntegerObject,
    createNilObject,
    freeReplyObject
};

/* Create a reply object */
static redisReply *createReplyObject(int type) {
    redisReply *r = calloc(1,sizeof(*r));

    if (r == NULL)
        return NULL;

    r->type = type;
    return r;
}

/* Free a reply object */
void freeReplyObject(void *reply) {
    redisReply *r = reply;
    size_t j;

    switch(r->type) {
    case REDIS_REPLY_INTEGER:
        break; /* Nothing to free */
    case REDIS_REPLY_ARRAY:
        if (r->element != NULL) {
            for (j = 0; j < r->elements; j++)
                if (r->element[j] != NULL)
                    freeReplyObject(r->element[j]);
            free(r->element);
        }
        break;
    case REDIS_REPLY_ERROR:
    case REDIS_REPLY_STATUS:
    case REDIS_REPLY_STRING:
        if (r->str != NULL)
            free(r->str);
        break;
    }
    free(r);
}

static void *createStringObject(const redisReadTask *task, char *str, size_t len) {
    redisReply *r, *parent;
    char *buf;

    r = createReplyObject(task->type);
    if (r == NULL)
        return NULL;

    buf = malloc(len+1);
    if (buf == NULL) {
        freeReplyObject(r);
        return NULL;
    }

    assert(task->type == REDIS_REPLY_ERROR  ||
           task->type == REDIS_REPLY_STATUS ||
           task->type == REDIS_REPLY_STRING);

    /* Copy string value */
    memcpy(buf,str,len);
    buf[len] = '\0';
    r->str = buf;
    r->len = len;

    if (task->parent) {
        parent = task->parent->obj;
        assert(parent->type == REDIS_REPLY_ARRAY);
        parent->element[task->idx] = r;
    }
    return r;
}

static void *createArrayObject(const redisReadTask *task, int elements) {
    redisReply *r, *parent;

    r = createReplyObject(REDIS_REPLY_ARRAY);
    if (r == NULL)
        return NULL;

    if (elements > 0) {
        r->element = calloc(elements,sizeof(redisReply*));
        if (r->element == NULL) {
            freeReplyObject(r);
            return NULL;
        }
    }

    r->elements = elements;

    if (task->parent) {
        parent = task->parent->obj;
        assert(parent->type == REDIS_REPLY_ARRAY);
        parent->element[task->idx] = r;
    }
    return r;
}

static void *createIntegerObject(const redisReadTask *task, long long value) {
    redisReply *r, *parent;

    r = createReplyObject(REDIS_REPLY_INTEGER);
    if (r == NULL)
        return NULL;

    r->integer = value;

    if (task->parent) {
        parent = task->parent->obj;
        assert(parent->type == REDIS_REPLY_ARRAY);
        parent->element[task->idx] = r;
    }
    return r;
}

static void *createNilObject(const redisReadTask *task) {
    redisReply *r, *parent;

    r = createReplyObject(REDIS_REPLY_NIL);
    if (r == NULL)
        return NULL;

    if (task->parent) {
        parent = task->parent->obj;
        assert(parent->type == REDIS_REPLY_ARRAY);
        parent->element[task->idx] = r;
    }
    return r;
}

static void __redisReaderSetError(redisReader *r, int type, const char *str) {
    size_t len;

    if (r->reply != NULL && r->fn && r->fn->freeObject) {
        r->fn->freeObject(r->reply);
        r->reply = NULL;
    }

    /* Clear input buffer on errors. */
    if (r->buf != NULL) {
        sdsfree(r->buf);
        r->buf = NULL;
        r->pos = r->len = 0;
    }

    /* Reset task stack. */
    r->ridx = -1;

    /* Set error. */
    r->err = type;
    len = strlen(str);
    len = len < (sizeof(r->errstr)-1) ? len : (sizeof(r->errstr)-1);
    memcpy(r->errstr,str,len);
    r->errstr[len] = '\0';
}

static size_t chrtos(char *buf, size_t size, char byte) {
    size_t len = 0;

    switch(byte) {
    case '\\':
    case '"':
        len = snprintf(buf,size,"\"\\%c\"",byte);
        break;
    case '\n': len = snprintf(buf,size,"\"\\n\""); break;
    case '\r': len = snprintf(buf,size,"\"\\r\""); break;
    case '\t': len = snprintf(buf,size,"\"\\t\""); break;
    case '\a': len = snprintf(buf,size,"\"\\a\""); break;
    case '\b': len = snprintf(buf,size,"\"\\b\""); break;
    default:
        if (isprint(byte))
            len = snprintf(buf,size,"\"%c\"",byte);
        else
            len = snprintf(buf,size,"\"\\x%02x\"",(unsigned char)byte);
        break;
    }

    return len;
}

static void __redisReaderSetErrorProtocolByte(redisReader *r, char byte) {
    char cbuf[8], sbuf[128];

    chrtos(cbuf,sizeof(cbuf),byte);
    snprintf(sbuf,sizeof(sbuf),
        "Protocol error, got %s as reply type byte", cbuf);
    __redisReaderSetError(r,REDIS_ERR_PROTOCOL,sbuf);
}

static void __redisReaderSetErrorOOM(redisReader *r) {
    __redisReaderSetError(r,REDIS_ERR_OOM,"Out of memory");
}

static char *readBytes(redisReader *r, unsigned int bytes) {
    char *p;
    if (r->len-r->pos >= bytes) {
        p = r->buf+r->pos;
        r->pos += bytes;
        return p;
    }
    return NULL;
}

/* Find pointer to \r\n. */
static char *seekNewline(char *s, size_t len) {
    int pos = 0;
    int _len = len-1;

    /* Position should be < len-1 because the character at "pos" should be
     * followed by a \n. Note that strchr cannot be used because it doesn't
     * allow to search a limited length and the buffer that is being searched
     * might not have a trailing NULL character. */
    while (pos < _len) {
        while(pos < _len && s[pos] != '\r') pos++;
        if (s[pos] != '\r') {
            /* Not found. */
            return NULL;
        } else {
            if (s[pos+1] == '\n') {
                /* Found. */
                return s+pos;
            } else {
                /* Continue searching. */
                pos++;
            }
        }
    }
    return NULL;
}

/* Read a long long value starting at *s, under the assumption that it will be
 * terminated by \r\n. Ambiguously returns -1 for unexpected input. */
static long long readLongLong(char *s) {
    long long v = 0;
    int dec, mult = 1;
    char c;

    if (*s == '-') {
        mult = -1;
        s++;
    } else if (*s == '+') {
        mult = 1;
        s++;
    }

    while ((c = *(s++)) != '\r') {
        dec = c - '0';
        if (dec >= 0 && dec < 10) {
            v *= 10;
            v += dec;
        } else {
            /* Should not happen... */
            return -1;
        }
    }

    return mult*v;
}

static char *readLine(redisReader *r, int *_len) {
    char *p, *s;
    int len;

    p = r->buf+r->pos;
    s = seekNewline(p,(r->len-r->pos));
    if (s != NULL) {
        len = s-(r->buf+r->pos);
        r->pos += len+2; /* skip \r\n */
        if (_len) *_len = len;
        return p;
    }
    return NULL;
}

static void moveToNextTask(redisReader *r) {
    redisReadTask *cur, *prv;
    while (r->ridx >= 0) {
        /* Return a.s.a.p. when the stack is now empty. */
        if (r->ridx == 0) {
            r->ridx--;
            return;
        }

        cur = &(r->rstack[r->ridx]);
        prv = &(r->rstack[r->ridx-1]);
        assert(prv->type == REDIS_REPLY_ARRAY);
        if (cur->idx == prv->elements-1) {
            r->ridx--;
        } else {
            /* Reset the type because the next item can be anything */
            assert(cur->idx < prv->elements);
            cur->type = -1;
            cur->elements = -1;
            cur->idx++;
            return;
        }
    }
}
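A hedged worked example (not part of hiredis) of how the helpers above cooperate on a raw reply buffer before the process* functions below take over:

/*
 * Example buffer: "$5\r\nhello\r\n"
 *   readBytes(r,1)         -> '$'      (the reply type byte)
 *   readLongLong("5\r...") -> 5        (the bulk length before the \r)
 *   seekNewline(...)       -> pointer to the "\r\n" after the 5, found
 *                             without assuming NUL-termination
 *   payload                -> the 5 bytes "hello", followed by its own "\r\n"
 * A negative length, as in the nil bulk "$-1\r\n", is how processBulkItem()
 * below decides to create the nil object instead of a string.
 */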
*/ if (r->fn && r->fn->createString) obj = r->fn->createString(cur,p,len); else obj = (void*)(size_t)(cur->type); } if (obj == NULL) { __redisReaderSetErrorOOM(r); return REDIS_ERR; } /* Set reply if this is the root object. */ if (r->ridx == 0) r->reply = obj; moveToNextTask(r); return REDIS_OK; } return REDIS_ERR; } static int processBulkItem(redisReader *r) { redisReadTask *cur = &(r->rstack[r->ridx]); void *obj = NULL; char *p, *s; long len; unsigned long bytelen; int success = 0; p = r->buf+r->pos; s = seekNewline(p,r->len-r->pos); if (s != NULL) { p = r->buf+r->pos; bytelen = s-(r->buf+r->pos)+2; /* include \r\n */ len = readLongLong(p); if (len < 0) { /* The nil object can always be created. */ if (r->fn && r->fn->createNil) obj = r->fn->createNil(cur); else obj = (void*)REDIS_REPLY_NIL; success = 1; } else { /* Only continue when the buffer contains the entire bulk item. */ bytelen += len+2; /* include \r\n */ if (r->pos+bytelen <= r->len) { if (r->fn && r->fn->createString) obj = r->fn->createString(cur,s+2,len); else obj = (void*)REDIS_REPLY_STRING; success = 1; } } /* Proceed when obj was created. */ if (success) { if (obj == NULL) { __redisReaderSetErrorOOM(r); return REDIS_ERR; } r->pos += bytelen; /* Set reply if this is the root object. */ if (r->ridx == 0) r->reply = obj; moveToNextTask(r); return REDIS_OK; } } return REDIS_ERR; } static int processMultiBulkItem(redisReader *r) { redisReadTask *cur = &(r->rstack[r->ridx]); void *obj; char *p; long elements; int root = 0; /* Set error for nested multi bulks with depth > 7 */ if (r->ridx == 8) { __redisReaderSetError(r,REDIS_ERR_PROTOCOL, "No support for nested multi bulk replies with depth > 7"); return REDIS_ERR; } if ((p = readLine(r,NULL)) != NULL) { elements = readLongLong(p); root = (r->ridx == 0); if (elements == -1) { if (r->fn && r->fn->createNil) obj = r->fn->createNil(cur); else obj = (void*)REDIS_REPLY_NIL; if (obj == NULL) { __redisReaderSetErrorOOM(r); return REDIS_ERR; } moveToNextTask(r); } else { if (r->fn && r->fn->createArray) obj = r->fn->createArray(cur,elements); else obj = (void*)REDIS_REPLY_ARRAY; if (obj == NULL) { __redisReaderSetErrorOOM(r); return REDIS_ERR; } /* Modify task stack when there are more than 0 elements. */ if (elements > 0) { cur->elements = elements; cur->obj = obj; r->ridx++; r->rstack[r->ridx].type = -1; r->rstack[r->ridx].elements = -1; r->rstack[r->ridx].idx = 0; r->rstack[r->ridx].obj = NULL; r->rstack[r->ridx].parent = cur; r->rstack[r->ridx].privdata = r->privdata; } else { moveToNextTask(r); } } /* Set reply if this is the root object. 
*/ if (root) r->reply = obj; return REDIS_OK; } return REDIS_ERR; } static int processItem(redisReader *r) { redisReadTask *cur = &(r->rstack[r->ridx]); char *p; /* check if we need to read type */ if (cur->type < 0) { if ((p = readBytes(r,1)) != NULL) { switch (p[0]) { case '-': cur->type = REDIS_REPLY_ERROR; break; case '+': cur->type = REDIS_REPLY_STATUS; break; case ':': cur->type = REDIS_REPLY_INTEGER; break; case '$': cur->type = REDIS_REPLY_STRING; break; case '*': cur->type = REDIS_REPLY_ARRAY; break; default: __redisReaderSetErrorProtocolByte(r,*p); return REDIS_ERR; } } else { /* could not consume 1 byte */ return REDIS_ERR; } } /* process typed item */ switch(cur->type) { case REDIS_REPLY_ERROR: case REDIS_REPLY_STATUS: case REDIS_REPLY_INTEGER: return processLineItem(r); case REDIS_REPLY_STRING: return processBulkItem(r); case REDIS_REPLY_ARRAY: return processMultiBulkItem(r); default: assert(NULL); return REDIS_ERR; /* Avoid warning. */ } } redisReader *redisReaderCreate(void) { redisReader *r; r = calloc(sizeof(redisReader),1); if (r == NULL) return NULL; r->err = 0; r->errstr[0] = '\0'; r->fn = &defaultFunctions; r->buf = sdsempty(); r->maxbuf = REDIS_READER_MAX_BUF; if (r->buf == NULL) { free(r); return NULL; } r->ridx = -1; return r; } void redisReaderFree(redisReader *r) { if (r->reply != NULL && r->fn && r->fn->freeObject) r->fn->freeObject(r->reply); if (r->buf != NULL) sdsfree(r->buf); free(r); } int redisReaderFeed(redisReader *r, const char *buf, size_t len) { sds newbuf; /* Return early when this reader is in an erroneous state. */ if (r->err) return REDIS_ERR; /* Copy the provided buffer. */ if (buf != NULL && len >= 1) { /* Destroy internal buffer when it is empty and is quite large. */ if (r->len == 0 && r->maxbuf != 0 && sdsavail(r->buf) > r->maxbuf) { sdsfree(r->buf); r->buf = sdsempty(); r->pos = 0; /* r->buf should not be NULL since we just free'd a larger one. */ assert(r->buf != NULL); } newbuf = sdscatlen(r->buf,buf,len); if (newbuf == NULL) { __redisReaderSetErrorOOM(r); return REDIS_ERR; } r->buf = newbuf; r->len = sdslen(r->buf); } return REDIS_OK; } int redisReaderGetReply(redisReader *r, void **reply) { /* Default target pointer to NULL. */ if (reply != NULL) *reply = NULL; /* Return early when this reader is in an erroneous state. */ if (r->err) return REDIS_ERR; /* When the buffer is empty, there will never be a reply. */ if (r->len == 0) return REDIS_OK; /* Set first item to process when the stack is empty. */ if (r->ridx == -1) { r->rstack[0].type = -1; r->rstack[0].elements = -1; r->rstack[0].idx = -1; r->rstack[0].obj = NULL; r->rstack[0].parent = NULL; r->rstack[0].privdata = r->privdata; r->ridx = 0; } /* Process items in reply. */ while (r->ridx >= 0) if (processItem(r) != REDIS_OK) break; /* Return ASAP when an error occurred. */ if (r->err) return REDIS_ERR; /* Discard part of the buffer when we've consumed at least 1k, to avoid * doing unnecessary calls to memmove() in sds.c. */ if (r->pos >= 1024) { sdsrange(r->buf,r->pos,-1); r->pos = 0; r->len = sdslen(r->buf); } /* Emit a reply when there is one. */ if (r->ridx == -1) { if (reply != NULL) *reply = r->reply; r->reply = NULL; } return REDIS_OK; } /* Calculate the number of bytes needed to represent an integer as string. */ static int intlen(int i) { int len = 0; if (i < 0) { len++; i = -i; } do { len++; i /= 10; } while(i); return len; } /* Helper that calculates the bulk length given a certain string length. 
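/*
 * A minimal sketch of driving the parser with the public reader API defined
 * above (redisReaderCreate(), redisReaderFeed(), redisReaderGetReply(),
 * redisReaderFree()); the fed bytes are an example status reply:
 *
 *   redisReader *r = redisReaderCreate();
 *   void *reply = NULL;
 *
 *   redisReaderFeed(r, "+OK\r\n", 5);
 *   if (redisReaderGetReply(r, &reply) == REDIS_OK && reply != NULL) {
 *       // With the default functions this is a redisReply* whose type is
 *       // REDIS_REPLY_STATUS and whose str field holds "OK".
 *       freeReplyObject(reply);
 *   }
 *   redisReaderFree(r);
 */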
*/ static size_t bulklen(size_t len) { return 1+intlen(len)+2+len+2; } int redisvFormatCommand(char **target, const char *format, va_list ap) { const char *c = format; char *cmd = NULL; /* final command */ int pos; /* position in final command */ sds curarg, newarg; /* current argument */ int touched = 0; /* was the current argument touched? */ char **curargv = NULL, **newargv = NULL; int argc = 0; int totlen = 0; int j; /* Abort if there is not target to set */ if (target == NULL) return -1; /* Build the command string accordingly to protocol */ curarg = sdsempty(); if (curarg == NULL) return -1; while(*c != '\0') { if (*c != '%' || c[1] == '\0') { if (*c == ' ') { if (touched) { newargv = realloc(curargv,sizeof(char*)*(argc+1)); if (newargv == NULL) goto err; curargv = newargv; curargv[argc++] = curarg; totlen += bulklen(sdslen(curarg)); /* curarg is put in argv so it can be overwritten. */ curarg = sdsempty(); if (curarg == NULL) goto err; touched = 0; } } else { newarg = sdscatlen(curarg,c,1); if (newarg == NULL) goto err; curarg = newarg; touched = 1; } } else { char *arg; size_t size; /* Set newarg so it can be checked even if it is not touched. */ newarg = curarg; switch(c[1]) { case 's': arg = va_arg(ap,char*); size = strlen(arg); if (size > 0) newarg = sdscatlen(curarg,arg,size); break; case 'b': arg = va_arg(ap,char*); size = va_arg(ap,size_t); if (size > 0) newarg = sdscatlen(curarg,arg,size); break; case '%': newarg = sdscat(curarg,"%"); break; default: /* Try to detect printf format */ { static const char intfmts[] = "diouxX"; char _format[16]; const char *_p = c+1; size_t _l = 0; va_list _cpy; /* Flags */ if (*_p != '\0' && *_p == '#') _p++; if (*_p != '\0' && *_p == '0') _p++; if (*_p != '\0' && *_p == '-') _p++; if (*_p != '\0' && *_p == ' ') _p++; if (*_p != '\0' && *_p == '+') _p++; /* Field width */ while (*_p != '\0' && isdigit(*_p)) _p++; /* Precision */ if (*_p == '.') { _p++; while (*_p != '\0' && isdigit(*_p)) _p++; } /* Copy va_list before consuming with va_arg */ va_copy(_cpy,ap); /* Integer conversion (without modifiers) */ if (strchr(intfmts,*_p) != NULL) { va_arg(ap,int); goto fmt_valid; } /* Double conversion (without modifiers) */ if (strchr("eEfFgGaA",*_p) != NULL) { va_arg(ap,double); goto fmt_valid; } /* Size: char */ if (_p[0] == 'h' && _p[1] == 'h') { _p += 2; if (*_p != '\0' && strchr(intfmts,*_p) != NULL) { va_arg(ap,int); /* char gets promoted to int */ goto fmt_valid; } goto fmt_invalid; } /* Size: short */ if (_p[0] == 'h') { _p += 1; if (*_p != '\0' && strchr(intfmts,*_p) != NULL) { va_arg(ap,int); /* short gets promoted to int */ goto fmt_valid; } goto fmt_invalid; } /* Size: long long */ if (_p[0] == 'l' && _p[1] == 'l') { _p += 2; if (*_p != '\0' && strchr(intfmts,*_p) != NULL) { va_arg(ap,long long); goto fmt_valid; } goto fmt_invalid; } /* Size: long */ if (_p[0] == 'l') { _p += 1; if (*_p != '\0' && strchr(intfmts,*_p) != NULL) { va_arg(ap,long); goto fmt_valid; } goto fmt_invalid; } fmt_invalid: va_end(_cpy); goto err; fmt_valid: _l = (_p+1)-c; if (_l < sizeof(_format)-2) { memcpy(_format,c,_l); _format[_l] = '\0'; newarg = sdscatvprintf(curarg,_format,_cpy); /* Update current position (note: outer blocks * increment c twice so compensate here) */ c = _p-1; } va_end(_cpy); break; } } if (newarg == NULL) goto err; curarg = newarg; touched = 1; c++; } c++; } /* Add the last argument if needed */ if (touched) { newargv = realloc(curargv,sizeof(char*)*(argc+1)); if (newargv == NULL) goto err; curargv = newargv; curargv[argc++] = curarg; totlen 
+= bulklen(sdslen(curarg)); } else { sdsfree(curarg); } /* Clear curarg because it was put in curargv or was free'd. */ curarg = NULL; /* Add bytes needed to hold multi bulk count */ totlen += 1+intlen(argc)+2; /* Build the command at protocol level */ cmd = malloc(totlen+1); if (cmd == NULL) goto err; pos = sprintf(cmd,"*%d\r\n",argc); for (j = 0; j < argc; j++) { pos += sprintf(cmd+pos,"$%zu\r\n",sdslen(curargv[j])); memcpy(cmd+pos,curargv[j],sdslen(curargv[j])); pos += sdslen(curargv[j]); sdsfree(curargv[j]); cmd[pos++] = '\r'; cmd[pos++] = '\n'; } assert(pos == totlen); cmd[pos] = '\0'; free(curargv); *target = cmd; return totlen; err: while(argc--) sdsfree(curargv[argc]); free(curargv); if (curarg != NULL) sdsfree(curarg); /* No need to check cmd since it is the last statement that can fail, * but do it anyway to be as defensive as possible. */ if (cmd != NULL) free(cmd); return -1; } /* Format a command according to the Redis protocol. This function * takes a format similar to printf: * * %s represents a C null terminated string you want to interpolate * %b represents a binary safe string * * When using %b you need to provide both the pointer to the string * and the length in bytes as a size_t. Examples: * * len = redisFormatCommand(target, "GET %s", mykey); * len = redisFormatCommand(target, "SET %s %b", mykey, myval, myvallen); */ int redisFormatCommand(char **target, const char *format, ...) { va_list ap; int len; va_start(ap,format); len = redisvFormatCommand(target,format,ap); va_end(ap); return len; } /* Format a command according to the Redis protocol. This function takes the * number of arguments, an array with arguments and an array with their * lengths. If the latter is set to NULL, strlen will be used to compute the * argument lengths. */ int redisFormatCommandArgv(char **target, int argc, const char **argv, const size_t *argvlen) { char *cmd = NULL; /* final command */ int pos; /* position in final command */ size_t len; int totlen, j; /* Calculate number of bytes needed for the command */ totlen = 1+intlen(argc)+2; for (j = 0; j < argc; j++) { len = argvlen ? argvlen[j] : strlen(argv[j]); totlen += bulklen(len); } /* Build the command at protocol level */ cmd = malloc(totlen+1); if (cmd == NULL) return -1; pos = sprintf(cmd,"*%d\r\n",argc); for (j = 0; j < argc; j++) { len = argvlen ? argvlen[j] : strlen(argv[j]); pos += sprintf(cmd+pos,"$%zu\r\n",len); memcpy(cmd+pos,argv[j],len); pos += len; cmd[pos++] = '\r'; cmd[pos++] = '\n'; } assert(pos == totlen); cmd[pos] = '\0'; *target = cmd; return totlen; } void __redisSetError(redisContext *c, int type, const char *str) { size_t len; c->err = type; if (str != NULL) { len = strlen(str); len = len < (sizeof(c->errstr)-1) ? len : (sizeof(c->errstr)-1); memcpy(c->errstr,str,len); c->errstr[len] = '\0'; } else { /* Only REDIS_ERR_IO may lack a description! */ assert(type == REDIS_ERR_IO); strerror_r(errno,c->errstr,sizeof(c->errstr)); } } static redisContext *redisContextInit(void) { redisContext *c; c = calloc(1,sizeof(redisContext)); if (c == NULL) return NULL; c->err = 0; c->errstr[0] = '\0'; c->obuf = sdsempty(); c->reader = redisReaderCreate(); return c; } void redisFree(redisContext *c) { if (c->fd > 0) close(c->fd); if (c->obuf != NULL) sdsfree(c->obuf); if (c->reader != NULL) redisReaderFree(c->reader); free(c); } int redisFreeKeepFd(redisContext *c) { int fd = c->fd; c->fd = -1; redisFree(c); return fd; } /* Connect to a Redis instance. 
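/*
 * A minimal sketch of redisFormatCommandArgv() defined above; the command is
 * an example, and the output shown is what the protocol encoding yields:
 *
 *   const char *argv[] = {"SET", "key", "value"};
 *   char *cmd;
 *   int len = redisFormatCommandArgv(&cmd, 3, argv, NULL);
 *   // cmd now holds "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n"
 *   // and len is 33. The buffer is heap allocated: free(cmd) when done.
 */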
On error the field error in the returned
 * context will be set to the return value of the error function.
 * When no set of reply functions is given, the default set will be used. */
redisContext *redisConnect(const char *ip, int port) {
    redisContext *c;

    c = redisContextInit();
    if (c == NULL)
        return NULL;

    c->flags |= REDIS_BLOCK;
    redisContextConnectTcp(c,ip,port,NULL);
    return c;
}

redisContext *redisConnectWithTimeout(const char *ip, int port, const struct timeval tv) {
    redisContext *c;

    c = redisContextInit();
    if (c == NULL)
        return NULL;

    c->flags |= REDIS_BLOCK;
    redisContextConnectTcp(c,ip,port,&tv);
    return c;
}

redisContext *redisConnectNonBlock(const char *ip, int port) {
    redisContext *c;

    c = redisContextInit();
    if (c == NULL)
        return NULL;

    c->flags &= ~REDIS_BLOCK;
    redisContextConnectTcp(c,ip,port,NULL);
    return c;
}

redisContext *redisConnectBindNonBlock(const char *ip, int port, const char *source_addr) {
    redisContext *c = redisContextInit();
    if (c == NULL)
        return NULL;
    c->flags &= ~REDIS_BLOCK;
    redisContextConnectBindTcp(c,ip,port,NULL,source_addr);
    return c;
}

redisContext *redisConnectUnix(const char *path) {
    redisContext *c;

    c = redisContextInit();
    if (c == NULL)
        return NULL;

    c->flags |= REDIS_BLOCK;
    redisContextConnectUnix(c,path,NULL);
    return c;
}

redisContext *redisConnectUnixWithTimeout(const char *path, const struct timeval tv) {
    redisContext *c;

    c = redisContextInit();
    if (c == NULL)
        return NULL;

    c->flags |= REDIS_BLOCK;
    redisContextConnectUnix(c,path,&tv);
    return c;
}

redisContext *redisConnectUnixNonBlock(const char *path) {
    redisContext *c;

    c = redisContextInit();
    if (c == NULL)
        return NULL;

    c->flags &= ~REDIS_BLOCK;
    redisContextConnectUnix(c,path,NULL);
    return c;
}

redisContext *redisConnectFd(int fd) {
    redisContext *c;

    c = redisContextInit();
    if (c == NULL)
        return NULL;

    c->fd = fd;
    c->flags |= REDIS_BLOCK | REDIS_CONNECTED;
    return c;
}

/* Set read/write timeout on a blocking socket. */
int redisSetTimeout(redisContext *c, const struct timeval tv) {
    if (c->flags & REDIS_BLOCK)
        return redisContextSetTimeout(c,tv);
    return REDIS_ERR;
}

/* Enable connection KeepAlive. */
int redisEnableKeepAlive(redisContext *c) {
    if (redisKeepAlive(c, REDIS_KEEPALIVE_INTERVAL) != REDIS_OK)
        return REDIS_ERR;
    return REDIS_OK;
}

/* Use this function to handle a read event on the descriptor. It will try
 * to read some bytes from the socket and feed them to the reply parser.
 *
 * After this function is called, you may use redisGetReplyFromReader to
 * see if there is a reply available. */
int redisBufferRead(redisContext *c) {
    char buf[1024*16];
    int nread;

    /* Return early when the context has seen an error. */
    if (c->err)
        return REDIS_ERR;

    nread = read(c->fd,buf,sizeof(buf));
    if (nread == -1) {
        if ((errno == EAGAIN && !(c->flags & REDIS_BLOCK)) || (errno == EINTR)) {
            /* Try again later */
        } else {
            __redisSetError(c,REDIS_ERR_IO,NULL);
            return REDIS_ERR;
        }
    } else if (nread == 0) {
        __redisSetError(c,REDIS_ERR_EOF,"Server closed the connection");
        return REDIS_ERR;
    } else {
        if (redisReaderFeed(c->reader,buf,nread) != REDIS_OK) {
            __redisSetError(c,c->reader->err,c->reader->errstr);
            return REDIS_ERR;
        }
    }
    return REDIS_OK;
}

/* Write the output buffer to the socket.
 *
 * Returns REDIS_OK when the buffer is empty, or (a part of) the buffer was
 * successfully written to the socket. When the buffer is empty after the
 * write operation, "done" is set to 1 (if given).
 *
 * Returns REDIS_ERR if an error occurred trying to write and sets
 * c->errstr to hold the appropriate error string.
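/*
 * A minimal usage sketch of the blocking connect functions above; the host,
 * port (Disque's default, 7711) and timeout are example values:
 *
 *   struct timeval tv = { 1, 500000 }; // 1.5 seconds
 *   redisContext *c = redisConnectWithTimeout("127.0.0.1", 7711, tv);
 *   if (c == NULL || c->err) {
 *       if (c) {
 *           fprintf(stderr, "Connection error: %s\n", c->errstr);
 *           redisFree(c);
 *       }
 *       exit(1);
 *   }
 *   redisEnableKeepAlive(c);
 */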
*/ int redisBufferWrite(redisContext *c, int *done) { int nwritten; /* Return early when the context has seen an error. */ if (c->err) return REDIS_ERR; if (sdslen(c->obuf) > 0) { nwritten = write(c->fd,c->obuf,sdslen(c->obuf)); if (nwritten == -1) { if ((errno == EAGAIN && !(c->flags & REDIS_BLOCK)) || (errno == EINTR)) { /* Try again later */ } else { __redisSetError(c,REDIS_ERR_IO,NULL); return REDIS_ERR; } } else if (nwritten > 0) { if (nwritten == (signed)sdslen(c->obuf)) { sdsfree(c->obuf); c->obuf = sdsempty(); } else { sdsrange(c->obuf,nwritten,-1); } } } if (done != NULL) *done = (sdslen(c->obuf) == 0); return REDIS_OK; } /* Internal helper function to try and get a reply from the reader, * or set an error in the context otherwise. */ int redisGetReplyFromReader(redisContext *c, void **reply) { if (redisReaderGetReply(c->reader,reply) == REDIS_ERR) { __redisSetError(c,c->reader->err,c->reader->errstr); return REDIS_ERR; } return REDIS_OK; } int redisGetReply(redisContext *c, void **reply) { int wdone = 0; void *aux = NULL; /* Try to read pending replies */ if (redisGetReplyFromReader(c,&aux) == REDIS_ERR) return REDIS_ERR; /* For the blocking context, flush output buffer and read reply */ if (aux == NULL && c->flags & REDIS_BLOCK) { /* Write until done */ do { if (redisBufferWrite(c,&wdone) == REDIS_ERR) return REDIS_ERR; } while (!wdone); /* Read until there is a reply */ do { if (redisBufferRead(c) == REDIS_ERR) return REDIS_ERR; if (redisGetReplyFromReader(c,&aux) == REDIS_ERR) return REDIS_ERR; } while (aux == NULL); } /* Set reply object */ if (reply != NULL) *reply = aux; return REDIS_OK; } /* Helper function for the redisAppendCommand* family of functions. * * Write a formatted command to the output buffer. When this family * is used, you need to call redisGetReply yourself to retrieve * the reply (or replies in pub/sub). */ int __redisAppendCommand(redisContext *c, const char *cmd, size_t len) { sds newbuf; newbuf = sdscatlen(c->obuf,cmd,len); if (newbuf == NULL) { __redisSetError(c,REDIS_ERR_OOM,"Out of memory"); return REDIS_ERR; } c->obuf = newbuf; return REDIS_OK; } int redisAppendFormattedCommand(redisContext *c, const char *cmd, size_t len) { if (__redisAppendCommand(c, cmd, len) != REDIS_OK) { return REDIS_ERR; } return REDIS_OK; } int redisvAppendCommand(redisContext *c, const char *format, va_list ap) { char *cmd; int len; len = redisvFormatCommand(&cmd,format,ap); if (len == -1) { __redisSetError(c,REDIS_ERR_OOM,"Out of memory"); return REDIS_ERR; } if (__redisAppendCommand(c,cmd,len) != REDIS_OK) { free(cmd); return REDIS_ERR; } free(cmd); return REDIS_OK; } int redisAppendCommand(redisContext *c, const char *format, ...) { va_list ap; int ret; va_start(ap,format); ret = redisvAppendCommand(c,format,ap); va_end(ap); return ret; } int redisAppendCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen) { char *cmd; int len; len = redisFormatCommandArgv(&cmd,argc,argv,argvlen); if (len == -1) { __redisSetError(c,REDIS_ERR_OOM,"Out of memory"); return REDIS_ERR; } if (__redisAppendCommand(c,cmd,len) != REDIS_OK) { free(cmd); return REDIS_ERR; } free(cmd); return REDIS_OK; } /* Helper function for the redisCommand* family of functions. * * Write a formatted command to the output buffer. If the given context is * blocking, immediately read the reply into the "reply" pointer. When the * context is non-blocking, the "reply" pointer will not be used and the * command is simply appended to the write buffer. 
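/*
 * A minimal pipelining sketch using the append functions above together
 * with redisGetReply(); the commands are examples and c is assumed to be a
 * connected blocking context. The output buffer is flushed by the first
 * redisGetReply() call, and each call returns one reply, in order:
 *
 *   void *reply;
 *   redisAppendCommand(c, "PING");
 *   redisAppendCommand(c, "PING");
 *   if (redisGetReply(c, &reply) == REDIS_OK) freeReplyObject(reply);
 *   if (redisGetReply(c, &reply) == REDIS_OK) freeReplyObject(reply);
 */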
 *
 * Returns the reply when a reply was successfully retrieved. Returns NULL
 * otherwise. When NULL is returned in a blocking context, the error field
 * in the context will be set. */
static void *__redisBlockForReply(redisContext *c) {
    void *reply;

    if (c->flags & REDIS_BLOCK) {
        if (redisGetReply(c,&reply) != REDIS_OK)
            return NULL;
        return reply;
    }
    return NULL;
}

void *redisvCommand(redisContext *c, const char *format, va_list ap) {
    if (redisvAppendCommand(c,format,ap) != REDIS_OK)
        return NULL;
    return __redisBlockForReply(c);
}

void *redisCommand(redisContext *c, const char *format, ...) {
    va_list ap;
    void *reply = NULL;
    va_start(ap,format);
    reply = redisvCommand(c,format,ap);
    va_end(ap);
    return reply;
}

void *redisCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen) {
    if (redisAppendCommandArgv(c,argc,argv,argvlen) != REDIS_OK)
        return NULL;
    return __redisBlockForReply(c);
}

disque-1.0-rc1/deps/hiredis/hiredis.h

/*
 * Copyright (c) 2009-2011, Salvatore Sanfilippo
 * Copyright (c) 2010-2011, Pieter Noordhuis
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of Redis nor the names of its contributors may be used
 *     to endorse or promote products derived from this software without
 *     specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __HIREDIS_H
#define __HIREDIS_H
#include <stddef.h>   /* for size_t */
#include <stdarg.h>   /* for va_list */
#include <sys/time.h> /* for struct timeval */

#define HIREDIS_MAJOR 0
#define HIREDIS_MINOR 11
#define HIREDIS_PATCH 0

#define REDIS_ERR -1
#define REDIS_OK 0

/* When an error occurs, the err flag in a context is set to hold the type of
 * error that occurred. REDIS_ERR_IO means there was an I/O error and you
 * should use the "errno" variable to find out what is wrong.
 * For other values, the "errstr" field will hold a description. */
#define REDIS_ERR_IO 1 /* Error in read or write */
#define REDIS_ERR_EOF 3 /* End of file */
#define REDIS_ERR_PROTOCOL 4 /* Protocol error */
#define REDIS_ERR_OOM 5 /* Out of memory */
#define REDIS_ERR_OTHER 2 /* Everything else... */

/* Connection type can be blocking or non-blocking and is set in the
 * least significant bit of the flags field in redisContext.
*/ #define REDIS_BLOCK 0x1 /* Connection may be disconnected before being free'd. The second bit * in the flags field is set when the context is connected. */ #define REDIS_CONNECTED 0x2 /* The async API might try to disconnect cleanly and flush the output * buffer and read all subsequent replies before disconnecting. * This flag means no new commands can come in and the connection * should be terminated once all replies have been read. */ #define REDIS_DISCONNECTING 0x4 /* Flag specific to the async API which means that the context should be clean * up as soon as possible. */ #define REDIS_FREEING 0x8 /* Flag that is set when an async callback is executed. */ #define REDIS_IN_CALLBACK 0x10 /* Flag that is set when the async context has one or more subscriptions. */ #define REDIS_SUBSCRIBED 0x20 /* Flag that is set when monitor mode is active */ #define REDIS_MONITORING 0x40 #define REDIS_REPLY_STRING 1 #define REDIS_REPLY_ARRAY 2 #define REDIS_REPLY_INTEGER 3 #define REDIS_REPLY_NIL 4 #define REDIS_REPLY_STATUS 5 #define REDIS_REPLY_ERROR 6 #define REDIS_READER_MAX_BUF (1024*16) /* Default max unused reader buffer. */ #define REDIS_KEEPALIVE_INTERVAL 15 /* seconds */ #ifdef __cplusplus extern "C" { #endif /* This is the reply object returned by redisCommand() */ typedef struct redisReply { int type; /* REDIS_REPLY_* */ long long integer; /* The integer when type is REDIS_REPLY_INTEGER */ int len; /* Length of string */ char *str; /* Used for both REDIS_REPLY_ERROR and REDIS_REPLY_STRING */ size_t elements; /* number of elements, for REDIS_REPLY_ARRAY */ struct redisReply **element; /* elements vector for REDIS_REPLY_ARRAY */ } redisReply; typedef struct redisReadTask { int type; int elements; /* number of elements in multibulk container */ int idx; /* index in parent (array) object */ void *obj; /* holds user-generated value for a read task */ struct redisReadTask *parent; /* parent task */ void *privdata; /* user-settable arbitrary field */ } redisReadTask; typedef struct redisReplyObjectFunctions { void *(*createString)(const redisReadTask*, char*, size_t); void *(*createArray)(const redisReadTask*, int); void *(*createInteger)(const redisReadTask*, long long); void *(*createNil)(const redisReadTask*); void (*freeObject)(void*); } redisReplyObjectFunctions; /* State for the protocol parser */ typedef struct redisReader { int err; /* Error flags, 0 when there is no error */ char errstr[128]; /* String representation of error when applicable */ char *buf; /* Read buffer */ size_t pos; /* Buffer cursor */ size_t len; /* Buffer length */ size_t maxbuf; /* Max length of unused buffer */ redisReadTask rstack[9]; int ridx; /* Index of current read task */ void *reply; /* Temporary reply pointer */ redisReplyObjectFunctions *fn; void *privdata; } redisReader; /* Public API for the protocol parser. */ redisReader *redisReaderCreate(void); void redisReaderFree(redisReader *r); int redisReaderFeed(redisReader *r, const char *buf, size_t len); int redisReaderGetReply(redisReader *r, void **reply); /* Backwards compatibility, can be removed on big version bump. 
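/*
 * A minimal sketch of walking the redisReply structure defined above; the
 * command is an example and c is assumed to be a connected blocking context:
 *
 *   redisReply *reply = redisCommand(c, "HELLO");
 *   if (reply != NULL) {
 *       if (reply->type == REDIS_REPLY_ARRAY) {
 *           size_t j;
 *           for (j = 0; j < reply->elements; j++)
 *               if (reply->element[j]->type == REDIS_REPLY_STRING)
 *                   printf("%s\n", reply->element[j]->str);
 *       }
 *       freeReplyObject(reply);
 *   }
 */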
*/ #define redisReplyReaderCreate redisReaderCreate #define redisReplyReaderFree redisReaderFree #define redisReplyReaderFeed redisReaderFeed #define redisReplyReaderGetReply redisReaderGetReply #define redisReplyReaderSetPrivdata(_r, _p) (int)(((redisReader*)(_r))->privdata = (_p)) #define redisReplyReaderGetObject(_r) (((redisReader*)(_r))->reply) #define redisReplyReaderGetError(_r) (((redisReader*)(_r))->errstr) /* Function to free the reply objects hiredis returns by default. */ void freeReplyObject(void *reply); /* Functions to format a command according to the protocol. */ int redisvFormatCommand(char **target, const char *format, va_list ap); int redisFormatCommand(char **target, const char *format, ...); int redisFormatCommandArgv(char **target, int argc, const char **argv, const size_t *argvlen); /* Context for a connection to Redis */ typedef struct redisContext { int err; /* Error flags, 0 when there is no error */ char errstr[128]; /* String representation of error when applicable */ int fd; int flags; char *obuf; /* Write buffer */ redisReader *reader; /* Protocol reader */ } redisContext; redisContext *redisConnect(const char *ip, int port); redisContext *redisConnectWithTimeout(const char *ip, int port, const struct timeval tv); redisContext *redisConnectNonBlock(const char *ip, int port); redisContext *redisConnectBindNonBlock(const char *ip, int port, const char *source_addr); redisContext *redisConnectUnix(const char *path); redisContext *redisConnectUnixWithTimeout(const char *path, const struct timeval tv); redisContext *redisConnectUnixNonBlock(const char *path); redisContext *redisConnectFd(int fd); int redisSetTimeout(redisContext *c, const struct timeval tv); int redisEnableKeepAlive(redisContext *c); void redisFree(redisContext *c); int redisFreeKeepFd(redisContext *c); int redisBufferRead(redisContext *c); int redisBufferWrite(redisContext *c, int *done); /* In a blocking context, this function first checks if there are unconsumed * replies to return and returns one if so. Otherwise, it flushes the output * buffer to the socket and reads until it has a reply. In a non-blocking * context, it will return unconsumed replies until there are no more. */ int redisGetReply(redisContext *c, void **reply); int redisGetReplyFromReader(redisContext *c, void **reply); /* Write a formatted command to the output buffer. Use these functions in blocking mode * to get a pipeline of commands. */ int redisAppendFormattedCommand(redisContext *c, const char *cmd, size_t len); /* Write a command to the output buffer. Use these functions in blocking mode * to get a pipeline of commands. */ int redisvAppendCommand(redisContext *c, const char *format, va_list ap); int redisAppendCommand(redisContext *c, const char *format, ...); int redisAppendCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen); /* Issue a command to Redis. In a blocking context, it is identical to calling * redisAppendCommand, followed by redisGetReply. The function will return * NULL if there was an error in performing the request, otherwise it will * return the reply. In a non-blocking context, it is identical to calling * only redisAppendCommand and will always return NULL. 
*/ void *redisvCommand(redisContext *c, const char *format, va_list ap); void *redisCommand(redisContext *c, const char *format, ...); void *redisCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen); #ifdef __cplusplus } #endif #endif disque-1.0-rc1/deps/hiredis/net.c000066400000000000000000000277161264176561100166710ustar00rootroot00000000000000/* Extracted from anet.c to work properly with Hiredis error reporting. * * Copyright (c) 2006-2011, Salvatore Sanfilippo * Copyright (c) 2010-2011, Pieter Noordhuis * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "fmacros.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "net.h" #include "sds.h" /* Defined in hiredis.c */ void __redisSetError(redisContext *c, int type, const char *str); static void redisContextCloseFd(redisContext *c) { if (c && c->fd >= 0) { close(c->fd); c->fd = -1; } } static void __redisSetErrorFromErrno(redisContext *c, int type, const char *prefix) { char buf[128] = { 0 }; size_t len = 0; if (prefix != NULL) len = snprintf(buf,sizeof(buf),"%s: ",prefix); strerror_r(errno,buf+len,sizeof(buf)-len); __redisSetError(c,type,buf); } static int redisSetReuseAddr(redisContext *c) { int on = 1; if (setsockopt(c->fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) == -1) { __redisSetErrorFromErrno(c,REDIS_ERR_IO,NULL); redisContextCloseFd(c); return REDIS_ERR; } return REDIS_OK; } static int redisCreateSocket(redisContext *c, int type) { int s; if ((s = socket(type, SOCK_STREAM, 0)) == -1) { __redisSetErrorFromErrno(c,REDIS_ERR_IO,NULL); return REDIS_ERR; } c->fd = s; if (type == AF_INET) { if (redisSetReuseAddr(c) == REDIS_ERR) { return REDIS_ERR; } } return REDIS_OK; } static int redisSetBlocking(redisContext *c, int blocking) { int flags; /* Set the socket nonblocking. * Note that fcntl(2) for F_GETFL and F_SETFL can't be * interrupted by a signal. 
*/ if ((flags = fcntl(c->fd, F_GETFL)) == -1) { __redisSetErrorFromErrno(c,REDIS_ERR_IO,"fcntl(F_GETFL)"); redisContextCloseFd(c); return REDIS_ERR; } if (blocking) flags &= ~O_NONBLOCK; else flags |= O_NONBLOCK; if (fcntl(c->fd, F_SETFL, flags) == -1) { __redisSetErrorFromErrno(c,REDIS_ERR_IO,"fcntl(F_SETFL)"); redisContextCloseFd(c); return REDIS_ERR; } return REDIS_OK; } int redisKeepAlive(redisContext *c, int interval) { int val = 1; int fd = c->fd; if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &val, sizeof(val)) == -1){ __redisSetError(c,REDIS_ERR_OTHER,strerror(errno)); return REDIS_ERR; } val = interval; #ifdef _OSX if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &val, sizeof(val)) < 0) { __redisSetError(c,REDIS_ERR_OTHER,strerror(errno)); return REDIS_ERR; } #else #ifndef __sun val = interval; if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &val, sizeof(val)) < 0) { __redisSetError(c,REDIS_ERR_OTHER,strerror(errno)); return REDIS_ERR; } val = interval/3; if (val == 0) val = 1; if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &val, sizeof(val)) < 0) { __redisSetError(c,REDIS_ERR_OTHER,strerror(errno)); return REDIS_ERR; } val = 3; if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &val, sizeof(val)) < 0) { __redisSetError(c,REDIS_ERR_OTHER,strerror(errno)); return REDIS_ERR; } #endif #endif return REDIS_OK; } static int redisSetTcpNoDelay(redisContext *c) { int yes = 1; if (setsockopt(c->fd, IPPROTO_TCP, TCP_NODELAY, &yes, sizeof(yes)) == -1) { __redisSetErrorFromErrno(c,REDIS_ERR_IO,"setsockopt(TCP_NODELAY)"); redisContextCloseFd(c); return REDIS_ERR; } return REDIS_OK; } #define __MAX_MSEC (((LONG_MAX) - 999) / 1000) static int redisContextWaitReady(redisContext *c, const struct timeval *timeout) { struct pollfd wfd[1]; long msec; msec = -1; wfd[0].fd = c->fd; wfd[0].events = POLLOUT; /* Only use timeout when not NULL. 
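/*
 * Worked example of the conversion performed just below: a timeout of
 * { tv_sec = 1, tv_usec = 500000 } becomes
 *
 *   msec = 1*1000 + (500000 + 999)/1000 = 1000 + 500 = 1500 milliseconds,
 *
 * which is passed to poll(2). When timeout is NULL, msec stays -1 and
 * poll(2) blocks indefinitely.
 */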
*/ if (timeout != NULL) { if (timeout->tv_usec > 1000000 || timeout->tv_sec > __MAX_MSEC) { __redisSetErrorFromErrno(c, REDIS_ERR_IO, NULL); redisContextCloseFd(c); return REDIS_ERR; } msec = (timeout->tv_sec * 1000) + ((timeout->tv_usec + 999) / 1000); if (msec < 0 || msec > INT_MAX) { msec = INT_MAX; } } if (errno == EINPROGRESS) { int res; if ((res = poll(wfd, 1, msec)) == -1) { __redisSetErrorFromErrno(c, REDIS_ERR_IO, "poll(2)"); redisContextCloseFd(c); return REDIS_ERR; } else if (res == 0) { errno = ETIMEDOUT; __redisSetErrorFromErrno(c,REDIS_ERR_IO,NULL); redisContextCloseFd(c); return REDIS_ERR; } if (redisCheckSocketError(c) != REDIS_OK) return REDIS_ERR; return REDIS_OK; } __redisSetErrorFromErrno(c,REDIS_ERR_IO,NULL); redisContextCloseFd(c); return REDIS_ERR; } int redisCheckSocketError(redisContext *c) { int err = 0; socklen_t errlen = sizeof(err); if (getsockopt(c->fd, SOL_SOCKET, SO_ERROR, &err, &errlen) == -1) { __redisSetErrorFromErrno(c,REDIS_ERR_IO,"getsockopt(SO_ERROR)"); return REDIS_ERR; } if (err) { errno = err; __redisSetErrorFromErrno(c,REDIS_ERR_IO,NULL); return REDIS_ERR; } return REDIS_OK; } int redisContextSetTimeout(redisContext *c, const struct timeval tv) { if (setsockopt(c->fd,SOL_SOCKET,SO_RCVTIMEO,&tv,sizeof(tv)) == -1) { __redisSetErrorFromErrno(c,REDIS_ERR_IO,"setsockopt(SO_RCVTIMEO)"); return REDIS_ERR; } if (setsockopt(c->fd,SOL_SOCKET,SO_SNDTIMEO,&tv,sizeof(tv)) == -1) { __redisSetErrorFromErrno(c,REDIS_ERR_IO,"setsockopt(SO_SNDTIMEO)"); return REDIS_ERR; } return REDIS_OK; } static int _redisContextConnectTcp(redisContext *c, const char *addr, int port, const struct timeval *timeout, const char *source_addr) { int s, rv; char _port[6]; /* strlen("65535"); */ struct addrinfo hints, *servinfo, *bservinfo, *p, *b; int blocking = (c->flags & REDIS_BLOCK); snprintf(_port, 6, "%d", port); memset(&hints,0,sizeof(hints)); hints.ai_family = AF_INET; hints.ai_socktype = SOCK_STREAM; /* Try with IPv6 if no IPv4 address was found. We do it in this order since * in a Redis client you can't afford to test if you have IPv6 connectivity * as this would add latency to every connect. Otherwise a more sensible * route could be: Use IPv6 if both addresses are available and there is IPv6 * connectivity. */ if ((rv = getaddrinfo(addr,_port,&hints,&servinfo)) != 0) { hints.ai_family = AF_INET6; if ((rv = getaddrinfo(addr,_port,&hints,&servinfo)) != 0) { __redisSetError(c,REDIS_ERR_OTHER,gai_strerror(rv)); return REDIS_ERR; } } for (p = servinfo; p != NULL; p = p->ai_next) { if ((s = socket(p->ai_family,p->ai_socktype,p->ai_protocol)) == -1) continue; c->fd = s; if (redisSetBlocking(c,0) != REDIS_OK) goto error; if (source_addr) { int bound = 0; /* Using getaddrinfo saves us from self-determining IPv4 vs IPv6 */ if ((rv = getaddrinfo(source_addr, NULL, &hints, &bservinfo)) != 0) { char buf[128]; snprintf(buf,sizeof(buf),"Can't get addr: %s",gai_strerror(rv)); __redisSetError(c,REDIS_ERR_OTHER,buf); goto error; } for (b = bservinfo; b != NULL; b = b->ai_next) { if (bind(s,b->ai_addr,b->ai_addrlen) != -1) { bound = 1; break; } } freeaddrinfo(bservinfo); if (!bound) { char buf[128]; snprintf(buf,sizeof(buf),"Can't bind socket: %s",strerror(errno)); __redisSetError(c,REDIS_ERR_OTHER,buf); goto error; } } if (connect(s,p->ai_addr,p->ai_addrlen) == -1) { if (errno == EHOSTUNREACH) { redisContextCloseFd(c); continue; } else if (errno == EINPROGRESS && !blocking) { /* This is ok. 
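/*
 * A minimal standalone sketch of the non-blocking connect pattern that
 * redisContextWaitReady() and redisCheckSocketError() above implement:
 * connect(2) on an O_NONBLOCK socket fails with EINPROGRESS, poll(2) waits
 * for writability, and SO_ERROR reports the final outcome. All names here
 * are illustrative:
 *
 *   if (connect(fd, addr, addrlen) == -1 && errno == EINPROGRESS) {
 *       struct pollfd wfd = { .fd = fd, .events = POLLOUT };
 *       if (poll(&wfd, 1, timeout_ms) > 0) {
 *           int err = 0;
 *           socklen_t errlen = sizeof(err);
 *           getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &errlen);
 *           if (err == 0) return 0; // connected
 *       }
 *   }
 */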
*/ } else { if (redisContextWaitReady(c,timeout) != REDIS_OK) goto error; } } if (blocking && redisSetBlocking(c,1) != REDIS_OK) goto error; if (redisSetTcpNoDelay(c) != REDIS_OK) goto error; c->flags |= REDIS_CONNECTED; rv = REDIS_OK; goto end; } if (p == NULL) { char buf[128]; snprintf(buf,sizeof(buf),"Can't create socket: %s",strerror(errno)); __redisSetError(c,REDIS_ERR_OTHER,buf); goto error; } error: rv = REDIS_ERR; end: freeaddrinfo(servinfo); return rv; // Need to return REDIS_OK if alright } int redisContextConnectTcp(redisContext *c, const char *addr, int port, const struct timeval *timeout) { return _redisContextConnectTcp(c, addr, port, timeout, NULL); } int redisContextConnectBindTcp(redisContext *c, const char *addr, int port, const struct timeval *timeout, const char *source_addr) { return _redisContextConnectTcp(c, addr, port, timeout, source_addr); } int redisContextConnectUnix(redisContext *c, const char *path, const struct timeval *timeout) { int blocking = (c->flags & REDIS_BLOCK); struct sockaddr_un sa; if (redisCreateSocket(c,AF_LOCAL) < 0) return REDIS_ERR; if (redisSetBlocking(c,0) != REDIS_OK) return REDIS_ERR; sa.sun_family = AF_LOCAL; strncpy(sa.sun_path,path,sizeof(sa.sun_path)-1); if (connect(c->fd, (struct sockaddr*)&sa, sizeof(sa)) == -1) { if (errno == EINPROGRESS && !blocking) { /* This is ok. */ } else { if (redisContextWaitReady(c,timeout) != REDIS_OK) return REDIS_ERR; } } /* Reset socket to be blocking after connect(2). */ if (blocking && redisSetBlocking(c,1) != REDIS_OK) return REDIS_ERR; c->flags |= REDIS_CONNECTED; return REDIS_OK; } disque-1.0-rc1/deps/hiredis/net.h000066400000000000000000000046251264176561100166700ustar00rootroot00000000000000/* Extracted from anet.c to work properly with Hiredis error reporting. * * Copyright (c) 2006-2011, Salvatore Sanfilippo * Copyright (c) 2010-2011, Pieter Noordhuis * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
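/*
 * A minimal usage sketch of redisContextConnectUnix() above, through the
 * public redisConnectUnix() wrapper from hiredis.c; the socket path is an
 * example:
 *
 *   redisContext *c = redisConnectUnix("/tmp/disque.sock");
 *   if (c == NULL || c->err) {
 *       if (c) redisFree(c);
 *       // handle the connection error
 *   }
 */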
*/ #ifndef __NET_H #define __NET_H #include "hiredis.h" #if defined(__sun) || defined(_AIX) #define AF_LOCAL AF_UNIX #endif int redisCheckSocketError(redisContext *c); int redisContextSetTimeout(redisContext *c, const struct timeval tv); int redisContextConnectTcp(redisContext *c, const char *addr, int port, const struct timeval *timeout); int redisContextConnectBindTcp(redisContext *c, const char *addr, int port, const struct timeval *timeout, const char *source_addr); int redisContextConnectUnix(redisContext *c, const char *path, const struct timeval *timeout); int redisKeepAlive(redisContext *c, int interval); #endif disque-1.0-rc1/deps/hiredis/sds.c000066400000000000000000001147441264176561100166720ustar00rootroot00000000000000/* SDSLib 2.0 -- A C dynamic strings library * * Copyright (c) 2006-2015, Salvatore Sanfilippo * Copyright (c) 2015, Oran Agra * Copyright (c) 2015, Redis Labs, Inc * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include "sds.h" #include "sdsalloc.h" static inline int sdsHdrSize(char type) { switch(type&SDS_TYPE_MASK) { case SDS_TYPE_5: return sizeof(struct sdshdr5); case SDS_TYPE_8: return sizeof(struct sdshdr8); case SDS_TYPE_16: return sizeof(struct sdshdr16); case SDS_TYPE_32: return sizeof(struct sdshdr32); case SDS_TYPE_64: return sizeof(struct sdshdr64); } return 0; } static inline char sdsReqType(size_t string_size) { if (string_size < 32) return SDS_TYPE_5; if (string_size < 0xff) return SDS_TYPE_8; if (string_size < 0xffff) return SDS_TYPE_16; if (string_size < 0xffffffff) return SDS_TYPE_32; return SDS_TYPE_64; } /* Create a new sds string with the content specified by the 'init' pointer * and 'initlen'. * If NULL is used for 'init' the string is initialized with zero bytes. * * The string is always null-termined (all the sds strings are, always) so * even if you create an sds string with: * * mystring = sdsnewlen("abc",3); * * You can print the string with printf() as there is an implicit \0 at the * end of the string. 
However the string is binary safe and can contain
 * \0 characters in the middle, as the length is stored in the sds header. */
sds sdsnewlen(const void *init, size_t initlen) {
    void *sh;
    sds s;
    char type = sdsReqType(initlen);
    /* Empty strings are usually created in order to append. Use type 8
     * since type 5 is not good at this. */
    if (type == SDS_TYPE_5 && initlen == 0) type = SDS_TYPE_8;
    int hdrlen = sdsHdrSize(type);
    unsigned char *fp; /* flags pointer. */

    sh = s_malloc(hdrlen+initlen+1);
    if (sh == NULL) return NULL;
    /* Check for allocation failure before writing to the buffer. */
    if (!init)
        memset(sh, 0, hdrlen+initlen+1);
    s = (char*)sh+hdrlen;
    fp = ((unsigned char*)s)-1;
    switch(type) {
        case SDS_TYPE_5: {
            *fp = type | (initlen << SDS_TYPE_BITS);
            break;
        }
        case SDS_TYPE_8: {
            SDS_HDR_VAR(8,s);
            sh->len = initlen;
            sh->alloc = initlen;
            *fp = type;
            break;
        }
        case SDS_TYPE_16: {
            SDS_HDR_VAR(16,s);
            sh->len = initlen;
            sh->alloc = initlen;
            *fp = type;
            break;
        }
        case SDS_TYPE_32: {
            SDS_HDR_VAR(32,s);
            sh->len = initlen;
            sh->alloc = initlen;
            *fp = type;
            break;
        }
        case SDS_TYPE_64: {
            SDS_HDR_VAR(64,s);
            sh->len = initlen;
            sh->alloc = initlen;
            *fp = type;
            break;
        }
    }
    if (initlen && init)
        memcpy(s, init, initlen);
    s[initlen] = '\0';
    return s;
}

/* Create an empty (zero length) sds string. Even in this case the string
 * always has an implicit null term. */
sds sdsempty(void) {
    return sdsnewlen("",0);
}

/* Create a new sds string starting from a null terminated C string. */
sds sdsnew(const char *init) {
    size_t initlen = (init == NULL) ? 0 : strlen(init);
    return sdsnewlen(init, initlen);
}

/* Duplicate an sds string. */
sds sdsdup(const sds s) {
    return sdsnewlen(s, sdslen(s));
}

/* Free an sds string. No operation is performed if 's' is NULL. */
void sdsfree(sds s) {
    if (s == NULL) return;
    s_free((char*)s-sdsHdrSize(s[-1]));
}

/* Set the sds string length to the length as obtained with strlen(), so
 * considering as content only up to the first null term character.
 *
 * This function is useful when the sds string is hacked manually in some
 * way, like in the following example:
 *
 * s = sdsnew("foobar");
 * s[2] = '\0';
 * sdsupdatelen(s);
 * printf("%d\n", sdslen(s));
 *
 * The output will be "2", but if we comment out the call to sdsupdatelen()
 * the output will be "6" as the string was modified but the logical length
 * remains 6 bytes. */
void sdsupdatelen(sds s) {
    int reallen = strlen(s);
    sdssetlen(s, reallen);
}

/* Modify an sds string in-place to make it empty (zero length).
 * However the existing buffer is not discarded but set as free space
 * so that next append operations will not require allocations up to the
 * number of bytes previously available. */
void sdsclear(sds s) {
    sdssetlen(s, 0);
    s[0] = '\0';
}

/* Enlarge the free space at the end of the sds string so that the caller
 * is sure that, after calling this function, it can overwrite up to addlen
 * bytes after the end of the string, plus one more byte for nul term.
 *
 * Note: this does not change the *length* of the sds string as returned
 * by sdslen(), but only the free buffer space we have. */
sds sdsMakeRoomFor(sds s, size_t addlen) {
    void *sh, *newsh;
    size_t avail = sdsavail(s);
    size_t len, newlen;
    char type, oldtype = s[-1] & SDS_TYPE_MASK;
    int hdrlen;

    /* Return ASAP if there is enough space left.
*/ if (avail >= addlen) return s; len = sdslen(s); sh = (char*)s-sdsHdrSize(oldtype); newlen = (len+addlen); if (newlen < SDS_MAX_PREALLOC) newlen *= 2; else newlen += SDS_MAX_PREALLOC; type = sdsReqType(newlen); /* Don't use type 5: the user is appending to the string and type 5 is * not able to remember empty space, so sdsMakeRoomFor() must be called * at every appending operation. */ if (type == SDS_TYPE_5) type = SDS_TYPE_8; hdrlen = sdsHdrSize(type); if (oldtype==type) { newsh = s_realloc(sh, hdrlen+newlen+1); if (newsh == NULL) return NULL; s = (char*)newsh+hdrlen; } else { /* Since the header size changes, need to move the string forward, * and can't use realloc */ newsh = s_malloc(hdrlen+newlen+1); if (newsh == NULL) return NULL; memcpy((char*)newsh+hdrlen, s, len+1); s_free(sh); s = (char*)newsh+hdrlen; s[-1] = type; sdssetlen(s, len); } sdssetalloc(s, newlen); return s; } /* Reallocate the sds string so that it has no free space at the end. The * contained string remains not altered, but next concatenation operations * will require a reallocation. * * After the call, the passed sds string is no longer valid and all the * references must be substituted with the new pointer returned by the call. */ sds sdsRemoveFreeSpace(sds s) { void *sh, *newsh; char type, oldtype = s[-1] & SDS_TYPE_MASK; int hdrlen; size_t len = sdslen(s); sh = (char*)s-sdsHdrSize(oldtype); type = sdsReqType(len); hdrlen = sdsHdrSize(type); if (oldtype==type) { newsh = s_realloc(sh, hdrlen+len+1); if (newsh == NULL) return NULL; s = (char*)newsh+hdrlen; } else { newsh = s_malloc(hdrlen+len+1); if (newsh == NULL) return NULL; memcpy((char*)newsh+hdrlen, s, len+1); s_free(sh); s = (char*)newsh+hdrlen; s[-1] = type; sdssetlen(s, len); } sdssetalloc(s, len); return s; } /* Return the total size of the allocation of the specifed sds string, * including: * 1) The sds header before the pointer. * 2) The string. * 3) The free buffer at the end if any. * 4) The implicit null term. */ size_t sdsAllocSize(sds s) { size_t alloc = sdsalloc(s); return sdsHdrSize(s[-1])+alloc+1; } /* Return the pointer of the actual SDS allocation (normally SDS strings * are referenced by the start of the string buffer). */ void *sdsAllocPtr(sds s) { return (void*) (s-sdsHdrSize(s[-1])); } /* Increment the sds length and decrements the left free space at the * end of the string according to 'incr'. Also set the null term * in the new end of the string. * * This function is used in order to fix the string length after the * user calls sdsMakeRoomFor(), writes something after the end of * the current string, and finally needs to set the new length. * * Note: it is possible to use a negative increment in order to * right-trim the string. * * Usage example: * * Using sdsIncrLen() and sdsMakeRoomFor() it is possible to mount the * following schema, to cat bytes coming from the kernel to the end of an * sds string without copying into an intermediate buffer: * * oldlen = sdslen(s); * s = sdsMakeRoomFor(s, BUFFER_SIZE); * nread = read(fd, s+oldlen, BUFFER_SIZE); * ... check for nread <= 0 and handle it ... 
* sdsIncrLen(s, nread); */ void sdsIncrLen(sds s, int incr) { unsigned char flags = s[-1]; size_t len; switch(flags&SDS_TYPE_MASK) { case SDS_TYPE_5: { unsigned char *fp = ((unsigned char*)s)-1; unsigned char oldlen = SDS_TYPE_5_LEN(flags); assert((incr > 0 && oldlen+incr < 32) || (incr < 0 && oldlen >= (unsigned int)(-incr))); *fp = SDS_TYPE_5 | ((oldlen+incr) << SDS_TYPE_BITS); len = oldlen+incr; break; } case SDS_TYPE_8: { SDS_HDR_VAR(8,s); assert((incr >= 0 && sh->alloc-sh->len >= incr) || (incr < 0 && sh->len >= (unsigned int)(-incr))); len = (sh->len += incr); break; } case SDS_TYPE_16: { SDS_HDR_VAR(16,s); assert((incr >= 0 && sh->alloc-sh->len >= incr) || (incr < 0 && sh->len >= (unsigned int)(-incr))); len = (sh->len += incr); break; } case SDS_TYPE_32: { SDS_HDR_VAR(32,s); assert((incr >= 0 && sh->alloc-sh->len >= (unsigned int)incr) || (incr < 0 && sh->len >= (unsigned int)(-incr))); len = (sh->len += incr); break; } case SDS_TYPE_64: { SDS_HDR_VAR(64,s); assert((incr >= 0 && sh->alloc-sh->len >= (uint64_t)incr) || (incr < 0 && sh->len >= (uint64_t)(-incr))); len = (sh->len += incr); break; } default: len = 0; /* Just to avoid compilation warnings. */ } s[len] = '\0'; } /* Grow the sds to have the specified length. Bytes that were not part of * the original length of the sds will be set to zero. * * if the specified length is smaller than the current length, no operation * is performed. */ sds sdsgrowzero(sds s, size_t len) { size_t curlen = sdslen(s); if (len <= curlen) return s; s = sdsMakeRoomFor(s,len-curlen); if (s == NULL) return NULL; /* Make sure added region doesn't contain garbage */ memset(s+curlen,0,(len-curlen+1)); /* also set trailing \0 byte */ sdssetlen(s, len); return s; } /* Append the specified binary-safe string pointed by 't' of 'len' bytes to the * end of the specified sds string 's'. * * After the call, the passed sds string is no longer valid and all the * references must be substituted with the new pointer returned by the call. */ sds sdscatlen(sds s, const void *t, size_t len) { size_t curlen = sdslen(s); s = sdsMakeRoomFor(s,len); if (s == NULL) return NULL; memcpy(s+curlen, t, len); sdssetlen(s, curlen+len); s[curlen+len] = '\0'; return s; } /* Append the specified null termianted C string to the sds string 's'. * * After the call, the passed sds string is no longer valid and all the * references must be substituted with the new pointer returned by the call. */ sds sdscat(sds s, const char *t) { return sdscatlen(s, t, strlen(t)); } /* Append the specified sds 't' to the existing sds 's'. * * After the call, the modified sds string is no longer valid and all the * references must be substituted with the new pointer returned by the call. */ sds sdscatsds(sds s, const sds t) { return sdscatlen(s, t, sdslen(t)); } /* Destructively modify the sds string 's' to hold the specified binary * safe string pointed by 't' of length 'len' bytes. */ sds sdscpylen(sds s, const char *t, size_t len) { if (sdsalloc(s) < len) { s = sdsMakeRoomFor(s,len-sdslen(s)); if (s == NULL) return NULL; } memcpy(s, t, len); s[len] = '\0'; sdssetlen(s, len); return s; } /* Like sdscpylen() but 't' must be a null-termined string so that the length * of the string is obtained with strlen(). */ sds sdscpy(sds s, const char *t) { return sdscpylen(s, t, strlen(t)); } /* Helper for sdscatlonglong() doing the actual number -> string * conversion. 's' must point to a string with room for at least * SDS_LLSTR_SIZE bytes. 
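/*
 * A compilable version of the read(2)-append scheme described in the
 * sdsIncrLen() comment above; fd is assumed to be an open descriptor and
 * BUFFER_SIZE an example constant:
 *
 *   #define BUFFER_SIZE (16*1024)
 *   size_t oldlen = sdslen(s);
 *   ssize_t nread;
 *
 *   s = sdsMakeRoomFor(s, BUFFER_SIZE);
 *   if (s == NULL) return NULL; // out of memory
 *   nread = read(fd, s+oldlen, BUFFER_SIZE);
 *   if (nread > 0) sdsIncrLen(s, nread);
 */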
* * The function returns the length of the null-terminated string * representation stored at 's'. */ #define SDS_LLSTR_SIZE 21 int sdsll2str(char *s, long long value) { char *p, aux; unsigned long long v; size_t l; /* Generate the string representation, this method produces * an reversed string. */ v = (value < 0) ? -value : value; p = s; do { *p++ = '0'+(v%10); v /= 10; } while(v); if (value < 0) *p++ = '-'; /* Compute length and add null term. */ l = p-s; *p = '\0'; /* Reverse the string. */ p--; while(s < p) { aux = *s; *s = *p; *p = aux; s++; p--; } return l; } /* Identical sdsll2str(), but for unsigned long long type. */ int sdsull2str(char *s, unsigned long long v) { char *p, aux; size_t l; /* Generate the string representation, this method produces * an reversed string. */ p = s; do { *p++ = '0'+(v%10); v /= 10; } while(v); /* Compute length and add null term. */ l = p-s; *p = '\0'; /* Reverse the string. */ p--; while(s < p) { aux = *s; *s = *p; *p = aux; s++; p--; } return l; } /* Create an sds string from a long long value. It is much faster than: * * sdscatprintf(sdsempty(),"%lld\n", value); */ sds sdsfromlonglong(long long value) { char buf[SDS_LLSTR_SIZE]; int len = sdsll2str(buf,value); return sdsnewlen(buf,len); } /* Like sdscatprintf() but gets va_list instead of being variadic. */ sds sdscatvprintf(sds s, const char *fmt, va_list ap) { va_list cpy; char staticbuf[1024], *buf = staticbuf, *t; size_t buflen = strlen(fmt)*2; /* We try to start using a static buffer for speed. * If not possible we revert to heap allocation. */ if (buflen > sizeof(staticbuf)) { buf = s_malloc(buflen); if (buf == NULL) return NULL; } else { buflen = sizeof(staticbuf); } /* Try with buffers two times bigger every time we fail to * fit the string in the current buffer size. */ while(1) { buf[buflen-2] = '\0'; va_copy(cpy,ap); vsnprintf(buf, buflen, fmt, cpy); va_end(cpy); if (buf[buflen-2] != '\0') { if (buf != staticbuf) s_free(buf); buflen *= 2; buf = s_malloc(buflen); if (buf == NULL) return NULL; continue; } break; } /* Finally concat the obtained string to the SDS string and return it. */ t = sdscat(s, buf); if (buf != staticbuf) s_free(buf); return t; } /* Append to the sds string 's' a string obtained using printf-alike format * specifier. * * After the call, the modified sds string is no longer valid and all the * references must be substituted with the new pointer returned by the call. * * Example: * * s = sdsnew("Sum is: "); * s = sdscatprintf(s,"%d+%d = %d",a,b,a+b). * * Often you need to create a string from scratch with the printf-alike * format. When this is the need, just use sdsempty() as the target string: * * s = sdscatprintf(sdsempty(), "... your format ...", args); */ sds sdscatprintf(sds s, const char *fmt, ...) { va_list ap; char *t; va_start(ap, fmt); t = sdscatvprintf(s,fmt,ap); va_end(ap); return t; } /* This function is similar to sdscatprintf, but much faster as it does * not rely on sprintf() family functions implemented by the libc that * are often very slow. Moreover directly handling the sds string as * new data is concatenated provides a performance improvement. * * However this function only handles an incompatible subset of printf-alike * format specifiers: * * %s - C String * %S - SDS string * %i - signed int * %I - 64 bit signed integer (long long, int64_t) * %u - unsigned int * %U - 64 bit unsigned integer (unsigned long long, uint64_t) * %% - Verbatim "%" character. */ sds sdscatfmt(sds s, char const *fmt, ...) 
{ size_t initlen = sdslen(s); const char *f = fmt; int i; va_list ap; va_start(ap,fmt); f = fmt; /* Next format specifier byte to process. */ i = initlen; /* Position of the next byte to write to dest str. */ while(*f) { char next, *str; size_t l; long long num; unsigned long long unum; /* Make sure there is always space for at least 1 char. */ if (sdsavail(s)==0) { s = sdsMakeRoomFor(s,1); } switch(*f) { case '%': next = *(f+1); f++; switch(next) { case 's': case 'S': str = va_arg(ap,char*); l = (next == 's') ? strlen(str) : sdslen(str); if (sdsavail(s) < l) { s = sdsMakeRoomFor(s,l); } memcpy(s+i,str,l); sdsinclen(s,l); i += l; break; case 'i': case 'I': if (next == 'i') num = va_arg(ap,int); else num = va_arg(ap,long long); { char buf[SDS_LLSTR_SIZE]; l = sdsll2str(buf,num); if (sdsavail(s) < l) { s = sdsMakeRoomFor(s,l); } memcpy(s+i,buf,l); sdsinclen(s,l); i += l; } break; case 'u': case 'U': if (next == 'u') unum = va_arg(ap,unsigned int); else unum = va_arg(ap,unsigned long long); { char buf[SDS_LLSTR_SIZE]; l = sdsull2str(buf,unum); if (sdsavail(s) < l) { s = sdsMakeRoomFor(s,l); } memcpy(s+i,buf,l); sdsinclen(s,l); i += l; } break; default: /* Handle %% and generally %. */ s[i++] = next; sdsinclen(s,1); break; } break; default: s[i++] = *f; sdsinclen(s,1); break; } f++; } va_end(ap); /* Add null-term */ s[i] = '\0'; return s; } /* Remove the part of the string from left and from right composed just of * contiguous characters found in 'cset', that is a null terminted C string. * * After the call, the modified sds string is no longer valid and all the * references must be substituted with the new pointer returned by the call. * * Example: * * s = sdsnew("AA...AA.a.aa.aHelloWorld :::"); * s = sdstrim(s,"Aa. :"); * printf("%s\n", s); * * Output will be just "Hello World". */ sds sdstrim(sds s, const char *cset) { char *start, *end, *sp, *ep; size_t len; sp = start = s; ep = end = s+sdslen(s)-1; while(sp <= end && strchr(cset, *sp)) sp++; while(ep > sp && strchr(cset, *ep)) ep--; len = (sp > ep) ? 0 : ((ep-sp)+1); if (s != sp) memmove(s, sp, len); s[len] = '\0'; sdssetlen(s,len); return s; } /* Turn the string into a smaller (or equal) string containing only the * substring specified by the 'start' and 'end' indexes. * * start and end can be negative, where -1 means the last character of the * string, -2 the penultimate character, and so forth. * * The interval is inclusive, so the start and end characters will be part * of the resulting string. * * The string is modified in-place. * * Example: * * s = sdsnew("Hello World"); * sdsrange(s,1,-1); => "ello World" */ void sdsrange(sds s, int start, int end) { size_t newlen, len = sdslen(s); if (len == 0) return; if (start < 0) { start = len+start; if (start < 0) start = 0; } if (end < 0) { end = len+end; if (end < 0) end = 0; } newlen = (start > end) ? 0 : (end-start)+1; if (newlen != 0) { if (start >= (signed)len) { newlen = 0; } else if (end >= (signed)len) { end = len-1; newlen = (start > end) ? 0 : (end-start)+1; } } else { start = 0; } if (start && newlen) memmove(s, s+start, newlen); s[newlen] = 0; sdssetlen(s,newlen); } /* Apply tolower() to every character of the sds string 's'. */ void sdstolower(sds s) { int len = sdslen(s), j; for (j = 0; j < len; j++) s[j] = tolower(s[j]); } /* Apply toupper() to every character of the sds string 's'. */ void sdstoupper(sds s) { int len = sdslen(s), j; for (j = 0; j < len; j++) s[j] = toupper(s[j]); } /* Compare two sds strings s1 and s2 with memcmp(). * * Return value: * * positive if s1 > s2. 
* negative if s1 < s2. * 0 if s1 and s2 are exactly the same binary string. * * If two strings share exactly the same prefix, but one of the two has * additional characters, the longer string is considered to be greater than * the smaller one. */ int sdscmp(const sds s1, const sds s2) { size_t l1, l2, minlen; int cmp; l1 = sdslen(s1); l2 = sdslen(s2); minlen = (l1 < l2) ? l1 : l2; cmp = memcmp(s1,s2,minlen); if (cmp == 0) return l1-l2; return cmp; } /* Split 's' with separator in 'sep'. An array * of sds strings is returned. *count will be set * by reference to the number of tokens returned. * * On out of memory, zero length string, zero length * separator, NULL is returned. * * Note that 'sep' is able to split a string using * a multi-character separator. For example * sdssplit("foo_-_bar","_-_"); will return two * elements "foo" and "bar". * * This version of the function is binary-safe but * requires length arguments. sdssplit() is just the * same function but for zero-terminated strings. */ sds *sdssplitlen(const char *s, int len, const char *sep, int seplen, int *count) { int elements = 0, slots = 5, start = 0, j; sds *tokens; if (seplen < 1 || len < 0) return NULL; tokens = s_malloc(sizeof(sds)*slots); if (tokens == NULL) return NULL; if (len == 0) { *count = 0; return tokens; } for (j = 0; j < (len-(seplen-1)); j++) { /* make sure there is room for the next element and the final one */ if (slots < elements+2) { sds *newtokens; slots *= 2; newtokens = s_realloc(tokens,sizeof(sds)*slots); if (newtokens == NULL) goto cleanup; tokens = newtokens; } /* search the separator */ if ((seplen == 1 && *(s+j) == sep[0]) || (memcmp(s+j,sep,seplen) == 0)) { tokens[elements] = sdsnewlen(s+start,j-start); if (tokens[elements] == NULL) goto cleanup; elements++; start = j+seplen; j = j+seplen-1; /* skip the separator */ } } /* Add the final element. We are sure there is room in the tokens array. */ tokens[elements] = sdsnewlen(s+start,len-start); if (tokens[elements] == NULL) goto cleanup; elements++; *count = elements; return tokens; cleanup: { int i; for (i = 0; i < elements; i++) sdsfree(tokens[i]); s_free(tokens); *count = 0; return NULL; } } /* Free the result returned by sdssplitlen(), or do nothing if 'tokens' is NULL. */ void sdsfreesplitres(sds *tokens, int count) { if (!tokens) return; while(count--) sdsfree(tokens[count]); s_free(tokens); } /* Append to the sds string "s" an escaped string representation where * all the non-printable characters (tested with isprint()) are turned into * escapes in the form "\n\r\a...." or "\x". * * After the call, the modified sds string is no longer valid and all the * references must be substituted with the new pointer returned by the call. */ sds sdscatrepr(sds s, const char *p, size_t len) { s = sdscatlen(s,"\"",1); while(len--) { switch(*p) { case '\\': case '"': s = sdscatprintf(s,"\\%c",*p); break; case '\n': s = sdscatlen(s,"\\n",2); break; case '\r': s = sdscatlen(s,"\\r",2); break; case '\t': s = sdscatlen(s,"\\t",2); break; case '\a': s = sdscatlen(s,"\\a",2); break; case '\b': s = sdscatlen(s,"\\b",2); break; default: if (isprint(*p)) s = sdscatprintf(s,"%c",*p); else s = sdscatprintf(s,"\\x%02x",(unsigned char)*p); break; } p++; } return sdscatlen(s,"\"",1); } /* Helper function for sdssplitargs() that returns non zero if 'c' * is a valid hex digit. 
*/ int is_hex_digit(char c) { return (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F'); } /* Helper function for sdssplitargs() that converts a hex digit into an * integer from 0 to 15 */ int hex_digit_to_int(char c) { switch(c) { case '0': return 0; case '1': return 1; case '2': return 2; case '3': return 3; case '4': return 4; case '5': return 5; case '6': return 6; case '7': return 7; case '8': return 8; case '9': return 9; case 'a': case 'A': return 10; case 'b': case 'B': return 11; case 'c': case 'C': return 12; case 'd': case 'D': return 13; case 'e': case 'E': return 14; case 'f': case 'F': return 15; default: return 0; } } /* Split a line into arguments, where every argument can be in the * following programming-language REPL-alike form: * * foo bar "newline are supported\n" and "\xff\x00otherstuff" * * The number of arguments is stored into *argc, and an array * of sds is returned. * * The caller should free the resulting array of sds strings with * sdsfreesplitres(). * * Note that sdscatrepr() is able to convert back a string into * a quoted string in the same format sdssplitargs() is able to parse. * * The function returns the allocated tokens on success, even when the * input string is empty, or NULL if the input contains unbalanced * quotes or closed quotes followed by non space characters * as in: "foo"bar or "foo' */ sds *sdssplitargs(const char *line, int *argc) { const char *p = line; char *current = NULL; char **vector = NULL; *argc = 0; while(1) { /* skip blanks */ while(*p && isspace(*p)) p++; if (*p) { /* get a token */ int inq=0; /* set to 1 if we are in "quotes" */ int insq=0; /* set to 1 if we are in 'single quotes' */ int done=0; if (current == NULL) current = sdsempty(); while(!done) { if (inq) { if (*p == '\\' && *(p+1) == 'x' && is_hex_digit(*(p+2)) && is_hex_digit(*(p+3))) { unsigned char byte; byte = (hex_digit_to_int(*(p+2))*16)+ hex_digit_to_int(*(p+3)); current = sdscatlen(current,(char*)&byte,1); p += 3; } else if (*p == '\\' && *(p+1)) { char c; p++; switch(*p) { case 'n': c = '\n'; break; case 'r': c = '\r'; break; case 't': c = '\t'; break; case 'b': c = '\b'; break; case 'a': c = '\a'; break; default: c = *p; break; } current = sdscatlen(current,&c,1); } else if (*p == '"') { /* closing quote must be followed by a space or * nothing at all. */ if (*(p+1) && !isspace(*(p+1))) goto err; done=1; } else if (!*p) { /* unterminated quotes */ goto err; } else { current = sdscatlen(current,p,1); } } else if (insq) { if (*p == '\\' && *(p+1) == '\'') { p++; current = sdscatlen(current,"'",1); } else if (*p == '\'') { /* closing quote must be followed by a space or * nothing at all. */ if (*(p+1) && !isspace(*(p+1))) goto err; done=1; } else if (!*p) { /* unterminated quotes */ goto err; } else { current = sdscatlen(current,p,1); } } else { switch(*p) { case ' ': case '\n': case '\r': case '\t': case '\0': done=1; break; case '"': inq=1; break; case '\'': insq=1; break; default: current = sdscatlen(current,p,1); break; } } if (*p) p++; } /* add the token to the vector */ vector = s_realloc(vector,((*argc)+1)*sizeof(char*)); vector[*argc] = current; (*argc)++; current = NULL; } else { /* Even on empty input string return something not NULL. 
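 *
 * Usage sketch (the caller code here is hypothetical; error handling is
 * abbreviated). Whatever the input, the returned vector must be released
 * with sdsfreesplitres():
 *
 *   int argc;
 *   sds *argv = sdssplitargs("set key \"hello world\"", &argc);
 *   if (argv == NULL) return; // unbalanced quotes or out of memory
 *   for (int j = 0; j < argc; j++) printf("%d: %s\n", j, argv[j]);
 *   sdsfreesplitres(argv, argc);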
*/ if (vector == NULL) vector = s_malloc(sizeof(void*)); return vector; } } err: while((*argc)--) sdsfree(vector[*argc]); s_free(vector); if (current) sdsfree(current); *argc = 0; return NULL; } /* Modify the string substituting all the occurrences of the set of * characters specified in the 'from' string to the corresponding character * in the 'to' array. * * For instance: sdsmapchars(mystring, "ho", "01", 2) * will have the effect of turning the string "hello" into "0ell1". * * The function returns the sds string pointer, that is always the same * as the input pointer since no resize is needed. */ sds sdsmapchars(sds s, const char *from, const char *to, size_t setlen) { size_t j, i, l = sdslen(s); for (j = 0; j < l; j++) { for (i = 0; i < setlen; i++) { if (s[j] == from[i]) { s[j] = to[i]; break; } } } return s; } /* Join an array of C strings using the specified separator (also a C string). * Returns the result as an sds string. */ sds sdsjoin(char **argv, int argc, char *sep) { sds join = sdsempty(); int j; for (j = 0; j < argc; j++) { join = sdscat(join, argv[j]); if (j != argc-1) join = sdscat(join,sep); } return join; } /* Like sdsjoin, but joins an array of SDS strings. */ sds sdsjoinsds(sds *argv, int argc, const char *sep, size_t seplen) { sds join = sdsempty(); int j; for (j = 0; j < argc; j++) { join = sdscatsds(join, argv[j]); if (j != argc-1) join = sdscatlen(join,sep,seplen); } return join; } #if defined(SDS_TEST_MAIN) #include #include "testhelp.h" #include "limits.h" #define UNUSED(x) (void)(x) int sdsTest(void) { { sds x = sdsnew("foo"), y; test_cond("Create a string and obtain the length", sdslen(x) == 3 && memcmp(x,"foo\0",4) == 0) sdsfree(x); x = sdsnewlen("foo",2); test_cond("Create a string with specified length", sdslen(x) == 2 && memcmp(x,"fo\0",3) == 0) x = sdscat(x,"bar"); test_cond("Strings concatenation", sdslen(x) == 5 && memcmp(x,"fobar\0",6) == 0); x = sdscpy(x,"a"); test_cond("sdscpy() against an originally longer string", sdslen(x) == 1 && memcmp(x,"a\0",2) == 0) x = sdscpy(x,"xyzxxxxxxxxxxyyyyyyyyyykkkkkkkkkk"); test_cond("sdscpy() against an originally shorter string", sdslen(x) == 33 && memcmp(x,"xyzxxxxxxxxxxyyyyyyyyyykkkkkkkkkk\0",33) == 0) sdsfree(x); x = sdscatprintf(sdsempty(),"%d",123); test_cond("sdscatprintf() seems working in the base case", sdslen(x) == 3 && memcmp(x,"123\0",4) == 0) sdsfree(x); x = sdsnew("--"); x = sdscatfmt(x, "Hello %s World %I,%I--", "Hi!", LLONG_MIN,LLONG_MAX); test_cond("sdscatfmt() seems working in the base case", sdslen(x) == 60 && memcmp(x,"--Hello Hi! 
World -9223372036854775808," "9223372036854775807--",60) == 0) printf("[%s]\n",x); sdsfree(x); x = sdsnew("--"); x = sdscatfmt(x, "%u,%U--", UINT_MAX, ULLONG_MAX); test_cond("sdscatfmt() seems working with unsigned numbers", sdslen(x) == 35 && memcmp(x,"--4294967295,18446744073709551615--",35) == 0) sdsfree(x); x = sdsnew(" x "); sdstrim(x," x"); test_cond("sdstrim() works when all chars match", sdslen(x) == 0) sdsfree(x); x = sdsnew(" x "); sdstrim(x," "); test_cond("sdstrim() works when a single char remains", sdslen(x) == 1 && x[0] == 'x') sdsfree(x); x = sdsnew("xxciaoyyy"); sdstrim(x,"xy"); test_cond("sdstrim() correctly trims characters", sdslen(x) == 4 && memcmp(x,"ciao\0",5) == 0) y = sdsdup(x); sdsrange(y,1,1); test_cond("sdsrange(...,1,1)", sdslen(y) == 1 && memcmp(y,"i\0",2) == 0) sdsfree(y); y = sdsdup(x); sdsrange(y,1,-1); test_cond("sdsrange(...,1,-1)", sdslen(y) == 3 && memcmp(y,"iao\0",4) == 0) sdsfree(y); y = sdsdup(x); sdsrange(y,-2,-1); test_cond("sdsrange(...,-2,-1)", sdslen(y) == 2 && memcmp(y,"ao\0",3) == 0) sdsfree(y); y = sdsdup(x); sdsrange(y,2,1); test_cond("sdsrange(...,2,1)", sdslen(y) == 0 && memcmp(y,"\0",1) == 0) sdsfree(y); y = sdsdup(x); sdsrange(y,1,100); test_cond("sdsrange(...,1,100)", sdslen(y) == 3 && memcmp(y,"iao\0",4) == 0) sdsfree(y); y = sdsdup(x); sdsrange(y,100,100); test_cond("sdsrange(...,100,100)", sdslen(y) == 0 && memcmp(y,"\0",1) == 0) sdsfree(y); sdsfree(x); x = sdsnew("foo"); y = sdsnew("foa"); test_cond("sdscmp(foo,foa)", sdscmp(x,y) > 0) sdsfree(y); sdsfree(x); x = sdsnew("bar"); y = sdsnew("bar"); test_cond("sdscmp(bar,bar)", sdscmp(x,y) == 0) sdsfree(y); sdsfree(x); x = sdsnew("aar"); y = sdsnew("bar"); test_cond("sdscmp(bar,bar)", sdscmp(x,y) < 0) sdsfree(y); sdsfree(x); x = sdsnewlen("\a\n\0foo\r",7); y = sdscatrepr(sdsempty(),x,sdslen(x)); test_cond("sdscatrepr(...data...)", memcmp(y,"\"\\a\\n\\x00foo\\r\"",15) == 0) { unsigned int oldfree; char *p; int step = 10, j, i; sdsfree(x); sdsfree(y); x = sdsnew("0"); test_cond("sdsnew() free/len buffers", sdslen(x) == 1 && sdsavail(x) == 0); /* Run the test a few times in order to hit the first two * SDS header types. */ for (i = 0; i < 10; i++) { int oldlen = sdslen(x); x = sdsMakeRoomFor(x,step); int type = x[-1]&SDS_TYPE_MASK; test_cond("sdsMakeRoomFor() len", sdslen(x) == oldlen); if (type != SDS_TYPE_5) { test_cond("sdsMakeRoomFor() free", sdsavail(x) >= step); oldfree = sdsavail(x); } p = x+oldlen; for (j = 0; j < step; j++) { p[j] = 'A'+j; } sdsIncrLen(x,step); } test_cond("sdsMakeRoomFor() content", memcmp("0ABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJ",x,101) == 0); test_cond("sdsMakeRoomFor() final length",sdslen(x)==101); sdsfree(x); } } test_report() return 0; } #endif #ifdef SDS_TEST_MAIN int main(void) { return sdsTest(); } #endif disque-1.0-rc1/deps/hiredis/sds.h000066400000000000000000000206231264176561100166670ustar00rootroot00000000000000/* SDSLib 2.0 -- A C dynamic strings library * * Copyright (c) 2006-2015, Salvatore Sanfilippo * Copyright (c) 2015, Oran Agra * Copyright (c) 2015, Redis Labs, Inc * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. 
* * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef __SDS_H #define __SDS_H #define SDS_MAX_PREALLOC (1024*1024) #include #include #include typedef char *sds; /* Note: sdshdr5 is never used, we just access the flags byte directly. * However is here to document the layout of type 5 SDS strings. */ struct __attribute__ ((__packed__)) sdshdr5 { unsigned char flags; /* 3 lsb of type, and 5 msb of string length */ char buf[]; }; struct __attribute__ ((__packed__)) sdshdr8 { uint8_t len; /* used */ uint8_t alloc; /* excluding the header and null terminator */ unsigned char flags; /* 3 lsb of type, 5 unused bits */ char buf[]; }; struct __attribute__ ((__packed__)) sdshdr16 { uint16_t len; /* used */ uint16_t alloc; /* excluding the header and null terminator */ unsigned char flags; /* 3 lsb of type, 5 unused bits */ char buf[]; }; struct __attribute__ ((__packed__)) sdshdr32 { uint32_t len; /* used */ uint32_t alloc; /* excluding the header and null terminator */ unsigned char flags; /* 3 lsb of type, 5 unused bits */ char buf[]; }; struct __attribute__ ((__packed__)) sdshdr64 { uint64_t len; /* used */ uint64_t alloc; /* excluding the header and null terminator */ unsigned char flags; /* 3 lsb of type, 5 unused bits */ char buf[]; }; #define SDS_TYPE_5 0 #define SDS_TYPE_8 1 #define SDS_TYPE_16 2 #define SDS_TYPE_32 3 #define SDS_TYPE_64 4 #define SDS_TYPE_MASK 7 #define SDS_TYPE_BITS 3 #define SDS_HDR_VAR(T,s) struct sdshdr##T *sh = (void*)((s)-(sizeof(struct sdshdr##T))); #define SDS_HDR(T,s) ((struct sdshdr##T *)((s)-(sizeof(struct sdshdr##T)))) #define SDS_TYPE_5_LEN(f) ((f)>>SDS_TYPE_BITS) static inline size_t sdslen(const sds s) { unsigned char flags = s[-1]; switch(flags&SDS_TYPE_MASK) { case SDS_TYPE_5: return SDS_TYPE_5_LEN(flags); case SDS_TYPE_8: return SDS_HDR(8,s)->len; case SDS_TYPE_16: return SDS_HDR(16,s)->len; case SDS_TYPE_32: return SDS_HDR(32,s)->len; case SDS_TYPE_64: return SDS_HDR(64,s)->len; } return 0; } static inline size_t sdsavail(const sds s) { unsigned char flags = s[-1]; switch(flags&SDS_TYPE_MASK) { case SDS_TYPE_5: { return 0; } case SDS_TYPE_8: { SDS_HDR_VAR(8,s); return sh->alloc - sh->len; } case SDS_TYPE_16: { SDS_HDR_VAR(16,s); return sh->alloc - sh->len; } case SDS_TYPE_32: { SDS_HDR_VAR(32,s); return sh->alloc - sh->len; } case SDS_TYPE_64: { SDS_HDR_VAR(64,s); return sh->alloc - sh->len; } } return 0; } static inline void sdssetlen(sds s, size_t 
newlen) { unsigned char flags = s[-1]; switch(flags&SDS_TYPE_MASK) { case SDS_TYPE_5: { unsigned char *fp = ((unsigned char*)s)-1; *fp = SDS_TYPE_5 | (newlen << SDS_TYPE_BITS); } break; case SDS_TYPE_8: SDS_HDR(8,s)->len = newlen; break; case SDS_TYPE_16: SDS_HDR(16,s)->len = newlen; break; case SDS_TYPE_32: SDS_HDR(32,s)->len = newlen; break; case SDS_TYPE_64: SDS_HDR(64,s)->len = newlen; break; } } static inline void sdsinclen(sds s, size_t inc) { unsigned char flags = s[-1]; switch(flags&SDS_TYPE_MASK) { case SDS_TYPE_5: { unsigned char *fp = ((unsigned char*)s)-1; unsigned char newlen = SDS_TYPE_5_LEN(flags)+inc; *fp = SDS_TYPE_5 | (newlen << SDS_TYPE_BITS); } break; case SDS_TYPE_8: SDS_HDR(8,s)->len += inc; break; case SDS_TYPE_16: SDS_HDR(16,s)->len += inc; break; case SDS_TYPE_32: SDS_HDR(32,s)->len += inc; break; case SDS_TYPE_64: SDS_HDR(64,s)->len += inc; break; } } /* sdsalloc() = sdsavail() + sdslen() */ static inline size_t sdsalloc(const sds s) { unsigned char flags = s[-1]; switch(flags&SDS_TYPE_MASK) { case SDS_TYPE_5: return SDS_TYPE_5_LEN(flags); case SDS_TYPE_8: return SDS_HDR(8,s)->alloc; case SDS_TYPE_16: return SDS_HDR(16,s)->alloc; case SDS_TYPE_32: return SDS_HDR(32,s)->alloc; case SDS_TYPE_64: return SDS_HDR(64,s)->alloc; } return 0; } static inline void sdssetalloc(sds s, size_t newlen) { unsigned char flags = s[-1]; switch(flags&SDS_TYPE_MASK) { case SDS_TYPE_5: /* Nothing to do, this type has no total allocation info. */ break; case SDS_TYPE_8: SDS_HDR(8,s)->alloc = newlen; break; case SDS_TYPE_16: SDS_HDR(16,s)->alloc = newlen; break; case SDS_TYPE_32: SDS_HDR(32,s)->alloc = newlen; break; case SDS_TYPE_64: SDS_HDR(64,s)->alloc = newlen; break; } } sds sdsnewlen(const void *init, size_t initlen); sds sdsnew(const char *init); sds sdsempty(void); sds sdsdup(const sds s); void sdsfree(sds s); sds sdsgrowzero(sds s, size_t len); sds sdscatlen(sds s, const void *t, size_t len); sds sdscat(sds s, const char *t); sds sdscatsds(sds s, const sds t); sds sdscpylen(sds s, const char *t, size_t len); sds sdscpy(sds s, const char *t); sds sdscatvprintf(sds s, const char *fmt, va_list ap); #ifdef __GNUC__ sds sdscatprintf(sds s, const char *fmt, ...) 
__attribute__((format(printf, 2, 3))); #else sds sdscatprintf(sds s, const char *fmt, ...); #endif sds sdscatfmt(sds s, char const *fmt, ...); sds sdstrim(sds s, const char *cset); void sdsrange(sds s, int start, int end); void sdsupdatelen(sds s); void sdsclear(sds s); int sdscmp(const sds s1, const sds s2); sds *sdssplitlen(const char *s, int len, const char *sep, int seplen, int *count); void sdsfreesplitres(sds *tokens, int count); void sdstolower(sds s); void sdstoupper(sds s); sds sdsfromlonglong(long long value); sds sdscatrepr(sds s, const char *p, size_t len); sds *sdssplitargs(const char *line, int *argc); sds sdsmapchars(sds s, const char *from, const char *to, size_t setlen); sds sdsjoin(char **argv, int argc, char *sep); sds sdsjoinsds(sds *argv, int argc, const char *sep, size_t seplen); /* Low level functions exposed to the user API */ sds sdsMakeRoomFor(sds s, size_t addlen); void sdsIncrLen(sds s, int incr); sds sdsRemoveFreeSpace(sds s); size_t sdsAllocSize(sds s); void *sdsAllocPtr(sds s); #ifdef REDIS_TEST int sdsTest(int argc, char *argv[]); #endif #endif disque-1.0-rc1/deps/hiredis/sdsalloc.h000066400000000000000000000040431264176561100177000ustar00rootroot00000000000000/* SDSLib 2.0 -- A C dynamic strings library * * Copyright (c) 2006-2015, Salvatore Sanfilippo * Copyright (c) 2015, Redis Labs, Inc * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* SDS allocator selection. * * This file is used in order to change the SDS allocator at compile time. * Just define the following defines to what you want to use. Also add * the include of your alternate allocator if needed (not needed in order * to use the default libc allocator). 
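 *
 * For example (an illustrative sketch, not part of the stock configuration,
 * assuming a jemalloc build configured with --with-jemalloc-prefix=je_),
 * SDS could be pointed at jemalloc's prefixed entry points instead:
 *
 *   #include <jemalloc/jemalloc.h>
 *   #define s_malloc je_malloc
 *   #define s_realloc je_realloc
 *   #define s_free je_free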
*/ #include "zmalloc.h" #define s_malloc zmalloc #define s_realloc zrealloc #define s_free zfree disque-1.0-rc1/deps/hiredis/test.c000066400000000000000000000617421264176561100170570ustar00rootroot00000000000000#include "fmacros.h" #include #include #include #include #include #include #include #include #include #include #include "hiredis.h" enum connection_type { CONN_TCP, CONN_UNIX, CONN_FD }; struct config { enum connection_type type; struct { const char *host; int port; struct timeval timeout; } tcp; struct { const char *path; } unix; }; /* The following lines make up our testing "framework" :) */ static int tests = 0, fails = 0; #define test(_s) { printf("#%02d ", ++tests); printf(_s); } #define test_cond(_c) if(_c) printf("\033[0;32mPASSED\033[0;0m\n"); else {printf("\033[0;31mFAILED\033[0;0m\n"); fails++;} static long long usec(void) { struct timeval tv; gettimeofday(&tv,NULL); return (((long long)tv.tv_sec)*1000000)+tv.tv_usec; } static redisContext *select_database(redisContext *c) { redisReply *reply; /* Switch to DB 9 for testing, now that we know we can chat. */ reply = redisCommand(c,"SELECT 9"); assert(reply != NULL); freeReplyObject(reply); /* Make sure the DB is empty */ reply = redisCommand(c,"DBSIZE"); assert(reply != NULL); if (reply->type == REDIS_REPLY_INTEGER && reply->integer == 0) { /* Awesome, DB 9 is empty and we can continue. */ freeReplyObject(reply); } else { printf("Database #9 is not empty, test can not continue\n"); exit(1); } return c; } static int disconnect(redisContext *c, int keep_fd) { redisReply *reply; /* Make sure we're on DB 9. */ reply = redisCommand(c,"SELECT 9"); assert(reply != NULL); freeReplyObject(reply); reply = redisCommand(c,"FLUSHDB"); assert(reply != NULL); freeReplyObject(reply); /* Free the context as well, but keep the fd if requested. 
*/ if (keep_fd) return redisFreeKeepFd(c); redisFree(c); return -1; } static redisContext *connect(struct config config) { redisContext *c = NULL; if (config.type == CONN_TCP) { c = redisConnect(config.tcp.host, config.tcp.port); } else if (config.type == CONN_UNIX) { c = redisConnectUnix(config.unix.path); } else if (config.type == CONN_FD) { /* Create a dummy connection just to get an fd to inherit */ redisContext *dummy_ctx = redisConnectUnix(config.unix.path); if (dummy_ctx) { int fd = disconnect(dummy_ctx, 1); printf("Connecting to inherited fd %d\n", fd); c = redisConnectFd(fd); } } else { assert(NULL); } if (c == NULL) { printf("Connection error: can't allocate redis context\n"); exit(1); } else if (c->err) { printf("Connection error: %s\n", c->errstr); exit(1); } return select_database(c); } static void test_format_commands(void) { char *cmd; int len; test("Format command without interpolation: "); len = redisFormatCommand(&cmd,"SET foo bar"); test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n",len) == 0 && len == 4+4+(3+2)+4+(3+2)+4+(3+2)); free(cmd); test("Format command with %%s string interpolation: "); len = redisFormatCommand(&cmd,"SET %s %s","foo","bar"); test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n",len) == 0 && len == 4+4+(3+2)+4+(3+2)+4+(3+2)); free(cmd); test("Format command with %%s and an empty string: "); len = redisFormatCommand(&cmd,"SET %s %s","foo",""); test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$0\r\n\r\n",len) == 0 && len == 4+4+(3+2)+4+(3+2)+4+(0+2)); free(cmd); test("Format command with an empty string in between proper interpolations: "); len = redisFormatCommand(&cmd,"SET %s %s","","foo"); test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$0\r\n\r\n$3\r\nfoo\r\n",len) == 0 && len == 4+4+(3+2)+4+(0+2)+4+(3+2)); free(cmd); test("Format command with %%b string interpolation: "); len = redisFormatCommand(&cmd,"SET %b %b","foo",(size_t)3,"b\0r",(size_t)3); test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nb\0r\r\n",len) == 0 && len == 4+4+(3+2)+4+(3+2)+4+(3+2)); free(cmd); test("Format command with %%b and an empty string: "); len = redisFormatCommand(&cmd,"SET %b %b","foo",(size_t)3,"",(size_t)0); test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$0\r\n\r\n",len) == 0 && len == 4+4+(3+2)+4+(3+2)+4+(0+2)); free(cmd); test("Format command with literal %%: "); len = redisFormatCommand(&cmd,"SET %% %%"); test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$1\r\n%\r\n$1\r\n%\r\n",len) == 0 && len == 4+4+(3+2)+4+(1+2)+4+(1+2)); free(cmd); /* Vararg width depends on the type. These tests make sure that the * width is correctly determined using the format and subsequent varargs * can correctly be interpolated. 
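 * (Background: C's default argument promotions pass char/short as int and
 * float as double, so the "hh"/"h"/"l"/"ll" length modifiers in the format
 * are what tell the printf-style machinery how to fetch each vararg.)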
*/ #define INTEGER_WIDTH_TEST(fmt, type) do { \ type value = 123; \ test("Format command with printf-delegation (" #type "): "); \ len = redisFormatCommand(&cmd,"key:%08" fmt " str:%s", value, "hello"); \ test_cond(strncmp(cmd,"*2\r\n$12\r\nkey:00000123\r\n$9\r\nstr:hello\r\n",len) == 0 && \ len == 4+5+(12+2)+4+(9+2)); \ free(cmd); \ } while(0) #define FLOAT_WIDTH_TEST(type) do { \ type value = 123.0; \ test("Format command with printf-delegation (" #type "): "); \ len = redisFormatCommand(&cmd,"key:%08.3f str:%s", value, "hello"); \ test_cond(strncmp(cmd,"*2\r\n$12\r\nkey:0123.000\r\n$9\r\nstr:hello\r\n",len) == 0 && \ len == 4+5+(12+2)+4+(9+2)); \ free(cmd); \ } while(0) INTEGER_WIDTH_TEST("d", int); INTEGER_WIDTH_TEST("hhd", char); INTEGER_WIDTH_TEST("hd", short); INTEGER_WIDTH_TEST("ld", long); INTEGER_WIDTH_TEST("lld", long long); INTEGER_WIDTH_TEST("u", unsigned int); INTEGER_WIDTH_TEST("hhu", unsigned char); INTEGER_WIDTH_TEST("hu", unsigned short); INTEGER_WIDTH_TEST("lu", unsigned long); INTEGER_WIDTH_TEST("llu", unsigned long long); FLOAT_WIDTH_TEST(float); FLOAT_WIDTH_TEST(double); test("Format command with invalid printf format: "); len = redisFormatCommand(&cmd,"key:%08p %b",(void*)1234,"foo",(size_t)3); test_cond(len == -1); const char *argv[3]; argv[0] = "SET"; argv[1] = "foo\0xxx"; argv[2] = "bar"; size_t lens[3] = { 3, 7, 3 }; int argc = 3; test("Format command by passing argc/argv without lengths: "); len = redisFormatCommandArgv(&cmd,argc,argv,NULL); test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n",len) == 0 && len == 4+4+(3+2)+4+(3+2)+4+(3+2)); free(cmd); test("Format command by passing argc/argv with lengths: "); len = redisFormatCommandArgv(&cmd,argc,argv,lens); test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$7\r\nfoo\0xxx\r\n$3\r\nbar\r\n",len) == 0 && len == 4+4+(3+2)+4+(7+2)+4+(3+2)); free(cmd); } static void test_append_formatted_commands(struct config config) { redisContext *c; redisReply *reply; char *cmd; int len; c = connect(config); test("Append format command: "); len = redisFormatCommand(&cmd, "SET foo bar"); test_cond(redisAppendFormattedCommand(c, cmd, len) == REDIS_OK); assert(redisGetReply(c, (void*)&reply) == REDIS_OK); free(cmd); freeReplyObject(reply); disconnect(c, 0); } static void test_reply_reader(void) { redisReader *reader; void *reply; int ret; int i; test("Error handling in reply parser: "); reader = redisReaderCreate(); redisReaderFeed(reader,(char*)"@foo\r\n",6); ret = redisReaderGetReply(reader,NULL); test_cond(ret == REDIS_ERR && strcasecmp(reader->errstr,"Protocol error, got \"@\" as reply type byte") == 0); redisReaderFree(reader); /* when the reply already contains multiple items, they must be free'd * on an error. valgrind will bark when this doesn't happen. 
*/ test("Memory cleanup in reply parser: "); reader = redisReaderCreate(); redisReaderFeed(reader,(char*)"*2\r\n",4); redisReaderFeed(reader,(char*)"$5\r\nhello\r\n",11); redisReaderFeed(reader,(char*)"@foo\r\n",6); ret = redisReaderGetReply(reader,NULL); test_cond(ret == REDIS_ERR && strcasecmp(reader->errstr,"Protocol error, got \"@\" as reply type byte") == 0); redisReaderFree(reader); test("Set error on nested multi bulks with depth > 7: "); reader = redisReaderCreate(); for (i = 0; i < 9; i++) { redisReaderFeed(reader,(char*)"*1\r\n",4); } ret = redisReaderGetReply(reader,NULL); test_cond(ret == REDIS_ERR && strncasecmp(reader->errstr,"No support for",14) == 0); redisReaderFree(reader); test("Works with NULL functions for reply: "); reader = redisReaderCreate(); reader->fn = NULL; redisReaderFeed(reader,(char*)"+OK\r\n",5); ret = redisReaderGetReply(reader,&reply); test_cond(ret == REDIS_OK && reply == (void*)REDIS_REPLY_STATUS); redisReaderFree(reader); test("Works when a single newline (\\r\\n) covers two calls to feed: "); reader = redisReaderCreate(); reader->fn = NULL; redisReaderFeed(reader,(char*)"+OK\r",4); ret = redisReaderGetReply(reader,&reply); assert(ret == REDIS_OK && reply == NULL); redisReaderFeed(reader,(char*)"\n",1); ret = redisReaderGetReply(reader,&reply); test_cond(ret == REDIS_OK && reply == (void*)REDIS_REPLY_STATUS); redisReaderFree(reader); test("Don't reset state after protocol error: "); reader = redisReaderCreate(); reader->fn = NULL; redisReaderFeed(reader,(char*)"x",1); ret = redisReaderGetReply(reader,&reply); assert(ret == REDIS_ERR); ret = redisReaderGetReply(reader,&reply); test_cond(ret == REDIS_ERR && reply == NULL); redisReaderFree(reader); /* Regression test for issue #45 on GitHub. */ test("Don't do empty allocation for empty multi bulk: "); reader = redisReaderCreate(); redisReaderFeed(reader,(char*)"*0\r\n",4); ret = redisReaderGetReply(reader,&reply); test_cond(ret == REDIS_OK && ((redisReply*)reply)->type == REDIS_REPLY_ARRAY && ((redisReply*)reply)->elements == 0); freeReplyObject(reply); redisReaderFree(reader); } static void test_blocking_connection_errors(void) { redisContext *c; test("Returns error when host cannot be resolved: "); c = redisConnect((char*)"idontexist.local", 6379); test_cond(c->err == REDIS_ERR_OTHER && (strcmp(c->errstr,"Name or service not known") == 0 || strcmp(c->errstr,"Can't resolve: idontexist.local") == 0 || strcmp(c->errstr,"nodename nor servname provided, or not known") == 0 || strcmp(c->errstr,"No address associated with hostname") == 0 || strcmp(c->errstr,"no address associated with name") == 0)); redisFree(c); test("Returns error when the port is not open: "); c = redisConnect((char*)"localhost", 1); test_cond(c->err == REDIS_ERR_IO && strcmp(c->errstr,"Connection refused") == 0); redisFree(c); test("Returns error when the unix socket path doesn't accept connections: "); c = redisConnectUnix((char*)"/tmp/idontexist.sock"); test_cond(c->err == REDIS_ERR_IO); /* Don't care about the message... 
*/ redisFree(c); } static void test_blocking_connection(struct config config) { redisContext *c; redisReply *reply; c = connect(config); test("Is able to deliver commands: "); reply = redisCommand(c,"PING"); test_cond(reply->type == REDIS_REPLY_STATUS && strcasecmp(reply->str,"pong") == 0) freeReplyObject(reply); test("Is a able to send commands verbatim: "); reply = redisCommand(c,"SET foo bar"); test_cond (reply->type == REDIS_REPLY_STATUS && strcasecmp(reply->str,"ok") == 0) freeReplyObject(reply); test("%%s String interpolation works: "); reply = redisCommand(c,"SET %s %s","foo","hello world"); freeReplyObject(reply); reply = redisCommand(c,"GET foo"); test_cond(reply->type == REDIS_REPLY_STRING && strcmp(reply->str,"hello world") == 0); freeReplyObject(reply); test("%%b String interpolation works: "); reply = redisCommand(c,"SET %b %b","foo",(size_t)3,"hello\x00world",(size_t)11); freeReplyObject(reply); reply = redisCommand(c,"GET foo"); test_cond(reply->type == REDIS_REPLY_STRING && memcmp(reply->str,"hello\x00world",11) == 0) test("Binary reply length is correct: "); test_cond(reply->len == 11) freeReplyObject(reply); test("Can parse nil replies: "); reply = redisCommand(c,"GET nokey"); test_cond(reply->type == REDIS_REPLY_NIL) freeReplyObject(reply); /* test 7 */ test("Can parse integer replies: "); reply = redisCommand(c,"INCR mycounter"); test_cond(reply->type == REDIS_REPLY_INTEGER && reply->integer == 1) freeReplyObject(reply); test("Can parse multi bulk replies: "); freeReplyObject(redisCommand(c,"LPUSH mylist foo")); freeReplyObject(redisCommand(c,"LPUSH mylist bar")); reply = redisCommand(c,"LRANGE mylist 0 -1"); test_cond(reply->type == REDIS_REPLY_ARRAY && reply->elements == 2 && !memcmp(reply->element[0]->str,"bar",3) && !memcmp(reply->element[1]->str,"foo",3)) freeReplyObject(reply); /* m/e with multi bulk reply *before* other reply. * specifically test ordering of reply items to parse. */ test("Can handle nested multi bulk replies: "); freeReplyObject(redisCommand(c,"MULTI")); freeReplyObject(redisCommand(c,"LRANGE mylist 0 -1")); freeReplyObject(redisCommand(c,"PING")); reply = (redisCommand(c,"EXEC")); test_cond(reply->type == REDIS_REPLY_ARRAY && reply->elements == 2 && reply->element[0]->type == REDIS_REPLY_ARRAY && reply->element[0]->elements == 2 && !memcmp(reply->element[0]->element[0]->str,"bar",3) && !memcmp(reply->element[0]->element[1]->str,"foo",3) && reply->element[1]->type == REDIS_REPLY_STATUS && strcasecmp(reply->element[1]->str,"pong") == 0); freeReplyObject(reply); disconnect(c, 0); } static void test_blocking_io_errors(struct config config) { redisContext *c; redisReply *reply; void *_reply; int major, minor; /* Connect to target given by config. */ c = connect(config); { /* Find out Redis version to determine the path for the next test */ const char *field = "redis_version:"; char *p, *eptr; reply = redisCommand(c,"INFO"); p = strstr(reply->str,field); major = strtol(p+strlen(field),&eptr,10); p = eptr+1; /* char next to the first "." */ minor = strtol(p,&eptr,10); freeReplyObject(reply); } test("Returns I/O error when the connection is lost: "); reply = redisCommand(c,"QUIT"); if (major >= 2 && minor > 0) { /* > 2.0 returns OK on QUIT and read() should be issued once more * to know the descriptor is at EOF. 
*/ test_cond(strcasecmp(reply->str,"OK") == 0 && redisGetReply(c,&_reply) == REDIS_ERR); freeReplyObject(reply); } else { test_cond(reply == NULL); } /* On 2.0, QUIT will cause the connection to be closed immediately and * the read(2) for the reply on QUIT will set the error to EOF. * On >2.0, QUIT will return with OK and another read(2) needed to be * issued to find out the socket was closed by the server. In both * conditions, the error will be set to EOF. */ assert(c->err == REDIS_ERR_EOF && strcmp(c->errstr,"Server closed the connection") == 0); redisFree(c); c = connect(config); test("Returns I/O error on socket timeout: "); struct timeval tv = { 0, 1000 }; assert(redisSetTimeout(c,tv) == REDIS_OK); test_cond(redisGetReply(c,&_reply) == REDIS_ERR && c->err == REDIS_ERR_IO && errno == EAGAIN); redisFree(c); } static void test_invalid_timeout_errors(struct config config) { redisContext *c; test("Set error when an invalid timeout usec value is given to redisConnectWithTimeout: "); config.tcp.timeout.tv_sec = 0; config.tcp.timeout.tv_usec = 10000001; c = redisConnectWithTimeout(config.tcp.host, config.tcp.port, config.tcp.timeout); test_cond(c->err == REDIS_ERR_IO); test("Set error when an invalid timeout sec value is given to redisConnectWithTimeout: "); config.tcp.timeout.tv_sec = (((LONG_MAX) - 999) / 1000) + 1; config.tcp.timeout.tv_usec = 0; c = redisConnectWithTimeout(config.tcp.host, config.tcp.port, config.tcp.timeout); test_cond(c->err == REDIS_ERR_IO); redisFree(c); } static void test_throughput(struct config config) { redisContext *c = connect(config); redisReply **replies; int i, num; long long t1, t2; test("Throughput:\n"); for (i = 0; i < 500; i++) freeReplyObject(redisCommand(c,"LPUSH mylist foo")); num = 1000; replies = malloc(sizeof(redisReply*)*num); t1 = usec(); for (i = 0; i < num; i++) { replies[i] = redisCommand(c,"PING"); assert(replies[i] != NULL && replies[i]->type == REDIS_REPLY_STATUS); } t2 = usec(); for (i = 0; i < num; i++) freeReplyObject(replies[i]); free(replies); printf("\t(%dx PING: %.3fs)\n", num, (t2-t1)/1000000.0); replies = malloc(sizeof(redisReply*)*num); t1 = usec(); for (i = 0; i < num; i++) { replies[i] = redisCommand(c,"LRANGE mylist 0 499"); assert(replies[i] != NULL && replies[i]->type == REDIS_REPLY_ARRAY); assert(replies[i] != NULL && replies[i]->elements == 500); } t2 = usec(); for (i = 0; i < num; i++) freeReplyObject(replies[i]); free(replies); printf("\t(%dx LRANGE with 500 elements: %.3fs)\n", num, (t2-t1)/1000000.0); num = 10000; replies = malloc(sizeof(redisReply*)*num); for (i = 0; i < num; i++) redisAppendCommand(c,"PING"); t1 = usec(); for (i = 0; i < num; i++) { assert(redisGetReply(c, (void*)&replies[i]) == REDIS_OK); assert(replies[i] != NULL && replies[i]->type == REDIS_REPLY_STATUS); } t2 = usec(); for (i = 0; i < num; i++) freeReplyObject(replies[i]); free(replies); printf("\t(%dx PING (pipelined): %.3fs)\n", num, (t2-t1)/1000000.0); replies = malloc(sizeof(redisReply*)*num); for (i = 0; i < num; i++) redisAppendCommand(c,"LRANGE mylist 0 499"); t1 = usec(); for (i = 0; i < num; i++) { assert(redisGetReply(c, (void*)&replies[i]) == REDIS_OK); assert(replies[i] != NULL && replies[i]->type == REDIS_REPLY_ARRAY); assert(replies[i] != NULL && replies[i]->elements == 500); } t2 = usec(); for (i = 0; i < num; i++) freeReplyObject(replies[i]); free(replies); printf("\t(%dx LRANGE with 500 elements (pipelined): %.3fs)\n", num, (t2-t1)/1000000.0); disconnect(c, 0); } // static long __test_callback_flags = 0; // static void 
__test_callback(redisContext *c, void *privdata) { // ((void)c); // /* Shift to detect execution order */ // __test_callback_flags <<= 8; // __test_callback_flags |= (long)privdata; // } // // static void __test_reply_callback(redisContext *c, redisReply *reply, void *privdata) { // ((void)c); // /* Shift to detect execution order */ // __test_callback_flags <<= 8; // __test_callback_flags |= (long)privdata; // if (reply) freeReplyObject(reply); // } // // static redisContext *__connect_nonblock() { // /* Reset callback flags */ // __test_callback_flags = 0; // return redisConnectNonBlock("127.0.0.1", port, NULL); // } // // static void test_nonblocking_connection() { // redisContext *c; // int wdone = 0; // // test("Calls command callback when command is issued: "); // c = __connect_nonblock(); // redisSetCommandCallback(c,__test_callback,(void*)1); // redisCommand(c,"PING"); // test_cond(__test_callback_flags == 1); // redisFree(c); // // test("Calls disconnect callback on redisDisconnect: "); // c = __connect_nonblock(); // redisSetDisconnectCallback(c,__test_callback,(void*)2); // redisDisconnect(c); // test_cond(__test_callback_flags == 2); // redisFree(c); // // test("Calls disconnect callback and free callback on redisFree: "); // c = __connect_nonblock(); // redisSetDisconnectCallback(c,__test_callback,(void*)2); // redisSetFreeCallback(c,__test_callback,(void*)4); // redisFree(c); // test_cond(__test_callback_flags == ((2 << 8) | 4)); // // test("redisBufferWrite against empty write buffer: "); // c = __connect_nonblock(); // test_cond(redisBufferWrite(c,&wdone) == REDIS_OK && wdone == 1); // redisFree(c); // // test("redisBufferWrite against not yet connected fd: "); // c = __connect_nonblock(); // redisCommand(c,"PING"); // test_cond(redisBufferWrite(c,NULL) == REDIS_ERR && // strncmp(c->error,"write:",6) == 0); // redisFree(c); // // test("redisBufferWrite against closed fd: "); // c = __connect_nonblock(); // redisCommand(c,"PING"); // redisDisconnect(c); // test_cond(redisBufferWrite(c,NULL) == REDIS_ERR && // strncmp(c->error,"write:",6) == 0); // redisFree(c); // // test("Process callbacks in the right sequence: "); // c = __connect_nonblock(); // redisCommandWithCallback(c,__test_reply_callback,(void*)1,"PING"); // redisCommandWithCallback(c,__test_reply_callback,(void*)2,"PING"); // redisCommandWithCallback(c,__test_reply_callback,(void*)3,"PING"); // // /* Write output buffer */ // wdone = 0; // while(!wdone) { // usleep(500); // redisBufferWrite(c,&wdone); // } // // /* Read until at least one callback is executed (the 3 replies will // * arrive in a single packet, causing all callbacks to be executed in // * a single pass). */ // while(__test_callback_flags == 0) { // assert(redisBufferRead(c) == REDIS_OK); // redisProcessCallbacks(c); // } // test_cond(__test_callback_flags == 0x010203); // redisFree(c); // // test("redisDisconnect executes pending callbacks with NULL reply: "); // c = __connect_nonblock(); // redisSetDisconnectCallback(c,__test_callback,(void*)1); // redisCommandWithCallback(c,__test_reply_callback,(void*)2,"PING"); // redisDisconnect(c); // test_cond(__test_callback_flags == 0x0201); // redisFree(c); // } int main(int argc, char **argv) { struct config cfg = { .tcp = { .host = "127.0.0.1", .port = 6379 }, .unix = { .path = "/tmp/redis.sock" } }; int throughput = 1; int test_inherit_fd = 1; /* Ignore broken pipe signal (for I/O error tests). */ signal(SIGPIPE, SIG_IGN); /* Parse command line options. 
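 * (Recognized options, per the loop below: -h <host>, -p <port>,
 * -s <unix socket path>, --skip-throughput, and --skip-inherit-fd.)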
*/ argv++; argc--; while (argc) { if (argc >= 2 && !strcmp(argv[0],"-h")) { argv++; argc--; cfg.tcp.host = argv[0]; } else if (argc >= 2 && !strcmp(argv[0],"-p")) { argv++; argc--; cfg.tcp.port = atoi(argv[0]); } else if (argc >= 2 && !strcmp(argv[0],"-s")) { argv++; argc--; cfg.unix.path = argv[0]; } else if (argc >= 1 && !strcmp(argv[0],"--skip-throughput")) { throughput = 0; } else if (argc >= 1 && !strcmp(argv[0],"--skip-inherit-fd")) { test_inherit_fd = 0; } else { fprintf(stderr, "Invalid argument: %s\n", argv[0]); exit(1); } argv++; argc--; } test_format_commands(); test_reply_reader(); test_blocking_connection_errors(); printf("\nTesting against TCP connection (%s:%d):\n", cfg.tcp.host, cfg.tcp.port); cfg.type = CONN_TCP; test_blocking_connection(cfg); test_blocking_io_errors(cfg); test_invalid_timeout_errors(cfg); test_append_formatted_commands(cfg); if (throughput) test_throughput(cfg); printf("\nTesting against Unix socket connection (%s):\n", cfg.unix.path); cfg.type = CONN_UNIX; test_blocking_connection(cfg); test_blocking_io_errors(cfg); if (throughput) test_throughput(cfg); if (test_inherit_fd) { printf("\nTesting against inherited fd (%s):\n", cfg.unix.path); cfg.type = CONN_FD; test_blocking_connection(cfg); } if (fails) { printf("*** %d TESTS FAILED ***\n", fails); return 1; } printf("ALL TESTS PASSED\n"); return 0; } disque-1.0-rc1/deps/hiredis/zmalloc.h000066400000000000000000000004131264176561100175320ustar00rootroot00000000000000/* Drop in replacement for zmalloc.h in order to just use libc malloc without * any wrappering. */ #ifndef ZMALLOC_H #define ZMALLOC_H #define zmalloc malloc #define zrealloc realloc #define zcalloc(x) calloc(x,1) #define zfree free #define zstrdup strdup #endif disque-1.0-rc1/deps/jemalloc/000077500000000000000000000000001264176561100160615ustar00rootroot00000000000000disque-1.0-rc1/deps/jemalloc/.autom4te.cfg000066400000000000000000000001531264176561100203610ustar00rootroot00000000000000begin-language: "Autoconf-without-aclocal-m4" args: --no-cache end-language: "Autoconf-without-aclocal-m4" disque-1.0-rc1/deps/jemalloc/.gitattributes000066400000000000000000000000231264176561100207470ustar00rootroot00000000000000* text=auto eol=lf disque-1.0-rc1/deps/jemalloc/.gitignore000066400000000000000000000027671264176561100200650ustar00rootroot00000000000000/*.gcov.* /bin/jemalloc-config /bin/jemalloc.sh /bin/jeprof /config.stamp /config.log /config.status /configure /doc/html.xsl /doc/manpages.xsl /doc/jemalloc.xml /doc/jemalloc.html /doc/jemalloc.3 /jemalloc.pc /lib/ /Makefile /include/jemalloc/internal/jemalloc_internal.h /include/jemalloc/internal/jemalloc_internal_defs.h /include/jemalloc/internal/private_namespace.h /include/jemalloc/internal/private_unnamespace.h /include/jemalloc/internal/public_namespace.h /include/jemalloc/internal/public_symbols.txt /include/jemalloc/internal/public_unnamespace.h /include/jemalloc/internal/size_classes.h /include/jemalloc/jemalloc.h /include/jemalloc/jemalloc_defs.h /include/jemalloc/jemalloc_macros.h /include/jemalloc/jemalloc_mangle.h /include/jemalloc/jemalloc_mangle_jet.h /include/jemalloc/jemalloc_protos.h /include/jemalloc/jemalloc_protos_jet.h /include/jemalloc/jemalloc_rename.h /include/jemalloc/jemalloc_typedefs.h /src/*.[od] /src/*.gcda /src/*.gcno /test/test.sh test/include/test/jemalloc_test.h test/include/test/jemalloc_test_defs.h /test/integration/[A-Za-z]* !/test/integration/[A-Za-z]*.* /test/integration/*.[od] /test/integration/*.gcda /test/integration/*.gcno /test/integration/*.out 
/test/src/*.[od]
/test/src/*.gcda
/test/src/*.gcno
/test/stress/[A-Za-z]*
!/test/stress/[A-Za-z]*.*
/test/stress/*.[od]
/test/stress/*.gcda
/test/stress/*.gcno
/test/stress/*.out
/test/unit/[A-Za-z]*
!/test/unit/[A-Za-z]*.*
/test/unit/*.[od]
/test/unit/*.gcda
/test/unit/*.gcno
/test/unit/*.out
/VERSION
disque-1.0-rc1/deps/jemalloc/COPYING000066400000000000000000000032471264176561100171200ustar00rootroot00000000000000Unless otherwise specified, files in the jemalloc source distribution are
subject to the following license:
--------------------------------------------------------------------------------
Copyright (C) 2002-2015 Jason Evans <jasone@canonware.com>.
All rights reserved.
Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
Copyright (C) 2009-2015 Facebook, Inc. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice(s),
   this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice(s),
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------
disque-1.0-rc1/deps/jemalloc/ChangeLog000066400000000000000000001115761264176561100176420ustar00rootroot00000000000000Following are change highlights associated with official releases. Important
bug fixes are all mentioned, but some internal enhancements are omitted here
for brevity. Much more detail can be found in the git revision history:

    https://github.com/jemalloc/jemalloc

* 4.0.3 (September 24, 2015)

  This bugfix release continues the trend of xallocx() and heap profiling fixes.

  Bug fixes:
  - Fix xallocx(..., MALLOCX_ZERO) to zero all trailing bytes of large allocations when --enable-cache-oblivious configure option is enabled.
  - Fix xallocx(..., MALLOCX_ZERO) to zero trailing bytes of huge allocations when resizing from/to a size class that is not a multiple of the chunk size.
  - Fix prof_tctx_dump_iter() to filter out nodes that were created after heap profile dumping started.
  - Work around a potentially bad thread-specific data initialization interaction with NPTL (glibc's pthreads implementation).

* 4.0.2 (September 21, 2015)

  This bugfix release addresses a few bugs specific to heap profiling.

  Bug fixes:
  - Fix ixallocx_prof_sample() to never modify nor create sampled small allocations. xallocx() is in general incapable of moving small allocations, so this fix removes buggy code without loss of generality.
  - Fix irallocx_prof_sample() to always allocate large regions, even when alignment is non-zero.
  - Fix prof_alloc_rollback() to read tdata from thread-specific data rather than dereferencing a potentially invalid tctx.

* 4.0.1 (September 15, 2015)

  This is a bugfix release that is somewhat high risk due to the amount of refactoring required to address deep xallocx() problems. As a side effect of these fixes, xallocx() now tries harder to partially fulfill requests for optional extra space. Note that a couple of minor heap profiling optimizations are included, but these are better thought of as performance fixes that were integral to discovering most of the other bugs.

  Optimizations:
  - Avoid a chunk metadata read in arena_prof_tctx_set(), since it is in the fast path when heap profiling is enabled. Additionally, split a special case out into arena_prof_tctx_reset(), which also avoids chunk metadata reads.
  - Optimize irallocx_prof() to optimistically update the sampler state. The prior implementation appears to have been a holdover from when rallocx()/xallocx() functionality was combined as rallocm().

  Bug fixes:
  - Fix TLS configuration such that it is enabled by default for platforms on which it works correctly.
  - Fix arenas_cache_cleanup() and arena_get_hard() to handle allocation/deallocation within the application's thread-specific data cleanup functions even after arenas_cache is torn down.
  - Fix xallocx() bugs related to size+extra exceeding HUGE_MAXCLASS.
  - Fix chunk purge hook calls for in-place huge shrinking reallocation to specify the old chunk size rather than the new chunk size. This bug caused no correctness issues for the default chunk purge function, but was visible to custom functions set via the "arena.<i>.chunk_hooks" mallctl.
  - Fix heap profiling bugs:
    + Fix heap profiling to distinguish among otherwise identical sample sites with interposed resets (triggered via the "prof.reset" mallctl). This bug could cause data structure corruption that would most likely result in a segfault.
    + Fix irealloc_prof() to prof_alloc_rollback() on OOM.
    + Make one call to prof_active_get_unlocked() per allocation event, and use the result throughout the relevant functions that handle an allocation event. Also add a missing check in prof_realloc(). These fixes protect allocation events against concurrent prof_active changes.
    + Fix ixallocx_prof() to pass usize_max and zero to ixallocx_prof_sample() in the correct order.
    + Fix prof_realloc() to call prof_free_sampled_object() after calling prof_malloc_sample_object(). Prior to this fix, if tctx and old_tctx were the same, the tctx could have been prematurely destroyed.
  - Fix portability bugs:
    + Don't bitshift by negative amounts when encoding/decoding run sizes in chunk header maps. This affected systems with page sizes greater than 8 KiB.
    + Rename index_t to szind_t to avoid an existing type on Solaris.
    + Add JEMALLOC_CXX_THROW to the memalign() function prototype, in order to match glibc and avoid compilation errors when including both jemalloc/jemalloc.h and malloc.h in C++ code.
    + Don't assume that /bin/sh is appropriate when running size_classes.sh during configuration.
    + Consider __sparcv9 a synonym for __sparc64__ when defining LG_QUANTUM.
    + Link tests to librt if it contains clock_gettime(2).

* 4.0.0 (August 17, 2015)

  This version contains many speed and space optimizations, both minor and major. The major themes are generalization, unification, and simplification. Although many of these optimizations cause no visible behavior change, their cumulative effect is substantial.

  New features:
  - Normalize size class spacing to be consistent across the complete size range. By default there are four size classes per size doubling, but this is now configurable via the --with-lg-size-class-group option. Also add the --with-lg-page, --with-lg-page-sizes, --with-lg-quantum, and --with-lg-tiny-min options, which can be used to tweak page and size class settings. Impacts:
    + Worst case performance for incrementally growing/shrinking reallocation is improved because there are far fewer size classes, and therefore copying happens less often.
    + Internal fragmentation is limited to 20% for all but the smallest size classes (those less than four times the quantum). (1B + 4 KiB) and (1B + 4 MiB) previously suffered nearly 50% internal fragmentation.
    + Chunk fragmentation tends to be lower because there are fewer distinct run sizes to pack.
  - Add support for explicit tcaches. The "tcache.create", "tcache.flush", and "tcache.destroy" mallctls control tcache lifetime and flushing, and the MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to the *allocx() API control which tcache is used for each operation.
  - Implement per thread heap profiling, as well as the ability to enable/disable heap profiling on a per thread basis. Add the "prof.reset", "prof.lg_sample", "thread.prof.name", "thread.prof.active", "opt.prof_thread_active_init", "prof.thread_active_init", and "thread.prof.active" mallctls.
  - Add support for per arena application-specified chunk allocators, configured via the "arena.<i>.chunk_hooks" mallctl.
  - Refactor huge allocation to be managed by arenas, so that arenas now function as general purpose independent allocators. This is important in the context of user-specified chunk allocators, aside from the scalability benefits. Related new statistics:
    + The "stats.arenas.<i>.huge.allocated", "stats.arenas.<i>.huge.nmalloc", "stats.arenas.<i>.huge.ndalloc", and "stats.arenas.<i>.huge.nrequests" mallctls provide high level per arena huge allocation statistics.
    + The "arenas.nhchunks", "arenas.hchunk.<i>.size", "stats.arenas.<i>.hchunks.<j>.nmalloc", "stats.arenas.<i>.hchunks.<j>.ndalloc", "stats.arenas.<i>.hchunks.<j>.nrequests", and "stats.arenas.<i>.hchunks.<j>.curhchunks" mallctls provide per size class statistics.
  - Add the 'util' column to malloc_stats_print() output, which reports the proportion of available regions that are currently in use for each small size class.
  - Add "alloc" and "free" modes for junk filling (see the "opt.junk" mallctl), so that it is possible to separately enable junk filling for allocation versus deallocation.
  - Add the jemalloc-config script, which provides information about how jemalloc was configured, and how to integrate it into application builds.
  - Add metadata statistics, which are accessible via the "stats.metadata", "stats.arenas.<i>.metadata.mapped", and "stats.arenas.<i>.metadata.allocated" mallctls.
  - Add the "stats.resident" mallctl, which reports the upper limit of physically resident memory mapped by the allocator.
  - Add per arena control over unused dirty page purging, via the "arenas.lg_dirty_mult", "arena.<i>.lg_dirty_mult", and "stats.arenas.<i>.lg_dirty_mult" mallctls.
  - Add the "prof.gdump" mallctl, which makes it possible to toggle the gdump feature on/off during program execution.
  - Add sdallocx(), which implements sized deallocation. The primary optimization over dallocx() is the removal of a metadata read, which often suffers an L1 cache miss.
  - Add missing header includes in jemalloc/jemalloc.h, so that applications only have to #include <jemalloc/jemalloc.h>.
  - Add support for additional platforms:
    + Bitrig
    + Cygwin
    + DragonFlyBSD
    + iOS
    + OpenBSD
    + OpenRISC/or1k

  Optimizations:
  - Maintain dirty runs in per arena LRUs rather than in per arena trees of dirty-run-containing chunks. In practice this change significantly reduces dirty page purging volume.
  - Integrate whole chunks into the unused dirty page purging machinery. This reduces the cost of repeated huge allocation/deallocation, because it effectively introduces a cache of chunks.
  - Split the arena chunk map into two separate arrays, in order to increase cache locality for the frequently accessed bits.
  - Move small run metadata out of runs, into arena chunk headers. This reduces run fragmentation, smaller runs reduce external fragmentation for small size classes, and packed (less uniformly aligned) metadata layout improves CPU cache set distribution.
  - Randomly distribute large allocation base pointer alignment relative to page boundaries in order to more uniformly utilize CPU cache sets. This can be disabled via the --disable-cache-oblivious configure option, and queried via the "config.cache_oblivious" mallctl.
  - Micro-optimize the fast paths for the public API functions.
  - Refactor thread-specific data to reside in a single structure. This assures that only a single TLS read is necessary per call into the public API.
  - Implement in-place huge allocation growing and shrinking.
  - Refactor rtree (radix tree for chunk lookups) to be lock-free, and make additional optimizations that reduce maximum lookup depth to one or two levels. This resolves what was a concurrency bottleneck for per arena huge allocation, because a global data structure is critical for determining which arenas own which huge allocations.

  Incompatible changes:
  - Replace --enable-cc-silence with --disable-cc-silence to suppress spurious warnings by default.
  - Assure that the constness of malloc_usable_size()'s return type matches that of the system implementation.
  - Change the heap profile dump format to support per thread heap profiling, rename pprof to jeprof, and enhance it with the --thread= option. As a result, the bundled jeprof must now be used rather than the upstream (gperftools) pprof.
  - Disable "opt.prof_final" by default, in order to avoid atexit(3), which can internally deadlock on some platforms.
  - Change the "arenas.nlruns" mallctl type from size_t to unsigned.
  - Replace the "stats.arenas.<i>.bins.<j>.allocated" mallctl with "stats.arenas.<i>.bins.<j>.curregs".
  - Ignore MALLOC_CONF in set{uid,gid,cap} binaries.
  - Ignore MALLOCX_ARENA(a) in dallocx(), in favor of using the MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to control tcache usage.

  Removed features:
  - Remove the *allocm() API, which is superseded by the *allocx() API.
  - Remove the --enable-dss options, and make dss non-optional on all platforms which support sbrk(2).
  - Remove the "arenas.purge" mallctl, which was obsoleted by the "arena.<i>.purge" mallctl in 3.1.0.
  - Remove the unnecessary "opt.valgrind" mallctl; jemalloc automatically detects whether it is running inside Valgrind.
  - Remove the "stats.huge.allocated", "stats.huge.nmalloc", and "stats.huge.ndalloc" mallctls.
  - Remove the --enable-mremap option.
  - Remove the "stats.chunks.current", "stats.chunks.total", and "stats.chunks.high" mallctls.

  Bug fixes:
  - Fix the cactive statistic to decrease (rather than increase) when active memory decreases. This regression was first released in 3.5.0.
  - Fix OOM handling in memalign() and valloc().
  A variant of this bug existed in all releases since 2.0.0, which introduced these functions.
- Fix an OOM-related regression in arena_tcache_fill_small(), which could cause cache corruption on OOM. This regression was present in all releases from 2.2.0 through 3.6.0.
- Fix size class overflow handling for malloc(), posix_memalign(), memalign(), calloc(), and realloc() when profiling is enabled.
- Fix the "arena.<i>.dss" mallctl to return an error if "primary" or "secondary" precedence is specified, but sbrk(2) is not supported.
- Fix fallback lg_floor() implementations to handle extremely large inputs.
- Ensure the default purgeable zone is after the default zone on OS X.
- Fix latent bugs in atomic_*().
- Fix the "arena.<i>.dss" mallctl to handle read-only calls.
- Fix tls_model configuration to enable the initial-exec model when possible.
- Mark malloc_conf as a weak symbol so that the application can override it.
- Correctly detect glibc's adaptive pthread mutexes.
- Fix the --without-export configure option.

* 3.6.0 (March 31, 2014)

This version contains a critical bug fix for a regression present in 3.5.0 and 3.5.1.

Bug fixes:
- Fix a regression in arena_chunk_alloc() that caused crashes during small/large allocation if chunk allocation failed. In the absence of this bug, chunk allocation failure would result in allocation failure, e.g. NULL return from malloc(). This regression was introduced in 3.5.0.
- Fix backtracing for gcc intrinsics-based backtracing by specifying -fno-omit-frame-pointer to gcc. Note that the application (and all the libraries it links to) must also be compiled with this option for backtracing to be reliable.
- Use dss allocation precedence for huge allocations as well as small/large allocations.
- Fix test assertion failure message formatting. This bug did not manifest on x86_64 systems because of implementation subtleties in va_list.
- Fix inconsequential test failures for hash and SFMT code.

New features:
- Support heap profiling on FreeBSD. This feature depends on the proc filesystem being mounted during heap profile dumping.

* 3.5.1 (February 25, 2014)

This version primarily addresses minor bugs in test code.

Bug fixes:
- Configure Solaris/Illumos to use MADV_FREE.
- Fix junk filling for mremap(2)-based huge reallocation. This is only relevant if configuring with the --enable-mremap option specified.
- Avoid compilation failure if 'restrict' C99 keyword is not supported by the compiler.
- Add a configure test for SSE2 rather than assuming it is usable on i686 systems. This fixes test compilation errors, especially on 32-bit Linux systems.
- Fix mallctl argument size mismatches (size_t vs. uint64_t) in the stats unit test.
- Fix/remove flawed alignment-related overflow tests.
- Prevent compiler optimizations that could change backtraces in the prof_accum unit test.

* 3.5.0 (January 22, 2014)

This version focuses on refactoring and automated testing, though it also includes some non-trivial heap profiling optimizations not mentioned below.

New features:
- Add the *allocx() API, which is a successor to the experimental *allocm() API. The *allocx() functions are slightly simpler to use because they have fewer parameters, they directly return the results of primary interest, and mallocx()/rallocx() avoid the strict aliasing pitfall that allocm()/rallocm() share with posix_memalign(). Note that *allocm() is slated for removal in the next non-bugfix release.
- Add support for LinuxThreads.
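A minimal sketch of the *allocx() calling convention described above (default build assumed); nallocx() also makes the normalized size-class spacing from the 4.0.0 notes directly observable:

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        /* 64-byte-aligned, zero-initialized allocation. */
        void *p = mallocx(100, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
        if (p == NULL)
            return 1;

        /* rallocx() returns the result of primary interest directly,
         * rather than through an out parameter as rallocm() did. */
        void *q = rallocx(p, 300, MALLOCX_ALIGN(64));
        if (q == NULL) {
            dallocx(p, 0);
            return 1;
        }
        p = q;

        /* nallocx() rounds a request up to its size class without
         * allocating; with 4 classes per doubling, 100 maps to 112. */
        printf("100 -> %zu, 300 -> %zu\n", nallocx(100, 0), nallocx(300, 0));

        dallocx(p, 0);
        return 0;
    }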
Bug fixes: - Unless heap profiling is enabled, disable floating point code and don't link with libm. This, in combination with e.g. EXTRA_CFLAGS=-mno-sse on x64 systems, makes it possible to completely disable floating point register use. Some versions of glibc neglect to save/restore caller-saved floating point registers during dynamic lazy symbol loading, and the symbol loading code uses whatever malloc the application happens to have linked/loaded with, the result being potential floating point register corruption. - Report ENOMEM rather than EINVAL if an OOM occurs during heap profiling backtrace creation in imemalign(). This bug impacted posix_memalign() and aligned_alloc(). - Fix a file descriptor leak in a prof_dump_maps() error path. - Fix prof_dump() to close the dump file descriptor for all relevant error paths. - Fix rallocm() to use the arena specified by the ALLOCM_ARENA(s) flag for allocation, not just deallocation. - Fix a data race for large allocation stats counters. - Fix a potential infinite loop during thread exit. This bug occurred on Solaris, and could affect other platforms with similar pthreads TSD implementations. - Don't junk-fill reallocations unless usable size changes. This fixes a violation of the *allocx()/*allocm() semantics. - Fix growing large reallocation to junk fill new space. - Fix huge deallocation to junk fill when munmap is disabled. - Change the default private namespace prefix from empty to je_, and change --with-private-namespace-prefix so that it prepends an additional prefix rather than replacing je_. This reduces the likelihood of applications which statically link jemalloc experiencing symbol name collisions. - Add missing private namespace mangling (relevant when --with-private-namespace is specified). - Add and use JEMALLOC_INLINE_C so that static inline functions are marked as static even for debug builds. - Add a missing mutex unlock in a malloc_init_hard() error path. In practice this error path is never executed. - Fix numerous bugs in malloc_strotumax() error handling/reporting. These bugs had no impact except for malformed inputs. - Fix numerous bugs in malloc_snprintf(). These bugs were not exercised by existing calls, so they had no impact. * 3.4.1 (October 20, 2013) Bug fixes: - Fix a race in the "arenas.extend" mallctl that could cause memory corruption of internal data structures and subsequent crashes. - Fix Valgrind integration flaws that caused Valgrind warnings about reads of uninitialized memory in: + arena chunk headers + internal zero-initialized data structures (relevant to tcache and prof code) - Preserve errno during the first allocation. A readlink(2) call during initialization fails unless /etc/malloc.conf exists, so errno was typically set during the first allocation prior to this fix. - Fix compilation warnings reported by gcc 4.8.1. * 3.4.0 (June 2, 2013) This version is essentially a small bugfix release, but the addition of aarch64 support requires that the minor version be incremented. Bug fixes: - Fix race-triggered deadlocks in chunk_record(). These deadlocks were typically triggered by multiple threads concurrently deallocating huge objects. New features: - Add support for the aarch64 architecture. * 3.3.1 (March 6, 2013) This version fixes bugs that are typically encountered only when utilizing custom run-time options. Bug fixes: - Fix a locking order bug that could cause deadlock during fork if heap profiling were enabled. 
- Fix a chunk recycling bug that could cause the allocator to lose track of whether a chunk was zeroed. On FreeBSD, NetBSD, and OS X, it could cause corruption if allocating via sbrk(2) (unlikely unless running with the "dss:primary" option specified). This was completely harmless on Linux unless using mlockall(2) (and unlikely even then, unless the --disable-munmap configure option or the "dss:primary" option was specified). This regression was introduced in 3.1.0 by the mlockall(2)/madvise(2) interaction fix. - Fix TLS-related memory corruption that could occur during thread exit if the thread never allocated memory. Only the quarantine and prof facilities were susceptible. - Fix two quarantine bugs: + Internal reallocation of the quarantined object array leaked the old array. + Reallocation failure for internal reallocation of the quarantined object array (very unlikely) resulted in memory corruption. - Fix Valgrind integration to annotate all internally allocated memory in a way that keeps Valgrind happy about internal data structure access. - Fix building for s390 systems. * 3.3.0 (January 23, 2013) This version includes a few minor performance improvements in addition to the listed new features and bug fixes. New features: - Add clipping support to lg_chunk option processing. - Add the --enable-ivsalloc option. - Add the --without-export option. - Add the --disable-zone-allocator option. Bug fixes: - Fix "arenas.extend" mallctl to output the number of arenas. - Fix chunk_recycle() to unconditionally inform Valgrind that returned memory is undefined. - Fix build break on FreeBSD related to alloca.h. * 3.2.0 (November 9, 2012) In addition to a couple of bug fixes, this version modifies page run allocation and dirty page purging algorithms in order to better control page-level virtual memory fragmentation. Incompatible changes: - Change the "opt.lg_dirty_mult" default from 5 to 3 (32:1 to 8:1). Bug fixes: - Fix dss/mmap allocation precedence code to use recyclable mmap memory only after primary dss allocation fails. - Fix deadlock in the "arenas.purge" mallctl. This regression was introduced in 3.1.0 by the addition of the "arena..purge" mallctl. * 3.1.0 (October 16, 2012) New features: - Auto-detect whether running inside Valgrind, thus removing the need to manually specify MALLOC_CONF=valgrind:true. - Add the "arenas.extend" mallctl, which allows applications to create manually managed arenas. - Add the ALLOCM_ARENA() flag for {,r,d}allocm(). - Add the "opt.dss", "arena..dss", and "stats.arenas..dss" mallctls, which provide control over dss/mmap precedence. - Add the "arena..purge" mallctl, which obsoletes "arenas.purge". - Define LG_QUANTUM for hppa. Incompatible changes: - Disable tcache by default if running inside Valgrind, in order to avoid making unallocated objects appear reachable to Valgrind. - Drop const from malloc_usable_size() argument on Linux. Bug fixes: - Fix heap profiling crash if sampled object is freed via realloc(p, 0). - Remove const from __*_hook variable declarations, so that glibc can modify them during process forking. - Fix mlockall(2)/madvise(2) interaction. - Fix fork(2)-related deadlocks. - Fix error return value for "thread.tcache.enabled" mallctl. * 3.0.0 (May 11, 2012) Although this version adds some major new features, the primary focus is on internal code cleanup that facilitates maintainability and portability, most of which is not reflected in the ChangeLog. 
This is the first release to incorporate substantial contributions from numerous other developers, and the result is a more broadly useful allocator (see the git revision history for contribution details). Note that the license has been unified, thanks to Facebook granting a license under the same terms as the other copyright holders (see COPYING). New features: - Implement Valgrind support, redzones, and quarantine. - Add support for additional platforms: + FreeBSD + Mac OS X Lion + MinGW + Windows (no support yet for replacing the system malloc) - Add support for additional architectures: + MIPS + SH4 + Tilera - Add support for cross compiling. - Add nallocm(), which rounds a request size up to the nearest size class without actually allocating. - Implement aligned_alloc() (blame C11). - Add the "thread.tcache.enabled" mallctl. - Add the "opt.prof_final" mallctl. - Update pprof (from gperftools 2.0). - Add the --with-mangling option. - Add the --disable-experimental option. - Add the --disable-munmap option, and make it the default on Linux. - Add the --enable-mremap option, which disables use of mremap(2) by default. Incompatible changes: - Enable stats by default. - Enable fill by default. - Disable lazy locking by default. - Rename the "tcache.flush" mallctl to "thread.tcache.flush". - Rename the "arenas.pagesize" mallctl to "arenas.page". - Change the "opt.lg_prof_sample" default from 0 to 19 (1 B to 512 KiB). - Change the "opt.prof_accum" default from true to false. Removed features: - Remove the swap feature, including the "config.swap", "swap.avail", "swap.prezeroed", "swap.nfds", and "swap.fds" mallctls. - Remove highruns statistics, including the "stats.arenas..bins..highruns" and "stats.arenas..lruns..highruns" mallctls. - As part of small size class refactoring, remove the "opt.lg_[qc]space_max", "arenas.cacheline", "arenas.subpage", "arenas.[tqcs]space_{min,max}", and "arenas.[tqcs]bins" mallctls. - Remove the "arenas.chunksize" mallctl. - Remove the "opt.lg_prof_tcmax" option. - Remove the "opt.lg_prof_bt_max" option. - Remove the "opt.lg_tcache_gc_sweep" option. - Remove the --disable-tiny option, including the "config.tiny" mallctl. - Remove the --enable-dynamic-page-shift configure option. - Remove the --enable-sysv configure option. Bug fixes: - Fix a statistics-related bug in the "thread.arena" mallctl that could cause invalid statistics and crashes. - Work around TLS deallocation via free() on Linux. This bug could cause write-after-free memory corruption. - Fix a potential deadlock that could occur during interval- and growth-triggered heap profile dumps. - Fix large calloc() zeroing bugs due to dropping chunk map unzeroed flags. - Fix chunk_alloc_dss() to stop claiming memory is zeroed. This bug could cause memory corruption and crashes with --enable-dss specified. - Fix fork-related bugs that could cause deadlock in children between fork and exec. - Fix malloc_stats_print() to honor 'b' and 'l' in the opts parameter. - Fix realloc(p, 0) to act like free(p). - Do not enforce minimum alignment in memalign(). - Check for NULL pointer in malloc_usable_size(). - Fix an off-by-one heap profile statistics bug that could be observed in interval- and growth-triggered heap profiles. - Fix the "epoch" mallctl to update cached stats even if the passed in epoch is 0. - Fix bin->runcur management to fix a layout policy bug. This bug did not affect correctness. - Fix a bug in choose_arena_hard() that potentially caused more arenas to be initialized than necessary. 
- Add missing "opt.lg_tcache_max" mallctl implementation. - Use glibc allocator hooks to make mixed allocator usage less likely. - Fix build issues for --disable-tcache. - Don't mangle pthread_create() when --with-private-namespace is specified. * 2.2.5 (November 14, 2011) Bug fixes: - Fix huge_ralloc() race when using mremap(2). This is a serious bug that could cause memory corruption and/or crashes. - Fix huge_ralloc() to maintain chunk statistics. - Fix malloc_stats_print(..., "a") output. * 2.2.4 (November 5, 2011) Bug fixes: - Initialize arenas_tsd before using it. This bug existed for 2.2.[0-3], as well as for --disable-tls builds in earlier releases. - Do not assume a 4 KiB page size in test/rallocm.c. * 2.2.3 (August 31, 2011) This version fixes numerous bugs related to heap profiling. Bug fixes: - Fix a prof-related race condition. This bug could cause memory corruption, but only occurred in non-default configurations (prof_accum:false). - Fix off-by-one backtracing issues (make sure that prof_alloc_prep() is excluded from backtraces). - Fix a prof-related bug in realloc() (only triggered by OOM errors). - Fix prof-related bugs in allocm() and rallocm(). - Fix prof_tdata_cleanup() for --disable-tls builds. - Fix a relative include path, to fix objdir builds. * 2.2.2 (July 30, 2011) Bug fixes: - Fix a build error for --disable-tcache. - Fix assertions in arena_purge() (for real this time). - Add the --with-private-namespace option. This is a workaround for symbol conflicts that can inadvertently arise when using static libraries. * 2.2.1 (March 30, 2011) Bug fixes: - Implement atomic operations for x86/x64. This fixes compilation failures for versions of gcc that are still in wide use. - Fix an assertion in arena_purge(). * 2.2.0 (March 22, 2011) This version incorporates several improvements to algorithms and data structures that tend to reduce fragmentation and increase speed. New features: - Add the "stats.cactive" mallctl. - Update pprof (from google-perftools 1.7). - Improve backtracing-related configuration logic, and add the --disable-prof-libgcc option. Bug fixes: - Change default symbol visibility from "internal", to "hidden", which decreases the overhead of library-internal function calls. - Fix symbol visibility so that it is also set on OS X. - Fix a build dependency regression caused by the introduction of the .pic.o suffix for PIC object files. - Add missing checks for mutex initialization failures. - Don't use libgcc-based backtracing except on x64, where it is known to work. - Fix deadlocks on OS X that were due to memory allocation in pthread_mutex_lock(). - Heap profiling-specific fixes: + Fix memory corruption due to integer overflow in small region index computation, when using a small enough sample interval that profiling context pointers are stored in small run headers. + Fix a bootstrap ordering bug that only occurred with TLS disabled. + Fix a rallocm() rsize bug. + Fix error detection bugs for aligned memory allocation. * 2.1.3 (March 14, 2011) Bug fixes: - Fix a cpp logic regression (due to the "thread.{de,}allocatedp" mallctl fix for OS X in 2.1.2). - Fix a "thread.arena" mallctl bug. - Fix a thread cache stats merging bug. * 2.1.2 (March 2, 2011) Bug fixes: - Fix "thread.{de,}allocatedp" mallctl for OS X. - Add missing jemalloc.a to build system. * 2.1.1 (January 31, 2011) Bug fixes: - Fix aligned huge reallocation (affected allocm()). - Fix the ALLOCM_LG_ALIGN macro definition. - Fix a heap dumping deadlock. - Fix a "thread.arena" mallctl bug. 
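Statistics mallctls such as "stats.cactive" above are read with the same mallctl() pattern used throughout these notes; because statistics are cached, the "epoch" mallctl must be written first to refresh them. A minimal sketch ("stats.allocated" and "stats.active" are used here for simplicity; exact mallctl availability varies across the versions listed):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        void *p = malloc(1024 * 1024);

        /* Statistics are cached; writing to "epoch" refreshes them. */
        uint64_t epoch = 1;
        size_t sz = sizeof(epoch);
        mallctl("epoch", &epoch, &sz, &epoch, sz);

        size_t allocated, active;
        sz = sizeof(size_t);
        if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0 &&
            mallctl("stats.active", &active, &sz, NULL, 0) == 0) {
            printf("allocated: %zu, active: %zu\n", allocated, active);
        }

        free(p);
        return 0;
    }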
* 2.1.0 (December 3, 2010) This version incorporates some optimizations that can't quite be considered bug fixes. New features: - Use Linux's mremap(2) for huge object reallocation when possible. - Avoid locking in mallctl*() when possible. - Add the "thread.[de]allocatedp" mallctl's. - Convert the manual page source from roff to DocBook, and generate both roff and HTML manuals. Bug fixes: - Fix a crash due to incorrect bootstrap ordering. This only impacted --enable-debug --enable-dss configurations. - Fix a minor statistics bug for mallctl("swap.avail", ...). * 2.0.1 (October 29, 2010) Bug fixes: - Fix a race condition in heap profiling that could cause undefined behavior if "opt.prof_accum" were disabled. - Add missing mutex unlocks for some OOM error paths in the heap profiling code. - Fix a compilation error for non-C99 builds. * 2.0.0 (October 24, 2010) This version focuses on the experimental *allocm() API, and on improved run-time configuration/introspection. Nonetheless, numerous performance improvements are also included. New features: - Implement the experimental {,r,s,d}allocm() API, which provides a superset of the functionality available via malloc(), calloc(), posix_memalign(), realloc(), malloc_usable_size(), and free(). These functions can be used to allocate/reallocate aligned zeroed memory, ask for optional extra memory during reallocation, prevent object movement during reallocation, etc. - Replace JEMALLOC_OPTIONS/JEMALLOC_PROF_PREFIX with MALLOC_CONF, which is more human-readable, and more flexible. For example: JEMALLOC_OPTIONS=AJP is now: MALLOC_CONF=abort:true,fill:true,stats_print:true - Port to Apple OS X. Sponsored by Mozilla. - Make it possible for the application to control thread-->arena mappings via the "thread.arena" mallctl. - Add compile-time support for all TLS-related functionality via pthreads TSD. This is mainly of interest for OS X, which does not support TLS, but has a TSD implementation with similar performance. - Override memalign() and valloc() if they are provided by the system. - Add the "arenas.purge" mallctl, which can be used to synchronously purge all dirty unused pages. - Make cumulative heap profiling data optional, so that it is possible to limit the amount of memory consumed by heap profiling data structures. - Add per thread allocation counters that can be accessed via the "thread.allocated" and "thread.deallocated" mallctls. Incompatible changes: - Remove JEMALLOC_OPTIONS and malloc_options (see MALLOC_CONF above). - Increase default backtrace depth from 4 to 128 for heap profiling. - Disable interval-based profile dumps by default. Bug fixes: - Remove bad assertions in fork handler functions. These assertions could cause aborts for some combinations of configure settings. - Fix strerror_r() usage to deal with non-standard semantics in GNU libc. - Fix leak context reporting. This bug tended to cause the number of contexts to be underreported (though the reported number of objects and bytes were correct). - Fix a realloc() bug for large in-place growing reallocation. This bug could cause memory corruption, but it was hard to trigger. - Fix an allocation bug for small allocations that could be triggered if multiple threads raced to create a new run of backing pages. - Enhance the heap profiler to trigger samples based on usable size, rather than request size. - Fix a heap profiling bug due to sometimes losing track of requested object size for sampled objects. 
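The "thread.allocated"/"thread.deallocated" counters and the MALLOC_CONF mechanism introduced in 2.0.0 can be exercised as in the following sketch; the in-binary malloc_conf string is one configuration source alongside the MALLOC_CONF environment variable (a default, unprefixed build is assumed):

    #include <stdio.h>
    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    /* Compile-time option string, equivalent to setting MALLOC_CONF in
     * the environment for this binary. */
    const char *malloc_conf = "narenas:4";

    int main(void) {
        size_t allocated, deallocated, sz = sizeof(size_t);

        void *p = malloc(4096);
        mallctl("thread.allocated", &allocated, &sz, NULL, 0);
        free(p);
        mallctl("thread.deallocated", &deallocated, &sz, NULL, 0);

        printf("thread allocated %zu, deallocated %zu bytes\n",
               allocated, deallocated);
        return 0;
    }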
* 1.0.3 (August 12, 2010) Bug fixes: - Fix the libunwind-based implementation of stack backtracing (used for heap profiling). This bug could cause zero-length backtraces to be reported. - Add a missing mutex unlock in library initialization code. If multiple threads raced to initialize malloc, some of them could end up permanently blocked. * 1.0.2 (May 11, 2010) Bug fixes: - Fix junk filling of large objects, which could cause memory corruption. - Add MAP_NORESERVE support for chunk mapping, because otherwise virtual memory limits could cause swap file configuration to fail. Contributed by Jordan DeLong. * 1.0.1 (April 14, 2010) Bug fixes: - Fix compilation when --enable-fill is specified. - Fix threads-related profiling bugs that affected accuracy and caused memory to be leaked during thread exit. - Fix dirty page purging race conditions that could cause crashes. - Fix crash in tcache flushing code during thread destruction. * 1.0.0 (April 11, 2010) This release focuses on speed and run-time introspection. Numerous algorithmic improvements make this release substantially faster than its predecessors. New features: - Implement autoconf-based configuration system. - Add mallctl*(), for the purposes of introspection and run-time configuration. - Make it possible for the application to manually flush a thread's cache, via the "tcache.flush" mallctl. - Base maximum dirty page count on proportion of active memory. - Compute various additional run-time statistics, including per size class statistics for large objects. - Expose malloc_stats_print(), which can be called repeatedly by the application. - Simplify the malloc_message() signature to only take one string argument, and incorporate an opaque data pointer argument for use by the application in combination with malloc_stats_print(). - Add support for allocation backed by one or more swap files, and allow the application to disable over-commit if swap files are in use. - Implement allocation profiling and leak checking. Removed features: - Remove the dynamic arena rebalancing code, since thread-specific caching reduces its utility. Bug fixes: - Modify chunk allocation to work when address space layout randomization (ASLR) is in use. - Fix thread cleanup bugs related to TLS destruction. - Handle 0-size allocation requests in posix_memalign(). - Fix a chunk leak. The leaked chunks were never touched, so this impacted virtual memory usage, but not physical memory usage. * linux_2008082[78]a (August 27/28, 2008) These snapshot releases are the simple result of incorporating Linux-specific support into the FreeBSD malloc sources. -------------------------------------------------------------------------------- vim:filetype=text:textwidth=80 disque-1.0-rc1/deps/jemalloc/INSTALL000066400000000000000000000365341264176561100171250ustar00rootroot00000000000000Building and installing a packaged release of jemalloc can be as simple as typing the following while in the root directory of the source tree: ./configure make make install If building from unpackaged developer sources, the simplest command sequence that might work is: ./autogen.sh make dist make make install Note that documentation is not built by the default target because doing so would create a dependency on xsltproc in packaged releases, hence the requirement to either run 'make dist' or avoid installing docs via the various install_* targets documented below. 
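Once installed, the build can be sanity-checked with a trivial program such as the following, compiled with something like 'cc test.c -ljemalloc'; malloc_stats_print() is the introspection entry point mentioned in the 1.0.0 notes above:

    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        void *p = malloc(256);
        free(p);
        /* A NULL write callback sends the report to stderr; the opaque
         * pointer and options string are unused here. */
        malloc_stats_print(NULL, NULL, NULL);
        return 0;
    }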
=== Advanced configuration =====================================================

The 'configure' script supports numerous options that allow control of which functionality is enabled, where jemalloc is installed, etc. Optionally, pass any of the following arguments (not a definitive list) to 'configure':

--help
    Print a definitive list of options.

--prefix=<install-root-dir>
    Set the base directory in which to install. For example:

        ./configure --prefix=/usr/local

    will cause files to be installed into /usr/local/include, /usr/local/lib, and /usr/local/man.

--with-rpath=<colon-separated-rpath>
    Embed one or more library paths, so that libjemalloc can find the libraries it is linked to. This works only on ELF-based systems.

--with-mangling=<map>
    Mangle public symbols specified in <map> which is a comma-separated list of name:mangled pairs.

    For example, to use ld's --wrap option as an alternative method for overriding libc's malloc implementation, specify something like:

        --with-mangling=malloc:__wrap_malloc,free:__wrap_free[...]

    Note that mangling happens prior to application of the prefix specified by --with-jemalloc-prefix, and mangled symbols are then ignored when applying the prefix.

--with-jemalloc-prefix=<prefix>
    Prefix all public APIs with <prefix>. For example, if <prefix> is "prefix_", API changes like the following occur:

        malloc()         --> prefix_malloc()
        malloc_conf      --> prefix_malloc_conf
        /etc/malloc.conf --> /etc/prefix_malloc.conf
        MALLOC_CONF      --> PREFIX_MALLOC_CONF

    This makes it possible to use jemalloc at the same time as the system allocator, or even to use multiple copies of jemalloc simultaneously. By default, the prefix is "", except on OS X, where it is "je_". On OS X, jemalloc overlays the default malloc zone, but makes no attempt to actually replace the "malloc", "calloc", etc. symbols. (See the usage sketch below.)

--without-export
    Don't export public APIs. This can be useful when building jemalloc as a static library, or to avoid exporting public APIs when using the zone allocator on OSX.

--with-private-namespace=<prefix>
    Prefix all library-private APIs with <prefix>je_. For shared libraries, symbol visibility mechanisms prevent these symbols from being exported, but for static libraries, naming collisions are a real possibility. By default, <prefix> is empty, which results in a symbol prefix of je_ .

--with-install-suffix=<suffix>
    Append <suffix> to the base name of all installed files, such that multiple versions of jemalloc can coexist in the same installation directory. For example, libjemalloc.so.0 becomes libjemalloc<suffix>.so.0.

--disable-cc-silence
    Disable code that silences non-useful compiler warnings. This is mainly useful during development when auditing the set of warnings that are being silenced.

--enable-debug
    Enable assertions and validation code. This incurs a substantial performance hit, but is very useful during application development. Implies --enable-ivsalloc.

--enable-code-coverage
    Enable code coverage support, for use during jemalloc test development. Additional testing targets are available if this option is enabled:

        coverage
        coverage_unit
        coverage_integration
        coverage_stress

    These targets do not clear code coverage results from previous runs, and there are interactions between the various coverage targets, so it is usually advisable to run 'make clean' between repeated code coverage runs.

--disable-stats
    Disable statistics gathering functionality. See the "opt.stats_print" option documentation for usage details.

--enable-ivsalloc
    Enable validation code, which verifies that pointers reside within jemalloc-owned chunks before dereferencing them. This incurs a minor performance hit.
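As a sketch of the --with-jemalloc-prefix behavior described above (assuming a build configured with --with-jemalloc-prefix=je_, so that jemalloc and the system allocator coexist; the generated <jemalloc/jemalloc.h> declares the prefixed names):

    #include <stdio.h>
    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        void *p = je_malloc(128);   /* served by jemalloc */
        void *q = malloc(128);      /* served by the system allocator */

        printf("jemalloc usable size: %zu\n", je_malloc_usable_size(p));

        je_free(p);
        free(q);
        return 0;
    }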
--enable-prof
    Enable heap profiling and leak detection functionality. See the "opt.prof" option documentation for usage details. When enabled, there are several approaches to backtracing, and the configure script chooses the first one in the following list that appears to function correctly:

        + libunwind (requires --enable-prof-libunwind)
        + libgcc (unless --disable-prof-libgcc)
        + gcc intrinsics (unless --disable-prof-gcc)

--enable-prof-libunwind
    Use the libunwind library (http://www.nongnu.org/libunwind/) for stack backtracing.

--disable-prof-libgcc
    Disable the use of libgcc's backtracing functionality.

--disable-prof-gcc
    Disable the use of gcc intrinsics for backtracing.

--with-static-libunwind=<libunwind.a>
    Statically link against the specified libunwind.a rather than dynamically linking with -lunwind.

--disable-tcache
    Disable thread-specific caches for small objects. Objects are cached and released in bulk, thus reducing the total number of mutex operations. See the "opt.tcache" option for usage details.

--disable-munmap
    Disable virtual memory deallocation via munmap(2); instead keep track of the virtual memory for later use. munmap() is disabled by default (i.e. --disable-munmap is implied) on Linux, which has a quirk in its virtual memory allocation algorithm that causes semi-permanent VM map holes under normal jemalloc operation.

--disable-fill
    Disable support for junk/zero filling of memory, quarantine, and redzones. See the "opt.junk", "opt.zero", "opt.quarantine", and "opt.redzone" option documentation for usage details.

--disable-valgrind
    Disable support for Valgrind.

--disable-zone-allocator
    Disable zone allocator for Darwin. This means jemalloc won't be hooked as the default allocator on OSX/iOS.

--enable-utrace
    Enable utrace(2)-based allocation tracing. This feature is not broadly portable (FreeBSD has it, but Linux and OS X do not).

--enable-xmalloc
    Enable support for optional immediate termination due to out-of-memory errors, as is commonly implemented by the "xmalloc" wrapper function for malloc. See the "opt.xmalloc" option documentation for usage details.

--enable-lazy-lock
    Enable code that wraps pthread_create() to detect when an application switches from single-threaded to multi-threaded mode, so that it can avoid mutex locking/unlocking operations while in single-threaded mode. In practice, this feature usually has little impact on performance unless thread-specific caching is disabled.

--disable-tls
    Disable thread-local storage (TLS), which allows for fast access to thread-local variables via the __thread keyword. If TLS is available, jemalloc uses it for several purposes.

--disable-cache-oblivious
    Disable cache-oblivious large allocation alignment for large allocation requests with no alignment constraints. If this feature is disabled, all large allocations are page-aligned as an implementation artifact, which can severely harm CPU cache utilization. However, the cache-oblivious layout comes at the cost of one extra page per large allocation, which in the most extreme case increases physical memory usage for the 16 KiB size class to 20 KiB.

--with-xslroot=<path>
    Specify where to find DocBook XSL stylesheets when building the documentation.

--with-lg-page=<lg-page>
    Specify the base 2 log of the system page size. This option is only useful when cross compiling, since the configure script automatically determines the host's page size by default.

--with-lg-page-sizes=<lg-page-sizes>
    Specify the comma-separated base 2 logs of the page sizes to support.
    This option may be useful when cross-compiling in combination with --with-lg-page, but its primary use case is for integration with FreeBSD's libc, wherein jemalloc is embedded.

--with-lg-size-class-group=<lg-size-class-group>
    Specify the base 2 log of how many size classes to use for each doubling in size. By default jemalloc uses <lg-size-class-group>=2, which results in e.g. the following size classes:

        [...], 64, 80, 96, 112, 128, 160, [...]

    <lg-size-class-group>=3 results in e.g. the following size classes:

        [...], 64, 72, 80, 88, 96, 104, 112, 120, 128, 144, [...]

    The minimal <lg-size-class-group>=0 causes jemalloc to only provide size classes that are powers of 2:

        [...], 64, 128, 256, [...]

    An implementation detail currently limits the total number of small size classes to 255, and a compilation error will result if the <lg-size-class-group> you specify cannot be supported. The limit is roughly <lg-size-class-group>=4, depending on page size.

--with-lg-quantum=<lg-quantum>
    Specify the base 2 log of the minimum allocation alignment. jemalloc needs to know the minimum alignment that meets the following C standard requirement (quoted from the April 12, 2011 draft of the C11 standard):

        The pointer returned if the allocation succeeds is suitably aligned so that it may be assigned to a pointer to any type of object with a fundamental alignment requirement and then used to access such an object or an array of such objects in the space allocated [...]

    This setting is architecture-specific, and although jemalloc includes known safe values for the most commonly used modern architectures, there is a wrinkle related to GNU libc (glibc) that may impact your choice of <lg-quantum>. On most modern architectures, this mandates 16-byte alignment (<lg-quantum>=4), but the glibc developers chose not to meet this requirement for performance reasons. An old discussion can be found at https://sourceware.org/bugzilla/show_bug.cgi?id=206 . Unlike glibc, jemalloc does follow the C standard by default (caveat: jemalloc technically cheats if --with-lg-tiny-min is smaller than --with-lg-quantum), but the fact that Linux systems already work around this allocator noncompliance means that it is generally safe in practice to let jemalloc's minimum alignment follow glibc's lead. If you specify --with-lg-quantum=3 during configuration, jemalloc will provide additional size classes that are not 16-byte-aligned (24, 40, and 56, assuming --with-lg-size-class-group=2).

--with-lg-tiny-min=<lg-tiny-min>
    Specify the base 2 log of the minimum tiny size class to support. Tiny size classes are powers of 2 less than the quantum, and are only incorporated if <lg-tiny-min> is less than <lg-quantum> (see --with-lg-quantum). Tiny size classes technically violate the C standard requirement for minimum alignment, and crashes could conceivably result if the compiler were to generate instructions that made alignment assumptions, both because illegal instruction traps could result, and because accesses could straddle page boundaries and cause segmentation faults due to accessing unmapped addresses. The default of <lg-tiny-min>=3 works well in practice even on architectures that technically require 16-byte alignment, probably for the same reason --with-lg-quantum=3 works. Smaller tiny size classes can, and will, cause crashes (see https://bugzilla.mozilla.org/show_bug.cgi?id=691003 for an example). This option is rarely useful, and is mainly provided as documentation of a subtle implementation detail. If you do use this option, specify a value in [3, ..., <lg-quantum>].

The following environment variables (not a definitive list) impact configure's behavior:

CFLAGS="?"
    Pass these flags to the compiler.
You probably shouldn't define this unless you know what you are doing. (Use EXTRA_CFLAGS instead.) EXTRA_CFLAGS="?" Append these flags to CFLAGS. This makes it possible to add flags such as -Werror, while allowing the configure script to determine what other flags are appropriate for the specified configuration. The configure script specifically checks whether an optimization flag (-O*) is specified in EXTRA_CFLAGS, and refrains from specifying an optimization level if it finds that one has already been specified. CPPFLAGS="?" Pass these flags to the C preprocessor. Note that CFLAGS is not passed to 'cpp' when 'configure' is looking for include files, so you must use CPPFLAGS instead if you need to help 'configure' find header files. LD_LIBRARY_PATH="?" 'ld' uses this colon-separated list to find libraries. LDFLAGS="?" Pass these flags when linking. PATH="?" 'configure' uses this to find programs. === Advanced compilation ======================================================= To build only parts of jemalloc, use the following targets: build_lib_shared build_lib_static build_lib build_doc_html build_doc_man build_doc To install only parts of jemalloc, use the following targets: install_bin install_include install_lib_shared install_lib_static install_lib install_doc_html install_doc_man install_doc To clean up build results to varying degrees, use the following make targets: clean distclean relclean === Advanced installation ====================================================== Optionally, define make variables when invoking make, including (not exclusively): INCLUDEDIR="?" Use this as the installation prefix for header files. LIBDIR="?" Use this as the installation prefix for libraries. MANDIR="?" Use this as the installation prefix for man pages. DESTDIR="?" Prepend DESTDIR to INCLUDEDIR, LIBDIR, DATADIR, and MANDIR. This is useful when installing to a different path than was specified via --prefix. CC="?" Use this to invoke the C compiler. CFLAGS="?" Pass these flags to the compiler. CPPFLAGS="?" Pass these flags to the C preprocessor. LDFLAGS="?" Pass these flags when linking. PATH="?" Use this to search for programs used during configuration and building. === Development ================================================================ If you intend to make non-trivial changes to jemalloc, use the 'autogen.sh' script rather than 'configure'. This re-generates 'configure', enables configuration dependency rules, and enables re-generation of automatically generated source files. The build system supports using an object directory separate from the source tree. For example, you can create an 'obj' directory, and from within that directory, issue configuration and build commands: autoconf mkdir obj cd obj ../configure --enable-autogen make === Documentation ============================================================== The manual page is generated in both html and roff formats. Any web browser can be used to view the html manual. The roff manual page can be formatted prior to installation via the following command: nroff -man -t doc/jemalloc.3 disque-1.0-rc1/deps/jemalloc/Makefile.in000066400000000000000000000406321264176561100201330ustar00rootroot00000000000000# Clear out all vpaths, then set just one (default vpath) for the main build # directory. vpath vpath % . # Clear the default suffixes, so that built-in rules are not used. .SUFFIXES : SHELL := /bin/sh CC := @CC@ # Configuration parameters. 
DESTDIR = BINDIR := $(DESTDIR)@BINDIR@ INCLUDEDIR := $(DESTDIR)@INCLUDEDIR@ LIBDIR := $(DESTDIR)@LIBDIR@ DATADIR := $(DESTDIR)@DATADIR@ MANDIR := $(DESTDIR)@MANDIR@ srcroot := @srcroot@ objroot := @objroot@ abs_srcroot := @abs_srcroot@ abs_objroot := @abs_objroot@ # Build parameters. CPPFLAGS := @CPPFLAGS@ -I$(srcroot)include -I$(objroot)include CFLAGS := @CFLAGS@ LDFLAGS := @LDFLAGS@ EXTRA_LDFLAGS := @EXTRA_LDFLAGS@ LIBS := @LIBS@ TESTLIBS := @TESTLIBS@ RPATH_EXTRA := @RPATH_EXTRA@ SO := @so@ IMPORTLIB := @importlib@ O := @o@ A := @a@ EXE := @exe@ LIBPREFIX := @libprefix@ REV := @rev@ install_suffix := @install_suffix@ ABI := @abi@ XSLTPROC := @XSLTPROC@ AUTOCONF := @AUTOCONF@ _RPATH = @RPATH@ RPATH = $(if $(1),$(call _RPATH,$(1))) cfghdrs_in := $(addprefix $(srcroot),@cfghdrs_in@) cfghdrs_out := @cfghdrs_out@ cfgoutputs_in := $(addprefix $(srcroot),@cfgoutputs_in@) cfgoutputs_out := @cfgoutputs_out@ enable_autogen := @enable_autogen@ enable_code_coverage := @enable_code_coverage@ enable_prof := @enable_prof@ enable_valgrind := @enable_valgrind@ enable_zone_allocator := @enable_zone_allocator@ MALLOC_CONF := @JEMALLOC_CPREFIX@MALLOC_CONF DSO_LDFLAGS = @DSO_LDFLAGS@ SOREV = @SOREV@ PIC_CFLAGS = @PIC_CFLAGS@ CTARGET = @CTARGET@ LDTARGET = @LDTARGET@ MKLIB = @MKLIB@ AR = @AR@ ARFLAGS = @ARFLAGS@ CC_MM = @CC_MM@ ifeq (macho, $(ABI)) TEST_LIBRARY_PATH := DYLD_FALLBACK_LIBRARY_PATH="$(objroot)lib" else ifeq (pecoff, $(ABI)) TEST_LIBRARY_PATH := PATH="$(PATH):$(objroot)lib" else TEST_LIBRARY_PATH := endif endif LIBJEMALLOC := $(LIBPREFIX)jemalloc$(install_suffix) # Lists of files. BINS := $(objroot)bin/jemalloc-config $(objroot)bin/jemalloc.sh $(objroot)bin/jeprof C_HDRS := $(objroot)include/jemalloc/jemalloc$(install_suffix).h C_SRCS := $(srcroot)src/jemalloc.c $(srcroot)src/arena.c \ $(srcroot)src/atomic.c $(srcroot)src/base.c $(srcroot)src/bitmap.c \ $(srcroot)src/chunk.c $(srcroot)src/chunk_dss.c \ $(srcroot)src/chunk_mmap.c $(srcroot)src/ckh.c $(srcroot)src/ctl.c \ $(srcroot)src/extent.c $(srcroot)src/hash.c $(srcroot)src/huge.c \ $(srcroot)src/mb.c $(srcroot)src/mutex.c $(srcroot)src/pages.c \ $(srcroot)src/prof.c $(srcroot)src/quarantine.c $(srcroot)src/rtree.c \ $(srcroot)src/stats.c $(srcroot)src/tcache.c $(srcroot)src/util.c \ $(srcroot)src/tsd.c ifeq ($(enable_valgrind), 1) C_SRCS += $(srcroot)src/valgrind.c endif ifeq ($(enable_zone_allocator), 1) C_SRCS += $(srcroot)src/zone.c endif ifeq ($(IMPORTLIB),$(SO)) STATIC_LIBS := $(objroot)lib/$(LIBJEMALLOC).$(A) endif ifdef PIC_CFLAGS STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_pic.$(A) else STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_s.$(A) endif DSOS := $(objroot)lib/$(LIBJEMALLOC).$(SOREV) ifneq ($(SOREV),$(SO)) DSOS += $(objroot)lib/$(LIBJEMALLOC).$(SO) endif PC := $(objroot)jemalloc.pc MAN3 := $(objroot)doc/jemalloc$(install_suffix).3 DOCS_XML := $(objroot)doc/jemalloc$(install_suffix).xml DOCS_HTML := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.html) DOCS_MAN3 := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.3) DOCS := $(DOCS_HTML) $(DOCS_MAN3) C_TESTLIB_SRCS := $(srcroot)test/src/btalloc.c $(srcroot)test/src/btalloc_0.c \ $(srcroot)test/src/btalloc_1.c $(srcroot)test/src/math.c \ $(srcroot)test/src/mtx.c $(srcroot)test/src/mq.c \ $(srcroot)test/src/SFMT.c $(srcroot)test/src/test.c \ $(srcroot)test/src/thd.c $(srcroot)test/src/timer.c C_UTIL_INTEGRATION_SRCS := $(srcroot)src/util.c TESTS_UNIT := $(srcroot)test/unit/atomic.c \ $(srcroot)test/unit/bitmap.c \ $(srcroot)test/unit/ckh.c \ $(srcroot)test/unit/hash.c \ $(srcroot)test/unit/junk.c \ 
$(srcroot)test/unit/junk_alloc.c \ $(srcroot)test/unit/junk_free.c \ $(srcroot)test/unit/lg_chunk.c \ $(srcroot)test/unit/mallctl.c \ $(srcroot)test/unit/math.c \ $(srcroot)test/unit/mq.c \ $(srcroot)test/unit/mtx.c \ $(srcroot)test/unit/prof_accum.c \ $(srcroot)test/unit/prof_active.c \ $(srcroot)test/unit/prof_gdump.c \ $(srcroot)test/unit/prof_idump.c \ $(srcroot)test/unit/prof_reset.c \ $(srcroot)test/unit/prof_thread_name.c \ $(srcroot)test/unit/ql.c \ $(srcroot)test/unit/qr.c \ $(srcroot)test/unit/quarantine.c \ $(srcroot)test/unit/rb.c \ $(srcroot)test/unit/rtree.c \ $(srcroot)test/unit/SFMT.c \ $(srcroot)test/unit/size_classes.c \ $(srcroot)test/unit/stats.c \ $(srcroot)test/unit/tsd.c \ $(srcroot)test/unit/util.c \ $(srcroot)test/unit/zero.c TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \ $(srcroot)test/integration/allocated.c \ $(srcroot)test/integration/sdallocx.c \ $(srcroot)test/integration/mallocx.c \ $(srcroot)test/integration/MALLOCX_ARENA.c \ $(srcroot)test/integration/overflow.c \ $(srcroot)test/integration/posix_memalign.c \ $(srcroot)test/integration/rallocx.c \ $(srcroot)test/integration/thread_arena.c \ $(srcroot)test/integration/thread_tcache_enabled.c \ $(srcroot)test/integration/xallocx.c \ $(srcroot)test/integration/chunk.c TESTS_STRESS := $(srcroot)test/stress/microbench.c TESTS := $(TESTS_UNIT) $(TESTS_INTEGRATION) $(TESTS_STRESS) C_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.$(O)) C_PIC_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.pic.$(O)) C_JET_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.$(O)) C_TESTLIB_UNIT_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.unit.$(O)) C_TESTLIB_INTEGRATION_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O)) C_UTIL_INTEGRATION_OBJS := $(C_UTIL_INTEGRATION_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O)) C_TESTLIB_STRESS_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.stress.$(O)) C_TESTLIB_OBJS := $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(C_TESTLIB_STRESS_OBJS) TESTS_UNIT_OBJS := $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%.$(O)) TESTS_INTEGRATION_OBJS := $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%.$(O)) TESTS_STRESS_OBJS := $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%.$(O)) TESTS_OBJS := $(TESTS_UNIT_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_STRESS_OBJS) .PHONY: all dist build_doc_html build_doc_man build_doc .PHONY: install_bin install_include install_lib .PHONY: install_doc_html install_doc_man install_doc install .PHONY: tests check clean distclean relclean .SECONDARY : $(TESTS_OBJS) # Default target. all: build_lib dist: build_doc $(objroot)doc/%.html : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/html.xsl $(XSLTPROC) -o $@ $(objroot)doc/html.xsl $< $(objroot)doc/%.3 : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/manpages.xsl $(XSLTPROC) -o $@ $(objroot)doc/manpages.xsl $< build_doc_html: $(DOCS_HTML) build_doc_man: $(DOCS_MAN3) build_doc: $(DOCS) # # Include generated dependency files. 
# ifdef CC_MM -include $(C_OBJS:%.$(O)=%.d) -include $(C_PIC_OBJS:%.$(O)=%.d) -include $(C_JET_OBJS:%.$(O)=%.d) -include $(C_TESTLIB_OBJS:%.$(O)=%.d) -include $(TESTS_OBJS:%.$(O)=%.d) endif $(C_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.c $(C_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.c $(C_PIC_OBJS): CFLAGS += $(PIC_CFLAGS) $(C_JET_OBJS): $(objroot)src/%.jet.$(O): $(srcroot)src/%.c $(C_JET_OBJS): CFLAGS += -DJEMALLOC_JET $(C_TESTLIB_UNIT_OBJS): $(objroot)test/src/%.unit.$(O): $(srcroot)test/src/%.c $(C_TESTLIB_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST $(C_TESTLIB_INTEGRATION_OBJS): $(objroot)test/src/%.integration.$(O): $(srcroot)test/src/%.c $(C_TESTLIB_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST $(C_UTIL_INTEGRATION_OBJS): $(objroot)src/%.integration.$(O): $(srcroot)src/%.c $(C_TESTLIB_STRESS_OBJS): $(objroot)test/src/%.stress.$(O): $(srcroot)test/src/%.c $(C_TESTLIB_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST -DJEMALLOC_STRESS_TESTLIB $(C_TESTLIB_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include $(TESTS_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST $(TESTS_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST $(TESTS_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST $(TESTS_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.c $(TESTS_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include ifneq ($(IMPORTLIB),$(SO)) $(C_OBJS) $(C_JET_OBJS): CPPFLAGS += -DDLLEXPORT endif ifndef CC_MM # Dependencies. HEADER_DIRS = $(srcroot)include/jemalloc/internal \ $(objroot)include/jemalloc $(objroot)include/jemalloc/internal HEADERS = $(wildcard $(foreach dir,$(HEADER_DIRS),$(dir)/*.h)) $(C_OBJS) $(C_PIC_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): $(HEADERS) $(TESTS_OBJS): $(objroot)test/include/test/jemalloc_test.h endif $(C_OBJS) $(C_PIC_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): %.$(O): @mkdir -p $(@D) $(CC) $(CFLAGS) -c $(CPPFLAGS) $(CTARGET) $< ifdef CC_MM @$(CC) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $< endif ifneq ($(SOREV),$(SO)) %.$(SO) : %.$(SOREV) @mkdir -p $(@D) ln -sf $( $(srcroot)config.stamp.in $(objroot)config.stamp : $(cfgoutputs_in) $(cfghdrs_in) $(srcroot)configure ./$(objroot)config.status @touch $@ # There must be some action in order for make to re-read Makefile when it is # out of date. $(cfgoutputs_out) $(cfghdrs_out) : $(objroot)config.stamp @true endif disque-1.0-rc1/deps/jemalloc/README000066400000000000000000000020741264176561100167440ustar00rootroot00000000000000jemalloc is a general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support. jemalloc first came into use as the FreeBSD libc allocator in 2005, and since then it has found its way into numerous applications that rely on its predictable behavior. In 2010 jemalloc development efforts broadened to include developer support features such as heap profiling, Valgrind integration, and extensive monitoring/tuning hooks. Modern jemalloc releases continue to be integrated back into FreeBSD, and therefore versatility remains critical. Ongoing development efforts trend toward making jemalloc among the best allocators for a broad range of demanding applications, and eliminating/mitigating weaknesses that have practical repercussions for real world applications. The COPYING file contains copyright and licensing information. The INSTALL file contains information on how to configure, build, and install jemalloc. The ChangeLog file contains a brief summary of changes for each release. 
URL: http://www.canonware.com/jemalloc/ disque-1.0-rc1/deps/jemalloc/VERSION000066400000000000000000000000621264176561100171270ustar00rootroot000000000000004.0.3-0-ge9192eacf8935e29fc62fddc2701f7942b1cc02c disque-1.0-rc1/deps/jemalloc/autogen.sh000077500000000000000000000004121264176561100200570ustar00rootroot00000000000000#!/bin/sh for i in autoconf; do echo "$i" $i if [ $? -ne 0 ]; then echo "Error $? in $i" exit 1 fi done echo "./configure --enable-autogen $@" ./configure --enable-autogen $@ if [ $? -ne 0 ]; then echo "Error $? in ./configure" exit 1 fi disque-1.0-rc1/deps/jemalloc/bin/000077500000000000000000000000001264176561100166315ustar00rootroot00000000000000disque-1.0-rc1/deps/jemalloc/bin/jemalloc-config.in000066400000000000000000000027311264176561100222150ustar00rootroot00000000000000#!/bin/sh usage() { cat < Options: --help | -h : Print usage. --version : Print jemalloc version. --revision : Print shared library revision number. --config : Print configure options used to build jemalloc. --prefix : Print installation directory prefix. --bindir : Print binary installation directory. --datadir : Print data installation directory. --includedir : Print include installation directory. --libdir : Print library installation directory. --mandir : Print manual page installation directory. --cc : Print compiler used to build jemalloc. --cflags : Print compiler flags used to build jemalloc. --cppflags : Print preprocessor flags used to build jemalloc. --ldflags : Print library flags used to build jemalloc. --libs : Print libraries jemalloc was linked against. EOF } prefix="@prefix@" exec_prefix="@exec_prefix@" case "$1" in --help | -h) usage exit 0 ;; --version) echo "@jemalloc_version@" ;; --revision) echo "@rev@" ;; --config) echo "@CONFIG@" ;; --prefix) echo "@PREFIX@" ;; --bindir) echo "@BINDIR@" ;; --datadir) echo "@DATADIR@" ;; --includedir) echo "@INCLUDEDIR@" ;; --libdir) echo "@LIBDIR@" ;; --mandir) echo "@MANDIR@" ;; --cc) echo "@CC@" ;; --cflags) echo "@CFLAGS@" ;; --cppflags) echo "@CPPFLAGS@" ;; --ldflags) echo "@LDFLAGS@ @EXTRA_LDFLAGS@" ;; --libs) echo "@LIBS@" ;; *) usage exit 1 esac disque-1.0-rc1/deps/jemalloc/bin/jemalloc.sh.in000066400000000000000000000002271264176561100213610ustar00rootroot00000000000000#!/bin/sh prefix=@prefix@ exec_prefix=@exec_prefix@ libdir=@libdir@ @LD_PRELOAD_VAR@=${libdir}/libjemalloc.@SOREV@ export @LD_PRELOAD_VAR@ exec "$@" disque-1.0-rc1/deps/jemalloc/bin/jeprof.in000066400000000000000000005263171264176561100204640ustar00rootroot00000000000000#! /usr/bin/env perl # Copyright (c) 1998-2007, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # --- # Program for printing the profile generated by common/profiler.cc, # or by the heap profiler (common/debugallocation.cc) # # The profile contains a sequence of entries of the form: # # This program parses the profile, and generates user-readable # output. # # Examples: # # % tools/jeprof "program" "profile" # Enters "interactive" mode # # % tools/jeprof --text "program" "profile" # Generates one line per procedure # # % tools/jeprof --gv "program" "profile" # Generates annotated call-graph and displays via "gv" # # % tools/jeprof --gv --focus=Mutex "program" "profile" # Restrict to code paths that involve an entry that matches "Mutex" # # % tools/jeprof --gv --focus=Mutex --ignore=string "program" "profile" # Restrict to code paths that involve an entry that matches "Mutex" # and does not match "string" # # % tools/jeprof --list=IBF_CheckDocid "program" "profile" # Generates disassembly listing of all routines with at least one # sample that match the --list= pattern. The listing is # annotated with the flat and cumulative sample counts at each line. # # % tools/jeprof --disasm=IBF_CheckDocid "program" "profile" # Generates disassembly listing of all routines with at least one # sample that match the --disasm= pattern. The listing is # annotated with the flat and cumulative sample counts at each PC value. # # TODO: Use color to indicate files? use strict; use warnings; use Getopt::Long; my $JEPROF_VERSION = "@jemalloc_version@"; my $PPROF_VERSION = "2.0"; # These are the object tools we use which can come from a # user-specified location using --tools, from the JEPROF_TOOLS # environment variable, or from the environment. my %obj_tool_map = ( "objdump" => "objdump", "nm" => "nm", "addr2line" => "addr2line", "c++filt" => "c++filt", ## ConfigureObjTools may add architecture-specific entries: #"nm_pdb" => "nm-pdb", # for reading windows (PDB-format) executables #"addr2line_pdb" => "addr2line-pdb", # ditto #"otool" => "otool", # equivalent of objdump on OS X ); # NOTE: these are lists, so you can put in commandline flags if you want. my @DOT = ("dot"); # leave non-absolute, since it may be in /usr/local my @GV = ("gv"); my @EVINCE = ("evince"); # could also be xpdf or perhaps acroread my @KCACHEGRIND = ("kcachegrind"); my @PS2PDF = ("ps2pdf"); # These are used for dynamic profiles my @URL_FETCHER = ("curl", "-s"); # These are the web pages that servers need to support for dynamic profiles my $HEAP_PAGE = "/pprof/heap"; my $PROFILE_PAGE = "/pprof/profile"; # must support cgi-param "?seconds=#" my $PMUPROFILE_PAGE = "/pprof/pmuprofile(?:\\?.*)?"; # must support cgi-param # ?seconds=#&event=x&period=n my $GROWTH_PAGE = "/pprof/growth"; my $CONTENTION_PAGE = "/pprof/contention"; my $WALL_PAGE = "/pprof/wall(?:\\?.*)?"; # accepts options like namefilter my $FILTEREDPROFILE_PAGE = "/pprof/filteredprofile(?:\\?.*)?"; my $CENSUSPROFILE_PAGE = "/pprof/censusprofile(?:\\?.*)?"; # must support cgi-param # "?seconds=#", # "?tags_regexp=#" and # "?type=#". 
my $SYMBOL_PAGE = "/pprof/symbol"; # must support symbol lookup via POST my $PROGRAM_NAME_PAGE = "/pprof/cmdline"; # These are the web pages that can be named on the command line. # All the alternatives must begin with /. my $PROFILES = "($HEAP_PAGE|$PROFILE_PAGE|$PMUPROFILE_PAGE|" . "$GROWTH_PAGE|$CONTENTION_PAGE|$WALL_PAGE|" . "$FILTEREDPROFILE_PAGE|$CENSUSPROFILE_PAGE)"; # default binary name my $UNKNOWN_BINARY = "(unknown)"; # There is a pervasive dependency on the length (in hex characters, # i.e., nibbles) of an address, distinguishing between 32-bit and # 64-bit profiles. To err on the safe size, default to 64-bit here: my $address_length = 16; my $dev_null = "/dev/null"; if (! -e $dev_null && $^O =~ /MSWin/) { # $^O is the OS perl was built for $dev_null = "nul"; } # A list of paths to search for shared object files my @prefix_list = (); # Special routine name that should not have any symbols. # Used as separator to parse "addr2line -i" output. my $sep_symbol = '_fini'; my $sep_address = undef; ##### Argument parsing ##### sub usage_string { return < is a space separated list of profile names. jeprof [options] is a list of profile files where each file contains the necessary symbol mappings as well as profile data (likely generated with --raw). jeprof [options] is a remote form. Symbols are obtained from host:port$SYMBOL_PAGE Each name can be: /path/to/profile - a path to a profile file host:port[/] - a location of a service to get profile from The / can be $HEAP_PAGE, $PROFILE_PAGE, /pprof/pmuprofile, $GROWTH_PAGE, $CONTENTION_PAGE, /pprof/wall, $CENSUSPROFILE_PAGE, or /pprof/filteredprofile. For instance: jeprof http://myserver.com:80$HEAP_PAGE If / is omitted, the service defaults to $PROFILE_PAGE (cpu profiling). jeprof --symbols Maps addresses to symbol names. In this mode, stdin should be a list of library mappings, in the same format as is found in the heap- and cpu-profile files (this loosely matches that of /proc/self/maps on linux), followed by a list of hex addresses to map, one per line. 
For more help with querying remote servers, including how to add the necessary server-side support code, see this filename (or one like it): /usr/doc/gperftools-$PPROF_VERSION/pprof_remote_servers.html Options: --cum Sort by cumulative data --base= Subtract from before display --interactive Run in interactive mode (interactive "help" gives help) [default] --seconds= Length of time for dynamic profiles [default=30 secs] --add_lib= Read additional symbols and line info from the given library --lib_prefix= Comma separated list of library path prefixes Reporting Granularity: --addresses Report at address level --lines Report at source line level --functions Report at function level [default] --files Report at source file level Output type: --text Generate text report --callgrind Generate callgrind format to stdout --gv Generate Postscript and display --evince Generate PDF and display --web Generate SVG and display --list= Generate source listing of matching routines --disasm= Generate disassembly of matching routines --symbols Print demangled symbol names found at given addresses --dot Generate DOT file to stdout --ps Generate Postcript to stdout --pdf Generate PDF to stdout --svg Generate SVG to stdout --gif Generate GIF to stdout --raw Generate symbolized jeprof data (useful with remote fetch) Heap-Profile Options: --inuse_space Display in-use (mega)bytes [default] --inuse_objects Display in-use objects --alloc_space Display allocated (mega)bytes --alloc_objects Display allocated objects --show_bytes Display space in bytes --drop_negative Ignore negative differences Contention-profile options: --total_delay Display total delay at each region [default] --contentions Display number of delays at each region --mean_delay Display mean delay at each region Call-graph Options: --nodecount= Show at most so many nodes [default=80] --nodefraction= Hide nodes below *total [default=.005] --edgefraction= Hide edges below *total [default=.001] --maxdegree= Max incoming/outgoing edges per node [default=8] --focus= Focus on nodes matching --thread= Show profile for thread --ignore= Ignore nodes matching --scale= Set GV scaling [default=0] --heapcheck Make nodes with non-0 object counts (i.e. direct leak generators) more visible Miscellaneous: --tools=[,...] \$PATH for object tool pathnames --test Run unit tests --help This message --version Version information Environment Variables: JEPROF_TMPDIR Profiles directory. Defaults to \$HOME/jeprof JEPROF_TOOLS Prefix for object tools pathnames Examples: jeprof /bin/ls ls.prof Enters "interactive" mode jeprof --text /bin/ls ls.prof Outputs one line per procedure jeprof --web /bin/ls ls.prof Displays annotated call-graph in web browser jeprof --gv /bin/ls ls.prof Displays annotated call-graph via 'gv' jeprof --gv --focus=Mutex /bin/ls ls.prof Restricts to code paths including a .*Mutex.* entry jeprof --gv --focus=Mutex --ignore=string /bin/ls ls.prof Code paths including Mutex but not string jeprof --list=getdir /bin/ls ls.prof (Per-line) annotated source listing for getdir() jeprof --disasm=getdir /bin/ls ls.prof (Per-PC) annotated disassembly for getdir() jeprof http://localhost:1234/ Enters "interactive" mode jeprof --text localhost:1234 Outputs one line per procedure for localhost:1234 jeprof --raw localhost:1234 > ./local.raw jeprof --text ./local.raw Fetches a remote profile for later analysis and then analyzes it in text mode. EOF } sub version_string { return < \$main::opt_help, "version!" => \$main::opt_version, "cum!" 
=> \$main::opt_cum, "base=s" => \$main::opt_base, "seconds=i" => \$main::opt_seconds, "add_lib=s" => \$main::opt_lib, "lib_prefix=s" => \$main::opt_lib_prefix, "functions!" => \$main::opt_functions, "lines!" => \$main::opt_lines, "addresses!" => \$main::opt_addresses, "files!" => \$main::opt_files, "text!" => \$main::opt_text, "callgrind!" => \$main::opt_callgrind, "list=s" => \$main::opt_list, "disasm=s" => \$main::opt_disasm, "symbols!" => \$main::opt_symbols, "gv!" => \$main::opt_gv, "evince!" => \$main::opt_evince, "web!" => \$main::opt_web, "dot!" => \$main::opt_dot, "ps!" => \$main::opt_ps, "pdf!" => \$main::opt_pdf, "svg!" => \$main::opt_svg, "gif!" => \$main::opt_gif, "raw!" => \$main::opt_raw, "interactive!" => \$main::opt_interactive, "nodecount=i" => \$main::opt_nodecount, "nodefraction=f" => \$main::opt_nodefraction, "edgefraction=f" => \$main::opt_edgefraction, "maxdegree=i" => \$main::opt_maxdegree, "focus=s" => \$main::opt_focus, "thread=s" => \$main::opt_thread, "ignore=s" => \$main::opt_ignore, "scale=i" => \$main::opt_scale, "heapcheck" => \$main::opt_heapcheck, "inuse_space!" => \$main::opt_inuse_space, "inuse_objects!" => \$main::opt_inuse_objects, "alloc_space!" => \$main::opt_alloc_space, "alloc_objects!" => \$main::opt_alloc_objects, "show_bytes!" => \$main::opt_show_bytes, "drop_negative!" => \$main::opt_drop_negative, "total_delay!" => \$main::opt_total_delay, "contentions!" => \$main::opt_contentions, "mean_delay!" => \$main::opt_mean_delay, "tools=s" => \$main::opt_tools, "test!" => \$main::opt_test, "debug!" => \$main::opt_debug, # Undocumented flags used only by unittests: "test_stride=i" => \$main::opt_test_stride, ) || usage("Invalid option(s)"); # Deal with the standard --help and --version if ($main::opt_help) { print usage_string(); exit(0); } if ($main::opt_version) { print version_string(); exit(0); } # Disassembly/listing/symbols mode requires address-level info if ($main::opt_disasm || $main::opt_list || $main::opt_symbols) { $main::opt_functions = 0; $main::opt_lines = 0; $main::opt_addresses = 1; $main::opt_files = 0; } # Check heap-profiling flags if ($main::opt_inuse_space + $main::opt_inuse_objects + $main::opt_alloc_space + $main::opt_alloc_objects > 1) { usage("Specify at most on of --inuse/--alloc options"); } # Check output granularities my $grains = $main::opt_functions + $main::opt_lines + $main::opt_addresses + $main::opt_files + 0; if ($grains > 1) { usage("Only specify one output granularity option"); } if ($grains == 0) { $main::opt_functions = 1; } # Check output modes my $modes = $main::opt_text + $main::opt_callgrind + ($main::opt_list eq '' ? 0 : 1) + ($main::opt_disasm eq '' ? 0 : 1) + ($main::opt_symbols == 0 ? 
0 : 1) + $main::opt_gv + $main::opt_evince + $main::opt_web + $main::opt_dot + $main::opt_ps + $main::opt_pdf + $main::opt_svg + $main::opt_gif + $main::opt_raw + $main::opt_interactive + 0; if ($modes > 1) { usage("Only specify one output mode"); } if ($modes == 0) { if (-t STDOUT) { # If STDOUT is a tty, activate interactive mode $main::opt_interactive = 1; } else { $main::opt_text = 1; } } if ($main::opt_test) { RunUnitTests(); # Should not return exit(1); } # Binary name and profile arguments list $main::prog = ""; @main::pfile_args = (); # Remote profiling without a binary (using $SYMBOL_PAGE instead) if (@ARGV > 0) { if (IsProfileURL($ARGV[0])) { $main::use_symbol_page = 1; } elsif (IsSymbolizedProfileFile($ARGV[0])) { $main::use_symbolized_profile = 1; $main::prog = $UNKNOWN_BINARY; # will be set later from the profile file } } if ($main::use_symbol_page || $main::use_symbolized_profile) { # We don't need a binary! my %disabled = ('--lines' => $main::opt_lines, '--disasm' => $main::opt_disasm); for my $option (keys %disabled) { usage("$option cannot be used without a binary") if $disabled{$option}; } # Set $main::prog later... scalar(@ARGV) || usage("Did not specify profile file"); } elsif ($main::opt_symbols) { # --symbols needs a binary-name (to run nm on, etc) but not profiles $main::prog = shift(@ARGV) || usage("Did not specify program"); } else { $main::prog = shift(@ARGV) || usage("Did not specify program"); scalar(@ARGV) || usage("Did not specify profile file"); } # Parse profile file/location arguments foreach my $farg (@ARGV) { if ($farg =~ m/(.*)\@([0-9]+)(|\/.*)$/ ) { my $machine = $1; my $num_machines = $2; my $path = $3; for (my $i = 0; $i < $num_machines; $i++) { unshift(@main::pfile_args, "$i.$machine$path"); } } else { unshift(@main::pfile_args, $farg); } } if ($main::use_symbol_page) { unless (IsProfileURL($main::pfile_args[0])) { error("The first profile should be a remote form to use $SYMBOL_PAGE\n"); } CheckSymbolPage(); $main::prog = FetchProgramName(); } elsif (!$main::use_symbolized_profile) { # may not need objtools! ConfigureObjTools($main::prog) } # Break the opt_lib_prefix into the prefix_list array @prefix_list = split (',', $main::opt_lib_prefix); # Remove trailing / from the prefixes, in the list to prevent # searching things like /my/path//lib/mylib.so foreach (@prefix_list) { s|/+$||; } } sub FilterAndPrint { my ($profile, $symbols, $libs, $thread) = @_; # Get total data in profile my $total = TotalProfile($profile); # Remove uniniteresting stack items $profile = RemoveUninterestingFrames($symbols, $profile); # Focus? if ($main::opt_focus ne '') { $profile = FocusProfile($symbols, $profile, $main::opt_focus); } # Ignore? if ($main::opt_ignore ne '') { $profile = IgnoreProfile($symbols, $profile, $main::opt_ignore); } my $calls = ExtractCalls($symbols, $profile); # Reduce profiles to required output granularity, and also clean # each stack trace so a given entry exists at most once. 
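# Illustrative example (function names are hypothetical): at function
# granularity a recursive stack trace [fact, fact, fact, main] is cleaned
# to [fact, main], so the cumulative count of "fact" is charged only once
# per stack trace instead of once per recursive frame.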
my $reduced = ReduceProfile($symbols, $profile); # Get derived profiles my $flat = FlatProfile($reduced); my $cumulative = CumulativeProfile($reduced); # Print if (!$main::opt_interactive) { if ($main::opt_disasm) { PrintDisassembly($libs, $flat, $cumulative, $main::opt_disasm); } elsif ($main::opt_list) { PrintListing($total, $libs, $flat, $cumulative, $main::opt_list, 0); } elsif ($main::opt_text) { # Make sure the output is empty when have nothing to report # (only matters when --heapcheck is given but we must be # compatible with old branches that did not pass --heapcheck always): if ($total != 0) { printf("Total%s: %s %s\n", (defined($thread) ? " (t$thread)" : ""), Unparse($total), Units()); } PrintText($symbols, $flat, $cumulative, -1); } elsif ($main::opt_raw) { PrintSymbolizedProfile($symbols, $profile, $main::prog); } elsif ($main::opt_callgrind) { PrintCallgrind($calls); } else { if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) { if ($main::opt_gv) { RunGV(TempName($main::next_tmpfile, "ps"), ""); } elsif ($main::opt_evince) { RunEvince(TempName($main::next_tmpfile, "pdf"), ""); } elsif ($main::opt_web) { my $tmp = TempName($main::next_tmpfile, "svg"); RunWeb($tmp); # The command we run might hand the file name off # to an already running browser instance and then exit. # Normally, we'd remove $tmp on exit (right now), # but fork a child to remove $tmp a little later, so that the # browser has time to load it first. delete $main::tempnames{$tmp}; if (fork() == 0) { sleep 5; unlink($tmp); exit(0); } } } else { cleanup(); exit(1); } } } else { InteractiveMode($profile, $symbols, $libs, $total); } } sub Main() { Init(); $main::collected_profile = undef; @main::profile_files = (); $main::op_time = time(); # Printing symbols is special and requires a lot less info that most. if ($main::opt_symbols) { PrintSymbols(*STDIN); # Get /proc/maps and symbols output from stdin return; } # Fetch all profile data FetchDynamicProfiles(); # this will hold symbols that we read from the profile files my $symbol_map = {}; # Read one profile, pick the last item on the list my $data = ReadProfile($main::prog, pop(@main::profile_files)); my $profile = $data->{profile}; my $pcs = $data->{pcs}; my $libs = $data->{libs}; # Info about main program and shared libraries $symbol_map = MergeSymbols($symbol_map, $data->{symbols}); # Add additional profiles, if available. if (scalar(@main::profile_files) > 0) { foreach my $pname (@main::profile_files) { my $data2 = ReadProfile($main::prog, $pname); $profile = AddProfile($profile, $data2->{profile}); $pcs = AddPcs($pcs, $data2->{pcs}); $symbol_map = MergeSymbols($symbol_map, $data2->{symbols}); } } # Subtract base from profile, if specified if ($main::opt_base ne '') { my $base = ReadProfile($main::prog, $main::opt_base); $profile = SubtractProfile($profile, $base->{profile}); $pcs = AddPcs($pcs, $base->{pcs}); $symbol_map = MergeSymbols($symbol_map, $base->{symbols}); } # Collect symbols my $symbols; if ($main::use_symbolized_profile) { $symbols = FetchSymbols($pcs, $symbol_map); } elsif ($main::use_symbol_page) { $symbols = FetchSymbols($pcs); } else { # TODO(csilvers): $libs uses the /proc/self/maps data from profile1, # which may differ from the data from subsequent profiles, especially # if they were run on different machines. Use appropriate libs for # each pc somehow. 
$symbols = ExtractSymbols($libs, $pcs); } if (!defined($main::opt_thread)) { FilterAndPrint($profile, $symbols, $libs); } if (defined($data->{threads})) { foreach my $thread (sort { $a <=> $b } keys(%{$data->{threads}})) { if (defined($main::opt_thread) && ($main::opt_thread eq '*' || $main::opt_thread == $thread)) { my $thread_profile = $data->{threads}{$thread}; FilterAndPrint($thread_profile, $symbols, $libs, $thread); } } } cleanup(); exit(0); } ##### Entry Point ##### Main(); # Temporary code to detect if we're running on a Goobuntu system. # These systems don't have the right stuff installed for the special # Readline libraries to work, so as a temporary workaround, we default # to using the normal stdio code, rather than the fancier readline-based # code sub ReadlineMightFail { if (-e '/lib/libtermcap.so.2') { return 0; # libtermcap exists, so readline should be okay } else { return 1; } } sub RunGV { my $fname = shift; my $bg = shift; # "" or " &" if we should run in background if (!system(ShellEscape(@GV, "--version") . " >$dev_null 2>&1")) { # Options using double dash are supported by this gv version. # Also, turn on noantialias to better handle bug in gv for # postscript files with large dimensions. # TODO: Maybe we should not pass the --noantialias flag # if the gv version is known to work properly without the flag. system(ShellEscape(@GV, "--scale=$main::opt_scale", "--noantialias", $fname) . $bg); } else { # Old gv version - only supports options that use single dash. print STDERR ShellEscape(@GV, "-scale", $main::opt_scale) . "\n"; system(ShellEscape(@GV, "-scale", "$main::opt_scale", $fname) . $bg); } } sub RunEvince { my $fname = shift; my $bg = shift; # "" or " &" if we should run in background system(ShellEscape(@EVINCE, $fname) . $bg); } sub RunWeb { my $fname = shift; print STDERR "Loading web page file:///$fname\n"; if (`uname` =~ /Darwin/) { # OS X: open will use standard preference for SVG files. system("/usr/bin/open", $fname); return; } # Some kind of Unix; try generic symlinks, then specific browsers. # (Stop once we find one.) # Works best if the browser is already running. my @alt = ( "/etc/alternatives/gnome-www-browser", "/etc/alternatives/x-www-browser", "google-chrome", "firefox", ); foreach my $b (@alt) { if (system($b, $fname) == 0) { return; } } print STDERR "Could not load web browser.\n"; } sub RunKcachegrind { my $fname = shift; my $bg = shift; # "" or " &" if we should run in background print STDERR "Starting '@KCACHEGRIND " . $fname . $bg . "'\n"; system(ShellEscape(@KCACHEGRIND, $fname) . $bg); } ##### Interactive helper routines ##### sub InteractiveMode { $| = 1; # Make output unbuffered for interactive mode my ($orig_profile, $symbols, $libs, $total) = @_; print STDERR "Welcome to jeprof! For help, type 'help'.\n"; # Use ReadLine if it's installed and input comes from a console. if ( -t STDIN && !ReadlineMightFail() && defined(eval {require Term::ReadLine}) ) { my $term = new Term::ReadLine 'jeprof'; while ( defined ($_ = $term->readline('(jeprof) '))) { $term->addhistory($_) if /\S/; if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) { last; # exit when we get an interactive command to quit } } } else { # don't have readline while (1) { print STDERR "(jeprof) "; $_ = ; last if ! 
defined $_ ; s/\r//g; # turn windows-looking lines into unix-looking lines # Save some flags that might be reset by InteractiveCommand() my $save_opt_lines = $main::opt_lines; if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) { last; # exit when we get an interactive command to quit } # Restore flags $main::opt_lines = $save_opt_lines; } } } # Takes two args: orig profile, and command to run. # Returns 1 if we should keep going, or 0 if we were asked to quit sub InteractiveCommand { my($orig_profile, $symbols, $libs, $total, $command) = @_; $_ = $command; # just to make future m//'s easier if (!defined($_)) { print STDERR "\n"; return 0; } if (m/^\s*quit/) { return 0; } if (m/^\s*help/) { InteractiveHelpMessage(); return 1; } # Clear all the mode options -- mode is controlled by "$command" $main::opt_text = 0; $main::opt_callgrind = 0; $main::opt_disasm = 0; $main::opt_list = 0; $main::opt_gv = 0; $main::opt_evince = 0; $main::opt_cum = 0; if (m/^\s*(text|top)(\d*)\s*(.*)/) { $main::opt_text = 1; my $line_limit = ($2 ne "") ? int($2) : 10; my $routine; my $ignore; ($routine, $ignore) = ParseInteractiveArgs($3); my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore); my $reduced = ReduceProfile($symbols, $profile); # Get derived profiles my $flat = FlatProfile($reduced); my $cumulative = CumulativeProfile($reduced); PrintText($symbols, $flat, $cumulative, $line_limit); return 1; } if (m/^\s*callgrind\s*([^ \n]*)/) { $main::opt_callgrind = 1; # Get derived profiles my $calls = ExtractCalls($symbols, $orig_profile); my $filename = $1; if ( $1 eq '' ) { $filename = TempName($main::next_tmpfile, "callgrind"); } PrintCallgrind($calls, $filename); if ( $1 eq '' ) { RunKcachegrind($filename, " & "); $main::next_tmpfile++; } return 1; } if (m/^\s*(web)?list\s*(.+)/) { my $html = (defined($1) && ($1 eq "web")); $main::opt_list = 1; my $routine; my $ignore; ($routine, $ignore) = ParseInteractiveArgs($2); my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore); my $reduced = ReduceProfile($symbols, $profile); # Get derived profiles my $flat = FlatProfile($reduced); my $cumulative = CumulativeProfile($reduced); PrintListing($total, $libs, $flat, $cumulative, $routine, $html); return 1; } if (m/^\s*disasm\s*(.+)/) { $main::opt_disasm = 1; my $routine; my $ignore; ($routine, $ignore) = ParseInteractiveArgs($1); # Process current profile to account for various settings my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore); my $reduced = ReduceProfile($symbols, $profile); # Get derived profiles my $flat = FlatProfile($reduced); my $cumulative = CumulativeProfile($reduced); PrintDisassembly($libs, $flat, $cumulative, $routine); return 1; } if (m/^\s*(gv|web|evince)\s*(.*)/) { $main::opt_gv = 0; $main::opt_evince = 0; $main::opt_web = 0; if ($1 eq "gv") { $main::opt_gv = 1; } elsif ($1 eq "evince") { $main::opt_evince = 1; } elsif ($1 eq "web") { $main::opt_web = 1; } my $focus; my $ignore; ($focus, $ignore) = ParseInteractiveArgs($2); # Process current profile to account for various settings my $profile = ProcessProfile($total, $orig_profile, $symbols, $focus, $ignore); my $reduced = ReduceProfile($symbols, $profile); # Get derived profiles my $flat = FlatProfile($reduced); my $cumulative = CumulativeProfile($reduced); if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) { if ($main::opt_gv) { RunGV(TempName($main::next_tmpfile, "ps"), " &"); } elsif ($main::opt_evince) { 
RunEvince(TempName($main::next_tmpfile, "pdf"), " &"); } elsif ($main::opt_web) { RunWeb(TempName($main::next_tmpfile, "svg")); } $main::next_tmpfile++; } return 1; } if (m/^\s*$/) { return 1; } print STDERR "Unknown command: try 'help'.\n"; return 1; } sub ProcessProfile { my $total_count = shift; my $orig_profile = shift; my $symbols = shift; my $focus = shift; my $ignore = shift; # Process current profile to account for various settings my $profile = $orig_profile; printf("Total: %s %s\n", Unparse($total_count), Units()); if ($focus ne '') { $profile = FocusProfile($symbols, $profile, $focus); my $focus_count = TotalProfile($profile); printf("After focusing on '%s': %s %s of %s (%0.1f%%)\n", $focus, Unparse($focus_count), Units(), Unparse($total_count), ($focus_count*100.0) / $total_count); } if ($ignore ne '') { $profile = IgnoreProfile($symbols, $profile, $ignore); my $ignore_count = TotalProfile($profile); printf("After ignoring '%s': %s %s of %s (%0.1f%%)\n", $ignore, Unparse($ignore_count), Units(), Unparse($total_count), ($ignore_count*100.0) / $total_count); } return $profile; } sub InteractiveHelpMessage { print STDERR <{$k}; my @addrs = split(/\n/, $k); if ($#addrs >= 0) { my $depth = $#addrs + 1; # int(foo / 2**32) is the only reliable way to get rid of bottom # 32 bits on both 32- and 64-bit systems. print pack('L*', $count & 0xFFFFFFFF, int($count / 2**32)); print pack('L*', $depth & 0xFFFFFFFF, int($depth / 2**32)); foreach my $full_addr (@addrs) { my $addr = $full_addr; $addr =~ s/0x0*//; # strip off leading 0x, zeroes if (length($addr) > 16) { print STDERR "Invalid address in profile: $full_addr\n"; next; } my $low_addr = substr($addr, -8); # get last 8 hex chars my $high_addr = substr($addr, -16, 8); # get up to 8 more hex chars print pack('L*', hex('0x' . $low_addr), hex('0x' . $high_addr)); } } } } # Print symbols and profile data sub PrintSymbolizedProfile { my $symbols = shift; my $profile = shift; my $prog = shift; $SYMBOL_PAGE =~ m,[^/]+$,; # matches everything after the last slash my $symbol_marker = $&; print '--- ', $symbol_marker, "\n"; if (defined($prog)) { print 'binary=', $prog, "\n"; } while (my ($pc, $name) = each(%{$symbols})) { my $sep = ' '; print '0x', $pc; # We have a list of function names, which include the inlined # calls. They are separated (and terminated) by --, which is # illegal in function names. for (my $j = 2; $j <= $#{$name}; $j += 3) { print $sep, $name->[$j]; $sep = '--'; } print "\n"; } print '---', "\n"; $PROFILE_PAGE =~ m,[^/]+$,; # matches everything after the last slash my $profile_marker = $&; print '--- ', $profile_marker, "\n"; if (defined($main::collected_profile)) { # if used with remote fetch, simply dump the collected profile to output. open(SRC, "<$main::collected_profile"); while () { print $_; } close(SRC); } else { # dump a cpu-format profile to standard out PrintProfileData($profile); } } # Print text output sub PrintText { my $symbols = shift; my $flat = shift; my $cumulative = shift; my $line_limit = shift; my $total = TotalProfile($flat); # Which profile to sort by? my $s = $main::opt_cum ? $cumulative : $flat; my $running_sum = 0; my $lines = 0; foreach my $k (sort { GetEntry($s, $b) <=> GetEntry($s, $a) || $a cmp $b } keys(%{$cumulative})) { my $f = GetEntry($flat, $k); my $c = GetEntry($cumulative, $k); $running_sum += $f; my $sym = $k; if (exists($symbols->{$k})) { $sym = $symbols->{$k}->[0] . " " . $symbols->{$k}->[1]; if ($main::opt_addresses) { $sym = $k . " " . 
$sym; } } if ($f != 0 || $c != 0) { printf("%8s %6s %6s %8s %6s %s\n", Unparse($f), Percent($f, $total), Percent($running_sum, $total), Unparse($c), Percent($c, $total), $sym); } $lines++; last if ($line_limit >= 0 && $lines >= $line_limit); } } # Callgrind format has a compression for repeated function and file # names. You show the name the first time, and just use its number # subsequently. This can cut down the file to about a third or a # quarter of its uncompressed size. $key and $val are the key/value # pair that would normally be printed by callgrind; $map is a map from # value to number. sub CompressedCGName { my($key, $val, $map) = @_; my $idx = $map->{$val}; # For very short keys, providing an index hurts rather than helps. if (length($val) <= 3) { return "$key=$val\n"; } elsif (defined($idx)) { return "$key=($idx)\n"; } else { # scalar(keys $map) gives the number of items in the map. $idx = scalar(keys(%{$map})) + 1; $map->{$val} = $idx; return "$key=($idx) $val\n"; } } # Print the call graph in a way that's suiteable for callgrind. sub PrintCallgrind { my $calls = shift; my $filename; my %filename_to_index_map; my %fnname_to_index_map; if ($main::opt_interactive) { $filename = shift; print STDERR "Writing callgrind file to '$filename'.\n" } else { $filename = "&STDOUT"; } open(CG, ">$filename"); printf CG ("events: Hits\n\n"); foreach my $call ( map { $_->[0] } sort { $a->[1] cmp $b ->[1] || $a->[2] <=> $b->[2] } map { /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/; [$_, $1, $2] } keys %$calls ) { my $count = int($calls->{$call}); $call =~ /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/; my ( $caller_file, $caller_line, $caller_function, $callee_file, $callee_line, $callee_function ) = ( $1, $2, $3, $5, $6, $7 ); # TODO(csilvers): for better compression, collect all the # caller/callee_files and functions first, before printing # anything, and only compress those referenced more than once. 
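  # Illustrative example of the name compression performed by
  # CompressedCGName() below (paths are hypothetical):
  #   fl=(4) /src/foo.cc     <- first use of this value defines index 4
  #   fl=(4)                 <- later uses emit just the index
  # Values of length <= 3 are always written out verbatim, since an index
  # would not save any space for them.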
printf CG CompressedCGName("fl", $caller_file, \%filename_to_index_map); printf CG CompressedCGName("fn", $caller_function, \%fnname_to_index_map); if (defined $6) { printf CG CompressedCGName("cfl", $callee_file, \%filename_to_index_map); printf CG CompressedCGName("cfn", $callee_function, \%fnname_to_index_map); printf CG ("calls=$count $callee_line\n"); } printf CG ("$caller_line $count\n\n"); } } # Print disassembly for all all routines that match $main::opt_disasm sub PrintDisassembly { my $libs = shift; my $flat = shift; my $cumulative = shift; my $disasm_opts = shift; my $total = TotalProfile($flat); foreach my $lib (@{$libs}) { my $symbol_table = GetProcedureBoundaries($lib->[0], $disasm_opts); my $offset = AddressSub($lib->[1], $lib->[3]); foreach my $routine (sort ByName keys(%{$symbol_table})) { my $start_addr = $symbol_table->{$routine}->[0]; my $end_addr = $symbol_table->{$routine}->[1]; # See if there are any samples in this routine my $length = hex(AddressSub($end_addr, $start_addr)); my $addr = AddressAdd($start_addr, $offset); for (my $i = 0; $i < $length; $i++) { if (defined($cumulative->{$addr})) { PrintDisassembledFunction($lib->[0], $offset, $routine, $flat, $cumulative, $start_addr, $end_addr, $total); last; } $addr = AddressInc($addr); } } } } # Return reference to array of tuples of the form: # [start_address, filename, linenumber, instruction, limit_address] # E.g., # ["0x806c43d", "/foo/bar.cc", 131, "ret", "0x806c440"] sub Disassemble { my $prog = shift; my $offset = shift; my $start_addr = shift; my $end_addr = shift; my $objdump = $obj_tool_map{"objdump"}; my $cmd = ShellEscape($objdump, "-C", "-d", "-l", "--no-show-raw-insn", "--start-address=0x$start_addr", "--stop-address=0x$end_addr", $prog); open(OBJDUMP, "$cmd |") || error("$cmd: $!\n"); my @result = (); my $filename = ""; my $linenumber = -1; my $last = ["", "", "", ""]; while () { s/\r//g; # turn windows-looking lines into unix-looking lines chop; if (m|\s*([^:\s]+):(\d+)\s*$|) { # Location line of the form: # : $filename = $1; $linenumber = $2; } elsif (m/^ +([0-9a-f]+):\s*(.*)/) { # Disassembly line -- zero-extend address to full length my $addr = HexExtend($1); my $k = AddressAdd($addr, $offset); $last->[4] = $k; # Store ending address for previous instruction $last = [$k, $filename, $linenumber, $2, $end_addr]; push(@result, $last); } } close(OBJDUMP); return @result; } # The input file should contain lines of the form /proc/maps-like # output (same format as expected from the profiles) or that looks # like hex addresses (like "0xDEADBEEF"). We will parse all # /proc/maps output, and for all the hex addresses, we will output # "short" symbol names, one per line, in the same order as the input. sub PrintSymbols { my $maps_and_symbols_file = shift; # ParseLibraries expects pcs to be in a set. Fine by us... my @pclist = (); # pcs in sorted order my $pcs = {}; my $map = ""; foreach my $line (<$maps_and_symbols_file>) { $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines if ($line =~ /\b(0x[0-9a-f]+)\b/i) { push(@pclist, HexExtend($1)); $pcs->{$pclist[-1]} = 1; } else { $map .= $line; } } my $libs = ParseLibraries($main::prog, $map, $pcs); my $symbols = ExtractSymbols($libs, $pcs); foreach my $pc (@pclist) { # ->[0] is the shortname, ->[2] is the full name print(($symbols->{$pc}->[0] || "??") . 
"\n"); } } # For sorting functions by name sub ByName { return ShortFunctionName($a) cmp ShortFunctionName($b); } # Print source-listing for all all routines that match $list_opts sub PrintListing { my $total = shift; my $libs = shift; my $flat = shift; my $cumulative = shift; my $list_opts = shift; my $html = shift; my $output = \*STDOUT; my $fname = ""; if ($html) { # Arrange to write the output to a temporary file $fname = TempName($main::next_tmpfile, "html"); $main::next_tmpfile++; if (!open(TEMP, ">$fname")) { print STDERR "$fname: $!\n"; return; } $output = \*TEMP; print $output HtmlListingHeader(); printf $output ("
<div class=\"legend\">%s<br>Total: %s %s</div>
\n", $main::prog, Unparse($total), Units()); } my $listed = 0; foreach my $lib (@{$libs}) { my $symbol_table = GetProcedureBoundaries($lib->[0], $list_opts); my $offset = AddressSub($lib->[1], $lib->[3]); foreach my $routine (sort ByName keys(%{$symbol_table})) { # Print if there are any samples in this routine my $start_addr = $symbol_table->{$routine}->[0]; my $end_addr = $symbol_table->{$routine}->[1]; my $length = hex(AddressSub($end_addr, $start_addr)); my $addr = AddressAdd($start_addr, $offset); for (my $i = 0; $i < $length; $i++) { if (defined($cumulative->{$addr})) { $listed += PrintSource( $lib->[0], $offset, $routine, $flat, $cumulative, $start_addr, $end_addr, $html, $output); last; } $addr = AddressInc($addr); } } } if ($html) { if ($listed > 0) { print $output HtmlListingFooter(); close($output); RunWeb($fname); } else { close($output); unlink($fname); } } } sub HtmlListingHeader { return <<'EOF'; Pprof listing EOF } sub HtmlListingFooter { return <<'EOF'; EOF } sub HtmlEscape { my $text = shift; $text =~ s/&/&/g; $text =~ s//>/g; return $text; } # Returns the indentation of the line, if it has any non-whitespace # characters. Otherwise, returns -1. sub Indentation { my $line = shift; if (m/^(\s*)\S/) { return length($1); } else { return -1; } } # If the symbol table contains inlining info, Disassemble() may tag an # instruction with a location inside an inlined function. But for # source listings, we prefer to use the location in the function we # are listing. So use MapToSymbols() to fetch full location # information for each instruction and then pick out the first # location from a location list (location list contains callers before # callees in case of inlining). # # After this routine has run, each entry in $instructions contains: # [0] start address # [1] filename for function we are listing # [2] line number for function we are listing # [3] disassembly # [4] limit address # [5] most specific filename (may be different from [1] due to inlining) # [6] most specific line number (may be different from [2] due to inlining) sub GetTopLevelLineNumbers { my ($lib, $offset, $instructions) = @_; my $pcs = []; for (my $i = 0; $i <= $#{$instructions}; $i++) { push(@{$pcs}, $instructions->[$i]->[0]); } my $symbols = {}; MapToSymbols($lib, $offset, $pcs, $symbols); for (my $i = 0; $i <= $#{$instructions}; $i++) { my $e = $instructions->[$i]; push(@{$e}, $e->[1]); push(@{$e}, $e->[2]); my $addr = $e->[0]; my $sym = $symbols->{$addr}; if (defined($sym)) { if ($#{$sym} >= 2 && $sym->[1] =~ m/^(.*):(\d+)$/) { $e->[1] = $1; # File name $e->[2] = $2; # Line number } } } } # Print source-listing for one routine sub PrintSource { my $prog = shift; my $offset = shift; my $routine = shift; my $flat = shift; my $cumulative = shift; my $start_addr = shift; my $end_addr = shift; my $html = shift; my $output = shift; # Disassemble all instructions (just to get line numbers) my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr); GetTopLevelLineNumbers($prog, $offset, \@instructions); # Hack 1: assume that the first source file encountered in the # disassembly contains the routine my $filename = undef; for (my $i = 0; $i <= $#instructions; $i++) { if ($instructions[$i]->[2] >= 0) { $filename = $instructions[$i]->[1]; last; } } if (!defined($filename)) { print STDERR "no filename found in $routine\n"; return 0; } # Hack 2: assume that the largest line number from $filename is the # end of the procedure. 
This is typically safe since if P1 contains # an inlined call to P2, then P2 usually occurs earlier in the # source file. If this does not work, we might have to compute a # density profile or just print all regions we find. my $lastline = 0; for (my $i = 0; $i <= $#instructions; $i++) { my $f = $instructions[$i]->[1]; my $l = $instructions[$i]->[2]; if (($f eq $filename) && ($l > $lastline)) { $lastline = $l; } } # Hack 3: assume the first source location from "filename" is the start of # the source code. my $firstline = 1; for (my $i = 0; $i <= $#instructions; $i++) { if ($instructions[$i]->[1] eq $filename) { $firstline = $instructions[$i]->[2]; last; } } # Hack 4: Extend last line forward until its indentation is less than # the indentation we saw on $firstline my $oldlastline = $lastline; { if (!open(FILE, "<$filename")) { print STDERR "$filename: $!\n"; return 0; } my $l = 0; my $first_indentation = -1; while () { s/\r//g; # turn windows-looking lines into unix-looking lines $l++; my $indent = Indentation($_); if ($l >= $firstline) { if ($first_indentation < 0 && $indent >= 0) { $first_indentation = $indent; last if ($first_indentation == 0); } } if ($l >= $lastline && $indent >= 0) { if ($indent >= $first_indentation) { $lastline = $l+1; } else { last; } } } close(FILE); } # Assign all samples to the range $firstline,$lastline, # Hack 4: If an instruction does not occur in the range, its samples # are moved to the next instruction that occurs in the range. my $samples1 = {}; # Map from line number to flat count my $samples2 = {}; # Map from line number to cumulative count my $running1 = 0; # Unassigned flat counts my $running2 = 0; # Unassigned cumulative counts my $total1 = 0; # Total flat counts my $total2 = 0; # Total cumulative counts my %disasm = (); # Map from line number to disassembly my $running_disasm = ""; # Unassigned disassembly my $skip_marker = "---\n"; if ($html) { $skip_marker = ""; for (my $l = $firstline; $l <= $lastline; $l++) { $disasm{$l} = ""; } } my $last_dis_filename = ''; my $last_dis_linenum = -1; my $last_touched_line = -1; # To detect gaps in disassembly for a line foreach my $e (@instructions) { # Add up counts for all address that fall inside this instruction my $c1 = 0; my $c2 = 0; for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) { $c1 += GetEntry($flat, $a); $c2 += GetEntry($cumulative, $a); } if ($html) { my $dis = sprintf(" %6s %6s \t\t%8s: %s ", HtmlPrintNumber($c1), HtmlPrintNumber($c2), UnparseAddress($offset, $e->[0]), CleanDisassembly($e->[3])); # Append the most specific source line associated with this instruction if (length($dis) < 80) { $dis .= (' ' x (80 - length($dis))) }; $dis = HtmlEscape($dis); my $f = $e->[5]; my $l = $e->[6]; if ($f ne $last_dis_filename) { $dis .= sprintf("%s:%d", HtmlEscape(CleanFileName($f)), $l); } elsif ($l ne $last_dis_linenum) { # De-emphasize the unchanged file name portion $dis .= sprintf("%s" . 
":%d", HtmlEscape(CleanFileName($f)), $l); } else { # De-emphasize the entire location $dis .= sprintf("%s:%d", HtmlEscape(CleanFileName($f)), $l); } $last_dis_filename = $f; $last_dis_linenum = $l; $running_disasm .= $dis; $running_disasm .= "\n"; } $running1 += $c1; $running2 += $c2; $total1 += $c1; $total2 += $c2; my $file = $e->[1]; my $line = $e->[2]; if (($file eq $filename) && ($line >= $firstline) && ($line <= $lastline)) { # Assign all accumulated samples to this line AddEntry($samples1, $line, $running1); AddEntry($samples2, $line, $running2); $running1 = 0; $running2 = 0; if ($html) { if ($line != $last_touched_line && $disasm{$line} ne '') { $disasm{$line} .= "\n"; } $disasm{$line} .= $running_disasm; $running_disasm = ''; $last_touched_line = $line; } } } # Assign any leftover samples to $lastline AddEntry($samples1, $lastline, $running1); AddEntry($samples2, $lastline, $running2); if ($html) { if ($lastline != $last_touched_line && $disasm{$lastline} ne '') { $disasm{$lastline} .= "\n"; } $disasm{$lastline} .= $running_disasm; } if ($html) { printf $output ( "

<h1>%s</h1>%s\n<pre onClick=\"jeprof_toggle_asm()\">\n" .
      "Total:%6s %6s (flat / cumulative %s)\n",
      HtmlEscape(ShortFunctionName($routine)),
      HtmlEscape(CleanFileName($filename)),
      Unparse($total1),
      Unparse($total2),
      Units());
  } else {
    printf $output (
      "ROUTINE ====================== %s in %s\n" .
      "%6s %6s Total %s (flat / cumulative)\n",
      ShortFunctionName($routine),
      CleanFileName($filename),
      Unparse($total1),
      Unparse($total2),
      Units());
  }
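  # Illustrative sample of the plain-text listing printed above and filled
  # in line by line below (counts, line numbers and file name are
  # hypothetical):
  #
  #   ROUTINE ====================== DoWork in /src/foo.cc
  #      120    340 Total samples (flat / cumulative)
  #        .      .   10: void DoWork() {
  #       80    300   11:   Step();
  #       40     40   12: }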
  if (!open(FILE, "<$filename")) {
    print STDERR "$filename: $!\n";
    return 0;
  }
  my $l = 0;
  while (<FILE>) {
    s/\r//g;         # turn windows-looking lines into unix-looking lines
    $l++;
    if ($l >= $firstline - 5 &&
        (($l <= $oldlastline + 5) || ($l <= $lastline))) {
      chop;
      my $text = $_;
      if ($l == $firstline) { print $output $skip_marker; }
      my $n1 = GetEntry($samples1, $l);
      my $n2 = GetEntry($samples2, $l);
      if ($html) {
        # Emit a span that has one of the following classes:
        #    livesrc -- has samples
        #    deadsrc -- has disassembly, but with no samples
        #    nop     -- has no matching disassembly
        # Also emit an optional span containing disassembly.
        my $dis = $disasm{$l};
        my $asm = "";
        if (defined($dis) && $dis ne '') {
          $asm = "<span class=\"asm\">" . $dis . "</span>";
        }
        my $source_class = (($n1 + $n2 > 0)
                            ? "livesrc"
                            : (($asm ne "") ? "deadsrc" : "nop"));
        printf $output (
          "<span class=\"line\">%5d</span> " .
          "<span class=\"%s\">%6s %6s %s</span>%s\n",
          $l, $source_class,
          HtmlPrintNumber($n1),
          HtmlPrintNumber($n2),
          HtmlEscape($text),
          $asm);
      } else {
        printf $output(
          "%6s %6s %4d: %s\n",
          UnparseAlt($n1),
          UnparseAlt($n2),
          $l,
          $text);
      }
      if ($l == $lastline)  { print $output $skip_marker; }
    };
  }
  close(FILE);
  if ($html) {
    print $output "</pre>
\n"; } return 1; } # Return the source line for the specified file/linenumber. # Returns undef if not found. sub SourceLine { my $file = shift; my $line = shift; # Look in cache if (!defined($main::source_cache{$file})) { if (100 < scalar keys(%main::source_cache)) { # Clear the cache when it gets too big $main::source_cache = (); } # Read all lines from the file if (!open(FILE, "<$file")) { print STDERR "$file: $!\n"; $main::source_cache{$file} = []; # Cache the negative result return undef; } my $lines = []; push(@{$lines}, ""); # So we can use 1-based line numbers as indices while () { push(@{$lines}, $_); } close(FILE); # Save the lines in the cache $main::source_cache{$file} = $lines; } my $lines = $main::source_cache{$file}; if (($line < 0) || ($line > $#{$lines})) { return undef; } else { return $lines->[$line]; } } # Print disassembly for one routine with interspersed source if available sub PrintDisassembledFunction { my $prog = shift; my $offset = shift; my $routine = shift; my $flat = shift; my $cumulative = shift; my $start_addr = shift; my $end_addr = shift; my $total = shift; # Disassemble all instructions my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr); # Make array of counts per instruction my @flat_count = (); my @cum_count = (); my $flat_total = 0; my $cum_total = 0; foreach my $e (@instructions) { # Add up counts for all address that fall inside this instruction my $c1 = 0; my $c2 = 0; for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) { $c1 += GetEntry($flat, $a); $c2 += GetEntry($cumulative, $a); } push(@flat_count, $c1); push(@cum_count, $c2); $flat_total += $c1; $cum_total += $c2; } # Print header with total counts printf("ROUTINE ====================== %s\n" . "%6s %6s %s (flat, cumulative) %.1f%% of total\n", ShortFunctionName($routine), Unparse($flat_total), Unparse($cum_total), Units(), ($cum_total * 100.0) / $total); # Process instructions in order my $current_file = ""; for (my $i = 0; $i <= $#instructions; ) { my $e = $instructions[$i]; # Print the new file name whenever we switch files if ($e->[1] ne $current_file) { $current_file = $e->[1]; my $fname = $current_file; $fname =~ s|^\./||; # Trim leading "./" # Shorten long file names if (length($fname) >= 58) { $fname = "..." . substr($fname, -55); } printf("-------------------- %s\n", $fname); } # TODO: Compute range of lines to print together to deal with # small reorderings. 
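    # Illustrative shape of the interleaved source/disassembly output
    # produced below (all counts, lines and addresses hypothetical):
    #      .     10    42: x += y;
    #      .      4   1a2f:  add    %rbx,%rax
    #      .      6   1a33:  mov    %rax,(%rcx)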
my $first_line = $e->[2]; my $last_line = $first_line; my %flat_sum = (); my %cum_sum = (); for (my $l = $first_line; $l <= $last_line; $l++) { $flat_sum{$l} = 0; $cum_sum{$l} = 0; } # Find run of instructions for this range of source lines my $first_inst = $i; while (($i <= $#instructions) && ($instructions[$i]->[2] >= $first_line) && ($instructions[$i]->[2] <= $last_line)) { $e = $instructions[$i]; $flat_sum{$e->[2]} += $flat_count[$i]; $cum_sum{$e->[2]} += $cum_count[$i]; $i++; } my $last_inst = $i - 1; # Print source lines for (my $l = $first_line; $l <= $last_line; $l++) { my $line = SourceLine($current_file, $l); if (!defined($line)) { $line = "?\n"; next; } else { $line =~ s/^\s+//; } printf("%6s %6s %5d: %s", UnparseAlt($flat_sum{$l}), UnparseAlt($cum_sum{$l}), $l, $line); } # Print disassembly for (my $x = $first_inst; $x <= $last_inst; $x++) { my $e = $instructions[$x]; printf("%6s %6s %8s: %6s\n", UnparseAlt($flat_count[$x]), UnparseAlt($cum_count[$x]), UnparseAddress($offset, $e->[0]), CleanDisassembly($e->[3])); } } } # Print DOT graph sub PrintDot { my $prog = shift; my $symbols = shift; my $raw = shift; my $flat = shift; my $cumulative = shift; my $overall_total = shift; # Get total my $local_total = TotalProfile($flat); my $nodelimit = int($main::opt_nodefraction * $local_total); my $edgelimit = int($main::opt_edgefraction * $local_total); my $nodecount = $main::opt_nodecount; # Find nodes to include my @list = (sort { abs(GetEntry($cumulative, $b)) <=> abs(GetEntry($cumulative, $a)) || $a cmp $b } keys(%{$cumulative})); my $last = $nodecount - 1; if ($last > $#list) { $last = $#list; } while (($last >= 0) && (abs(GetEntry($cumulative, $list[$last])) <= $nodelimit)) { $last--; } if ($last < 0) { print STDERR "No nodes to print\n"; return 0; } if ($nodelimit > 0 || $edgelimit > 0) { printf STDERR ("Dropping nodes with <= %s %s; edges with <= %s abs(%s)\n", Unparse($nodelimit), Units(), Unparse($edgelimit), Units()); } # Open DOT output file my $output; my $escaped_dot = ShellEscape(@DOT); my $escaped_ps2pdf = ShellEscape(@PS2PDF); if ($main::opt_gv) { my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "ps")); $output = "| $escaped_dot -Tps2 >$escaped_outfile"; } elsif ($main::opt_evince) { my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "pdf")); $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - $escaped_outfile"; } elsif ($main::opt_ps) { $output = "| $escaped_dot -Tps2"; } elsif ($main::opt_pdf) { $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - -"; } elsif ($main::opt_web || $main::opt_svg) { # We need to post-process the SVG, so write to a temporary file always. my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "svg")); $output = "| $escaped_dot -Tsvg >$escaped_outfile"; } elsif ($main::opt_gif) { $output = "| $escaped_dot -Tgif"; } else { $output = ">&STDOUT"; } open(DOT, $output) || error("$output: $!\n"); # Title printf DOT ("digraph \"%s; %s %s\" {\n", $prog, Unparse($overall_total), Units()); if ($main::opt_pdf) { # The output is more printable if we set the page size for dot. printf DOT ("size=\"8,11\"\n"); } printf DOT ("node [width=0.375,height=0.25];\n"); # Print legend printf DOT ("Legend [shape=box,fontsize=24,shape=plaintext," . 
"label=\"%s\\l%s\\l%s\\l%s\\l%s\\l\"];\n", $prog, sprintf("Total %s: %s", Units(), Unparse($overall_total)), sprintf("Focusing on: %s", Unparse($local_total)), sprintf("Dropped nodes with <= %s abs(%s)", Unparse($nodelimit), Units()), sprintf("Dropped edges with <= %s %s", Unparse($edgelimit), Units()) ); # Print nodes my %node = (); my $nextnode = 1; foreach my $a (@list[0..$last]) { # Pick font size my $f = GetEntry($flat, $a); my $c = GetEntry($cumulative, $a); my $fs = 8; if ($local_total > 0) { $fs = 8 + (50.0 * sqrt(abs($f * 1.0 / $local_total))); } $node{$a} = $nextnode++; my $sym = $a; $sym =~ s/\s+/\\n/g; $sym =~ s/::/\\n/g; # Extra cumulative info to print for non-leaves my $extra = ""; if ($f != $c) { $extra = sprintf("\\rof %s (%s)", Unparse($c), Percent($c, $local_total)); } my $style = ""; if ($main::opt_heapcheck) { if ($f > 0) { # make leak-causing nodes more visible (add a background) $style = ",style=filled,fillcolor=gray" } elsif ($f < 0) { # make anti-leak-causing nodes (which almost never occur) # stand out as well (triple border) $style = ",peripheries=3" } } printf DOT ("N%d [label=\"%s\\n%s (%s)%s\\r" . "\",shape=box,fontsize=%.1f%s];\n", $node{$a}, $sym, Unparse($f), Percent($f, $local_total), $extra, $fs, $style, ); } # Get edges and counts per edge my %edge = (); my $n; my $fullname_to_shortname_map = {}; FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map); foreach my $k (keys(%{$raw})) { # TODO: omit low %age edges $n = $raw->{$k}; my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k); for (my $i = 1; $i <= $#translated; $i++) { my $src = $translated[$i]; my $dst = $translated[$i-1]; #next if ($src eq $dst); # Avoid self-edges? if (exists($node{$src}) && exists($node{$dst})) { my $edge_label = "$src\001$dst"; if (!exists($edge{$edge_label})) { $edge{$edge_label} = 0; } $edge{$edge_label} += $n; } } } # Print edges (process in order of decreasing counts) my %indegree = (); # Number of incoming edges added per node so far my %outdegree = (); # Number of outgoing edges added per node so far foreach my $e (sort { $edge{$b} <=> $edge{$a} } keys(%edge)) { my @x = split(/\001/, $e); $n = $edge{$e}; # Initialize degree of kept incoming and outgoing edges if necessary my $src = $x[0]; my $dst = $x[1]; if (!exists($outdegree{$src})) { $outdegree{$src} = 0; } if (!exists($indegree{$dst})) { $indegree{$dst} = 0; } my $keep; if ($indegree{$dst} == 0) { # Keep edge if needed for reachability $keep = 1; } elsif (abs($n) <= $edgelimit) { # Drop if we are below --edgefraction $keep = 0; } elsif ($outdegree{$src} >= $main::opt_maxdegree || $indegree{$dst} >= $main::opt_maxdegree) { # Keep limited number of in/out edges per node $keep = 0; } else { $keep = 1; } if ($keep) { $outdegree{$src}++; $indegree{$dst}++; # Compute line width based on edge count my $fraction = abs($local_total ? (3 * ($n / $local_total)) : 0); if ($fraction > 1) { $fraction = 1; } my $w = $fraction * 2; if ($w < 1 && ($main::opt_web || $main::opt_svg)) { # SVG output treats line widths < 1 poorly. 
$w = 1; } # Dot sometimes segfaults if given edge weights that are too large, so # we cap the weights at a large value my $edgeweight = abs($n) ** 0.7; if ($edgeweight > 100000) { $edgeweight = 100000; } $edgeweight = int($edgeweight); my $style = sprintf("setlinewidth(%f)", $w); if ($x[1] =~ m/\(inline\)/) { $style .= ",dashed"; } # Use a slightly squashed function of the edge count as the weight printf DOT ("N%s -> N%s [label=%s, weight=%d, style=\"%s\"];\n", $node{$x[0]}, $node{$x[1]}, Unparse($n), $edgeweight, $style); } } print DOT ("}\n"); close(DOT); if ($main::opt_web || $main::opt_svg) { # Rewrite SVG to be more usable inside web browser. RewriteSvg(TempName($main::next_tmpfile, "svg")); } return 1; } sub RewriteSvg { my $svgfile = shift; open(SVG, $svgfile) || die "open temp svg: $!"; my @svg = ; close(SVG); unlink $svgfile; my $svg = join('', @svg); # Dot's SVG output is # # # # ... # # # # Change it to # # # $svg_javascript # # # ... # # # # Fix width, height; drop viewBox. $svg =~ s/(?s) above first my $svg_javascript = SvgJavascript(); my $viewport = "\n"; $svg =~ s/ above . $svg =~ s/(.*)(<\/svg>)/$1<\/g>$2/; $svg =~ s/$svgfile") || die "open $svgfile: $!"; print SVG $svg; close(SVG); } } sub SvgJavascript { return <<'EOF'; EOF } # Provides a map from fullname to shortname for cases where the # shortname is ambiguous. The symlist has both the fullname and # shortname for all symbols, which is usually fine, but sometimes -- # such as overloaded functions -- two different fullnames can map to # the same shortname. In that case, we use the address of the # function to disambiguate the two. This function fills in a map that # maps fullnames to modified shortnames in such cases. If a fullname # is not present in the map, the 'normal' shortname provided by the # symlist is the appropriate one to use. sub FillFullnameToShortnameMap { my $symbols = shift; my $fullname_to_shortname_map = shift; my $shortnames_seen_once = {}; my $shortnames_seen_more_than_once = {}; foreach my $symlist (values(%{$symbols})) { # TODO(csilvers): deal with inlined symbols too. my $shortname = $symlist->[0]; my $fullname = $symlist->[2]; if ($fullname !~ /<[0-9a-fA-F]+>$/) { # fullname doesn't end in an address next; # the only collisions we care about are when addresses differ } if (defined($shortnames_seen_once->{$shortname}) && $shortnames_seen_once->{$shortname} ne $fullname) { $shortnames_seen_more_than_once->{$shortname} = 1; } else { $shortnames_seen_once->{$shortname} = $fullname; } } foreach my $symlist (values(%{$symbols})) { my $shortname = $symlist->[0]; my $fullname = $symlist->[2]; # TODO(csilvers): take in a list of addresses we care about, and only # store in the map if $symlist->[1] is in that list. Saves space. next if defined($fullname_to_shortname_map->{$fullname}); if (defined($shortnames_seen_more_than_once->{$shortname})) { if ($fullname =~ /<0*([^>]*)>$/) { # fullname has address at end of it $fullname_to_shortname_map->{$fullname} = "$shortname\@$1"; } } } } # Return a small number that identifies the argument. # Multiple calls with the same argument will return the same number. # Calls with different arguments will return different numbers. 
sub ShortIdFor { my $key = shift; my $id = $main::uniqueid{$key}; if (!defined($id)) { $id = keys(%main::uniqueid) + 1; $main::uniqueid{$key} = $id; } return $id; } # Translate a stack of addresses into a stack of symbols sub TranslateStack { my $symbols = shift; my $fullname_to_shortname_map = shift; my $k = shift; my @addrs = split(/\n/, $k); my @result = (); for (my $i = 0; $i <= $#addrs; $i++) { my $a = $addrs[$i]; # Skip large addresses since they sometimes show up as fake entries on RH9 if (length($a) > 8 && $a gt "7fffffffffffffff") { next; } if ($main::opt_disasm || $main::opt_list) { # We want just the address for the key push(@result, $a); next; } my $symlist = $symbols->{$a}; if (!defined($symlist)) { $symlist = [$a, "", $a]; } # We can have a sequence of symbols for a particular entry # (more than one symbol in the case of inlining). Callers # come before callees in symlist, so walk backwards since # the translated stack should contain callees before callers. for (my $j = $#{$symlist}; $j >= 2; $j -= 3) { my $func = $symlist->[$j-2]; my $fileline = $symlist->[$j-1]; my $fullfunc = $symlist->[$j]; if (defined($fullname_to_shortname_map->{$fullfunc})) { $func = $fullname_to_shortname_map->{$fullfunc}; } if ($j > 2) { $func = "$func (inline)"; } # Do not merge nodes corresponding to Callback::Run since that # causes confusing cycles in dot display. Instead, we synthesize # a unique name for this frame per caller. if ($func =~ m/Callback.*::Run$/) { my $caller = ($i > 0) ? $addrs[$i-1] : 0; $func = "Run#" . ShortIdFor($caller); } if ($main::opt_addresses) { push(@result, "$a $func $fileline"); } elsif ($main::opt_lines) { if ($func eq '??' && $fileline eq '??:0') { push(@result, "$a"); } else { push(@result, "$func $fileline"); } } elsif ($main::opt_functions) { if ($func eq '??') { push(@result, "$a"); } else { push(@result, $func); } } elsif ($main::opt_files) { if ($fileline eq '??:0' || $fileline eq '') { push(@result, "$a"); } else { my $f = $fileline; $f =~ s/:\d+$//; push(@result, $f); } } else { push(@result, $a); last; # Do not print inlined info } } } # print join(",", @addrs), " => ", join(",", @result), "\n"; return @result; } # Generate percent string for a number and a total sub Percent { my $num = shift; my $tot = shift; if ($tot != 0) { return sprintf("%.1f%%", $num * 100.0 / $tot); } else { return ($num == 0) ? "nan" : (($num > 0) ? "+inf" : "-inf"); } } # Generate pretty-printed form of number sub Unparse { my $num = shift; if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { if ($main::opt_inuse_objects || $main::opt_alloc_objects) { return sprintf("%d", $num); } else { if ($main::opt_show_bytes) { return sprintf("%d", $num); } else { return sprintf("%.1f", $num / 1048576.0); } } } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) { return sprintf("%.3f", $num / 1e9); # Convert nanoseconds to seconds } else { return sprintf("%d", $num); } } # Alternate pretty-printed form: 0 maps to "." 
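# For example, UnparseAlt(0) returns "." while UnparseAlt(42) returns
# whatever Unparse(42) would (e.g. "42" for CPU samples).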
sub UnparseAlt { my $num = shift; if ($num == 0) { return "."; } else { return Unparse($num); } } # Alternate pretty-printed form: 0 maps to "" sub HtmlPrintNumber { my $num = shift; if ($num == 0) { return ""; } else { return Unparse($num); } } # Return output units sub Units { if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { if ($main::opt_inuse_objects || $main::opt_alloc_objects) { return "objects"; } else { if ($main::opt_show_bytes) { return "B"; } else { return "MB"; } } } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) { return "seconds"; } else { return "samples"; } } ##### Profile manipulation code ##### # Generate flattened profile: # If count is charged to stack [a,b,c,d], in generated profile, # it will be charged to [a] sub FlatProfile { my $profile = shift; my $result = {}; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); if ($#addrs >= 0) { AddEntry($result, $addrs[0], $count); } } return $result; } # Generate cumulative profile: # If count is charged to stack [a,b,c,d], in generated profile, # it will be charged to [a], [b], [c], [d] sub CumulativeProfile { my $profile = shift; my $result = {}; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); foreach my $a (@addrs) { AddEntry($result, $a, $count); } } return $result; } # If the second-youngest PC on the stack is always the same, returns # that pc. Otherwise, returns undef. sub IsSecondPcAlwaysTheSame { my $profile = shift; my $second_pc = undef; foreach my $k (keys(%{$profile})) { my @addrs = split(/\n/, $k); if ($#addrs < 1) { return undef; } if (not defined $second_pc) { $second_pc = $addrs[1]; } else { if ($second_pc ne $addrs[1]) { return undef; } } } return $second_pc; } sub ExtractSymbolLocation { my $symbols = shift; my $address = shift; # 'addr2line' outputs "??:0" for unknown locations; we do the # same to be consistent. my $location = "??:0:unknown"; if (exists $symbols->{$address}) { my $file = $symbols->{$address}->[1]; if ($file eq "?") { $file = "??:0" } $location = $file . ":" . $symbols->{$address}->[0]; } return $location; } # Extracts a graph of calls. 
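# Illustrative example (symbolized locations are hypothetical): a single
# stack trace with leaf B, caller A and outermost frame main, seen 5 times,
# yields the entries
#   "foo.cc:30:B"                    => 5   (the destination alone)
#   "foo.cc:20:A -> foo.cc:30:B"     => 5
#   "foo.cc:10:main -> foo.cc:20:A"  => 5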
sub ExtractCalls { my $symbols = shift; my $profile = shift; my $calls = {}; while( my ($stack_trace, $count) = each %$profile ) { my @address = split(/\n/, $stack_trace); my $destination = ExtractSymbolLocation($symbols, $address[0]); AddEntry($calls, $destination, $count); for (my $i = 1; $i <= $#address; $i++) { my $source = ExtractSymbolLocation($symbols, $address[$i]); my $call = "$source -> $destination"; AddEntry($calls, $call, $count); $destination = $source; } } return $calls; } sub RemoveUninterestingFrames { my $symbols = shift; my $profile = shift; # List of function names to skip my %skip = (); my $skip_regexp = 'NOMATCH'; if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { foreach my $name ('calloc', 'cfree', 'malloc', 'free', 'memalign', 'posix_memalign', 'aligned_alloc', 'pvalloc', 'valloc', 'realloc', 'mallocx', # jemalloc 'rallocx', # jemalloc 'xallocx', # jemalloc 'dallocx', # jemalloc 'sdallocx', # jemalloc 'tc_calloc', 'tc_cfree', 'tc_malloc', 'tc_free', 'tc_memalign', 'tc_posix_memalign', 'tc_pvalloc', 'tc_valloc', 'tc_realloc', 'tc_new', 'tc_delete', 'tc_newarray', 'tc_deletearray', 'tc_new_nothrow', 'tc_newarray_nothrow', 'do_malloc', '::do_malloc', # new name -- got moved to an unnamed ns '::do_malloc_or_cpp_alloc', 'DoSampledAllocation', 'simple_alloc::allocate', '__malloc_alloc_template::allocate', '__builtin_delete', '__builtin_new', '__builtin_vec_delete', '__builtin_vec_new', 'operator new', 'operator new[]', # The entry to our memory-allocation routines on OS X 'malloc_zone_malloc', 'malloc_zone_calloc', 'malloc_zone_valloc', 'malloc_zone_realloc', 'malloc_zone_memalign', 'malloc_zone_free', # These mark the beginning/end of our custom sections '__start_google_malloc', '__stop_google_malloc', '__start_malloc_hook', '__stop_malloc_hook') { $skip{$name} = 1; $skip{"_" . $name} = 1; # Mach (OS X) adds a _ prefix to everything } # TODO: Remove TCMalloc once everything has been # moved into the tcmalloc:: namespace and we have flushed # old code out of the system. $skip_regexp = "TCMalloc|^tcmalloc::"; } elsif ($main::profile_type eq 'contention') { foreach my $vname ('base::RecordLockProfileData', 'base::SubmitMutexProfileData', 'base::SubmitSpinLockProfileData', 'Mutex::Unlock', 'Mutex::UnlockSlow', 'Mutex::ReaderUnlock', 'MutexLock::~MutexLock', 'SpinLock::Unlock', 'SpinLock::SlowUnlock', 'SpinLockHolder::~SpinLockHolder') { $skip{$vname} = 1; } } elsif ($main::profile_type eq 'cpu') { # Drop signal handlers used for CPU profile collection # TODO(dpeng): this should not be necessary; it's taken # care of by the general 2nd-pc mechanism below. foreach my $name ('ProfileData::Add', # historical 'ProfileData::prof_handler', # historical 'CpuProfiler::prof_handler', '__FRAME_END__', '__pthread_sighandler', '__restore') { $skip{$name} = 1; } } else { # Nothing skipped for unknown types } if ($main::profile_type eq 'cpu') { # If all the second-youngest program counters are the same, # this STRONGLY suggests that it is an artifact of measurement, # i.e., stack frames pushed by the CPU profiler signal handler. # Hence, we delete them. # (The topmost PC is read from the signal structure, not from # the stack, so it does not get involved.) 
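# Illustrative example (addresses are hypothetical): if every trace looks
# like [<leaf pc>, 0xdeadbeef, <callers>...] with the same 0xdeadbeef in
# position 1, that frame is almost certainly the profiling signal handler,
# and the splice(@addrs, 1, 1) below removes it from every stack.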
while (my $second_pc = IsSecondPcAlwaysTheSame($profile)) { my $result = {}; my $func = ''; if (exists($symbols->{$second_pc})) { $second_pc = $symbols->{$second_pc}->[0]; } print STDERR "Removing $second_pc from all stack traces.\n"; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); splice @addrs, 1, 1; my $reduced_path = join("\n", @addrs); AddEntry($result, $reduced_path, $count); } $profile = $result; } } my $result = {}; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); my @path = (); foreach my $a (@addrs) { if (exists($symbols->{$a})) { my $func = $symbols->{$a}->[0]; if ($skip{$func} || ($func =~ m/$skip_regexp/)) { # Throw away the portion of the backtrace seen so far, under the # assumption that previous frames were for functions internal to the # allocator. @path = (); next; } } push(@path, $a); } my $reduced_path = join("\n", @path); AddEntry($result, $reduced_path, $count); } return $result; } # Reduce profile to granularity given by user sub ReduceProfile { my $symbols = shift; my $profile = shift; my $result = {}; my $fullname_to_shortname_map = {}; FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map); foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k); my @path = (); my %seen = (); $seen{''} = 1; # So that empty keys are skipped foreach my $e (@translated) { # To avoid double-counting due to recursion, skip a stack-trace # entry if it has already been seen if (!$seen{$e}) { $seen{$e} = 1; push(@path, $e); } } my $reduced_path = join("\n", @path); AddEntry($result, $reduced_path, $count); } return $result; } # Does the specified symbol array match the regexp? 
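# (Illustrative note, not part of the original script: a symbol entry
# $symbols->{$pc} is a flat list of (shortname, fileline, fullname) triples,
# e.g. ['main', 'prog.c:10', 'main'], so the loop below tests elements $i
# and $i+1 -- the short name and the file:line -- of each triple against
# the regexp.)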
sub SymbolMatches { my $sym = shift; my $re = shift; if (defined($sym)) { for (my $i = 0; $i < $#{$sym}; $i += 3) { if ($sym->[$i] =~ m/$re/ || $sym->[$i+1] =~ m/$re/) { return 1; } } } return 0; } # Focus only on paths involving specified regexps sub FocusProfile { my $symbols = shift; my $profile = shift; my $focus = shift; my $result = {}; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); foreach my $a (@addrs) { # Reply if it matches either the address/shortname/fileline if (($a =~ m/$focus/) || SymbolMatches($symbols->{$a}, $focus)) { AddEntry($result, $k, $count); last; } } } return $result; } # Focus only on paths not involving specified regexps sub IgnoreProfile { my $symbols = shift; my $profile = shift; my $ignore = shift; my $result = {}; foreach my $k (keys(%{$profile})) { my $count = $profile->{$k}; my @addrs = split(/\n/, $k); my $matched = 0; foreach my $a (@addrs) { # Reply if it matches either the address/shortname/fileline if (($a =~ m/$ignore/) || SymbolMatches($symbols->{$a}, $ignore)) { $matched = 1; last; } } if (!$matched) { AddEntry($result, $k, $count); } } return $result; } # Get total count in profile sub TotalProfile { my $profile = shift; my $result = 0; foreach my $k (keys(%{$profile})) { $result += $profile->{$k}; } return $result; } # Add A to B sub AddProfile { my $A = shift; my $B = shift; my $R = {}; # add all keys in A foreach my $k (keys(%{$A})) { my $v = $A->{$k}; AddEntry($R, $k, $v); } # add all keys in B foreach my $k (keys(%{$B})) { my $v = $B->{$k}; AddEntry($R, $k, $v); } return $R; } # Merges symbol maps sub MergeSymbols { my $A = shift; my $B = shift; my $R = {}; foreach my $k (keys(%{$A})) { $R->{$k} = $A->{$k}; } if (defined($B)) { foreach my $k (keys(%{$B})) { $R->{$k} = $B->{$k}; } } return $R; } # Add A to B sub AddPcs { my $A = shift; my $B = shift; my $R = {}; # add all keys in A foreach my $k (keys(%{$A})) { $R->{$k} = 1 } # add all keys in B foreach my $k (keys(%{$B})) { $R->{$k} = 1 } return $R; } # Subtract B from A sub SubtractProfile { my $A = shift; my $B = shift; my $R = {}; foreach my $k (keys(%{$A})) { my $v = $A->{$k} - GetEntry($B, $k); if ($v < 0 && $main::opt_drop_negative) { $v = 0; } AddEntry($R, $k, $v); } if (!$main::opt_drop_negative) { # Take care of when subtracted profile has more entries foreach my $k (keys(%{$B})) { if (!exists($A->{$k})) { AddEntry($R, $k, 0 - $B->{$k}); } } } return $R; } # Get entry from profile; zero if not present sub GetEntry { my $profile = shift; my $k = shift; if (exists($profile->{$k})) { return $profile->{$k}; } else { return 0; } } # Add entry to specified profile sub AddEntry { my $profile = shift; my $k = shift; my $n = shift; if (!exists($profile->{$k})) { $profile->{$k} = 0; } $profile->{$k} += $n; } # Add a stack of entries to specified profile, and add them to the $pcs # list. 
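# (Illustrative example, not part of the original script:
#   AddEntries($profile, $pcs, "400123 400456", 3);
# hex-extends each frame to $address_length digits, marks both PCs in
# %$pcs, and bumps the count stored under the newline-joined key by 3.)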
sub AddEntries {
  my $profile = shift;
  my $pcs = shift;
  my $stack = shift;
  my $count = shift;
  my @k = ();

  foreach my $e (split(/\s+/, $stack)) {
    my $pc = HexExtend($e);
    $pcs->{$pc} = 1;
    push @k, $pc;
  }
  AddEntry($profile, (join "\n", @k), $count);
}

##### Code to profile a server dynamically #####

sub CheckSymbolPage {
  my $url = SymbolPageURL();
  my $command = ShellEscape(@URL_FETCHER, $url);
  open(SYMBOL, "$command |") or error($command);
  my $line = <SYMBOL>;
  $line =~ s/\r//g;         # turn windows-looking lines into unix-looking lines
  close(SYMBOL);
  unless (defined($line)) {
    error("$url doesn't exist\n");
  }

  if ($line =~ /^num_symbols:\s+(\d+)$/) {
    if ($1 == 0) {
      error("Stripped binary. No symbols available.\n");
    }
  } else {
    error("Failed to get the number of symbols from $url\n");
  }
}

sub IsProfileURL {
  my $profile_name = shift;
  if (-f $profile_name) {
    printf STDERR "Using local file $profile_name.\n";
    return 0;
  }
  return 1;
}

sub ParseProfileURL {
  my $profile_name = shift;

  if (!defined($profile_name) || $profile_name eq "") {
    return ();
  }

  # Split profile URL - matches all non-empty strings, so no test.
  $profile_name =~ m,^(https?://)?([^/]+)(.*?)(/|$PROFILES)?$,;

  my $proto = $1 || "http://";
  my $hostport = $2;
  my $prefix = $3;
  my $profile = $4 || "/";

  my $host = $hostport;
  $host =~ s/:.*//;

  my $baseurl = "$proto$hostport$prefix";
  return ($host, $baseurl, $profile);
}

# We fetch symbols from the first profile argument.
sub SymbolPageURL {
  my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]);
  return "$baseURL$SYMBOL_PAGE";
}

sub FetchProgramName() {
  my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]);
  my $url = "$baseURL$PROGRAM_NAME_PAGE";
  my $command_line = ShellEscape(@URL_FETCHER, $url);
  open(CMDLINE, "$command_line |") or error($command_line);
  my $cmdline = <CMDLINE>;
  $cmdline =~ s/\r//g;      # turn windows-looking lines into unix-looking lines
  close(CMDLINE);
  error("Failed to get program name from $url\n") unless defined($cmdline);
  $cmdline =~ s/\x00.+//;   # Remove argv[1] and later arguments.
  $cmdline =~ s!\n!!g;      # Remove LFs.
  return $cmdline;
}

# Gee, curl's -L (--location) option isn't reliable at least
# with its 7.12.3 version.  Curl will forget to post data if
# there is a redirection.  This function is a workaround for
# curl.  Redirection happens on borg hosts.
sub ResolveRedirectionForCurl {
  my $url = shift;
  my $command_line = ShellEscape(@URL_FETCHER, "--head", $url);
  open(CMDLINE, "$command_line |") or error($command_line);
  while (<CMDLINE>) {
    s/\r//g;                # turn windows-looking lines into unix-looking lines
    if (/^Location: (.*)/) {
      $url = $1;
    }
  }
  close(CMDLINE);
  return $url;
}

# Add a timeout flag to URL_FETCHER.  Returns a new list.
sub AddFetchTimeout {
  my $timeout = shift;
  my @fetcher = @_;
  if (defined($timeout)) {
    if (join(" ", @fetcher) =~ m/\bcurl -s/) {
      push(@fetcher, "--max-time", sprintf("%d", $timeout));
    } elsif (join(" ", @fetcher) =~ m/\brpcget\b/) {
      push(@fetcher, sprintf("--deadline=%d", $timeout));
    }
  }
  return @fetcher;
}

# Reads a symbol map from the file handle name given as $1, returning
# the resulting symbol map.  Also processes variables relating to symbols.
# Currently, the only variable processed is 'binary=' which updates
# $main::prog to have the correct program name.
sub ReadSymbols {
  my $in = shift;
  my $map = {};
  while (<$in>) {
    s/\r//g;                # turn windows-looking lines into unix-looking lines
    # Removes all the leading zeroes from the symbols, see comment below.
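    # (Illustrative note, not part of the original script: a symbol line
    # such as "0x00000000004004f6  main" stores $map->{"4004f6"} = "main",
    # while a "binary=/path/to/prog" line updates $main::prog instead.)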
if (m/^0x0*([0-9a-f]+)\s+(.+)/) { $map->{$1} = $2; } elsif (m/^---/) { last; } elsif (m/^([a-z][^=]*)=(.*)$/ ) { my ($variable, $value) = ($1, $2); for ($variable, $value) { s/^\s+//; s/\s+$//; } if ($variable eq "binary") { if ($main::prog ne $UNKNOWN_BINARY && $main::prog ne $value) { printf STDERR ("Warning: Mismatched binary name '%s', using '%s'.\n", $main::prog, $value); } $main::prog = $value; } else { printf STDERR ("Ignoring unknown variable in symbols list: " . "'%s' = '%s'\n", $variable, $value); } } } return $map; } # Fetches and processes symbols to prepare them for use in the profile output # code. If the optional 'symbol_map' arg is not given, fetches symbols from # $SYMBOL_PAGE for all PC values found in profile. Otherwise, the raw symbols # are assumed to have already been fetched into 'symbol_map' and are simply # extracted and processed. sub FetchSymbols { my $pcset = shift; my $symbol_map = shift; my %seen = (); my @pcs = grep { !$seen{$_}++ } keys(%$pcset); # uniq if (!defined($symbol_map)) { my $post_data = join("+", sort((map {"0x" . "$_"} @pcs))); open(POSTFILE, ">$main::tmpfile_sym"); print POSTFILE $post_data; close(POSTFILE); my $url = SymbolPageURL(); my $command_line; if (join(" ", @URL_FETCHER) =~ m/\bcurl -s/) { $url = ResolveRedirectionForCurl($url); $command_line = ShellEscape(@URL_FETCHER, "-d", "\@$main::tmpfile_sym", $url); } else { $command_line = (ShellEscape(@URL_FETCHER, "--post", $url) . " < " . ShellEscape($main::tmpfile_sym)); } # We use c++filt in case $SYMBOL_PAGE gives us mangled symbols. my $escaped_cppfilt = ShellEscape($obj_tool_map{"c++filt"}); open(SYMBOL, "$command_line | $escaped_cppfilt |") or error($command_line); $symbol_map = ReadSymbols(*SYMBOL{IO}); close(SYMBOL); } my $symbols = {}; foreach my $pc (@pcs) { my $fullname; # For 64 bits binaries, symbols are extracted with 8 leading zeroes. # Then /symbol reads the long symbols in as uint64, and outputs # the result with a "0x%08llx" format which get rid of the zeroes. # By removing all the leading zeroes in both $pc and the symbols from # /symbol, the symbols match and are retrievable from the map. my $shortpc = $pc; $shortpc =~ s/^0*//; # Each line may have a list of names, which includes the function # and also other functions it has inlined. They are separated (in # PrintSymbolizedProfile), by --, which is illegal in function names. my $fullnames; if (defined($symbol_map->{$shortpc})) { $fullnames = $symbol_map->{$shortpc}; } else { $fullnames = "0x" . 
$pc; # Just use addresses } my $sym = []; $symbols->{$pc} = $sym; foreach my $fullname (split("--", $fullnames)) { my $name = ShortFunctionName($fullname); push(@{$sym}, $name, "?", $fullname); } } return $symbols; } sub BaseName { my $file_name = shift; $file_name =~ s!^.*/!!; # Remove directory name return $file_name; } sub MakeProfileBaseName { my ($binary_name, $profile_name) = @_; my ($host, $baseURL, $path) = ParseProfileURL($profile_name); my $binary_shortname = BaseName($binary_name); return sprintf("%s.%s.%s", $binary_shortname, $main::op_time, $host); } sub FetchDynamicProfile { my $binary_name = shift; my $profile_name = shift; my $fetch_name_only = shift; my $encourage_patience = shift; if (!IsProfileURL($profile_name)) { return $profile_name; } else { my ($host, $baseURL, $path) = ParseProfileURL($profile_name); if ($path eq "" || $path eq "/") { # Missing type specifier defaults to cpu-profile $path = $PROFILE_PAGE; } my $profile_file = MakeProfileBaseName($binary_name, $profile_name); my $url = "$baseURL$path"; my $fetch_timeout = undef; if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE/) { if ($path =~ m/[?]/) { $url .= "&"; } else { $url .= "?"; } $url .= sprintf("seconds=%d", $main::opt_seconds); $fetch_timeout = $main::opt_seconds * 1.01 + 60; } else { # For non-CPU profiles, we add a type-extension to # the target profile file name. my $suffix = $path; $suffix =~ s,/,.,g; $profile_file .= $suffix; } my $profile_dir = $ENV{"JEPROF_TMPDIR"} || ($ENV{HOME} . "/jeprof"); if (! -d $profile_dir) { mkdir($profile_dir) || die("Unable to create profile directory $profile_dir: $!\n"); } my $tmp_profile = "$profile_dir/.tmp.$profile_file"; my $real_profile = "$profile_dir/$profile_file"; if ($fetch_name_only > 0) { return $real_profile; } my @fetcher = AddFetchTimeout($fetch_timeout, @URL_FETCHER); my $cmd = ShellEscape(@fetcher, $url) . " > " . 
ShellEscape($tmp_profile); if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE|$CENSUSPROFILE_PAGE/){ print STDERR "Gathering CPU profile from $url for $main::opt_seconds seconds to\n ${real_profile}\n"; if ($encourage_patience) { print STDERR "Be patient...\n"; } } else { print STDERR "Fetching $path profile from $url to\n ${real_profile}\n"; } (system($cmd) == 0) || error("Failed to get profile: $cmd: $!\n"); (system("mv", $tmp_profile, $real_profile) == 0) || error("Unable to rename profile\n"); print STDERR "Wrote profile to $real_profile\n"; $main::collected_profile = $real_profile; return $main::collected_profile; } } # Collect profiles in parallel sub FetchDynamicProfiles { my $items = scalar(@main::pfile_args); my $levels = log($items) / log(2); if ($items == 1) { $main::profile_files[0] = FetchDynamicProfile($main::prog, $main::pfile_args[0], 0, 1); } else { # math rounding issues if ((2 ** $levels) < $items) { $levels++; } my $count = scalar(@main::pfile_args); for (my $i = 0; $i < $count; $i++) { $main::profile_files[$i] = FetchDynamicProfile($main::prog, $main::pfile_args[$i], 1, 0); } print STDERR "Fetching $count profiles, Be patient...\n"; FetchDynamicProfilesRecurse($levels, 0, 0); $main::collected_profile = join(" \\\n ", @main::profile_files); } } # Recursively fork a process to get enough processes # collecting profiles sub FetchDynamicProfilesRecurse { my $maxlevel = shift; my $level = shift; my $position = shift; if (my $pid = fork()) { $position = 0 | ($position << 1); TryCollectProfile($maxlevel, $level, $position); wait; } else { $position = 1 | ($position << 1); TryCollectProfile($maxlevel, $level, $position); cleanup(); exit(0); } } # Collect a single profile sub TryCollectProfile { my $maxlevel = shift; my $level = shift; my $position = shift; if ($level >= ($maxlevel - 1)) { if ($position < scalar(@main::pfile_args)) { FetchDynamicProfile($main::prog, $main::pfile_args[$position], 0, 0); } } else { FetchDynamicProfilesRecurse($maxlevel, $level+1, $position); } } ##### Parsing code ##### # Provide a small streaming-read module to handle very large # cpu-profile files. Stream in chunks along a sliding window. # Provides an interface to get one 'slot', correctly handling # endian-ness differences. A slot is one 32-bit or 64-bit word # (depending on the input profile). We tell endianness and bit-size # for the profile by looking at the first 8 bytes: in cpu profiles, # the second slot is always 3 (we'll accept anything that's not 0). BEGIN { package CpuProfileStream; sub new { my ($class, $file, $fname) = @_; my $self = { file => $file, base => 0, stride => 512 * 1024, # must be a multiple of bitsize/8 slots => [], unpack_code => "", # N for big-endian, V for little perl_is_64bit => 1, # matters if profile is 64-bit }; bless $self, $class; # Let unittests adjust the stride if ($main::opt_test_stride > 0) { $self->{stride} = $main::opt_test_stride; } # Read the first two slots to figure out bitsize and endianness. my $slots = $self->{slots}; my $str; read($self->{file}, $str, 8); # Set the global $address_length based on what we see here. # 8 is 32-bit (8 hexadecimal chars); 16 is 64-bit (16 hexadecimal chars). $address_length = ($str eq (chr(0)x8)) ? 16 : 8; if ($address_length == 8) { if (substr($str, 6, 2) eq chr(0)x2) { $self->{unpack_code} = 'V'; # Little-endian. } elsif (substr($str, 4, 2) eq chr(0)x2) { $self->{unpack_code} = 'N'; # Big-endian } else { ::error("$fname: header size >= 2**16\n"); } @$slots = unpack($self->{unpack_code} . 
"*", $str); } else { # If we're a 64-bit profile, check if we're a 64-bit-capable # perl. Otherwise, each slot will be represented as a float # instead of an int64, losing precision and making all the # 64-bit addresses wrong. We won't complain yet, but will # later if we ever see a value that doesn't fit in 32 bits. my $has_q = 0; eval { $has_q = pack("Q", "1") ? 1 : 1; }; if (!$has_q) { $self->{perl_is_64bit} = 0; } read($self->{file}, $str, 8); if (substr($str, 4, 4) eq chr(0)x4) { # We'd love to use 'Q', but it's a) not universal, b) not endian-proof. $self->{unpack_code} = 'V'; # Little-endian. } elsif (substr($str, 0, 4) eq chr(0)x4) { $self->{unpack_code} = 'N'; # Big-endian } else { ::error("$fname: header size >= 2**32\n"); } my @pair = unpack($self->{unpack_code} . "*", $str); # Since we know one of the pair is 0, it's fine to just add them. @$slots = (0, $pair[0] + $pair[1]); } return $self; } # Load more data when we access slots->get(X) which is not yet in memory. sub overflow { my ($self) = @_; my $slots = $self->{slots}; $self->{base} += $#$slots + 1; # skip over data we're replacing my $str; read($self->{file}, $str, $self->{stride}); if ($address_length == 8) { # the 32-bit case # This is the easy case: unpack provides 32-bit unpacking primitives. @$slots = unpack($self->{unpack_code} . "*", $str); } else { # We need to unpack 32 bits at a time and combine. my @b32_values = unpack($self->{unpack_code} . "*", $str); my @b64_values = (); for (my $i = 0; $i < $#b32_values; $i += 2) { # TODO(csilvers): if this is a 32-bit perl, the math below # could end up in a too-large int, which perl will promote # to a double, losing necessary precision. Deal with that. # Right now, we just die. my ($lo, $hi) = ($b32_values[$i], $b32_values[$i+1]); if ($self->{unpack_code} eq 'N') { # big-endian ($lo, $hi) = ($hi, $lo); } my $value = $lo + $hi * (2**32); if (!$self->{perl_is_64bit} && # check value is exactly represented (($value % (2**32)) != $lo || int($value / (2**32)) != $hi)) { ::error("Need a 64-bit perl to process this 64-bit profile.\n"); } push(@b64_values, $value); } @$slots = @b64_values; } } # Access the i-th long in the file (logically), or -1 at EOF. sub get { my ($self, $idx) = @_; my $slots = $self->{slots}; while ($#$slots >= 0) { if ($idx < $self->{base}) { # The only time we expect a reference to $slots[$i - something] # after referencing $slots[$i] is reading the very first header. # Since $stride > |header|, that shouldn't cause any lookback # errors. And everything after the header is sequential. print STDERR "Unexpected look-back reading CPU profile"; return -1; # shrug, don't know what better to return } elsif ($idx > $self->{base} + $#$slots) { $self->overflow(); } else { return $slots->[$idx - $self->{base}]; } } # If we get here, $slots is [], which means we've reached EOF return -1; # unique since slots is supposed to hold unsigned numbers } } # Reads the top, 'header' section of a profile, and returns the last # line of the header, commonly called a 'header line'. The header # section of a profile consists of zero or more 'command' lines that # are instructions to jeprof, which jeprof executes when reading the # header. All 'command' lines start with a %. After the command # lines is the 'header line', which is a profile-specific line that # indicates what type of profile it is, and perhaps other global # information about the profile. 
# For instance, here's a header line
# for a heap profile:
#     heap profile:   53:    38236 [  5525:  1284029] @ heapprofile
# For historical reasons, the CPU profile does not contain a text-
# readable header line.  If the profile looks like a CPU profile,
# this function returns "".  If no header line could be found, this
# function returns undef.
#
# The following commands are recognized:
#   %warn -- emit the rest of this line to stderr, prefixed by 'WARNING:'
#
# The input file should be in binmode.
sub ReadProfileHeader {
  local *PROFILE = shift;
  my $firstchar = "";
  my $line = "";
  read(PROFILE, $firstchar, 1);
  seek(PROFILE, -1, 1);               # unread the firstchar
  if ($firstchar !~ /[[:print:]]/) {  # is not a text character
    return "";
  }
  while (defined($line = <PROFILE>)) {
    $line =~ s/\r//g;    # turn windows-looking lines into unix-looking lines
    if ($line =~ /^%warn\s+(.*)/) {   # 'warn' command
      # Note this matches both '%warn blah\n' and '%warn\n'.
      print STDERR "WARNING: $1\n";   # print the rest of the line
    } elsif ($line =~ /^%/) {
      print STDERR "Ignoring unknown command from profile header: $line";
    } else {
      # End of commands, must be the header line.
      return $line;
    }
  }
  return undef;     # got to EOF without seeing a header line
}

sub IsSymbolizedProfileFile {
  my $file_name = shift;
  if (!(-e $file_name) || !(-r $file_name)) {
    return 0;
  }
  # Check if the file contains a symbol-section marker.
  open(TFILE, "<$file_name");
  binmode TFILE;
  my $firstline = ReadProfileHeader(*TFILE);
  close(TFILE);
  if (!$firstline) {
    return 0;
  }
  $SYMBOL_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
  my $symbol_marker = $&;
  return $firstline =~ /^--- *$symbol_marker/;
}

# Parse profile generated by common/profiler.cc and return a reference
# to a map:
#      $result->{version}     Version number of profile file
#      $result->{period}      Sampling period (in microseconds)
#      $result->{profile}     Profile object
#      $result->{threads}     Map of thread IDs to profile objects
#      $result->{map}         Memory map info from profile
#      $result->{pcs}         Hash of all PC values seen, key is hex address
sub ReadProfile {
  my $prog = shift;
  my $fname = shift;
  my $result;               # return value

  $CONTENTION_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
  my $contention_marker = $&;
  $GROWTH_PAGE =~ m,[^/]+$,;        # matches everything after the last slash
  my $growth_marker = $&;
  $SYMBOL_PAGE =~ m,[^/]+$,;        # matches everything after the last slash
  my $symbol_marker = $&;
  $PROFILE_PAGE =~ m,[^/]+$,;       # matches everything after the last slash
  my $profile_marker = $&;

  # Look at first line to see if it is a heap or a CPU profile.
  # CPU profile may start with no header at all, and just binary data
  # (starting with \0\0\0\0) -- in that case, don't try to read the
  # whole firstline, since it may be gigabytes(!) of data.
  open(PROFILE, "<$fname") || error("$fname: $!\n");
  binmode PROFILE;      # New perls do UTF-8 processing
  my $header = ReadProfileHeader(*PROFILE);
  if (!defined($header)) {   # means "at EOF"
    error("Profile is empty.\n");
  }

  my $symbols;
  if ($header =~ m/^--- *$symbol_marker/o) {
    # Verify that the user asked for a symbolized profile
    if (!$main::use_symbolized_profile) {
      # we have both a binary and symbolized profiles, abort
      error("FATAL ERROR: Symbolized profile\n   $fname\ncannot be used with " .
            "a binary arg. Try again without passing\n   $prog\n");
    }
    # Read the symbol section of the symbolized profile file.
    $symbols = ReadSymbols(*PROFILE{IO});
    # Read the next line to get the header for the remaining profile.
$header = ReadProfileHeader(*PROFILE) || ""; } $main::profile_type = ''; if ($header =~ m/^heap profile:.*$growth_marker/o) { $main::profile_type = 'growth'; $result = ReadHeapProfile($prog, *PROFILE, $header); } elsif ($header =~ m/^heap profile:/) { $main::profile_type = 'heap'; $result = ReadHeapProfile($prog, *PROFILE, $header); } elsif ($header =~ m/^heap/) { $main::profile_type = 'heap'; $result = ReadThreadedHeapProfile($prog, $fname, $header); } elsif ($header =~ m/^--- *$contention_marker/o) { $main::profile_type = 'contention'; $result = ReadSynchProfile($prog, *PROFILE); } elsif ($header =~ m/^--- *Stacks:/) { print STDERR "Old format contention profile: mistakenly reports " . "condition variable signals as lock contentions.\n"; $main::profile_type = 'contention'; $result = ReadSynchProfile($prog, *PROFILE); } elsif ($header =~ m/^--- *$profile_marker/) { # the binary cpu profile data starts immediately after this line $main::profile_type = 'cpu'; $result = ReadCPUProfile($prog, $fname, *PROFILE); } else { if (defined($symbols)) { # a symbolized profile contains a format we don't recognize, bail out error("$fname: Cannot recognize profile section after symbols.\n"); } # no ascii header present -- must be a CPU profile $main::profile_type = 'cpu'; $result = ReadCPUProfile($prog, $fname, *PROFILE); } close(PROFILE); # if we got symbols along with the profile, return those as well if (defined($symbols)) { $result->{symbols} = $symbols; } return $result; } # Subtract one from caller pc so we map back to call instr. # However, don't do this if we're reading a symbolized profile # file, in which case the subtract-one was done when the file # was written. # # We apply the same logic to all readers, though ReadCPUProfile uses an # independent implementation. sub FixCallerAddresses { my $stack = shift; if ($main::use_symbolized_profile) { return $stack; } else { $stack =~ /(\s)/; my $delimiter = $1; my @addrs = split(' ', $stack); my @fixedaddrs; $#fixedaddrs = $#addrs; if ($#addrs >= 0) { $fixedaddrs[0] = $addrs[0]; } for (my $i = 1; $i <= $#addrs; $i++) { $fixedaddrs[$i] = AddressSub($addrs[$i], "0x1"); } return join $delimiter, @fixedaddrs; } } # CPU profile reader sub ReadCPUProfile { my $prog = shift; my $fname = shift; # just used for logging local *PROFILE = shift; my $version; my $period; my $i; my $profile = {}; my $pcs = {}; # Parse string into array of slots. my $slots = CpuProfileStream->new(*PROFILE, $fname); # Read header. The current header version is a 5-element structure # containing: # 0: header count (always 0) # 1: header "words" (after this one: 3) # 2: format version (0) # 3: sampling period (usec) # 4: unused padding (always 0) if ($slots->get(0) != 0 ) { error("$fname: not a profile file, or old format profile file\n"); } $i = 2 + $slots->get(1); $version = $slots->get(2); $period = $slots->get(3); # Do some sanity checking on these header values. if ($version > (2**32) || $period > (2**32) || $i > (2**32) || $i < 5) { error("$fname: not a profile file, or corrupted profile file\n"); } # Parse profile while ($slots->get($i) != -1) { my $n = $slots->get($i++); my $d = $slots->get($i++); if ($d > (2**16)) { # TODO(csilvers): what's a reasonable max-stack-depth? my $addr = sprintf("0%o", $i * ($address_length == 8 ? 
                                      4 : 8));
      print STDERR "At index $i (address $addr):\n";
      error("$fname: stack trace depth >= 2**32\n");
    }
    if ($slots->get($i) == 0) {
      # End of profile data marker
      $i += $d;
      last;
    }

    # Make key out of the stack entries
    my @k = ();
    for (my $j = 0; $j < $d; $j++) {
      my $pc = $slots->get($i+$j);
      # Subtract one from caller pc so we map back to call instr.
      # However, don't do this if we're reading a symbolized profile
      # file, in which case the subtract-one was done when the file
      # was written.
      if ($j > 0 && !$main::use_symbolized_profile) {
        $pc--;
      }
      $pc = sprintf("%0*x", $address_length, $pc);
      $pcs->{$pc} = 1;
      push @k, $pc;
    }

    AddEntry($profile, (join "\n", @k), $n);
    $i += $d;
  }

  # Parse map
  my $map = '';
  seek(PROFILE, $i * 4, 0);
  read(PROFILE, $map, (stat PROFILE)[7]);

  my $r = {};
  $r->{version} = $version;
  $r->{period} = $period;
  $r->{profile} = $profile;
  $r->{libs} = ParseLibraries($prog, $map, $pcs);
  $r->{pcs} = $pcs;

  return $r;
}

sub HeapProfileIndex {
  my $index = 1;
  if ($main::opt_inuse_space) {
    $index = 1;
  } elsif ($main::opt_inuse_objects) {
    $index = 0;
  } elsif ($main::opt_alloc_space) {
    $index = 3;
  } elsif ($main::opt_alloc_objects) {
    $index = 2;
  }
  return $index;
}

sub ReadMappedLibraries {
  my $fh = shift;
  my $map = "";
  # Read the /proc/self/maps data
  while (<$fh>) {
    s/\r//g;                # turn windows-looking lines into unix-looking lines
    $map .= $_;
  }
  return $map;
}

sub ReadMemoryMap {
  my $fh = shift;
  my $map = "";
  # Read /proc/self/maps data as formatted by DumpAddressMap()
  my $buildvar = "";
  while (<$fh>) {
    s/\r//g;                # turn windows-looking lines into unix-looking lines
    # Parse "build=<dir>" specification if supplied
    if (m/^\s*build=(.*)\n/) {
      $buildvar = $1;
    }
    # Expand "$build" variable if available
    $_ =~ s/\$build\b/$buildvar/g;
    $map .= $_;
  }
  return $map;
}

sub AdjustSamples {
  my ($sample_adjustment, $sampling_algorithm, $n1, $s1, $n2, $s2) = @_;
  if ($sample_adjustment) {
    if ($sampling_algorithm == 2) {
      # Remote-heap version 2
      # The sampling frequency is the rate of a Poisson process.
      # This means that the probability of sampling an allocation of
      # size X with sampling rate Y is 1 - exp(-X/Y)
      if ($n1 != 0) {
        my $ratio = (($s1*1.0)/$n1)/($sample_adjustment);
        my $scale_factor = 1/(1 - exp(-$ratio));
        $n1 *= $scale_factor;
        $s1 *= $scale_factor;
      }
      if ($n2 != 0) {
        my $ratio = (($s2*1.0)/$n2)/($sample_adjustment);
        my $scale_factor = 1/(1 - exp(-$ratio));
        $n2 *= $scale_factor;
        $s2 *= $scale_factor;
      }
    } else {
      # Remote-heap version 1
      my $ratio;
      $ratio = (($s1*1.0)/$n1)/($sample_adjustment);
      if ($ratio < 1) {
        $n1 /= $ratio;
        $s1 /= $ratio;
      }
      $ratio = (($s2*1.0)/$n2)/($sample_adjustment);
      if ($ratio < 1) {
        $n2 /= $ratio;
        $s2 /= $ratio;
      }
    }
  }
  return ($n1, $s1, $n2, $s2);
}

sub ReadHeapProfile {
  my $prog = shift;
  local *PROFILE = shift;
  my $header = shift;

  my $index = HeapProfileIndex();

  # Find the type of this profile.  The header line looks like:
  #    heap profile:   1246:  8800744 [  1246:  8800744] @ <heap-url>/266053
  # There are two pairs <count: size>, the first inuse objects/space, and the
  # second allocated objects/space.  This is followed optionally by a profile
  # type, and if that is present, optionally by a sampling frequency.
  # For remote heap profiles (v1):
  # The interpretation of the sampling frequency is that the profiler, for
  # each sample, calculates a uniformly distributed random integer less than
  # the given value, and records the next sample after that many bytes have
  # been allocated.  Therefore, the expected sample interval is half of the
  # given frequency.  By default, if not specified, the expected sample
  # interval is 128KB.  Only remote-heap-page profiles are adjusted for
  # sample size.
  # For remote heap profiles (v2):
  # The sampling frequency is the rate of a Poisson process. This means that
  # the probability of sampling an allocation of size X with sampling rate Y
  # is 1 - exp(-X/Y)
  # For version 2, a typical header line might look like this:
  # heap profile:   1922: 127792360 [  1922: 127792360] @ <heap-url>_v2/524288
  # the trailing number (524288) is the sampling rate. (Version 1 showed
  # double the 'rate' here)
  my $sampling_algorithm = 0;
  my $sample_adjustment = 0;
  chomp($header);
  my $type = "unknown";
  if ($header =~ m"^heap profile:\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\](\s*@\s*([^/]*)(/(\d+))?)?") {
    if (defined($6) && ($6 ne '')) {
      $type = $6;
      my $sample_period = $8;
      # $type is "heapprofile" for profiles generated by the
      # heap-profiler, and either "heap" or "heap_v2" for profiles
      # generated by sampling directly within tcmalloc.  It can also
      # be "growth" for heap-growth profiles.  The first is typically
      # found for profiles generated locally, and the others for
      # remote profiles.
      if (($type eq "heapprofile") || ($type !~ /heap/) ) {
        # No need to adjust for the sampling rate with heap-profiler-derived data
        $sampling_algorithm = 0;
      } elsif ($type =~ /_v2/) {
        $sampling_algorithm = 2;     # version 2 sampling
        if (defined($sample_period) && ($sample_period ne '')) {
          $sample_adjustment = int($sample_period);
        }
      } else {
        $sampling_algorithm = 1;     # version 1 sampling
        if (defined($sample_period) && ($sample_period ne '')) {
          $sample_adjustment = int($sample_period)/2;
        }
      }
    } else {
      # We detect whether or not this is a remote-heap profile by checking
      # that the total-allocated stats ($n2,$s2) are exactly the
      # same as the in-use stats ($n1,$s1).  It is remotely conceivable
      # that a non-remote-heap profile may pass this check, but it is hard
      # to imagine how that could happen.
      # In this case it's so old it's guaranteed to be remote-heap version 1.
      my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4);
      if (($n1 == $n2) && ($s1 == $s2)) {
        # This is likely to be a remote-heap based sample profile
        $sampling_algorithm = 1;
      }
    }
  }

  if ($sampling_algorithm > 0) {
    # For remote-heap generated profiles, adjust the counts and sizes to
    # account for the sample rate (we sample once every 128KB by default).
    if ($sample_adjustment == 0) {
      # Turn on profile adjustment.
      $sample_adjustment = 128*1024;
      print STDERR "Adjusting heap profiles for 1-in-128KB sampling rate\n";
    } else {
      printf STDERR ("Adjusting heap profiles for 1-in-%d sampling rate\n",
                     $sample_adjustment);
    }
    if ($sampling_algorithm > 1) {
      # We don't bother printing anything for the original version (version 1)
      printf STDERR "Heap version $sampling_algorithm\n";
    }
  }

  my $profile = {};
  my $pcs = {};
  my $map = "";

  while (<PROFILE>) {
    s/\r//g;                # turn windows-looking lines into unix-looking lines
    if (/^MAPPED_LIBRARIES:/) {
      $map .= ReadMappedLibraries(*PROFILE);
      last;
    }

    if (/^--- Memory map:/) {
      $map .= ReadMemoryMap(*PROFILE);
      last;
    }

    # Read entry of the form:
    #  <count1>: <bytes1> [<count2>: <bytes2>] @ a1 a2 a3 ... an
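    # (Illustrative example, not part of the original script: a line like
    #      6:  2048 [ 12:  4096] @ 0x4005f0 0x400123
    # carries inuse objects/bytes and allocated objects/bytes for one stack;
    # HeapProfileIndex() selects which of the four adjusted values gets
    # charged to that stack below.)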
    s/^\s*//;
    s/\s*$//;
    if (m/^\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]\s+@\s+(.*)$/) {
      my $stack = $5;
      my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4);
      my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm,
                                 $n1, $s1, $n2, $s2);
      AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]);
    }
  }

  my $r = {};
  $r->{version} = "heap";
  $r->{period} = 1;
  $r->{profile} = $profile;
  $r->{libs} = ParseLibraries($prog, $map, $pcs);
  $r->{pcs} = $pcs;
  return $r;
}

sub ReadThreadedHeapProfile {
  my ($prog, $fname, $header) = @_;

  my $index = HeapProfileIndex();
  my $sampling_algorithm = 0;
  my $sample_adjustment = 0;
  chomp($header);
  my $type = "unknown";
  # Assuming a very specific type of header for now.
  if ($header =~ m"^heap_v2/(\d+)") {
    $type = "_v2";
    $sampling_algorithm = 2;
    $sample_adjustment = int($1);
  }
  if ($type ne "_v2" || !defined($sample_adjustment)) {
    die "Threaded heap profiles require v2 sampling with a sample rate\n";
  }

  my $profile = {};
  my $thread_profiles = {};
  my $pcs = {};
  my $map = "";
  my $stack = "";

  while (<PROFILE>) {
    s/\r//g;
    if (/^MAPPED_LIBRARIES:/) {
      $map .= ReadMappedLibraries(*PROFILE);
      last;
    }

    if (/^--- Memory map:/) {
      $map .= ReadMemoryMap(*PROFILE);
      last;
    }

    # Read entry of the form:
    # @ a1 a2 ... an
    #   t*: <count1>: <bytes1> [<count2>: <bytes2>]
    #   t1: <count1>: <bytes1> [<count2>: <bytes2>]
    #     ...
    #   tn: <count1>: <bytes1> [<count2>: <bytes2>]
    s/^\s*//;
    s/\s*$//;
    if (m/^@\s+(.*)$/) {
      $stack = $1;
    } elsif (m/^\s*(t(\*|\d+)):\s+(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]$/) {
      if ($stack eq "") {
        # Still in the header, so this is just a per-thread summary.
        next;
      }
      my $thread = $2;
      my ($n1, $s1, $n2, $s2) = ($3, $4, $5, $6);
      my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm,
                                 $n1, $s1, $n2, $s2);
      if ($thread eq "*") {
        AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]);
      } else {
        if (!exists($thread_profiles->{$thread})) {
          $thread_profiles->{$thread} = {};
        }
        AddEntries($thread_profiles->{$thread}, $pcs,
                   FixCallerAddresses($stack), $counts[$index]);
      }
    }
  }

  my $r = {};
  $r->{version} = "heap";
  $r->{period} = 1;
  $r->{profile} = $profile;
  $r->{threads} = $thread_profiles;
  $r->{libs} = ParseLibraries($prog, $map, $pcs);
  $r->{pcs} = $pcs;
  return $r;
}

sub ReadSynchProfile {
  my $prog = shift;
  local *PROFILE = shift;
  my $header = shift;

  my $map = '';
  my $profile = {};
  my $pcs = {};
  my $sampling_period = 1;
  my $cyclespernanosec = 2.8;   # Default assumption for old binaries
  my $seen_clockrate = 0;
  my $line;

  my $index = 0;
  if ($main::opt_total_delay) {
    $index = 0;
  } elsif ($main::opt_contentions) {
    $index = 1;
  } elsif ($main::opt_mean_delay) {
    $index = 2;
  }

  while ( $line = <PROFILE> ) {
    $line =~ s/\r//g;       # turn windows-looking lines into unix-looking lines
    if ( $line =~ /^\s*(\d+)\s+(\d+) \@\s*(.*?)\s*$/ ) {
      my ($cycles, $count, $stack) = ($1, $2, $3);

      # Convert cycles to nanoseconds
      $cycles /= $cyclespernanosec;

      # Adjust for sampling done by application
      $cycles *= $sampling_period;
      $count *= $sampling_period;

      my @values = ($cycles, $count, $cycles / $count);
      AddEntries($profile, $pcs, FixCallerAddresses($stack), $values[$index]);
    } elsif ( $line =~ /^(slow release).*thread \d+ \@\s*(.*?)\s*$/ ||
              $line =~ /^\s*(\d+) \@\s*(.*?)\s*$/ ) {
      my ($cycles, $stack) = ($1, $2);
      if ($cycles !~ /^\d+$/) {
        next;
      }

      # Convert cycles to nanoseconds
      $cycles /= $cyclespernanosec;

      # Adjust for sampling done by application
      $cycles *= $sampling_period;

      AddEntries($profile, $pcs, FixCallerAddresses($stack), $cycles);

    } elsif ( $line =~ m/^([a-z][^=]*)=(.*)$/ ) {
      my ($variable, $value) = ($1,$2);
      for ($variable, $value) {
        s/^\s+//;
        s/\s+$//;
      }
      if ($variable eq "cycles/second") {
        $cyclespernanosec = $value / 1e9;
        $seen_clockrate = 1;
      } elsif ($variable eq "sampling period") {
        $sampling_period = $value;
      } elsif ($variable eq "ms since reset") {
        # Currently nothing is done with this value in jeprof
        # So we just silently ignore it for now
      } elsif ($variable eq "discarded samples") {
        # Currently nothing is done with this value in jeprof
        # So we just silently ignore it for now
      } else {
        printf STDERR ("Ignoring unknown variable in /contention output: " .
                       "'%s' = '%s'\n",$variable,$value);
      }
    } else {
      # Memory map entry
      $map .= $line;
    }
  }

  if (!$seen_clockrate) {
    printf STDERR ("No cycles/second entry in profile; Guessing %.1f GHz\n",
                   $cyclespernanosec);
  }

  my $r = {};
  $r->{version} = 0;
  $r->{period} = $sampling_period;
  $r->{profile} = $profile;
  $r->{libs} = ParseLibraries($prog, $map, $pcs);
  $r->{pcs} = $pcs;
  return $r;
}

# Given a hex value in the form "0x1abcd" or "1abcd", return either
# "0001abcd" or "000000000001abcd", depending on the current (global)
# address length.
sub HexExtend {
  my $addr = shift;

  $addr =~ s/^(0x)?0*//;
  my $zeros_needed = $address_length - length($addr);
  if ($zeros_needed < 0) {
    printf STDERR "Warning: address $addr is longer than address length $address_length\n";
    return $addr;
  }
  return ("0" x $zeros_needed) . $addr;
}

##### Symbol extraction #####

# Aggressively search the lib_prefix values for the given library
# If all else fails, just return the name of the library unmodified.
# If the lib_prefix is "/my/path,/other/path" and $file is "/lib/dir/mylib.so"
# it will search the following locations in this order, until it finds a file:
#   /my/path/lib/dir/mylib.so
#   /other/path/lib/dir/mylib.so
#   /my/path/dir/mylib.so
#   /other/path/dir/mylib.so
#   /my/path/mylib.so
#   /other/path/mylib.so
#   /lib/dir/mylib.so  (returned as last resort)
sub FindLibrary {
  my $file = shift;
  my $suffix = $file;

  # Search for the library as described above
  do {
    foreach my $prefix (@prefix_list) {
      my $fullpath = $prefix . $suffix;
      if (-e $fullpath) {
        return $fullpath;
      }
    }
  } while ($suffix =~ s|^/[^/]+/|/|);
  return $file;
}

# Return path to library with debugging symbols.
# For libc libraries, the copy in /usr/lib/debug contains debugging symbols
sub DebuggingLibrary {
  my $file = shift;
  if ($file =~ m|^/|) {
    if (-f "/usr/lib/debug$file") {
      return "/usr/lib/debug$file";
    } elsif (-f "/usr/lib/debug$file.debug") {
      return "/usr/lib/debug$file.debug";
    }
  }
  return undef;
}

# Parse text section header of a library using objdump
sub ParseTextSectionHeaderFromObjdump {
  my $lib = shift;

  my $size = undef;
  my $vma;
  my $file_offset;
  # Get objdump output from the library file to figure out how to
  # map between mapped addresses and addresses in the library.
  my $cmd = ShellEscape($obj_tool_map{"objdump"}, "-h", $lib);
  open(OBJDUMP, "$cmd |") || error("$cmd: $!\n");
  while (<OBJDUMP>) {
    s/\r//g;                # turn windows-looking lines into unix-looking lines
    # Idx Name          Size      VMA       LMA       File off  Algn
    #  10 .text         00104b2c  420156f0  420156f0  000156f0  2**4
    # For 64-bit objects, VMA and LMA will be 16 hex digits, size and file
    # offset may still be 8.  But AddressSub below will still handle that.
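    # (Illustrative note, not part of the original script: for the sample
    # ".text" line above, split() yields $x[1] = ".text", $x[2] = "00104b2c"
    # (size), $x[3] = "420156f0" (vma) and $x[5] = "000156f0" (file offset),
    # which is exactly what is recorded below.)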
    my @x = split;
    if (($#x >= 6) && ($x[1] eq '.text')) {
      $size = $x[2];
      $vma = $x[3];
      $file_offset = $x[5];
      last;
    }
  }
  close(OBJDUMP);

  if (!defined($size)) {
    return undef;
  }

  my $r = {};
  $r->{size} = $size;
  $r->{vma} = $vma;
  $r->{file_offset} = $file_offset;

  return $r;
}

# Parse text section header of a library using otool (on OS X)
sub ParseTextSectionHeaderFromOtool {
  my $lib = shift;

  my $size = undef;
  my $vma = undef;
  my $file_offset = undef;
  # Get otool output from the library file to figure out how to
  # map between mapped addresses and addresses in the library.
  my $command = ShellEscape($obj_tool_map{"otool"}, "-l", $lib);
  open(OTOOL, "$command |") || error("$command: $!\n");
  my $cmd = "";
  my $sectname = "";
  my $segname = "";
  foreach my $line (<OTOOL>) {
    $line =~ s/\r//g;       # turn windows-looking lines into unix-looking lines
    # Load command <#>
    #  cmd LC_SEGMENT
    # [...]
    # Section
    #  sectname __text
    #   segname __TEXT
    #      addr 0x000009f8
    #      size 0x00018b9e
    #    offset 2552
    #     align 2^2 (4)
    # We will need to strip off the leading 0x from the hex addresses,
    # and convert the offset into hex.
    if ($line =~ /Load command/) {
      $cmd = "";
      $sectname = "";
      $segname = "";
    } elsif ($line =~ /Section/) {
      $sectname = "";
      $segname = "";
    } elsif ($line =~ /cmd (\w+)/) {
      $cmd = $1;
    } elsif ($line =~ /sectname (\w+)/) {
      $sectname = $1;
    } elsif ($line =~ /segname (\w+)/) {
      $segname = $1;
    } elsif (!(($cmd eq "LC_SEGMENT" || $cmd eq "LC_SEGMENT_64") &&
               $sectname eq "__text" &&
               $segname eq "__TEXT")) {
      next;
    } elsif ($line =~ /\baddr 0x([0-9a-fA-F]+)/) {
      $vma = $1;
    } elsif ($line =~ /\bsize 0x([0-9a-fA-F]+)/) {
      $size = $1;
    } elsif ($line =~ /\boffset ([0-9]+)/) {
      $file_offset = sprintf("%016x", $1);
    }
    if (defined($vma) && defined($size) && defined($file_offset)) {
      last;
    }
  }
  close(OTOOL);

  if (!defined($vma) || !defined($size) || !defined($file_offset)) {
    return undef;
  }

  my $r = {};
  $r->{size} = $size;
  $r->{vma} = $vma;
  $r->{file_offset} = $file_offset;

  return $r;
}

sub ParseTextSectionHeader {
  # obj_tool_map("otool") is only defined if we're in a Mach-O environment
  if (defined($obj_tool_map{"otool"})) {
    my $r = ParseTextSectionHeaderFromOtool(@_);
    if (defined($r)){
      return $r;
    }
  }
  # If otool doesn't work, or we don't have it, fall back to objdump
  return ParseTextSectionHeaderFromObjdump(@_);
}

# Split /proc/pid/maps dump into a list of libraries
sub ParseLibraries {
  return if $main::use_symbol_page;  # We don't need libraries info.
  my $prog = shift;
  my $map = shift;
  my $pcs = shift;

  my $result = [];
  my $h = "[a-f0-9]+";
  my $zero_offset = HexExtend("0");

  my $buildvar = "";
  foreach my $l (split("\n", $map)) {
    if ($l =~ m/^\s*build=(.*)$/) {
      $buildvar = $1;
    }

    my $start;
    my $finish;
    my $offset;
    my $lib;
    if ($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+\.(so|dll|dylib|bundle)((\.\d+)+\w*(\.\d+){0,3})?)$/i) {
      # Full line from /proc/self/maps.  Example:
      # 40000000-40015000 r-xp 00000000 03:01 12845071 /lib/ld-2.3.2.so
      $start = HexExtend($1);
      $finish = HexExtend($2);
      $offset = HexExtend($3);
      $lib = $4;
      $lib =~ s|\\|/|g;     # turn windows-style paths into unix-style paths
    } elsif ($l =~ /^\s*($h)-($h):\s*(\S+\.so(\.\d+)*)/) {
      # Cooked line from DumpAddressMap.  Example:
      #   40000000-40015000: /lib/ld-2.3.2.so
      $start = HexExtend($1);
      $finish = HexExtend($2);
      $offset = $zero_offset;
      $lib = $3;
    }
    # FreeBSD 10.0 virtual memory map /proc/curproc/map as defined in
    # function procfs_doprocmap (sys/fs/procfs/procfs_map.c)
    #
    # Example:
    # 0x800600000 0x80061a000 26 0 0xfffff800035a0000 r-x 75 33 0x1004 COW NC vnode /libexec/ld-elf.s
    # o.1 NCH -1
    elsif ($l =~ /^(0x$h)\s(0x$h)\s\d+\s\d+\s0x$h\sr-x\s\d+\s\d+\s0x\d+\s(COW|NCO)\s(NC|NNC)\svnode\s(\S+\.so(\.\d+)*)/) {
      $start = HexExtend($1);
      $finish = HexExtend($2);
      $offset = $zero_offset;
      $lib = FindLibrary($5);
    } else {
      next;
    }

    # Expand "$build" variable if available
    $lib =~ s/\$build\b/$buildvar/g;

    $lib = FindLibrary($lib);

    # Check for pre-relocated libraries, which use pre-relocated symbol tables
    # and thus require adjusting the offset that we'll use to translate
    # VM addresses into symbol table addresses.
    # Only do this if we're not going to fetch the symbol table from a
    # debugging copy of the library.
    if (!DebuggingLibrary($lib)) {
      my $text = ParseTextSectionHeader($lib);
      if (defined($text)) {
        my $vma_offset = AddressSub($text->{vma}, $text->{file_offset});
        $offset = AddressAdd($offset, $vma_offset);
      }
    }

    if ($main::opt_debug) { printf STDERR "$start:$finish ($offset) $lib\n"; }
    push(@{$result}, [$lib, $start, $finish, $offset]);
  }

  # Append special entry for additional library (not relocated)
  if ($main::opt_lib ne "") {
    my $text = ParseTextSectionHeader($main::opt_lib);
    if (defined($text)) {
      my $start = $text->{vma};
      my $finish = AddressAdd($start, $text->{size});

      push(@{$result}, [$main::opt_lib, $start, $finish, $start]);
    }
  }

  # Append special entry for the main program.  This covers
  # 0..max_pc_value_seen, so that we assume pc values not found in one
  # of the library ranges will be treated as coming from the main
  # program binary.
  my $min_pc = HexExtend("0");
  my $max_pc = $min_pc;

  # find the maximal PC value in any sample
  foreach my $pc (keys(%{$pcs})) {
    if (HexExtend($pc) gt $max_pc) { $max_pc = HexExtend($pc); }
  }
  push(@{$result}, [$prog, $min_pc, $max_pc, $zero_offset]);

  return $result;
}

# Add two hex addresses of length $address_length.
# Run jeprof --test for unit test if this is changed.
sub AddressAdd {
  my $addr1 = shift;
  my $addr2 = shift;
  my $sum;

  if ($address_length == 8) {
    # Perl doesn't cope with wraparound arithmetic, so do it explicitly:
    $sum = (hex($addr1)+hex($addr2)) % (0x10000000 * 16);
    return sprintf("%08x", $sum);

  } else {
    # Do the addition in 7-nibble chunks to trivialize carry handling.

    if ($main::opt_debug and $main::opt_test) {
      print STDERR "AddressAdd $addr1 + $addr2 = ";
    }

    my $a1 = substr($addr1,-7);
    $addr1 = substr($addr1,0,-7);
    my $a2 = substr($addr2,-7);
    $addr2 = substr($addr2,0,-7);
    $sum = hex($a1) + hex($a2);
    my $c = 0;
    if ($sum > 0xfffffff) {
      $c = 1;
      $sum -= 0x10000000;
    }
    my $r = sprintf("%07x", $sum);

    $a1 = substr($addr1,-7);
    $addr1 = substr($addr1,0,-7);
    $a2 = substr($addr2,-7);
    $addr2 = substr($addr2,0,-7);
    $sum = hex($a1) + hex($a2) + $c;
    $c = 0;
    if ($sum > 0xfffffff) {
      $c = 1;
      $sum -= 0x10000000;
    }
    $r = sprintf("%07x", $sum) . $r;

    $sum = hex($addr1) + hex($addr2) + $c;
    if ($sum > 0xff) { $sum -= 0x100; }
    $r = sprintf("%02x", $sum) . $r;

    if ($main::opt_debug and $main::opt_test) { print STDERR "$r\n"; }

    return $r;
  }
}

# Subtract two hex addresses of length $address_length.
# Run jeprof --test for unit test if this is changed.
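# (Worked example, not part of the original script: with a 16-digit
# $address_length, AddressAdd("00000000ffffffff", "0000000000000001")
# splits each operand into 2+7+7 nibble chunks, carries out of the low
# chunk, and returns "0000000100000000"; AddressSub below reverses this
# by borrowing through the same chunks.)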
sub AddressSub { my $addr1 = shift; my $addr2 = shift; my $diff; if ($address_length == 8) { # Perl doesn't cope with wraparound arithmetic, so do it explicitly: $diff = (hex($addr1)-hex($addr2)) % (0x10000000 * 16); return sprintf("%08x", $diff); } else { # Do the addition in 7-nibble chunks to trivialize borrow handling. # if ($main::opt_debug) { print STDERR "AddressSub $addr1 - $addr2 = "; } my $a1 = hex(substr($addr1,-7)); $addr1 = substr($addr1,0,-7); my $a2 = hex(substr($addr2,-7)); $addr2 = substr($addr2,0,-7); my $b = 0; if ($a2 > $a1) { $b = 1; $a1 += 0x10000000; } $diff = $a1 - $a2; my $r = sprintf("%07x", $diff); $a1 = hex(substr($addr1,-7)); $addr1 = substr($addr1,0,-7); $a2 = hex(substr($addr2,-7)) + $b; $addr2 = substr($addr2,0,-7); $b = 0; if ($a2 > $a1) { $b = 1; $a1 += 0x10000000; } $diff = $a1 - $a2; $r = sprintf("%07x", $diff) . $r; $a1 = hex($addr1); $a2 = hex($addr2) + $b; if ($a2 > $a1) { $a1 += 0x100; } $diff = $a1 - $a2; $r = sprintf("%02x", $diff) . $r; # if ($main::opt_debug) { print STDERR "$r\n"; } return $r; } } # Increment a hex addresses of length $address_length. # Run jeprof --test for unit test if this is changed. sub AddressInc { my $addr = shift; my $sum; if ($address_length == 8) { # Perl doesn't cope with wraparound arithmetic, so do it explicitly: $sum = (hex($addr)+1) % (0x10000000 * 16); return sprintf("%08x", $sum); } else { # Do the addition in 7-nibble chunks to trivialize carry handling. # We are always doing this to step through the addresses in a function, # and will almost never overflow the first chunk, so we check for this # case and exit early. # if ($main::opt_debug) { print STDERR "AddressInc $addr1 = "; } my $a1 = substr($addr,-7); $addr = substr($addr,0,-7); $sum = hex($a1) + 1; my $r = sprintf("%07x", $sum); if ($sum <= 0xfffffff) { $r = $addr . $r; # if ($main::opt_debug) { print STDERR "$r\n"; } return HexExtend($r); } else { $r = "0000000"; } $a1 = substr($addr,-7); $addr = substr($addr,0,-7); $sum = hex($a1) + 1; $r = sprintf("%07x", $sum) . $r; if ($sum <= 0xfffffff) { $r = $addr . $r; # if ($main::opt_debug) { print STDERR "$r\n"; } return HexExtend($r); } else { $r = "00000000000000"; } $sum = hex($addr) + 1; if ($sum > 0xff) { $sum -= 0x100; } $r = sprintf("%02x", $sum) . $r; # if ($main::opt_debug) { print STDERR "$r\n"; } return $r; } } # Extract symbols for all PC values found in profile sub ExtractSymbols { my $libs = shift; my $pcset = shift; my $symbols = {}; # Map each PC value to the containing library. To make this faster, # we sort libraries by their starting pc value (highest first), and # advance through the libraries as we advance the pc. Sometimes the # addresses of libraries may overlap with the addresses of the main # binary, so to make sure the libraries 'win', we iterate over the # libraries in reverse order (which assumes the binary doesn't start # in the middle of a library, which seems a fair assumption). my @pcs = (sort { $a cmp $b } keys(%{$pcset})); # pcset is 0-extended strings foreach my $lib (sort {$b->[1] cmp $a->[1]} @{$libs}) { my $libname = $lib->[0]; my $start = $lib->[1]; my $finish = $lib->[2]; my $offset = $lib->[3]; # Use debug library if it exists my $debug_libname = DebuggingLibrary($libname); if ($debug_libname) { $libname = $debug_libname; } # Get list of pcs that belong in this library. my $contained = []; my ($start_pc_index, $finish_pc_index); # Find smallest finish_pc_index such that $finish < $pc[$finish_pc_index]. 
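  # (Illustrative note, not part of the original script: with sorted
  # @pcs ("10","20","30","40") and a library spanning $start = "15" to
  # $finish = "35", the two loops below leave $start_pc_index == 1 and
  # $finish_pc_index == 3, so the splice hands exactly "20" and "30" to
  # MapToSymbols for this library.)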
for ($finish_pc_index = $#pcs + 1; $finish_pc_index > 0; $finish_pc_index--) { last if $pcs[$finish_pc_index - 1] le $finish; } # Find smallest start_pc_index such that $start <= $pc[$start_pc_index]. for ($start_pc_index = $finish_pc_index; $start_pc_index > 0; $start_pc_index--) { last if $pcs[$start_pc_index - 1] lt $start; } # This keeps PC values higher than $pc[$finish_pc_index] in @pcs, # in case there are overlaps in libraries and the main binary. @{$contained} = splice(@pcs, $start_pc_index, $finish_pc_index - $start_pc_index); # Map to symbols MapToSymbols($libname, AddressSub($start, $offset), $contained, $symbols); } return $symbols; } # Map list of PC values to symbols for a given image sub MapToSymbols { my $image = shift; my $offset = shift; my $pclist = shift; my $symbols = shift; my $debug = 0; # Ignore empty binaries if ($#{$pclist} < 0) { return; } # Figure out the addr2line command to use my $addr2line = $obj_tool_map{"addr2line"}; my $cmd = ShellEscape($addr2line, "-f", "-C", "-e", $image); if (exists $obj_tool_map{"addr2line_pdb"}) { $addr2line = $obj_tool_map{"addr2line_pdb"}; $cmd = ShellEscape($addr2line, "--demangle", "-f", "-C", "-e", $image); } # If "addr2line" isn't installed on the system at all, just use # nm to get what info we can (function names, but not line numbers). if (system(ShellEscape($addr2line, "--help") . " >$dev_null 2>&1") != 0) { MapSymbolsWithNM($image, $offset, $pclist, $symbols); return; } # "addr2line -i" can produce a variable number of lines per input # address, with no separator that allows us to tell when data for # the next address starts. So we find the address for a special # symbol (_fini) and interleave this address between all real # addresses passed to addr2line. The name of this special symbol # can then be used as a separator. $sep_address = undef; # May be filled in by MapSymbolsWithNM() my $nm_symbols = {}; MapSymbolsWithNM($image, $offset, $pclist, $nm_symbols); if (defined($sep_address)) { # Only add " -i" to addr2line if the binary supports it. # addr2line --help returns 0, but not if it sees an unknown flag first. if (system("$cmd -i --help >$dev_null 2>&1") == 0) { $cmd .= " -i"; } else { $sep_address = undef; # no need for sep_address if we don't support -i } } # Make file with all PC values with intervening 'sep_address' so # that we can reliably detect the end of inlined function list open(ADDRESSES, ">$main::tmpfile_sym") || error("$main::tmpfile_sym: $!\n"); if ($debug) { print("---- $image ---\n"); } for (my $i = 0; $i <= $#{$pclist}; $i++) { # addr2line always reads hex addresses, and does not need '0x' prefix. if ($debug) { printf STDERR ("%s\n", $pclist->[$i]); } printf ADDRESSES ("%s\n", AddressSub($pclist->[$i], $offset)); if (defined($sep_address)) { printf ADDRESSES ("%s\n", $sep_address); } } close(ADDRESSES); if ($debug) { print("----\n"); system("cat", $main::tmpfile_sym); print("----\n"); system("$cmd < " . ShellEscape($main::tmpfile_sym)); print("----\n"); } open(SYMBOLS, "$cmd <" . ShellEscape($main::tmpfile_sym) . 
" |") || error("$cmd: $!\n"); my $count = 0; # Index in pclist while () { # Read fullfunction and filelineinfo from next pair of lines s/\r?\n$//g; my $fullfunction = $_; $_ = ; s/\r?\n$//g; my $filelinenum = $_; if (defined($sep_address) && $fullfunction eq $sep_symbol) { # Terminating marker for data for this address $count++; next; } $filelinenum =~ s|\\|/|g; # turn windows-style paths into unix-style paths my $pcstr = $pclist->[$count]; my $function = ShortFunctionName($fullfunction); my $nms = $nm_symbols->{$pcstr}; if (defined($nms)) { if ($fullfunction eq '??') { # nm found a symbol for us. $function = $nms->[0]; $fullfunction = $nms->[2]; } else { # MapSymbolsWithNM tags each routine with its starting address, # useful in case the image has multiple occurrences of this # routine. (It uses a syntax that resembles template paramters, # that are automatically stripped out by ShortFunctionName().) # addr2line does not provide the same information. So we check # if nm disambiguated our symbol, and if so take the annotated # (nm) version of the routine-name. TODO(csilvers): this won't # catch overloaded, inlined symbols, which nm doesn't see. # Better would be to do a check similar to nm's, in this fn. if ($nms->[2] =~ m/^\Q$function\E/) { # sanity check it's the right fn $function = $nms->[0]; $fullfunction = $nms->[2]; } } } # Prepend to accumulated symbols for pcstr # (so that caller comes before callee) my $sym = $symbols->{$pcstr}; if (!defined($sym)) { $sym = []; $symbols->{$pcstr} = $sym; } unshift(@{$sym}, $function, $filelinenum, $fullfunction); if ($debug) { printf STDERR ("%s => [%s]\n", $pcstr, join(" ", @{$sym})); } if (!defined($sep_address)) { # Inlining is off, so this entry ends immediately $count++; } } close(SYMBOLS); } # Use nm to map the list of referenced PCs to symbols. Return true iff we # are able to read procedure information via nm. sub MapSymbolsWithNM { my $image = shift; my $offset = shift; my $pclist = shift; my $symbols = shift; # Get nm output sorted by increasing address my $symbol_table = GetProcedureBoundaries($image, "."); if (!%{$symbol_table}) { return 0; } # Start addresses are already the right length (8 or 16 hex digits). my @names = sort { $symbol_table->{$a}->[0] cmp $symbol_table->{$b}->[0] } keys(%{$symbol_table}); if ($#names < 0) { # No symbols: just use addresses foreach my $pc (@{$pclist}) { my $pcstr = "0x" . $pc; $symbols->{$pc} = [$pcstr, "?", $pcstr]; } return 0; } # Sort addresses so we can do a join against nm output my $index = 0; my $fullname = $names[0]; my $name = ShortFunctionName($fullname); foreach my $pc (sort { $a cmp $b } @{$pclist}) { # Adjust for mapped offset my $mpc = AddressSub($pc, $offset); while (($index < $#names) && ($mpc ge $symbol_table->{$fullname}->[1])){ $index++; $fullname = $names[$index]; $name = ShortFunctionName($fullname); } if ($mpc lt $symbol_table->{$fullname}->[1]) { $symbols->{$pc} = [$name, "?", $fullname]; } else { my $pcstr = "0x" . 
$pc; $symbols->{$pc} = [$pcstr, "?", $pcstr]; } } return 1; } sub ShortFunctionName { my $function = shift; while ($function =~ s/\([^()]*\)(\s*const)?//g) { } # Argument types while ($function =~ s/<[^<>]*>//g) { } # Remove template arguments $function =~ s/^.*\s+(\w+::)/$1/; # Remove leading type return $function; } # Trim overly long symbols found in disassembler output sub CleanDisassembly { my $d = shift; while ($d =~ s/\([^()%]*\)(\s*const)?//g) { } # Argument types, not (%rax) while ($d =~ s/(\w+)<[^<>]*>/$1/g) { } # Remove template arguments return $d; } # Clean file name for display sub CleanFileName { my ($f) = @_; $f =~ s|^/proc/self/cwd/||; $f =~ s|^\./||; return $f; } # Make address relative to section and clean up for display sub UnparseAddress { my ($offset, $address) = @_; $address = AddressSub($address, $offset); $address =~ s/^0x//; $address =~ s/^0*//; return $address; } ##### Miscellaneous ##### # Find the right versions of the above object tools to use. The # argument is the program file being analyzed, and should be an ELF # 32-bit or ELF 64-bit executable file. The location of the tools # is determined by considering the following options in this order: # 1) --tools option, if set # 2) JEPROF_TOOLS environment variable, if set # 3) the environment sub ConfigureObjTools { my $prog_file = shift; # Check for the existence of $prog_file because /usr/bin/file does not # predictably return error status in prod. (-e $prog_file) || error("$prog_file does not exist.\n"); my $file_type = undef; if (-e "/usr/bin/file") { # Follow symlinks (at least for systems where "file" supports that). my $escaped_prog_file = ShellEscape($prog_file); $file_type = `/usr/bin/file -L $escaped_prog_file 2>$dev_null || /usr/bin/file $escaped_prog_file`; } elsif ($^O == "MSWin32") { $file_type = "MS Windows"; } else { print STDERR "WARNING: Can't determine the file type of $prog_file"; } if ($file_type =~ /64-bit/) { # Change $address_length to 16 if the program file is ELF 64-bit. # We can't detect this from many (most?) heap or lock contention # profiles, since the actual addresses referenced are generally in low # memory even for 64-bit programs. $address_length = 16; } if ($file_type =~ /MS Windows/) { # For windows, we provide a version of nm and addr2line as part of # the opensource release, which is capable of parsing # Windows-style PDB executables. It should live in the path, or # in the same directory as jeprof. $obj_tool_map{"nm_pdb"} = "nm-pdb"; $obj_tool_map{"addr2line_pdb"} = "addr2line-pdb"; } if ($file_type =~ /Mach-O/) { # OS X uses otool to examine Mach-O files, rather than objdump. $obj_tool_map{"otool"} = "otool"; $obj_tool_map{"addr2line"} = "false"; # no addr2line $obj_tool_map{"objdump"} = "false"; # no objdump } # Go fill in %obj_tool_map with the pathnames to use: foreach my $tool (keys %obj_tool_map) { $obj_tool_map{$tool} = ConfigureTool($obj_tool_map{$tool}); } } # Returns the path of a caller-specified object tool. If --tools or # JEPROF_TOOLS are specified, then returns the full path to the tool # with that prefix. Otherwise, returns the path unmodified (which # means we will look for it on PATH). sub ConfigureTool { my $tool = shift; my $path; # --tools (or $JEPROF_TOOLS) is a comma separated list, where each # item is either a) a pathname prefix, or b) a map of the form # :. First we look for an entry of type (b) for our # tool. If one is found, we use it. Otherwise, we consider all the # pathname prefixes in turn, until one yields an existing file. 
If # none does, we use a default path. my $tools = $main::opt_tools || $ENV{"JEPROF_TOOLS"} || ""; if ($tools =~ m/(,|^)\Q$tool\E:([^,]*)/) { $path = $2; # TODO(csilvers): sanity-check that $path exists? Hard if it's relative. } elsif ($tools ne '') { foreach my $prefix (split(',', $tools)) { next if ($prefix =~ /:/); # ignore "tool:fullpath" entries in the list if (-x $prefix . $tool) { $path = $prefix . $tool; last; } } if (!$path) { error("No '$tool' found with prefix specified by " . "--tools (or \$JEPROF_TOOLS) '$tools'\n"); } } else { # ... otherwise use the version that exists in the same directory as # jeprof. If there's nothing there, use $PATH. $0 =~ m,[^/]*$,; # this is everything after the last slash my $dirname = $`; # this is everything up to and including the last slash if (-x "$dirname$tool") { $path = "$dirname$tool"; } else { $path = $tool; } } if ($main::opt_debug) { print STDERR "Using '$path' for '$tool'.\n"; } return $path; } sub ShellEscape { my @escaped_words = (); foreach my $word (@_) { my $escaped_word = $word; if ($word =~ m![^a-zA-Z0-9/.,_=-]!) { # check for anything not in whitelist $escaped_word =~ s/'/'\\''/; $escaped_word = "'$escaped_word'"; } push(@escaped_words, $escaped_word); } return join(" ", @escaped_words); } sub cleanup { unlink($main::tmpfile_sym); unlink(keys %main::tempnames); # We leave any collected profiles in $HOME/jeprof in case the user wants # to look at them later. We print a message informing them of this. if ((scalar(@main::profile_files) > 0) && defined($main::collected_profile)) { if (scalar(@main::profile_files) == 1) { print STDERR "Dynamically gathered profile is in $main::collected_profile\n"; } print STDERR "If you want to investigate this profile further, you can do:\n"; print STDERR "\n"; print STDERR " jeprof \\\n"; print STDERR " $main::prog \\\n"; print STDERR " $main::collected_profile\n"; print STDERR "\n"; } } sub sighandler { cleanup(); exit(1); } sub error { my $msg = shift; print STDERR $msg; cleanup(); exit(1); } # Run $nm_command and get all the resulting procedure boundaries whose # names match "$regexp" and returns them in a hashtable mapping from # procedure name to a two-element vector of [start address, end address] sub GetProcedureBoundariesViaNm { my $escaped_nm_command = shift; # shell-escaped my $regexp = shift; my $symbol_table = {}; open(NM, "$escaped_nm_command |") || error("$escaped_nm_command: $!\n"); my $last_start = "0"; my $routine = ""; while (<NM>) { s/\r//g; # turn windows-looking lines into unix-looking lines if (m/^\s*([0-9a-f]+) (.) (..*)/) { my $start_val = $1; my $type = $2; my $this_routine = $3; # It's possible for two symbols to share the same address, if # one is a zero-length variable (like __start_google_malloc) or # one symbol is a weak alias to another (like __libc_malloc). # In such cases, we want to ignore all values except for the # actual symbol, which in nm-speak has type "T". The logic # below does this, though it's a bit tricky: what happens when # we have a series of lines with the same address, is the first # one gets queued up to be processed. However, it won't # *actually* be processed until later, when we read a line with # a different address. That means that as long as we're reading # lines with the same address, we have a chance to replace that # item in the queue, which we do whenever we see a 'T' entry -- # that is, a line with type 'T'. 
If we never see a 'T' entry, # we'll just go ahead and process the first entry (which never # got touched in the queue), and ignore the others. if ($start_val eq $last_start && $type =~ /t/i) { # We are the 'T' symbol at this address, replace previous symbol. $routine = $this_routine; next; } elsif ($start_val eq $last_start) { # We're not the 'T' symbol at this address, so ignore us. next; } if ($this_routine eq $sep_symbol) { $sep_address = HexExtend($start_val); } # Tag this routine with the starting address in case the image # has multiple occurrences of this routine. We use a syntax # that resembles template parameters that are automatically # stripped out by ShortFunctionName() $this_routine .= "<$start_val>"; if (defined($routine) && $routine =~ m/$regexp/) { $symbol_table->{$routine} = [HexExtend($last_start), HexExtend($start_val)]; } $last_start = $start_val; $routine = $this_routine; } elsif (m/^Loaded image name: (.+)/) { # The win32 nm workalike emits information about the binary it is using. if ($main::opt_debug) { print STDERR "Using Image $1\n"; } } elsif (m/^PDB file name: (.+)/) { # The win32 nm workalike emits information about the pdb it is using. if ($main::opt_debug) { print STDERR "Using PDB $1\n"; } } } close(NM); # Handle the last line in the nm output. Unfortunately, we don't know # how big this last symbol is, because we don't know how big the file # is. For now, we just give it a size of 0. # TODO(csilvers): do better here. if (defined($routine) && $routine =~ m/$regexp/) { $symbol_table->{$routine} = [HexExtend($last_start), HexExtend($last_start)]; } return $symbol_table; } # Gets the procedure boundaries for all routines in "$image" whose names # match "$regexp" and returns them in a hashtable mapping from procedure # name to a two-element vector of [start address, end address]. # Will return an empty map if nm is not installed or not working properly. sub GetProcedureBoundaries { my $image = shift; my $regexp = shift; # If $image doesn't start with /, then put ./ in front of it. This works # around an obnoxious bug in our probing of nm -f behavior. # "nm -f $image" is supposed to fail on GNU nm, but if: # # a. $image starts with [BbSsPp] (for example, bin/foo/bar), AND # b. you have a.out in your current directory (a not uncommon occurrence) # # then "nm -f $image" succeeds because -f only looks at the first letter of # the argument, which looks valid because it's [BbSsPp], and then since # there's no image provided, it looks for a.out and finds it. # # This regex makes sure that $image starts with . or /, forcing the -f # parsing to fail since . and / are not valid formats. $image =~ s#^[^/]#./$&#; # For libc libraries, the copy in /usr/lib/debug contains debugging symbols my $debugging = DebuggingLibrary($image); if ($debugging) { $image = $debugging; } my $nm = $obj_tool_map{"nm"}; my $cppfilt = $obj_tool_map{"c++filt"}; # nm can fail for two reasons: 1) $image isn't a debug library; 2) nm # binary doesn't support --demangle. In addition, for OS X we need # to use the -f flag to get 'flat' nm output (otherwise we don't sort # properly and get incorrect results). Unfortunately, GNU nm uses -f # in an incompatible way. So first we test whether our nm supports # --demangle and -f. my $demangle_flag = ""; my $cppfilt_flag = ""; my $to_devnull = ">$dev_null 2>&1"; if (system(ShellEscape($nm, "--demangle", $image) . 
$to_devnull) == 0) { # In this mode, we do "nm --demangle <image>" $demangle_flag = "--demangle"; $cppfilt_flag = ""; } elsif (system(ShellEscape($cppfilt, $image) . $to_devnull) == 0) { # In this mode, we do "nm <image> | c++filt" $cppfilt_flag = " | " . ShellEscape($cppfilt); }; my $flatten_flag = ""; if (system(ShellEscape($nm, "-f", $image) . $to_devnull) == 0) { $flatten_flag = "-f"; } # Finally, in the case $image isn't a debug library, we try again with # -D to at least get *exported* symbols. If we can't use --demangle, # we use c++filt instead, if it exists on this system. my @nm_commands = (ShellEscape($nm, "-n", $flatten_flag, $demangle_flag, $image) . " 2>$dev_null $cppfilt_flag", ShellEscape($nm, "-D", "-n", $flatten_flag, $demangle_flag, $image) . " 2>$dev_null $cppfilt_flag", # 6nm is for Go binaries ShellEscape("6nm", "$image") . " 2>$dev_null | sort", ); # If the executable is an MS Windows PDB-format executable, we'll # have set up obj_tool_map("nm_pdb"). In this case, we actually # want to use both unix nm and windows-specific nm_pdb, since # PDB-format executables can apparently include dwarf .o files. if (exists $obj_tool_map{"nm_pdb"}) { push(@nm_commands, ShellEscape($obj_tool_map{"nm_pdb"}, "--demangle", $image) . " 2>$dev_null"); } foreach my $nm_command (@nm_commands) { my $symbol_table = GetProcedureBoundariesViaNm($nm_command, $regexp); return $symbol_table if (%{$symbol_table}); } my $symbol_table = {}; return $symbol_table; } # The test vectors for AddressAdd/Sub/Inc are 8-16-nibble hex strings. # To make them more readable, we add underscores at interesting places. # This routine removes the underscores, producing the canonical representation # used by jeprof to represent addresses, particularly in the tested routines. sub CanonicalHex { my $arg = shift; return join '', (split '_',$arg); } # Unit test for AddressAdd: sub AddressAddUnitTest { my $test_data_8 = shift; my $test_data_16 = shift; my $error_count = 0; my $fail_count = 0; my $pass_count = 0; # print STDERR "AddressAddUnitTest: ", 1+$#{$test_data_8}, " tests\n"; # First a few 8-nibble addresses. Note that this implementation uses # plain old arithmetic, so a quick sanity check along with verifying what # happens to overflow (we want it to wrap): $address_length = 8; foreach my $row (@{$test_data_8}) { if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } my $sum = AddressAdd ($row->[0], $row->[1]); if ($sum ne $row->[2]) { printf STDERR "ERROR: %s != %s + %s = %s\n", $sum, $row->[0], $row->[1], $row->[2]; ++$fail_count; } else { ++$pass_count; } } printf STDERR "AddressAdd 32-bit tests: %d passes, %d failures\n", $pass_count, $fail_count; $error_count = $fail_count; $fail_count = 0; $pass_count = 0; # Now 16-nibble addresses. 
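# (Editor's note, a minimal illustration and not part of upstream jeprof: # CanonicalHex() above simply strips the readability underscores from the # test vectors before any arithmetic runs, e.g. # CanonicalHex("00_000000f_afafafa") # -> "00000000fafafafa" # so the 16-nibble rows below compare plain hex strings of equal length.) 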
$address_length = 16; foreach my $row (@{$test_data_16}) { if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } my $sum = AddressAdd (CanonicalHex($row->[0]), CanonicalHex($row->[1])); my $expected = join '', (split '_',$row->[2]); if ($sum ne CanonicalHex($row->[2])) { printf STDERR "ERROR: %s != %s + %s = %s\n", $sum, $row->[0], $row->[1], $row->[2]; ++$fail_count; } else { ++$pass_count; } } printf STDERR "AddressAdd 64-bit tests: %d passes, %d failures\n", $pass_count, $fail_count; $error_count += $fail_count; return $error_count; } # Unit test for AddressSub: sub AddressSubUnitTest { my $test_data_8 = shift; my $test_data_16 = shift; my $error_count = 0; my $fail_count = 0; my $pass_count = 0; # print STDERR "AddressSubUnitTest: ", 1+$#{$test_data_8}, " tests\n"; # First a few 8-nibble addresses. Note that this implementation uses # plain old arithmetic, so a quick sanity check along with verifying what # happens to overflow (we want it to wrap): $address_length = 8; foreach my $row (@{$test_data_8}) { if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } my $sum = AddressSub ($row->[0], $row->[1]); if ($sum ne $row->[3]) { printf STDERR "ERROR: %s != %s - %s = %s\n", $sum, $row->[0], $row->[1], $row->[3]; ++$fail_count; } else { ++$pass_count; } } printf STDERR "AddressSub 32-bit tests: %d passes, %d failures\n", $pass_count, $fail_count; $error_count = $fail_count; $fail_count = 0; $pass_count = 0; # Now 16-nibble addresses. $address_length = 16; foreach my $row (@{$test_data_16}) { if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } my $sum = AddressSub (CanonicalHex($row->[0]), CanonicalHex($row->[1])); if ($sum ne CanonicalHex($row->[3])) { printf STDERR "ERROR: %s != %s - %s = %s\n", $sum, $row->[0], $row->[1], $row->[3]; ++$fail_count; } else { ++$pass_count; } } printf STDERR "AddressSub 64-bit tests: %d passes, %d failures\n", $pass_count, $fail_count; $error_count += $fail_count; return $error_count; } # Unit test for AddressInc: sub AddressIncUnitTest { my $test_data_8 = shift; my $test_data_16 = shift; my $error_count = 0; my $fail_count = 0; my $pass_count = 0; # print STDERR "AddressIncUnitTest: ", 1+$#{$test_data_8}, " tests\n"; # First a few 8-nibble addresses. Note that this implementation uses # plain old arithmetic, so a quick sanity check along with verifying what # happens to overflow (we want it to wrap): $address_length = 8; foreach my $row (@{$test_data_8}) { if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } my $sum = AddressInc ($row->[0]); if ($sum ne $row->[4]) { printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum, $row->[0], $row->[4]; ++$fail_count; } else { ++$pass_count; } } printf STDERR "AddressInc 32-bit tests: %d passes, %d failures\n", $pass_count, $fail_count; $error_count = $fail_count; $fail_count = 0; $pass_count = 0; # Now 16-nibble addresses. $address_length = 16; foreach my $row (@{$test_data_16}) { if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } my $sum = AddressInc (CanonicalHex($row->[0])); if ($sum ne CanonicalHex($row->[4])) { printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum, $row->[0], $row->[4]; ++$fail_count; } else { ++$pass_count; } } printf STDERR "AddressInc 64-bit tests: %d passes, %d failures\n", $pass_count, $fail_count; $error_count += $fail_count; return $error_count; } # Driver for unit tests. # Currently just the address add/subtract/increment routines for 64-bit. 
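# (Editor's note, a worked example taken from the 8-nibble vectors that # follow, assuming the 32-bit setting $address_length = 8: each tuple # encodes [a, b, a+b, a-b, a+1], and AddressAdd("ffffffff", "aaaaaaaa") # wraps modulo 2^32 to yield "aaaaaaa9", exactly the third field of that # row -- the wrap-around behavior the comments above say is intended.) 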
sub RunUnitTests { my $error_count = 0; # This is a list of tuples [a, b, a+b, a-b, a+1] my $unit_test_data_8 = [ [qw(aaaaaaaa 50505050 fafafafa 5a5a5a5a aaaaaaab)], [qw(50505050 aaaaaaaa fafafafa a5a5a5a6 50505051)], [qw(ffffffff aaaaaaaa aaaaaaa9 55555555 00000000)], [qw(00000001 ffffffff 00000000 00000002 00000002)], [qw(00000001 fffffff0 fffffff1 00000011 00000002)], ]; my $unit_test_data_16 = [ # The implementation handles data in 7-nibble chunks, so those are the # interesting boundaries. [qw(aaaaaaaa 50505050 00_000000f_afafafa 00_0000005_a5a5a5a 00_000000a_aaaaaab)], [qw(50505050 aaaaaaaa 00_000000f_afafafa ff_ffffffa_5a5a5a6 00_0000005_0505051)], [qw(ffffffff aaaaaaaa 00_000001a_aaaaaa9 00_0000005_5555555 00_0000010_0000000)], [qw(00000001 ffffffff 00_0000010_0000000 ff_ffffff0_0000002 00_0000000_0000002)], [qw(00000001 fffffff0 00_000000f_ffffff1 ff_ffffff0_0000011 00_0000000_0000002)], [qw(00_a00000a_aaaaaaa 50505050 00_a00000f_afafafa 00_a000005_a5a5a5a 00_a00000a_aaaaaab)], [qw(0f_fff0005_0505050 aaaaaaaa 0f_fff000f_afafafa 0f_ffefffa_5a5a5a6 0f_fff0005_0505051)], [qw(00_000000f_fffffff 01_800000a_aaaaaaa 01_800001a_aaaaaa9 fe_8000005_5555555 00_0000010_0000000)], [qw(00_0000000_0000001 ff_fffffff_fffffff 00_0000000_0000000 00_0000000_0000002 00_0000000_0000002)], [qw(00_0000000_0000001 ff_fffffff_ffffff0 ff_fffffff_ffffff1 00_0000000_0000011 00_0000000_0000002)], ]; $error_count += AddressAddUnitTest($unit_test_data_8, $unit_test_data_16); $error_count += AddressSubUnitTest($unit_test_data_8, $unit_test_data_16); $error_count += AddressIncUnitTest($unit_test_data_8, $unit_test_data_16); if ($error_count > 0) { print STDERR $error_count, " errors: FAILED\n"; } else { print STDERR "PASS\n"; } exit ($error_count); } disque-1.0-rc1/deps/jemalloc/config.guess000077500000000000000000001235501264176561100204070ustar00rootroot00000000000000#! /bin/sh # Attempt to guess a canonical system name. # Copyright 1992-2014 Free Software Foundation, Inc. timestamp='2014-03-23' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/>. 
# # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that # program. This Exception is an additional permission under section 7 # of the GNU General Public License, version 3 ("GPLv3"). # # Originally written by Per Bothner. # # You can get the latest version of this script from: # http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD # # Please send patches with a ChangeLog entry to config-patches@gnu.org. me=`echo "$0" | sed -e 's,.*/,,'` usage="\ Usage: $0 [OPTION] Output the configuration name of the system \`$me' is run on. Operation modes: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit Report bugs and patches to <config-patches@gnu.org>." version="\ GNU config.guess ($timestamp) Originally written by Per Bothner. Copyright 1992-2014 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" Try \`$me --help' for more information." # Parse command line while test $# -gt 0 ; do case $1 in --time-stamp | --time* | -t ) echo "$timestamp" ; exit ;; --version | -v ) echo "$version" ; exit ;; --help | --h* | -h ) echo "$usage"; exit ;; -- ) # Stop option processing shift; break ;; - ) # Use stdin as input. break ;; -* ) echo "$me: invalid option $1$help" >&2 exit 1 ;; * ) break ;; esac done if test $# != 0; then echo "$me: too many arguments$help" >&2 exit 1 fi trap 'exit 1' 1 2 15 # CC_FOR_BUILD -- compiler used by this script. Note that the use of a # compiler to aid in system detection is discouraged as it requires # temporary files to be created and, as you can see below, it is a # headache to deal with in a portable fashion. # Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still # use `HOST_CC' if defined, but it is deprecated. # Portable tmp directory creation inspired by the Autoconf team. set_cc_for_build=' trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; : ${TMPDIR=/tmp} ; { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; dummy=$tmp/dummy ; tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; case $CC_FOR_BUILD,$HOST_CC,$CC in ,,) echo "int x;" > $dummy.c ; for c in cc gcc c89 c99 ; do if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then CC_FOR_BUILD="$c"; break ; fi ; done ; if test x"$CC_FOR_BUILD" = x ; then CC_FOR_BUILD=no_compiler_found ; fi ;; ,,*) CC_FOR_BUILD=$CC ;; ,*,*) CC_FOR_BUILD=$HOST_CC ;; esac ; set_cc_for_build= ;' # This is needed to find uname on a Pyramid OSx when run in the BSD universe. 
# (ghazi@noc.rutgers.edu 1994-08-24) if (test -f /.attbin/uname) >/dev/null 2>&1 ; then PATH=$PATH:/.attbin ; export PATH fi UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown case "${UNAME_SYSTEM}" in Linux|GNU|GNU/*) # If the system lacks a compiler, then just pick glibc. # We could probably try harder. LIBC=gnu eval $set_cc_for_build cat <<-EOF > $dummy.c #include <features.h> #if defined(__UCLIBC__) LIBC=uclibc #elif defined(__dietlibc__) LIBC=dietlibc #else LIBC=gnu #endif EOF eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'` ;; esac # Note: order is significant - the case branches are not exclusive. case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in *:NetBSD:*:*) # NetBSD (nbsd) targets should (where applicable) match one or # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently # switched to ELF, *-*-netbsd* would select the old # object file format. This provides both forward # compatibility and a consistent mechanism for selecting the # object file format. # # Note: NetBSD doesn't particularly care about the vendor # portion of the name. We always set it to "unknown". sysctl="sysctl -n hw.machine_arch" UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ /usr/sbin/$sysctl 2>/dev/null || echo unknown)` case "${UNAME_MACHINE_ARCH}" in armeb) machine=armeb-unknown ;; arm*) machine=arm-unknown ;; sh3el) machine=shl-unknown ;; sh3eb) machine=sh-unknown ;; sh5el) machine=sh5le-unknown ;; *) machine=${UNAME_MACHINE_ARCH}-unknown ;; esac # The Operating System including object format, if it has switched # to ELF recently, or will in the future. case "${UNAME_MACHINE_ARCH}" in arm*|i386|m68k|ns32k|sh3*|sparc|vax) eval $set_cc_for_build if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ELF__ then # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). # Return netbsd for either. FIX? os=netbsd else os=netbsdelf fi ;; *) os=netbsd ;; esac # The OS release # Debian GNU/NetBSD machines have a different userland, and # thus, need a distinct triplet. However, they do not need # kernel version information, so it can be replaced with a # suitable tag, in the style of linux-gnu. case "${UNAME_VERSION}" in Debian*) release='-gnu' ;; *) release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` ;; esac # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: # contains redundant information, the shorter form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. 
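# (Editor's sketch with hypothetical values, not part of upstream # config.guess: with machine=sparc-unknown, os=netbsdelf and a # UNAME_RELEASE of 9.0 -- which the sed above leaves as release=9.0 -- # the echo below would emit "sparc-unknown-netbsdelf9.0".) 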
echo "${machine}-${os}${release}" exit ;; *:Bitrig:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE} exit ;; *:OpenBSD:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} exit ;; *:ekkoBSD:*:*) echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} exit ;; *:SolidBSD:*:*) echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} exit ;; macppc:MirBSD:*:*) echo powerpc-unknown-mirbsd${UNAME_RELEASE} exit ;; *:MirBSD:*:*) echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} exit ;; alpha:OSF1:*:*) case $UNAME_RELEASE in *4.0) UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` ;; *5.*) UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` ;; esac # According to Compaq, /usr/sbin/psrinfo has been available on # OSF/1 and Tru64 systems produced since 1995. I hope that # covers most systems running today. This code pipes the CPU # types through head -n 1, so we only detect the type of CPU 0. ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` case "$ALPHA_CPU_TYPE" in "EV4 (21064)") UNAME_MACHINE="alpha" ;; "EV4.5 (21064)") UNAME_MACHINE="alpha" ;; "LCA4 (21066/21068)") UNAME_MACHINE="alpha" ;; "EV5 (21164)") UNAME_MACHINE="alphaev5" ;; "EV5.6 (21164A)") UNAME_MACHINE="alphaev56" ;; "EV5.6 (21164PC)") UNAME_MACHINE="alphapca56" ;; "EV5.7 (21164PC)") UNAME_MACHINE="alphapca57" ;; "EV6 (21264)") UNAME_MACHINE="alphaev6" ;; "EV6.7 (21264A)") UNAME_MACHINE="alphaev67" ;; "EV6.8CB (21264C)") UNAME_MACHINE="alphaev68" ;; "EV6.8AL (21264B)") UNAME_MACHINE="alphaev68" ;; "EV6.8CX (21264D)") UNAME_MACHINE="alphaev68" ;; "EV6.9A (21264/EV69A)") UNAME_MACHINE="alphaev69" ;; "EV7 (21364)") UNAME_MACHINE="alphaev7" ;; "EV7.9 (21364A)") UNAME_MACHINE="alphaev79" ;; esac # A Pn.n version is a patched version. # A Vn.n version is a released version. # A Tn.n version is a released field test version. # A Xn.n version is an unreleased experimental baselevel. # 1.2 uses "1.2" for uname -r. echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` # Reset EXIT trap before exiting to avoid spurious non-zero exit code. exitcode=$? trap '' 0 exit $exitcode ;; Alpha\ *:Windows_NT*:*) # How do we know it's Interix rather than the generic POSIX subsystem? # Should we change UNAME_MACHINE based on the output of uname instead # of the specific Alpha model? echo alpha-pc-interix exit ;; 21064:Windows_NT:50:3) echo alpha-dec-winnt3.5 exit ;; Amiga*:UNIX_System_V:4.0:*) echo m68k-unknown-sysv4 exit ;; *:[Aa]miga[Oo][Ss]:*:*) echo ${UNAME_MACHINE}-unknown-amigaos exit ;; *:[Mm]orph[Oo][Ss]:*:*) echo ${UNAME_MACHINE}-unknown-morphos exit ;; *:OS/390:*:*) echo i370-ibm-openedition exit ;; *:z/VM:*:*) echo s390-ibm-zvmoe exit ;; *:OS400:*:*) echo powerpc-ibm-os400 exit ;; arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) echo arm-acorn-riscix${UNAME_RELEASE} exit ;; arm*:riscos:*:*|arm*:RISCOS:*:*) echo arm-unknown-riscos exit ;; SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) echo hppa1.1-hitachi-hiuxmpp exit ;; Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. 
if test "`(/bin/universe) 2>/dev/null`" = att ; then echo pyramid-pyramid-sysv3 else echo pyramid-pyramid-bsd fi exit ;; NILE*:*:*:dcosx) echo pyramid-pyramid-svr4 exit ;; DRS?6000:unix:4.0:6*) echo sparc-icl-nx6 exit ;; DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) case `/usr/bin/uname -p` in sparc) echo sparc-icl-nx7; exit ;; esac ;; s390x:SunOS:*:*) echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4H:SunOS:5.*:*) echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) echo i386-pc-auroraux${UNAME_RELEASE} exit ;; i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) eval $set_cc_for_build SUN_ARCH="i386" # If there is a compiler, see if it is configured for 64-bit objects. # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. # This test works for both compilers. if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then SUN_ARCH="x86_64" fi fi echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:6*:*) # According to config.sub, this is the proper way to canonicalize # SunOS6. Hard to guess exactly what SunOS6 will be like, but # it's likely to be more like Solaris than SunOS4. echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:*:*) case "`/usr/bin/arch -k`" in Series*|S4*) UNAME_RELEASE=`uname -v` ;; esac # Japanese Language versions have a version number like `4.1.3-JL'. echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` exit ;; sun3*:SunOS:*:*) echo m68k-sun-sunos${UNAME_RELEASE} exit ;; sun*:*:4.2BSD:*) UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 case "`/bin/arch`" in sun3) echo m68k-sun-sunos${UNAME_RELEASE} ;; sun4) echo sparc-sun-sunos${UNAME_RELEASE} ;; esac exit ;; aushp:SunOS:*:*) echo sparc-auspex-sunos${UNAME_RELEASE} exit ;; # The situation for MiNT is a little confusing. The machine name # can be virtually everything (everything which is not # "atarist" or "atariste" at least should have a processor # > m68000). The system name ranges from "MiNT" over "FreeMiNT" # to the lowercase version "mint" (or "freemint"). Finally # the system name "TOS" denotes a system which is actually not # MiNT. But MiNT is downward compatible to TOS, so this should # be no problem. 
atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) echo m68k-atari-mint${UNAME_RELEASE} exit ;; atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) echo m68k-atari-mint${UNAME_RELEASE} exit ;; *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) echo m68k-atari-mint${UNAME_RELEASE} exit ;; milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) echo m68k-milan-mint${UNAME_RELEASE} exit ;; hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) echo m68k-hades-mint${UNAME_RELEASE} exit ;; *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) echo m68k-unknown-mint${UNAME_RELEASE} exit ;; m68k:machten:*:*) echo m68k-apple-machten${UNAME_RELEASE} exit ;; powerpc:machten:*:*) echo powerpc-apple-machten${UNAME_RELEASE} exit ;; RISC*:Mach:*:*) echo mips-dec-mach_bsd4.3 exit ;; RISC*:ULTRIX:*:*) echo mips-dec-ultrix${UNAME_RELEASE} exit ;; VAX*:ULTRIX*:*:*) echo vax-dec-ultrix${UNAME_RELEASE} exit ;; 2020:CLIX:*:* | 2430:CLIX:*:*) echo clipper-intergraph-clix${UNAME_RELEASE} exit ;; mips:*:*:UMIPS | mips:*:*:RISCos) eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #ifdef __cplusplus #include <stdio.h> /* for printf() prototype */ int main (int argc, char *argv[]) { #else int main (argc, argv) int argc; char *argv[]; { #endif #if defined (host_mips) && defined (MIPSEB) #if defined (SYSTYPE_SYSV) printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_SVR4) printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); #endif #endif exit (-1); } EOF $CC_FOR_BUILD -o $dummy $dummy.c && dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && SYSTEM_NAME=`$dummy $dummyarg` && { echo "$SYSTEM_NAME"; exit; } echo mips-mips-riscos${UNAME_RELEASE} exit ;; Motorola:PowerMAX_OS:*:*) echo powerpc-motorola-powermax exit ;; Motorola:*:4.3:PL8-*) echo powerpc-harris-powermax exit ;; Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) echo powerpc-harris-powermax exit ;; Night_Hawk:Power_UNIX:*:*) echo powerpc-harris-powerunix exit ;; m88k:CX/UX:7*:*) echo m88k-harris-cxux7 exit ;; m88k:*:4*:R4*) echo m88k-motorola-sysv4 exit ;; m88k:*:3*:R3*) echo m88k-motorola-sysv3 exit ;; AViiON:dgux:*:*) # DG/UX returns AViiON for all architectures UNAME_PROCESSOR=`/usr/bin/uname -p` if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] then if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ [ ${TARGET_BINARY_INTERFACE}x = x ] then echo m88k-dg-dgux${UNAME_RELEASE} else echo m88k-dg-dguxbcs${UNAME_RELEASE} fi else echo i586-dg-dgux${UNAME_RELEASE} fi exit ;; M88*:DolphinOS:*:*) # DolphinOS (SVR3) echo m88k-dolphin-sysv3 exit ;; M88*:*:R3*:*) # Delta 88k system running SVR3 echo m88k-motorola-sysv3 exit ;; XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) echo m88k-tektronix-sysv3 exit ;; Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) echo m68k-tektronix-bsd exit ;; *:IRIX*:*:*) echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` exit ;; ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. 
echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' i*86:AIX:*:*) echo i386-ibm-aix exit ;; ia64:AIX:*:*) if [ -x /usr/bin/oslevel ] ; then IBM_REV=`/usr/bin/oslevel` else IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} fi echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} exit ;; *:AIX:2:3) if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #include <sys/systemcfg.h> main() { if (!__power_pc()) exit(1); puts("powerpc-ibm-aix3.2.5"); exit(0); } EOF if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` then echo "$SYSTEM_NAME" else echo rs6000-ibm-aix3.2.5 fi elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then echo rs6000-ibm-aix3.2.4 else echo rs6000-ibm-aix3.2 fi exit ;; *:AIX:*:[4567]) IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then IBM_ARCH=rs6000 else IBM_ARCH=powerpc fi if [ -x /usr/bin/oslevel ] ; then IBM_REV=`/usr/bin/oslevel` else IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} fi echo ${IBM_ARCH}-ibm-aix${IBM_REV} exit ;; *:AIX:*:*) echo rs6000-ibm-aix exit ;; ibmrt:4.4BSD:*|romp-ibm:BSD:*) echo romp-ibm-bsd4.4 exit ;; ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to exit ;; # report: romp-ibm BSD 4.3 *:BOSX:*:*) echo rs6000-bull-bosx exit ;; DPX/2?00:B.O.S.:*:*) echo m68k-bull-sysv3 exit ;; 9000/[34]??:4.3bsd:1.*:*) echo m68k-hp-bsd exit ;; hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) echo m68k-hp-bsd4.4 exit ;; 9000/[34678]??:HP-UX:*:*) HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` case "${UNAME_MACHINE}" in 9000/31? ) HP_ARCH=m68000 ;; 9000/[34]?? ) HP_ARCH=m68k ;; 9000/[678][0-9][0-9]) if [ -x /usr/bin/getconf ]; then sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` case "${sc_cpu_version}" in 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1 532) # CPU_PA_RISC2_0 case "${sc_kernel_bits}" in 32) HP_ARCH="hppa2.0n" ;; 64) HP_ARCH="hppa2.0w" ;; '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20 esac ;; esac fi if [ "${HP_ARCH}" = "" ]; then eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #define _HPUX_SOURCE #include <stdlib.h> #include <unistd.h> int main () { #if defined(_SC_KERNEL_BITS) long bits = sysconf(_SC_KERNEL_BITS); #endif long cpu = sysconf (_SC_CPU_VERSION); switch (cpu) { case CPU_PA_RISC1_0: puts ("hppa1.0"); break; case CPU_PA_RISC1_1: puts ("hppa1.1"); break; case CPU_PA_RISC2_0: #if defined(_SC_KERNEL_BITS) switch (bits) { case 64: puts ("hppa2.0w"); break; case 32: puts ("hppa2.0n"); break; default: puts ("hppa2.0"); break; } break; #else /* !defined(_SC_KERNEL_BITS) */ puts ("hppa2.0"); break; #endif default: puts ("hppa1.0"); break; } exit (0); } EOF (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` test -z "$HP_ARCH" && HP_ARCH=hppa fi ;; esac if [ ${HP_ARCH} = "hppa2.0w" ] then eval $set_cc_for_build # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler # generating 64-bit code. 
GNU and HP use different nomenclature: # # $ CC_FOR_BUILD=cc ./config.guess # => hppa2.0w-hp-hpux11.23 # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess # => hppa64-hp-hpux11.23 if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | grep -q __LP64__ then HP_ARCH="hppa2.0w" else HP_ARCH="hppa64" fi fi echo ${HP_ARCH}-hp-hpux${HPUX_REV} exit ;; ia64:HP-UX:*:*) HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` echo ia64-hp-hpux${HPUX_REV} exit ;; 3050*:HI-UX:*:*) eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #include <unistd.h> int main () { long cpu = sysconf (_SC_CPU_VERSION); /* The order matters, because CPU_IS_HP_MC68K erroneously returns true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct results, however. */ if (CPU_IS_PA_RISC (cpu)) { switch (cpu) { case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; default: puts ("hppa-hitachi-hiuxwe2"); break; } } else if (CPU_IS_HP_MC68K (cpu)) puts ("m68k-hitachi-hiuxwe2"); else puts ("unknown-hitachi-hiuxwe2"); exit (0); } EOF $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && { echo "$SYSTEM_NAME"; exit; } echo unknown-hitachi-hiuxwe2 exit ;; 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) echo hppa1.1-hp-bsd exit ;; 9000/8??:4.3bsd:*:*) echo hppa1.0-hp-bsd exit ;; *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) echo hppa1.0-hp-mpeix exit ;; hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) echo hppa1.1-hp-osf exit ;; hp8??:OSF1:*:*) echo hppa1.0-hp-osf exit ;; i*86:OSF1:*:*) if [ -x /usr/sbin/sysversion ] ; then echo ${UNAME_MACHINE}-unknown-osf1mk else echo ${UNAME_MACHINE}-unknown-osf1 fi exit ;; parisc*:Lites*:*:*) echo hppa1.1-hp-lites exit ;; C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) echo c1-convex-bsd exit ;; C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) if getsysinfo -f scalar_acc then echo c32-convex-bsd else echo c2-convex-bsd fi exit ;; C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) echo c34-convex-bsd exit ;; C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) echo c38-convex-bsd exit ;; C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) echo c4-convex-bsd exit ;; CRAY*Y-MP:*:*:*) echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*[A-Z]90:*:*:*) echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ -e 's/\.[^.]*$/.X/' exit ;; CRAY*TS:*:*:*) echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*T3E:*:*:*) echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*SV1:*:*:*) echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; *:UNICOS/mp:*:*) echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; 5000:UNIX_System_V:4.*:*) FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) echo 
${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} exit ;; sparc*:BSD/OS:*:*) echo sparc-unknown-bsdi${UNAME_RELEASE} exit ;; *:BSD/OS:*:*) echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} exit ;; *:FreeBSD:*:*) UNAME_PROCESSOR=`/usr/bin/uname -p` case ${UNAME_PROCESSOR} in amd64) echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; *) echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; esac exit ;; i*:CYGWIN*:*) echo ${UNAME_MACHINE}-pc-cygwin exit ;; *:MINGW64*:*) echo ${UNAME_MACHINE}-pc-mingw64 exit ;; *:MINGW*:*) echo ${UNAME_MACHINE}-pc-mingw32 exit ;; *:MSYS*:*) echo ${UNAME_MACHINE}-pc-msys exit ;; i*:windows32*:*) # uname -m includes "-pc" on this system. echo ${UNAME_MACHINE}-mingw32 exit ;; i*:PW*:*) echo ${UNAME_MACHINE}-pc-pw32 exit ;; *:Interix*:*) case ${UNAME_MACHINE} in x86) echo i586-pc-interix${UNAME_RELEASE} exit ;; authenticamd | genuineintel | EM64T) echo x86_64-unknown-interix${UNAME_RELEASE} exit ;; IA64) echo ia64-unknown-interix${UNAME_RELEASE} exit ;; esac ;; [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) echo i${UNAME_MACHINE}-pc-mks exit ;; 8664:Windows_NT:*) echo x86_64-pc-mks exit ;; i*:Windows_NT*:* | Pentium*:Windows_NT*:*) # How do we know it's Interix rather than the generic POSIX subsystem? # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we # UNAME_MACHINE based on the output of uname instead of i386? echo i586-pc-interix exit ;; i*:UWIN*:*) echo ${UNAME_MACHINE}-pc-uwin exit ;; amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) echo x86_64-unknown-cygwin exit ;; p*:CYGWIN*:*) echo powerpcle-unknown-cygwin exit ;; prep*:SunOS:5.*:*) echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; *:GNU:*:*) # the GNU system echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` exit ;; *:GNU/*:*:*) # other systems with GNU libc and userland echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC} exit ;; i*86:Minix:*:*) echo ${UNAME_MACHINE}-pc-minix exit ;; aarch64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; aarch64_be:Linux:*:*) UNAME_MACHINE=aarch64_be echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; alpha:Linux:*:*) case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in EV5) UNAME_MACHINE=alphaev5 ;; EV56) UNAME_MACHINE=alphaev56 ;; PCA56) UNAME_MACHINE=alphapca56 ;; PCA57) UNAME_MACHINE=alphapca56 ;; EV6) UNAME_MACHINE=alphaev6 ;; EV67) UNAME_MACHINE=alphaev67 ;; EV68*) UNAME_MACHINE=alphaev68 ;; esac objdump --private-headers /bin/sh | grep -q ld.so.1 if test "$?" 
= 0 ; then LIBC="gnulibc1" ; fi echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; arc:Linux:*:* | arceb:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; arm*:Linux:*:*) eval $set_cc_for_build if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_EABI__ then echo ${UNAME_MACHINE}-unknown-linux-${LIBC} else if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_PCS_VFP then echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi else echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf fi fi exit ;; avr32*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; cris:Linux:*:*) echo ${UNAME_MACHINE}-axis-linux-${LIBC} exit ;; crisv32:Linux:*:*) echo ${UNAME_MACHINE}-axis-linux-${LIBC} exit ;; frv:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; hexagon:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; i*86:Linux:*:*) echo ${UNAME_MACHINE}-pc-linux-${LIBC} exit ;; ia64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; m32r*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; m68*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; mips:Linux:*:* | mips64:Linux:*:*) eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #undef CPU #undef ${UNAME_MACHINE} #undef ${UNAME_MACHINE}el #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) CPU=${UNAME_MACHINE}el #else #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) CPU=${UNAME_MACHINE} #else CPU= #endif #endif EOF eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } ;; openrisc*:Linux:*:*) echo or1k-unknown-linux-${LIBC} exit ;; or32:Linux:*:* | or1k*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; padre:Linux:*:*) echo sparc-unknown-linux-${LIBC} exit ;; parisc64:Linux:*:* | hppa64:Linux:*:*) echo hppa64-unknown-linux-${LIBC} exit ;; parisc:Linux:*:* | hppa:Linux:*:*) # Look for CPU level case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in PA7*) echo hppa1.1-unknown-linux-${LIBC} ;; PA8*) echo hppa2.0-unknown-linux-${LIBC} ;; *) echo hppa-unknown-linux-${LIBC} ;; esac exit ;; ppc64:Linux:*:*) echo powerpc64-unknown-linux-${LIBC} exit ;; ppc:Linux:*:*) echo powerpc-unknown-linux-${LIBC} exit ;; ppc64le:Linux:*:*) echo powerpc64le-unknown-linux-${LIBC} exit ;; ppcle:Linux:*:*) echo powerpcle-unknown-linux-${LIBC} exit ;; s390:Linux:*:* | s390x:Linux:*:*) echo ${UNAME_MACHINE}-ibm-linux-${LIBC} exit ;; sh64*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; sh*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; sparc:Linux:*:* | sparc64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; tile*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; vax:Linux:*:*) echo ${UNAME_MACHINE}-dec-linux-${LIBC} exit ;; x86_64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; xtensa*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; i*86:DYNIX/ptx:4*:*) # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. # earlier versions are messed up and put the nodename in both # sysname and nodename. echo i386-sequent-sysv4 exit ;; i*86:UNIX_SV:4.2MP:2.*) # Unixware is an offshoot of SVR4, but it has its own version # number series starting with 2... # I am not positive that other SVR4 systems won't match this, # I just have to hope. -- rms. # Use sysv4.2uw... so that sysv4* matches it. 
echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} exit ;; i*86:OS/2:*:*) # If we were able to find `uname', then EMX Unix compatibility # is probably installed. echo ${UNAME_MACHINE}-pc-os2-emx exit ;; i*86:XTS-300:*:STOP) echo ${UNAME_MACHINE}-unknown-stop exit ;; i*86:atheos:*:*) echo ${UNAME_MACHINE}-unknown-atheos exit ;; i*86:syllable:*:*) echo ${UNAME_MACHINE}-pc-syllable exit ;; i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) echo i386-unknown-lynxos${UNAME_RELEASE} exit ;; i*86:*DOS:*:*) echo ${UNAME_MACHINE}-pc-msdosdjgpp exit ;; i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} else echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} fi exit ;; i*86:*:5:[678]*) # UnixWare 7.x, OpenUNIX and OpenServer 6. case `/bin/uname -X | grep "^Machine"` in *486*) UNAME_MACHINE=i486 ;; *Pentium) UNAME_MACHINE=i586 ;; *Pent*|*Celeron) UNAME_MACHINE=i686 ;; esac echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} exit ;; i*86:*:3.2:*) if test -f /usr/options/cb.name; then UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name` echo ${UNAME_MACHINE}-pc-isc$UNAME_REL elif /bin/uname -X 2>/dev/null >/dev/null ; then UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ && UNAME_MACHINE=i586 (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ && UNAME_MACHINE=i686 (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ && UNAME_MACHINE=i686 echo ${UNAME_MACHINE}-pc-sco$UNAME_REL else echo ${UNAME_MACHINE}-pc-sysv32 fi exit ;; pc:*:*:*) # Left here for compatibility: # uname -m prints for DJGPP always 'pc', but it prints nothing about # the processor, so we play safe by assuming i586. # Note: whatever this is, it MUST be the same as what config.sub # prints for the "djgpp" host, or else GDB configury will decide that # this is a cross-build. echo i586-pc-msdosdjgpp exit ;; Intel:Mach:3*:*) echo i386-pc-mach3 exit ;; paragon:*:*:*) echo i860-intel-osf1 exit ;; i860:*:4.*:*) # i860-SVR4 if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 else # Add other i860-SVR4 vendors below as they are discovered. 
echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 fi exit ;; mini*:CTIX:SYS*5:*) # "miniframe" echo m68010-convergent-sysv exit ;; mc68k:UNIX:SYSTEM5:3.51m) echo m68k-convergent-sysv exit ;; M680?0:D-NIX:5.3:*) echo m68k-diab-dnix exit ;; M68*:*:R3V[5678]*:*) test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) OS_REL='' test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4.3${OS_REL}; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4; exit; } ;; NCR*:*:4.2:* | MPRAS*:*:4.2:*) OS_REL='.3' test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4.3${OS_REL}; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) echo m68k-unknown-lynxos${UNAME_RELEASE} exit ;; mc68030:UNIX_System_V:4.*:*) echo m68k-atari-sysv4 exit ;; TSUNAMI:LynxOS:2.*:*) echo sparc-unknown-lynxos${UNAME_RELEASE} exit ;; rs6000:LynxOS:2.*:*) echo rs6000-unknown-lynxos${UNAME_RELEASE} exit ;; PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) echo powerpc-unknown-lynxos${UNAME_RELEASE} exit ;; SM[BE]S:UNIX_SV:*:*) echo mips-dde-sysv${UNAME_RELEASE} exit ;; RM*:ReliantUNIX-*:*:*) echo mips-sni-sysv4 exit ;; RM*:SINIX-*:*:*) echo mips-sni-sysv4 exit ;; *:SINIX-*:*:*) if uname -p 2>/dev/null >/dev/null ; then UNAME_MACHINE=`(uname -p) 2>/dev/null` echo ${UNAME_MACHINE}-sni-sysv4 else echo ns32k-sni-sysv fi exit ;; PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort # says <Richard.M.Bartel@ccMail.Census.GOV> echo i586-unisys-sysv4 exit ;; *:UNIX_System_V:4*:FTX*) # From Gerald Hewes <hewes@openmarket.com>. # How about differentiating between stratus architectures? -djm echo hppa1.1-stratus-sysv4 exit ;; *:*:*:FTX*) # From seanf@swdc.stratus.com. echo i860-stratus-sysv4 exit ;; i*86:VOS:*:*) # From Paul.Green@stratus.com. echo ${UNAME_MACHINE}-stratus-vos exit ;; *:VOS:*:*) # From Paul.Green@stratus.com. echo hppa1.1-stratus-vos exit ;; mc68*:A/UX:*:*) echo m68k-apple-aux${UNAME_RELEASE} exit ;; news*:NEWS-OS:6*:*) echo mips-sony-newsos6 exit ;; R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) if [ -d /usr/nec ]; then echo mips-nec-sysv${UNAME_RELEASE} else echo mips-unknown-sysv${UNAME_RELEASE} fi exit ;; BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. echo powerpc-be-beos exit ;; BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. echo powerpc-apple-beos exit ;; BePC:BeOS:*:*) # BeOS running on Intel PC compatible. echo i586-pc-beos exit ;; BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
echo i586-pc-haiku exit ;; x86_64:Haiku:*:*) echo x86_64-unknown-haiku exit ;; SX-4:SUPER-UX:*:*) echo sx4-nec-superux${UNAME_RELEASE} exit ;; SX-5:SUPER-UX:*:*) echo sx5-nec-superux${UNAME_RELEASE} exit ;; SX-6:SUPER-UX:*:*) echo sx6-nec-superux${UNAME_RELEASE} exit ;; SX-7:SUPER-UX:*:*) echo sx7-nec-superux${UNAME_RELEASE} exit ;; SX-8:SUPER-UX:*:*) echo sx8-nec-superux${UNAME_RELEASE} exit ;; SX-8R:SUPER-UX:*:*) echo sx8r-nec-superux${UNAME_RELEASE} exit ;; Power*:Rhapsody:*:*) echo powerpc-apple-rhapsody${UNAME_RELEASE} exit ;; *:Rhapsody:*:*) echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} exit ;; *:Darwin:*:*) UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown eval $set_cc_for_build if test "$UNAME_PROCESSOR" = unknown ; then UNAME_PROCESSOR=powerpc fi if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then case $UNAME_PROCESSOR in i386) UNAME_PROCESSOR=x86_64 ;; powerpc) UNAME_PROCESSOR=powerpc64 ;; esac fi fi elif test "$UNAME_PROCESSOR" = i386 ; then # Avoid executing cc on OS X 10.9, as it ships with a stub # that puts up a graphical alert prompting to install # developer tools. Any system running Mac OS X 10.7 or # later (Darwin 11 and later) is required to have a 64-bit # processor. This is not true of the ARM version of Darwin # that Apple uses in portable devices. UNAME_PROCESSOR=x86_64 fi echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} exit ;; *:procnto*:*:* | *:QNX:[0123456789]*:*) UNAME_PROCESSOR=`uname -p` if test "$UNAME_PROCESSOR" = "x86"; then UNAME_PROCESSOR=i386 UNAME_MACHINE=pc fi echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} exit ;; *:QNX:*:4*) echo i386-pc-qnx exit ;; NEO-?:NONSTOP_KERNEL:*:*) echo neo-tandem-nsk${UNAME_RELEASE} exit ;; NSE-*:NONSTOP_KERNEL:*:*) echo nse-tandem-nsk${UNAME_RELEASE} exit ;; NSR-?:NONSTOP_KERNEL:*:*) echo nsr-tandem-nsk${UNAME_RELEASE} exit ;; *:NonStop-UX:*:*) echo mips-compaq-nonstopux exit ;; BS2000:POSIX*:*:*) echo bs2000-siemens-sysv exit ;; DS/*:UNIX_System_V:*:*) echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} exit ;; *:Plan9:*:*) # "uname -m" is not consistent, so use $cputype instead. 386 # is converted to i386 for consistency with other x86 # operating systems. 
if test "$cputype" = "386"; then UNAME_MACHINE=i386 else UNAME_MACHINE="$cputype" fi echo ${UNAME_MACHINE}-unknown-plan9 exit ;; *:TOPS-10:*:*) echo pdp10-unknown-tops10 exit ;; *:TENEX:*:*) echo pdp10-unknown-tenex exit ;; KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) echo pdp10-dec-tops20 exit ;; XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) echo pdp10-xkl-tops20 exit ;; *:TOPS-20:*:*) echo pdp10-unknown-tops20 exit ;; *:ITS:*:*) echo pdp10-unknown-its exit ;; SEI:*:*:SEIUX) echo mips-sei-seiux${UNAME_RELEASE} exit ;; *:DragonFly:*:*) echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` exit ;; *:*VMS:*:*) UNAME_MACHINE=`(uname -p) 2>/dev/null` case "${UNAME_MACHINE}" in A*) echo alpha-dec-vms ; exit ;; I*) echo ia64-dec-vms ; exit ;; V*) echo vax-dec-vms ; exit ;; esac ;; *:XENIX:*:SysV) echo i386-pc-xenix exit ;; i*86:skyos:*:*) echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//' exit ;; i*86:rdos:*:*) echo ${UNAME_MACHINE}-pc-rdos exit ;; i*86:AROS:*:*) echo ${UNAME_MACHINE}-pc-aros exit ;; x86_64:VMkernel:*:*) echo ${UNAME_MACHINE}-unknown-esx exit ;; esac cat >&2 < in order to provide the needed information to handle your system. config.guess timestamp = $timestamp uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` /bin/uname -X = `(/bin/uname -X) 2>/dev/null` hostinfo = `(hostinfo) 2>/dev/null` /bin/universe = `(/bin/universe) 2>/dev/null` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` /bin/arch = `(/bin/arch) 2>/dev/null` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` UNAME_MACHINE = ${UNAME_MACHINE} UNAME_RELEASE = ${UNAME_RELEASE} UNAME_SYSTEM = ${UNAME_SYSTEM} UNAME_VERSION = ${UNAME_VERSION} EOF exit 1 # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" # End: ��������������������������������������������������������������������������������������������������������������������������������������������������������disque-1.0-rc1/deps/jemalloc/config.stamp.in��������������������������������������������������������0000664�0000000�0000000�00000000000�12641765611�0020767�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������disque-1.0-rc1/deps/jemalloc/config.sub�������������������������������������������������������������0000775�0000000�0000000�00000105666�12641765611�0020062�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#! /bin/sh # Configuration validation subroutine script. # Copyright 1992-2014 Free Software Foundation, Inc. 
timestamp='2014-05-01' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/>. # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that # program. This Exception is an additional permission under section 7 # of the GNU General Public License, version 3 ("GPLv3"). # Please send patches with a ChangeLog entry to config-patches@gnu.org. # # Configuration subroutine to validate and canonicalize a configuration type. # Supply the specified configuration type as an argument. # If it is invalid, we print an error message on stderr and exit with code 1. # Otherwise, we print the canonical config type on stdout and succeed. # You can get the latest version of this script from: # http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD # This file is supposed to be the same for all GNU packages # and recognize all the CPU types, system types and aliases # that are meaningful with *any* GNU software. # Each package is responsible for reporting which valid configurations # it does not support. The user should be able to distinguish # a failure to support a valid configuration from a meaningless # configuration. # The goal of this file is to map all the various variations of a given # machine specification into a single specification in the form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM # or in some cases, the newer four-part form: # CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM # It is wrong to echo any other type of specification. me=`echo "$0" | sed -e 's,.*/,,'` usage="\ Usage: $0 [OPTION] CPU-MFR-OPSYS $0 [OPTION] ALIAS Canonicalize a configuration name. Operation modes: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit Report bugs and patches to <config-patches@gnu.org>." version="\ GNU config.sub ($timestamp) Copyright 1992-2014 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" Try \`$me --help' for more information." # Parse command line while test $# -gt 0 ; do case $1 in --time-stamp | --time* | -t ) echo "$timestamp" ; exit ;; --version | -v ) echo "$version" ; exit ;; --help | --h* | -h ) echo "$usage"; exit ;; -- ) # Stop option processing shift; break ;; - ) # Use stdin as input. break ;; -* ) echo "$me: invalid option $1$help" exit 1 ;; *local*) # First pass through any local machine types. echo $1 exit ;; * ) break ;; esac done case $# in 0) echo "$me: missing argument$help" >&2 exit 1;; 1) ;; *) echo "$me: too many arguments$help" >&2 exit 1;; esac # Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). # Here we must recognize all the valid KERNEL-OS combinations. 
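# (Editor's sketch of the split performed below, using the hypothetical # input "x86_64-pc-linux-gnu": the sed keeps the last two dash-separated # fields, so maybe_os=linux-gnu; that matches linux-gnu* in the case # statement, giving os=-linux-gnu and basic_machine=x86_64-pc.) 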
maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` case $maybe_os in nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ knetbsd*-gnu* | netbsd*-gnu* | \ kopensolaris*-gnu* | \ storm-chaos* | os2-emx* | rtmk-nova*) os=-$maybe_os basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` ;; android-linux) os=-linux-android basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown ;; *) basic_machine=`echo $1 | sed 's/-[^-]*$//'` if [ $basic_machine != $1 ] then os=`echo $1 | sed 's/.*-/-/'` else os=; fi ;; esac ### Let's recognize common machines as not being operating systems so ### that things like config.sub decstation-3100 work. We also ### recognize some manufacturers as not being operating systems, so we ### can provide default operating systems below. case $os in -sun*os*) # Prevent following clause from handling this invalid input. ;; -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ -apple | -axis | -knuth | -cray | -microblaze*) os= basic_machine=$1 ;; -bluegene*) os=-cnk ;; -sim | -cisco | -oki | -wec | -winbond) os= basic_machine=$1 ;; -scout) ;; -wrs) os=-vxworks basic_machine=$1 ;; -chorusos*) os=-chorusos basic_machine=$1 ;; -chorusrdb) os=-chorusrdb basic_machine=$1 ;; -hiux*) os=-hiuxwe2 ;; -sco6) os=-sco5v6 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco5) os=-sco3.2v5 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco4) os=-sco3.2v4 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco3.2.[4-9]*) os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco3.2v[4-9]*) # Don't forget version if it is 3.2v4 or newer. basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco5v6*) # Don't forget version if it is 3.2v4 or newer. basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco*) os=-sco3.2v2 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -udk*) basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -isc) os=-isc2.2 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -clix*) basic_machine=clipper-intergraph ;; -isc*) basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -lynx*178) os=-lynxos178 ;; -lynx*5) os=-lynxos5 ;; -lynx*) os=-lynxos ;; -ptx*) basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` ;; -windowsnt*) os=`echo $os | sed -e 's/windowsnt/winnt/'` ;; -psos*) os=-psos ;; -mint | -mint[0-9]*) basic_machine=m68k-atari os=-mint ;; esac # Decode aliases for certain CPU-COMPANY combinations. case $basic_machine in # Recognize the basic CPU types without company name. # Some are omitted here because they have special meanings below. 
1750a | 580 \ | a29k \ | aarch64 | aarch64_be \ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ | am33_2.0 \ | arc | arceb \ | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ | avr | avr32 \ | be32 | be64 \ | bfin \ | c4x | c8051 | clipper \ | d10v | d30v | dlx | dsp16xx \ | epiphany \ | fido | fr30 | frv \ | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ | hexagon \ | i370 | i860 | i960 | ia64 \ | ip2k | iq2000 \ | k1om \ | le32 | le64 \ | lm32 \ | m32c | m32r | m32rle | m68000 | m68k | m88k \ | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ | mips | mipsbe | mipseb | mipsel | mipsle \ | mips16 \ | mips64 | mips64el \ | mips64octeon | mips64octeonel \ | mips64orion | mips64orionel \ | mips64r5900 | mips64r5900el \ | mips64vr | mips64vrel \ | mips64vr4100 | mips64vr4100el \ | mips64vr4300 | mips64vr4300el \ | mips64vr5000 | mips64vr5000el \ | mips64vr5900 | mips64vr5900el \ | mipsisa32 | mipsisa32el \ | mipsisa32r2 | mipsisa32r2el \ | mipsisa32r6 | mipsisa32r6el \ | mipsisa64 | mipsisa64el \ | mipsisa64r2 | mipsisa64r2el \ | mipsisa64r6 | mipsisa64r6el \ | mipsisa64sb1 | mipsisa64sb1el \ | mipsisa64sr71k | mipsisa64sr71kel \ | mipsr5900 | mipsr5900el \ | mipstx39 | mipstx39el \ | mn10200 | mn10300 \ | moxie \ | mt \ | msp430 \ | nds32 | nds32le | nds32be \ | nios | nios2 | nios2eb | nios2el \ | ns16k | ns32k \ | open8 | or1k | or1knd | or32 \ | pdp10 | pdp11 | pj | pjl \ | powerpc | powerpc64 | powerpc64le | powerpcle \ | pyramid \ | rl78 | rx \ | score \ | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ | sh64 | sh64le \ | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ | spu \ | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ | ubicom32 \ | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ | we32k \ | x86 | xc16x | xstormy16 | xtensa \ | z8k | z80) basic_machine=$basic_machine-unknown ;; c54x) basic_machine=tic54x-unknown ;; c55x) basic_machine=tic55x-unknown ;; c6x) basic_machine=tic6x-unknown ;; m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip) basic_machine=$basic_machine-unknown os=-none ;; m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) ;; ms1) basic_machine=mt-unknown ;; strongarm | thumb | xscale) basic_machine=arm-unknown ;; xgate) basic_machine=$basic_machine-unknown os=-none ;; xscaleeb) basic_machine=armeb-unknown ;; xscaleel) basic_machine=armel-unknown ;; # We use `pc' rather than `unknown' # because (1) that's what they normally are, and # (2) the word "unknown" tends to confuse beginning users. i*86 | x86_64) basic_machine=$basic_machine-pc ;; # Object if more than one company name word. *-*-*) echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 exit 1 ;; # Recognize the basic CPU types with company name. 
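# Bridge note (editorial, not upstream text): the clauses above
# canonicalized bare CPU names, e.g. `mips64el` became mips64el-unknown
# and `x86_64` became x86_64-pc; the pattern list below accepts the same
# CPU types when a company name is already attached and passes them
# through unchanged.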
580-* \ | a29k-* \ | aarch64-* | aarch64_be-* \ | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \ | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ | avr-* | avr32-* \ | be32-* | be64-* \ | bfin-* | bs2000-* \ | c[123]* | c30-* | [cjt]90-* | c4x-* \ | c8051-* | clipper-* | craynv-* | cydra-* \ | d10v-* | d30v-* | dlx-* \ | elxsi-* \ | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ | h8300-* | h8500-* \ | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ | hexagon-* \ | i*86-* | i860-* | i960-* | ia64-* \ | ip2k-* | iq2000-* \ | k1om-* \ | le32-* | le64-* \ | lm32-* \ | m32c-* | m32r-* | m32rle-* \ | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \ | microblaze-* | microblazeel-* \ | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ | mips16-* \ | mips64-* | mips64el-* \ | mips64octeon-* | mips64octeonel-* \ | mips64orion-* | mips64orionel-* \ | mips64r5900-* | mips64r5900el-* \ | mips64vr-* | mips64vrel-* \ | mips64vr4100-* | mips64vr4100el-* \ | mips64vr4300-* | mips64vr4300el-* \ | mips64vr5000-* | mips64vr5000el-* \ | mips64vr5900-* | mips64vr5900el-* \ | mipsisa32-* | mipsisa32el-* \ | mipsisa32r2-* | mipsisa32r2el-* \ | mipsisa32r6-* | mipsisa32r6el-* \ | mipsisa64-* | mipsisa64el-* \ | mipsisa64r2-* | mipsisa64r2el-* \ | mipsisa64r6-* | mipsisa64r6el-* \ | mipsisa64sb1-* | mipsisa64sb1el-* \ | mipsisa64sr71k-* | mipsisa64sr71kel-* \ | mipsr5900-* | mipsr5900el-* \ | mipstx39-* | mipstx39el-* \ | mmix-* \ | mt-* \ | msp430-* \ | nds32-* | nds32le-* | nds32be-* \ | nios-* | nios2-* | nios2eb-* | nios2el-* \ | none-* | np1-* | ns16k-* | ns32k-* \ | open8-* \ | or1k*-* \ | orion-* \ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ | pyramid-* \ | rl78-* | romp-* | rs6000-* | rx-* \ | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ | sparclite-* \ | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \ | tahoe-* \ | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ | tile*-* \ | tron-* \ | ubicom32-* \ | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ | vax-* \ | we32k-* \ | x86-* | x86_64-* | xc16x-* | xps100-* \ | xstormy16-* | xtensa*-* \ | ymp-* \ | z8k-* | z80-*) ;; # Recognize the basic CPU types without company name, with glob match. xtensa*) basic_machine=$basic_machine-unknown ;; # Recognize the various machine names and aliases which stand # for a CPU type and a company and sometimes even an OS. 
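# Worked examples for the alias table below (editorial, not upstream):
#   sh config.sub amd64-linux-gnu   -> x86_64-pc-linux-gnu  (amd64 clause)
#   sh config.sub sun4sol2          -> sparc-sun-solaris2   (sun4sol2 clause)
# Each alias rewrites basic_machine and, in some cases, also sets os.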
386bsd) basic_machine=i386-unknown os=-bsd ;; 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) basic_machine=m68000-att ;; 3b*) basic_machine=we32k-att ;; a29khif) basic_machine=a29k-amd os=-udi ;; abacus) basic_machine=abacus-unknown ;; adobe68k) basic_machine=m68010-adobe os=-scout ;; alliant | fx80) basic_machine=fx80-alliant ;; altos | altos3068) basic_machine=m68k-altos ;; am29k) basic_machine=a29k-none os=-bsd ;; amd64) basic_machine=x86_64-pc ;; amd64-*) basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` ;; amdahl) basic_machine=580-amdahl os=-sysv ;; amiga | amiga-*) basic_machine=m68k-unknown ;; amigaos | amigados) basic_machine=m68k-unknown os=-amigaos ;; amigaunix | amix) basic_machine=m68k-unknown os=-sysv4 ;; apollo68) basic_machine=m68k-apollo os=-sysv ;; apollo68bsd) basic_machine=m68k-apollo os=-bsd ;; aros) basic_machine=i386-pc os=-aros ;; aux) basic_machine=m68k-apple os=-aux ;; balance) basic_machine=ns32k-sequent os=-dynix ;; blackfin) basic_machine=bfin-unknown os=-linux ;; blackfin-*) basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` os=-linux ;; bluegene*) basic_machine=powerpc-ibm os=-cnk ;; c54x-*) basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` ;; c55x-*) basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` ;; c6x-*) basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` ;; c90) basic_machine=c90-cray os=-unicos ;; cegcc) basic_machine=arm-unknown os=-cegcc ;; convex-c1) basic_machine=c1-convex os=-bsd ;; convex-c2) basic_machine=c2-convex os=-bsd ;; convex-c32) basic_machine=c32-convex os=-bsd ;; convex-c34) basic_machine=c34-convex os=-bsd ;; convex-c38) basic_machine=c38-convex os=-bsd ;; cray | j90) basic_machine=j90-cray os=-unicos ;; craynv) basic_machine=craynv-cray os=-unicosmp ;; cr16 | cr16-*) basic_machine=cr16-unknown os=-elf ;; crds | unos) basic_machine=m68k-crds ;; crisv32 | crisv32-* | etraxfs*) basic_machine=crisv32-axis ;; cris | cris-* | etrax*) basic_machine=cris-axis ;; crx) basic_machine=crx-unknown os=-elf ;; da30 | da30-*) basic_machine=m68k-da30 ;; decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) basic_machine=mips-dec ;; decsystem10* | dec10*) basic_machine=pdp10-dec os=-tops10 ;; decsystem20* | dec20*) basic_machine=pdp10-dec os=-tops20 ;; delta | 3300 | motorola-3300 | motorola-delta \ | 3300-motorola | delta-motorola) basic_machine=m68k-motorola ;; delta88) basic_machine=m88k-motorola os=-sysv3 ;; dicos) basic_machine=i686-pc os=-dicos ;; djgpp) basic_machine=i586-pc os=-msdosdjgpp ;; dpx20 | dpx20-*) basic_machine=rs6000-bull os=-bosx ;; dpx2* | dpx2*-bull) basic_machine=m68k-bull os=-sysv3 ;; ebmon29k) basic_machine=a29k-amd os=-ebmon ;; elxsi) basic_machine=elxsi-elxsi os=-bsd ;; encore | umax | mmax) basic_machine=ns32k-encore ;; es1800 | OSE68k | ose68k | ose | OSE) basic_machine=m68k-ericsson os=-ose ;; fx2800) basic_machine=i860-alliant ;; genix) basic_machine=ns32k-ns ;; gmicro) basic_machine=tron-gmicro os=-sysv ;; go32) basic_machine=i386-pc os=-go32 ;; h3050r* | hiux*) basic_machine=hppa1.1-hitachi os=-hiuxwe2 ;; h8300hms) basic_machine=h8300-hitachi os=-hms ;; h8300xray) basic_machine=h8300-hitachi os=-xray ;; h8500hms) basic_machine=h8500-hitachi os=-hms ;; harris) basic_machine=m88k-harris os=-sysv3 ;; hp300-*) basic_machine=m68k-hp ;; hp300bsd) basic_machine=m68k-hp os=-bsd ;; hp300hpux) basic_machine=m68k-hp os=-hpux ;; hp3k9[0-9][0-9] | hp9[0-9][0-9]) basic_machine=hppa1.0-hp ;; hp9k2[0-9][0-9] | hp9k31[0-9]) 
basic_machine=m68000-hp ;; hp9k3[2-9][0-9]) basic_machine=m68k-hp ;; hp9k6[0-9][0-9] | hp6[0-9][0-9]) basic_machine=hppa1.0-hp ;; hp9k7[0-79][0-9] | hp7[0-79][0-9]) basic_machine=hppa1.1-hp ;; hp9k78[0-9] | hp78[0-9]) # FIXME: really hppa2.0-hp basic_machine=hppa1.1-hp ;; hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) # FIXME: really hppa2.0-hp basic_machine=hppa1.1-hp ;; hp9k8[0-9][13679] | hp8[0-9][13679]) basic_machine=hppa1.1-hp ;; hp9k8[0-9][0-9] | hp8[0-9][0-9]) basic_machine=hppa1.0-hp ;; hppa-next) os=-nextstep3 ;; hppaosf) basic_machine=hppa1.1-hp os=-osf ;; hppro) basic_machine=hppa1.1-hp os=-proelf ;; i370-ibm* | ibm*) basic_machine=i370-ibm ;; i*86v32) basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` os=-sysv32 ;; i*86v4*) basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` os=-sysv4 ;; i*86v) basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` os=-sysv ;; i*86sol2) basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` os=-solaris2 ;; i386mach) basic_machine=i386-mach os=-mach ;; i386-vsta | vsta) basic_machine=i386-unknown os=-vsta ;; iris | iris4d) basic_machine=mips-sgi case $os in -irix*) ;; *) os=-irix4 ;; esac ;; isi68 | isi) basic_machine=m68k-isi os=-sysv ;; m68knommu) basic_machine=m68k-unknown os=-linux ;; m68knommu-*) basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` os=-linux ;; m88k-omron*) basic_machine=m88k-omron ;; magnum | m3230) basic_machine=mips-mips os=-sysv ;; merlin) basic_machine=ns32k-utek os=-sysv ;; microblaze*) basic_machine=microblaze-xilinx ;; mingw64) basic_machine=x86_64-pc os=-mingw64 ;; mingw32) basic_machine=i686-pc os=-mingw32 ;; mingw32ce) basic_machine=arm-unknown os=-mingw32ce ;; miniframe) basic_machine=m68000-convergent ;; *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) basic_machine=m68k-atari os=-mint ;; mips3*-*) basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` ;; mips3*) basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown ;; monitor) basic_machine=m68k-rom68k os=-coff ;; morphos) basic_machine=powerpc-unknown os=-morphos ;; msdos) basic_machine=i386-pc os=-msdos ;; ms1-*) basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` ;; msys) basic_machine=i686-pc os=-msys ;; mvs) basic_machine=i370-ibm os=-mvs ;; nacl) basic_machine=le32-unknown os=-nacl ;; ncr3000) basic_machine=i486-ncr os=-sysv4 ;; netbsd386) basic_machine=i386-unknown os=-netbsd ;; netwinder) basic_machine=armv4l-rebel os=-linux ;; news | news700 | news800 | news900) basic_machine=m68k-sony os=-newsos ;; news1000) basic_machine=m68030-sony os=-newsos ;; news-3600 | risc-news) basic_machine=mips-sony os=-newsos ;; necv70) basic_machine=v70-nec os=-sysv ;; next | m*-next ) basic_machine=m68k-next case $os in -nextstep* ) ;; -ns2*) os=-nextstep2 ;; *) os=-nextstep3 ;; esac ;; nh3000) basic_machine=m68k-harris os=-cxux ;; nh[45]000) basic_machine=m88k-harris os=-cxux ;; nindy960) basic_machine=i960-intel os=-nindy ;; mon960) basic_machine=i960-intel os=-mon960 ;; nonstopux) basic_machine=mips-compaq os=-nonstopux ;; np1) basic_machine=np1-gould ;; neo-tandem) basic_machine=neo-tandem ;; nse-tandem) basic_machine=nse-tandem ;; nsr-tandem) basic_machine=nsr-tandem ;; op50n-* | op60c-*) basic_machine=hppa1.1-oki os=-proelf ;; openrisc | openrisc-*) basic_machine=or32-unknown ;; os400) basic_machine=powerpc-ibm os=-os400 ;; OSE68000 | ose68000) basic_machine=m68000-ericsson os=-ose ;; os68k) basic_machine=m68k-none os=-os68k ;; pa-hitachi) basic_machine=hppa1.1-hitachi os=-hiuxwe2 ;; paragon) 
basic_machine=i860-intel os=-osf ;; parisc) basic_machine=hppa-unknown os=-linux ;; parisc-*) basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` os=-linux ;; pbd) basic_machine=sparc-tti ;; pbb) basic_machine=m68k-tti ;; pc532 | pc532-*) basic_machine=ns32k-pc532 ;; pc98) basic_machine=i386-pc ;; pc98-*) basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pentium | p5 | k5 | k6 | nexgen | viac3) basic_machine=i586-pc ;; pentiumpro | p6 | 6x86 | athlon | athlon_*) basic_machine=i686-pc ;; pentiumii | pentium2 | pentiumiii | pentium3) basic_machine=i686-pc ;; pentium4) basic_machine=i786-pc ;; pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pentiumpro-* | p6-* | 6x86-* | athlon-*) basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pentium4-*) basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pn) basic_machine=pn-gould ;; power) basic_machine=power-ibm ;; ppc | ppcbe) basic_machine=powerpc-unknown ;; ppc-* | ppcbe-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` ;; ppcle | powerpclittle | ppc-le | powerpc-little) basic_machine=powerpcle-unknown ;; ppcle-* | powerpclittle-*) basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` ;; ppc64) basic_machine=powerpc64-unknown ;; ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` ;; ppc64le | powerpc64little | ppc64-le | powerpc64-little) basic_machine=powerpc64le-unknown ;; ppc64le-* | powerpc64little-*) basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` ;; ps2) basic_machine=i386-ibm ;; pw32) basic_machine=i586-unknown os=-pw32 ;; rdos | rdos64) basic_machine=x86_64-pc os=-rdos ;; rdos32) basic_machine=i386-pc os=-rdos ;; rom68k) basic_machine=m68k-rom68k os=-coff ;; rm[46]00) basic_machine=mips-siemens ;; rtpc | rtpc-*) basic_machine=romp-ibm ;; s390 | s390-*) basic_machine=s390-ibm ;; s390x | s390x-*) basic_machine=s390x-ibm ;; sa29200) basic_machine=a29k-amd os=-udi ;; sb1) basic_machine=mipsisa64sb1-unknown ;; sb1el) basic_machine=mipsisa64sb1el-unknown ;; sde) basic_machine=mipsisa32-sde os=-elf ;; sei) basic_machine=mips-sei os=-seiux ;; sequent) basic_machine=i386-sequent ;; sh) basic_machine=sh-hitachi os=-hms ;; sh5el) basic_machine=sh5le-unknown ;; sh64) basic_machine=sh64-unknown ;; sparclite-wrs | simso-wrs) basic_machine=sparclite-wrs os=-vxworks ;; sps7) basic_machine=m68k-bull os=-sysv2 ;; spur) basic_machine=spur-unknown ;; st2000) basic_machine=m68k-tandem ;; stratus) basic_machine=i860-stratus os=-sysv4 ;; strongarm-* | thumb-*) basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` ;; sun2) basic_machine=m68000-sun ;; sun2os3) basic_machine=m68000-sun os=-sunos3 ;; sun2os4) basic_machine=m68000-sun os=-sunos4 ;; sun3os3) basic_machine=m68k-sun os=-sunos3 ;; sun3os4) basic_machine=m68k-sun os=-sunos4 ;; sun4os3) basic_machine=sparc-sun os=-sunos3 ;; sun4os4) basic_machine=sparc-sun os=-sunos4 ;; sun4sol2) basic_machine=sparc-sun os=-solaris2 ;; sun3 | sun3-*) basic_machine=m68k-sun ;; sun4) basic_machine=sparc-sun ;; sun386 | sun386i | roadrunner) basic_machine=i386-sun ;; sv1) basic_machine=sv1-cray os=-unicos ;; symmetry) basic_machine=i386-sequent os=-dynix ;; t3e) basic_machine=alphaev5-cray os=-unicos ;; t90) basic_machine=t90-cray os=-unicos ;; tile*) basic_machine=$basic_machine-unknown os=-linux-gnu ;; tx39) 
basic_machine=mipstx39-unknown ;; tx39el) basic_machine=mipstx39el-unknown ;; toad1) basic_machine=pdp10-xkl os=-tops20 ;; tower | tower-32) basic_machine=m68k-ncr ;; tpf) basic_machine=s390x-ibm os=-tpf ;; udi29k) basic_machine=a29k-amd os=-udi ;; ultra3) basic_machine=a29k-nyu os=-sym1 ;; v810 | necv810) basic_machine=v810-nec os=-none ;; vaxv) basic_machine=vax-dec os=-sysv ;; vms) basic_machine=vax-dec os=-vms ;; vpp*|vx|vx-*) basic_machine=f301-fujitsu ;; vxworks960) basic_machine=i960-wrs os=-vxworks ;; vxworks68) basic_machine=m68k-wrs os=-vxworks ;; vxworks29k) basic_machine=a29k-wrs os=-vxworks ;; w65*) basic_machine=w65-wdc os=-none ;; w89k-*) basic_machine=hppa1.1-winbond os=-proelf ;; xbox) basic_machine=i686-pc os=-mingw32 ;; xps | xps100) basic_machine=xps100-honeywell ;; xscale-* | xscalee[bl]-*) basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` ;; ymp) basic_machine=ymp-cray os=-unicos ;; z8k-*-coff) basic_machine=z8k-unknown os=-sim ;; z80-*-coff) basic_machine=z80-unknown os=-sim ;; none) basic_machine=none-none os=-none ;; # Here we handle the default manufacturer of certain CPU types. It is in # some cases the only manufacturer, in others, it is the most popular. w89k) basic_machine=hppa1.1-winbond ;; op50n) basic_machine=hppa1.1-oki ;; op60c) basic_machine=hppa1.1-oki ;; romp) basic_machine=romp-ibm ;; mmix) basic_machine=mmix-knuth ;; rs6000) basic_machine=rs6000-ibm ;; vax) basic_machine=vax-dec ;; pdp10) # there are many clones, so DEC is not a safe bet basic_machine=pdp10-unknown ;; pdp11) basic_machine=pdp11-dec ;; we32k) basic_machine=we32k-att ;; sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) basic_machine=sh-unknown ;; sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) basic_machine=sparc-sun ;; cydra) basic_machine=cydra-cydrome ;; orion) basic_machine=orion-highlevel ;; orion105) basic_machine=clipper-highlevel ;; mac | mpw | mac-mpw) basic_machine=m68k-apple ;; pmac | pmac-mpw) basic_machine=powerpc-apple ;; *-unknown) # Make sure to match an already-canonicalized machine name. ;; *) echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 exit 1 ;; esac # Here we canonicalize certain aliases for manufacturers. case $basic_machine in *-digital*) basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` ;; *-commodore*) basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` ;; *) ;; esac # Decode manufacturer-specific aliases for certain operating systems. if [ x"$os" != x"" ] then case $os in # First match some system type aliases # that might get confused with valid system types. # -solaris* is a basic system type, with this one exception. -auroraux) os=-auroraux ;; -solaris1 | -solaris1.*) os=`echo $os | sed -e 's|solaris1|sunos4|'` ;; -solaris) os=-solaris2 ;; -svr4*) os=-sysv4 ;; -unixware*) os=-sysv4.2uw ;; -gnu/linux*) os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` ;; # First accept the basic system types. # The portable systems comes first. # Each alternative MUST END IN A *, to match a version number. # -sysv* is not here because it comes later, after sysvr4. 
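# Worked examples for the OS aliases handled above and below (editorial,
# not upstream text): a bare `-solaris` was just rewritten to `-solaris2`;
# in the list that follows, `-linux*` becomes `-linux-gnu*` (so `-linux`
# -> `-linux-gnu`) and `-sunos5.8` becomes `-solaris2.8`.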
-gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ | -sym* | -kopensolaris* | -plan9* \ | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ | -aos* | -aros* \ | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ | -bitrig* | -openbsd* | -solidbsd* \ | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ | -chorusos* | -chorusrdb* | -cegcc* \ | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ | -linux-newlib* | -linux-musl* | -linux-uclibc* \ | -uxpv* | -beos* | -mpeix* | -udk* \ | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* | -tirtos*) # Remember, each alternative MUST END IN *, to match a version number. ;; -qnx*) case $basic_machine in x86-* | i*86-*) ;; *) os=-nto$os ;; esac ;; -nto-qnx*) ;; -nto*) os=`echo $os | sed -e 's|nto|nto-qnx|'` ;; -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) ;; -mac*) os=`echo $os | sed -e 's|mac|macos|'` ;; # Apple iOS -ios*) ;; -linux-dietlibc) os=-linux-dietlibc ;; -linux*) os=`echo $os | sed -e 's|linux|linux-gnu|'` ;; -sunos5*) os=`echo $os | sed -e 's|sunos5|solaris2|'` ;; -sunos6*) os=`echo $os | sed -e 's|sunos6|solaris3|'` ;; -opened*) os=-openedition ;; -os400*) os=-os400 ;; -wince*) os=-wince ;; -osfrose*) os=-osfrose ;; -osf*) os=-osf ;; -utek*) os=-bsd ;; -dynix*) os=-bsd ;; -acis*) os=-aos ;; -atheos*) os=-atheos ;; -syllable*) os=-syllable ;; -386bsd) os=-bsd ;; -ctix* | -uts*) os=-sysv ;; -nova*) os=-rtmk-nova ;; -ns2 ) os=-nextstep2 ;; -nsk*) os=-nsk ;; # Preserve the version number of sinix5. -sinix5.*) os=`echo $os | sed -e 's|sinix|sysv|'` ;; -sinix*) os=-sysv4 ;; -tpf*) os=-tpf ;; -triton*) os=-sysv3 ;; -oss*) os=-sysv3 ;; -svr4) os=-sysv4 ;; -svr3) os=-sysv3 ;; -sysvr4) os=-sysv4 ;; # This must come after -sysvr4. -sysv*) ;; -ose*) os=-ose ;; -es1800*) os=-ose ;; -xenix) os=-xenix ;; -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) os=-mint ;; -aros*) os=-aros ;; -zvmoe) os=-zvmoe ;; -dicos*) os=-dicos ;; -nacl*) ;; -none) ;; *) # Get rid of the `-' at the beginning of $os. os=`echo $os | sed 's/[^-]*-//'` echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 exit 1 ;; esac else # Here we handle the default operating systems that come with various machines. # The value should be what the vendor currently ships out the door with their # machine or put another way, the most popular os provided with the machine. 
# Note that if you're going to try to match "-MANUFACTURER" here (say, # "-sun"), then you have to tell the case statement up towards the top # that MANUFACTURER isn't an operating system. Otherwise, code above # will signal an error saying that MANUFACTURER isn't an operating # system, and we'll never get to this point. case $basic_machine in score-*) os=-elf ;; spu-*) os=-elf ;; *-acorn) os=-riscix1.2 ;; arm*-rebel) os=-linux ;; arm*-semi) os=-aout ;; c4x-* | tic4x-*) os=-coff ;; c8051-*) os=-elf ;; hexagon-*) os=-elf ;; tic54x-*) os=-coff ;; tic55x-*) os=-coff ;; tic6x-*) os=-coff ;; # This must come before the *-dec entry. pdp10-*) os=-tops20 ;; pdp11-*) os=-none ;; *-dec | vax-*) os=-ultrix4.2 ;; m68*-apollo) os=-domain ;; i386-sun) os=-sunos4.0.2 ;; m68000-sun) os=-sunos3 ;; m68*-cisco) os=-aout ;; mep-*) os=-elf ;; mips*-cisco) os=-elf ;; mips*-*) os=-elf ;; or32-*) os=-coff ;; *-tti) # must be before sparc entry or we get the wrong os. os=-sysv3 ;; sparc-* | *-sun) os=-sunos4.1.1 ;; *-be) os=-beos ;; *-haiku) os=-haiku ;; *-ibm) os=-aix ;; *-knuth) os=-mmixware ;; *-wec) os=-proelf ;; *-winbond) os=-proelf ;; *-oki) os=-proelf ;; *-hp) os=-hpux ;; *-hitachi) os=-hiux ;; i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) os=-sysv ;; *-cbm) os=-amigaos ;; *-dg) os=-dgux ;; *-dolphin) os=-sysv3 ;; m68k-ccur) os=-rtu ;; m88k-omron*) os=-luna ;; *-next ) os=-nextstep ;; *-sequent) os=-ptx ;; *-crds) os=-unos ;; *-ns) os=-genix ;; i370-*) os=-mvs ;; *-next) os=-nextstep3 ;; *-gould) os=-sysv ;; *-highlevel) os=-bsd ;; *-encore) os=-bsd ;; *-sgi) os=-irix ;; *-siemens) os=-sysv4 ;; *-masscomp) os=-rtu ;; f30[01]-fujitsu | f700-fujitsu) os=-uxpv ;; *-rom68k) os=-coff ;; *-*bug) os=-coff ;; *-apple) os=-macos ;; *-atari*) os=-mint ;; *) os=-none ;; esac fi # Here we handle the case where we know the os, and the CPU type, but not the # manufacturer. We pick the logical manufacturer. vendor=unknown case $basic_machine in *-unknown) case $os in -riscix*) vendor=acorn ;; -sunos*) vendor=sun ;; -cnk*|-aix*) vendor=ibm ;; -beos*) vendor=be ;; -hpux*) vendor=hp ;; -mpeix*) vendor=hp ;; -hiux*) vendor=hitachi ;; -unos*) vendor=crds ;; -dgux*) vendor=dg ;; -luna*) vendor=omron ;; -genix*) vendor=ns ;; -mvs* | -opened*) vendor=ibm ;; -os400*) vendor=ibm ;; -ptx*) vendor=sequent ;; -tpf*) vendor=ibm ;; -vxsim* | -vxworks* | -windiss*) vendor=wrs ;; -aux*) vendor=apple ;; -hms*) vendor=hitachi ;; -mpw* | -macos*) vendor=apple ;; -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) vendor=atari ;; -vos*) vendor=stratus ;; esac basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` ;; esac echo $basic_machine$os exit # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" # End: disque-1.0-rc1/deps/jemalloc/configure000077500000000000000000010133671264176561100200030ustar00rootroot00000000000000#! /bin/sh # Guess values for system-dependent variables and create Makefiles. # Generated by GNU Autoconf 2.69.
# # # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. # # # This configure script is free software; the Free Software Foundation # gives unlimited permission to copy, distribute and modify it. ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. 
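# (Editorial note, not upstream text) CDPATH is neutralized below because,
# when set, `cd` may print the resolved directory on stdout; that would
# corrupt values this script captures via command substitution, such as
# the `cd "$srcdir" && ... pwd` probe used later to locate the source tree.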
(unset CDPATH) >/dev/null 2>&1 && unset CDPATH # Use a proper internal environment variable to ensure we don't fall # into an infinite loop, continuously re-executing ourselves. if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then _as_can_reexec=no; export _as_can_reexec; # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 as_fn_exit 255 fi # We don't want this to propagate to other subprocesses. { _as_can_reexec=; unset _as_can_reexec;} if test "x$CONFIG_SHELL" = x; then as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which # is contrary to our usage. Disable this feature. alias -g '\${1+\"\$@\"}'='\"\$@\"' setopt NO_GLOB_SUBST else case \`(set -o) 2>/dev/null\` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi " as_required="as_fn_return () { (exit \$1); } as_fn_success () { as_fn_return 0; } as_fn_failure () { as_fn_return 1; } as_fn_ret_success () { return 0; } as_fn_ret_failure () { return 1; } exitcode=0 as_fn_success || { exitcode=1; echo as_fn_success failed.; } as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : else exitcode=1; echo positional parameters were not saved. fi test x\$exitcode = x0 || exit 1 test -x / || exit 1" as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 test \$(( 1 + 1 )) = 2 || exit 1" if (eval "$as_required") 2>/dev/null; then : as_have_required=yes else as_have_required=no fi if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_found=false for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. as_found=: case $as_dir in #( /*) for as_base in sh bash ksh sh5; do # Try only shells that exist, to save several forks. 
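# (Editorial note, not upstream text) Each candidate shell located below
# is handed the $as_required probe via `as_run=a "$as_shell"`; the first
# shell that executes it cleanly is exported as CONFIG_SHELL, and the
# script re-executes itself under that shell with `exec ... "$as_myself"`.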
as_shell=$as_dir/$as_base if { test -f "$as_shell" || test -f "$as_shell.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : CONFIG_SHELL=$as_shell as_have_required=yes if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : break 2 fi fi done;; esac as_found=false done $as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : CONFIG_SHELL=$SHELL as_have_required=yes fi; } IFS=$as_save_IFS if test "x$CONFIG_SHELL" != x; then : export CONFIG_SHELL # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 exit 255 fi if test x$as_have_required = xno; then : $as_echo "$0: This script requires a shell more modern than all" $as_echo "$0: the shells that I found on your system." if test x${ZSH_VERSION+set} = xset ; then $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" $as_echo "$0: be upgraded to zsh 4.3.4 or later." else $as_echo "$0: Please tell bug-autoconf@gnu.org about your system, $0: including any error possibly output before this $0: message. Then install a modern shell, or manually run $0: the script under such a shell if you do have one." fi exit 1 fi fi fi SHELL=${CONFIG_SHELL-/bin/sh} export SHELL # Unset more variables known to interfere with behavior of common tools. CLICOLOR_FORCE= GREP_OPTIONS= unset CLICOLOR_FORCE GREP_OPTIONS ## --------------------- ## ## M4sh Shell Functions. ## ## --------------------- ## # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? 
"cannot create directory $as_dir" } # as_fn_mkdir_p # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits as_lineno_1=$LINENO as_lineno_1a=$LINENO as_lineno_2=$LINENO as_lineno_2a=$LINENO eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) sed -n ' p /[$]LINENO/= ' <$as_myself | sed ' s/[$]LINENO.*/&-/ t lineno b :lineno N :loop s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ t loop s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } # If we had to re-execute with $CONFIG_SHELL, we're ensured to have # already done that, so ensure we don't try to do so again and fall # in an infinite loop. This has already happened in practice. _as_can_reexec=no; export _as_can_reexec # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. 
Autoconf is especially sensitive to this). . "./$as_me.lineno" # Exit status is that of the last command. exit } ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" test -n "$DJDIR" || exec 7<&0 </dev/null 6>&1 # Name of the host. # hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, # so uname gets run too. ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` # # Initializations. # ac_default_prefix=/usr/local ac_clean_files= ac_config_libobj_dir=. LIBOBJS= cross_compiling=no subdirs= MFLAGS= MAKEFLAGS= # Identity of this package. PACKAGE_NAME= PACKAGE_TARNAME= PACKAGE_VERSION= PACKAGE_STRING= PACKAGE_BUGREPORT= PACKAGE_URL= ac_unique_file="Makefile.in" # Factoring default headers for most tests.
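# (Editorial note, not upstream text) The $ac_includes_default prologue
# defined below is what feature probes compile against: a generated test
# program is, in effect, these guarded includes followed by the probe's
# own main(), with the HAVE_* guards defined as earlier checks succeed.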
ac_includes_default="\ #include <stdio.h> #ifdef HAVE_SYS_TYPES_H # include <sys/types.h> #endif #ifdef HAVE_SYS_STAT_H # include <sys/stat.h> #endif #ifdef STDC_HEADERS # include <stdlib.h> # include <stddef.h> #else # ifdef HAVE_STDLIB_H # include <stdlib.h> # endif #endif #ifdef HAVE_STRING_H # if !defined STDC_HEADERS && defined HAVE_MEMORY_H # include <memory.h> # endif # include <string.h> #endif #ifdef HAVE_STRINGS_H # include <strings.h> #endif #ifdef HAVE_INTTYPES_H # include <inttypes.h> #endif #ifdef HAVE_STDINT_H # include <stdint.h> #endif #ifdef HAVE_UNISTD_H # include <unistd.h> #endif" ac_subst_vars='LTLIBOBJS LIBOBJS cfgoutputs_out cfgoutputs_in cfghdrs_out cfghdrs_in enable_zone_allocator enable_tls enable_lazy_lock TESTLIBS jemalloc_version_gid jemalloc_version_nrev jemalloc_version_bugfix jemalloc_version_minor jemalloc_version_major jemalloc_version enable_cache_oblivious enable_xmalloc enable_valgrind enable_utrace enable_fill enable_munmap enable_tcache enable_prof enable_stats enable_debug je_ install_suffix private_namespace JEMALLOC_CPREFIX enable_code_coverage AUTOCONF LD RANLIB INSTALL_DATA INSTALL_SCRIPT INSTALL_PROGRAM enable_autogen RPATH_EXTRA CC_MM AROUT ARFLAGS MKLIB LDTARGET CTARGET PIC_CFLAGS SOREV EXTRA_LDFLAGS DSO_LDFLAGS libprefix exe a o importlib so LD_PRELOAD_VAR RPATH abi AR host_os host_vendor host_cpu host build_os build_vendor build_cpu build EGREP GREP CPP OBJEXT EXEEXT ac_ct_CC CPPFLAGS LDFLAGS CFLAGS CC XSLROOT XSLTPROC MANDIR DATADIR LIBDIR INCLUDEDIR BINDIR PREFIX abs_objroot objroot abs_srcroot srcroot rev CONFIG target_alias host_alias build_alias LIBS ECHO_T ECHO_N ECHO_C DEFS mandir localedir libdir psdir pdfdir dvidir htmldir infodir docdir oldincludedir includedir localstatedir sharedstatedir sysconfdir datadir datarootdir libexecdir sbindir bindir program_transform_name prefix exec_prefix PACKAGE_URL PACKAGE_BUGREPORT PACKAGE_STRING PACKAGE_VERSION PACKAGE_TARNAME PACKAGE_NAME PATH_SEPARATOR SHELL' ac_subst_files='' ac_user_opts=' enable_option_checking with_xslroot with_rpath enable_autogen enable_code_coverage with_mangling with_jemalloc_prefix with_export with_private_namespace with_install_suffix enable_cc_silence enable_debug enable_ivsalloc enable_stats enable_prof enable_prof_libunwind with_static_libunwind enable_prof_libgcc enable_prof_gcc enable_tcache enable_munmap enable_fill enable_utrace enable_valgrind enable_xmalloc enable_cache_oblivious with_lg_tiny_min with_lg_quantum with_lg_page with_lg_page_sizes with_lg_size_class_group enable_lazy_lock enable_tls enable_zone_allocator ' ac_precious_vars='build_alias host_alias target_alias CC CFLAGS LDFLAGS LIBS CPPFLAGS CPP' # Initialize some variables set by options. ac_init_help= ac_init_version=false ac_unrecognized_opts= ac_unrecognized_sep= # The variables have the same names as the options, with # dashes changed to underlines. cache_file=/dev/null exec_prefix=NONE no_create= no_recursion= prefix=NONE program_prefix=NONE program_suffix=NONE program_transform_name=s,x,x, silent= site= srcdir= verbose= x_includes=NONE x_libraries=NONE # Installation directory options. # These are left unexpanded so users can "make install exec_prefix=/foo" # and all the variables that are supposed to be based on exec_prefix # by default will actually change. # Use braces instead of parens because sh, perl, etc. also accept them. # (The list follows the same order as the GNU Coding Standards.)
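# Worked example (editorial, not upstream text): the directory defaults
# below are stored unexpanded, so with `./configure --prefix=/opt/disque`
# and no --exec-prefix, exec_prefix falls back to the prefix and
# bindir='${exec_prefix}/bin' resolves to /opt/disque/bin at build time.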
bindir='${exec_prefix}/bin' sbindir='${exec_prefix}/sbin' libexecdir='${exec_prefix}/libexec' datarootdir='${prefix}/share' datadir='${datarootdir}' sysconfdir='${prefix}/etc' sharedstatedir='${prefix}/com' localstatedir='${prefix}/var' includedir='${prefix}/include' oldincludedir='/usr/include' docdir='${datarootdir}/doc/${PACKAGE}' infodir='${datarootdir}/info' htmldir='${docdir}' dvidir='${docdir}' pdfdir='${docdir}' psdir='${docdir}' libdir='${exec_prefix}/lib' localedir='${datarootdir}/locale' mandir='${datarootdir}/man' ac_prev= ac_dashdash= for ac_option do # If the previous option needs an argument, assign it. if test -n "$ac_prev"; then eval $ac_prev=\$ac_option ac_prev= continue fi case $ac_option in *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; *=) ac_optarg= ;; *) ac_optarg=yes ;; esac # Accept the important Cygnus configure options, so we can diagnose typos. case $ac_dashdash$ac_option in --) ac_dashdash=yes ;; -bindir | --bindir | --bindi | --bind | --bin | --bi) ac_prev=bindir ;; -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) bindir=$ac_optarg ;; -build | --build | --buil | --bui | --bu) ac_prev=build_alias ;; -build=* | --build=* | --buil=* | --bui=* | --bu=*) build_alias=$ac_optarg ;; -cache-file | --cache-file | --cache-fil | --cache-fi \ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) ac_prev=cache_file ;; -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) cache_file=$ac_optarg ;; --config-cache | -C) cache_file=config.cache ;; -datadir | --datadir | --datadi | --datad) ac_prev=datadir ;; -datadir=* | --datadir=* | --datadi=* | --datad=*) datadir=$ac_optarg ;; -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ | --dataroo | --dataro | --datar) ac_prev=datarootdir ;; -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) datarootdir=$ac_optarg ;; -disable-* | --disable-*) ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=no ;; -docdir | --docdir | --docdi | --doc | --do) ac_prev=docdir ;; -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) docdir=$ac_optarg ;; -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) ac_prev=dvidir ;; -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) dvidir=$ac_optarg ;; -enable-* | --enable-*) ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? 
"invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=\$ac_optarg ;; -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ | --exec | --exe | --ex) ac_prev=exec_prefix ;; -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ | --exec=* | --exe=* | --ex=*) exec_prefix=$ac_optarg ;; -gas | --gas | --ga | --g) # Obsolete; use --with-gas. with_gas=yes ;; -help | --help | --hel | --he | -h) ac_init_help=long ;; -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) ac_init_help=recursive ;; -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) ac_init_help=short ;; -host | --host | --hos | --ho) ac_prev=host_alias ;; -host=* | --host=* | --hos=* | --ho=*) host_alias=$ac_optarg ;; -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) ac_prev=htmldir ;; -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ | --ht=*) htmldir=$ac_optarg ;; -includedir | --includedir | --includedi | --included | --include \ | --includ | --inclu | --incl | --inc) ac_prev=includedir ;; -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ | --includ=* | --inclu=* | --incl=* | --inc=*) includedir=$ac_optarg ;; -infodir | --infodir | --infodi | --infod | --info | --inf) ac_prev=infodir ;; -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) infodir=$ac_optarg ;; -libdir | --libdir | --libdi | --libd) ac_prev=libdir ;; -libdir=* | --libdir=* | --libdi=* | --libd=*) libdir=$ac_optarg ;; -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ | --libexe | --libex | --libe) ac_prev=libexecdir ;; -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ | --libexe=* | --libex=* | --libe=*) libexecdir=$ac_optarg ;; -localedir | --localedir | --localedi | --localed | --locale) ac_prev=localedir ;; -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) localedir=$ac_optarg ;; -localstatedir | --localstatedir | --localstatedi | --localstated \ | --localstate | --localstat | --localsta | --localst | --locals) ac_prev=localstatedir ;; -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) localstatedir=$ac_optarg ;; -mandir | --mandir | --mandi | --mand | --man | --ma | --m) ac_prev=mandir ;; -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) mandir=$ac_optarg ;; -nfp | --nfp | --nf) # Obsolete; use --without-fp. 
with_fp=no ;; -no-create | --no-create | --no-creat | --no-crea | --no-cre \ | --no-cr | --no-c | -n) no_create=yes ;; -no-recursion | --no-recursion | --no-recursio | --no-recursi \ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) no_recursion=yes ;; -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ | --oldin | --oldi | --old | --ol | --o) ac_prev=oldincludedir ;; -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) oldincludedir=$ac_optarg ;; -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) ac_prev=prefix ;; -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) prefix=$ac_optarg ;; -program-prefix | --program-prefix | --program-prefi | --program-pref \ | --program-pre | --program-pr | --program-p) ac_prev=program_prefix ;; -program-prefix=* | --program-prefix=* | --program-prefi=* \ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) program_prefix=$ac_optarg ;; -program-suffix | --program-suffix | --program-suffi | --program-suff \ | --program-suf | --program-su | --program-s) ac_prev=program_suffix ;; -program-suffix=* | --program-suffix=* | --program-suffi=* \ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) program_suffix=$ac_optarg ;; -program-transform-name | --program-transform-name \ | --program-transform-nam | --program-transform-na \ | --program-transform-n | --program-transform- \ | --program-transform | --program-transfor \ | --program-transfo | --program-transf \ | --program-trans | --program-tran \ | --progr-tra | --program-tr | --program-t) ac_prev=program_transform_name ;; -program-transform-name=* | --program-transform-name=* \ | --program-transform-nam=* | --program-transform-na=* \ | --program-transform-n=* | --program-transform-=* \ | --program-transform=* | --program-transfor=* \ | --program-transfo=* | --program-transf=* \ | --program-trans=* | --program-tran=* \ | --progr-tra=* | --program-tr=* | --program-t=*) program_transform_name=$ac_optarg ;; -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) ac_prev=pdfdir ;; -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) pdfdir=$ac_optarg ;; -psdir | --psdir | --psdi | --psd | --ps) ac_prev=psdir ;; -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) psdir=$ac_optarg ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) silent=yes ;; -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ac_prev=sbindir ;; -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ | --sbi=* | --sb=*) sbindir=$ac_optarg ;; -sharedstatedir | --sharedstatedir | --sharedstatedi \ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ | --sharedst | --shareds | --shared | --share | --shar \ | --sha | --sh) ac_prev=sharedstatedir ;; -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ | --sha=* | --sh=*) sharedstatedir=$ac_optarg ;; -site | --site | --sit) ac_prev=site ;; -site=* | --site=* | --sit=*) site=$ac_optarg ;; -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) ac_prev=srcdir ;; -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) srcdir=$ac_optarg ;; 
-sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ | --syscon | --sysco | --sysc | --sys | --sy) ac_prev=sysconfdir ;; -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) sysconfdir=$ac_optarg ;; -target | --target | --targe | --targ | --tar | --ta | --t) ac_prev=target_alias ;; -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) target_alias=$ac_optarg ;; -v | -verbose | --verbose | --verbos | --verbo | --verb) verbose=yes ;; -version | --version | --versio | --versi | --vers | -V) ac_init_version=: ;; -with-* | --with-*) ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=\$ac_optarg ;; -without-* | --without-*) ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=no ;; --x) # Obsolete; use --with-x. with_x=yes ;; -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ | --x-incl | --x-inc | --x-in | --x-i) ac_prev=x_includes ;; -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) x_includes=$ac_optarg ;; -x-libraries | --x-libraries | --x-librarie | --x-librari \ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) ac_prev=x_libraries ;; -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) x_libraries=$ac_optarg ;; -*) as_fn_error $? "unrecognized option: \`$ac_option' Try \`$0 --help' for more information" ;; *=*) ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` # Reject names that are not valid shell variable names. case $ac_envvar in #( '' | [0-9]* | *[!_$as_cr_alnum]* ) as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; esac eval $ac_envvar=\$ac_optarg export $ac_envvar ;; *) # FIXME: should be removed in autoconf 3.0. $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" ;; esac done if test -n "$ac_prev"; then ac_option=--`echo $ac_prev | sed 's/_/-/g'` as_fn_error $? "missing argument to $ac_option" fi if test -n "$ac_unrecognized_opts"; then case $enable_option_checking in no) ;; fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; esac fi # Check all directory arguments for consistency. 
for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ datadir sysconfdir sharedstatedir localstatedir includedir \ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ libdir localedir mandir do eval ac_val=\$$ac_var # Remove trailing slashes. case $ac_val in */ ) ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` eval $ac_var=\$ac_val;; esac # Be sure to have absolute directory names. case $ac_val in [\\/$]* | ?:[\\/]* ) continue;; NONE | '' ) case $ac_var in *prefix ) continue;; esac;; esac as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" done # There might be people who depend on the old broken behavior: `$host' # used to hold the argument of --host etc. # FIXME: To remove some day. build=$build_alias host=$host_alias target=$target_alias # FIXME: To remove some day. if test "x$host_alias" != x; then if test "x$build_alias" = x; then cross_compiling=maybe elif test "x$build_alias" != "x$host_alias"; then cross_compiling=yes fi fi ac_tool_prefix= test -n "$host_alias" && ac_tool_prefix=$host_alias- test "$silent" = yes && exec 6>/dev/null ac_pwd=`pwd` && test -n "$ac_pwd" && ac_ls_di=`ls -di .` && ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || as_fn_error $? "working directory cannot be determined" test "X$ac_ls_di" = "X$ac_pwd_ls_di" || as_fn_error $? "pwd does not report name of working directory" # Find the source files, if location was not specified. if test -z "$srcdir"; then ac_srcdir_defaulted=yes # Try the directory containing this script, then the parent directory. ac_confdir=`$as_dirname -- "$as_myself" || $as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_myself" : 'X\(//\)[^/]' \| \ X"$as_myself" : 'X\(//\)$' \| \ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_myself" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` srcdir=$ac_confdir if test ! -r "$srcdir/$ac_unique_file"; then srcdir=.. fi else ac_srcdir_defaulted=no fi if test ! -r "$srcdir/$ac_unique_file"; then test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" fi ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" ac_abs_confdir=`( cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" pwd)` # When building in place, set srcdir=. if test "$ac_abs_confdir" = "$ac_pwd"; then srcdir=. fi # Remove unnecessary trailing slashes from srcdir. # Double slashes in file names in object file debugging info # mess up M-x gdb in Emacs. case $srcdir in */) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; esac for ac_var in $ac_precious_vars; do eval ac_env_${ac_var}_set=\${${ac_var}+set} eval ac_env_${ac_var}_value=\$${ac_var} eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} eval ac_cv_env_${ac_var}_value=\$${ac_var} done # # Report the --help message. # if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF \`configure' configures this package to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... To assign environment variables (e.g., CC, CFLAGS...), specify them as VAR=VALUE. See below for descriptions of some of the useful variables. Defaults for the options are specified in brackets. 
Configuration: -h, --help display this help and exit --help=short display options specific to this package --help=recursive display the short help of all the included packages -V, --version display version information and exit -q, --quiet, --silent do not print \`checking ...' messages --cache-file=FILE cache test results in FILE [disabled] -C, --config-cache alias for \`--cache-file=config.cache' -n, --no-create do not create output files --srcdir=DIR find the sources in DIR [configure dir or \`..'] Installation directories: --prefix=PREFIX install architecture-independent files in PREFIX [$ac_default_prefix] --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX [PREFIX] By default, \`make install' will install all the files in \`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify an installation prefix other than \`$ac_default_prefix' using \`--prefix', for instance \`--prefix=\$HOME'. For better control, use the options below. Fine tuning of the installation directories: --bindir=DIR user executables [EPREFIX/bin] --sbindir=DIR system admin executables [EPREFIX/sbin] --libexecdir=DIR program executables [EPREFIX/libexec] --sysconfdir=DIR read-only single-machine data [PREFIX/etc] --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] --localstatedir=DIR modifiable single-machine data [PREFIX/var] --libdir=DIR object code libraries [EPREFIX/lib] --includedir=DIR C header files [PREFIX/include] --oldincludedir=DIR C header files for non-gcc [/usr/include] --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] --datadir=DIR read-only architecture-independent data [DATAROOTDIR] --infodir=DIR info documentation [DATAROOTDIR/info] --localedir=DIR locale-dependent data [DATAROOTDIR/locale] --mandir=DIR man documentation [DATAROOTDIR/man] --docdir=DIR documentation root [DATAROOTDIR/doc/PACKAGE] --htmldir=DIR html documentation [DOCDIR] --dvidir=DIR dvi documentation [DOCDIR] --pdfdir=DIR pdf documentation [DOCDIR] --psdir=DIR ps documentation [DOCDIR] _ACEOF cat <<\_ACEOF System types: --build=BUILD configure for building on BUILD [guessed] --host=HOST cross-compile to build programs to run on HOST [BUILD] _ACEOF fi if test -n "$ac_init_help"; then cat <<\_ACEOF Optional Features: --disable-option-checking ignore unrecognized --enable/--with options --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) --enable-FEATURE[=ARG] include FEATURE [ARG=yes] --enable-autogen Automatically regenerate configure output --enable-code-coverage Enable code coverage --disable-cc-silence Do not silence irrelevant compiler warnings --enable-debug Build debugging code (implies --enable-ivsalloc) --enable-ivsalloc Validate pointers passed through the public API --disable-stats Disable statistics calculation/reporting --enable-prof Enable allocation profiling --enable-prof-libunwind Use libunwind for backtracing --disable-prof-libgcc Do not use libgcc for backtracing --disable-prof-gcc Do not use gcc intrinsics for backtracing --disable-tcache Disable per thread caches --disable-munmap Disable VM deallocation via munmap(2) --disable-fill Disable support for junk/zero filling, quarantine, and redzones --enable-utrace Enable utrace(2)-based tracing --disable-valgrind Disable support for Valgrind --enable-xmalloc Support xmalloc option --disable-cache-oblivious Disable support for cache-oblivious allocation alignment --enable-lazy-lock Enable lazy locking (only lock when multi-threaded) --disable-tls Disable thread-local storage 
(__thread keyword) --disable-zone-allocator Disable zone allocator for Darwin Optional Packages: --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) --with-xslroot=<path> XSL stylesheet root path --with-rpath=<rpath> Colon-separated rpath (ELF systems only) --with-mangling=<map> Mangle symbols in <map> --with-jemalloc-prefix=<prefix> Prefix to prepend to all public APIs --without-export disable exporting jemalloc public APIs --with-private-namespace=<prefix> Prefix to prepend to all library-private APIs --with-install-suffix=<suffix> Suffix to append to all installed files --with-static-libunwind=<libunwind.a> Path to static libunwind library; use rather than dynamically linking --with-lg-tiny-min=<lg-tiny-min> Base 2 log of minimum tiny size class to support --with-lg-quantum=<lg-quantum> Base 2 log of minimum allocation alignment --with-lg-page=<lg-page> Base 2 log of system page size --with-lg-page-sizes=<lg-page-sizes> Base 2 logs of system page sizes to support --with-lg-size-class-group=<lg-size-class-group> Base 2 log of size classes per doubling Some influential environment variables: CC C compiler command CFLAGS C compiler flags LDFLAGS linker flags, e.g. -L<lib dir> if you have libraries in a nonstandard directory LIBS libraries to pass to the linker, e.g. -l<library> CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if you have headers in a nonstandard directory CPP C preprocessor Use these variables to override the choices made by `configure' or to help it to find libraries and programs with nonstandard names/locations. Report bugs to the package provider. _ACEOF ac_status=$? fi if test "$ac_init_help" = "recursive"; then # If there are subdirs, report their specific --help. for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue test -d "$ac_dir" || { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || continue ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix cd "$ac_dir" || { ac_status=$?; continue; } # Check for guested configure. if test -f "$ac_srcdir/configure.gnu"; then echo && $SHELL "$ac_srcdir/configure.gnu" --help=recursive elif test -f "$ac_srcdir/configure"; then echo && $SHELL "$ac_srcdir/configure" --help=recursive else $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 fi || ac_status=$? cd "$ac_pwd" || { ac_status=$?; break; } done fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF configure generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it.
_ACEOF exit fi ## ------------------------ ## ## Autoconf initialization. ## ## ------------------------ ## # ac_fn_c_try_compile LINENO # -------------------------- # Try to compile conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_compile # ac_fn_c_try_cpp LINENO # ---------------------- # Try to preprocess conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_cpp () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } > conftest.i && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_cpp # ac_fn_c_try_run LINENO # ---------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. Assumes # that executables *can* be run. ac_fn_c_try_run () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; }; }; then : ac_retval=0 else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=$ac_status fi rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_run # ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists and can be compiled using the include files in # INCLUDES, setting the cache variable VAR accordingly. ac_fn_c_check_header_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 #include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_compile # ac_fn_c_compute_int LINENO EXPR VAR INCLUDES # -------------------------------------------- # Tries to find the compile-time value of EXPR in a program that includes # INCLUDES, setting VAR accordingly. Returns whether the value could be # computed ac_fn_c_compute_int () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if test "$cross_compiling" = yes; then # Depending upon the size, compute the lo and hi bounds. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { static int test_array [1 - 2 * !(($2) >= 0)]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_lo=0 ac_mid=0 while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { static int test_array [1 - 2 * !(($2) <= $ac_mid)]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_hi=$ac_mid; break else as_fn_arith $ac_mid + 1 && ac_lo=$as_val if test $ac_lo -le $ac_mid; then ac_lo= ac_hi= break fi as_fn_arith 2 '*' $ac_mid + 1 && ac_mid=$as_val fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext done else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { static int test_array [1 - 2 * !(($2) < 0)]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_hi=-1 ac_mid=-1 while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { static int test_array [1 - 2 * !(($2) >= $ac_mid)]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_lo=$ac_mid; break else as_fn_arith '(' $ac_mid ')' - 1 && ac_hi=$as_val if test $ac_mid -le $ac_hi; then ac_lo= ac_hi= break fi as_fn_arith 2 '*' $ac_mid && ac_mid=$as_val fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext done else ac_lo= ac_hi= fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext # Binary search between lo and hi bounds. 
while test "x$ac_lo" != "x$ac_hi"; do as_fn_arith '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo && ac_mid=$as_val cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { static int test_array [1 - 2 * !(($2) <= $ac_mid)]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_hi=$ac_mid else as_fn_arith '(' $ac_mid ')' + 1 && ac_lo=$as_val fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext done case $ac_lo in #(( ?*) eval "$3=\$ac_lo"; ac_retval=0 ;; '') ac_retval=1 ;; esac else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 static long int longval () { return $2; } static unsigned long int ulongval () { return $2; } #include <stdio.h> #include <stdlib.h> int main () { FILE *f = fopen ("conftest.val", "w"); if (! f) return 1; if (($2) < 0) { long int i = longval (); if (i != ($2)) return 1; fprintf (f, "%ld", i); } else { unsigned long int i = ulongval (); if (i != ($2)) return 1; fprintf (f, "%lu", i); } /* Do not output a trailing newline, as this causes \r\n confusion on some platforms. */ return ferror (f) || fclose (f) != 0; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : echo >>conftest.val; read $3 <conftest.val; ac_retval=0 else ac_retval=1 fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext conftest.val fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_compute_int # ac_fn_c_try_link LINENO # ----------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_link () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext conftest$ac_exeext if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || test -x conftest$ac_exeext }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would # interfere with the next link command; also delete a directory that is # left behind by Apple's compiler. We do this before executing the actions. rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_link # ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists, giving a warning if it cannot be compiled using # the include files in INCLUDES and setting the cache variable VAR # accordingly. ac_fn_c_check_header_mongrel () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if eval \${$3+:} false; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 $as_echo_n "checking $2 usability... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 #include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_header_compiler=yes else ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 $as_echo_n "checking $2 presence... 
" >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <$2> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : ac_header_preproc=yes else ac_header_preproc=no fi rm -f conftest.err conftest.i conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( yes:no: ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ;; no:yes:* ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=\$ac_header_compiler" fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_mongrel # ac_fn_c_check_func LINENO FUNC VAR # ---------------------------------- # Tests whether FUNC exists, setting the cache variable VAR accordingly ac_fn_c_check_func () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Define $2 to an innocuous variant, in case <limits.h> declares $2. For example, HP-UX 11i <limits.h> declares gettimeofday. */ #define $2 innocuous_$2 /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $2 (); below. Prefer <limits.h> to <assert.h> if __STDC__ is defined, since <limits.h> exists even on freestanding compilers. */ #ifdef __STDC__ # include <limits.h> #else # include <assert.h> #endif #undef $2 /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char $2 (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. 
*/ #if defined __stub_$2 || defined __stub___$2 choke me #endif int main () { return $2 (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_func # ac_fn_c_check_type LINENO TYPE VAR INCLUDES # ------------------------------------------- # Tests whether TYPE exists after having included INCLUDES, setting cache # variable VAR accordingly. ac_fn_c_check_type () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=no" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { if (sizeof ($2)) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { if (sizeof (($2))) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else eval "$3=yes" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_type cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. It was created by $as_me, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ _ACEOF exec 5>>config.log { cat <<_ASUNAME ## --------- ## ## Platform. ## ## --------- ## hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` /bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` /bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` /usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` /bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` /bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` _ASUNAME as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. $as_echo "PATH: $as_dir" done IFS=$as_save_IFS } >&5 cat >&5 <<_ACEOF ## ----------- ## ## Core tests. ## ## ----------- ## _ACEOF # Keep a trace of the command line. # Strip out --no-create and --no-recursion so they do not pile up. # Strip out --silent because we don't want to record it for future runs. # Also quote any args containing shell meta-characters. # Make two passes to allow for proper duplicate-argument suppression. 
ac_configure_args= ac_configure_args0= ac_configure_args1= ac_must_keep_next=false for ac_pass in 1 2 do for ac_arg do case $ac_arg in -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) continue ;; *\'*) ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac case $ac_pass in 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; 2) as_fn_append ac_configure_args1 " '$ac_arg'" if test $ac_must_keep_next = true; then ac_must_keep_next=false # Got value, back to normal. else case $ac_arg in *=* | --config-cache | -C | -disable-* | --disable-* \ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ | -with-* | --with-* | -without-* | --without-* | --x) case "$ac_configure_args0 " in "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; esac ;; -* ) ac_must_keep_next=true ;; esac fi as_fn_append ac_configure_args " '$ac_arg'" ;; esac done done { ac_configure_args0=; unset ac_configure_args0;} { ac_configure_args1=; unset ac_configure_args1;} # When interrupted or exit'd, cleanup temporary files, and complete # config.log. We remove comments because anyway the quotes in there # would cause problems or look ugly. # WARNING: Use '\'' to represent an apostrophe within the trap. # WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. trap 'exit_status=$? # Save into config.log some information that might help in debugging. { echo $as_echo "## ---------------- ## ## Cache variables. ## ## ---------------- ##" echo # The following way of writing the cache mishandles newlines in values, ( for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( *${as_nl}ac_space=\ *) sed -n \ "s/'\''/'\''\\\\'\'''\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" ;; #( *) sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) echo $as_echo "## ----------------- ## ## Output variables. ## ## ----------------- ##" echo for ac_var in $ac_subst_vars do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo if test -n "$ac_subst_files"; then $as_echo "## ------------------- ## ## File substitutions. ## ## ------------------- ##" echo for ac_var in $ac_subst_files do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo fi if test -s confdefs.h; then $as_echo "## ----------- ## ## confdefs.h. 
## ## ----------- ##" echo cat confdefs.h echo fi test "$ac_signal" != 0 && $as_echo "$as_me: caught signal $ac_signal" $as_echo "$as_me: exit $exit_status" } >&5 rm -f core *.core core.conftest.* && rm -f -r conftest* confdefs* conf$$* $ac_clean_files && exit $exit_status ' 0 for ac_signal in 1 2 13 15; do trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal done ac_signal=0 # confdefs.h avoids OS command line length limits that DEFS can exceed. rm -f -r conftest* confdefs.h $as_echo "/* confdefs.h */" > confdefs.h # Predefined preprocessor variables. cat >>confdefs.h <<_ACEOF #define PACKAGE_NAME "$PACKAGE_NAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_TARNAME "$PACKAGE_TARNAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_VERSION "$PACKAGE_VERSION" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_STRING "$PACKAGE_STRING" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_URL "$PACKAGE_URL" _ACEOF # Let the site file select an alternate cache file if it wants to. # Prefer an explicitly selected file to automatically selected ones. ac_site_file1=NONE ac_site_file2=NONE if test -n "$CONFIG_SITE"; then # We do not want a PATH search for config.site. case $CONFIG_SITE in #(( -*) ac_site_file1=./$CONFIG_SITE;; */*) ac_site_file1=$CONFIG_SITE;; *) ac_site_file1=./$CONFIG_SITE;; esac elif test "x$prefix" != xNONE; then ac_site_file1=$prefix/share/config.site ac_site_file2=$prefix/etc/config.site else ac_site_file1=$ac_default_prefix/share/config.site ac_site_file2=$ac_default_prefix/etc/config.site fi for ac_site_file in "$ac_site_file1" "$ac_site_file2" do test "x$ac_site_file" = xNONE && continue if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 $as_echo "$as_me: loading site script $ac_site_file" >&6;} sed 's/^/| /' "$ac_site_file" >&5 . "$ac_site_file" \ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "failed to load site script $ac_site_file See \`config.log' for more details" "$LINENO" 5; } fi done if test -r "$cache_file"; then # Some versions of bash will fail to source /dev/null (special files # actually), so we avoid doing that. DJGPP emulates it as a regular file. if test /dev/null != "$cache_file" && test -f "$cache_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 $as_echo "$as_me: loading cache $cache_file" >&6;} case $cache_file in [\\/]* | ?:[\\/]* ) . "$cache_file";; *) . "./$cache_file";; esac fi else { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 $as_echo "$as_me: creating cache $cache_file" >&6;} >$cache_file fi # Check that the precious variables saved in the cache have kept the same # value. 
ac_cache_corrupted=false for ac_var in $ac_precious_vars; do eval ac_old_set=\$ac_cv_env_${ac_var}_set eval ac_new_set=\$ac_env_${ac_var}_set eval ac_old_val=\$ac_cv_env_${ac_var}_value eval ac_new_val=\$ac_env_${ac_var}_value case $ac_old_set,$ac_new_set in set,) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ac_cache_corrupted=: ;; ,set) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_cache_corrupted=: ;; ,);; *) if test "x$ac_old_val" != "x$ac_new_val"; then # differences in whitespace do not lead to failure. ac_old_val_w=`echo x $ac_old_val` ac_new_val_w=`echo x $ac_new_val` if test "$ac_old_val_w" != "$ac_new_val_w"; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 $as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} ac_cache_corrupted=: else { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 $as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} eval $ac_var=\$ac_old_val fi { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 $as_echo "$as_me: former value: \`$ac_old_val'" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 $as_echo "$as_me: current value: \`$ac_new_val'" >&2;} fi;; esac # Pass precious variables to config.status. if test "$ac_new_set" = set; then case $ac_new_val in *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; *) ac_arg=$ac_var=$ac_new_val ;; esac case " $ac_configure_args " in *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. *) as_fn_append ac_configure_args " '$ac_arg'" ;; esac fi done if $ac_cache_corrupted; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 $as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 fi ## -------------------- ## ## Main body of script. ## ## -------------------- ## ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu CONFIG=`echo ${ac_configure_args} | sed -e 's#'"'"'\([^ ]*\)'"'"'#\1#g'` rev=2 srcroot=$srcdir if test "x${srcroot}" = "x." ; then srcroot="" else srcroot="${srcroot}/" fi abs_srcroot="`cd \"${srcdir}\"; pwd`/" objroot="" abs_objroot="`pwd`/" if test "x$prefix" = "xNONE" ; then prefix="/usr/local" fi if test "x$exec_prefix" = "xNONE" ; then exec_prefix=$prefix fi PREFIX=$prefix BINDIR=`eval echo $bindir` BINDIR=`eval echo $BINDIR` INCLUDEDIR=`eval echo $includedir` INCLUDEDIR=`eval echo $INCLUDEDIR` LIBDIR=`eval echo $libdir` LIBDIR=`eval echo $LIBDIR` DATADIR=`eval echo $datadir` DATADIR=`eval echo $DATADIR` MANDIR=`eval echo $mandir` MANDIR=`eval echo $MANDIR` # Extract the first word of "xsltproc", so it can be a program name with args. 
set dummy xsltproc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_XSLTPROC+:} false; then : $as_echo_n "(cached) " >&6 else case $XSLTPROC in [\\/]* | ?:[\\/]*) ac_cv_path_XSLTPROC="$XSLTPROC" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_XSLTPROC="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_XSLTPROC" && ac_cv_path_XSLTPROC="false" ;; esac fi XSLTPROC=$ac_cv_path_XSLTPROC if test -n "$XSLTPROC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $XSLTPROC" >&5 $as_echo "$XSLTPROC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test -d "/usr/share/xml/docbook/stylesheet/docbook-xsl" ; then DEFAULT_XSLROOT="/usr/share/xml/docbook/stylesheet/docbook-xsl" elif test -d "/usr/share/sgml/docbook/xsl-stylesheets" ; then DEFAULT_XSLROOT="/usr/share/sgml/docbook/xsl-stylesheets" else DEFAULT_XSLROOT="" fi # Check whether --with-xslroot was given. if test "${with_xslroot+set}" = set; then : withval=$with_xslroot; if test "x$with_xslroot" = "xno" ; then XSLROOT="${DEFAULT_XSLROOT}" else XSLROOT="${with_xslroot}" fi else XSLROOT="${DEFAULT_XSLROOT}" fi CFLAGS=$CFLAGS ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_CC"; then ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi else CC="$ac_cv_prog_CC" fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi fi if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. set dummy cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else ac_prog_rejected=no as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue fi ac_cv_prog_CC="cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS if test $ac_prog_rejected = yes; then # We found a bogon in the path, so make sure we never use it. set dummy $ac_cv_prog_CC shift if test $# != 0; then # We chose a different compiler from the bogus one. # However, it has the same basename, so the bogon will be chosen # first if we set CC to just the basename; use the full file name. shift ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" fi fi fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then for ac_prog in cl.exe do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. 
set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CC" && break done fi if test -z "$CC"; then ac_ct_CC=$CC for ac_prog in cl.exe do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CC" && break done if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi fi fi test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "no acceptable C compiler found in \$PATH See \`config.log' for more details" "$LINENO" 5; } # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" # Try to create an executable without -o first, disregard a.out. # It will help us diagnose broken compilers, and finding out an intuition # of exeext. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 $as_echo_n "checking whether the C compiler works... " >&6; } ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` # The possible output files: ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" ac_rmfiles= for ac_file in $ac_files do case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; * ) ac_rmfiles="$ac_rmfiles $ac_file";; esac done rm -f $ac_rmfiles if { { ac_try="$ac_link_default" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link_default") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. # So ignore a value of `no', otherwise this would lead to `EXEEXT = no' # in a Makefile. We should not override ac_cv_exeext if it was cached, # so that the user can short-circuit this test for compilers unknown to # Autoconf. for ac_file in $ac_files '' do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; [ab].out ) # We found the default executable, but exeext='' is most # certainly right. break;; *.* ) if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; then :; else ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` fi # We set ac_cv_exeext here because the later test for it is not # safe: cross compilers may not add the suffix if given an `-o' # argument, so we may need to know it at that point already. # Even if this section looks crufty: it has the advantage of # actually working. break;; * ) break;; esac done test "$ac_cv_exeext" = no && ac_cv_exeext= else ac_file='' fi if test -z "$ac_file"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "C compiler cannot create executables See \`config.log' for more details" "$LINENO" 5; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 $as_echo_n "checking for C compiler default output file name... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 $as_echo "$ac_file" >&6; } ac_exeext=$ac_cv_exeext rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 $as_echo_n "checking for suffix of executables... " >&6; } if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? 
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # If both `conftest.exe' and `conftest' are `present' (well, observable) # catch `conftest.exe'. For instance with Cygwin, `ls conftest' will # work properly (i.e., refer to `conftest.exe'), while it won't with # `rm'. for ac_file in conftest.exe conftest conftest.*; do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` break;; * ) break;; esac done else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of executables: cannot compile and link See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest conftest$ac_cv_exeext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 $as_echo "$ac_cv_exeext" >&6; } rm -f conftest.$ac_ext EXEEXT=$ac_cv_exeext ac_exeext=$EXEEXT cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { FILE *f = fopen ("conftest.out", "w"); return ferror (f) || fclose (f) != 0; ; return 0; } _ACEOF ac_clean_files="$ac_clean_files conftest.out" # Check that the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 $as_echo_n "checking whether we are cross compiling... " >&6; } if test "$cross_compiling" != yes; then { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } if { ac_try='./conftest$ac_cv_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then cross_compiling=no else if test "$cross_compiling" = maybe; then cross_compiling=yes else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot run C compiled programs. If you meant to cross compile, use \`--host'. See \`config.log' for more details" "$LINENO" 5; } fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 $as_echo "$cross_compiling" >&6; } rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 $as_echo_n "checking for suffix of object files... " >&6; } if ${ac_cv_objext+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.o conftest.obj if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; }; then : for ac_file in conftest.o conftest.obj conftest.*; do test -f "$ac_file" || continue; case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` break;; esac done else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of object files: cannot compile See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest.$ac_cv_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 $as_echo "$ac_cv_objext" >&6; } OBJEXT=$ac_cv_objext ac_objext=$OBJEXT { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... " >&6; } if ${ac_cv_c_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 $as_echo "$ac_cv_c_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } if ${ac_cv_prog_cc_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes else CFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 $as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then CFLAGS="-g -O2" else CFLAGS="-g" fi else if test "$GCC" = yes; then CFLAGS="-O2" else CFLAGS= fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if ${ac_cv_prog_cc_c89+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include struct stat; /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. 
*/ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); static char *e (p, i) char **p; int i; { return p[i]; } static char *f (char * (*g) (char **, int), char **p, ...) { char *s; va_list v; va_start (v,p); s = g (p, va_arg (v,int)); va_end (v); return s; } /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has function prototypes and stuff, but not '\xHH' hex character constants. These don't provoke an error unfortunately, instead are silently treated as 'x'. The following induces an error, until -std is added to get proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an array size at least. It's necessary to write '\x00'==0 to get something that's true only with -std. */ int osf4_cc_array ['\x00' == 0 ? 1 : -1]; /* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters inside strings and character constants. */ #define FOO(x) 'x' int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1]; int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); int argc; char **argv; int main () { return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ; return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_c89=$ac_arg fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac if test "x$ac_cv_prog_cc_c89" != xno; then : fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test "x$GCC" != "xyes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler is MSVC" >&5 $as_echo_n "checking whether compiler is MSVC... " >&6; } if ${je_cv_msvc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef _MSC_VER int fail-1; #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_msvc=yes else je_cv_msvc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_msvc" >&5 $as_echo "$je_cv_msvc" >&6; } fi if test "x$CFLAGS" = "x" ; then no_CFLAGS="yes" if test "x$GCC" = "xyes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -std=gnu99" >&5 $as_echo_n "checking whether compiler supports -std=gnu99... " >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="-std=gnu99" else CFLAGS="${CFLAGS} -std=gnu99" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_appended=-std=gnu99 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_appended= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x$je_cv_cflags_appended" = "x-std=gnu99" ; then cat >>confdefs.h <<_ACEOF #define JEMALLOC_HAS_RESTRICT 1 _ACEOF fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wall" >&5 $as_echo_n "checking whether compiler supports -Wall... " >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="-Wall" else CFLAGS="${CFLAGS} -Wall" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_appended=-Wall { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_appended= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror=declaration-after-statement" >&5 $as_echo_n "checking whether compiler supports -Werror=declaration-after-statement... " >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="-Werror=declaration-after-statement" else CFLAGS="${CFLAGS} -Werror=declaration-after-statement" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_appended=-Werror=declaration-after-statement { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_appended= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -pipe" >&5 $as_echo_n "checking whether compiler supports -pipe... " >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="-pipe" else CFLAGS="${CFLAGS} -pipe" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_appended=-pipe { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_appended= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -g3" >&5 $as_echo_n "checking whether compiler supports -g3... " >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="-g3" else CFLAGS="${CFLAGS} -g3" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
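   (This empty program is the same probe used for every candidate flag
   in this script: the flag is tentatively appended to CFLAGS, the
   program is compiled, and on failure CFLAGS is rolled back to the
   saved TCFLAGS, so an unsupported option such as -g3 here is simply
   skipped rather than fatal.)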
*/ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_appended=-g3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_appended= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext elif test "x$je_cv_msvc" = "xyes" ; then CC="$CC -nologo" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Zi" >&5 $as_echo_n "checking whether compiler supports -Zi... " >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="-Zi" else CFLAGS="${CFLAGS} -Zi" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_appended=-Zi { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_appended= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -MT" >&5 $as_echo_n "checking whether compiler supports -MT... " >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="-MT" else CFLAGS="${CFLAGS} -MT" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_appended=-MT { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_appended= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -W3" >&5 $as_echo_n "checking whether compiler supports -W3... " >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="-W3" else CFLAGS="${CFLAGS} -W3" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_appended=-W3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_appended= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -FS" >&5 $as_echo_n "checking whether compiler supports -FS... " >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="-FS" else CFLAGS="${CFLAGS} -FS" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_appended=-FS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_appended= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat" fi fi if test "x$EXTRA_CFLAGS" != "x" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports $EXTRA_CFLAGS" >&5 $as_echo_n "checking whether compiler supports $EXTRA_CFLAGS... 
" >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="$EXTRA_CFLAGS" else CFLAGS="${CFLAGS} $EXTRA_CFLAGS" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_appended=$EXTRA_CFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_appended= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 $as_echo_n "checking how to run the C preprocessor... " >&6; } # On Suns, sometimes $CPP names a directory. if test -n "$CPP" && test -d "$CPP"; then CPP= fi if test -z "$CPP"; then if ${ac_cv_prog_CPP+:} false; then : $as_echo_n "(cached) " >&6 else # Double quotes because CPP needs to be expanded for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" do ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : break fi done ac_cv_prog_CPP=$CPP fi CPP=$ac_cv_prog_CPP else ac_cv_prog_CPP=$CPP fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 $as_echo "$CPP" >&6; } ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. 
ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "C preprocessor \"$CPP\" fails sanity check See \`config.log' for more details" "$LINENO" 5; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 $as_echo_n "checking for grep that handles long lines and -e... " >&6; } if ${ac_cv_path_GREP+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$GREP"; then ac_path_GREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in grep ggrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_GREP" || continue # Check for GNU ac_path_GREP and select it if it is found. # Check for GNU $ac_path_GREP case `"$ac_path_GREP" --version 2>&1` in *GNU*) ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'GREP' >> "conftest.nl" "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_GREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_GREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_GREP"; then as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_GREP=$GREP fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 $as_echo "$ac_cv_path_GREP" >&6; } GREP="$ac_cv_path_GREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 $as_echo_n "checking for egrep... " >&6; } if ${ac_cv_path_EGREP+:} false; then : $as_echo_n "(cached) " >&6 else if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 then ac_cv_path_EGREP="$GREP -E" else if test -z "$EGREP"; then ac_path_EGREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in egrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_EGREP" || continue # Check for GNU ac_path_EGREP and select it if it is found. 
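# As with GREP above, a non-GNU egrep candidate is scored by a doubling
# benchmark: conftest.in starts as the ten digits and is doubled each
# round via
#   cat conftest.in conftest.in > conftest.tmp && mv conftest.tmp conftest.in
# and the candidate surviving the most rounds (capped at 10, i.e. about
# 10*2^10 characters) while still matching a trailing line wins.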
# Check for GNU $ac_path_EGREP case `"$ac_path_EGREP" --version 2>&1` in *GNU*) ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'EGREP' >> "conftest.nl" "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_EGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_EGREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_EGREP"; then as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_EGREP=$EGREP fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 $as_echo "$ac_cv_path_EGREP" >&6; } EGREP="$ac_cv_path_EGREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... " >&6; } if ${ac_cv_header_stdc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <float.h> int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdc=yes else ac_cv_header_stdc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <string.h> _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "memchr" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdlib.h> _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "free" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. if test "$cross_compiling" = yes; then : : else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <ctype.h> #include <stdlib.h> #if ((' ' & 0x0FF) == 0x020) # define ISLOWER(c) ('a' <= (c) && (c) <= 'z') # define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) #else # define ISLOWER(c) \ (('a' <= (c) && (c) <= 'i') \ || ('j' <= (c) && (c) <= 'r') \ || ('s' <= (c) && (c) <= 'z')) # define TOUPPER(c) (ISLOWER(c) ?
((c) | 0x40) : (c)) #endif #define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) int main () { int i; for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) return 2; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : else ac_cv_header_stdc=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 $as_echo "$ac_cv_header_stdc" >&6; } if test $ac_cv_header_stdc = yes; then $as_echo "#define STDC_HEADERS 1" >>confdefs.h fi # On IRIX 5.3, sys/types and inttypes.h are conflicting. for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ inttypes.h stdint.h unistd.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default " if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether byte ordering is bigendian" >&5 $as_echo_n "checking whether byte ordering is bigendian... " >&6; } if ${ac_cv_c_bigendian+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_c_bigendian=unknown # See if we're dealing with a universal compiler. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifndef __APPLE_CC__ not a universal capable compiler #endif typedef int dummy; _ACEOF if ac_fn_c_try_compile "$LINENO"; then : # Check for potential -arch flags. It is not universal unless # there are at least two -arch flags with different values. ac_arch= ac_prev= for ac_word in $CC $CFLAGS $CPPFLAGS $LDFLAGS; do if test -n "$ac_prev"; then case $ac_word in i?86 | x86_64 | ppc | ppc64) if test -z "$ac_arch" || test "$ac_arch" = "$ac_word"; then ac_arch=$ac_word else ac_cv_c_bigendian=universal break fi ;; esac ac_prev= elif test "x$ac_word" = "x-arch"; then ac_prev=arch fi done fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_c_bigendian = unknown; then # See if sys/param.h defines the BYTE_ORDER macro. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/types.h> #include <sys/param.h> int main () { #if ! (defined BYTE_ORDER && defined BIG_ENDIAN \ && defined LITTLE_ENDIAN && BYTE_ORDER && BIG_ENDIAN \ && LITTLE_ENDIAN) bogus endian macros #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : # It does; now see whether it defined to BIG_ENDIAN or not. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/types.h> #include <sys/param.h> int main () { #if BYTE_ORDER != BIG_ENDIAN not big endian #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_c_bigendian=yes else ac_cv_c_bigendian=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi if test $ac_cv_c_bigendian = unknown; then # See if <limits.h> defines _LITTLE_ENDIAN or _BIG_ENDIAN (e.g., Solaris). cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <limits.h> int main () { #if ! (defined _LITTLE_ENDIAN || defined _BIG_ENDIAN) bogus endian macros #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : # It does; now see whether it defined to _BIG_ENDIAN or not. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h.
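   This is the last of the compile-time heuristics: after the
   universal-binary and BYTE_ORDER checks above, configure asks whether
   <limits.h> defines _BIG_ENDIAN (a Solaris convention); if that also
   fails it falls back to compiling a test program, or, when cross
   compiling, to grepping magic strings out of an object file.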
*/ #include <limits.h> int main () { #ifndef _BIG_ENDIAN not big endian #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_c_bigendian=yes else ac_cv_c_bigendian=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi if test $ac_cv_c_bigendian = unknown; then # Compile a test program. if test "$cross_compiling" = yes; then : # Try to guess by grepping values from an object file. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ short int ascii_mm[] = { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 }; short int ascii_ii[] = { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 }; int use_ascii (int i) { return ascii_mm[i] + ascii_ii[i]; } short int ebcdic_ii[] = { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 }; short int ebcdic_mm[] = { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 }; int use_ebcdic (int i) { return ebcdic_mm[i] + ebcdic_ii[i]; } extern int foo; int main () { return use_ascii (foo) == use_ebcdic (foo); ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : if grep BIGenDianSyS conftest.$ac_objext >/dev/null; then ac_cv_c_bigendian=yes fi if grep LiTTleEnDian conftest.$ac_objext >/dev/null ; then if test "$ac_cv_c_bigendian" = unknown; then ac_cv_c_bigendian=no else # finding both strings is unlikely to happen, but who knows? ac_cv_c_bigendian=unknown fi fi fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default int main () { /* Are we little or big endian? From Harbison&Steele. */ union { long int l; char c[sizeof (long int)]; } u; u.l = 1; return u.c[sizeof (long int) - 1] == 1; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_c_bigendian=no else ac_cv_c_bigendian=yes fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_bigendian" >&5 $as_echo "$ac_cv_c_bigendian" >&6; } case $ac_cv_c_bigendian in #( yes) ac_cv_big_endian=1;; #( no) ac_cv_big_endian=0 ;; #( universal) $as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h ;; #( *) as_fn_error $? "unknown endianness presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;; esac if test "x${ac_cv_big_endian}" = "x1" ; then cat >>confdefs.h <<_ACEOF #define JEMALLOC_BIG_ENDIAN _ACEOF fi if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat/C99" fi # The cast to long int works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. # This bug is HP SR number 8606223364. { $as_echo "$as_me:${as_lineno-$LINENO}: checking size of void *" >&5 $as_echo_n "checking size of void *...
" >&6; } if ${ac_cv_sizeof_void_p+:} false; then : $as_echo_n "(cached) " >&6 else if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (void *))" "ac_cv_sizeof_void_p" "$ac_includes_default"; then : else if test "$ac_cv_type_void_p" = yes; then { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "cannot compute sizeof (void *) See \`config.log' for more details" "$LINENO" 5; } else ac_cv_sizeof_void_p=0 fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_void_p" >&5 $as_echo "$ac_cv_sizeof_void_p" >&6; } cat >>confdefs.h <<_ACEOF #define SIZEOF_VOID_P $ac_cv_sizeof_void_p _ACEOF if test "x${ac_cv_sizeof_void_p}" = "x8" ; then LG_SIZEOF_PTR=3 elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then LG_SIZEOF_PTR=2 else as_fn_error $? "Unsupported pointer size: ${ac_cv_sizeof_void_p}" "$LINENO" 5 fi cat >>confdefs.h <<_ACEOF #define LG_SIZEOF_PTR $LG_SIZEOF_PTR _ACEOF # The cast to long int works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. # This bug is HP SR number 8606223364. { $as_echo "$as_me:${as_lineno-$LINENO}: checking size of int" >&5 $as_echo_n "checking size of int... " >&6; } if ${ac_cv_sizeof_int+:} false; then : $as_echo_n "(cached) " >&6 else if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (int))" "ac_cv_sizeof_int" "$ac_includes_default"; then : else if test "$ac_cv_type_int" = yes; then { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "cannot compute sizeof (int) See \`config.log' for more details" "$LINENO" 5; } else ac_cv_sizeof_int=0 fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_int" >&5 $as_echo "$ac_cv_sizeof_int" >&6; } cat >>confdefs.h <<_ACEOF #define SIZEOF_INT $ac_cv_sizeof_int _ACEOF if test "x${ac_cv_sizeof_int}" = "x8" ; then LG_SIZEOF_INT=3 elif test "x${ac_cv_sizeof_int}" = "x4" ; then LG_SIZEOF_INT=2 else as_fn_error $? "Unsupported int size: ${ac_cv_sizeof_int}" "$LINENO" 5 fi cat >>confdefs.h <<_ACEOF #define LG_SIZEOF_INT $LG_SIZEOF_INT _ACEOF # The cast to long int works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. # This bug is HP SR number 8606223364. { $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long" >&5 $as_echo_n "checking size of long... " >&6; } if ${ac_cv_sizeof_long+:} false; then : $as_echo_n "(cached) " >&6 else if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long))" "ac_cv_sizeof_long" "$ac_includes_default"; then : else if test "$ac_cv_type_long" = yes; then { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "cannot compute sizeof (long) See \`config.log' for more details" "$LINENO" 5; } else ac_cv_sizeof_long=0 fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long" >&5 $as_echo "$ac_cv_sizeof_long" >&6; } cat >>confdefs.h <<_ACEOF #define SIZEOF_LONG $ac_cv_sizeof_long _ACEOF if test "x${ac_cv_sizeof_long}" = "x8" ; then LG_SIZEOF_LONG=3 elif test "x${ac_cv_sizeof_long}" = "x4" ; then LG_SIZEOF_LONG=2 else as_fn_error $? 
"Unsupported long size: ${ac_cv_sizeof_long}" "$LINENO" 5 fi cat >>confdefs.h <<_ACEOF #define LG_SIZEOF_LONG $LG_SIZEOF_LONG _ACEOF # The cast to long int works around a bug in the HP C Compiler # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. # This bug is HP SR number 8606223364. { $as_echo "$as_me:${as_lineno-$LINENO}: checking size of intmax_t" >&5 $as_echo_n "checking size of intmax_t... " >&6; } if ${ac_cv_sizeof_intmax_t+:} false; then : $as_echo_n "(cached) " >&6 else if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (intmax_t))" "ac_cv_sizeof_intmax_t" "$ac_includes_default"; then : else if test "$ac_cv_type_intmax_t" = yes; then { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "cannot compute sizeof (intmax_t) See \`config.log' for more details" "$LINENO" 5; } else ac_cv_sizeof_intmax_t=0 fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_intmax_t" >&5 $as_echo "$ac_cv_sizeof_intmax_t" >&6; } cat >>confdefs.h <<_ACEOF #define SIZEOF_INTMAX_T $ac_cv_sizeof_intmax_t _ACEOF if test "x${ac_cv_sizeof_intmax_t}" = "x16" ; then LG_SIZEOF_INTMAX_T=4 elif test "x${ac_cv_sizeof_intmax_t}" = "x8" ; then LG_SIZEOF_INTMAX_T=3 elif test "x${ac_cv_sizeof_intmax_t}" = "x4" ; then LG_SIZEOF_INTMAX_T=2 else as_fn_error $? "Unsupported intmax_t size: ${ac_cv_sizeof_intmax_t}" "$LINENO" 5 fi cat >>confdefs.h <<_ACEOF #define LG_SIZEOF_INTMAX_T $LG_SIZEOF_INTMAX_T _ACEOF ac_aux_dir= for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do if test -f "$ac_dir/install-sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install-sh -c" break elif test -f "$ac_dir/install.sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install.sh -c" break elif test -f "$ac_dir/shtool"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/shtool install -c" break fi done if test -z "$ac_aux_dir"; then as_fn_error $? "cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5 fi # These three variables are undocumented and unsupported, # and are intended to be withdrawn in a future Autoconf release. # They can cause serious problems if a builder's source tree is in a directory # whose full name contains unusual characters. ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. # Make sure we can run config.sub. $SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 $as_echo_n "checking build system type... " >&6; } if ${ac_cv_build+:} false; then : $as_echo_n "(cached) " >&6 else ac_build_alias=$build_alias test "x$ac_build_alias" = x && ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` test "x$ac_build_alias" = x && as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 $as_echo "$ac_cv_build" >&6; } case $ac_cv_build in *-*-*) ;; *) as_fn_error $? 
"invalid value of canonical build" "$LINENO" 5;; esac build=$ac_cv_build ac_save_IFS=$IFS; IFS='-' set x $ac_cv_build shift build_cpu=$1 build_vendor=$2 shift; shift # Remember, the first character of IFS is used to create $*, # except with old shells: build_os=$* IFS=$ac_save_IFS case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 $as_echo_n "checking host system type... " >&6; } if ${ac_cv_host+:} false; then : $as_echo_n "(cached) " >&6 else if test "x$host_alias" = x; then ac_cv_host=$ac_cv_build else ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 $as_echo "$ac_cv_host" >&6; } case $ac_cv_host in *-*-*) ;; *) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;; esac host=$ac_cv_host ac_save_IFS=$IFS; IFS='-' set x $ac_cv_host shift host_cpu=$1 host_vendor=$2 shift; shift # Remember, the first character of IFS is used to create $*, # except with old shells: host_os=$* IFS=$ac_save_IFS case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac CPU_SPINWAIT="" case "${host_cpu}" in i686|x86_64) if ${je_cv_pause+:} false; then : $as_echo_n "(cached) " >&6 else { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pause instruction is compilable" >&5 $as_echo_n "checking whether pause instruction is compilable... " >&6; } if ${je_cv_pause+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { __asm__ volatile("pause"); return 0; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_pause=yes else je_cv_pause=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pause" >&5 $as_echo "$je_cv_pause" >&6; } fi if test "x${je_cv_pause}" = "xyes" ; then CPU_SPINWAIT='__asm__ volatile("pause")' fi ;; powerpc) cat >>confdefs.h <<_ACEOF #define HAVE_ALTIVEC _ACEOF ;; *) ;; esac cat >>confdefs.h <<_ACEOF #define CPU_SPINWAIT $CPU_SPINWAIT _ACEOF LD_PRELOAD_VAR="LD_PRELOAD" so="so" importlib="${so}" o="$ac_objext" a="a" exe="$ac_exeext" libprefix="lib" DSO_LDFLAGS='-shared -Wl,-soname,$(@F)' RPATH='-Wl,-rpath,$(1)' SOREV="${so}.${rev}" PIC_CFLAGS='-fPIC -DPIC' CTARGET='-o $@' LDTARGET='-o $@' EXTRA_LDFLAGS= ARFLAGS='crus' AROUT=' $@' CC_MM=1 if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. set dummy ${ac_tool_prefix}ar; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_AR+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$AR"; then ac_cv_prog_AR="$AR" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_AR="${ac_tool_prefix}ar" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AR=$ac_cv_prog_AR if test -n "$AR"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 $as_echo "$AR" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_AR"; then ac_ct_AR=$AR # Extract the first word of "ar", so it can be a program name with args. set dummy ar; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_AR+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_AR"; then ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_AR="ar" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_AR=$ac_cv_prog_ac_ct_AR if test -n "$ac_ct_AR"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 $as_echo "$ac_ct_AR" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_AR" = x; then AR=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac AR=$ac_ct_AR fi else AR="$ac_cv_prog_AR" fi default_munmap="1" maps_coalesce="1" case "${host}" in *-*-darwin* | *-*-ios*) CFLAGS="$CFLAGS" abi="macho" $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h RPATH="" LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES" so="dylib" importlib="${so}" force_tls="0" DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)' SOREV="${rev}.${so}" sbrk_deprecated="1" ;; *-*-freebsd*) CFLAGS="$CFLAGS" abi="elf" $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h force_lazy_lock="1" ;; *-*-dragonfly*) CFLAGS="$CFLAGS" abi="elf" $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h ;; *-*-openbsd*) CFLAGS="$CFLAGS" abi="elf" $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h force_tls="0" ;; *-*-bitrig*) CFLAGS="$CFLAGS" abi="elf" $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h ;; *-*-linux*) CFLAGS="$CFLAGS" CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE" abi="elf" $as_echo "#define JEMALLOC_HAS_ALLOCA_H 1" >>confdefs.h $as_echo "#define JEMALLOC_PURGE_MADVISE_DONTNEED " >>confdefs.h $as_echo "#define JEMALLOC_THREADED_INIT " >>confdefs.h $as_echo "#define JEMALLOC_USE_CXX_THROW " >>confdefs.h default_munmap="0" ;; *-*-netbsd*) { $as_echo "$as_me:${as_lineno-$LINENO}: checking ABI" >&5 $as_echo_n "checking ABI... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
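   On NetBSD the object format cannot be assumed, so this one-line
   probe relies on the compiler predefining __ELF__: if the program
   compiles, the abi variable is set to "elf"; otherwise the #error
   branch fires and the older "aout" ABI is assumed.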
*/ #ifdef __ELF__ /* ELF */ #else #error aout #endif int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : CFLAGS="$CFLAGS"; abi="elf" else abi="aout" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $abi" >&5 $as_echo "$abi" >&6; } $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h ;; *-*-solaris2*) CFLAGS="$CFLAGS" abi="elf" $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h RPATH='-Wl,-R,$(1)' CPPFLAGS="$CPPFLAGS -D_POSIX_PTHREAD_SEMANTICS" LIBS="$LIBS -lposix4 -lsocket -lnsl" ;; *-ibm-aix*) if test "$LG_SIZEOF_PTR" = "8"; then LD_PRELOAD_VAR="LDR_PRELOAD64" else LD_PRELOAD_VAR="LDR_PRELOAD" fi abi="xcoff" ;; *-*-mingw* | *-*-cygwin*) abi="pecoff" force_tls="0" force_lazy_lock="1" maps_coalesce="0" RPATH="" so="dll" if test "x$je_cv_msvc" = "xyes" ; then importlib="lib" DSO_LDFLAGS="-LD" EXTRA_LDFLAGS="-link -DEBUG" CTARGET='-Fo$@' LDTARGET='-Fe$@' AR='lib' ARFLAGS='-nologo -out:' AROUT='$@' CC_MM= else importlib="${so}" DSO_LDFLAGS="-shared" fi a="lib" libprefix="" SOREV="${so}" PIC_CFLAGS="" ;; *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: Unsupported operating system: ${host}" >&5 $as_echo "Unsupported operating system: ${host}" >&6; } abi="elf" ;; esac JEMALLOC_USABLE_SIZE_CONST=const for ac_header in malloc.h do : ac_fn_c_check_header_mongrel "$LINENO" "malloc.h" "ac_cv_header_malloc_h" "$ac_includes_default" if test "x$ac_cv_header_malloc_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_MALLOC_H 1 _ACEOF { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether malloc_usable_size definition can use const argument" >&5 $as_echo_n "checking whether malloc_usable_size definition can use const argument... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <malloc.h> #include <stddef.h> size_t malloc_usable_size(const void *ptr); int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else JEMALLOC_USABLE_SIZE_CONST= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi done cat >>confdefs.h <<_ACEOF #define JEMALLOC_USABLE_SIZE_CONST $JEMALLOC_USABLE_SIZE_CONST _ACEOF { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether __attribute__ syntax is compilable" >&5 $as_echo_n "checking whether __attribute__ syntax is compilable... " >&6; } if ${je_cv_attribute+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ static __attribute__((unused)) void foo(void){} int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_attribute=yes else je_cv_attribute=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_attribute" >&5 $as_echo "$je_cv_attribute" >&6; } if test "x${je_cv_attribute}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_ATTR " >>confdefs.h if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -fvisibility=hidden" >&5 $as_echo_n "checking whether compiler supports -fvisibility=hidden... " >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="-fvisibility=hidden" else CFLAGS="${CFLAGS} -fvisibility=hidden" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h.
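   Symbol visibility is probed here because it is only worth using
   once __attribute__ support has just been confirmed on a GCC/ELF
   build; if -fvisibility=hidden is accepted, jemalloc's internal
   symbols default to hidden and, in effect, only its public API
   remains exported from the shared object.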
*/ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_appended=-fvisibility=hidden { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_appended= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi fi SAVED_CFLAGS="${CFLAGS}" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 $as_echo_n "checking whether compiler supports -Werror... " >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="-Werror" else CFLAGS="${CFLAGS} -Werror" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_appended=-Werror { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_appended= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether tls_model attribute is compilable" >&5 $as_echo_n "checking whether tls_model attribute is compilable... " >&6; } if ${je_cv_tls_model+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { static __thread int __attribute__((tls_model("initial-exec"), unused)) foo; foo = 0; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_tls_model=yes else je_cv_tls_model=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_tls_model" >&5 $as_echo "$je_cv_tls_model" >&6; } CFLAGS="${SAVED_CFLAGS}" if test "x${je_cv_tls_model}" = "xyes" ; then $as_echo "#define JEMALLOC_TLS_MODEL __attribute__((tls_model(\"initial-exec\")))" >>confdefs.h else $as_echo "#define JEMALLOC_TLS_MODEL " >>confdefs.h fi SAVED_CFLAGS="${CFLAGS}" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 $as_echo_n "checking whether compiler supports -Werror... " >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="-Werror" else CFLAGS="${CFLAGS} -Werror" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_appended=-Werror { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_appended= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether alloc_size attribute is compilable" >&5 $as_echo_n "checking whether alloc_size attribute is compilable... " >&6; } if ${je_cv_alloc_size+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
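   (The -Werror sandwich pattern repeats around every attribute probe
   below: CFLAGS is saved in SAVED_CFLAGS, -Werror is appended so that
   a merely-warned-about unknown attribute counts as a failure, and
   CFLAGS is restored once je_cv_alloc_size and friends are cached.)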
*/ #include <stdlib.h> int main () { void *foo(size_t size) __attribute__((alloc_size(1))); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_alloc_size=yes else je_cv_alloc_size=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_alloc_size" >&5 $as_echo "$je_cv_alloc_size" >&6; } CFLAGS="${SAVED_CFLAGS}" if test "x${je_cv_alloc_size}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE " >>confdefs.h fi SAVED_CFLAGS="${CFLAGS}" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 $as_echo_n "checking whether compiler supports -Werror... " >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="-Werror" else CFLAGS="${CFLAGS} -Werror" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_appended=-Werror { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_appended= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether format(gnu_printf, ...) attribute is compilable" >&5 $as_echo_n "checking whether format(gnu_printf, ...) attribute is compilable... " >&6; } if ${je_cv_format_gnu_printf+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdio.h> int main () { void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2))); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_format_gnu_printf=yes else je_cv_format_gnu_printf=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_format_gnu_printf" >&5 $as_echo "$je_cv_format_gnu_printf" >&6; } CFLAGS="${SAVED_CFLAGS}" if test "x${je_cv_format_gnu_printf}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF " >>confdefs.h fi SAVED_CFLAGS="${CFLAGS}" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 $as_echo_n "checking whether compiler supports -Werror... " >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="-Werror" else CFLAGS="${CFLAGS} -Werror" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_appended=-Werror { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_appended= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether format(printf, ...) attribute is compilable" >&5 $as_echo_n "checking whether format(printf, ...) attribute is compilable... " >&6; } if ${je_cv_format_printf+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdio.h> int main () { void *foo(const char *format, ...)
__attribute__((format(printf, 1, 2))); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_format_printf=yes else je_cv_format_printf=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_format_printf" >&5 $as_echo "$je_cv_format_printf" >&6; } CFLAGS="${SAVED_CFLAGS}" if test "x${je_cv_format_printf}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF " >>confdefs.h fi # Check whether --with-rpath was given. if test "${with_rpath+set}" = set; then : withval=$with_rpath; if test "x$with_rpath" = "xno" ; then RPATH_EXTRA= else RPATH_EXTRA="`echo $with_rpath | tr \":\" \" \"`" fi else RPATH_EXTRA= fi # Check whether --enable-autogen was given. if test "${enable_autogen+set}" = set; then : enableval=$enable_autogen; if test "x$enable_autogen" = "xno" ; then enable_autogen="0" else enable_autogen="1" fi else enable_autogen="0" fi # Find a good install program. We prefer a C program (faster), # so one script is as good as another. But avoid the broken or # incompatible versions: # SysV /etc/install, /usr/sbin/install # SunOS /usr/etc/install # IRIX /sbin/install # AIX /bin/install # AmigaOS /C/install, which installs bootblocks on floppy discs # AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag # AFS /usr/afsws/bin/install, which mishandles nonexistent args # SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" # OS/2's system install, which has a completely different semantic # ./install, which can be erroneously created by make from ./install.sh. # Reject install programs that cannot install multiple files. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 $as_echo_n "checking for a BSD-compatible install... " >&6; } if test -z "$INSTALL"; then if ${ac_cv_path_install+:} false; then : $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. # Account for people who put trailing slashes in PATH elements. case $as_dir/ in #(( ./ | .// | /[cC]/* | \ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ /usr/ucb/* ) ;; *) # OSF1 and SCO ODT 3.0 have their own names for install. # Don't use installbsd from OSF since it installs stuff as root # by default. for ac_prog in ginstall scoinst install; do for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then if test $ac_prog = install && grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # AIX install. It has an incompatible calling convention. : elif test $ac_prog = install && grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # program-specific install script used by HP pwplus--don't use. : else rm -rf conftest.one conftest.two conftest.dir echo one > conftest.one echo two > conftest.two mkdir conftest.dir if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && test -s conftest.one && test -s conftest.two && test -s conftest.dir/conftest.one && test -s conftest.dir/conftest.two then ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" break 3 fi fi fi done done ;; esac done IFS=$as_save_IFS rm -rf conftest.one conftest.two conftest.dir fi if test "${ac_cv_path_install+set}" = set; then INSTALL=$ac_cv_path_install else # As a last resort, use the slow shell script. 
Don't cache a # value for INSTALL within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. INSTALL=$ac_install_sh fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 $as_echo "$INSTALL" >&6; } # Use test -z because SunOS4 sh mishandles braces in ${var-val}. # It thinks the first close brace ends the variable substitution. test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. set dummy ${ac_tool_prefix}ranlib; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_RANLIB+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$RANLIB"; then ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi RANLIB=$ac_cv_prog_RANLIB if test -n "$RANLIB"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 $as_echo "$RANLIB" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_RANLIB"; then ac_ct_RANLIB=$RANLIB # Extract the first word of "ranlib", so it can be a program name with args. set dummy ranlib; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_RANLIB"; then ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_RANLIB="ranlib" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB if test -n "$ac_ct_RANLIB"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 $as_echo "$ac_ct_RANLIB" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_RANLIB" = x; then RANLIB=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac RANLIB=$ac_ct_RANLIB fi else RANLIB="$ac_cv_prog_RANLIB" fi # Extract the first word of "ld", so it can be a program name with args. set dummy ld; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_LD+:} false; then : $as_echo_n "(cached) " >&6 else case $LD in [\\/]* | ?:[\\/]*) ac_cv_path_LD="$LD" # Let the user override the test with a path. 
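# An absolute LD from the environment (say LD=/usr/bin/ld) is accepted
# verbatim by this branch; only a bare program name falls through to
# the PATH search below, and "false" is cached as a sentinel when no
# ld is found at all.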
;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_LD="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_LD" && ac_cv_path_LD="false" ;; esac fi LD=$ac_cv_path_LD if test -n "$LD"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5 $as_echo "$LD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Extract the first word of "autoconf", so it can be a program name with args. set dummy autoconf; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_AUTOCONF+:} false; then : $as_echo_n "(cached) " >&6 else case $AUTOCONF in [\\/]* | ?:[\\/]*) ac_cv_path_AUTOCONF="$AUTOCONF" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_AUTOCONF="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_AUTOCONF" && ac_cv_path_AUTOCONF="false" ;; esac fi AUTOCONF=$ac_cv_path_AUTOCONF if test -n "$AUTOCONF"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AUTOCONF" >&5 $as_echo "$AUTOCONF" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free mallocx rallocx xallocx sallocx dallocx sdallocx nallocx mallctl mallctlnametomib mallctlbymib malloc_stats_print malloc_usable_size" ac_fn_c_check_func "$LINENO" "memalign" "ac_cv_func_memalign" if test "x$ac_cv_func_memalign" = xyes; then : $as_echo "#define JEMALLOC_OVERRIDE_MEMALIGN " >>confdefs.h public_syms="${public_syms} memalign" fi ac_fn_c_check_func "$LINENO" "valloc" "ac_cv_func_valloc" if test "x$ac_cv_func_valloc" = xyes; then : $as_echo "#define JEMALLOC_OVERRIDE_VALLOC " >>confdefs.h public_syms="${public_syms} valloc" fi GCOV_FLAGS= # Check whether --enable-code-coverage was given. if test "${enable_code_coverage+set}" = set; then : enableval=$enable_code_coverage; if test "x$enable_code_coverage" = "xno" ; then enable_code_coverage="0" else enable_code_coverage="1" fi else enable_code_coverage="0" fi if test "x$enable_code_coverage" = "x1" ; then deoptimize="no" echo "$CFLAGS $EXTRA_CFLAGS" | grep '\-O' >/dev/null || deoptimize="yes" if test "x${deoptimize}" = "xyes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O0" >&5 $as_echo_n "checking whether compiler supports -O0... " >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="-O0" else CFLAGS="${CFLAGS} -O0" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_appended=-O0 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_appended= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -fprofile-arcs -ftest-coverage" >&5 $as_echo_n "checking whether compiler supports -fprofile-arcs -ftest-coverage... " >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="-fprofile-arcs -ftest-coverage" else CFLAGS="${CFLAGS} -fprofile-arcs -ftest-coverage" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_appended=-fprofile-arcs -ftest-coverage { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_appended= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext EXTRA_LDFLAGS="$EXTRA_LDFLAGS -fprofile-arcs -ftest-coverage" $as_echo "#define JEMALLOC_CODE_COVERAGE " >>confdefs.h fi # Check whether --with-mangling was given. if test "${with_mangling+set}" = set; then : withval=$with_mangling; mangling_map="$with_mangling" else mangling_map="" fi # Check whether --with-jemalloc_prefix was given. if test "${with_jemalloc_prefix+set}" = set; then : withval=$with_jemalloc_prefix; JEMALLOC_PREFIX="$with_jemalloc_prefix" else if test "x$abi" != "xmacho" -a "x$abi" != "xpecoff"; then JEMALLOC_PREFIX="" else JEMALLOC_PREFIX="je_" fi fi if test "x$JEMALLOC_PREFIX" != "x" ; then JEMALLOC_CPREFIX=`echo ${JEMALLOC_PREFIX} | tr "a-z" "A-Z"` cat >>confdefs.h <<_ACEOF #define JEMALLOC_PREFIX "$JEMALLOC_PREFIX" _ACEOF cat >>confdefs.h <<_ACEOF #define JEMALLOC_CPREFIX "$JEMALLOC_CPREFIX" _ACEOF fi # Check whether --with-export was given. if test "${with_export+set}" = set; then : withval=$with_export; if test "x$with_export" = "xno"; then $as_echo "#define JEMALLOC_EXPORT /**/" >>confdefs.h fi fi # Check whether --with-private_namespace was given. if test "${with_private_namespace+set}" = set; then : withval=$with_private_namespace; JEMALLOC_PRIVATE_NAMESPACE="${with_private_namespace}je_" else JEMALLOC_PRIVATE_NAMESPACE="je_" fi cat >>confdefs.h <<_ACEOF #define JEMALLOC_PRIVATE_NAMESPACE $JEMALLOC_PRIVATE_NAMESPACE _ACEOF private_namespace="$JEMALLOC_PRIVATE_NAMESPACE" # Check whether --with-install_suffix was given. 
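# The optional --with-install_suffix value below is spliced into installed library and header names (e.g. include/jemalloc/jemalloc${install_suffix}.h), so multiple jemalloc builds can coexist.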
if test "${with_install_suffix+set}" = set; then : withval=$with_install_suffix; INSTALL_SUFFIX="$with_install_suffix" else INSTALL_SUFFIX= fi install_suffix="$INSTALL_SUFFIX" je_="je_" cfgoutputs_in="Makefile.in" cfgoutputs_in="${cfgoutputs_in} jemalloc.pc.in" cfgoutputs_in="${cfgoutputs_in} doc/html.xsl.in" cfgoutputs_in="${cfgoutputs_in} doc/manpages.xsl.in" cfgoutputs_in="${cfgoutputs_in} doc/jemalloc.xml.in" cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_macros.h.in" cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_protos.h.in" cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_typedefs.h.in" cfgoutputs_in="${cfgoutputs_in} include/jemalloc/internal/jemalloc_internal.h.in" cfgoutputs_in="${cfgoutputs_in} test/test.sh.in" cfgoutputs_in="${cfgoutputs_in} test/include/test/jemalloc_test.h.in" cfgoutputs_out="Makefile" cfgoutputs_out="${cfgoutputs_out} jemalloc.pc" cfgoutputs_out="${cfgoutputs_out} doc/html.xsl" cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl" cfgoutputs_out="${cfgoutputs_out} doc/jemalloc.xml" cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_macros.h" cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_protos.h" cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_typedefs.h" cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_internal.h" cfgoutputs_out="${cfgoutputs_out} test/test.sh" cfgoutputs_out="${cfgoutputs_out} test/include/test/jemalloc_test.h" cfgoutputs_tup="Makefile" cfgoutputs_tup="${cfgoutputs_tup} jemalloc.pc:jemalloc.pc.in" cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in" cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in" cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc.xml:doc/jemalloc.xml.in" cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in" cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in" cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_typedefs.h:include/jemalloc/jemalloc_typedefs.h.in" cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_internal.h" cfgoutputs_tup="${cfgoutputs_tup} test/test.sh:test/test.sh.in" cfgoutputs_tup="${cfgoutputs_tup} test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in" cfghdrs_in="include/jemalloc/jemalloc_defs.h.in" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/jemalloc_internal_defs.h.in" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_namespace.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_unnamespace.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_symbols.txt" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_namespace.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_unnamespace.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/size_classes.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_rename.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_mangle.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc.sh" cfghdrs_in="${cfghdrs_in} test/include/test/jemalloc_test_defs.h.in" cfghdrs_out="include/jemalloc/jemalloc_defs.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc${install_suffix}.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_namespace.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_unnamespace.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_symbols.txt" cfghdrs_out="${cfghdrs_out} 
include/jemalloc/internal/public_namespace.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_unnamespace.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/size_classes.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_protos_jet.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_rename.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle_jet.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/jemalloc_internal_defs.h" cfghdrs_out="${cfghdrs_out} test/include/test/jemalloc_test_defs.h" cfghdrs_tup="include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.in" cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:include/jemalloc/internal/jemalloc_internal_defs.h.in" cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:test/include/test/jemalloc_test_defs.h.in" # Check whether --enable-cc-silence was given. if test "${enable_cc_silence+set}" = set; then : enableval=$enable_cc_silence; if test "x$enable_cc_silence" = "xno" ; then enable_cc_silence="0" else enable_cc_silence="1" fi else enable_cc_silence="1" fi if test "x$enable_cc_silence" = "x1" ; then $as_echo "#define JEMALLOC_CC_SILENCE " >>confdefs.h fi # Check whether --enable-debug was given. if test "${enable_debug+set}" = set; then : enableval=$enable_debug; if test "x$enable_debug" = "xno" ; then enable_debug="0" else enable_debug="1" fi else enable_debug="0" fi if test "x$enable_debug" = "x1" ; then $as_echo "#define JEMALLOC_DEBUG " >>confdefs.h fi if test "x$enable_debug" = "x1" ; then $as_echo "#define JEMALLOC_DEBUG " >>confdefs.h enable_ivsalloc="1" fi # Check whether --enable-ivsalloc was given. if test "${enable_ivsalloc+set}" = set; then : enableval=$enable_ivsalloc; if test "x$enable_ivsalloc" = "xno" ; then enable_ivsalloc="0" else enable_ivsalloc="1" fi else enable_ivsalloc="0" fi if test "x$enable_ivsalloc" = "x1" ; then $as_echo "#define JEMALLOC_IVSALLOC " >>confdefs.h fi if test "x$enable_debug" = "x0" -a "x$no_CFLAGS" = "xyes" ; then optimize="no" echo "$CFLAGS $EXTRA_CFLAGS" | grep '\-O' >/dev/null || optimize="yes" if test "x${optimize}" = "xyes" ; then if test "x$GCC" = "xyes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O3" >&5 $as_echo_n "checking whether compiler supports -O3... " >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="-O3" else CFLAGS="${CFLAGS} -O3" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_appended=-O3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_appended= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -funroll-loops" >&5 $as_echo_n "checking whether compiler supports -funroll-loops... " >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="-funroll-loops" else CFLAGS="${CFLAGS} -funroll-loops" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_appended=-funroll-loops { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_appended= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext elif test "x$je_cv_msvc" = "xyes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O2" >&5 $as_echo_n "checking whether compiler supports -O2... " >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="-O2" else CFLAGS="${CFLAGS} -O2" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_appended=-O2 { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_appended= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext else { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O" >&5 $as_echo_n "checking whether compiler supports -O... " >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="-O" else CFLAGS="${CFLAGS} -O" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_appended=-O { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_appended= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi fi fi # Check whether --enable-stats was given. if test "${enable_stats+set}" = set; then : enableval=$enable_stats; if test "x$enable_stats" = "xno" ; then enable_stats="0" else enable_stats="1" fi else enable_stats="1" fi if test "x$enable_stats" = "x1" ; then $as_echo "#define JEMALLOC_STATS " >>confdefs.h fi # Check whether --enable-prof was given. if test "${enable_prof+set}" = set; then : enableval=$enable_prof; if test "x$enable_prof" = "xno" ; then enable_prof="0" else enable_prof="1" fi else enable_prof="0" fi if test "x$enable_prof" = "x1" ; then backtrace_method="" else backtrace_method="N/A" fi # Check whether --enable-prof-libunwind was given. if test "${enable_prof_libunwind+set}" = set; then : enableval=$enable_prof_libunwind; if test "x$enable_prof_libunwind" = "xno" ; then enable_prof_libunwind="0" else enable_prof_libunwind="1" fi else enable_prof_libunwind="0" fi # Check whether --with-static_libunwind was given. if test "${with_static_libunwind+set}" = set; then : withval=$with_static_libunwind; if test "x$with_static_libunwind" = "xno" ; then LUNWIND="-lunwind" else if test ! -f "$with_static_libunwind" ; then as_fn_error $? 
"Static libunwind not found: $with_static_libunwind" "$LINENO" 5 fi LUNWIND="$with_static_libunwind" fi else LUNWIND="-lunwind" fi if test "x$backtrace_method" = "x" -a "x$enable_prof_libunwind" = "x1" ; then for ac_header in libunwind.h do : ac_fn_c_check_header_mongrel "$LINENO" "libunwind.h" "ac_cv_header_libunwind_h" "$ac_includes_default" if test "x$ac_cv_header_libunwind_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBUNWIND_H 1 _ACEOF else enable_prof_libunwind="0" fi done if test "x$LUNWIND" = "x-lunwind" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for unw_backtrace in -lunwind" >&5 $as_echo_n "checking for unw_backtrace in -lunwind... " >&6; } if ${ac_cv_lib_unwind_unw_backtrace+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lunwind $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char unw_backtrace (); int main () { return unw_backtrace (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_unwind_unw_backtrace=yes else ac_cv_lib_unwind_unw_backtrace=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_unwind_unw_backtrace" >&5 $as_echo "$ac_cv_lib_unwind_unw_backtrace" >&6; } if test "x$ac_cv_lib_unwind_unw_backtrace" = xyes; then : LIBS="$LIBS $LUNWIND" else enable_prof_libunwind="0" fi else LIBS="$LIBS $LUNWIND" fi if test "x${enable_prof_libunwind}" = "x1" ; then backtrace_method="libunwind" $as_echo "#define JEMALLOC_PROF_LIBUNWIND " >>confdefs.h fi fi # Check whether --enable-prof-libgcc was given. if test "${enable_prof_libgcc+set}" = set; then : enableval=$enable_prof_libgcc; if test "x$enable_prof_libgcc" = "xno" ; then enable_prof_libgcc="0" else enable_prof_libgcc="1" fi else enable_prof_libgcc="1" fi if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \ -a "x$GCC" = "xyes" ; then for ac_header in unwind.h do : ac_fn_c_check_header_mongrel "$LINENO" "unwind.h" "ac_cv_header_unwind_h" "$ac_includes_default" if test "x$ac_cv_header_unwind_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_UNWIND_H 1 _ACEOF else enable_prof_libgcc="0" fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _Unwind_Backtrace in -lgcc" >&5 $as_echo_n "checking for _Unwind_Backtrace in -lgcc... " >&6; } if ${ac_cv_lib_gcc__Unwind_Backtrace+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lgcc $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char _Unwind_Backtrace (); int main () { return _Unwind_Backtrace (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_gcc__Unwind_Backtrace=yes else ac_cv_lib_gcc__Unwind_Backtrace=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_gcc__Unwind_Backtrace" >&5 $as_echo "$ac_cv_lib_gcc__Unwind_Backtrace" >&6; } if test "x$ac_cv_lib_gcc__Unwind_Backtrace" = xyes; then : LIBS="$LIBS -lgcc" else enable_prof_libgcc="0" fi if test "x${enable_prof_libgcc}" = "x1" ; then backtrace_method="libgcc" $as_echo "#define JEMALLOC_PROF_LIBGCC " >>confdefs.h fi else enable_prof_libgcc="0" fi # Check whether --enable-prof-gcc was given. if test "${enable_prof_gcc+set}" = set; then : enableval=$enable_prof_gcc; if test "x$enable_prof_gcc" = "xno" ; then enable_prof_gcc="0" else enable_prof_gcc="1" fi else enable_prof_gcc="1" fi if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \ -a "x$GCC" = "xyes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -fno-omit-frame-pointer" >&5 $as_echo_n "checking whether compiler supports -fno-omit-frame-pointer... " >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="-fno-omit-frame-pointer" else CFLAGS="${CFLAGS} -fno-omit-frame-pointer" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : je_cv_cflags_appended=-fno-omit-frame-pointer { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_appended= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext backtrace_method="gcc intrinsics" $as_echo "#define JEMALLOC_PROF_GCC " >>confdefs.h else enable_prof_gcc="0" fi if test "x$backtrace_method" = "x" ; then backtrace_method="none (disabling profiling)" enable_prof="0" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking configured backtracing method" >&5 $as_echo_n "checking configured backtracing method... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $backtrace_method" >&5 $as_echo "$backtrace_method" >&6; } if test "x$enable_prof" = "x1" ; then if test "x$abi" != "xpecoff"; then LIBS="$LIBS -lm" fi $as_echo "#define JEMALLOC_PROF " >>confdefs.h fi # Check whether --enable-tcache was given. if test "${enable_tcache+set}" = set; then : enableval=$enable_tcache; if test "x$enable_tcache" = "xno" ; then enable_tcache="0" else enable_tcache="1" fi else enable_tcache="1" fi if test "x$enable_tcache" = "x1" ; then $as_echo "#define JEMALLOC_TCACHE " >>confdefs.h fi if test "x${maps_coalesce}" = "x1" ; then $as_echo "#define JEMALLOC_MAPS_COALESCE " >>confdefs.h fi # Check whether --enable-munmap was given. 
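# The checks that follow probe platform memory-management facilities: returning memory via munmap(), sbrk()/dss availability (disabled where sbrk is deprecated), junk filling, utrace(2) tracing, and Valgrind integration.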
if test "${enable_munmap+set}" = set; then : enableval=$enable_munmap; if test "x$enable_munmap" = "xno" ; then enable_munmap="0" else enable_munmap="1" fi else enable_munmap="${default_munmap}" fi if test "x$enable_munmap" = "x1" ; then $as_echo "#define JEMALLOC_MUNMAP " >>confdefs.h fi have_dss="1" ac_fn_c_check_func "$LINENO" "sbrk" "ac_cv_func_sbrk" if test "x$ac_cv_func_sbrk" = xyes; then : have_sbrk="1" else have_sbrk="0" fi if test "x$have_sbrk" = "x1" ; then if test "x$sbrk_deprecated" = "x1" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: Disabling dss allocation because sbrk is deprecated" >&5 $as_echo "Disabling dss allocation because sbrk is deprecated" >&6; } have_dss="0" fi else have_dss="0" fi if test "x$have_dss" = "x1" ; then $as_echo "#define JEMALLOC_DSS " >>confdefs.h fi # Check whether --enable-fill was given. if test "${enable_fill+set}" = set; then : enableval=$enable_fill; if test "x$enable_fill" = "xno" ; then enable_fill="0" else enable_fill="1" fi else enable_fill="1" fi if test "x$enable_fill" = "x1" ; then $as_echo "#define JEMALLOC_FILL " >>confdefs.h fi # Check whether --enable-utrace was given. if test "${enable_utrace+set}" = set; then : enableval=$enable_utrace; if test "x$enable_utrace" = "xno" ; then enable_utrace="0" else enable_utrace="1" fi else enable_utrace="0" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether utrace(2) is compilable" >&5 $as_echo_n "checking whether utrace(2) is compilable... " >&6; } if ${je_cv_utrace+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include #include #include int main () { utrace((void *)0, 0); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_utrace=yes else je_cv_utrace=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_utrace" >&5 $as_echo "$je_cv_utrace" >&6; } if test "x${je_cv_utrace}" = "xno" ; then enable_utrace="0" fi if test "x$enable_utrace" = "x1" ; then $as_echo "#define JEMALLOC_UTRACE " >>confdefs.h fi # Check whether --enable-valgrind was given. if test "${enable_valgrind+set}" = set; then : enableval=$enable_valgrind; if test "x$enable_valgrind" = "xno" ; then enable_valgrind="0" else enable_valgrind="1" fi else enable_valgrind="1" fi if test "x$enable_valgrind" = "x1" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether valgrind is compilable" >&5 $as_echo_n "checking whether valgrind is compilable... " >&6; } if ${je_cv_valgrind+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #if !defined(VALGRIND_RESIZEINPLACE_BLOCK) # error "Incompatible Valgrind version" #endif int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_valgrind=yes else je_cv_valgrind=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_valgrind" >&5 $as_echo "$je_cv_valgrind" >&6; } if test "x${je_cv_valgrind}" = "xno" ; then enable_valgrind="0" fi if test "x$enable_valgrind" = "x1" ; then $as_echo "#define JEMALLOC_VALGRIND " >>confdefs.h fi fi # Check whether --enable-xmalloc was given. 
if test "${enable_xmalloc+set}" = set; then : enableval=$enable_xmalloc; if test "x$enable_xmalloc" = "xno" ; then enable_xmalloc="0" else enable_xmalloc="1" fi else enable_xmalloc="0" fi if test "x$enable_xmalloc" = "x1" ; then $as_echo "#define JEMALLOC_XMALLOC " >>confdefs.h fi # Check whether --enable-cache-oblivious was given. if test "${enable_cache_oblivious+set}" = set; then : enableval=$enable_cache_oblivious; if test "x$enable_cache_oblivious" = "xno" ; then enable_cache_oblivious="0" else enable_cache_oblivious="1" fi else enable_cache_oblivious="1" fi if test "x$enable_cache_oblivious" = "x1" ; then $as_echo "#define JEMALLOC_CACHE_OBLIVIOUS " >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using __builtin_ffsl is compilable" >&5 $as_echo_n "checking whether a program using __builtin_ffsl is compilable... " >&6; } if ${je_cv_gcc_builtin_ffsl+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include int main () { { int rv = __builtin_ffsl(0x08); printf("%d\n", rv); } ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_gcc_builtin_ffsl=yes else je_cv_gcc_builtin_ffsl=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_builtin_ffsl" >&5 $as_echo "$je_cv_gcc_builtin_ffsl" >&6; } if test "x${je_cv_gcc_builtin_ffsl}" = "xyes" ; then $as_echo "#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl" >>confdefs.h $as_echo "#define JEMALLOC_INTERNAL_FFS __builtin_ffs" >>confdefs.h else { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using ffsl is compilable" >&5 $as_echo_n "checking whether a program using ffsl is compilable... " >&6; } if ${je_cv_function_ffsl+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include int main () { { int rv = ffsl(0x08); printf("%d\n", rv); } ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_function_ffsl=yes else je_cv_function_ffsl=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_function_ffsl" >&5 $as_echo "$je_cv_function_ffsl" >&6; } if test "x${je_cv_function_ffsl}" = "xyes" ; then $as_echo "#define JEMALLOC_INTERNAL_FFSL ffsl" >>confdefs.h $as_echo "#define JEMALLOC_INTERNAL_FFS ffs" >>confdefs.h else as_fn_error $? "Cannot build without ffsl(3) or __builtin_ffsl()" "$LINENO" 5 fi fi # Check whether --with-lg_tiny_min was given. if test "${with_lg_tiny_min+set}" = set; then : withval=$with_lg_tiny_min; LG_TINY_MIN="$with_lg_tiny_min" else LG_TINY_MIN="3" fi cat >>confdefs.h <<_ACEOF #define LG_TINY_MIN $LG_TINY_MIN _ACEOF # Check whether --with-lg_quantum was given. if test "${with_lg_quantum+set}" = set; then : withval=$with_lg_quantum; LG_QUANTA="$with_lg_quantum" else LG_QUANTA="3 4" fi if test "x$with_lg_quantum" != "x" ; then cat >>confdefs.h <<_ACEOF #define LG_QUANTUM $with_lg_quantum _ACEOF fi # Check whether --with-lg_page was given. if test "${with_lg_page+set}" = set; then : withval=$with_lg_page; LG_PAGE="$with_lg_page" else LG_PAGE="detect" fi if test "x$LG_PAGE" = "xdetect"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking LG_PAGE" >&5 $as_echo_n "checking LG_PAGE... 
" >&6; } if ${je_cv_lg_page+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : je_cv_lg_page=12 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #ifdef _WIN32 #include #else #include #endif #include int main () { int result; FILE *f; #ifdef _WIN32 SYSTEM_INFO si; GetSystemInfo(&si); result = si.dwPageSize; #else result = sysconf(_SC_PAGESIZE); #endif if (result == -1) { return 1; } result = JEMALLOC_INTERNAL_FFSL(result) - 1; f = fopen("conftest.out", "w"); if (f == NULL) { return 1; } fprintf(f, "%d\n", result); fclose(f); return 0; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : je_cv_lg_page=`cat conftest.out` else je_cv_lg_page=undefined fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_lg_page" >&5 $as_echo "$je_cv_lg_page" >&6; } fi if test "x${je_cv_lg_page}" != "x" ; then LG_PAGE="${je_cv_lg_page}" fi if test "x${LG_PAGE}" != "xundefined" ; then cat >>confdefs.h <<_ACEOF #define LG_PAGE $LG_PAGE _ACEOF else as_fn_error $? "cannot determine value for LG_PAGE" "$LINENO" 5 fi # Check whether --with-lg_page_sizes was given. if test "${with_lg_page_sizes+set}" = set; then : withval=$with_lg_page_sizes; LG_PAGE_SIZES="$with_lg_page_sizes" else LG_PAGE_SIZES="$LG_PAGE" fi # Check whether --with-lg_size_class_group was given. if test "${with_lg_size_class_group+set}" = set; then : withval=$with_lg_size_class_group; LG_SIZE_CLASS_GROUP="$with_lg_size_class_group" else LG_SIZE_CLASS_GROUP="2" fi if test ! -e "${objroot}VERSION" ; then if test ! -e "${srcroot}VERSION" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: Missing VERSION file, and unable to generate it; creating bogus VERSION" >&5 $as_echo "Missing VERSION file, and unable to generate it; creating bogus VERSION" >&6; } echo "0.0.0-0-g0000000000000000000000000000000000000000" > "${objroot}VERSION" else cp ${srcroot}VERSION ${objroot}VERSION fi fi jemalloc_version=`cat "${objroot}VERSION"` jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $1}'` jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $2}'` jemalloc_version_bugfix=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $3}'` jemalloc_version_nrev=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $4}'` jemalloc_version_gid=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $5}'` if test "x$abi" != "xpecoff" ; then for ac_header in pthread.h do : ac_fn_c_check_header_mongrel "$LINENO" "pthread.h" "ac_cv_header_pthread_h" "$ac_includes_default" if test "x$ac_cv_header_pthread_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_PTHREAD_H 1 _ACEOF else as_fn_error $? "pthread.h is missing" "$LINENO" 5 fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_create in -lpthread" >&5 $as_echo_n "checking for pthread_create in -lpthread... " >&6; } if ${ac_cv_lib_pthread_pthread_create+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpthread $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char pthread_create (); int main () { return pthread_create (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_pthread_pthread_create=yes else ac_cv_lib_pthread_pthread_create=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pthread_pthread_create" >&5 $as_echo "$ac_cv_lib_pthread_pthread_create" >&6; } if test "x$ac_cv_lib_pthread_pthread_create" = xyes; then : LIBS="$LIBS -lpthread" else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing pthread_create" >&5 $as_echo_n "checking for library containing pthread_create... " >&6; } if ${ac_cv_search_pthread_create+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char pthread_create (); int main () { return pthread_create (); ; return 0; } _ACEOF for ac_lib in '' ; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_pthread_create=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_pthread_create+:} false; then : break fi done if ${ac_cv_search_pthread_create+:} false; then : else ac_cv_search_pthread_create=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_pthread_create" >&5 $as_echo "$ac_cv_search_pthread_create" >&6; } ac_res=$ac_cv_search_pthread_create if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" else as_fn_error $? "libpthread is missing" "$LINENO" 5 fi fi fi CPPFLAGS="$CPPFLAGS -D_REENTRANT" SAVED_LIBS="${LIBS}" LIBS= { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing clock_gettime" >&5 $as_echo_n "checking for library containing clock_gettime... " >&6; } if ${ac_cv_search_clock_gettime+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char clock_gettime (); int main () { return clock_gettime (); ; return 0; } _ACEOF for ac_lib in '' rt; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_clock_gettime=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_clock_gettime+:} false; then : break fi done if ${ac_cv_search_clock_gettime+:} false; then : else ac_cv_search_clock_gettime=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_clock_gettime" >&5 $as_echo "$ac_cv_search_clock_gettime" >&6; } ac_res=$ac_cv_search_clock_gettime if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" TESTLIBS="${LIBS}" fi LIBS="${SAVED_LIBS}" ac_fn_c_check_func "$LINENO" "secure_getenv" "ac_cv_func_secure_getenv" if test "x$ac_cv_func_secure_getenv" = xyes; then : have_secure_getenv="1" else have_secure_getenv="0" fi if test "x$have_secure_getenv" = "x1" ; then $as_echo "#define JEMALLOC_HAVE_SECURE_GETENV " >>confdefs.h fi ac_fn_c_check_func "$LINENO" "issetugid" "ac_cv_func_issetugid" if test "x$ac_cv_func_issetugid" = xyes; then : have_issetugid="1" else have_issetugid="0" fi if test "x$have_issetugid" = "x1" ; then $as_echo "#define JEMALLOC_HAVE_ISSETUGID " >>confdefs.h fi ac_fn_c_check_func "$LINENO" "_malloc_thread_cleanup" "ac_cv_func__malloc_thread_cleanup" if test "x$ac_cv_func__malloc_thread_cleanup" = xyes; then : have__malloc_thread_cleanup="1" else have__malloc_thread_cleanup="0" fi if test "x$have__malloc_thread_cleanup" = "x1" ; then $as_echo "#define JEMALLOC_MALLOC_THREAD_CLEANUP " >>confdefs.h force_tls="1" fi ac_fn_c_check_func "$LINENO" "_pthread_mutex_init_calloc_cb" "ac_cv_func__pthread_mutex_init_calloc_cb" if test "x$ac_cv_func__pthread_mutex_init_calloc_cb" = xyes; then : have__pthread_mutex_init_calloc_cb="1" else have__pthread_mutex_init_calloc_cb="0" fi if test "x$have__pthread_mutex_init_calloc_cb" = "x1" ; then $as_echo "#define JEMALLOC_MUTEX_INIT_CB 1" >>confdefs.h fi # Check whether --enable-lazy_lock was given. if test "${enable_lazy_lock+set}" = set; then : enableval=$enable_lazy_lock; if test "x$enable_lazy_lock" = "xno" ; then enable_lazy_lock="0" else enable_lazy_lock="1" fi else enable_lazy_lock="" fi if test "x$enable_lazy_lock" = "x" -a "x${force_lazy_lock}" = "x1" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing lazy-lock to avoid allocator/threading bootstrap issues" >&5 $as_echo "Forcing lazy-lock to avoid allocator/threading bootstrap issues" >&6; } enable_lazy_lock="1" fi if test "x$enable_lazy_lock" = "x1" ; then if test "x$abi" != "xpecoff" ; then for ac_header in dlfcn.h do : ac_fn_c_check_header_mongrel "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default" if test "x$ac_cv_header_dlfcn_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_DLFCN_H 1 _ACEOF else as_fn_error $? "dlfcn.h is missing" "$LINENO" 5 fi done ac_fn_c_check_func "$LINENO" "dlsym" "ac_cv_func_dlsym" if test "x$ac_cv_func_dlsym" = xyes; then : else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlsym in -ldl" >&5 $as_echo_n "checking for dlsym in -ldl... " >&6; } if ${ac_cv_lib_dl_dlsym+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlsym (); int main () { return dlsym (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dl_dlsym=yes else ac_cv_lib_dl_dlsym=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlsym" >&5 $as_echo "$ac_cv_lib_dl_dlsym" >&6; } if test "x$ac_cv_lib_dl_dlsym" = xyes; then : LIBS="$LIBS -ldl" else as_fn_error $? "libdl is missing" "$LINENO" 5 fi fi fi $as_echo "#define JEMALLOC_LAZY_LOCK " >>confdefs.h else enable_lazy_lock="0" fi # Check whether --enable-tls was given. if test "${enable_tls+set}" = set; then : enableval=$enable_tls; if test "x$enable_tls" = "xno" ; then enable_tls="0" else enable_tls="1" fi else enable_tls="" fi if test "x${enable_tls}" = "x" ; then if test "x${force_tls}" = "x1" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing TLS to avoid allocator/threading bootstrap issues" >&5 $as_echo "Forcing TLS to avoid allocator/threading bootstrap issues" >&6; } enable_tls="1" elif test "x${force_tls}" = "x0" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing no TLS to avoid allocator/threading bootstrap issues" >&5 $as_echo "Forcing no TLS to avoid allocator/threading bootstrap issues" >&6; } enable_tls="0" else enable_tls="1" fi fi if test "x${enable_tls}" = "x1" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for TLS" >&5 $as_echo_n "checking for TLS... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ __thread int x; int main () { x = 42; return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } enable_tls="0" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext else enable_tls="0" fi if test "x${enable_tls}" = "x1" ; then if test "x${force_tls}" = "x0" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: TLS enabled despite being marked unusable on this platform" >&5 $as_echo "$as_me: WARNING: TLS enabled despite being marked unusable on this platform" >&2;} fi cat >>confdefs.h <<_ACEOF #define JEMALLOC_TLS _ACEOF elif test "x${force_tls}" = "x1" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: TLS disabled despite being marked critical on this platform" >&5 $as_echo "$as_me: WARNING: TLS disabled despite being marked critical on this platform" >&2;} fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C11 atomics is compilable" >&5 $as_echo_n "checking whether C11 atomics is compilable... " >&6; } if ${je_cv_c11atomics+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include <stdint.h> #if (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) #include <stdatomic.h> #else #error Atomics not available #endif int main () { uint64_t *p = (uint64_t *)0; uint64_t x = 1; volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; uint64_t r = atomic_fetch_add(a, x) + x; return (r == 0); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_c11atomics=yes else je_cv_c11atomics=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_c11atomics" >&5 $as_echo "$je_cv_c11atomics" >&6; } if test "x${je_cv_c11atomics}" = "xyes" ; then $as_echo "#define JEMALLOC_C11ATOMICS 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether atomic(9) is compilable" >&5 $as_echo_n "checking whether atomic(9) is compilable... " >&6; } if ${je_cv_atomic9+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/types.h> #include <machine/atomic.h> #include <inttypes.h> int main () { { uint32_t x32 = 0; volatile uint32_t *x32p = &x32; atomic_fetchadd_32(x32p, 1); } { unsigned long xlong = 0; volatile unsigned long *xlongp = &xlong; atomic_fetchadd_long(xlongp, 1); } ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_atomic9=yes else je_cv_atomic9=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_atomic9" >&5 $as_echo "$je_cv_atomic9" >&6; } if test "x${je_cv_atomic9}" = "xyes" ; then $as_echo "#define JEMALLOC_ATOMIC9 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether Darwin OSAtomic*() is compilable" >&5 $as_echo_n "checking whether Darwin OSAtomic*() is compilable... " >&6; } if ${je_cv_osatomic+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <libkern/OSAtomic.h> #include <inttypes.h> int main () { { int32_t x32 = 0; volatile int32_t *x32p = &x32; OSAtomicAdd32(1, x32p); } { int64_t x64 = 0; volatile int64_t *x64p = &x64; OSAtomicAdd64(1, x64p); } ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_osatomic=yes else je_cv_osatomic=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_osatomic" >&5 $as_echo "$je_cv_osatomic" >&6; } if test "x${je_cv_osatomic}" = "xyes" ; then $as_echo "#define JEMALLOC_OSATOMIC " >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(2) is compilable" >&5 $as_echo_n "checking whether madvise(2) is compilable... " >&6; } if ${je_cv_madvise+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/mman.h> int main () { { madvise((void *)0, 0, 0); } ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_madvise=yes else je_cv_madvise=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_madvise" >&5 $as_echo "$je_cv_madvise" >&6; } if test "x${je_cv_madvise}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_MADVISE " >>confdefs.h fi if test "x${je_cv_atomic9}" != "xyes" -a "x${je_cv_osatomic}" != "xyes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to force 32-bit __sync_{add,sub}_and_fetch()" >&5 $as_echo_n "checking whether to force 32-bit __sync_{add,sub}_and_fetch()... 
" >&6; } if ${je_cv_sync_compare_and_swap_4+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { #ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 { uint32_t x32 = 0; __sync_add_and_fetch(&x32, 42); __sync_sub_and_fetch(&x32, 1); } #else #error __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 is defined, no need to force #endif ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_sync_compare_and_swap_4=yes else je_cv_sync_compare_and_swap_4=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_sync_compare_and_swap_4" >&5 $as_echo "$je_cv_sync_compare_and_swap_4" >&6; } if test "x${je_cv_sync_compare_and_swap_4}" = "xyes" ; then $as_echo "#define JE_FORCE_SYNC_COMPARE_AND_SWAP_4 " >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to force 64-bit __sync_{add,sub}_and_fetch()" >&5 $as_echo_n "checking whether to force 64-bit __sync_{add,sub}_and_fetch()... " >&6; } if ${je_cv_sync_compare_and_swap_8+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { #ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 { uint64_t x64 = 0; __sync_add_and_fetch(&x64, 42); __sync_sub_and_fetch(&x64, 1); } #else #error __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 is defined, no need to force #endif ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_sync_compare_and_swap_8=yes else je_cv_sync_compare_and_swap_8=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_sync_compare_and_swap_8" >&5 $as_echo "$je_cv_sync_compare_and_swap_8" >&6; } if test "x${je_cv_sync_compare_and_swap_8}" = "xyes" ; then $as_echo "#define JE_FORCE_SYNC_COMPARE_AND_SWAP_8 " >>confdefs.h fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_clz" >&5 $as_echo_n "checking for __builtin_clz... " >&6; } if ${je_cv_builtin_clz+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { { unsigned x = 0; int y = __builtin_clz(x); } { unsigned long x = 0; int y = __builtin_clzl(x); } ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_builtin_clz=yes else je_cv_builtin_clz=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_builtin_clz" >&5 $as_echo "$je_cv_builtin_clz" >&6; } if test "x${je_cv_builtin_clz}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_BUILTIN_CLZ " >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether Darwin OSSpin*() is compilable" >&5 $as_echo_n "checking whether Darwin OSSpin*() is compilable... " >&6; } if ${je_cv_osspin+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include <libkern/OSAtomic.h> #include <inttypes.h> int main () { OSSpinLock lock = 0; OSSpinLockLock(&lock); OSSpinLockUnlock(&lock); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_osspin=yes else je_cv_osspin=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_osspin" >&5 $as_echo "$je_cv_osspin" >&6; } if test "x${je_cv_osspin}" = "xyes" ; then $as_echo "#define JEMALLOC_OSSPIN " >>confdefs.h fi # Check whether --enable-zone-allocator was given. if test "${enable_zone_allocator+set}" = set; then : enableval=$enable_zone_allocator; if test "x$enable_zone_allocator" = "xno" ; then enable_zone_allocator="0" else enable_zone_allocator="1" fi else if test "x${abi}" = "xmacho"; then enable_zone_allocator="1" fi fi if test "x${enable_zone_allocator}" = "x1" ; then if test "x${abi}" != "xmacho"; then as_fn_error $? "--enable-zone-allocator is only supported on Darwin" "$LINENO" 5 fi $as_echo "#define JEMALLOC_ZONE " >>confdefs.h { $as_echo "$as_me:${as_lineno-$LINENO}: checking malloc zone version" >&5 $as_echo_n "checking malloc zone version... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <malloc/malloc.h> int main () { static int foo[sizeof(malloc_zone_t) == sizeof(void *) * 14 ? 1 : -1] ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : JEMALLOC_ZONE_VERSION=3 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <malloc/malloc.h> int main () { static int foo[sizeof(malloc_zone_t) == sizeof(void *) * 15 ? 1 : -1] ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : JEMALLOC_ZONE_VERSION=5 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <malloc/malloc.h> int main () { static int foo[sizeof(malloc_zone_t) == sizeof(void *) * 16 ? 1 : -1] ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <malloc/malloc.h> int main () { static int foo[sizeof(malloc_introspection_t) == sizeof(void *) * 9 ? 1 : -1] ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : JEMALLOC_ZONE_VERSION=6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <malloc/malloc.h> int main () { static int foo[sizeof(malloc_introspection_t) == sizeof(void *) * 13 ? 1 : -1] ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : JEMALLOC_ZONE_VERSION=7 else JEMALLOC_ZONE_VERSION= fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <malloc/malloc.h> int main () { static int foo[sizeof(malloc_zone_t) == sizeof(void *) * 17 ? 1 : -1] ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : JEMALLOC_ZONE_VERSION=8 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <malloc/malloc.h> int main () { static int foo[sizeof(malloc_zone_t) > sizeof(void *) * 17 ? 1 : -1] ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : JEMALLOC_ZONE_VERSION=9 else JEMALLOC_ZONE_VERSION= fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "x${JEMALLOC_ZONE_VERSION}" = "x"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 $as_echo "unsupported" >&6; } as_fn_error $? 
"Unsupported malloc zone version" "$LINENO" 5 fi if test "${JEMALLOC_ZONE_VERSION}" = 9; then JEMALLOC_ZONE_VERSION=8 { $as_echo "$as_me:${as_lineno-$LINENO}: result: > 8" >&5 $as_echo "> 8" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: $JEMALLOC_ZONE_VERSION" >&5 $as_echo "$JEMALLOC_ZONE_VERSION" >&6; } fi cat >>confdefs.h <<_ACEOF #define JEMALLOC_ZONE_VERSION $JEMALLOC_ZONE_VERSION _ACEOF fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether glibc malloc hook is compilable" >&5 $as_echo_n "checking whether glibc malloc hook is compilable... " >&6; } if ${je_cv_glibc_malloc_hook+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include extern void (* __free_hook)(void *ptr); extern void *(* __malloc_hook)(size_t size); extern void *(* __realloc_hook)(void *ptr, size_t size); int main () { void *ptr = 0L; if (__malloc_hook) ptr = __malloc_hook(1); if (__realloc_hook) ptr = __realloc_hook(ptr, 2); if (__free_hook && ptr) __free_hook(ptr); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_glibc_malloc_hook=yes else je_cv_glibc_malloc_hook=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_glibc_malloc_hook" >&5 $as_echo "$je_cv_glibc_malloc_hook" >&6; } if test "x${je_cv_glibc_malloc_hook}" = "xyes" ; then $as_echo "#define JEMALLOC_GLIBC_MALLOC_HOOK " >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether glibc memalign hook is compilable" >&5 $as_echo_n "checking whether glibc memalign hook is compilable... " >&6; } if ${je_cv_glibc_memalign_hook+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include extern void *(* __memalign_hook)(size_t alignment, size_t size); int main () { void *ptr = 0L; if (__memalign_hook) ptr = __memalign_hook(16, 7); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_glibc_memalign_hook=yes else je_cv_glibc_memalign_hook=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_glibc_memalign_hook" >&5 $as_echo "$je_cv_glibc_memalign_hook" >&6; } if test "x${je_cv_glibc_memalign_hook}" = "xyes" ; then $as_echo "#define JEMALLOC_GLIBC_MEMALIGN_HOOK " >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads adaptive mutexes is compilable" >&5 $as_echo_n "checking whether pthreads adaptive mutexes is compilable... " >&6; } if ${je_cv_pthread_mutex_adaptive_np+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include <pthread.h> int main () { pthread_mutexattr_t attr; pthread_mutexattr_init(&attr); pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP); pthread_mutexattr_destroy(&attr); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : je_cv_pthread_mutex_adaptive_np=yes else je_cv_pthread_mutex_adaptive_np=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pthread_mutex_adaptive_np" >&5 $as_echo "$je_cv_pthread_mutex_adaptive_np" >&6; } if test "x${je_cv_pthread_mutex_adaptive_np}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP " >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for stdbool.h that conforms to C99" >&5 $as_echo_n "checking for stdbool.h that conforms to C99... " >&6; } if ${ac_cv_header_stdbool_h+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdbool.h> #ifndef bool "error: bool is not defined" #endif #ifndef false "error: false is not defined" #endif #if false "error: false is not 0" #endif #ifndef true "error: true is not defined" #endif #if true != 1 "error: true is not 1" #endif #ifndef __bool_true_false_are_defined "error: __bool_true_false_are_defined is not defined" #endif struct s { _Bool s: 1; _Bool t; } s; char a[true == 1 ? 1 : -1]; char b[false == 0 ? 1 : -1]; char c[__bool_true_false_are_defined == 1 ? 1 : -1]; char d[(bool) 0.5 == true ? 1 : -1]; /* See body of main program for 'e'. */ char f[(_Bool) 0.0 == false ? 1 : -1]; char g[true]; char h[sizeof (_Bool)]; char i[sizeof s.t]; enum { j = false, k = true, l = false * true, m = true * 256 }; /* The following fails for HP aC++/ANSI C B3910B A.05.55 [Dec 04 2003]. */ _Bool n[m]; char o[sizeof n == m * sizeof n[0] ? 1 : -1]; char p[-1 - (_Bool) 0 < 0 && -1 - (bool) 0 < 0 ? 1 : -1]; /* Catch a bug in an HP-UX C compiler. See http://gcc.gnu.org/ml/gcc-patches/2003-12/msg02303.html http://lists.gnu.org/archive/html/bug-coreutils/2005-11/msg00161.html */ _Bool q = true; _Bool *pq = &q; int main () { bool e = &s; *pq |= q; *pq |= ! q; /* Refer to every declared value, to avoid compiler optimizations. 
*/ return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l + !m + !n + !o + !p + !q + !pq); ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdbool_h=yes else ac_cv_header_stdbool_h=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdbool_h" >&5 $as_echo "$ac_cv_header_stdbool_h" >&6; } ac_fn_c_check_type "$LINENO" "_Bool" "ac_cv_type__Bool" "$ac_includes_default" if test "x$ac_cv_type__Bool" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE__BOOL 1 _ACEOF fi if test $ac_cv_header_stdbool_h = yes; then $as_echo "#define HAVE_STDBOOL_H 1" >>confdefs.h fi ac_config_commands="$ac_config_commands include/jemalloc/internal/private_namespace.h" ac_config_commands="$ac_config_commands include/jemalloc/internal/private_unnamespace.h" ac_config_commands="$ac_config_commands include/jemalloc/internal/public_symbols.txt" ac_config_commands="$ac_config_commands include/jemalloc/internal/public_namespace.h" ac_config_commands="$ac_config_commands include/jemalloc/internal/public_unnamespace.h" ac_config_commands="$ac_config_commands include/jemalloc/internal/size_classes.h" ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_protos_jet.h" ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_rename.h" ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_mangle.h" ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_mangle_jet.h" ac_config_commands="$ac_config_commands include/jemalloc/jemalloc.h" ac_config_headers="$ac_config_headers $cfghdrs_tup" ac_config_files="$ac_config_files $cfgoutputs_tup config.stamp bin/jemalloc-config bin/jemalloc.sh bin/jeprof" cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure # tests run on this system so they can be shared between configure # scripts and configure runs, see configure's option --config-cache. # It is not useful on other systems. If it contains results you don't # want to keep, you may remove or edit it. # # config.status only pays attention to the cache file if you give it # the --recheck option to rerun configure. # # `ac_cv_env_foo' variables (set or unset) will be overridden when # loading this file, other *unset* `ac_cv_foo' will be assigned the # following values. _ACEOF # The following way of writing the cache mishandles newlines in values, # but we know of no workaround that is simple, portable, and efficient. # So, we kill variables containing newlines. # Ultrix sh set writes to stderr and can't be redirected directly, # and sets the high bit in the cache file unless we assign to the vars. ( for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space=' '; set) 2>&1` in #( *${as_nl}ac_space=\ *) # `set' does not quote correctly, so add quotes: double-quote # substitution turns \\\\ into \\, and sed turns \\ into \. 
sed -n \ "s/'/'\\\\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" ;; #( *) # `set' quotes correctly as required by POSIX, so do not add quotes. sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) | sed ' /^ac_cv_env_/b end t clear :clear s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ t end s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ :end' >>confcache if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then if test "x$cache_file" != "x/dev/null"; then { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 $as_echo "$as_me: updating cache $cache_file" >&6;} if test ! -f "$cache_file" || test -h "$cache_file"; then cat confcache >"$cache_file" else case $cache_file in #( */* | ?:*) mv -f confcache "$cache_file"$$ && mv -f "$cache_file"$$ "$cache_file" ;; #( *) mv -f confcache "$cache_file" ;; esac fi fi else { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 $as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} fi fi rm -f confcache test "x$prefix" = xNONE && prefix=$ac_default_prefix # Let make expand exec_prefix. test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' DEFS=-DHAVE_CONFIG_H ac_libobjs= ac_ltlibobjs= U= for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue # 1. Remove the extension, and $U if already installed. ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' ac_i=`$as_echo "$ac_i" | sed "$ac_script"` # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR # will be set to the directory where LIBOBJS objects are built. as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' done LIBOBJS=$ac_libobjs LTLIBOBJS=$ac_ltlibobjs : "${CONFIG_STATUS=./config.status}" ac_write_fail=0 ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 $as_echo "$as_me: creating $CONFIG_STATUS" >&6;} as_write_fail=0 cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 #! $SHELL # Generated by $as_me. # Run this file to recreate the current configuration. # Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. debug=false ac_cs_recheck=false ac_cs_silent=false SHELL=\${CONFIG_SHELL-$SHELL} export SHELL _ASEOF cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. 
if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_unset VAR # --------------- # Portably unset VAR. 
as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 
2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 6>&1 ## ----------------------------------- ## ## Main body of $CONFIG_STATUS script. ## ## ----------------------------------- ## _ASEOF test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Save the log message, to keep $0 and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" This file was extended by $as_me, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS CONFIG_LINKS = $CONFIG_LINKS CONFIG_COMMANDS = $CONFIG_COMMANDS $ $0 $@ on `(hostname || uname -n) 2>/dev/null | sed 1q` " _ACEOF case $ac_config_files in *" "*) set x $ac_config_files; shift; ac_config_files=$*;; esac case $ac_config_headers in *" "*) set x $ac_config_headers; shift; ac_config_headers=$*;; esac cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # Files that config.status was made for. config_files="$ac_config_files" config_headers="$ac_config_headers" config_commands="$ac_config_commands" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ac_cs_usage="\ \`$as_me' instantiates files and other configuration actions from templates according to the current configuration. Unless the files and actions are specified as TAGs, all are instantiated by default. Usage: $0 [OPTION]... [TAG]... -h, --help print this help, then exit -V, --version print version number and configuration settings, then exit --config print configuration, then exit -q, --quiet, --silent do not print progress messages -d, --debug don't remove temporary files --recheck update $as_me by reconfiguring in the same conditions --file=FILE[:TEMPLATE] instantiate the configuration file FILE --header=FILE[:TEMPLATE] instantiate the configuration header FILE Configuration files: $config_files Configuration headers: $config_headers Configuration commands: $config_commands Report bugs to the package provider." _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ config.status configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" Copyright (C) 2012 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." 
ac_pwd='$ac_pwd' srcdir='$srcdir' INSTALL='$INSTALL' test -n "\$AWK" || AWK=awk _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # The default lists apply if the user does not specify any file. ac_need_defaults=: while test $# != 0 do case $1 in --*=?*) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` ac_shift=: ;; --*=) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg= ac_shift=: ;; *) ac_option=$1 ac_optarg=$2 ac_shift=shift ;; esac case $ac_option in # Handling of the options. -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ac_cs_recheck=: ;; --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) $as_echo "$ac_cs_version"; exit ;; --config | --confi | --conf | --con | --co | --c ) $as_echo "$ac_cs_config"; exit ;; --debug | --debu | --deb | --de | --d | -d ) debug=: ;; --file | --fil | --fi | --f ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; '') as_fn_error $? "missing file argument" ;; esac as_fn_append CONFIG_FILES " '$ac_optarg'" ac_need_defaults=false;; --header | --heade | --head | --hea ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; esac as_fn_append CONFIG_HEADERS " '$ac_optarg'" ac_need_defaults=false;; --he | --h) # Conflict between --help and --header as_fn_error $? "ambiguous option: \`$1' Try \`$0 --help' for more information.";; --help | --hel | -h ) $as_echo "$ac_cs_usage"; exit ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil | --si | --s) ac_cs_silent=: ;; # This is an error. -*) as_fn_error $? "unrecognized option: \`$1' Try \`$0 --help' for more information." ;; *) as_fn_append ac_config_targets " $1" ac_need_defaults=false ;; esac shift done ac_configure_extra_args= if $ac_cs_silent; then exec 6>/dev/null ac_configure_extra_args="$ac_configure_extra_args --silent" fi _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 if \$ac_cs_recheck; then set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion shift \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 CONFIG_SHELL='$SHELL' export CONFIG_SHELL exec "\$@" fi _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 exec 5>>config.log { echo sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX ## Running $as_me. ## _ASBOX $as_echo "$ac_log" } >&5 _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # # INIT-COMMANDS # srcdir="${srcdir}" objroot="${objroot}" srcdir="${srcdir}" objroot="${objroot}" srcdir="${srcdir}" objroot="${objroot}" mangling_map="${mangling_map}" public_syms="${public_syms}" JEMALLOC_PREFIX="${JEMALLOC_PREFIX}" srcdir="${srcdir}" objroot="${objroot}" srcdir="${srcdir}" objroot="${objroot}" SHELL="${SHELL}" srcdir="${srcdir}" objroot="${objroot}" LG_QUANTA="${LG_QUANTA}" LG_TINY_MIN=${LG_TINY_MIN} LG_PAGE_SIZES="${LG_PAGE_SIZES}" LG_SIZE_CLASS_GROUP=${LG_SIZE_CLASS_GROUP} srcdir="${srcdir}" objroot="${objroot}" srcdir="${srcdir}" objroot="${objroot}" srcdir="${srcdir}" objroot="${objroot}" srcdir="${srcdir}" objroot="${objroot}" srcdir="${srcdir}" objroot="${objroot}" install_suffix="${install_suffix}" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Handling of arguments. 
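# Each non-option argument is collected as a configuration target and matched
# against the registered files, headers and commands in the case statement
# below; when no targets are given, ac_need_defaults keeps its default of ":"
# and every registered item is instantiated. As an illustrative invocation
# (not part of the build itself), `./config.status include/jemalloc/jemalloc.h'
# would regenerate just that one registered target.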
for ac_config_target in $ac_config_targets do case $ac_config_target in "include/jemalloc/internal/private_namespace.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/private_namespace.h" ;; "include/jemalloc/internal/private_unnamespace.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/private_unnamespace.h" ;; "include/jemalloc/internal/public_symbols.txt") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/public_symbols.txt" ;; "include/jemalloc/internal/public_namespace.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/public_namespace.h" ;; "include/jemalloc/internal/public_unnamespace.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/public_unnamespace.h" ;; "include/jemalloc/internal/size_classes.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/size_classes.h" ;; "include/jemalloc/jemalloc_protos_jet.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_protos_jet.h" ;; "include/jemalloc/jemalloc_rename.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_rename.h" ;; "include/jemalloc/jemalloc_mangle.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_mangle.h" ;; "include/jemalloc/jemalloc_mangle_jet.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_mangle_jet.h" ;; "include/jemalloc/jemalloc.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc.h" ;; "$cfghdrs_tup") CONFIG_HEADERS="$CONFIG_HEADERS $cfghdrs_tup" ;; "$cfgoutputs_tup") CONFIG_FILES="$CONFIG_FILES $cfgoutputs_tup" ;; "config.stamp") CONFIG_FILES="$CONFIG_FILES config.stamp" ;; "bin/jemalloc-config") CONFIG_FILES="$CONFIG_FILES bin/jemalloc-config" ;; "bin/jemalloc.sh") CONFIG_FILES="$CONFIG_FILES bin/jemalloc.sh" ;; "bin/jeprof") CONFIG_FILES="$CONFIG_FILES bin/jeprof" ;; *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; esac done # If the user did not use the arguments to specify the items to instantiate, # then the envvar interface is used. Set only those that are not. # We use the long form for the default assignment because of an extremely # bizarre bug on SunOS 4.1.3. if $ac_need_defaults; then test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands fi # Have a temporary directory for convenience. Make it in the build tree # simply because there is no reason against having it here, and in addition, # creating and moving files from /tmp can sometimes cause problems. # Hook for its removal unless debugging. # Note that there is a small window in which the directory will not be cleaned: # after its creation but before its name has been assigned to `$tmp'. $debug || { tmp= ac_tmp= trap 'exit_status=$? : "${ac_tmp:=$tmp}" { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status ' 0 trap 'as_fn_exit 1' 1 2 13 15 } # Create a (secure) tmp directory for tmp files. { tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && test -d "$tmp" } || { tmp=./conf$$-$RANDOM (umask 077 && mkdir "$tmp") } || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 ac_tmp=$tmp # Set up the scripts for CONFIG_FILES section. # No need to generate them if there are no CONFIG_FILES. # This happens for instance with `./config.status config.h'. if test -n "$CONFIG_FILES"; then ac_cr=`echo X | tr X '\015'` # On cygwin, bash can eat \r inside `` if the user requested igncr. 
# But we know of no other shell where ac_cr would be empty at this # point, so we can use a bashism as a fallback. if test "x$ac_cr" = x; then eval ac_cr=\$\'\\r\' fi ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' </dev/null 2>/dev/null` if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then ac_cs_awk_cr='\\r' else ac_cs_awk_cr=$ac_cr fi echo 'BEGIN {' >"$ac_tmp/subs1.awk" && _ACEOF { echo "cat >conf$$subs.awk <<_ACEOF" && echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && echo "_ACEOF" } >conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` ac_delim='%!_!# ' for ac_last_try in false false false false false :; do . ./conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` if test $ac_delim_n = $ac_delim_num; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done rm -f conf$$subs.sh cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && _ACEOF sed -n ' h s/^/S["/; s/!.*/"]=/ p g s/^[^!]*!// :repl t repl s/'"$ac_delim"'$// t delim :nl h s/\(.\{148\}\)..*/\1/ t more1 s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ p n b repl :more1 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t nl :delim h s/\(.\{148\}\)..*/\1/ t more2 s/["\\]/\\&/g; s/^/"/; s/$/"/ p b :more2 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t delim ' >$CONFIG_STATUS || ac_write_fail=1 rm -f conf$$subs.awk cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 _ACAWK cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && for (key in S) S_is_set[key] = 1 FS = "" } { line = $ 0 nfields = split(line, field, "@") substed = 0 len = length(field[1]) for (i = 2; i < nfields; i++) { key = field[i] keylen = length(key) if (S_is_set[key]) { value = S[key] line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) len += length(value) + length(field[++i]) substed = 1 } else len += 1 + keylen } print line } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" else cat fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 _ACEOF # VPATH may cause trouble with some makes, so we remove sole $(srcdir), # ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and # trailing colons and then remove the whole line if VPATH becomes empty # (actually we leave an empty line to preserve line numbers). if test "x$srcdir" = x.; then ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ h s/// s/^/:/ s/[ ]*$/:/ s/:\$(srcdir):/:/g s/:\${srcdir}:/:/g s/:@srcdir@:/:/g s/^:*// s/:*$// x s/\(=[ ]*\).*/\1/ G s/\n// s/^[^=]*=[ ]*$// }' fi cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 fi # test -n "$CONFIG_FILES" # Set up the scripts for CONFIG_HEADERS section. # No need to generate them if there are no CONFIG_HEADERS. # This happens for instance with `./config.status Makefile'. if test -n "$CONFIG_HEADERS"; then cat >"$ac_tmp/defines.awk" <<\_ACAWK || BEGIN { _ACEOF # Transform confdefs.h into an awk script `defines.awk', embedded as # here-document in config.status, that substitutes the proper values into # config.h.in to produce config.h. # Create a delimiter string that does not exist in confdefs.h, to ease # handling of long lines. 
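# The delimiter chosen below is verified against confdefs.h and lengthened on
# collision, with a bounded number of retries before hard failure. It is then
# used to chunk overlong #define lines at 148 characters so that the generated
# sed and awk programs never have to process a single unbounded line; see the
# s/.\{148\}/&.../ substitutions that follow.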
ac_delim='%!_!# ' for ac_last_try in false false :; do ac_tt=`sed -n "/$ac_delim/p" confdefs.h` if test -z "$ac_tt"; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done # For the awk script, D is an array of macro values keyed by name, # likewise P contains macro parameters if any. Preserve backslash # newline sequences. ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* sed -n ' s/.\{148\}/&'"$ac_delim"'/g t rset :rset s/^[ ]*#[ ]*define[ ][ ]*/ / t def d :def s/\\$// t bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3"/p s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p d :bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3\\\\\\n"\\/p t cont s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p t cont d :cont n s/.\{148\}/&'"$ac_delim"'/g t clear :clear s/\\$// t bsnlc s/["\\]/\\&/g; s/^/"/; s/$/"/p d :bsnlc s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p b cont ' >$CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 for (key in D) D_is_set[key] = 1 FS = "" } /^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { line = \$ 0 split(line, arg, " ") if (arg[1] == "#") { defundef = arg[2] mac1 = arg[3] } else { defundef = substr(arg[1], 2) mac1 = arg[2] } split(mac1, mac2, "(") #) macro = mac2[1] prefix = substr(line, 1, index(line, defundef) - 1) if (D_is_set[macro]) { # Preserve the white space surrounding the "#". print prefix "define", macro P[macro] D[macro] next } else { # Replace #undef with comments. This is necessary, for example, # in the case of _POSIX_SOURCE, which is predefined and required # on some systems where configure will not decide to define it. if (defundef == "undef") { print "/*", prefix defundef, macro, "*/" next } } } { print } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 fi # test -n "$CONFIG_HEADERS" eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" shift for ac_tag do case $ac_tag in :[FHLC]) ac_mode=$ac_tag; continue;; esac case $ac_mode$ac_tag in :[FHL]*:*);; :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; :[FH]-) ac_tag=-:-;; :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; esac ac_save_IFS=$IFS IFS=: set x $ac_tag IFS=$ac_save_IFS shift ac_file=$1 shift case $ac_mode in :L) ac_source=$1;; :[FH]) ac_file_inputs= for ac_f do case $ac_f in -) ac_f="$ac_tmp/stdin";; *) # Look for the file first in the build tree, then in the source tree # (if the path is not absolute). The absolute path cannot be DOS-style, # because $ac_f cannot contain `:'. test -f "$ac_f" || case $ac_f in [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; esac case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac as_fn_append ac_file_inputs " '$ac_f'" done # Let's still pretend it is `configure' which instantiates (i.e., don't # use $as_me), people would be surprised to read: # /* config.h. Generated by config.status. */ configure_input='Generated from '` $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' `' by configure.' if test x"$ac_file" != x-; then configure_input="$ac_file. 
$configure_input" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 $as_echo "$as_me: creating $ac_file" >&6;} fi # Neutralize special characters interpreted by sed in replacement strings. case $configure_input in #( *\&* | *\|* | *\\* ) ac_sed_conf_input=`$as_echo "$configure_input" | sed 's/[\\\\&|]/\\\\&/g'`;; #( *) ac_sed_conf_input=$configure_input;; esac case $ac_tag in *:-:* | *:-) cat >"$ac_tmp/stdin" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; esac ;; esac ac_dir=`$as_dirname -- "$ac_file" || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_file" : 'X\(//\)[^/]' \| \ X"$ac_file" : 'X\(//\)$' \| \ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` as_dir="$ac_dir"; as_fn_mkdir_p ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix case $ac_mode in :F) # # CONFIG_FILE # case $INSTALL in [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; esac _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # If the template does not know about datarootdir, expand it. # FIXME: This hack should be removed a few years after 2.60. ac_datarootdir_hack=; ac_datarootdir_seen= ac_sed_dataroot=' /datarootdir/ { p q } /@datadir@/p /@docdir@/p /@infodir@/p /@localedir@/p /@mandir@/p' case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in *datarootdir*) ac_datarootdir_seen=yes;; *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 $as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_datarootdir_hack=' s&@datadir@&$datadir&g s&@docdir@&$docdir&g s&@infodir@&$infodir&g s&@localedir@&$localedir&g s&@mandir@&$mandir&g s&\\\${datarootdir}&$datarootdir&g' ;; esac _ACEOF # Neutralize VPATH when `$srcdir' = `.'. # Shell code in configure.ac might set extrasub. # FIXME: do we really want to maintain this feature? 
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_sed_extra="$ac_vpsub $extrasub _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 :t /@[a-zA-Z_][a-zA-Z_0-9]*@/!b s|@configure_input@|$ac_sed_conf_input|;t t s&@top_builddir@&$ac_top_builddir_sub&;t t s&@top_build_prefix@&$ac_top_build_prefix&;t t s&@srcdir@&$ac_srcdir&;t t s&@abs_srcdir@&$ac_abs_srcdir&;t t s&@top_srcdir@&$ac_top_srcdir&;t t s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t s&@builddir@&$ac_builddir&;t t s&@abs_builddir@&$ac_abs_builddir&;t t s&@abs_top_builddir@&$ac_abs_top_builddir&;t t s&@INSTALL@&$ac_INSTALL&;t t $ac_datarootdir_hack " eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ "$ac_tmp/out"`; test -z "$ac_out"; } && { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&5 $as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&2;} rm -f "$ac_tmp/stdin" case $ac_file in -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; esac \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; :H) # # CONFIG_HEADER # if test x"$ac_file" != x-; then { $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" } >"$ac_tmp/config.h" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 $as_echo "$as_me: $ac_file is unchanged" >&6;} else rm -f "$ac_file" mv "$ac_tmp/config.h" "$ac_file" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 fi else $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ || as_fn_error $? 
"could not create -" "$LINENO" 5 fi ;; :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 $as_echo "$as_me: executing $ac_file commands" >&6;} ;; esac case $ac_file$ac_mode in "include/jemalloc/internal/private_namespace.h":C) mkdir -p "${objroot}include/jemalloc/internal" "${srcdir}/include/jemalloc/internal/private_namespace.sh" "${srcdir}/include/jemalloc/internal/private_symbols.txt" > "${objroot}include/jemalloc/internal/private_namespace.h" ;; "include/jemalloc/internal/private_unnamespace.h":C) mkdir -p "${objroot}include/jemalloc/internal" "${srcdir}/include/jemalloc/internal/private_unnamespace.sh" "${srcdir}/include/jemalloc/internal/private_symbols.txt" > "${objroot}include/jemalloc/internal/private_unnamespace.h" ;; "include/jemalloc/internal/public_symbols.txt":C) f="${objroot}include/jemalloc/internal/public_symbols.txt" mkdir -p "${objroot}include/jemalloc/internal" cp /dev/null "${f}" for nm in `echo ${mangling_map} |tr ',' ' '` ; do n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'` echo "${n}:${m}" >> "${f}" public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${n}\$" |tr '\n' ' '` done for sym in ${public_syms} ; do n="${sym}" m="${JEMALLOC_PREFIX}${sym}" echo "${n}:${m}" >> "${f}" done ;; "include/jemalloc/internal/public_namespace.h":C) mkdir -p "${objroot}include/jemalloc/internal" "${srcdir}/include/jemalloc/internal/public_namespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_namespace.h" ;; "include/jemalloc/internal/public_unnamespace.h":C) mkdir -p "${objroot}include/jemalloc/internal" "${srcdir}/include/jemalloc/internal/public_unnamespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_unnamespace.h" ;; "include/jemalloc/internal/size_classes.h":C) mkdir -p "${objroot}include/jemalloc/internal" "${SHELL}" "${srcdir}/include/jemalloc/internal/size_classes.sh" "${LG_QUANTA}" ${LG_TINY_MIN} "${LG_PAGE_SIZES}" ${LG_SIZE_CLASS_GROUP} > "${objroot}include/jemalloc/internal/size_classes.h" ;; "include/jemalloc/jemalloc_protos_jet.h":C) mkdir -p "${objroot}include/jemalloc" cat "${srcdir}/include/jemalloc/jemalloc_protos.h.in" | sed -e 's/@je_@/jet_/g' > "${objroot}include/jemalloc/jemalloc_protos_jet.h" ;; "include/jemalloc/jemalloc_rename.h":C) mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc_rename.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/jemalloc_rename.h" ;; "include/jemalloc/jemalloc_mangle.h":C) mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" je_ > "${objroot}include/jemalloc/jemalloc_mangle.h" ;; "include/jemalloc/jemalloc_mangle_jet.h":C) mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" jet_ > "${objroot}include/jemalloc/jemalloc_mangle_jet.h" ;; "include/jemalloc/jemalloc.h":C) mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc.sh" "${objroot}" > "${objroot}include/jemalloc/jemalloc${install_suffix}.h" ;; esac done # for ac_tag as_fn_exit 0 _ACEOF ac_clean_files=$ac_clean_files_save test $ac_write_fail = 0 || as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 # configure is writing to config.log, and then calls config.status. 
# config.status does its own redirection, appending to config.log. # Unfortunately, on DOS this fails, as config.log is still kept open # by configure, so config.status won't be able to write to it; its # output is simply discarded. So we exec the FD to /dev/null, # effectively closing config.log, so it can be properly (re)opened and # appended to by config.status. When coming back to configure, we # need to make the FD available again. if test "$no_create" != yes; then ac_cs_success=: ac_config_status_args= test "$silent" = yes && ac_config_status_args="$ac_config_status_args --quiet" exec 5>/dev/null $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false exec 5>>config.log # Use ||, not &&, to avoid exiting from the if with $? = 1, which # would make configure fail if this is the last instruction. $ac_cs_success || as_fn_exit 1 fi if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: ===============================================================================" >&5 $as_echo "===============================================================================" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: jemalloc version : ${jemalloc_version}" >&5 $as_echo "jemalloc version : ${jemalloc_version}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: library revision : ${rev}" >&5 $as_echo "library revision : ${rev}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 $as_echo "" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: CONFIG : ${CONFIG}" >&5 $as_echo "CONFIG : ${CONFIG}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: CC : ${CC}" >&5 $as_echo "CC : ${CC}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: CFLAGS : ${CFLAGS}" >&5 $as_echo "CFLAGS : ${CFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: CPPFLAGS : ${CPPFLAGS}" >&5 $as_echo "CPPFLAGS : ${CPPFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: LDFLAGS : ${LDFLAGS}" >&5 $as_echo "LDFLAGS : ${LDFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}" >&5 $as_echo "EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: LIBS : ${LIBS}" >&5 $as_echo "LIBS : ${LIBS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: TESTLIBS : ${TESTLIBS}" >&5 $as_echo "TESTLIBS : ${TESTLIBS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: RPATH_EXTRA : ${RPATH_EXTRA}" >&5 $as_echo "RPATH_EXTRA : ${RPATH_EXTRA}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 $as_echo "" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: XSLTPROC : ${XSLTPROC}" >&5 $as_echo "XSLTPROC : ${XSLTPROC}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: XSLROOT : ${XSLROOT}" >&5 $as_echo "XSLROOT : ${XSLROOT}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 $as_echo "" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: PREFIX : ${PREFIX}" >&5 $as_echo "PREFIX : ${PREFIX}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: BINDIR : ${BINDIR}" >&5 $as_echo "BINDIR : ${BINDIR}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: DATADIR : ${DATADIR}" >&5 $as_echo "DATADIR : ${DATADIR}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: INCLUDEDIR : ${INCLUDEDIR}" >&5 $as_echo "INCLUDEDIR : 
${INCLUDEDIR}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: LIBDIR : ${LIBDIR}" >&5 $as_echo "LIBDIR : ${LIBDIR}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: MANDIR : ${MANDIR}" >&5 $as_echo "MANDIR : ${MANDIR}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 $as_echo "" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: srcroot : ${srcroot}" >&5 $as_echo "srcroot : ${srcroot}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: abs_srcroot : ${abs_srcroot}" >&5 $as_echo "abs_srcroot : ${abs_srcroot}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: objroot : ${objroot}" >&5 $as_echo "objroot : ${objroot}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: abs_objroot : ${abs_objroot}" >&5 $as_echo "abs_objroot : ${abs_objroot}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 $as_echo "" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}" >&5 $as_echo "JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: JEMALLOC_PRIVATE_NAMESPACE" >&5 $as_echo "JEMALLOC_PRIVATE_NAMESPACE" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: : ${JEMALLOC_PRIVATE_NAMESPACE}" >&5 $as_echo " : ${JEMALLOC_PRIVATE_NAMESPACE}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: install_suffix : ${install_suffix}" >&5 $as_echo "install_suffix : ${install_suffix}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: autogen : ${enable_autogen}" >&5 $as_echo "autogen : ${enable_autogen}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: cc-silence : ${enable_cc_silence}" >&5 $as_echo "cc-silence : ${enable_cc_silence}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: debug : ${enable_debug}" >&5 $as_echo "debug : ${enable_debug}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: code-coverage : ${enable_code_coverage}" >&5 $as_echo "code-coverage : ${enable_code_coverage}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: stats : ${enable_stats}" >&5 $as_echo "stats : ${enable_stats}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: prof : ${enable_prof}" >&5 $as_echo "prof : ${enable_prof}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: prof-libunwind : ${enable_prof_libunwind}" >&5 $as_echo "prof-libunwind : ${enable_prof_libunwind}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: prof-libgcc : ${enable_prof_libgcc}" >&5 $as_echo "prof-libgcc : ${enable_prof_libgcc}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: prof-gcc : ${enable_prof_gcc}" >&5 $as_echo "prof-gcc : ${enable_prof_gcc}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: tcache : ${enable_tcache}" >&5 $as_echo "tcache : ${enable_tcache}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: fill : ${enable_fill}" >&5 $as_echo "fill : ${enable_fill}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: utrace : ${enable_utrace}" >&5 $as_echo "utrace : ${enable_utrace}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: valgrind : ${enable_valgrind}" >&5 $as_echo "valgrind : ${enable_valgrind}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: xmalloc : ${enable_xmalloc}" >&5 $as_echo "xmalloc : ${enable_xmalloc}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: munmap : ${enable_munmap}" >&5 $as_echo "munmap : ${enable_munmap}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: lazy_lock : ${enable_lazy_lock}" >&5 $as_echo "lazy_lock : ${enable_lazy_lock}" >&6; } { $as_echo 
"$as_me:${as_lineno-$LINENO}: result: tls : ${enable_tls}" >&5 $as_echo "tls : ${enable_tls}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: cache-oblivious : ${enable_cache_oblivious}" >&5 $as_echo "cache-oblivious : ${enable_cache_oblivious}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: ===============================================================================" >&5 $as_echo "===============================================================================" >&6; } �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������disque-1.0-rc1/deps/jemalloc/configure.ac�����������������������������������������������������������0000664�0000000�0000000�00000156142�12641765611�0020360�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������dnl Process this file with autoconf to produce a configure script. AC_INIT([Makefile.in]) dnl ============================================================================ dnl Custom macro definitions. dnl JE_CFLAGS_APPEND(cflag) AC_DEFUN([JE_CFLAGS_APPEND], [ AC_MSG_CHECKING([whether compiler supports $1]) TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="$1" else CFLAGS="${CFLAGS} $1" fi AC_COMPILE_IFELSE([AC_LANG_PROGRAM( [[ ]], [[ return 0; ]])], [je_cv_cflags_appended=$1] AC_MSG_RESULT([yes]), [je_cv_cflags_appended=] AC_MSG_RESULT([no]) [CFLAGS="${TCFLAGS}"] ) ]) dnl JE_COMPILABLE(label, hcode, mcode, rvar) dnl dnl Use AC_LINK_IFELSE() rather than AC_COMPILE_IFELSE() so that linker errors dnl cause failure. AC_DEFUN([JE_COMPILABLE], [ AC_CACHE_CHECK([whether $1 is compilable], [$4], [AC_LINK_IFELSE([AC_LANG_PROGRAM([$2], [$3])], [$4=yes], [$4=no])]) ]) dnl ============================================================================ CONFIG=`echo ${ac_configure_args} | sed -e 's#'"'"'\([^ ]*\)'"'"'#\1#g'` AC_SUBST([CONFIG]) dnl Library revision. rev=2 AC_SUBST([rev]) srcroot=$srcdir if test "x${srcroot}" = "x." ; then srcroot="" else srcroot="${srcroot}/" fi AC_SUBST([srcroot]) abs_srcroot="`cd \"${srcdir}\"; pwd`/" AC_SUBST([abs_srcroot]) objroot="" AC_SUBST([objroot]) abs_objroot="`pwd`/" AC_SUBST([abs_objroot]) dnl Munge install path variables. if test "x$prefix" = "xNONE" ; then prefix="/usr/local" fi if test "x$exec_prefix" = "xNONE" ; then exec_prefix=$prefix fi PREFIX=$prefix AC_SUBST([PREFIX]) BINDIR=`eval echo $bindir` BINDIR=`eval echo $BINDIR` AC_SUBST([BINDIR]) INCLUDEDIR=`eval echo $includedir` INCLUDEDIR=`eval echo $INCLUDEDIR` AC_SUBST([INCLUDEDIR]) LIBDIR=`eval echo $libdir` LIBDIR=`eval echo $LIBDIR` AC_SUBST([LIBDIR]) DATADIR=`eval echo $datadir` DATADIR=`eval echo $DATADIR` AC_SUBST([DATADIR]) MANDIR=`eval echo $mandir` MANDIR=`eval echo $MANDIR` AC_SUBST([MANDIR]) dnl Support for building documentation. 
AC_PATH_PROG([XSLTPROC], [xsltproc], [false], [$PATH]) if test -d "/usr/share/xml/docbook/stylesheet/docbook-xsl" ; then DEFAULT_XSLROOT="/usr/share/xml/docbook/stylesheet/docbook-xsl" elif test -d "/usr/share/sgml/docbook/xsl-stylesheets" ; then DEFAULT_XSLROOT="/usr/share/sgml/docbook/xsl-stylesheets" else dnl Documentation building will fail if this default gets used. DEFAULT_XSLROOT="" fi AC_ARG_WITH([xslroot], [AS_HELP_STRING([--with-xslroot=<path>], [XSL stylesheet root path])], [ if test "x$with_xslroot" = "xno" ; then XSLROOT="${DEFAULT_XSLROOT}" else XSLROOT="${with_xslroot}" fi ], XSLROOT="${DEFAULT_XSLROOT}" ) AC_SUBST([XSLROOT]) dnl If CFLAGS isn't defined, set CFLAGS to something reasonable. Otherwise, dnl just prevent autoconf from molesting CFLAGS. CFLAGS=$CFLAGS AC_PROG_CC if test "x$GCC" != "xyes" ; then AC_CACHE_CHECK([whether compiler is MSVC], [je_cv_msvc], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [ #ifndef _MSC_VER int fail[-1]; #endif ])], [je_cv_msvc=yes], [je_cv_msvc=no])]) fi if test "x$CFLAGS" = "x" ; then no_CFLAGS="yes" if test "x$GCC" = "xyes" ; then JE_CFLAGS_APPEND([-std=gnu99]) if test "x$je_cv_cflags_appended" = "x-std=gnu99" ; then AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT]) fi JE_CFLAGS_APPEND([-Wall]) JE_CFLAGS_APPEND([-Werror=declaration-after-statement]) JE_CFLAGS_APPEND([-pipe]) JE_CFLAGS_APPEND([-g3]) elif test "x$je_cv_msvc" = "xyes" ; then CC="$CC -nologo" JE_CFLAGS_APPEND([-Zi]) JE_CFLAGS_APPEND([-MT]) JE_CFLAGS_APPEND([-W3]) JE_CFLAGS_APPEND([-FS]) CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat" fi fi dnl Append EXTRA_CFLAGS to CFLAGS, if defined. if test "x$EXTRA_CFLAGS" != "x" ; then JE_CFLAGS_APPEND([$EXTRA_CFLAGS]) fi AC_PROG_CPP AC_C_BIGENDIAN([ac_cv_big_endian=1], [ac_cv_big_endian=0]) if test "x${ac_cv_big_endian}" = "x1" ; then AC_DEFINE_UNQUOTED([JEMALLOC_BIG_ENDIAN], [ ]) fi if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat/C99" fi AC_CHECK_SIZEOF([void *]) if test "x${ac_cv_sizeof_void_p}" = "x8" ; then LG_SIZEOF_PTR=3 elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then LG_SIZEOF_PTR=2 else AC_MSG_ERROR([Unsupported pointer size: ${ac_cv_sizeof_void_p}]) fi AC_DEFINE_UNQUOTED([LG_SIZEOF_PTR], [$LG_SIZEOF_PTR]) AC_CHECK_SIZEOF([int]) if test "x${ac_cv_sizeof_int}" = "x8" ; then LG_SIZEOF_INT=3 elif test "x${ac_cv_sizeof_int}" = "x4" ; then LG_SIZEOF_INT=2 else AC_MSG_ERROR([Unsupported int size: ${ac_cv_sizeof_int}]) fi AC_DEFINE_UNQUOTED([LG_SIZEOF_INT], [$LG_SIZEOF_INT]) AC_CHECK_SIZEOF([long]) if test "x${ac_cv_sizeof_long}" = "x8" ; then LG_SIZEOF_LONG=3 elif test "x${ac_cv_sizeof_long}" = "x4" ; then LG_SIZEOF_LONG=2 else AC_MSG_ERROR([Unsupported long size: ${ac_cv_sizeof_long}]) fi AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG], [$LG_SIZEOF_LONG]) AC_CHECK_SIZEOF([intmax_t]) if test "x${ac_cv_sizeof_intmax_t}" = "x16" ; then LG_SIZEOF_INTMAX_T=4 elif test "x${ac_cv_sizeof_intmax_t}" = "x8" ; then LG_SIZEOF_INTMAX_T=3 elif test "x${ac_cv_sizeof_intmax_t}" = "x4" ; then LG_SIZEOF_INTMAX_T=2 else AC_MSG_ERROR([Unsupported intmax_t size: ${ac_cv_sizeof_intmax_t}]) fi AC_DEFINE_UNQUOTED([LG_SIZEOF_INTMAX_T], [$LG_SIZEOF_INTMAX_T]) AC_CANONICAL_HOST dnl CPU-specific settings. 
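dnl On i686/x86_64 the probe below link-tests a one-statement program built
dnl around __asm__ volatile("pause"); when it compiles and links, CPU_SPINWAIT
dnl is defined to that instruction so spin loops can hint the pipeline while
dnl waiting on a lock. On CPUs with no probe, CPU_SPINWAIT stays empty.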
CPU_SPINWAIT="" case "${host_cpu}" in i686|x86_64) AC_CACHE_VAL([je_cv_pause], [JE_COMPILABLE([pause instruction], [], [[__asm__ volatile("pause"); return 0;]], [je_cv_pause])]) if test "x${je_cv_pause}" = "xyes" ; then CPU_SPINWAIT='__asm__ volatile("pause")' fi ;; powerpc) AC_DEFINE_UNQUOTED([HAVE_ALTIVEC], [ ]) ;; *) ;; esac AC_DEFINE_UNQUOTED([CPU_SPINWAIT], [$CPU_SPINWAIT]) LD_PRELOAD_VAR="LD_PRELOAD" so="so" importlib="${so}" o="$ac_objext" a="a" exe="$ac_exeext" libprefix="lib" DSO_LDFLAGS='-shared -Wl,-soname,$(@F)' RPATH='-Wl,-rpath,$(1)' SOREV="${so}.${rev}" PIC_CFLAGS='-fPIC -DPIC' CTARGET='-o $@' LDTARGET='-o $@' EXTRA_LDFLAGS= ARFLAGS='crus' AROUT=' $@' CC_MM=1 AN_MAKEVAR([AR], [AC_PROG_AR]) AN_PROGRAM([ar], [AC_PROG_AR]) AC_DEFUN([AC_PROG_AR], [AC_CHECK_TOOL(AR, ar, :)]) AC_PROG_AR dnl Platform-specific settings. abi and RPATH can probably be determined dnl programmatically, but doing so is error-prone, which makes it generally dnl not worth the trouble. dnl dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the dnl definitions need to be seen before any headers are included, which is a pain dnl to make happen otherwise. default_munmap="1" maps_coalesce="1" case "${host}" in *-*-darwin* | *-*-ios*) CFLAGS="$CFLAGS" abi="macho" AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) RPATH="" LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES" so="dylib" importlib="${so}" force_tls="0" DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)' SOREV="${rev}.${so}" sbrk_deprecated="1" ;; *-*-freebsd*) CFLAGS="$CFLAGS" abi="elf" AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) force_lazy_lock="1" ;; *-*-dragonfly*) CFLAGS="$CFLAGS" abi="elf" AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) ;; *-*-openbsd*) CFLAGS="$CFLAGS" abi="elf" AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) force_tls="0" ;; *-*-bitrig*) CFLAGS="$CFLAGS" abi="elf" AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) ;; *-*-linux*) CFLAGS="$CFLAGS" CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE" abi="elf" AC_DEFINE([JEMALLOC_HAS_ALLOCA_H]) AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ]) AC_DEFINE([JEMALLOC_THREADED_INIT], [ ]) AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ]) default_munmap="0" ;; *-*-netbsd*) AC_MSG_CHECKING([ABI]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM( [[#ifdef __ELF__ /* ELF */ #else #error aout #endif ]])], [CFLAGS="$CFLAGS"; abi="elf"], [abi="aout"]) AC_MSG_RESULT([$abi]) AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) ;; *-*-solaris2*) CFLAGS="$CFLAGS" abi="elf" AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) RPATH='-Wl,-R,$(1)' dnl Solaris needs this for sigwait(). 
CPPFLAGS="$CPPFLAGS -D_POSIX_PTHREAD_SEMANTICS" LIBS="$LIBS -lposix4 -lsocket -lnsl" ;; *-ibm-aix*) if "$LG_SIZEOF_PTR" = "8"; then dnl 64bit AIX LD_PRELOAD_VAR="LDR_PRELOAD64" else dnl 32bit AIX LD_PRELOAD_VAR="LDR_PRELOAD" fi abi="xcoff" ;; *-*-mingw* | *-*-cygwin*) abi="pecoff" force_tls="0" force_lazy_lock="1" maps_coalesce="0" RPATH="" so="dll" if test "x$je_cv_msvc" = "xyes" ; then importlib="lib" DSO_LDFLAGS="-LD" EXTRA_LDFLAGS="-link -DEBUG" CTARGET='-Fo$@' LDTARGET='-Fe$@' AR='lib' ARFLAGS='-nologo -out:' AROUT='$@' CC_MM= else importlib="${so}" DSO_LDFLAGS="-shared" fi a="lib" libprefix="" SOREV="${so}" PIC_CFLAGS="" ;; *) AC_MSG_RESULT([Unsupported operating system: ${host}]) abi="elf" ;; esac JEMALLOC_USABLE_SIZE_CONST=const AC_CHECK_HEADERS([malloc.h], [ AC_MSG_CHECKING([whether malloc_usable_size definition can use const argument]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM( [#include #include size_t malloc_usable_size(const void *ptr); ], [])],[ AC_MSG_RESULT([yes]) ],[ JEMALLOC_USABLE_SIZE_CONST= AC_MSG_RESULT([no]) ]) ]) AC_DEFINE_UNQUOTED([JEMALLOC_USABLE_SIZE_CONST], [$JEMALLOC_USABLE_SIZE_CONST]) AC_SUBST([abi]) AC_SUBST([RPATH]) AC_SUBST([LD_PRELOAD_VAR]) AC_SUBST([so]) AC_SUBST([importlib]) AC_SUBST([o]) AC_SUBST([a]) AC_SUBST([exe]) AC_SUBST([libprefix]) AC_SUBST([DSO_LDFLAGS]) AC_SUBST([EXTRA_LDFLAGS]) AC_SUBST([SOREV]) AC_SUBST([PIC_CFLAGS]) AC_SUBST([CTARGET]) AC_SUBST([LDTARGET]) AC_SUBST([MKLIB]) AC_SUBST([ARFLAGS]) AC_SUBST([AROUT]) AC_SUBST([CC_MM]) JE_COMPILABLE([__attribute__ syntax], [static __attribute__((unused)) void foo(void){}], [], [je_cv_attribute]) if test "x${je_cv_attribute}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_ATTR], [ ]) if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then JE_CFLAGS_APPEND([-fvisibility=hidden]) fi fi dnl Check for tls_model attribute support (clang 3.0 still lacks support). SAVED_CFLAGS="${CFLAGS}" JE_CFLAGS_APPEND([-Werror]) JE_COMPILABLE([tls_model attribute], [], [static __thread int __attribute__((tls_model("initial-exec"), unused)) foo; foo = 0;], [je_cv_tls_model]) CFLAGS="${SAVED_CFLAGS}" if test "x${je_cv_tls_model}" = "xyes" ; then AC_DEFINE([JEMALLOC_TLS_MODEL], [__attribute__((tls_model("initial-exec")))]) else AC_DEFINE([JEMALLOC_TLS_MODEL], [ ]) fi dnl Check for alloc_size attribute support. SAVED_CFLAGS="${CFLAGS}" JE_CFLAGS_APPEND([-Werror]) JE_COMPILABLE([alloc_size attribute], [#include ], [void *foo(size_t size) __attribute__((alloc_size(1)));], [je_cv_alloc_size]) CFLAGS="${SAVED_CFLAGS}" if test "x${je_cv_alloc_size}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_ATTR_ALLOC_SIZE], [ ]) fi dnl Check for format(gnu_printf, ...) attribute support. SAVED_CFLAGS="${CFLAGS}" JE_CFLAGS_APPEND([-Werror]) JE_COMPILABLE([format(gnu_printf, ...) attribute], [#include ], [void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2)));], [je_cv_format_gnu_printf]) CFLAGS="${SAVED_CFLAGS}" if test "x${je_cv_format_gnu_printf}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF], [ ]) fi dnl Check for format(printf, ...) attribute support. SAVED_CFLAGS="${CFLAGS}" JE_CFLAGS_APPEND([-Werror]) JE_COMPILABLE([format(printf, ...) attribute], [#include ], [void *foo(const char *format, ...) __attribute__((format(printf, 1, 2)));], [je_cv_format_printf]) CFLAGS="${SAVED_CFLAGS}" if test "x${je_cv_format_printf}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_PRINTF], [ ]) fi dnl Support optional additions to rpath. 
AC_ARG_WITH([rpath], [AS_HELP_STRING([--with-rpath=<rpath>], [Colon-separated rpath (ELF systems only)])], if test "x$with_rpath" = "xno" ; then RPATH_EXTRA= else RPATH_EXTRA="`echo $with_rpath | tr \":\" \" \"`" fi, RPATH_EXTRA= ) AC_SUBST([RPATH_EXTRA]) dnl Disable rules that do automatic regeneration of configure output by default. AC_ARG_ENABLE([autogen], [AS_HELP_STRING([--enable-autogen], [Automatically regenerate configure output])], if test "x$enable_autogen" = "xno" ; then enable_autogen="0" else enable_autogen="1" fi , enable_autogen="0" ) AC_SUBST([enable_autogen]) AC_PROG_INSTALL AC_PROG_RANLIB AC_PATH_PROG([LD], [ld], [false], [$PATH]) AC_PATH_PROG([AUTOCONF], [autoconf], [false], [$PATH]) public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free mallocx rallocx xallocx sallocx dallocx sdallocx nallocx mallctl mallctlnametomib mallctlbymib malloc_stats_print malloc_usable_size" dnl Check for allocator-related functions that should be wrapped. AC_CHECK_FUNC([memalign], [AC_DEFINE([JEMALLOC_OVERRIDE_MEMALIGN], [ ]) public_syms="${public_syms} memalign"]) AC_CHECK_FUNC([valloc], [AC_DEFINE([JEMALLOC_OVERRIDE_VALLOC], [ ]) public_syms="${public_syms} valloc"]) dnl Do not compute test code coverage by default. GCOV_FLAGS= AC_ARG_ENABLE([code-coverage], [AS_HELP_STRING([--enable-code-coverage], [Enable code coverage])], [if test "x$enable_code_coverage" = "xno" ; then enable_code_coverage="0" else enable_code_coverage="1" fi ], [enable_code_coverage="0"] ) if test "x$enable_code_coverage" = "x1" ; then deoptimize="no" echo "$CFLAGS $EXTRA_CFLAGS" | grep '\-O' >/dev/null || deoptimize="yes" if test "x${deoptimize}" = "xyes" ; then JE_CFLAGS_APPEND([-O0]) fi JE_CFLAGS_APPEND([-fprofile-arcs -ftest-coverage]) EXTRA_LDFLAGS="$EXTRA_LDFLAGS -fprofile-arcs -ftest-coverage" AC_DEFINE([JEMALLOC_CODE_COVERAGE], [ ]) fi AC_SUBST([enable_code_coverage]) dnl Perform no name mangling by default. AC_ARG_WITH([mangling], [AS_HELP_STRING([--with-mangling=<map>], [Mangle symbols in <map>])], [mangling_map="$with_mangling"], [mangling_map=""]) dnl Do not prefix public APIs by default. AC_ARG_WITH([jemalloc_prefix], [AS_HELP_STRING([--with-jemalloc-prefix=<prefix>], [Prefix to prepend to all public APIs])], [JEMALLOC_PREFIX="$with_jemalloc_prefix"], [if test "x$abi" != "xmacho" -a "x$abi" != "xpecoff"; then JEMALLOC_PREFIX="" else JEMALLOC_PREFIX="je_" fi] ) if test "x$JEMALLOC_PREFIX" != "x" ; then JEMALLOC_CPREFIX=`echo ${JEMALLOC_PREFIX} | tr "a-z" "A-Z"` AC_DEFINE_UNQUOTED([JEMALLOC_PREFIX], ["$JEMALLOC_PREFIX"]) AC_DEFINE_UNQUOTED([JEMALLOC_CPREFIX], ["$JEMALLOC_CPREFIX"]) fi AC_SUBST([JEMALLOC_CPREFIX]) AC_ARG_WITH([export], [AS_HELP_STRING([--without-export], [disable exporting jemalloc public APIs])], [if test "x$with_export" = "xno"; then AC_DEFINE([JEMALLOC_EXPORT],[]) fi] ) dnl Mangle library-private APIs. AC_ARG_WITH([private_namespace], [AS_HELP_STRING([--with-private-namespace=<prefix>], [Prefix to prepend to all library-private APIs])], [JEMALLOC_PRIVATE_NAMESPACE="${with_private_namespace}je_"], [JEMALLOC_PRIVATE_NAMESPACE="je_"] ) AC_DEFINE_UNQUOTED([JEMALLOC_PRIVATE_NAMESPACE], [$JEMALLOC_PRIVATE_NAMESPACE]) private_namespace="$JEMALLOC_PRIVATE_NAMESPACE" AC_SUBST([private_namespace]) dnl Do not add suffix to installed files by default. 
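dnl When a suffix is supplied it is appended to installed file names through
dnl ${install_suffix}; e.g. (an illustrative value) --with-install-suffix=-dsq
dnl would install the main header as include/jemalloc/jemalloc-dsq.h, as wired
dnl up in cfghdrs_out below.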
AC_ARG_WITH([install_suffix], [AS_HELP_STRING([--with-install-suffix=<suffix>], [Suffix to append to all installed files])], [INSTALL_SUFFIX="$with_install_suffix"], [INSTALL_SUFFIX=] ) install_suffix="$INSTALL_SUFFIX" AC_SUBST([install_suffix]) dnl Substitute @je_@ in jemalloc_protos.h.in, primarily to make generation of dnl jemalloc_protos_jet.h easy. je_="je_" AC_SUBST([je_]) cfgoutputs_in="Makefile.in" cfgoutputs_in="${cfgoutputs_in} jemalloc.pc.in" cfgoutputs_in="${cfgoutputs_in} doc/html.xsl.in" cfgoutputs_in="${cfgoutputs_in} doc/manpages.xsl.in" cfgoutputs_in="${cfgoutputs_in} doc/jemalloc.xml.in" cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_macros.h.in" cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_protos.h.in" cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_typedefs.h.in" cfgoutputs_in="${cfgoutputs_in} include/jemalloc/internal/jemalloc_internal.h.in" cfgoutputs_in="${cfgoutputs_in} test/test.sh.in" cfgoutputs_in="${cfgoutputs_in} test/include/test/jemalloc_test.h.in" cfgoutputs_out="Makefile" cfgoutputs_out="${cfgoutputs_out} jemalloc.pc" cfgoutputs_out="${cfgoutputs_out} doc/html.xsl" cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl" cfgoutputs_out="${cfgoutputs_out} doc/jemalloc.xml" cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_macros.h" cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_protos.h" cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_typedefs.h" cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_internal.h" cfgoutputs_out="${cfgoutputs_out} test/test.sh" cfgoutputs_out="${cfgoutputs_out} test/include/test/jemalloc_test.h" cfgoutputs_tup="Makefile" cfgoutputs_tup="${cfgoutputs_tup} jemalloc.pc:jemalloc.pc.in" cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in" cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in" cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc.xml:doc/jemalloc.xml.in" cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in" cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in" cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_typedefs.h:include/jemalloc/jemalloc_typedefs.h.in" cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_internal.h" cfgoutputs_tup="${cfgoutputs_tup} test/test.sh:test/test.sh.in" cfgoutputs_tup="${cfgoutputs_tup} test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in" cfghdrs_in="include/jemalloc/jemalloc_defs.h.in" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/jemalloc_internal_defs.h.in" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_namespace.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_unnamespace.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_symbols.txt" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_namespace.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_unnamespace.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/size_classes.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_rename.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_mangle.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc.sh" cfghdrs_in="${cfghdrs_in} test/include/test/jemalloc_test_defs.h.in" cfghdrs_out="include/jemalloc/jemalloc_defs.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc${install_suffix}.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_namespace.h"
cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_unnamespace.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_symbols.txt" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_namespace.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_unnamespace.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/size_classes.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_protos_jet.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_rename.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle_jet.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/jemalloc_internal_defs.h" cfghdrs_out="${cfghdrs_out} test/include/test/jemalloc_test_defs.h" cfghdrs_tup="include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.in" cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:include/jemalloc/internal/jemalloc_internal_defs.h.in" cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:test/include/test/jemalloc_test_defs.h.in" dnl Silence irrelevant compiler warnings by default. AC_ARG_ENABLE([cc-silence], [AS_HELP_STRING([--disable-cc-silence], [Do not silence irrelevant compiler warnings])], [if test "x$enable_cc_silence" = "xno" ; then enable_cc_silence="0" else enable_cc_silence="1" fi ], [enable_cc_silence="1"] ) if test "x$enable_cc_silence" = "x1" ; then AC_DEFINE([JEMALLOC_CC_SILENCE], [ ]) fi dnl Do not compile with debugging by default. AC_ARG_ENABLE([debug], [AS_HELP_STRING([--enable-debug], [Build debugging code (implies --enable-ivsalloc)])], [if test "x$enable_debug" = "xno" ; then enable_debug="0" else enable_debug="1" fi ], [enable_debug="0"] ) if test "x$enable_debug" = "x1" ; then AC_DEFINE([JEMALLOC_DEBUG], [ ]) fi if test "x$enable_debug" = "x1" ; then AC_DEFINE([JEMALLOC_DEBUG], [ ]) enable_ivsalloc="1" fi AC_SUBST([enable_debug]) dnl Do not validate pointers by default. AC_ARG_ENABLE([ivsalloc], [AS_HELP_STRING([--enable-ivsalloc], [Validate pointers passed through the public API])], [if test "x$enable_ivsalloc" = "xno" ; then enable_ivsalloc="0" else enable_ivsalloc="1" fi ], [enable_ivsalloc="0"] ) if test "x$enable_ivsalloc" = "x1" ; then AC_DEFINE([JEMALLOC_IVSALLOC], [ ]) fi dnl Only optimize if not debugging. if test "x$enable_debug" = "x0" -a "x$no_CFLAGS" = "xyes" ; then dnl Make sure that an optimization flag was not specified in EXTRA_CFLAGS. optimize="no" echo "$CFLAGS $EXTRA_CFLAGS" | grep '\-O' >/dev/null || optimize="yes" if test "x${optimize}" = "xyes" ; then if test "x$GCC" = "xyes" ; then JE_CFLAGS_APPEND([-O3]) JE_CFLAGS_APPEND([-funroll-loops]) elif test "x$je_cv_msvc" = "xyes" ; then JE_CFLAGS_APPEND([-O2]) else JE_CFLAGS_APPEND([-O]) fi fi fi dnl Enable statistics calculation by default. AC_ARG_ENABLE([stats], [AS_HELP_STRING([--disable-stats], [Disable statistics calculation/reporting])], [if test "x$enable_stats" = "xno" ; then enable_stats="0" else enable_stats="1" fi ], [enable_stats="1"] ) if test "x$enable_stats" = "x1" ; then AC_DEFINE([JEMALLOC_STATS], [ ]) fi AC_SUBST([enable_stats]) dnl Do not enable profiling by default. 
AC_ARG_ENABLE([prof], [AS_HELP_STRING([--enable-prof], [Enable allocation profiling])], [if test "x$enable_prof" = "xno" ; then enable_prof="0" else enable_prof="1" fi ], [enable_prof="0"] ) if test "x$enable_prof" = "x1" ; then backtrace_method="" else backtrace_method="N/A" fi AC_ARG_ENABLE([prof-libunwind], [AS_HELP_STRING([--enable-prof-libunwind], [Use libunwind for backtracing])], [if test "x$enable_prof_libunwind" = "xno" ; then enable_prof_libunwind="0" else enable_prof_libunwind="1" fi ], [enable_prof_libunwind="0"] ) AC_ARG_WITH([static_libunwind], [AS_HELP_STRING([--with-static-libunwind=<libunwind.a>], [Path to static libunwind library; use rather than dynamically linking])], if test "x$with_static_libunwind" = "xno" ; then LUNWIND="-lunwind" else if test ! -f "$with_static_libunwind" ; then AC_MSG_ERROR([Static libunwind not found: $with_static_libunwind]) fi LUNWIND="$with_static_libunwind" fi, LUNWIND="-lunwind" ) if test "x$backtrace_method" = "x" -a "x$enable_prof_libunwind" = "x1" ; then AC_CHECK_HEADERS([libunwind.h], , [enable_prof_libunwind="0"]) if test "x$LUNWIND" = "x-lunwind" ; then AC_CHECK_LIB([unwind], [unw_backtrace], [LIBS="$LIBS $LUNWIND"], [enable_prof_libunwind="0"]) else LIBS="$LIBS $LUNWIND" fi if test "x${enable_prof_libunwind}" = "x1" ; then backtrace_method="libunwind" AC_DEFINE([JEMALLOC_PROF_LIBUNWIND], [ ]) fi fi AC_ARG_ENABLE([prof-libgcc], [AS_HELP_STRING([--disable-prof-libgcc], [Do not use libgcc for backtracing])], [if test "x$enable_prof_libgcc" = "xno" ; then enable_prof_libgcc="0" else enable_prof_libgcc="1" fi ], [enable_prof_libgcc="1"] ) if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \ -a "x$GCC" = "xyes" ; then AC_CHECK_HEADERS([unwind.h], , [enable_prof_libgcc="0"]) AC_CHECK_LIB([gcc], [_Unwind_Backtrace], [LIBS="$LIBS -lgcc"], [enable_prof_libgcc="0"]) if test "x${enable_prof_libgcc}" = "x1" ; then backtrace_method="libgcc" AC_DEFINE([JEMALLOC_PROF_LIBGCC], [ ]) fi else enable_prof_libgcc="0" fi AC_ARG_ENABLE([prof-gcc], [AS_HELP_STRING([--disable-prof-gcc], [Do not use gcc intrinsics for backtracing])], [if test "x$enable_prof_gcc" = "xno" ; then enable_prof_gcc="0" else enable_prof_gcc="1" fi ], [enable_prof_gcc="1"] ) if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \ -a "x$GCC" = "xyes" ; then JE_CFLAGS_APPEND([-fno-omit-frame-pointer]) backtrace_method="gcc intrinsics" AC_DEFINE([JEMALLOC_PROF_GCC], [ ]) else enable_prof_gcc="0" fi if test "x$backtrace_method" = "x" ; then backtrace_method="none (disabling profiling)" enable_prof="0" fi AC_MSG_CHECKING([configured backtracing method]) AC_MSG_RESULT([$backtrace_method]) if test "x$enable_prof" = "x1" ; then if test "x$abi" != "xpecoff"; then dnl Heap profiling uses the log(3) function. LIBS="$LIBS -lm" fi AC_DEFINE([JEMALLOC_PROF], [ ]) fi AC_SUBST([enable_prof]) dnl Enable thread-specific caching by default. AC_ARG_ENABLE([tcache], [AS_HELP_STRING([--disable-tcache], [Disable per thread caches])], [if test "x$enable_tcache" = "xno" ; then enable_tcache="0" else enable_tcache="1" fi ], [enable_tcache="1"] ) if test "x$enable_tcache" = "x1" ; then AC_DEFINE([JEMALLOC_TCACHE], [ ]) fi AC_SUBST([enable_tcache]) dnl Indicate whether adjacent virtual memory mappings automatically coalesce dnl (and fragment on demand). if test "x${maps_coalesce}" = "x1" ; then AC_DEFINE([JEMALLOC_MAPS_COALESCE], [ ]) fi dnl Enable VM deallocation via munmap() by default.
AC_ARG_ENABLE([munmap], [AS_HELP_STRING([--disable-munmap], [Disable VM deallocation via munmap(2)])], [if test "x$enable_munmap" = "xno" ; then enable_munmap="0" else enable_munmap="1" fi ], [enable_munmap="${default_munmap}"] ) if test "x$enable_munmap" = "x1" ; then AC_DEFINE([JEMALLOC_MUNMAP], [ ]) fi AC_SUBST([enable_munmap]) dnl Enable allocation from DSS if supported by the OS. have_dss="1" dnl Check whether the BSD/SUSv1 sbrk() exists. If not, disable DSS support. AC_CHECK_FUNC([sbrk], [have_sbrk="1"], [have_sbrk="0"]) if test "x$have_sbrk" = "x1" ; then if test "x$sbrk_deprecated" = "x1" ; then AC_MSG_RESULT([Disabling dss allocation because sbrk is deprecated]) have_dss="0" fi else have_dss="0" fi if test "x$have_dss" = "x1" ; then AC_DEFINE([JEMALLOC_DSS], [ ]) fi dnl Support the junk/zero filling option by default. AC_ARG_ENABLE([fill], [AS_HELP_STRING([--disable-fill], [Disable support for junk/zero filling, quarantine, and redzones])], [if test "x$enable_fill" = "xno" ; then enable_fill="0" else enable_fill="1" fi ], [enable_fill="1"] ) if test "x$enable_fill" = "x1" ; then AC_DEFINE([JEMALLOC_FILL], [ ]) fi AC_SUBST([enable_fill]) dnl Disable utrace(2)-based tracing by default. AC_ARG_ENABLE([utrace], [AS_HELP_STRING([--enable-utrace], [Enable utrace(2)-based tracing])], [if test "x$enable_utrace" = "xno" ; then enable_utrace="0" else enable_utrace="1" fi ], [enable_utrace="0"] ) JE_COMPILABLE([utrace(2)], [ #include <sys/types.h> #include <sys/param.h> #include <sys/time.h> #include <sys/uio.h> #include <sys/ktrace.h> ], [ utrace((void *)0, 0); ], [je_cv_utrace]) if test "x${je_cv_utrace}" = "xno" ; then enable_utrace="0" fi if test "x$enable_utrace" = "x1" ; then AC_DEFINE([JEMALLOC_UTRACE], [ ]) fi AC_SUBST([enable_utrace]) dnl Support Valgrind by default. AC_ARG_ENABLE([valgrind], [AS_HELP_STRING([--disable-valgrind], [Disable support for Valgrind])], [if test "x$enable_valgrind" = "xno" ; then enable_valgrind="0" else enable_valgrind="1" fi ], [enable_valgrind="1"] ) if test "x$enable_valgrind" = "x1" ; then JE_COMPILABLE([valgrind], [ #include <valgrind/valgrind.h> #include <valgrind/memcheck.h> #if !defined(VALGRIND_RESIZEINPLACE_BLOCK) # error "Incompatible Valgrind version" #endif ], [], [je_cv_valgrind]) if test "x${je_cv_valgrind}" = "xno" ; then enable_valgrind="0" fi if test "x$enable_valgrind" = "x1" ; then AC_DEFINE([JEMALLOC_VALGRIND], [ ]) fi fi AC_SUBST([enable_valgrind]) dnl Do not support the xmalloc option by default. AC_ARG_ENABLE([xmalloc], [AS_HELP_STRING([--enable-xmalloc], [Support xmalloc option])], [if test "x$enable_xmalloc" = "xno" ; then enable_xmalloc="0" else enable_xmalloc="1" fi ], [enable_xmalloc="0"] ) if test "x$enable_xmalloc" = "x1" ; then AC_DEFINE([JEMALLOC_XMALLOC], [ ]) fi AC_SUBST([enable_xmalloc]) dnl Support cache-oblivious allocation alignment by default. AC_ARG_ENABLE([cache-oblivious], [AS_HELP_STRING([--disable-cache-oblivious], [Disable support for cache-oblivious allocation alignment])], [if test "x$enable_cache_oblivious" = "xno" ; then enable_cache_oblivious="0" else enable_cache_oblivious="1" fi ], [enable_cache_oblivious="1"] ) if test "x$enable_cache_oblivious" = "x1" ; then AC_DEFINE([JEMALLOC_CACHE_OBLIVIOUS], [ ]) fi AC_SUBST([enable_cache_oblivious]) dnl ============================================================================ dnl Check for __builtin_ffsl(), then ffsl(3), and fail if neither are found. dnl One of those two functions should (theoretically) exist on all platforms dnl that jemalloc currently has a chance of functioning on without modification.
dnl We additionally assume ffs() or __builtin_ffs() are defined if dnl ffsl() or __builtin_ffsl() are defined, respectively. JE_COMPILABLE([a program using __builtin_ffsl], [ #include <stdio.h> #include <strings.h> #include <string.h> ], [ { int rv = __builtin_ffsl(0x08); printf("%d\n", rv); } ], [je_cv_gcc_builtin_ffsl]) if test "x${je_cv_gcc_builtin_ffsl}" = "xyes" ; then AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [__builtin_ffsl]) AC_DEFINE([JEMALLOC_INTERNAL_FFS], [__builtin_ffs]) else JE_COMPILABLE([a program using ffsl], [ #include <stdio.h> #include <strings.h> #include <string.h> ], [ { int rv = ffsl(0x08); printf("%d\n", rv); } ], [je_cv_function_ffsl]) if test "x${je_cv_function_ffsl}" = "xyes" ; then AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [ffsl]) AC_DEFINE([JEMALLOC_INTERNAL_FFS], [ffs]) else AC_MSG_ERROR([Cannot build without ffsl(3) or __builtin_ffsl()]) fi fi AC_ARG_WITH([lg_tiny_min], [AS_HELP_STRING([--with-lg-tiny-min=<lg-tiny-min>], [Base 2 log of minimum tiny size class to support])], [LG_TINY_MIN="$with_lg_tiny_min"], [LG_TINY_MIN="3"]) AC_DEFINE_UNQUOTED([LG_TINY_MIN], [$LG_TINY_MIN]) AC_ARG_WITH([lg_quantum], [AS_HELP_STRING([--with-lg-quantum=<lg-quantum>], [Base 2 log of minimum allocation alignment])], [LG_QUANTA="$with_lg_quantum"], [LG_QUANTA="3 4"]) if test "x$with_lg_quantum" != "x" ; then AC_DEFINE_UNQUOTED([LG_QUANTUM], [$with_lg_quantum]) fi AC_ARG_WITH([lg_page], [AS_HELP_STRING([--with-lg-page=<lg-page>], [Base 2 log of system page size])], [LG_PAGE="$with_lg_page"], [LG_PAGE="detect"]) if test "x$LG_PAGE" = "xdetect"; then AC_CACHE_CHECK([LG_PAGE], [je_cv_lg_page], AC_RUN_IFELSE([AC_LANG_PROGRAM( [[ #include <strings.h> #ifdef _WIN32 #include <windows.h> #else #include <unistd.h> #endif #include <stdio.h> ]], [[ int result; FILE *f; #ifdef _WIN32 SYSTEM_INFO si; GetSystemInfo(&si); result = si.dwPageSize; #else result = sysconf(_SC_PAGESIZE); #endif if (result == -1) { return 1; } result = JEMALLOC_INTERNAL_FFSL(result) - 1; f = fopen("conftest.out", "w"); if (f == NULL) { return 1; } fprintf(f, "%d\n", result); fclose(f); return 0; ]])], [je_cv_lg_page=`cat conftest.out`], [je_cv_lg_page=undefined], [je_cv_lg_page=12])) fi if test "x${je_cv_lg_page}" != "x" ; then LG_PAGE="${je_cv_lg_page}" fi if test "x${LG_PAGE}" != "xundefined" ; then AC_DEFINE_UNQUOTED([LG_PAGE], [$LG_PAGE]) else AC_MSG_ERROR([cannot determine value for LG_PAGE]) fi AC_ARG_WITH([lg_page_sizes], [AS_HELP_STRING([--with-lg-page-sizes=<lg-page-sizes>], [Base 2 logs of system page sizes to support])], [LG_PAGE_SIZES="$with_lg_page_sizes"], [LG_PAGE_SIZES="$LG_PAGE"]) AC_ARG_WITH([lg_size_class_group], [AS_HELP_STRING([--with-lg-size-class-group=<lg-size-class-group>], [Base 2 log of size classes per doubling])], [LG_SIZE_CLASS_GROUP="$with_lg_size_class_group"], [LG_SIZE_CLASS_GROUP="2"]) dnl ============================================================================ dnl jemalloc configuration. dnl dnl Set VERSION if source directory is inside a git repository. if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then dnl Pattern globs aren't powerful enough to match both single- and dnl double-digit version numbers, so iterate over patterns to support up to dnl version 99.99.99 without any accidental matches. rm -f "${objroot}VERSION" for pattern in ['[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \ '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \ '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \ '[0-9][0-9].[0-9][0-9].[0-9]' \ '[0-9][0-9].[0-9][0-9].[0-9][0-9]']; do if test ! -e "${objroot}VERSION" ; then (test !
"${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null if test $? -eq 0 ; then mv "${objroot}VERSION.tmp" "${objroot}VERSION" break fi fi done fi rm -f "${objroot}VERSION.tmp" if test ! -e "${objroot}VERSION" ; then if test ! -e "${srcroot}VERSION" ; then AC_MSG_RESULT( [Missing VERSION file, and unable to generate it; creating bogus VERSION]) echo "0.0.0-0-g0000000000000000000000000000000000000000" > "${objroot}VERSION" else cp ${srcroot}VERSION ${objroot}VERSION fi fi jemalloc_version=`cat "${objroot}VERSION"` jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]1}'` jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]2}'` jemalloc_version_bugfix=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]3}'` jemalloc_version_nrev=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]4}'` jemalloc_version_gid=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]5}'` AC_SUBST([jemalloc_version]) AC_SUBST([jemalloc_version_major]) AC_SUBST([jemalloc_version_minor]) AC_SUBST([jemalloc_version_bugfix]) AC_SUBST([jemalloc_version_nrev]) AC_SUBST([jemalloc_version_gid]) dnl ============================================================================ dnl Configure pthreads. if test "x$abi" != "xpecoff" ; then AC_CHECK_HEADERS([pthread.h], , [AC_MSG_ERROR([pthread.h is missing])]) dnl Some systems may embed pthreads functionality in libc; check for libpthread dnl first, but try libc too before failing. AC_CHECK_LIB([pthread], [pthread_create], [LIBS="$LIBS -lpthread"], [AC_SEARCH_LIBS([pthread_create], , , AC_MSG_ERROR([libpthread is missing]))]) fi CPPFLAGS="$CPPFLAGS -D_REENTRANT" dnl Check whether clock_gettime(2) is in libc or librt. This function is only dnl used in test code, so save the result to TESTLIBS to avoid poluting LIBS. SAVED_LIBS="${LIBS}" LIBS= AC_SEARCH_LIBS([clock_gettime], [rt], [TESTLIBS="${LIBS}"]) AC_SUBST([TESTLIBS]) LIBS="${SAVED_LIBS}" dnl Check if the GNU-specific secure_getenv function exists. AC_CHECK_FUNC([secure_getenv], [have_secure_getenv="1"], [have_secure_getenv="0"] ) if test "x$have_secure_getenv" = "x1" ; then AC_DEFINE([JEMALLOC_HAVE_SECURE_GETENV], [ ]) fi dnl Check if the Solaris/BSD issetugid function exists. AC_CHECK_FUNC([issetugid], [have_issetugid="1"], [have_issetugid="0"] ) if test "x$have_issetugid" = "x1" ; then AC_DEFINE([JEMALLOC_HAVE_ISSETUGID], [ ]) fi dnl Check whether the BSD-specific _malloc_thread_cleanup() exists. If so, use dnl it rather than pthreads TSD cleanup functions to support cleanup during dnl thread exit, in order to avoid pthreads library recursion during dnl bootstrapping. AC_CHECK_FUNC([_malloc_thread_cleanup], [have__malloc_thread_cleanup="1"], [have__malloc_thread_cleanup="0"] ) if test "x$have__malloc_thread_cleanup" = "x1" ; then AC_DEFINE([JEMALLOC_MALLOC_THREAD_CLEANUP], [ ]) force_tls="1" fi dnl Check whether the BSD-specific _pthread_mutex_init_calloc_cb() exists. If dnl so, mutex initialization causes allocation, and we need to implement this dnl callback function in order to prevent recursive allocation. AC_CHECK_FUNC([_pthread_mutex_init_calloc_cb], [have__pthread_mutex_init_calloc_cb="1"], [have__pthread_mutex_init_calloc_cb="0"] ) if test "x$have__pthread_mutex_init_calloc_cb" = "x1" ; then AC_DEFINE([JEMALLOC_MUTEX_INIT_CB]) fi dnl Disable lazy locking by default. 
AC_ARG_ENABLE([lazy_lock], [AS_HELP_STRING([--enable-lazy-lock], [Enable lazy locking (only lock when multi-threaded)])], [if test "x$enable_lazy_lock" = "xno" ; then enable_lazy_lock="0" else enable_lazy_lock="1" fi ], [enable_lazy_lock=""] ) if test "x$enable_lazy_lock" = "x" -a "x${force_lazy_lock}" = "x1" ; then AC_MSG_RESULT([Forcing lazy-lock to avoid allocator/threading bootstrap issues]) enable_lazy_lock="1" fi if test "x$enable_lazy_lock" = "x1" ; then if test "x$abi" != "xpecoff" ; then AC_CHECK_HEADERS([dlfcn.h], , [AC_MSG_ERROR([dlfcn.h is missing])]) AC_CHECK_FUNC([dlsym], [], [AC_CHECK_LIB([dl], [dlsym], [LIBS="$LIBS -ldl"], [AC_MSG_ERROR([libdl is missing])]) ]) fi AC_DEFINE([JEMALLOC_LAZY_LOCK], [ ]) else enable_lazy_lock="0" fi AC_SUBST([enable_lazy_lock]) AC_ARG_ENABLE([tls], [AS_HELP_STRING([--disable-tls], [Disable thread-local storage (__thread keyword)])], if test "x$enable_tls" = "xno" ; then enable_tls="0" else enable_tls="1" fi , enable_tls="" ) if test "x${enable_tls}" = "x" ; then if test "x${force_tls}" = "x1" ; then AC_MSG_RESULT([Forcing TLS to avoid allocator/threading bootstrap issues]) enable_tls="1" elif test "x${force_tls}" = "x0" ; then AC_MSG_RESULT([Forcing no TLS to avoid allocator/threading bootstrap issues]) enable_tls="0" else enable_tls="1" fi fi if test "x${enable_tls}" = "x1" ; then AC_MSG_CHECKING([for TLS]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM( [[ __thread int x; ]], [[ x = 42; return 0; ]])], AC_MSG_RESULT([yes]), AC_MSG_RESULT([no]) enable_tls="0") else enable_tls="0" fi AC_SUBST([enable_tls]) if test "x${enable_tls}" = "x1" ; then if test "x${force_tls}" = "x0" ; then AC_MSG_WARN([TLS enabled despite being marked unusable on this platform]) fi AC_DEFINE_UNQUOTED([JEMALLOC_TLS], [ ]) elif test "x${force_tls}" = "x1" ; then AC_MSG_WARN([TLS disabled despite being marked critical on this platform]) fi dnl ============================================================================ dnl Check for C11 atomics. JE_COMPILABLE([C11 atomics], [ #include <stdint.h> #if (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) #include <stdatomic.h> #else #error Atomics not available #endif ], [ uint64_t *p = (uint64_t *)0; uint64_t x = 1; volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; uint64_t r = atomic_fetch_add(a, x) + x; return (r == 0); ], [je_cv_c11atomics]) if test "x${je_cv_c11atomics}" = "xyes" ; then AC_DEFINE([JEMALLOC_C11ATOMICS]) fi dnl ============================================================================ dnl Check for atomic(9) operations as provided on FreeBSD. JE_COMPILABLE([atomic(9)], [ #include <sys/types.h> #include <machine/atomic.h> #include <inttypes.h> ], [ { uint32_t x32 = 0; volatile uint32_t *x32p = &x32; atomic_fetchadd_32(x32p, 1); } { unsigned long xlong = 0; volatile unsigned long *xlongp = &xlong; atomic_fetchadd_long(xlongp, 1); } ], [je_cv_atomic9]) if test "x${je_cv_atomic9}" = "xyes" ; then AC_DEFINE([JEMALLOC_ATOMIC9]) fi dnl ============================================================================ dnl Check for atomic(3) operations as provided on Darwin. JE_COMPILABLE([Darwin OSAtomic*()], [ #include <libkern/OSAtomic.h> #include <inttypes.h> ], [ { int32_t x32 = 0; volatile int32_t *x32p = &x32; OSAtomicAdd32(1, x32p); } { int64_t x64 = 0; volatile int64_t *x64p = &x64; OSAtomicAdd64(1, x64p); } ], [je_cv_osatomic]) if test "x${je_cv_osatomic}" = "xyes" ; then AC_DEFINE([JEMALLOC_OSATOMIC], [ ]) fi dnl ============================================================================ dnl Check for madvise(2).
JE_COMPILABLE([madvise(2)], [ #include <sys/mman.h> ], [ { madvise((void *)0, 0, 0); } ], [je_cv_madvise]) if test "x${je_cv_madvise}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_MADVISE], [ ]) fi dnl ============================================================================ dnl Check whether __sync_{add,sub}_and_fetch() are available despite dnl __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n macros being undefined. AC_DEFUN([JE_SYNC_COMPARE_AND_SWAP_CHECK],[ AC_CACHE_CHECK([whether to force $1-bit __sync_{add,sub}_and_fetch()], [je_cv_sync_compare_and_swap_$2], [AC_LINK_IFELSE([AC_LANG_PROGRAM([ #include <stdint.h> ], [ #ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_$2 { uint$1_t x$1 = 0; __sync_add_and_fetch(&x$1, 42); __sync_sub_and_fetch(&x$1, 1); } #else #error __GCC_HAVE_SYNC_COMPARE_AND_SWAP_$2 is defined, no need to force #endif ])], [je_cv_sync_compare_and_swap_$2=yes], [je_cv_sync_compare_and_swap_$2=no])]) if test "x${je_cv_sync_compare_and_swap_$2}" = "xyes" ; then AC_DEFINE([JE_FORCE_SYNC_COMPARE_AND_SWAP_$2], [ ]) fi ]) if test "x${je_cv_atomic9}" != "xyes" -a "x${je_cv_osatomic}" != "xyes" ; then JE_SYNC_COMPARE_AND_SWAP_CHECK(32, 4) JE_SYNC_COMPARE_AND_SWAP_CHECK(64, 8) fi dnl ============================================================================ dnl Check for __builtin_clz() and __builtin_clzl(). AC_CACHE_CHECK([for __builtin_clz], [je_cv_builtin_clz], [AC_LINK_IFELSE([AC_LANG_PROGRAM([], [ { unsigned x = 0; int y = __builtin_clz(x); } { unsigned long x = 0; int y = __builtin_clzl(x); } ])], [je_cv_builtin_clz=yes], [je_cv_builtin_clz=no])]) if test "x${je_cv_builtin_clz}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_BUILTIN_CLZ], [ ]) fi dnl ============================================================================ dnl Check for spinlock(3) operations as provided on Darwin. JE_COMPILABLE([Darwin OSSpin*()], [ #include <libkern/OSAtomic.h> #include <inttypes.h> ], [ OSSpinLock lock = 0; OSSpinLockLock(&lock); OSSpinLockUnlock(&lock); ], [je_cv_osspin]) if test "x${je_cv_osspin}" = "xyes" ; then AC_DEFINE([JEMALLOC_OSSPIN], [ ]) fi dnl ============================================================================ dnl Darwin-related configuration. AC_ARG_ENABLE([zone-allocator], [AS_HELP_STRING([--disable-zone-allocator], [Disable zone allocator for Darwin])], [if test "x$enable_zone_allocator" = "xno" ; then enable_zone_allocator="0" else enable_zone_allocator="1" fi ], [if test "x${abi}" = "xmacho"; then enable_zone_allocator="1" fi ] ) AC_SUBST([enable_zone_allocator]) if test "x${enable_zone_allocator}" = "x1" ; then if test "x${abi}" != "xmacho"; then AC_MSG_ERROR([--enable-zone-allocator is only supported on Darwin]) fi AC_DEFINE([JEMALLOC_ZONE], [ ]) dnl The szone version jumped from 3 to 6 between the OS X 10.5.x and 10.6 dnl releases. malloc_zone_t and malloc_introspection_t have new fields in dnl 10.6, which is the only source-level indication of the change. AC_MSG_CHECKING([malloc zone version]) AC_DEFUN([JE_ZONE_PROGRAM], [AC_LANG_PROGRAM( [#include <malloc/malloc.h>], [static int foo[[sizeof($1) $2 sizeof(void *) * $3 ?
1 : -1]]] )]) AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,14)],[JEMALLOC_ZONE_VERSION=3],[ AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,15)],[JEMALLOC_ZONE_VERSION=5],[ AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,16)],[ AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_introspection_t,==,9)],[JEMALLOC_ZONE_VERSION=6],[ AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_introspection_t,==,13)],[JEMALLOC_ZONE_VERSION=7],[JEMALLOC_ZONE_VERSION=] )])],[ AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,17)],[JEMALLOC_ZONE_VERSION=8],[ AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,>,17)],[JEMALLOC_ZONE_VERSION=9],[JEMALLOC_ZONE_VERSION=] )])])])]) if test "x${JEMALLOC_ZONE_VERSION}" = "x"; then AC_MSG_RESULT([unsupported]) AC_MSG_ERROR([Unsupported malloc zone version]) fi if test "${JEMALLOC_ZONE_VERSION}" = 9; then JEMALLOC_ZONE_VERSION=8 AC_MSG_RESULT([> 8]) else AC_MSG_RESULT([$JEMALLOC_ZONE_VERSION]) fi AC_DEFINE_UNQUOTED(JEMALLOC_ZONE_VERSION, [$JEMALLOC_ZONE_VERSION]) fi dnl ============================================================================ dnl Check for glibc malloc hooks JE_COMPILABLE([glibc malloc hook], [ #include <stddef.h> extern void (* __free_hook)(void *ptr); extern void *(* __malloc_hook)(size_t size); extern void *(* __realloc_hook)(void *ptr, size_t size); ], [ void *ptr = 0L; if (__malloc_hook) ptr = __malloc_hook(1); if (__realloc_hook) ptr = __realloc_hook(ptr, 2); if (__free_hook && ptr) __free_hook(ptr); ], [je_cv_glibc_malloc_hook]) if test "x${je_cv_glibc_malloc_hook}" = "xyes" ; then AC_DEFINE([JEMALLOC_GLIBC_MALLOC_HOOK], [ ]) fi JE_COMPILABLE([glibc memalign hook], [ #include <stddef.h> extern void *(* __memalign_hook)(size_t alignment, size_t size); ], [ void *ptr = 0L; if (__memalign_hook) ptr = __memalign_hook(16, 7); ], [je_cv_glibc_memalign_hook]) if test "x${je_cv_glibc_memalign_hook}" = "xyes" ; then AC_DEFINE([JEMALLOC_GLIBC_MEMALIGN_HOOK], [ ]) fi JE_COMPILABLE([pthreads adaptive mutexes], [ #include <pthread.h> ], [ pthread_mutexattr_t attr; pthread_mutexattr_init(&attr); pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP); pthread_mutexattr_destroy(&attr); ], [je_cv_pthread_mutex_adaptive_np]) if test "x${je_cv_pthread_mutex_adaptive_np}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP], [ ]) fi dnl ============================================================================ dnl Check for typedefs, structures, and compiler characteristics. AC_HEADER_STDBOOL dnl ============================================================================ dnl Define commands that generate output files.
AC_CONFIG_COMMANDS([include/jemalloc/internal/private_namespace.h], [ mkdir -p "${objroot}include/jemalloc/internal" "${srcdir}/include/jemalloc/internal/private_namespace.sh" "${srcdir}/include/jemalloc/internal/private_symbols.txt" > "${objroot}include/jemalloc/internal/private_namespace.h" ], [ srcdir="${srcdir}" objroot="${objroot}" ]) AC_CONFIG_COMMANDS([include/jemalloc/internal/private_unnamespace.h], [ mkdir -p "${objroot}include/jemalloc/internal" "${srcdir}/include/jemalloc/internal/private_unnamespace.sh" "${srcdir}/include/jemalloc/internal/private_symbols.txt" > "${objroot}include/jemalloc/internal/private_unnamespace.h" ], [ srcdir="${srcdir}" objroot="${objroot}" ]) AC_CONFIG_COMMANDS([include/jemalloc/internal/public_symbols.txt], [ f="${objroot}include/jemalloc/internal/public_symbols.txt" mkdir -p "${objroot}include/jemalloc/internal" cp /dev/null "${f}" for nm in `echo ${mangling_map} |tr ',' ' '` ; do n=`echo ${nm} |tr ':' ' ' |awk '{print $[]1}'` m=`echo ${nm} |tr ':' ' ' |awk '{print $[]2}'` echo "${n}:${m}" >> "${f}" dnl Remove name from public_syms so that it isn't redefined later. public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${n}\$" |tr '\n' ' '` done for sym in ${public_syms} ; do n="${sym}" m="${JEMALLOC_PREFIX}${sym}" echo "${n}:${m}" >> "${f}" done ], [ srcdir="${srcdir}" objroot="${objroot}" mangling_map="${mangling_map}" public_syms="${public_syms}" JEMALLOC_PREFIX="${JEMALLOC_PREFIX}" ]) AC_CONFIG_COMMANDS([include/jemalloc/internal/public_namespace.h], [ mkdir -p "${objroot}include/jemalloc/internal" "${srcdir}/include/jemalloc/internal/public_namespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_namespace.h" ], [ srcdir="${srcdir}" objroot="${objroot}" ]) AC_CONFIG_COMMANDS([include/jemalloc/internal/public_unnamespace.h], [ mkdir -p "${objroot}include/jemalloc/internal" "${srcdir}/include/jemalloc/internal/public_unnamespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_unnamespace.h" ], [ srcdir="${srcdir}" objroot="${objroot}" ]) AC_CONFIG_COMMANDS([include/jemalloc/internal/size_classes.h], [ mkdir -p "${objroot}include/jemalloc/internal" "${SHELL}" "${srcdir}/include/jemalloc/internal/size_classes.sh" "${LG_QUANTA}" ${LG_TINY_MIN} "${LG_PAGE_SIZES}" ${LG_SIZE_CLASS_GROUP} > "${objroot}include/jemalloc/internal/size_classes.h" ], [ SHELL="${SHELL}" srcdir="${srcdir}" objroot="${objroot}" LG_QUANTA="${LG_QUANTA}" LG_TINY_MIN=${LG_TINY_MIN} LG_PAGE_SIZES="${LG_PAGE_SIZES}" LG_SIZE_CLASS_GROUP=${LG_SIZE_CLASS_GROUP} ]) AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_protos_jet.h], [ mkdir -p "${objroot}include/jemalloc" cat "${srcdir}/include/jemalloc/jemalloc_protos.h.in" | sed -e 's/@je_@/jet_/g' > "${objroot}include/jemalloc/jemalloc_protos_jet.h" ], [ srcdir="${srcdir}" objroot="${objroot}" ]) AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_rename.h], [ mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc_rename.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/jemalloc_rename.h" ], [ srcdir="${srcdir}" objroot="${objroot}" ]) AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle.h], [ mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" je_ > "${objroot}include/jemalloc/jemalloc_mangle.h" ], [ srcdir="${srcdir}" objroot="${objroot}" ]) 
AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle_jet.h], [ mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" jet_ > "${objroot}include/jemalloc/jemalloc_mangle_jet.h" ], [ srcdir="${srcdir}" objroot="${objroot}" ]) AC_CONFIG_COMMANDS([include/jemalloc/jemalloc.h], [ mkdir -p "${objroot}include/jemalloc" "${srcdir}/include/jemalloc/jemalloc.sh" "${objroot}" > "${objroot}include/jemalloc/jemalloc${install_suffix}.h" ], [ srcdir="${srcdir}" objroot="${objroot}" install_suffix="${install_suffix}" ]) dnl Process .in files. AC_SUBST([cfghdrs_in]) AC_SUBST([cfghdrs_out]) AC_CONFIG_HEADERS([$cfghdrs_tup]) dnl ============================================================================ dnl Generate outputs. AC_CONFIG_FILES([$cfgoutputs_tup config.stamp bin/jemalloc-config bin/jemalloc.sh bin/jeprof]) AC_SUBST([cfgoutputs_in]) AC_SUBST([cfgoutputs_out]) AC_OUTPUT dnl ============================================================================ dnl Print out the results of configuration. AC_MSG_RESULT([===============================================================================]) AC_MSG_RESULT([jemalloc version : ${jemalloc_version}]) AC_MSG_RESULT([library revision : ${rev}]) AC_MSG_RESULT([]) AC_MSG_RESULT([CONFIG : ${CONFIG}]) AC_MSG_RESULT([CC : ${CC}]) AC_MSG_RESULT([CFLAGS : ${CFLAGS}]) AC_MSG_RESULT([CPPFLAGS : ${CPPFLAGS}]) AC_MSG_RESULT([LDFLAGS : ${LDFLAGS}]) AC_MSG_RESULT([EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}]) AC_MSG_RESULT([LIBS : ${LIBS}]) AC_MSG_RESULT([TESTLIBS : ${TESTLIBS}]) AC_MSG_RESULT([RPATH_EXTRA : ${RPATH_EXTRA}]) AC_MSG_RESULT([]) AC_MSG_RESULT([XSLTPROC : ${XSLTPROC}]) AC_MSG_RESULT([XSLROOT : ${XSLROOT}]) AC_MSG_RESULT([]) AC_MSG_RESULT([PREFIX : ${PREFIX}]) AC_MSG_RESULT([BINDIR : ${BINDIR}]) AC_MSG_RESULT([DATADIR : ${DATADIR}]) AC_MSG_RESULT([INCLUDEDIR : ${INCLUDEDIR}]) AC_MSG_RESULT([LIBDIR : ${LIBDIR}]) AC_MSG_RESULT([MANDIR : ${MANDIR}]) AC_MSG_RESULT([]) AC_MSG_RESULT([srcroot : ${srcroot}]) AC_MSG_RESULT([abs_srcroot : ${abs_srcroot}]) AC_MSG_RESULT([objroot : ${objroot}]) AC_MSG_RESULT([abs_objroot : ${abs_objroot}]) AC_MSG_RESULT([]) AC_MSG_RESULT([JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}]) AC_MSG_RESULT([JEMALLOC_PRIVATE_NAMESPACE]) AC_MSG_RESULT([ : ${JEMALLOC_PRIVATE_NAMESPACE}]) AC_MSG_RESULT([install_suffix : ${install_suffix}]) AC_MSG_RESULT([autogen : ${enable_autogen}]) AC_MSG_RESULT([cc-silence : ${enable_cc_silence}]) AC_MSG_RESULT([debug : ${enable_debug}]) AC_MSG_RESULT([code-coverage : ${enable_code_coverage}]) AC_MSG_RESULT([stats : ${enable_stats}]) AC_MSG_RESULT([prof : ${enable_prof}]) AC_MSG_RESULT([prof-libunwind : ${enable_prof_libunwind}]) AC_MSG_RESULT([prof-libgcc : ${enable_prof_libgcc}]) AC_MSG_RESULT([prof-gcc : ${enable_prof_gcc}]) AC_MSG_RESULT([tcache : ${enable_tcache}]) AC_MSG_RESULT([fill : ${enable_fill}]) AC_MSG_RESULT([utrace : ${enable_utrace}]) AC_MSG_RESULT([valgrind : ${enable_valgrind}]) AC_MSG_RESULT([xmalloc : ${enable_xmalloc}]) AC_MSG_RESULT([munmap : ${enable_munmap}]) AC_MSG_RESULT([lazy_lock : ${enable_lazy_lock}]) AC_MSG_RESULT([tls : ${enable_tls}]) AC_MSG_RESULT([cache-oblivious : ${enable_cache_oblivious}]) AC_MSG_RESULT([===============================================================================]) 
disque-1.0-rc1/deps/jemalloc/coverage.sh
#!/bin/sh

set -e

objdir=$1
suffix=$2
shift 2
objs=$@

gcov -b -p -f -o "${objdir}" ${objs}

# Move gcov outputs so that subsequent gcov invocations won't clobber results
# for the same sources with different compilation flags.
for f in `find . -maxdepth 1 -type f -name '*.gcov'` ; do
  mv "${f}" "${f}.${suffix}"
done
disque-1.0-rc1/deps/jemalloc/doc/
disque-1.0-rc1/deps/jemalloc/doc/html.xsl.in
disque-1.0-rc1/deps/jemalloc/doc/jemalloc.3
'\" t
.\" Title: JEMALLOC
.\" Author: Jason Evans
.\" Generator: DocBook XSL Stylesheets v1.78.1 <http://docbook.sf.net/>
.\" Date: 09/24/2015
.\" Manual: User Manual
.\" Source: jemalloc 4.0.3-0-ge9192eacf8935e29fc62fddc2701f7942b1cc02c
.\" Language: English
.\"
"jemalloc 4.0.3-0-ge9192eacf893" "User Manual" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .\" http://bugs.debian.org/507673 .\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- .\" disable hyphenation .nh .\" disable justification (adjust text to left margin only) .ad l .\" ----------------------------------------------------------------- .\" * MAIN CONTENT STARTS HERE * .\" ----------------------------------------------------------------- .SH "NAME" jemalloc \- general purpose memory allocation functions .SH "LIBRARY" .PP This manual describes jemalloc 4\&.0\&.3\-0\-ge9192eacf8935e29fc62fddc2701f7942b1cc02c\&. More information can be found at the \m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&. .SH "SYNOPSIS" .sp .ft B .nf #include .fi .ft .SS "Standard API" .HP \w'void\ *malloc('u .BI "void *malloc(size_t\ " "size" ");" .HP \w'void\ *calloc('u .BI "void *calloc(size_t\ " "number" ", size_t\ " "size" ");" .HP \w'int\ posix_memalign('u .BI "int posix_memalign(void\ **" "ptr" ", size_t\ " "alignment" ", size_t\ " "size" ");" .HP \w'void\ *aligned_alloc('u .BI "void *aligned_alloc(size_t\ " "alignment" ", size_t\ " "size" ");" .HP \w'void\ *realloc('u .BI "void *realloc(void\ *" "ptr" ", size_t\ " "size" ");" .HP \w'void\ free('u .BI "void free(void\ *" "ptr" ");" .SS "Non\-standard API" .HP \w'void\ *mallocx('u .BI "void *mallocx(size_t\ " "size" ", int\ " "flags" ");" .HP \w'void\ *rallocx('u .BI "void *rallocx(void\ *" "ptr" ", size_t\ " "size" ", int\ " "flags" ");" .HP \w'size_t\ xallocx('u .BI "size_t xallocx(void\ *" "ptr" ", size_t\ " "size" ", size_t\ " "extra" ", int\ " "flags" ");" .HP \w'size_t\ sallocx('u .BI "size_t sallocx(void\ *" "ptr" ", int\ " "flags" ");" .HP \w'void\ dallocx('u .BI "void dallocx(void\ *" "ptr" ", int\ " "flags" ");" .HP \w'void\ sdallocx('u .BI "void sdallocx(void\ *" "ptr" ", size_t\ " "size" ", int\ " "flags" ");" .HP \w'size_t\ nallocx('u .BI "size_t nallocx(size_t\ " "size" ", int\ " "flags" ");" .HP \w'int\ mallctl('u .BI "int mallctl(const\ char\ *" "name" ", void\ *" "oldp" ", size_t\ *" "oldlenp" ", void\ *" "newp" ", size_t\ " "newlen" ");" .HP \w'int\ mallctlnametomib('u .BI "int mallctlnametomib(const\ char\ *" "name" ", size_t\ *" "mibp" ", size_t\ *" "miblenp" ");" .HP \w'int\ mallctlbymib('u .BI "int mallctlbymib(const\ size_t\ *" "mib" ", size_t\ " "miblen" ", void\ *" "oldp" ", size_t\ *" "oldlenp" ", void\ *" "newp" ", size_t\ " "newlen" ");" .HP \w'void\ malloc_stats_print('u .BI "void malloc_stats_print(void\ " "(*write_cb)" "\ (void\ *,\ const\ char\ *), void\ *" "cbopaque" ", const\ char\ *" "opts" ");" .HP \w'size_t\ malloc_usable_size('u .BI "size_t malloc_usable_size(const\ void\ *" "ptr" ");" .HP \w'void\ (*malloc_message)('u .BI "void (*malloc_message)(void\ *" "cbopaque" ", const\ char\ *" "s" ");" .PP const char *\fImalloc_conf\fR; .SH "DESCRIPTION" .SS "Standard API" .PP The \fBmalloc\fR\fB\fR function allocates \fIsize\fR bytes of uninitialized memory\&. 
The allocated space is suitably aligned (after possible pointer coercion) for storage of any type of object\&. .PP The \fBcalloc\fR\fB\fR function allocates space for \fInumber\fR objects, each \fIsize\fR bytes in length\&. The result is identical to calling \fBmalloc\fR\fB\fR with an argument of \fInumber\fR * \fIsize\fR, with the exception that the allocated memory is explicitly initialized to zero bytes\&. .PP The \fBposix_memalign\fR\fB\fR function allocates \fIsize\fR bytes of memory such that the allocation\*(Aqs base address is a multiple of \fIalignment\fR, and returns the allocation in the value pointed to by \fIptr\fR\&. The requested \fIalignment\fR must be a power of 2 at least as large as sizeof(\fBvoid *\fR)\&. .PP The \fBaligned_alloc\fR\fB\fR function allocates \fIsize\fR bytes of memory such that the allocation\*(Aqs base address is a multiple of \fIalignment\fR\&. The requested \fIalignment\fR must be a power of 2\&. Behavior is undefined if \fIsize\fR is not an integral multiple of \fIalignment\fR\&. .PP The \fBrealloc\fR\fB\fR function changes the size of the previously allocated memory referenced by \fIptr\fR to \fIsize\fR bytes\&. The contents of the memory are unchanged up to the lesser of the new and old sizes\&. If the new size is larger, the contents of the newly allocated portion of the memory are undefined\&. Upon success, the memory referenced by \fIptr\fR is freed and a pointer to the newly allocated memory is returned\&. Note that \fBrealloc\fR\fB\fR may move the memory allocation, resulting in a different return value than \fIptr\fR\&. If \fIptr\fR is \fBNULL\fR, the \fBrealloc\fR\fB\fR function behaves identically to \fBmalloc\fR\fB\fR for the specified size\&. .PP The \fBfree\fR\fB\fR function causes the allocated memory referenced by \fIptr\fR to be made available for future allocations\&. If \fIptr\fR is \fBNULL\fR, no action occurs\&. .SS "Non\-standard API" .PP The \fBmallocx\fR\fB\fR, \fBrallocx\fR\fB\fR, \fBxallocx\fR\fB\fR, \fBsallocx\fR\fB\fR, \fBdallocx\fR\fB\fR, \fBsdallocx\fR\fB\fR, and \fBnallocx\fR\fB\fR functions all have a \fIflags\fR argument that can be used to specify options\&. The functions only check the options that are contextually relevant\&. Use bitwise or (|) operations to specify one or more of the following: .PP \fBMALLOCX_LG_ALIGN(\fR\fB\fIla\fR\fR\fB) \fR .RS 4 Align the memory allocation to start at an address that is a multiple of (1 << \fIla\fR)\&. This macro does not validate that \fIla\fR is within the valid range\&. .RE .PP \fBMALLOCX_ALIGN(\fR\fB\fIa\fR\fR\fB) \fR .RS 4 Align the memory allocation to start at an address that is a multiple of \fIa\fR, where \fIa\fR is a power of two\&. This macro does not validate that \fIa\fR is a power of 2\&. .RE .PP \fBMALLOCX_ZERO\fR .RS 4 Initialize newly allocated memory to contain zero bytes\&. In the growing reallocation case, the real size prior to reallocation defines the boundary between untouched bytes and those that are initialized to contain zero bytes\&. If this macro is absent, newly allocated memory is uninitialized\&. .RE .PP \fBMALLOCX_TCACHE(\fR\fB\fItc\fR\fR\fB) \fR .RS 4 Use the thread\-specific cache (tcache) specified by the identifier \fItc\fR, which must have been acquired via the "tcache\&.create" mallctl\&. This macro does not validate that \fItc\fR specifies a valid identifier\&. .RE .PP \fBMALLOCX_TCACHE_NONE\fR .RS 4 Do not use a thread\-specific cache (tcache)\&. 
Unless \fBMALLOCX_TCACHE(\fR\fB\fItc\fR\fR\fB)\fR or \fBMALLOCX_TCACHE_NONE\fR is specified, an automatically managed tcache will be used under many circumstances\&. This macro cannot be used in the same \fIflags\fR argument as \fBMALLOCX_TCACHE(\fR\fB\fItc\fR\fR\fB)\fR\&. .RE .PP \fBMALLOCX_ARENA(\fR\fB\fIa\fR\fR\fB) \fR .RS 4 Use the arena specified by the index \fIa\fR\&. This macro has no effect for regions that were allocated via an arena other than the one specified\&. This macro does not validate that \fIa\fR specifies an arena index in the valid range\&. .RE .PP The \fBmallocx\fR\fB\fR function allocates at least \fIsize\fR bytes of memory, and returns a pointer to the base address of the allocation\&. Behavior is undefined if \fIsize\fR is \fB0\fR, or if request size overflows due to size class and/or alignment constraints\&. .PP The \fBrallocx\fR\fB\fR function resizes the allocation at \fIptr\fR to be at least \fIsize\fR bytes, and returns a pointer to the base address of the resulting allocation, which may or may not have moved from its original location\&. Behavior is undefined if \fIsize\fR is \fB0\fR, or if request size overflows due to size class and/or alignment constraints\&. .PP The \fBxallocx\fR\fB\fR function resizes the allocation at \fIptr\fR in place to be at least \fIsize\fR bytes, and returns the real size of the allocation\&. If \fIextra\fR is non\-zero, an attempt is made to resize the allocation to be at least (\fIsize\fR + \fIextra\fR) bytes, though inability to allocate the extra byte(s) will not by itself result in failure to resize\&. Behavior is undefined if \fIsize\fR is \fB0\fR, or if (\fIsize\fR + \fIextra\fR > \fBSIZE_T_MAX\fR)\&. .PP The \fBsallocx\fR\fB\fR function returns the real size of the allocation at \fIptr\fR\&. .PP The \fBdallocx\fR\fB\fR function causes the memory referenced by \fIptr\fR to be made available for future allocations\&. .PP The \fBsdallocx\fR\fB\fR function is an extension of \fBdallocx\fR\fB\fR with a \fIsize\fR parameter to allow the caller to pass in the allocation size as an optimization\&. The minimum valid input size is the original requested size of the allocation, and the maximum valid input size is the corresponding value returned by \fBnallocx\fR\fB\fR or \fBsallocx\fR\fB\fR\&. .PP The \fBnallocx\fR\fB\fR function allocates no memory, but it performs the same size computation as the \fBmallocx\fR\fB\fR function, and returns the real size of the allocation that would result from the equivalent \fBmallocx\fR\fB\fR function call\&. Behavior is undefined if \fIsize\fR is \fB0\fR, or if request size overflows due to size class and/or alignment constraints\&. .PP The \fBmallctl\fR\fB\fR function provides a general interface for introspecting the memory allocator, as well as setting modifiable parameters and triggering actions\&. The period\-separated \fIname\fR argument specifies a location in a tree\-structured namespace; see the MALLCTL NAMESPACE section for documentation on the tree contents\&. To read a value, pass a pointer via \fIoldp\fR to adequate space to contain the value, and a pointer to its length via \fIoldlenp\fR; otherwise pass \fBNULL\fR and \fBNULL\fR\&. Similarly, to write a value, pass a pointer to the value via \fInewp\fR, and its length via \fInewlen\fR; otherwise pass \fBNULL\fR and \fB0\fR\&. 
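.PP
For example, the following sketch (assuming a build configured with \fB\-\-enable\-stats\fR) first writes to the "epoch" mallctl to refresh cached statistics, then reads the total number of bytes allocated by the application; \fIepoch\fR, \fIallocated\fR, and \fIsz\fR are illustrative variable names, not part of the API:
.sp
.if n \{\
.RS 4
.\}
.nf
#include <stdint.h>
#include <jemalloc/jemalloc\&.h>

uint64_t epoch = 1;
size_t sz, allocated;

/* Writing any value to "epoch" refreshes cached statistics\&. */
sz = sizeof(epoch);
mallctl("epoch", &epoch, &sz, &epoch, sz);

/* Read the refreshed value of "stats\&.allocated"\&. */
sz = sizeof(allocated);
if (mallctl("stats\&.allocated", &allocated, &sz, NULL, 0) == 0) {
	/* Use allocated\&.\&.\&. */
}
.fi
.if n \{\
.RE
.\}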
.PP The \fBmallctlnametomib\fR\fB\fR function provides a way to avoid repeated name lookups for applications that repeatedly query the same portion of the namespace, by translating a name to a \(lqManagement Information Base\(rq (MIB) that can be passed repeatedly to \fBmallctlbymib\fR\fB\fR\&. Upon successful return from \fBmallctlnametomib\fR\fB\fR, \fImibp\fR contains an array of \fI*miblenp\fR integers, where \fI*miblenp\fR is the lesser of the number of components in \fIname\fR and the input value of \fI*miblenp\fR\&. Thus it is possible to pass a \fI*miblenp\fR that is smaller than the number of period\-separated name components, which results in a partial MIB that can be used as the basis for constructing a complete MIB\&. For name components that are integers (e\&.g\&. the 2 in "arenas\&.bin\&.2\&.size"), the corresponding MIB component will always be that integer\&. Therefore, it is legitimate to construct code like the following: .sp .if n \{\ .RS 4 .\} .nf unsigned nbins, i; size_t mib[4]; size_t len, miblen; len = sizeof(nbins); mallctl("arenas\&.nbins", &nbins, &len, NULL, 0); miblen = 4; mallctlnametomib("arenas\&.bin\&.0\&.size", mib, &miblen); for (i = 0; i < nbins; i++) { size_t bin_size; mib[2] = i; len = sizeof(bin_size); mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0); /* Do something with bin_size\&.\&.\&. */ } .fi .if n \{\ .RE .\} .PP The \fBmalloc_stats_print\fR\fB\fR function writes human\-readable summary statistics via the \fIwrite_cb\fR callback function pointer and \fIcbopaque\fR data passed to \fIwrite_cb\fR, or \fBmalloc_message\fR\fB\fR if \fIwrite_cb\fR is \fBNULL\fR\&. This function can be called repeatedly\&. General information that never changes during execution can be omitted by specifying "g" as a character within the \fIopts\fR string\&. Note that \fBmalloc_message\fR\fB\fR uses the \fBmallctl*\fR\fB\fR functions internally, so inconsistent statistics can be reported if multiple threads use these functions simultaneously\&. If \fB\-\-enable\-stats\fR is specified during configuration, \(lqm\(rq and \(lqa\(rq can be specified to omit merged arena and per arena statistics, respectively; \(lqb\(rq, \(lql\(rq, and \(lqh\(rq can be specified to omit per size class statistics for bins, large objects, and huge objects, respectively\&. Unrecognized characters are silently ignored\&. Note that thread caching may prevent some statistics from being completely up to date, since extra locking would be required to merge counters that track thread cache operations\&. .PP The \fBmalloc_usable_size\fR\fB\fR function returns the usable size of the allocation pointed to by \fIptr\fR\&. The return value may be larger than the size that was requested during allocation\&. The \fBmalloc_usable_size\fR\fB\fR function is not a mechanism for in\-place \fBrealloc\fR\fB\fR; rather it is provided solely as a tool for introspection purposes\&. Any discrepancy between the requested allocation size and the size reported by \fBmalloc_usable_size\fR\fB\fR should not be depended on, since such behavior is entirely implementation\-dependent\&. .SH "TUNING" .PP Once, when the first call is made to one of the memory allocation routines, the allocator initializes its internals based in part on various options that can be specified at compile\- or run\-time\&. 
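.PP
As a compile\-time sketch of this mechanism, an application can bake in default options by defining the global variable \fImalloc_conf\fR itself, described next; the option values here are only examples:
.sp
.if n \{\
.RS 4
.\}
.nf
#include <jemalloc/jemalloc\&.h>

/* Interpreted first; /etc/malloc\&.conf and the MALLOC_CONF
 * environment variable can still adjust options at run time\&. */
const char *malloc_conf = "abort:true,narenas:1";
.fi
.if n \{\
.RE
.\}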
.PP The string pointed to by the global variable \fImalloc_conf\fR, the \(lqname\(rq of the file referenced by the symbolic link named /etc/malloc\&.conf, and the value of the environment variable \fBMALLOC_CONF\fR, will be interpreted, in that order, from left to right as options\&. Note that \fImalloc_conf\fR may be read before \fBmain\fR\fB\fR is entered, so the declaration of \fImalloc_conf\fR should specify an initializer that contains the final value to be read by jemalloc\&. \fImalloc_conf\fR is a compile\-time setting, whereas /etc/malloc\&.conf and \fBMALLOC_CONF\fR can be safely set any time prior to program invocation\&. .PP An options string is a comma\-separated list of option:value pairs\&. There is one key corresponding to each "opt\&.*" mallctl (see the MALLCTL NAMESPACE section for options documentation)\&. For example, abort:true,narenas:1 sets the "opt\&.abort" and "opt\&.narenas" options\&. Some options have boolean values (true/false), others have integer values (base 8, 10, or 16, depending on prefix), and yet others have raw string values\&. .SH "IMPLEMENTATION NOTES" .PP Traditionally, allocators have used \fBsbrk\fR(2) to obtain memory, which is suboptimal for several reasons, including race conditions, increased fragmentation, and artificial limitations on maximum usable memory\&. If \fBsbrk\fR(2) is supported by the operating system, this allocator uses both \fBmmap\fR(2) and \fBsbrk\fR(2), in that order of preference; otherwise only \fBmmap\fR(2) is used\&. .PP This allocator uses multiple arenas in order to reduce lock contention for threaded programs on multi\-processor systems\&. This works well with regard to threading scalability, but incurs some costs\&. There is a small fixed per\-arena overhead, and additionally, arenas manage memory completely independently of each other, which means a small fixed increase in overall memory fragmentation\&. These overheads are not generally an issue, given the number of arenas normally used\&. Note that using substantially more arenas than the default is not likely to improve performance, mainly due to reduced cache performance\&. However, it may make sense to reduce the number of arenas if an application does not make much use of the allocation functions\&. .PP In addition to multiple arenas, unless \fB\-\-disable\-tcache\fR is specified during configuration, this allocator supports thread\-specific caching for small and large objects, in order to make it possible to completely avoid synchronization for most allocation requests\&. Such caching allows very fast allocation in the common case, but it increases memory usage and fragmentation, since a bounded number of objects can remain allocated in each thread cache\&. .PP Memory is conceptually broken into equal\-sized chunks, where the chunk size is a power of two that is greater than the page size\&. Chunks are always aligned to multiples of the chunk size\&. This alignment makes it possible to find metadata for user objects very quickly\&. .PP User objects are broken into three categories according to size: small, large, and huge\&. Small and large objects are managed entirely by arenas; huge objects are additionally aggregated in a single data structure that is shared by all threads\&. Huge objects are typically used by applications infrequently enough that this single data structure is not a scalability issue\&. 
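.PP
As a sketch of why the chunk alignment noted above makes metadata lookup cheap: given any object pointer, the base of the owning chunk can be recovered with a single mask operation\&. The names below are illustrative, not jemalloc internals:
.sp
.if n \{\
.RS 4
.\}
.nf
#include <stdint.h>

#define EXAMPLE_LG_CHUNK 21 /* Assumed 2 MiB chunk size\&. */
#define EXAMPLE_CHUNK_MASK \e
    ((((uintptr_t)1) << EXAMPLE_LG_CHUNK) \- 1)

/* Round an object pointer down to its chunk base, where the
 * per\-chunk metadata begins\&. */
static void *
example_chunk_base(void *ptr)
{
	return ((void *)((uintptr_t)ptr & ~EXAMPLE_CHUNK_MASK));
}
.fi
.if n \{\
.RE
.\}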
.PP Each chunk that is managed by an arena tracks its contents as runs of contiguous pages (unused, backing a set of small objects, or backing one large object)\&. The combination of chunk alignment and chunk page maps makes it possible to determine all metadata regarding small and large allocations in constant time\&. .PP Small objects are managed in groups by page runs\&. Each run maintains a bitmap to track which regions are in use\&. Allocation requests that are no more than half the quantum (8 or 16, depending on architecture) are rounded up to the nearest power of two that is at least sizeof(\fBdouble\fR)\&. All other object size classes are multiples of the quantum, spaced such that there are four size classes for each doubling in size, which limits internal fragmentation to approximately 20% for all but the smallest size classes\&. Small size classes are smaller than four times the page size, large size classes are smaller than the chunk size (see the "opt\&.lg_chunk" option), and huge size classes extend from the chunk size up to one size class less than the full address space size\&. .PP Allocations are packed tightly together, which can be an issue for multi\-threaded applications\&. If you need to assure that allocations do not suffer from cacheline sharing, round your allocation requests up to the nearest multiple of the cacheline size, or specify cacheline alignment when allocating\&. .PP The \fBrealloc\fR\fB\fR, \fBrallocx\fR\fB\fR, and \fBxallocx\fR\fB\fR functions may resize allocations without moving them under limited circumstances\&. Unlike the \fB*allocx\fR\fB\fR API, the standard API does not officially round up the usable size of an allocation to the nearest size class, so technically it is necessary to call \fBrealloc\fR\fB\fR to grow e\&.g\&. a 9\-byte allocation to 16 bytes, or shrink a 16\-byte allocation to 9 bytes\&. Growth and shrinkage trivially succeed in place as long as the pre\-size and post\-size both round up to the same size class\&. No other API guarantees are made regarding in\-place resizing, but the current implementation also tries to resize large and huge allocations in place, as long as the pre\-size and post\-size are both large or both huge\&. In such cases shrinkage always succeeds for large size classes, but for huge size classes the chunk allocator must support splitting (see "arena\&.<i>\&.chunk_hooks")\&. Growth only succeeds if the trailing memory is currently available, and additionally for huge size classes the chunk allocator must support merging\&. .PP Assuming 2 MiB chunks, 4 KiB pages, and a 16\-byte quantum on a 64\-bit system, the size classes in each category are as shown in Table 1\&. .sp .it 1 an-trap .nr an-no-space-flag 1 .nr an-break-flag 1 .br .B Table\ \&1.\ \&Size classes .TS allbox tab(:); lB rB lB. T{ Category T}:T{ Spacing T}:T{ Size T} .T& l r l ^ r l ^ r l ^ r l ^ r l ^ r l ^ r l ^ r l ^ r l l r l ^ r l ^ r l ^ r l ^ r l ^ r l ^ r l ^ r l l r l ^ r l ^ r l ^ r l ^ r l ^ r l ^ r l.
T{ Small T}:T{ lg T}:T{ [8] T} :T{ 16 T}:T{ [16, 32, 48, 64, 80, 96, 112, 128] T} :T{ 32 T}:T{ [160, 192, 224, 256] T} :T{ 64 T}:T{ [320, 384, 448, 512] T} :T{ 128 T}:T{ [640, 768, 896, 1024] T} :T{ 256 T}:T{ [1280, 1536, 1792, 2048] T} :T{ 512 T}:T{ [2560, 3072, 3584, 4096] T} :T{ 1 KiB T}:T{ [5 KiB, 6 KiB, 7 KiB, 8 KiB] T} :T{ 2 KiB T}:T{ [10 KiB, 12 KiB, 14 KiB] T} T{ Large T}:T{ 2 KiB T}:T{ [16 KiB] T} :T{ 4 KiB T}:T{ [20 KiB, 24 KiB, 28 KiB, 32 KiB] T} :T{ 8 KiB T}:T{ [40 KiB, 48 KiB, 56 KiB, 64 KiB] T} :T{ 16 KiB T}:T{ [80 KiB, 96 KiB, 112 KiB, 128 KiB] T} :T{ 32 KiB T}:T{ [160 KiB, 192 KiB, 224 KiB, 256 KiB] T} :T{ 64 KiB T}:T{ [320 KiB, 384 KiB, 448 KiB, 512 KiB] T} :T{ 128 KiB T}:T{ [640 KiB, 768 KiB, 896 KiB, 1 MiB] T} :T{ 256 KiB T}:T{ [1280 KiB, 1536 KiB, 1792 KiB] T} T{ Huge T}:T{ 256 KiB T}:T{ [2 MiB] T} :T{ 512 KiB T}:T{ [2560 KiB, 3 MiB, 3584 KiB, 4 MiB] T} :T{ 1 MiB T}:T{ [5 MiB, 6 MiB, 7 MiB, 8 MiB] T} :T{ 2 MiB T}:T{ [10 MiB, 12 MiB, 14 MiB, 16 MiB] T} :T{ 4 MiB T}:T{ [20 MiB, 24 MiB, 28 MiB, 32 MiB] T} :T{ 8 MiB T}:T{ [40 MiB, 48 MiB, 56 MiB, 64 MiB] T} :T{ \&.\&.\&. T}:T{ \&.\&.\&. T} .TE .sp 1 .SH "MALLCTL NAMESPACE" .PP The following names are defined in the namespace accessible via the \fBmallctl*\fR\fB\fR functions\&. Value types are specified in parentheses, their readable/writable statuses are encoded as rw, r\-, \-w, or \-\-, and required build configuration flags follow, if any\&. A name element encoded as <i> or <j> indicates an integer component, where the integer varies from 0 to some upper value that must be determined via introspection\&. In the case of "stats\&.arenas\&.<i>\&.*", <i> equal to "arenas\&.narenas" can be used to access the summation of statistics from all arenas\&. Take special note of the "epoch" mallctl, which controls refreshing of cached dynamic statistics\&. .PP "version" (\fBconst char *\fR) r\- .RS 4 Return the jemalloc version string\&. .RE .PP "epoch" (\fBuint64_t\fR) rw .RS 4 If a value is passed in, refresh the data from which the \fBmallctl*\fR\fB\fR functions report values, and increment the epoch\&. Return the current epoch\&. This is useful for detecting whether another thread caused a refresh\&. .RE .PP "config\&.cache_oblivious" (\fBbool\fR) r\- .RS 4 \fB\-\-enable\-cache\-oblivious\fR was specified during build configuration\&. .RE .PP "config\&.debug" (\fBbool\fR) r\- .RS 4 \fB\-\-enable\-debug\fR was specified during build configuration\&. .RE .PP "config\&.fill" (\fBbool\fR) r\- .RS 4 \fB\-\-enable\-fill\fR was specified during build configuration\&. .RE .PP "config\&.lazy_lock" (\fBbool\fR) r\- .RS 4 \fB\-\-enable\-lazy\-lock\fR was specified during build configuration\&. .RE .PP "config\&.munmap" (\fBbool\fR) r\- .RS 4 \fB\-\-enable\-munmap\fR was specified during build configuration\&. .RE .PP "config\&.prof" (\fBbool\fR) r\- .RS 4 \fB\-\-enable\-prof\fR was specified during build configuration\&. .RE .PP "config\&.prof_libgcc" (\fBbool\fR) r\- .RS 4 \fB\-\-disable\-prof\-libgcc\fR was not specified during build configuration\&. .RE .PP "config\&.prof_libunwind" (\fBbool\fR) r\- .RS 4 \fB\-\-enable\-prof\-libunwind\fR was specified during build configuration\&. .RE .PP "config\&.stats" (\fBbool\fR) r\- .RS 4 \fB\-\-enable\-stats\fR was specified during build configuration\&. .RE .PP "config\&.tcache" (\fBbool\fR) r\- .RS 4 \fB\-\-disable\-tcache\fR was not specified during build configuration\&. .RE .PP "config\&.tls" (\fBbool\fR) r\- .RS 4 \fB\-\-disable\-tls\fR was not specified during build configuration\&.
.RE .PP "config\&.utrace" (\fBbool\fR) r\- .RS 4 \fB\-\-enable\-utrace\fR was specified during build configuration\&. .RE .PP "config\&.valgrind" (\fBbool\fR) r\- .RS 4 \fB\-\-enable\-valgrind\fR was specified during build configuration\&. .RE .PP "config\&.xmalloc" (\fBbool\fR) r\- .RS 4 \fB\-\-enable\-xmalloc\fR was specified during build configuration\&. .RE .PP "opt\&.abort" (\fBbool\fR) r\- .RS 4 Abort\-on\-warning enabled/disabled\&. If true, most warnings are fatal\&. The process will call \fBabort\fR(3) in these cases\&. This option is disabled by default unless \fB\-\-enable\-debug\fR is specified during configuration, in which case it is enabled by default\&. .RE .PP "opt\&.dss" (\fBconst char *\fR) r\- .RS 4 dss (\fBsbrk\fR(2)) allocation precedence as related to \fBmmap\fR(2) allocation\&. The following settings are supported if \fBsbrk\fR(2) is supported by the operating system: \(lqdisabled\(rq, \(lqprimary\(rq, and \(lqsecondary\(rq; otherwise only \(lqdisabled\(rq is supported\&. The default is \(lqsecondary\(rq if \fBsbrk\fR(2) is supported by the operating system; \(lqdisabled\(rq otherwise\&. .RE .PP "opt\&.lg_chunk" (\fBsize_t\fR) r\- .RS 4 Virtual memory chunk size (log base 2)\&. If a chunk size outside the supported size range is specified, the size is silently clipped to the minimum/maximum supported size\&. The default chunk size is 2 MiB (2^21)\&. .RE .PP "opt\&.narenas" (\fBsize_t\fR) r\- .RS 4 Maximum number of arenas to use for automatic multiplexing of threads and arenas\&. The default is four times the number of CPUs, or one if there is a single CPU\&. .RE .PP "opt\&.lg_dirty_mult" (\fBssize_t\fR) r\- .RS 4 Per\-arena minimum ratio (log base 2) of active to dirty pages\&. Some dirty unused pages may be allowed to accumulate, within the limit set by the ratio (or one chunk worth of dirty pages, whichever is greater), before informing the kernel about some of those pages via \fBmadvise\fR(2) or a similar system call\&. This provides the kernel with sufficient information to recycle dirty pages if physical memory becomes scarce and the pages remain unused\&. The default minimum ratio is 8:1 (2^3:1); an option value of \-1 will disable dirty page purging\&. See "arenas\&.lg_dirty_mult" and "arena\&.\&.lg_dirty_mult" for related dynamic control options\&. .RE .PP "opt\&.stats_print" (\fBbool\fR) r\- .RS 4 Enable/disable statistics printing at exit\&. If enabled, the \fBmalloc_stats_print\fR\fB\fR function is called at program exit via an \fBatexit\fR(3) function\&. If \fB\-\-enable\-stats\fR is specified during configuration, this has the potential to cause deadlock for a multi\-threaded process that exits while one or more threads are executing in the memory allocation functions\&. Furthermore, \fBatexit\fR\fB\fR may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls \fBatexit\fR\fB\fR, so this option is not univerally usable (though the application can register its own \fBatexit\fR\fB\fR function with equivalent functionality)\&. Therefore, this option should only be used with care; it is primarily intended as a performance tuning aid during application development\&. This option is disabled by default\&. .RE .PP "opt\&.junk" (\fBconst char *\fR) r\- [\fB\-\-enable\-fill\fR] .RS 4 Junk filling\&. If set to "alloc", each byte of uninitialized allocated memory will be initialized to 0xa5\&. If set to "free", all deallocated memory will be initialized to 0x5a\&. 
If set to "true", both allocated and deallocated memory will be initialized, and if set to "false", junk filling be disabled entirely\&. This is intended for debugging and will impact performance negatively\&. This option is "false" by default unless \fB\-\-enable\-debug\fR is specified during configuration, in which case it is "true" by default unless running inside \m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2\&. .RE .PP "opt\&.quarantine" (\fBsize_t\fR) r\- [\fB\-\-enable\-fill\fR] .RS 4 Per thread quarantine size in bytes\&. If non\-zero, each thread maintains a FIFO object quarantine that stores up to the specified number of bytes of memory\&. The quarantined memory is not freed until it is released from quarantine, though it is immediately junk\-filled if the "opt\&.junk" option is enabled\&. This feature is of particular use in combination with \m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which can detect attempts to access quarantined objects\&. This is intended for debugging and will impact performance negatively\&. The default quarantine size is 0 unless running inside Valgrind, in which case the default is 16 MiB\&. .RE .PP "opt\&.redzone" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR] .RS 4 Redzones enabled/disabled\&. If enabled, small allocations have redzones before and after them\&. Furthermore, if the "opt\&.junk" option is enabled, the redzones are checked for corruption during deallocation\&. However, the primary intended purpose of this feature is to be used in combination with \m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which needs redzones in order to do effective buffer overflow/underflow detection\&. This option is intended for debugging and will impact performance negatively\&. This option is disabled by default unless running inside Valgrind\&. .RE .PP "opt\&.zero" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR] .RS 4 Zero filling enabled/disabled\&. If enabled, each byte of uninitialized allocated memory will be initialized to 0\&. Note that this initialization only happens once for each byte, so \fBrealloc\fR\fB\fR and \fBrallocx\fR\fB\fR calls do not zero memory that was previously allocated\&. This is intended for debugging and will impact performance negatively\&. This option is disabled by default\&. .RE .PP "opt\&.utrace" (\fBbool\fR) r\- [\fB\-\-enable\-utrace\fR] .RS 4 Allocation tracing based on \fButrace\fR(2) enabled/disabled\&. This option is disabled by default\&. .RE .PP "opt\&.xmalloc" (\fBbool\fR) r\- [\fB\-\-enable\-xmalloc\fR] .RS 4 Abort\-on\-out\-of\-memory enabled/disabled\&. If enabled, rather than returning failure for any allocation function, display a diagnostic message on \fBSTDERR_FILENO\fR and cause the program to drop core (using \fBabort\fR(3))\&. If an application is designed to depend on this behavior, set the option at compile time by including the following in the source code: .sp .if n \{\ .RS 4 .\} .nf malloc_conf = "xmalloc:true"; .fi .if n \{\ .RE .\} .sp This option is disabled by default\&. .RE .PP "opt\&.tcache" (\fBbool\fR) r\- [\fB\-\-enable\-tcache\fR] .RS 4 Thread\-specific caching (tcache) enabled/disabled\&. When there are multiple threads, each thread uses a tcache for objects up to a certain size\&. Thread\-specific caching allows many allocations to be satisfied without performing any thread synchronization, at the cost of increased memory use\&. See the "opt\&.lg_tcache_max" option for related tuning information\&. 
This option is enabled by default unless running inside \m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, in which case it is forcefully disabled\&. .RE .PP "opt\&.lg_tcache_max" (\fBsize_t\fR) r\- [\fB\-\-enable\-tcache\fR] .RS 4 Maximum size class (log base 2) to cache in the thread\-specific cache (tcache)\&. At a minimum, all small size classes are cached, and at a maximum all large size classes are cached\&. The default maximum is 32 KiB (2^15)\&. .RE .PP "opt\&.prof" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] .RS 4 Memory profiling enabled/disabled\&. If enabled, profile memory allocation activity\&. See the "opt\&.prof_active" option for on\-the\-fly activation/deactivation\&. See the "opt\&.lg_prof_sample" option for probabilistic sampling control\&. See the "opt\&.prof_accum" option for control of cumulative sample reporting\&. See the "opt\&.lg_prof_interval" option for information on interval\-triggered profile dumping, the "opt\&.prof_gdump" option for information on high\-water\-triggered profile dumping, and the "opt\&.prof_final" option for final profile dumping\&. Profile output is compatible with the \fBjeprof\fR command, which is based on the \fBpprof\fR that is developed as part of the \m[blue]\fBgperftools package\fR\m[]\&\s-2\u[3]\d\s+2\&. .RE .PP "opt\&.prof_prefix" (\fBconst char *\fR) r\- [\fB\-\-enable\-prof\fR] .RS 4 Filename prefix for profile dumps\&. If the prefix is set to the empty string, no automatic dumps will occur; this is primarily useful for disabling the automatic final heap dump (which also disables leak reporting, if enabled)\&. The default prefix is jeprof\&. .RE .PP "opt\&.prof_active" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] .RS 4 Profiling activated/deactivated\&. This is a secondary control mechanism that makes it possible to start the application with profiling enabled (see the "opt\&.prof" option) but inactive, then toggle profiling at any time during program execution with the "prof\&.active" mallctl\&. This option is enabled by default\&. .RE .PP "opt\&.prof_thread_active_init" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] .RS 4 Initial setting for "thread\&.prof\&.active" in newly created threads\&. The initial setting for newly created threads can also be changed during execution via the "prof\&.thread_active_init" mallctl\&. This option is enabled by default\&. .RE .PP "opt\&.lg_prof_sample" (\fBsize_t\fR) r\- [\fB\-\-enable\-prof\fR] .RS 4 Average interval (log base 2) between allocation samples, as measured in bytes of allocation activity\&. Increasing the sampling interval decreases profile fidelity, but also decreases the computational overhead\&. The default sample interval is 512 KiB (2^19 B)\&. .RE .PP "opt\&.prof_accum" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] .RS 4 Reporting of cumulative object/byte counts in profile dumps enabled/disabled\&. If this option is enabled, every unique backtrace must be stored for the duration of execution\&. Depending on the application, this can impose a large memory overhead, and the cumulative counts are not always of interest\&. This option is disabled by default\&. .RE .PP "opt\&.lg_prof_interval" (\fBssize_t\fR) r\- [\fB\-\-enable\-prof\fR] .RS 4 Average interval (log base 2) between memory profile dumps, as measured in bytes of allocation activity\&. The actual interval between dumps may be sporadic because decentralized allocation counters are used to avoid synchronization bottlenecks\&. 
Profiles are dumped to files named according to the pattern <prefix>\&.<pid>\&.<seq>\&.i<iseq>\&.heap, where <prefix> is controlled by the "opt\&.prof_prefix" option\&. By default, interval\-triggered profile dumping is disabled (encoded as \-1)\&. .RE .PP "opt\&.prof_gdump" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] .RS 4 Set the initial state of "prof\&.gdump", which when enabled triggers a memory profile dump every time the total virtual memory exceeds the previous maximum\&. This option is disabled by default\&. .RE .PP "opt\&.prof_final" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] .RS 4 Use an \fBatexit\fR(3) function to dump final memory usage to a file named according to the pattern <prefix>\&.<pid>\&.<seq>\&.f\&.heap, where <prefix> is controlled by the "opt\&.prof_prefix" option\&. Note that \fBatexit\fR\fB\fR may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls \fBatexit\fR\fB\fR, so this option is not universally usable (though the application can register its own \fBatexit\fR\fB\fR function with equivalent functionality)\&. This option is disabled by default\&. .RE .PP "opt\&.prof_leak" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] .RS 4 Leak reporting enabled/disabled\&. If enabled, use an \fBatexit\fR(3) function to report memory leaks detected by allocation sampling\&. See the "opt\&.prof" option for information on analyzing heap profile output\&. This option is disabled by default\&. .RE .PP "thread\&.arena" (\fBunsigned\fR) rw .RS 4 Get or set the arena associated with the calling thread\&. If the specified arena was not initialized beforehand (see the "arenas\&.initialized" mallctl), it will be automatically initialized as a side effect of calling this interface\&. .RE .PP "thread\&.allocated" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Get the total number of bytes ever allocated by the calling thread\&. This counter has the potential to wrap around; it is up to the application to appropriately interpret the counter in such cases\&. .RE .PP "thread\&.allocatedp" (\fBuint64_t *\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Get a pointer to the value that is returned by the "thread\&.allocated" mallctl\&. This is useful for avoiding the overhead of repeated \fBmallctl*\fR\fB\fR calls\&. .RE .PP "thread\&.deallocated" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Get the total number of bytes ever deallocated by the calling thread\&. This counter has the potential to wrap around; it is up to the application to appropriately interpret the counter in such cases\&. .RE .PP "thread\&.deallocatedp" (\fBuint64_t *\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Get a pointer to the value that is returned by the "thread\&.deallocated" mallctl\&. This is useful for avoiding the overhead of repeated \fBmallctl*\fR\fB\fR calls\&. .RE .PP "thread\&.tcache\&.enabled" (\fBbool\fR) rw [\fB\-\-enable\-tcache\fR] .RS 4 Enable/disable calling thread\*(Aqs tcache\&. The tcache is implicitly flushed as a side effect of becoming disabled (see "thread\&.tcache\&.flush")\&. .RE .PP "thread\&.tcache\&.flush" (\fBvoid\fR) \-\- [\fB\-\-enable\-tcache\fR] .RS 4 Flush calling thread\*(Aqs thread\-specific cache (tcache)\&. This interface releases all cached objects and internal data structures associated with the calling thread\*(Aqs tcache\&. Ordinarily, this interface need not be called, since automatic periodic incremental garbage collection occurs, and the thread cache is automatically discarded when a thread exits\&.
However, garbage collection is triggered by allocation activity, so it is possible for a thread that stops allocating/deallocating to retain its cache indefinitely, in which case the developer may find manual flushing useful\&. .RE .PP "thread\&.prof\&.name" (\fBconst char *\fR) r\- or \-w [\fB\-\-enable\-prof\fR] .RS 4 Get/set the descriptive name associated with the calling thread in memory profile dumps\&. An internal copy of the name string is created, so the input string need not be maintained after this interface completes execution\&. The output string of this interface should be copied for non\-ephemeral uses, because multiple implementation details can cause asynchronous string deallocation\&. Furthermore, each invocation of this interface can only read or write; simultaneous read/write is not supported due to string lifetime limitations\&. The name string must be nil\-terminated and comprised only of characters in the sets recognized by \fBisgraph\fR(3) and \fBisblank\fR(3)\&. .RE .PP "thread\&.prof\&.active" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR] .RS 4 Control whether sampling is currently active for the calling thread\&. This is an activation mechanism in addition to "prof\&.active"; both must be active for the calling thread to sample\&. This flag is enabled by default\&. .RE .PP "tcache\&.create" (\fBunsigned\fR) r\- [\fB\-\-enable\-tcache\fR] .RS 4 Create an explicit thread\-specific cache (tcache) and return an identifier that can be passed to the \fBMALLOCX_TCACHE(\fR\fB\fItc\fR\fR\fB)\fR macro to explicitly use the specified cache rather than the automatically managed one that is used by default\&. Each explicit cache can be used by only one thread at a time; the application must assure that this constraint holds\&. .RE .PP "tcache\&.flush" (\fBunsigned\fR) \-w [\fB\-\-enable\-tcache\fR] .RS 4 Flush the specified thread\-specific cache (tcache)\&. The same considerations apply to this interface as to "thread\&.tcache\&.flush", except that the tcache will never be automatically discarded\&. .RE .PP "tcache\&.destroy" (\fBunsigned\fR) \-w [\fB\-\-enable\-tcache\fR] .RS 4 Flush the specified thread\-specific cache (tcache) and make the identifier available for use during a future tcache creation\&. .RE .PP "arena\&.<i>\&.purge" (\fBvoid\fR) \-\- .RS 4 Purge unused dirty pages for arena <i>, or for all arenas if <i> equals "arenas\&.narenas"\&. .RE .PP "arena\&.<i>\&.dss" (\fBconst char *\fR) rw .RS 4 Set the precedence of dss allocation as related to mmap allocation for arena <i>, or for all arenas if <i> equals "arenas\&.narenas"\&. See "opt\&.dss" for supported settings\&. .RE .PP "arena\&.<i>\&.lg_dirty_mult" (\fBssize_t\fR) rw .RS 4 Current per\-arena minimum ratio (log base 2) of active to dirty pages for arena <i>\&. Each time this interface is set and the ratio is increased, pages are synchronously purged as necessary to impose the new ratio\&. See "opt\&.lg_dirty_mult" for additional information\&. .RE .PP "arena\&.<i>\&.chunk_hooks" (\fBchunk_hooks_t\fR) rw .RS 4 Get or set the chunk management hook functions for arena <i>\&. The functions must be capable of operating on all extant chunks associated with arena <i>, usually by passing unknown chunks to the replaced functions\&.
In practice, it is feasible to control allocation for arenas created via "arenas\&.extend" such that all chunks originate from an application\-supplied chunk allocator (by setting custom chunk hook functions just after arena creation), but the automatically created arenas may have already created chunks prior to the application having an opportunity to take over chunk allocation\&. .sp .if n \{\ .RS 4 .\} .nf typedef struct { chunk_alloc_t *alloc; chunk_dalloc_t *dalloc; chunk_commit_t *commit; chunk_decommit_t *decommit; chunk_purge_t *purge; chunk_split_t *split; chunk_merge_t *merge; } chunk_hooks_t; .fi .if n \{\ .RE .\} .sp The \fBchunk_hooks_t\fR structure comprises function pointers which are described individually below\&. jemalloc uses these functions to manage chunk lifetime, which starts off with allocation of mapped committed memory, in the simplest case followed by deallocation\&. However, there are performance and platform reasons to retain chunks for later reuse\&. Cleanup attempts cascade from deallocation to decommit to purging, which gives the chunk management functions opportunities to reject the most permanent cleanup operations in favor of less permanent (and often less costly) operations\&. The chunk splitting and merging operations can also be opted out of, but this is mainly intended to support platforms on which virtual memory mappings provided by the operating system kernel do not automatically coalesce and split, e\&.g\&. Windows\&. .HP \w'typedef\ void\ *(chunk_alloc_t)('u .BI "typedef void *(chunk_alloc_t)(void\ *" "chunk" ", size_t\ " "size" ", size_t\ " "alignment" ", bool\ *" "zero" ", bool\ *" "commit" ", unsigned\ " "arena_ind" ");" .sp .if n \{\ .RS 4 .\} .nf .fi .if n \{\ .RE .\} .sp A chunk allocation function conforms to the \fBchunk_alloc_t\fR type and upon success returns a pointer to \fIsize\fR bytes of mapped memory on behalf of arena \fIarena_ind\fR such that the chunk\*(Aqs base address is a multiple of \fIalignment\fR, as well as setting \fI*zero\fR to indicate whether the chunk is zeroed and \fI*commit\fR to indicate whether the chunk is committed\&. Upon error the function returns \fBNULL\fR and leaves \fI*zero\fR and \fI*commit\fR unmodified\&. The \fIsize\fR parameter is always a multiple of the chunk size\&. The \fIalignment\fR parameter is always a power of two at least as large as the chunk size\&. Zeroing is mandatory if \fI*zero\fR is true upon function entry\&. Committing is mandatory if \fI*commit\fR is true upon function entry\&. If \fIchunk\fR is not \fBNULL\fR, the returned pointer must be \fIchunk\fR on success or \fBNULL\fR on error\&. Committed memory may be committed in absolute terms as on a system that does not overcommit, or in implicit terms as on a system that overcommits and satisfies physical memory needs on demand via soft page faults\&. Note that replacing the default chunk allocation function makes the arena\*(Aqs "arena\&.<i>\&.dss" setting irrelevant\&. .HP \w'typedef\ bool\ (chunk_dalloc_t)('u .BI "typedef bool (chunk_dalloc_t)(void\ *" "chunk" ", size_t\ " "size" ", bool\ " "committed" ", unsigned\ " "arena_ind" ");" .sp .if n \{\ .RS 4 .\} .nf .fi .if n \{\ .RE .\} .sp A chunk deallocation function conforms to the \fBchunk_dalloc_t\fR type and deallocates a \fIchunk\fR of given \fIsize\fR with \fIcommitted\fR/decommitted memory as indicated, on behalf of arena \fIarena_ind\fR, returning false upon success\&.
If the function returns true, this indicates opt\-out from deallocation; the virtual memory mapping associated with the chunk remains mapped, in the same commit state, and available for future use, in which case it will be automatically retained for later reuse\&. .HP \w'typedef\ bool\ (chunk_commit_t)('u .BI "typedef bool (chunk_commit_t)(void\ *" "chunk" ", size_t\ " "size" ", size_t\ " "offset" ", size_t\ " "length" ", unsigned\ " "arena_ind" ");" .sp .if n \{\ .RS 4 .\} .nf .fi .if n \{\ .RE .\} .sp A chunk commit function conforms to the \fBchunk_commit_t\fR type and commits zeroed physical memory to back pages within a \fIchunk\fR of given \fIsize\fR at \fIoffset\fR bytes, extending for \fIlength\fR on behalf of arena \fIarena_ind\fR, returning false upon success\&. Committed memory may be committed in absolute terms as on a system that does not overcommit, or in implicit terms as on a system that overcommits and satisfies physical memory needs on demand via soft page faults\&. If the function returns true, this indicates insufficient physical memory to satisfy the request\&. .HP \w'typedef\ bool\ (chunk_decommit_t)('u .BI "typedef bool (chunk_decommit_t)(void\ *" "chunk" ", size_t\ " "size" ", size_t\ " "offset" ", size_t\ " "length" ", unsigned\ " "arena_ind" ");" .sp .if n \{\ .RS 4 .\} .nf .fi .if n \{\ .RE .\} .sp A chunk decommit function conforms to the \fBchunk_decommit_t\fR type and decommits any physical memory that is backing pages within a \fIchunk\fR of given \fIsize\fR at \fIoffset\fR bytes, extending for \fIlength\fR on behalf of arena \fIarena_ind\fR, returning false upon success, in which case the pages will be committed via the chunk commit function before being reused\&. If the function returns true, this indicates opt\-out from decommit; the memory remains committed and available for future use, in which case it will be automatically retained for later reuse\&. .HP \w'typedef\ bool\ (chunk_purge_t)('u .BI "typedef bool (chunk_purge_t)(void\ *" "chunk" ", size_t" "size" ", size_t\ " "offset" ", size_t\ " "length" ", unsigned\ " "arena_ind" ");" .sp .if n \{\ .RS 4 .\} .nf .fi .if n \{\ .RE .\} .sp A chunk purge function conforms to the \fBchunk_purge_t\fR type and optionally discards physical pages within the virtual memory mapping associated with \fIchunk\fR of given \fIsize\fR at \fIoffset\fR bytes, extending for \fIlength\fR on behalf of arena \fIarena_ind\fR, returning false if pages within the purged virtual memory range will be zero\-filled the next time they are accessed\&. .HP \w'typedef\ bool\ (chunk_split_t)('u .BI "typedef bool (chunk_split_t)(void\ *" "chunk" ", size_t\ " "size" ", size_t\ " "size_a" ", size_t\ " "size_b" ", bool\ " "committed" ", unsigned\ " "arena_ind" ");" .sp .if n \{\ .RS 4 .\} .nf .fi .if n \{\ .RE .\} .sp A chunk split function conforms to the \fBchunk_split_t\fR type and optionally splits \fIchunk\fR of given \fIsize\fR into two adjacent chunks, the first of \fIsize_a\fR bytes, and the second of \fIsize_b\fR bytes, operating on \fIcommitted\fR/decommitted memory as indicated, on behalf of arena \fIarena_ind\fR, returning false upon success\&. If the function returns true, this indicates that the chunk remains unsplit and therefore should continue to be operated on as a whole\&. 
.HP \w'typedef\ bool\ (chunk_merge_t)('u .BI "typedef bool (chunk_merge_t)(void\ *" "chunk_a" ", size_t\ " "size_a" ", void\ *" "chunk_b" ", size_t\ " "size_b" ", bool\ " "committed" ", unsigned\ " "arena_ind" ");" .sp .if n \{\ .RS 4 .\} .nf .fi .if n \{\ .RE .\} .sp A chunk merge function conforms to the \fBchunk_merge_t\fR type and optionally merges adjacent chunks, \fIchunk_a\fR of given \fIsize_a\fR and \fIchunk_b\fR of given \fIsize_b\fR into one contiguous chunk, operating on \fIcommitted\fR/decommitted memory as indicated, on behalf of arena \fIarena_ind\fR, returning false upon success\&. If the function returns true, this indicates that the chunks remain distinct mappings and therefore should continue to be operated on independently\&. .RE .PP "arenas\&.narenas" (\fBunsigned\fR) r\- .RS 4 Current limit on number of arenas\&. .RE .PP "arenas\&.initialized" (\fBbool *\fR) r\- .RS 4 An array of "arenas\&.narenas" booleans\&. Each boolean indicates whether the corresponding arena is initialized\&. .RE .PP "arenas\&.lg_dirty_mult" (\fBssize_t\fR) rw .RS 4 Current default per\-arena minimum ratio (log base 2) of active to dirty pages, used to initialize "arena\&.<i>\&.lg_dirty_mult" during arena creation\&. See "opt\&.lg_dirty_mult" for additional information\&. .RE .PP "arenas\&.quantum" (\fBsize_t\fR) r\- .RS 4 Quantum size\&. .RE .PP "arenas\&.page" (\fBsize_t\fR) r\- .RS 4 Page size\&. .RE .PP "arenas\&.tcache_max" (\fBsize_t\fR) r\- [\fB\-\-enable\-tcache\fR] .RS 4 Maximum thread\-cached size class\&. .RE .PP "arenas\&.nbins" (\fBunsigned\fR) r\- .RS 4 Number of bin size classes\&. .RE .PP "arenas\&.nhbins" (\fBunsigned\fR) r\- [\fB\-\-enable\-tcache\fR] .RS 4 Total number of thread cache bin size classes\&. .RE .PP "arenas\&.bin\&.<i>\&.size" (\fBsize_t\fR) r\- .RS 4 Maximum size supported by size class\&. .RE .PP "arenas\&.bin\&.<i>\&.nregs" (\fBuint32_t\fR) r\- .RS 4 Number of regions per page run\&. .RE .PP "arenas\&.bin\&.<i>\&.run_size" (\fBsize_t\fR) r\- .RS 4 Number of bytes per page run\&. .RE .PP "arenas\&.nlruns" (\fBunsigned\fR) r\- .RS 4 Total number of large size classes\&. .RE .PP "arenas\&.lrun\&.<i>\&.size" (\fBsize_t\fR) r\- .RS 4 Maximum size supported by this large size class\&. .RE .PP "arenas\&.nhchunks" (\fBunsigned\fR) r\- .RS 4 Total number of huge size classes\&. .RE .PP "arenas\&.hchunk\&.<i>\&.size" (\fBsize_t\fR) r\- .RS 4 Maximum size supported by this huge size class\&. .RE .PP "arenas\&.extend" (\fBunsigned\fR) r\- .RS 4 Extend the array of arenas by appending a new arena, and returning the new arena index\&. .RE .PP "prof\&.thread_active_init" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR] .RS 4 Control the initial setting for "thread\&.prof\&.active" in newly created threads\&. See the "opt\&.prof_thread_active_init" option for additional information\&. .RE .PP "prof\&.active" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR] .RS 4 Control whether sampling is currently active\&. See the "opt\&.prof_active" option for additional information, as well as the interrelated "thread\&.prof\&.active" mallctl\&. .RE .PP "prof\&.dump" (\fBconst char *\fR) \-w [\fB\-\-enable\-prof\fR] .RS 4 Dump a memory profile to the specified file, or if NULL is specified, to a file according to the pattern <prefix>\&.<pid>\&.<seq>\&.m<mseq>\&.heap, where <prefix> is controlled by the "opt\&.prof_prefix" option\&. .RE .PP "prof\&.gdump" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR] .RS 4 When enabled, trigger a memory profile dump every time the total virtual memory exceeds the previous maximum\&.
Profiles are dumped to files named according to the pattern <prefix>\&.<pid>\&.<seq>\&.u<useq>\&.heap, where <prefix> is controlled by the "opt\&.prof_prefix" option\&. .RE .PP "prof\&.reset" (\fBsize_t\fR) \-w [\fB\-\-enable\-prof\fR] .RS 4 Reset all memory profile statistics, and optionally update the sample rate (see "opt\&.lg_prof_sample" and "prof\&.lg_sample")\&. .RE .PP "prof\&.lg_sample" (\fBsize_t\fR) r\- [\fB\-\-enable\-prof\fR] .RS 4 Get the current sample rate (see "opt\&.lg_prof_sample")\&. .RE .PP "prof\&.interval" (\fBuint64_t\fR) r\- [\fB\-\-enable\-prof\fR] .RS 4 Average number of bytes allocated between interval\-based profile dumps\&. See the "opt\&.lg_prof_interval" option for additional information\&. .RE .PP "stats\&.cactive" (\fBsize_t *\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Pointer to a counter that contains an approximate count of the current number of bytes in active pages\&. The estimate may be high, but never low, because each arena rounds up when computing its contribution to the counter\&. Note that the "epoch" mallctl has no bearing on this counter\&. Furthermore, counter consistency is maintained via atomic operations, so it is necessary to use an atomic operation in order to guarantee a consistent read when dereferencing the pointer\&. .RE .PP "stats\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Total number of bytes allocated by the application\&. .RE .PP "stats\&.active" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Total number of bytes in active pages allocated by the application\&. This is a multiple of the page size, and greater than or equal to "stats\&.allocated"\&. This does not include "stats\&.arenas\&.<i>\&.pdirty", nor pages entirely devoted to allocator metadata\&. .RE .PP "stats\&.metadata" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Total number of bytes dedicated to metadata, which comprise base allocations used for bootstrap\-sensitive internal allocator data structures, arena chunk headers (see "stats\&.arenas\&.<i>\&.metadata\&.mapped"), and internal allocations (see "stats\&.arenas\&.<i>\&.metadata\&.allocated")\&. .RE .PP "stats\&.resident" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Maximum number of bytes in physically resident data pages mapped by the allocator, comprising all pages dedicated to allocator metadata, pages backing active allocations, and unused dirty pages\&. This is a maximum rather than precise because pages may not actually be physically resident if they correspond to demand\-zeroed virtual memory that has not yet been touched\&. This is a multiple of the page size, and is larger than "stats\&.active"\&. .RE .PP "stats\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Total number of bytes in active chunks mapped by the allocator\&. This is a multiple of the chunk size, and is larger than "stats\&.active"\&. This does not include inactive chunks, even those that contain unused dirty pages, which means that there is no strict ordering between this and "stats\&.resident"\&. .RE .PP "stats\&.arenas\&.<i>\&.dss" (\fBconst char *\fR) r\- .RS 4 dss (\fBsbrk\fR(2)) allocation precedence as related to \fBmmap\fR(2) allocation\&. See "opt\&.dss" for details\&. .RE .PP "stats\&.arenas\&.<i>\&.lg_dirty_mult" (\fBssize_t\fR) r\- .RS 4 Minimum ratio (log base 2) of active to dirty pages\&. See "opt\&.lg_dirty_mult" for details\&. .RE .PP "stats\&.arenas\&.<i>\&.nthreads" (\fBunsigned\fR) r\- .RS 4 Number of threads currently assigned to arena\&.
.RE .PP "stats\&.arenas\&.\&.pactive" (\fBsize_t\fR) r\- .RS 4 Number of pages in active runs\&. .RE .PP "stats\&.arenas\&.\&.pdirty" (\fBsize_t\fR) r\- .RS 4 Number of pages within unused runs that are potentially dirty, and for which \fBmadvise\fR\fB\fI\&.\&.\&.\fR\fR\fB \fR\fB\fI\fBMADV_DONTNEED\fR\fR\fR or similar has not been called\&. .RE .PP "stats\&.arenas\&.\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Number of mapped bytes\&. .RE .PP "stats\&.arenas\&.\&.metadata\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Number of mapped bytes in arena chunk headers, which track the states of the non\-metadata pages\&. .RE .PP "stats\&.arenas\&.\&.metadata\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Number of bytes dedicated to internal allocations\&. Internal allocations differ from application\-originated allocations in that they are for internal use, and that they are omitted from heap profiles\&. This statistic is reported separately from "stats\&.metadata" and "stats\&.arenas\&.\&.metadata\&.mapped" because it overlaps with e\&.g\&. the "stats\&.allocated" and "stats\&.active" statistics, whereas the other metadata statistics do not\&. .RE .PP "stats\&.arenas\&.\&.npurge" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Number of dirty page purge sweeps performed\&. .RE .PP "stats\&.arenas\&.\&.nmadvise" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Number of \fBmadvise\fR\fB\fI\&.\&.\&.\fR\fR\fB \fR\fB\fI\fBMADV_DONTNEED\fR\fR\fR or similar calls made to purge dirty pages\&. .RE .PP "stats\&.arenas\&.\&.purged" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Number of pages purged\&. .RE .PP "stats\&.arenas\&.\&.small\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Number of bytes currently allocated by small objects\&. .RE .PP "stats\&.arenas\&.\&.small\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of allocation requests served by small bins\&. .RE .PP "stats\&.arenas\&.\&.small\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of small objects returned to bins\&. .RE .PP "stats\&.arenas\&.\&.small\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of small allocation requests\&. .RE .PP "stats\&.arenas\&.\&.large\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Number of bytes currently allocated by large objects\&. .RE .PP "stats\&.arenas\&.\&.large\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of large allocation requests served directly by the arena\&. .RE .PP "stats\&.arenas\&.\&.large\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of large deallocation requests served directly by the arena\&. .RE .PP "stats\&.arenas\&.\&.large\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of large allocation requests\&. .RE .PP "stats\&.arenas\&.\&.huge\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Number of bytes currently allocated by huge objects\&. .RE .PP "stats\&.arenas\&.\&.huge\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of huge allocation requests served directly by the arena\&. .RE .PP "stats\&.arenas\&.\&.huge\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of huge deallocation requests served directly by the arena\&. 
.RE .PP "stats\&.arenas\&.\&.huge\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of huge allocation requests\&. .RE .PP "stats\&.arenas\&.\&.bins\&.\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of allocations served by bin\&. .RE .PP "stats\&.arenas\&.\&.bins\&.\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of allocations returned to bin\&. .RE .PP "stats\&.arenas\&.\&.bins\&.\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of allocation requests\&. .RE .PP "stats\&.arenas\&.\&.bins\&.\&.curregs" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Current number of regions for this size class\&. .RE .PP "stats\&.arenas\&.\&.bins\&.\&.nfills" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR \fB\-\-enable\-tcache\fR] .RS 4 Cumulative number of tcache fills\&. .RE .PP "stats\&.arenas\&.\&.bins\&.\&.nflushes" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR \fB\-\-enable\-tcache\fR] .RS 4 Cumulative number of tcache flushes\&. .RE .PP "stats\&.arenas\&.\&.bins\&.\&.nruns" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of runs created\&. .RE .PP "stats\&.arenas\&.\&.bins\&.\&.nreruns" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of times the current run from which to allocate changed\&. .RE .PP "stats\&.arenas\&.\&.bins\&.\&.curruns" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Current number of runs\&. .RE .PP "stats\&.arenas\&.\&.lruns\&.\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of allocation requests for this size class served directly by the arena\&. .RE .PP "stats\&.arenas\&.\&.lruns\&.\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of deallocation requests for this size class served directly by the arena\&. .RE .PP "stats\&.arenas\&.\&.lruns\&.\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of allocation requests for this size class\&. .RE .PP "stats\&.arenas\&.\&.lruns\&.\&.curruns" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Current number of runs for this size class\&. .RE .PP "stats\&.arenas\&.\&.hchunks\&.\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of allocation requests for this size class served directly by the arena\&. .RE .PP "stats\&.arenas\&.\&.hchunks\&.\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of deallocation requests for this size class served directly by the arena\&. .RE .PP "stats\&.arenas\&.\&.hchunks\&.\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of allocation requests for this size class\&. .RE .PP "stats\&.arenas\&.\&.hchunks\&.\&.curhchunks" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Current number of huge allocations for this size class\&. .RE .SH "DEBUGGING MALLOC PROBLEMS" .PP When debugging, it is a good idea to configure/build jemalloc with the \fB\-\-enable\-debug\fR and \fB\-\-enable\-fill\fR options, and recompile the program with suitable options and symbols for debugger support\&. When so configured, jemalloc incorporates a wide variety of run\-time assertions that catch application errors such as double\-free, write\-after\-free, etc\&. .PP Programs often accidentally depend on \(lquninitialized\(rq memory actually being filled with zero bytes\&. 
Junk filling (see the "opt\&.junk" option) tends to expose such bugs in the form of obviously incorrect results and/or coredumps\&. Conversely, zero filling (see the "opt\&.zero" option) eliminates the symptoms of such bugs\&. Between these two options, it is usually possible to quickly detect, diagnose, and eliminate such bugs\&. .PP This implementation does not provide much detail about the problems it detects, because the performance impact for storing such information would be prohibitive\&. However, jemalloc does integrate with the most excellent \m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2 tool if the \fB\-\-enable\-valgrind\fR configuration option is enabled\&. .SH "DIAGNOSTIC MESSAGES" .PP If any of the memory allocation/deallocation functions detect an error or warning condition, a message will be printed to file descriptor \fBSTDERR_FILENO\fR\&. Errors will result in the process dumping core\&. If the "opt\&.abort" option is set, most warnings are treated as errors\&. .PP The \fImalloc_message\fR variable allows the programmer to override the function which emits the text strings forming the errors and warnings if for some reason the \fBSTDERR_FILENO\fR file descriptor is not suitable for this\&. \fBmalloc_message\fR\fB\fR takes the \fIcbopaque\fR pointer argument that is \fBNULL\fR unless overridden by the arguments in a call to \fBmalloc_stats_print\fR\fB\fR, followed by a string pointer\&. Please note that doing anything which tries to allocate memory in this function is likely to result in a crash or deadlock\&. .PP All messages are prefixed by \(lq<jemalloc>: \(rq\&. .SH "RETURN VALUES" .SS "Standard API" .PP The \fBmalloc\fR\fB\fR and \fBcalloc\fR\fB\fR functions return a pointer to the allocated memory if successful; otherwise a \fBNULL\fR pointer is returned and \fIerrno\fR is set to ENOMEM\&. .PP The \fBposix_memalign\fR\fB\fR function returns the value 0 if successful; otherwise it returns an error value\&. The \fBposix_memalign\fR\fB\fR function will fail if: .PP EINVAL .RS 4 The \fIalignment\fR parameter is not a power of 2 at least as large as sizeof(\fBvoid *\fR)\&. .RE .PP ENOMEM .RS 4 Memory allocation error\&. .RE .PP The \fBaligned_alloc\fR\fB\fR function returns a pointer to the allocated memory if successful; otherwise a \fBNULL\fR pointer is returned and \fIerrno\fR is set\&. The \fBaligned_alloc\fR\fB\fR function will fail if: .PP EINVAL .RS 4 The \fIalignment\fR parameter is not a power of 2\&. .RE .PP ENOMEM .RS 4 Memory allocation error\&. .RE .PP The \fBrealloc\fR\fB\fR function returns a pointer, possibly identical to \fIptr\fR, to the allocated memory if successful; otherwise a \fBNULL\fR pointer is returned, and \fIerrno\fR is set to ENOMEM if the error was the result of an allocation failure\&. The \fBrealloc\fR\fB\fR function always leaves the original buffer intact when an error occurs\&. .PP The \fBfree\fR\fB\fR function returns no value\&. .SS "Non\-standard API" .PP The \fBmallocx\fR\fB\fR and \fBrallocx\fR\fB\fR functions return a pointer to the allocated memory if successful; otherwise a \fBNULL\fR pointer is returned to indicate insufficient contiguous memory was available to service the allocation request\&. .PP The \fBxallocx\fR\fB\fR function returns the real size of the resulting resized allocation pointed to by \fIptr\fR, which is a value less than \fIsize\fR if the allocation could not be adequately grown in place\&. .PP The \fBsallocx\fR\fB\fR function returns the real size of the allocation pointed to by \fIptr\fR\&.
.PP The \fBnallocx\fR\fB\fR function returns the real size that would result from a successful equivalent \fBmallocx\fR\fB\fR function call, or zero if insufficient memory is available to perform the size computation\&. .PP The \fBmallctl\fR\fB\fR, \fBmallctlnametomib\fR\fB\fR, and \fBmallctlbymib\fR\fB\fR functions return 0 on success; otherwise they return an error value\&. The functions will fail if: .PP EINVAL .RS 4 \fInewp\fR is not \fBNULL\fR, and \fInewlen\fR is too large or too small\&. Alternatively, \fI*oldlenp\fR is too large or too small; in this case as much data as possible are read despite the error\&. .RE .PP ENOENT .RS 4 \fIname\fR or \fImib\fR specifies an unknown/invalid value\&. .RE .PP EPERM .RS 4 Attempt to read or write void value, or attempt to write read\-only value\&. .RE .PP EAGAIN .RS 4 A memory allocation failure occurred\&. .RE .PP EFAULT .RS 4 An interface with side effects failed in some way not directly related to \fBmallctl*\fR\fB\fR read/write processing\&. .RE .PP The \fBmalloc_usable_size\fR\fB\fR function returns the usable size of the allocation pointed to by \fIptr\fR\&. .SH "ENVIRONMENT" .PP The following environment variable affects the execution of the allocation functions: .PP \fBMALLOC_CONF\fR .RS 4 If the environment variable \fBMALLOC_CONF\fR is set, the characters it contains will be interpreted as options\&. .RE .SH "EXAMPLES" .PP To dump core whenever a problem occurs: .sp .if n \{\ .RS 4 .\} .nf ln \-s \*(Aqabort:true\*(Aq /etc/malloc\&.conf .fi .if n \{\ .RE .\} .PP To specify in the source a chunk size that is 16 MiB: .sp .if n \{\ .RS 4 .\} .nf malloc_conf = "lg_chunk:24"; .fi .if n \{\ .RE .\} .SH "SEE ALSO" .PP \fBmadvise\fR(2), \fBmmap\fR(2), \fBsbrk\fR(2), \fButrace\fR(2), \fBalloca\fR(3), \fBatexit\fR(3), \fBgetpagesize\fR(3) .SH "STANDARDS" .PP The \fBmalloc\fR\fB\fR, \fBcalloc\fR\fB\fR, \fBrealloc\fR\fB\fR, and \fBfree\fR\fB\fR functions conform to ISO/IEC 9899:1990 (\(lqISO C90\(rq)\&. .PP The \fBposix_memalign\fR\fB\fR function conforms to IEEE Std 1003\&.1\-2001 (\(lqPOSIX\&.1\(rq)\&. .SH "AUTHOR" .PP \fBJason Evans\fR .RS 4 .RE .SH "NOTES" .IP " 1." 4 jemalloc website .RS 4 \%http://www.canonware.com/jemalloc/ .RE .IP " 2." 4 Valgrind .RS 4 \%http://valgrind.org/ .RE .IP " 3." 4 gperftools package .RS 4 \%http://code.google.com/p/gperftools/ .RE
JEMALLOC

Name

jemalloc — general purpose memory allocation functions

LIBRARY

This manual describes jemalloc 4.0.3-0-ge9192eacf8935e29fc62fddc2701f7942b1cc02c. More information can be found at the jemalloc website.

SYNOPSIS

#include <jemalloc/jemalloc.h>

Standard API

void *malloc(size_t size);
 
void *calloc(size_t number,
 size_t size);
 
int posix_memalign(void **ptr,
 size_t alignment,
 size_t size);
 
void *aligned_alloc(size_t alignment,
 size_t size);
 
void *realloc(void *ptr,
 size_t size);
 
void free(void *ptr);
 

Non-standard API

void *mallocx(size_t size,
 int flags);
 
void *rallocx(void *ptr,
 size_t size,
 int flags);
 
size_t xallocx(void *ptr,
 size_t size,
 size_t extra,
 int flags);
 
size_t sallocx(void *ptr,
 int flags);
 
void dallocx(void *ptr,
 int flags);
 
void sdallocx(void *ptr,
 size_t size,
 int flags);
 
size_t nallocx(size_t size,
 int flags);
 
int mallctl(const char *name,
 void *oldp,
 size_t *oldlenp,
 void *newp,
 size_t newlen);
 
int mallctlnametomib(const char *name,
 size_t *mibp,
 size_t *miblenp);
 
int mallctlbymib(const size_t *mib,
 size_t miblen,
 void *oldp,
 size_t *oldlenp,
 void *newp,
 size_t newlen);
 
void malloc_stats_print(void (*write_cb) (void *, const char *) ,
 void *cbopaque,
 const char *opts);
 
size_t malloc_usable_size(const void *ptr);
 
void (*malloc_message)(void *cbopaque,
 const char *s);
 

const char *malloc_conf;

DESCRIPTION

Standard API

The malloc() function allocates size bytes of uninitialized memory. The allocated space is suitably aligned (after possible pointer coercion) for storage of any type of object.

The calloc() function allocates space for number objects, each size bytes in length. The result is identical to calling malloc() with an argument of number * size, with the exception that the allocated memory is explicitly initialized to zero bytes.

The posix_memalign() function allocates size bytes of memory such that the allocation's base address is a multiple of alignment, and returns the allocation in the value pointed to by ptr. The requested alignment must be a power of 2 at least as large as sizeof(void *).
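
For instance, a minimal sketch of requesting a 64-byte-aligned buffer (the alignment and size here are arbitrary example values; 64 is a power of 2 at least as large as sizeof(void *)):

#include <stdlib.h>
#include <stdio.h>

int main(void) {
	void *buf;
	int err = posix_memalign(&buf, 64, 1024);

	if (err != 0) {
		fprintf(stderr, "posix_memalign: error %d\n", err);
		return 1;
	}
	/* buf is now 64-byte aligned. */
	free(buf);
	return 0;
}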

The aligned_alloc() function allocates size bytes of memory such that the allocation's base address is a multiple of alignment. The requested alignment must be a power of 2. Behavior is undefined if size is not an integral multiple of alignment.

The realloc() function changes the size of the previously allocated memory referenced by ptr to size bytes. The contents of the memory are unchanged up to the lesser of the new and old sizes. If the new size is larger, the contents of the newly allocated portion of the memory are undefined. Upon success, the memory referenced by ptr is freed and a pointer to the newly allocated memory is returned. Note that realloc() may move the memory allocation, resulting in a different return value than ptr. If ptr is NULL, the realloc() function behaves identically to malloc() for the specified size.
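
Because a failed realloc() returns NULL while leaving the original buffer intact (see RETURN VALUES), a common idiom, sketched here, is to assign the result through a temporary so the original pointer is not lost:

#include <stdlib.h>
#include <string.h>

int main(void) {
	char *p = malloc(16);

	if (p == NULL)
		return 1;
	strcpy(p, "hello");

	/* Assign through a temporary so p is not leaked if realloc fails. */
	char *tmp = realloc(p, 1024);
	if (tmp == NULL) {
		free(p); /* The original allocation is still valid. */
		return 1;
	}
	p = tmp; /* "hello" is preserved up to the lesser of the two sizes. */
	free(p);
	return 0;
}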

The free() function causes the allocated memory referenced by ptr to be made available for future allocations. If ptr is NULL, no action occurs.

Non-standard API

The mallocx(), rallocx(), xallocx(), sallocx(), dallocx(), sdallocx(), and nallocx() functions all have a flags argument that can be used to specify options. The functions only check the options that are contextually relevant. Use bitwise or (|) operations to specify one or more of the following:

MALLOCX_LG_ALIGN(la)

Align the memory allocation to start at an address that is a multiple of (1 << la). This macro does not validate that la is within the valid range.

MALLOCX_ALIGN(a)

Align the memory allocation to start at an address that is a multiple of a, where a is a power of two. This macro does not validate that a is a power of 2.

MALLOCX_ZERO

Initialize newly allocated memory to contain zero bytes. In the growing reallocation case, the real size prior to reallocation defines the boundary between untouched bytes and those that are initialized to contain zero bytes. If this macro is absent, newly allocated memory is uninitialized.

MALLOCX_TCACHE(tc)

Use the thread-specific cache (tcache) specified by the identifier tc, which must have been acquired via the "tcache.create" mallctl. This macro does not validate that tc specifies a valid identifier.

MALLOCX_TCACHE_NONE

Do not use a thread-specific cache (tcache). Unless MALLOCX_TCACHE(tc) or MALLOCX_TCACHE_NONE is specified, an automatically managed tcache will be used under many circumstances. This macro cannot be used in the same flags argument as MALLOCX_TCACHE(tc).

MALLOCX_ARENA(a)

Use the arena specified by the index a. This macro has no effect for regions that were allocated via an arena other than the one specified. This macro does not validate that a specifies an arena index in the valid range.

The mallocx() function allocates at least size bytes of memory, and returns a pointer to the base address of the allocation. Behavior is undefined if size is 0, or if request size overflows due to size class and/or alignment constraints.
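
As an illustrative sketch (the size and alignment values are arbitrary), the flags described above can be combined with bitwise or and passed to mallocx():

#include <jemalloc/jemalloc.h>

int main(void) {
	/* At least 4096 zeroed bytes, aligned to a 64-byte boundary. */
	void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);

	if (p == NULL)
		return 1;
	dallocx(p, 0);
	return 0;
}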

The rallocx() function resizes the allocation at ptr to be at least size bytes, and returns a pointer to the base address of the resulting allocation, which may or may not have moved from its original location. Behavior is undefined if size is 0, or if request size overflows due to size class and/or alignment constraints.

The xallocx() function resizes the allocation at ptr in place to be at least size bytes, and returns the real size of the allocation. If extra is non-zero, an attempt is made to resize the allocation to be at least (size + extra) bytes, though inability to allocate the extra byte(s) will not by itself result in failure to resize. Behavior is undefined if size is 0, or if (size + extra > SIZE_T_MAX).
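
For example, a sketch that first tries to grow an allocation in place with xallocx(), falling back to rallocx() (which may move the allocation) when the returned real size shows the request was not satisfied:

#include <jemalloc/jemalloc.h>

int main(void) {
	void *p = mallocx(4096, 0);

	if (p == NULL)
		return 1;
	if (xallocx(p, 8192, 0, 0) < 8192) {
		/* Could not grow in place; rallocx() may move the data. */
		void *q = rallocx(p, 8192, 0);

		if (q == NULL) {
			dallocx(p, 0);
			return 1;
		}
		p = q;
	}
	dallocx(p, 0);
	return 0;
}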

The sallocx() function returns the real size of the allocation at ptr.

The dallocx() function causes the memory referenced by ptr to be made available for future allocations.

The sdallocx() function is an extension of dallocx() with a size parameter to allow the caller to pass in the allocation size as an optimization. The minimum valid input size is the original requested size of the allocation, and the maximum valid input size is the corresponding value returned by nallocx() or sallocx().

The nallocx() function allocates no memory, but it performs the same size computation as the mallocx() function, and returns the real size of the allocation that would result from the equivalent mallocx() function call. Behavior is undefined if size is 0, or if request size overflows due to size class and/or alignment constraints.
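
A sketch tying nallocx() and sdallocx() together (the 100-byte request size is arbitrary): the real size computed before allocation is, by the rule above, a valid size to pass back at deallocation time:

#include <jemalloc/jemalloc.h>
#include <stdio.h>

int main(void) {
	size_t real = nallocx(100, 0);
	void *p;

	if (real == 0)
		return 1;
	p = mallocx(100, 0);
	if (p == NULL)
		return 1;
	printf("a 100-byte request maps to a %zu-byte allocation\n", real);
	/* Any size in [100, real] is a valid size argument here. */
	sdallocx(p, real, 0);
	return 0;
}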

The mallctl() function provides a general interface for introspecting the memory allocator, as well as setting modifiable parameters and triggering actions. The period-separated name argument specifies a location in a tree-structured namespace; see the MALLCTL NAMESPACE section for documentation on the tree contents. To read a value, pass a pointer via oldp to adequate space to contain the value, and a pointer to its length via oldlenp; otherwise pass NULL and NULL. Similarly, to write a value, pass a pointer to the value via newp, and its length via newlen; otherwise pass NULL and 0.
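For instance, a sketch of one read and one write through mallctl(); the names used here are documented in the MALLCTL NAMESPACE section:

unsigned narenas, arena = 0;
size_t len = sizeof(narenas);

/* Read: query the current arena limit. */
mallctl("arenas.narenas", &narenas, &len, NULL, 0);

/* Write: bind the calling thread to arena 0. */
mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena));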

The mallctlnametomib() function provides a way to avoid repeated name lookups for applications that repeatedly query the same portion of the namespace, by translating a name to a “Management Information Base” (MIB) that can be passed repeatedly to mallctlbymib(). Upon successful return from mallctlnametomib(), mibp contains an array of *miblenp integers, where *miblenp is the lesser of the number of components in name and the input value of *miblenp. Thus it is possible to pass a *miblenp that is smaller than the number of period-separated name components, which results in a partial MIB that can be used as the basis for constructing a complete MIB. For name components that are integers (e.g. the 2 in "arenas.bin.2.size" ), the corresponding MIB component will always be that integer. Therefore, it is legitimate to construct code like the following:

unsigned nbins, i;
size_t mib[4];
size_t len, miblen;

/* Read the number of bin size classes. */
len = sizeof(nbins);
mallctl("arenas.nbins", &nbins, &len, NULL, 0);

/* Translate the name once, then vary the integer component in a loop. */
miblen = 4;
mallctlnametomib("arenas.bin.0.size", mib, &miblen);
for (i = 0; i < nbins; i++) {
	size_t bin_size;

	mib[2] = i;
	len = sizeof(bin_size);
	mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
	/* Do something with bin_size... */
}

The malloc_stats_print() function writes human-readable summary statistics via the write_cb callback function pointer and cbopaque data passed to write_cb, or malloc_message() if write_cb is NULL. This function can be called repeatedly. General information that never changes during execution can be omitted by specifying "g" as a character within the opts string. Note that malloc_stats_print() uses the mallctl*() functions internally, so inconsistent statistics can be reported if multiple threads use these functions simultaneously. If --enable-stats is specified during configuration, “m” and “a” can be specified to omit merged arena and per arena statistics, respectively; “b”, “l”, and “h” can be specified to omit per size class statistics for bins, large objects, and huge objects, respectively. Unrecognized characters are silently ignored. Note that thread caching may prevent some statistics from being completely up to date, since extra locking would be required to merge counters that track thread cache operations.
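For example (illustrative calls only):

/* Full statistics via the default malloc_message() on STDERR_FILENO. */
malloc_stats_print(NULL, NULL, NULL);

/* Omit general information and bin, large, and huge size class detail. */
malloc_stats_print(NULL, NULL, "gblh");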

The malloc_usable_size() function returns the usable size of the allocation pointed to by ptr. The return value may be larger than the size that was requested during allocation. The malloc_usable_size() function is not a mechanism for in-place realloc(); rather it is provided solely as a tool for introspection purposes. Any discrepancy between the requested allocation size and the size reported by malloc_usable_size() should not be depended on, since such behavior is entirely implementation-dependent.

TUNING

Once, when the first call is made to one of the memory allocation routines, the allocator initializes its internals based in part on various options that can be specified at compile- or run-time.

The string pointed to by the global variable malloc_conf, the “name” of the file referenced by the symbolic link named /etc/malloc.conf, and the value of the environment variable MALLOC_CONF, will be interpreted, in that order, from left to right as options. Note that malloc_conf may be read before main() is entered, so the declaration of malloc_conf should specify an initializer that contains the final value to be read by jemalloc. malloc_conf is a compile-time setting, whereas /etc/malloc.conf and MALLOC_CONF can be safely set any time prior to program invocation.

An options string is a comma-separated list of option:value pairs. There is one key corresponding to each "opt.*" mallctl (see the MALLCTL NAMESPACE section for options documentation). For example, abort:true,narenas:1 sets the "opt.abort" and "opt.narenas" options. Some options have boolean values (true/false), others have integer values (base 8, 10, or 16, depending on prefix), and yet others have raw string values.
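For example, to hard-wire the options shown above at compile time, a definition like the following (mirroring the later "opt.xmalloc" example) can be placed in the application source; the equivalent run-time form is setting MALLOC_CONF="abort:true,narenas:1" in the environment before invoking the program:

malloc_conf = "abort:true,narenas:1";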

IMPLEMENTATION NOTES

Traditionally, allocators have used sbrk(2) to obtain memory, which is suboptimal for several reasons, including race conditions, increased fragmentation, and artificial limitations on maximum usable memory. If sbrk(2) is supported by the operating system, this allocator uses both mmap(2) and sbrk(2), in that order of preference; otherwise only mmap(2) is used.

This allocator uses multiple arenas in order to reduce lock contention for threaded programs on multi-processor systems. This works well with regard to threading scalability, but incurs some costs. There is a small fixed per-arena overhead, and additionally, arenas manage memory completely independently of each other, which means a small fixed increase in overall memory fragmentation. These overheads are not generally an issue, given the number of arenas normally used. Note that using substantially more arenas than the default is not likely to improve performance, mainly due to reduced cache performance. However, it may make sense to reduce the number of arenas if an application does not make much use of the allocation functions.

In addition to multiple arenas, unless --disable-tcache is specified during configuration, this allocator supports thread-specific caching for small and large objects, in order to make it possible to completely avoid synchronization for most allocation requests. Such caching allows very fast allocation in the common case, but it increases memory usage and fragmentation, since a bounded number of objects can remain allocated in each thread cache.

Memory is conceptually broken into equal-sized chunks, where the chunk size is a power of two that is greater than the page size. Chunks are always aligned to multiples of the chunk size. This alignment makes it possible to find metadata for user objects very quickly.

User objects are broken into three categories according to size: small, large, and huge. Small and large objects are managed entirely by arenas; huge objects are additionally aggregated in a single data structure that is shared by all threads. Huge objects are typically used by applications infrequently enough that this single data structure is not a scalability issue.

Each chunk that is managed by an arena tracks its contents as runs of contiguous pages (unused, backing a set of small objects, or backing one large object). The combination of chunk alignment and chunk page maps makes it possible to determine all metadata regarding small and large allocations in constant time.

Small objects are managed in groups by page runs. Each run maintains a bitmap to track which regions are in use. Allocation requests that are no more than half the quantum (8 or 16, depending on architecture) are rounded up to the nearest power of two that is at least sizeof(double). All other object size classes are multiples of the quantum, spaced such that there are four size classes for each doubling in size, which limits internal fragmentation to approximately 20% for all but the smallest size classes. Small size classes are smaller than four times the page size, large size classes are smaller than the chunk size (see the "opt.lg_chunk" option), and huge size classes extend from the chunk size up to one size class less than the full address space size.

Allocations are packed tightly together, which can be an issue for multi-threaded applications. If you need to assure that allocations do not suffer from cacheline sharing, round your allocation requests up to the nearest multiple of the cacheline size, or specify cacheline alignment when allocating.

The realloc(), rallocx(), and xallocx() functions may resize allocations without moving them under limited circumstances. Unlike the *allocx() API, the standard API does not officially round up the usable size of an allocation to the nearest size class, so technically it is necessary to call realloc() to grow e.g. a 9-byte allocation to 16 bytes, or shrink a 16-byte allocation to 9 bytes. Growth and shrinkage trivially succeeds in place as long as the pre-size and post-size both round up to the same size class. No other API guarantees are made regarding in-place resizing, but the current implementation also tries to resize large and huge allocations in place, as long as the pre-size and post-size are both large or both huge. In such cases shrinkage always succeeds for large size classes, but for huge size classes the chunk allocator must support splitting (see "arena.<i>.chunk_hooks" ). Growth only succeeds if the trailing memory is currently available, and additionally for huge size classes the chunk allocator must support merging.
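A sketch of probing for in-place growth with xallocx() and falling back to a moving resize; the sizes are illustrative:

void *p = mallocx(4096, 0);
size_t real = xallocx(p, 8192, 0, 0);	/* try to grow in place */
if (real < 8192) {
	/* Could not grow in place; rallocx() may move the allocation. */
	void *q = rallocx(p, 8192, 0);
	if (q != NULL)
		p = q;
}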

Assuming 2 MiB chunks, 4 KiB pages, and a 16-byte quantum on a 64-bit system, the size classes in each category are as shown in Table 1.

Table 1. Size classes

Category   Spacing   Size
Small      lg        [8]
           16        [16, 32, 48, 64, 80, 96, 112, 128]
           32        [160, 192, 224, 256]
           64        [320, 384, 448, 512]
           128       [640, 768, 896, 1024]
           256       [1280, 1536, 1792, 2048]
           512       [2560, 3072, 3584, 4096]
           1 KiB     [5 KiB, 6 KiB, 7 KiB, 8 KiB]
           2 KiB     [10 KiB, 12 KiB, 14 KiB]
Large      2 KiB     [16 KiB]
           4 KiB     [20 KiB, 24 KiB, 28 KiB, 32 KiB]
           8 KiB     [40 KiB, 48 KiB, 56 KiB, 64 KiB]
           16 KiB    [80 KiB, 96 KiB, 112 KiB, 128 KiB]
           32 KiB    [160 KiB, 192 KiB, 224 KiB, 256 KiB]
           64 KiB    [320 KiB, 384 KiB, 448 KiB, 512 KiB]
           128 KiB   [640 KiB, 768 KiB, 896 KiB, 1 MiB]
           256 KiB   [1280 KiB, 1536 KiB, 1792 KiB]
Huge       256 KiB   [2 MiB]
           512 KiB   [2560 KiB, 3 MiB, 3584 KiB, 4 MiB]
           1 MiB     [5 MiB, 6 MiB, 7 MiB, 8 MiB]
           2 MiB     [10 MiB, 12 MiB, 14 MiB, 16 MiB]
           4 MiB     [20 MiB, 24 MiB, 28 MiB, 32 MiB]
           8 MiB     [40 MiB, 48 MiB, 56 MiB, 64 MiB]
           ...       ...

MALLCTL NAMESPACE

The following names are defined in the namespace accessible via the mallctl*() functions. Value types are specified in parentheses, their readable/writable statuses are encoded as rw, r-, -w, or --, and required build configuration flags follow, if any. A name element encoded as <i> or <j> indicates an integer component, where the integer varies from 0 to some upper value that must be determined via introspection. In the case of "stats.arenas.<i>.*" , <i> equal to "arenas.narenas" can be used to access the summation of statistics from all arenas. Take special note of the "epoch" mallctl, which controls refreshing of cached dynamic statistics.

"version" (const char *) r-

Return the jemalloc version string.

"epoch" (uint64_t) rw

If a value is passed in, refresh the data from which the mallctl*() functions report values, and increment the epoch. Return the current epoch. This is useful for detecting whether another thread caused a refresh.
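For example, a sketch of the refresh-then-read idiom (the statistics read here additionally requires --enable-stats):

uint64_t epoch = 1;
size_t sz = sizeof(epoch);

/* Refresh the cached statistics and advance the epoch. */
mallctl("epoch", &epoch, &sz, &epoch, sz);

/* Read a value that the refresh updated. */
size_t allocated;
sz = sizeof(allocated);
mallctl("stats.allocated", &allocated, &sz, NULL, 0);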

"config.cache_oblivious" (bool) r-

--enable-cache-oblivious was specified during build configuration.

"config.debug" (bool) r-

--enable-debug was specified during build configuration.

"config.fill" (bool) r-

--enable-fill was specified during build configuration.

"config.lazy_lock" (bool) r-

--enable-lazy-lock was specified during build configuration.

"config.munmap" (bool) r-

--enable-munmap was specified during build configuration.

"config.prof" (bool) r-

--enable-prof was specified during build configuration.

"config.prof_libgcc" (bool) r-

--disable-prof-libgcc was not specified during build configuration.

"config.prof_libunwind" (bool) r-

--enable-prof-libunwind was specified during build configuration.

"config.stats" (bool) r-

--enable-stats was specified during build configuration.

"config.tcache" (bool) r-

--disable-tcache was not specified during build configuration.

"config.tls" (bool) r-

--disable-tls was not specified during build configuration.

"config.utrace" (bool) r-

--enable-utrace was specified during build configuration.

"config.valgrind" (bool) r-

--enable-valgrind was specified during build configuration.

"config.xmalloc" (bool) r-

--enable-xmalloc was specified during build configuration.

"opt.abort" (bool) r-

Abort-on-warning enabled/disabled. If true, most warnings are fatal. The process will call abort(3) in these cases. This option is disabled by default unless --enable-debug is specified during configuration, in which case it is enabled by default.

"opt.dss" (const char *) r-

dss (sbrk(2)) allocation precedence as related to mmap(2) allocation. The following settings are supported if sbrk(2) is supported by the operating system: “disabled”, “primary”, and “secondary”; otherwise only “disabled” is supported. The default is “secondary” if sbrk(2) is supported by the operating system; “disabled” otherwise.

"opt.lg_chunk" (size_t) r-

Virtual memory chunk size (log base 2). If a chunk size outside the supported size range is specified, the size is silently clipped to the minimum/maximum supported size. The default chunk size is 2 MiB (2^21).

"opt.narenas" (size_t) r-

Maximum number of arenas to use for automatic multiplexing of threads and arenas. The default is four times the number of CPUs, or one if there is a single CPU.

"opt.lg_dirty_mult" (ssize_t) r-

Per-arena minimum ratio (log base 2) of active to dirty pages. Some dirty unused pages may be allowed to accumulate, within the limit set by the ratio (or one chunk worth of dirty pages, whichever is greater), before informing the kernel about some of those pages via madvise(2) or a similar system call. This provides the kernel with sufficient information to recycle dirty pages if physical memory becomes scarce and the pages remain unused. The default minimum ratio is 8:1 (2^3:1); an option value of -1 will disable dirty page purging. See "arenas.lg_dirty_mult" and "arena.<i>.lg_dirty_mult" for related dynamic control options.

"opt.stats_print" (bool) r-

Enable/disable statistics printing at exit. If enabled, the malloc_stats_print() function is called at program exit via an atexit(3) function. If --enable-stats is specified during configuration, this has the potential to cause deadlock for a multi-threaded process that exits while one or more threads are executing in the memory allocation functions. Furthermore, atexit() may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls atexit(), so this option is not universally usable (though the application can register its own atexit() function with equivalent functionality). Therefore, this option should only be used with care; it is primarily intended as a performance tuning aid during application development. This option is disabled by default.

"opt.junk" (const char *) r- [--enable-fill]

Junk filling. If set to "alloc", each byte of uninitialized allocated memory will be initialized to 0xa5. If set to "free", all deallocated memory will be initialized to 0x5a. If set to "true", both allocated and deallocated memory will be initialized, and if set to "false", junk filling will be disabled entirely. This is intended for debugging and will impact performance negatively. This option is "false" by default unless --enable-debug is specified during configuration, in which case it is "true" by default unless running inside Valgrind.

"opt.quarantine" (size_t) r- [--enable-fill]

Per thread quarantine size in bytes. If non-zero, each thread maintains a FIFO object quarantine that stores up to the specified number of bytes of memory. The quarantined memory is not freed until it is released from quarantine, though it is immediately junk-filled if the "opt.junk" option is enabled. This feature is of particular use in combination with Valgrind, which can detect attempts to access quarantined objects. This is intended for debugging and will impact performance negatively. The default quarantine size is 0 unless running inside Valgrind, in which case the default is 16 MiB.

"opt.redzone" (bool) r- [--enable-fill]

Redzones enabled/disabled. If enabled, small allocations have redzones before and after them. Furthermore, if the "opt.junk" option is enabled, the redzones are checked for corruption during deallocation. However, the primary intended purpose of this feature is to be used in combination with Valgrind, which needs redzones in order to do effective buffer overflow/underflow detection. This option is intended for debugging and will impact performance negatively. This option is disabled by default unless running inside Valgrind.

"opt.zero" (bool) r- [--enable-fill]

Zero filling enabled/disabled. If enabled, each byte of uninitialized allocated memory will be initialized to 0. Note that this initialization only happens once for each byte, so realloc() and rallocx() calls do not zero memory that was previously allocated. This is intended for debugging and will impact performance negatively. This option is disabled by default.

"opt.utrace" (bool) r- [--enable-utrace]

Allocation tracing based on utrace(2) enabled/disabled. This option is disabled by default.

"opt.xmalloc" (bool) r- [--enable-xmalloc]

Abort-on-out-of-memory enabled/disabled. If enabled, rather than returning failure for any allocation function, display a diagnostic message on STDERR_FILENO and cause the program to drop core (using abort(3)). If an application is designed to depend on this behavior, set the option at compile time by including the following in the source code:

malloc_conf = "xmalloc:true";

This option is disabled by default.

"opt.tcache" (bool) r- [--enable-tcache]

Thread-specific caching (tcache) enabled/disabled. When there are multiple threads, each thread uses a tcache for objects up to a certain size. Thread-specific caching allows many allocations to be satisfied without performing any thread synchronization, at the cost of increased memory use. See the "opt.lg_tcache_max" option for related tuning information. This option is enabled by default unless running inside Valgrind, in which case it is forcefully disabled.

"opt.lg_tcache_max" (size_t) r- [--enable-tcache]

Maximum size class (log base 2) to cache in the thread-specific cache (tcache). At a minimum, all small size classes are cached, and at a maximum all large size classes are cached. The default maximum is 32 KiB (2^15).

"opt.prof" (bool) r- [--enable-prof]

Memory profiling enabled/disabled. If enabled, profile memory allocation activity. See the "opt.prof_active" option for on-the-fly activation/deactivation. See the "opt.lg_prof_sample" option for probabilistic sampling control. See the "opt.prof_accum" option for control of cumulative sample reporting. See the "opt.lg_prof_interval" option for information on interval-triggered profile dumping, the "opt.prof_gdump" option for information on high-water-triggered profile dumping, and the "opt.prof_final" option for final profile dumping. Profile output is compatible with the jeprof command, which is based on the pprof that is developed as part of the gperftools package.

"opt.prof_prefix" (const char *) r- [--enable-prof]

Filename prefix for profile dumps. If the prefix is set to the empty string, no automatic dumps will occur; this is primarily useful for disabling the automatic final heap dump (which also disables leak reporting, if enabled). The default prefix is jeprof.

"opt.prof_active" (bool) r- [--enable-prof]

Profiling activated/deactivated. This is a secondary control mechanism that makes it possible to start the application with profiling enabled (see the "opt.prof" option) but inactive, then toggle profiling at any time during program execution with the "prof.active" mallctl. This option is enabled by default.

"opt.prof_thread_active_init" (bool) r- [--enable-prof]

Initial setting for "thread.prof.active" in newly created threads. The initial setting for newly created threads can also be changed during execution via the "prof.thread_active_init" mallctl. This option is enabled by default.

"opt.lg_prof_sample" (size_t) r- [--enable-prof]

Average interval (log base 2) between allocation samples, as measured in bytes of allocation activity. Increasing the sampling interval decreases profile fidelity, but also decreases the computational overhead. The default sample interval is 512 KiB (2^19 B).

"opt.prof_accum" (bool) r- [--enable-prof]

Reporting of cumulative object/byte counts in profile dumps enabled/disabled. If this option is enabled, every unique backtrace must be stored for the duration of execution. Depending on the application, this can impose a large memory overhead, and the cumulative counts are not always of interest. This option is disabled by default.

"opt.lg_prof_interval" (ssize_t) r- [--enable-prof]

Average interval (log base 2) between memory profile dumps, as measured in bytes of allocation activity. The actual interval between dumps may be sporadic because decentralized allocation counters are used to avoid synchronization bottlenecks. Profiles are dumped to files named according to the pattern <prefix>.<pid>.<seq>.i<iseq>.heap, where <prefix> is controlled by the "opt.prof_prefix" option. By default, interval-triggered profile dumping is disabled (encoded as -1).

"opt.prof_gdump" (bool) r- [--enable-prof]

Set the initial state of "prof.gdump" , which when enabled triggers a memory profile dump every time the total virtual memory exceeds the previous maximum. This option is disabled by default.

"opt.prof_final" (bool) r- [--enable-prof]

Use an atexit(3) function to dump final memory usage to a file named according to the pattern <prefix>.<pid>.<seq>.f.heap, where <prefix> is controlled by the "opt.prof_prefix" option. Note that atexit() may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls atexit(), so this option is not universally usable (though the application can register its own atexit() function with equivalent functionality). This option is disabled by default.

"opt.prof_leak" (bool) r- [--enable-prof]

Leak reporting enabled/disabled. If enabled, use an atexit(3) function to report memory leaks detected by allocation sampling. See the "opt.prof" option for information on analyzing heap profile output. This option is disabled by default.

"thread.arena" (unsigned) rw

Get or set the arena associated with the calling thread. If the specified arena was not initialized beforehand (see the "arenas.initialized" mallctl), it will be automatically initialized as a side effect of calling this interface.

"thread.allocated" (uint64_t) r- [--enable-stats]

Get the total number of bytes ever allocated by the calling thread. This counter has the potential to wrap around; it is up to the application to appropriately interpret the counter in such cases.

"thread.allocatedp" (uint64_t *) r- [--enable-stats]

Get a pointer to the value that is returned by the "thread.allocated" mallctl. This is useful for avoiding the overhead of repeated mallctl*() calls.

"thread.deallocated" (uint64_t) r- [--enable-stats]

Get the total number of bytes ever deallocated by the calling thread. This counter has the potential to wrap around; it is up to the application to appropriately interpret the counter in such cases.

"thread.deallocatedp" (uint64_t *) r- [--enable-stats]

Get a pointer to the value that is returned by the "thread.deallocated" mallctl. This is useful for avoiding the overhead of repeated mallctl*() calls.
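A sketch of caching such a pointer once and then reading the counter cheaply (requires --enable-stats); the same pattern applies to "thread.allocatedp":

uint64_t *deallocatedp;
size_t sz = sizeof(deallocatedp);

if (mallctl("thread.deallocatedp", &deallocatedp, &sz, NULL, 0) == 0) {
	/* Later reads cost only a pointer dereference. */
	uint64_t deallocated_now = *deallocatedp;
}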

"thread.tcache.enabled" (bool) rw [--enable-tcache]

Enable/disable calling thread's tcache. The tcache is implicitly flushed as a side effect of becoming disabled (see "thread.tcache.flush" ).

"thread.tcache.flush" (void) -- [--enable-tcache]

Flush calling thread's thread-specific cache (tcache). This interface releases all cached objects and internal data structures associated with the calling thread's tcache. Ordinarily, this interface need not be called, since automatic periodic incremental garbage collection occurs, and the thread cache is automatically discarded when a thread exits. However, garbage collection is triggered by allocation activity, so it is possible for a thread that stops allocating/deallocating to retain its cache indefinitely, in which case the developer may find manual flushing useful.

"thread.prof.name" (const char *) r- or -w [--enable-prof]

Get/set the descriptive name associated with the calling thread in memory profile dumps. An internal copy of the name string is created, so the input string need not be maintained after this interface completes execution. The output string of this interface should be copied for non-ephemeral uses, because multiple implementation details can cause asynchronous string deallocation. Furthermore, each invocation of this interface can only read or write; simultaneous read/write is not supported due to string lifetime limitations. The name string must be nil-terminated and comprised only of characters in the sets recognized by isgraph(3) and isblank(3).

"thread.prof.active" (bool) rw [--enable-prof]

Control whether sampling is currently active for the calling thread. This is an activation mechanism in addition to "prof.active" ; both must be active for the calling thread to sample. This flag is enabled by default.

"tcache.create" (unsigned) r- [--enable-tcache]

Create an explicit thread-specific cache (tcache) and return an identifier that can be passed to the MALLOCX_TCACHE(tc) macro to explicitly use the specified cache rather than the automatically managed one that is used by default. Each explicit cache can be used by only one thread at a time; the application must assure that this constraint holds.

"tcache.flush" (unsigned) -w [--enable-tcache]

Flush the specified thread-specific cache (tcache). The same considerations apply to this interface as to "thread.tcache.flush" , except that the tcache will never be automatically discarded.

"tcache.destroy" (unsigned) -w [--enable-tcache]

Flush the specified thread-specific cache (tcache) and make the identifier available for use during a future tcache creation.
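Taken together, a sketch of the explicit tcache lifecycle (requires --enable-tcache); the size 100 is illustrative:

unsigned tc;
size_t sz = sizeof(tc);

if (mallctl("tcache.create", &tc, &sz, NULL, 0) == 0) {
	/* Route allocation and deallocation through the explicit tcache. */
	void *p = mallocx(100, MALLOCX_TCACHE(tc));
	dallocx(p, MALLOCX_TCACHE(tc));

	/* Release the cache and recycle its identifier. */
	mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
}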

"arena.<i>.purge" (void) --

Purge unused dirty pages for arena <i>, or for all arenas if <i> equals "arenas.narenas" .

"arena.<i>.dss" (const char *) rw

Set the precedence of dss allocation as related to mmap allocation for arena <i>, or for all arenas if <i> equals "arenas.narenas" . See "opt.dss" for supported settings.

"arena.<i>.lg_dirty_mult" (ssize_t) rw

Current per-arena minimum ratio (log base 2) of active to dirty pages for arena <i>. Each time this interface is set and the ratio is increased, pages are synchronously purged as necessary to impose the new ratio. See "opt.lg_dirty_mult" for additional information.

"arena.<i>.chunk_hooks" (chunk_hooks_t) rw

Get or set the chunk management hook functions for arena <i>. The functions must be capable of operating on all extant chunks associated with arena <i>, usually by passing unknown chunks to the replaced functions. In practice, it is feasible to control allocation for arenas created via "arenas.extend" such that all chunks originate from an application-supplied chunk allocator (by setting custom chunk hook functions just after arena creation), but the automatically created arenas may have already created chunks prior to the application having an opportunity to take over chunk allocation.

typedef struct {
	chunk_alloc_t		*alloc;
	chunk_dalloc_t		*dalloc;
	chunk_commit_t		*commit;
	chunk_decommit_t	*decommit;
	chunk_purge_t		*purge;
	chunk_split_t		*split;
	chunk_merge_t		*merge;
} chunk_hooks_t;

The chunk_hooks_t structure comprises function pointers which are described individually below. jemalloc uses these functions to manage chunk lifetime, which starts off with allocation of mapped committed memory, in the simplest case followed by deallocation. However, there are performance and platform reasons to retain chunks for later reuse. Cleanup attempts cascade from deallocation to decommit to purging, which gives the chunk management functions opportunities to reject the most permanent cleanup operations in favor of less permanent (and often less costly) operations. The chunk splitting and merging operations can also be opted out of, but this is mainly intended to support platforms on which virtual memory mappings provided by the operating system kernel do not automatically coalesce and split, e.g. Windows.

typedef void *(chunk_alloc_t)(void *chunk,
 size_t size,
 size_t alignment,
 bool *zero,
 bool *commit,
 unsigned arena_ind);
 

A chunk allocation function conforms to the chunk_alloc_t type and upon success returns a pointer to size bytes of mapped memory on behalf of arena arena_ind such that the chunk's base address is a multiple of alignment, as well as setting *zero to indicate whether the chunk is zeroed and *commit to indicate whether the chunk is committed. Upon error the function returns NULL and leaves *zero and *commit unmodified. The size parameter is always a multiple of the chunk size. The alignment parameter is always a power of two at least as large as the chunk size. Zeroing is mandatory if *zero is true upon function entry. Committing is mandatory if *commit is true upon function entry. If chunk is not NULL, the returned pointer must be chunk on success or NULL on error. Committed memory may be committed in absolute terms as on a system that does not overcommit, or in implicit terms as on a system that overcommits and satisfies physical memory needs on demand via soft page faults. Note that replacing the default chunk allocation function makes the arena's "arena.<i>.dss" setting irrelevant.

typedef bool (chunk_dalloc_t)(void *chunk,
 size_t size,
 bool committed,
 unsigned arena_ind);
 

A chunk deallocation function conforms to the chunk_dalloc_t type and deallocates a chunk of given size with committed/decommited memory as indicated, on behalf of arena arena_ind, returning false upon success. If the function returns true, this indicates opt-out from deallocation; the virtual memory mapping associated with the chunk remains mapped, in the same commit state, and available for future use, in which case it will be automatically retained for later reuse.

typedef bool (chunk_commit_t)(void *chunk,
 size_t size,
 size_t offset,
 size_t length,
 unsigned arena_ind);
 

A chunk commit function conforms to the chunk_commit_t type and commits zeroed physical memory to back pages within a chunk of given size at offset bytes, extending for length on behalf of arena arena_ind, returning false upon success. Committed memory may be committed in absolute terms as on a system that does not overcommit, or in implicit terms as on a system that overcommits and satisfies physical memory needs on demand via soft page faults. If the function returns true, this indicates insufficient physical memory to satisfy the request.

typedef bool (chunk_decommit_t)(void *chunk,
 size_t size,
 size_t offset,
 size_t length,
 unsigned arena_ind);
 

A chunk decommit function conforms to the chunk_decommit_t type and decommits any physical memory that is backing pages within a chunk of given size at offset bytes, extending for length on behalf of arena arena_ind, returning false upon success, in which case the pages will be committed via the chunk commit function before being reused. If the function returns true, this indicates opt-out from decommit; the memory remains committed and available for future use, in which case it will be automatically retained for later reuse.

typedef bool (chunk_purge_t)(void *chunk,
 size_t size,
 size_t offset,
 size_t length,
 unsigned arena_ind);
 

A chunk purge function conforms to the chunk_purge_t type and optionally discards physical pages within the virtual memory mapping associated with chunk of given size at offset bytes, extending for length on behalf of arena arena_ind, returning false if pages within the purged virtual memory range will be zero-filled the next time they are accessed.

typedef bool (chunk_split_t)(void *chunk,
 size_t size,
 size_t size_a,
 size_t size_b,
 bool committed,
 unsigned arena_ind);
 

A chunk split function conforms to the chunk_split_t type and optionally splits chunk of given size into two adjacent chunks, the first of size_a bytes, and the second of size_b bytes, operating on committed/decommitted memory as indicated, on behalf of arena arena_ind, returning false upon success. If the function returns true, this indicates that the chunk remains unsplit and therefore should continue to be operated on as a whole.

typedef bool (chunk_merge_t)(void *chunk_a,
 size_t size_a,
 void *chunk_b,
 size_t size_b,
 bool committed,
 unsigned arena_ind);
 

A chunk merge function conforms to the chunk_merge_t type and optionally merges adjacent chunks, chunk_a of given size_a and chunk_b of given size_b into one contiguous chunk, operating on committed/decommitted memory as indicated, on behalf of arena arena_ind, returning false upon success. If the function returns true, this indicates that the chunks remain distinct mappings and therefore should continue to be operated on independently.
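A sketch of the read-modify-write idiom for installing custom hooks on arena 0; the arena index and the my_chunk_alloc function are hypothetical:

chunk_hooks_t hooks;
size_t sz = sizeof(hooks);

/* Read the current hooks so unreplaced members keep their defaults. */
mallctl("arena.0.chunk_hooks", &hooks, &sz, NULL, 0);
hooks.alloc = my_chunk_alloc;	/* hypothetical chunk_alloc_t implementation */

/* Install the modified hook table. */
mallctl("arena.0.chunk_hooks", NULL, NULL, &hooks, sizeof(hooks));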

"arenas.narenas" (unsigned) r-

Current limit on number of arenas.

"arenas.initialized" (bool *) r-

An array of "arenas.narenas" booleans. Each boolean indicates whether the corresponding arena is initialized.

"arenas.lg_dirty_mult" (ssize_t) rw

Current default per-arena minimum ratio (log base 2) of active to dirty pages, used to initialize "arena.<i>.lg_dirty_mult" during arena creation. See "opt.lg_dirty_mult" for additional information.

"arenas.quantum" (size_t) r-

Quantum size.

"arenas.page" (size_t) r-

Page size.

"arenas.tcache_max" (size_t) r- [--enable-tcache]

Maximum thread-cached size class.

"arenas.nbins" (unsigned) r-

Number of bin size classes.

"arenas.nhbins" (unsigned) r- [--enable-tcache]

Total number of thread cache bin size classes.

"arenas.bin.<i>.size" (size_t) r-

Maximum size supported by size class.

"arenas.bin.<i>.nregs" (uint32_t) r-

Number of regions per page run.

"arenas.bin.<i>.run_size" (size_t) r-

Number of bytes per page run.

"arenas.nlruns" (unsigned) r-

Total number of large size classes.

"arenas.lrun.<i>.size" (size_t) r-

Maximum size supported by this large size class.

"arenas.nhchunks" (unsigned) r-

Total number of huge size classes.

"arenas.hchunk.<i>.size" (size_t) r-

Maximum size supported by this huge size class.

"arenas.extend" (unsigned) r-

Extend the array of arenas by appending a new arena, and return the new arena index.
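For example, a sketch that creates a dedicated arena and directs an allocation to it, bypassing the thread cache; the size 100 is illustrative:

unsigned arena_ind;
size_t sz = sizeof(arena_ind);

if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) == 0) {
	/* Allocate from the new arena, bypassing the thread cache. */
	void *p = mallocx(100, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
	dallocx(p, MALLOCX_TCACHE_NONE);
}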

"prof.thread_active_init" (bool) rw [--enable-prof]

Control the initial setting for "thread.prof.active" in newly created threads. See the "opt.prof_thread_active_init" option for additional information.

"prof.active" (bool) rw [--enable-prof]

Control whether sampling is currently active. See the "opt.prof_active" option for additional information, as well as the interrelated "thread.prof.active" mallctl.

"prof.dump" (const char *) -w [--enable-prof]

Dump a memory profile to the specified file, or if NULL is specified, to a file according to the pattern <prefix>.<pid>.<seq>.m<mseq>.heap, where <prefix> is controlled by the "opt.prof_prefix" option.
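For instance, a sketch of triggering dumps programmatically (requires --enable-prof and active profiling); the file name is hypothetical:

/* Dump to a file named by the <prefix>.<pid>.<seq>.m<mseq>.heap pattern. */
mallctl("prof.dump", NULL, NULL, NULL, 0);

/* Dump to an explicitly named file. */
const char *fname = "app.heap";
mallctl("prof.dump", NULL, NULL, &fname, sizeof(fname));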

"prof.gdump" (bool) rw [--enable-prof]

When enabled, trigger a memory profile dump every time the total virtual memory exceeds the previous maximum. Profiles are dumped to files named according to the pattern <prefix>.<pid>.<seq>.u<useq>.heap, where <prefix> is controlled by the "opt.prof_prefix" option.

"prof.reset" (size_t) -w [--enable-prof]

Reset all memory profile statistics, and optionally update the sample rate (see "opt.lg_prof_sample" and "prof.lg_sample" ).

"prof.lg_sample" (size_t) r- [--enable-prof]

Get the current sample rate (see "opt.lg_prof_sample" ).

"prof.interval" (uint64_t) r- [--enable-prof]

Average number of bytes allocated between interval-based profile dumps. See the "opt.lg_prof_interval" option for additional information.

"stats.cactive" (size_t *) r- [--enable-stats]

Pointer to a counter that contains an approximate count of the current number of bytes in active pages. The estimate may be high, but never low, because each arena rounds up when computing its contribution to the counter. Note that the "epoch" mallctl has no bearing on this counter. Furthermore, counter consistency is maintained via atomic operations, so it is necessary to use an atomic operation in order to guarantee a consistent read when dereferencing the pointer.

"stats.allocated" (size_t) r- [--enable-stats]

Total number of bytes allocated by the application.

"stats.active" (size_t) r- [--enable-stats]

Total number of bytes in active pages allocated by the application. This is a multiple of the page size, and greater than or equal to "stats.allocated" . This does not include "stats.arenas.<i>.pdirty" , nor pages entirely devoted to allocator metadata.

"stats.metadata" (size_t) r- [--enable-stats]

Total number of bytes dedicated to metadata, which comprise base allocations used for bootstrap-sensitive internal allocator data structures, arena chunk headers (see "stats.arenas.<i>.metadata.mapped" ), and internal allocations (see "stats.arenas.<i>.metadata.allocated" ).

"stats.resident" (size_t) r- [--enable-stats]

Maximum number of bytes in physically resident data pages mapped by the allocator, comprising all pages dedicated to allocator metadata, pages backing active allocations, and unused dirty pages. This is a maximum rather than precise because pages may not actually be physically resident if they correspond to demand-zeroed virtual memory that has not yet been touched. This is a multiple of the page size, and is larger than "stats.active" .

"stats.mapped" (size_t) r- [--enable-stats]

Total number of bytes in active chunks mapped by the allocator. This is a multiple of the chunk size, and is larger than "stats.active" . This does not include inactive chunks, even those that contain unused dirty pages, which means that there is no strict ordering between this and "stats.resident" .

"stats.arenas.<i>.dss" (const char *) r-

dss (sbrk(2)) allocation precedence as related to mmap(2) allocation. See "opt.dss" for details.

"stats.arenas.<i>.lg_dirty_mult" (ssize_t) r-

Minimum ratio (log base 2) of active to dirty pages. See "opt.lg_dirty_mult" for details.

"stats.arenas.<i>.nthreads" (unsigned) r-

Number of threads currently assigned to arena.

"stats.arenas.<i>.pactive" (size_t) r-

Number of pages in active runs.

"stats.arenas.<i>.pdirty" (size_t) r-

Number of pages within unused runs that are potentially dirty, and for which madvise(..., MADV_DONTNEED) or similar has not been called.

"stats.arenas.<i>.mapped" (size_t) r- [--enable-stats]

Number of mapped bytes.

"stats.arenas.<i>.metadata.mapped" (size_t) r- [--enable-stats]

Number of mapped bytes in arena chunk headers, which track the states of the non-metadata pages.

"stats.arenas.<i>.metadata.allocated" (size_t) r- [--enable-stats]

Number of bytes dedicated to internal allocations. Internal allocations differ from application-originated allocations in that they are for internal use, and that they are omitted from heap profiles. This statistic is reported separately from "stats.metadata" and "stats.arenas.<i>.metadata.mapped" because it overlaps with e.g. the "stats.allocated" and "stats.active" statistics, whereas the other metadata statistics do not.

"stats.arenas.<i>.npurge" (uint64_t) r- [--enable-stats]

Number of dirty page purge sweeps performed.

"stats.arenas.<i>.nmadvise" (uint64_t) r- [--enable-stats]

Number of madvise(..., MADV_DONTNEED) or similar calls made to purge dirty pages.

"stats.arenas.<i>.purged" (uint64_t) r- [--enable-stats]

Number of pages purged.

"stats.arenas.<i>.small.allocated" (size_t) r- [--enable-stats]

Number of bytes currently allocated by small objects.

"stats.arenas.<i>.small.nmalloc" (uint64_t) r- [--enable-stats]

Cumulative number of allocation requests served by small bins.

"stats.arenas.<i>.small.ndalloc" (uint64_t) r- [--enable-stats]

Cumulative number of small objects returned to bins.

"stats.arenas.<i>.small.nrequests" (uint64_t) r- [--enable-stats]

Cumulative number of small allocation requests.

"stats.arenas.<i>.large.allocated" (size_t) r- [--enable-stats]

Number of bytes currently allocated by large objects.

"stats.arenas.<i>.large.nmalloc" (uint64_t) r- [--enable-stats]

Cumulative number of large allocation requests served directly by the arena.

"stats.arenas.<i>.large.ndalloc" (uint64_t) r- [--enable-stats]

Cumulative number of large deallocation requests served directly by the arena.

"stats.arenas.<i>.large.nrequests" (uint64_t) r- [--enable-stats]

Cumulative number of large allocation requests.

"stats.arenas.<i>.huge.allocated" (size_t) r- [--enable-stats]

Number of bytes currently allocated by huge objects.

"stats.arenas.<i>.huge.nmalloc" (uint64_t) r- [--enable-stats]

Cumulative number of huge allocation requests served directly by the arena.

"stats.arenas.<i>.huge.ndalloc" (uint64_t) r- [--enable-stats]

Cumulative number of huge deallocation requests served directly by the arena.

"stats.arenas.<i>.huge.nrequests" (uint64_t) r- [--enable-stats]

Cumulative number of huge allocation requests.

"stats.arenas.<i>.bins.<j>.nmalloc" (uint64_t) r- [--enable-stats]

Cumulative number of allocations served by bin.

"stats.arenas.<i>.bins.<j>.ndalloc" (uint64_t) r- [--enable-stats]

Cumulative number of allocations returned to bin.

"stats.arenas.<i>.bins.<j>.nrequests" (uint64_t) r- [--enable-stats]

Cumulative number of allocation requests.

"stats.arenas.<i>.bins.<j>.curregs" (size_t) r- [--enable-stats]

Current number of regions for this size class.

"stats.arenas.<i>.bins.<j>.nfills" (uint64_t) r- [--enable-stats --enable-tcache]

Cumulative number of tcache fills.

"stats.arenas.<i>.bins.<j>.nflushes" (uint64_t) r- [--enable-stats --enable-tcache]

Cumulative number of tcache flushes.

"stats.arenas.<i>.bins.<j>.nruns" (uint64_t) r- [--enable-stats]

Cumulative number of runs created.

"stats.arenas.<i>.bins.<j>.nreruns" (uint64_t) r- [--enable-stats]

Cumulative number of times the current run from which to allocate changed.

"stats.arenas.<i>.bins.<j>.curruns" (size_t) r- [--enable-stats]

Current number of runs.

"stats.arenas.<i>.lruns.<j>.nmalloc" (uint64_t) r- [--enable-stats]

Cumulative number of allocation requests for this size class served directly by the arena.

"stats.arenas.<i>.lruns.<j>.ndalloc" (uint64_t) r- [--enable-stats]

Cumulative number of deallocation requests for this size class served directly by the arena.

"stats.arenas.<i>.lruns.<j>.nrequests" (uint64_t) r- [--enable-stats]

Cumulative number of allocation requests for this size class.

"stats.arenas.<i>.lruns.<j>.curruns" (size_t) r- [--enable-stats]

Current number of runs for this size class.

"stats.arenas.<i>.hchunks.<j>.nmalloc" (uint64_t) r- [--enable-stats]

Cumulative number of allocation requests for this size class served directly by the arena.

"stats.arenas.<i>.hchunks.<j>.ndalloc" (uint64_t) r- [--enable-stats]

Cumulative number of deallocation requests for this size class served directly by the arena.

"stats.arenas.<i>.hchunks.<j>.nrequests" (uint64_t) r- [--enable-stats]

Cumulative number of allocation requests for this size class.

"stats.arenas.<i>.hchunks.<j>.curhchunks" (size_t) r- [--enable-stats]

Current number of huge allocations for this size class.

DEBUGGING MALLOC PROBLEMS

When debugging, it is a good idea to configure/build jemalloc with the --enable-debug and --enable-fill options, and recompile the program with suitable options and symbols for debugger support. When so configured, jemalloc incorporates a wide variety of run-time assertions that catch application errors such as double-free, write-after-free, etc.

Programs often accidentally depend on “uninitialized” memory actually being filled with zero bytes. Junk filling (see the "opt.junk" option) tends to expose such bugs in the form of obviously incorrect results and/or coredumps. Conversely, zero filling (see the "opt.zero" option) eliminates the symptoms of such bugs. Between these two options, it is usually possible to quickly detect, diagnose, and eliminate such bugs.

This implementation does not provide much detail about the problems it detects, because the performance impact for storing such information would be prohibitive. However, jemalloc does integrate with the most excellent Valgrind tool if the --enable-valgrind configuration option is enabled.

DIAGNOSTIC MESSAGES

If any of the memory allocation/deallocation functions detect an error or warning condition, a message will be printed to file descriptor STDERR_FILENO. Errors will result in the process dumping core. If the "opt.abort" option is set, most warnings are treated as errors.

The malloc_message variable allows the programmer to override the function which emits the text strings forming the errors and warnings if for some reason the STDERR_FILENO file descriptor is not suitable for this. malloc_message() takes the cbopaque pointer argument that is NULL unless overridden by the arguments in a call to malloc_stats_print(), followed by a string pointer. Please note that doing anything which tries to allocate memory in this function is likely to result in a crash or deadlock.

All messages are prefixed by “<jemalloc>: ”.

RETURN VALUES

Standard API

The malloc() and calloc() functions return a pointer to the allocated memory if successful; otherwise a NULL pointer is returned and errno is set to ENOMEM.

The posix_memalign() function returns the value 0 if successful; otherwise it returns an error value. The posix_memalign() function will fail if:

EINVAL

The alignment parameter is not a power of 2 at least as large as sizeof(void *).

ENOMEM

Memory allocation error.

The aligned_alloc() function returns a pointer to the allocated memory if successful; otherwise a NULL pointer is returned and errno is set. The aligned_alloc() function will fail if:

EINVAL

The alignment parameter is not a power of 2.

ENOMEM

Memory allocation error.

The realloc() function returns a pointer, possibly identical to ptr, to the allocated memory if successful; otherwise a NULL pointer is returned, and errno is set to ENOMEM if the error was the result of an allocation failure. The realloc() function always leaves the original buffer intact when an error occurs.

The free() function returns no value.

Non-standard API

The mallocx() and rallocx() functions return a pointer to the allocated memory if successful; otherwise a NULL pointer is returned to indicate insufficient contiguous memory was available to service the allocation request.

The xallocx() function returns the real size of the resulting resized allocation pointed to by ptr, which is a value less than size if the allocation could not be adequately grown in place.

The sallocx() function returns the real size of the allocation pointed to by ptr.

The nallocx() function returns the real size that would result from a successful equivalent mallocx() function call, or zero if insufficient memory is available to perform the size computation.

The mallctl(), mallctlnametomib(), and mallctlbymib() functions return 0 on success; otherwise they return an error value. The functions will fail if:

EINVAL

newp is not NULL, and newlen is too large or too small. Alternatively, *oldlenp is too large or too small; in this case as much data as possible are read despite the error.

ENOENT

name or mib specifies an unknown/invalid value.

EPERM

Attempt to read or write void value, or attempt to write read-only value.

EAGAIN

A memory allocation failure occurred.

EFAULT

An interface with side effects failed in some way not directly related to mallctl*() read/write processing.

The malloc_usable_size() function returns the usable size of the allocation pointed to by ptr.

ENVIRONMENT

The following environment variable affects the execution of the allocation functions:

MALLOC_CONF

If the environment variable MALLOC_CONF is set, the characters it contains will be interpreted as options.

EXAMPLES

To dump core whenever a problem occurs:

ln -s 'abort:true' /etc/malloc.conf

To specify in the source a chunk size that is 16 MiB:

malloc_conf = "lg_chunk:24";

SEE ALSO

madvise(2), mmap(2), sbrk(2), utrace(2), alloca(3), atexit(3), getpagesize(3)

STANDARDS

The malloc(), calloc(), realloc(), and free() functions conform to ISO/IEC 9899:1990 (“ISO C90”).

The posix_memalign() function conforms to IEEE Std 1003.1-2001 (“POSIX.1”).

disque-1.0-rc1/deps/jemalloc/doc/jemalloc.xml.in000066400000000000000000003623541264176561100215600ustar00rootroot00000000000000 User Manual jemalloc @jemalloc_version@ Jason Evans Author JEMALLOC 3 jemalloc jemalloc general purpose memory allocation functions LIBRARY This manual describes jemalloc @jemalloc_version@. More information can be found at the jemalloc website. SYNOPSIS #include <jemalloc/jemalloc.h> Standard API void *malloc size_t size void *calloc size_t number size_t size int posix_memalign void **ptr size_t alignment size_t size void *aligned_alloc size_t alignment size_t size void *realloc void *ptr size_t size void free void *ptr Non-standard API void *mallocx size_t size int flags void *rallocx void *ptr size_t size int flags size_t xallocx void *ptr size_t size size_t extra int flags size_t sallocx void *ptr int flags void dallocx void *ptr int flags void sdallocx void *ptr size_t size int flags size_t nallocx size_t size int flags int mallctl const char *name void *oldp size_t *oldlenp void *newp size_t newlen int mallctlnametomib const char *name size_t *mibp size_t *miblenp int mallctlbymib const size_t *mib size_t miblen void *oldp size_t *oldlenp void *newp size_t newlen void malloc_stats_print void (*write_cb) void *, const char * void *cbopaque const char *opts size_t malloc_usable_size const void *ptr void (*malloc_message) void *cbopaque const char *s const char *malloc_conf; DESCRIPTION Standard API The malloc function allocates size bytes of uninitialized memory. The allocated space is suitably aligned (after possible pointer coercion) for storage of any type of object. The calloc function allocates space for number objects, each size bytes in length. The result is identical to calling malloc with an argument of number * size, with the exception that the allocated memory is explicitly initialized to zero bytes. The posix_memalign function allocates size bytes of memory such that the allocation's base address is a multiple of alignment, and returns the allocation in the value pointed to by ptr. The requested alignment must be a power of 2 at least as large as sizeof(void *). The aligned_alloc function allocates size bytes of memory such that the allocation's base address is a multiple of alignment. The requested alignment must be a power of 2. Behavior is undefined if size is not an integral multiple of alignment. The realloc function changes the size of the previously allocated memory referenced by ptr to size bytes. The contents of the memory are unchanged up to the lesser of the new and old sizes. If the new size is larger, the contents of the newly allocated portion of the memory are undefined. Upon success, the memory referenced by ptr is freed and a pointer to the newly allocated memory is returned. Note that realloc may move the memory allocation, resulting in a different return value than ptr. If ptr is NULL, the realloc function behaves identically to malloc for the specified size. The free function causes the allocated memory referenced by ptr to be made available for future allocations. If ptr is NULL, no action occurs. Non-standard API The mallocx, rallocx, xallocx, sallocx, dallocx, sdallocx, and nallocx functions all have a flags argument that can be used to specify options. The functions only check the options that are contextually relevant. Use bitwise or (|) operations to specify one or more of the following: MALLOCX_LG_ALIGN(la) Align the memory allocation to start at an address that is a multiple of (1 << la). 
This macro does not validate that la is within the valid range. MALLOCX_ALIGN(a) Align the memory allocation to start at an address that is a multiple of a, where a is a power of two. This macro does not validate that a is a power of 2. MALLOCX_ZERO Initialize newly allocated memory to contain zero bytes. In the growing reallocation case, the real size prior to reallocation defines the boundary between untouched bytes and those that are initialized to contain zero bytes. If this macro is absent, newly allocated memory is uninitialized. MALLOCX_TCACHE(tc) Use the thread-specific cache (tcache) specified by the identifier tc, which must have been acquired via the tcache.create mallctl. This macro does not validate that tc specifies a valid identifier. MALLOCX_TCACHE_NONE Do not use a thread-specific cache (tcache). Unless MALLOCX_TCACHE(tc) or MALLOCX_TCACHE_NONE is specified, an automatically managed tcache will be used under many circumstances. This macro cannot be used in the same flags argument as MALLOCX_TCACHE(tc). MALLOCX_ARENA(a) Use the arena specified by the index a. This macro has no effect for regions that were allocated via an arena other than the one specified. This macro does not validate that a specifies an arena index in the valid range. The mallocx function allocates at least size bytes of memory, and returns a pointer to the base address of the allocation. Behavior is undefined if size is 0, or if request size overflows due to size class and/or alignment constraints. The rallocx function resizes the allocation at ptr to be at least size bytes, and returns a pointer to the base address of the resulting allocation, which may or may not have moved from its original location. Behavior is undefined if size is 0, or if request size overflows due to size class and/or alignment constraints. The xallocx function resizes the allocation at ptr in place to be at least size bytes, and returns the real size of the allocation. If extra is non-zero, an attempt is made to resize the allocation to be at least (size + extra) bytes, though inability to allocate the extra byte(s) will not by itself result in failure to resize. Behavior is undefined if size is 0, or if (size + extra > SIZE_T_MAX). The sallocx function returns the real size of the allocation at ptr. The dallocx function causes the memory referenced by ptr to be made available for future allocations. The sdallocx function is an extension of dallocx with a size parameter to allow the caller to pass in the allocation size as an optimization. The minimum valid input size is the original requested size of the allocation, and the maximum valid input size is the corresponding value returned by nallocx or sallocx. The nallocx function allocates no memory, but it performs the same size computation as the mallocx function, and returns the real size of the allocation that would result from the equivalent mallocx function call. Behavior is undefined if size is 0, or if request size overflows due to size class and/or alignment constraints. The mallctl function provides a general interface for introspecting the memory allocator, as well as setting modifiable parameters and triggering actions. The period-separated name argument specifies a location in a tree-structured namespace; see the section for documentation on the tree contents. To read a value, pass a pointer via oldp to adequate space to contain the value, and a pointer to its length via oldlenp; otherwise pass NULL and NULL. 
Similarly, to write a value, pass a pointer to the value via newp, and its length via newlen; otherwise pass NULL and 0. The mallctlnametomib function provides a way to avoid repeated name lookups for applications that repeatedly query the same portion of the namespace, by translating a name to a “Management Information Base” (MIB) that can be passed repeatedly to mallctlbymib. Upon successful return from mallctlnametomib, mibp contains an array of *miblenp integers, where *miblenp is the lesser of the number of components in name and the input value of *miblenp. Thus it is possible to pass a *miblenp that is smaller than the number of period-separated name components, which results in a partial MIB that can be used as the basis for constructing a complete MIB. For name components that are integers (e.g. the 2 in arenas.bin.2.size), the corresponding MIB component will always be that integer. Therefore, it is legitimate to construct code like the following: The malloc_stats_print function writes human-readable summary statistics via the write_cb callback function pointer and cbopaque data passed to write_cb, or malloc_message if write_cb is NULL. This function can be called repeatedly. General information that never changes during execution can be omitted by specifying "g" as a character within the opts string. Note that malloc_message uses the mallctl* functions internally, so inconsistent statistics can be reported if multiple threads use these functions simultaneously. If is specified during configuration, “m” and “a” can be specified to omit merged arena and per arena statistics, respectively; “b”, “l”, and “h” can be specified to omit per size class statistics for bins, large objects, and huge objects, respectively. Unrecognized characters are silently ignored. Note that thread caching may prevent some statistics from being completely up to date, since extra locking would be required to merge counters that track thread cache operations. The malloc_usable_size function returns the usable size of the allocation pointed to by ptr. The return value may be larger than the size that was requested during allocation. The malloc_usable_size function is not a mechanism for in-place realloc; rather it is provided solely as a tool for introspection purposes. Any discrepancy between the requested allocation size and the size reported by malloc_usable_size should not be depended on, since such behavior is entirely implementation-dependent. TUNING Once, when the first call is made to one of the memory allocation routines, the allocator initializes its internals based in part on various options that can be specified at compile- or run-time. The string pointed to by the global variable malloc_conf, the “name” of the file referenced by the symbolic link named /etc/malloc.conf, and the value of the environment variable MALLOC_CONF, will be interpreted, in that order, from left to right as options. Note that malloc_conf may be read before main is entered, so the declaration of malloc_conf should specify an initializer that contains the final value to be read by jemalloc. malloc_conf is a compile-time setting, whereas /etc/malloc.conf and MALLOC_CONF can be safely set any time prior to program invocation. An options string is a comma-separated list of option:value pairs. There is one key corresponding to each opt.* mallctl (see the section for options documentation). For example, abort:true,narenas:1 sets the opt.abort and opt.narenas options. 
The malloc_stats_print function writes human-readable summary statistics via the write_cb callback function pointer and cbopaque data passed to write_cb, or malloc_message if write_cb is NULL. This function can be called repeatedly. General information that never changes during execution can be omitted by specifying "g" as a character within the opts string. Note that malloc_stats_print uses the mallctl* functions internally, so inconsistent statistics can be reported if multiple threads use these functions simultaneously. If --enable-stats is specified during configuration, “m” and “a” can be specified to omit merged arena and per arena statistics, respectively; “b”, “l”, and “h” can be specified to omit per size class statistics for bins, large objects, and huge objects, respectively. Unrecognized characters are silently ignored. Note that thread caching may prevent some statistics from being completely up to date, since extra locking would be required to merge counters that track thread cache operations.

The malloc_usable_size function returns the usable size of the allocation pointed to by ptr. The return value may be larger than the size that was requested during allocation. The malloc_usable_size function is not a mechanism for in-place realloc; rather it is provided solely as a tool for introspection purposes. Any discrepancy between the requested allocation size and the size reported by malloc_usable_size should not be depended on, since such behavior is entirely implementation-dependent.

TUNING

Once, when the first call is made to one of the memory allocation routines, the allocator initializes its internals based in part on various options that can be specified at compile- or run-time. The string pointed to by the global variable malloc_conf, the “name” of the file referenced by the symbolic link named /etc/malloc.conf, and the value of the environment variable MALLOC_CONF, will be interpreted, in that order, from left to right as options. Note that malloc_conf may be read before main is entered, so the declaration of malloc_conf should specify an initializer that contains the final value to be read by jemalloc. malloc_conf is a compile-time setting, whereas /etc/malloc.conf and MALLOC_CONF can be safely set any time prior to program invocation.

An options string is a comma-separated list of option:value pairs. There is one key corresponding to each opt.* mallctl (see the MALLCTL NAMESPACE section for options documentation). For example, abort:true,narenas:1 sets the opt.abort and opt.narenas options. Some options have boolean values (true/false), others have integer values (base 8, 10, or 16, depending on prefix), and yet others have raw string values.

IMPLEMENTATION NOTES

Traditionally, allocators have used sbrk(2) to obtain memory, which is suboptimal for several reasons, including race conditions, increased fragmentation, and artificial limitations on maximum usable memory. If sbrk(2) is supported by the operating system, this allocator uses both mmap(2) and sbrk(2), in that order of preference; otherwise only mmap(2) is used.

This allocator uses multiple arenas in order to reduce lock contention for threaded programs on multi-processor systems. This works well with regard to threading scalability, but incurs some costs. There is a small fixed per-arena overhead, and additionally, arenas manage memory completely independently of each other, which means a small fixed increase in overall memory fragmentation. These overheads are not generally an issue, given the number of arenas normally used. Note that using substantially more arenas than the default is not likely to improve performance, mainly due to reduced cache performance. However, it may make sense to reduce the number of arenas if an application does not make much use of the allocation functions.

In addition to multiple arenas, unless --disable-tcache is specified during configuration, this allocator supports thread-specific caching for small and large objects, in order to make it possible to completely avoid synchronization for most allocation requests. Such caching allows very fast allocation in the common case, but it increases memory usage and fragmentation, since a bounded number of objects can remain allocated in each thread cache.

Memory is conceptually broken into equal-sized chunks, where the chunk size is a power of two that is greater than the page size. Chunks are always aligned to multiples of the chunk size. This alignment makes it possible to find metadata for user objects very quickly. User objects are broken into three categories according to size: small, large, and huge. Small and large objects are managed entirely by arenas; huge objects are additionally aggregated in a single data structure that is shared by all threads. Huge objects are typically used by applications infrequently enough that this single data structure is not a scalability issue.

Each chunk that is managed by an arena tracks its contents as runs of contiguous pages (unused, backing a set of small objects, or backing one large object). The combination of chunk alignment and chunk page maps makes it possible to determine all metadata regarding small and large allocations in constant time.

Small objects are managed in groups by page runs. Each run maintains a bitmap to track which regions are in use. Allocation requests that are no more than half the quantum (8 or 16, depending on architecture) are rounded up to the nearest power of two that is at least sizeof(double). All other object size classes are multiples of the quantum, spaced such that there are four size classes for each doubling in size, which limits internal fragmentation to approximately 20% for all but the smallest size classes. Small size classes are smaller than four times the page size, large size classes are smaller than the chunk size (see the opt.lg_chunk option), and huge size classes extend from the chunk size up to one size class less than the full address space size.

Allocations are packed tightly together, which can be an issue for multi-threaded applications. If you need to assure that allocations do not suffer from cacheline sharing, round your allocation requests up to the nearest multiple of the cacheline size, or specify cacheline alignment when allocating, as in the sketch below.
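A minimal sketch of the cacheline-alignment advice above, assuming 64-byte cachelines (CACHELINE is an illustrative constant, not part of the API):

    #include <jemalloc/jemalloc.h>

    #define CACHELINE 64 /* assumption: 64-byte cachelines */

    /* A zeroed, cacheline-aligned slot per thread avoids false sharing. */
    void *
    alloc_counter_slot(void)
    {
        return mallocx(CACHELINE, MALLOCX_ALIGN(CACHELINE) | MALLOCX_ZERO);
    }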
The realloc, rallocx, and xallocx functions may resize allocations without moving them under limited circumstances. Unlike the *allocx API, the standard API does not officially round up the usable size of an allocation to the nearest size class, so technically it is necessary to call realloc to grow e.g. a 9-byte allocation to 16 bytes, or shrink a 16-byte allocation to 9 bytes. Growth and shrinkage trivially succeed in place as long as the pre-size and post-size both round up to the same size class. No other API guarantees are made regarding in-place resizing, but the current implementation also tries to resize large and huge allocations in place, as long as the pre-size and post-size are both large or both huge. In such cases shrinkage always succeeds for large size classes, but for huge size classes the chunk allocator must support splitting (see arena.<i>.chunk_hooks). Growth only succeeds if the trailing memory is currently available, and additionally for huge size classes the chunk allocator must support merging. A sketch of probing this in-place behavior with xallocx appears after the table.

Assuming 2 MiB chunks, 4 KiB pages, and a 16-byte quantum on a 64-bit system, the size classes in each category are as shown in the following table.

Size classes:

Category  Spacing   Size
Small     lg        [8]
          16        [16, 32, 48, 64, 80, 96, 112, 128]
          32        [160, 192, 224, 256]
          64        [320, 384, 448, 512]
          128       [640, 768, 896, 1024]
          256       [1280, 1536, 1792, 2048]
          512       [2560, 3072, 3584, 4096]
          1 KiB     [5 KiB, 6 KiB, 7 KiB, 8 KiB]
          2 KiB     [10 KiB, 12 KiB, 14 KiB]
Large     2 KiB     [16 KiB]
          4 KiB     [20 KiB, 24 KiB, 28 KiB, 32 KiB]
          8 KiB     [40 KiB, 48 KiB, 56 KiB, 64 KiB]
          16 KiB    [80 KiB, 96 KiB, 112 KiB, 128 KiB]
          32 KiB    [160 KiB, 192 KiB, 224 KiB, 256 KiB]
          64 KiB    [320 KiB, 384 KiB, 448 KiB, 512 KiB]
          128 KiB   [640 KiB, 768 KiB, 896 KiB, 1 MiB]
          256 KiB   [1280 KiB, 1536 KiB, 1792 KiB]
Huge      256 KiB   [2 MiB]
          512 KiB   [2560 KiB, 3 MiB, 3584 KiB, 4 MiB]
          1 MiB     [5 MiB, 6 MiB, 7 MiB, 8 MiB]
          2 MiB     [10 MiB, 12 MiB, 14 MiB, 16 MiB]
          4 MiB     [20 MiB, 24 MiB, 28 MiB, 32 MiB]
          8 MiB     [40 MiB, 48 MiB, 56 MiB, 64 MiB]
          ...       ...
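A sketch probing the in-place resize behavior described above, with sizes chosen against the table (error handling kept minimal):

    #include <jemalloc/jemalloc.h>

    void
    resize_in_place_demo(void)
    {
        void *p = mallocx(9, 0); /* rounds up to the 16-byte size class */

        /* 9 and 16 share a size class, so this trivially succeeds in place. */
        size_t real = xallocx(p, 16, 0, 0);
        (void)real;

        /* Growing past the size class may fail in place... */
        if (xallocx(p, 4096, 0, 0) < 4096) {
            /* ...in which case rallocx performs a (possibly moving) resize. */
            void *q = rallocx(p, 4096, 0);
            if (q != NULL)
                p = q;
        }
        dallocx(p, 0);
    }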
MALLCTL NAMESPACE

The following names are defined in the namespace accessible via the mallctl* functions. Value types are specified in parentheses, their readable/writable statuses are encoded as rw, r-, -w, or --, and required build configuration flags follow, if any. A name element encoded as <i> or <j> indicates an integer component, where the integer varies from 0 to some upper value that must be determined via introspection. In the case of stats.arenas.<i>.*, <i> equal to arenas.narenas can be used to access the summation of statistics from all arenas. Take special note of the epoch mallctl, which controls refreshing of cached dynamic statistics.

version (const char *) r- Return the jemalloc version string.
epoch (uint64_t) rw If a value is passed in, refresh the data from which the mallctl* functions report values, and increment the epoch. Return the current epoch. This is useful for detecting whether another thread caused a refresh.
config.cache_oblivious (bool) r- --enable-cache-oblivious was specified during build configuration.
config.debug (bool) r- --enable-debug was specified during build configuration.
config.fill (bool) r- --enable-fill was specified during build configuration.
config.lazy_lock (bool) r- --enable-lazy-lock was specified during build configuration.
config.munmap (bool) r- --enable-munmap was specified during build configuration.
config.prof (bool) r- --enable-prof was specified during build configuration.
config.prof_libgcc (bool) r- --disable-prof-libgcc was not specified during build configuration.
config.prof_libunwind (bool) r- --enable-prof-libunwind was specified during build configuration.
config.stats (bool) r- --enable-stats was specified during build configuration.
config.tcache (bool) r- --disable-tcache was not specified during build configuration.
config.tls (bool) r- --disable-tls was not specified during build configuration.
config.utrace (bool) r- --enable-utrace was specified during build configuration.
config.valgrind (bool) r- --enable-valgrind was specified during build configuration.
config.xmalloc (bool) r- --enable-xmalloc was specified during build configuration.
opt.abort (bool) r- Abort-on-warning enabled/disabled. If true, most warnings are fatal. The process will call abort(3) in these cases. This option is disabled by default unless --enable-debug is specified during configuration, in which case it is enabled by default.
opt.dss (const char *) r- dss (sbrk(2)) allocation precedence as related to mmap(2) allocation. The following settings are supported if sbrk(2) is supported by the operating system: “disabled”, “primary”, and “secondary”; otherwise only “disabled” is supported. The default is “secondary” if sbrk(2) is supported by the operating system; “disabled” otherwise.
opt.lg_chunk (size_t) r- Virtual memory chunk size (log base 2). If a chunk size outside the supported size range is specified, the size is silently clipped to the minimum/maximum supported size. The default chunk size is 2 MiB (2^21).
opt.narenas (size_t) r- Maximum number of arenas to use for automatic multiplexing of threads and arenas. The default is four times the number of CPUs, or one if there is a single CPU.
opt.lg_dirty_mult (ssize_t) r- Per-arena minimum ratio (log base 2) of active to dirty pages. Some dirty unused pages may be allowed to accumulate, within the limit set by the ratio (or one chunk worth of dirty pages, whichever is greater), before informing the kernel about some of those pages via madvise(2) or a similar system call. This provides the kernel with sufficient information to recycle dirty pages if physical memory becomes scarce and the pages remain unused. The default minimum ratio is 8:1 (2^3:1); an option value of -1 will disable dirty page purging.
See arenas.lg_dirty_mult and arena.<i>.lg_dirty_mult for related dynamic control options.
opt.stats_print (bool) r- Enable/disable statistics printing at exit. If enabled, the malloc_stats_print function is called at program exit via an atexit(3) function. If --enable-stats is specified during configuration, this has the potential to cause deadlock for a multi-threaded process that exits while one or more threads are executing in the memory allocation functions. Furthermore, atexit may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls atexit, so this option is not universally usable (though the application can register its own atexit function with equivalent functionality). Therefore, this option should only be used with care; it is primarily intended as a performance tuning aid during application development. This option is disabled by default.
opt.junk (const char *) r- [] Junk filling. If set to "alloc", each byte of uninitialized allocated memory will be initialized to 0xa5. If set to "free", all deallocated memory will be initialized to 0x5a. If set to "true", both allocated and deallocated memory will be initialized, and if set to "false", junk filling will be disabled entirely. This is intended for debugging and will impact performance negatively. This option is "false" by default unless --enable-debug is specified during configuration, in which case it is "true" by default unless running inside Valgrind.
opt.quarantine (size_t) r- [] Per thread quarantine size in bytes. If non-zero, each thread maintains a FIFO object quarantine that stores up to the specified number of bytes of memory. The quarantined memory is not freed until it is released from quarantine, though it is immediately junk-filled if the opt.junk option is enabled. This feature is of particular use in combination with Valgrind, which can detect attempts to access quarantined objects. This is intended for debugging and will impact performance negatively. The default quarantine size is 0 unless running inside Valgrind, in which case the default is 16 MiB.
opt.redzone (bool) r- [] Redzones enabled/disabled. If enabled, small allocations have redzones before and after them. Furthermore, if the opt.junk option is enabled, the redzones are checked for corruption during deallocation. However, the primary intended purpose of this feature is to be used in combination with Valgrind, which needs redzones in order to do effective buffer overflow/underflow detection. This option is intended for debugging and will impact performance negatively. This option is disabled by default unless running inside Valgrind.
opt.zero (bool) r- [] Zero filling enabled/disabled. If enabled, each byte of uninitialized allocated memory will be initialized to 0. Note that this initialization only happens once for each byte, so realloc and rallocx calls do not zero memory that was previously allocated. This is intended for debugging and will impact performance negatively. This option is disabled by default.
opt.utrace (bool) r- [] Allocation tracing based on utrace(2) enabled/disabled. This option is disabled by default.
opt.xmalloc (bool) r- [] Abort-on-out-of-memory enabled/disabled. If enabled, rather than returning failure for any allocation function, display a diagnostic message on STDERR_FILENO and cause the program to drop core (using abort(3)). If an application is designed to depend on this behavior, set the option at compile time by including the definition shown below in the source code. This option is disabled by default.
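The compile-time setting referenced in the opt.xmalloc entry is the application-provided malloc_conf initializer described in the TUNING section; a minimal sketch:

    /* Application source file: default options string read by jemalloc
     * before main() is entered. */
    const char *malloc_conf = "xmalloc:true";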
opt.tcache (bool) r- [] Thread-specific caching (tcache) enabled/disabled. When there are multiple threads, each thread uses a tcache for objects up to a certain size. Thread-specific caching allows many allocations to be satisfied without performing any thread synchronization, at the cost of increased memory use. See the opt.lg_tcache_max option for related tuning information. This option is enabled by default unless running inside Valgrind, in which case it is forcefully disabled.
opt.lg_tcache_max (size_t) r- [] Maximum size class (log base 2) to cache in the thread-specific cache (tcache). At a minimum, all small size classes are cached, and at a maximum all large size classes are cached. The default maximum is 32 KiB (2^15).
opt.prof (bool) r- [] Memory profiling enabled/disabled. If enabled, profile memory allocation activity. See the opt.prof_active option for on-the-fly activation/deactivation. See the opt.lg_prof_sample option for probabilistic sampling control. See the opt.prof_accum option for control of cumulative sample reporting. See the opt.lg_prof_interval option for information on interval-triggered profile dumping, the opt.prof_gdump option for information on high-water-triggered profile dumping, and the opt.prof_final option for final profile dumping. Profile output is compatible with the jeprof command, which is based on the pprof that is developed as part of the gperftools package.
opt.prof_prefix (const char *) r- [] Filename prefix for profile dumps. If the prefix is set to the empty string, no automatic dumps will occur; this is primarily useful for disabling the automatic final heap dump (which also disables leak reporting, if enabled). The default prefix is jeprof.
opt.prof_active (bool) r- [] Profiling activated/deactivated. This is a secondary control mechanism that makes it possible to start the application with profiling enabled (see the opt.prof option) but inactive, then toggle profiling at any time during program execution with the prof.active mallctl. This option is enabled by default.
opt.prof_thread_active_init (bool) r- [] Initial setting for thread.prof.active in newly created threads. The initial setting for newly created threads can also be changed during execution via the prof.thread_active_init mallctl. This option is enabled by default.
opt.lg_prof_sample (size_t) r- [] Average interval (log base 2) between allocation samples, as measured in bytes of allocation activity. Increasing the sampling interval decreases profile fidelity, but also decreases the computational overhead. The default sample interval is 512 KiB (2^19 B).
opt.prof_accum (bool) r- [] Reporting of cumulative object/byte counts in profile dumps enabled/disabled. If this option is enabled, every unique backtrace must be stored for the duration of execution. Depending on the application, this can impose a large memory overhead, and the cumulative counts are not always of interest. This option is disabled by default.
opt.lg_prof_interval (ssize_t) r- [] Average interval (log base 2) between memory profile dumps, as measured in bytes of allocation activity. The actual interval between dumps may be sporadic because decentralized allocation counters are used to avoid synchronization bottlenecks. Profiles are dumped to files named according to the pattern <prefix>.<pid>.<seq>.i<iseq>.heap, where <prefix> is controlled by the opt.prof_prefix option. By default, interval-triggered profile dumping is disabled (encoded as -1).
opt.prof_gdump (bool) r- [] Set the initial state of prof.gdump, which when enabled triggers a memory profile dump every time the total virtual memory exceeds the previous maximum. This option is disabled by default.
opt.prof_final (bool) r- [] Use an atexit(3) function to dump final memory usage to a file named according to the pattern <prefix>.<pid>.<seq>.f.heap, where <prefix> is controlled by the opt.prof_prefix option. Note that atexit may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls atexit, so this option is not universally usable (though the application can register its own atexit function with equivalent functionality). This option is disabled by default.
opt.prof_leak (bool) r- [] Leak reporting enabled/disabled. If enabled, use an atexit(3) function to report memory leaks detected by allocation sampling. See the opt.prof option for information on analyzing heap profile output. This option is disabled by default.
thread.arena (unsigned) rw Get or set the arena associated with the calling thread. If the specified arena was not initialized beforehand (see the arenas.initialized mallctl), it will be automatically initialized as a side effect of calling this interface.
thread.allocated (uint64_t) r- [] Get the total number of bytes ever allocated by the calling thread. This counter has the potential to wrap around; it is up to the application to appropriately interpret the counter in such cases.
thread.allocatedp (uint64_t *) r- [] Get a pointer to the value that is returned by the thread.allocated mallctl. This is useful for avoiding the overhead of repeated mallctl* calls.
thread.deallocated (uint64_t) r- [] Get the total number of bytes ever deallocated by the calling thread. This counter has the potential to wrap around; it is up to the application to appropriately interpret the counter in such cases.
thread.deallocatedp (uint64_t *) r- [] Get a pointer to the value that is returned by the thread.deallocated mallctl. This is useful for avoiding the overhead of repeated mallctl* calls.
thread.tcache.enabled (bool) rw [] Enable/disable calling thread's tcache. The tcache is implicitly flushed as a side effect of becoming disabled (see thread.tcache.flush).
thread.tcache.flush (void) -- [] Flush calling thread's thread-specific cache (tcache). This interface releases all cached objects and internal data structures associated with the calling thread's tcache. Ordinarily, this interface need not be called, since automatic periodic incremental garbage collection occurs, and the thread cache is automatically discarded when a thread exits. However, garbage collection is triggered by allocation activity, so it is possible for a thread that stops allocating/deallocating to retain its cache indefinitely, in which case the developer may find manual flushing useful.
thread.prof.name (const char *) r- or -w [] Get/set the descriptive name associated with the calling thread in memory profile dumps. An internal copy of the name string is created, so the input string need not be maintained after this interface completes execution. The output string of this interface should be copied for non-ephemeral uses, because multiple implementation details can cause asynchronous string deallocation. Furthermore, each invocation of this interface can only read or write; simultaneous read/write is not supported due to string lifetime limitations. The name string must be nil-terminated and comprised only of characters in the sets recognized by isgraph(3) and isblank(3).
thread.prof.active (bool) rw [] Control whether sampling is currently active for the calling thread. This is an activation mechanism in addition to prof.active; both must be active for the calling thread to sample. This flag is enabled by default.
tcache.create (unsigned) r- [] Create an explicit thread-specific cache (tcache) and return an identifier that can be passed to the MALLOCX_TCACHE(tc) macro to explicitly use the specified cache rather than the automatically managed one that is used by default. Each explicit cache can be used by only one thread at a time; the application must assure that this constraint holds.
tcache.flush (unsigned) -w [] Flush the specified thread-specific cache (tcache). The same considerations apply to this interface as to thread.tcache.flush, except that the tcache will never be automatically discarded.
tcache.destroy (unsigned) -w [] Flush the specified thread-specific cache (tcache) and make the identifier available for use during a future tcache creation.
arena.<i>.purge (void) -- Purge unused dirty pages for arena <i>, or for all arenas if <i> equals arenas.narenas.
arena.<i>.dss (const char *) rw Set the precedence of dss allocation as related to mmap allocation for arena <i>, or for all arenas if <i> equals arenas.narenas. See opt.dss for supported settings.
arena.<i>.lg_dirty_mult (ssize_t) rw Current per-arena minimum ratio (log base 2) of active to dirty pages for arena <i>. Each time this interface is set and the ratio is increased, pages are synchronously purged as necessary to impose the new ratio. See opt.lg_dirty_mult for additional information.
arena.<i>.chunk_hooks (chunk_hooks_t) rw Get or set the chunk management hook functions for arena <i>. The functions must be capable of operating on all extant chunks associated with arena <i>, usually by passing unknown chunks to the replaced functions. In practice, it is feasible to control allocation for arenas created via arenas.extend such that all chunks originate from an application-supplied chunk allocator (by setting custom chunk hook functions just after arena creation), but the automatically created arenas may have already created chunks prior to the application having an opportunity to take over chunk allocation. The chunk_hooks_t structure comprises function pointers which are described individually below. jemalloc uses these functions to manage chunk lifetime, which starts off with allocation of mapped committed memory, in the simplest case followed by deallocation. However, there are performance and platform reasons to retain chunks for later reuse. Cleanup attempts cascade from deallocation to decommit to purging, which gives the chunk management functions opportunities to reject the most permanent cleanup operations in favor of less permanent (and often less costly) operations. The chunk splitting and merging operations can also be opted out of, but this is mainly intended to support platforms on which virtual memory mappings provided by the operating system kernel do not automatically coalesce and split, e.g. Windows.
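As a minimal sketch of installing a custom hook for arena 0 via mallctl (my_dalloc is a hypothetical replacement, and the chunk_hooks_t member names alloc, dalloc, commit, decommit, purge, split, and merge are assumed to mirror the hook typedefs described next):

    #include <stdbool.h>
    #include <stddef.h>
    #include <jemalloc/jemalloc.h>

    /* Hypothetical hook: opt out of deallocation so chunks are retained. */
    static bool
    my_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind)
    {
        return true; /* true = opt-out; the mapping stays available for reuse */
    }

    void
    install_chunk_hooks(void)
    {
        chunk_hooks_t hooks;
        size_t sz = sizeof(hooks);

        /* Read the current hooks, replace one member, and write them back. */
        mallctl("arena.0.chunk_hooks", &hooks, &sz, NULL, 0);
        hooks.dalloc = my_dalloc;
        mallctl("arena.0.chunk_hooks", NULL, NULL, &hooks, sz);
    }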
typedef void *(chunk_alloc_t)(void *chunk, size_t size, size_t alignment, bool *zero, bool *commit, unsigned arena_ind);

A chunk allocation function conforms to the chunk_alloc_t type and upon success returns a pointer to size bytes of mapped memory on behalf of arena arena_ind such that the chunk's base address is a multiple of alignment, as well as setting *zero to indicate whether the chunk is zeroed and *commit to indicate whether the chunk is committed. Upon error the function returns NULL and leaves *zero and *commit unmodified. The size parameter is always a multiple of the chunk size. The alignment parameter is always a power of two at least as large as the chunk size. Zeroing is mandatory if *zero is true upon function entry. Committing is mandatory if *commit is true upon function entry. If chunk is not NULL, the returned pointer must be chunk on success or NULL on error. Committed memory may be committed in absolute terms as on a system that does not overcommit, or in implicit terms as on a system that overcommits and satisfies physical memory needs on demand via soft page faults. Note that replacing the default chunk allocation function makes the arena's arena.<i>.dss setting irrelevant.

typedef bool (chunk_dalloc_t)(void *chunk, size_t size, bool committed, unsigned arena_ind);

A chunk deallocation function conforms to the chunk_dalloc_t type and deallocates a chunk of given size with committed/decommitted memory as indicated, on behalf of arena arena_ind, returning false upon success. If the function returns true, this indicates opt-out from deallocation; the virtual memory mapping associated with the chunk remains mapped, in the same commit state, and available for future use, in which case it will be automatically retained for later reuse.

typedef bool (chunk_commit_t)(void *chunk, size_t size, size_t offset, size_t length, unsigned arena_ind);

A chunk commit function conforms to the chunk_commit_t type and commits zeroed physical memory to back pages within a chunk of given size at offset bytes, extending for length on behalf of arena arena_ind, returning false upon success. Committed memory may be committed in absolute terms as on a system that does not overcommit, or in implicit terms as on a system that overcommits and satisfies physical memory needs on demand via soft page faults. If the function returns true, this indicates insufficient physical memory to satisfy the request.

typedef bool (chunk_decommit_t)(void *chunk, size_t size, size_t offset, size_t length, unsigned arena_ind);

A chunk decommit function conforms to the chunk_decommit_t type and decommits any physical memory that is backing pages within a chunk of given size at offset bytes, extending for length on behalf of arena arena_ind, returning false upon success, in which case the pages will be committed via the chunk commit function before being reused. If the function returns true, this indicates opt-out from decommit; the memory remains committed and available for future use, in which case it will be automatically retained for later reuse.

typedef bool (chunk_purge_t)(void *chunk, size_t size, size_t offset, size_t length, unsigned arena_ind);

A chunk purge function conforms to the chunk_purge_t type and optionally discards physical pages within the virtual memory mapping associated with chunk of given size at offset bytes, extending for length on behalf of arena arena_ind, returning false if pages within the purged virtual memory range will be zero-filled the next time they are accessed.
typedef bool (chunk_split_t)(void *chunk, size_t size, size_t size_a, size_t size_b, bool committed, unsigned arena_ind);

A chunk split function conforms to the chunk_split_t type and optionally splits chunk of given size into two adjacent chunks, the first of size_a bytes, and the second of size_b bytes, operating on committed/decommitted memory as indicated, on behalf of arena arena_ind, returning false upon success. If the function returns true, this indicates that the chunk remains unsplit and therefore should continue to be operated on as a whole.

typedef bool (chunk_merge_t)(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, bool committed, unsigned arena_ind);

A chunk merge function conforms to the chunk_merge_t type and optionally merges adjacent chunks, chunk_a of given size_a and chunk_b of given size_b into one contiguous chunk, operating on committed/decommitted memory as indicated, on behalf of arena arena_ind, returning false upon success. If the function returns true, this indicates that the chunks remain distinct mappings and therefore should continue to be operated on independently.

arenas.narenas (unsigned) r- Current limit on number of arenas.
arenas.initialized (bool *) r- An array of arenas.narenas booleans. Each boolean indicates whether the corresponding arena is initialized.
arenas.lg_dirty_mult (ssize_t) rw Current default per-arena minimum ratio (log base 2) of active to dirty pages, used to initialize arena.<i>.lg_dirty_mult during arena creation. See opt.lg_dirty_mult for additional information.
arenas.quantum (size_t) r- Quantum size.
arenas.page (size_t) r- Page size.
arenas.tcache_max (size_t) r- [] Maximum thread-cached size class.
arenas.nbins (unsigned) r- Number of bin size classes.
arenas.nhbins (unsigned) r- [] Total number of thread cache bin size classes.
arenas.bin.<i>.size (size_t) r- Maximum size supported by size class.
arenas.bin.<i>.nregs (uint32_t) r- Number of regions per page run.
arenas.bin.<i>.run_size (size_t) r- Number of bytes per page run.
arenas.nlruns (unsigned) r- Total number of large size classes.
arenas.lrun.<i>.size (size_t) r- Maximum size supported by this large size class.
arenas.nhchunks (unsigned) r- Total number of huge size classes.
arenas.hchunk.<i>.size (size_t) r- Maximum size supported by this huge size class.
arenas.extend (unsigned) r- Extend the array of arenas by appending a new arena, and returning the new arena index.
prof.thread_active_init (bool) rw [] Control the initial setting for thread.prof.active in newly created threads. See the opt.prof_thread_active_init option for additional information.
prof.active (bool) rw [] Control whether sampling is currently active. See the opt.prof_active option for additional information, as well as the interrelated thread.prof.active mallctl.
prof.dump (const char *) -w [] Dump a memory profile to the specified file, or if NULL is specified, to a file according to the pattern <prefix>.<pid>.<seq>.m<mseq>.heap, where <prefix> is controlled by the opt.prof_prefix option.
prof.gdump (bool) rw [] When enabled, trigger a memory profile dump every time the total virtual memory exceeds the previous maximum. Profiles are dumped to files named according to the pattern <prefix>.<pid>.<seq>.u<useq>.heap, where <prefix> is controlled by the opt.prof_prefix option.
prof.reset (size_t) -w [] Reset all memory profile statistics, and optionally update the sample rate (see opt.lg_prof_sample and prof.lg_sample).
prof.lg_sample (size_t) r- [] Get the current sample rate (see opt.lg_prof_sample).
prof.interval (uint64_t) r- [] Average number of bytes allocated between interval-based profile dumps. See the opt.lg_prof_interval option for additional information.
stats.cactive (size_t *) r- [] Pointer to a counter that contains an approximate count of the current number of bytes in active pages. The estimate may be high, but never low, because each arena rounds up when computing its contribution to the counter. Note that the epoch mallctl has no bearing on this counter. Furthermore, counter consistency is maintained via atomic operations, so it is necessary to use an atomic operation in order to guarantee a consistent read when dereferencing the pointer.
stats.allocated (size_t) r- [] Total number of bytes allocated by the application.
stats.active (size_t) r- [] Total number of bytes in active pages allocated by the application. This is a multiple of the page size, and greater than or equal to stats.allocated. This does not include stats.arenas.<i>.pdirty, nor pages entirely devoted to allocator metadata.
stats.metadata (size_t) r- [] Total number of bytes dedicated to metadata, which comprise base allocations used for bootstrap-sensitive internal allocator data structures, arena chunk headers (see stats.arenas.<i>.metadata.mapped), and internal allocations (see stats.arenas.<i>.metadata.allocated).
stats.resident (size_t) r- [] Maximum number of bytes in physically resident data pages mapped by the allocator, comprising all pages dedicated to allocator metadata, pages backing active allocations, and unused dirty pages. This is a maximum rather than precise because pages may not actually be physically resident if they correspond to demand-zeroed virtual memory that has not yet been touched. This is a multiple of the page size, and is larger than stats.active.
stats.mapped (size_t) r- [] Total number of bytes in active chunks mapped by the allocator. This is a multiple of the chunk size, and is larger than stats.active. This does not include inactive chunks, even those that contain unused dirty pages, which means that there is no strict ordering between this and stats.resident.
stats.arenas.<i>.dss (const char *) r- dss (sbrk(2)) allocation precedence as related to mmap(2) allocation. See opt.dss for details.
stats.arenas.<i>.lg_dirty_mult (ssize_t) r- Minimum ratio (log base 2) of active to dirty pages. See opt.lg_dirty_mult for details.
stats.arenas.<i>.nthreads (unsigned) r- Number of threads currently assigned to arena.
stats.arenas.<i>.pactive (size_t) r- Number of pages in active runs.
stats.arenas.<i>.pdirty (size_t) r- Number of pages within unused runs that are potentially dirty, and for which madvise(..., MADV_DONTNEED) or similar has not been called.
stats.arenas.<i>.mapped (size_t) r- [] Number of mapped bytes.
stats.arenas.<i>.metadata.mapped (size_t) r- [] Number of mapped bytes in arena chunk headers, which track the states of the non-metadata pages.
stats.arenas.<i>.metadata.allocated (size_t) r- [] Number of bytes dedicated to internal allocations. Internal allocations differ from application-originated allocations in that they are for internal use, and that they are omitted from heap profiles. This statistic is reported separately from stats.metadata and stats.arenas.<i>.metadata.mapped because it overlaps with e.g. the stats.allocated and stats.active statistics, whereas the other metadata statistics do not.
stats.arenas.<i>.npurge (uint64_t) r- [] Number of dirty page purge sweeps performed.
stats.arenas.<i>.nmadvise (uint64_t) r- [] Number of madvise(..., MADV_DONTNEED) or similar calls made to purge dirty pages.
stats.arenas.<i>.purged (uint64_t) r- [] Number of pages purged.
stats.arenas.<i>.small.allocated (size_t) r- [] Number of bytes currently allocated by small objects.
stats.arenas.<i>.small.nmalloc (uint64_t) r- [] Cumulative number of allocation requests served by small bins.
stats.arenas.<i>.small.ndalloc (uint64_t) r- [] Cumulative number of small objects returned to bins.
stats.arenas.<i>.small.nrequests (uint64_t) r- [] Cumulative number of small allocation requests.
stats.arenas.<i>.large.allocated (size_t) r- [] Number of bytes currently allocated by large objects.
stats.arenas.<i>.large.nmalloc (uint64_t) r- [] Cumulative number of large allocation requests served directly by the arena.
stats.arenas.<i>.large.ndalloc (uint64_t) r- [] Cumulative number of large deallocation requests served directly by the arena.
stats.arenas.<i>.large.nrequests (uint64_t) r- [] Cumulative number of large allocation requests.
stats.arenas.<i>.huge.allocated (size_t) r- [] Number of bytes currently allocated by huge objects.
stats.arenas.<i>.huge.nmalloc (uint64_t) r- [] Cumulative number of huge allocation requests served directly by the arena.
stats.arenas.<i>.huge.ndalloc (uint64_t) r- [] Cumulative number of huge deallocation requests served directly by the arena.
stats.arenas.<i>.huge.nrequests (uint64_t) r- [] Cumulative number of huge allocation requests.
stats.arenas.<i>.bins.<j>.nmalloc (uint64_t) r- [] Cumulative number of allocations served by bin.
stats.arenas.<i>.bins.<j>.ndalloc (uint64_t) r- [] Cumulative number of allocations returned to bin.
stats.arenas.<i>.bins.<j>.nrequests (uint64_t) r- [] Cumulative number of allocation requests.
stats.arenas.<i>.bins.<j>.curregs (size_t) r- [] Current number of regions for this size class.
stats.arenas.<i>.bins.<j>.nfills (uint64_t) r- [] Cumulative number of tcache fills.
stats.arenas.<i>.bins.<j>.nflushes (uint64_t) r- [] Cumulative number of tcache flushes.
stats.arenas.<i>.bins.<j>.nruns (uint64_t) r- [] Cumulative number of runs created.
stats.arenas.<i>.bins.<j>.nreruns (uint64_t) r- [] Cumulative number of times the current run from which to allocate changed.
stats.arenas.<i>.bins.<j>.curruns (size_t) r- [] Current number of runs.
stats.arenas.<i>.lruns.<j>.nmalloc (uint64_t) r- [] Cumulative number of allocation requests for this size class served directly by the arena.
stats.arenas.<i>.lruns.<j>.ndalloc (uint64_t) r- [] Cumulative number of deallocation requests for this size class served directly by the arena.
stats.arenas.<i>.lruns.<j>.nrequests (uint64_t) r- [] Cumulative number of allocation requests for this size class.
stats.arenas.<i>.lruns.<j>.curruns (size_t) r- [] Current number of runs for this size class.
stats.arenas.<i>.hchunks.<j>.nmalloc (uint64_t) r- [] Cumulative number of allocation requests for this size class served directly by the arena.
stats.arenas.<i>.hchunks.<j>.ndalloc (uint64_t) r- [] Cumulative number of deallocation requests for this size class served directly by the arena.
stats.arenas.<i>.hchunks.<j>.nrequests (uint64_t) r- [] Cumulative number of allocation requests for this size class.
stats.arenas.<i>.hchunks.<j>.curhchunks (size_t) r- [] Current number of huge allocations for this size class.

DEBUGGING MALLOC PROBLEMS

When debugging, it is a good idea to configure/build jemalloc with the --enable-debug and --enable-fill options, and recompile the program with suitable options and symbols for debugger support.
When so configured, jemalloc incorporates a wide variety of run-time assertions that catch application errors such as double-free, write-after-free, etc.

Programs often accidentally depend on “uninitialized” memory actually being filled with zero bytes. Junk filling (see the opt.junk option) tends to expose such bugs in the form of obviously incorrect results and/or coredumps. Conversely, zero filling (see the opt.zero option) eliminates the symptoms of such bugs. Between these two options, it is usually possible to quickly detect, diagnose, and eliminate such bugs.

This implementation does not provide much detail about the problems it detects, because the performance impact for storing such information would be prohibitive. However, jemalloc does integrate with the most excellent Valgrind tool if the --enable-valgrind configuration option is enabled.

DIAGNOSTIC MESSAGES

If any of the memory allocation/deallocation functions detect an error or warning condition, a message will be printed to file descriptor STDERR_FILENO. Errors will result in the process dumping core. If the opt.abort option is set, most warnings are treated as errors. The malloc_message variable allows the programmer to override the function which emits the text strings forming the errors and warnings if for some reason the STDERR_FILENO file descriptor is not suitable for this. malloc_message takes the cbopaque pointer argument that is NULL unless overridden by the arguments in a call to malloc_stats_print, followed by a string pointer. Please note that doing anything which tries to allocate memory in this function is likely to result in a crash or deadlock. All messages are prefixed by “<jemalloc>: ”.

RETURN VALUES

Standard API

The malloc and calloc functions return a pointer to the allocated memory if successful; otherwise a NULL pointer is returned and errno is set to ENOMEM.

The posix_memalign function returns the value 0 if successful; otherwise it returns an error value. The posix_memalign function will fail if:
EINVAL The alignment parameter is not a power of 2 at least as large as sizeof(void *).
ENOMEM Memory allocation error.

The aligned_alloc function returns a pointer to the allocated memory if successful; otherwise a NULL pointer is returned and errno is set. The aligned_alloc function will fail if:
EINVAL The alignment parameter is not a power of 2.
ENOMEM Memory allocation error.

The realloc function returns a pointer, possibly identical to ptr, to the allocated memory if successful; otherwise a NULL pointer is returned, and errno is set to ENOMEM if the error was the result of an allocation failure. The realloc function always leaves the original buffer intact when an error occurs.

The free function returns no value.

Non-standard API

The mallocx and rallocx functions return a pointer to the allocated memory if successful; otherwise a NULL pointer is returned to indicate insufficient contiguous memory was available to service the allocation request.

The xallocx function returns the real size of the resulting resized allocation pointed to by ptr, which is a value less than size if the allocation could not be adequately grown in place.

The sallocx function returns the real size of the allocation pointed to by ptr.

The nallocx function returns the real size that would result from a successful equivalent mallocx function call, or zero if insufficient memory is available to perform the size computation.

The mallctl, mallctlnametomib, and mallctlbymib functions return 0 on success; otherwise they return an error value.
The functions will fail if:
EINVAL newp is not NULL, and newlen is too large or too small. Alternatively, *oldlenp is too large or too small; in this case as much data as possible are read despite the error.
ENOENT name or mib specifies an unknown/invalid value.
EPERM Attempt to read or write void value, or attempt to write read-only value.
EAGAIN A memory allocation failure occurred.
EFAULT An interface with side effects failed in some way not directly related to mallctl* read/write processing.

The malloc_usable_size function returns the usable size of the allocation pointed to by ptr.

ENVIRONMENT

The following environment variable affects the execution of the allocation functions:
MALLOC_CONF If the environment variable MALLOC_CONF is set, the characters it contains will be interpreted as options.

EXAMPLES

To dump core whenever a problem occurs:
ln -s 'abort:true' /etc/malloc.conf

To specify in the source a chunk size that is 16 MiB (lg_chunk is a base-2 logarithm, and 2^24 bytes is 16 MiB):
malloc_conf = "lg_chunk:24";

SEE ALSO

madvise(2), mmap(2), sbrk(2), utrace(2), alloca(3), atexit(3), getpagesize(3)

STANDARDS

The malloc, calloc, realloc, and free functions conform to ISO/IEC 9899:1990 (“ISO C90”). The posix_memalign function conforms to IEEE Std 1003.1-2001 (“POSIX.1”).
disque-1.0-rc1/deps/jemalloc/doc/manpages.xsl.in000066400000000000000000000003171264176561100215570ustar00rootroot00000000000000 disque-1.0-rc1/deps/jemalloc/doc/stylesheet.xsl000066400000000000000000000004571264176561100215550ustar00rootroot00000000000000 ansi "" disque-1.0-rc1/deps/jemalloc/include/000077500000000000000000000000001264176561100175045ustar00rootroot00000000000000disque-1.0-rc1/deps/jemalloc/include/jemalloc/000077500000000000000000000000001264176561100212725ustar00rootroot00000000000000disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/000077500000000000000000000000001264176561100231065ustar00rootroot00000000000000disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/arena.h000066400000000000000000001223051264176561100243500ustar00rootroot00000000000000/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS) /* Maximum number of regions in one run. */ #define LG_RUN_MAXREGS (LG_PAGE - LG_TINY_MIN) #define RUN_MAXREGS (1U << LG_RUN_MAXREGS) /* * Minimum redzone size. Redzones may be larger than this if necessary to * preserve region alignment. */ #define REDZONE_MINSIZE 16 /* * The minimum ratio of active:dirty pages per arena is computed as: * * (nactive >> lg_dirty_mult) >= ndirty * * So, supposing that lg_dirty_mult is 3, there can be no less than 8 times as * many active pages as dirty pages. */ #define LG_DIRTY_MULT_DEFAULT 3 typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t; typedef struct arena_run_s arena_run_t; typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t; typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t; typedef struct arena_chunk_s arena_chunk_t; typedef struct arena_bin_info_s arena_bin_info_t; typedef struct arena_bin_s arena_bin_t; typedef struct arena_s arena_t; #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #ifdef JEMALLOC_ARENA_STRUCTS_A struct arena_run_s { /* Index of bin this run is associated with. */ szind_t binind; /* Number of free regions in run. */ unsigned nfree; /* Per region allocated/deallocated bitmap. */ bitmap_t bitmap[BITMAP_GROUPS_MAX]; }; /* Each element of the chunk map corresponds to one page within the chunk. */ struct arena_chunk_map_bits_s { /* * Run address (or size) and various flags are stored together. The bit * layout looks like (assuming 32-bit system): * * ???????? ???????? ???nnnnn nnndumla * * ? : Unallocated: Run address for first/last pages, unset for internal * pages. * Small: Run page offset. * Large: Run page count for first page, unset for trailing pages. * n : binind for small size class, BININD_INVALID for large size class. * d : dirty? * u : unzeroed? * m : decommitted? * l : large? * a : allocated? * * Following are example bit patterns for the three types of runs. 
* * p : run page offset * s : run size * n : binind for size class; large objects set these to BININD_INVALID * x : don't care * - : 0 * + : 1 * [DUMLA] : bit set * [dumla] : bit unset * * Unallocated (clean): * ssssssss ssssssss sss+++++ +++dum-a * xxxxxxxx xxxxxxxx xxxxxxxx xxx-Uxxx * ssssssss ssssssss sss+++++ +++dUm-a * * Unallocated (dirty): * ssssssss ssssssss sss+++++ +++D-m-a * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx * ssssssss ssssssss sss+++++ +++D-m-a * * Small: * pppppppp pppppppp pppnnnnn nnnd---A * pppppppp pppppppp pppnnnnn nnn----A * pppppppp pppppppp pppnnnnn nnnd---A * * Large: * ssssssss ssssssss sss+++++ +++D--LA * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx * -------- -------- ---+++++ +++D--LA * * Large (sampled, size <= LARGE_MINCLASS): * ssssssss ssssssss sssnnnnn nnnD--LA * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx * -------- -------- ---+++++ +++D--LA * * Large (not sampled, size == LARGE_MINCLASS): * ssssssss ssssssss sss+++++ +++D--LA * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx * -------- -------- ---+++++ +++D--LA */ size_t bits; #define CHUNK_MAP_ALLOCATED ((size_t)0x01U) #define CHUNK_MAP_LARGE ((size_t)0x02U) #define CHUNK_MAP_STATE_MASK ((size_t)0x3U) #define CHUNK_MAP_DECOMMITTED ((size_t)0x04U) #define CHUNK_MAP_UNZEROED ((size_t)0x08U) #define CHUNK_MAP_DIRTY ((size_t)0x10U) #define CHUNK_MAP_FLAGS_MASK ((size_t)0x1cU) #define CHUNK_MAP_BININD_SHIFT 5 #define BININD_INVALID ((size_t)0xffU) #define CHUNK_MAP_BININD_MASK (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) #define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK #define CHUNK_MAP_RUNIND_SHIFT (CHUNK_MAP_BININD_SHIFT + 8) #define CHUNK_MAP_SIZE_SHIFT (CHUNK_MAP_RUNIND_SHIFT - LG_PAGE) #define CHUNK_MAP_SIZE_MASK \ (~(CHUNK_MAP_BININD_MASK | CHUNK_MAP_FLAGS_MASK | CHUNK_MAP_STATE_MASK)) }; struct arena_runs_dirty_link_s { qr(arena_runs_dirty_link_t) rd_link; }; /* * Each arena_chunk_map_misc_t corresponds to one page within the chunk, just * like arena_chunk_map_bits_t. Two separate arrays are stored within each * chunk header in order to improve cache locality. */ struct arena_chunk_map_misc_s { /* * Linkage for run trees. There are two disjoint uses: * * 1) arena_t's runs_avail tree. * 2) arena_run_t conceptually uses this linkage for in-use non-full * runs, rather than directly embedding linkage. */ rb_node(arena_chunk_map_misc_t) rb_link; union { /* Linkage for list of dirty runs. */ arena_runs_dirty_link_t rd; /* Profile counters, used for large object runs. */ union { void *prof_tctx_pun; prof_tctx_t *prof_tctx; }; /* Small region run metadata. */ arena_run_t run; }; }; typedef rb_tree(arena_chunk_map_misc_t) arena_avail_tree_t; typedef rb_tree(arena_chunk_map_misc_t) arena_run_tree_t; #endif /* JEMALLOC_ARENA_STRUCTS_A */ #ifdef JEMALLOC_ARENA_STRUCTS_B /* Arena chunk header. */ struct arena_chunk_s { /* * A pointer to the arena that owns the chunk is stored within the node. * This field as a whole is used by chunks_rtree to support both * ivsalloc() and core-based debugging. */ extent_node_t node; /* * Map of pages within chunk that keeps track of free/large/small. The * first map_bias entries are omitted, since the chunk header does not * need to be tracked in the map. This omission saves a header page * for common chunk sizes (e.g. 4 MiB). */ arena_chunk_map_bits_t map_bits[1]; /* Dynamically sized. 
*/ }; /* * Read-only information associated with each element of arena_t's bins array * is stored separately, partly to reduce memory usage (only one copy, rather * than one per arena), but mainly to avoid false cacheline sharing. * * Each run has the following layout: * * /--------------------\ * | pad? | * |--------------------| * | redzone | * reg0_offset | region 0 | * | redzone | * |--------------------| \ * | redzone | | * | region 1 | > reg_interval * | redzone | / * |--------------------| * | ... | * | ... | * | ... | * |--------------------| * | redzone | * | region nregs-1 | * | redzone | * |--------------------| * | alignment pad? | * \--------------------/ * * reg_interval has at least the same minimum alignment as reg_size; this * preserves the alignment constraint that sa2u() depends on. Alignment pad is * either 0 or redzone_size; it is present only if needed to align reg0_offset. */ struct arena_bin_info_s { /* Size of regions in a run for this bin's size class. */ size_t reg_size; /* Redzone size. */ size_t redzone_size; /* Interval between regions (reg_size + (redzone_size << 1)). */ size_t reg_interval; /* Total size of a run for this bin's size class. */ size_t run_size; /* Total number of regions in a run for this bin's size class. */ uint32_t nregs; /* * Metadata used to manipulate bitmaps for runs associated with this * bin. */ bitmap_info_t bitmap_info; /* Offset of first region in a run for this bin's size class. */ uint32_t reg0_offset; }; struct arena_bin_s { /* * All operations on runcur, runs, and stats require that lock be * locked. Run allocation/deallocation are protected by the arena lock, * which may be acquired while holding one or more bin locks, but not * vice versa. */ malloc_mutex_t lock; /* * Current run being used to service allocations of this bin's size * class. */ arena_run_t *runcur; /* * Tree of non-full runs. This tree is used when looking for an * existing run when runcur is no longer usable. We choose the * non-full run that is lowest in memory; this policy tends to keep * objects packed well, and it can also help reduce the number of * almost-empty chunks. */ arena_run_tree_t runs; /* Bin statistics. */ malloc_bin_stats_t stats; }; struct arena_s { /* This arena's index within the arenas array. */ unsigned ind; /* * Number of threads currently assigned to this arena. This field is * protected by arenas_lock. */ unsigned nthreads; /* * There are three classes of arena operations from a locking * perspective: * 1) Thread assignment (modifies nthreads) is protected by arenas_lock. * 2) Bin-related operations are protected by bin locks. * 3) Chunk- and run-related operations are protected by this mutex. */ malloc_mutex_t lock; arena_stats_t stats; /* * List of tcaches for extant threads associated with this arena. * Stats from these are merged incrementally, and at exit if * opt_stats_print is enabled. */ ql_head(tcache_t) tcache_ql; uint64_t prof_accumbytes; /* * PRNG state for cache index randomization of large allocation base * pointers. */ uint64_t offset_state; dss_prec_t dss_prec; /* * In order to avoid rapid chunk allocation/deallocation when an arena * oscillates right on the cusp of needing a new chunk, cache the most * recently freed chunk. The spare is left in the arena's chunk trees * until it is deleted. * * There is one spare chunk per arena, rather than one spare total, in * order to avoid interactions between multiple threads that could make * a single spare inadequate. 
*/ arena_chunk_t *spare; /* Minimum ratio (log base 2) of nactive:ndirty. */ ssize_t lg_dirty_mult; /* True if a thread is currently executing arena_purge(). */ bool purging; /* Number of pages in active runs and huge regions. */ size_t nactive; /* * Current count of pages within unused runs that are potentially * dirty, and for which madvise(... MADV_DONTNEED) has not been called. * By tracking this, we can institute a limit on how much dirty unused * memory is mapped for each arena. */ size_t ndirty; /* * Size/address-ordered tree of this arena's available runs. The tree * is used for first-best-fit run allocation. */ arena_avail_tree_t runs_avail; /* * Unused dirty memory this arena manages. Dirty memory is conceptually * tracked as an arbitrarily interleaved LRU of dirty runs and cached * chunks, but the list linkage is actually semi-duplicated in order to * avoid extra arena_chunk_map_misc_t space overhead. * * LRU-----------------------------------------------------------MRU * * /-- arena ---\ * | | * | | * |------------| /- chunk -\ * ...->|chunks_cache|<--------------------------->| /----\ |<--... * |------------| | |node| | * | | | | | | * | | /- run -\ /- run -\ | | | | * | | | | | | | | | | * | | | | | | | | | | * |------------| |-------| |-------| | |----| | * ...->|runs_dirty |<-->|rd |<-->|rd |<---->|rd |<----... * |------------| |-------| |-------| | |----| | * | | | | | | | | | | * | | | | | | | \----/ | * | | \-------/ \-------/ | | * | | | | * | | | | * \------------/ \---------/ */ arena_runs_dirty_link_t runs_dirty; extent_node_t chunks_cache; /* Extant huge allocations. */ ql_head(extent_node_t) huge; /* Synchronizes all huge allocation/update/deallocation. */ malloc_mutex_t huge_mtx; /* * Trees of chunks that were previously allocated (trees differ only in * node ordering). These are used when allocating chunks, in an attempt * to re-use address space. Depending on function, different tree * orderings are needed, which is why there are two trees with the same * contents. */ extent_tree_t chunks_szad_cached; extent_tree_t chunks_ad_cached; extent_tree_t chunks_szad_retained; extent_tree_t chunks_ad_retained; malloc_mutex_t chunks_mtx; /* Cache of nodes that were allocated via base_alloc(). */ ql_head(extent_node_t) node_cache; malloc_mutex_t node_cache_mtx; /* User-configurable chunk hook functions. */ chunk_hooks_t chunk_hooks; /* bins is used to store trees of free regions. */ arena_bin_t bins[NBINS]; }; #endif /* JEMALLOC_ARENA_STRUCTS_B */ #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS static const size_t large_pad = #ifdef JEMALLOC_CACHE_OBLIVIOUS PAGE #else 0 #endif ; extern ssize_t opt_lg_dirty_mult; extern arena_bin_info_t arena_bin_info[NBINS]; extern size_t map_bias; /* Number of arena chunk header pages. */ extern size_t map_misc_offset; extern size_t arena_maxrun; /* Max run size for arenas. */ extern size_t large_maxclass; /* Max large size class. */ extern unsigned nlclasses; /* Number of large size classes. */ extern unsigned nhclasses; /* Number of huge size classes. 
*/ void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache); void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool cache); extent_node_t *arena_node_alloc(arena_t *arena); void arena_node_dalloc(arena_t *arena, extent_node_t *node); void *arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment, bool *zero); void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize); void arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize, size_t usize); void arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize, size_t usize); bool arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize, size_t usize, bool *zero); ssize_t arena_lg_dirty_mult_get(arena_t *arena); bool arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult); void arena_maybe_purge(arena_t *arena); void arena_purge_all(arena_t *arena); void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes); void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero); #ifdef JEMALLOC_JET typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t, uint8_t); extern arena_redzone_corruption_t *arena_redzone_corruption; typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *); extern arena_dalloc_junk_small_t *arena_dalloc_junk_small; #else void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info); #endif void arena_quarantine_junk_small(void *ptr, size_t usize); void *arena_malloc_small(arena_t *arena, size_t size, bool zero); void *arena_malloc_large(arena_t *arena, size_t size, bool zero); void *arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache); void arena_prof_promoted(const void *ptr, size_t size); void arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm); void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm); void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t pageind); #ifdef JEMALLOC_JET typedef void (arena_dalloc_junk_large_t)(void *, size_t); extern arena_dalloc_junk_large_t *arena_dalloc_junk_large; #else void arena_dalloc_junk_large(void *ptr, size_t usize); #endif void arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr); void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr); #ifdef JEMALLOC_JET typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t); extern arena_ralloc_junk_large_t *arena_ralloc_junk_large; #endif bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, bool zero); void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache); dss_prec_t arena_dss_prec_get(arena_t *arena); bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec); ssize_t arena_lg_dirty_mult_default_get(void); bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult); void arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult, size_t *nactive, size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats); arena_t *arena_new(unsigned ind); bool arena_boot(void); void arena_prefork(arena_t *arena); void arena_postfork_parent(arena_t *arena); void arena_postfork_child(arena_t 
*arena); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE arena_chunk_map_bits_t *arena_bitselm_get(arena_chunk_t *chunk, size_t pageind); arena_chunk_map_misc_t *arena_miscelm_get(arena_chunk_t *chunk, size_t pageind); size_t arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm); void *arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm); arena_chunk_map_misc_t *arena_rd_to_miscelm(arena_runs_dirty_link_t *rd); arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run); size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbitsp_read(size_t *mapbitsp); size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_size_decode(size_t mapbits); size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind); szind_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind); void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits); size_t arena_mapbits_size_encode(size_t size); void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size, size_t flags); void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, size_t size); void arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags); void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size, size_t flags); void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, szind_t binind); void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind, szind_t binind, size_t flags); void arena_metadata_allocated_add(arena_t *arena, size_t size); void arena_metadata_allocated_sub(arena_t *arena, size_t size); size_t arena_metadata_allocated_get(arena_t *arena); bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes); bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes); bool arena_prof_accum(arena_t *arena, uint64_t accumbytes); szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits); szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin); unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr); prof_tctx_t *arena_prof_tctx_get(const void *ptr); void arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx); void arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr, prof_tctx_t *old_tctx); void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero, tcache_t *tcache); arena_t *arena_aalloc(const void *ptr); size_t arena_salloc(const void *ptr, bool demote); void arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache); void arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_)) # ifdef JEMALLOC_ARENA_INLINE_A JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t * arena_bitselm_get(arena_chunk_t *chunk, size_t pageind) { assert(pageind >= 
map_bias); assert(pageind < chunk_npages); return (&chunk->map_bits[pageind-map_bias]); } JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t * arena_miscelm_get(arena_chunk_t *chunk, size_t pageind) { assert(pageind >= map_bias); assert(pageind < chunk_npages); return ((arena_chunk_map_misc_t *)((uintptr_t)chunk + (uintptr_t)map_misc_offset) + pageind-map_bias); } JEMALLOC_ALWAYS_INLINE size_t arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm) { arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk + map_misc_offset)) / sizeof(arena_chunk_map_misc_t) + map_bias; assert(pageind >= map_bias); assert(pageind < chunk_npages); return (pageind); } JEMALLOC_ALWAYS_INLINE void * arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm) { arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); size_t pageind = arena_miscelm_to_pageind(miscelm); return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE))); } JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t * arena_rd_to_miscelm(arena_runs_dirty_link_t *rd) { arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t *)((uintptr_t)rd - offsetof(arena_chunk_map_misc_t, rd)); assert(arena_miscelm_to_pageind(miscelm) >= map_bias); assert(arena_miscelm_to_pageind(miscelm) < chunk_npages); return (miscelm); } JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t * arena_run_to_miscelm(arena_run_t *run) { arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t *)((uintptr_t)run - offsetof(arena_chunk_map_misc_t, run)); assert(arena_miscelm_to_pageind(miscelm) >= map_bias); assert(arena_miscelm_to_pageind(miscelm) < chunk_npages); return (miscelm); } JEMALLOC_ALWAYS_INLINE size_t * arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind) { return (&arena_bitselm_get(chunk, pageind)->bits); } JEMALLOC_ALWAYS_INLINE size_t arena_mapbitsp_read(size_t *mapbitsp) { return (*mapbitsp); } JEMALLOC_ALWAYS_INLINE size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind) { return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind))); } JEMALLOC_ALWAYS_INLINE size_t arena_mapbits_size_decode(size_t mapbits) { size_t size; #if CHUNK_MAP_SIZE_SHIFT > 0 size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT; #elif CHUNK_MAP_SIZE_SHIFT == 0 size = mapbits & CHUNK_MAP_SIZE_MASK; #else size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT; #endif return (size); } JEMALLOC_ALWAYS_INLINE size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind) { size_t mapbits; mapbits = arena_mapbits_get(chunk, pageind); assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); return (arena_mapbits_size_decode(mapbits)); } JEMALLOC_ALWAYS_INLINE size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind) { size_t mapbits; mapbits = arena_mapbits_get(chunk, pageind); assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)); return (arena_mapbits_size_decode(mapbits)); } JEMALLOC_ALWAYS_INLINE size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind) { size_t mapbits; mapbits = arena_mapbits_get(chunk, pageind); assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == CHUNK_MAP_ALLOCATED); return (mapbits >> CHUNK_MAP_RUNIND_SHIFT); } JEMALLOC_ALWAYS_INLINE szind_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind) { size_t mapbits; szind_t binind; mapbits = arena_mapbits_get(chunk, pageind); binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; assert(binind < NBINS 
|| binind == BININD_INVALID); return (binind); } JEMALLOC_ALWAYS_INLINE size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind) { size_t mapbits; mapbits = arena_mapbits_get(chunk, pageind); assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); return (mapbits & CHUNK_MAP_DIRTY); } JEMALLOC_ALWAYS_INLINE size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind) { size_t mapbits; mapbits = arena_mapbits_get(chunk, pageind); assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); return (mapbits & CHUNK_MAP_UNZEROED); } JEMALLOC_ALWAYS_INLINE size_t arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind) { size_t mapbits; mapbits = arena_mapbits_get(chunk, pageind); assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); return (mapbits & CHUNK_MAP_DECOMMITTED); } JEMALLOC_ALWAYS_INLINE size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind) { size_t mapbits; mapbits = arena_mapbits_get(chunk, pageind); return (mapbits & CHUNK_MAP_LARGE); } JEMALLOC_ALWAYS_INLINE size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind) { size_t mapbits; mapbits = arena_mapbits_get(chunk, pageind); return (mapbits & CHUNK_MAP_ALLOCATED); } JEMALLOC_ALWAYS_INLINE void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits) { *mapbitsp = mapbits; } JEMALLOC_ALWAYS_INLINE size_t arena_mapbits_size_encode(size_t size) { size_t mapbits; #if CHUNK_MAP_SIZE_SHIFT > 0 mapbits = size << CHUNK_MAP_SIZE_SHIFT; #elif CHUNK_MAP_SIZE_SHIFT == 0 mapbits = size; #else mapbits = size >> -CHUNK_MAP_SIZE_SHIFT; #endif assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0); return (mapbits); } JEMALLOC_ALWAYS_INLINE void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size, size_t flags) { size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); assert((size & PAGE_MASK) == 0); assert((flags & CHUNK_MAP_FLAGS_MASK) == flags); assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) | CHUNK_MAP_BININD_INVALID | flags); } JEMALLOC_ALWAYS_INLINE void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, size_t size) { size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); size_t mapbits = arena_mapbitsp_read(mapbitsp); assert((size & PAGE_MASK) == 0); assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) | (mapbits & ~CHUNK_MAP_SIZE_MASK)); } JEMALLOC_ALWAYS_INLINE void arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags) { size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); assert((flags & CHUNK_MAP_UNZEROED) == flags); arena_mapbitsp_write(mapbitsp, flags); } JEMALLOC_ALWAYS_INLINE void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size, size_t flags) { size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); assert((size & PAGE_MASK) == 0); assert((flags & CHUNK_MAP_FLAGS_MASK) == flags); assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) | CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED); } JEMALLOC_ALWAYS_INLINE void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, szind_t binind) { size_t *mapbitsp 
= arena_mapbitsp_get(chunk, pageind); size_t mapbits = arena_mapbitsp_read(mapbitsp); assert(binind <= BININD_INVALID); assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS + large_pad); arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) | (binind << CHUNK_MAP_BININD_SHIFT)); } JEMALLOC_ALWAYS_INLINE void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind, szind_t binind, size_t flags) { size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); assert(binind < BININD_INVALID); assert(pageind - runind >= map_bias); assert((flags & CHUNK_MAP_UNZEROED) == flags); arena_mapbitsp_write(mapbitsp, (runind << CHUNK_MAP_RUNIND_SHIFT) | (binind << CHUNK_MAP_BININD_SHIFT) | flags | CHUNK_MAP_ALLOCATED); } JEMALLOC_INLINE void arena_metadata_allocated_add(arena_t *arena, size_t size) { atomic_add_z(&arena->stats.metadata_allocated, size); } JEMALLOC_INLINE void arena_metadata_allocated_sub(arena_t *arena, size_t size) { atomic_sub_z(&arena->stats.metadata_allocated, size); } JEMALLOC_INLINE size_t arena_metadata_allocated_get(arena_t *arena) { return (atomic_read_z(&arena->stats.metadata_allocated)); } JEMALLOC_INLINE bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes) { cassert(config_prof); assert(prof_interval != 0); arena->prof_accumbytes += accumbytes; if (arena->prof_accumbytes >= prof_interval) { arena->prof_accumbytes -= prof_interval; return (true); } return (false); } JEMALLOC_INLINE bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes) { cassert(config_prof); if (likely(prof_interval == 0)) return (false); return (arena_prof_accum_impl(arena, accumbytes)); } JEMALLOC_INLINE bool arena_prof_accum(arena_t *arena, uint64_t accumbytes) { cassert(config_prof); if (likely(prof_interval == 0)) return (false); { bool ret; malloc_mutex_lock(&arena->lock); ret = arena_prof_accum_impl(arena, accumbytes); malloc_mutex_unlock(&arena->lock); return (ret); } } JEMALLOC_ALWAYS_INLINE szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits) { szind_t binind; binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; if (config_debug) { arena_chunk_t *chunk; arena_t *arena; size_t pageind; size_t actual_mapbits; size_t rpages_ind; arena_run_t *run; arena_bin_t *bin; szind_t run_binind, actual_binind; arena_bin_info_t *bin_info; arena_chunk_map_misc_t *miscelm; void *rpages; assert(binind != BININD_INVALID); assert(binind < NBINS); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); arena = extent_node_arena_get(&chunk->node); pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; actual_mapbits = arena_mapbits_get(chunk, pageind); assert(mapbits == actual_mapbits); assert(arena_mapbits_large_get(chunk, pageind) == 0); assert(arena_mapbits_allocated_get(chunk, pageind) != 0); rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); miscelm = arena_miscelm_get(chunk, rpages_ind); run = &miscelm->run; run_binind = run->binind; bin = &arena->bins[run_binind]; actual_binind = bin - arena->bins; assert(run_binind == actual_binind); bin_info = &arena_bin_info[actual_binind]; rpages = arena_miscelm_to_rpages(miscelm); assert(((uintptr_t)ptr - ((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval == 0); } return (binind); } # endif /* JEMALLOC_ARENA_INLINE_A */ # ifdef JEMALLOC_ARENA_INLINE_B JEMALLOC_INLINE szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin) { szind_t binind = bin - arena->bins; assert(binind < NBINS); return (binind); } JEMALLOC_INLINE unsigned 
arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr) { unsigned shift, diff, regind; size_t interval; arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); void *rpages = arena_miscelm_to_rpages(miscelm); /* * Freeing a pointer lower than region zero can cause assertion * failure. */ assert((uintptr_t)ptr >= (uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset); /* * Avoid doing division with a variable divisor if possible. Using * actual division here can reduce allocator throughput by over 20%! */ diff = (unsigned)((uintptr_t)ptr - (uintptr_t)rpages - bin_info->reg0_offset); /* Rescale (factor powers of 2 out of the numerator and denominator). */ interval = bin_info->reg_interval; shift = jemalloc_ffs(interval) - 1; diff >>= shift; interval >>= shift; if (interval == 1) { /* The divisor was a power of 2. */ regind = diff; } else { /* * To divide by a number D that is not a power of two we * multiply by (2^21 / D) and then right shift by 21 positions. * * X / D * * becomes * * (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT * * We can omit the first three elements, because we never * divide by 0, and 1 and 2 are both powers of two, which are * handled above. */ #define SIZE_INV_SHIFT ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS) #define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1) static const unsigned interval_invs[] = { SIZE_INV(3), SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7), SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11), SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15), SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19), SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23), SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27), SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31) }; if (likely(interval <= ((sizeof(interval_invs) / sizeof(unsigned)) + 2))) { regind = (diff * interval_invs[interval - 3]) >> SIZE_INV_SHIFT; } else regind = diff / interval; #undef SIZE_INV #undef SIZE_INV_SHIFT } assert(diff == regind * interval); assert(regind < bin_info->nregs); return (regind); } JEMALLOC_INLINE prof_tctx_t * arena_prof_tctx_get(const void *ptr) { prof_tctx_t *ret; arena_chunk_t *chunk; cassert(config_prof); assert(ptr != NULL); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (likely(chunk != ptr)) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; size_t mapbits = arena_mapbits_get(chunk, pageind); assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) ret = (prof_tctx_t *)(uintptr_t)1U; else { arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk, pageind); ret = atomic_read_p(&elm->prof_tctx_pun); } } else ret = huge_prof_tctx_get(ptr); return (ret); } JEMALLOC_INLINE void arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx) { arena_chunk_t *chunk; cassert(config_prof); assert(ptr != NULL); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (likely(chunk != ptr)) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; assert(arena_mapbits_allocated_get(chunk, pageind) != 0); if (unlikely(usize > SMALL_MAXCLASS || (uintptr_t)tctx > (uintptr_t)1U)) { arena_chunk_map_misc_t *elm; assert(arena_mapbits_large_get(chunk, pageind) != 0); elm = arena_miscelm_get(chunk, pageind); atomic_write_p(&elm->prof_tctx_pun, tctx); } else { /* * tctx must always be initialized for large runs. * Assert that the surrounding conditional logic is * equivalent to checking whether ptr refers to a large * run. 
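Stepping back to arena_run_regind() above: the SIZE_INV reciprocal trick is
worth a worked example. With a 32-bit unsigned and the 2^21 factor from the
source comment, dividing by a small constant D becomes a multiply by
((1 << 21) / D) + 1 followed by a 21-bit right shift, which is exact for the
multiples of D that can occur within a run. A standalone sketch (the function
name is hypothetical):

    static unsigned
    regind_div_sketch(unsigned diff, unsigned interval)
    {
        const unsigned shift = 21;                      // SIZE_INV_SHIFT
        unsigned inv = ((1U << shift) / interval) + 1;  // SIZE_INV(interval)
        return ((diff * inv) >> shift);  // == diff / interval for diff < 2^21
    }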
*/ assert(arena_mapbits_large_get(chunk, pageind) == 0); } } else huge_prof_tctx_set(ptr, tctx); } JEMALLOC_INLINE void arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr, prof_tctx_t *old_tctx) { cassert(config_prof); assert(ptr != NULL); if (unlikely(usize > SMALL_MAXCLASS || (ptr == old_ptr && (uintptr_t)old_tctx > (uintptr_t)1U))) { arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (likely(chunk != ptr)) { size_t pageind; arena_chunk_map_misc_t *elm; pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; assert(arena_mapbits_allocated_get(chunk, pageind) != 0); assert(arena_mapbits_large_get(chunk, pageind) != 0); elm = arena_miscelm_get(chunk, pageind); atomic_write_p(&elm->prof_tctx_pun, (prof_tctx_t *)(uintptr_t)1U); } else huge_prof_tctx_reset(ptr); } } JEMALLOC_ALWAYS_INLINE void * arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero, tcache_t *tcache) { assert(size != 0); arena = arena_choose(tsd, arena); if (unlikely(arena == NULL)) return (NULL); if (likely(size <= SMALL_MAXCLASS)) { if (likely(tcache != NULL)) { return (tcache_alloc_small(tsd, arena, tcache, size, zero)); } else return (arena_malloc_small(arena, size, zero)); } else if (likely(size <= large_maxclass)) { /* * Initialize tcache after checking size in order to avoid * infinite recursion during tcache initialization. */ if (likely(tcache != NULL) && size <= tcache_maxclass) { return (tcache_alloc_large(tsd, arena, tcache, size, zero)); } else return (arena_malloc_large(arena, size, zero)); } else return (huge_malloc(tsd, arena, size, zero, tcache)); } JEMALLOC_ALWAYS_INLINE arena_t * arena_aalloc(const void *ptr) { arena_chunk_t *chunk; chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (likely(chunk != ptr)) return (extent_node_arena_get(&chunk->node)); else return (huge_aalloc(ptr)); } /* Return the size of the allocation pointed to by ptr. */ JEMALLOC_ALWAYS_INLINE size_t arena_salloc(const void *ptr, bool demote) { size_t ret; arena_chunk_t *chunk; size_t pageind; szind_t binind; assert(ptr != NULL); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (likely(chunk != ptr)) { pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; assert(arena_mapbits_allocated_get(chunk, pageind) != 0); binind = arena_mapbits_binind_get(chunk, pageind); if (unlikely(binind == BININD_INVALID || (config_prof && !demote && arena_mapbits_large_get(chunk, pageind) != 0))) { /* * Large allocation. In the common case (demote), and * as this is an inline function, most callers will only * end up looking at binind to determine that ptr is a * small allocation. */ assert(config_cache_oblivious || ((uintptr_t)ptr & PAGE_MASK) == 0); ret = arena_mapbits_large_size_get(chunk, pageind) - large_pad; assert(ret != 0); assert(pageind + ((ret+large_pad)>>LG_PAGE) <= chunk_npages); assert(arena_mapbits_dirty_get(chunk, pageind) == arena_mapbits_dirty_get(chunk, pageind+((ret+large_pad)>>LG_PAGE)-1)); } else { /* * Small allocation (possibly promoted to a large * object). 
*/ assert(arena_mapbits_large_get(chunk, pageind) != 0 || arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, pageind)) == binind); ret = index2size(binind); } } else ret = huge_salloc(ptr); return (ret); } JEMALLOC_ALWAYS_INLINE void arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache) { arena_chunk_t *chunk; size_t pageind, mapbits; assert(ptr != NULL); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (likely(chunk != ptr)) { pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; mapbits = arena_mapbits_get(chunk, pageind); assert(arena_mapbits_allocated_get(chunk, pageind) != 0); if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) { /* Small allocation. */ if (likely(tcache != NULL)) { szind_t binind = arena_ptr_small_binind_get(ptr, mapbits); tcache_dalloc_small(tsd, tcache, ptr, binind); } else { arena_dalloc_small(extent_node_arena_get( &chunk->node), chunk, ptr, pageind); } } else { size_t size = arena_mapbits_large_size_get(chunk, pageind); assert(config_cache_oblivious || ((uintptr_t)ptr & PAGE_MASK) == 0); if (likely(tcache != NULL) && size - large_pad <= tcache_maxclass) { tcache_dalloc_large(tsd, tcache, ptr, size - large_pad); } else { arena_dalloc_large(extent_node_arena_get( &chunk->node), chunk, ptr); } } } else huge_dalloc(tsd, ptr, tcache); } JEMALLOC_ALWAYS_INLINE void arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache) { arena_chunk_t *chunk; chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (likely(chunk != ptr)) { if (config_prof && opt_prof) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; assert(arena_mapbits_allocated_get(chunk, pageind) != 0); if (arena_mapbits_large_get(chunk, pageind) != 0) { /* * Make sure to use promoted size, not request * size. */ size = arena_mapbits_large_size_get(chunk, pageind) - large_pad; } } assert(s2u(size) == s2u(arena_salloc(ptr, false))); if (likely(size <= SMALL_MAXCLASS)) { /* Small allocation. 
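The pointer-only arena_dalloc() above and the size-aware arena_sdalloc() here
share one routing scheme, condensed into a sketch derived from the branches
themselves (the enum and helper are hypothetical):

    typedef enum {
        DALLOC_TCACHE_SMALL, DALLOC_ARENA_SMALL,
        DALLOC_TCACHE_LARGE, DALLOC_ARENA_LARGE, DALLOC_HUGE
    } dalloc_path_t;

    static dalloc_path_t
    dalloc_path_sketch(bool chunk_interior, bool small, bool have_tcache,
        size_t size, size_t tcache_maxclass)
    {
        if (!chunk_interior)
            return (DALLOC_HUGE);  // chunk-aligned pointer: huge object
        if (small) {
            return (have_tcache ?
                DALLOC_TCACHE_SMALL : DALLOC_ARENA_SMALL);
        }
        return ((have_tcache && size <= tcache_maxclass) ?
            DALLOC_TCACHE_LARGE : DALLOC_ARENA_LARGE);
    }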
*/ if (likely(tcache != NULL)) { szind_t binind = size2index(size); tcache_dalloc_small(tsd, tcache, ptr, binind); } else { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; arena_dalloc_small(extent_node_arena_get( &chunk->node), chunk, ptr, pageind); } } else { assert(config_cache_oblivious || ((uintptr_t)ptr & PAGE_MASK) == 0); if (likely(tcache != NULL) && size <= tcache_maxclass) tcache_dalloc_large(tsd, tcache, ptr, size); else { arena_dalloc_large(extent_node_arena_get( &chunk->node), chunk, ptr); } } } else huge_dalloc(tsd, ptr, tcache); } # endif /* JEMALLOC_ARENA_INLINE_B */ #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/atomic.h000066400000000000000000000361171264176561100245430ustar00rootroot00000000000000/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #define atomic_read_uint64(p) atomic_add_uint64(p, 0) #define atomic_read_uint32(p) atomic_add_uint32(p, 0) #define atomic_read_p(p) atomic_add_p(p, NULL) #define atomic_read_z(p) atomic_add_z(p, 0) #define atomic_read_u(p) atomic_add_u(p, 0) #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES /* * All arithmetic functions return the arithmetic result of the atomic * operation. Some atomic operation APIs return the value prior to mutation, in * which case the following functions must redundantly compute the result so * that it can be returned. These functions are normally inlined, so the extra * operations can be optimized away if the return values aren't used by the * callers. * * atomic_read_( *p) { return (*p); } * atomic_add_( *p, x) { return (*p + x); } * atomic_sub_( *p, x) { return (*p - x); } * bool atomic_cas_( *p, c, s) * { * if (*p != c) * return (true); * *p = s; * return (false); * } * void atomic_write_( *p, x) { *p = x; } */ #ifndef JEMALLOC_ENABLE_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x); uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x); bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s); void atomic_write_uint64(uint64_t *p, uint64_t x); uint32_t atomic_add_uint32(uint32_t *p, uint32_t x); uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x); bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s); void atomic_write_uint32(uint32_t *p, uint32_t x); void *atomic_add_p(void **p, void *x); void *atomic_sub_p(void **p, void *x); bool atomic_cas_p(void **p, void *c, void *s); void atomic_write_p(void **p, const void *x); size_t atomic_add_z(size_t *p, size_t x); size_t atomic_sub_z(size_t *p, size_t x); bool atomic_cas_z(size_t *p, size_t c, size_t s); void atomic_write_z(size_t *p, size_t x); unsigned atomic_add_u(unsigned *p, unsigned x); unsigned atomic_sub_u(unsigned *p, unsigned x); bool atomic_cas_u(unsigned *p, unsigned c, unsigned s); void atomic_write_u(unsigned *p, unsigned x); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_)) /******************************************************************************/ /* 64-bit operations. 
*/ #if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) # if (defined(__amd64__) || defined(__x86_64__)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { uint64_t t = x; asm volatile ( "lock; xaddq %0, %1;" : "+r" (t), "=m" (*p) /* Outputs. */ : "m" (*p) /* Inputs. */ ); return (t + x); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { uint64_t t; x = (uint64_t)(-(int64_t)x); t = x; asm volatile ( "lock; xaddq %0, %1;" : "+r" (t), "=m" (*p) /* Outputs. */ : "m" (*p) /* Inputs. */ ); return (t + x); } JEMALLOC_INLINE bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) { uint8_t success; asm volatile ( "lock; cmpxchgq %4, %0;" "sete %1;" : "=m" (*p), "=a" (success) /* Outputs. */ : "m" (*p), "a" (c), "r" (s) /* Inputs. */ : "memory" /* Clobbers. */ ); return (!(bool)success); } JEMALLOC_INLINE void atomic_write_uint64(uint64_t *p, uint64_t x) { asm volatile ( "xchgq %1, %0;" /* Lock is implied by xchgq. */ : "=m" (*p), "+r" (x) /* Outputs. */ : "m" (*p) /* Inputs. */ : "memory" /* Clobbers. */ ); } # elif (defined(JEMALLOC_C11ATOMICS)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; return (atomic_fetch_add(a, x) + x); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; return (atomic_fetch_sub(a, x) - x); } JEMALLOC_INLINE bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) { volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; return (!atomic_compare_exchange_strong(a, &c, s)); } JEMALLOC_INLINE void atomic_write_uint64(uint64_t *p, uint64_t x) { volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; atomic_store(a, x); } # elif (defined(JEMALLOC_ATOMIC9)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { /* * atomic_fetchadd_64() doesn't exist, but we only ever use this * function on LP64 systems, so atomic_fetchadd_long() will do. */ assert(sizeof(uint64_t) == sizeof(unsigned long)); return (atomic_fetchadd_long(p, (unsigned long)x) + x); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { assert(sizeof(uint64_t) == sizeof(unsigned long)); return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x); } JEMALLOC_INLINE bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) { assert(sizeof(uint64_t) == sizeof(unsigned long)); return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s)); } JEMALLOC_INLINE void atomic_write_uint64(uint64_t *p, uint64_t x) { assert(sizeof(uint64_t) == sizeof(unsigned long)); atomic_store_rel_long(p, x); } # elif (defined(JEMALLOC_OSATOMIC)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { return (OSAtomicAdd64((int64_t)x, (int64_t *)p)); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p)); } JEMALLOC_INLINE bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) { return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p)); } JEMALLOC_INLINE void atomic_write_uint64(uint64_t *p, uint64_t x) { uint64_t o; /*The documented OSAtomic*() API does not expose an atomic exchange. 
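Because the documented OSAtomic API lacks an exchange operation, the write
below emulates it with a read/CAS retry loop; note that atomic_cas_uint64()
in this file returns false on success, so the loop repeats until the swap
lands. The same pattern in portable C11, as a self-contained sketch:

    #include <stdatomic.h>
    #include <stdint.h>

    static void
    atomic_write_sketch(_Atomic uint64_t *p, uint64_t x)
    {
        uint64_t o = atomic_load(p);
        // On failure, o is refreshed with the current value; try again.
        while (!atomic_compare_exchange_weak(p, &o, x))
            ;
    }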
*/ do { o = atomic_read_uint64(p); } while (atomic_cas_uint64(p, o, x)); } # elif (defined(_MSC_VER)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { return (InterlockedExchangeAdd64(p, x) + x); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x); } JEMALLOC_INLINE bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) { uint64_t o; o = InterlockedCompareExchange64(p, s, c); return (o != c); } JEMALLOC_INLINE void atomic_write_uint64(uint64_t *p, uint64_t x) { InterlockedExchange64(p, x); } # elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \ defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { return (__sync_add_and_fetch(p, x)); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { return (__sync_sub_and_fetch(p, x)); } JEMALLOC_INLINE bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) { return (!__sync_bool_compare_and_swap(p, c, s)); } JEMALLOC_INLINE void atomic_write_uint64(uint64_t *p, uint64_t x) { __sync_lock_test_and_set(p, x); } # else # error "Missing implementation for 64-bit atomic operations" # endif #endif /******************************************************************************/ /* 32-bit operations. */ #if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { uint32_t t = x; asm volatile ( "lock; xaddl %0, %1;" : "+r" (t), "=m" (*p) /* Outputs. */ : "m" (*p) /* Inputs. */ ); return (t + x); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { uint32_t t; x = (uint32_t)(-(int32_t)x); t = x; asm volatile ( "lock; xaddl %0, %1;" : "+r" (t), "=m" (*p) /* Outputs. */ : "m" (*p) /* Inputs. */ ); return (t + x); } JEMALLOC_INLINE bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) { uint8_t success; asm volatile ( "lock; cmpxchgl %4, %0;" "sete %1;" : "=m" (*p), "=a" (success) /* Outputs. */ : "m" (*p), "a" (c), "r" (s) /* Inputs. */ : "memory" ); return (!(bool)success); } JEMALLOC_INLINE void atomic_write_uint32(uint32_t *p, uint32_t x) { asm volatile ( "xchgl %1, %0;" /* Lock is implied by xchgl. */ : "=m" (*p), "+r" (x) /* Outputs. */ : "m" (*p) /* Inputs. */ : "memory" /* Clobbers. 
*/ ); } # elif (defined(JEMALLOC_C11ATOMICS)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; return (atomic_fetch_add(a, x) + x); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; return (atomic_fetch_sub(a, x) - x); } JEMALLOC_INLINE bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) { volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; return (!atomic_compare_exchange_strong(a, &c, s)); } JEMALLOC_INLINE void atomic_write_uint32(uint32_t *p, uint32_t x) { volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; atomic_store(a, x); } #elif (defined(JEMALLOC_ATOMIC9)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { return (atomic_fetchadd_32(p, x) + x); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x); } JEMALLOC_INLINE bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) { return (!atomic_cmpset_32(p, c, s)); } JEMALLOC_INLINE void atomic_write_uint32(uint32_t *p, uint32_t x) { atomic_store_rel_32(p, x); } #elif (defined(JEMALLOC_OSATOMIC)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { return (OSAtomicAdd32((int32_t)x, (int32_t *)p)); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p)); } JEMALLOC_INLINE bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) { return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p)); } JEMALLOC_INLINE void atomic_write_uint32(uint32_t *p, uint32_t x) { uint32_t o; /*The documented OSAtomic*() API does not expose an atomic exchange. */ do { o = atomic_read_uint32(p); } while (atomic_cas_uint32(p, o, x)); } #elif (defined(_MSC_VER)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { return (InterlockedExchangeAdd(p, x) + x); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { return (InterlockedExchangeAdd(p, -((int32_t)x)) - x); } JEMALLOC_INLINE bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) { uint32_t o; o = InterlockedCompareExchange(p, s, c); return (o != c); } JEMALLOC_INLINE void atomic_write_uint32(uint32_t *p, uint32_t x) { InterlockedExchange(p, x); } #elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \ defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { return (__sync_add_and_fetch(p, x)); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { return (__sync_sub_and_fetch(p, x)); } JEMALLOC_INLINE bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) { return (!__sync_bool_compare_and_swap(p, c, s)); } JEMALLOC_INLINE void atomic_write_uint32(uint32_t *p, uint32_t x) { __sync_lock_test_and_set(p, x); } #else # error "Missing implementation for 32-bit atomic operations" #endif /******************************************************************************/ /* Pointer operations. 
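The pointer, size_t, and unsigned wrappers below all follow one pattern: cast
to the matching fixed-width integer and forward, selected at compile time by
LG_SIZEOF_PTR / LG_SIZEOF_INT (log2 of the type size: 3 on 64-bit, 2 on
32-bit targets). Subtraction simply reuses the add primitive with the
two's-complement negation of x. Equivalent dispatch in a standalone sketch,
where the UINTPTR_MAX test (from <stdint.h>) stands in for the build-time
constant:

    static size_t
    atomic_add_z_sketch(size_t *p, size_t x)
    {
    #if UINTPTR_MAX == UINT64_MAX  // LG_SIZEOF_PTR == 3
        return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
    #else                          // LG_SIZEOF_PTR == 2
        return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
    #endif
    }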
*/ JEMALLOC_INLINE void * atomic_add_p(void **p, void *x) { #if (LG_SIZEOF_PTR == 3) return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); #elif (LG_SIZEOF_PTR == 2) return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); #endif } JEMALLOC_INLINE void * atomic_sub_p(void **p, void *x) { #if (LG_SIZEOF_PTR == 3) return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x))); #elif (LG_SIZEOF_PTR == 2) return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x))); #endif } JEMALLOC_INLINE bool atomic_cas_p(void **p, void *c, void *s) { #if (LG_SIZEOF_PTR == 3) return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); #elif (LG_SIZEOF_PTR == 2) return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); #endif } JEMALLOC_INLINE void atomic_write_p(void **p, const void *x) { #if (LG_SIZEOF_PTR == 3) atomic_write_uint64((uint64_t *)p, (uint64_t)x); #elif (LG_SIZEOF_PTR == 2) atomic_write_uint32((uint32_t *)p, (uint32_t)x); #endif } /******************************************************************************/ /* size_t operations. */ JEMALLOC_INLINE size_t atomic_add_z(size_t *p, size_t x) { #if (LG_SIZEOF_PTR == 3) return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); #elif (LG_SIZEOF_PTR == 2) return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); #endif } JEMALLOC_INLINE size_t atomic_sub_z(size_t *p, size_t x) { #if (LG_SIZEOF_PTR == 3) return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x))); #elif (LG_SIZEOF_PTR == 2) return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x))); #endif } JEMALLOC_INLINE bool atomic_cas_z(size_t *p, size_t c, size_t s) { #if (LG_SIZEOF_PTR == 3) return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); #elif (LG_SIZEOF_PTR == 2) return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); #endif } JEMALLOC_INLINE void atomic_write_z(size_t *p, size_t x) { #if (LG_SIZEOF_PTR == 3) atomic_write_uint64((uint64_t *)p, (uint64_t)x); #elif (LG_SIZEOF_PTR == 2) atomic_write_uint32((uint32_t *)p, (uint32_t)x); #endif } /******************************************************************************/ /* unsigned operations. 
 */
JEMALLOC_INLINE unsigned
atomic_add_u(unsigned *p, unsigned x)
{

#if (LG_SIZEOF_INT == 3)
	return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_INT == 2)
	return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}

JEMALLOC_INLINE unsigned
atomic_sub_u(unsigned *p, unsigned x)
{

#if (LG_SIZEOF_INT == 3)
	return ((unsigned)atomic_add_uint64((uint64_t *)p,
	    (uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_INT == 2)
	return ((unsigned)atomic_add_uint32((uint32_t *)p,
	    (uint32_t)-((int32_t)x)));
#endif
}

JEMALLOC_INLINE bool
atomic_cas_u(unsigned *p, unsigned c, unsigned s)
{

#if (LG_SIZEOF_INT == 3)
	return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_INT == 2)
	return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}

JEMALLOC_INLINE void
atomic_write_u(unsigned *p, unsigned x)
{

#if (LG_SIZEOF_INT == 3)
	atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_INT == 2)
	atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}

/******************************************************************************/
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/base.h

/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

void	*base_alloc(size_t size);
void	base_stats_get(size_t *allocated, size_t *resident, size_t *mapped);
bool	base_boot(void);
void	base_prefork(void);
void	base_postfork_parent(void);
void	base_postfork_child(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/bitmap.h

/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
#define	LG_BITMAP_MAXBITS	LG_RUN_MAXREGS
#define	BITMAP_MAXBITS		(ZU(1) << LG_BITMAP_MAXBITS)

typedef struct bitmap_level_s bitmap_level_t;
typedef struct bitmap_info_s bitmap_info_t;
typedef unsigned long bitmap_t;
#define	LG_SIZEOF_BITMAP	LG_SIZEOF_LONG

/* Number of bits per group. */
#define	LG_BITMAP_GROUP_NBITS		(LG_SIZEOF_BITMAP + 3)
#define	BITMAP_GROUP_NBITS		(ZU(1) << LG_BITMAP_GROUP_NBITS)
#define	BITMAP_GROUP_NBITS_MASK		(BITMAP_GROUP_NBITS-1)

/* Number of groups required to store a given number of bits. */
#define	BITMAP_BITS2GROUPS(nbits)					\
	((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)

/*
 * Number of groups required at a particular level for a given number of bits.
*/ #define BITMAP_GROUPS_L0(nbits) \ BITMAP_BITS2GROUPS(nbits) #define BITMAP_GROUPS_L1(nbits) \ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits)) #define BITMAP_GROUPS_L2(nbits) \ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))) #define BITMAP_GROUPS_L3(nbits) \ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ BITMAP_BITS2GROUPS((nbits))))) /* * Assuming the number of levels, number of groups required for a given number * of bits. */ #define BITMAP_GROUPS_1_LEVEL(nbits) \ BITMAP_GROUPS_L0(nbits) #define BITMAP_GROUPS_2_LEVEL(nbits) \ (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits)) #define BITMAP_GROUPS_3_LEVEL(nbits) \ (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits)) #define BITMAP_GROUPS_4_LEVEL(nbits) \ (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits)) /* * Maximum number of groups required to support LG_BITMAP_MAXBITS. */ #if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS # define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS) #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2 # define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS) #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3 # define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS) #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4 # define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS) #else # error "Unsupported bitmap size" #endif /* Maximum number of levels possible. */ #define BITMAP_MAX_LEVELS \ (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \ + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct bitmap_level_s { /* Offset of this level's groups within the array of groups. */ size_t group_offset; }; struct bitmap_info_s { /* Logical number of bits in bitmap (stored at bottom level). */ size_t nbits; /* Number of levels necessary for nbits. */ unsigned nlevels; /* * Only the first (nlevels+1) elements are used, and levels are ordered * bottom to top (e.g. the bottom level is stored in levels[0]). */ bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); size_t bitmap_info_ngroups(const bitmap_info_t *binfo); size_t bitmap_size(size_t nbits); void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo); bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo); void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_)) JEMALLOC_INLINE bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) { unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1; bitmap_t rg = bitmap[rgoff]; /* The bitmap is full iff the root group is 0. 
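Worked numbers for the group/level math above, assuming 64-bit bitmap_t
(LG_BITMAP_GROUP_NBITS == 6): a run with 512 regions needs
BITMAP_BITS2GROUPS(512) = 8 level-0 groups plus BITMAP_BITS2GROUPS(8) = 1
level-1 summary group, so BITMAP_GROUPS_2_LEVEL(512) == 9 groups in total.
Storage is inverted -- a 1 bit means "unset/free", as bitmap_get() shows --
which is why a full bitmap has an all-zero root group here, and why
bitmap_sfu() below can locate the first free region with ffs over the
summary groups.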
*/ return (rg == 0); } JEMALLOC_INLINE bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { size_t goff; bitmap_t g; assert(bit < binfo->nbits); goff = bit >> LG_BITMAP_GROUP_NBITS; g = bitmap[goff]; return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))); } JEMALLOC_INLINE void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { size_t goff; bitmap_t *gp; bitmap_t g; assert(bit < binfo->nbits); assert(!bitmap_get(bitmap, binfo, bit)); goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[goff]; g = *gp; assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))); g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; assert(bitmap_get(bitmap, binfo, bit)); /* Propagate group state transitions up the tree. */ if (g == 0) { unsigned i; for (i = 1; i < binfo->nlevels; i++) { bit = goff; goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[binfo->levels[i].group_offset + goff]; g = *gp; assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))); g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; if (g != 0) break; } } } /* sfu: set first unset. */ JEMALLOC_INLINE size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) { size_t bit; bitmap_t g; unsigned i; assert(!bitmap_full(bitmap, binfo)); i = binfo->nlevels - 1; g = bitmap[binfo->levels[i].group_offset]; bit = jemalloc_ffsl(g) - 1; while (i > 0) { i--; g = bitmap[binfo->levels[i].group_offset + bit]; bit = (bit << LG_BITMAP_GROUP_NBITS) + (jemalloc_ffsl(g) - 1); } bitmap_set(bitmap, binfo, bit); return (bit); } JEMALLOC_INLINE void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { size_t goff; bitmap_t *gp; bitmap_t g; bool propagate; assert(bit < binfo->nbits); assert(bitmap_get(bitmap, binfo, bit)); goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[goff]; g = *gp; propagate = (g == 0); assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; assert(!bitmap_get(bitmap, binfo, bit)); /* Propagate group state transitions up the tree. */ if (propagate) { unsigned i; for (i = 1; i < binfo->nlevels; i++) { bit = goff; goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[binfo->levels[i].group_offset + goff]; g = *gp; propagate = (g == 0); assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; if (!propagate) break; } } } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/chunk.h000066400000000000000000000063431264176561100243750ustar00rootroot00000000000000/******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* * Size and alignment of memory chunks that are allocated by the OS's virtual * memory system. */ #define LG_CHUNK_DEFAULT 21 /* Return the chunk address for allocation address a. */ #define CHUNK_ADDR2BASE(a) \ ((void *)((uintptr_t)(a) & ~chunksize_mask)) /* Return the chunk offset of address a. */ #define CHUNK_ADDR2OFFSET(a) \ ((size_t)((uintptr_t)(a) & chunksize_mask)) /* Return the smallest chunk multiple that is >= s. 
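With the default 2 MiB chunks (LG_CHUNK_DEFAULT == 21 above), chunksize_mask
is 0x1fffff and the three macros here reduce to plain mask arithmetic; a
worked example under that default (CHUNK_CEILING is defined just below):

    CHUNK_ADDR2BASE(0x7f3a00612345)   == 0x7f3a00600000  // strip low 21 bits
    CHUNK_ADDR2OFFSET(0x7f3a00612345) == 0x12345         // keep low 21 bits
    CHUNK_CEILING(0x250000)           == 0x400000        // round up to 4 MiB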
*/ #define CHUNK_CEILING(s) \ (((s) + chunksize_mask) & ~chunksize_mask) #define CHUNK_HOOKS_INITIALIZER { \ NULL, \ NULL, \ NULL, \ NULL, \ NULL, \ NULL, \ NULL \ } #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS extern size_t opt_lg_chunk; extern const char *opt_dss; extern rtree_t chunks_rtree; extern size_t chunksize; extern size_t chunksize_mask; /* (chunksize - 1). */ extern size_t chunk_npages; extern const chunk_hooks_t chunk_hooks_default; chunk_hooks_t chunk_hooks_get(arena_t *arena); chunk_hooks_t chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks); bool chunk_register(const void *chunk, const extent_node_t *node); void chunk_deregister(const void *chunk, const extent_node_t *node); void *chunk_alloc_base(size_t size); void *chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, bool *zero, bool dalloc_node); void *chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit); void chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed); void chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool zeroed, bool committed); void chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed); bool chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length); bool chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset, size_t length); bool chunk_boot(void); void chunk_prefork(void); void chunk_postfork_parent(void); void chunk_postfork_child(void); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE extent_node_t *chunk_lookup(const void *chunk, bool dependent); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_)) JEMALLOC_INLINE extent_node_t * chunk_lookup(const void *ptr, bool dependent) { return (rtree_get(&chunks_rtree, (uintptr_t)ptr, dependent)); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ #include "jemalloc/internal/chunk_dss.h" #include "jemalloc/internal/chunk_mmap.h" disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/chunk_dss.h000066400000000000000000000023361264176561100252440ustar00rootroot00000000000000/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef enum { dss_prec_disabled = 0, dss_prec_primary = 1, dss_prec_secondary = 2, dss_prec_limit = 3 } dss_prec_t; #define DSS_PREC_DEFAULT dss_prec_secondary #define DSS_DEFAULT "secondary" #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS extern const char *dss_prec_names[]; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS dss_prec_t chunk_dss_prec_get(void); bool chunk_dss_prec_set(dss_prec_t dss_prec); void *chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit); 
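/*
 * Illustrative use of the chunk_hooks_t plumbing declared above: an embedder
 * supplies its own mapping primitive and installs it per arena, after which
 * chunk_alloc_wrapper()/chunk_dalloc_wrapper() route through the user hooks
 * instead of chunk_hooks_default.  A sketch only: the hook prototype and the
 * "arena.0.chunk_hooks" mallctl name follow the jemalloc 4.x public API and
 * should be treated as assumptions here, and the hook body is a deliberately
 * naive mmap() that reports failure when the mapping is not suitably aligned.
 */
#include <stdbool.h>
#include <stdint.h>
#include <sys/mman.h>

static void *
my_chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit, unsigned arena_ind)
{
	void *ret;

	(void)new_addr;		/* Ignore placement hints in this sketch. */
	(void)arena_ind;
	ret = mmap(NULL, size, PROT_READ|PROT_WRITE,
	    MAP_PRIVATE|MAP_ANON, -1, 0);
	if (ret == MAP_FAILED)
		return (NULL);
	if (((uintptr_t)ret & (alignment - 1)) != 0) {
		munmap(ret, size);	/* Not chunk-aligned: report failure. */
		return (NULL);
	}
	*zero = true;	/* Fresh anonymous pages are zero-filled. */
	*commit = true;
	return (ret);
}
/*
 * Installation (a real embedder would populate the remaining hooks too):
 *   chunk_hooks_t hooks = CHUNK_HOOKS_INITIALIZER;
 *   hooks.alloc = my_chunk_alloc;
 *   mallctl("arena.0.chunk_hooks", NULL, NULL, &hooks, sizeof(hooks));
 */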
bool	chunk_in_dss(void *chunk);
bool	chunk_dss_boot(void);
void	chunk_dss_prefork(void);
void	chunk_dss_postfork_parent(void);
void	chunk_dss_postfork_child(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h

/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

void	*chunk_alloc_mmap(size_t size, size_t alignment, bool *zero,
    bool *commit);
bool	chunk_dalloc_mmap(void *chunk, size_t size);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/ckh.h

/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct ckh_s ckh_t;
typedef struct ckhc_s ckhc_t;

/* Typedefs to allow easy function pointer passing. */
typedef void ckh_hash_t (const void *, size_t[2]);
typedef bool ckh_keycomp_t (const void *, const void *);

/* Maintain counters used to get an idea of performance. */
/* #define CKH_COUNT */
/* Print counter values in ckh_delete() (requires CKH_COUNT). */
/* #define CKH_VERBOSE */

/*
 * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket.  Try to fit
 * one bucket per L1 cache line.
 */
#define	LG_CKH_BUCKET_CELLS	(LG_CACHELINE - LG_SIZEOF_PTR - 1)

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/* Hash table cell. */
struct ckhc_s {
	const void	*key;
	const void	*data;
};

struct ckh_s {
#ifdef CKH_COUNT
	/* Counters used to get an idea of performance. */
	uint64_t	ngrows;
	uint64_t	nshrinks;
	uint64_t	nshrinkfails;
	uint64_t	ninserts;
	uint64_t	nrelocs;
#endif

	/* Used for pseudo-random number generation. */
#define	CKH_A		1103515241
#define	CKH_C		12347
	uint32_t	prng_state;

	/* Total number of items. */
	size_t		count;

	/*
	 * Minimum and current number of hash table buckets.  There are
	 * 2^LG_CKH_BUCKET_CELLS cells per bucket.
	 */
	unsigned	lg_minbuckets;
	unsigned	lg_curbuckets;

	/* Hash and comparison functions. */
	ckh_hash_t	*hash;
	ckh_keycomp_t	*keycomp;

	/* Hash table with 2^lg_curbuckets buckets.
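Worked numbers for LG_CKH_BUCKET_CELLS above, assuming 64-byte cache lines
(LG_CACHELINE == 6) and 8-byte pointers (LG_SIZEOF_PTR == 3): a bucket holds
2^(6 - 3 - 1) = 4 cells, each ckhc_s cell is two pointers = 16 bytes, and so
one bucket is exactly 4 * 16 = 64 bytes -- one L1 line, as the comment in the
types section intends.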
	 */
	ckhc_t		*tab;
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

bool	ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
    ckh_keycomp_t *keycomp);
void	ckh_delete(tsd_t *tsd, ckh_t *ckh);
size_t	ckh_count(ckh_t *ckh);
bool	ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
bool	ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
bool	ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
    void **data);
bool	ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
void	ckh_string_hash(const void *key, size_t r_hash[2]);
bool	ckh_string_keycomp(const void *k1, const void *k2);
void	ckh_pointer_hash(const void *key, size_t r_hash[2]);
bool	ckh_pointer_keycomp(const void *k1, const void *k2);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/ctl.h

/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct ctl_node_s ctl_node_t;
typedef struct ctl_named_node_s ctl_named_node_t;
typedef struct ctl_indexed_node_s ctl_indexed_node_t;
typedef struct ctl_arena_stats_s ctl_arena_stats_t;
typedef struct ctl_stats_s ctl_stats_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct ctl_node_s {
	bool		named;
};

struct ctl_named_node_s {
	struct ctl_node_s	node;
	const char		*name;
	/* If (nchildren == 0), this is a terminal node. */
	unsigned		nchildren;
	const ctl_node_t	*children;
	int			(*ctl)(const size_t *, size_t, void *,
	    size_t *, void *, size_t);
};

struct ctl_indexed_node_s {
	struct ctl_node_s	node;
	const ctl_named_node_t	*(*index)(const size_t *, size_t, size_t);
};

struct ctl_arena_stats_s {
	bool			initialized;
	unsigned		nthreads;
	const char		*dss;
	ssize_t			lg_dirty_mult;
	size_t			pactive;
	size_t			pdirty;
	arena_stats_t		astats;

	/* Aggregate stats for small size classes, based on bin stats. */
	size_t			allocated_small;
	uint64_t		nmalloc_small;
	uint64_t		ndalloc_small;
	uint64_t		nrequests_small;

	malloc_bin_stats_t	bstats[NBINS];
	malloc_large_stats_t	*lstats;	/* nlclasses elements. */
	malloc_huge_stats_t	*hstats;	/* nhclasses elements. */
};

struct ctl_stats_s {
	size_t			allocated;
	size_t			active;
	size_t			metadata;
	size_t			resident;
	size_t			mapped;
	unsigned		narenas;
	ctl_arena_stats_t	*arenas;	/* (narenas + 1) elements.
*/ }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen); int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp); int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); bool ctl_boot(void); void ctl_prefork(void); void ctl_postfork_parent(void); void ctl_postfork_child(void); #define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ if (je_mallctl(name, oldp, oldlenp, newp, newlen) \ != 0) { \ malloc_printf( \ ": Failure in xmallctl(\"%s\", ...)\n", \ name); \ abort(); \ } \ } while (0) #define xmallctlnametomib(name, mibp, miblenp) do { \ if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \ malloc_printf(": Failure in " \ "xmallctlnametomib(\"%s\", ...)\n", name); \ abort(); \ } \ } while (0) #define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \ if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \ newlen) != 0) { \ malloc_write( \ ": Failure in xmallctlbymib()\n"); \ abort(); \ } \ } while (0) #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/extent.h000066400000000000000000000134131264176561100245700ustar00rootroot00000000000000/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct extent_node_s extent_node_t; #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS /* Tree of extents. Use accessor functions for en_* fields. */ struct extent_node_s { /* Arena from which this extent came, if any. */ arena_t *en_arena; /* Pointer to the extent that this tree node is responsible for. */ void *en_addr; /* Total region size. */ size_t en_size; /* * The zeroed flag is used by chunk recycling code to track whether * memory is zero-filled. */ bool en_zeroed; /* * True if physical memory is committed to the extent, whether * explicitly or implicitly as on a system that overcommits and * satisfies physical memory needs on demand via soft page faults. */ bool en_committed; /* * The achunk flag is used to validate that huge allocation lookups * don't return arena chunks. */ bool en_achunk; /* Profile counters, used for huge objects. */ prof_tctx_t *en_prof_tctx; /* Linkage for arena's runs_dirty and chunks_cache rings. */ arena_runs_dirty_link_t rd; qr(extent_node_t) cc_link; union { /* Linkage for the size/address-ordered tree. */ rb_node(extent_node_t) szad_link; /* Linkage for arena's huge and node_cache lists. */ ql_elm(extent_node_t) ql_link; }; /* Linkage for the address-ordered tree. 
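The xmallctl*() wrappers defined above turn any mallctl failure into an
immediate abort, which lets internal callers read or set control values
without per-call error plumbing. A typical call, for illustration (the
"stats.allocated" name is from jemalloc's public mallctl namespace):

    size_t allocated, sz = sizeof(allocated);
    xmallctl("stats.allocated", &allocated, &sz, NULL, 0);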
*/ rb_node(extent_node_t) ad_link; }; typedef rb_tree(extent_node_t) extent_tree_t; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t) rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t) #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE arena_t *extent_node_arena_get(const extent_node_t *node); void *extent_node_addr_get(const extent_node_t *node); size_t extent_node_size_get(const extent_node_t *node); bool extent_node_zeroed_get(const extent_node_t *node); bool extent_node_committed_get(const extent_node_t *node); bool extent_node_achunk_get(const extent_node_t *node); prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node); void extent_node_arena_set(extent_node_t *node, arena_t *arena); void extent_node_addr_set(extent_node_t *node, void *addr); void extent_node_size_set(extent_node_t *node, size_t size); void extent_node_zeroed_set(extent_node_t *node, bool zeroed); void extent_node_committed_set(extent_node_t *node, bool committed); void extent_node_achunk_set(extent_node_t *node, bool achunk); void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx); void extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size, bool zeroed, bool committed); void extent_node_dirty_linkage_init(extent_node_t *node); void extent_node_dirty_insert(extent_node_t *node, arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty); void extent_node_dirty_remove(extent_node_t *node); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_)) JEMALLOC_INLINE arena_t * extent_node_arena_get(const extent_node_t *node) { return (node->en_arena); } JEMALLOC_INLINE void * extent_node_addr_get(const extent_node_t *node) { return (node->en_addr); } JEMALLOC_INLINE size_t extent_node_size_get(const extent_node_t *node) { return (node->en_size); } JEMALLOC_INLINE bool extent_node_zeroed_get(const extent_node_t *node) { return (node->en_zeroed); } JEMALLOC_INLINE bool extent_node_committed_get(const extent_node_t *node) { assert(!node->en_achunk); return (node->en_committed); } JEMALLOC_INLINE bool extent_node_achunk_get(const extent_node_t *node) { return (node->en_achunk); } JEMALLOC_INLINE prof_tctx_t * extent_node_prof_tctx_get(const extent_node_t *node) { return (node->en_prof_tctx); } JEMALLOC_INLINE void extent_node_arena_set(extent_node_t *node, arena_t *arena) { node->en_arena = arena; } JEMALLOC_INLINE void extent_node_addr_set(extent_node_t *node, void *addr) { node->en_addr = addr; } JEMALLOC_INLINE void extent_node_size_set(extent_node_t *node, size_t size) { node->en_size = size; } JEMALLOC_INLINE void extent_node_zeroed_set(extent_node_t *node, bool zeroed) { node->en_zeroed = zeroed; } JEMALLOC_INLINE void extent_node_committed_set(extent_node_t *node, bool committed) { node->en_committed = committed; } JEMALLOC_INLINE void extent_node_achunk_set(extent_node_t *node, bool achunk) { node->en_achunk = achunk; } JEMALLOC_INLINE void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx) { node->en_prof_tctx = tctx; } JEMALLOC_INLINE void extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size, bool zeroed, bool committed) { extent_node_arena_set(node, arena); extent_node_addr_set(node, addr); extent_node_size_set(node, size); 
extent_node_zeroed_set(node, zeroed); extent_node_committed_set(node, committed); extent_node_achunk_set(node, false); if (config_prof) extent_node_prof_tctx_set(node, NULL); } JEMALLOC_INLINE void extent_node_dirty_linkage_init(extent_node_t *node) { qr_new(&node->rd, rd_link); qr_new(node, cc_link); } JEMALLOC_INLINE void extent_node_dirty_insert(extent_node_t *node, arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty) { qr_meld(runs_dirty, &node->rd, rd_link); qr_meld(chunks_dirty, node, cc_link); } JEMALLOC_INLINE void extent_node_dirty_remove(extent_node_t *node) { qr_remove(&node->rd, rd_link); qr_remove(node, cc_link); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/hash.h000066400000000000000000000174041264176561100242100ustar00rootroot00000000000000/* * The following hash function is based on MurmurHash3, placed into the public * domain by Austin Appleby. See http://code.google.com/p/smhasher/ for * details. */ /******************************************************************************/ #ifdef JEMALLOC_H_TYPES #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE uint32_t hash_x86_32(const void *key, int len, uint32_t seed); void hash_x86_128(const void *key, const int len, uint32_t seed, uint64_t r_out[2]); void hash_x64_128(const void *key, const int len, const uint32_t seed, uint64_t r_out[2]); void hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_)) /******************************************************************************/ /* Internal implementation. 
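 * hash_rotl_*() are the rotate primitives, hash_get_block_*() read input
 * blocks, and hash_fmix_*() are the finalization mixers used by the
 * MurmurHash3 variants below.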
*/ JEMALLOC_INLINE uint32_t hash_rotl_32(uint32_t x, int8_t r) { return ((x << r) | (x >> (32 - r))); } JEMALLOC_INLINE uint64_t hash_rotl_64(uint64_t x, int8_t r) { return ((x << r) | (x >> (64 - r))); } JEMALLOC_INLINE uint32_t hash_get_block_32(const uint32_t *p, int i) { return (p[i]); } JEMALLOC_INLINE uint64_t hash_get_block_64(const uint64_t *p, int i) { return (p[i]); } JEMALLOC_INLINE uint32_t hash_fmix_32(uint32_t h) { h ^= h >> 16; h *= 0x85ebca6b; h ^= h >> 13; h *= 0xc2b2ae35; h ^= h >> 16; return (h); } JEMALLOC_INLINE uint64_t hash_fmix_64(uint64_t k) { k ^= k >> 33; k *= KQU(0xff51afd7ed558ccd); k ^= k >> 33; k *= KQU(0xc4ceb9fe1a85ec53); k ^= k >> 33; return (k); } JEMALLOC_INLINE uint32_t hash_x86_32(const void *key, int len, uint32_t seed) { const uint8_t *data = (const uint8_t *) key; const int nblocks = len / 4; uint32_t h1 = seed; const uint32_t c1 = 0xcc9e2d51; const uint32_t c2 = 0x1b873593; /* body */ { const uint32_t *blocks = (const uint32_t *) (data + nblocks*4); int i; for (i = -nblocks; i; i++) { uint32_t k1 = hash_get_block_32(blocks, i); k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; h1 = hash_rotl_32(h1, 13); h1 = h1*5 + 0xe6546b64; } } /* tail */ { const uint8_t *tail = (const uint8_t *) (data + nblocks*4); uint32_t k1 = 0; switch (len & 3) { case 3: k1 ^= tail[2] << 16; case 2: k1 ^= tail[1] << 8; case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; } } /* finalization */ h1 ^= len; h1 = hash_fmix_32(h1); return (h1); } UNUSED JEMALLOC_INLINE void hash_x86_128(const void *key, const int len, uint32_t seed, uint64_t r_out[2]) { const uint8_t * data = (const uint8_t *) key; const int nblocks = len / 16; uint32_t h1 = seed; uint32_t h2 = seed; uint32_t h3 = seed; uint32_t h4 = seed; const uint32_t c1 = 0x239b961b; const uint32_t c2 = 0xab0e9789; const uint32_t c3 = 0x38b34ae5; const uint32_t c4 = 0xa1e38b93; /* body */ { const uint32_t *blocks = (const uint32_t *) (data + nblocks*16); int i; for (i = -nblocks; i; i++) { uint32_t k1 = hash_get_block_32(blocks, i*4 + 0); uint32_t k2 = hash_get_block_32(blocks, i*4 + 1); uint32_t k3 = hash_get_block_32(blocks, i*4 + 2); uint32_t k4 = hash_get_block_32(blocks, i*4 + 3); k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; h1 = hash_rotl_32(h1, 19); h1 += h2; h1 = h1*5 + 0x561ccd1b; k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; h2 = hash_rotl_32(h2, 17); h2 += h3; h2 = h2*5 + 0x0bcaa747; k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; h3 = hash_rotl_32(h3, 15); h3 += h4; h3 = h3*5 + 0x96cd1c35; k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; h4 = hash_rotl_32(h4, 13); h4 += h1; h4 = h4*5 + 0x32ac3b17; } } /* tail */ { const uint8_t *tail = (const uint8_t *) (data + nblocks*16); uint32_t k1 = 0; uint32_t k2 = 0; uint32_t k3 = 0; uint32_t k4 = 0; switch (len & 15) { case 15: k4 ^= tail[14] << 16; case 14: k4 ^= tail[13] << 8; case 13: k4 ^= tail[12] << 0; k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; case 12: k3 ^= tail[11] << 24; case 11: k3 ^= tail[10] << 16; case 10: k3 ^= tail[ 9] << 8; case 9: k3 ^= tail[ 8] << 0; k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; case 8: k2 ^= tail[ 7] << 24; case 7: k2 ^= tail[ 6] << 16; case 6: k2 ^= tail[ 5] << 8; case 5: k2 ^= tail[ 4] << 0; k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; case 4: k1 ^= tail[ 3] << 24; case 3: k1 ^= tail[ 2] << 16; case 2: k1 ^= tail[ 1] << 8; case 1: k1 ^= tail[ 0] << 0; k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; } } /* 
finalization */ h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len; h1 += h2; h1 += h3; h1 += h4; h2 += h1; h3 += h1; h4 += h1; h1 = hash_fmix_32(h1); h2 = hash_fmix_32(h2); h3 = hash_fmix_32(h3); h4 = hash_fmix_32(h4); h1 += h2; h1 += h3; h1 += h4; h2 += h1; h3 += h1; h4 += h1; r_out[0] = (((uint64_t) h2) << 32) | h1; r_out[1] = (((uint64_t) h4) << 32) | h3; } UNUSED JEMALLOC_INLINE void hash_x64_128(const void *key, const int len, const uint32_t seed, uint64_t r_out[2]) { const uint8_t *data = (const uint8_t *) key; const int nblocks = len / 16; uint64_t h1 = seed; uint64_t h2 = seed; const uint64_t c1 = KQU(0x87c37b91114253d5); const uint64_t c2 = KQU(0x4cf5ad432745937f); /* body */ { const uint64_t *blocks = (const uint64_t *) (data); int i; for (i = 0; i < nblocks; i++) { uint64_t k1 = hash_get_block_64(blocks, i*2 + 0); uint64_t k2 = hash_get_block_64(blocks, i*2 + 1); k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; h1 = hash_rotl_64(h1, 27); h1 += h2; h1 = h1*5 + 0x52dce729; k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; h2 = hash_rotl_64(h2, 31); h2 += h1; h2 = h2*5 + 0x38495ab5; } } /* tail */ { const uint8_t *tail = (const uint8_t*)(data + nblocks*16); uint64_t k1 = 0; uint64_t k2 = 0; switch (len & 15) { case 15: k2 ^= ((uint64_t)(tail[14])) << 48; case 14: k2 ^= ((uint64_t)(tail[13])) << 40; case 13: k2 ^= ((uint64_t)(tail[12])) << 32; case 12: k2 ^= ((uint64_t)(tail[11])) << 24; case 11: k2 ^= ((uint64_t)(tail[10])) << 16; case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0; k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0; k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; } } /* finalization */ h1 ^= len; h2 ^= len; h1 += h2; h2 += h1; h1 = hash_fmix_64(h1); h2 = hash_fmix_64(h2); h1 += h2; h2 += h1; r_out[0] = h1; r_out[1] = h2; } /******************************************************************************/ /* API. 
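 * hash() picks the 128-bit variant suited to the platform: hash_x64_128()
 * on 64-bit little-endian systems, hash_x86_128() otherwise, narrowing the
 * two 64-bit halves into the size_t result words.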
*/ JEMALLOC_INLINE void hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) { #if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN)) hash_x64_128(key, len, seed, (uint64_t *)r_hash); #else uint64_t hashes[2]; hash_x86_128(key, len, seed, hashes); r_hash[0] = (size_t)hashes[0]; r_hash[1] = (size_t)hashes[1]; #endif } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/huge.h000066400000000000000000000027321264176561100242130ustar00rootroot00000000000000/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero, tcache_t *tcache); void *huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment, bool zero, tcache_t *tcache); bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero); void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize, size_t alignment, bool zero, tcache_t *tcache); #ifdef JEMALLOC_JET typedef void (huge_dalloc_junk_t)(void *, size_t); extern huge_dalloc_junk_t *huge_dalloc_junk; #endif void huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache); arena_t *huge_aalloc(const void *ptr); size_t huge_salloc(const void *ptr); prof_tctx_t *huge_prof_tctx_get(const void *ptr); void huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx); void huge_prof_tctx_reset(const void *ptr); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in000066400000000000000000000714261264176561100273600ustar00rootroot00000000000000#ifndef JEMALLOC_INTERNAL_H #define JEMALLOC_INTERNAL_H #include "jemalloc_internal_defs.h" #include "jemalloc/internal/jemalloc_internal_decls.h" #ifdef JEMALLOC_UTRACE #include #endif #define JEMALLOC_NO_DEMANGLE #ifdef JEMALLOC_JET # define JEMALLOC_N(n) jet_##n # include "jemalloc/internal/public_namespace.h" # define JEMALLOC_NO_RENAME # include "../jemalloc@install_suffix@.h" # undef JEMALLOC_NO_RENAME #else # define JEMALLOC_N(n) @private_namespace@##n # include "../jemalloc@install_suffix@.h" #endif #include "jemalloc/internal/private_namespace.h" static const bool config_debug = #ifdef JEMALLOC_DEBUG true #else false #endif ; static const bool have_dss = #ifdef JEMALLOC_DSS true #else false #endif ; static const bool config_fill = #ifdef JEMALLOC_FILL true #else false #endif ; static const bool config_lazy_lock = #ifdef JEMALLOC_LAZY_LOCK true #else false #endif ; static const bool config_prof = #ifdef JEMALLOC_PROF true #else false #endif ; static const bool config_prof_libgcc = #ifdef JEMALLOC_PROF_LIBGCC true #else false #endif ; static const bool config_prof_libunwind = #ifdef JEMALLOC_PROF_LIBUNWIND true #else false #endif ; static const bool maps_coalesce = #ifdef JEMALLOC_MAPS_COALESCE true #else false #endif ; static const bool config_munmap = #ifdef JEMALLOC_MUNMAP true #else 
false #endif ; static const bool config_stats = #ifdef JEMALLOC_STATS true #else false #endif ; static const bool config_tcache = #ifdef JEMALLOC_TCACHE true #else false #endif ; static const bool config_tls = #ifdef JEMALLOC_TLS true #else false #endif ; static const bool config_utrace = #ifdef JEMALLOC_UTRACE true #else false #endif ; static const bool config_valgrind = #ifdef JEMALLOC_VALGRIND true #else false #endif ; static const bool config_xmalloc = #ifdef JEMALLOC_XMALLOC true #else false #endif ; static const bool config_ivsalloc = #ifdef JEMALLOC_IVSALLOC true #else false #endif ; static const bool config_cache_oblivious = #ifdef JEMALLOC_CACHE_OBLIVIOUS true #else false #endif ; #ifdef JEMALLOC_C11ATOMICS #include #endif #ifdef JEMALLOC_ATOMIC9 #include #endif #if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN)) #include #endif #ifdef JEMALLOC_ZONE #include #include #include #include #endif #define RB_COMPACT #include "jemalloc/internal/rb.h" #include "jemalloc/internal/qr.h" #include "jemalloc/internal/ql.h" /* * jemalloc can conceptually be broken into components (arena, tcache, etc.), * but there are circular dependencies that cannot be broken without * substantial performance degradation. In order to reduce the effect on * visual code flow, read the header files in multiple passes, with one of the * following cpp variables defined during each pass: * * JEMALLOC_H_TYPES : Preprocessor-defined constants and psuedo-opaque data * types. * JEMALLOC_H_STRUCTS : Data structures. * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes. * JEMALLOC_H_INLINES : Inline functions. */ /******************************************************************************/ #define JEMALLOC_H_TYPES #include "jemalloc/internal/jemalloc_internal_macros.h" /* Size class index type. */ typedef unsigned szind_t; /* * Flags bits: * * a: arena * t: tcache * 0: unused * z: zero * n: alignment * * aaaaaaaa aaaatttt tttttttt 0znnnnnn */ #define MALLOCX_ARENA_MASK ((int)~0xfffff) #define MALLOCX_ARENA_MAX 0xffe #define MALLOCX_TCACHE_MASK ((int)~0xfff000ffU) #define MALLOCX_TCACHE_MAX 0xffd #define MALLOCX_LG_ALIGN_MASK ((int)0x3f) /* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */ #define MALLOCX_ALIGN_GET_SPECIFIED(flags) \ (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)) #define MALLOCX_ALIGN_GET(flags) \ (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1)) #define MALLOCX_ZERO_GET(flags) \ ((bool)(flags & MALLOCX_ZERO)) #define MALLOCX_TCACHE_GET(flags) \ (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> 8)) - 2) #define MALLOCX_ARENA_GET(flags) \ (((unsigned)(((unsigned)flags) >> 20)) - 1) /* Smallest size class to support. */ #define TINY_MIN (1U << LG_TINY_MIN) /* * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size * classes). 
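 * For example, with LG_QUANTUM = 4 (the setting used below for x86-64),
 * QUANTUM is 16 bytes, so QUANTUM_CEILING(17) rounds a 17-byte request up
 * to 32.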
*/ #ifndef LG_QUANTUM # if (defined(__i386__) || defined(_M_IX86)) # define LG_QUANTUM 4 # endif # ifdef __ia64__ # define LG_QUANTUM 4 # endif # ifdef __alpha__ # define LG_QUANTUM 4 # endif # if (defined(__sparc64__) || defined(__sparcv9)) # define LG_QUANTUM 4 # endif # if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) # define LG_QUANTUM 4 # endif # ifdef __arm__ # define LG_QUANTUM 3 # endif # ifdef __aarch64__ # define LG_QUANTUM 4 # endif # ifdef __hppa__ # define LG_QUANTUM 4 # endif # ifdef __mips__ # define LG_QUANTUM 3 # endif # ifdef __or1k__ # define LG_QUANTUM 3 # endif # ifdef __powerpc__ # define LG_QUANTUM 4 # endif # ifdef __s390__ # define LG_QUANTUM 4 # endif # ifdef __SH4__ # define LG_QUANTUM 4 # endif # ifdef __tile__ # define LG_QUANTUM 4 # endif # ifdef __le32__ # define LG_QUANTUM 4 # endif # ifndef LG_QUANTUM # error "Unknown minimum alignment for architecture; specify via " "--with-lg-quantum" # endif #endif #define QUANTUM ((size_t)(1U << LG_QUANTUM)) #define QUANTUM_MASK (QUANTUM - 1) /* Return the smallest quantum multiple that is >= a. */ #define QUANTUM_CEILING(a) \ (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) #define LONG ((size_t)(1U << LG_SIZEOF_LONG)) #define LONG_MASK (LONG - 1) /* Return the smallest long multiple that is >= a. */ #define LONG_CEILING(a) \ (((a) + LONG_MASK) & ~LONG_MASK) #define SIZEOF_PTR (1U << LG_SIZEOF_PTR) #define PTR_MASK (SIZEOF_PTR - 1) /* Return the smallest (void *) multiple that is >= a. */ #define PTR_CEILING(a) \ (((a) + PTR_MASK) & ~PTR_MASK) /* * Maximum size of L1 cache line. This is used to avoid cache line aliasing. * In addition, this controls the spacing of cacheline-spaced size classes. * * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can * only handle raw constants. */ #define LG_CACHELINE 6 #define CACHELINE 64 #define CACHELINE_MASK (CACHELINE - 1) /* Return the smallest cacheline multiple that is >= s. */ #define CACHELINE_CEILING(s) \ (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) /* Page size. LG_PAGE is determined by the configure script. */ #ifdef PAGE_MASK # undef PAGE_MASK #endif #define PAGE ((size_t)(1U << LG_PAGE)) #define PAGE_MASK ((size_t)(PAGE - 1)) /* Return the smallest pagesize multiple that is >= s. */ #define PAGE_CEILING(s) \ (((s) + PAGE_MASK) & ~PAGE_MASK) /* Return the nearest aligned address at or below a. */ #define ALIGNMENT_ADDR2BASE(a, alignment) \ ((void *)((uintptr_t)(a) & (-(alignment)))) /* Return the offset between a and the nearest aligned address at or below a. */ #define ALIGNMENT_ADDR2OFFSET(a, alignment) \ ((size_t)((uintptr_t)(a) & (alignment - 1))) /* Return the smallest alignment multiple that is >= s. */ #define ALIGNMENT_CEILING(s, alignment) \ (((s) + (alignment - 1)) & (-(alignment))) /* Declare a variable-length array. 
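 * Pre-C99 compilers lack true VLAs, so VARIABLE_ARRAY falls back to
 * alloca() for them.
 */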
*/ #if __STDC_VERSION__ < 199901L # ifdef _MSC_VER # include # define alloca _alloca # else # ifdef JEMALLOC_HAS_ALLOCA_H # include # else # include # endif # endif # define VARIABLE_ARRAY(type, name, count) \ type *name = alloca(sizeof(type) * (count)) #else # define VARIABLE_ARRAY(type, name, count) type name[(count)] #endif #include "jemalloc/internal/valgrind.h" #include "jemalloc/internal/util.h" #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/prng.h" #include "jemalloc/internal/ckh.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/stats.h" #include "jemalloc/internal/ctl.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/tsd.h" #include "jemalloc/internal/mb.h" #include "jemalloc/internal/extent.h" #include "jemalloc/internal/arena.h" #include "jemalloc/internal/bitmap.h" #include "jemalloc/internal/base.h" #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/pages.h" #include "jemalloc/internal/chunk.h" #include "jemalloc/internal/huge.h" #include "jemalloc/internal/tcache.h" #include "jemalloc/internal/hash.h" #include "jemalloc/internal/quarantine.h" #include "jemalloc/internal/prof.h" #undef JEMALLOC_H_TYPES /******************************************************************************/ #define JEMALLOC_H_STRUCTS #include "jemalloc/internal/valgrind.h" #include "jemalloc/internal/util.h" #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/prng.h" #include "jemalloc/internal/ckh.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/stats.h" #include "jemalloc/internal/ctl.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mb.h" #include "jemalloc/internal/bitmap.h" #define JEMALLOC_ARENA_STRUCTS_A #include "jemalloc/internal/arena.h" #undef JEMALLOC_ARENA_STRUCTS_A #include "jemalloc/internal/extent.h" #define JEMALLOC_ARENA_STRUCTS_B #include "jemalloc/internal/arena.h" #undef JEMALLOC_ARENA_STRUCTS_B #include "jemalloc/internal/base.h" #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/pages.h" #include "jemalloc/internal/chunk.h" #include "jemalloc/internal/huge.h" #include "jemalloc/internal/tcache.h" #include "jemalloc/internal/hash.h" #include "jemalloc/internal/quarantine.h" #include "jemalloc/internal/prof.h" #include "jemalloc/internal/tsd.h" #undef JEMALLOC_H_STRUCTS /******************************************************************************/ #define JEMALLOC_H_EXTERNS extern bool opt_abort; extern const char *opt_junk; extern bool opt_junk_alloc; extern bool opt_junk_free; extern size_t opt_quarantine; extern bool opt_redzone; extern bool opt_utrace; extern bool opt_xmalloc; extern bool opt_zero; extern size_t opt_narenas; extern bool in_valgrind; /* Number of CPUs. */ extern unsigned ncpus; /* * index2size_tab encodes the same information as could be computed (at * unacceptable cost in some code paths) by index2size_compute(). */ extern size_t const index2size_tab[NSIZES]; /* * size2index_tab is a compact lookup table that rounds request sizes up to * size classes. In order to reduce cache footprint, the table is compressed, * and all accesses are via size2index(). 
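 * Each table entry covers a 2^LG_TINY_MIN byte span of request sizes,
 * which is why size2index_lookup() indexes with (size-1) >> LG_TINY_MIN.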
*/ extern uint8_t const size2index_tab[]; arena_t *a0get(void); void *a0malloc(size_t size); void a0dalloc(void *ptr); void *bootstrap_malloc(size_t size); void *bootstrap_calloc(size_t num, size_t size); void bootstrap_free(void *ptr); arena_t *arenas_extend(unsigned ind); arena_t *arena_init(unsigned ind); unsigned narenas_total_get(void); arena_t *arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing); arena_t *arena_choose_hard(tsd_t *tsd); void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind); unsigned arena_nbound(unsigned ind); void thread_allocated_cleanup(tsd_t *tsd); void thread_deallocated_cleanup(tsd_t *tsd); void arena_cleanup(tsd_t *tsd); void arenas_cache_cleanup(tsd_t *tsd); void narenas_cache_cleanup(tsd_t *tsd); void arenas_cache_bypass_cleanup(tsd_t *tsd); void jemalloc_prefork(void); void jemalloc_postfork_parent(void); void jemalloc_postfork_child(void); #include "jemalloc/internal/valgrind.h" #include "jemalloc/internal/util.h" #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/prng.h" #include "jemalloc/internal/ckh.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/stats.h" #include "jemalloc/internal/ctl.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mb.h" #include "jemalloc/internal/bitmap.h" #include "jemalloc/internal/extent.h" #include "jemalloc/internal/arena.h" #include "jemalloc/internal/base.h" #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/pages.h" #include "jemalloc/internal/chunk.h" #include "jemalloc/internal/huge.h" #include "jemalloc/internal/tcache.h" #include "jemalloc/internal/hash.h" #include "jemalloc/internal/quarantine.h" #include "jemalloc/internal/prof.h" #include "jemalloc/internal/tsd.h" #undef JEMALLOC_H_EXTERNS /******************************************************************************/ #define JEMALLOC_H_INLINES #include "jemalloc/internal/valgrind.h" #include "jemalloc/internal/util.h" #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/prng.h" #include "jemalloc/internal/ckh.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/stats.h" #include "jemalloc/internal/ctl.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/tsd.h" #include "jemalloc/internal/mb.h" #include "jemalloc/internal/extent.h" #include "jemalloc/internal/base.h" #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/pages.h" #include "jemalloc/internal/chunk.h" #include "jemalloc/internal/huge.h" #ifndef JEMALLOC_ENABLE_INLINE szind_t size2index_compute(size_t size); szind_t size2index_lookup(size_t size); szind_t size2index(size_t size); size_t index2size_compute(szind_t index); size_t index2size_lookup(szind_t index); size_t index2size(szind_t index); size_t s2u_compute(size_t size); size_t s2u_lookup(size_t size); size_t s2u(size_t size); size_t sa2u(size_t size, size_t alignment); arena_t *arena_choose(tsd_t *tsd, arena_t *arena); arena_t *arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing, bool refresh_if_missing); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) JEMALLOC_INLINE szind_t size2index_compute(size_t size) { #if (NTBINS != 0) if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; size_t lg_ceil = lg_floor(pow2_ceil(size)); return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin); } #endif { size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ? 
(ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1)) : lg_floor((size<<1)-1); size_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 : x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM); size_t grp = shift << LG_SIZE_CLASS_GROUP; size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; size_t delta_inverse_mask = ZI(-1) << lg_delta; size_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); size_t index = NTBINS + grp + mod; return (index); } } JEMALLOC_ALWAYS_INLINE szind_t size2index_lookup(size_t size) { assert(size <= LOOKUP_MAXCLASS); { size_t ret = ((size_t)(size2index_tab[(size-1) >> LG_TINY_MIN])); assert(ret == size2index_compute(size)); return (ret); } } JEMALLOC_ALWAYS_INLINE szind_t size2index(size_t size) { assert(size > 0); if (likely(size <= LOOKUP_MAXCLASS)) return (size2index_lookup(size)); return (size2index_compute(size)); } JEMALLOC_INLINE size_t index2size_compute(szind_t index) { #if (NTBINS > 0) if (index < NTBINS) return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index)); #endif { size_t reduced_index = index - NTBINS; size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP; size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); size_t grp_size_mask = ~((!!grp)-1); size_t grp_size = ((ZU(1) << (LG_QUANTUM + (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask; size_t shift = (grp == 0) ? 1 : grp; size_t lg_delta = shift + (LG_QUANTUM-1); size_t mod_size = (mod+1) << lg_delta; size_t usize = grp_size + mod_size; return (usize); } } JEMALLOC_ALWAYS_INLINE size_t index2size_lookup(szind_t index) { size_t ret = (size_t)index2size_tab[index]; assert(ret == index2size_compute(index)); return (ret); } JEMALLOC_ALWAYS_INLINE size_t index2size(szind_t index) { assert(index < NSIZES); return (index2size_lookup(index)); } JEMALLOC_ALWAYS_INLINE size_t s2u_compute(size_t size) { #if (NTBINS > 0) if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; size_t lg_ceil = lg_floor(pow2_ceil(size)); return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) : (ZU(1) << lg_ceil)); } #endif { size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ? (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1)) : lg_floor((size<<1)-1); size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; size_t delta = ZU(1) << lg_delta; size_t delta_mask = delta - 1; size_t usize = (size + delta_mask) & ~delta_mask; return (usize); } } JEMALLOC_ALWAYS_INLINE size_t s2u_lookup(size_t size) { size_t ret = index2size_lookup(size2index_lookup(size)); assert(ret == s2u_compute(size)); return (ret); } /* * Compute usable size that would result from allocating an object with the * specified size. */ JEMALLOC_ALWAYS_INLINE size_t s2u(size_t size) { assert(size > 0); if (likely(size <= LOOKUP_MAXCLASS)) return (s2u_lookup(size)); return (s2u_compute(size)); } /* * Compute usable size that would result from allocating an object with the * specified size and alignment. */ JEMALLOC_ALWAYS_INLINE size_t sa2u(size_t size, size_t alignment) { size_t usize; assert(alignment != 0 && ((alignment - 1) & alignment) == 0); /* Try for a small size class. */ if (size <= SMALL_MAXCLASS && alignment < PAGE) { /* * Round size up to the nearest multiple of alignment. * * This done, we can take advantage of the fact that for each * small size class, every object is aligned at the smallest * power of two that is non-zero in the base two representation * of the size. 
For example: * * Size | Base 2 | Minimum alignment * -----+----------+------------------ * 96 | 1100000 | 32 * 144 | 10100000 | 32 * 192 | 11000000 | 64 */ usize = s2u(ALIGNMENT_CEILING(size, alignment)); if (usize < LARGE_MINCLASS) return (usize); } /* Try for a large size class. */ if (likely(size <= large_maxclass) && likely(alignment < chunksize)) { /* * We can't achieve subpage alignment, so round up alignment * to the minimum that can actually be supported. */ alignment = PAGE_CEILING(alignment); /* Make sure result is a large size class. */ usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size); /* * Calculate the size of the over-size run that arena_palloc() * would need to allocate in order to guarantee the alignment. */ if (usize + large_pad + alignment - PAGE <= arena_maxrun) return (usize); } /* Huge size class. Beware of size_t overflow. */ /* * We can't achieve subchunk alignment, so round up alignment to the * minimum that can actually be supported. */ alignment = CHUNK_CEILING(alignment); if (alignment == 0) { /* size_t overflow. */ return (0); } /* Make sure result is a huge size class. */ if (size <= chunksize) usize = chunksize; else { usize = s2u(size); if (usize < size) { /* size_t overflow. */ return (0); } } /* * Calculate the multi-chunk mapping that huge_palloc() would need in * order to guarantee the alignment. */ if (usize + alignment - PAGE < usize) { /* size_t overflow. */ return (0); } return (usize); } /* Choose an arena based on a per-thread value. */ JEMALLOC_INLINE arena_t * arena_choose(tsd_t *tsd, arena_t *arena) { arena_t *ret; if (arena != NULL) return (arena); if (unlikely((ret = tsd_arena_get(tsd)) == NULL)) ret = arena_choose_hard(tsd); return (ret); } JEMALLOC_INLINE arena_t * arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing, bool refresh_if_missing) { arena_t *arena; arena_t **arenas_cache = tsd_arenas_cache_get(tsd); /* init_if_missing requires refresh_if_missing. */ assert(!init_if_missing || refresh_if_missing); if (unlikely(arenas_cache == NULL)) { /* arenas_cache hasn't been initialized yet. */ return (arena_get_hard(tsd, ind, init_if_missing)); } if (unlikely(ind >= tsd_narenas_cache_get(tsd))) { /* * ind is invalid, cache is old (too small), or arena to be * initialized. */ return (refresh_if_missing ? arena_get_hard(tsd, ind, init_if_missing) : NULL); } arena = arenas_cache[ind]; if (likely(arena != NULL) || !refresh_if_missing) return (arena); return (arena_get_hard(tsd, ind, init_if_missing)); } #endif #include "jemalloc/internal/bitmap.h" /* * Include portions of arena.h interleaved with tcache.h in order to resolve * circular dependencies. 
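 * Part A of arena.h is read before tcache.h and part B after it, so that
 * each half sees only inlines that have already been defined.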
*/ #define JEMALLOC_ARENA_INLINE_A #include "jemalloc/internal/arena.h" #undef JEMALLOC_ARENA_INLINE_A #include "jemalloc/internal/tcache.h" #define JEMALLOC_ARENA_INLINE_B #include "jemalloc/internal/arena.h" #undef JEMALLOC_ARENA_INLINE_B #include "jemalloc/internal/hash.h" #include "jemalloc/internal/quarantine.h" #ifndef JEMALLOC_ENABLE_INLINE arena_t *iaalloc(const void *ptr); size_t isalloc(const void *ptr, bool demote); void *iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache, bool is_metadata, arena_t *arena); void *imalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena); void *imalloc(tsd_t *tsd, size_t size); void *icalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena); void *icalloc(tsd_t *tsd, size_t size); void *ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero, tcache_t *tcache, bool is_metadata, arena_t *arena); void *ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena); void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero); size_t ivsalloc(const void *ptr, bool demote); size_t u2rz(size_t usize); size_t p2rz(const void *ptr); void idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata); void idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache); void idalloc(tsd_t *tsd, void *ptr); void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache); void isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache); void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache); void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena); void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena); void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero); bool ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) JEMALLOC_ALWAYS_INLINE arena_t * iaalloc(const void *ptr) { assert(ptr != NULL); return (arena_aalloc(ptr)); } /* * Typical usage: * void *ptr = [...] * size_t sz = isalloc(ptr, config_prof); */ JEMALLOC_ALWAYS_INLINE size_t isalloc(const void *ptr, bool demote) { assert(ptr != NULL); /* Demotion only makes sense if config_prof is true. 
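 * With profiling enabled, sampled small allocations may be backed by
 * larger size classes; demote=true reports the size class that the
 * application originally requested.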
*/ assert(config_prof || !demote); return (arena_salloc(ptr, demote)); } JEMALLOC_ALWAYS_INLINE void * iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache, bool is_metadata, arena_t *arena) { void *ret; assert(size != 0); ret = arena_malloc(tsd, arena, size, zero, tcache); if (config_stats && is_metadata && likely(ret != NULL)) { arena_metadata_allocated_add(iaalloc(ret), isalloc(ret, config_prof)); } return (ret); } JEMALLOC_ALWAYS_INLINE void * imalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena) { return (iallocztm(tsd, size, false, tcache, false, arena)); } JEMALLOC_ALWAYS_INLINE void * imalloc(tsd_t *tsd, size_t size) { return (iallocztm(tsd, size, false, tcache_get(tsd, true), false, NULL)); } JEMALLOC_ALWAYS_INLINE void * icalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena) { return (iallocztm(tsd, size, true, tcache, false, arena)); } JEMALLOC_ALWAYS_INLINE void * icalloc(tsd_t *tsd, size_t size) { return (iallocztm(tsd, size, true, tcache_get(tsd, true), false, NULL)); } JEMALLOC_ALWAYS_INLINE void * ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero, tcache_t *tcache, bool is_metadata, arena_t *arena) { void *ret; assert(usize != 0); assert(usize == sa2u(usize, alignment)); ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache); assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); if (config_stats && is_metadata && likely(ret != NULL)) { arena_metadata_allocated_add(iaalloc(ret), isalloc(ret, config_prof)); } return (ret); } JEMALLOC_ALWAYS_INLINE void * ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) { return (ipallocztm(tsd, usize, alignment, zero, tcache, false, arena)); } JEMALLOC_ALWAYS_INLINE void * ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) { return (ipallocztm(tsd, usize, alignment, zero, tcache_get(tsd, NULL), false, NULL)); } JEMALLOC_ALWAYS_INLINE size_t ivsalloc(const void *ptr, bool demote) { extent_node_t *node; /* Return 0 if ptr is not within a chunk managed by jemalloc. */ node = chunk_lookup(ptr, false); if (node == NULL) return (0); /* Only arena chunks should be looked up via interior pointers. 
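 * Huge allocations are registered by their base address only, so a lookup
 * that lands inside an extent must correspond to an arena chunk (hence
 * the assertion below).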
*/ assert(extent_node_addr_get(node) == ptr || extent_node_achunk_get(node)); return (isalloc(ptr, demote)); } JEMALLOC_INLINE size_t u2rz(size_t usize) { size_t ret; if (usize <= SMALL_MAXCLASS) { szind_t binind = size2index(usize); ret = arena_bin_info[binind].redzone_size; } else ret = 0; return (ret); } JEMALLOC_INLINE size_t p2rz(const void *ptr) { size_t usize = isalloc(ptr, false); return (u2rz(usize)); } JEMALLOC_ALWAYS_INLINE void idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata) { assert(ptr != NULL); if (config_stats && is_metadata) { arena_metadata_allocated_sub(iaalloc(ptr), isalloc(ptr, config_prof)); } arena_dalloc(tsd, ptr, tcache); } JEMALLOC_ALWAYS_INLINE void idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache) { idalloctm(tsd, ptr, tcache, false); } JEMALLOC_ALWAYS_INLINE void idalloc(tsd_t *tsd, void *ptr) { idalloctm(tsd, ptr, tcache_get(tsd, false), false); } JEMALLOC_ALWAYS_INLINE void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache) { if (config_fill && unlikely(opt_quarantine)) quarantine(tsd, ptr); else idalloctm(tsd, ptr, tcache, false); } JEMALLOC_ALWAYS_INLINE void isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache) { arena_sdalloc(tsd, ptr, size, tcache); } JEMALLOC_ALWAYS_INLINE void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache) { if (config_fill && unlikely(opt_quarantine)) quarantine(tsd, ptr); else isdalloct(tsd, ptr, size, tcache); } JEMALLOC_ALWAYS_INLINE void * iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) { void *p; size_t usize, copysize; usize = sa2u(size + extra, alignment); if (usize == 0) return (NULL); p = ipalloct(tsd, usize, alignment, zero, tcache, arena); if (p == NULL) { if (extra == 0) return (NULL); /* Try again, without extra this time. */ usize = sa2u(size, alignment); if (usize == 0) return (NULL); p = ipalloct(tsd, usize, alignment, zero, tcache, arena); if (p == NULL) return (NULL); } /* * Copy at most size bytes (not size+extra), since the caller has no * expectation that the extra bytes will be reliably preserved. */ copysize = (size < oldsize) ? size : oldsize; memcpy(p, ptr, copysize); isqalloc(tsd, ptr, oldsize, tcache); return (p); } JEMALLOC_ALWAYS_INLINE void * iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) { assert(ptr != NULL); assert(size != 0); if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) != 0) { /* * Existing object alignment is inadequate; allocate new space * and copy. */ return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment, zero, tcache, arena)); } return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero, tcache)); } JEMALLOC_ALWAYS_INLINE void * iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero) { return (iralloct(tsd, ptr, oldsize, size, alignment, zero, tcache_get(tsd, true), NULL)); } JEMALLOC_ALWAYS_INLINE bool ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero) { assert(ptr != NULL); assert(size != 0); if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) != 0) { /* Existing object alignment is inadequate. 
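 * ixalloc() resizes in place and cannot move the object, so a stricter
 * alignment request has to be reported as failure (true).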
*/ return (true); } return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero)); } #endif #include "jemalloc/internal/prof.h" #undef JEMALLOC_H_INLINES /******************************************************************************/ #endif /* JEMALLOC_INTERNAL_H */ disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h000066400000000000000000000025021264176561100301120ustar00rootroot00000000000000#ifndef JEMALLOC_INTERNAL_DECLS_H #define JEMALLOC_INTERNAL_DECLS_H #include #ifdef _WIN32 # include # include "msvc_compat/windows_extra.h" #else # include # include # if !defined(__pnacl__) && !defined(__native_client__) # include # if !defined(SYS_write) && defined(__NR_write) # define SYS_write __NR_write # endif # include # endif # include # include #endif #include #include #ifndef SIZE_T_MAX # define SIZE_T_MAX SIZE_MAX #endif #include #include #include #include #include #include #ifndef offsetof # define offsetof(type, member) ((size_t)&(((type *)NULL)->member)) #endif #include #include #include #ifdef _MSC_VER # include typedef intptr_t ssize_t; # define PATH_MAX 1024 # define STDERR_FILENO 2 # define __func__ __FUNCTION__ # ifdef JEMALLOC_HAS_RESTRICT # define restrict __restrict # endif /* Disable warnings about deprecated system functions. */ # pragma warning(disable: 4996) #if _MSC_VER < 1800 static int isblank(int c) { return (c == '\t' || c == ' '); } #endif #else # include #endif #include #endif /* JEMALLOC_INTERNAL_H */ disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in000066400000000000000000000170131264176561100303510ustar00rootroot00000000000000#ifndef JEMALLOC_INTERNAL_DEFS_H_ #define JEMALLOC_INTERNAL_DEFS_H_ /* * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all * public APIs to be prefixed. This makes it possible, with some care, to use * multiple allocators simultaneously. */ #undef JEMALLOC_PREFIX #undef JEMALLOC_CPREFIX /* * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. * For shared libraries, symbol visibility mechanisms prevent these symbols * from being exported, but for static libraries, naming collisions are a real * possibility. */ #undef JEMALLOC_PRIVATE_NAMESPACE /* * Hyper-threaded CPUs may need a special instruction inside spin loops in * order to yield to another virtual CPU. */ #undef CPU_SPINWAIT /* Defined if C11 atomics are available. */ #undef JEMALLOC_C11ATOMICS /* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */ #undef JEMALLOC_ATOMIC9 /* * Defined if OSAtomic*() functions are available, as provided by Darwin, and * documented in the atomic(3) manual page. */ #undef JEMALLOC_OSATOMIC /* * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the * functions are defined in libgcc instead of being inlines). */ #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 /* * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the * functions are defined in libgcc instead of being inlines). */ #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 /* * Defined if __builtin_clz() and __builtin_clzl() are available. */ #undef JEMALLOC_HAVE_BUILTIN_CLZ /* * Defined if madvise(2) is available. 
*/ #undef JEMALLOC_HAVE_MADVISE /* * Defined if OSSpin*() functions are available, as provided by Darwin, and * documented in the spinlock(3) manual page. */ #undef JEMALLOC_OSSPIN /* * Defined if secure_getenv(3) is available. */ #undef JEMALLOC_HAVE_SECURE_GETENV /* * Defined if issetugid(2) is available. */ #undef JEMALLOC_HAVE_ISSETUGID /* * Defined if _malloc_thread_cleanup() exists. At least in the case of * FreeBSD, pthread_key_create() allocates, which if used during malloc * bootstrapping will cause recursion into the pthreads library. Therefore, if * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in * malloc_tsd. */ #undef JEMALLOC_MALLOC_THREAD_CLEANUP /* * Defined if threaded initialization is known to be safe on this platform. * Among other things, it must be possible to initialize a mutex without * triggering allocation in order for threaded allocation to be safe. */ #undef JEMALLOC_THREADED_INIT /* * Defined if the pthreads implementation defines * _pthread_mutex_init_calloc_cb(), in which case the function is used in order * to avoid recursive allocation during mutex initialization. */ #undef JEMALLOC_MUTEX_INIT_CB /* Non-empty if the tls_model attribute is supported. */ #undef JEMALLOC_TLS_MODEL /* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */ #undef JEMALLOC_CC_SILENCE /* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */ #undef JEMALLOC_CODE_COVERAGE /* * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables * inline functions. */ #undef JEMALLOC_DEBUG /* JEMALLOC_STATS enables statistics calculation. */ #undef JEMALLOC_STATS /* JEMALLOC_PROF enables allocation profiling. */ #undef JEMALLOC_PROF /* Use libunwind for profile backtracing if defined. */ #undef JEMALLOC_PROF_LIBUNWIND /* Use libgcc for profile backtracing if defined. */ #undef JEMALLOC_PROF_LIBGCC /* Use gcc intrinsics for profile backtracing if defined. */ #undef JEMALLOC_PROF_GCC /* * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects. * This makes it possible to allocate/deallocate objects without any locking * when the cache is in the steady state. */ #undef JEMALLOC_TCACHE /* * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage * segment (DSS). */ #undef JEMALLOC_DSS /* Support memory filling (junk/zero/quarantine/redzone). */ #undef JEMALLOC_FILL /* Support utrace(2)-based tracing. */ #undef JEMALLOC_UTRACE /* Support Valgrind. */ #undef JEMALLOC_VALGRIND /* Support optional abort() on OOM. */ #undef JEMALLOC_XMALLOC /* Support lazy locking (avoid locking unless a second thread is launched). */ #undef JEMALLOC_LAZY_LOCK /* Minimum size class to support is 2^LG_TINY_MIN bytes. */ #undef LG_TINY_MIN /* * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size * classes). */ #undef LG_QUANTUM /* One page is 2^LG_PAGE bytes. */ #undef LG_PAGE /* * If defined, adjacent virtual memory mappings with identical attributes * automatically coalesce, and they fragment when changes are made to subranges. * This is the normal order of things for mmap()/munmap(), but on Windows * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e. * mappings do *not* coalesce/fragment. */ #undef JEMALLOC_MAPS_COALESCE /* * If defined, use munmap() to unmap freed chunks, rather than storing them for * later reuse. This is disabled by default on Linux because common sequences * of mmap()/munmap() calls will cause virtual memory map holes. 
*/ #undef JEMALLOC_MUNMAP /* TLS is used to map arenas and magazine caches to threads. */ #undef JEMALLOC_TLS /* * ffs()/ffsl() functions to use for bitmapping. Don't use these directly; * instead, use jemalloc_ffs() or jemalloc_ffsl() from util.h. */ #undef JEMALLOC_INTERNAL_FFSL #undef JEMALLOC_INTERNAL_FFS /* * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside * within jemalloc-owned chunks before dereferencing them. */ #undef JEMALLOC_IVSALLOC /* * If defined, explicitly attempt to more uniformly distribute large allocation * pointer alignments across all cache indices. */ #undef JEMALLOC_CACHE_OBLIVIOUS /* * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. */ #undef JEMALLOC_ZONE #undef JEMALLOC_ZONE_VERSION /* * Methods for purging unused pages differ between operating systems. * * madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages, * such that new pages will be demand-zeroed if * the address region is later touched. * madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being * unused, such that they will be discarded rather * than swapped out. */ #undef JEMALLOC_PURGE_MADVISE_DONTNEED #undef JEMALLOC_PURGE_MADVISE_FREE /* Define if operating system has alloca.h header. */ #undef JEMALLOC_HAS_ALLOCA_H /* C99 restrict keyword supported. */ #undef JEMALLOC_HAS_RESTRICT /* For use by hash code. */ #undef JEMALLOC_BIG_ENDIAN /* sizeof(int) == 2^LG_SIZEOF_INT. */ #undef LG_SIZEOF_INT /* sizeof(long) == 2^LG_SIZEOF_LONG. */ #undef LG_SIZEOF_LONG /* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ #undef LG_SIZEOF_INTMAX_T /* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */ #undef JEMALLOC_GLIBC_MALLOC_HOOK /* glibc memalign hook. */ #undef JEMALLOC_GLIBC_MEMALIGN_HOOK /* Adaptive mutex support in pthreads. */ #undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP /* * If defined, jemalloc symbols are not exported (doesn't work when * JEMALLOC_PREFIX is not defined). */ #undef JEMALLOC_EXPORT #endif /* JEMALLOC_INTERNAL_DEFS_H_ */ disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h000066400000000000000000000032051264176561100303050ustar00rootroot00000000000000/* * JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for * functions that are static inline functions if inlining is enabled, and * single-definition library-private functions if inlining is disabled. * * JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in * which case the denoted functions are always static, regardless of whether * inlining is enabled. */ #if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE) /* Disable inlining to make debugging/profiling easier. 
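 * The macros below expand to nothing (or to plain "static" for the _C
 * variants) and "inline" itself is defined away, so each function gets a
 * single out-of-line definition that debuggers can step into.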
*/ # define JEMALLOC_ALWAYS_INLINE # define JEMALLOC_ALWAYS_INLINE_C static # define JEMALLOC_INLINE # define JEMALLOC_INLINE_C static # define inline #else # define JEMALLOC_ENABLE_INLINE # ifdef JEMALLOC_HAVE_ATTR # define JEMALLOC_ALWAYS_INLINE \ static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline) # define JEMALLOC_ALWAYS_INLINE_C \ static inline JEMALLOC_ATTR(always_inline) # else # define JEMALLOC_ALWAYS_INLINE static inline # define JEMALLOC_ALWAYS_INLINE_C static inline # endif # define JEMALLOC_INLINE static inline # define JEMALLOC_INLINE_C static inline # ifdef _MSC_VER # define inline _inline # endif #endif #ifdef JEMALLOC_CC_SILENCE # define UNUSED JEMALLOC_ATTR(unused) #else # define UNUSED #endif #define ZU(z) ((size_t)z) #define ZI(z) ((ssize_t)z) #define QU(q) ((uint64_t)q) #define QI(q) ((int64_t)q) #define KZU(z) ZU(z##ULL) #define KZI(z) ZI(z##LL) #define KQU(q) QU(q##ULL) #define KQI(q) QI(q##LL) #ifndef __DECONST # define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) #endif #ifndef JEMALLOC_HAS_RESTRICT # define restrict #endif disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/mb.h000066400000000000000000000051771264176561100236670ustar00rootroot00000000000000/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE void mb_write(void); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_)) #ifdef __i386__ /* * According to the Intel Architecture Software Developer's Manual, current * processors execute instructions in order from the perspective of other * processors in a multiprocessor system, but 1) Intel reserves the right to * change that, and 2) the compiler's optimizer could re-order instructions if * there weren't some form of barrier. Therefore, even if running on an * architecture that does not need memory barriers (everything through at least * i686), an "optimizer barrier" is necessary. */ JEMALLOC_INLINE void mb_write(void) { # if 0 /* This is a true memory barrier. */ asm volatile ("pusha;" "xor %%eax,%%eax;" "cpuid;" "popa;" : /* Outputs. */ : /* Inputs. */ : "memory" /* Clobbers. */ ); #else /* * This is hopefully enough to keep the compiler from reordering * instructions around this one. */ asm volatile ("nop;" : /* Outputs. */ : /* Inputs. */ : "memory" /* Clobbers. */ ); #endif } #elif (defined(__amd64__) || defined(__x86_64__)) JEMALLOC_INLINE void mb_write(void) { asm volatile ("sfence" : /* Outputs. */ : /* Inputs. */ : "memory" /* Clobbers. */ ); } #elif defined(__powerpc__) JEMALLOC_INLINE void mb_write(void) { asm volatile ("eieio" : /* Outputs. */ : /* Inputs. */ : "memory" /* Clobbers. */ ); } #elif defined(__sparc64__) JEMALLOC_INLINE void mb_write(void) { asm volatile ("membar #StoreStore" : /* Outputs. */ : /* Inputs. */ : "memory" /* Clobbers. */ ); } #elif defined(__tile__) JEMALLOC_INLINE void mb_write(void) { __sync_synchronize(); } #else /* * This is much slower than a simple memory barrier, but the semantics of mutex * unlock make this work. 
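 * POSIX requires pthread mutex lock/unlock to synchronize memory, so the
 * lock/unlock pair below implies a full memory barrier.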
*/ JEMALLOC_INLINE void mb_write(void) { malloc_mutex_t mtx; malloc_mutex_init(&mtx); malloc_mutex_lock(&mtx); malloc_mutex_unlock(&mtx); } #endif #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/mutex.h000066400000000000000000000055561264176561100244340ustar00rootroot00000000000000/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct malloc_mutex_s malloc_mutex_t; #ifdef _WIN32 # define MALLOC_MUTEX_INITIALIZER #elif (defined(JEMALLOC_OSSPIN)) # define MALLOC_MUTEX_INITIALIZER {0} #elif (defined(JEMALLOC_MUTEX_INIT_CB)) # define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL} #else # if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \ defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)) # define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP # define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP} # else # define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT # define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER} # endif #endif #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct malloc_mutex_s { #ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 SRWLOCK lock; # else CRITICAL_SECTION lock; # endif #elif (defined(JEMALLOC_OSSPIN)) OSSpinLock lock; #elif (defined(JEMALLOC_MUTEX_INIT_CB)) pthread_mutex_t lock; malloc_mutex_t *postponed_next; #else pthread_mutex_t lock; #endif }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #ifdef JEMALLOC_LAZY_LOCK extern bool isthreaded; #else # undef isthreaded /* Undo private_namespace.h definition. 
*/ # define isthreaded true #endif bool malloc_mutex_init(malloc_mutex_t *mutex); void malloc_mutex_prefork(malloc_mutex_t *mutex); void malloc_mutex_postfork_parent(malloc_mutex_t *mutex); void malloc_mutex_postfork_child(malloc_mutex_t *mutex); bool mutex_boot(void); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE void malloc_mutex_lock(malloc_mutex_t *mutex); void malloc_mutex_unlock(malloc_mutex_t *mutex); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) JEMALLOC_INLINE void malloc_mutex_lock(malloc_mutex_t *mutex) { if (isthreaded) { #ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 AcquireSRWLockExclusive(&mutex->lock); # else EnterCriticalSection(&mutex->lock); # endif #elif (defined(JEMALLOC_OSSPIN)) OSSpinLockLock(&mutex->lock); #else pthread_mutex_lock(&mutex->lock); #endif } } JEMALLOC_INLINE void malloc_mutex_unlock(malloc_mutex_t *mutex) { if (isthreaded) { #ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 ReleaseSRWLockExclusive(&mutex->lock); # else LeaveCriticalSection(&mutex->lock); # endif #elif (defined(JEMALLOC_OSSPIN)) OSSpinLockUnlock(&mutex->lock); #else pthread_mutex_unlock(&mutex->lock); #endif } } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/pages.h000066400000000000000000000016541264176561100243640ustar00rootroot00000000000000/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void *pages_map(void *addr, size_t size); void pages_unmap(void *addr, size_t size); void *pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size); bool pages_commit(void *addr, size_t size); bool pages_decommit(void *addr, size_t size); bool pages_purge(void *addr, size_t size); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/private_namespace.sh000077500000000000000000000001351264176561100271320ustar00rootroot00000000000000#!/bin/sh for symbol in `cat $1` ; do echo "#define ${symbol} JEMALLOC_N(${symbol})" done disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/private_symbols.txt000066400000000000000000000210711264176561100270720ustar00rootroot00000000000000a0dalloc a0get a0malloc arena_aalloc arena_alloc_junk_small arena_bin_index arena_bin_info arena_bitselm_get arena_boot arena_choose arena_choose_hard arena_chunk_alloc_huge arena_chunk_cache_maybe_insert arena_chunk_cache_maybe_remove arena_chunk_dalloc_huge arena_chunk_ralloc_huge_expand arena_chunk_ralloc_huge_shrink arena_chunk_ralloc_huge_similar arena_cleanup arena_dalloc arena_dalloc_bin arena_dalloc_bin_junked_locked arena_dalloc_junk_large arena_dalloc_junk_small arena_dalloc_large arena_dalloc_large_junked_locked arena_dalloc_small arena_dss_prec_get arena_dss_prec_set arena_get arena_get_hard arena_init arena_lg_dirty_mult_default_get arena_lg_dirty_mult_default_set 
arena_lg_dirty_mult_get arena_lg_dirty_mult_set arena_malloc arena_malloc_large arena_malloc_small arena_mapbits_allocated_get arena_mapbits_binind_get arena_mapbits_decommitted_get arena_mapbits_dirty_get arena_mapbits_get arena_mapbits_internal_set arena_mapbits_large_binind_set arena_mapbits_large_get arena_mapbits_large_set arena_mapbits_large_size_get arena_mapbitsp_get arena_mapbitsp_read arena_mapbitsp_write arena_mapbits_size_decode arena_mapbits_size_encode arena_mapbits_small_runind_get arena_mapbits_small_set arena_mapbits_unallocated_set arena_mapbits_unallocated_size_get arena_mapbits_unallocated_size_set arena_mapbits_unzeroed_get arena_maxrun arena_maybe_purge arena_metadata_allocated_add arena_metadata_allocated_get arena_metadata_allocated_sub arena_migrate arena_miscelm_get arena_miscelm_to_pageind arena_miscelm_to_rpages arena_nbound arena_new arena_node_alloc arena_node_dalloc arena_palloc arena_postfork_child arena_postfork_parent arena_prefork arena_prof_accum arena_prof_accum_impl arena_prof_accum_locked arena_prof_promoted arena_prof_tctx_get arena_prof_tctx_reset arena_prof_tctx_set arena_ptr_small_binind_get arena_purge_all arena_quarantine_junk_small arena_ralloc arena_ralloc_junk_large arena_ralloc_no_move arena_rd_to_miscelm arena_redzone_corruption arena_run_regind arena_run_to_miscelm arena_salloc arenas_cache_bypass_cleanup arenas_cache_cleanup arena_sdalloc arena_stats_merge arena_tcache_fill_small atomic_add_p atomic_add_u atomic_add_uint32 atomic_add_uint64 atomic_add_z atomic_cas_p atomic_cas_u atomic_cas_uint32 atomic_cas_uint64 atomic_cas_z atomic_sub_p atomic_sub_u atomic_sub_uint32 atomic_sub_uint64 atomic_sub_z base_alloc base_boot base_postfork_child base_postfork_parent base_prefork base_stats_get bitmap_full bitmap_get bitmap_info_init bitmap_info_ngroups bitmap_init bitmap_set bitmap_sfu bitmap_size bitmap_unset bootstrap_calloc bootstrap_free bootstrap_malloc bt_init buferror chunk_alloc_base chunk_alloc_cache chunk_alloc_dss chunk_alloc_mmap chunk_alloc_wrapper chunk_boot chunk_dalloc_arena chunk_dalloc_cache chunk_dalloc_mmap chunk_dalloc_wrapper chunk_deregister chunk_dss_boot chunk_dss_postfork_child chunk_dss_postfork_parent chunk_dss_prec_get chunk_dss_prec_set chunk_dss_prefork chunk_hooks_default chunk_hooks_get chunk_hooks_set chunk_in_dss chunk_lookup chunk_npages chunk_postfork_child chunk_postfork_parent chunk_prefork chunk_purge_arena chunk_purge_wrapper chunk_register chunksize chunksize_mask chunks_rtree ckh_count ckh_delete ckh_insert ckh_iter ckh_new ckh_pointer_hash ckh_pointer_keycomp ckh_remove ckh_search ckh_string_hash ckh_string_keycomp ctl_boot ctl_bymib ctl_byname ctl_nametomib ctl_postfork_child ctl_postfork_parent ctl_prefork dss_prec_names extent_node_achunk_get extent_node_achunk_set extent_node_addr_get extent_node_addr_set extent_node_arena_get extent_node_arena_set extent_node_dirty_insert extent_node_dirty_linkage_init extent_node_dirty_remove extent_node_init extent_node_prof_tctx_get extent_node_prof_tctx_set extent_node_size_get extent_node_size_set extent_node_zeroed_get extent_node_zeroed_set extent_tree_ad_empty extent_tree_ad_first extent_tree_ad_insert extent_tree_ad_iter extent_tree_ad_iter_recurse extent_tree_ad_iter_start extent_tree_ad_last extent_tree_ad_new extent_tree_ad_next extent_tree_ad_nsearch extent_tree_ad_prev extent_tree_ad_psearch extent_tree_ad_remove extent_tree_ad_reverse_iter extent_tree_ad_reverse_iter_recurse extent_tree_ad_reverse_iter_start extent_tree_ad_search 
extent_tree_szad_empty extent_tree_szad_first extent_tree_szad_insert extent_tree_szad_iter extent_tree_szad_iter_recurse extent_tree_szad_iter_start extent_tree_szad_last extent_tree_szad_new extent_tree_szad_next extent_tree_szad_nsearch extent_tree_szad_prev extent_tree_szad_psearch extent_tree_szad_remove extent_tree_szad_reverse_iter extent_tree_szad_reverse_iter_recurse extent_tree_szad_reverse_iter_start extent_tree_szad_search get_errno hash hash_fmix_32 hash_fmix_64 hash_get_block_32 hash_get_block_64 hash_rotl_32 hash_rotl_64 hash_x64_128 hash_x86_128 hash_x86_32 huge_aalloc huge_dalloc huge_dalloc_junk huge_malloc huge_palloc huge_prof_tctx_get huge_prof_tctx_reset huge_prof_tctx_set huge_ralloc huge_ralloc_no_move huge_salloc iaalloc iallocztm icalloc icalloct idalloc idalloct idalloctm imalloc imalloct index2size index2size_compute index2size_lookup index2size_tab in_valgrind ipalloc ipalloct ipallocztm iqalloc iralloc iralloct iralloct_realign isalloc isdalloct isqalloc isthreaded ivsalloc ixalloc jemalloc_postfork_child jemalloc_postfork_parent jemalloc_prefork large_maxclass lg_floor malloc_cprintf malloc_mutex_init malloc_mutex_lock malloc_mutex_postfork_child malloc_mutex_postfork_parent malloc_mutex_prefork malloc_mutex_unlock malloc_printf malloc_snprintf malloc_strtoumax malloc_tsd_boot0 malloc_tsd_boot1 malloc_tsd_cleanup_register malloc_tsd_dalloc malloc_tsd_malloc malloc_tsd_no_cleanup malloc_vcprintf malloc_vsnprintf malloc_write map_bias map_misc_offset mb_write mutex_boot narenas_cache_cleanup narenas_total_get ncpus nhbins opt_abort opt_dss opt_junk opt_junk_alloc opt_junk_free opt_lg_chunk opt_lg_dirty_mult opt_lg_prof_interval opt_lg_prof_sample opt_lg_tcache_max opt_narenas opt_prof opt_prof_accum opt_prof_active opt_prof_final opt_prof_gdump opt_prof_leak opt_prof_prefix opt_prof_thread_active_init opt_quarantine opt_redzone opt_stats_print opt_tcache opt_utrace opt_xmalloc opt_zero p2rz pages_commit pages_decommit pages_map pages_purge pages_trim pages_unmap pow2_ceil prof_active_get prof_active_get_unlocked prof_active_set prof_alloc_prep prof_alloc_rollback prof_backtrace prof_boot0 prof_boot1 prof_boot2 prof_dump_header prof_dump_open prof_free prof_free_sampled_object prof_gdump prof_gdump_get prof_gdump_get_unlocked prof_gdump_set prof_gdump_val prof_idump prof_interval prof_lookup prof_malloc prof_malloc_sample_object prof_mdump prof_postfork_child prof_postfork_parent prof_prefork prof_realloc prof_reset prof_sample_accum_update prof_sample_threshold_update prof_tctx_get prof_tctx_reset prof_tctx_set prof_tdata_cleanup prof_tdata_get prof_tdata_init prof_tdata_reinit prof_thread_active_get prof_thread_active_init_get prof_thread_active_init_set prof_thread_active_set prof_thread_name_get prof_thread_name_set quarantine quarantine_alloc_hook quarantine_alloc_hook_work quarantine_cleanup register_zone rtree_child_read rtree_child_read_hard rtree_child_tryread rtree_delete rtree_get rtree_new rtree_node_valid rtree_set rtree_start_level rtree_subkey rtree_subtree_read rtree_subtree_read_hard rtree_subtree_tryread rtree_val_read rtree_val_write s2u s2u_compute s2u_lookup sa2u set_errno size2index size2index_compute size2index_lookup size2index_tab stats_cactive stats_cactive_add stats_cactive_get stats_cactive_sub stats_print tcache_alloc_easy tcache_alloc_large tcache_alloc_small tcache_alloc_small_hard tcache_arena_associate tcache_arena_dissociate tcache_arena_reassociate tcache_bin_flush_large tcache_bin_flush_small tcache_bin_info tcache_boot 
tcache_cleanup tcache_create tcache_dalloc_large tcache_dalloc_small tcache_enabled_cleanup tcache_enabled_get tcache_enabled_set tcache_event tcache_event_hard tcache_flush tcache_get tcache_get_hard tcache_maxclass tcaches tcache_salloc tcaches_create tcaches_destroy tcaches_flush tcaches_get tcache_stats_merge thread_allocated_cleanup thread_deallocated_cleanup tsd_arena_get tsd_arena_set tsd_boot tsd_boot0 tsd_boot1 tsd_booted tsd_cleanup tsd_cleanup_wrapper tsd_fetch tsd_get tsd_wrapper_get tsd_wrapper_set tsd_initialized tsd_init_check_recursion tsd_init_finish tsd_init_head tsd_nominal tsd_quarantine_get tsd_quarantine_set tsd_set tsd_tcache_enabled_get tsd_tcache_enabled_set tsd_tcache_get tsd_tcache_set tsd_tls tsd_tsd tsd_prof_tdata_get tsd_prof_tdata_set tsd_thread_allocated_get tsd_thread_allocated_set tsd_thread_deallocated_get tsd_thread_deallocated_set u2rz valgrind_freelike_block valgrind_make_mem_defined valgrind_make_mem_noaccess valgrind_make_mem_undefined disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/private_unnamespace.sh000077500000000000000000000001061264176561100274730ustar00rootroot00000000000000#!/bin/sh for symbol in `cat $1` ; do echo "#undef ${symbol}" done disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/prng.h000066400000000000000000000037511264176561100242330ustar00rootroot00000000000000/******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* * Simple linear congruential pseudo-random number generator: * * prng(y) = (a*x + c) % m * * where the following constants ensure maximal period: * * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4. * c == Odd number (relatively prime to 2^n). * m == 2^32 * * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints. * * This choice of m has the disadvantage that the quality of the bits is * proportional to bit position. For example, the lowest bit has a cycle of 2, * the next has a cycle of 4, etc. For this reason, we prefer to use the upper * bits. * * Macro parameters: * uint32_t r : Result. * unsigned lg_range : (0..32], number of least significant bits to return. * uint32_t state : Seed value. * const uint32_t a, c : See above discussion. */ #define prng32(r, lg_range, state, a, c) do { \ assert((lg_range) > 0); \ assert((lg_range) <= 32); \ \ r = (state * (a)) + (c); \ state = r; \ r >>= (32 - (lg_range)); \ } while (false) /* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. 
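 *
 * Illustrative use (not part of the original source; the (a, c) constants
 * below are hypothetical, but satisfy the constraints documented above):
 *
 *   uint64_t r, state = 42;       // state holds the seed
 *   prng64(r, 10, state, 6364136223846793005ULL, 1442695040888963407ULL);
 *   // r is now the top 10 bits of the next LCG output: 0 <= r < 1024.
 *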
*/ #define prng64(r, lg_range, state, a, c) do { \ assert((lg_range) > 0); \ assert((lg_range) <= 64); \ \ r = (state * (a)) + (c); \ state = r; \ r >>= (64 - (lg_range)); \ } while (false) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/prof.h000066400000000000000000000360771264176561100242420ustar00rootroot00000000000000/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct prof_bt_s prof_bt_t; typedef struct prof_cnt_s prof_cnt_t; typedef struct prof_tctx_s prof_tctx_t; typedef struct prof_gctx_s prof_gctx_t; typedef struct prof_tdata_s prof_tdata_t; /* Option defaults. */ #ifdef JEMALLOC_PROF # define PROF_PREFIX_DEFAULT "jeprof" #else # define PROF_PREFIX_DEFAULT "" #endif #define LG_PROF_SAMPLE_DEFAULT 19 #define LG_PROF_INTERVAL_DEFAULT -1 /* * Hard limit on stack backtrace depth. The version of prof_backtrace() that * is based on __builtin_return_address() necessarily has a hard-coded number * of backtrace frame handlers, and should be kept in sync with this setting. */ #define PROF_BT_MAX 128 /* Initial hash table size. */ #define PROF_CKH_MINITEMS 64 /* Size of memory buffer to use when writing dump files. */ #define PROF_DUMP_BUFSIZE 65536 /* Size of stack-allocated buffer used by prof_printf(). */ #define PROF_PRINTF_BUFSIZE 128 /* * Number of mutexes shared among all gctx's. No space is allocated for these * unless profiling is enabled, so it's okay to over-provision. */ #define PROF_NCTX_LOCKS 1024 /* * Number of mutexes shared among all tdata's. No space is allocated for these * unless profiling is enabled, so it's okay to over-provision. */ #define PROF_NTDATA_LOCKS 256 /* * prof_tdata pointers close to NULL are used to encode state information that * is used for cleaning up during thread shutdown. */ #define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1) #define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2) #define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct prof_bt_s { /* Backtrace, stored as len program counters. */ void **vec; unsigned len; }; #ifdef JEMALLOC_PROF_LIBGCC /* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */ typedef struct { prof_bt_t *bt; unsigned max; } prof_unwind_data_t; #endif struct prof_cnt_s { /* Profiling counters. */ uint64_t curobjs; uint64_t curbytes; uint64_t accumobjs; uint64_t accumbytes; }; typedef enum { prof_tctx_state_initializing, prof_tctx_state_nominal, prof_tctx_state_dumping, prof_tctx_state_purgatory /* Dumper must finish destroying. */ } prof_tctx_state_t; struct prof_tctx_s { /* Thread data for thread that performed the allocation. */ prof_tdata_t *tdata; /* * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be * defunct during teardown. */ uint64_t thr_uid; uint64_t thr_discrim; /* Profiling counters, protected by tdata->lock. 
*/ prof_cnt_t cnts; /* Associated global context. */ prof_gctx_t *gctx; /* * UID that distinguishes multiple tctx's created by the same thread, * but coexisting in gctx->tctxs. There are two ways that such * coexistence can occur: * - A dumper thread can cause a tctx to be retained in the purgatory * state. * - Although a single "producer" thread must create all tctx's which * share the same thr_uid, multiple "consumers" can each concurrently * execute portions of prof_tctx_destroy(). prof_tctx_destroy() only * gets called once each time cnts.cur{objs,bytes} drop to 0, but this * threshold can be hit again before the first consumer finishes * executing prof_tctx_destroy(). */ uint64_t tctx_uid; /* Linkage into gctx's tctxs. */ rb_node(prof_tctx_t) tctx_link; /* * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents * sample vs destroy race. */ bool prepared; /* Current dump-related state, protected by gctx->lock. */ prof_tctx_state_t state; /* * Copy of cnts snapshotted during early dump phase, protected by * dump_mtx. */ prof_cnt_t dump_cnts; }; typedef rb_tree(prof_tctx_t) prof_tctx_tree_t; struct prof_gctx_s { /* Protects nlimbo, cnt_summed, and tctxs. */ malloc_mutex_t *lock; /* * Number of threads that currently cause this gctx to be in a state of * limbo due to one of: * - Initializing this gctx. * - Initializing per thread counters associated with this gctx. * - Preparing to destroy this gctx. * - Dumping a heap profile that includes this gctx. * nlimbo must be 1 (single destroyer) in order to safely destroy the * gctx. */ unsigned nlimbo; /* * Tree of profile counters, one for each thread that has allocated in * this context. */ prof_tctx_tree_t tctxs; /* Linkage for tree of contexts to be dumped. */ rb_node(prof_gctx_t) dump_link; /* Temporary storage for summation during dump. */ prof_cnt_t cnt_summed; /* Associated backtrace. */ prof_bt_t bt; /* Backtrace vector, variable size, referred to by bt. */ void *vec[1]; }; typedef rb_tree(prof_gctx_t) prof_gctx_tree_t; struct prof_tdata_s { malloc_mutex_t *lock; /* Monotonically increasing unique thread identifier. */ uint64_t thr_uid; /* * Monotonically increasing discriminator among tdata structures * associated with the same thr_uid. */ uint64_t thr_discrim; /* Included in heap profile dumps if non-NULL. */ char *thread_name; bool attached; bool expired; rb_node(prof_tdata_t) tdata_link; /* * Counter used to initialize prof_tctx_t's tctx_uid. No locking is * necessary when incrementing this field, because only one thread ever * does so. */ uint64_t tctx_uid_next; /* * Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks * backtraces for which it has non-zero allocation/deallocation counters * associated with thread-specific prof_tctx_t objects. Other threads * may write to prof_tctx_t contents when freeing associated objects. */ ckh_t bt2tctx; /* Sampling state. */ uint64_t prng_state; uint64_t bytes_until_sample; /* State used to avoid dumping while operating on prof internals. */ bool enq; bool enq_idump; bool enq_gdump; /* * Set to true during an early dump phase for tdata's which are * currently being dumped. New threads' tdata's have this initialized * to false so that they aren't accidentally included in later dump * phases. */ bool dumping; /* * True if profiling is active for this tdata's thread * (thread.prof.active mallctl). */ bool active; /* Temporary storage for summation during dump. */ prof_cnt_t cnt_summed; /* Backtrace vector, used for calls to prof_backtrace(). 
*/ void *vec[PROF_BT_MAX]; }; typedef rb_tree(prof_tdata_t) prof_tdata_tree_t; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS extern bool opt_prof; extern bool opt_prof_active; extern bool opt_prof_thread_active_init; extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ extern bool opt_prof_gdump; /* High-water memory dumping. */ extern bool opt_prof_final; /* Final profile dumping. */ extern bool opt_prof_leak; /* Dump leak summary at exit. */ extern bool opt_prof_accum; /* Report cumulative bytes. */ extern char opt_prof_prefix[ /* Minimize memory bloat for non-prof builds. */ #ifdef JEMALLOC_PROF PATH_MAX + #endif 1]; /* Accessed via prof_active_[gs]et{_unlocked,}(). */ extern bool prof_active; /* Accessed via prof_gdump_[gs]et{_unlocked,}(). */ extern bool prof_gdump_val; /* * Profile dump interval, measured in bytes allocated. Each arena triggers a * profile dump when it reaches this threshold. The effect is that the * interval between profile dumps averages prof_interval, though the actual * interval between dumps will tend to be sporadic, and the interval will be a * maximum of approximately (prof_interval * narenas). */ extern uint64_t prof_interval; /* * Initialized as opt_lg_prof_sample, and potentially modified during profiling * resets. */ extern size_t lg_prof_sample; void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated); void prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx); void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx); void bt_init(prof_bt_t *bt, void **vec); void prof_backtrace(prof_bt_t *bt); prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt); #ifdef JEMALLOC_JET size_t prof_tdata_count(void); size_t prof_bt_count(void); const prof_cnt_t *prof_cnt_all(void); typedef int (prof_dump_open_t)(bool, const char *); extern prof_dump_open_t *prof_dump_open; typedef bool (prof_dump_header_t)(bool, const prof_cnt_t *); extern prof_dump_header_t *prof_dump_header; #endif void prof_idump(void); bool prof_mdump(const char *filename); void prof_gdump(void); prof_tdata_t *prof_tdata_init(tsd_t *tsd); prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata); void prof_reset(tsd_t *tsd, size_t lg_sample); void prof_tdata_cleanup(tsd_t *tsd); const char *prof_thread_name_get(void); bool prof_active_get(void); bool prof_active_set(bool active); int prof_thread_name_set(tsd_t *tsd, const char *thread_name); bool prof_thread_active_get(void); bool prof_thread_active_set(bool active); bool prof_thread_active_init_get(void); bool prof_thread_active_init_set(bool active_init); bool prof_gdump_get(void); bool prof_gdump_set(bool active); void prof_boot0(void); void prof_boot1(void); bool prof_boot2(void); void prof_prefork(void); void prof_postfork_parent(void); void prof_postfork_child(void); void prof_sample_threshold_update(prof_tdata_t *tdata); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE bool prof_active_get_unlocked(void); bool prof_gdump_get_unlocked(void); prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create); bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit, prof_tdata_t **tdata_out); prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update); prof_tctx_t 
*prof_tctx_get(const void *ptr); void prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx); void prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr, prof_tctx_t *tctx); void prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx); void prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx); void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr, size_t old_usize, prof_tctx_t *old_tctx); void prof_free(tsd_t *tsd, const void *ptr, size_t usize); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_)) JEMALLOC_ALWAYS_INLINE bool prof_active_get_unlocked(void) { /* * Even if opt_prof is true, sampling can be temporarily disabled by * setting prof_active to false. No locking is used when reading * prof_active in the fast path, so there are no guarantees regarding * how long it will take for all threads to notice state changes. */ return (prof_active); } JEMALLOC_ALWAYS_INLINE bool prof_gdump_get_unlocked(void) { /* * No locking is used when reading prof_gdump_val in the fast path, so * there are no guarantees regarding how long it will take for all * threads to notice state changes. */ return (prof_gdump_val); } JEMALLOC_ALWAYS_INLINE prof_tdata_t * prof_tdata_get(tsd_t *tsd, bool create) { prof_tdata_t *tdata; cassert(config_prof); tdata = tsd_prof_tdata_get(tsd); if (create) { if (unlikely(tdata == NULL)) { if (tsd_nominal(tsd)) { tdata = prof_tdata_init(tsd); tsd_prof_tdata_set(tsd, tdata); } } else if (unlikely(tdata->expired)) { tdata = prof_tdata_reinit(tsd, tdata); tsd_prof_tdata_set(tsd, tdata); } assert(tdata == NULL || tdata->attached); } return (tdata); } JEMALLOC_ALWAYS_INLINE prof_tctx_t * prof_tctx_get(const void *ptr) { cassert(config_prof); assert(ptr != NULL); return (arena_prof_tctx_get(ptr)); } JEMALLOC_ALWAYS_INLINE void prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx) { cassert(config_prof); assert(ptr != NULL); arena_prof_tctx_set(ptr, usize, tctx); } JEMALLOC_ALWAYS_INLINE void prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr, prof_tctx_t *old_tctx) { cassert(config_prof); assert(ptr != NULL); arena_prof_tctx_reset(ptr, usize, old_ptr, old_tctx); } JEMALLOC_ALWAYS_INLINE bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update, prof_tdata_t **tdata_out) { prof_tdata_t *tdata; cassert(config_prof); tdata = prof_tdata_get(tsd, true); if ((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) tdata = NULL; if (tdata_out != NULL) *tdata_out = tdata; if (tdata == NULL) return (true); if (tdata->bytes_until_sample >= usize) { if (update) tdata->bytes_until_sample -= usize; return (true); } else { /* Compute new sample threshold. 
*/ if (update) prof_sample_threshold_update(tdata); return (!tdata->active); } } JEMALLOC_ALWAYS_INLINE prof_tctx_t * prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) { prof_tctx_t *ret; prof_tdata_t *tdata; prof_bt_t bt; assert(usize == s2u(usize)); if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update, &tdata))) ret = (prof_tctx_t *)(uintptr_t)1U; else { bt_init(&bt, tdata->vec); prof_backtrace(&bt); ret = prof_lookup(tsd, &bt); } return (ret); } JEMALLOC_ALWAYS_INLINE void prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx) { cassert(config_prof); assert(ptr != NULL); assert(usize == isalloc(ptr, true)); if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) prof_malloc_sample_object(ptr, usize, tctx); else prof_tctx_set(ptr, usize, (prof_tctx_t *)(uintptr_t)1U); } JEMALLOC_ALWAYS_INLINE void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr, size_t old_usize, prof_tctx_t *old_tctx) { bool sampled, old_sampled; cassert(config_prof); assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U); if (prof_active && !updated && ptr != NULL) { assert(usize == isalloc(ptr, true)); if (prof_sample_accum_update(tsd, usize, true, NULL)) { /* * Don't sample. The usize passed to prof_alloc_prep() * was larger than what actually got allocated, so a * backtrace was captured for this allocation, even * though its actual usize was insufficient to cross the * sample threshold. */ tctx = (prof_tctx_t *)(uintptr_t)1U; } } sampled = ((uintptr_t)tctx > (uintptr_t)1U); old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U); if (unlikely(sampled)) prof_malloc_sample_object(ptr, usize, tctx); else prof_tctx_reset(ptr, usize, old_ptr, old_tctx); if (unlikely(old_sampled)) prof_free_sampled_object(tsd, old_usize, old_tctx); } JEMALLOC_ALWAYS_INLINE void prof_free(tsd_t *tsd, const void *ptr, size_t usize) { prof_tctx_t *tctx = prof_tctx_get(ptr); cassert(config_prof); assert(usize == isalloc(ptr, true)); if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) prof_free_sampled_object(tsd, usize, tctx); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/public_namespace.sh000077500000000000000000000002011264176561100267300ustar00rootroot00000000000000#!/bin/sh for nm in `cat $1` ; do n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` echo "#define je_${n} JEMALLOC_N(${n})" done disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/public_unnamespace.sh000077500000000000000000000001571264176561100273050ustar00rootroot00000000000000#!/bin/sh for nm in `cat $1` ; do n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` echo "#undef je_${n}" done disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/ql.h000066400000000000000000000045011264176561100236730ustar00rootroot00000000000000/* List definitions. */ #define ql_head(a_type) \ struct { \ a_type *qlh_first; \ } #define ql_head_initializer(a_head) {NULL} #define ql_elm(a_type) qr(a_type) /* List functions. */ #define ql_new(a_head) do { \ (a_head)->qlh_first = NULL; \ } while (0) #define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) #define ql_first(a_head) ((a_head)->qlh_first) #define ql_last(a_head, a_field) \ ((ql_first(a_head) != NULL) \ ? qr_prev(ql_first(a_head), a_field) : NULL) #define ql_next(a_head, a_elm, a_field) \ ((ql_last(a_head, a_field) != (a_elm)) \ ? 
qr_next((a_elm), a_field) : NULL) #define ql_prev(a_head, a_elm, a_field) \ ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \ : NULL) #define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ qr_before_insert((a_qlelm), (a_elm), a_field); \ if (ql_first(a_head) == (a_qlelm)) { \ ql_first(a_head) = (a_elm); \ } \ } while (0) #define ql_after_insert(a_qlelm, a_elm, a_field) \ qr_after_insert((a_qlelm), (a_elm), a_field) #define ql_head_insert(a_head, a_elm, a_field) do { \ if (ql_first(a_head) != NULL) { \ qr_before_insert(ql_first(a_head), (a_elm), a_field); \ } \ ql_first(a_head) = (a_elm); \ } while (0) #define ql_tail_insert(a_head, a_elm, a_field) do { \ if (ql_first(a_head) != NULL) { \ qr_before_insert(ql_first(a_head), (a_elm), a_field); \ } \ ql_first(a_head) = qr_next((a_elm), a_field); \ } while (0) #define ql_remove(a_head, a_elm, a_field) do { \ if (ql_first(a_head) == (a_elm)) { \ ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ } \ if (ql_first(a_head) != (a_elm)) { \ qr_remove((a_elm), a_field); \ } else { \ ql_first(a_head) = NULL; \ } \ } while (0) #define ql_head_remove(a_head, a_type, a_field) do { \ a_type *t = ql_first(a_head); \ ql_remove((a_head), t, a_field); \ } while (0) #define ql_tail_remove(a_head, a_type, a_field) do { \ a_type *t = ql_last(a_head, a_field); \ ql_remove((a_head), t, a_field); \ } while (0) #define ql_foreach(a_var, a_head, a_field) \ qr_foreach((a_var), ql_first(a_head), a_field) #define ql_reverse_foreach(a_var, a_head, a_field) \ qr_reverse_foreach((a_var), ql_first(a_head), a_field) disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/qr.h000066400000000000000000000043231264176561100237030ustar00rootroot00000000000000/* Ring definitions. */ #define qr(a_type) \ struct { \ a_type *qre_next; \ a_type *qre_prev; \ } /* Ring functions. */ #define qr_new(a_qr, a_field) do { \ (a_qr)->a_field.qre_next = (a_qr); \ (a_qr)->a_field.qre_prev = (a_qr); \ } while (0) #define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) #define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) #define qr_before_insert(a_qrelm, a_qr, a_field) do { \ (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \ (a_qr)->a_field.qre_next = (a_qrelm); \ (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \ (a_qrelm)->a_field.qre_prev = (a_qr); \ } while (0) #define qr_after_insert(a_qrelm, a_qr, a_field) \ do \ { \ (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \ (a_qr)->a_field.qre_prev = (a_qrelm); \ (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \ (a_qrelm)->a_field.qre_next = (a_qr); \ } while (0) #define qr_meld(a_qr_a, a_qr_b, a_field) do { \ void *t; \ (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ t = (a_qr_a)->a_field.qre_prev; \ (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \ (a_qr_b)->a_field.qre_prev = t; \ } while (0) /* * qr_meld() and qr_split() are functionally equivalent, so there's no need to * have two copies of the code. */ #define qr_split(a_qr_a, a_qr_b, a_field) \ qr_meld((a_qr_a), (a_qr_b), a_field) #define qr_remove(a_qr, a_field) do { \ (a_qr)->a_field.qre_prev->a_field.qre_next \ = (a_qr)->a_field.qre_next; \ (a_qr)->a_field.qre_next->a_field.qre_prev \ = (a_qr)->a_field.qre_prev; \ (a_qr)->a_field.qre_next = (a_qr); \ (a_qr)->a_field.qre_prev = (a_qr); \ } while (0) #define qr_foreach(var, a_qr, a_field) \ for ((var) = (a_qr); \ (var) != NULL; \ (var) = (((var)->a_field.qre_next != (a_qr)) \ ? 
(var)->a_field.qre_next : NULL)) #define qr_reverse_foreach(var, a_qr, a_field) \ for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ (var) != NULL; \ (var) = (((var) != (a_qr)) \ ? (var)->a_field.qre_prev : NULL)) disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/quarantine.h000066400000000000000000000030711264176561100254270ustar00rootroot00000000000000/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct quarantine_obj_s quarantine_obj_t; typedef struct quarantine_s quarantine_t; /* Default per thread quarantine size if valgrind is enabled. */ #define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct quarantine_obj_s { void *ptr; size_t usize; }; struct quarantine_s { size_t curbytes; size_t curobjs; size_t first; #define LG_MAXOBJS_INIT 10 size_t lg_maxobjs; quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */ }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void quarantine_alloc_hook_work(tsd_t *tsd); void quarantine(tsd_t *tsd, void *ptr); void quarantine_cleanup(tsd_t *tsd); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE void quarantine_alloc_hook(void); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_)) JEMALLOC_ALWAYS_INLINE void quarantine_alloc_hook(void) { tsd_t *tsd; assert(config_fill && opt_quarantine); tsd = tsd_fetch(); if (tsd_quarantine_get(tsd) == NULL) quarantine_alloc_hook_work(tsd); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/rb.h000066400000000000000000001107421264176561100236670ustar00rootroot00000000000000/*- ******************************************************************************* * * cpp macro implementation of left-leaning 2-3 red-black trees. Parent * pointers are not used, and color bits are stored in the least significant * bit of right-child pointers (if RB_COMPACT is defined), thus making node * linkage as compact as is possible for red-black trees. * * Usage: * * #include * #include * #define NDEBUG // (Optional, see assert(3).) * #include * #define RB_COMPACT // (Optional, embed color bits in right-child pointers.) * #include * ... * ******************************************************************************* */ #ifndef RB_H_ #define RB_H_ #ifdef RB_COMPACT /* Node structure. */ #define rb_node(a_type) \ struct { \ a_type *rbn_left; \ a_type *rbn_right_red; \ } #else #define rb_node(a_type) \ struct { \ a_type *rbn_left; \ a_type *rbn_right; \ bool rbn_red; \ } #endif /* Root structure. */ #define rb_tree(a_type) \ struct { \ a_type *rbt_root; \ a_type rbt_nil; \ } /* Left accessors. */ #define rbtn_left_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_left) #define rbtn_left_set(a_type, a_field, a_node, a_left) do { \ (a_node)->a_field.rbn_left = a_left; \ } while (0) #ifdef RB_COMPACT /* Right accessors. 
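 *
 * Illustrative note (not part of the original source): with RB_COMPACT the
 * color bit lives in bit 0 of rbn_right_red, which is free because node
 * pointers are always at least 2-byte aligned. In effect, for a node n
 * with right child r:
 *
 *   n->a_field.rbn_right_red = (a_type *)((uintptr_t)r | (red ? 1 : 0));
 *   r   = (a_type *)((uintptr_t)n->a_field.rbn_right_red & ~(uintptr_t)1);
 *   red = (bool)((uintptr_t)n->a_field.rbn_right_red & 1);
 *
 * The accessors below implement this packing (using signed masks
 * equivalent to ~1).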
*/ #define rbtn_right_get(a_type, a_field, a_node) \ ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \ & ((ssize_t)-2))) #define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \ | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \ } while (0) /* Color accessors. */ #define rbtn_red_get(a_type, a_field, a_node) \ ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \ & ((size_t)1))) #define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \ (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \ | ((ssize_t)a_red)); \ } while (0) #define rbtn_red_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \ (a_node)->a_field.rbn_right_red) | ((size_t)1)); \ } while (0) #define rbtn_black_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \ (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \ } while (0) #else /* Right accessors. */ #define rbtn_right_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_right) #define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ (a_node)->a_field.rbn_right = a_right; \ } while (0) /* Color accessors. */ #define rbtn_red_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_red) #define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ (a_node)->a_field.rbn_red = (a_red); \ } while (0) #define rbtn_red_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_red = true; \ } while (0) #define rbtn_black_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_red = false; \ } while (0) #endif /* Node initializer. */ #define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ rbtn_left_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \ rbtn_right_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \ rbtn_red_set(a_type, a_field, (a_node)); \ } while (0) /* Tree initializer. */ #define rb_new(a_type, a_field, a_rbt) do { \ (a_rbt)->rbt_root = &(a_rbt)->rbt_nil; \ rbt_node_new(a_type, a_field, a_rbt, &(a_rbt)->rbt_nil); \ rbtn_black_set(a_type, a_field, &(a_rbt)->rbt_nil); \ } while (0) /* Internal utility macros. */ #define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \ (r_node) = (a_root); \ if ((r_node) != &(a_rbt)->rbt_nil) { \ for (; \ rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil;\ (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \ } \ } \ } while (0) #define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \ (r_node) = (a_root); \ if ((r_node) != &(a_rbt)->rbt_nil) { \ for (; rbtn_right_get(a_type, a_field, (r_node)) != \ &(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field, \ (r_node))) { \ } \ } \ } while (0) #define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \ (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \ rbtn_right_set(a_type, a_field, (a_node), \ rbtn_left_get(a_type, a_field, (r_node))); \ rbtn_left_set(a_type, a_field, (r_node), (a_node)); \ } while (0) #define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \ (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \ rbtn_left_set(a_type, a_field, (a_node), \ rbtn_right_get(a_type, a_field, (r_node))); \ rbtn_right_set(a_type, a_field, (r_node), (a_node)); \ } while (0) /* * The rb_proto() macro generates function prototypes that correspond to the * functions generated by an equivalently parameterized call to rb_gen(). 
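 *
 * For example (illustrative, not part of the original source), prototypes
 * for the ex_t/ex_node_t types used throughout the documentation below can
 * be emitted with:
 *
 *   rb_proto(static, ex_, ex_t, ex_node_t)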
*/ #define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ a_attr void \ a_prefix##new(a_rbt_type *rbtree); \ a_attr bool \ a_prefix##empty(a_rbt_type *rbtree); \ a_attr a_type * \ a_prefix##first(a_rbt_type *rbtree); \ a_attr a_type * \ a_prefix##last(a_rbt_type *rbtree); \ a_attr a_type * \ a_prefix##next(a_rbt_type *rbtree, a_type *node); \ a_attr a_type * \ a_prefix##prev(a_rbt_type *rbtree, a_type *node); \ a_attr a_type * \ a_prefix##search(a_rbt_type *rbtree, a_type *key); \ a_attr a_type * \ a_prefix##nsearch(a_rbt_type *rbtree, a_type *key); \ a_attr a_type * \ a_prefix##psearch(a_rbt_type *rbtree, a_type *key); \ a_attr void \ a_prefix##insert(a_rbt_type *rbtree, a_type *node); \ a_attr void \ a_prefix##remove(a_rbt_type *rbtree, a_type *node); \ a_attr a_type * \ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ a_rbt_type *, a_type *, void *), void *arg); \ a_attr a_type * \ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); /* * The rb_gen() macro generates a type-specific red-black tree implementation, * based on the above cpp macros. * * Arguments: * * a_attr : Function attribute for generated functions (ex: static). * a_prefix : Prefix for generated functions (ex: ex_). * a_rb_type : Type for red-black tree data structure (ex: ex_t). * a_type : Type for red-black tree node data structure (ex: ex_node_t). * a_field : Name of red-black tree node linkage (ex: ex_link). * a_cmp : Node comparison function name, with the following prototype: * int (a_cmp *)(a_type *a_node, a_type *a_other); * ^^^^^^ * or a_key * Interpretation of comparison function return values: * -1 : a_node < a_other * 0 : a_node == a_other * 1 : a_node > a_other * In all cases, the a_node or a_key macro argument is the first * argument to the comparison function, which makes it possible * to write comparison functions that treat the first argument * specially. * * Assuming the following setup: * * typedef struct ex_node_s ex_node_t; * struct ex_node_s { * rb_node(ex_node_t) ex_link; * }; * typedef rb_tree(ex_node_t) ex_t; * rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp) * * The following API is generated: * * static void * ex_new(ex_t *tree); * Description: Initialize a red-black tree structure. * Args: * tree: Pointer to an uninitialized red-black tree object. * * static bool * ex_empty(ex_t *tree); * Description: Determine whether tree is empty. * Args: * tree: Pointer to an initialized red-black tree object. * Ret: True if tree is empty, false otherwise. * * static ex_node_t * * ex_first(ex_t *tree); * static ex_node_t * * ex_last(ex_t *tree); * Description: Get the first/last node in tree. * Args: * tree: Pointer to an initialized red-black tree object. * Ret: First/last node in tree, or NULL if tree is empty. * * static ex_node_t * * ex_next(ex_t *tree, ex_node_t *node); * static ex_node_t * * ex_prev(ex_t *tree, ex_node_t *node); * Description: Get node's successor/predecessor. * Args: * tree: Pointer to an initialized red-black tree object. * node: A node in tree. * Ret: node's successor/predecessor in tree, or NULL if node is * last/first. * * static ex_node_t * * ex_search(ex_t *tree, ex_node_t *key); * Description: Search for node that matches key. * Args: * tree: Pointer to an initialized red-black tree object. * key : Search key. * Ret: Node in tree that matches key, or NULL if no match. 
* * static ex_node_t * * ex_nsearch(ex_t *tree, ex_node_t *key); * static ex_node_t * * ex_psearch(ex_t *tree, ex_node_t *key); * Description: Search for node that matches key. If no match is found, * return what would be key's successor/predecessor, were * key in tree. * Args: * tree: Pointer to an initialized red-black tree object. * key : Search key. * Ret: Node in tree that matches key, or if no match, hypothetical node's * successor/predecessor (NULL if no successor/predecessor). * * static void * ex_insert(ex_t *tree, ex_node_t *node); * Description: Insert node into tree. * Args: * tree: Pointer to an initialized red-black tree object. * node: Node to be inserted into tree. * * static void * ex_remove(ex_t *tree, ex_node_t *node); * Description: Remove node from tree. * Args: * tree: Pointer to an initialized red-black tree object. * node: Node in tree to be removed. * * static ex_node_t * * ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *, * ex_node_t *, void *), void *arg); * static ex_node_t * * ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node *(*cb)(ex_t *, * ex_node_t *, void *), void *arg); * Description: Iterate forward/backward over tree, starting at node. If * tree is modified, iteration must be immediately * terminated by the callback function that causes the * modification. * Args: * tree : Pointer to an initialized red-black tree object. * start: Node at which to start iteration, or NULL to start at * first/last node. * cb : Callback function, which is called for each node during * iteration. Under normal circumstances the callback function * should return NULL, which causes iteration to continue. If a * callback function returns non-NULL, iteration is immediately * terminated and the non-NULL return value is returned by the * iterator. This is useful for re-starting iteration after * modifying tree. * arg : Opaque pointer passed to cb(). * Ret: NULL if iteration completed, or the non-NULL callback return value * that caused termination of the iteration. 
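 *
 * Minimal end-to-end sketch (illustrative, not part of the original
 * documentation), assuming ex_node_s from the setup above is extended with
 * an integer key and a hypothetical comparison function ex_cmp():
 *
 *   static int
 *   ex_cmp(ex_node_t *a, ex_node_t *b) {
 *       return ((a->key > b->key) - (a->key < b->key));
 *   }
 *   rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)
 *
 *   ex_t tree;
 *   ex_node_t n, *res;
 *   n.key = 42;
 *   ex_new(&tree);
 *   ex_insert(&tree, &n);
 *   res = ex_search(&tree, &n);   // res == &n
 *   ex_remove(&tree, &n);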
*/ #define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ a_attr void \ a_prefix##new(a_rbt_type *rbtree) { \ rb_new(a_type, a_field, rbtree); \ } \ a_attr bool \ a_prefix##empty(a_rbt_type *rbtree) { \ return (rbtree->rbt_root == &rbtree->rbt_nil); \ } \ a_attr a_type * \ a_prefix##first(a_rbt_type *rbtree) { \ a_type *ret; \ rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ if (ret == &rbtree->rbt_nil) { \ ret = NULL; \ } \ return (ret); \ } \ a_attr a_type * \ a_prefix##last(a_rbt_type *rbtree) { \ a_type *ret; \ rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ if (ret == &rbtree->rbt_nil) { \ ret = NULL; \ } \ return (ret); \ } \ a_attr a_type * \ a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ a_type *ret; \ if (rbtn_right_get(a_type, a_field, node) != &rbtree->rbt_nil) { \ rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \ a_field, node), ret); \ } else { \ a_type *tnode = rbtree->rbt_root; \ assert(tnode != &rbtree->rbt_nil); \ ret = &rbtree->rbt_nil; \ while (true) { \ int cmp = (a_cmp)(node, tnode); \ if (cmp < 0) { \ ret = tnode; \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } else if (cmp > 0) { \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } else { \ break; \ } \ assert(tnode != &rbtree->rbt_nil); \ } \ } \ if (ret == &rbtree->rbt_nil) { \ ret = (NULL); \ } \ return (ret); \ } \ a_attr a_type * \ a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ a_type *ret; \ if (rbtn_left_get(a_type, a_field, node) != &rbtree->rbt_nil) { \ rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \ a_field, node), ret); \ } else { \ a_type *tnode = rbtree->rbt_root; \ assert(tnode != &rbtree->rbt_nil); \ ret = &rbtree->rbt_nil; \ while (true) { \ int cmp = (a_cmp)(node, tnode); \ if (cmp < 0) { \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } else if (cmp > 0) { \ ret = tnode; \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } else { \ break; \ } \ assert(tnode != &rbtree->rbt_nil); \ } \ } \ if (ret == &rbtree->rbt_nil) { \ ret = (NULL); \ } \ return (ret); \ } \ a_attr a_type * \ a_prefix##search(a_rbt_type *rbtree, a_type *key) { \ a_type *ret; \ int cmp; \ ret = rbtree->rbt_root; \ while (ret != &rbtree->rbt_nil \ && (cmp = (a_cmp)(key, ret)) != 0) { \ if (cmp < 0) { \ ret = rbtn_left_get(a_type, a_field, ret); \ } else { \ ret = rbtn_right_get(a_type, a_field, ret); \ } \ } \ if (ret == &rbtree->rbt_nil) { \ ret = (NULL); \ } \ return (ret); \ } \ a_attr a_type * \ a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \ a_type *ret; \ a_type *tnode = rbtree->rbt_root; \ ret = &rbtree->rbt_nil; \ while (tnode != &rbtree->rbt_nil) { \ int cmp = (a_cmp)(key, tnode); \ if (cmp < 0) { \ ret = tnode; \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } else if (cmp > 0) { \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } else { \ ret = tnode; \ break; \ } \ } \ if (ret == &rbtree->rbt_nil) { \ ret = (NULL); \ } \ return (ret); \ } \ a_attr a_type * \ a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \ a_type *ret; \ a_type *tnode = rbtree->rbt_root; \ ret = &rbtree->rbt_nil; \ while (tnode != &rbtree->rbt_nil) { \ int cmp = (a_cmp)(key, tnode); \ if (cmp < 0) { \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } else if (cmp > 0) { \ ret = tnode; \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } else { \ ret = tnode; \ break; \ } \ } \ if (ret == &rbtree->rbt_nil) { \ ret = (NULL); \ } \ return (ret); \ } \ a_attr void \ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ struct { \ a_type *node; \ int 
cmp; \ } path[sizeof(void *) << 4], *pathp; \ rbt_node_new(a_type, a_field, rbtree, node); \ /* Wind. */ \ path->node = rbtree->rbt_root; \ for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \ int cmp = pathp->cmp = a_cmp(node, pathp->node); \ assert(cmp != 0); \ if (cmp < 0) { \ pathp[1].node = rbtn_left_get(a_type, a_field, \ pathp->node); \ } else { \ pathp[1].node = rbtn_right_get(a_type, a_field, \ pathp->node); \ } \ } \ pathp->node = node; \ /* Unwind. */ \ for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ a_type *cnode = pathp->node; \ if (pathp->cmp < 0) { \ a_type *left = pathp[1].node; \ rbtn_left_set(a_type, a_field, cnode, left); \ if (rbtn_red_get(a_type, a_field, left)) { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ if (rbtn_red_get(a_type, a_field, leftleft)) { \ /* Fix up 4-node. */ \ a_type *tnode; \ rbtn_black_set(a_type, a_field, leftleft); \ rbtn_rotate_right(a_type, a_field, cnode, tnode); \ cnode = tnode; \ } \ } else { \ return; \ } \ } else { \ a_type *right = pathp[1].node; \ rbtn_right_set(a_type, a_field, cnode, right); \ if (rbtn_red_get(a_type, a_field, right)) { \ a_type *left = rbtn_left_get(a_type, a_field, cnode); \ if (rbtn_red_get(a_type, a_field, left)) { \ /* Split 4-node. */ \ rbtn_black_set(a_type, a_field, left); \ rbtn_black_set(a_type, a_field, right); \ rbtn_red_set(a_type, a_field, cnode); \ } else { \ /* Lean left. */ \ a_type *tnode; \ bool tred = rbtn_red_get(a_type, a_field, cnode); \ rbtn_rotate_left(a_type, a_field, cnode, tnode); \ rbtn_color_set(a_type, a_field, tnode, tred); \ rbtn_red_set(a_type, a_field, cnode); \ cnode = tnode; \ } \ } else { \ return; \ } \ } \ pathp->node = cnode; \ } \ /* Set root, and make it black. */ \ rbtree->rbt_root = path->node; \ rbtn_black_set(a_type, a_field, rbtree->rbt_root); \ } \ a_attr void \ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ struct { \ a_type *node; \ int cmp; \ } *pathp, *nodep, path[sizeof(void *) << 4]; \ /* Wind. */ \ nodep = NULL; /* Silence compiler warning. */ \ path->node = rbtree->rbt_root; \ for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \ int cmp = pathp->cmp = a_cmp(node, pathp->node); \ if (cmp < 0) { \ pathp[1].node = rbtn_left_get(a_type, a_field, \ pathp->node); \ } else { \ pathp[1].node = rbtn_right_get(a_type, a_field, \ pathp->node); \ if (cmp == 0) { \ /* Find node's successor, in preparation for swap. */ \ pathp->cmp = 1; \ nodep = pathp; \ for (pathp++; pathp->node != &rbtree->rbt_nil; \ pathp++) { \ pathp->cmp = -1; \ pathp[1].node = rbtn_left_get(a_type, a_field, \ pathp->node); \ } \ break; \ } \ } \ } \ assert(nodep->node == node); \ pathp--; \ if (pathp->node != node) { \ /* Swap node with its successor. */ \ bool tred = rbtn_red_get(a_type, a_field, pathp->node); \ rbtn_color_set(a_type, a_field, pathp->node, \ rbtn_red_get(a_type, a_field, node)); \ rbtn_left_set(a_type, a_field, pathp->node, \ rbtn_left_get(a_type, a_field, node)); \ /* If node's successor is its right child, the following code */\ /* will do the wrong thing for the right child pointer. */\ /* However, it doesn't matter, because the pointer will be */\ /* properly set when the successor is pruned. */\ rbtn_right_set(a_type, a_field, pathp->node, \ rbtn_right_get(a_type, a_field, node)); \ rbtn_color_set(a_type, a_field, node, tred); \ /* The pruned leaf node's child pointers are never accessed */\ /* again, so don't bother setting them to nil. 
*/\ nodep->node = pathp->node; \ pathp->node = node; \ if (nodep == path) { \ rbtree->rbt_root = nodep->node; \ } else { \ if (nodep[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, nodep[-1].node, \ nodep->node); \ } else { \ rbtn_right_set(a_type, a_field, nodep[-1].node, \ nodep->node); \ } \ } \ } else { \ a_type *left = rbtn_left_get(a_type, a_field, node); \ if (left != &rbtree->rbt_nil) { \ /* node has no successor, but it has a left child. */\ /* Splice node out, without losing the left child. */\ assert(!rbtn_red_get(a_type, a_field, node)); \ assert(rbtn_red_get(a_type, a_field, left)); \ rbtn_black_set(a_type, a_field, left); \ if (pathp == path) { \ rbtree->rbt_root = left; \ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ left); \ } else { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ left); \ } \ } \ return; \ } else if (pathp == path) { \ /* The tree only contained one node. */ \ rbtree->rbt_root = &rbtree->rbt_nil; \ return; \ } \ } \ if (rbtn_red_get(a_type, a_field, pathp->node)) { \ /* Prune red node, which requires no fixup. */ \ assert(pathp[-1].cmp < 0); \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ &rbtree->rbt_nil); \ return; \ } \ /* The node to be pruned is black, so unwind until balance is */\ /* restored. */\ pathp->node = &rbtree->rbt_nil; \ for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ assert(pathp->cmp != 0); \ if (pathp->cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp->node, \ pathp[1].node); \ assert(!rbtn_red_get(a_type, a_field, pathp[1].node)); \ if (rbtn_red_get(a_type, a_field, pathp->node)) { \ a_type *right = rbtn_right_get(a_type, a_field, \ pathp->node); \ a_type *rightleft = rbtn_left_get(a_type, a_field, \ right); \ a_type *tnode; \ if (rbtn_red_get(a_type, a_field, rightleft)) { \ /* In the following diagrams, ||, //, and \\ */\ /* indicate the path to the removed node. */\ /* */\ /* || */\ /* pathp(r) */\ /* // \ */\ /* (b) (b) */\ /* / */\ /* (r) */\ /* */\ rbtn_black_set(a_type, a_field, pathp->node); \ rbtn_rotate_right(a_type, a_field, right, tnode); \ rbtn_right_set(a_type, a_field, pathp->node, tnode);\ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ } else { \ /* || */\ /* pathp(r) */\ /* // \ */\ /* (b) (b) */\ /* / */\ /* (b) */\ /* */\ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ } \ /* Balance restored, but rotation modified subtree */\ /* root. */\ assert((uintptr_t)pathp > (uintptr_t)path); \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ tnode); \ } else { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ tnode); \ } \ return; \ } else { \ a_type *right = rbtn_right_get(a_type, a_field, \ pathp->node); \ a_type *rightleft = rbtn_left_get(a_type, a_field, \ right); \ if (rbtn_red_get(a_type, a_field, rightleft)) { \ /* || */\ /* pathp(b) */\ /* // \ */\ /* (b) (b) */\ /* / */\ /* (r) */\ a_type *tnode; \ rbtn_black_set(a_type, a_field, rightleft); \ rbtn_rotate_right(a_type, a_field, right, tnode); \ rbtn_right_set(a_type, a_field, pathp->node, tnode);\ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ /* Balance restored, but rotation modified */\ /* subtree root, which may actually be the tree */\ /* root. */\ if (pathp == path) { \ /* Set root. 
*/ \ rbtree->rbt_root = tnode; \ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, \ pathp[-1].node, tnode); \ } else { \ rbtn_right_set(a_type, a_field, \ pathp[-1].node, tnode); \ } \ } \ return; \ } else { \ /* || */\ /* pathp(b) */\ /* // \ */\ /* (b) (b) */\ /* / */\ /* (b) */\ a_type *tnode; \ rbtn_red_set(a_type, a_field, pathp->node); \ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ pathp->node = tnode; \ } \ } \ } else { \ a_type *left; \ rbtn_right_set(a_type, a_field, pathp->node, \ pathp[1].node); \ left = rbtn_left_get(a_type, a_field, pathp->node); \ if (rbtn_red_get(a_type, a_field, left)) { \ a_type *tnode; \ a_type *leftright = rbtn_right_get(a_type, a_field, \ left); \ a_type *leftrightleft = rbtn_left_get(a_type, a_field, \ leftright); \ if (rbtn_red_get(a_type, a_field, leftrightleft)) { \ /* || */\ /* pathp(b) */\ /* / \\ */\ /* (r) (b) */\ /* \ */\ /* (b) */\ /* / */\ /* (r) */\ a_type *unode; \ rbtn_black_set(a_type, a_field, leftrightleft); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ unode); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ rbtn_right_set(a_type, a_field, unode, tnode); \ rbtn_rotate_left(a_type, a_field, unode, tnode); \ } else { \ /* || */\ /* pathp(b) */\ /* / \\ */\ /* (r) (b) */\ /* \ */\ /* (b) */\ /* / */\ /* (b) */\ assert(leftright != &rbtree->rbt_nil); \ rbtn_red_set(a_type, a_field, leftright); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ rbtn_black_set(a_type, a_field, tnode); \ } \ /* Balance restored, but rotation modified subtree */\ /* root, which may actually be the tree root. */\ if (pathp == path) { \ /* Set root. */ \ rbtree->rbt_root = tnode; \ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ tnode); \ } else { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ tnode); \ } \ } \ return; \ } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ if (rbtn_red_get(a_type, a_field, leftleft)) { \ /* || */\ /* pathp(r) */\ /* / \\ */\ /* (b) (b) */\ /* / */\ /* (r) */\ a_type *tnode; \ rbtn_black_set(a_type, a_field, pathp->node); \ rbtn_red_set(a_type, a_field, left); \ rbtn_black_set(a_type, a_field, leftleft); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ /* Balance restored, but rotation modified */\ /* subtree root. */\ assert((uintptr_t)pathp > (uintptr_t)path); \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ tnode); \ } else { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ tnode); \ } \ return; \ } else { \ /* || */\ /* pathp(r) */\ /* / \\ */\ /* (b) (b) */\ /* / */\ /* (b) */\ rbtn_red_set(a_type, a_field, left); \ rbtn_black_set(a_type, a_field, pathp->node); \ /* Balance restored. */ \ return; \ } \ } else { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ if (rbtn_red_get(a_type, a_field, leftleft)) { \ /* || */\ /* pathp(b) */\ /* / \\ */\ /* (b) (b) */\ /* / */\ /* (r) */\ a_type *tnode; \ rbtn_black_set(a_type, a_field, leftleft); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ /* Balance restored, but rotation modified */\ /* subtree root, which may actually be the tree */\ /* root. */\ if (pathp == path) { \ /* Set root. 
*/ \ rbtree->rbt_root = tnode; \ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, \ pathp[-1].node, tnode); \ } else { \ rbtn_right_set(a_type, a_field, \ pathp[-1].node, tnode); \ } \ } \ return; \ } else { \ /* || */\ /* pathp(b) */\ /* / \\ */\ /* (b) (b) */\ /* / */\ /* (b) */\ rbtn_red_set(a_type, a_field, left); \ } \ } \ } \ } \ /* Set root. */ \ rbtree->rbt_root = path->node; \ assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \ } \ a_attr a_type * \ a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ if (node == &rbtree->rbt_nil) { \ return (&rbtree->rbt_nil); \ } else { \ a_type *ret; \ if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \ a_field, node), cb, arg)) != &rbtree->rbt_nil \ || (ret = cb(rbtree, node, arg)) != NULL) { \ return (ret); \ } \ return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ a_field, node), cb, arg)); \ } \ } \ a_attr a_type * \ a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ int cmp = a_cmp(start, node); \ if (cmp < 0) { \ a_type *ret; \ if ((ret = a_prefix##iter_start(rbtree, start, \ rbtn_left_get(a_type, a_field, node), cb, arg)) != \ &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ return (ret); \ } \ return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ a_field, node), cb, arg)); \ } else if (cmp > 0) { \ return (a_prefix##iter_start(rbtree, start, \ rbtn_right_get(a_type, a_field, node), cb, arg)); \ } else { \ a_type *ret; \ if ((ret = cb(rbtree, node, arg)) != NULL) { \ return (ret); \ } \ return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ a_field, node), cb, arg)); \ } \ } \ a_attr a_type * \ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ a_rbt_type *, a_type *, void *), void *arg) { \ a_type *ret; \ if (start != NULL) { \ ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \ cb, arg); \ } else { \ ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\ } \ if (ret == &rbtree->rbt_nil) { \ ret = NULL; \ } \ return (ret); \ } \ a_attr a_type * \ a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ if (node == &rbtree->rbt_nil) { \ return (&rbtree->rbt_nil); \ } else { \ a_type *ret; \ if ((ret = a_prefix##reverse_iter_recurse(rbtree, \ rbtn_right_get(a_type, a_field, node), cb, arg)) != \ &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ return (ret); \ } \ return (a_prefix##reverse_iter_recurse(rbtree, \ rbtn_left_get(a_type, a_field, node), cb, arg)); \ } \ } \ a_attr a_type * \ a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \ a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \ void *arg) { \ int cmp = a_cmp(start, node); \ if (cmp > 0) { \ a_type *ret; \ if ((ret = a_prefix##reverse_iter_start(rbtree, start, \ rbtn_right_get(a_type, a_field, node), cb, arg)) != \ &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ return (ret); \ } \ return (a_prefix##reverse_iter_recurse(rbtree, \ rbtn_left_get(a_type, a_field, node), cb, arg)); \ } else if (cmp < 0) { \ return (a_prefix##reverse_iter_start(rbtree, start, \ rbtn_left_get(a_type, a_field, node), cb, arg)); \ } else { \ a_type *ret; \ if ((ret = cb(rbtree, node, arg)) != NULL) { \ return (ret); \ } \ return (a_prefix##reverse_iter_recurse(rbtree, \ rbtn_left_get(a_type, a_field, node), cb, 
arg)); \ } \ } \ a_attr a_type * \ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ a_type *ret; \ if (start != NULL) { \ ret = a_prefix##reverse_iter_start(rbtree, start, \ rbtree->rbt_root, cb, arg); \ } else { \ ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \ cb, arg); \ } \ if (ret == &rbtree->rbt_nil) { \ ret = NULL; \ } \ return (ret); \ } #endif /* RB_H_ */ disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/rtree.h000066400000000000000000000207241264176561100244050ustar00rootroot00000000000000/* * This radix tree implementation is tailored to the singular purpose of * associating metadata with chunks that are currently owned by jemalloc. * ******************************************************************************* */ #ifdef JEMALLOC_H_TYPES typedef struct rtree_node_elm_s rtree_node_elm_t; typedef struct rtree_level_s rtree_level_t; typedef struct rtree_s rtree_t; /* * RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the * machine address width. */ #define LG_RTREE_BITS_PER_LEVEL 4 #define RTREE_BITS_PER_LEVEL (ZU(1) << LG_RTREE_BITS_PER_LEVEL) #define RTREE_HEIGHT_MAX \ ((ZU(1) << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL) /* Used for two-stage lock-free node initialization. */ #define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1) /* * The node allocation callback function's argument is the number of contiguous * rtree_node_elm_t structures to allocate, and the resulting memory must be * zeroed. */ typedef rtree_node_elm_t *(rtree_node_alloc_t)(size_t); typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *); #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct rtree_node_elm_s { union { void *pun; rtree_node_elm_t *child; extent_node_t *val; }; }; struct rtree_level_s { /* * A non-NULL subtree points to a subtree rooted along the hypothetical * path to the leaf node corresponding to key 0. Depending on what keys * have been used to store to the tree, an arbitrary combination of * subtree pointers may remain NULL. * * Suppose keys comprise 48 bits, and LG_RTREE_BITS_PER_LEVEL is 4. * This results in a 3-level tree, and the leftmost leaf can be directly * accessed via subtrees[2], the subtree prefixed by 0x0000 (excluding * 0x00000000) can be accessed via subtrees[1], and the remainder of the * tree can be accessed via subtrees[0]. * * levels[0] : [ | 0x0001******** | 0x0002******** | ...] * * levels[1] : [ | 0x00000001**** | 0x00000002**** | ... ] * * levels[2] : [val(0x000000000000) | val(0x000000000001) | ...] * * This has practical implications on x64, which currently uses only the * lower 47 bits of virtual address space in userland, thus leaving * subtrees[0] unused and avoiding a level of tree traversal. */ union { void *subtree_pun; rtree_node_elm_t *subtree; }; /* Number of key bits distinguished by this level. */ unsigned bits; /* * Cumulative number of key bits distinguished by traversing to * corresponding tree level. */ unsigned cumbits; }; struct rtree_s { rtree_node_alloc_t *alloc; rtree_node_dalloc_t *dalloc; unsigned height; /* * Precomputed table used to convert from the number of leading 0 key * bits to which subtree level to start at. 
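 *
 * Illustration (a restatement of the lookup logic, not additional API):
 * the index consulted is lg_floor(key) >> LG_RTREE_BITS_PER_LEVEL, so
 * all keys whose highest set bit falls in the same
 * RTREE_BITS_PER_LEVEL-wide chunk share a start level.  The more
 * leading zero bits a key has, the deeper the traversal may begin, and
 * key 0 starts at the deepest level, height - 1.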
*/ unsigned start_level[RTREE_HEIGHT_MAX]; rtree_level_t levels[RTREE_HEIGHT_MAX]; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc, rtree_node_dalloc_t *dalloc); void rtree_delete(rtree_t *rtree); rtree_node_elm_t *rtree_subtree_read_hard(rtree_t *rtree, unsigned level); rtree_node_elm_t *rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE unsigned rtree_start_level(rtree_t *rtree, uintptr_t key); uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level); bool rtree_node_valid(rtree_node_elm_t *node); rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm); rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level); extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent); void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val); rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level); rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level); extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent); bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_)) JEMALLOC_INLINE unsigned rtree_start_level(rtree_t *rtree, uintptr_t key) { unsigned start_level; if (unlikely(key == 0)) return (rtree->height - 1); start_level = rtree->start_level[lg_floor(key) >> LG_RTREE_BITS_PER_LEVEL]; assert(start_level < rtree->height); return (start_level); } JEMALLOC_INLINE uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level) { return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - rtree->levels[level].cumbits)) & ((ZU(1) << rtree->levels[level].bits) - 1)); } JEMALLOC_INLINE bool rtree_node_valid(rtree_node_elm_t *node) { return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING); } JEMALLOC_INLINE rtree_node_elm_t * rtree_child_tryread(rtree_node_elm_t *elm) { rtree_node_elm_t *child; /* Double-checked read (first read may be stale. */ child = elm->child; if (!rtree_node_valid(child)) child = atomic_read_p(&elm->pun); return (child); } JEMALLOC_INLINE rtree_node_elm_t * rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level) { rtree_node_elm_t *child; child = rtree_child_tryread(elm); if (unlikely(!rtree_node_valid(child))) child = rtree_child_read_hard(rtree, elm, level); return (child); } JEMALLOC_INLINE extent_node_t * rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent) { if (dependent) { /* * Reading a val on behalf of a pointer to a valid allocation is * guaranteed to be a clean read even without synchronization, * because the rtree update became visible in memory before the * pointer came into existence. */ return (elm->val); } else { /* * An arbitrary read, e.g. on behalf of ivsalloc(), may not be * dependent on a previous rtree write, which means a stale read * could result if synchronization were omitted here. 
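 * (Put differently, the dependent flag trades synchronization for a
 * caller-supplied guarantee: pass true only when the key was derived
 * from a pointer to a live allocation, as on the deallocation path;
 * speculative probes such as ivsalloc() must pass false and take the
 * atomic read below.)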
*/ return (atomic_read_p(&elm->pun)); } } JEMALLOC_INLINE void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val) { atomic_write_p(&elm->pun, val); } JEMALLOC_INLINE rtree_node_elm_t * rtree_subtree_tryread(rtree_t *rtree, unsigned level) { rtree_node_elm_t *subtree; /* Double-checked read (first read may be stale. */ subtree = rtree->levels[level].subtree; if (!rtree_node_valid(subtree)) subtree = atomic_read_p(&rtree->levels[level].subtree_pun); return (subtree); } JEMALLOC_INLINE rtree_node_elm_t * rtree_subtree_read(rtree_t *rtree, unsigned level) { rtree_node_elm_t *subtree; subtree = rtree_subtree_tryread(rtree, level); if (unlikely(!rtree_node_valid(subtree))) subtree = rtree_subtree_read_hard(rtree, level); return (subtree); } JEMALLOC_INLINE extent_node_t * rtree_get(rtree_t *rtree, uintptr_t key, bool dependent) { uintptr_t subkey; unsigned i, start_level; rtree_node_elm_t *node, *child; start_level = rtree_start_level(rtree, key); for (i = start_level, node = rtree_subtree_tryread(rtree, start_level); /**/; i++, node = child) { if (!dependent && unlikely(!rtree_node_valid(node))) return (NULL); subkey = rtree_subkey(rtree, key, i); if (i == rtree->height - 1) { /* * node is a leaf, so it contains values rather than * child pointers. */ return (rtree_val_read(rtree, &node[subkey], dependent)); } assert(i < rtree->height - 1); child = rtree_child_tryread(&node[subkey]); } not_reached(); } JEMALLOC_INLINE bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val) { uintptr_t subkey; unsigned i, start_level; rtree_node_elm_t *node, *child; start_level = rtree_start_level(rtree, key); node = rtree_subtree_read(rtree, start_level); if (node == NULL) return (true); for (i = start_level; /**/; i++, node = child) { subkey = rtree_subkey(rtree, key, i); if (i == rtree->height - 1) { /* * node is a leaf, so it contains values rather than * child pointers. */ rtree_val_write(rtree, &node[subkey], val); return (false); } assert(i + 1 < rtree->height); child = rtree_child_read(rtree, &node[subkey], i); if (child == NULL) return (true); } not_reached(); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/size_classes.sh000077500000000000000000000174761264176561100261530ustar00rootroot00000000000000#!/bin/sh # # Usage: size_classes.sh # The following limits are chosen such that they cover all supported platforms. # Pointer sizes. lg_zarr="2 3" # Quanta. lg_qarr=$1 # The range of tiny size classes is [2^lg_tmin..2^(lg_q-1)]. lg_tmin=$2 # Maximum lookup size. lg_kmax=12 # Page sizes. lg_parr=`echo $3 | tr ',' ' '` # Size class group size (number of size classes for each size doubling). 
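#
# Illustrative invocation (hypothetical values; the real arguments are
# supplied by configure and vary by platform):
#   sh size_classes.sh 4 3 12 2
# i.e. lg(quantum)=4, lg(tiny min)=3, lg(page)=12, and 2^2 = 4 size
# classes per size doubling.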
lg_g=$4 pow2() { e=$1 pow2_result=1 while [ ${e} -gt 0 ] ; do pow2_result=$((${pow2_result} + ${pow2_result})) e=$((${e} - 1)) done } lg() { x=$1 lg_result=0 while [ ${x} -gt 1 ] ; do lg_result=$((${lg_result} + 1)) x=$((${x} / 2)) done } size_class() { index=$1 lg_grp=$2 lg_delta=$3 ndelta=$4 lg_p=$5 lg_kmax=$6 lg ${ndelta}; lg_ndelta=${lg_result}; pow2 ${lg_ndelta} if [ ${pow2_result} -lt ${ndelta} ] ; then rem="yes" else rem="no" fi lg_size=${lg_grp} if [ $((${lg_delta} + ${lg_ndelta})) -eq ${lg_grp} ] ; then lg_size=$((${lg_grp} + 1)) else lg_size=${lg_grp} rem="yes" fi if [ ${lg_size} -lt $((${lg_p} + ${lg_g})) ] ; then bin="yes" else bin="no" fi if [ ${lg_size} -lt ${lg_kmax} \ -o ${lg_size} -eq ${lg_kmax} -a ${rem} = "no" ] ; then lg_delta_lookup=${lg_delta} else lg_delta_lookup="no" fi printf ' SC(%3d, %6d, %8d, %6d, %3s, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${bin} ${lg_delta_lookup} # Defined upon return: # - lg_delta_lookup (${lg_delta} or "no") # - bin ("yes" or "no") } sep_line() { echo " \\" } size_classes() { lg_z=$1 lg_q=$2 lg_t=$3 lg_p=$4 lg_g=$5 pow2 $((${lg_z} + 3)); ptr_bits=${pow2_result} pow2 ${lg_g}; g=${pow2_result} echo "#define SIZE_CLASSES \\" echo " /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \\" ntbins=0 nlbins=0 lg_tiny_maxclass='"NA"' nbins=0 # Tiny size classes. ndelta=0 index=0 lg_grp=${lg_t} lg_delta=${lg_grp} while [ ${lg_grp} -lt ${lg_q} ] ; do size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} if [ ${lg_delta_lookup} != "no" ] ; then nlbins=$((${index} + 1)) fi if [ ${bin} != "no" ] ; then nbins=$((${index} + 1)) fi ntbins=$((${ntbins} + 1)) lg_tiny_maxclass=${lg_grp} # Final written value is correct. index=$((${index} + 1)) lg_delta=${lg_grp} lg_grp=$((${lg_grp} + 1)) done # First non-tiny group. if [ ${ntbins} -gt 0 ] ; then sep_line # The first size class has an unusual encoding, because the size has to be # split between grp and delta*ndelta. lg_grp=$((${lg_grp} - 1)) ndelta=1 size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} index=$((${index} + 1)) lg_grp=$((${lg_grp} + 1)) lg_delta=$((${lg_delta} + 1)) fi while [ ${ndelta} -lt ${g} ] ; do size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} index=$((${index} + 1)) ndelta=$((${ndelta} + 1)) done # All remaining groups. 
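# (Each of these groups doubles the size range it covers while keeping
# 2^lg_g classes per group, so the spacing between adjacent classes is
# 2^(lg_grp - lg_g) and worst-case internal fragmentation stays near
# 1/(2^lg_g + 1), i.e. about 20% for the common lg_g of 2.)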
lg_grp=$((${lg_grp} + ${lg_g})) while [ ${lg_grp} -lt ${ptr_bits} ] ; do sep_line ndelta=1 if [ ${lg_grp} -eq $((${ptr_bits} - 1)) ] ; then ndelta_limit=$((${g} - 1)) else ndelta_limit=${g} fi while [ ${ndelta} -le ${ndelta_limit} ] ; do size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} if [ ${lg_delta_lookup} != "no" ] ; then nlbins=$((${index} + 1)) # Final written value is correct: lookup_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))" fi if [ ${bin} != "no" ] ; then nbins=$((${index} + 1)) # Final written value is correct: small_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))" if [ ${lg_g} -gt 0 ] ; then lg_large_minclass=$((${lg_grp} + 1)) else lg_large_minclass=$((${lg_grp} + 2)) fi fi # Final written value is correct: huge_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))" index=$((${index} + 1)) ndelta=$((${ndelta} + 1)) done lg_grp=$((${lg_grp} + 1)) lg_delta=$((${lg_delta} + 1)) done echo nsizes=${index} # Defined upon completion: # - ntbins # - nlbins # - nbins # - nsizes # - lg_tiny_maxclass # - lookup_maxclass # - small_maxclass # - lg_large_minclass # - huge_maxclass } cat < 255) # error "Too many small size classes" #endif #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ EOF disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/stats.h000066400000000000000000000110411264176561100244120ustar00rootroot00000000000000/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct tcache_bin_stats_s tcache_bin_stats_t; typedef struct malloc_bin_stats_s malloc_bin_stats_t; typedef struct malloc_large_stats_s malloc_large_stats_t; typedef struct malloc_huge_stats_s malloc_huge_stats_t; typedef struct arena_stats_s arena_stats_t; typedef struct chunk_stats_s chunk_stats_t; #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct tcache_bin_stats_s { /* * Number of allocation requests that corresponded to the size of this * bin. */ uint64_t nrequests; }; struct malloc_bin_stats_s { /* * Total number of allocation/deallocation requests served directly by * the bin. Note that tcache may allocate an object, then recycle it * many times, resulting many increments to nrequests, but only one * each to nmalloc and ndalloc. */ uint64_t nmalloc; uint64_t ndalloc; /* * Number of allocation requests that correspond to the size of this * bin. This includes requests served by tcache, though tcache only * periodically merges into this counter. */ uint64_t nrequests; /* * Current number of regions of this size class, including regions * currently cached by tcache. */ size_t curregs; /* Number of tcache fills from this bin. */ uint64_t nfills; /* Number of tcache flushes to this bin. */ uint64_t nflushes; /* Total number of runs created for this bin's size class. */ uint64_t nruns; /* * Total number of runs reused by extracting them from the runs tree for * this bin's size class. 
*/ uint64_t reruns; /* Current number of runs in this bin. */ size_t curruns; }; struct malloc_large_stats_s { /* * Total number of allocation/deallocation requests served directly by * the arena. Note that tcache may allocate an object, then recycle it * many times, resulting many increments to nrequests, but only one * each to nmalloc and ndalloc. */ uint64_t nmalloc; uint64_t ndalloc; /* * Number of allocation requests that correspond to this size class. * This includes requests served by tcache, though tcache only * periodically merges into this counter. */ uint64_t nrequests; /* * Current number of runs of this size class, including runs currently * cached by tcache. */ size_t curruns; }; struct malloc_huge_stats_s { /* * Total number of allocation/deallocation requests served directly by * the arena. */ uint64_t nmalloc; uint64_t ndalloc; /* Current number of (multi-)chunk allocations of this size class. */ size_t curhchunks; }; struct arena_stats_s { /* Number of bytes currently mapped. */ size_t mapped; /* * Total number of purge sweeps, total number of madvise calls made, * and total pages purged in order to keep dirty unused memory under * control. */ uint64_t npurge; uint64_t nmadvise; uint64_t purged; /* * Number of bytes currently mapped purely for metadata purposes, and * number of bytes currently allocated for internal metadata. */ size_t metadata_mapped; size_t metadata_allocated; /* Protected via atomic_*_z(). */ /* Per-size-category statistics. */ size_t allocated_large; uint64_t nmalloc_large; uint64_t ndalloc_large; uint64_t nrequests_large; size_t allocated_huge; uint64_t nmalloc_huge; uint64_t ndalloc_huge; /* One element for each large size class. */ malloc_large_stats_t *lstats; /* One element for each huge size class. */ malloc_huge_stats_t *hstats; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS extern bool opt_stats_print; extern size_t stats_cactive; void stats_print(void (*write)(void *, const char *), void *cbopaque, const char *opts); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE size_t stats_cactive_get(void); void stats_cactive_add(size_t size); void stats_cactive_sub(size_t size); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_)) JEMALLOC_INLINE size_t stats_cactive_get(void) { return (atomic_read_z(&stats_cactive)); } JEMALLOC_INLINE void stats_cactive_add(size_t size) { atomic_add_z(&stats_cactive, size); } JEMALLOC_INLINE void stats_cactive_sub(size_t size) { atomic_sub_z(&stats_cactive, size); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/tcache.h000066400000000000000000000271431264176561100245150ustar00rootroot00000000000000/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct tcache_bin_info_s tcache_bin_info_t; typedef struct tcache_bin_s tcache_bin_t; typedef struct tcache_s tcache_t; typedef struct tcaches_s tcaches_t; /* * tcache pointers close to NULL are used to encode state information that is * used for two purposes: preventing thread caching on a per thread basis and * cleaning up during thread shutdown. 
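 *
 * Illustrative test (an assumed shape for exposition, not a quote of
 * the implementation): readers can separate sentinels from real caches
 * with a single unsigned comparison, e.g.
 *
 *   if ((uintptr_t)tcache > (uintptr_t)TCACHE_STATE_MAX)
 *           (tcache is a genuine tcache_t pointer)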
*/ #define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1) #define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2) #define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3) #define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY /* * Absolute minimum number of cache slots for each small bin. */ #define TCACHE_NSLOTS_SMALL_MIN 20 /* * Absolute maximum number of cache slots for each small bin in the thread * cache. This is an additional constraint beyond that imposed as: twice the * number of regions per run for this size class. * * This constant must be an even number. */ #define TCACHE_NSLOTS_SMALL_MAX 200 /* Number of cache slots for large size classes. */ #define TCACHE_NSLOTS_LARGE 20 /* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */ #define LG_TCACHE_MAXCLASS_DEFAULT 15 /* * TCACHE_GC_SWEEP is the approximate number of allocation events between * full GC sweeps. Integer rounding may cause the actual number to be * slightly higher, since GC is performed incrementally. */ #define TCACHE_GC_SWEEP 8192 /* Number of tcache allocation/deallocation events between incremental GCs. */ #define TCACHE_GC_INCR \ ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1)) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS typedef enum { tcache_enabled_false = 0, /* Enable cast to/from bool. */ tcache_enabled_true = 1, tcache_enabled_default = 2 } tcache_enabled_t; /* * Read-only information associated with each element of tcache_t's tbins array * is stored separately, mainly to reduce memory usage. */ struct tcache_bin_info_s { unsigned ncached_max; /* Upper limit on ncached. */ }; struct tcache_bin_s { tcache_bin_stats_t tstats; int low_water; /* Min # cached since last GC. */ unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */ unsigned ncached; /* # of cached objects. */ void **avail; /* Stack of available objects. */ }; struct tcache_s { ql_elm(tcache_t) link; /* Used for aggregating stats. */ uint64_t prof_accumbytes;/* Cleared after arena_prof_accum(). */ unsigned ev_cnt; /* Event count since incremental GC. */ szind_t next_gc_bin; /* Next bin to GC. */ tcache_bin_t tbins[1]; /* Dynamically sized. */ /* * The pointer stacks associated with tbins follow as a contiguous * array. During tcache initialization, the avail pointer in each * element of tbins is initialized to point to the proper offset within * this array. */ }; /* Linkage for list of available (previously used) explicit tcache IDs. */ struct tcaches_s { union { tcache_t *tcache; tcaches_t *next; }; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS extern bool opt_tcache; extern ssize_t opt_lg_tcache_max; extern tcache_bin_info_t *tcache_bin_info; /* * Number of tcache bins. There are NBINS small-object bins, plus 0 or more * large-object bins. */ extern size_t nhbins; /* Maximum cached size class. */ extern size_t tcache_maxclass; /* * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are * completely disjoint from this data structure. tcaches starts off as a sparse * array, so it has no physical memory footprint until individual pages are * touched. This allows the entire array to be allocated the first time an * explicit tcache is created without a disproportionate impact on memory usage. 
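 *
 * Usage sketch (illustrative, using the interfaces this comment names):
 * a thread obtains an index ind via the "tcache.create" mallctl,
 * allocates through that cache with mallocx(size, MALLOCX_TCACHE(ind)),
 * and eventually releases it with the "tcache.destroy" mallctl.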
*/ extern tcaches_t *tcaches; size_t tcache_salloc(const void *ptr); void tcache_event_hard(tsd_t *tsd, tcache_t *tcache); void *tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache, tcache_bin_t *tbin, szind_t binind); void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, szind_t binind, unsigned rem); void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, unsigned rem, tcache_t *tcache); void tcache_arena_associate(tcache_t *tcache, arena_t *arena); void tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena); void tcache_arena_dissociate(tcache_t *tcache, arena_t *arena); tcache_t *tcache_get_hard(tsd_t *tsd); tcache_t *tcache_create(tsd_t *tsd, arena_t *arena); void tcache_cleanup(tsd_t *tsd); void tcache_enabled_cleanup(tsd_t *tsd); void tcache_stats_merge(tcache_t *tcache, arena_t *arena); bool tcaches_create(tsd_t *tsd, unsigned *r_ind); void tcaches_flush(tsd_t *tsd, unsigned ind); void tcaches_destroy(tsd_t *tsd, unsigned ind); bool tcache_boot(void); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE void tcache_event(tsd_t *tsd, tcache_t *tcache); void tcache_flush(void); bool tcache_enabled_get(void); tcache_t *tcache_get(tsd_t *tsd, bool create); void tcache_enabled_set(bool enabled); void *tcache_alloc_easy(tcache_bin_t *tbin); void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, bool zero); void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, bool zero); void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind); void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size); tcache_t *tcaches_get(tsd_t *tsd, unsigned ind); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_)) JEMALLOC_INLINE void tcache_flush(void) { tsd_t *tsd; cassert(config_tcache); tsd = tsd_fetch(); tcache_cleanup(tsd); } JEMALLOC_INLINE bool tcache_enabled_get(void) { tsd_t *tsd; tcache_enabled_t tcache_enabled; cassert(config_tcache); tsd = tsd_fetch(); tcache_enabled = tsd_tcache_enabled_get(tsd); if (tcache_enabled == tcache_enabled_default) { tcache_enabled = (tcache_enabled_t)opt_tcache; tsd_tcache_enabled_set(tsd, tcache_enabled); } return ((bool)tcache_enabled); } JEMALLOC_INLINE void tcache_enabled_set(bool enabled) { tsd_t *tsd; tcache_enabled_t tcache_enabled; cassert(config_tcache); tsd = tsd_fetch(); tcache_enabled = (tcache_enabled_t)enabled; tsd_tcache_enabled_set(tsd, tcache_enabled); if (!enabled) tcache_cleanup(tsd); } JEMALLOC_ALWAYS_INLINE tcache_t * tcache_get(tsd_t *tsd, bool create) { tcache_t *tcache; if (!config_tcache) return (NULL); tcache = tsd_tcache_get(tsd); if (!create) return (tcache); if (unlikely(tcache == NULL) && tsd_nominal(tsd)) { tcache = tcache_get_hard(tsd); tsd_tcache_set(tsd, tcache); } return (tcache); } JEMALLOC_ALWAYS_INLINE void tcache_event(tsd_t *tsd, tcache_t *tcache) { if (TCACHE_GC_INCR == 0) return; tcache->ev_cnt++; assert(tcache->ev_cnt <= TCACHE_GC_INCR); if (unlikely(tcache->ev_cnt == TCACHE_GC_INCR)) tcache_event_hard(tsd, tcache); } JEMALLOC_ALWAYS_INLINE void * tcache_alloc_easy(tcache_bin_t *tbin) { void *ret; if (unlikely(tbin->ncached == 0)) { tbin->low_water = -1; return (NULL); } tbin->ncached--; if (unlikely((int)tbin->ncached < tbin->low_water)) tbin->low_water = tbin->ncached; ret = tbin->avail[tbin->ncached]; 
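	/* ncached was decremented above, so avail[ncached] is the most
	 * recently cached object; LIFO reuse favors memory that is still
	 * warm in the CPU cache. */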
return (ret); } JEMALLOC_ALWAYS_INLINE void * tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, bool zero) { void *ret; szind_t binind; size_t usize; tcache_bin_t *tbin; binind = size2index(size); assert(binind < NBINS); tbin = &tcache->tbins[binind]; usize = index2size(binind); ret = tcache_alloc_easy(tbin); if (unlikely(ret == NULL)) { ret = tcache_alloc_small_hard(tsd, arena, tcache, tbin, binind); if (ret == NULL) return (NULL); } assert(tcache_salloc(ret) == usize); if (likely(!zero)) { if (config_fill) { if (unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &arena_bin_info[binind], false); } else if (unlikely(opt_zero)) memset(ret, 0, usize); } } else { if (config_fill && unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &arena_bin_info[binind], true); } memset(ret, 0, usize); } if (config_stats) tbin->tstats.nrequests++; if (config_prof) tcache->prof_accumbytes += usize; tcache_event(tsd, tcache); return (ret); } JEMALLOC_ALWAYS_INLINE void * tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, bool zero) { void *ret; szind_t binind; size_t usize; tcache_bin_t *tbin; binind = size2index(size); usize = index2size(binind); assert(usize <= tcache_maxclass); assert(binind < nhbins); tbin = &tcache->tbins[binind]; ret = tcache_alloc_easy(tbin); if (unlikely(ret == NULL)) { /* * Only allocate one large object at a time, because it's quite * expensive to create one and not use it. */ ret = arena_malloc_large(arena, usize, zero); if (ret == NULL) return (NULL); } else { if (config_prof && usize == LARGE_MINCLASS) { arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret); size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >> LG_PAGE); arena_mapbits_large_binind_set(chunk, pageind, BININD_INVALID); } if (likely(!zero)) { if (config_fill) { if (unlikely(opt_junk_alloc)) memset(ret, 0xa5, usize); else if (unlikely(opt_zero)) memset(ret, 0, usize); } } else memset(ret, 0, usize); if (config_stats) tbin->tstats.nrequests++; if (config_prof) tcache->prof_accumbytes += usize; } tcache_event(tsd, tcache); return (ret); } JEMALLOC_ALWAYS_INLINE void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind) { tcache_bin_t *tbin; tcache_bin_info_t *tbin_info; assert(tcache_salloc(ptr) <= SMALL_MAXCLASS); if (config_fill && unlikely(opt_junk_free)) arena_dalloc_junk_small(ptr, &arena_bin_info[binind]); tbin = &tcache->tbins[binind]; tbin_info = &tcache_bin_info[binind]; if (unlikely(tbin->ncached == tbin_info->ncached_max)) { tcache_bin_flush_small(tsd, tcache, tbin, binind, (tbin_info->ncached_max >> 1)); } assert(tbin->ncached < tbin_info->ncached_max); tbin->avail[tbin->ncached] = ptr; tbin->ncached++; tcache_event(tsd, tcache); } JEMALLOC_ALWAYS_INLINE void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size) { szind_t binind; tcache_bin_t *tbin; tcache_bin_info_t *tbin_info; assert((size & PAGE_MASK) == 0); assert(tcache_salloc(ptr) > SMALL_MAXCLASS); assert(tcache_salloc(ptr) <= tcache_maxclass); binind = size2index(size); if (config_fill && unlikely(opt_junk_free)) arena_dalloc_junk_large(ptr, size); tbin = &tcache->tbins[binind]; tbin_info = &tcache_bin_info[binind]; if (unlikely(tbin->ncached == tbin_info->ncached_max)) { tcache_bin_flush_large(tsd, tbin, binind, (tbin_info->ncached_max >> 1), tcache); } assert(tbin->ncached < tbin_info->ncached_max); tbin->avail[tbin->ncached] = ptr; tbin->ncached++; tcache_event(tsd, tcache); } JEMALLOC_ALWAYS_INLINE tcache_t * tcaches_get(tsd_t 
*tsd, unsigned ind) { tcaches_t *elm = &tcaches[ind]; if (unlikely(elm->tcache == NULL)) elm->tcache = tcache_create(tsd, arena_choose(tsd, NULL)); return (elm->tcache); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/tsd.h000066400000000000000000000453131264176561100240570ustar00rootroot00000000000000/******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* Maximum number of malloc_tsd users with cleanup functions. */ #define MALLOC_TSD_CLEANUPS_MAX 2 typedef bool (*malloc_tsd_cleanup_t)(void); #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ !defined(_WIN32)) typedef struct tsd_init_block_s tsd_init_block_t; typedef struct tsd_init_head_s tsd_init_head_t; #endif typedef struct tsd_s tsd_t; typedef enum { tsd_state_uninitialized, tsd_state_nominal, tsd_state_purgatory, tsd_state_reincarnated } tsd_state_t; /* * TLS/TSD-agnostic macro-based implementation of thread-specific data. There * are five macros that support (at least) three use cases: file-private, * library-private, and library-private inlined. Following is an example * library-private tsd variable: * * In example.h: * typedef struct { * int x; * int y; * } example_t; * #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0}) * malloc_tsd_types(example_, example_t) * malloc_tsd_protos(, example_, example_t) * malloc_tsd_externs(example_, example_t) * In example.c: * malloc_tsd_data(, example_, example_t, EX_INITIALIZER) * malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER, * example_tsd_cleanup) * * The result is a set of generated functions, e.g.: * * bool example_tsd_boot(void) {...} * example_t *example_tsd_get() {...} * void example_tsd_set(example_t *val) {...} * * Note that all of the functions deal in terms of (a_type *) rather than * (a_type) so that it is possible to support non-pointer types (unlike * pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is * cast to (void *). This means that the cleanup function needs to cast the * function argument to (a_type *), then dereference the resulting pointer to * access fields, e.g. * * void * example_tsd_cleanup(void *arg) * { * example_t *example = (example_t *)arg; * * example->x = 42; * [...] * if ([want the cleanup function to be called again]) * example_tsd_set(example); * } * * If example_tsd_set() is called within example_tsd_cleanup(), it will be * called again. This is similar to how pthreads TSD destruction works, except * that pthreads only calls the cleanup function again if the value was set to * non-NULL. */ /* malloc_tsd_types(). */ #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #define malloc_tsd_types(a_name, a_type) #elif (defined(JEMALLOC_TLS)) #define malloc_tsd_types(a_name, a_type) #elif (defined(_WIN32)) #define malloc_tsd_types(a_name, a_type) \ typedef struct { \ bool initialized; \ a_type val; \ } a_name##tsd_wrapper_t; #else #define malloc_tsd_types(a_name, a_type) \ typedef struct { \ bool initialized; \ a_type val; \ } a_name##tsd_wrapper_t; #endif /* malloc_tsd_protos(). */ #define malloc_tsd_protos(a_attr, a_name, a_type) \ a_attr bool \ a_name##tsd_boot0(void); \ a_attr void \ a_name##tsd_boot1(void); \ a_attr bool \ a_name##tsd_boot(void); \ a_attr a_type * \ a_name##tsd_get(void); \ a_attr void \ a_name##tsd_set(a_type *val); /* malloc_tsd_externs(). 
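 * (Note on the boot protocol below: in the variants that require a heap
 * wrapper, a_name##tsd_boot0() runs before the allocator itself works
 * and installs the statically allocated a_name##tsd_boot_wrapper;
 * a_name##tsd_boot1() later migrates that state into a wrapper obtained
 * from malloc_tsd_malloc().  The thread-local-storage variants need no
 * migration, so their boot1() is a no-op.)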
*/ #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #define malloc_tsd_externs(a_name, a_type) \ extern __thread a_type a_name##tsd_tls; \ extern __thread bool a_name##tsd_initialized; \ extern bool a_name##tsd_booted; #elif (defined(JEMALLOC_TLS)) #define malloc_tsd_externs(a_name, a_type) \ extern __thread a_type a_name##tsd_tls; \ extern pthread_key_t a_name##tsd_tsd; \ extern bool a_name##tsd_booted; #elif (defined(_WIN32)) #define malloc_tsd_externs(a_name, a_type) \ extern DWORD a_name##tsd_tsd; \ extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \ extern bool a_name##tsd_booted; #else #define malloc_tsd_externs(a_name, a_type) \ extern pthread_key_t a_name##tsd_tsd; \ extern tsd_init_head_t a_name##tsd_init_head; \ extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \ extern bool a_name##tsd_booted; #endif /* malloc_tsd_data(). */ #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ a_attr __thread a_type JEMALLOC_TLS_MODEL \ a_name##tsd_tls = a_initializer; \ a_attr __thread bool JEMALLOC_TLS_MODEL \ a_name##tsd_initialized = false; \ a_attr bool a_name##tsd_booted = false; #elif (defined(JEMALLOC_TLS)) #define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ a_attr __thread a_type JEMALLOC_TLS_MODEL \ a_name##tsd_tls = a_initializer; \ a_attr pthread_key_t a_name##tsd_tsd; \ a_attr bool a_name##tsd_booted = false; #elif (defined(_WIN32)) #define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ a_attr DWORD a_name##tsd_tsd; \ a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \ false, \ a_initializer \ }; \ a_attr bool a_name##tsd_booted = false; #else #define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ a_attr pthread_key_t a_name##tsd_tsd; \ a_attr tsd_init_head_t a_name##tsd_init_head = { \ ql_head_initializer(blocks), \ MALLOC_MUTEX_INITIALIZER \ }; \ a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \ false, \ a_initializer \ }; \ a_attr bool a_name##tsd_booted = false; #endif /* malloc_tsd_funcs(). */ #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ a_cleanup) \ /* Initialization/cleanup. */ \ a_attr bool \ a_name##tsd_cleanup_wrapper(void) \ { \ \ if (a_name##tsd_initialized) { \ a_name##tsd_initialized = false; \ a_cleanup(&a_name##tsd_tls); \ } \ return (a_name##tsd_initialized); \ } \ a_attr bool \ a_name##tsd_boot0(void) \ { \ \ if (a_cleanup != malloc_tsd_no_cleanup) { \ malloc_tsd_cleanup_register( \ &a_name##tsd_cleanup_wrapper); \ } \ a_name##tsd_booted = true; \ return (false); \ } \ a_attr void \ a_name##tsd_boot1(void) \ { \ \ /* Do nothing. */ \ } \ a_attr bool \ a_name##tsd_boot(void) \ { \ \ return (a_name##tsd_boot0()); \ } \ /* Get/set. */ \ a_attr a_type * \ a_name##tsd_get(void) \ { \ \ assert(a_name##tsd_booted); \ return (&a_name##tsd_tls); \ } \ a_attr void \ a_name##tsd_set(a_type *val) \ { \ \ assert(a_name##tsd_booted); \ a_name##tsd_tls = (*val); \ if (a_cleanup != malloc_tsd_no_cleanup) \ a_name##tsd_initialized = true; \ } #elif (defined(JEMALLOC_TLS)) #define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ a_cleanup) \ /* Initialization/cleanup. */ \ a_attr bool \ a_name##tsd_boot0(void) \ { \ \ if (a_cleanup != malloc_tsd_no_cleanup) { \ if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) != \ 0) \ return (true); \ } \ a_name##tsd_booted = true; \ return (false); \ } \ a_attr void \ a_name##tsd_boot1(void) \ { \ \ /* Do nothing. 
*/ \ } \ a_attr bool \ a_name##tsd_boot(void) \ { \ \ return (a_name##tsd_boot0()); \ } \ /* Get/set. */ \ a_attr a_type * \ a_name##tsd_get(void) \ { \ \ assert(a_name##tsd_booted); \ return (&a_name##tsd_tls); \ } \ a_attr void \ a_name##tsd_set(a_type *val) \ { \ \ assert(a_name##tsd_booted); \ a_name##tsd_tls = (*val); \ if (a_cleanup != malloc_tsd_no_cleanup) { \ if (pthread_setspecific(a_name##tsd_tsd, \ (void *)(&a_name##tsd_tls))) { \ malloc_write(": Error" \ " setting TSD for "#a_name"\n"); \ if (opt_abort) \ abort(); \ } \ } \ } #elif (defined(_WIN32)) #define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ a_cleanup) \ /* Initialization/cleanup. */ \ a_attr bool \ a_name##tsd_cleanup_wrapper(void) \ { \ DWORD error = GetLastError(); \ a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \ TlsGetValue(a_name##tsd_tsd); \ SetLastError(error); \ \ if (wrapper == NULL) \ return (false); \ if (a_cleanup != malloc_tsd_no_cleanup && \ wrapper->initialized) { \ wrapper->initialized = false; \ a_cleanup(&wrapper->val); \ if (wrapper->initialized) { \ /* Trigger another cleanup round. */ \ return (true); \ } \ } \ malloc_tsd_dalloc(wrapper); \ return (false); \ } \ a_attr void \ a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \ { \ \ if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) { \ malloc_write(": Error setting" \ " TSD for "#a_name"\n"); \ abort(); \ } \ } \ a_attr a_name##tsd_wrapper_t * \ a_name##tsd_wrapper_get(void) \ { \ DWORD error = GetLastError(); \ a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \ TlsGetValue(a_name##tsd_tsd); \ SetLastError(error); \ \ if (unlikely(wrapper == NULL)) { \ wrapper = (a_name##tsd_wrapper_t *) \ malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ if (wrapper == NULL) { \ malloc_write(": Error allocating" \ " TSD for "#a_name"\n"); \ abort(); \ } else { \ wrapper->initialized = false; \ wrapper->val = a_initializer; \ } \ a_name##tsd_wrapper_set(wrapper); \ } \ return (wrapper); \ } \ a_attr bool \ a_name##tsd_boot0(void) \ { \ \ a_name##tsd_tsd = TlsAlloc(); \ if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) \ return (true); \ if (a_cleanup != malloc_tsd_no_cleanup) { \ malloc_tsd_cleanup_register( \ &a_name##tsd_cleanup_wrapper); \ } \ a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \ a_name##tsd_booted = true; \ return (false); \ } \ a_attr void \ a_name##tsd_boot1(void) \ { \ a_name##tsd_wrapper_t *wrapper; \ wrapper = (a_name##tsd_wrapper_t *) \ malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ if (wrapper == NULL) { \ malloc_write(": Error allocating" \ " TSD for "#a_name"\n"); \ abort(); \ } \ memcpy(wrapper, &a_name##tsd_boot_wrapper, \ sizeof(a_name##tsd_wrapper_t)); \ a_name##tsd_wrapper_set(wrapper); \ } \ a_attr bool \ a_name##tsd_boot(void) \ { \ \ if (a_name##tsd_boot0()) \ return (true); \ a_name##tsd_boot1(); \ return (false); \ } \ /* Get/set. */ \ a_attr a_type * \ a_name##tsd_get(void) \ { \ a_name##tsd_wrapper_t *wrapper; \ \ assert(a_name##tsd_booted); \ wrapper = a_name##tsd_wrapper_get(); \ return (&wrapper->val); \ } \ a_attr void \ a_name##tsd_set(a_type *val) \ { \ a_name##tsd_wrapper_t *wrapper; \ \ assert(a_name##tsd_booted); \ wrapper = a_name##tsd_wrapper_get(); \ wrapper->val = *(val); \ if (a_cleanup != malloc_tsd_no_cleanup) \ wrapper->initialized = true; \ } #else #define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ a_cleanup) \ /* Initialization/cleanup. 
*/ \ a_attr void \ a_name##tsd_cleanup_wrapper(void *arg) \ { \ a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)arg; \ \ if (a_cleanup != malloc_tsd_no_cleanup && \ wrapper->initialized) { \ wrapper->initialized = false; \ a_cleanup(&wrapper->val); \ if (wrapper->initialized) { \ /* Trigger another cleanup round. */ \ if (pthread_setspecific(a_name##tsd_tsd, \ (void *)wrapper)) { \ malloc_write(": Error" \ " setting TSD for "#a_name"\n"); \ if (opt_abort) \ abort(); \ } \ return; \ } \ } \ malloc_tsd_dalloc(wrapper); \ } \ a_attr void \ a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \ { \ \ if (pthread_setspecific(a_name##tsd_tsd, \ (void *)wrapper)) { \ malloc_write(": Error setting" \ " TSD for "#a_name"\n"); \ abort(); \ } \ } \ a_attr a_name##tsd_wrapper_t * \ a_name##tsd_wrapper_get(void) \ { \ a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \ pthread_getspecific(a_name##tsd_tsd); \ \ if (unlikely(wrapper == NULL)) { \ tsd_init_block_t block; \ wrapper = tsd_init_check_recursion( \ &a_name##tsd_init_head, &block); \ if (wrapper) \ return (wrapper); \ wrapper = (a_name##tsd_wrapper_t *) \ malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ block.data = wrapper; \ if (wrapper == NULL) { \ malloc_write(": Error allocating" \ " TSD for "#a_name"\n"); \ abort(); \ } else { \ wrapper->initialized = false; \ wrapper->val = a_initializer; \ } \ a_name##tsd_wrapper_set(wrapper); \ tsd_init_finish(&a_name##tsd_init_head, &block); \ } \ return (wrapper); \ } \ a_attr bool \ a_name##tsd_boot0(void) \ { \ \ if (pthread_key_create(&a_name##tsd_tsd, \ a_name##tsd_cleanup_wrapper) != 0) \ return (true); \ a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \ a_name##tsd_booted = true; \ return (false); \ } \ a_attr void \ a_name##tsd_boot1(void) \ { \ a_name##tsd_wrapper_t *wrapper; \ wrapper = (a_name##tsd_wrapper_t *) \ malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ if (wrapper == NULL) { \ malloc_write(": Error allocating" \ " TSD for "#a_name"\n"); \ abort(); \ } \ memcpy(wrapper, &a_name##tsd_boot_wrapper, \ sizeof(a_name##tsd_wrapper_t)); \ a_name##tsd_wrapper_set(wrapper); \ } \ a_attr bool \ a_name##tsd_boot(void) \ { \ \ if (a_name##tsd_boot0()) \ return (true); \ a_name##tsd_boot1(); \ return (false); \ } \ /* Get/set. 
*/ \ a_attr a_type * \ a_name##tsd_get(void) \ { \ a_name##tsd_wrapper_t *wrapper; \ \ assert(a_name##tsd_booted); \ wrapper = a_name##tsd_wrapper_get(); \ return (&wrapper->val); \ } \ a_attr void \ a_name##tsd_set(a_type *val) \ { \ a_name##tsd_wrapper_t *wrapper; \ \ assert(a_name##tsd_booted); \ wrapper = a_name##tsd_wrapper_get(); \ wrapper->val = *(val); \ if (a_cleanup != malloc_tsd_no_cleanup) \ wrapper->initialized = true; \ } #endif #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ !defined(_WIN32)) struct tsd_init_block_s { ql_elm(tsd_init_block_t) link; pthread_t thread; void *data; }; struct tsd_init_head_s { ql_head(tsd_init_block_t) blocks; malloc_mutex_t lock; }; #endif #define MALLOC_TSD \ /* O(name, type) */ \ O(tcache, tcache_t *) \ O(thread_allocated, uint64_t) \ O(thread_deallocated, uint64_t) \ O(prof_tdata, prof_tdata_t *) \ O(arena, arena_t *) \ O(arenas_cache, arena_t **) \ O(narenas_cache, unsigned) \ O(arenas_cache_bypass, bool) \ O(tcache_enabled, tcache_enabled_t) \ O(quarantine, quarantine_t *) \ #define TSD_INITIALIZER { \ tsd_state_uninitialized, \ NULL, \ 0, \ 0, \ NULL, \ NULL, \ NULL, \ 0, \ false, \ tcache_enabled_default, \ NULL \ } struct tsd_s { tsd_state_t state; #define O(n, t) \ t n; MALLOC_TSD #undef O }; static const tsd_t tsd_initializer = TSD_INITIALIZER; malloc_tsd_types(, tsd_t) #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void *malloc_tsd_malloc(size_t size); void malloc_tsd_dalloc(void *wrapper); void malloc_tsd_no_cleanup(void *arg); void malloc_tsd_cleanup_register(bool (*f)(void)); bool malloc_tsd_boot0(void); void malloc_tsd_boot1(void); #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ !defined(_WIN32)) void *tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block); void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block); #endif void tsd_cleanup(void *arg); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t) tsd_t *tsd_fetch(void); bool tsd_nominal(tsd_t *tsd); #define O(n, t) \ t *tsd_##n##p_get(tsd_t *tsd); \ t tsd_##n##_get(tsd_t *tsd); \ void tsd_##n##_set(tsd_t *tsd, t n); MALLOC_TSD #undef O #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_)) malloc_tsd_externs(, tsd_t) malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup) JEMALLOC_ALWAYS_INLINE tsd_t * tsd_fetch(void) { tsd_t *tsd = tsd_get(); if (unlikely(tsd->state != tsd_state_nominal)) { if (tsd->state == tsd_state_uninitialized) { tsd->state = tsd_state_nominal; /* Trigger cleanup handler registration. 
*/ tsd_set(tsd); } else if (tsd->state == tsd_state_purgatory) { tsd->state = tsd_state_reincarnated; tsd_set(tsd); } else assert(tsd->state == tsd_state_reincarnated); } return (tsd); } JEMALLOC_INLINE bool tsd_nominal(tsd_t *tsd) { return (tsd->state == tsd_state_nominal); } #define O(n, t) \ JEMALLOC_ALWAYS_INLINE t * \ tsd_##n##p_get(tsd_t *tsd) \ { \ \ return (&tsd->n); \ } \ \ JEMALLOC_ALWAYS_INLINE t \ tsd_##n##_get(tsd_t *tsd) \ { \ \ return (*tsd_##n##p_get(tsd)); \ } \ \ JEMALLOC_ALWAYS_INLINE void \ tsd_##n##_set(tsd_t *tsd, t n) \ { \ \ assert(tsd->state == tsd_state_nominal); \ tsd->n = n; \ } MALLOC_TSD #undef O #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/util.h000066400000000000000000000162621264176561100242430ustar00rootroot00000000000000/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #ifdef _WIN32 # ifdef _WIN64 # define FMT64_PREFIX "ll" # define FMTPTR_PREFIX "ll" # else # define FMT64_PREFIX "ll" # define FMTPTR_PREFIX "" # endif # define FMTd32 "d" # define FMTu32 "u" # define FMTx32 "x" # define FMTd64 FMT64_PREFIX "d" # define FMTu64 FMT64_PREFIX "u" # define FMTx64 FMT64_PREFIX "x" # define FMTdPTR FMTPTR_PREFIX "d" # define FMTuPTR FMTPTR_PREFIX "u" # define FMTxPTR FMTPTR_PREFIX "x" #else # include # define FMTd32 PRId32 # define FMTu32 PRIu32 # define FMTx32 PRIx32 # define FMTd64 PRId64 # define FMTu64 PRIu64 # define FMTx64 PRIx64 # define FMTdPTR PRIdPTR # define FMTuPTR PRIuPTR # define FMTxPTR PRIxPTR #endif /* Size of stack-allocated buffer passed to buferror(). */ #define BUFERROR_BUF 64 /* * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be * large enough for all possible uses within jemalloc. */ #define MALLOC_PRINTF_BUFSIZE 4096 /* * Wrap a cpp argument that contains commas such that it isn't broken up into * multiple arguments. */ #define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__ /* * Silence compiler warnings due to uninitialized values. This is used * wherever the compiler fails to recognize that the variable is never used * uninitialized. */ #ifdef JEMALLOC_CC_SILENCE # define JEMALLOC_CC_SILENCE_INIT(v) = v #else # define JEMALLOC_CC_SILENCE_INIT(v) #endif #define JEMALLOC_GNUC_PREREQ(major, minor) \ (!defined(__clang__) && \ (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))) #ifndef __has_builtin # define __has_builtin(builtin) (0) #endif #define JEMALLOC_CLANG_HAS_BUILTIN(builtin) \ (defined(__clang__) && __has_builtin(builtin)) #ifdef __GNUC__ # define likely(x) __builtin_expect(!!(x), 1) # define unlikely(x) __builtin_expect(!!(x), 0) # if JEMALLOC_GNUC_PREREQ(4, 6) || \ JEMALLOC_CLANG_HAS_BUILTIN(__builtin_unreachable) # define unreachable() __builtin_unreachable() # else # define unreachable() # endif #else # define likely(x) !!(x) # define unlikely(x) !!(x) # define unreachable() #endif /* * Define a custom assert() in order to reduce the chances of deadlock during * assertion failure. 
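 * (A libc assert() may route through stdio, which can allocate and thus
 * re-enter the allocator while its locks are held; this variant formats
 * through malloc_printf(), which uses a stack buffer, and then aborts
 * directly.)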
*/ #ifndef assert #define assert(e) do { \ if (unlikely(config_debug && !(e))) { \ malloc_printf( \ ": %s:%d: Failed assertion: \"%s\"\n", \ __FILE__, __LINE__, #e); \ abort(); \ } \ } while (0) #endif #ifndef not_reached #define not_reached() do { \ if (config_debug) { \ malloc_printf( \ ": %s:%d: Unreachable code reached\n", \ __FILE__, __LINE__); \ abort(); \ } \ unreachable(); \ } while (0) #endif #ifndef not_implemented #define not_implemented() do { \ if (config_debug) { \ malloc_printf(": %s:%d: Not implemented\n", \ __FILE__, __LINE__); \ abort(); \ } \ } while (0) #endif #ifndef assert_not_implemented #define assert_not_implemented(e) do { \ if (unlikely(config_debug && !(e))) \ not_implemented(); \ } while (0) #endif /* Use to assert a particular configuration, e.g., cassert(config_debug). */ #define cassert(c) do { \ if (unlikely(!(c))) \ not_reached(); \ } while (0) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS int buferror(int err, char *buf, size_t buflen); uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base); void malloc_write(const char *s); /* * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating * point math. */ int malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap); int malloc_snprintf(char *str, size_t size, const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, const char *format, va_list ap); void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque, const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE int jemalloc_ffsl(long bitmap); int jemalloc_ffs(int bitmap); size_t pow2_ceil(size_t x); size_t lg_floor(size_t x); void set_errno(int errnum); int get_errno(void); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_)) /* Sanity check. */ #if !defined(JEMALLOC_INTERNAL_FFSL) || !defined(JEMALLOC_INTERNAL_FFS) # error Both JEMALLOC_INTERNAL_FFSL && JEMALLOC_INTERNAL_FFS should have been defined by configure #endif JEMALLOC_ALWAYS_INLINE int jemalloc_ffsl(long bitmap) { return (JEMALLOC_INTERNAL_FFSL(bitmap)); } JEMALLOC_ALWAYS_INLINE int jemalloc_ffs(int bitmap) { return (JEMALLOC_INTERNAL_FFS(bitmap)); } /* Compute the smallest power of 2 that is >= x. */ JEMALLOC_INLINE size_t pow2_ceil(size_t x) { x--; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; #if (LG_SIZEOF_PTR == 3) x |= x >> 32; #endif x++; return (x); } #if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) JEMALLOC_INLINE size_t lg_floor(size_t x) { size_t ret; assert(x != 0); asm ("bsr %1, %0" : "=r"(ret) // Outputs. : "r"(x) // Inputs. 
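	    // bsr stores the index of the most significant set bit, i.e.
	    // floor(log2(x)).  Its result is undefined for x == 0, a case
	    // the assert above rules out.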
); return (ret); } #elif (defined(_MSC_VER)) JEMALLOC_INLINE size_t lg_floor(size_t x) { unsigned long ret; assert(x != 0); #if (LG_SIZEOF_PTR == 3) _BitScanReverse64(&ret, x); #elif (LG_SIZEOF_PTR == 2) _BitScanReverse(&ret, x); #else # error "Unsupported type sizes for lg_floor()" #endif return (ret); } #elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ)) JEMALLOC_INLINE size_t lg_floor(size_t x) { assert(x != 0); #if (LG_SIZEOF_PTR == LG_SIZEOF_INT) return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x)); #elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG) return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x)); #else # error "Unsupported type sizes for lg_floor()" #endif } #else JEMALLOC_INLINE size_t lg_floor(size_t x) { assert(x != 0); x |= (x >> 1); x |= (x >> 2); x |= (x >> 4); x |= (x >> 8); x |= (x >> 16); #if (LG_SIZEOF_PTR == 3 && LG_SIZEOF_PTR == LG_SIZEOF_LONG) x |= (x >> 32); if (x == KZU(0xffffffffffffffff)) return (63); x++; return (jemalloc_ffsl(x) - 2); #elif (LG_SIZEOF_PTR == 2) if (x == KZU(0xffffffff)) return (31); x++; return (jemalloc_ffs(x) - 2); #else # error "Unsupported type sizes for lg_floor()" #endif } #endif /* Set error code. */ JEMALLOC_INLINE void set_errno(int errnum) { #ifdef _WIN32 SetLastError(errnum); #else errno = errnum; #endif } /* Get last error code. */ JEMALLOC_INLINE int get_errno(void) { #ifdef _WIN32 return (GetLastError()); #else return (errno); #endif } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ disque-1.0-rc1/deps/jemalloc/include/jemalloc/internal/valgrind.h000066400000000000000000000102231264176561100250630ustar00rootroot00000000000000/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #ifdef JEMALLOC_VALGRIND #include /* * The size that is reported to Valgrind must be consistent through a chain of * malloc..realloc..realloc calls. Request size isn't recorded anywhere in * jemalloc, so it is critical that all callers of these macros provide usize * rather than request size. As a result, buffer overflow detection is * technically weakened for the standard API, though it is generally accepted * practice to consider any extra bytes reported by malloc_usable_size() as * usable space. */ #define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do { \ if (unlikely(in_valgrind)) \ valgrind_make_mem_noaccess(ptr, usize); \ } while (0) #define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do { \ if (unlikely(in_valgrind)) \ valgrind_make_mem_undefined(ptr, usize); \ } while (0) #define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do { \ if (unlikely(in_valgrind)) \ valgrind_make_mem_defined(ptr, usize); \ } while (0) /* * The VALGRIND_MALLOCLIKE_BLOCK() and VALGRIND_RESIZEINPLACE_BLOCK() macro * calls must be embedded in macros rather than in functions so that when * Valgrind reports errors, there are no extra stack frames in the backtraces. 
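 * (Were these wrapped in helper functions, every Valgrind-reported
 * error would show an extra jemalloc-internal frame between the client
 * code and the MALLOCLIKE/FREELIKE annotation, obscuring the real call
 * site.)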
*/ #define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \ if (unlikely(in_valgrind && cond)) \ VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \ } while (0) #define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \ ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \ zero) do { \ if (unlikely(in_valgrind)) { \ size_t rzsize = p2rz(ptr); \ \ if (!maybe_moved || ptr == old_ptr) { \ VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \ usize, rzsize); \ if (zero && old_usize < usize) { \ valgrind_make_mem_defined( \ (void *)((uintptr_t)ptr + \ old_usize), usize - old_usize); \ } \ } else { \ if (!old_ptr_maybe_null || old_ptr != NULL) { \ valgrind_freelike_block(old_ptr, \ old_rzsize); \ } \ if (!ptr_maybe_null || ptr != NULL) { \ size_t copy_size = (old_usize < usize) \ ? old_usize : usize; \ size_t tail_size = usize - copy_size; \ VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \ rzsize, false); \ if (copy_size > 0) { \ valgrind_make_mem_defined(ptr, \ copy_size); \ } \ if (zero && tail_size > 0) { \ valgrind_make_mem_defined( \ (void *)((uintptr_t)ptr + \ copy_size), tail_size); \ } \ } \ } \ } \ } while (0) #define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \ if (unlikely(in_valgrind)) \ valgrind_freelike_block(ptr, rzsize); \ } while (0) #else #define RUNNING_ON_VALGRIND ((unsigned)0) #define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0) #define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0) #define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0) #define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0) #define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \ ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \ zero) do {} while (0) #define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0) #endif #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #ifdef JEMALLOC_VALGRIND void valgrind_make_mem_noaccess(void *ptr, size_t usize); void valgrind_make_mem_undefined(void *ptr, size_t usize); void valgrind_make_mem_defined(void *ptr, size_t usize); void valgrind_freelike_block(void *ptr, size_t usize); #endif #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ disque-1.0-rc1/deps/jemalloc/include/jemalloc/jemalloc.sh000077500000000000000000000007631264176561100234250ustar00rootroot00000000000000#!/bin/sh objroot=$1 cat < #include #include #include #include #define JEMALLOC_VERSION "@jemalloc_version@" #define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@ #define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@ #define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@ #define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@ #define JEMALLOC_VERSION_GID "@jemalloc_version_gid@" # define MALLOCX_LG_ALIGN(la) (la) # if LG_SIZEOF_PTR == 2 # define MALLOCX_ALIGN(a) (ffs(a)-1) # else # define MALLOCX_ALIGN(a) \ ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31) # endif # define MALLOCX_ZERO ((int)0x40) /* * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1 * encodes MALLOCX_TCACHE_NONE. 
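 *
 * Worked values (from the definition below): MALLOCX_TCACHE(-1) ==
 * MALLOCX_TCACHE_NONE == (1 << 8); an explicit tcache index 0 encodes
 * as (2 << 8); and leaving these flag bits zero selects the default
 * automatic behavior.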
*/ # define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8)) # define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1) /* * Bias arena index bits so that 0 encodes "use an automatically chosen arena". */ # define MALLOCX_ARENA(a) ((int)(((a)+1) << 20)) #if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW) # define JEMALLOC_CXX_THROW throw() #else # define JEMALLOC_CXX_THROW #endif #ifdef JEMALLOC_HAVE_ATTR # define JEMALLOC_ATTR(s) __attribute__((s)) # define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) # ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE # define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s)) # define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2)) # else # define JEMALLOC_ALLOC_SIZE(s) # define JEMALLOC_ALLOC_SIZE2(s1, s2) # endif # ifndef JEMALLOC_EXPORT # define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) # endif # ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF # define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i)) # elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF) # define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i)) # else # define JEMALLOC_FORMAT_PRINTF(s, i) # endif # define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) # define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow) # define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) # define JEMALLOC_RESTRICT_RETURN # define JEMALLOC_ALLOCATOR #elif _MSC_VER # define JEMALLOC_ATTR(s) # define JEMALLOC_ALIGNED(s) __declspec(align(s)) # define JEMALLOC_ALLOC_SIZE(s) # define JEMALLOC_ALLOC_SIZE2(s1, s2) # ifndef JEMALLOC_EXPORT # ifdef DLLEXPORT # define JEMALLOC_EXPORT __declspec(dllexport) # else # define JEMALLOC_EXPORT __declspec(dllimport) # endif # endif # define JEMALLOC_FORMAT_PRINTF(s, i) # define JEMALLOC_NOINLINE __declspec(noinline) # ifdef __cplusplus # define JEMALLOC_NOTHROW __declspec(nothrow) # else # define JEMALLOC_NOTHROW # endif # define JEMALLOC_SECTION(s) __declspec(allocate(s)) # define JEMALLOC_RESTRICT_RETURN __declspec(restrict) # if _MSC_VER >= 1900 && !defined(__EDG__) # define JEMALLOC_ALLOCATOR __declspec(allocator) # else # define JEMALLOC_ALLOCATOR # endif #else # define JEMALLOC_ATTR(s) # define JEMALLOC_ALIGNED(s) # define JEMALLOC_ALLOC_SIZE(s) # define JEMALLOC_ALLOC_SIZE2(s1, s2) # define JEMALLOC_EXPORT # define JEMALLOC_FORMAT_PRINTF(s, i) # define JEMALLOC_NOINLINE # define JEMALLOC_NOTHROW # define JEMALLOC_SECTION(s) # define JEMALLOC_RESTRICT_RETURN # define JEMALLOC_ALLOCATOR #endif disque-1.0-rc1/deps/jemalloc/include/jemalloc/jemalloc_mangle.sh000077500000000000000000000023521264176561100247440ustar00rootroot00000000000000#!/bin/sh public_symbols_txt=$1 symbol_prefix=$2 cat < /* MSVC doesn't define _Bool or bool in C, but does have BOOL */ /* Note this doesn't pass autoconf's test because (bool) 0.5 != true */ /* Clang-cl uses MSVC headers, so needs msvc_compat, but has _Bool as * a built-in type. */ #ifndef __clang__ typedef BOOL _Bool; #endif #define bool _Bool #define true 1 #define false 0 #define __bool_true_false_are_defined 1 #endif /* stdbool_h */ disque-1.0-rc1/deps/jemalloc/include/msvc_compat/C99/stdint.h000066400000000000000000000170601264176561100240450ustar00rootroot00000000000000// ISO C9x compliant stdint.h for Microsoft Visual Studio // Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 // // Copyright (c) 2006-2008 Alexander Chemeris // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. 
Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. The name of the author may be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO // EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////////// #ifndef _MSC_VER // [ #error "Use this header only with Microsoft Visual C++ compilers!" #endif // _MSC_VER ] #ifndef _MSC_STDINT_H_ // [ #define _MSC_STDINT_H_ #if _MSC_VER > 1000 #pragma once #endif #include <limits.h> // For Visual Studio 6 in C++ mode and for many Visual Studio versions when // compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}' // or compiler give many errors like this: // error C2733: second C linkage of overloaded function 'wmemchr' not allowed #ifdef __cplusplus extern "C" { #endif # include <wchar.h> #ifdef __cplusplus } #endif // Define _W64 macros to mark types changing their size, like intptr_t. #ifndef _W64 # if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 # define _W64 __w64 # else # define _W64 # endif #endif // 7.18.1 Integer types // 7.18.1.1 Exact-width integer types // Visual Studio 6 and Embedded Visual C++ 4 doesn't // realize that, e.g. char has the same size as __int8 // so we give up on __intX for them.
#if (_MSC_VER < 1300) typedef signed char int8_t; typedef signed short int16_t; typedef signed int int32_t; typedef unsigned char uint8_t; typedef unsigned short uint16_t; typedef unsigned int uint32_t; #else typedef signed __int8 int8_t; typedef signed __int16 int16_t; typedef signed __int32 int32_t; typedef unsigned __int8 uint8_t; typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; #endif typedef signed __int64 int64_t; typedef unsigned __int64 uint64_t; // 7.18.1.2 Minimum-width integer types typedef int8_t int_least8_t; typedef int16_t int_least16_t; typedef int32_t int_least32_t; typedef int64_t int_least64_t; typedef uint8_t uint_least8_t; typedef uint16_t uint_least16_t; typedef uint32_t uint_least32_t; typedef uint64_t uint_least64_t; // 7.18.1.3 Fastest minimum-width integer types typedef int8_t int_fast8_t; typedef int16_t int_fast16_t; typedef int32_t int_fast32_t; typedef int64_t int_fast64_t; typedef uint8_t uint_fast8_t; typedef uint16_t uint_fast16_t; typedef uint32_t uint_fast32_t; typedef uint64_t uint_fast64_t; // 7.18.1.4 Integer types capable of holding object pointers #ifdef _WIN64 // [ typedef signed __int64 intptr_t; typedef unsigned __int64 uintptr_t; #else // _WIN64 ][ typedef _W64 signed int intptr_t; typedef _W64 unsigned int uintptr_t; #endif // _WIN64 ] // 7.18.1.5 Greatest-width integer types typedef int64_t intmax_t; typedef uint64_t uintmax_t; // 7.18.2 Limits of specified-width integer types #if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 // 7.18.2.1 Limits of exact-width integer types #define INT8_MIN ((int8_t)_I8_MIN) #define INT8_MAX _I8_MAX #define INT16_MIN ((int16_t)_I16_MIN) #define INT16_MAX _I16_MAX #define INT32_MIN ((int32_t)_I32_MIN) #define INT32_MAX _I32_MAX #define INT64_MIN ((int64_t)_I64_MIN) #define INT64_MAX _I64_MAX #define UINT8_MAX _UI8_MAX #define UINT16_MAX _UI16_MAX #define UINT32_MAX _UI32_MAX #define UINT64_MAX _UI64_MAX // 7.18.2.2 Limits of minimum-width integer types #define INT_LEAST8_MIN INT8_MIN #define INT_LEAST8_MAX INT8_MAX #define INT_LEAST16_MIN INT16_MIN #define INT_LEAST16_MAX INT16_MAX #define INT_LEAST32_MIN INT32_MIN #define INT_LEAST32_MAX INT32_MAX #define INT_LEAST64_MIN INT64_MIN #define INT_LEAST64_MAX INT64_MAX #define UINT_LEAST8_MAX UINT8_MAX #define UINT_LEAST16_MAX UINT16_MAX #define UINT_LEAST32_MAX UINT32_MAX #define UINT_LEAST64_MAX UINT64_MAX // 7.18.2.3 Limits of fastest minimum-width integer types #define INT_FAST8_MIN INT8_MIN #define INT_FAST8_MAX INT8_MAX #define INT_FAST16_MIN INT16_MIN #define INT_FAST16_MAX INT16_MAX #define INT_FAST32_MIN INT32_MIN #define INT_FAST32_MAX INT32_MAX #define INT_FAST64_MIN INT64_MIN #define INT_FAST64_MAX INT64_MAX #define UINT_FAST8_MAX UINT8_MAX #define UINT_FAST16_MAX UINT16_MAX #define UINT_FAST32_MAX UINT32_MAX #define UINT_FAST64_MAX UINT64_MAX // 7.18.2.4 Limits of integer types capable of holding object pointers #ifdef _WIN64 // [ # define INTPTR_MIN INT64_MIN # define INTPTR_MAX INT64_MAX # define UINTPTR_MAX UINT64_MAX #else // _WIN64 ][ # define INTPTR_MIN INT32_MIN # define INTPTR_MAX INT32_MAX # define UINTPTR_MAX UINT32_MAX #endif // _WIN64 ] // 7.18.2.5 Limits of greatest-width integer types #define INTMAX_MIN INT64_MIN #define INTMAX_MAX INT64_MAX #define UINTMAX_MAX UINT64_MAX // 7.18.3 Limits of other integer types #ifdef _WIN64 // [ # define PTRDIFF_MIN _I64_MIN # define PTRDIFF_MAX _I64_MAX #else // _WIN64 ][ # define PTRDIFF_MIN _I32_MIN # define 
PTRDIFF_MAX _I32_MAX #endif // _WIN64 ] #define SIG_ATOMIC_MIN INT_MIN #define SIG_ATOMIC_MAX INT_MAX #ifndef SIZE_MAX // [ # ifdef _WIN64 // [ # define SIZE_MAX _UI64_MAX # else // _WIN64 ][ # define SIZE_MAX _UI32_MAX # endif // _WIN64 ] #endif // SIZE_MAX ] // WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h> #ifndef WCHAR_MIN // [ # define WCHAR_MIN 0 #endif // WCHAR_MIN ] #ifndef WCHAR_MAX // [ # define WCHAR_MAX _UI16_MAX #endif // WCHAR_MAX ] #define WINT_MIN 0 #define WINT_MAX _UI16_MAX #endif // __STDC_LIMIT_MACROS ] // 7.18.4 Limits of other integer types #if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 // 7.18.4.1 Macros for minimum-width integer constants #define INT8_C(val) val##i8 #define INT16_C(val) val##i16 #define INT32_C(val) val##i32 #define INT64_C(val) val##i64 #define UINT8_C(val) val##ui8 #define UINT16_C(val) val##ui16 #define UINT32_C(val) val##ui32 #define UINT64_C(val) val##ui64 // 7.18.4.2 Macros for greatest-width integer constants #define INTMAX_C INT64_C #define UINTMAX_C UINT64_C #endif // __STDC_CONSTANT_MACROS ] #endif // _MSC_STDINT_H_ ] disque-1.0-rc1/deps/jemalloc/include/msvc_compat/strings.h000066400000000000000000000007521264176561100236650ustar00rootroot00000000000000#ifndef strings_h #define strings_h /* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided * for both */ #ifdef _MSC_VER # include <intrin.h> # pragma intrinsic(_BitScanForward) static __forceinline int ffsl(long x) { unsigned long i; if (_BitScanForward(&i, x)) return (i + 1); return (0); } static __forceinline int ffs(int x) { return (ffsl(x)); } #else # define ffsl(x) __builtin_ffsl(x) # define ffs(x) __builtin_ffs(x) #endif #endif /* strings_h */ disque-1.0-rc1/deps/jemalloc/include/msvc_compat/windows_extra.h000066400000000000000000000010211264176561100250610ustar00rootroot00000000000000#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H #define MSVC_COMPAT_WINDOWS_EXTRA_H #ifndef ENOENT # define ENOENT ERROR_PATH_NOT_FOUND #endif #ifndef EINVAL # define EINVAL ERROR_BAD_ARGUMENTS #endif #ifndef EAGAIN # define EAGAIN ERROR_OUTOFMEMORY #endif #ifndef EPERM # define EPERM ERROR_WRITE_FAULT #endif #ifndef EFAULT # define EFAULT ERROR_INVALID_ADDRESS #endif #ifndef ENOMEM # define ENOMEM ERROR_NOT_ENOUGH_MEMORY #endif #ifndef ERANGE # define ERANGE ERROR_INVALID_DATA #endif #endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */ disque-1.0-rc1/deps/jemalloc/install-sh000077500000000000000000000127211264176561100200670ustar00rootroot00000000000000#! /bin/sh # # install - install a program, script, or datafile # This comes from X11R5 (mit/util/scripts/install.sh). # # Copyright 1991 by the Massachusetts Institute of Technology # # Permission to use, copy, modify, distribute, and sell this software and its # documentation for any purpose is hereby granted without fee, provided that # the above copyright notice appear in all copies and that both that # copyright notice and this permission notice appear in supporting # documentation, and that the name of M.I.T. not be used in advertising or # publicity pertaining to distribution of the software without specific, # written prior permission. M.I.T. makes no representations about the # suitability of this software for any purpose. It is provided "as is" # without express or implied warranty. # # Calling this script install-sh is preferred over install.sh, to prevent # `make' implicit rules from creating a file called install from it # when there is no Makefile.
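#
# Illustrative usage (editorial example; the file name and destination are
# hypothetical):
#
#   ./install-sh -c -m 0644 jemalloc.pc /usr/local/lib/pkgconfig/jemalloc.pc
#
# where -c copies instead of moving and -m sets the installed mode, matching
# the option parsing below.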
# # This script is compatible with the BSD install script, but was written # from scratch. It can only install one file at a time, a restriction # shared with many OS's install programs. # set DOITPROG to echo to test this script # Don't use :- since 4.3BSD and earlier shells don't like it. doit="${DOITPROG-}" # put in absolute paths if you don't have them in your path; or use env. vars. mvprog="${MVPROG-mv}" cpprog="${CPPROG-cp}" chmodprog="${CHMODPROG-chmod}" chownprog="${CHOWNPROG-chown}" chgrpprog="${CHGRPPROG-chgrp}" stripprog="${STRIPPROG-strip}" rmprog="${RMPROG-rm}" mkdirprog="${MKDIRPROG-mkdir}" transformbasename="" transform_arg="" instcmd="$mvprog" chmodcmd="$chmodprog 0755" chowncmd="" chgrpcmd="" stripcmd="" rmcmd="$rmprog -f" mvcmd="$mvprog" src="" dst="" dir_arg="" while [ x"$1" != x ]; do case $1 in -c) instcmd="$cpprog" shift continue;; -d) dir_arg=true shift continue;; -m) chmodcmd="$chmodprog $2" shift shift continue;; -o) chowncmd="$chownprog $2" shift shift continue;; -g) chgrpcmd="$chgrpprog $2" shift shift continue;; -s) stripcmd="$stripprog" shift continue;; -t=*) transformarg=`echo $1 | sed 's/-t=//'` shift continue;; -b=*) transformbasename=`echo $1 | sed 's/-b=//'` shift continue;; *) if [ x"$src" = x ] then src=$1 else # this colon is to work around a 386BSD /bin/sh bug : dst=$1 fi shift continue;; esac done if [ x"$src" = x ] then echo "install: no input file specified" exit 1 else true fi if [ x"$dir_arg" != x ]; then dst=$src src="" if [ -d $dst ]; then instcmd=: else instcmd=mkdir fi else # Waiting for this to be detected by the "$instcmd $src $dsttmp" command # might cause directories to be created, which would be especially bad # if $src (and thus $dsttmp) contains '*'. if [ -f $src -o -d $src ] then true else echo "install: $src does not exist" exit 1 fi if [ x"$dst" = x ] then echo "install: no destination specified" exit 1 else true fi # If destination is a directory, append the input filename; if your system # does not like double slashes in filenames, you may need to add some logic if [ -d $dst ] then dst="$dst"/`basename $src` else true fi fi ## this sed command emulates the dirname command dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'` # Make sure that the destination directory exists. # this part is taken from Noah Friedman's mkinstalldirs script # Skip lots of stat calls in the usual case. if [ ! -d "$dstdir" ]; then defaultIFS=' ' IFS="${IFS-${defaultIFS}}" oIFS="${IFS}" # Some sh's can't handle IFS=/ for some reason. IFS='%' set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'` IFS="${oIFS}" pathcomp='' while [ $# -ne 0 ] ; do pathcomp="${pathcomp}${1}" shift if [ ! -d "${pathcomp}" ] ; then $mkdirprog "${pathcomp}" else true fi pathcomp="${pathcomp}/" done fi if [ x"$dir_arg" != x ] then $doit $instcmd $dst && if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi && if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi && if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi && if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi else # If we're going to rename the final executable, determine the name now. if [ x"$transformarg" = x ] then dstfile=`basename $dst` else dstfile=`basename $dst $transformbasename | sed $transformarg`$transformbasename fi # don't allow the sed command to completely eliminate the filename if [ x"$dstfile" = x ] then dstfile=`basename $dst` else true fi # Make a temp file name in the proper directory. 
dsttmp=$dstdir/#inst.$$# # Move or copy the file name to the temp name $doit $instcmd $src $dsttmp && trap "rm -f ${dsttmp}" 0 && # and set any options; do chmod last to preserve setuid bits # If any of these fail, we abort the whole thing. If we want to # ignore errors from any of these, just make sure not to ignore # errors from the above "$doit $instcmd $src $dsttmp" command. if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi && if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi && if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi && if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi && # Now rename the file to the real destination. $doit $rmcmd -f $dstdir/$dstfile && $doit $mvcmd $dsttmp $dstdir/$dstfile fi && exit 0 disque-1.0-rc1/deps/jemalloc/jemalloc.pc.in000066400000000000000000000006151264176561100206020ustar00rootroot00000000000000prefix=@prefix@ exec_prefix=@exec_prefix@ libdir=@libdir@ includedir=@includedir@ install_suffix=@install_suffix@ Name: jemalloc Description: A general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support. URL: http://www.canonware.com/jemalloc Version: @jemalloc_version@ Cflags: -I${includedir} Libs: -L${libdir} -ljemalloc${install_suffix} disque-1.0-rc1/deps/jemalloc/src/000077500000000000000000000000001264176561100166505ustar00rootroot00000000000000disque-1.0-rc1/deps/jemalloc/src/arena.c000066400000000000000000002711521264176561100201120ustar00rootroot00000000000000#define JEMALLOC_ARENA_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ /* Data. */ ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT; static ssize_t lg_dirty_mult_default; arena_bin_info_t arena_bin_info[NBINS]; size_t map_bias; size_t map_misc_offset; size_t arena_maxrun; /* Max run size for arenas. */ size_t large_maxclass; /* Max large size class. */ static size_t small_maxrun; /* Max run size used for small size classes. */ static bool *small_run_tab; /* Valid small run page multiples. */ unsigned nlclasses; /* Number of large size classes. */ unsigned nhclasses; /* Number of huge size classes. */ /******************************************************************************/ /* * Function prototypes for static functions that are referenced prior to * definition. 
*/ static void arena_purge(arena_t *arena, bool all); static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned, bool decommitted); static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin); static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin); /******************************************************************************/ #define CHUNK_MAP_KEY ((uintptr_t)0x1U) JEMALLOC_INLINE_C arena_chunk_map_misc_t * arena_miscelm_key_create(size_t size) { return ((arena_chunk_map_misc_t *)(arena_mapbits_size_encode(size) | CHUNK_MAP_KEY)); } JEMALLOC_INLINE_C bool arena_miscelm_is_key(const arena_chunk_map_misc_t *miscelm) { return (((uintptr_t)miscelm & CHUNK_MAP_KEY) != 0); } #undef CHUNK_MAP_KEY JEMALLOC_INLINE_C size_t arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm) { assert(arena_miscelm_is_key(miscelm)); return (arena_mapbits_size_decode((uintptr_t)miscelm)); } JEMALLOC_INLINE_C size_t arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm) { arena_chunk_t *chunk; size_t pageind, mapbits; assert(!arena_miscelm_is_key(miscelm)); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); pageind = arena_miscelm_to_pageind(miscelm); mapbits = arena_mapbits_get(chunk, pageind); return (arena_mapbits_size_decode(mapbits)); } JEMALLOC_INLINE_C int arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) { uintptr_t a_miscelm = (uintptr_t)a; uintptr_t b_miscelm = (uintptr_t)b; assert(a != NULL); assert(b != NULL); return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm)); } /* Generate red-black tree functions. */ rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t, rb_link, arena_run_comp) static size_t run_quantize(size_t size) { size_t qsize; assert(size != 0); assert(size == PAGE_CEILING(size)); /* Don't change sizes that are valid small run sizes. */ if (size <= small_maxrun && small_run_tab[size >> LG_PAGE]) return (size); /* * Round down to the nearest run size that can actually be requested * during normal large allocation. Add large_pad so that cache index * randomization can offset the allocation from the page boundary. */ qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad; if (qsize <= SMALL_MAXCLASS + large_pad) return (run_quantize(size - large_pad)); assert(qsize <= size); return (qsize); } static size_t run_quantize_next(size_t size) { size_t large_run_size_next; assert(size != 0); assert(size == PAGE_CEILING(size)); /* * Return the next quantized size greater than the input size. * Quantized sizes comprise the union of run sizes that back small * region runs, and run sizes that back large regions with no explicit * alignment constraints. */ if (size > SMALL_MAXCLASS) { large_run_size_next = PAGE_CEILING(index2size(size2index(size - large_pad) + 1) + large_pad); } else large_run_size_next = SIZE_T_MAX; if (size >= small_maxrun) return (large_run_size_next); while (true) { size += PAGE; assert(size <= small_maxrun); if (small_run_tab[size >> LG_PAGE]) { if (large_run_size_next < size) return (large_run_size_next); return (size); } } } static size_t run_quantize_first(size_t size) { size_t qsize = run_quantize(size); if (qsize < size) { /* * Skip a quantization that may have an adequately large run, * because under-sized runs may be mixed in. This only happens * when an unusual size is requested, i.e. 
for aligned * allocation, and is just one of several places where linear * search would potentially find sufficiently aligned available * memory somewhere lower. */ qsize = run_quantize_next(size); } return (qsize); } JEMALLOC_INLINE_C int arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) { int ret; uintptr_t a_miscelm = (uintptr_t)a; size_t a_qsize = run_quantize(arena_miscelm_is_key(a) ? arena_miscelm_key_size_get(a) : arena_miscelm_size_get(a)); size_t b_qsize = run_quantize(arena_miscelm_size_get(b)); /* * Compare based on quantized size rather than size, in order to sort * equally useful runs only by address. */ ret = (a_qsize > b_qsize) - (a_qsize < b_qsize); if (ret == 0) { if (!arena_miscelm_is_key(a)) { uintptr_t b_miscelm = (uintptr_t)b; ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm); } else { /* * Treat keys as if they are lower than anything else. */ ret = -1; } } return (ret); } /* Generate red-black tree functions. */ rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_misc_t, rb_link, arena_avail_comp) static void arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, size_t npages) { assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> LG_PAGE)); arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk, pageind)); } static void arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, size_t npages) { assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> LG_PAGE)); arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk, pageind)); } static void arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, size_t npages) { arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> LG_PAGE)); assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY); assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) == CHUNK_MAP_DIRTY); qr_new(&miscelm->rd, rd_link); qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link); arena->ndirty += npages; } static void arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, size_t npages) { arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> LG_PAGE)); assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY); assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) == CHUNK_MAP_DIRTY); qr_remove(&miscelm->rd, rd_link); assert(arena->ndirty >= npages); arena->ndirty -= npages; } static size_t arena_chunk_dirty_npages(const extent_node_t *node) { return (extent_node_size_get(node) >> LG_PAGE); } void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache) { if (cache) { extent_node_dirty_linkage_init(node); extent_node_dirty_insert(node, &arena->runs_dirty, &arena->chunks_cache); arena->ndirty += arena_chunk_dirty_npages(node); } } void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty) { if (dirty) { extent_node_dirty_remove(node); assert(arena->ndirty >= arena_chunk_dirty_npages(node)); arena->ndirty -= arena_chunk_dirty_npages(node); } } JEMALLOC_INLINE_C void * arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) { void *ret; unsigned regind; arena_chunk_map_misc_t *miscelm; void *rpages; assert(run->nfree > 0); assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info)); regind = bitmap_sfu(run->bitmap, 
&bin_info->bitmap_info); miscelm = arena_run_to_miscelm(run); rpages = arena_miscelm_to_rpages(miscelm); ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset + (uintptr_t)(bin_info->reg_interval * regind)); run->nfree--; return (ret); } JEMALLOC_INLINE_C void arena_run_reg_dalloc(arena_run_t *run, void *ptr) { arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; size_t mapbits = arena_mapbits_get(chunk, pageind); szind_t binind = arena_ptr_small_binind_get(ptr, mapbits); arena_bin_info_t *bin_info = &arena_bin_info[binind]; unsigned regind = arena_run_regind(run, bin_info, ptr); assert(run->nfree < bin_info->nregs); /* Freeing an interior pointer can cause assertion failure. */ assert(((uintptr_t)ptr - ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) + (uintptr_t)bin_info->reg0_offset)) % (uintptr_t)bin_info->reg_interval == 0); assert((uintptr_t)ptr >= (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) + (uintptr_t)bin_info->reg0_offset); /* Freeing an unallocated pointer can cause assertion failure. */ assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind)); bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind); run->nfree++; } JEMALLOC_INLINE_C void arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages) { JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), (npages << LG_PAGE)); memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0, (npages << LG_PAGE)); } JEMALLOC_INLINE_C void arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind) { JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), PAGE); } JEMALLOC_INLINE_C void arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) { size_t i; UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE)); arena_run_page_mark_zeroed(chunk, run_ind); for (i = 0; i < PAGE / sizeof(size_t); i++) assert(p[i] == 0); } static void arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages) { if (config_stats) { ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages - sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive << LG_PAGE); if (cactive_diff != 0) stats_cactive_add(cactive_diff); } } static void arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind, size_t flag_dirty, size_t flag_decommitted, size_t need_pages) { size_t total_pages, rem_pages; assert(flag_dirty == 0 || flag_decommitted == 0); total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >> LG_PAGE; assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) == flag_dirty); assert(need_pages <= total_pages); rem_pages = total_pages - need_pages; arena_avail_remove(arena, chunk, run_ind, total_pages); if (flag_dirty != 0) arena_run_dirty_remove(arena, chunk, run_ind, total_pages); arena_cactive_update(arena, need_pages, 0); arena->nactive += need_pages; /* Keep track of trailing unused pages for later use. */ if (rem_pages > 0) { size_t flags = flag_dirty | flag_decommitted; size_t flag_unzeroed_mask = (flags == 0) ? 
CHUNK_MAP_UNZEROED : 0; arena_mapbits_unallocated_set(chunk, run_ind+need_pages, (rem_pages << LG_PAGE), flags | (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) & flag_unzeroed_mask)); arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1, (rem_pages << LG_PAGE), flags | (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) & flag_unzeroed_mask)); if (flag_dirty != 0) { arena_run_dirty_insert(arena, chunk, run_ind+need_pages, rem_pages); } arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages); } } static bool arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size, bool remove, bool zero) { arena_chunk_t *chunk; arena_chunk_map_misc_t *miscelm; size_t flag_dirty, flag_decommitted, run_ind, need_pages; size_t flag_unzeroed_mask; chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); miscelm = arena_run_to_miscelm(run); run_ind = arena_miscelm_to_pageind(miscelm); flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind); need_pages = (size >> LG_PAGE); assert(need_pages > 0); if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize, run_ind << LG_PAGE, size, arena->ind)) return (true); if (remove) { arena_run_split_remove(arena, chunk, run_ind, flag_dirty, flag_decommitted, need_pages); } if (zero) { if (flag_decommitted != 0) { /* The run is untouched, and therefore zeroed. */ JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); } else if (flag_dirty != 0) { /* The run is dirty, so all pages must be zeroed. */ arena_run_zero(chunk, run_ind, need_pages); } else { /* * The run is clean, so some pages may be zeroed (i.e. * never before touched). */ size_t i; for (i = 0; i < need_pages; i++) { if (arena_mapbits_unzeroed_get(chunk, run_ind+i) != 0) arena_run_zero(chunk, run_ind+i, 1); else if (config_debug) { arena_run_page_validate_zeroed(chunk, run_ind+i); } else { arena_run_page_mark_zeroed(chunk, run_ind+i); } } } } else { JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); } /* * Set the last element first, in case the run only contains one page * (i.e. both statements set the same element). */ flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? 
CHUNK_MAP_UNZEROED : 0; arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1))); arena_mapbits_large_set(chunk, run_ind, size, flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind))); return (false); } static bool arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) { return (arena_run_split_large_helper(arena, run, size, true, zero)); } static bool arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) { return (arena_run_split_large_helper(arena, run, size, false, zero)); } static bool arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size, szind_t binind) { arena_chunk_t *chunk; arena_chunk_map_misc_t *miscelm; size_t flag_dirty, flag_decommitted, run_ind, need_pages, i; assert(binind != BININD_INVALID); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); miscelm = arena_run_to_miscelm(run); run_ind = arena_miscelm_to_pageind(miscelm); flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind); need_pages = (size >> LG_PAGE); assert(need_pages > 0); if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize, run_ind << LG_PAGE, size, arena->ind)) return (true); arena_run_split_remove(arena, chunk, run_ind, flag_dirty, flag_decommitted, need_pages); for (i = 0; i < need_pages; i++) { size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk, run_ind+i); arena_mapbits_small_set(chunk, run_ind+i, i, binind, flag_unzeroed); if (config_debug && flag_dirty == 0 && flag_unzeroed == 0) arena_run_page_validate_zeroed(chunk, run_ind+i); } JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); return (false); } static arena_chunk_t * arena_chunk_init_spare(arena_t *arena) { arena_chunk_t *chunk; assert(arena->spare != NULL); chunk = arena->spare; arena->spare = NULL; assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == arena_maxrun); assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == arena_maxrun); assert(arena_mapbits_dirty_get(chunk, map_bias) == arena_mapbits_dirty_get(chunk, chunk_npages-1)); return (chunk); } static bool arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero) { /* * The extent node notion of "committed" doesn't directly apply to * arena chunks. Arbitrarily mark them as committed. The commit state * of runs is tracked individually, and upon chunk deallocation the * entire chunk is in a consistent commit state. */ extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true); extent_node_achunk_set(&chunk->node, true); return (chunk_register(chunk, &chunk->node)); } static arena_chunk_t * arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, bool *zero, bool *commit) { arena_chunk_t *chunk; malloc_mutex_unlock(&arena->lock); chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL, chunksize, chunksize, zero, commit); if (chunk != NULL && !*commit) { /* Commit header. */ if (chunk_hooks->commit(chunk, chunksize, 0, map_bias << LG_PAGE, arena->ind)) { chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk, chunksize, *commit); chunk = NULL; } } if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) { if (!*commit) { /* Undo commit of header. 
*/ chunk_hooks->decommit(chunk, chunksize, 0, map_bias << LG_PAGE, arena->ind); } chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk, chunksize, *commit); chunk = NULL; } malloc_mutex_lock(&arena->lock); return (chunk); } static arena_chunk_t * arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit) { arena_chunk_t *chunk; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize, chunksize, zero, true); if (chunk != NULL) { if (arena_chunk_register(arena, chunk, *zero)) { chunk_dalloc_cache(arena, &chunk_hooks, chunk, chunksize, true); return (NULL); } *commit = true; } if (chunk == NULL) { chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks, zero, commit); } if (config_stats && chunk != NULL) { arena->stats.mapped += chunksize; arena->stats.metadata_mapped += (map_bias << LG_PAGE); } return (chunk); } static arena_chunk_t * arena_chunk_init_hard(arena_t *arena) { arena_chunk_t *chunk; bool zero, commit; size_t flag_unzeroed, flag_decommitted, i; assert(arena->spare == NULL); zero = false; commit = false; chunk = arena_chunk_alloc_internal(arena, &zero, &commit); if (chunk == NULL) return (NULL); /* * Initialize the map to contain one maximal free untouched run. Mark * the pages as zeroed if chunk_alloc() returned a zeroed or decommitted * chunk. */ flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED; flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED; arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun, flag_unzeroed | flag_decommitted); /* * There is no need to initialize the internal page map entries unless * the chunk is not zeroed. */ if (!zero) { JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( (void *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t) arena_bitselm_get(chunk, chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk, map_bias+1))); for (i = map_bias+1; i < chunk_npages-1; i++) arena_mapbits_internal_set(chunk, i, flag_unzeroed); } else { JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t) arena_bitselm_get(chunk, chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk, map_bias+1))); if (config_debug) { for (i = map_bias+1; i < chunk_npages-1; i++) { assert(arena_mapbits_unzeroed_get(chunk, i) == flag_unzeroed); } } } arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun, flag_unzeroed); return (chunk); } static arena_chunk_t * arena_chunk_alloc(arena_t *arena) { arena_chunk_t *chunk; if (arena->spare != NULL) chunk = arena_chunk_init_spare(arena); else { chunk = arena_chunk_init_hard(arena); if (chunk == NULL) return (NULL); } /* Insert the run into the runs_avail tree. */ arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias); return (chunk); } static void arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk) { assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == arena_maxrun); assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == arena_maxrun); assert(arena_mapbits_dirty_get(chunk, map_bias) == arena_mapbits_dirty_get(chunk, chunk_npages-1)); assert(arena_mapbits_decommitted_get(chunk, map_bias) == arena_mapbits_decommitted_get(chunk, chunk_npages-1)); /* * Remove run from the runs_avail tree, so that the arena does not use * it. 
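 *
 * (Editorial note: each arena caches at most one spare chunk.  In the code
 * below, if a spare already exists, the chunk being deallocated becomes the
 * new spare and the old spare is released to the chunk cache; otherwise the
 * chunk itself is retained as the spare.)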
*/ arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias); if (arena->spare != NULL) { arena_chunk_t *spare = arena->spare; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; bool committed; arena->spare = chunk; if (arena_mapbits_dirty_get(spare, map_bias) != 0) { arena_run_dirty_remove(arena, spare, map_bias, chunk_npages-map_bias); } chunk_deregister(spare, &spare->node); committed = (arena_mapbits_decommitted_get(spare, map_bias) == 0); if (!committed) { /* * Decommit the header. Mark the chunk as decommitted * even if header decommit fails, since treating a * partially committed chunk as committed has a high * potential for causing later access of decommitted * memory. */ chunk_hooks = chunk_hooks_get(arena); chunk_hooks.decommit(spare, chunksize, 0, map_bias << LG_PAGE, arena->ind); } chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare, chunksize, committed); if (config_stats) { arena->stats.mapped -= chunksize; arena->stats.metadata_mapped -= (map_bias << LG_PAGE); } } else arena->spare = chunk; } static void arena_huge_malloc_stats_update(arena_t *arena, size_t usize) { szind_t index = size2index(usize) - nlclasses - NBINS; cassert(config_stats); arena->stats.nmalloc_huge++; arena->stats.allocated_huge += usize; arena->stats.hstats[index].nmalloc++; arena->stats.hstats[index].curhchunks++; } static void arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize) { szind_t index = size2index(usize) - nlclasses - NBINS; cassert(config_stats); arena->stats.nmalloc_huge--; arena->stats.allocated_huge -= usize; arena->stats.hstats[index].nmalloc--; arena->stats.hstats[index].curhchunks--; } static void arena_huge_dalloc_stats_update(arena_t *arena, size_t usize) { szind_t index = size2index(usize) - nlclasses - NBINS; cassert(config_stats); arena->stats.ndalloc_huge++; arena->stats.allocated_huge -= usize; arena->stats.hstats[index].ndalloc++; arena->stats.hstats[index].curhchunks--; } static void arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize) { szind_t index = size2index(usize) - nlclasses - NBINS; cassert(config_stats); arena->stats.ndalloc_huge--; arena->stats.allocated_huge += usize; arena->stats.hstats[index].ndalloc--; arena->stats.hstats[index].curhchunks++; } static void arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize) { arena_huge_dalloc_stats_update(arena, oldsize); arena_huge_malloc_stats_update(arena, usize); } static void arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize, size_t usize) { arena_huge_dalloc_stats_update_undo(arena, oldsize); arena_huge_malloc_stats_update_undo(arena, usize); } extent_node_t * arena_node_alloc(arena_t *arena) { extent_node_t *node; malloc_mutex_lock(&arena->node_cache_mtx); node = ql_last(&arena->node_cache, ql_link); if (node == NULL) { malloc_mutex_unlock(&arena->node_cache_mtx); return (base_alloc(sizeof(extent_node_t))); } ql_tail_remove(&arena->node_cache, extent_node_t, ql_link); malloc_mutex_unlock(&arena->node_cache_mtx); return (node); } void arena_node_dalloc(arena_t *arena, extent_node_t *node) { malloc_mutex_lock(&arena->node_cache_mtx); ql_elm_new(node, ql_link); ql_tail_insert(&arena->node_cache, node, ql_link); malloc_mutex_unlock(&arena->node_cache_mtx); } static void * arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero, size_t csize) { void *ret; bool commit = true; ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment, zero, &commit); if (ret == NULL) { /* Revert 
optimistic stats updates. */ malloc_mutex_lock(&arena->lock); if (config_stats) { arena_huge_malloc_stats_update_undo(arena, usize); arena->stats.mapped -= usize; } arena->nactive -= (usize >> LG_PAGE); malloc_mutex_unlock(&arena->lock); } return (ret); } void * arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment, bool *zero) { void *ret; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; size_t csize = CHUNK_CEILING(usize); malloc_mutex_lock(&arena->lock); /* Optimistically update stats. */ if (config_stats) { arena_huge_malloc_stats_update(arena, usize); arena->stats.mapped += usize; } arena->nactive += (usize >> LG_PAGE); ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment, zero, true); malloc_mutex_unlock(&arena->lock); if (ret == NULL) { ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize, alignment, zero, csize); } if (config_stats && ret != NULL) stats_cactive_add(usize); return (ret); } void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize) { chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; size_t csize; csize = CHUNK_CEILING(usize); malloc_mutex_lock(&arena->lock); if (config_stats) { arena_huge_dalloc_stats_update(arena, usize); arena->stats.mapped -= usize; stats_cactive_sub(usize); } arena->nactive -= (usize >> LG_PAGE); chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true); malloc_mutex_unlock(&arena->lock); } void arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize, size_t usize) { assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize)); assert(oldsize != usize); malloc_mutex_lock(&arena->lock); if (config_stats) arena_huge_ralloc_stats_update(arena, oldsize, usize); if (oldsize < usize) { size_t udiff = usize - oldsize; arena->nactive += udiff >> LG_PAGE; if (config_stats) stats_cactive_add(udiff); } else { size_t udiff = oldsize - usize; arena->nactive -= udiff >> LG_PAGE; if (config_stats) stats_cactive_sub(udiff); } malloc_mutex_unlock(&arena->lock); } void arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize, size_t usize) { size_t udiff = oldsize - usize; size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); malloc_mutex_lock(&arena->lock); if (config_stats) { arena_huge_ralloc_stats_update(arena, oldsize, usize); if (cdiff != 0) { arena->stats.mapped -= cdiff; stats_cactive_sub(udiff); } } arena->nactive -= udiff >> LG_PAGE; if (cdiff != 0) { chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(usize)); chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true); } malloc_mutex_unlock(&arena->lock); } static bool arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk, size_t udiff, size_t cdiff) { bool err; bool commit = true; err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize, zero, &commit) == NULL); if (err) { /* Revert optimistic stats updates. 
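 *
 * (Editorial note: the caller bumped the stats and nactive under
 * arena->lock before dropping it for the allocation attempt, so this
 * failure path re-acquires the lock and subtracts the same amounts back
 * out.)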
*/ malloc_mutex_lock(&arena->lock); if (config_stats) { arena_huge_ralloc_stats_update_undo(arena, oldsize, usize); arena->stats.mapped -= cdiff; } arena->nactive -= (udiff >> LG_PAGE); malloc_mutex_unlock(&arena->lock); } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk, cdiff, true, arena->ind)) { chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero, true); err = true; } return (err); } bool arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize, size_t usize, bool *zero) { bool err; chunk_hooks_t chunk_hooks = chunk_hooks_get(arena); void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize)); size_t udiff = usize - oldsize; size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize); malloc_mutex_lock(&arena->lock); /* Optimistically update stats. */ if (config_stats) { arena_huge_ralloc_stats_update(arena, oldsize, usize); arena->stats.mapped += cdiff; } arena->nactive += (udiff >> LG_PAGE); err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff, chunksize, zero, true) == NULL); malloc_mutex_unlock(&arena->lock); if (err) { err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff, cdiff); } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk, cdiff, true, arena->ind)) { chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero, true); err = true; } if (config_stats && !err) stats_cactive_add(udiff); return (err); } /* * Do first-best-fit run selection, i.e. select the lowest run that best fits. * Run sizes are quantized, so not all candidate runs are necessarily exactly * the same size. */ static arena_run_t * arena_run_first_best_fit(arena_t *arena, size_t size) { size_t search_size = run_quantize_first(size); arena_chunk_map_misc_t *key = arena_miscelm_key_create(search_size); arena_chunk_map_misc_t *miscelm = arena_avail_tree_nsearch(&arena->runs_avail, key); if (miscelm == NULL) return (NULL); return (&miscelm->run); } static arena_run_t * arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) { arena_run_t *run = arena_run_first_best_fit(arena, s2u(size)); if (run != NULL) { if (arena_run_split_large(arena, run, size, zero)) run = NULL; } return (run); } static arena_run_t * arena_run_alloc_large(arena_t *arena, size_t size, bool zero) { arena_chunk_t *chunk; arena_run_t *run; assert(size <= arena_maxrun); assert(size == PAGE_CEILING(size)); /* Search the arena's chunks for the lowest best fit. */ run = arena_run_alloc_large_helper(arena, size, zero); if (run != NULL) return (run); /* * No usable runs. Create a new chunk from which to allocate the run. */ chunk = arena_chunk_alloc(arena); if (chunk != NULL) { run = &arena_miscelm_get(chunk, map_bias)->run; if (arena_run_split_large(arena, run, size, zero)) run = NULL; return (run); } /* * arena_chunk_alloc() failed, but another thread may have made * sufficient memory available while this one dropped arena->lock in * arena_chunk_alloc(), so search one more time. 
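 *
 * (Editorial sketch of the race, with thread names assumed: thread A drops
 * arena->lock inside arena_chunk_alloc() to call the chunk hooks and its
 * chunk allocation fails; meanwhile thread B frees a run, so a suitable run
 * may exist again even though no new chunk was obtained, and the retry
 * below can find it.)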
*/ return (arena_run_alloc_large_helper(arena, size, zero)); } static arena_run_t * arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind) { arena_run_t *run = arena_run_first_best_fit(arena, size); if (run != NULL) { if (arena_run_split_small(arena, run, size, binind)) run = NULL; } return (run); } static arena_run_t * arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind) { arena_chunk_t *chunk; arena_run_t *run; assert(size <= arena_maxrun); assert(size == PAGE_CEILING(size)); assert(binind != BININD_INVALID); /* Search the arena's chunks for the lowest best fit. */ run = arena_run_alloc_small_helper(arena, size, binind); if (run != NULL) return (run); /* * No usable runs. Create a new chunk from which to allocate the run. */ chunk = arena_chunk_alloc(arena); if (chunk != NULL) { run = &arena_miscelm_get(chunk, map_bias)->run; if (arena_run_split_small(arena, run, size, binind)) run = NULL; return (run); } /* * arena_chunk_alloc() failed, but another thread may have made * sufficient memory available while this one dropped arena->lock in * arena_chunk_alloc(), so search one more time. */ return (arena_run_alloc_small_helper(arena, size, binind)); } static bool arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult) { return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t) << 3)); } ssize_t arena_lg_dirty_mult_get(arena_t *arena) { ssize_t lg_dirty_mult; malloc_mutex_lock(&arena->lock); lg_dirty_mult = arena->lg_dirty_mult; malloc_mutex_unlock(&arena->lock); return (lg_dirty_mult); } bool arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult) { if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) return (true); malloc_mutex_lock(&arena->lock); arena->lg_dirty_mult = lg_dirty_mult; arena_maybe_purge(arena); malloc_mutex_unlock(&arena->lock); return (false); } void arena_maybe_purge(arena_t *arena) { /* Don't purge if the option is disabled. */ if (arena->lg_dirty_mult < 0) return; /* Don't recursively purge. */ if (arena->purging) return; /* * Iterate, since preventing recursive purging could otherwise leave too * many dirty pages. */ while (true) { size_t threshold = (arena->nactive >> arena->lg_dirty_mult); if (threshold < chunk_npages) threshold = chunk_npages; /* * Don't purge unless the number of purgeable pages exceeds the * threshold. */ if (arena->ndirty <= threshold) return; arena_purge(arena, false); } } static size_t arena_dirty_count(arena_t *arena) { size_t ndirty = 0; arena_runs_dirty_link_t *rdelm; extent_node_t *chunkselm; for (rdelm = qr_next(&arena->runs_dirty, rd_link), chunkselm = qr_next(&arena->chunks_cache, cc_link); rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) { size_t npages; if (rdelm == &chunkselm->rd) { npages = extent_node_size_get(chunkselm) >> LG_PAGE; chunkselm = qr_next(chunkselm, cc_link); } else { arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( rdelm); arena_chunk_map_misc_t *miscelm = arena_rd_to_miscelm(rdelm); size_t pageind = arena_miscelm_to_pageind(miscelm); assert(arena_mapbits_allocated_get(chunk, pageind) == 0); assert(arena_mapbits_large_get(chunk, pageind) == 0); assert(arena_mapbits_dirty_get(chunk, pageind) != 0); npages = arena_mapbits_unallocated_size_get(chunk, pageind) >> LG_PAGE; } ndirty += npages; } return (ndirty); } static size_t arena_compute_npurge(arena_t *arena, bool all) { size_t npurge; /* * Compute the minimum number of pages that this thread should try to * purge. 
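 *
 * (Editorial worked example with assumed numbers: with lg_dirty_mult == 3,
 * nactive == 80000 pages and ndirty == 12000 pages, the threshold is
 * max(80000 >> 3, chunk_npages) == 10000, so npurge == 12000 - 10000 ==
 * 2000 pages; when "all" is requested, npurge is simply ndirty.)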
*/ if (!all) { size_t threshold = (arena->nactive >> arena->lg_dirty_mult); threshold = threshold < chunk_npages ? chunk_npages : threshold; npurge = arena->ndirty - threshold; } else npurge = arena->ndirty; return (npurge); } static size_t arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all, size_t npurge, arena_runs_dirty_link_t *purge_runs_sentinel, extent_node_t *purge_chunks_sentinel) { arena_runs_dirty_link_t *rdelm, *rdelm_next; extent_node_t *chunkselm; size_t nstashed = 0; /* Stash at least npurge pages. */ for (rdelm = qr_next(&arena->runs_dirty, rd_link), chunkselm = qr_next(&arena->chunks_cache, cc_link); rdelm != &arena->runs_dirty; rdelm = rdelm_next) { size_t npages; rdelm_next = qr_next(rdelm, rd_link); if (rdelm == &chunkselm->rd) { extent_node_t *chunkselm_next; bool zero; UNUSED void *chunk; chunkselm_next = qr_next(chunkselm, cc_link); /* * Allocate. chunkselm remains valid due to the * dalloc_node=false argument to chunk_alloc_cache(). */ zero = false; chunk = chunk_alloc_cache(arena, chunk_hooks, extent_node_addr_get(chunkselm), extent_node_size_get(chunkselm), chunksize, &zero, false); assert(chunk == extent_node_addr_get(chunkselm)); assert(zero == extent_node_zeroed_get(chunkselm)); extent_node_dirty_insert(chunkselm, purge_runs_sentinel, purge_chunks_sentinel); npages = extent_node_size_get(chunkselm) >> LG_PAGE; chunkselm = chunkselm_next; } else { arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); arena_chunk_map_misc_t *miscelm = arena_rd_to_miscelm(rdelm); size_t pageind = arena_miscelm_to_pageind(miscelm); arena_run_t *run = &miscelm->run; size_t run_size = arena_mapbits_unallocated_size_get(chunk, pageind); npages = run_size >> LG_PAGE; assert(pageind + npages <= chunk_npages); assert(arena_mapbits_dirty_get(chunk, pageind) == arena_mapbits_dirty_get(chunk, pageind+npages-1)); /* * If purging the spare chunk's run, make it available * prior to allocation. */ if (chunk == arena->spare) arena_chunk_alloc(arena); /* Temporarily allocate the free dirty run. */ arena_run_split_large(arena, run, run_size, false); /* Stash. */ if (false) qr_new(rdelm, rd_link); /* Redundant. */ else { assert(qr_next(rdelm, rd_link) == rdelm); assert(qr_prev(rdelm, rd_link) == rdelm); } qr_meld(purge_runs_sentinel, rdelm, rd_link); } nstashed += npages; if (!all && nstashed >= npurge) break; } return (nstashed); } static size_t arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks, arena_runs_dirty_link_t *purge_runs_sentinel, extent_node_t *purge_chunks_sentinel) { size_t npurged, nmadvise; arena_runs_dirty_link_t *rdelm; extent_node_t *chunkselm; if (config_stats) nmadvise = 0; npurged = 0; malloc_mutex_unlock(&arena->lock); for (rdelm = qr_next(purge_runs_sentinel, rd_link), chunkselm = qr_next(purge_chunks_sentinel, cc_link); rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) { size_t npages; if (rdelm == &chunkselm->rd) { /* * Don't actually purge the chunk here because 1) * chunkselm is embedded in the chunk and must remain * valid, and 2) we deallocate the chunk in * arena_unstash_purged(), where it is destroyed, * decommitted, or purged, depending on chunk * deallocation policy. 
*/ size_t size = extent_node_size_get(chunkselm); npages = size >> LG_PAGE; chunkselm = qr_next(chunkselm, cc_link); } else { size_t pageind, run_size, flag_unzeroed, flags, i; bool decommitted; arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); arena_chunk_map_misc_t *miscelm = arena_rd_to_miscelm(rdelm); pageind = arena_miscelm_to_pageind(miscelm); run_size = arena_mapbits_large_size_get(chunk, pageind); npages = run_size >> LG_PAGE; assert(pageind + npages <= chunk_npages); assert(!arena_mapbits_decommitted_get(chunk, pageind)); assert(!arena_mapbits_decommitted_get(chunk, pageind+npages-1)); decommitted = !chunk_hooks->decommit(chunk, chunksize, pageind << LG_PAGE, npages << LG_PAGE, arena->ind); if (decommitted) { flag_unzeroed = 0; flags = CHUNK_MAP_DECOMMITTED; } else { flag_unzeroed = chunk_purge_wrapper(arena, chunk_hooks, chunk, chunksize, pageind << LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0; flags = flag_unzeroed; } arena_mapbits_large_set(chunk, pageind+npages-1, 0, flags); arena_mapbits_large_set(chunk, pageind, run_size, flags); /* * Set the unzeroed flag for internal pages, now that * chunk_purge_wrapper() has returned whether the pages * were zeroed as a side effect of purging. This chunk * map modification is safe even though the arena mutex * isn't currently owned by this thread, because the run * is marked as allocated, thus protecting it from being * modified by any other thread. As long as these * writes don't perturb the first and last elements' * CHUNK_MAP_ALLOCATED bits, behavior is well defined. */ for (i = 1; i < npages-1; i++) { arena_mapbits_internal_set(chunk, pageind+i, flag_unzeroed); } } npurged += npages; if (config_stats) nmadvise++; } malloc_mutex_lock(&arena->lock); if (config_stats) { arena->stats.nmadvise += nmadvise; arena->stats.purged += npurged; } return (npurged); } static void arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks, arena_runs_dirty_link_t *purge_runs_sentinel, extent_node_t *purge_chunks_sentinel) { arena_runs_dirty_link_t *rdelm, *rdelm_next; extent_node_t *chunkselm; /* Deallocate chunks/runs. */ for (rdelm = qr_next(purge_runs_sentinel, rd_link), chunkselm = qr_next(purge_chunks_sentinel, cc_link); rdelm != purge_runs_sentinel; rdelm = rdelm_next) { rdelm_next = qr_next(rdelm, rd_link); if (rdelm == &chunkselm->rd) { extent_node_t *chunkselm_next = qr_next(chunkselm, cc_link); void *addr = extent_node_addr_get(chunkselm); size_t size = extent_node_size_get(chunkselm); bool zeroed = extent_node_zeroed_get(chunkselm); bool committed = extent_node_committed_get(chunkselm); extent_node_dirty_remove(chunkselm); arena_node_dalloc(arena, chunkselm); chunkselm = chunkselm_next; chunk_dalloc_arena(arena, chunk_hooks, addr, size, zeroed, committed); } else { arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); arena_chunk_map_misc_t *miscelm = arena_rd_to_miscelm(rdelm); size_t pageind = arena_miscelm_to_pageind(miscelm); bool decommitted = (arena_mapbits_decommitted_get(chunk, pageind) != 0); arena_run_t *run = &miscelm->run; qr_remove(rdelm, rd_link); arena_run_dalloc(arena, run, false, true, decommitted); } } } static void arena_purge(arena_t *arena, bool all) { chunk_hooks_t chunk_hooks = chunk_hooks_get(arena); size_t npurge, npurgeable, npurged; arena_runs_dirty_link_t purge_runs_sentinel; extent_node_t purge_chunks_sentinel; arena->purging = true; /* * Calls to arena_dirty_count() are disabled even for debug builds * because overhead grows nonlinearly as memory usage increases. 
*/ if (false && config_debug) { size_t ndirty = arena_dirty_count(arena); assert(ndirty == arena->ndirty); } assert((arena->nactive >> arena->lg_dirty_mult) < arena->ndirty || all); if (config_stats) arena->stats.npurge++; npurge = arena_compute_npurge(arena, all); qr_new(&purge_runs_sentinel, rd_link); extent_node_dirty_linkage_init(&purge_chunks_sentinel); npurgeable = arena_stash_dirty(arena, &chunk_hooks, all, npurge, &purge_runs_sentinel, &purge_chunks_sentinel); assert(npurgeable >= npurge); npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel, &purge_chunks_sentinel); assert(npurged == npurgeable); arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel, &purge_chunks_sentinel); arena->purging = false; } void arena_purge_all(arena_t *arena) { malloc_mutex_lock(&arena->lock); arena_purge(arena, true); malloc_mutex_unlock(&arena->lock); } static void arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty, size_t flag_decommitted) { size_t size = *p_size; size_t run_ind = *p_run_ind; size_t run_pages = *p_run_pages; /* Try to coalesce forward. */ if (run_ind + run_pages < chunk_npages && arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 && arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind+run_pages) == flag_decommitted) { size_t nrun_size = arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages); size_t nrun_pages = nrun_size >> LG_PAGE; /* * Remove successor from runs_avail; the coalesced run is * inserted later. */ assert(arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages+nrun_pages-1) == nrun_size); assert(arena_mapbits_dirty_get(chunk, run_ind+run_pages+nrun_pages-1) == flag_dirty); assert(arena_mapbits_decommitted_get(chunk, run_ind+run_pages+nrun_pages-1) == flag_decommitted); arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages); /* * If the successor is dirty, remove it from the set of dirty * pages. */ if (flag_dirty != 0) { arena_run_dirty_remove(arena, chunk, run_ind+run_pages, nrun_pages); } size += nrun_size; run_pages += nrun_pages; arena_mapbits_unallocated_size_set(chunk, run_ind, size); arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, size); } /* Try to coalesce backward. */ if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) == flag_decommitted) { size_t prun_size = arena_mapbits_unallocated_size_get(chunk, run_ind-1); size_t prun_pages = prun_size >> LG_PAGE; run_ind -= prun_pages; /* * Remove predecessor from runs_avail; the coalesced run is * inserted later. */ assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == prun_size); assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty); assert(arena_mapbits_decommitted_get(chunk, run_ind) == flag_decommitted); arena_avail_remove(arena, chunk, run_ind, prun_pages); /* * If the predecessor is dirty, remove it from the set of dirty * pages. 
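* For example (illustrative): if pages [8..11] are being freed and
* pages [12..15] form an unallocated run with identical dirty and
* decommitted flags, the two merge into a single 8-page run starting
* at page 8.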
*/ if (flag_dirty != 0) { arena_run_dirty_remove(arena, chunk, run_ind, prun_pages); } size += prun_size; run_pages += prun_pages; arena_mapbits_unallocated_size_set(chunk, run_ind, size); arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, size); } *p_size = size; *p_run_ind = run_ind; *p_run_pages = run_pages; } static size_t arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, size_t run_ind) { size_t size; assert(run_ind >= map_bias); assert(run_ind < chunk_npages); if (arena_mapbits_large_get(chunk, run_ind) != 0) { size = arena_mapbits_large_size_get(chunk, run_ind); assert(size == PAGE || arena_mapbits_large_size_get(chunk, run_ind+(size>>LG_PAGE)-1) == 0); } else { arena_bin_info_t *bin_info = &arena_bin_info[run->binind]; size = bin_info->run_size; } return (size); } static bool arena_run_decommit(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run) { arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); size_t run_ind = arena_miscelm_to_pageind(miscelm); size_t offset = run_ind << LG_PAGE; size_t length = arena_run_size_get(arena, chunk, run, run_ind); return (arena->chunk_hooks.decommit(chunk, chunksize, offset, length, arena->ind)); } static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned, bool decommitted) { arena_chunk_t *chunk; arena_chunk_map_misc_t *miscelm; size_t size, run_ind, run_pages, flag_dirty, flag_decommitted; chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); miscelm = arena_run_to_miscelm(run); run_ind = arena_miscelm_to_pageind(miscelm); assert(run_ind >= map_bias); assert(run_ind < chunk_npages); size = arena_run_size_get(arena, chunk, run, run_ind); run_pages = (size >> LG_PAGE); arena_cactive_update(arena, 0, run_pages); arena->nactive -= run_pages; /* * The run is dirty if the caller claims to have dirtied it, as well as * if it was already dirty before being allocated and the caller * doesn't claim to have cleaned it. */ assert(arena_mapbits_dirty_get(chunk, run_ind) == arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind) != 0) dirty = true; flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0; /* Mark pages as unallocated in the chunk map. */ if (dirty || decommitted) { size_t flags = flag_dirty | flag_decommitted; arena_mapbits_unallocated_set(chunk, run_ind, size, flags); arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, flags); } else { arena_mapbits_unallocated_set(chunk, run_ind, size, arena_mapbits_unzeroed_get(chunk, run_ind)); arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1)); } arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages, flag_dirty, flag_decommitted); /* Insert into runs_avail, now that coalescing is complete. */ assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); assert(arena_mapbits_dirty_get(chunk, run_ind) == arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); assert(arena_mapbits_decommitted_get(chunk, run_ind) == arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1)); arena_avail_insert(arena, chunk, run_ind, run_pages); if (dirty) arena_run_dirty_insert(arena, chunk, run_ind, run_pages); /* Deallocate chunk if it is now completely unused. 
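* (arena_maxrun == chunksize - (map_bias << LG_PAGE), i.e. every
* non-header page in the chunk; see arena_boot().)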
*/ if (size == arena_maxrun) { assert(run_ind == map_bias); assert(run_pages == (arena_maxrun >> LG_PAGE)); arena_chunk_dalloc(arena, chunk); } /* * It is okay to do dirty page processing here even if the chunk was * deallocated above, since in that case it is the spare. Waiting * until after possible chunk deallocation to do dirty processing * allows for an old spare to be fully deallocated, thus decreasing the * chances of spuriously crossing the dirty page purging threshold. */ if (dirty) arena_maybe_purge(arena); } static void arena_run_dalloc_decommit(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run) { bool committed = arena_run_decommit(arena, chunk, run); arena_run_dalloc(arena, run, committed, false, !committed); } static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, size_t oldsize, size_t newsize) { arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); size_t pageind = arena_miscelm_to_pageind(miscelm); size_t head_npages = (oldsize - newsize) >> LG_PAGE; size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? CHUNK_MAP_UNZEROED : 0; assert(oldsize > newsize); /* * Update the chunk map so that arena_run_dalloc() can treat the * leading run as separately allocated. Set the last element of each * run first, in case of single-page runs. */ assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind+head_npages-1))); arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); if (config_debug) { UNUSED size_t tail_npages = newsize >> LG_PAGE; assert(arena_mapbits_large_size_get(chunk, pageind+head_npages+tail_npages-1) == 0); assert(arena_mapbits_dirty_get(chunk, pageind+head_npages+tail_npages-1) == flag_dirty); } arena_mapbits_large_set(chunk, pageind+head_npages, newsize, flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind+head_npages))); arena_run_dalloc(arena, run, false, false, (flag_decommitted != 0)); } static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, size_t oldsize, size_t newsize, bool dirty) { arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); size_t pageind = arena_miscelm_to_pageind(miscelm); size_t head_npages = newsize >> LG_PAGE; size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? CHUNK_MAP_UNZEROED : 0; arena_chunk_map_misc_t *tail_miscelm; arena_run_t *tail_run; assert(oldsize > newsize); /* * Update the chunk map so that arena_run_dalloc() can treat the * trailing run as separately allocated. Set the last element of each * run first, in case of single-page runs. 
*/ assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind+head_npages-1))); arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); if (config_debug) { UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE; assert(arena_mapbits_large_size_get(chunk, pageind+head_npages+tail_npages-1) == 0); assert(arena_mapbits_dirty_get(chunk, pageind+head_npages+tail_npages-1) == flag_dirty); } arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind+head_npages))); tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages); tail_run = &tail_miscelm->run; arena_run_dalloc(arena, tail_run, dirty, false, (flag_decommitted != 0)); } static arena_run_t * arena_bin_runs_first(arena_bin_t *bin) { arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs); if (miscelm != NULL) return (&miscelm->run); return (NULL); } static void arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) { arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); assert(arena_run_tree_search(&bin->runs, miscelm) == NULL); arena_run_tree_insert(&bin->runs, miscelm); } static void arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run) { arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); assert(arena_run_tree_search(&bin->runs, miscelm) != NULL); arena_run_tree_remove(&bin->runs, miscelm); } static arena_run_t * arena_bin_nonfull_run_tryget(arena_bin_t *bin) { arena_run_t *run = arena_bin_runs_first(bin); if (run != NULL) { arena_bin_runs_remove(bin, run); if (config_stats) bin->stats.reruns++; } return (run); } static arena_run_t * arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) { arena_run_t *run; szind_t binind; arena_bin_info_t *bin_info; /* Look for a usable run. */ run = arena_bin_nonfull_run_tryget(bin); if (run != NULL) return (run); /* No existing runs have any space available. */ binind = arena_bin_index(arena, bin); bin_info = &arena_bin_info[binind]; /* Allocate a new run. */ malloc_mutex_unlock(&bin->lock); /******************************/ malloc_mutex_lock(&arena->lock); run = arena_run_alloc_small(arena, bin_info->run_size, binind); if (run != NULL) { /* Initialize run internals. */ run->binind = binind; run->nfree = bin_info->nregs; bitmap_init(run->bitmap, &bin_info->bitmap_info); } malloc_mutex_unlock(&arena->lock); /********************************/ malloc_mutex_lock(&bin->lock); if (run != NULL) { if (config_stats) { bin->stats.nruns++; bin->stats.curruns++; } return (run); } /* * arena_run_alloc_small() failed, but another thread may have made * sufficient memory available while this one dropped bin->lock above, * so search one more time. */ run = arena_bin_nonfull_run_tryget(bin); if (run != NULL) return (run); return (NULL); } /* Re-fill bin->runcur, then call arena_run_reg_alloc(). */ static void * arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) { szind_t binind; arena_bin_info_t *bin_info; arena_run_t *run; binind = arena_bin_index(arena, bin); bin_info = &arena_bin_info[binind]; bin->runcur = NULL; run = arena_bin_nonfull_run_get(arena, bin); if (bin->runcur != NULL && bin->runcur->nfree > 0) { /* * Another thread updated runcur while this one ran without the * bin lock in arena_bin_nonfull_run_get(). 
*/ void *ret; assert(bin->runcur->nfree > 0); ret = arena_run_reg_alloc(bin->runcur, bin_info); if (run != NULL) { arena_chunk_t *chunk; /* * arena_run_alloc_small() may have allocated run, or * it may have pulled run from the bin's run tree. * Therefore it is unsafe to make any assumptions about * how run has previously been used, and * arena_bin_lower_run() must be called, as if a region * were just deallocated from the run. */ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); if (run->nfree == bin_info->nregs) arena_dalloc_bin_run(arena, chunk, run, bin); else arena_bin_lower_run(arena, chunk, run, bin); } return (ret); } if (run == NULL) return (NULL); bin->runcur = run; assert(bin->runcur->nfree > 0); return (arena_run_reg_alloc(bin->runcur, bin_info)); } void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) { unsigned i, nfill; arena_bin_t *bin; assert(tbin->ncached == 0); if (config_prof && arena_prof_accum(arena, prof_accumbytes)) prof_idump(); bin = &arena->bins[binind]; malloc_mutex_lock(&bin->lock); for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> tbin->lg_fill_div); i < nfill; i++) { arena_run_t *run; void *ptr; if ((run = bin->runcur) != NULL && run->nfree > 0) ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); else ptr = arena_bin_malloc_hard(arena, bin); if (ptr == NULL) { /* * OOM. tbin->avail isn't yet filled down to its first * element, so the successful allocations (if any) must * be moved to the base of tbin->avail before bailing * out. */ if (i > 0) { memmove(tbin->avail, &tbin->avail[nfill - i], i * sizeof(void *)); } break; } if (config_fill && unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ptr, &arena_bin_info[binind], true); } /* Insert such that low regions get used first. */ tbin->avail[nfill - 1 - i] = ptr; } if (config_stats) { bin->stats.nmalloc += i; bin->stats.nrequests += tbin->tstats.nrequests; bin->stats.curregs += i; bin->stats.nfills++; tbin->tstats.nrequests = 0; } malloc_mutex_unlock(&bin->lock); tbin->ncached = i; } void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) { if (zero) { size_t redzone_size = bin_info->redzone_size; memset((void *)((uintptr_t)ptr - redzone_size), 0xa5, redzone_size); memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5, redzone_size); } else { memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5, bin_info->reg_interval); } } #ifdef JEMALLOC_JET #undef arena_redzone_corruption #define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl) #endif static void arena_redzone_corruption(void *ptr, size_t usize, bool after, size_t offset, uint8_t byte) { malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p " "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s", after ? 
"after" : "before", ptr, usize, byte); } #ifdef JEMALLOC_JET #undef arena_redzone_corruption #define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) arena_redzone_corruption_t *arena_redzone_corruption = JEMALLOC_N(arena_redzone_corruption_impl); #endif static void arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset) { bool error = false; if (opt_junk_alloc) { size_t size = bin_info->reg_size; size_t redzone_size = bin_info->redzone_size; size_t i; for (i = 1; i <= redzone_size; i++) { uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); if (*byte != 0xa5) { error = true; arena_redzone_corruption(ptr, size, false, i, *byte); if (reset) *byte = 0xa5; } } for (i = 0; i < redzone_size; i++) { uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); if (*byte != 0xa5) { error = true; arena_redzone_corruption(ptr, size, true, i, *byte); if (reset) *byte = 0xa5; } } } if (opt_abort && error) abort(); } #ifdef JEMALLOC_JET #undef arena_dalloc_junk_small #define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl) #endif void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) { size_t redzone_size = bin_info->redzone_size; arena_redzones_validate(ptr, bin_info, false); memset((void *)((uintptr_t)ptr - redzone_size), 0x5a, bin_info->reg_interval); } #ifdef JEMALLOC_JET #undef arena_dalloc_junk_small #define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) arena_dalloc_junk_small_t *arena_dalloc_junk_small = JEMALLOC_N(arena_dalloc_junk_small_impl); #endif void arena_quarantine_junk_small(void *ptr, size_t usize) { szind_t binind; arena_bin_info_t *bin_info; cassert(config_fill); assert(opt_junk_free); assert(opt_quarantine); assert(usize <= SMALL_MAXCLASS); binind = size2index(usize); bin_info = &arena_bin_info[binind]; arena_redzones_validate(ptr, bin_info, true); } void * arena_malloc_small(arena_t *arena, size_t size, bool zero) { void *ret; arena_bin_t *bin; arena_run_t *run; szind_t binind; binind = size2index(size); assert(binind < NBINS); bin = &arena->bins[binind]; size = index2size(binind); malloc_mutex_lock(&bin->lock); if ((run = bin->runcur) != NULL && run->nfree > 0) ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); else ret = arena_bin_malloc_hard(arena, bin); if (ret == NULL) { malloc_mutex_unlock(&bin->lock); return (NULL); } if (config_stats) { bin->stats.nmalloc++; bin->stats.nrequests++; bin->stats.curregs++; } malloc_mutex_unlock(&bin->lock); if (config_prof && !isthreaded && arena_prof_accum(arena, size)) prof_idump(); if (!zero) { if (config_fill) { if (unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &arena_bin_info[binind], false); } else if (unlikely(opt_zero)) memset(ret, 0, size); } JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); } else { if (config_fill && unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &arena_bin_info[binind], true); } JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); memset(ret, 0, size); } return (ret); } void * arena_malloc_large(arena_t *arena, size_t size, bool zero) { void *ret; size_t usize; uintptr_t random_offset; arena_run_t *run; arena_chunk_map_misc_t *miscelm; UNUSED bool idump; /* Large allocation. */ usize = s2u(size); malloc_mutex_lock(&arena->lock); if (config_cache_oblivious) { uint64_t r; /* * Compute a uniformly distributed offset within the first page * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64 * for 4 KiB pages and 64-byte cachelines. 
*/ prng64(r, LG_PAGE - LG_CACHELINE, arena->offset_state, UINT64_C(6364136223846793005), UINT64_C(1442695040888963407)); random_offset = ((uintptr_t)r) << LG_CACHELINE; } else random_offset = 0; run = arena_run_alloc_large(arena, usize + large_pad, zero); if (run == NULL) { malloc_mutex_unlock(&arena->lock); return (NULL); } miscelm = arena_run_to_miscelm(run); ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) + random_offset); if (config_stats) { szind_t index = size2index(usize) - NBINS; arena->stats.nmalloc_large++; arena->stats.nrequests_large++; arena->stats.allocated_large += usize; arena->stats.lstats[index].nmalloc++; arena->stats.lstats[index].nrequests++; arena->stats.lstats[index].curruns++; } if (config_prof) idump = arena_prof_accum_locked(arena, usize); malloc_mutex_unlock(&arena->lock); if (config_prof && idump) prof_idump(); if (!zero) { if (config_fill) { if (unlikely(opt_junk_alloc)) memset(ret, 0xa5, usize); else if (unlikely(opt_zero)) memset(ret, 0, usize); } } return (ret); } /* Only handles large allocations that require more than page alignment. */ static void * arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, bool zero) { void *ret; size_t alloc_size, leadsize, trailsize; arena_run_t *run; arena_chunk_t *chunk; arena_chunk_map_misc_t *miscelm; void *rpages; assert(usize == PAGE_CEILING(usize)); arena = arena_choose(tsd, arena); if (unlikely(arena == NULL)) return (NULL); alignment = PAGE_CEILING(alignment); alloc_size = usize + large_pad + alignment - PAGE; malloc_mutex_lock(&arena->lock); run = arena_run_alloc_large(arena, alloc_size, false); if (run == NULL) { malloc_mutex_unlock(&arena->lock); return (NULL); } chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); miscelm = arena_run_to_miscelm(run); rpages = arena_miscelm_to_rpages(miscelm); leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) - (uintptr_t)rpages; assert(alloc_size >= leadsize + usize); trailsize = alloc_size - leadsize - usize - large_pad; if (leadsize != 0) { arena_chunk_map_misc_t *head_miscelm = miscelm; arena_run_t *head_run = run; miscelm = arena_miscelm_get(chunk, arena_miscelm_to_pageind(head_miscelm) + (leadsize >> LG_PAGE)); run = &miscelm->run; arena_run_trim_head(arena, chunk, head_run, alloc_size, alloc_size - leadsize); } if (trailsize != 0) { arena_run_trim_tail(arena, chunk, run, usize + large_pad + trailsize, usize + large_pad, false); } if (arena_run_init_large(arena, run, usize + large_pad, zero)) { size_t run_ind = arena_miscelm_to_pageind(arena_run_to_miscelm(run)); bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0); bool decommitted = (arena_mapbits_decommitted_get(chunk, run_ind) != 0); assert(decommitted); /* Cause of OOM. 
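* (arena_run_init_large() fails only when committing the run's
* decommitted pages fails, which is why decommitted must be true
* here.)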
*/ arena_run_dalloc(arena, run, dirty, false, decommitted); malloc_mutex_unlock(&arena->lock); return (NULL); } ret = arena_miscelm_to_rpages(miscelm); if (config_stats) { szind_t index = size2index(usize) - NBINS; arena->stats.nmalloc_large++; arena->stats.nrequests_large++; arena->stats.allocated_large += usize; arena->stats.lstats[index].nmalloc++; arena->stats.lstats[index].nrequests++; arena->stats.lstats[index].curruns++; } malloc_mutex_unlock(&arena->lock); if (config_fill && !zero) { if (unlikely(opt_junk_alloc)) memset(ret, 0xa5, usize); else if (unlikely(opt_zero)) memset(ret, 0, usize); } return (ret); } void * arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache) { void *ret; if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE && (usize & PAGE_MASK) == 0))) { /* Small; alignment doesn't require special run placement. */ ret = arena_malloc(tsd, arena, usize, zero, tcache); } else if (usize <= large_maxclass && alignment <= PAGE) { /* * Large; alignment doesn't require special run placement. * However, the cached pointer may be at a random offset from * the base of the run, so do some bit manipulation to retrieve * the base. */ ret = arena_malloc(tsd, arena, usize, zero, tcache); if (config_cache_oblivious) ret = (void *)((uintptr_t)ret & ~PAGE_MASK); } else { if (likely(usize <= large_maxclass)) { ret = arena_palloc_large(tsd, arena, usize, alignment, zero); } else if (likely(alignment <= chunksize)) ret = huge_malloc(tsd, arena, usize, zero, tcache); else { ret = huge_palloc(tsd, arena, usize, alignment, zero, tcache); } } return (ret); } void arena_prof_promoted(const void *ptr, size_t size) { arena_chunk_t *chunk; size_t pageind; szind_t binind; cassert(config_prof); assert(ptr != NULL); assert(CHUNK_ADDR2BASE(ptr) != ptr); assert(isalloc(ptr, false) == LARGE_MINCLASS); assert(isalloc(ptr, true) == LARGE_MINCLASS); assert(size <= SMALL_MAXCLASS); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; binind = size2index(size); assert(binind < NBINS); arena_mapbits_large_binind_set(chunk, pageind, binind); assert(isalloc(ptr, false) == LARGE_MINCLASS); assert(isalloc(ptr, true) == size); } static void arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin) { /* Dissociate run from bin. */ if (run == bin->runcur) bin->runcur = NULL; else { szind_t binind = arena_bin_index(extent_node_arena_get( &chunk->node), bin); arena_bin_info_t *bin_info = &arena_bin_info[binind]; if (bin_info->nregs != 1) { /* * This block's conditional is necessary because if the * run only contains one region, then it never gets * inserted into the non-full runs tree. */ arena_bin_runs_remove(bin, run); } } } static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin) { assert(run != bin->runcur); assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) == NULL); malloc_mutex_unlock(&bin->lock); /******************************/ malloc_mutex_lock(&arena->lock); arena_run_dalloc_decommit(arena, chunk, run); malloc_mutex_unlock(&arena->lock); /****************************/ malloc_mutex_lock(&bin->lock); if (config_stats) bin->stats.curruns--; } static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin) { /* * Make sure that if bin->runcur is non-NULL, it refers to the lowest * non-full run. 
It is okay to NULL runcur out rather than proactively * keeping it pointing at the lowest non-full run. */ if ((uintptr_t)run < (uintptr_t)bin->runcur) { /* Switch runcur. */ if (bin->runcur->nfree > 0) arena_bin_runs_insert(bin, bin->runcur); bin->runcur = run; if (config_stats) bin->stats.reruns++; } else arena_bin_runs_insert(bin, run); } static void arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm, bool junked) { size_t pageind, rpages_ind; arena_run_t *run; arena_bin_t *bin; arena_bin_info_t *bin_info; szind_t binind; pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); run = &arena_miscelm_get(chunk, rpages_ind)->run; binind = run->binind; bin = &arena->bins[binind]; bin_info = &arena_bin_info[binind]; if (!junked && config_fill && unlikely(opt_junk_free)) arena_dalloc_junk_small(ptr, bin_info); arena_run_reg_dalloc(run, ptr); if (run->nfree == bin_info->nregs) { arena_dissociate_bin_run(chunk, run, bin); arena_dalloc_bin_run(arena, chunk, run, bin); } else if (run->nfree == 1 && run != bin->runcur) arena_bin_lower_run(arena, chunk, run, bin); if (config_stats) { bin->stats.ndalloc++; bin->stats.curregs--; } } void arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm) { arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true); } void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm) { arena_run_t *run; arena_bin_t *bin; size_t rpages_ind; rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); run = &arena_miscelm_get(chunk, rpages_ind)->run; bin = &arena->bins[run->binind]; malloc_mutex_lock(&bin->lock); arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false); malloc_mutex_unlock(&bin->lock); } void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t pageind) { arena_chunk_map_bits_t *bitselm; if (config_debug) { /* arena_ptr_small_binind_get() does extra sanity checking. 
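* (It recomputes the bin index from the chunk map bits and checks it
* against the run metadata, so a stale or corrupted pointer trips the
* assertion below rather than silently corrupting a bin.)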
*/ assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, pageind)) != BININD_INVALID); } bitselm = arena_bitselm_get(chunk, pageind); arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm); } #ifdef JEMALLOC_JET #undef arena_dalloc_junk_large #define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl) #endif void arena_dalloc_junk_large(void *ptr, size_t usize) { if (config_fill && unlikely(opt_junk_free)) memset(ptr, 0x5a, usize); } #ifdef JEMALLOC_JET #undef arena_dalloc_junk_large #define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) arena_dalloc_junk_large_t *arena_dalloc_junk_large = JEMALLOC_N(arena_dalloc_junk_large_impl); #endif static void arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool junked) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); arena_run_t *run = &miscelm->run; if (config_fill || config_stats) { size_t usize = arena_mapbits_large_size_get(chunk, pageind) - large_pad; if (!junked) arena_dalloc_junk_large(ptr, usize); if (config_stats) { szind_t index = size2index(usize) - NBINS; arena->stats.ndalloc_large++; arena->stats.allocated_large -= usize; arena->stats.lstats[index].ndalloc++; arena->stats.lstats[index].curruns--; } } arena_run_dalloc_decommit(arena, chunk, run); } void arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr) { arena_dalloc_large_locked_impl(arena, chunk, ptr, true); } void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr) { malloc_mutex_lock(&arena->lock); arena_dalloc_large_locked_impl(arena, chunk, ptr, false); malloc_mutex_unlock(&arena->lock); } static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t oldsize, size_t size) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); arena_run_t *run = &miscelm->run; assert(size < oldsize); /* * Shrink the run, and make trailing pages available for other * allocations. */ malloc_mutex_lock(&arena->lock); arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size + large_pad, true); if (config_stats) { szind_t oldindex = size2index(oldsize) - NBINS; szind_t index = size2index(size) - NBINS; arena->stats.ndalloc_large++; arena->stats.allocated_large -= oldsize; arena->stats.lstats[oldindex].ndalloc++; arena->stats.lstats[oldindex].curruns--; arena->stats.nmalloc_large++; arena->stats.nrequests_large++; arena->stats.allocated_large += size; arena->stats.lstats[index].nmalloc++; arena->stats.lstats[index].nrequests++; arena->stats.lstats[index].curruns++; } malloc_mutex_unlock(&arena->lock); } static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; size_t npages = (oldsize + large_pad) >> LG_PAGE; size_t followsize; assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) - large_pad); /* Try to extend the run. */ malloc_mutex_lock(&arena->lock); if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk, pageind+npages) != 0) goto label_fail; followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages); if (oldsize + followsize >= usize_min) { /* * The next run is available and sufficiently large. Split the * following run, then merge the first part with the existing * allocation. 
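* For example (illustrative): growing a 16 KiB allocation to 24 KiB
* splits splitsize = usize - oldsize = 8 KiB off the adjacent free run
* and appends it to the existing allocation in place.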
*/ arena_run_t *run; size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask; usize = usize_max; while (oldsize + followsize < usize) usize = index2size(size2index(usize)-1); assert(usize >= usize_min); assert(usize >= oldsize); splitsize = usize - oldsize; if (splitsize == 0) goto label_fail; run = &arena_miscelm_get(chunk, pageind+npages)->run; if (arena_run_split_large(arena, run, splitsize, zero)) goto label_fail; if (config_cache_oblivious && zero) { /* * Zero the trailing bytes of the original allocation's * last page, since they are in an indeterminate state. */ assert(PAGE_CEILING(oldsize) == oldsize); memset((void *)((uintptr_t)ptr + oldsize), 0, PAGE_CEILING((uintptr_t)ptr) - (uintptr_t)ptr); } size = oldsize + splitsize; npages = (size + large_pad) >> LG_PAGE; /* * Mark the extended run as dirty if either portion of the run * was dirty before allocation. This is rather pedantic, * because there's not actually any sequence of events that * could cause the resulting run to be passed to * arena_run_dalloc() with the dirty argument set to false * (which is when dirty flag consistency would really matter). */ flag_dirty = arena_mapbits_dirty_get(chunk, pageind) | arena_mapbits_dirty_get(chunk, pageind+npages-1); flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0; arena_mapbits_large_set(chunk, pageind, size + large_pad, flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind+npages-1))); if (config_stats) { szind_t oldindex = size2index(oldsize) - NBINS; szind_t index = size2index(size) - NBINS; arena->stats.ndalloc_large++; arena->stats.allocated_large -= oldsize; arena->stats.lstats[oldindex].ndalloc++; arena->stats.lstats[oldindex].curruns--; arena->stats.nmalloc_large++; arena->stats.nrequests_large++; arena->stats.allocated_large += size; arena->stats.lstats[index].nmalloc++; arena->stats.lstats[index].nrequests++; arena->stats.lstats[index].curruns++; } malloc_mutex_unlock(&arena->lock); return (false); } label_fail: malloc_mutex_unlock(&arena->lock); return (true); } #ifdef JEMALLOC_JET #undef arena_ralloc_junk_large #define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl) #endif static void arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) { if (config_fill && unlikely(opt_junk_free)) { memset((void *)((uintptr_t)ptr + usize), 0x5a, old_usize - usize); } } #ifdef JEMALLOC_JET #undef arena_ralloc_junk_large #define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) arena_ralloc_junk_large_t *arena_ralloc_junk_large = JEMALLOC_N(arena_ralloc_junk_large_impl); #endif /* * Try to resize a large allocation, in order to avoid copying. This will * always fail if growing an object, and the following run is already in use. */ static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero) { arena_chunk_t *chunk; arena_t *arena; if (oldsize == usize_max) { /* Current size class is compatible and maximal. 
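* Returning false reports success, i.e. no move is required.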
*/ return (false); } chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); arena = extent_node_arena_get(&chunk->node); if (oldsize < usize_max) { bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize, usize_min, usize_max, zero); if (config_fill && !ret && !zero) { if (unlikely(opt_junk_alloc)) { memset((void *)((uintptr_t)ptr + oldsize), 0xa5, isalloc(ptr, config_prof) - oldsize); } else if (unlikely(opt_zero)) { memset((void *)((uintptr_t)ptr + oldsize), 0, isalloc(ptr, config_prof) - oldsize); } } return (ret); } assert(oldsize > usize_max); /* Fill before shrinking in order to avoid a race. */ arena_ralloc_junk_large(ptr, oldsize, usize_max); arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max); return (false); } bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, bool zero) { size_t usize_min, usize_max; usize_min = s2u(size); usize_max = s2u(size + extra); if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) { /* * Avoid moving the allocation if the size class can be left the * same. */ if (oldsize <= SMALL_MAXCLASS) { assert(arena_bin_info[size2index(oldsize)].reg_size == oldsize); if ((usize_max <= SMALL_MAXCLASS && size2index(usize_max) == size2index(oldsize)) || (size <= oldsize && usize_max >= oldsize)) return (false); } else { if (usize_max > SMALL_MAXCLASS) { if (!arena_ralloc_large(ptr, oldsize, usize_min, usize_max, zero)) return (false); } } /* Reallocation would require a move. */ return (true); } else { return (huge_ralloc_no_move(ptr, oldsize, usize_min, usize_max, zero)); } } static void * arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache) { if (alignment == 0) return (arena_malloc(tsd, arena, usize, zero, tcache)); usize = sa2u(usize, alignment); if (usize == 0) return (NULL); return (ipalloct(tsd, usize, alignment, zero, tcache, arena)); } void * arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache) { void *ret; size_t usize; usize = s2u(size); if (usize == 0) return (NULL); if (likely(usize <= large_maxclass)) { size_t copysize; /* Try to avoid moving the allocation. */ if (!arena_ralloc_no_move(ptr, oldsize, usize, 0, zero)) return (ptr); /* * size and oldsize are different enough that we need to move * the object. In that case, fall back to allocating new space * and copying. */ ret = arena_ralloc_move_helper(tsd, arena, usize, alignment, zero, tcache); if (ret == NULL) return (NULL); /* * Junk/zero-filling were already done by * ipalloc()/arena_malloc(). */ copysize = (usize < oldsize) ? 
usize : oldsize; JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); memcpy(ret, ptr, copysize); isqalloc(tsd, ptr, oldsize, tcache); } else { ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment, zero, tcache); } return (ret); } dss_prec_t arena_dss_prec_get(arena_t *arena) { dss_prec_t ret; malloc_mutex_lock(&arena->lock); ret = arena->dss_prec; malloc_mutex_unlock(&arena->lock); return (ret); } bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) { if (!have_dss) return (dss_prec != dss_prec_disabled); malloc_mutex_lock(&arena->lock); arena->dss_prec = dss_prec; malloc_mutex_unlock(&arena->lock); return (false); } ssize_t arena_lg_dirty_mult_default_get(void) { return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default)); } bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult) { if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) return (true); atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult); return (false); } void arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult, size_t *nactive, size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats) { unsigned i; malloc_mutex_lock(&arena->lock); *dss = dss_prec_names[arena->dss_prec]; *lg_dirty_mult = arena->lg_dirty_mult; *nactive += arena->nactive; *ndirty += arena->ndirty; astats->mapped += arena->stats.mapped; astats->npurge += arena->stats.npurge; astats->nmadvise += arena->stats.nmadvise; astats->purged += arena->stats.purged; astats->metadata_mapped += arena->stats.metadata_mapped; astats->metadata_allocated += arena_metadata_allocated_get(arena); astats->allocated_large += arena->stats.allocated_large; astats->nmalloc_large += arena->stats.nmalloc_large; astats->ndalloc_large += arena->stats.ndalloc_large; astats->nrequests_large += arena->stats.nrequests_large; astats->allocated_huge += arena->stats.allocated_huge; astats->nmalloc_huge += arena->stats.nmalloc_huge; astats->ndalloc_huge += arena->stats.ndalloc_huge; for (i = 0; i < nlclasses; i++) { lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; lstats[i].ndalloc += arena->stats.lstats[i].ndalloc; lstats[i].nrequests += arena->stats.lstats[i].nrequests; lstats[i].curruns += arena->stats.lstats[i].curruns; } for (i = 0; i < nhclasses; i++) { hstats[i].nmalloc += arena->stats.hstats[i].nmalloc; hstats[i].ndalloc += arena->stats.hstats[i].ndalloc; hstats[i].curhchunks += arena->stats.hstats[i].curhchunks; } malloc_mutex_unlock(&arena->lock); for (i = 0; i < NBINS; i++) { arena_bin_t *bin = &arena->bins[i]; malloc_mutex_lock(&bin->lock); bstats[i].nmalloc += bin->stats.nmalloc; bstats[i].ndalloc += bin->stats.ndalloc; bstats[i].nrequests += bin->stats.nrequests; bstats[i].curregs += bin->stats.curregs; if (config_tcache) { bstats[i].nfills += bin->stats.nfills; bstats[i].nflushes += bin->stats.nflushes; } bstats[i].nruns += bin->stats.nruns; bstats[i].reruns += bin->stats.reruns; bstats[i].curruns += bin->stats.curruns; malloc_mutex_unlock(&bin->lock); } } arena_t * arena_new(unsigned ind) { arena_t *arena; unsigned i; arena_bin_t *bin; /* * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly * because there is no way to clean up if base_alloc() OOMs. 
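* The resulting layout is [arena_t | lstats | hstats], with lstats
* placed CACHELINE_CEILING(sizeof(arena_t)) bytes past the base, as
* computed below.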
*/ if (config_stats) { arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t)) + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) + nhclasses) * sizeof(malloc_huge_stats_t)); } else arena = (arena_t *)base_alloc(sizeof(arena_t)); if (arena == NULL) return (NULL); arena->ind = ind; arena->nthreads = 0; if (malloc_mutex_init(&arena->lock)) return (NULL); if (config_stats) { memset(&arena->stats, 0, sizeof(arena_stats_t)); arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena + CACHELINE_CEILING(sizeof(arena_t))); memset(arena->stats.lstats, 0, nlclasses * sizeof(malloc_large_stats_t)); arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena + CACHELINE_CEILING(sizeof(arena_t)) + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t))); memset(arena->stats.hstats, 0, nhclasses * sizeof(malloc_huge_stats_t)); if (config_tcache) ql_new(&arena->tcache_ql); } if (config_prof) arena->prof_accumbytes = 0; if (config_cache_oblivious) { /* * A nondeterministic seed based on the address of arena reduces * the likelihood of lockstep non-uniform cache index * utilization among identical concurrent processes, but at the * cost of test repeatability. For debug builds, instead use a * deterministic seed. */ arena->offset_state = config_debug ? ind : (uint64_t)(uintptr_t)arena; } arena->dss_prec = chunk_dss_prec_get(); arena->spare = NULL; arena->lg_dirty_mult = arena_lg_dirty_mult_default_get(); arena->purging = false; arena->nactive = 0; arena->ndirty = 0; arena_avail_tree_new(&arena->runs_avail); qr_new(&arena->runs_dirty, rd_link); qr_new(&arena->chunks_cache, cc_link); ql_new(&arena->huge); if (malloc_mutex_init(&arena->huge_mtx)) return (NULL); extent_tree_szad_new(&arena->chunks_szad_cached); extent_tree_ad_new(&arena->chunks_ad_cached); extent_tree_szad_new(&arena->chunks_szad_retained); extent_tree_ad_new(&arena->chunks_ad_retained); if (malloc_mutex_init(&arena->chunks_mtx)) return (NULL); ql_new(&arena->node_cache); if (malloc_mutex_init(&arena->node_cache_mtx)) return (NULL); arena->chunk_hooks = chunk_hooks_default; /* Initialize bins. */ for (i = 0; i < NBINS; i++) { bin = &arena->bins[i]; if (malloc_mutex_init(&bin->lock)) return (NULL); bin->runcur = NULL; arena_run_tree_new(&bin->runs); if (config_stats) memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); } return (arena); } /* * Calculate bin_info->run_size such that it meets the following constraints: * * *) bin_info->run_size <= arena_maxrun * *) bin_info->nregs <= RUN_MAXREGS * * bin_info->nregs and bin_info->reg0_offset are also calculated here, since * these settings are all interdependent. */ static void bin_info_run_size_calc(arena_bin_info_t *bin_info) { size_t pad_size; size_t try_run_size, perfect_run_size, actual_run_size; uint32_t try_nregs, perfect_nregs, actual_nregs; /* * Determine redzone size based on minimum alignment and minimum * redzone size. Add padding to the end of the run if it is needed to * align the regions. The padding allows each redzone to be half the * minimum alignment; without the padding, each redzone would have to * be twice as large in order to maintain alignment. 
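* For example (illustrative): reg_size == 96 yields align_min == 32,
* hence redzone_size == 16, pad_size == 16, and reg_interval ==
* 96 + 2*16 == 128.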
*/ if (config_fill && unlikely(opt_redzone)) { size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) - 1); if (align_min <= REDZONE_MINSIZE) { bin_info->redzone_size = REDZONE_MINSIZE; pad_size = 0; } else { bin_info->redzone_size = align_min >> 1; pad_size = bin_info->redzone_size; } } else { bin_info->redzone_size = 0; pad_size = 0; } bin_info->reg_interval = bin_info->reg_size + (bin_info->redzone_size << 1); /* * Compute run size under ideal conditions (no redzones, no limit on run * size). */ try_run_size = PAGE; try_nregs = try_run_size / bin_info->reg_size; do { perfect_run_size = try_run_size; perfect_nregs = try_nregs; try_run_size += PAGE; try_nregs = try_run_size / bin_info->reg_size; } while (perfect_run_size != perfect_nregs * bin_info->reg_size); assert(perfect_nregs <= RUN_MAXREGS); actual_run_size = perfect_run_size; actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval; /* * Redzones can require enough padding that not even a single region can * fit within the number of pages that would normally be dedicated to a * run for this size class. Increase the run size until at least one * region fits. */ while (actual_nregs == 0) { assert(config_fill && unlikely(opt_redzone)); actual_run_size += PAGE; actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval; } /* * Make sure that the run will fit within an arena chunk. */ while (actual_run_size > arena_maxrun) { actual_run_size -= PAGE; actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval; } assert(actual_nregs > 0); assert(actual_run_size == s2u(actual_run_size)); /* Copy final settings. */ bin_info->run_size = actual_run_size; bin_info->nregs = actual_nregs; bin_info->reg0_offset = actual_run_size - (actual_nregs * bin_info->reg_interval) - pad_size + bin_info->redzone_size; if (actual_run_size > small_maxrun) small_maxrun = actual_run_size; assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs * bin_info->reg_interval) + pad_size == bin_info->run_size); } static void bin_info_init(void) { arena_bin_info_t *bin_info; #define BIN_INFO_INIT_bin_yes(index, size) \ bin_info = &arena_bin_info[index]; \ bin_info->reg_size = size; \ bin_info_run_size_calc(bin_info); \ bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); #define BIN_INFO_INIT_bin_no(index, size) #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) SIZE_CLASSES #undef BIN_INFO_INIT_bin_yes #undef BIN_INFO_INIT_bin_no #undef SC } static bool small_run_size_init(void) { assert(small_maxrun != 0); small_run_tab = (bool *)base_alloc(sizeof(bool) * (small_maxrun >> LG_PAGE)); if (small_run_tab == NULL) return (true); #define TAB_INIT_bin_yes(index, size) { \ arena_bin_info_t *bin_info = &arena_bin_info[index]; \ small_run_tab[bin_info->run_size >> LG_PAGE] = true; \ } #define TAB_INIT_bin_no(index, size) #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) SIZE_CLASSES #undef TAB_INIT_bin_yes #undef TAB_INIT_bin_no #undef SC return (false); } bool arena_boot(void) { unsigned i; arena_lg_dirty_mult_default_set(opt_lg_dirty_mult); /* * Compute the header size such that it is large enough to contain the * page map. The page map is biased to omit entries for the header * itself, so some iteration is necessary to compute the map bias. * * 1) Compute safe header_size and map_bias values that include enough * space for an unbiased page map. * 2) Refine map_bias based on (1) to omit the header pages in the page * map. The resulting map_bias may be one too small. * 3) Refine map_bias based on (2). The result will be >= the result * from (2), and will always be correct. */ map_bias = 0; for (i = 0; i < 3; i++) { size_t header_size = offsetof(arena_chunk_t, map_bits) + ((sizeof(arena_chunk_map_bits_t) + sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias)); map_bias = (header_size + PAGE_MASK) >> LG_PAGE; } assert(map_bias > 0); map_misc_offset = offsetof(arena_chunk_t, map_bits) + sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias); arena_maxrun = chunksize - (map_bias << LG_PAGE); assert(arena_maxrun > 0); large_maxclass = index2size(size2index(chunksize)-1); if (large_maxclass > arena_maxrun) { /* * For small chunk sizes it's possible for there to be fewer * non-header pages available than are necessary to serve the * size classes just below chunksize. 
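* (large_maxclass starts as the largest size class strictly below
* chunksize; the clamp that follows caps it at arena_maxrun, the
* usable non-header span of a chunk.)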
*/ large_maxclass = arena_maxrun; } assert(large_maxclass > 0); nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS); nhclasses = NSIZES - nlclasses - NBINS; bin_info_init(); return (small_run_size_init()); } void arena_prefork(arena_t *arena) { unsigned i; malloc_mutex_prefork(&arena->lock); malloc_mutex_prefork(&arena->huge_mtx); malloc_mutex_prefork(&arena->chunks_mtx); malloc_mutex_prefork(&arena->node_cache_mtx); for (i = 0; i < NBINS; i++) malloc_mutex_prefork(&arena->bins[i].lock); } void arena_postfork_parent(arena_t *arena) { unsigned i; for (i = 0; i < NBINS; i++) malloc_mutex_postfork_parent(&arena->bins[i].lock); malloc_mutex_postfork_parent(&arena->node_cache_mtx); malloc_mutex_postfork_parent(&arena->chunks_mtx); malloc_mutex_postfork_parent(&arena->huge_mtx); malloc_mutex_postfork_parent(&arena->lock); } void arena_postfork_child(arena_t *arena) { unsigned i; for (i = 0; i < NBINS; i++) malloc_mutex_postfork_child(&arena->bins[i].lock); malloc_mutex_postfork_child(&arena->node_cache_mtx); malloc_mutex_postfork_child(&arena->chunks_mtx); malloc_mutex_postfork_child(&arena->huge_mtx); malloc_mutex_postfork_child(&arena->lock); } disque-1.0-rc1/deps/jemalloc/src/atomic.c000066400000000000000000000001141264176561100202640ustar00rootroot00000000000000#define JEMALLOC_ATOMIC_C_ #include "jemalloc/internal/jemalloc_internal.h" disque-1.0-rc1/deps/jemalloc/src/base.c000066400000000000000000000076201264176561100177330ustar00rootroot00000000000000#define JEMALLOC_BASE_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ /* Data. */ static malloc_mutex_t base_mtx; static extent_tree_t base_avail_szad; static extent_node_t *base_nodes; static size_t base_allocated; static size_t base_resident; static size_t base_mapped; /******************************************************************************/ /* base_mtx must be held. */ static extent_node_t * base_node_try_alloc(void) { extent_node_t *node; if (base_nodes == NULL) return (NULL); node = base_nodes; base_nodes = *(extent_node_t **)node; JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t)); return (node); } /* base_mtx must be held. */ static void base_node_dalloc(extent_node_t *node) { JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t)); *(extent_node_t **)node = base_nodes; base_nodes = node; } /* base_mtx must be held. */ static extent_node_t * base_chunk_alloc(size_t minsize) { extent_node_t *node; size_t csize, nsize; void *addr; assert(minsize != 0); node = base_node_try_alloc(); /* Allocate enough space to also carve a node out if necessary. */ nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0; csize = CHUNK_CEILING(minsize + nsize); addr = chunk_alloc_base(csize); if (addr == NULL) { if (node != NULL) base_node_dalloc(node); return (NULL); } base_mapped += csize; if (node == NULL) { node = (extent_node_t *)addr; addr = (void *)((uintptr_t)addr + nsize); csize -= nsize; if (config_stats) { base_allocated += nsize; base_resident += PAGE_CEILING(nsize); } } extent_node_init(node, NULL, addr, csize, true, true); return (node); } /* * base_alloc() guarantees demand-zeroed memory, in order to make multi-page * sparse data structures such as radix tree nodes efficient with respect to * physical memory usage. 
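* (Demand-zeroed pages consume physical memory only once they are
* touched, so a mostly-empty node stays mostly virtual.)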
*/ void * base_alloc(size_t size) { void *ret; size_t csize, usize; extent_node_t *node; extent_node_t key; /* * Round size up to nearest multiple of the cacheline size, so that * there is no chance of false cache line sharing. */ csize = CACHELINE_CEILING(size); usize = s2u(csize); extent_node_init(&key, NULL, NULL, usize, false, false); malloc_mutex_lock(&base_mtx); node = extent_tree_szad_nsearch(&base_avail_szad, &key); if (node != NULL) { /* Use existing space. */ extent_tree_szad_remove(&base_avail_szad, node); } else { /* Try to allocate more space. */ node = base_chunk_alloc(csize); } if (node == NULL) { ret = NULL; goto label_return; } ret = extent_node_addr_get(node); if (extent_node_size_get(node) > csize) { extent_node_addr_set(node, (void *)((uintptr_t)ret + csize)); extent_node_size_set(node, extent_node_size_get(node) - csize); extent_tree_szad_insert(&base_avail_szad, node); } else base_node_dalloc(node); if (config_stats) { base_allocated += csize; /* * Add one PAGE to base_resident for every page boundary that is * crossed by the new allocation. */ base_resident += PAGE_CEILING((uintptr_t)ret + csize) - PAGE_CEILING((uintptr_t)ret); } JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize); label_return: malloc_mutex_unlock(&base_mtx); return (ret); } void base_stats_get(size_t *allocated, size_t *resident, size_t *mapped) { malloc_mutex_lock(&base_mtx); assert(base_allocated <= base_resident); assert(base_resident <= base_mapped); *allocated = base_allocated; *resident = base_resident; *mapped = base_mapped; malloc_mutex_unlock(&base_mtx); } bool base_boot(void) { if (malloc_mutex_init(&base_mtx)) return (true); extent_tree_szad_new(&base_avail_szad); base_nodes = NULL; return (false); } void base_prefork(void) { malloc_mutex_prefork(&base_mtx); } void base_postfork_parent(void) { malloc_mutex_postfork_parent(&base_mtx); } void base_postfork_child(void) { malloc_mutex_postfork_child(&base_mtx); } disque-1.0-rc1/deps/jemalloc/src/bitmap.c000066400000000000000000000043431264176561100202740ustar00rootroot00000000000000#define JEMALLOC_BITMAP_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ void bitmap_info_init(bitmap_info_t *binfo, size_t nbits) { unsigned i; size_t group_count; assert(nbits > 0); assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); /* * Compute the number of groups necessary to store nbits bits, and * progressively work upward through the levels until reaching a level * that requires only one group. */ binfo->levels[0].group_offset = 0; group_count = BITMAP_BITS2GROUPS(nbits); for (i = 1; group_count > 1; i++) { assert(i < BITMAP_MAX_LEVELS); binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + group_count; group_count = BITMAP_BITS2GROUPS(group_count); } binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + group_count; assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX); binfo->nlevels = i; binfo->nbits = nbits; } size_t bitmap_info_ngroups(const bitmap_info_t *binfo) { return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP); } size_t bitmap_size(size_t nbits) { bitmap_info_t binfo; bitmap_info_init(&binfo, nbits); return (bitmap_info_ngroups(&binfo)); } void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) { size_t extra; unsigned i; /* * Bits are actually inverted with regard to the external bitmap * interface, so the bitmap starts out with all 1 bits, except for * trailing unused bits (if any). 
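* For example (illustrative, with 64-bit groups): for nbits == 70 the
* last level-0 group has extra == 58 unused bits, so it is shifted
* right to leave only its low 6 usable bits set.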
Note that each group uses bit 0 to * correspond to the first logical bit in the group, so extra bits * are the most significant bits of the last group. */ memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP); extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; if (extra != 0) bitmap[binfo->levels[1].group_offset - 1] >>= extra; for (i = 1; i < binfo->nlevels; i++) { size_t group_count = binfo->levels[i].group_offset - binfo->levels[i-1].group_offset; extra = (BITMAP_GROUP_NBITS - (group_count & BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; if (extra != 0) bitmap[binfo->levels[i+1].group_offset - 1] >>= extra; } } disque-1.0-rc1/deps/jemalloc/src/chunk.c000066400000000000000000000521001264176561100201220ustar00rootroot00000000000000#define JEMALLOC_CHUNK_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ /* Data. */ const char *opt_dss = DSS_DEFAULT; size_t opt_lg_chunk = 0; /* Used exclusively for gdump triggering. */ static size_t curchunks; static size_t highchunks; rtree_t chunks_rtree; /* Various chunk-related settings. */ size_t chunksize; size_t chunksize_mask; /* (chunksize - 1). */ size_t chunk_npages; static void *chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, unsigned arena_ind); static bool chunk_dalloc_default(void *chunk, size_t size, bool committed, unsigned arena_ind); static bool chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length, unsigned arena_ind); static bool chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length, unsigned arena_ind); static bool chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length, unsigned arena_ind); static bool chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b, bool committed, unsigned arena_ind); static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, bool committed, unsigned arena_ind); const chunk_hooks_t chunk_hooks_default = { chunk_alloc_default, chunk_dalloc_default, chunk_commit_default, chunk_decommit_default, chunk_purge_default, chunk_split_default, chunk_merge_default }; /******************************************************************************/ /* * Function prototypes for static functions that are referenced prior to * definition. */ static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, bool zeroed, bool committed); /******************************************************************************/ static chunk_hooks_t chunk_hooks_get_locked(arena_t *arena) { return (arena->chunk_hooks); } chunk_hooks_t chunk_hooks_get(arena_t *arena) { chunk_hooks_t chunk_hooks; malloc_mutex_lock(&arena->chunks_mtx); chunk_hooks = chunk_hooks_get_locked(arena); malloc_mutex_unlock(&arena->chunks_mtx); return (chunk_hooks); } chunk_hooks_t chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks) { chunk_hooks_t old_chunk_hooks; malloc_mutex_lock(&arena->chunks_mtx); old_chunk_hooks = arena->chunk_hooks; /* * Copy each field atomically so that it is impossible for readers to * see partially updated pointers. 
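* (Each hook is a single pointer-sized word, so atomic_write_p()
* guarantees a reader observes either the old or the new function
* pointer, never a torn mixture.)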
There are places where readers only * need one hook function pointer (therefore no need to copy the * entirety of arena->chunk_hooks), and stale reads do not affect * correctness, so they perform unlocked reads. */ #define ATOMIC_COPY_HOOK(n) do { \ union { \ chunk_##n##_t **n; \ void **v; \ } u; \ u.n = &arena->chunk_hooks.n; \ atomic_write_p(u.v, chunk_hooks->n); \ } while (0) ATOMIC_COPY_HOOK(alloc); ATOMIC_COPY_HOOK(dalloc); ATOMIC_COPY_HOOK(commit); ATOMIC_COPY_HOOK(decommit); ATOMIC_COPY_HOOK(purge); ATOMIC_COPY_HOOK(split); ATOMIC_COPY_HOOK(merge); #undef ATOMIC_COPY_HOOK malloc_mutex_unlock(&arena->chunks_mtx); return (old_chunk_hooks); } static void chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks, bool locked) { static const chunk_hooks_t uninitialized_hooks = CHUNK_HOOKS_INITIALIZER; if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) == 0) { *chunk_hooks = locked ? chunk_hooks_get_locked(arena) : chunk_hooks_get(arena); } } static void chunk_hooks_assure_initialized_locked(arena_t *arena, chunk_hooks_t *chunk_hooks) { chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true); } static void chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks) { chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false); } bool chunk_register(const void *chunk, const extent_node_t *node) { assert(extent_node_addr_get(node) == chunk); if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node)) return (true); if (config_prof && opt_prof) { size_t size = extent_node_size_get(node); size_t nadd = (size == 0) ? 1 : size / chunksize; size_t cur = atomic_add_z(&curchunks, nadd); size_t high = atomic_read_z(&highchunks); while (cur > high && atomic_cas_z(&highchunks, high, cur)) { /* * Don't refresh cur, because it may have decreased * since this thread lost the highchunks update race. */ high = atomic_read_z(&highchunks); } if (cur > high && prof_gdump_get_unlocked()) prof_gdump(); } return (false); } void chunk_deregister(const void *chunk, const extent_node_t *node) { bool err; err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL); assert(!err); if (config_prof && opt_prof) { size_t size = extent_node_size_get(node); size_t nsub = (size == 0) ? 1 : size / chunksize; assert(atomic_read_z(&curchunks) >= nsub); atomic_sub_z(&curchunks, nsub); } } /* * Do first-best-fit chunk selection, i.e. select the lowest chunk that best * fits. */ static extent_node_t * chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size) { extent_node_t key; assert(size == CHUNK_CEILING(size)); extent_node_init(&key, arena, NULL, size, false, false); return (extent_tree_szad_nsearch(chunks_szad, &key)); } static void * chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, bool dalloc_node) { void *ret; extent_node_t *node; size_t alloc_size, leadsize, trailsize; bool zeroed, committed; assert(new_addr == NULL || alignment == chunksize); /* * Cached chunks use the node linkage embedded in their headers, in * which case dalloc_node is true, and new_addr is non-NULL because * we're operating on a specific chunk. */ assert(dalloc_node || new_addr != NULL); alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize)); /* Beware size_t wrap-around. 
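chunk_register() above maintains the highchunks high-water mark with a compare-and-swap retry loop rather than a lock. A sketch of the same monotonic-maximum idiom written against C11 atomics (jemalloc uses its own atomic_cas_z wrapper, so this illustrates the technique, not its API):

#include <stdatomic.h>
#include <stddef.h>

/* Raise *high to cur if cur is larger, tolerating concurrent writers.
 * On CAS failure, old is refreshed with the current value and the
 * cur > old test decides whether to retry. */
static void
sketch_update_high_water(_Atomic size_t *high, size_t cur)
{
    size_t old = atomic_load(high);

    while (cur > old &&
        !atomic_compare_exchange_weak(high, &old, cur))
        ; /* Retry with the refreshed old. */
}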
*/ if (alloc_size < size) return (NULL); malloc_mutex_lock(&arena->chunks_mtx); chunk_hooks_assure_initialized_locked(arena, chunk_hooks); if (new_addr != NULL) { extent_node_t key; extent_node_init(&key, arena, new_addr, alloc_size, false, false); node = extent_tree_ad_search(chunks_ad, &key); } else { node = chunk_first_best_fit(arena, chunks_szad, chunks_ad, alloc_size); } if (node == NULL || (new_addr != NULL && extent_node_size_get(node) < size)) { malloc_mutex_unlock(&arena->chunks_mtx); return (NULL); } leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node), alignment) - (uintptr_t)extent_node_addr_get(node); assert(new_addr == NULL || leadsize == 0); assert(extent_node_size_get(node) >= leadsize + size); trailsize = extent_node_size_get(node) - leadsize - size; ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize); zeroed = extent_node_zeroed_get(node); if (zeroed) *zero = true; committed = extent_node_committed_get(node); if (committed) *commit = true; /* Split the lead. */ if (leadsize != 0 && chunk_hooks->split(extent_node_addr_get(node), extent_node_size_get(node), leadsize, size, false, arena->ind)) { malloc_mutex_unlock(&arena->chunks_mtx); return (NULL); } /* Remove node from the tree. */ extent_tree_szad_remove(chunks_szad, node); extent_tree_ad_remove(chunks_ad, node); arena_chunk_cache_maybe_remove(arena, node, cache); if (leadsize != 0) { /* Insert the leading space as a smaller chunk. */ extent_node_size_set(node, leadsize); extent_tree_szad_insert(chunks_szad, node); extent_tree_ad_insert(chunks_ad, node); arena_chunk_cache_maybe_insert(arena, node, cache); node = NULL; } if (trailsize != 0) { /* Split the trail. */ if (chunk_hooks->split(ret, size + trailsize, size, trailsize, false, arena->ind)) { if (dalloc_node && node != NULL) arena_node_dalloc(arena, node); malloc_mutex_unlock(&arena->chunks_mtx); chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache, ret, size + trailsize, zeroed, committed); return (NULL); } /* Insert the trailing space as a smaller chunk. */ if (node == NULL) { node = arena_node_alloc(arena); if (node == NULL) { malloc_mutex_unlock(&arena->chunks_mtx); chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache, ret, size + trailsize, zeroed, committed); return (NULL); } } extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size), trailsize, zeroed, committed); extent_tree_szad_insert(chunks_szad, node); extent_tree_ad_insert(chunks_ad, node); arena_chunk_cache_maybe_insert(arena, node, cache); node = NULL; } if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) { malloc_mutex_unlock(&arena->chunks_mtx); chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache, ret, size, zeroed, committed); return (NULL); } malloc_mutex_unlock(&arena->chunks_mtx); assert(dalloc_node || node != NULL); if (dalloc_node && node != NULL) arena_node_dalloc(arena, node); if (*zero) { if (!zeroed) memset(ret, 0, size); else if (config_debug) { size_t i; size_t *p = (size_t *)(uintptr_t)ret; JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size); for (i = 0; i < size / sizeof(size_t); i++) assert(p[i] == 0); } } return (ret); } /* * If the caller specifies (!*zero), it is still possible to receive zeroed * memory, in which case *zero is toggled to true. arena_chunk_alloc() takes * advantage of this to avoid demanding zeroed chunks, but taking advantage of * them if they are returned. 
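The splitting logic in chunk_recycle() above boils down to carving an aligned [ret, ret+size) range out of a larger extent and accounting for the leftover lead and trail. A standalone sketch of that carving arithmetic (hypothetical names; alignment is assumed to be a power of two):

#include <stddef.h>
#include <stdint.h>

/* Given an extent [addr, addr+extsize), compute where an aligned
 * allocation of `size` bytes would sit and how much lead/trail space
 * would be left over for reinsertion into the trees. */
static void *
sketch_carve(void *addr, size_t extsize, size_t size, size_t alignment,
    size_t *leadsize, size_t *trailsize)
{
    uintptr_t base = (uintptr_t)addr;
    uintptr_t ret = (base + (alignment - 1)) &
        ~((uintptr_t)alignment - 1);

    *leadsize = (size_t)(ret - base);
    if (extsize < *leadsize + size)
        return (NULL); /* Extent too small once aligned. */
    *trailsize = extsize - *leadsize - size;
    return ((void *)ret);
}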
*/ static void * chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) { void *ret; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; assert(size != 0); assert((size & chunksize_mask) == 0); assert(alignment != 0); assert((alignment & chunksize_mask) == 0); /* Retained. */ if ((ret = chunk_recycle(arena, &chunk_hooks, &arena->chunks_szad_retained, &arena->chunks_ad_retained, false, new_addr, size, alignment, zero, commit, true)) != NULL) return (ret); /* "primary" dss. */ if (have_dss && dss_prec == dss_prec_primary && (ret = chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) != NULL) return (ret); /* * mmap. Requesting an address is not implemented for * chunk_alloc_mmap(), so only call it if (new_addr == NULL). */ if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero, commit)) != NULL) return (ret); /* "secondary" dss. */ if (have_dss && dss_prec == dss_prec_secondary && (ret = chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) != NULL) return (ret); /* All strategies for allocation failed. */ return (NULL); } void * chunk_alloc_base(size_t size) { void *ret; bool zero, commit; /* * Directly call chunk_alloc_mmap() rather than chunk_alloc_core() * because it's critical that chunk_alloc_base() return untouched * demand-zeroed virtual memory. */ zero = true; commit = true; ret = chunk_alloc_mmap(size, chunksize, &zero, &commit); if (ret == NULL) return (NULL); if (config_valgrind) JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); return (ret); } void * chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, bool *zero, bool dalloc_node) { void *ret; bool commit; assert(size != 0); assert((size & chunksize_mask) == 0); assert(alignment != 0); assert((alignment & chunksize_mask) == 0); commit = true; ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached, &arena->chunks_ad_cached, true, new_addr, size, alignment, zero, &commit, dalloc_node); if (ret == NULL) return (NULL); assert(commit); if (config_valgrind) JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); return (ret); } static arena_t * chunk_arena_get(unsigned arena_ind) { arena_t *arena; /* Dodge tsd for a0 in order to avoid bootstrapping issues. */ arena = (arena_ind == 0) ? a0get() : arena_get(tsd_fetch(), arena_ind, false, true); /* * The arena we're allocating on behalf of must have been initialized * already. 
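chunk_alloc_core() above is a strict priority chain: retained chunks, then "primary" dss, then mmap, then "secondary" dss, returning the first success. That control flow reduces to a first-success fallback loop, sketched here with a hypothetical function-pointer table:

#include <stddef.h>

typedef void *(sketch_alloc_fn)(size_t size);

/* Try each allocator in priority order; NULL means every strategy
 * failed, exactly the terminal case in chunk_alloc_core(). */
static void *
sketch_alloc_chain(sketch_alloc_fn **fns, size_t nfns, size_t size)
{
    size_t i;

    for (i = 0; i < nfns; i++) {
        void *ret = fns[i](size);

        if (ret != NULL)
            return (ret);
    }
    return (NULL);
}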
*/ assert(arena != NULL); return (arena); } static void * chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, unsigned arena_ind) { void *ret; arena_t *arena; arena = chunk_arena_get(arena_ind); ret = chunk_alloc_core(arena, new_addr, size, alignment, zero, commit, arena->dss_prec); if (ret == NULL) return (NULL); if (config_valgrind) JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); return (ret); } void * chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit) { void *ret; chunk_hooks_assure_initialized(arena, chunk_hooks); ret = chunk_hooks->alloc(new_addr, size, alignment, zero, commit, arena->ind); if (ret == NULL) return (NULL); if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default) JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize); return (ret); } static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, bool zeroed, bool committed) { bool unzeroed; extent_node_t *node, *prev; extent_node_t key; assert(!cache || !zeroed); unzeroed = cache || !zeroed; JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size); malloc_mutex_lock(&arena->chunks_mtx); chunk_hooks_assure_initialized_locked(arena, chunk_hooks); extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, false, false); node = extent_tree_ad_nsearch(chunks_ad, &key); /* Try to coalesce forward. */ if (node != NULL && extent_node_addr_get(node) == extent_node_addr_get(&key) && extent_node_committed_get(node) == committed && !chunk_hooks->merge(chunk, size, extent_node_addr_get(node), extent_node_size_get(node), false, arena->ind)) { /* * Coalesce chunk with the following address range. This does * not change the position within chunks_ad, so only * remove/insert from/into chunks_szad. */ extent_tree_szad_remove(chunks_szad, node); arena_chunk_cache_maybe_remove(arena, node, cache); extent_node_addr_set(node, chunk); extent_node_size_set(node, size + extent_node_size_get(node)); extent_node_zeroed_set(node, extent_node_zeroed_get(node) && !unzeroed); extent_tree_szad_insert(chunks_szad, node); arena_chunk_cache_maybe_insert(arena, node, cache); } else { /* Coalescing forward failed, so insert a new node. */ node = arena_node_alloc(arena); if (node == NULL) { /* * Node allocation failed, which is an exceedingly * unlikely failure. Leak chunk after making sure its * pages have already been purged, so that this is only * a virtual memory leak. */ if (cache) { chunk_purge_wrapper(arena, chunk_hooks, chunk, size, 0, size); } goto label_return; } extent_node_init(node, arena, chunk, size, !unzeroed, committed); extent_tree_ad_insert(chunks_ad, node); extent_tree_szad_insert(chunks_szad, node); arena_chunk_cache_maybe_insert(arena, node, cache); } /* Try to coalesce backward. */ prev = extent_tree_ad_prev(chunks_ad, node); if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) + extent_node_size_get(prev)) == chunk && extent_node_committed_get(prev) == committed && !chunk_hooks->merge(extent_node_addr_get(prev), extent_node_size_get(prev), chunk, size, false, arena->ind)) { /* * Coalesce chunk with the previous address range. This does * not change the position within chunks_ad, so only * remove/insert node from/into chunks_szad. 
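Both coalescing branches in chunk_record() above hinge on one adjacency test: a free range can merge with a neighbor only when one ends exactly where the other begins (plus matching committed state). The test in isolation (hypothetical name):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* True iff [a_addr, a_addr+a_size) ends exactly at b_addr, the
 * precondition behind chunk_record()'s forward and backward merges. */
static bool
sketch_adjacent(void *a_addr, size_t a_size, void *b_addr)
{
    return ((uintptr_t)a_addr + a_size == (uintptr_t)b_addr);
}

When the test passes, the merged node keeps the lower address, the summed size, and is zeroed only if both inputs were.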
*/ extent_tree_szad_remove(chunks_szad, prev); extent_tree_ad_remove(chunks_ad, prev); arena_chunk_cache_maybe_remove(arena, prev, cache); extent_tree_szad_remove(chunks_szad, node); arena_chunk_cache_maybe_remove(arena, node, cache); extent_node_addr_set(node, extent_node_addr_get(prev)); extent_node_size_set(node, extent_node_size_get(prev) + extent_node_size_get(node)); extent_node_zeroed_set(node, extent_node_zeroed_get(prev) && extent_node_zeroed_get(node)); extent_tree_szad_insert(chunks_szad, node); arena_chunk_cache_maybe_insert(arena, node, cache); arena_node_dalloc(arena, prev); } label_return: malloc_mutex_unlock(&arena->chunks_mtx); } void chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed) { assert(chunk != NULL); assert(CHUNK_ADDR2BASE(chunk) == chunk); assert(size != 0); assert((size & chunksize_mask) == 0); chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached, &arena->chunks_ad_cached, true, chunk, size, false, committed); arena_maybe_purge(arena); } void chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool zeroed, bool committed) { assert(chunk != NULL); assert(CHUNK_ADDR2BASE(chunk) == chunk); assert(size != 0); assert((size & chunksize_mask) == 0); chunk_hooks_assure_initialized(arena, chunk_hooks); /* Try to deallocate. */ if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind)) return; /* Try to decommit; purge if that fails. */ if (committed) { committed = chunk_hooks->decommit(chunk, size, 0, size, arena->ind); } zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size, arena->ind); chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained, &arena->chunks_ad_retained, false, chunk, size, zeroed, committed); } static bool chunk_dalloc_default(void *chunk, size_t size, bool committed, unsigned arena_ind) { if (!have_dss || !chunk_in_dss(chunk)) return (chunk_dalloc_mmap(chunk, size)); return (true); } void chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed) { chunk_hooks_assure_initialized(arena, chunk_hooks); chunk_hooks->dalloc(chunk, size, committed, arena->ind); if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default) JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size); } static bool chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length, unsigned arena_ind) { return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset), length)); } static bool chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length, unsigned arena_ind) { return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset), length)); } bool chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length) { assert(chunk != NULL); assert(CHUNK_ADDR2BASE(chunk) == chunk); assert((offset & PAGE_MASK) == 0); assert(length != 0); assert((length & PAGE_MASK) == 0); return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset), length)); } static bool chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length, unsigned arena_ind) { return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset, length)); } bool chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset, size_t length) { chunk_hooks_assure_initialized(arena, chunk_hooks); return (chunk_hooks->purge(chunk, size, offset, length, arena->ind)); } static bool chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b, 
bool committed, unsigned arena_ind) { if (!maps_coalesce) return (true); return (false); } static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, bool committed, unsigned arena_ind) { if (!maps_coalesce) return (true); if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b)) return (true); return (false); } static rtree_node_elm_t * chunks_rtree_node_alloc(size_t nelms) { return ((rtree_node_elm_t *)base_alloc(nelms * sizeof(rtree_node_elm_t))); } bool chunk_boot(void) { #ifdef _WIN32 SYSTEM_INFO info; GetSystemInfo(&info); /* * Verify actual page size is equal to or an integral multiple of * configured page size. */ if (info.dwPageSize & ((1U << LG_PAGE) - 1)) return (true); /* * Configure chunksize (if not set) to match granularity (usually 64K), * so pages_map will always take fast path. */ if (!opt_lg_chunk) { opt_lg_chunk = jemalloc_ffs((int)info.dwAllocationGranularity) - 1; } #else if (!opt_lg_chunk) opt_lg_chunk = LG_CHUNK_DEFAULT; #endif /* Set variables according to the value of opt_lg_chunk. */ chunksize = (ZU(1) << opt_lg_chunk); assert(chunksize >= PAGE); chunksize_mask = chunksize - 1; chunk_npages = (chunksize >> LG_PAGE); if (have_dss && chunk_dss_boot()) return (true); if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk, chunks_rtree_node_alloc, NULL)) return (true); return (false); } void chunk_prefork(void) { chunk_dss_prefork(); } void chunk_postfork_parent(void) { chunk_dss_postfork_parent(); } void chunk_postfork_child(void) { chunk_dss_postfork_child(); } disque-1.0-rc1/deps/jemalloc/src/chunk_dss.c000066400000000000000000000111311264176561100207720ustar00rootroot00000000000000#define JEMALLOC_CHUNK_DSS_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ /* Data. */ const char *dss_prec_names[] = { "disabled", "primary", "secondary", "N/A" }; /* Current dss precedence default, used when creating new arenas. */ static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT; /* * Protects sbrk() calls. This avoids malloc races among threads, though it * does not protect against races with threads that call sbrk() directly. */ static malloc_mutex_t dss_mtx; /* Base address of the DSS. */ static void *dss_base; /* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */ static void *dss_prev; /* Current upper limit on DSS addresses. */ static void *dss_max; /******************************************************************************/ static void * chunk_dss_sbrk(intptr_t increment) { #ifdef JEMALLOC_DSS return (sbrk(increment)); #else not_implemented(); return (NULL); #endif } dss_prec_t chunk_dss_prec_get(void) { dss_prec_t ret; if (!have_dss) return (dss_prec_disabled); malloc_mutex_lock(&dss_mtx); ret = dss_prec_default; malloc_mutex_unlock(&dss_mtx); return (ret); } bool chunk_dss_prec_set(dss_prec_t dss_prec) { if (!have_dss) return (dss_prec != dss_prec_disabled); malloc_mutex_lock(&dss_mtx); dss_prec_default = dss_prec; malloc_mutex_unlock(&dss_mtx); return (false); } void * chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit) { cassert(have_dss); assert(size > 0 && (size & chunksize_mask) == 0); assert(alignment > 0 && (alignment & chunksize_mask) == 0); /* * sbrk() uses a signed increment argument, so take care not to * interpret a huge allocation request as a negative increment. 
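The increment computed by chunk_alloc_dss() above has three parts: a gap to chunk-align the current break, an optional chunk-aligned pad to satisfy a larger alignment, and the request itself. A sketch of that arithmetic (hypothetical name; chunksize and alignment are assumed powers of two with alignment a multiple of chunksize, as the caller guarantees):

#include <stddef.h>
#include <stdint.h>

static intptr_t
sketch_dss_incr(uintptr_t dss_max, size_t size, size_t alignment,
    size_t chunksize)
{
    /* Padding needed to chunk-align the end of the DSS. */
    size_t gap_size = (chunksize - (dss_max & (chunksize - 1))) &
        (chunksize - 1);
    /* First sufficiently aligned address at or above the break. */
    uintptr_t ret = (dss_max + (alignment - 1)) &
        ~((uintptr_t)alignment - 1);
    /* Chunk-aligned pad between the gap and ret; recyclable later. */
    size_t cpad_size = (size_t)(ret - (dss_max + gap_size));

    return ((intptr_t)(gap_size + cpad_size + size));
}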
*/ if ((intptr_t)size < 0) return (NULL); malloc_mutex_lock(&dss_mtx); if (dss_prev != (void *)-1) { /* * The loop is necessary to recover from races with other * threads that are using the DSS for something other than * malloc. */ do { void *ret, *cpad, *dss_next; size_t gap_size, cpad_size; intptr_t incr; /* Avoid an unnecessary system call. */ if (new_addr != NULL && dss_max != new_addr) break; /* Get the current end of the DSS. */ dss_max = chunk_dss_sbrk(0); /* Make sure the earlier condition still holds. */ if (new_addr != NULL && dss_max != new_addr) break; /* * Calculate how much padding is necessary to * chunk-align the end of the DSS. */ gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) & chunksize_mask; /* * Compute how much chunk-aligned pad space (if any) is * necessary to satisfy alignment. This space can be * recycled for later use. */ cpad = (void *)((uintptr_t)dss_max + gap_size); ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max, alignment); cpad_size = (uintptr_t)ret - (uintptr_t)cpad; dss_next = (void *)((uintptr_t)ret + size); if ((uintptr_t)ret < (uintptr_t)dss_max || (uintptr_t)dss_next < (uintptr_t)dss_max) { /* Wrap-around. */ malloc_mutex_unlock(&dss_mtx); return (NULL); } incr = gap_size + cpad_size + size; dss_prev = chunk_dss_sbrk(incr); if (dss_prev == dss_max) { /* Success. */ dss_max = dss_next; malloc_mutex_unlock(&dss_mtx); if (cpad_size != 0) { chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; chunk_dalloc_wrapper(arena, &chunk_hooks, cpad, cpad_size, true); } if (*zero) { JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( ret, size); memset(ret, 0, size); } if (!*commit) *commit = pages_decommit(ret, size); return (ret); } } while (dss_prev != (void *)-1); } malloc_mutex_unlock(&dss_mtx); return (NULL); } bool chunk_in_dss(void *chunk) { bool ret; cassert(have_dss); malloc_mutex_lock(&dss_mtx); if ((uintptr_t)chunk >= (uintptr_t)dss_base && (uintptr_t)chunk < (uintptr_t)dss_max) ret = true; else ret = false; malloc_mutex_unlock(&dss_mtx); return (ret); } bool chunk_dss_boot(void) { cassert(have_dss); if (malloc_mutex_init(&dss_mtx)) return (true); dss_base = chunk_dss_sbrk(0); dss_prev = dss_base; dss_max = dss_base; return (false); } void chunk_dss_prefork(void) { if (have_dss) malloc_mutex_prefork(&dss_mtx); } void chunk_dss_postfork_parent(void) { if (have_dss) malloc_mutex_postfork_parent(&dss_mtx); } void chunk_dss_postfork_child(void) { if (have_dss) malloc_mutex_postfork_child(&dss_mtx); } /******************************************************************************/ disque-1.0-rc1/deps/jemalloc/src/chunk_mmap.c000066400000000000000000000037301264176561100211410ustar00rootroot00000000000000#define JEMALLOC_CHUNK_MMAP_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ static void * chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit) { void *ret; size_t alloc_size; alloc_size = size + alignment - PAGE; /* Beware size_t wrap-around. 
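Both chunk_recycle() and chunk_alloc_mmap_slow() guard their padded sizes with the same wrap-around check: an unsigned sum overflowed iff the result is smaller than an operand. As a standalone helper (hypothetical name):

#include <stdbool.h>
#include <stddef.h>

/* Detect size_t wrap-around in a + b; on overflow the caller should
 * fail the allocation, as the "Beware size_t wrap-around" checks do. */
static bool
sketch_size_add_overflows(size_t a, size_t b, size_t *sum)
{
    *sum = a + b;
    return (*sum < a);
}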
*/ if (alloc_size < size) return (NULL); do { void *pages; size_t leadsize; pages = pages_map(NULL, alloc_size); if (pages == NULL) return (NULL); leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) - (uintptr_t)pages; ret = pages_trim(pages, alloc_size, leadsize, size); } while (ret == NULL); assert(ret != NULL); *zero = true; if (!*commit) *commit = pages_decommit(ret, size); return (ret); } void * chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit) { void *ret; size_t offset; /* * Ideally, there would be a way to specify alignment to mmap() (like * NetBSD has), but in the absence of such a feature, we have to work * hard to efficiently create aligned mappings. The reliable, but * slow method is to create a mapping that is over-sized, then trim the * excess. However, that always results in one or two calls to * pages_unmap(). * * Optimistically try mapping precisely the right amount before falling * back to the slow method, with the expectation that the optimistic * approach works most of the time. */ assert(alignment != 0); assert((alignment & chunksize_mask) == 0); ret = pages_map(NULL, size); if (ret == NULL) return (NULL); offset = ALIGNMENT_ADDR2OFFSET(ret, alignment); if (offset != 0) { pages_unmap(ret, size); return (chunk_alloc_mmap_slow(size, alignment, zero, commit)); } assert(ret != NULL); *zero = true; if (!*commit) *commit = pages_decommit(ret, size); return (ret); } bool chunk_dalloc_mmap(void *chunk, size_t size) { if (config_munmap) pages_unmap(chunk, size); return (!config_munmap); } disque-1.0-rc1/deps/jemalloc/src/ckh.c000066400000000000000000000336401264176561100175670ustar00rootroot00000000000000/* ******************************************************************************* * Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each * hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash * functions are employed. The original cuckoo hashing algorithm was described * in: * * Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms * 51(2):122-144. * * Generalization of cuckoo hashing was discussed in: * * Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical * alternative to traditional hash tables. In Proceedings of the 7th * Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA, * January 2006. * * This implementation uses precisely two hash functions because that is the * fewest that can work, and supporting multiple hashes is an implementation * burden. Here is a reproduction of Figure 1 from Erlingsson et al. (2006) * that shows approximate expected maximum load factors for various * configurations: * * | #cells/bucket | * #hashes | 1 | 2 | 4 | 8 | * --------+-------+-------+-------+-------+ * 1 | 0.006 | 0.006 | 0.03 | 0.12 | * 2 | 0.49 | 0.86 |>0.93< |>0.96< | * 3 | 0.91 | 0.97 | 0.98 | 0.999 | * 4 | 0.97 | 0.99 | 0.999 | | * * The number of cells per bucket is chosen such that a bucket fits in one cache * line. So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing, * respectively. * ******************************************************************************/ #define JEMALLOC_CKH_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ /* Function prototypes for non-inline static functions. 
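The load-factor table above rests on one structural property: with two hash functions, every key can live in exactly two buckets, so a lookup probes at most both. A simplified sketch with one cell per bucket (jemalloc uses 2^n cells per bucket; the names and sizes here are assumptions of this sketch):

#include <stdbool.h>
#include <stddef.h>

#define SKETCH_NBUCKETS ((size_t)64) /* Power of two. */

typedef struct {
    const void *key;
    const void *data;
} sketch_cell_t;

/* Probe the two candidate buckets derived from the two hashes. */
static sketch_cell_t *
sketch_cuckoo_lookup(sketch_cell_t tab[SKETCH_NBUCKETS], const void *key,
    size_t hash0, size_t hash1,
    bool (*keycomp)(const void *, const void *))
{
    size_t b0 = hash0 & (SKETCH_NBUCKETS - 1);
    size_t b1 = hash1 & (SKETCH_NBUCKETS - 1);

    if (tab[b0].key != NULL && keycomp(key, tab[b0].key))
        return (&tab[b0]);
    if (tab[b1].key != NULL && keycomp(key, tab[b1].key))
        return (&tab[b1]);
    return (NULL);
}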
*/ static bool ckh_grow(tsd_t *tsd, ckh_t *ckh); static void ckh_shrink(tsd_t *tsd, ckh_t *ckh); /******************************************************************************/ /* * Search bucket for key and return the cell number if found; SIZE_T_MAX * otherwise. */ JEMALLOC_INLINE_C size_t ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) { ckhc_t *cell; unsigned i; for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; if (cell->key != NULL && ckh->keycomp(key, cell->key)) return ((bucket << LG_CKH_BUCKET_CELLS) + i); } return (SIZE_T_MAX); } /* * Search table for key and return cell number if found; SIZE_T_MAX otherwise. */ JEMALLOC_INLINE_C size_t ckh_isearch(ckh_t *ckh, const void *key) { size_t hashes[2], bucket, cell; assert(ckh != NULL); ckh->hash(key, hashes); /* Search primary bucket. */ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); cell = ckh_bucket_search(ckh, bucket, key); if (cell != SIZE_T_MAX) return (cell); /* Search secondary bucket. */ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); cell = ckh_bucket_search(ckh, bucket, key); return (cell); } JEMALLOC_INLINE_C bool ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, const void *data) { ckhc_t *cell; unsigned offset, i; /* * Cycle through the cells in the bucket, starting at a random position. * The randomness avoids worst-case search overhead as buckets fill up. */ prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C); for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))]; if (cell->key == NULL) { cell->key = key; cell->data = data; ckh->count++; return (false); } } return (true); } /* * No space is available in bucket. Randomly evict an item, then try to find an * alternate location for that item. Iteratively repeat this * eviction/relocation procedure until either success or detection of an * eviction/relocation bucket cycle. */ JEMALLOC_INLINE_C bool ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, void const **argdata) { const void *key, *data, *tkey, *tdata; ckhc_t *cell; size_t hashes[2], bucket, tbucket; unsigned i; bucket = argbucket; key = *argkey; data = *argdata; while (true) { /* * Choose a random item within the bucket to evict. This is * critical to correct function, because without (eventually) * evicting all items within a bucket during iteration, it * would be possible to get stuck in an infinite loop if there * were an item for which both hashes indicated the same * bucket. */ prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C); cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; assert(cell->key != NULL); /* Swap cell->{key,data} and {key,data} (evict). */ tkey = cell->key; tdata = cell->data; cell->key = key; cell->data = data; key = tkey; data = tdata; #ifdef CKH_COUNT ckh->nrelocs++; #endif /* Find the alternate bucket for the evicted item. */ ckh->hash(key, hashes); tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); if (tbucket == bucket) { tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); /* * It may be that (tbucket == bucket) still, if the * item's hashes both indicate this bucket. However, * we are guaranteed to eventually escape this bucket * during iteration, assuming pseudo-random item * selection (true randomness would make infinite * looping a remote possibility). 
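ckh_try_bucket_insert() above starts its cell scan at a pseudo-random offset so buckets do not always fill from cell 0. A sketch of the prng32()-style generator it leans on: a linear congruential step whose top lg_range bits become the offset (the constants here are illustrative, not jemalloc's CKH_A/CKH_C):

#include <stdint.h>

static uint32_t
sketch_prng32(uint32_t *state, unsigned lg_range)
{
    /* LCG step, then keep the high bits, which cycle best. */
    *state = (*state * 1103515245U) + 12345U;
    return (*state >> (32 - lg_range));
}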
The reason we can * never get trapped forever is that there are two * cases: * * 1) This bucket == argbucket, so we will quickly * detect an eviction cycle and terminate. * 2) An item was evicted to this bucket from another, * which means that at least one item in this bucket * has hashes that indicate distinct buckets. */ } /* Check for a cycle. */ if (tbucket == argbucket) { *argkey = key; *argdata = data; return (true); } bucket = tbucket; if (!ckh_try_bucket_insert(ckh, bucket, key, data)) return (false); } } JEMALLOC_INLINE_C bool ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) { size_t hashes[2], bucket; const void *key = *argkey; const void *data = *argdata; ckh->hash(key, hashes); /* Try to insert in primary bucket. */ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); if (!ckh_try_bucket_insert(ckh, bucket, key, data)) return (false); /* Try to insert in secondary bucket. */ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); if (!ckh_try_bucket_insert(ckh, bucket, key, data)) return (false); /* * Try to find a place for this item via iterative eviction/relocation. */ return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata)); } /* * Try to rebuild the hash table from scratch by inserting all items from the * old table into the new. */ JEMALLOC_INLINE_C bool ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) { size_t count, i, nins; const void *key, *data; count = ckh->count; ckh->count = 0; for (i = nins = 0; nins < count; i++) { if (aTab[i].key != NULL) { key = aTab[i].key; data = aTab[i].data; if (ckh_try_insert(ckh, &key, &data)) { ckh->count = count; return (true); } nins++; } } return (false); } static bool ckh_grow(tsd_t *tsd, ckh_t *ckh) { bool ret; ckhc_t *tab, *ttab; size_t lg_curcells; unsigned lg_prevbuckets; #ifdef CKH_COUNT ckh->ngrows++; #endif /* * It is possible (though unlikely, given well behaved hashes) that the * table will have to be doubled more than once in order to create a * usable table. */ lg_prevbuckets = ckh->lg_curbuckets; lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS; while (true) { size_t usize; lg_curcells++; usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); if (usize == 0) { ret = true; goto label_return; } tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true, NULL); if (tab == NULL) { ret = true; goto label_return; } /* Swap in new table. */ ttab = ckh->tab; ckh->tab = tab; tab = ttab; ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; if (!ckh_rebuild(ckh, tab)) { idalloctm(tsd, tab, tcache_get(tsd, false), true); break; } /* Rebuilding failed, so back out partially rebuilt table. */ idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true); ckh->tab = tab; ckh->lg_curbuckets = lg_prevbuckets; } ret = false; label_return: return (ret); } static void ckh_shrink(tsd_t *tsd, ckh_t *ckh) { ckhc_t *tab, *ttab; size_t lg_curcells, usize; unsigned lg_prevbuckets; /* * It is possible (though unlikely, given well behaved hashes) that the * table rebuild will fail. */ lg_prevbuckets = ckh->lg_curbuckets; lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); if (usize == 0) return; tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true, NULL); if (tab == NULL) { /* * An OOM error isn't worth propagating, since it doesn't * prevent this or future operations from proceeding. */ return; } /* Swap in new table. 
*/ ttab = ckh->tab; ckh->tab = tab; tab = ttab; ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; if (!ckh_rebuild(ckh, tab)) { idalloctm(tsd, tab, tcache_get(tsd, false), true); #ifdef CKH_COUNT ckh->nshrinks++; #endif return; } /* Rebuilding failed, so back out partially rebuilt table. */ idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true); ckh->tab = tab; ckh->lg_curbuckets = lg_prevbuckets; #ifdef CKH_COUNT ckh->nshrinkfails++; #endif } bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp) { bool ret; size_t mincells, usize; unsigned lg_mincells; assert(minitems > 0); assert(hash != NULL); assert(keycomp != NULL); #ifdef CKH_COUNT ckh->ngrows = 0; ckh->nshrinks = 0; ckh->nshrinkfails = 0; ckh->ninserts = 0; ckh->nrelocs = 0; #endif ckh->prng_state = 42; /* Value doesn't really matter. */ ckh->count = 0; /* * Find the minimum power of 2 that is large enough to fit minitems * entries. We are using (2+,2) cuckoo hashing, which has an expected * maximum load factor of at least ~0.86, so 0.75 is a conservative load * factor that will typically allow mincells items to fit without ever * growing the table. */ assert(LG_CKH_BUCKET_CELLS > 0); mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2; for (lg_mincells = LG_CKH_BUCKET_CELLS; (ZU(1) << lg_mincells) < mincells; lg_mincells++) ; /* Do nothing. */ ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; ckh->hash = hash; ckh->keycomp = keycomp; usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); if (usize == 0) { ret = true; goto label_return; } ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true, NULL); if (ckh->tab == NULL) { ret = true; goto label_return; } ret = false; label_return: return (ret); } void ckh_delete(tsd_t *tsd, ckh_t *ckh) { assert(ckh != NULL); #ifdef CKH_VERBOSE malloc_printf( "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64"," " nshrinkfails: %"FMTu64", ninserts: %"FMTu64"," " nrelocs: %"FMTu64"\n", __func__, ckh, (unsigned long long)ckh->ngrows, (unsigned long long)ckh->nshrinks, (unsigned long long)ckh->nshrinkfails, (unsigned long long)ckh->ninserts, (unsigned long long)ckh->nrelocs); #endif idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true); if (config_debug) memset(ckh, 0x5a, sizeof(ckh_t)); } size_t ckh_count(ckh_t *ckh) { assert(ckh != NULL); return (ckh->count); } bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) { size_t i, ncells; for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS)); i < ncells; i++) { if (ckh->tab[i].key != NULL) { if (key != NULL) *key = (void *)ckh->tab[i].key; if (data != NULL) *data = (void *)ckh->tab[i].data; *tabind = i + 1; return (false); } } return (true); } bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) { bool ret; assert(ckh != NULL); assert(ckh_search(ckh, key, NULL, NULL)); #ifdef CKH_COUNT ckh->ninserts++; #endif while (ckh_try_insert(ckh, &key, &data)) { if (ckh_grow(tsd, ckh)) { ret = true; goto label_return; } } ret = false; label_return: return (ret); } bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, void **data) { size_t cell; assert(ckh != NULL); cell = ckh_isearch(ckh, searchkey); if (cell != SIZE_T_MAX) { if (key != NULL) *key = (void *)ckh->tab[cell].key; if (data != NULL) *data = (void *)ckh->tab[cell].data; ckh->tab[cell].key = NULL; ckh->tab[cell].data = NULL; /* Not necessary. 
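The sizing formula in ckh_new() above budgets for a 0.75 load factor: scale minitems by roughly 4/3, then round the cell count up to a power of two. A sketch of that computation (hypothetical name):

#include <stddef.h>

/* Smallest lg(cells) >= lg_min such that minitems fits at <= ~0.75
 * load, using the same 4/3 scaling as ckh_new(). */
static unsigned
sketch_lg_mincells(size_t minitems, unsigned lg_min)
{
    size_t mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
    unsigned lg = lg_min;

    while (((size_t)1 << lg) < mincells)
        lg++;
    return (lg);
}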
*/ ckh->count--; /* Try to halve the table if it is less than 1/4 full. */ if (ckh->count < (ZU(1) << (ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets > ckh->lg_minbuckets) { /* Ignore error due to OOM. */ ckh_shrink(tsd, ckh); } return (false); } return (true); } bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) { size_t cell; assert(ckh != NULL); cell = ckh_isearch(ckh, searchkey); if (cell != SIZE_T_MAX) { if (key != NULL) *key = (void *)ckh->tab[cell].key; if (data != NULL) *data = (void *)ckh->tab[cell].data; return (false); } return (true); } void ckh_string_hash(const void *key, size_t r_hash[2]) { hash(key, strlen((const char *)key), 0x94122f33U, r_hash); } bool ckh_string_keycomp(const void *k1, const void *k2) { assert(k1 != NULL); assert(k2 != NULL); return (strcmp((char *)k1, (char *)k2) ? false : true); } void ckh_pointer_hash(const void *key, size_t r_hash[2]) { union { const void *v; size_t i; } u; assert(sizeof(u.v) == sizeof(u.i)); u.v = key; hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash); } bool ckh_pointer_keycomp(const void *k1, const void *k2) { return ((k1 == k2) ? true : false); } disque-1.0-rc1/deps/jemalloc/src/ctl.c000066400000000000000000001577411264176561100176150ustar00rootroot00000000000000#define JEMALLOC_CTL_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ /* Data. */ /* * ctl_mtx protects the following: * - ctl_stats.* */ static malloc_mutex_t ctl_mtx; static bool ctl_initialized; static uint64_t ctl_epoch; static ctl_stats_t ctl_stats; /******************************************************************************/ /* Helpers for named and indexed nodes. */ JEMALLOC_INLINE_C const ctl_named_node_t * ctl_named_node(const ctl_node_t *node) { return ((node->named) ? (const ctl_named_node_t *)node : NULL); } JEMALLOC_INLINE_C const ctl_named_node_t * ctl_named_children(const ctl_named_node_t *node, int index) { const ctl_named_node_t *children = ctl_named_node(node->children); return (children ? &children[index] : NULL); } JEMALLOC_INLINE_C const ctl_indexed_node_t * ctl_indexed_node(const ctl_node_t *node) { return (!node->named ? (const ctl_indexed_node_t *)node : NULL); } /******************************************************************************/ /* Function prototypes for non-inline static functions. 
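ckh_pointer_hash() above funnels a pointer through a union so its bits can be hashed as an integer without strict-aliasing trouble. The same idiom in isolation (the multiplier is a placeholder mixer, not jemalloc's hash()):

#include <stddef.h>

static size_t
sketch_pointer_hash(const void *key)
{
    union {
        const void *v;
        size_t i;
    } u;

    u.v = key; /* Reinterpret the pointer bits via the union. */
    return (u.i * (size_t)0x9e3779b97f4a7c15ULL);
}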
*/ #define CTL_PROTO(n) \ static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \ size_t *oldlenp, void *newp, size_t newlen); #define INDEX_PROTO(n) \ static const ctl_named_node_t *n##_index(const size_t *mib, \ size_t miblen, size_t i); static bool ctl_arena_init(ctl_arena_stats_t *astats); static void ctl_arena_clear(ctl_arena_stats_t *astats); static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena); static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats); static void ctl_arena_refresh(arena_t *arena, unsigned i); static bool ctl_grow(void); static void ctl_refresh(void); static bool ctl_init(void); static int ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, size_t *depthp); CTL_PROTO(version) CTL_PROTO(epoch) CTL_PROTO(thread_tcache_enabled) CTL_PROTO(thread_tcache_flush) CTL_PROTO(thread_prof_name) CTL_PROTO(thread_prof_active) CTL_PROTO(thread_arena) CTL_PROTO(thread_allocated) CTL_PROTO(thread_allocatedp) CTL_PROTO(thread_deallocated) CTL_PROTO(thread_deallocatedp) CTL_PROTO(config_cache_oblivious) CTL_PROTO(config_debug) CTL_PROTO(config_fill) CTL_PROTO(config_lazy_lock) CTL_PROTO(config_munmap) CTL_PROTO(config_prof) CTL_PROTO(config_prof_libgcc) CTL_PROTO(config_prof_libunwind) CTL_PROTO(config_stats) CTL_PROTO(config_tcache) CTL_PROTO(config_tls) CTL_PROTO(config_utrace) CTL_PROTO(config_valgrind) CTL_PROTO(config_xmalloc) CTL_PROTO(opt_abort) CTL_PROTO(opt_dss) CTL_PROTO(opt_lg_chunk) CTL_PROTO(opt_narenas) CTL_PROTO(opt_lg_dirty_mult) CTL_PROTO(opt_stats_print) CTL_PROTO(opt_junk) CTL_PROTO(opt_zero) CTL_PROTO(opt_quarantine) CTL_PROTO(opt_redzone) CTL_PROTO(opt_utrace) CTL_PROTO(opt_xmalloc) CTL_PROTO(opt_tcache) CTL_PROTO(opt_lg_tcache_max) CTL_PROTO(opt_prof) CTL_PROTO(opt_prof_prefix) CTL_PROTO(opt_prof_active) CTL_PROTO(opt_prof_thread_active_init) CTL_PROTO(opt_lg_prof_sample) CTL_PROTO(opt_lg_prof_interval) CTL_PROTO(opt_prof_gdump) CTL_PROTO(opt_prof_final) CTL_PROTO(opt_prof_leak) CTL_PROTO(opt_prof_accum) CTL_PROTO(tcache_create) CTL_PROTO(tcache_flush) CTL_PROTO(tcache_destroy) CTL_PROTO(arena_i_purge) static void arena_purge(unsigned arena_ind); CTL_PROTO(arena_i_dss) CTL_PROTO(arena_i_lg_dirty_mult) CTL_PROTO(arena_i_chunk_hooks) INDEX_PROTO(arena_i) CTL_PROTO(arenas_bin_i_size) CTL_PROTO(arenas_bin_i_nregs) CTL_PROTO(arenas_bin_i_run_size) INDEX_PROTO(arenas_bin_i) CTL_PROTO(arenas_lrun_i_size) INDEX_PROTO(arenas_lrun_i) CTL_PROTO(arenas_hchunk_i_size) INDEX_PROTO(arenas_hchunk_i) CTL_PROTO(arenas_narenas) CTL_PROTO(arenas_initialized) CTL_PROTO(arenas_lg_dirty_mult) CTL_PROTO(arenas_quantum) CTL_PROTO(arenas_page) CTL_PROTO(arenas_tcache_max) CTL_PROTO(arenas_nbins) CTL_PROTO(arenas_nhbins) CTL_PROTO(arenas_nlruns) CTL_PROTO(arenas_nhchunks) CTL_PROTO(arenas_extend) CTL_PROTO(prof_thread_active_init) CTL_PROTO(prof_active) CTL_PROTO(prof_dump) CTL_PROTO(prof_gdump) CTL_PROTO(prof_reset) CTL_PROTO(prof_interval) CTL_PROTO(lg_prof_sample) CTL_PROTO(stats_arenas_i_small_allocated) CTL_PROTO(stats_arenas_i_small_nmalloc) CTL_PROTO(stats_arenas_i_small_ndalloc) CTL_PROTO(stats_arenas_i_small_nrequests) CTL_PROTO(stats_arenas_i_large_allocated) CTL_PROTO(stats_arenas_i_large_nmalloc) CTL_PROTO(stats_arenas_i_large_ndalloc) CTL_PROTO(stats_arenas_i_large_nrequests) CTL_PROTO(stats_arenas_i_huge_allocated) CTL_PROTO(stats_arenas_i_huge_nmalloc) CTL_PROTO(stats_arenas_i_huge_ndalloc) CTL_PROTO(stats_arenas_i_huge_nrequests) CTL_PROTO(stats_arenas_i_bins_j_nmalloc) 
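CTL_PROTO above is token-pasting boilerplate: every mallctl leaf gets a handler with one fixed signature, so tree nodes can store a uniform function pointer. A reduced sketch of the pattern (hypothetical names, placeholder body):

#include <stddef.h>

#define SKETCH_CTL_PROTO(n)                                            \
static int n##_handler(const size_t *mib, size_t miblen, void *oldp,   \
    size_t *oldlenp, void *newp, size_t newlen);

SKETCH_CTL_PROTO(sketch_version)

static int
sketch_version_handler(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
    /* Placeholder body; a real handler reads/writes old/new values. */
    (void)mib; (void)miblen; (void)oldp; (void)oldlenp;
    (void)newp; (void)newlen;
    return (0);
}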
CTL_PROTO(stats_arenas_i_bins_j_ndalloc) CTL_PROTO(stats_arenas_i_bins_j_nrequests) CTL_PROTO(stats_arenas_i_bins_j_curregs) CTL_PROTO(stats_arenas_i_bins_j_nfills) CTL_PROTO(stats_arenas_i_bins_j_nflushes) CTL_PROTO(stats_arenas_i_bins_j_nruns) CTL_PROTO(stats_arenas_i_bins_j_nreruns) CTL_PROTO(stats_arenas_i_bins_j_curruns) INDEX_PROTO(stats_arenas_i_bins_j) CTL_PROTO(stats_arenas_i_lruns_j_nmalloc) CTL_PROTO(stats_arenas_i_lruns_j_ndalloc) CTL_PROTO(stats_arenas_i_lruns_j_nrequests) CTL_PROTO(stats_arenas_i_lruns_j_curruns) INDEX_PROTO(stats_arenas_i_lruns_j) CTL_PROTO(stats_arenas_i_hchunks_j_nmalloc) CTL_PROTO(stats_arenas_i_hchunks_j_ndalloc) CTL_PROTO(stats_arenas_i_hchunks_j_nrequests) CTL_PROTO(stats_arenas_i_hchunks_j_curhchunks) INDEX_PROTO(stats_arenas_i_hchunks_j) CTL_PROTO(stats_arenas_i_nthreads) CTL_PROTO(stats_arenas_i_dss) CTL_PROTO(stats_arenas_i_lg_dirty_mult) CTL_PROTO(stats_arenas_i_pactive) CTL_PROTO(stats_arenas_i_pdirty) CTL_PROTO(stats_arenas_i_mapped) CTL_PROTO(stats_arenas_i_npurge) CTL_PROTO(stats_arenas_i_nmadvise) CTL_PROTO(stats_arenas_i_purged) CTL_PROTO(stats_arenas_i_metadata_mapped) CTL_PROTO(stats_arenas_i_metadata_allocated) INDEX_PROTO(stats_arenas_i) CTL_PROTO(stats_cactive) CTL_PROTO(stats_allocated) CTL_PROTO(stats_active) CTL_PROTO(stats_metadata) CTL_PROTO(stats_resident) CTL_PROTO(stats_mapped) /******************************************************************************/ /* mallctl tree. */ /* Maximum tree depth. */ #define CTL_MAX_DEPTH 6 #define NAME(n) {true}, n #define CHILD(t, c) \ sizeof(c##_node) / sizeof(ctl_##t##_node_t), \ (ctl_node_t *)c##_node, \ NULL #define CTL(c) 0, NULL, c##_ctl /* * Only handles internal indexed nodes, since there are currently no external * ones. */ #define INDEX(i) {false}, i##_index static const ctl_named_node_t thread_tcache_node[] = { {NAME("enabled"), CTL(thread_tcache_enabled)}, {NAME("flush"), CTL(thread_tcache_flush)} }; static const ctl_named_node_t thread_prof_node[] = { {NAME("name"), CTL(thread_prof_name)}, {NAME("active"), CTL(thread_prof_active)} }; static const ctl_named_node_t thread_node[] = { {NAME("arena"), CTL(thread_arena)}, {NAME("allocated"), CTL(thread_allocated)}, {NAME("allocatedp"), CTL(thread_allocatedp)}, {NAME("deallocated"), CTL(thread_deallocated)}, {NAME("deallocatedp"), CTL(thread_deallocatedp)}, {NAME("tcache"), CHILD(named, thread_tcache)}, {NAME("prof"), CHILD(named, thread_prof)} }; static const ctl_named_node_t config_node[] = { {NAME("cache_oblivious"), CTL(config_cache_oblivious)}, {NAME("debug"), CTL(config_debug)}, {NAME("fill"), CTL(config_fill)}, {NAME("lazy_lock"), CTL(config_lazy_lock)}, {NAME("munmap"), CTL(config_munmap)}, {NAME("prof"), CTL(config_prof)}, {NAME("prof_libgcc"), CTL(config_prof_libgcc)}, {NAME("prof_libunwind"), CTL(config_prof_libunwind)}, {NAME("stats"), CTL(config_stats)}, {NAME("tcache"), CTL(config_tcache)}, {NAME("tls"), CTL(config_tls)}, {NAME("utrace"), CTL(config_utrace)}, {NAME("valgrind"), CTL(config_valgrind)}, {NAME("xmalloc"), CTL(config_xmalloc)} }; static const ctl_named_node_t opt_node[] = { {NAME("abort"), CTL(opt_abort)}, {NAME("dss"), CTL(opt_dss)}, {NAME("lg_chunk"), CTL(opt_lg_chunk)}, {NAME("narenas"), CTL(opt_narenas)}, {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)}, {NAME("stats_print"), CTL(opt_stats_print)}, {NAME("junk"), CTL(opt_junk)}, {NAME("zero"), CTL(opt_zero)}, {NAME("quarantine"), CTL(opt_quarantine)}, {NAME("redzone"), CTL(opt_redzone)}, {NAME("utrace"), CTL(opt_utrace)}, {NAME("xmalloc"), 
CTL(opt_xmalloc)}, {NAME("tcache"), CTL(opt_tcache)}, {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)}, {NAME("prof"), CTL(opt_prof)}, {NAME("prof_prefix"), CTL(opt_prof_prefix)}, {NAME("prof_active"), CTL(opt_prof_active)}, {NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)}, {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)}, {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)}, {NAME("prof_gdump"), CTL(opt_prof_gdump)}, {NAME("prof_final"), CTL(opt_prof_final)}, {NAME("prof_leak"), CTL(opt_prof_leak)}, {NAME("prof_accum"), CTL(opt_prof_accum)} }; static const ctl_named_node_t tcache_node[] = { {NAME("create"), CTL(tcache_create)}, {NAME("flush"), CTL(tcache_flush)}, {NAME("destroy"), CTL(tcache_destroy)} }; static const ctl_named_node_t arena_i_node[] = { {NAME("purge"), CTL(arena_i_purge)}, {NAME("dss"), CTL(arena_i_dss)}, {NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)}, {NAME("chunk_hooks"), CTL(arena_i_chunk_hooks)} }; static const ctl_named_node_t super_arena_i_node[] = { {NAME(""), CHILD(named, arena_i)} }; static const ctl_indexed_node_t arena_node[] = { {INDEX(arena_i)} }; static const ctl_named_node_t arenas_bin_i_node[] = { {NAME("size"), CTL(arenas_bin_i_size)}, {NAME("nregs"), CTL(arenas_bin_i_nregs)}, {NAME("run_size"), CTL(arenas_bin_i_run_size)} }; static const ctl_named_node_t super_arenas_bin_i_node[] = { {NAME(""), CHILD(named, arenas_bin_i)} }; static const ctl_indexed_node_t arenas_bin_node[] = { {INDEX(arenas_bin_i)} }; static const ctl_named_node_t arenas_lrun_i_node[] = { {NAME("size"), CTL(arenas_lrun_i_size)} }; static const ctl_named_node_t super_arenas_lrun_i_node[] = { {NAME(""), CHILD(named, arenas_lrun_i)} }; static const ctl_indexed_node_t arenas_lrun_node[] = { {INDEX(arenas_lrun_i)} }; static const ctl_named_node_t arenas_hchunk_i_node[] = { {NAME("size"), CTL(arenas_hchunk_i_size)} }; static const ctl_named_node_t super_arenas_hchunk_i_node[] = { {NAME(""), CHILD(named, arenas_hchunk_i)} }; static const ctl_indexed_node_t arenas_hchunk_node[] = { {INDEX(arenas_hchunk_i)} }; static const ctl_named_node_t arenas_node[] = { {NAME("narenas"), CTL(arenas_narenas)}, {NAME("initialized"), CTL(arenas_initialized)}, {NAME("lg_dirty_mult"), CTL(arenas_lg_dirty_mult)}, {NAME("quantum"), CTL(arenas_quantum)}, {NAME("page"), CTL(arenas_page)}, {NAME("tcache_max"), CTL(arenas_tcache_max)}, {NAME("nbins"), CTL(arenas_nbins)}, {NAME("nhbins"), CTL(arenas_nhbins)}, {NAME("bin"), CHILD(indexed, arenas_bin)}, {NAME("nlruns"), CTL(arenas_nlruns)}, {NAME("lrun"), CHILD(indexed, arenas_lrun)}, {NAME("nhchunks"), CTL(arenas_nhchunks)}, {NAME("hchunk"), CHILD(indexed, arenas_hchunk)}, {NAME("extend"), CTL(arenas_extend)} }; static const ctl_named_node_t prof_node[] = { {NAME("thread_active_init"), CTL(prof_thread_active_init)}, {NAME("active"), CTL(prof_active)}, {NAME("dump"), CTL(prof_dump)}, {NAME("gdump"), CTL(prof_gdump)}, {NAME("reset"), CTL(prof_reset)}, {NAME("interval"), CTL(prof_interval)}, {NAME("lg_sample"), CTL(lg_prof_sample)} }; static const ctl_named_node_t stats_arenas_i_metadata_node[] = { {NAME("mapped"), CTL(stats_arenas_i_metadata_mapped)}, {NAME("allocated"), CTL(stats_arenas_i_metadata_allocated)} }; static const ctl_named_node_t stats_arenas_i_small_node[] = { {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)}, {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)} }; static const ctl_named_node_t 
stats_arenas_i_large_node[] = { {NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)}, {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)} }; static const ctl_named_node_t stats_arenas_i_huge_node[] = { {NAME("allocated"), CTL(stats_arenas_i_huge_allocated)}, {NAME("nmalloc"), CTL(stats_arenas_i_huge_nmalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_huge_ndalloc)}, {NAME("nrequests"), CTL(stats_arenas_i_huge_nrequests)} }; static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)}, {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)}, {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)}, {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)}, {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)}, {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)}, {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)}, {NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)} }; static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = { {NAME(""), CHILD(named, stats_arenas_i_bins_j)} }; static const ctl_indexed_node_t stats_arenas_i_bins_node[] = { {INDEX(stats_arenas_i_bins_j)} }; static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = { {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)}, {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)}, {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)} }; static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = { {NAME(""), CHILD(named, stats_arenas_i_lruns_j)} }; static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = { {INDEX(stats_arenas_i_lruns_j)} }; static const ctl_named_node_t stats_arenas_i_hchunks_j_node[] = { {NAME("nmalloc"), CTL(stats_arenas_i_hchunks_j_nmalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_hchunks_j_ndalloc)}, {NAME("nrequests"), CTL(stats_arenas_i_hchunks_j_nrequests)}, {NAME("curhchunks"), CTL(stats_arenas_i_hchunks_j_curhchunks)} }; static const ctl_named_node_t super_stats_arenas_i_hchunks_j_node[] = { {NAME(""), CHILD(named, stats_arenas_i_hchunks_j)} }; static const ctl_indexed_node_t stats_arenas_i_hchunks_node[] = { {INDEX(stats_arenas_i_hchunks_j)} }; static const ctl_named_node_t stats_arenas_i_node[] = { {NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, {NAME("dss"), CTL(stats_arenas_i_dss)}, {NAME("lg_dirty_mult"), CTL(stats_arenas_i_lg_dirty_mult)}, {NAME("pactive"), CTL(stats_arenas_i_pactive)}, {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, {NAME("mapped"), CTL(stats_arenas_i_mapped)}, {NAME("npurge"), CTL(stats_arenas_i_npurge)}, {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)}, {NAME("purged"), CTL(stats_arenas_i_purged)}, {NAME("metadata"), CHILD(named, stats_arenas_i_metadata)}, {NAME("small"), CHILD(named, stats_arenas_i_small)}, {NAME("large"), CHILD(named, stats_arenas_i_large)}, {NAME("huge"), CHILD(named, stats_arenas_i_huge)}, {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)}, {NAME("hchunks"), CHILD(indexed, stats_arenas_i_hchunks)} }; static const ctl_named_node_t super_stats_arenas_i_node[] = { {NAME(""), CHILD(named, stats_arenas_i)} }; static const ctl_indexed_node_t stats_arenas_node[] = { {INDEX(stats_arenas_i)} }; static const ctl_named_node_t stats_node[] = { {NAME("cactive"), CTL(stats_cactive)}, {NAME("allocated"), 
CTL(stats_allocated)}, {NAME("active"), CTL(stats_active)}, {NAME("metadata"), CTL(stats_metadata)}, {NAME("resident"), CTL(stats_resident)}, {NAME("mapped"), CTL(stats_mapped)}, {NAME("arenas"), CHILD(indexed, stats_arenas)} }; static const ctl_named_node_t root_node[] = { {NAME("version"), CTL(version)}, {NAME("epoch"), CTL(epoch)}, {NAME("thread"), CHILD(named, thread)}, {NAME("config"), CHILD(named, config)}, {NAME("opt"), CHILD(named, opt)}, {NAME("tcache"), CHILD(named, tcache)}, {NAME("arena"), CHILD(indexed, arena)}, {NAME("arenas"), CHILD(named, arenas)}, {NAME("prof"), CHILD(named, prof)}, {NAME("stats"), CHILD(named, stats)} }; static const ctl_named_node_t super_root_node[] = { {NAME(""), CHILD(named, root)} }; #undef NAME #undef CHILD #undef CTL #undef INDEX /******************************************************************************/ static bool ctl_arena_init(ctl_arena_stats_t *astats) { if (astats->lstats == NULL) { astats->lstats = (malloc_large_stats_t *)a0malloc(nlclasses * sizeof(malloc_large_stats_t)); if (astats->lstats == NULL) return (true); } if (astats->hstats == NULL) { astats->hstats = (malloc_huge_stats_t *)a0malloc(nhclasses * sizeof(malloc_huge_stats_t)); if (astats->hstats == NULL) return (true); } return (false); } static void ctl_arena_clear(ctl_arena_stats_t *astats) { astats->dss = dss_prec_names[dss_prec_limit]; astats->lg_dirty_mult = -1; astats->pactive = 0; astats->pdirty = 0; if (config_stats) { memset(&astats->astats, 0, sizeof(arena_stats_t)); astats->allocated_small = 0; astats->nmalloc_small = 0; astats->ndalloc_small = 0; astats->nrequests_small = 0; memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t)); memset(astats->lstats, 0, nlclasses * sizeof(malloc_large_stats_t)); memset(astats->hstats, 0, nhclasses * sizeof(malloc_huge_stats_t)); } } static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena) { unsigned i; arena_stats_merge(arena, &cstats->dss, &cstats->lg_dirty_mult, &cstats->pactive, &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats, cstats->hstats); for (i = 0; i < NBINS; i++) { cstats->allocated_small += cstats->bstats[i].curregs * index2size(i); cstats->nmalloc_small += cstats->bstats[i].nmalloc; cstats->ndalloc_small += cstats->bstats[i].ndalloc; cstats->nrequests_small += cstats->bstats[i].nrequests; } } static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats) { unsigned i; sstats->pactive += astats->pactive; sstats->pdirty += astats->pdirty; sstats->astats.mapped += astats->astats.mapped; sstats->astats.npurge += astats->astats.npurge; sstats->astats.nmadvise += astats->astats.nmadvise; sstats->astats.purged += astats->astats.purged; sstats->astats.metadata_mapped += astats->astats.metadata_mapped; sstats->astats.metadata_allocated += astats->astats.metadata_allocated; sstats->allocated_small += astats->allocated_small; sstats->nmalloc_small += astats->nmalloc_small; sstats->ndalloc_small += astats->ndalloc_small; sstats->nrequests_small += astats->nrequests_small; sstats->astats.allocated_large += astats->astats.allocated_large; sstats->astats.nmalloc_large += astats->astats.nmalloc_large; sstats->astats.ndalloc_large += astats->astats.ndalloc_large; sstats->astats.nrequests_large += astats->astats.nrequests_large; sstats->astats.allocated_huge += astats->astats.allocated_huge; sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge; sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge; for (i = 0; i < NBINS; i++) { sstats->bstats[i].nmalloc 
+= astats->bstats[i].nmalloc; sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; sstats->bstats[i].nrequests += astats->bstats[i].nrequests; sstats->bstats[i].curregs += astats->bstats[i].curregs; if (config_tcache) { sstats->bstats[i].nfills += astats->bstats[i].nfills; sstats->bstats[i].nflushes += astats->bstats[i].nflushes; } sstats->bstats[i].nruns += astats->bstats[i].nruns; sstats->bstats[i].reruns += astats->bstats[i].reruns; sstats->bstats[i].curruns += astats->bstats[i].curruns; } for (i = 0; i < nlclasses; i++) { sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc; sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc; sstats->lstats[i].nrequests += astats->lstats[i].nrequests; sstats->lstats[i].curruns += astats->lstats[i].curruns; } for (i = 0; i < nhclasses; i++) { sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc; sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc; sstats->hstats[i].curhchunks += astats->hstats[i].curhchunks; } } static void ctl_arena_refresh(arena_t *arena, unsigned i) { ctl_arena_stats_t *astats = &ctl_stats.arenas[i]; ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas]; ctl_arena_clear(astats); sstats->nthreads += astats->nthreads; if (config_stats) { ctl_arena_stats_amerge(astats, arena); /* Merge into sum stats as well. */ ctl_arena_stats_smerge(sstats, astats); } else { astats->pactive += arena->nactive; astats->pdirty += arena->ndirty; /* Merge into sum stats as well. */ sstats->pactive += arena->nactive; sstats->pdirty += arena->ndirty; } } static bool ctl_grow(void) { ctl_arena_stats_t *astats; /* Initialize new arena. */ if (arena_init(ctl_stats.narenas) == NULL) return (true); /* Allocate extended arena stats. */ astats = (ctl_arena_stats_t *)a0malloc((ctl_stats.narenas + 2) * sizeof(ctl_arena_stats_t)); if (astats == NULL) return (true); /* Initialize the new astats element. */ memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t)); memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t)); if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) { a0dalloc(astats); return (true); } /* Swap merged stats to their new location. */ { ctl_arena_stats_t tstats; memcpy(&tstats, &astats[ctl_stats.narenas], sizeof(ctl_arena_stats_t)); memcpy(&astats[ctl_stats.narenas], &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t)); memcpy(&astats[ctl_stats.narenas + 1], &tstats, sizeof(ctl_arena_stats_t)); } a0dalloc(ctl_stats.arenas); ctl_stats.arenas = astats; ctl_stats.narenas++; return (false); } static void ctl_refresh(void) { tsd_t *tsd; unsigned i; bool refreshed; VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas); /* * Clear sum stats, since they will be merged into by * ctl_arena_refresh(). 
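ctl_grow() above appends a stats element and then swaps the running per-arena summary back into the last slot with three memcpy() calls. The rotation in isolation (hypothetical name; the fixed temp buffer is an assumption of this sketch):

#include <string.h>

/* Array a holds n+2 elements of elmsize bytes; the summary sits at
 * index n and the freshly initialized element at n+1. Swap them so the
 * summary stays last, as ctl_grow() does. Assumes elmsize <= 256. */
static void
sketch_swap_summary_last(void *a, size_t n, size_t elmsize)
{
    char tmp[256];
    char *base = (char *)a;

    memcpy(tmp, base + n * elmsize, elmsize);
    memcpy(base + n * elmsize, base + (n + 1) * elmsize, elmsize);
    memcpy(base + (n + 1) * elmsize, tmp, elmsize);
}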
*/ ctl_stats.arenas[ctl_stats.narenas].nthreads = 0; ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]); tsd = tsd_fetch(); for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) { tarenas[i] = arena_get(tsd, i, false, false); if (tarenas[i] == NULL && !refreshed) { tarenas[i] = arena_get(tsd, i, false, true); refreshed = true; } } for (i = 0; i < ctl_stats.narenas; i++) { if (tarenas[i] != NULL) ctl_stats.arenas[i].nthreads = arena_nbound(i); else ctl_stats.arenas[i].nthreads = 0; } for (i = 0; i < ctl_stats.narenas; i++) { bool initialized = (tarenas[i] != NULL); ctl_stats.arenas[i].initialized = initialized; if (initialized) ctl_arena_refresh(tarenas[i], i); } if (config_stats) { size_t base_allocated, base_resident, base_mapped; base_stats_get(&base_allocated, &base_resident, &base_mapped); ctl_stats.allocated = ctl_stats.arenas[ctl_stats.narenas].allocated_small + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge; ctl_stats.active = (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE); ctl_stats.metadata = base_allocated + ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped + ctl_stats.arenas[ctl_stats.narenas].astats .metadata_allocated; ctl_stats.resident = base_resident + ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped + ((ctl_stats.arenas[ctl_stats.narenas].pactive + ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE); ctl_stats.mapped = base_mapped + ctl_stats.arenas[ctl_stats.narenas].astats.mapped; } ctl_epoch++; } static bool ctl_init(void) { bool ret; malloc_mutex_lock(&ctl_mtx); if (!ctl_initialized) { /* * Allocate space for one extra arena stats element, which * contains summed stats across all arenas. */ ctl_stats.narenas = narenas_total_get(); ctl_stats.arenas = (ctl_arena_stats_t *)a0malloc( (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t)); if (ctl_stats.arenas == NULL) { ret = true; goto label_return; } memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t)); /* * Initialize all stats structures, regardless of whether they * ever get used. Lazy initialization would allow errors to * cause inconsistent state to be viewable by the application. */ if (config_stats) { unsigned i; for (i = 0; i <= ctl_stats.narenas; i++) { if (ctl_arena_init(&ctl_stats.arenas[i])) { unsigned j; for (j = 0; j < i; j++) { a0dalloc( ctl_stats.arenas[j].lstats); a0dalloc( ctl_stats.arenas[j].hstats); } a0dalloc(ctl_stats.arenas); ctl_stats.arenas = NULL; ret = true; goto label_return; } } } ctl_stats.arenas[ctl_stats.narenas].initialized = true; ctl_epoch = 0; ctl_refresh(); ctl_initialized = true; } ret = false; label_return: malloc_mutex_unlock(&ctl_mtx); return (ret); } static int ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, size_t *depthp) { int ret; const char *elm, *tdot, *dot; size_t elen, i, j; const ctl_named_node_t *node; elm = name; /* Equivalent to strchrnul(). */ dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0'); elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); if (elen == 0) { ret = ENOENT; goto label_return; } node = super_root_node; for (i = 0; i < *depthp; i++) { assert(node); assert(node->nchildren > 0); if (ctl_named_node(node->children) != NULL) { const ctl_named_node_t *pnode = node; /* Children are named. 
*/ for (j = 0; j < node->nchildren; j++) { const ctl_named_node_t *child = ctl_named_children(node, j); if (strlen(child->name) == elen && strncmp(elm, child->name, elen) == 0) { node = child; if (nodesp != NULL) nodesp[i] = (const ctl_node_t *)node; mibp[i] = j; break; } } if (node == pnode) { ret = ENOENT; goto label_return; } } else { uintmax_t index; const ctl_indexed_node_t *inode; /* Children are indexed. */ index = malloc_strtoumax(elm, NULL, 10); if (index == UINTMAX_MAX || index > SIZE_T_MAX) { ret = ENOENT; goto label_return; } inode = ctl_indexed_node(node->children); node = inode->index(mibp, *depthp, (size_t)index); if (node == NULL) { ret = ENOENT; goto label_return; } if (nodesp != NULL) nodesp[i] = (const ctl_node_t *)node; mibp[i] = (size_t)index; } if (node->ctl != NULL) { /* Terminal node. */ if (*dot != '\0') { /* * The name contains more elements than are * in this path through the tree. */ ret = ENOENT; goto label_return; } /* Complete lookup successful. */ *depthp = i + 1; break; } /* Update elm. */ if (*dot == '\0') { /* No more elements. */ ret = ENOENT; goto label_return; } elm = &dot[1]; dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0'); elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); } ret = 0; label_return: return (ret); } int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; size_t depth; ctl_node_t const *nodes[CTL_MAX_DEPTH]; size_t mib[CTL_MAX_DEPTH]; const ctl_named_node_t *node; if (!ctl_initialized && ctl_init()) { ret = EAGAIN; goto label_return; } depth = CTL_MAX_DEPTH; ret = ctl_lookup(name, nodes, mib, &depth); if (ret != 0) goto label_return; node = ctl_named_node(nodes[depth-1]); if (node != NULL && node->ctl) ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen); else { /* The name refers to a partial path through the ctl tree. */ ret = ENOENT; } label_return: return(ret); } int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp) { int ret; if (!ctl_initialized && ctl_init()) { ret = EAGAIN; goto label_return; } ret = ctl_lookup(name, NULL, mibp, miblenp); label_return: return(ret); } int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; const ctl_named_node_t *node; size_t i; if (!ctl_initialized && ctl_init()) { ret = EAGAIN; goto label_return; } /* Iterate down the tree. */ node = super_root_node; for (i = 0; i < miblen; i++) { assert(node); assert(node->nchildren > 0); if (ctl_named_node(node->children) != NULL) { /* Children are named. */ if (node->nchildren <= mib[i]) { ret = ENOENT; goto label_return; } node = ctl_named_children(node, mib[i]); } else { const ctl_indexed_node_t *inode; /* Indexed element. */ inode = ctl_indexed_node(node->children); node = inode->index(mib, miblen, mib[i]); if (node == NULL) { ret = ENOENT; goto label_return; } } } /* Call the ctl function. */ if (node && node->ctl) ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen); else { /* Partial MIB. */ ret = ENOENT; } label_return: return(ret); } bool ctl_boot(void) { if (malloc_mutex_init(&ctl_mtx)) return (true); ctl_initialized = false; return (false); } void ctl_prefork(void) { malloc_mutex_prefork(&ctl_mtx); } void ctl_postfork_parent(void) { malloc_mutex_postfork_parent(&ctl_mtx); } void ctl_postfork_child(void) { malloc_mutex_postfork_child(&ctl_mtx); } /******************************************************************************/ /* *_ctl() functions. 
*/

#define READONLY()	do {						\
	if (newp != NULL || newlen != 0) {				\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

#define WRITEONLY()	do {						\
	if (oldp != NULL || oldlenp != NULL) {				\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

#define READ_XOR_WRITE()	do {					\
	if ((oldp != NULL && oldlenp != NULL) && (newp != NULL ||	\
	    newlen != 0)) {						\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

#define READ(v, t)	do {						\
	if (oldp != NULL && oldlenp != NULL) {				\
		if (*oldlenp != sizeof(t)) {				\
			size_t copylen = (sizeof(t) <= *oldlenp)	\
			    ? sizeof(t) : *oldlenp;			\
			memcpy(oldp, (void *)&(v), copylen);		\
			ret = EINVAL;					\
			goto label_return;				\
		}							\
		*(t *)oldp = (v);					\
	}								\
} while (0)

#define WRITE(v, t)	do {						\
	if (newp != NULL) {						\
		if (newlen != sizeof(t)) {				\
			ret = EINVAL;					\
			goto label_return;				\
		}							\
		(v) = *(t *)newp;					\
	}								\
} while (0)

/*
 * There's a lot of code duplication in the following macros due to limitations
 * in how nested cpp macros are expanded.
 */
#define CTL_RO_CLGEN(c, l, n, v, t)					\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	t oldval;							\
									\
	if (!(c))							\
		return (ENOENT);					\
	if (l)								\
		malloc_mutex_lock(&ctl_mtx);				\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	if (l)								\
		malloc_mutex_unlock(&ctl_mtx);				\
	return (ret);							\
}

#define CTL_RO_CGEN(c, n, v, t)						\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	t oldval;							\
									\
	if (!(c))							\
		return (ENOENT);					\
	malloc_mutex_lock(&ctl_mtx);					\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	malloc_mutex_unlock(&ctl_mtx);					\
	return (ret);							\
}

#define CTL_RO_GEN(n, v, t)						\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	t oldval;							\
									\
	malloc_mutex_lock(&ctl_mtx);					\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	malloc_mutex_unlock(&ctl_mtx);					\
	return (ret);							\
}

/*
 * ctl_mtx is not acquired, under the assumption that no pertinent data will
 * mutate during the call.
*/ #define CTL_RO_NL_CGEN(c, n, v, t) \ static int \ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ void *newp, size_t newlen) \ { \ int ret; \ t oldval; \ \ if (!(c)) \ return (ENOENT); \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ return (ret); \ } #define CTL_RO_NL_GEN(n, v, t) \ static int \ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ void *newp, size_t newlen) \ { \ int ret; \ t oldval; \ \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ return (ret); \ } #define CTL_TSD_RO_NL_CGEN(c, n, m, t) \ static int \ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ void *newp, size_t newlen) \ { \ int ret; \ t oldval; \ tsd_t *tsd; \ \ if (!(c)) \ return (ENOENT); \ READONLY(); \ tsd = tsd_fetch(); \ oldval = (m(tsd)); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ return (ret); \ } #define CTL_RO_BOOL_CONFIG_GEN(n) \ static int \ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ void *newp, size_t newlen) \ { \ int ret; \ bool oldval; \ \ READONLY(); \ oldval = n; \ READ(oldval, bool); \ \ ret = 0; \ label_return: \ return (ret); \ } /******************************************************************************/ CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) static int epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; UNUSED uint64_t newval; malloc_mutex_lock(&ctl_mtx); WRITE(newval, uint64_t); if (newp != NULL) ctl_refresh(); READ(ctl_epoch, uint64_t); ret = 0; label_return: malloc_mutex_unlock(&ctl_mtx); return (ret); } /******************************************************************************/ CTL_RO_BOOL_CONFIG_GEN(config_cache_oblivious) CTL_RO_BOOL_CONFIG_GEN(config_debug) CTL_RO_BOOL_CONFIG_GEN(config_fill) CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock) CTL_RO_BOOL_CONFIG_GEN(config_munmap) CTL_RO_BOOL_CONFIG_GEN(config_prof) CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc) CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind) CTL_RO_BOOL_CONFIG_GEN(config_stats) CTL_RO_BOOL_CONFIG_GEN(config_tcache) CTL_RO_BOOL_CONFIG_GEN(config_tls) CTL_RO_BOOL_CONFIG_GEN(config_utrace) CTL_RO_BOOL_CONFIG_GEN(config_valgrind) CTL_RO_BOOL_CONFIG_GEN(config_xmalloc) /******************************************************************************/ CTL_RO_NL_GEN(opt_abort, opt_abort, bool) CTL_RO_NL_GEN(opt_dss, opt_dss, const char *) CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t) CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t) CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t) CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *) CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t) CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool) CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool) CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool) CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool) CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool) CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t) CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *) CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init, opt_prof_thread_active_init, bool) CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, 
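/*
 * (Invocation continues below.)  These opt.* nodes are read-only,
 * lock-free views of the option globals; the config_* guard in the
 * *_CGEN variants makes a node answer ENOENT when the corresponding
 * feature was compiled out.
 */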
opt_lg_prof_sample, size_t) CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool) CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t) CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) /******************************************************************************/ static int thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; tsd_t *tsd; arena_t *oldarena; unsigned newind, oldind; tsd = tsd_fetch(); oldarena = arena_choose(tsd, NULL); if (oldarena == NULL) return (EAGAIN); malloc_mutex_lock(&ctl_mtx); newind = oldind = oldarena->ind; WRITE(newind, unsigned); READ(oldind, unsigned); if (newind != oldind) { arena_t *newarena; if (newind >= ctl_stats.narenas) { /* New arena index is out of range. */ ret = EFAULT; goto label_return; } /* Initialize arena if necessary. */ newarena = arena_get(tsd, newind, true, true); if (newarena == NULL) { ret = EAGAIN; goto label_return; } /* Set new arena/tcache associations. */ arena_migrate(tsd, oldind, newind); if (config_tcache) { tcache_t *tcache = tsd_tcache_get(tsd); if (tcache != NULL) { tcache_arena_reassociate(tcache, oldarena, newarena); } } } ret = 0; label_return: malloc_mutex_unlock(&ctl_mtx); return (ret); } CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get, uint64_t) CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get, uint64_t *) CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get, uint64_t) CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp, tsd_thread_deallocatedp_get, uint64_t *) static int thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; if (!config_tcache) return (ENOENT); oldval = tcache_enabled_get(); if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } tcache_enabled_set(*(bool *)newp); } READ(oldval, bool); ret = 0; label_return: return (ret); } static int thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; if (!config_tcache) return (ENOENT); READONLY(); WRITEONLY(); tcache_flush(); ret = 0; label_return: return (ret); } static int thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; if (!config_prof) return (ENOENT); READ_XOR_WRITE(); if (newp != NULL) { tsd_t *tsd; if (newlen != sizeof(const char *)) { ret = EINVAL; goto label_return; } tsd = tsd_fetch(); if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) != 0) goto label_return; } else { const char *oldname = prof_thread_name_get(); READ(oldname, const char *); } ret = 0; label_return: return (ret); } static int thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; if (!config_prof) return (ENOENT); oldval = prof_thread_active_get(); if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } if (prof_thread_active_set(*(bool *)newp)) { ret = EAGAIN; goto label_return; } } READ(oldval, bool); ret = 0; label_return: return (ret); } /******************************************************************************/ static int tcache_create_ctl(const size_t *mib, size_t miblen, 
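/*
 * (Prototype continues below.)  tcache.create hands back the index of a
 * newly created explicit tcache.  Usage sketch -- hypothetical caller via
 * the public wrappers, not part of this file:
 *
 *   unsigned ind; size_t sz = sizeof(ind);
 *   mallctl("tcache.create", &ind, &sz, NULL, 0);
 *   void *p = mallocx(42, MALLOCX_TCACHE(ind));
 */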
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; tsd_t *tsd; unsigned tcache_ind; if (!config_tcache) return (ENOENT); tsd = tsd_fetch(); malloc_mutex_lock(&ctl_mtx); READONLY(); if (tcaches_create(tsd, &tcache_ind)) { ret = EFAULT; goto label_return; } READ(tcache_ind, unsigned); ret = 0; label_return: malloc_mutex_unlock(&ctl_mtx); return (ret); } static int tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; tsd_t *tsd; unsigned tcache_ind; if (!config_tcache) return (ENOENT); tsd = tsd_fetch(); WRITEONLY(); tcache_ind = UINT_MAX; WRITE(tcache_ind, unsigned); if (tcache_ind == UINT_MAX) { ret = EFAULT; goto label_return; } tcaches_flush(tsd, tcache_ind); ret = 0; label_return: return (ret); } static int tcache_destroy_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; tsd_t *tsd; unsigned tcache_ind; if (!config_tcache) return (ENOENT); tsd = tsd_fetch(); WRITEONLY(); tcache_ind = UINT_MAX; WRITE(tcache_ind, unsigned); if (tcache_ind == UINT_MAX) { ret = EFAULT; goto label_return; } tcaches_destroy(tsd, tcache_ind); ret = 0; label_return: return (ret); } /******************************************************************************/ /* ctl_mutex must be held during execution of this function. */ static void arena_purge(unsigned arena_ind) { tsd_t *tsd; unsigned i; bool refreshed; VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas); tsd = tsd_fetch(); for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) { tarenas[i] = arena_get(tsd, i, false, false); if (tarenas[i] == NULL && !refreshed) { tarenas[i] = arena_get(tsd, i, false, true); refreshed = true; } } if (arena_ind == ctl_stats.narenas) { unsigned i; for (i = 0; i < ctl_stats.narenas; i++) { if (tarenas[i] != NULL) arena_purge_all(tarenas[i]); } } else { assert(arena_ind < ctl_stats.narenas); if (tarenas[arena_ind] != NULL) arena_purge_all(tarenas[arena_ind]); } } static int arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; READONLY(); WRITEONLY(); malloc_mutex_lock(&ctl_mtx); arena_purge(mib[1]); malloc_mutex_unlock(&ctl_mtx); ret = 0; label_return: return (ret); } static int arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; const char *dss = NULL; unsigned arena_ind = mib[1]; dss_prec_t dss_prec_old = dss_prec_limit; dss_prec_t dss_prec = dss_prec_limit; malloc_mutex_lock(&ctl_mtx); WRITE(dss, const char *); if (dss != NULL) { int i; bool match = false; for (i = 0; i < dss_prec_limit; i++) { if (strcmp(dss_prec_names[i], dss) == 0) { dss_prec = i; match = true; break; } } if (!match) { ret = EINVAL; goto label_return; } } if (arena_ind < ctl_stats.narenas) { arena_t *arena = arena_get(tsd_fetch(), arena_ind, false, true); if (arena == NULL || (dss_prec != dss_prec_limit && arena_dss_prec_set(arena, dss_prec))) { ret = EFAULT; goto label_return; } dss_prec_old = arena_dss_prec_get(arena); } else { if (dss_prec != dss_prec_limit && chunk_dss_prec_set(dss_prec)) { ret = EFAULT; goto label_return; } dss_prec_old = chunk_dss_prec_get(); } dss = dss_prec_names[dss_prec_old]; READ(dss, const char *); ret = 0; label_return: malloc_mutex_unlock(&ctl_mtx); return (ret); } static int arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind = mib[1]; arena_t *arena; arena = 
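	/* Fetch the target arena, refreshing this thread's arena cache if stale. */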
arena_get(tsd_fetch(), arena_ind, false, true); if (arena == NULL) { ret = EFAULT; goto label_return; } if (oldp != NULL && oldlenp != NULL) { size_t oldval = arena_lg_dirty_mult_get(arena); READ(oldval, ssize_t); } if (newp != NULL) { if (newlen != sizeof(ssize_t)) { ret = EINVAL; goto label_return; } if (arena_lg_dirty_mult_set(arena, *(ssize_t *)newp)) { ret = EFAULT; goto label_return; } } ret = 0; label_return: return (ret); } static int arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind = mib[1]; arena_t *arena; malloc_mutex_lock(&ctl_mtx); if (arena_ind < narenas_total_get() && (arena = arena_get(tsd_fetch(), arena_ind, false, true)) != NULL) { if (newp != NULL) { chunk_hooks_t old_chunk_hooks, new_chunk_hooks; WRITE(new_chunk_hooks, chunk_hooks_t); old_chunk_hooks = chunk_hooks_set(arena, &new_chunk_hooks); READ(old_chunk_hooks, chunk_hooks_t); } else { chunk_hooks_t old_chunk_hooks = chunk_hooks_get(arena); READ(old_chunk_hooks, chunk_hooks_t); } } else { ret = EFAULT; goto label_return; } ret = 0; label_return: malloc_mutex_unlock(&ctl_mtx); return (ret); } static const ctl_named_node_t * arena_i_index(const size_t *mib, size_t miblen, size_t i) { const ctl_named_node_t * ret; malloc_mutex_lock(&ctl_mtx); if (i > ctl_stats.narenas) { ret = NULL; goto label_return; } ret = super_arena_i_node; label_return: malloc_mutex_unlock(&ctl_mtx); return (ret); } /******************************************************************************/ static int arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned narenas; malloc_mutex_lock(&ctl_mtx); READONLY(); if (*oldlenp != sizeof(unsigned)) { ret = EINVAL; goto label_return; } narenas = ctl_stats.narenas; READ(narenas, unsigned); ret = 0; label_return: malloc_mutex_unlock(&ctl_mtx); return (ret); } static int arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned nread, i; malloc_mutex_lock(&ctl_mtx); READONLY(); if (*oldlenp != ctl_stats.narenas * sizeof(bool)) { ret = EINVAL; nread = (*oldlenp < ctl_stats.narenas * sizeof(bool)) ? 
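		/* Length mismatch: copy as many flags as fit, but still report EINVAL. */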
(*oldlenp / sizeof(bool)) : ctl_stats.narenas; } else { ret = 0; nread = ctl_stats.narenas; } for (i = 0; i < nread; i++) ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized; label_return: malloc_mutex_unlock(&ctl_mtx); return (ret); } static int arenas_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; if (oldp != NULL && oldlenp != NULL) { size_t oldval = arena_lg_dirty_mult_default_get(); READ(oldval, ssize_t); } if (newp != NULL) { if (newlen != sizeof(ssize_t)) { ret = EINVAL; goto label_return; } if (arena_lg_dirty_mult_default_set(*(ssize_t *)newp)) { ret = EFAULT; goto label_return; } } ret = 0; label_return: return (ret); } CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) CTL_RO_NL_GEN(arenas_page, PAGE, size_t) CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t) CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned) CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned) CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t) CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t) CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t) static const ctl_named_node_t * arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i) { if (i > NBINS) return (NULL); return (super_arenas_bin_i_node); } CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned) CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+mib[2]), size_t) static const ctl_named_node_t * arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i) { if (i > nlclasses) return (NULL); return (super_arenas_lrun_i_node); } CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned) CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+mib[2]), size_t) static const ctl_named_node_t * arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i) { if (i > nhclasses) return (NULL); return (super_arenas_hchunk_i_node); } static int arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned narenas; malloc_mutex_lock(&ctl_mtx); READONLY(); if (ctl_grow()) { ret = EAGAIN; goto label_return; } narenas = ctl_stats.narenas - 1; READ(narenas, unsigned); ret = 0; label_return: malloc_mutex_unlock(&ctl_mtx); return (ret); } /******************************************************************************/ static int prof_thread_active_init_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; if (!config_prof) return (ENOENT); if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } oldval = prof_thread_active_init_set(*(bool *)newp); } else oldval = prof_thread_active_init_get(); READ(oldval, bool); ret = 0; label_return: return (ret); } static int prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; if (!config_prof) return (ENOENT); if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } oldval = prof_active_set(*(bool *)newp); } else oldval = prof_active_get(); READ(oldval, bool); ret = 0; label_return: return (ret); } static int prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; const char *filename = NULL; if (!config_prof) return (ENOENT); WRITEONLY(); WRITE(filename, const char *); if (prof_mdump(filename)) { ret = EFAULT; goto label_return; } ret = 0; 
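	/*
	 * Note: prof.dump is write-only; a NULL filename makes prof_mdump()
	 * pick an automatically sequenced <prefix>.<pid>.<seq>.m<mseq>.heap.
	 */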
label_return: return (ret); } static int prof_gdump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; if (!config_prof) return (ENOENT); if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } oldval = prof_gdump_set(*(bool *)newp); } else oldval = prof_gdump_get(); READ(oldval, bool); ret = 0; label_return: return (ret); } static int prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; size_t lg_sample = lg_prof_sample; tsd_t *tsd; if (!config_prof) return (ENOENT); WRITEONLY(); WRITE(lg_sample, size_t); if (lg_sample >= (sizeof(uint64_t) << 3)) lg_sample = (sizeof(uint64_t) << 3) - 1; tsd = tsd_fetch(); prof_reset(tsd, lg_sample); ret = 0; label_return: return (ret); } CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t) CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t) /******************************************************************************/ CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *) CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t) CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t) CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t) CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t) CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t) CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *) CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult, ssize_t) CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned) CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t) CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, ctl_stats.arenas[mib[2]].astats.mapped, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_npurge, ctl_stats.arenas[mib[2]].astats.npurge, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise, ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_purged, ctl_stats.arenas[mib[2]].astats.purged, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_mapped, ctl_stats.arenas[mib[2]].astats.metadata_mapped, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_allocated, ctl_stats.arenas[mib[2]].astats.metadata_allocated, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated, ctl_stats.arenas[mib[2]].allocated_small, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc, ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc, ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests, ctl_stats.arenas[mib[2]].nrequests_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated, ctl_stats.arenas[mib[2]].astats.allocated_large, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc, ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc, ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests, ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated, ctl_stats.arenas[mib[2]].astats.allocated_huge, size_t) CTL_RO_CGEN(config_stats, 
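/*
 * (Invocation continues below.)  All stats.* reads return snapshots that
 * are refreshed only by writing "epoch".  Usage sketch -- hypothetical
 * caller, not part of this file:
 *
 *   uint64_t epoch = 1; size_t esz = sizeof(epoch);
 *   size_t allocated, asz = sizeof(allocated);
 *   mallctl("epoch", &epoch, &esz, &epoch, esz);
 *   mallctl("stats.allocated", &allocated, &asz, NULL, 0);
 */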
stats_arenas_i_huge_nmalloc, ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc, ctl_stats.arenas[mib[2]].astats.ndalloc_huge, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests, ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) /* Intentional. */ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc, ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc, ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests, ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs, ctl_stats.arenas[mib[2]].bstats[mib[4]].curregs, size_t) CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills, ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t) CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes, ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns, ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns, ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns, ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t) static const ctl_named_node_t * stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j) { if (j > NBINS) return (NULL); return (super_stats_arenas_i_bins_j_node); } CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc, ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc, ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests, ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns, ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t) static const ctl_named_node_t * stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j) { if (j > nlclasses) return (NULL); return (super_stats_arenas_i_lruns_j_node); } CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nmalloc, ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_ndalloc, ctl_stats.arenas[mib[2]].hstats[mib[4]].ndalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nrequests, ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, /* Intentional. 
*/ uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks, ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t) static const ctl_named_node_t * stats_arenas_i_hchunks_j_index(const size_t *mib, size_t miblen, size_t j) { if (j > nhclasses) return (NULL); return (super_stats_arenas_i_hchunks_j_node); } static const ctl_named_node_t * stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i) { const ctl_named_node_t * ret; malloc_mutex_lock(&ctl_mtx); if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) { ret = NULL; goto label_return; } ret = super_stats_arenas_i_node; label_return: malloc_mutex_unlock(&ctl_mtx); return (ret); } disque-1.0-rc1/deps/jemalloc/src/extent.c000066400000000000000000000026651264176561100203340ustar00rootroot00000000000000#define JEMALLOC_EXTENT_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ JEMALLOC_INLINE_C size_t extent_quantize(size_t size) { /* * Round down to the nearest chunk size that can actually be requested * during normal huge allocation. */ return (index2size(size2index(size + 1) - 1)); } JEMALLOC_INLINE_C int extent_szad_comp(extent_node_t *a, extent_node_t *b) { int ret; size_t a_qsize = extent_quantize(extent_node_size_get(a)); size_t b_qsize = extent_quantize(extent_node_size_get(b)); /* * Compare based on quantized size rather than size, in order to sort * equally useful extents only by address. */ ret = (a_qsize > b_qsize) - (a_qsize < b_qsize); if (ret == 0) { uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a); uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b); ret = (a_addr > b_addr) - (a_addr < b_addr); } return (ret); } /* Generate red-black tree functions. */ rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link, extent_szad_comp) JEMALLOC_INLINE_C int extent_ad_comp(extent_node_t *a, extent_node_t *b) { uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a); uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b); return ((a_addr > b_addr) - (a_addr < b_addr)); } /* Generate red-black tree functions. */ rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp) disque-1.0-rc1/deps/jemalloc/src/hash.c000066400000000000000000000001121264176561100177310ustar00rootroot00000000000000#define JEMALLOC_HASH_C_ #include "jemalloc/internal/jemalloc_internal.h" disque-1.0-rc1/deps/jemalloc/src/huge.c000066400000000000000000000256601264176561100177550ustar00rootroot00000000000000#define JEMALLOC_HUGE_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ static extent_node_t * huge_node_get(const void *ptr) { extent_node_t *node; node = chunk_lookup(ptr, true); assert(!extent_node_achunk_get(node)); return (node); } static bool huge_node_set(const void *ptr, extent_node_t *node) { assert(extent_node_addr_get(node) == ptr); assert(!extent_node_achunk_get(node)); return (chunk_register(ptr, node)); } static void huge_node_unset(const void *ptr, const extent_node_t *node) { chunk_deregister(ptr, node); } void * huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero, tcache_t *tcache) { size_t usize; usize = s2u(size); if (usize == 0) { /* size_t overflow. 
 */
		return (NULL);
	}

	return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
}

void *
huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
    bool zero, tcache_t *tcache)
{
	void *ret;
	size_t usize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */
	usize = sa2u(size, alignment);
	if (unlikely(usize == 0))
		return (NULL);
	assert(usize >= chunksize);

	/* Allocate an extent node with which to track the chunk. */
	node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
	    CACHELINE, false, tcache, true, arena);
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	arena = arena_choose(tsd, arena);
	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
	    size, alignment, &is_zeroed)) == NULL) {
		idalloctm(tsd, node, tcache, true);
		return (NULL);
	}

	extent_node_init(node, arena, ret, size, is_zeroed, true);

	if (huge_node_set(ret, node)) {
		arena_chunk_dalloc_huge(arena, ret, size);
		idalloctm(tsd, node, tcache, true);
		return (NULL);
	}

	/* Insert node into huge. */
	malloc_mutex_lock(&arena->huge_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->huge, node, ql_link);
	malloc_mutex_unlock(&arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed)
			memset(ret, 0, size);
	} else if (config_fill && unlikely(opt_junk_alloc))
		memset(ret, 0xa5, size);

	return (ret);
}

#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{

	if (config_fill && have_dss && unlikely(opt_junk_free)) {
		/*
		 * Only bother junk filling if the chunk isn't about to be
		 * unmapped.
		 */
		if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
			memset(ptr, 0x5a, usize);
	}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

static void
huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{
	size_t usize, usize_next;
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	bool pre_zeroed, post_zeroed;

	/* Increase usize to incorporate extra. */
	for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
	    <= oldsize; usize = usize_next)
		; /* Do nothing. */

	if (oldsize == usize)
		return;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);

	/* Fill if necessary (shrinking). */
	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
			    ptr, CHUNK_CEILING(oldsize), usize, sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	assert(extent_node_size_get(node) != usize);
	extent_node_size_set(node, usize);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(&arena->huge_mtx);

	arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);

	/* Fill if necessary (growing.
*/ if (oldsize < usize) { if (zero || (config_fill && unlikely(opt_zero))) { if (!pre_zeroed) { memset((void *)((uintptr_t)ptr + oldsize), 0, usize - oldsize); } } else if (config_fill && unlikely(opt_junk_alloc)) { memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize - oldsize); } } } static bool huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize) { extent_node_t *node; arena_t *arena; chunk_hooks_t chunk_hooks; size_t cdiff; bool pre_zeroed, post_zeroed; node = huge_node_get(ptr); arena = extent_node_arena_get(node); pre_zeroed = extent_node_zeroed_get(node); chunk_hooks = chunk_hooks_get(arena); assert(oldsize > usize); /* Split excess chunks. */ cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize), CHUNK_CEILING(usize), cdiff, true, arena->ind)) return (true); if (oldsize > usize) { size_t sdiff = oldsize - usize; if (config_fill && unlikely(opt_junk_free)) { huge_dalloc_junk((void *)((uintptr_t)ptr + usize), sdiff); post_zeroed = false; } else { post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr + usize), CHUNK_CEILING(oldsize), CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff); } } else post_zeroed = pre_zeroed; malloc_mutex_lock(&arena->huge_mtx); /* Update the size of the huge allocation. */ extent_node_size_set(node, usize); /* Update zeroed. */ extent_node_zeroed_set(node, post_zeroed); malloc_mutex_unlock(&arena->huge_mtx); /* Zap the excess chunks. */ arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize); return (false); } static bool huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) { extent_node_t *node; arena_t *arena; bool is_zeroed_subchunk, is_zeroed_chunk; node = huge_node_get(ptr); arena = extent_node_arena_get(node); malloc_mutex_lock(&arena->huge_mtx); is_zeroed_subchunk = extent_node_zeroed_get(node); malloc_mutex_unlock(&arena->huge_mtx); /* * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so * that it is possible to make correct junk/zero fill decisions below. */ is_zeroed_chunk = zero; if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize, &is_zeroed_chunk)) return (true); malloc_mutex_lock(&arena->huge_mtx); /* Update the size of the huge allocation. */ extent_node_size_set(node, usize); malloc_mutex_unlock(&arena->huge_mtx); if (zero || (config_fill && unlikely(opt_zero))) { if (!is_zeroed_subchunk) { memset((void *)((uintptr_t)ptr + oldsize), 0, CHUNK_CEILING(oldsize) - oldsize); } if (!is_zeroed_chunk) { memset((void *)((uintptr_t)ptr + CHUNK_CEILING(oldsize)), 0, usize - CHUNK_CEILING(oldsize)); } } else if (config_fill && unlikely(opt_junk_alloc)) { memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize - oldsize); } return (false); } bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero) { assert(s2u(oldsize) == oldsize); /* Both allocations must be huge to avoid a move. */ if (oldsize < chunksize || usize_max < chunksize) return (true); if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) { /* Attempt to expand the allocation in-place. */ if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max, zero)) return (false); /* Try again, this time with usize_min. */ if (usize_min < usize_max && CHUNK_CEILING(usize_min) > CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr, oldsize, usize_min, zero)) return (false); } /* * Avoid moving the allocation if the existing chunk size accommodates * the new size. 
*/ if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min) && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) { huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max, zero); return (false); } /* Attempt to shrink the allocation in-place. */ if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) return (huge_ralloc_no_move_shrink(ptr, oldsize, usize_max)); return (true); } static void * huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache) { if (alignment <= chunksize) return (huge_malloc(tsd, arena, usize, zero, tcache)); return (huge_palloc(tsd, arena, usize, alignment, zero, tcache)); } void * huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize, size_t alignment, bool zero, tcache_t *tcache) { void *ret; size_t copysize; /* Try to avoid moving the allocation. */ if (!huge_ralloc_no_move(ptr, oldsize, usize, usize, zero)) return (ptr); /* * usize and oldsize are different enough that we need to use a * different size class. In that case, fall back to allocating new * space and copying. */ ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero, tcache); if (ret == NULL) return (NULL); copysize = (usize < oldsize) ? usize : oldsize; memcpy(ret, ptr, copysize); isqalloc(tsd, ptr, oldsize, tcache); return (ret); } void huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache) { extent_node_t *node; arena_t *arena; node = huge_node_get(ptr); arena = extent_node_arena_get(node); huge_node_unset(ptr, node); malloc_mutex_lock(&arena->huge_mtx); ql_remove(&arena->huge, node, ql_link); malloc_mutex_unlock(&arena->huge_mtx); huge_dalloc_junk(extent_node_addr_get(node), extent_node_size_get(node)); arena_chunk_dalloc_huge(extent_node_arena_get(node), extent_node_addr_get(node), extent_node_size_get(node)); idalloctm(tsd, node, tcache, true); } arena_t * huge_aalloc(const void *ptr) { return (extent_node_arena_get(huge_node_get(ptr))); } size_t huge_salloc(const void *ptr) { size_t size; extent_node_t *node; arena_t *arena; node = huge_node_get(ptr); arena = extent_node_arena_get(node); malloc_mutex_lock(&arena->huge_mtx); size = extent_node_size_get(node); malloc_mutex_unlock(&arena->huge_mtx); return (size); } prof_tctx_t * huge_prof_tctx_get(const void *ptr) { prof_tctx_t *tctx; extent_node_t *node; arena_t *arena; node = huge_node_get(ptr); arena = extent_node_arena_get(node); malloc_mutex_lock(&arena->huge_mtx); tctx = extent_node_prof_tctx_get(node); malloc_mutex_unlock(&arena->huge_mtx); return (tctx); } void huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx) { extent_node_t *node; arena_t *arena; node = huge_node_get(ptr); arena = extent_node_arena_get(node); malloc_mutex_lock(&arena->huge_mtx); extent_node_prof_tctx_set(node, tctx); malloc_mutex_unlock(&arena->huge_mtx); } void huge_prof_tctx_reset(const void *ptr) { huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U); } disque-1.0-rc1/deps/jemalloc/src/jemalloc.c000066400000000000000000001731151264176561100206120ustar00rootroot00000000000000#define JEMALLOC_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ /* Data. */ /* Runtime configuration options. 
*/ const char *je_malloc_conf JEMALLOC_ATTR(weak); bool opt_abort = #ifdef JEMALLOC_DEBUG true #else false #endif ; const char *opt_junk = #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) "true" #else "false" #endif ; bool opt_junk_alloc = #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) true #else false #endif ; bool opt_junk_free = #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) true #else false #endif ; size_t opt_quarantine = ZU(0); bool opt_redzone = false; bool opt_utrace = false; bool opt_xmalloc = false; bool opt_zero = false; size_t opt_narenas = 0; /* Initialized to true if the process is running inside Valgrind. */ bool in_valgrind; unsigned ncpus; /* Protects arenas initialization (arenas, narenas_total). */ static malloc_mutex_t arenas_lock; /* * Arenas that are used to service external requests. Not all elements of the * arenas array are necessarily used; arenas are created lazily as needed. * * arenas[0..narenas_auto) are used for automatic multiplexing of threads and * arenas. arenas[narenas_auto..narenas_total) are only used if the application * takes some action to create them and allocate from them. */ static arena_t **arenas; static unsigned narenas_total; static arena_t *a0; /* arenas[0]; read-only after initialization. */ static unsigned narenas_auto; /* Read-only after initialization. */ typedef enum { malloc_init_uninitialized = 3, malloc_init_a0_initialized = 2, malloc_init_recursible = 1, malloc_init_initialized = 0 /* Common case --> jnz. */ } malloc_init_t; static malloc_init_t malloc_init_state = malloc_init_uninitialized; JEMALLOC_ALIGNED(CACHELINE) const size_t index2size_tab[NSIZES] = { #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ ((ZU(1)<= 0x0600 static malloc_mutex_t init_lock = SRWLOCK_INIT; #else static malloc_mutex_t init_lock; static bool init_lock_initialized = false; JEMALLOC_ATTR(constructor) static void WINAPI _init_init_lock(void) { /* If another constructor in the same binary is using mallctl to * e.g. setup chunk hooks, it may end up running before this one, * and malloc_init_hard will crash trying to lock the uninitialized * lock. So we force an initialization of the lock in * malloc_init_hard as well. We don't try to care about atomicity * of the accessed to the init_lock_initialized boolean, since it * really only matters early in the process creation, before any * separate thread normally starts doing anything. */ if (!init_lock_initialized) malloc_mutex_init(&init_lock); init_lock_initialized = true; } #ifdef _MSC_VER # pragma section(".CRT$XCU", read) JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) static const void (WINAPI *init_init_lock)(void) = _init_init_lock; #endif #endif #else static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; #endif typedef struct { void *p; /* Input pointer (as in realloc(p, s)). */ size_t s; /* Request size. */ void *r; /* Result pointer. */ } malloc_utrace_t; #ifdef JEMALLOC_UTRACE # define UTRACE(a, b, c) do { \ if (unlikely(opt_utrace)) { \ int utrace_serrno = errno; \ malloc_utrace_t ut; \ ut.p = (a); \ ut.s = (b); \ ut.r = (c); \ utrace(&ut, sizeof(ut)); \ errno = utrace_serrno; \ } \ } while (0) #else # define UTRACE(a, b, c) #endif /******************************************************************************/ /* * Function prototypes for static functions that are referenced prior to * definition. 
*/ static bool malloc_init_hard_a0(void); static bool malloc_init_hard(void); /******************************************************************************/ /* * Begin miscellaneous support functions. */ JEMALLOC_ALWAYS_INLINE_C bool malloc_initialized(void) { return (malloc_init_state == malloc_init_initialized); } JEMALLOC_ALWAYS_INLINE_C void malloc_thread_init(void) { /* * TSD initialization can't be safely done as a side effect of * deallocation, because it is possible for a thread to do nothing but * deallocate its TLS data via free(), in which case writing to TLS * would cause write-after-free memory corruption. The quarantine * facility *only* gets used as a side effect of deallocation, so make * a best effort attempt at initializing its TSD by hooking all * allocation events. */ if (config_fill && unlikely(opt_quarantine)) quarantine_alloc_hook(); } JEMALLOC_ALWAYS_INLINE_C bool malloc_init_a0(void) { if (unlikely(malloc_init_state == malloc_init_uninitialized)) return (malloc_init_hard_a0()); return (false); } JEMALLOC_ALWAYS_INLINE_C bool malloc_init(void) { if (unlikely(!malloc_initialized()) && malloc_init_hard()) return (true); malloc_thread_init(); return (false); } /* * The a0*() functions are used instead of i[mcd]alloc() in situations that * cannot tolerate TLS variable access. */ arena_t * a0get(void) { assert(a0 != NULL); return (a0); } static void * a0ialloc(size_t size, bool zero, bool is_metadata) { if (unlikely(malloc_init_a0())) return (NULL); return (iallocztm(NULL, size, zero, false, is_metadata, a0get())); } static void a0idalloc(void *ptr, bool is_metadata) { idalloctm(NULL, ptr, false, is_metadata); } void * a0malloc(size_t size) { return (a0ialloc(size, false, true)); } void a0dalloc(void *ptr) { a0idalloc(ptr, true); } /* * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-senstive * situations that cannot tolerate TLS variable access (TLS allocation and very * early internal data structure initialization). */ void * bootstrap_malloc(size_t size) { if (unlikely(size == 0)) size = 1; return (a0ialloc(size, false, false)); } void * bootstrap_calloc(size_t num, size_t size) { size_t num_size; num_size = num * size; if (unlikely(num_size == 0)) { assert(num == 0 || size == 0); num_size = 1; } return (a0ialloc(num_size, true, false)); } void bootstrap_free(void *ptr) { if (unlikely(ptr == NULL)) return; a0idalloc(ptr, false); } /* Create a new arena and insert it into the arenas array at index ind. */ static arena_t * arena_init_locked(unsigned ind) { arena_t *arena; /* Expand arenas if necessary. */ assert(ind <= narenas_total); if (ind > MALLOCX_ARENA_MAX) return (NULL); if (ind == narenas_total) { unsigned narenas_new = narenas_total + 1; arena_t **arenas_new = (arena_t **)a0malloc(CACHELINE_CEILING(narenas_new * sizeof(arena_t *))); if (arenas_new == NULL) return (NULL); memcpy(arenas_new, arenas, narenas_total * sizeof(arena_t *)); arenas_new[ind] = NULL; /* * Deallocate only if arenas came from a0malloc() (not * base_alloc()). */ if (narenas_total != narenas_auto) a0dalloc(arenas); arenas = arenas_new; narenas_total = narenas_new; } /* * Another thread may have already initialized arenas[ind] if it's an * auto arena. */ arena = arenas[ind]; if (arena != NULL) { assert(ind < narenas_auto); return (arena); } /* Actually initialize the arena. 
*/ arena = arenas[ind] = arena_new(ind); return (arena); } arena_t * arena_init(unsigned ind) { arena_t *arena; malloc_mutex_lock(&arenas_lock); arena = arena_init_locked(ind); malloc_mutex_unlock(&arenas_lock); return (arena); } unsigned narenas_total_get(void) { unsigned narenas; malloc_mutex_lock(&arenas_lock); narenas = narenas_total; malloc_mutex_unlock(&arenas_lock); return (narenas); } static void arena_bind_locked(tsd_t *tsd, unsigned ind) { arena_t *arena; arena = arenas[ind]; arena->nthreads++; if (tsd_nominal(tsd)) tsd_arena_set(tsd, arena); } static void arena_bind(tsd_t *tsd, unsigned ind) { malloc_mutex_lock(&arenas_lock); arena_bind_locked(tsd, ind); malloc_mutex_unlock(&arenas_lock); } void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) { arena_t *oldarena, *newarena; malloc_mutex_lock(&arenas_lock); oldarena = arenas[oldind]; newarena = arenas[newind]; oldarena->nthreads--; newarena->nthreads++; malloc_mutex_unlock(&arenas_lock); tsd_arena_set(tsd, newarena); } unsigned arena_nbound(unsigned ind) { unsigned nthreads; malloc_mutex_lock(&arenas_lock); nthreads = arenas[ind]->nthreads; malloc_mutex_unlock(&arenas_lock); return (nthreads); } static void arena_unbind(tsd_t *tsd, unsigned ind) { arena_t *arena; malloc_mutex_lock(&arenas_lock); arena = arenas[ind]; arena->nthreads--; malloc_mutex_unlock(&arenas_lock); tsd_arena_set(tsd, NULL); } arena_t * arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing) { arena_t *arena; arena_t **arenas_cache = tsd_arenas_cache_get(tsd); unsigned narenas_cache = tsd_narenas_cache_get(tsd); unsigned narenas_actual = narenas_total_get(); /* Deallocate old cache if it's too small. */ if (arenas_cache != NULL && narenas_cache < narenas_actual) { a0dalloc(arenas_cache); arenas_cache = NULL; narenas_cache = 0; tsd_arenas_cache_set(tsd, arenas_cache); tsd_narenas_cache_set(tsd, narenas_cache); } /* Allocate cache if it's missing. */ if (arenas_cache == NULL) { bool *arenas_cache_bypassp = tsd_arenas_cache_bypassp_get(tsd); assert(ind < narenas_actual || !init_if_missing); narenas_cache = (ind < narenas_actual) ? narenas_actual : ind+1; if (tsd_nominal(tsd) && !*arenas_cache_bypassp) { *arenas_cache_bypassp = true; arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) * narenas_cache); *arenas_cache_bypassp = false; } if (arenas_cache == NULL) { /* * This function must always tell the truth, even if * it's slow, so don't let OOM, thread cleanup (note * tsd_nominal check), nor recursive allocation * avoidance (note arenas_cache_bypass check) get in the * way. */ if (ind >= narenas_actual) return (NULL); malloc_mutex_lock(&arenas_lock); arena = arenas[ind]; malloc_mutex_unlock(&arenas_lock); return (arena); } assert(tsd_nominal(tsd) && !*arenas_cache_bypassp); tsd_arenas_cache_set(tsd, arenas_cache); tsd_narenas_cache_set(tsd, narenas_cache); } /* * Copy to cache. It's possible that the actual number of arenas has * increased since narenas_total_get() was called above, but that causes * no correctness issues unless two threads concurrently execute the * arenas.extend mallctl, which we trust mallctl synchronization to * prevent. */ malloc_mutex_lock(&arenas_lock); memcpy(arenas_cache, arenas, sizeof(arena_t *) * narenas_actual); malloc_mutex_unlock(&arenas_lock); if (narenas_cache > narenas_actual) { memset(&arenas_cache[narenas_actual], 0, sizeof(arena_t *) * (narenas_cache - narenas_actual)); } /* Read the refreshed cache, and init the arena if necessary. 
*/ arena = arenas_cache[ind]; if (init_if_missing && arena == NULL) arena = arenas_cache[ind] = arena_init(ind); return (arena); } /* Slow path, called only by arena_choose(). */ arena_t * arena_choose_hard(tsd_t *tsd) { arena_t *ret; if (narenas_auto > 1) { unsigned i, choose, first_null; choose = 0; first_null = narenas_auto; malloc_mutex_lock(&arenas_lock); assert(a0get() != NULL); for (i = 1; i < narenas_auto; i++) { if (arenas[i] != NULL) { /* * Choose the first arena that has the lowest * number of threads assigned to it. */ if (arenas[i]->nthreads < arenas[choose]->nthreads) choose = i; } else if (first_null == narenas_auto) { /* * Record the index of the first uninitialized * arena, in case all extant arenas are in use. * * NB: It is possible for there to be * discontinuities in terms of initialized * versus uninitialized arenas, due to the * "thread.arena" mallctl. */ first_null = i; } } if (arenas[choose]->nthreads == 0 || first_null == narenas_auto) { /* * Use an unloaded arena, or the least loaded arena if * all arenas are already initialized. */ ret = arenas[choose]; } else { /* Initialize a new arena. */ choose = first_null; ret = arena_init_locked(choose); if (ret == NULL) { malloc_mutex_unlock(&arenas_lock); return (NULL); } } arena_bind_locked(tsd, choose); malloc_mutex_unlock(&arenas_lock); } else { ret = a0get(); arena_bind(tsd, 0); } return (ret); } void thread_allocated_cleanup(tsd_t *tsd) { /* Do nothing. */ } void thread_deallocated_cleanup(tsd_t *tsd) { /* Do nothing. */ } void arena_cleanup(tsd_t *tsd) { arena_t *arena; arena = tsd_arena_get(tsd); if (arena != NULL) arena_unbind(tsd, arena->ind); } void arenas_cache_cleanup(tsd_t *tsd) { arena_t **arenas_cache; arenas_cache = tsd_arenas_cache_get(tsd); if (arenas_cache != NULL) { tsd_arenas_cache_set(tsd, NULL); a0dalloc(arenas_cache); } } void narenas_cache_cleanup(tsd_t *tsd) { /* Do nothing. */ } void arenas_cache_bypass_cleanup(tsd_t *tsd) { /* Do nothing. */ } static void stats_print_atexit(void) { if (config_tcache && config_stats) { unsigned narenas, i; /* * Merge stats from extant threads. This is racy, since * individual threads do not lock when recording tcache stats * events. As a consequence, the final stats may be slightly * out of date by the time they are reported, if other threads * continue to allocate. */ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { arena_t *arena = arenas[i]; if (arena != NULL) { tcache_t *tcache; /* * tcache_stats_merge() locks bins, so if any * code is introduced that acquires both arena * and bin locks in the opposite order, * deadlocks may result. */ malloc_mutex_lock(&arena->lock); ql_foreach(tcache, &arena->tcache_ql, link) { tcache_stats_merge(tcache, arena); } malloc_mutex_unlock(&arena->lock); } } } je_malloc_stats_print(NULL, NULL, NULL); } /* * End miscellaneous support functions. */ /******************************************************************************/ /* * Begin initialization functions. */ #ifndef JEMALLOC_HAVE_SECURE_GETENV static char * secure_getenv(const char *name) { # ifdef JEMALLOC_HAVE_ISSETUGID if (issetugid() != 0) return (NULL); # endif return (getenv(name)); } #endif static unsigned malloc_ncpus(void) { long result; #ifdef _WIN32 SYSTEM_INFO si; GetSystemInfo(&si); result = si.dwNumberOfProcessors; #else result = sysconf(_SC_NPROCESSORS_ONLN); #endif return ((result == -1) ? 
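	    /* sysconf() may fail with -1; conservatively assume one CPU. */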
1 : (unsigned)result); } static bool malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, char const **v_p, size_t *vlen_p) { bool accept; const char *opts = *opts_p; *k_p = opts; for (accept = false; !accept;) { switch (*opts) { case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '_': opts++; break; case ':': opts++; *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; *v_p = opts; accept = true; break; case '\0': if (opts != *opts_p) { malloc_write(": Conf string ends " "with key\n"); } return (true); default: malloc_write(": Malformed conf string\n"); return (true); } } for (accept = false; !accept;) { switch (*opts) { case ',': opts++; /* * Look ahead one character here, because the next time * this function is called, it will assume that end of * input has been cleanly reached if no input remains, * but we have optimistically already consumed the * comma if one exists. */ if (*opts == '\0') { malloc_write(": Conf string ends " "with comma\n"); } *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; accept = true; break; case '\0': *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; accept = true; break; default: opts++; break; } } *opts_p = opts; return (false); } static void malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, size_t vlen) { malloc_printf(": %s: %.*s:%.*s\n", msg, (int)klen, k, (int)vlen, v); } static void malloc_conf_init(void) { unsigned i; char buf[PATH_MAX + 1]; const char *opts, *k, *v; size_t klen, vlen; /* * Automatically configure valgrind before processing options. The * valgrind option remains in jemalloc 3.x for compatibility reasons. */ if (config_valgrind) { in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false; if (config_fill && unlikely(in_valgrind)) { opt_junk = "false"; opt_junk_alloc = false; opt_junk_free = false; assert(!opt_zero); opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT; opt_redzone = true; } if (config_tcache && unlikely(in_valgrind)) opt_tcache = false; } for (i = 0; i < 3; i++) { /* Get runtime configuration. */ switch (i) { case 0: if (je_malloc_conf != NULL) { /* * Use options that were compiled into the * program. */ opts = je_malloc_conf; } else { /* No configuration specified. */ buf[0] = '\0'; opts = buf; } break; case 1: { int linklen = 0; #ifndef _WIN32 int saved_errno = errno; const char *linkname = # ifdef JEMALLOC_PREFIX "/etc/"JEMALLOC_PREFIX"malloc.conf" # else "/etc/malloc.conf" # endif ; /* * Try to use the contents of the "/etc/malloc.conf" * symbolic link's name. */ linklen = readlink(linkname, buf, sizeof(buf) - 1); if (linklen == -1) { /* No configuration specified. */ linklen = 0; /* Restore errno. 
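 *
 * Editorial aside on malloc_conf_next() above (illustrative only): the
 * parser walks comma-separated key:value pairs, so a setting such as
 *
 *   MALLOC_CONF="narenas:4,lg_chunk:22,stats_print:true"
 *
 * is consumed as the pairs ("narenas","4"), ("lg_chunk","22") and
 * ("stats_print","true"), each then dispatched by the CONF_HANDLE_*
 * macros further below.
 *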
*/ set_errno(saved_errno); } #endif buf[linklen] = '\0'; opts = buf; break; } case 2: { const char *envname = #ifdef JEMALLOC_PREFIX JEMALLOC_CPREFIX"MALLOC_CONF" #else "MALLOC_CONF" #endif ; if ((opts = secure_getenv(envname)) != NULL) { /* * Do nothing; opts is already initialized to * the value of the MALLOC_CONF environment * variable. */ } else { /* No configuration specified. */ buf[0] = '\0'; opts = buf; } break; } default: not_reached(); buf[0] = '\0'; opts = buf; } while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, &vlen)) { #define CONF_MATCH(n) \ (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) #define CONF_MATCH_VALUE(n) \ (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0) #define CONF_HANDLE_BOOL(o, n, cont) \ if (CONF_MATCH(n)) { \ if (CONF_MATCH_VALUE("true")) \ o = true; \ else if (CONF_MATCH_VALUE("false")) \ o = false; \ else { \ malloc_conf_error( \ "Invalid conf value", \ k, klen, v, vlen); \ } \ if (cont) \ continue; \ } #define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \ if (CONF_MATCH(n)) { \ uintmax_t um; \ char *end; \ \ set_errno(0); \ um = malloc_strtoumax(v, &end, 0); \ if (get_errno() != 0 || (uintptr_t)end -\ (uintptr_t)v != vlen) { \ malloc_conf_error( \ "Invalid conf value", \ k, klen, v, vlen); \ } else if (clip) { \ if ((min) != 0 && um < (min)) \ o = (min); \ else if (um > (max)) \ o = (max); \ else \ o = um; \ } else { \ if (((min) != 0 && um < (min)) \ || um > (max)) { \ malloc_conf_error( \ "Out-of-range " \ "conf value", \ k, klen, v, vlen); \ } else \ o = um; \ } \ continue; \ } #define CONF_HANDLE_SSIZE_T(o, n, min, max) \ if (CONF_MATCH(n)) { \ long l; \ char *end; \ \ set_errno(0); \ l = strtol(v, &end, 0); \ if (get_errno() != 0 || (uintptr_t)end -\ (uintptr_t)v != vlen) { \ malloc_conf_error( \ "Invalid conf value", \ k, klen, v, vlen); \ } else if (l < (ssize_t)(min) || l > \ (ssize_t)(max)) { \ malloc_conf_error( \ "Out-of-range conf value", \ k, klen, v, vlen); \ } else \ o = l; \ continue; \ } #define CONF_HANDLE_CHAR_P(o, n, d) \ if (CONF_MATCH(n)) { \ size_t cpylen = (vlen <= \ sizeof(o)-1) ? vlen : \ sizeof(o)-1; \ strncpy(o, v, cpylen); \ o[cpylen] = '\0'; \ continue; \ } CONF_HANDLE_BOOL(opt_abort, "abort", true) /* * Chunks always require at least one header page, * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and * possibly an additional page in the presence of * redzones. In order to simplify options processing, * use a conservative bound that accommodates all these * constraints. */ CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE + LG_SIZE_CLASS_GROUP + (config_fill ? 
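/*
 * Editorial worked example (values assumed, not taken from this build):
 * with LG_PAGE == 12 and LG_SIZE_CLASS_GROUP == 2, a fill-enabled build
 * clamps lg_chunk to at least 12 + 2 + 2 == 16, i.e. chunks of
 * 2^16 == 64 KiB or larger; without fill the floor is 12 + 2 + 1 == 15.
 */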
2 : 1), (sizeof(size_t) << 3) - 1, true) if (strncmp("dss", k, klen) == 0) { int i; bool match = false; for (i = 0; i < dss_prec_limit; i++) { if (strncmp(dss_prec_names[i], v, vlen) == 0) { if (chunk_dss_prec_set(i)) { malloc_conf_error( "Error setting dss", k, klen, v, vlen); } else { opt_dss = dss_prec_names[i]; match = true; break; } } } if (!match) { malloc_conf_error("Invalid conf value", k, klen, v, vlen); } continue; } CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1, SIZE_T_MAX, false) CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult", -1, (sizeof(size_t) << 3) - 1) CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true) if (config_fill) { if (CONF_MATCH("junk")) { if (CONF_MATCH_VALUE("true")) { opt_junk = "true"; opt_junk_alloc = opt_junk_free = true; } else if (CONF_MATCH_VALUE("false")) { opt_junk = "false"; opt_junk_alloc = opt_junk_free = false; } else if (CONF_MATCH_VALUE("alloc")) { opt_junk = "alloc"; opt_junk_alloc = true; opt_junk_free = false; } else if (CONF_MATCH_VALUE("free")) { opt_junk = "free"; opt_junk_alloc = false; opt_junk_free = true; } else { malloc_conf_error( "Invalid conf value", k, klen, v, vlen); } continue; } CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine", 0, SIZE_T_MAX, false) CONF_HANDLE_BOOL(opt_redzone, "redzone", true) CONF_HANDLE_BOOL(opt_zero, "zero", true) } if (config_utrace) { CONF_HANDLE_BOOL(opt_utrace, "utrace", true) } if (config_xmalloc) { CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true) } if (config_tcache) { CONF_HANDLE_BOOL(opt_tcache, "tcache", !config_valgrind || !in_valgrind) if (CONF_MATCH("tcache")) { assert(config_valgrind && in_valgrind); if (opt_tcache) { opt_tcache = false; malloc_conf_error( "tcache cannot be enabled " "while running inside Valgrind", k, klen, v, vlen); } continue; } CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max", -1, (sizeof(size_t) << 3) - 1) } if (config_prof) { CONF_HANDLE_BOOL(opt_prof, "prof", true) CONF_HANDLE_CHAR_P(opt_prof_prefix, "prof_prefix", "jeprof") CONF_HANDLE_BOOL(opt_prof_active, "prof_active", true) CONF_HANDLE_BOOL(opt_prof_thread_active_init, "prof_thread_active_init", true) CONF_HANDLE_SIZE_T(opt_lg_prof_sample, "lg_prof_sample", 0, (sizeof(uint64_t) << 3) - 1, true) CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum", true) CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, "lg_prof_interval", -1, (sizeof(uint64_t) << 3) - 1) CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump", true) CONF_HANDLE_BOOL(opt_prof_final, "prof_final", true) CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak", true) } malloc_conf_error("Invalid conf pair", k, klen, v, vlen); #undef CONF_MATCH #undef CONF_HANDLE_BOOL #undef CONF_HANDLE_SIZE_T #undef CONF_HANDLE_SSIZE_T #undef CONF_HANDLE_CHAR_P } } } /* init_lock must be held. */ static bool malloc_init_hard_needed(void) { if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == malloc_init_recursible)) { /* * Another thread initialized the allocator before this one * acquired init_lock, or this thread is the initializing * thread, and it is recursively allocating. */ return (false); } #ifdef JEMALLOC_THREADED_INIT if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { /* Busy-wait until the initializing thread completes. */ do { malloc_mutex_unlock(&init_lock); CPU_SPINWAIT; malloc_mutex_lock(&init_lock); } while (!malloc_initialized()); return (false); } #endif return (true); } /* init_lock must be held. 
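 *
 * (Editorial aside on the "junk" option dispatched earlier in
 * malloc_conf_init(), illustrative only: it is a four-way switch, e.g.
 * MALLOC_CONF="junk:alloc" poisons memory only as it is handed out,
 * "junk:free" only as it is returned, "junk:true" does both and
 * "junk:false" neither, exactly mirroring the CONF_MATCH_VALUE cases
 * above.)
 *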
*/ static bool malloc_init_hard_a0_locked(void) { malloc_initializer = INITIALIZER; if (config_prof) prof_boot0(); malloc_conf_init(); if (opt_stats_print) { /* Print statistics at exit. */ if (atexit(stats_print_atexit) != 0) { malloc_write(": Error in atexit()\n"); if (opt_abort) abort(); } } if (base_boot()) return (true); if (chunk_boot()) return (true); if (ctl_boot()) return (true); if (config_prof) prof_boot1(); if (arena_boot()) return (true); if (config_tcache && tcache_boot()) return (true); if (malloc_mutex_init(&arenas_lock)) return (true); /* * Create enough scaffolding to allow recursive allocation in * malloc_ncpus(). */ narenas_total = narenas_auto = 1; arenas = &a0; memset(arenas, 0, sizeof(arena_t *) * narenas_auto); /* * Initialize one arena here. The rest are lazily created in * arena_choose_hard(). */ if (arena_init(0) == NULL) return (true); malloc_init_state = malloc_init_a0_initialized; return (false); } static bool malloc_init_hard_a0(void) { bool ret; malloc_mutex_lock(&init_lock); ret = malloc_init_hard_a0_locked(); malloc_mutex_unlock(&init_lock); return (ret); } /* * Initialize data structures which may trigger recursive allocation. * * init_lock must be held. */ static void malloc_init_hard_recursible(void) { malloc_init_state = malloc_init_recursible; malloc_mutex_unlock(&init_lock); ncpus = malloc_ncpus(); #if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \ && !defined(_WIN32) && !defined(__native_client__)) /* LinuxThreads's pthread_atfork() allocates. */ if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, jemalloc_postfork_child) != 0) { malloc_write(": Error in pthread_atfork()\n"); if (opt_abort) abort(); } #endif malloc_mutex_lock(&init_lock); } /* init_lock must be held. */ static bool malloc_init_hard_finish(void) { if (mutex_boot()) return (true); if (opt_narenas == 0) { /* * For SMP systems, create more than one arena per CPU by * default. */ if (ncpus > 1) opt_narenas = ncpus << 2; else opt_narenas = 1; } narenas_auto = opt_narenas; /* * Make sure that the arenas array can be allocated. In practice, this * limit is enough to allow the allocator to function, but the ctl * machinery will fail to allocate memory at far lower limits. */ if (narenas_auto > chunksize / sizeof(arena_t *)) { narenas_auto = chunksize / sizeof(arena_t *); malloc_printf(": Reducing narenas to limit (%d)\n", narenas_auto); } narenas_total = narenas_auto; /* Allocate and initialize arenas. */ arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total); if (arenas == NULL) return (true); /* * Zero the array. In practice, this should always be pre-zeroed, * since it was just mmap()ed, but let's be sure. */ memset(arenas, 0, sizeof(arena_t *) * narenas_total); /* Copy the pointer to the one arena that was already initialized. 
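 *
 * Editorial worked example for the sizing above (values assumed): on an
 * 8-CPU machine with opt_narenas left at 0, the default becomes
 * ncpus << 2 == 32 arenas; with 2 MiB chunks and 8-byte pointers the
 * cap chunksize / sizeof(arena_t *) == 262144 is far larger, so the
 * clamp only bites for pathological narenas settings.
 *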
*/ arenas[0] = a0; malloc_init_state = malloc_init_initialized; return (false); } static bool malloc_init_hard(void) { #if defined(_WIN32) && _WIN32_WINNT < 0x0600 _init_init_lock(); #endif malloc_mutex_lock(&init_lock); if (!malloc_init_hard_needed()) { malloc_mutex_unlock(&init_lock); return (false); } if (malloc_init_state != malloc_init_a0_initialized && malloc_init_hard_a0_locked()) { malloc_mutex_unlock(&init_lock); return (true); } if (malloc_tsd_boot0()) { malloc_mutex_unlock(&init_lock); return (true); } if (config_prof && prof_boot2()) { malloc_mutex_unlock(&init_lock); return (true); } malloc_init_hard_recursible(); if (malloc_init_hard_finish()) { malloc_mutex_unlock(&init_lock); return (true); } malloc_mutex_unlock(&init_lock); malloc_tsd_boot1(); return (false); } /* * End initialization functions. */ /******************************************************************************/ /* * Begin malloc(3)-compatible functions. */ static void * imalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) { void *p; if (tctx == NULL) return (NULL); if (usize <= SMALL_MAXCLASS) { p = imalloc(tsd, LARGE_MINCLASS); if (p == NULL) return (NULL); arena_prof_promoted(p, usize); } else p = imalloc(tsd, usize); return (p); } JEMALLOC_ALWAYS_INLINE_C void * imalloc_prof(tsd_t *tsd, size_t usize) { void *p; prof_tctx_t *tctx; tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) p = imalloc_prof_sample(tsd, usize, tctx); else p = imalloc(tsd, usize); if (unlikely(p == NULL)) { prof_alloc_rollback(tsd, tctx, true); return (NULL); } prof_malloc(p, usize, tctx); return (p); } JEMALLOC_ALWAYS_INLINE_C void * imalloc_body(size_t size, tsd_t **tsd, size_t *usize) { if (unlikely(malloc_init())) return (NULL); *tsd = tsd_fetch(); if (config_prof && opt_prof) { *usize = s2u(size); if (unlikely(*usize == 0)) return (NULL); return (imalloc_prof(*tsd, *usize)); } if (config_stats || (config_valgrind && unlikely(in_valgrind))) *usize = s2u(size); return (imalloc(*tsd, size)); } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) je_malloc(size_t size) { void *ret; tsd_t *tsd; size_t usize JEMALLOC_CC_SILENCE_INIT(0); if (size == 0) size = 1; ret = imalloc_body(size, &tsd, &usize); if (unlikely(ret == NULL)) { if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write(": Error in malloc(): " "out of memory\n"); abort(); } set_errno(ENOMEM); } if (config_stats && likely(ret != NULL)) { assert(usize == isalloc(ret, config_prof)); *tsd_thread_allocatedp_get(tsd) += usize; } UTRACE(0, size, ret); JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false); return (ret); } static void * imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize, prof_tctx_t *tctx) { void *p; if (tctx == NULL) return (NULL); if (usize <= SMALL_MAXCLASS) { assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS); p = ipalloc(tsd, LARGE_MINCLASS, alignment, false); if (p == NULL) return (NULL); arena_prof_promoted(p, usize); } else p = ipalloc(tsd, usize, alignment, false); return (p); } JEMALLOC_ALWAYS_INLINE_C void * imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize) { void *p; prof_tctx_t *tctx; tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) p = imemalign_prof_sample(tsd, alignment, usize, tctx); else p = ipalloc(tsd, usize, alignment, false); if (unlikely(p == NULL)) { prof_alloc_rollback(tsd, tctx, 
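/* (editorial note, illustrative only: je_malloc() above normalizes a
 * zero-byte request to one byte, so malloc(0) yields a distinct,
 * freeable pointer rather than NULL) */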
true); return (NULL); } prof_malloc(p, usize, tctx); return (p); } JEMALLOC_ATTR(nonnull(1)) static int imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment) { int ret; tsd_t *tsd; size_t usize; void *result; assert(min_alignment != 0); if (unlikely(malloc_init())) { result = NULL; goto label_oom; } tsd = tsd_fetch(); if (size == 0) size = 1; /* Make sure that alignment is a large enough power of 2. */ if (unlikely(((alignment - 1) & alignment) != 0 || (alignment < min_alignment))) { if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write(": Error allocating " "aligned memory: invalid alignment\n"); abort(); } result = NULL; ret = EINVAL; goto label_return; } usize = sa2u(size, alignment); if (unlikely(usize == 0)) { result = NULL; goto label_oom; } if (config_prof && opt_prof) result = imemalign_prof(tsd, alignment, usize); else result = ipalloc(tsd, usize, alignment, false); if (unlikely(result == NULL)) goto label_oom; assert(((uintptr_t)result & (alignment - 1)) == ZU(0)); *memptr = result; ret = 0; label_return: if (config_stats && likely(result != NULL)) { assert(usize == isalloc(result, config_prof)); *tsd_thread_allocatedp_get(tsd) += usize; } UTRACE(0, size, result); return (ret); label_oom: assert(result == NULL); if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write(": Error allocating aligned memory: " "out of memory\n"); abort(); } ret = ENOMEM; goto label_return; } JEMALLOC_EXPORT int JEMALLOC_NOTHROW JEMALLOC_ATTR(nonnull(1)) je_posix_memalign(void **memptr, size_t alignment, size_t size) { int ret = imemalign(memptr, alignment, size, sizeof(void *)); JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr, config_prof), false); return (ret); } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2) je_aligned_alloc(size_t alignment, size_t size) { void *ret; int err; if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) { ret = NULL; set_errno(err); } JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof), false); return (ret); } static void * icalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) { void *p; if (tctx == NULL) return (NULL); if (usize <= SMALL_MAXCLASS) { p = icalloc(tsd, LARGE_MINCLASS); if (p == NULL) return (NULL); arena_prof_promoted(p, usize); } else p = icalloc(tsd, usize); return (p); } JEMALLOC_ALWAYS_INLINE_C void * icalloc_prof(tsd_t *tsd, size_t usize) { void *p; prof_tctx_t *tctx; tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) p = icalloc_prof_sample(tsd, usize, tctx); else p = icalloc(tsd, usize); if (unlikely(p == NULL)) { prof_alloc_rollback(tsd, tctx, true); return (NULL); } prof_malloc(p, usize, tctx); return (p); } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) je_calloc(size_t num, size_t size) { void *ret; tsd_t *tsd; size_t num_size; size_t usize JEMALLOC_CC_SILENCE_INIT(0); if (unlikely(malloc_init())) { num_size = 0; ret = NULL; goto label_return; } tsd = tsd_fetch(); num_size = num * size; if (unlikely(num_size == 0)) { if (num == 0 || size == 0) num_size = 1; else { ret = NULL; goto label_return; } /* * Try to avoid division here. We know that it isn't possible to * overflow during multiplication if neither operand uses any of the * most significant half of the bits in a size_t. 
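 * For example, on a 64-bit target SIZE_T_MAX << 32 masks the upper
 * halves, so when both num and size fit in 32 bits their product
 * cannot exceed 64 bits and the division is skipped entirely; only if
 * either operand has a bit set in its upper half is the
 * num_size / size != num test paid for.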
*/ } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2))) && (num_size / size != num))) { /* size_t overflow. */ ret = NULL; goto label_return; } if (config_prof && opt_prof) { usize = s2u(num_size); if (unlikely(usize == 0)) { ret = NULL; goto label_return; } ret = icalloc_prof(tsd, usize); } else { if (config_stats || (config_valgrind && unlikely(in_valgrind))) usize = s2u(num_size); ret = icalloc(tsd, num_size); } label_return: if (unlikely(ret == NULL)) { if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write(": Error in calloc(): out of " "memory\n"); abort(); } set_errno(ENOMEM); } if (config_stats && likely(ret != NULL)) { assert(usize == isalloc(ret, config_prof)); *tsd_thread_allocatedp_get(tsd) += usize; } UTRACE(0, num_size, ret); JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true); return (ret); } static void * irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, prof_tctx_t *tctx) { void *p; if (tctx == NULL) return (NULL); if (usize <= SMALL_MAXCLASS) { p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false); if (p == NULL) return (NULL); arena_prof_promoted(p, usize); } else p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); return (p); } JEMALLOC_ALWAYS_INLINE_C void * irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize) { void *p; bool prof_active; prof_tctx_t *old_tctx, *tctx; prof_active = prof_active_get_unlocked(); old_tctx = prof_tctx_get(old_ptr); tctx = prof_alloc_prep(tsd, usize, prof_active, true); if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx); else p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); if (unlikely(p == NULL)) { prof_alloc_rollback(tsd, tctx, true); return (NULL); } prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize, old_tctx); return (p); } JEMALLOC_INLINE_C void ifree(tsd_t *tsd, void *ptr, tcache_t *tcache) { size_t usize; UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); if (config_prof && opt_prof) { usize = isalloc(ptr, config_prof); prof_free(tsd, ptr, usize); } else if (config_stats || config_valgrind) usize = isalloc(ptr, config_prof); if (config_stats) *tsd_thread_deallocatedp_get(tsd) += usize; if (config_valgrind && unlikely(in_valgrind)) rzsize = p2rz(ptr); iqalloc(tsd, ptr, tcache); JEMALLOC_VALGRIND_FREE(ptr, rzsize); } JEMALLOC_INLINE_C void isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache) { UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); if (config_prof && opt_prof) prof_free(tsd, ptr, usize); if (config_stats) *tsd_thread_deallocatedp_get(tsd) += usize; if (config_valgrind && unlikely(in_valgrind)) rzsize = p2rz(ptr); isqalloc(tsd, ptr, usize, tcache); JEMALLOC_VALGRIND_FREE(ptr, rzsize); } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ALLOC_SIZE(2) je_realloc(void *ptr, size_t size) { void *ret; tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL); size_t usize JEMALLOC_CC_SILENCE_INIT(0); size_t old_usize = 0; UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); if (unlikely(size == 0)) { if (ptr != NULL) { /* realloc(ptr, 0) is equivalent to free(ptr). 
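 * (Editorial sketch, illustrative only: under this convention
 *     p = realloc(p, 0);      frees p and returns NULL, while
 *     p = realloc(NULL, 16);  behaves like malloc(16), handled in the
 * else-branch further below, so both edge cases share one entry point.)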
*/ UTRACE(ptr, 0, 0); tsd = tsd_fetch(); ifree(tsd, ptr, tcache_get(tsd, false)); return (NULL); } size = 1; } if (likely(ptr != NULL)) { assert(malloc_initialized() || IS_INITIALIZER); malloc_thread_init(); tsd = tsd_fetch(); old_usize = isalloc(ptr, config_prof); if (config_valgrind && unlikely(in_valgrind)) old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize); if (config_prof && opt_prof) { usize = s2u(size); ret = unlikely(usize == 0) ? NULL : irealloc_prof(tsd, ptr, old_usize, usize); } else { if (config_stats || (config_valgrind && unlikely(in_valgrind))) usize = s2u(size); ret = iralloc(tsd, ptr, old_usize, size, 0, false); } } else { /* realloc(NULL, size) is equivalent to malloc(size). */ ret = imalloc_body(size, &tsd, &usize); } if (unlikely(ret == NULL)) { if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write(": Error in realloc(): " "out of memory\n"); abort(); } set_errno(ENOMEM); } if (config_stats && likely(ret != NULL)) { assert(usize == isalloc(ret, config_prof)); *tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += old_usize; } UTRACE(ptr, size, ret); JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize, old_rzsize, true, false); return (ret); } JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr) { UTRACE(ptr, 0, 0); if (likely(ptr != NULL)) { tsd_t *tsd = tsd_fetch(); ifree(tsd, ptr, tcache_get(tsd, false)); } } /* * End malloc(3)-compatible functions. */ /******************************************************************************/ /* * Begin non-standard override functions. */ #ifdef JEMALLOC_OVERRIDE_MEMALIGN JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) je_memalign(size_t alignment, size_t size) { void *ret JEMALLOC_CC_SILENCE_INIT(NULL); if (unlikely(imemalign(&ret, alignment, size, 1) != 0)) ret = NULL; JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); return (ret); } #endif #ifdef JEMALLOC_OVERRIDE_VALLOC JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) je_valloc(size_t size) { void *ret JEMALLOC_CC_SILENCE_INIT(NULL); if (unlikely(imemalign(&ret, PAGE, size, 1) != 0)) ret = NULL; JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); return (ret); } #endif /* * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has * #define je_malloc malloc */ #define malloc_is_malloc 1 #define is_malloc_(a) malloc_is_ ## a #define is_malloc(a) is_malloc_(a) #if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)) /* * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible * to inconsistently reference libc's malloc(3)-compatible functions * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). * * These definitions interpose hooks in glibc. The functions are actually * passed an extra argument for the caller return address, which will be * ignored. */ JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free; JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc; JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = je_memalign; # endif #endif /* * End non-standard override functions. */ /******************************************************************************/ /* * Begin non-standard functions. 
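 *
 * Editorial sketch (illustrative only): the *allocx() family defined
 * below composes behavior from flag bits, e.g.
 *
 *   void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
 *
 * requests 4096 zeroed bytes at 64-byte alignment; decoding those bits
 * into (usize, alignment, zero, tcache, arena) is exactly what
 * imallocx_flags_decode() below implements.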
*/ JEMALLOC_ALWAYS_INLINE_C bool imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize, size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena) { if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) { *alignment = 0; *usize = s2u(size); } else { *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); *usize = sa2u(size, *alignment); } assert(*usize != 0); *zero = MALLOCX_ZERO_GET(flags); if ((flags & MALLOCX_TCACHE_MASK) != 0) { if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) *tcache = NULL; else *tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); } else *tcache = tcache_get(tsd, true); if ((flags & MALLOCX_ARENA_MASK) != 0) { unsigned arena_ind = MALLOCX_ARENA_GET(flags); *arena = arena_get(tsd, arena_ind, true, true); if (unlikely(*arena == NULL)) return (true); } else *arena = NULL; return (false); } JEMALLOC_ALWAYS_INLINE_C bool imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize, size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena) { if (likely(flags == 0)) { *usize = s2u(size); assert(*usize != 0); *alignment = 0; *zero = false; *tcache = tcache_get(tsd, true); *arena = NULL; return (false); } else { return (imallocx_flags_decode_hard(tsd, size, flags, usize, alignment, zero, tcache, arena)); } } JEMALLOC_ALWAYS_INLINE_C void * imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) { if (unlikely(alignment != 0)) return (ipalloct(tsd, usize, alignment, zero, tcache, arena)); if (unlikely(zero)) return (icalloct(tsd, usize, tcache, arena)); return (imalloct(tsd, usize, tcache, arena)); } static void * imallocx_prof_sample(tsd_t *tsd, size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) { void *p; if (usize <= SMALL_MAXCLASS) { assert(((alignment == 0) ? 
s2u(LARGE_MINCLASS) : sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS); p = imallocx_flags(tsd, LARGE_MINCLASS, alignment, zero, tcache, arena); if (p == NULL) return (NULL); arena_prof_promoted(p, usize); } else p = imallocx_flags(tsd, usize, alignment, zero, tcache, arena); return (p); } JEMALLOC_ALWAYS_INLINE_C void * imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize) { void *p; size_t alignment; bool zero; tcache_t *tcache; arena_t *arena; prof_tctx_t *tctx; if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment, &zero, &tcache, &arena))) return (NULL); tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true); if (likely((uintptr_t)tctx == (uintptr_t)1U)) p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena); else if ((uintptr_t)tctx > (uintptr_t)1U) { p = imallocx_prof_sample(tsd, *usize, alignment, zero, tcache, arena); } else p = NULL; if (unlikely(p == NULL)) { prof_alloc_rollback(tsd, tctx, true); return (NULL); } prof_malloc(p, *usize, tctx); assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); return (p); } JEMALLOC_ALWAYS_INLINE_C void * imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize) { void *p; size_t alignment; bool zero; tcache_t *tcache; arena_t *arena; if (likely(flags == 0)) { if (config_stats || (config_valgrind && unlikely(in_valgrind))) *usize = s2u(size); return (imalloc(tsd, size)); } if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize, &alignment, &zero, &tcache, &arena))) return (NULL); p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena); assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); return (p); } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) je_mallocx(size_t size, int flags) { tsd_t *tsd; void *p; size_t usize; assert(size != 0); if (unlikely(malloc_init())) goto label_oom; tsd = tsd_fetch(); if (config_prof && opt_prof) p = imallocx_prof(tsd, size, flags, &usize); else p = imallocx_no_prof(tsd, size, flags, &usize); if (unlikely(p == NULL)) goto label_oom; if (config_stats) { assert(usize == isalloc(p, config_prof)); *tsd_thread_allocatedp_get(tsd) += usize; } UTRACE(0, size, p); JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags)); return (p); label_oom: if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write(": Error in mallocx(): out of memory\n"); abort(); } UTRACE(0, size, 0); return (NULL); } static void * irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, prof_tctx_t *tctx) { void *p; if (tctx == NULL) return (NULL); if (usize <= SMALL_MAXCLASS) { p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment, zero, tcache, arena); if (p == NULL) return (NULL); arena_prof_promoted(p, usize); } else { p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero, tcache, arena); } return (p); } JEMALLOC_ALWAYS_INLINE_C void * irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, size_t alignment, size_t *usize, bool zero, tcache_t *tcache, arena_t *arena) { void *p; bool prof_active; prof_tctx_t *old_tctx, *tctx; prof_active = prof_active_get_unlocked(); old_tctx = prof_tctx_get(old_ptr); tctx = prof_alloc_prep(tsd, *usize, prof_active, true); if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize, alignment, zero, tcache, arena, tctx); } else { p = iralloct(tsd, 
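/* (editorial note, illustrative only: in these prof paths a tctx of
 * (uintptr_t)1U is the sentinel for "allocation is not sampled", so
 * the common case falls through to the plain iralloct() call here) */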
old_ptr, old_usize, size, alignment, zero, tcache, arena); } if (unlikely(p == NULL)) { prof_alloc_rollback(tsd, tctx, true); return (NULL); } if (p == old_ptr && alignment != 0) { /* * The allocation did not move, so it is possible that the size * class is smaller than would guarantee the requested * alignment, and that the alignment constraint was * serendipitously satisfied. Additionally, old_usize may not * be the same as the current usize because of in-place large * reallocation. Therefore, query the actual value of usize. */ *usize = isalloc(p, config_prof); } prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr, old_usize, old_tctx); return (p); } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ALLOC_SIZE(2) je_rallocx(void *ptr, size_t size, int flags) { void *p; tsd_t *tsd; size_t usize; size_t old_usize; UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); size_t alignment = MALLOCX_ALIGN_GET(flags); bool zero = flags & MALLOCX_ZERO; arena_t *arena; tcache_t *tcache; assert(ptr != NULL); assert(size != 0); assert(malloc_initialized() || IS_INITIALIZER); malloc_thread_init(); tsd = tsd_fetch(); if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { unsigned arena_ind = MALLOCX_ARENA_GET(flags); arena = arena_get(tsd, arena_ind, true, true); if (unlikely(arena == NULL)) goto label_oom; } else arena = NULL; if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) tcache = NULL; else tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); } else tcache = tcache_get(tsd, true); old_usize = isalloc(ptr, config_prof); if (config_valgrind && unlikely(in_valgrind)) old_rzsize = u2rz(old_usize); if (config_prof && opt_prof) { usize = (alignment == 0) ? 
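/*
 * Editorial sketch for je_rallocx() (illustrative only): a caller might
 * grow an aligned buffer with
 *
 *   p = rallocx(p, 2 * sz, MALLOCX_ALIGN(64));
 *
 * and the prof/non-prof branches below differ only in bookkeeping.
 */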
s2u(size) : sa2u(size, alignment); assert(usize != 0); p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, zero, tcache, arena); if (unlikely(p == NULL)) goto label_oom; } else { p = iralloct(tsd, ptr, old_usize, size, alignment, zero, tcache, arena); if (unlikely(p == NULL)) goto label_oom; if (config_stats || (config_valgrind && unlikely(in_valgrind))) usize = isalloc(p, config_prof); } assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); if (config_stats) { *tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += old_usize; } UTRACE(ptr, size, p); JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize, old_rzsize, false, zero); return (p); label_oom: if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write(": Error in rallocx(): out of memory\n"); abort(); } UTRACE(ptr, size, 0); return (NULL); } JEMALLOC_ALWAYS_INLINE_C size_t ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra, size_t alignment, bool zero) { size_t usize; if (ixalloc(ptr, old_usize, size, extra, alignment, zero)) return (old_usize); usize = isalloc(ptr, config_prof); return (usize); } static size_t ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) { size_t usize; if (tctx == NULL) return (old_usize); usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, zero); return (usize); } JEMALLOC_ALWAYS_INLINE_C size_t ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, size_t extra, size_t alignment, bool zero) { size_t usize_max, usize; bool prof_active; prof_tctx_t *old_tctx, *tctx; prof_active = prof_active_get_unlocked(); old_tctx = prof_tctx_get(ptr); /* * usize isn't knowable before ixalloc() returns when extra is non-zero. * Therefore, compute its maximum possible value and use that in * prof_alloc_prep() to decide whether to capture a backtrace. * prof_realloc() will use the actual usize to decide whether to sample. */ usize_max = (alignment == 0) ? s2u(size+extra) : sa2u(size+extra, alignment); assert(usize_max != 0); tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { usize = ixallocx_prof_sample(ptr, old_usize, size, extra, alignment, zero, tctx); } else { usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, zero); } if (usize == old_usize) { prof_alloc_rollback(tsd, tctx, false); return (usize); } prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize, old_tctx); return (usize); } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size, size_t extra, int flags) { tsd_t *tsd; size_t usize, old_usize; UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); size_t alignment = MALLOCX_ALIGN_GET(flags); bool zero = flags & MALLOCX_ZERO; assert(ptr != NULL); assert(size != 0); assert(SIZE_T_MAX - size >= extra); assert(malloc_initialized() || IS_INITIALIZER); malloc_thread_init(); tsd = tsd_fetch(); old_usize = isalloc(ptr, config_prof); /* Clamp extra if necessary to avoid (size + extra) overflow. */ if (unlikely(size + extra > HUGE_MAXCLASS)) { /* Check for size overflow. 
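 * (Editorial usage note, illustrative only: xallocx() never moves ptr,
 * so callers typically test
 *     if (xallocx(p, want, 0, 0) < want) ... fall back to rallocx ...
 * because a return value short of the request means the in-place
 * resize failed.)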
*/ if (unlikely(size > HUGE_MAXCLASS)) { usize = old_usize; goto label_not_resized; } extra = HUGE_MAXCLASS - size; } if (config_valgrind && unlikely(in_valgrind)) old_rzsize = u2rz(old_usize); if (config_prof && opt_prof) { usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, alignment, zero); } else { usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, zero); } if (unlikely(usize == old_usize)) goto label_not_resized; if (config_stats) { *tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += old_usize; } JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize, old_rzsize, false, zero); label_not_resized: UTRACE(ptr, size, ptr); return (usize); } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW JEMALLOC_ATTR(pure) je_sallocx(const void *ptr, int flags) { size_t usize; assert(malloc_initialized() || IS_INITIALIZER); malloc_thread_init(); if (config_ivsalloc) usize = ivsalloc(ptr, config_prof); else usize = isalloc(ptr, config_prof); return (usize); } JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags) { tsd_t *tsd; tcache_t *tcache; assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); tsd = tsd_fetch(); if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) tcache = NULL; else tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); } else tcache = tcache_get(tsd, false); UTRACE(ptr, 0, 0); ifree(tsd_fetch(), ptr, tcache); } JEMALLOC_ALWAYS_INLINE_C size_t inallocx(size_t size, int flags) { size_t usize; if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) usize = s2u(size); else usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); assert(usize != 0); return (usize); } JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size, int flags) { tsd_t *tsd; tcache_t *tcache; size_t usize; assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); usize = inallocx(size, flags); assert(usize == isalloc(ptr, config_prof)); tsd = tsd_fetch(); if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) tcache = NULL; else tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); } else tcache = tcache_get(tsd, false); UTRACE(ptr, 0, 0); isfree(tsd, ptr, usize, tcache); } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW JEMALLOC_ATTR(pure) je_nallocx(size_t size, int flags) { assert(size != 0); if (unlikely(malloc_init())) return (0); return (inallocx(size, flags)); } JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { if (unlikely(malloc_init())) return (EAGAIN); return (ctl_byname(name, oldp, oldlenp, newp, newlen)); } JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) { if (unlikely(malloc_init())) return (EAGAIN); return (ctl_nametomib(name, mibp, miblenp)); } JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { if (unlikely(malloc_init())) return (EAGAIN); return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen)); } JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, const char *opts) { stats_print(write_cb, cbopaque, opts); } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) { size_t ret; assert(malloc_initialized() || IS_INITIALIZER); malloc_thread_init(); if (config_ivsalloc) 
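/* (editorial sketch for the mallctl family above, illustrative only:
 *      size_t sz; size_t len = sizeof(sz);
 *      mallctl("stats.allocated", &sz, &len, NULL, 0);
 * reads one statistic; setters instead pass newp/newlen) */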
ret = ivsalloc(ptr, config_prof); else ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof); return (ret); } /* * End non-standard functions. */ /******************************************************************************/ /* * The following functions are used by threading libraries for protection of * malloc during fork(). */ /* * If an application creates a thread before doing any allocation in the main * thread, then calls fork(2) in the main thread followed by memory allocation * in the child process, a race can occur that results in deadlock within the * child: the main thread may have forked while the created thread had * partially initialized the allocator. Ordinarily jemalloc prevents * fork/malloc races via the following functions it registers during * initialization using pthread_atfork(), but of course that does no good if * the allocator isn't fully initialized at fork time. The following library * constructor is a partial solution to this problem. It may still be possible * to trigger the deadlock described above, but doing so would involve forking * via a library constructor that runs before jemalloc's runs. */ JEMALLOC_ATTR(constructor) static void jemalloc_constructor(void) { malloc_init(); } #ifndef JEMALLOC_MUTEX_INIT_CB void jemalloc_prefork(void) #else JEMALLOC_EXPORT void _malloc_prefork(void) #endif { unsigned i; #ifdef JEMALLOC_MUTEX_INIT_CB if (!malloc_initialized()) return; #endif assert(malloc_initialized()); /* Acquire all mutexes in a safe order. */ ctl_prefork(); prof_prefork(); malloc_mutex_prefork(&arenas_lock); for (i = 0; i < narenas_total; i++) { if (arenas[i] != NULL) arena_prefork(arenas[i]); } chunk_prefork(); base_prefork(); } #ifndef JEMALLOC_MUTEX_INIT_CB void jemalloc_postfork_parent(void) #else JEMALLOC_EXPORT void _malloc_postfork(void) #endif { unsigned i; #ifdef JEMALLOC_MUTEX_INIT_CB if (!malloc_initialized()) return; #endif assert(malloc_initialized()); /* Release all mutexes, now that fork() has completed. */ base_postfork_parent(); chunk_postfork_parent(); for (i = 0; i < narenas_total; i++) { if (arenas[i] != NULL) arena_postfork_parent(arenas[i]); } malloc_mutex_postfork_parent(&arenas_lock); prof_postfork_parent(); ctl_postfork_parent(); } void jemalloc_postfork_child(void) { unsigned i; assert(malloc_initialized()); /* Release all mutexes, now that fork() has completed. */ base_postfork_child(); chunk_postfork_child(); for (i = 0; i < narenas_total; i++) { if (arenas[i] != NULL) arena_postfork_child(arenas[i]); } malloc_mutex_postfork_child(&arenas_lock); prof_postfork_child(); ctl_postfork_child(); } /******************************************************************************/ disque-1.0-rc1/deps/jemalloc/src/mb.c000066400000000000000000000001101264176561100174020ustar00rootroot00000000000000#define JEMALLOC_MB_C_ #include "jemalloc/internal/jemalloc_internal.h" disque-1.0-rc1/deps/jemalloc/src/mutex.c000066400000000000000000000065451264176561100201700ustar00rootroot00000000000000#define JEMALLOC_MUTEX_C_ #include "jemalloc/internal/jemalloc_internal.h" #if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) #include #endif #ifndef _CRT_SPINCOUNT #define _CRT_SPINCOUNT 4000 #endif /******************************************************************************/ /* Data. 
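 *
 * (Editorial note on the fork handlers defined at the end of
 * jemalloc.c above, illustrative only: the registration performed
 * during init amounts to
 *
 *   pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *       jemalloc_postfork_child);
 *
 * so every allocator mutex is acquired before fork() and then
 * released, or re-initialized in the child, afterwards.)
 *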
*/ #ifdef JEMALLOC_LAZY_LOCK bool isthreaded = false; #endif #ifdef JEMALLOC_MUTEX_INIT_CB static bool postpone_init = true; static malloc_mutex_t *postponed_mutexes = NULL; #endif #if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) static void pthread_create_once(void); #endif /******************************************************************************/ /* * We intercept pthread_create() calls in order to toggle isthreaded if the * process goes multi-threaded. */ #if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *, void *(*)(void *), void *__restrict); static void pthread_create_once(void) { pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create"); if (pthread_create_fptr == NULL) { malloc_write(": Error in dlsym(RTLD_NEXT, " "\"pthread_create\")\n"); abort(); } isthreaded = true; } JEMALLOC_EXPORT int pthread_create(pthread_t *__restrict thread, const pthread_attr_t *__restrict attr, void *(*start_routine)(void *), void *__restrict arg) { static pthread_once_t once_control = PTHREAD_ONCE_INIT; pthread_once(&once_control, pthread_create_once); return (pthread_create_fptr(thread, attr, start_routine, arg)); } #endif /******************************************************************************/ #ifdef JEMALLOC_MUTEX_INIT_CB JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, void *(calloc_cb)(size_t, size_t)); #endif bool malloc_mutex_init(malloc_mutex_t *mutex) { #ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 InitializeSRWLock(&mutex->lock); # else if (!InitializeCriticalSectionAndSpinCount(&mutex->lock, _CRT_SPINCOUNT)) return (true); # endif #elif (defined(JEMALLOC_OSSPIN)) mutex->lock = 0; #elif (defined(JEMALLOC_MUTEX_INIT_CB)) if (postpone_init) { mutex->postponed_next = postponed_mutexes; postponed_mutexes = mutex; } else { if (_pthread_mutex_init_calloc_cb(&mutex->lock, bootstrap_calloc) != 0) return (true); } #else pthread_mutexattr_t attr; if (pthread_mutexattr_init(&attr) != 0) return (true); pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE); if (pthread_mutex_init(&mutex->lock, &attr) != 0) { pthread_mutexattr_destroy(&attr); return (true); } pthread_mutexattr_destroy(&attr); #endif return (false); } void malloc_mutex_prefork(malloc_mutex_t *mutex) { malloc_mutex_lock(mutex); } void malloc_mutex_postfork_parent(malloc_mutex_t *mutex) { malloc_mutex_unlock(mutex); } void malloc_mutex_postfork_child(malloc_mutex_t *mutex) { #ifdef JEMALLOC_MUTEX_INIT_CB malloc_mutex_unlock(mutex); #else if (malloc_mutex_init(mutex)) { malloc_printf(": Error re-initializing mutex in " "child\n"); if (opt_abort) abort(); } #endif } bool mutex_boot(void) { #ifdef JEMALLOC_MUTEX_INIT_CB postpone_init = false; while (postponed_mutexes != NULL) { if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock, bootstrap_calloc) != 0) return (true); postponed_mutexes = postponed_mutexes->postponed_next; } #endif return (false); } disque-1.0-rc1/deps/jemalloc/src/pages.c000066400000000000000000000072211264176561100201150ustar00rootroot00000000000000#define JEMALLOC_PAGES_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ void * pages_map(void *addr, size_t size) { void *ret; assert(size != 0); #ifdef _WIN32 /* * If VirtualAlloc can't allocate at the given address when one is * given, it fails and returns NULL. 
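 *
 * (Editorial note on the lazy-lock scheme in mutex.c above,
 * illustrative only: the exported pthread_create() wrapper resolves
 * the real symbol once via dlsym(RTLD_NEXT, "pthread_create") and
 * flips isthreaded, so a single-threaded process pays no locking cost
 * until it actually spawns a thread.)
 *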
*/ ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); #else /* * We don't use MAP_FIXED here, because it can cause the *replacement* * of existing mappings, and we only want to create new mappings. */ ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); assert(ret != NULL); if (ret == MAP_FAILED) ret = NULL; else if (addr != NULL && ret != addr) { /* * We succeeded in mapping memory, but not in the right place. */ pages_unmap(ret, size); ret = NULL; } #endif assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL && ret == addr)); return (ret); } void pages_unmap(void *addr, size_t size) { #ifdef _WIN32 if (VirtualFree(addr, 0, MEM_RELEASE) == 0) #else if (munmap(addr, size) == -1) #endif { char buf[BUFERROR_BUF]; buferror(get_errno(), buf, sizeof(buf)); malloc_printf(": Error in " #ifdef _WIN32 "VirtualFree" #else "munmap" #endif "(): %s\n", buf); if (opt_abort) abort(); } } void * pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size) { void *ret = (void *)((uintptr_t)addr + leadsize); assert(alloc_size >= leadsize + size); #ifdef _WIN32 { void *new_addr; pages_unmap(addr, alloc_size); new_addr = pages_map(ret, size); if (new_addr == ret) return (ret); if (new_addr) pages_unmap(new_addr, size); return (NULL); } #else { size_t trailsize = alloc_size - leadsize - size; if (leadsize != 0) pages_unmap(addr, leadsize); if (trailsize != 0) pages_unmap((void *)((uintptr_t)ret + size), trailsize); return (ret); } #endif } static bool pages_commit_impl(void *addr, size_t size, bool commit) { #ifndef _WIN32 /* * The following decommit/commit implementation is functional, but * always disabled because it doesn't add value beyond improved * debugging (at the cost of extra system calls) on systems that * overcommit. */ if (false) { int prot = commit ? (PROT_READ | PROT_WRITE) : PROT_NONE; void *result = mmap(addr, size, prot, MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0); if (result == MAP_FAILED) return (true); if (result != addr) { /* * We succeeded in mapping memory, but not in the right * place. */ pages_unmap(result, size); return (true); } return (false); } #endif return (true); } bool pages_commit(void *addr, size_t size) { return (pages_commit_impl(addr, size, true)); } bool pages_decommit(void *addr, size_t size) { return (pages_commit_impl(addr, size, false)); } bool pages_purge(void *addr, size_t size) { bool unzeroed; #ifdef _WIN32 VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE); unzeroed = true; #elif defined(JEMALLOC_HAVE_MADVISE) # ifdef JEMALLOC_PURGE_MADVISE_DONTNEED # define JEMALLOC_MADV_PURGE MADV_DONTNEED # define JEMALLOC_MADV_ZEROS true # elif defined(JEMALLOC_PURGE_MADVISE_FREE) # define JEMALLOC_MADV_PURGE MADV_FREE # define JEMALLOC_MADV_ZEROS false # else # error "No madvise(2) flag defined for purging unused dirty pages." # endif int err = madvise(addr, size, JEMALLOC_MADV_PURGE); unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0); # undef JEMALLOC_MADV_PURGE # undef JEMALLOC_MADV_ZEROS #else /* Last resort no-op.
*/ unzeroed = true; #endif return (unzeroed); } disque-1.0-rc1/deps/jemalloc/src/prof.c000066400000000000000000001423301264176561100177650ustar00rootroot00000000000000#define JEMALLOC_PROF_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ #ifdef JEMALLOC_PROF_LIBUNWIND #define UNW_LOCAL_ONLY #include #endif #ifdef JEMALLOC_PROF_LIBGCC #include #endif /******************************************************************************/ /* Data. */ bool opt_prof = false; bool opt_prof_active = true; bool opt_prof_thread_active_init = true; size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT; ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT; bool opt_prof_gdump = false; bool opt_prof_final = false; bool opt_prof_leak = false; bool opt_prof_accum = false; char opt_prof_prefix[ /* Minimize memory bloat for non-prof builds. */ #ifdef JEMALLOC_PROF PATH_MAX + #endif 1]; /* * Initialized as opt_prof_active, and accessed via * prof_active_[gs]et{_unlocked,}(). */ bool prof_active; static malloc_mutex_t prof_active_mtx; /* * Initialized as opt_prof_thread_active_init, and accessed via * prof_thread_active_init_[gs]et(). */ static bool prof_thread_active_init; static malloc_mutex_t prof_thread_active_init_mtx; /* * Initialized as opt_prof_gdump, and accessed via * prof_gdump_[gs]et{_unlocked,}(). */ bool prof_gdump_val; static malloc_mutex_t prof_gdump_mtx; uint64_t prof_interval = 0; size_t lg_prof_sample; /* * Table of mutexes that are shared among gctx's. These are leaf locks, so * there is no problem with using them for more than one gctx at the same time. * The primary motivation for this sharing though is that gctx's are ephemeral, * and destroying mutexes causes complications for systems that allocate when * creating/destroying mutexes. */ static malloc_mutex_t *gctx_locks; static unsigned cum_gctxs; /* Atomic counter. */ /* * Table of mutexes that are shared among tdata's. No operations require * holding multiple tdata locks, so there is no problem with using them for more * than one tdata at the same time, even though a gctx lock may be acquired * while holding a tdata lock. */ static malloc_mutex_t *tdata_locks; /* * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data * structure that knows about all backtraces currently captured. */ static ckh_t bt2gctx; static malloc_mutex_t bt2gctx_mtx; /* * Tree of all extant prof_tdata_t structures, regardless of state, * {attached,detached,expired}. */ static prof_tdata_tree_t tdatas; static malloc_mutex_t tdatas_mtx; static uint64_t next_thr_uid; static malloc_mutex_t next_thr_uid_mtx; static malloc_mutex_t prof_dump_seq_mtx; static uint64_t prof_dump_seq; static uint64_t prof_dump_iseq; static uint64_t prof_dump_mseq; static uint64_t prof_dump_useq; /* * This buffer is rather large for stack allocation, so use a single buffer for * all profile dumps. */ static malloc_mutex_t prof_dump_mtx; static char prof_dump_buf[ /* Minimize memory bloat for non-prof builds. */ #ifdef JEMALLOC_PROF PROF_DUMP_BUFSIZE #else 1 #endif ]; static unsigned prof_dump_buf_end; static int prof_dump_fd; /* Do not dump any profiles until bootstrapping is complete. */ static bool prof_booted = false; /******************************************************************************/ /* * Function prototypes for static functions that are referenced prior to * definition. 
*/ static bool prof_tctx_should_destroy(prof_tctx_t *tctx); static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx); static bool prof_tdata_should_destroy(prof_tdata_t *tdata, bool even_if_attached); static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached); static char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name); /******************************************************************************/ /* Red-black trees. */ JEMALLOC_INLINE_C int prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) { uint64_t a_thr_uid = a->thr_uid; uint64_t b_thr_uid = b->thr_uid; int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid); if (ret == 0) { uint64_t a_thr_discrim = a->thr_discrim; uint64_t b_thr_discrim = b->thr_discrim; ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim < b_thr_discrim); if (ret == 0) { uint64_t a_tctx_uid = a->tctx_uid; uint64_t b_tctx_uid = b->tctx_uid; ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid < b_tctx_uid); } } return (ret); } rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t, tctx_link, prof_tctx_comp) JEMALLOC_INLINE_C int prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) { unsigned a_len = a->bt.len; unsigned b_len = b->bt.len; unsigned comp_len = (a_len < b_len) ? a_len : b_len; int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *)); if (ret == 0) ret = (a_len > b_len) - (a_len < b_len); return (ret); } rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link, prof_gctx_comp) JEMALLOC_INLINE_C int prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) { int ret; uint64_t a_uid = a->thr_uid; uint64_t b_uid = b->thr_uid; ret = ((a_uid > b_uid) - (a_uid < b_uid)); if (ret == 0) { uint64_t a_discrim = a->thr_discrim; uint64_t b_discrim = b->thr_discrim; ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim)); } return (ret); } rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link, prof_tdata_comp) /******************************************************************************/ void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) { prof_tdata_t *tdata; cassert(config_prof); if (updated) { /* * Compute a new sample threshold. This isn't very important in * practice, because this function is rarely executed, so the * potential for sample bias is minimal except in contrived * programs. 
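 *
 * (Editorial aside on the rb-tree comparators above, illustrative
 * only: the idiom (a > b) - (a < b) yields -1, 0 or 1 directly and
 * avoids the truncation bugs a plain (int)(a - b) would invite for
 * 64-bit uid values.)
 *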
*/ tdata = prof_tdata_get(tsd, true); if (tdata != NULL) prof_sample_threshold_update(tdata); } if ((uintptr_t)tctx > (uintptr_t)1U) { malloc_mutex_lock(tctx->tdata->lock); tctx->prepared = false; if (prof_tctx_should_destroy(tctx)) prof_tctx_destroy(tsd, tctx); else malloc_mutex_unlock(tctx->tdata->lock); } } void prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx) { prof_tctx_set(ptr, usize, tctx); malloc_mutex_lock(tctx->tdata->lock); tctx->cnts.curobjs++; tctx->cnts.curbytes += usize; if (opt_prof_accum) { tctx->cnts.accumobjs++; tctx->cnts.accumbytes += usize; } tctx->prepared = false; malloc_mutex_unlock(tctx->tdata->lock); } void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) { malloc_mutex_lock(tctx->tdata->lock); assert(tctx->cnts.curobjs > 0); assert(tctx->cnts.curbytes >= usize); tctx->cnts.curobjs--; tctx->cnts.curbytes -= usize; if (prof_tctx_should_destroy(tctx)) prof_tctx_destroy(tsd, tctx); else malloc_mutex_unlock(tctx->tdata->lock); } void bt_init(prof_bt_t *bt, void **vec) { cassert(config_prof); bt->vec = vec; bt->len = 0; } JEMALLOC_INLINE_C void prof_enter(tsd_t *tsd, prof_tdata_t *tdata) { cassert(config_prof); assert(tdata == prof_tdata_get(tsd, false)); if (tdata != NULL) { assert(!tdata->enq); tdata->enq = true; } malloc_mutex_lock(&bt2gctx_mtx); } JEMALLOC_INLINE_C void prof_leave(tsd_t *tsd, prof_tdata_t *tdata) { cassert(config_prof); assert(tdata == prof_tdata_get(tsd, false)); malloc_mutex_unlock(&bt2gctx_mtx); if (tdata != NULL) { bool idump, gdump; assert(tdata->enq); tdata->enq = false; idump = tdata->enq_idump; tdata->enq_idump = false; gdump = tdata->enq_gdump; tdata->enq_gdump = false; if (idump) prof_idump(); if (gdump) prof_gdump(); } } #ifdef JEMALLOC_PROF_LIBUNWIND void prof_backtrace(prof_bt_t *bt) { int nframes; cassert(config_prof); assert(bt->len == 0); assert(bt->vec != NULL); nframes = unw_backtrace(bt->vec, PROF_BT_MAX); if (nframes <= 0) return; bt->len = nframes; } #elif (defined(JEMALLOC_PROF_LIBGCC)) static _Unwind_Reason_Code prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) { cassert(config_prof); return (_URC_NO_REASON); } static _Unwind_Reason_Code prof_unwind_callback(struct _Unwind_Context *context, void *arg) { prof_unwind_data_t *data = (prof_unwind_data_t *)arg; void *ip; cassert(config_prof); ip = (void *)_Unwind_GetIP(context); if (ip == NULL) return (_URC_END_OF_STACK); data->bt->vec[data->bt->len] = ip; data->bt->len++; if (data->bt->len == data->max) return (_URC_END_OF_STACK); return (_URC_NO_REASON); } void prof_backtrace(prof_bt_t *bt) { prof_unwind_data_t data = {bt, PROF_BT_MAX}; cassert(config_prof); _Unwind_Backtrace(prof_unwind_callback, &data); } #elif (defined(JEMALLOC_PROF_GCC)) void prof_backtrace(prof_bt_t *bt) { #define BT_FRAME(i) \ if ((i) < PROF_BT_MAX) { \ void *p; \ if (__builtin_frame_address(i) == 0) \ return; \ p = __builtin_return_address(i); \ if (p == NULL) \ return; \ bt->vec[(i)] = p; \ bt->len = (i) + 1; \ } else \ return; cassert(config_prof); BT_FRAME(0) BT_FRAME(1) BT_FRAME(2) BT_FRAME(3) BT_FRAME(4) BT_FRAME(5) BT_FRAME(6) BT_FRAME(7) BT_FRAME(8) BT_FRAME(9) BT_FRAME(10) BT_FRAME(11) BT_FRAME(12) BT_FRAME(13) BT_FRAME(14) BT_FRAME(15) BT_FRAME(16) BT_FRAME(17) BT_FRAME(18) BT_FRAME(19) BT_FRAME(20) BT_FRAME(21) BT_FRAME(22) BT_FRAME(23) BT_FRAME(24) BT_FRAME(25) BT_FRAME(26) BT_FRAME(27) BT_FRAME(28) BT_FRAME(29) BT_FRAME(30) BT_FRAME(31) BT_FRAME(32) BT_FRAME(33) BT_FRAME(34) BT_FRAME(35) BT_FRAME(36) BT_FRAME(37) BT_FRAME(38) 
BT_FRAME(39) BT_FRAME(40) BT_FRAME(41) BT_FRAME(42) BT_FRAME(43) BT_FRAME(44) BT_FRAME(45) BT_FRAME(46) BT_FRAME(47) BT_FRAME(48) BT_FRAME(49) BT_FRAME(50) BT_FRAME(51) BT_FRAME(52) BT_FRAME(53) BT_FRAME(54) BT_FRAME(55) BT_FRAME(56) BT_FRAME(57) BT_FRAME(58) BT_FRAME(59) BT_FRAME(60) BT_FRAME(61) BT_FRAME(62) BT_FRAME(63) BT_FRAME(64) BT_FRAME(65) BT_FRAME(66) BT_FRAME(67) BT_FRAME(68) BT_FRAME(69) BT_FRAME(70) BT_FRAME(71) BT_FRAME(72) BT_FRAME(73) BT_FRAME(74) BT_FRAME(75) BT_FRAME(76) BT_FRAME(77) BT_FRAME(78) BT_FRAME(79) BT_FRAME(80) BT_FRAME(81) BT_FRAME(82) BT_FRAME(83) BT_FRAME(84) BT_FRAME(85) BT_FRAME(86) BT_FRAME(87) BT_FRAME(88) BT_FRAME(89) BT_FRAME(90) BT_FRAME(91) BT_FRAME(92) BT_FRAME(93) BT_FRAME(94) BT_FRAME(95) BT_FRAME(96) BT_FRAME(97) BT_FRAME(98) BT_FRAME(99) BT_FRAME(100) BT_FRAME(101) BT_FRAME(102) BT_FRAME(103) BT_FRAME(104) BT_FRAME(105) BT_FRAME(106) BT_FRAME(107) BT_FRAME(108) BT_FRAME(109) BT_FRAME(110) BT_FRAME(111) BT_FRAME(112) BT_FRAME(113) BT_FRAME(114) BT_FRAME(115) BT_FRAME(116) BT_FRAME(117) BT_FRAME(118) BT_FRAME(119) BT_FRAME(120) BT_FRAME(121) BT_FRAME(122) BT_FRAME(123) BT_FRAME(124) BT_FRAME(125) BT_FRAME(126) BT_FRAME(127) #undef BT_FRAME } #else void prof_backtrace(prof_bt_t *bt) { cassert(config_prof); not_reached(); } #endif static malloc_mutex_t * prof_gctx_mutex_choose(void) { unsigned ngctxs = atomic_add_u(&cum_gctxs, 1); return (&gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]); } static malloc_mutex_t * prof_tdata_mutex_choose(uint64_t thr_uid) { return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]); } static prof_gctx_t * prof_gctx_create(tsd_t *tsd, prof_bt_t *bt) { /* * Create a single allocation that has space for vec of length bt->len. */ prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *)), false, tcache_get(tsd, true), true, NULL); if (gctx == NULL) return (NULL); gctx->lock = prof_gctx_mutex_choose(); /* * Set nlimbo to 1, in order to avoid a race condition with * prof_tctx_destroy()/prof_gctx_try_destroy(). */ gctx->nlimbo = 1; tctx_tree_new(&gctx->tctxs); /* Duplicate bt. */ memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *)); gctx->bt.vec = gctx->vec; gctx->bt.len = bt->len; return (gctx); } static void prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, prof_tdata_t *tdata) { cassert(config_prof); /* * Check that gctx is still unused by any thread cache before destroying * it. prof_lookup() increments gctx->nlimbo in order to avoid a race * condition with this function, as does prof_tctx_destroy() in order to * avoid a race between the main body of prof_tctx_destroy() and entry * into this function. */ prof_enter(tsd, tdata_self); malloc_mutex_lock(gctx->lock); assert(gctx->nlimbo != 0); if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) { /* Remove gctx from bt2gctx. */ if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) not_reached(); prof_leave(tsd, tdata_self); /* Destroy gctx. */ malloc_mutex_unlock(gctx->lock); idalloctm(tsd, gctx, tcache_get(tsd, false), true); } else { /* * Compensate for increment in prof_tctx_destroy() or * prof_lookup(). */ gctx->nlimbo--; malloc_mutex_unlock(gctx->lock); prof_leave(tsd, tdata_self); } } /* tctx->tdata->lock must be held. 
*/ static bool prof_tctx_should_destroy(prof_tctx_t *tctx) { if (opt_prof_accum) return (false); if (tctx->cnts.curobjs != 0) return (false); if (tctx->prepared) return (false); return (true); } static bool prof_gctx_should_destroy(prof_gctx_t *gctx) { if (opt_prof_accum) return (false); if (!tctx_tree_empty(&gctx->tctxs)) return (false); if (gctx->nlimbo != 0) return (false); return (true); } /* tctx->tdata->lock is held upon entry, and released before return. */ static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) { prof_tdata_t *tdata = tctx->tdata; prof_gctx_t *gctx = tctx->gctx; bool destroy_tdata, destroy_tctx, destroy_gctx; assert(tctx->cnts.curobjs == 0); assert(tctx->cnts.curbytes == 0); assert(!opt_prof_accum); assert(tctx->cnts.accumobjs == 0); assert(tctx->cnts.accumbytes == 0); ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL); destroy_tdata = prof_tdata_should_destroy(tdata, false); malloc_mutex_unlock(tdata->lock); malloc_mutex_lock(gctx->lock); switch (tctx->state) { case prof_tctx_state_nominal: tctx_tree_remove(&gctx->tctxs, tctx); destroy_tctx = true; if (prof_gctx_should_destroy(gctx)) { /* * Increment gctx->nlimbo in order to keep another * thread from winning the race to destroy gctx while * this one has gctx->lock dropped. Without this, it * would be possible for another thread to: * * 1) Sample an allocation associated with gctx. * 2) Deallocate the sampled object. * 3) Successfully prof_gctx_try_destroy(gctx). * * The result would be that gctx no longer exists by the * time this thread accesses it in * prof_gctx_try_destroy(). */ gctx->nlimbo++; destroy_gctx = true; } else destroy_gctx = false; break; case prof_tctx_state_dumping: /* * A dumping thread needs tctx to remain valid until dumping * has finished. Change state such that the dumping thread will * complete destruction during a late dump iteration phase. */ tctx->state = prof_tctx_state_purgatory; destroy_tctx = false; destroy_gctx = false; break; default: not_reached(); destroy_tctx = false; destroy_gctx = false; } malloc_mutex_unlock(gctx->lock); if (destroy_gctx) { prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx, tdata); } if (destroy_tdata) prof_tdata_destroy(tsd, tdata, false); if (destroy_tctx) idalloctm(tsd, tctx, tcache_get(tsd, false), true); } static bool prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) { union { prof_gctx_t *p; void *v; } gctx; union { prof_bt_t *p; void *v; } btkey; bool new_gctx; prof_enter(tsd, tdata); if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { /* bt has never been seen before. Insert it. */ gctx.p = prof_gctx_create(tsd, bt); if (gctx.v == NULL) { prof_leave(tsd, tdata); return (true); } btkey.p = &gctx.p->bt; if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) { /* OOM. */ prof_leave(tsd, tdata); idalloctm(tsd, gctx.v, tcache_get(tsd, false), true); return (true); } new_gctx = true; } else { /* * Increment nlimbo, in order to avoid a race condition with * prof_tctx_destroy()/prof_gctx_try_destroy(). 
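 * (Editor's note) Concretely: after this thread's ckh_search() hit,
 * another thread could remove the last tctx and race into
 * prof_gctx_try_destroy(); the nlimbo reference taken below keeps the
 * gctx alive across that window, since prof_gctx_try_destroy() only
 * frees when its own reference is the sole one remaining (nlimbo == 1).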
*/ malloc_mutex_lock(gctx.p->lock); gctx.p->nlimbo++; malloc_mutex_unlock(gctx.p->lock); new_gctx = false; } prof_leave(tsd, tdata); *p_btkey = btkey.v; *p_gctx = gctx.p; *p_new_gctx = new_gctx; return (false); } prof_tctx_t * prof_lookup(tsd_t *tsd, prof_bt_t *bt) { union { prof_tctx_t *p; void *v; } ret; prof_tdata_t *tdata; bool not_found; cassert(config_prof); tdata = prof_tdata_get(tsd, false); if (tdata == NULL) return (NULL); malloc_mutex_lock(tdata->lock); not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v); if (!not_found) /* Note double negative! */ ret.p->prepared = true; malloc_mutex_unlock(tdata->lock); if (not_found) { tcache_t *tcache; void *btkey; prof_gctx_t *gctx; bool new_gctx, error; /* * This thread's cache lacks bt. Look for it in the global * cache. */ if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx, &new_gctx)) return (NULL); /* Link a prof_tctx_t into gctx for this thread. */ tcache = tcache_get(tsd, true); ret.v = iallocztm(tsd, sizeof(prof_tctx_t), false, tcache, true, NULL); if (ret.p == NULL) { if (new_gctx) prof_gctx_try_destroy(tsd, tdata, gctx, tdata); return (NULL); } ret.p->tdata = tdata; ret.p->thr_uid = tdata->thr_uid; ret.p->thr_discrim = tdata->thr_discrim; memset(&ret.p->cnts, 0, sizeof(prof_cnt_t)); ret.p->gctx = gctx; ret.p->tctx_uid = tdata->tctx_uid_next++; ret.p->prepared = true; ret.p->state = prof_tctx_state_initializing; malloc_mutex_lock(tdata->lock); error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v); malloc_mutex_unlock(tdata->lock); if (error) { if (new_gctx) prof_gctx_try_destroy(tsd, tdata, gctx, tdata); idalloctm(tsd, ret.v, tcache, true); return (NULL); } malloc_mutex_lock(gctx->lock); ret.p->state = prof_tctx_state_nominal; tctx_tree_insert(&gctx->tctxs, ret.p); gctx->nlimbo--; malloc_mutex_unlock(gctx->lock); } return (ret.p); } void prof_sample_threshold_update(prof_tdata_t *tdata) { /* * The body of this function is compiled out unless heap profiling is * enabled, so that it is possible to compile jemalloc with floating * point support completely disabled. Avoiding floating point code is * important on memory-constrained systems, but it also enables a * workaround for versions of glibc that don't properly save/restore * floating point registers during dynamic lazy symbol loading (which * internally calls into whatever malloc implementation happens to be * integrated into the application). Note that some compilers (e.g. * gcc 4.8) may use floating point registers for fast memory moves, so * jemalloc must be compiled with such optimizations disabled (e.g. * -mno-sse) in order for the workaround to be complete. */ #ifdef JEMALLOC_PROF uint64_t r; double u; if (!config_prof) return; if (lg_prof_sample == 0) { tdata->bytes_until_sample = 0; return; } /* * Compute sample interval as a geometrically distributed random * variable with mean (2^lg_prof_sample). 
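 * (Editor's worked example) With lg_prof_sample = 19 -- the documented
 * jemalloc default, i.e. a 512 KiB mean -- p = 2^-19; a uniform draw of
 * u = 0.5 fed through the formula below gives
 * bytes_until_sample = ceil(log(0.5) / log(1 - 2^-19)) ~= 363409.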
 *
 *                             __        __
 *                            |  log(u)  |                     1
 * tdata->bytes_until_sample = | -------- |, where p = ---------------
 *                            | log(1-p) |             lg_prof_sample
 *                                                    2
 *
 * For more information on the math, see:
 *
 *   Non-Uniform Random Variate Generation
 *   Luc Devroye
 *   Springer-Verlag, New York, 1986
 *   pp 500
 *   (http://luc.devroye.org/rnbookindex.html)
 */
        prng64(r, 53, tdata->prng_state, UINT64_C(6364136223846793005),
            UINT64_C(1442695040888963407));
        u = (double)r * (1.0/9007199254740992.0L);
        tdata->bytes_until_sample = (uint64_t)(log(u) /
            log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
            + (uint64_t)1U;
#endif
}

#ifdef JEMALLOC_JET
static prof_tdata_t *
prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{
        size_t *tdata_count = (size_t *)arg;

        (*tdata_count)++;

        return (NULL);
}

size_t
prof_tdata_count(void)
{
        size_t tdata_count = 0;

        malloc_mutex_lock(&tdatas_mtx);
        tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
            (void *)&tdata_count);
        malloc_mutex_unlock(&tdatas_mtx);

        return (tdata_count);
}
#endif

#ifdef JEMALLOC_JET
size_t
prof_bt_count(void)
{
        size_t bt_count;
        tsd_t *tsd;
        prof_tdata_t *tdata;

        tsd = tsd_fetch();
        tdata = prof_tdata_get(tsd, false);
        if (tdata == NULL)
                return (0);

        malloc_mutex_lock(&bt2gctx_mtx);
        bt_count = ckh_count(&bt2gctx);
        malloc_mutex_unlock(&bt2gctx_mtx);

        return (bt_count);
}
#endif

#ifdef JEMALLOC_JET
#undef prof_dump_open
#define prof_dump_open JEMALLOC_N(prof_dump_open_impl)
#endif
static int
prof_dump_open(bool propagate_err, const char *filename)
{
        int fd;

        fd = creat(filename, 0644);
        if (fd == -1 && !propagate_err) {
                malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n",
                    filename);
                if (opt_abort)
                        abort();
        }

        return (fd);
}
#ifdef JEMALLOC_JET
#undef prof_dump_open
#define prof_dump_open JEMALLOC_N(prof_dump_open)
prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl);
#endif

static bool
prof_dump_flush(bool propagate_err)
{
        bool ret = false;
        ssize_t err;

        cassert(config_prof);

        err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
        if (err == -1) {
                if (!propagate_err) {
                        malloc_write("<jemalloc>: write() failed during heap "
                            "profile flush\n");
                        if (opt_abort)
                                abort();
                }
                ret = true;
        }
        prof_dump_buf_end = 0;

        return (ret);
}

static bool
prof_dump_close(bool propagate_err)
{
        bool ret;

        assert(prof_dump_fd != -1);
        ret = prof_dump_flush(propagate_err);
        close(prof_dump_fd);
        prof_dump_fd = -1;

        return (ret);
}

static bool
prof_dump_write(bool propagate_err, const char *s)
{
        unsigned i, slen, n;

        cassert(config_prof);

        i = 0;
        slen = strlen(s);
        while (i < slen) {
                /* Flush the buffer if it is full. */
                if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
                        if (prof_dump_flush(propagate_err) && propagate_err)
                                return (true);

                if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
                        /* Finish writing. */
                        n = slen - i;
                } else {
                        /* Write as much of s as will fit. */
                        n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
                }
                memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
                prof_dump_buf_end += n;
                i += n;
        }

        return (false);
}

JEMALLOC_FORMAT_PRINTF(2, 3)
static bool
prof_dump_printf(bool propagate_err, const char *format, ...)
{
        bool ret;
        va_list ap;
        char buf[PROF_PRINTF_BUFSIZE];

        va_start(ap, format);
        malloc_vsnprintf(buf, sizeof(buf), format, ap);
        va_end(ap);
        ret = prof_dump_write(propagate_err, buf);

        return (ret);
}

/* tctx->tdata->lock is held.
*/ static void prof_tctx_merge_tdata(prof_tctx_t *tctx, prof_tdata_t *tdata) { malloc_mutex_lock(tctx->gctx->lock); switch (tctx->state) { case prof_tctx_state_initializing: malloc_mutex_unlock(tctx->gctx->lock); return; case prof_tctx_state_nominal: tctx->state = prof_tctx_state_dumping; malloc_mutex_unlock(tctx->gctx->lock); memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t)); tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs; tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes; if (opt_prof_accum) { tdata->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs; tdata->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes; } break; case prof_tctx_state_dumping: case prof_tctx_state_purgatory: not_reached(); } } /* gctx->lock is held. */ static void prof_tctx_merge_gctx(prof_tctx_t *tctx, prof_gctx_t *gctx) { gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs; gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes; if (opt_prof_accum) { gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs; gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes; } } /* tctx->gctx is held. */ static prof_tctx_t * prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { switch (tctx->state) { case prof_tctx_state_nominal: /* New since dumping started; ignore. */ break; case prof_tctx_state_dumping: case prof_tctx_state_purgatory: prof_tctx_merge_gctx(tctx, tctx->gctx); break; default: not_reached(); } return (NULL); } /* gctx->lock is held. */ static prof_tctx_t * prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { bool propagate_err = *(bool *)arg; switch (tctx->state) { case prof_tctx_state_initializing: case prof_tctx_state_nominal: /* Not captured by this dump. */ break; case prof_tctx_state_dumping: case prof_tctx_state_purgatory: if (prof_dump_printf(propagate_err, " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": " "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs, tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs, tctx->dump_cnts.accumbytes)) return (tctx); break; default: not_reached(); } return (NULL); } /* tctx->gctx is held. */ static prof_tctx_t * prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { prof_tctx_t *ret; switch (tctx->state) { case prof_tctx_state_nominal: /* New since dumping started; ignore. */ break; case prof_tctx_state_dumping: tctx->state = prof_tctx_state_nominal; break; case prof_tctx_state_purgatory: ret = tctx; goto label_return; default: not_reached(); } ret = NULL; label_return: return (ret); } static void prof_dump_gctx_prep(prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) { cassert(config_prof); malloc_mutex_lock(gctx->lock); /* * Increment nlimbo so that gctx won't go away before dump. * Additionally, link gctx into the dump list so that it is included in * prof_dump()'s second pass. 
*/ gctx->nlimbo++; gctx_tree_insert(gctxs, gctx); memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t)); malloc_mutex_unlock(gctx->lock); } static prof_gctx_t * prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg) { size_t *leak_ngctx = (size_t *)arg; malloc_mutex_lock(gctx->lock); tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, NULL); if (gctx->cnt_summed.curobjs != 0) (*leak_ngctx)++; malloc_mutex_unlock(gctx->lock); return (NULL); } static void prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) { prof_tdata_t *tdata = prof_tdata_get(tsd, false); prof_gctx_t *gctx; /* * Standard tree iteration won't work here, because as soon as we * decrement gctx->nlimbo and unlock gctx, another thread can * concurrently destroy it, which will corrupt the tree. Therefore, * tear down the tree one node at a time during iteration. */ while ((gctx = gctx_tree_first(gctxs)) != NULL) { gctx_tree_remove(gctxs, gctx); malloc_mutex_lock(gctx->lock); { prof_tctx_t *next; next = NULL; do { prof_tctx_t *to_destroy = tctx_tree_iter(&gctx->tctxs, next, prof_tctx_finish_iter, NULL); if (to_destroy != NULL) { next = tctx_tree_next(&gctx->tctxs, to_destroy); tctx_tree_remove(&gctx->tctxs, to_destroy); idalloctm(tsd, to_destroy, tcache_get(tsd, false), true); } else next = NULL; } while (next != NULL); } gctx->nlimbo--; if (prof_gctx_should_destroy(gctx)) { gctx->nlimbo++; malloc_mutex_unlock(gctx->lock); prof_gctx_try_destroy(tsd, tdata, gctx, tdata); } else malloc_mutex_unlock(gctx->lock); } } static prof_tdata_t * prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) { prof_cnt_t *cnt_all = (prof_cnt_t *)arg; malloc_mutex_lock(tdata->lock); if (!tdata->expired) { size_t tabind; union { prof_tctx_t *p; void *v; } tctx; tdata->dumping = true; memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t)); for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL, &tctx.v);) prof_tctx_merge_tdata(tctx.p, tdata); cnt_all->curobjs += tdata->cnt_summed.curobjs; cnt_all->curbytes += tdata->cnt_summed.curbytes; if (opt_prof_accum) { cnt_all->accumobjs += tdata->cnt_summed.accumobjs; cnt_all->accumbytes += tdata->cnt_summed.accumbytes; } } else tdata->dumping = false; malloc_mutex_unlock(tdata->lock); return (NULL); } static prof_tdata_t * prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) { bool propagate_err = *(bool *)arg; if (!tdata->dumping) return (NULL); if (prof_dump_printf(propagate_err, " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n", tdata->thr_uid, tdata->cnt_summed.curobjs, tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs, tdata->cnt_summed.accumbytes, (tdata->thread_name != NULL) ? " " : "", (tdata->thread_name != NULL) ? 
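/*
 * (Editor's aside) prof_gctx_finish() above and prof_reset() later in
 * this file share the same destroy-during-iteration idiom: fetch a
 * victim via the iterator, advance the cursor past it *before*
 * removing it, then resume from the saved cursor. A minimal sketch of
 * the idiom with hypothetical tree/node helpers (not jemalloc API):
 */
#if 0
        node_t *next = NULL;
        do {
                node_t *victim = tree_iter(&tree, next, pick_victim_cb, NULL);
                if (victim != NULL) {
                        /* Step past the victim before freeing it. */
                        next = tree_next(&tree, victim);
                        tree_remove(&tree, victim);
                        node_free(victim);
                } else
                        next = NULL;
        } while (next != NULL);
#endif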
tdata->thread_name : "")) return (tdata); return (NULL); } #ifdef JEMALLOC_JET #undef prof_dump_header #define prof_dump_header JEMALLOC_N(prof_dump_header_impl) #endif static bool prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all) { bool ret; if (prof_dump_printf(propagate_err, "heap_v2/%"FMTu64"\n" " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs, cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) return (true); malloc_mutex_lock(&tdatas_mtx); ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, (void *)&propagate_err) != NULL); malloc_mutex_unlock(&tdatas_mtx); return (ret); } #ifdef JEMALLOC_JET #undef prof_dump_header #define prof_dump_header JEMALLOC_N(prof_dump_header) prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl); #endif /* gctx->lock is held. */ static bool prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt, prof_gctx_tree_t *gctxs) { bool ret; unsigned i; cassert(config_prof); /* Avoid dumping such gctx's that have no useful data. */ if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) || (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) { assert(gctx->cnt_summed.curobjs == 0); assert(gctx->cnt_summed.curbytes == 0); assert(gctx->cnt_summed.accumobjs == 0); assert(gctx->cnt_summed.accumbytes == 0); ret = false; goto label_return; } if (prof_dump_printf(propagate_err, "@")) { ret = true; goto label_return; } for (i = 0; i < bt->len; i++) { if (prof_dump_printf(propagate_err, " %#"FMTxPTR, (uintptr_t)bt->vec[i])) { ret = true; goto label_return; } } if (prof_dump_printf(propagate_err, "\n" " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes, gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) { ret = true; goto label_return; } if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, (void *)&propagate_err) != NULL) { ret = true; goto label_return; } ret = false; label_return: return (ret); } JEMALLOC_FORMAT_PRINTF(1, 2) static int prof_open_maps(const char *format, ...) { int mfd; va_list ap; char filename[PATH_MAX + 1]; va_start(ap, format); malloc_vsnprintf(filename, sizeof(filename), format, ap); va_end(ap); mfd = open(filename, O_RDONLY); return (mfd); } static bool prof_dump_maps(bool propagate_err) { bool ret; int mfd; cassert(config_prof); #ifdef __FreeBSD__ mfd = prof_open_maps("/proc/curproc/map"); #else { int pid = getpid(); mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid); if (mfd == -1) mfd = prof_open_maps("/proc/%d/maps", pid); } #endif if (mfd != -1) { ssize_t nread; if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") && propagate_err) { ret = true; goto label_return; } nread = 0; do { prof_dump_buf_end += nread; if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { /* Make space in prof_dump_buf before read(). */ if (prof_dump_flush(propagate_err) && propagate_err) { ret = true; goto label_return; } } nread = read(mfd, &prof_dump_buf[prof_dump_buf_end], PROF_DUMP_BUFSIZE - prof_dump_buf_end); } while (nread > 0); } else { ret = true; goto label_return; } ret = false; label_return: if (mfd != -1) close(mfd); return (ret); } static void prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx, const char *filename) { if (cnt_all->curbytes != 0) { malloc_printf(": Leak summary: %"FMTu64" byte%s, %" FMTu64" object%s, %zu context%s\n", cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "", cnt_all->curobjs, (cnt_all->curobjs != 1) ? 
"s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : ""); malloc_printf( ": Run jeprof on \"%s\" for leak detail\n", filename); } } static prof_gctx_t * prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg) { prof_gctx_t *ret; bool propagate_err = *(bool *)arg; malloc_mutex_lock(gctx->lock); if (prof_dump_gctx(propagate_err, gctx, &gctx->bt, gctxs)) { ret = gctx; goto label_return; } ret = NULL; label_return: malloc_mutex_unlock(gctx->lock); return (ret); } static bool prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck) { prof_tdata_t *tdata; prof_cnt_t cnt_all; size_t tabind; union { prof_gctx_t *p; void *v; } gctx; size_t leak_ngctx; prof_gctx_tree_t gctxs; cassert(config_prof); tdata = prof_tdata_get(tsd, true); if (tdata == NULL) return (true); malloc_mutex_lock(&prof_dump_mtx); prof_enter(tsd, tdata); /* * Put gctx's in limbo and clear their counters in preparation for * summing. */ gctx_tree_new(&gctxs); for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) prof_dump_gctx_prep(gctx.p, &gctxs); /* * Iterate over tdatas, and for the non-expired ones snapshot their tctx * stats and merge them into the associated gctx's. */ memset(&cnt_all, 0, sizeof(prof_cnt_t)); malloc_mutex_lock(&tdatas_mtx); tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, (void *)&cnt_all); malloc_mutex_unlock(&tdatas_mtx); /* Merge tctx stats into gctx's. */ leak_ngctx = 0; gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, (void *)&leak_ngctx); prof_leave(tsd, tdata); /* Create dump file. */ if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) goto label_open_close_error; /* Dump profile header. */ if (prof_dump_header(propagate_err, &cnt_all)) goto label_write_error; /* Dump per gctx profile stats. */ if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter, (void *)&propagate_err) != NULL) goto label_write_error; /* Dump /proc//maps if possible. 
*/ if (prof_dump_maps(propagate_err)) goto label_write_error; if (prof_dump_close(propagate_err)) goto label_open_close_error; prof_gctx_finish(tsd, &gctxs); malloc_mutex_unlock(&prof_dump_mtx); if (leakcheck) prof_leakcheck(&cnt_all, leak_ngctx, filename); return (false); label_write_error: prof_dump_close(propagate_err); label_open_close_error: prof_gctx_finish(tsd, &gctxs); malloc_mutex_unlock(&prof_dump_mtx); return (true); } #define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) #define VSEQ_INVALID UINT64_C(0xffffffffffffffff) static void prof_dump_filename(char *filename, char v, uint64_t vseq) { cassert(config_prof); if (vseq != VSEQ_INVALID) { /* "...v.heap" */ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, "%s.%d.%"FMTu64".%c%"FMTu64".heap", opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq); } else { /* "....heap" */ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, "%s.%d.%"FMTu64".%c.heap", opt_prof_prefix, (int)getpid(), prof_dump_seq, v); } prof_dump_seq++; } static void prof_fdump(void) { tsd_t *tsd; char filename[DUMP_FILENAME_BUFSIZE]; cassert(config_prof); assert(opt_prof_final); assert(opt_prof_prefix[0] != '\0'); if (!prof_booted) return; tsd = tsd_fetch(); malloc_mutex_lock(&prof_dump_seq_mtx); prof_dump_filename(filename, 'f', VSEQ_INVALID); malloc_mutex_unlock(&prof_dump_seq_mtx); prof_dump(tsd, false, filename, opt_prof_leak); } void prof_idump(void) { tsd_t *tsd; prof_tdata_t *tdata; cassert(config_prof); if (!prof_booted) return; tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, false); if (tdata == NULL) return; if (tdata->enq) { tdata->enq_idump = true; return; } if (opt_prof_prefix[0] != '\0') { char filename[PATH_MAX + 1]; malloc_mutex_lock(&prof_dump_seq_mtx); prof_dump_filename(filename, 'i', prof_dump_iseq); prof_dump_iseq++; malloc_mutex_unlock(&prof_dump_seq_mtx); prof_dump(tsd, false, filename, false); } } bool prof_mdump(const char *filename) { tsd_t *tsd; char filename_buf[DUMP_FILENAME_BUFSIZE]; cassert(config_prof); if (!opt_prof || !prof_booted) return (true); tsd = tsd_fetch(); if (filename == NULL) { /* No filename specified, so automatically generate one. 
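 * (Editor's note) Per prof_dump_filename() above, generated names have
 * the shape "<prefix>.<pid>.<seq>.<v><vseq>.heap", where <v> is 'f' for
 * the final dump (which omits <vseq>), 'i' for interval dumps, 'm' for
 * manual dumps, and 'u' for gdumps -- e.g. the first manual dump of pid
 * 12345 with the default "jeprof" prefix would be "jeprof.12345.0.m0.heap".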
*/ if (opt_prof_prefix[0] == '\0') return (true); malloc_mutex_lock(&prof_dump_seq_mtx); prof_dump_filename(filename_buf, 'm', prof_dump_mseq); prof_dump_mseq++; malloc_mutex_unlock(&prof_dump_seq_mtx); filename = filename_buf; } return (prof_dump(tsd, true, filename, false)); } void prof_gdump(void) { tsd_t *tsd; prof_tdata_t *tdata; cassert(config_prof); if (!prof_booted) return; tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, false); if (tdata == NULL) return; if (tdata->enq) { tdata->enq_gdump = true; return; } if (opt_prof_prefix[0] != '\0') { char filename[DUMP_FILENAME_BUFSIZE]; malloc_mutex_lock(&prof_dump_seq_mtx); prof_dump_filename(filename, 'u', prof_dump_useq); prof_dump_useq++; malloc_mutex_unlock(&prof_dump_seq_mtx); prof_dump(tsd, false, filename, false); } } static void prof_bt_hash(const void *key, size_t r_hash[2]) { prof_bt_t *bt = (prof_bt_t *)key; cassert(config_prof); hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash); } static bool prof_bt_keycomp(const void *k1, const void *k2) { const prof_bt_t *bt1 = (prof_bt_t *)k1; const prof_bt_t *bt2 = (prof_bt_t *)k2; cassert(config_prof); if (bt1->len != bt2->len) return (false); return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); } JEMALLOC_INLINE_C uint64_t prof_thr_uid_alloc(void) { uint64_t thr_uid; malloc_mutex_lock(&next_thr_uid_mtx); thr_uid = next_thr_uid; next_thr_uid++; malloc_mutex_unlock(&next_thr_uid_mtx); return (thr_uid); } static prof_tdata_t * prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, char *thread_name, bool active) { prof_tdata_t *tdata; tcache_t *tcache; cassert(config_prof); /* Initialize an empty cache for this thread. */ tcache = tcache_get(tsd, true); tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t), false, tcache, true, NULL); if (tdata == NULL) return (NULL); tdata->lock = prof_tdata_mutex_choose(thr_uid); tdata->thr_uid = thr_uid; tdata->thr_discrim = thr_discrim; tdata->thread_name = thread_name; tdata->attached = true; tdata->expired = false; tdata->tctx_uid_next = 0; if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash, prof_bt_keycomp)) { idalloctm(tsd, tdata, tcache, true); return (NULL); } tdata->prng_state = (uint64_t)(uintptr_t)tdata; prof_sample_threshold_update(tdata); tdata->enq = false; tdata->enq_idump = false; tdata->enq_gdump = false; tdata->dumping = false; tdata->active = active; malloc_mutex_lock(&tdatas_mtx); tdata_tree_insert(&tdatas, tdata); malloc_mutex_unlock(&tdatas_mtx); return (tdata); } prof_tdata_t * prof_tdata_init(tsd_t *tsd) { return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0, NULL, prof_thread_active_init_get())); } /* tdata->lock must be held. */ static bool prof_tdata_should_destroy(prof_tdata_t *tdata, bool even_if_attached) { if (tdata->attached && !even_if_attached) return (false); if (ckh_count(&tdata->bt2tctx) != 0) return (false); return (true); } /* tdatas_mtx must be held. 
*/ static void prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { tcache_t *tcache; assert(prof_tdata_should_destroy(tdata, even_if_attached)); assert(tsd_prof_tdata_get(tsd) != tdata); tdata_tree_remove(&tdatas, tdata); tcache = tcache_get(tsd, false); if (tdata->thread_name != NULL) idalloctm(tsd, tdata->thread_name, tcache, true); ckh_delete(tsd, &tdata->bt2tctx); idalloctm(tsd, tdata, tcache, true); } static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { malloc_mutex_lock(&tdatas_mtx); prof_tdata_destroy_locked(tsd, tdata, even_if_attached); malloc_mutex_unlock(&tdatas_mtx); } static void prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) { bool destroy_tdata; malloc_mutex_lock(tdata->lock); if (tdata->attached) { destroy_tdata = prof_tdata_should_destroy(tdata, true); /* * Only detach if !destroy_tdata, because detaching would allow * another thread to win the race to destroy tdata. */ if (!destroy_tdata) tdata->attached = false; tsd_prof_tdata_set(tsd, NULL); } else destroy_tdata = false; malloc_mutex_unlock(tdata->lock); if (destroy_tdata) prof_tdata_destroy(tsd, tdata, true); } prof_tdata_t * prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) { uint64_t thr_uid = tdata->thr_uid; uint64_t thr_discrim = tdata->thr_discrim + 1; char *thread_name = (tdata->thread_name != NULL) ? prof_thread_name_alloc(tsd, tdata->thread_name) : NULL; bool active = tdata->active; prof_tdata_detach(tsd, tdata); return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name, active)); } static bool prof_tdata_expire(prof_tdata_t *tdata) { bool destroy_tdata; malloc_mutex_lock(tdata->lock); if (!tdata->expired) { tdata->expired = true; destroy_tdata = tdata->attached ? false : prof_tdata_should_destroy(tdata, false); } else destroy_tdata = false; malloc_mutex_unlock(tdata->lock); return (destroy_tdata); } static prof_tdata_t * prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) { return (prof_tdata_expire(tdata) ? tdata : NULL); } void prof_reset(tsd_t *tsd, size_t lg_sample) { prof_tdata_t *next; assert(lg_sample < (sizeof(uint64_t) << 3)); malloc_mutex_lock(&prof_dump_mtx); malloc_mutex_lock(&tdatas_mtx); lg_prof_sample = lg_sample; next = NULL; do { prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next, prof_tdata_reset_iter, NULL); if (to_destroy != NULL) { next = tdata_tree_next(&tdatas, to_destroy); prof_tdata_destroy_locked(tsd, to_destroy, false); } else next = NULL; } while (next != NULL); malloc_mutex_unlock(&tdatas_mtx); malloc_mutex_unlock(&prof_dump_mtx); } void prof_tdata_cleanup(tsd_t *tsd) { prof_tdata_t *tdata; if (!config_prof) return; tdata = tsd_prof_tdata_get(tsd); if (tdata != NULL) prof_tdata_detach(tsd, tdata); } bool prof_active_get(void) { bool prof_active_current; malloc_mutex_lock(&prof_active_mtx); prof_active_current = prof_active; malloc_mutex_unlock(&prof_active_mtx); return (prof_active_current); } bool prof_active_set(bool active) { bool prof_active_old; malloc_mutex_lock(&prof_active_mtx); prof_active_old = prof_active; prof_active = active; malloc_mutex_unlock(&prof_active_mtx); return (prof_active_old); } const char * prof_thread_name_get(void) { tsd_t *tsd; prof_tdata_t *tdata; tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, true); if (tdata == NULL) return (""); return (tdata->thread_name != NULL ? 
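/*
 * (Editor's aside) prof_active_get()/prof_active_set() above are the
 * backing for the "prof.active" mallctl (compare the
 * OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active) usage in stats.c),
 * so a process can toggle sampling at run time. Sketch, assuming the
 * unprefixed public names:
 */
#if 0
        bool active = false;    /* disable sampling */
        bool old;
        size_t sz = sizeof(old);
        /* On success, `old` receives the previous setting. */
        mallctl("prof.active", &old, &sz, &active, sizeof(active));
#endif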
tdata->thread_name : ""); } static char * prof_thread_name_alloc(tsd_t *tsd, const char *thread_name) { char *ret; size_t size; if (thread_name == NULL) return (NULL); size = strlen(thread_name) + 1; if (size == 1) return (""); ret = iallocztm(tsd, size, false, tcache_get(tsd, true), true, NULL); if (ret == NULL) return (NULL); memcpy(ret, thread_name, size); return (ret); } int prof_thread_name_set(tsd_t *tsd, const char *thread_name) { prof_tdata_t *tdata; unsigned i; char *s; tdata = prof_tdata_get(tsd, true); if (tdata == NULL) return (EAGAIN); /* Validate input. */ if (thread_name == NULL) return (EFAULT); for (i = 0; thread_name[i] != '\0'; i++) { char c = thread_name[i]; if (!isgraph(c) && !isblank(c)) return (EFAULT); } s = prof_thread_name_alloc(tsd, thread_name); if (s == NULL) return (EAGAIN); if (tdata->thread_name != NULL) { idalloctm(tsd, tdata->thread_name, tcache_get(tsd, false), true); tdata->thread_name = NULL; } if (strlen(s) > 0) tdata->thread_name = s; return (0); } bool prof_thread_active_get(void) { tsd_t *tsd; prof_tdata_t *tdata; tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, true); if (tdata == NULL) return (false); return (tdata->active); } bool prof_thread_active_set(bool active) { tsd_t *tsd; prof_tdata_t *tdata; tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, true); if (tdata == NULL) return (true); tdata->active = active; return (false); } bool prof_thread_active_init_get(void) { bool active_init; malloc_mutex_lock(&prof_thread_active_init_mtx); active_init = prof_thread_active_init; malloc_mutex_unlock(&prof_thread_active_init_mtx); return (active_init); } bool prof_thread_active_init_set(bool active_init) { bool active_init_old; malloc_mutex_lock(&prof_thread_active_init_mtx); active_init_old = prof_thread_active_init; prof_thread_active_init = active_init; malloc_mutex_unlock(&prof_thread_active_init_mtx); return (active_init_old); } bool prof_gdump_get(void) { bool prof_gdump_current; malloc_mutex_lock(&prof_gdump_mtx); prof_gdump_current = prof_gdump_val; malloc_mutex_unlock(&prof_gdump_mtx); return (prof_gdump_current); } bool prof_gdump_set(bool gdump) { bool prof_gdump_old; malloc_mutex_lock(&prof_gdump_mtx); prof_gdump_old = prof_gdump_val; prof_gdump_val = gdump; malloc_mutex_unlock(&prof_gdump_mtx); return (prof_gdump_old); } void prof_boot0(void) { cassert(config_prof); memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT, sizeof(PROF_PREFIX_DEFAULT)); } void prof_boot1(void) { cassert(config_prof); /* * opt_prof must be in its final state before any arenas are * initialized, so this function must be executed early. */ if (opt_prof_leak && !opt_prof) { /* * Enable opt_prof, but in such a way that profiles are never * automatically dumped. 
*/ opt_prof = true; opt_prof_gdump = false; } else if (opt_prof) { if (opt_lg_prof_interval >= 0) { prof_interval = (((uint64_t)1U) << opt_lg_prof_interval); } } } bool prof_boot2(void) { cassert(config_prof); if (opt_prof) { tsd_t *tsd; unsigned i; lg_prof_sample = opt_lg_prof_sample; prof_active = opt_prof_active; if (malloc_mutex_init(&prof_active_mtx)) return (true); prof_gdump_val = opt_prof_gdump; if (malloc_mutex_init(&prof_gdump_mtx)) return (true); prof_thread_active_init = opt_prof_thread_active_init; if (malloc_mutex_init(&prof_thread_active_init_mtx)) return (true); tsd = tsd_fetch(); if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash, prof_bt_keycomp)) return (true); if (malloc_mutex_init(&bt2gctx_mtx)) return (true); tdata_tree_new(&tdatas); if (malloc_mutex_init(&tdatas_mtx)) return (true); next_thr_uid = 0; if (malloc_mutex_init(&next_thr_uid_mtx)) return (true); if (malloc_mutex_init(&prof_dump_seq_mtx)) return (true); if (malloc_mutex_init(&prof_dump_mtx)) return (true); if (opt_prof_final && opt_prof_prefix[0] != '\0' && atexit(prof_fdump) != 0) { malloc_write(": Error in atexit()\n"); if (opt_abort) abort(); } gctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS * sizeof(malloc_mutex_t)); if (gctx_locks == NULL) return (true); for (i = 0; i < PROF_NCTX_LOCKS; i++) { if (malloc_mutex_init(&gctx_locks[i])) return (true); } tdata_locks = (malloc_mutex_t *)base_alloc(PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t)); if (tdata_locks == NULL) return (true); for (i = 0; i < PROF_NTDATA_LOCKS; i++) { if (malloc_mutex_init(&tdata_locks[i])) return (true); } } #ifdef JEMALLOC_PROF_LIBGCC /* * Cause the backtracing machinery to allocate its internal state * before enabling profiling. */ _Unwind_Backtrace(prof_unwind_init_callback, NULL); #endif prof_booted = true; return (false); } void prof_prefork(void) { if (opt_prof) { unsigned i; malloc_mutex_prefork(&tdatas_mtx); malloc_mutex_prefork(&bt2gctx_mtx); malloc_mutex_prefork(&next_thr_uid_mtx); malloc_mutex_prefork(&prof_dump_seq_mtx); for (i = 0; i < PROF_NCTX_LOCKS; i++) malloc_mutex_prefork(&gctx_locks[i]); for (i = 0; i < PROF_NTDATA_LOCKS; i++) malloc_mutex_prefork(&tdata_locks[i]); } } void prof_postfork_parent(void) { if (opt_prof) { unsigned i; for (i = 0; i < PROF_NTDATA_LOCKS; i++) malloc_mutex_postfork_parent(&tdata_locks[i]); for (i = 0; i < PROF_NCTX_LOCKS; i++) malloc_mutex_postfork_parent(&gctx_locks[i]); malloc_mutex_postfork_parent(&prof_dump_seq_mtx); malloc_mutex_postfork_parent(&next_thr_uid_mtx); malloc_mutex_postfork_parent(&bt2gctx_mtx); malloc_mutex_postfork_parent(&tdatas_mtx); } } void prof_postfork_child(void) { if (opt_prof) { unsigned i; for (i = 0; i < PROF_NTDATA_LOCKS; i++) malloc_mutex_postfork_child(&tdata_locks[i]); for (i = 0; i < PROF_NCTX_LOCKS; i++) malloc_mutex_postfork_child(&gctx_locks[i]); malloc_mutex_postfork_child(&prof_dump_seq_mtx); malloc_mutex_postfork_child(&next_thr_uid_mtx); malloc_mutex_postfork_child(&bt2gctx_mtx); malloc_mutex_postfork_child(&tdatas_mtx); } } /******************************************************************************/ disque-1.0-rc1/deps/jemalloc/src/quarantine.c000066400000000000000000000124261264176561100211700ustar00rootroot00000000000000#define JEMALLOC_QUARANTINE_C_ #include "jemalloc/internal/jemalloc_internal.h" /* * Quarantine pointers close to NULL are used to encode state information that * is used for cleaning up during thread shutdown. 
*/ #define QUARANTINE_STATE_REINCARNATED ((quarantine_t *)(uintptr_t)1) #define QUARANTINE_STATE_PURGATORY ((quarantine_t *)(uintptr_t)2) #define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY /******************************************************************************/ /* Function prototypes for non-inline static functions. */ static quarantine_t *quarantine_grow(tsd_t *tsd, quarantine_t *quarantine); static void quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine); static void quarantine_drain(tsd_t *tsd, quarantine_t *quarantine, size_t upper_bound); /******************************************************************************/ static quarantine_t * quarantine_init(tsd_t *tsd, size_t lg_maxobjs) { quarantine_t *quarantine; assert(tsd_nominal(tsd)); quarantine = (quarantine_t *)iallocztm(tsd, offsetof(quarantine_t, objs) + ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)), false, tcache_get(tsd, true), true, NULL); if (quarantine == NULL) return (NULL); quarantine->curbytes = 0; quarantine->curobjs = 0; quarantine->first = 0; quarantine->lg_maxobjs = lg_maxobjs; return (quarantine); } void quarantine_alloc_hook_work(tsd_t *tsd) { quarantine_t *quarantine; if (!tsd_nominal(tsd)) return; quarantine = quarantine_init(tsd, LG_MAXOBJS_INIT); /* * Check again whether quarantine has been initialized, because * quarantine_init() may have triggered recursive initialization. */ if (tsd_quarantine_get(tsd) == NULL) tsd_quarantine_set(tsd, quarantine); else idalloctm(tsd, quarantine, tcache_get(tsd, false), true); } static quarantine_t * quarantine_grow(tsd_t *tsd, quarantine_t *quarantine) { quarantine_t *ret; ret = quarantine_init(tsd, quarantine->lg_maxobjs + 1); if (ret == NULL) { quarantine_drain_one(tsd, quarantine); return (quarantine); } ret->curbytes = quarantine->curbytes; ret->curobjs = quarantine->curobjs; if (quarantine->first + quarantine->curobjs <= (ZU(1) << quarantine->lg_maxobjs)) { /* objs ring buffer data are contiguous. */ memcpy(ret->objs, &quarantine->objs[quarantine->first], quarantine->curobjs * sizeof(quarantine_obj_t)); } else { /* objs ring buffer data wrap around. */ size_t ncopy_a = (ZU(1) << quarantine->lg_maxobjs) - quarantine->first; size_t ncopy_b = quarantine->curobjs - ncopy_a; memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy_a * sizeof(quarantine_obj_t)); memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b * sizeof(quarantine_obj_t)); } idalloctm(tsd, quarantine, tcache_get(tsd, false), true); tsd_quarantine_set(tsd, ret); return (ret); } static void quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine) { quarantine_obj_t *obj = &quarantine->objs[quarantine->first]; assert(obj->usize == isalloc(obj->ptr, config_prof)); idalloctm(tsd, obj->ptr, NULL, false); quarantine->curbytes -= obj->usize; quarantine->curobjs--; quarantine->first = (quarantine->first + 1) & ((ZU(1) << quarantine->lg_maxobjs) - 1); } static void quarantine_drain(tsd_t *tsd, quarantine_t *quarantine, size_t upper_bound) { while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0) quarantine_drain_one(tsd, quarantine); } void quarantine(tsd_t *tsd, void *ptr) { quarantine_t *quarantine; size_t usize = isalloc(ptr, config_prof); cassert(config_fill); assert(opt_quarantine); if ((quarantine = tsd_quarantine_get(tsd)) == NULL) { idalloctm(tsd, ptr, NULL, false); return; } /* * Drain one or more objects if the quarantine size limit would be * exceeded by appending ptr. 
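 * (Editor's worked example) The objs ring buffer is indexed modulo a
 * power of two: with lg_maxobjs = 3 (capacity 8), first = 6 and
 * curobjs = 4, the live slots are 6, 7, 0, 1, and the next append in
 * quarantine() lands at (6 + 4) & 7 == 2; quarantine_drain_one()
 * advances `first` with the same mask.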
*/ if (quarantine->curbytes + usize > opt_quarantine) { size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine - usize : 0; quarantine_drain(tsd, quarantine, upper_bound); } /* Grow the quarantine ring buffer if it's full. */ if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs)) quarantine = quarantine_grow(tsd, quarantine); /* quarantine_grow() must free a slot if it fails to grow. */ assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs)); /* Append ptr if its size doesn't exceed the quarantine size. */ if (quarantine->curbytes + usize <= opt_quarantine) { size_t offset = (quarantine->first + quarantine->curobjs) & ((ZU(1) << quarantine->lg_maxobjs) - 1); quarantine_obj_t *obj = &quarantine->objs[offset]; obj->ptr = ptr; obj->usize = usize; quarantine->curbytes += usize; quarantine->curobjs++; if (config_fill && unlikely(opt_junk_free)) { /* * Only do redzone validation if Valgrind isn't in * operation. */ if ((!config_valgrind || likely(!in_valgrind)) && usize <= SMALL_MAXCLASS) arena_quarantine_junk_small(ptr, usize); else memset(ptr, 0x5a, usize); } } else { assert(quarantine->curbytes == 0); idalloctm(tsd, ptr, NULL, false); } } void quarantine_cleanup(tsd_t *tsd) { quarantine_t *quarantine; if (!config_fill) return; quarantine = tsd_quarantine_get(tsd); if (quarantine != NULL) { quarantine_drain(tsd, quarantine, 0); idalloctm(tsd, quarantine, tcache_get(tsd, false), true); tsd_quarantine_set(tsd, NULL); } } disque-1.0-rc1/deps/jemalloc/src/rtree.c000066400000000000000000000061541264176561100201430ustar00rootroot00000000000000#define JEMALLOC_RTREE_C_ #include "jemalloc/internal/jemalloc_internal.h" static unsigned hmin(unsigned ha, unsigned hb) { return (ha < hb ? ha : hb); } /* Only the most significant bits of keys passed to rtree_[gs]et() are used. */ bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc, rtree_node_dalloc_t *dalloc) { unsigned bits_in_leaf, height, i; assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3)); bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL : (bits % RTREE_BITS_PER_LEVEL); if (bits > bits_in_leaf) { height = 1 + (bits - bits_in_leaf) / RTREE_BITS_PER_LEVEL; if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits) height++; } else height = 1; assert((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf == bits); rtree->alloc = alloc; rtree->dalloc = dalloc; rtree->height = height; /* Root level. */ rtree->levels[0].subtree = NULL; rtree->levels[0].bits = (height > 1) ? RTREE_BITS_PER_LEVEL : bits_in_leaf; rtree->levels[0].cumbits = rtree->levels[0].bits; /* Interior levels. */ for (i = 1; i < height-1; i++) { rtree->levels[i].subtree = NULL; rtree->levels[i].bits = RTREE_BITS_PER_LEVEL; rtree->levels[i].cumbits = rtree->levels[i-1].cumbits + RTREE_BITS_PER_LEVEL; } /* Leaf level. */ if (height > 1) { rtree->levels[height-1].subtree = NULL; rtree->levels[height-1].bits = bits_in_leaf; rtree->levels[height-1].cumbits = bits; } /* Compute lookup table to be used by rtree_start_level(). 
*/ for (i = 0; i < RTREE_HEIGHT_MAX; i++) { rtree->start_level[i] = hmin(RTREE_HEIGHT_MAX - 1 - i, height - 1); } return (false); } static void rtree_delete_subtree(rtree_t *rtree, rtree_node_elm_t *node, unsigned level) { if (level + 1 < rtree->height) { size_t nchildren, i; nchildren = ZU(1) << rtree->levels[level].bits; for (i = 0; i < nchildren; i++) { rtree_node_elm_t *child = node[i].child; if (child != NULL) rtree_delete_subtree(rtree, child, level + 1); } } rtree->dalloc(node); } void rtree_delete(rtree_t *rtree) { unsigned i; for (i = 0; i < rtree->height; i++) { rtree_node_elm_t *subtree = rtree->levels[i].subtree; if (subtree != NULL) rtree_delete_subtree(rtree, subtree, i); } } static rtree_node_elm_t * rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp) { rtree_node_elm_t *node; if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) { /* * Another thread is already in the process of initializing. * Spin-wait until initialization is complete. */ do { CPU_SPINWAIT; node = atomic_read_p((void **)elmp); } while (node == RTREE_NODE_INITIALIZING); } else { node = rtree->alloc(ZU(1) << rtree->levels[level].bits); if (node == NULL) return (NULL); atomic_write_p((void **)elmp, node); } return (node); } rtree_node_elm_t * rtree_subtree_read_hard(rtree_t *rtree, unsigned level) { return (rtree_node_init(rtree, level, &rtree->levels[level].subtree)); } rtree_node_elm_t * rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level) { return (rtree_node_init(rtree, level, &elm->child)); } disque-1.0-rc1/deps/jemalloc/src/stats.c000066400000000000000000000474161264176561100201660ustar00rootroot00000000000000#define JEMALLOC_STATS_C_ #include "jemalloc/internal/jemalloc_internal.h" #define CTL_GET(n, v, t) do { \ size_t sz = sizeof(t); \ xmallctl(n, v, &sz, NULL, 0); \ } while (0) #define CTL_M2_GET(n, i, v, t) do { \ size_t mib[6]; \ size_t miblen = sizeof(mib) / sizeof(size_t); \ size_t sz = sizeof(t); \ xmallctlnametomib(n, mib, &miblen); \ mib[2] = (i); \ xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ } while (0) #define CTL_M2_M4_GET(n, i, j, v, t) do { \ size_t mib[6]; \ size_t miblen = sizeof(mib) / sizeof(size_t); \ size_t sz = sizeof(t); \ xmallctlnametomib(n, mib, &miblen); \ mib[2] = (i); \ mib[4] = (j); \ xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ } while (0) /******************************************************************************/ /* Data. */ bool opt_stats_print = false; size_t stats_cactive = 0; /******************************************************************************/ /* Function prototypes for non-inline static functions. 
*/ static void stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, unsigned i); static void stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque, unsigned i); static void stats_arena_hchunks_print( void (*write_cb)(void *, const char *), void *cbopaque, unsigned i); static void stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, unsigned i, bool bins, bool large, bool huge); /******************************************************************************/ static void stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, unsigned i) { size_t page; bool config_tcache, in_gap; unsigned nbins, j; CTL_GET("arenas.page", &page, size_t); CTL_GET("config.tcache", &config_tcache, bool); if (config_tcache) { malloc_cprintf(write_cb, cbopaque, "bins: size ind allocated nmalloc" " ndalloc nrequests curregs curruns regs" " pgs util nfills nflushes newruns" " reruns\n"); } else { malloc_cprintf(write_cb, cbopaque, "bins: size ind allocated nmalloc" " ndalloc nrequests curregs curruns regs" " pgs util newruns reruns\n"); } CTL_GET("arenas.nbins", &nbins, unsigned); for (j = 0, in_gap = false; j < nbins; j++) { uint64_t nruns; CTL_M2_M4_GET("stats.arenas.0.bins.0.nruns", i, j, &nruns, uint64_t); if (nruns == 0) in_gap = true; else { size_t reg_size, run_size, curregs, availregs, milli; size_t curruns; uint32_t nregs; uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; uint64_t reruns; char util[6]; /* "x.yyy". */ if (in_gap) { malloc_cprintf(write_cb, cbopaque, " ---\n"); in_gap = false; } CTL_M2_GET("arenas.bin.0.size", j, ®_size, size_t); CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t); CTL_M2_GET("arenas.bin.0.run_size", j, &run_size, size_t); CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc, uint64_t); CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc, uint64_t); CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs, size_t); CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j, &nrequests, uint64_t); if (config_tcache) { CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j, &nfills, uint64_t); CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j, &nflushes, uint64_t); } CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j, &reruns, uint64_t); CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j, &curruns, size_t); availregs = nregs * curruns; milli = (availregs != 0) ? 
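/*
 * (Editor's note) `milli` is region utilization in thousandths, later
 * rendered as "x.yyy": e.g. curregs = 57 of availregs = 64 yields
 * milli = 57000 / 64 = 890, printed as "0.890".
 */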
(1000 * curregs) / availregs : 1000; assert(milli <= 1000); if (milli < 10) { malloc_snprintf(util, sizeof(util), "0.00%zu", milli); } else if (milli < 100) { malloc_snprintf(util, sizeof(util), "0.0%zu", milli); } else if (milli < 1000) { malloc_snprintf(util, sizeof(util), "0.%zu", milli); } else malloc_snprintf(util, sizeof(util), "1"); if (config_tcache) { malloc_cprintf(write_cb, cbopaque, "%20zu %3u %12zu %12"FMTu64 " %12"FMTu64" %12"FMTu64" %12zu" " %12zu %4u %3zu %-5s %12"FMTu64 " %12"FMTu64" %12"FMTu64" %12"FMTu64"\n", reg_size, j, curregs * reg_size, nmalloc, ndalloc, nrequests, curregs, curruns, nregs, run_size / page, util, nfills, nflushes, nruns, reruns); } else { malloc_cprintf(write_cb, cbopaque, "%20zu %3u %12zu %12"FMTu64 " %12"FMTu64" %12"FMTu64" %12zu" " %12zu %4u %3zu %-5s %12"FMTu64 " %12"FMTu64"\n", reg_size, j, curregs * reg_size, nmalloc, ndalloc, nrequests, curregs, curruns, nregs, run_size / page, util, nruns, reruns); } } } if (in_gap) { malloc_cprintf(write_cb, cbopaque, " ---\n"); } } static void stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque, unsigned i) { unsigned nbins, nlruns, j; bool in_gap; malloc_cprintf(write_cb, cbopaque, "large: size ind allocated nmalloc ndalloc" " nrequests curruns\n"); CTL_GET("arenas.nbins", &nbins, unsigned); CTL_GET("arenas.nlruns", &nlruns, unsigned); for (j = 0, in_gap = false; j < nlruns; j++) { uint64_t nmalloc, ndalloc, nrequests; size_t run_size, curruns; CTL_M2_M4_GET("stats.arenas.0.lruns.0.nmalloc", i, j, &nmalloc, uint64_t); CTL_M2_M4_GET("stats.arenas.0.lruns.0.ndalloc", i, j, &ndalloc, uint64_t); CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j, &nrequests, uint64_t); if (nrequests == 0) in_gap = true; else { CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t); CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j, &curruns, size_t); if (in_gap) { malloc_cprintf(write_cb, cbopaque, " ---\n"); in_gap = false; } malloc_cprintf(write_cb, cbopaque, "%20zu %3u %12zu %12"FMTu64" %12"FMTu64 " %12"FMTu64" %12zu\n", run_size, nbins + j, curruns * run_size, nmalloc, ndalloc, nrequests, curruns); } } if (in_gap) { malloc_cprintf(write_cb, cbopaque, " ---\n"); } } static void stats_arena_hchunks_print(void (*write_cb)(void *, const char *), void *cbopaque, unsigned i) { unsigned nbins, nlruns, nhchunks, j; bool in_gap; malloc_cprintf(write_cb, cbopaque, "huge: size ind allocated nmalloc ndalloc" " nrequests curhchunks\n"); CTL_GET("arenas.nbins", &nbins, unsigned); CTL_GET("arenas.nlruns", &nlruns, unsigned); CTL_GET("arenas.nhchunks", &nhchunks, unsigned); for (j = 0, in_gap = false; j < nhchunks; j++) { uint64_t nmalloc, ndalloc, nrequests; size_t hchunk_size, curhchunks; CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nmalloc", i, j, &nmalloc, uint64_t); CTL_M2_M4_GET("stats.arenas.0.hchunks.0.ndalloc", i, j, &ndalloc, uint64_t); CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nrequests", i, j, &nrequests, uint64_t); if (nrequests == 0) in_gap = true; else { CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size, size_t); CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i, j, &curhchunks, size_t); if (in_gap) { malloc_cprintf(write_cb, cbopaque, " ---\n"); in_gap = false; } malloc_cprintf(write_cb, cbopaque, "%20zu %3u %12zu %12"FMTu64" %12"FMTu64 " %12"FMTu64" %12zu\n", hchunk_size, nbins + nlruns + j, curhchunks * hchunk_size, nmalloc, ndalloc, nrequests, curhchunks); } } if (in_gap) { malloc_cprintf(write_cb, cbopaque, " ---\n"); } } static void stats_arena_print(void (*write_cb)(void *, const char 
*), void *cbopaque, unsigned i, bool bins, bool large, bool huge) { unsigned nthreads; const char *dss; ssize_t lg_dirty_mult; size_t page, pactive, pdirty, mapped; size_t metadata_mapped, metadata_allocated; uint64_t npurge, nmadvise, purged; size_t small_allocated; uint64_t small_nmalloc, small_ndalloc, small_nrequests; size_t large_allocated; uint64_t large_nmalloc, large_ndalloc, large_nrequests; size_t huge_allocated; uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests; CTL_GET("arenas.page", &page, size_t); CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned); malloc_cprintf(write_cb, cbopaque, "assigned threads: %u\n", nthreads); CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *); malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n", dss); CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t); if (lg_dirty_mult >= 0) { malloc_cprintf(write_cb, cbopaque, "min active:dirty page ratio: %u:1\n", (1U << lg_dirty_mult)); } else { malloc_cprintf(write_cb, cbopaque, "min active:dirty page ratio: N/A\n"); } CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t); CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t); CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t); CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t); CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t); malloc_cprintf(write_cb, cbopaque, "dirty pages: %zu:%zu active:dirty, %"FMTu64" sweep%s, %"FMTu64 " madvise%s, %"FMTu64" purged\n", pactive, pdirty, npurge, npurge == 1 ? "" : "s", nmadvise, nmadvise == 1 ? "" : "s", purged); malloc_cprintf(write_cb, cbopaque, " allocated nmalloc ndalloc" " nrequests\n"); CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated, size_t); CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t); CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t); CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests, uint64_t); malloc_cprintf(write_cb, cbopaque, "small: %12zu %12"FMTu64" %12"FMTu64 " %12"FMTu64"\n", small_allocated, small_nmalloc, small_ndalloc, small_nrequests); CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated, size_t); CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t); CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t); CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests, uint64_t); malloc_cprintf(write_cb, cbopaque, "large: %12zu %12"FMTu64" %12"FMTu64 " %12"FMTu64"\n", large_allocated, large_nmalloc, large_ndalloc, large_nrequests); CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t); CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t); CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t); CTL_M2_GET("stats.arenas.0.huge.nrequests", i, &huge_nrequests, uint64_t); malloc_cprintf(write_cb, cbopaque, "huge: %12zu %12"FMTu64" %12"FMTu64 " %12"FMTu64"\n", huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests); malloc_cprintf(write_cb, cbopaque, "total: %12zu %12"FMTu64" %12"FMTu64 " %12"FMTu64"\n", small_allocated + large_allocated + huge_allocated, small_nmalloc + large_nmalloc + huge_nmalloc, small_ndalloc + large_ndalloc + huge_ndalloc, small_nrequests + large_nrequests + huge_nrequests); malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", pactive * page); CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t); malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped); CTL_M2_GET("stats.arenas.0.metadata.mapped", i, 
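/*
 * (Editor's aside) CTL_M2_GET mirrors jemalloc's public MIB interface:
 * translate the name once, patch the arena index into mib[2], then
 * query by MIB. Roughly equivalent caller-side code, assuming the
 * unprefixed public names:
 */
#if 0
        size_t mib[6];
        size_t miblen = sizeof(mib) / sizeof(size_t);
        size_t pactive, sz = sizeof(pactive);

        mallctlnametomib("stats.arenas.0.pactive", mib, &miblen);
        mib[2] = (size_t)i;     /* select arena i */
        mallctlbymib(mib, miblen, &pactive, &sz, NULL, 0);
#endif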
&metadata_mapped, size_t); CTL_M2_GET("stats.arenas.0.metadata.allocated", i, &metadata_allocated, size_t); malloc_cprintf(write_cb, cbopaque, "metadata: mapped: %zu, allocated: %zu\n", metadata_mapped, metadata_allocated); if (bins) stats_arena_bins_print(write_cb, cbopaque, i); if (large) stats_arena_lruns_print(write_cb, cbopaque, i); if (huge) stats_arena_hchunks_print(write_cb, cbopaque, i); } void stats_print(void (*write_cb)(void *, const char *), void *cbopaque, const char *opts) { int err; uint64_t epoch; size_t u64sz; bool general = true; bool merged = true; bool unmerged = true; bool bins = true; bool large = true; bool huge = true; /* * Refresh stats, in case mallctl() was called by the application. * * Check for OOM here, since refreshing the ctl cache can trigger * allocation. In practice, none of the subsequent mallctl()-related * calls in this function will cause OOM if this one succeeds. * */ epoch = 1; u64sz = sizeof(uint64_t); err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t)); if (err != 0) { if (err == EAGAIN) { malloc_write(": Memory allocation failure in " "mallctl(\"epoch\", ...)\n"); return; } malloc_write(": Failure in mallctl(\"epoch\", " "...)\n"); abort(); } if (opts != NULL) { unsigned i; for (i = 0; opts[i] != '\0'; i++) { switch (opts[i]) { case 'g': general = false; break; case 'm': merged = false; break; case 'a': unmerged = false; break; case 'b': bins = false; break; case 'l': large = false; break; case 'h': huge = false; break; default:; } } } malloc_cprintf(write_cb, cbopaque, "___ Begin jemalloc statistics ___\n"); if (general) { const char *cpv; bool bv; unsigned uv; ssize_t ssv; size_t sv, bsz, ssz, sssz, cpsz; bsz = sizeof(bool); ssz = sizeof(size_t); sssz = sizeof(ssize_t); cpsz = sizeof(const char *); CTL_GET("version", &cpv, const char *); malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv); CTL_GET("config.debug", &bv, bool); malloc_cprintf(write_cb, cbopaque, "Assertions %s\n", bv ? "enabled" : "disabled"); #define OPT_WRITE_BOOL(n) \ if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0) { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": %s\n", bv ? "true" : "false"); \ } #define OPT_WRITE_BOOL_MUTABLE(n, m) { \ bool bv2; \ if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0 && \ je_mallctl(#m, &bv2, &bsz, NULL, 0) == 0) { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": %s ("#m": %s)\n", bv ? "true" \ : "false", bv2 ? 
"true" : "false"); \ } \ } #define OPT_WRITE_SIZE_T(n) \ if (je_mallctl("opt."#n, &sv, &ssz, NULL, 0) == 0) { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": %zu\n", sv); \ } #define OPT_WRITE_SSIZE_T(n) \ if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0) { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": %zd\n", ssv); \ } #define OPT_WRITE_SSIZE_T_MUTABLE(n, m) { \ ssize_t ssv2; \ if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0 && \ je_mallctl(#m, &ssv2, &sssz, NULL, 0) == 0) { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": %zd ("#m": %zd)\n", \ ssv, ssv2); \ } \ } #define OPT_WRITE_CHAR_P(n) \ if (je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0) == 0) { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": \"%s\"\n", cpv); \ } malloc_cprintf(write_cb, cbopaque, "Run-time option settings:\n"); OPT_WRITE_BOOL(abort) OPT_WRITE_SIZE_T(lg_chunk) OPT_WRITE_CHAR_P(dss) OPT_WRITE_SIZE_T(narenas) OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult, arenas.lg_dirty_mult) OPT_WRITE_BOOL(stats_print) OPT_WRITE_CHAR_P(junk) OPT_WRITE_SIZE_T(quarantine) OPT_WRITE_BOOL(redzone) OPT_WRITE_BOOL(zero) OPT_WRITE_BOOL(utrace) OPT_WRITE_BOOL(valgrind) OPT_WRITE_BOOL(xmalloc) OPT_WRITE_BOOL(tcache) OPT_WRITE_SSIZE_T(lg_tcache_max) OPT_WRITE_BOOL(prof) OPT_WRITE_CHAR_P(prof_prefix) OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active) OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init, prof.thread_active_init) OPT_WRITE_SSIZE_T(lg_prof_sample) OPT_WRITE_BOOL(prof_accum) OPT_WRITE_SSIZE_T(lg_prof_interval) OPT_WRITE_BOOL(prof_gdump) OPT_WRITE_BOOL(prof_final) OPT_WRITE_BOOL(prof_leak) #undef OPT_WRITE_BOOL #undef OPT_WRITE_BOOL_MUTABLE #undef OPT_WRITE_SIZE_T #undef OPT_WRITE_SSIZE_T #undef OPT_WRITE_CHAR_P malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus); CTL_GET("arenas.narenas", &uv, unsigned); malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv); malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n", sizeof(void *)); CTL_GET("arenas.quantum", &sv, size_t); malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv); CTL_GET("arenas.page", &sv, size_t); malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv); CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t); if (ssv >= 0) { malloc_cprintf(write_cb, cbopaque, "Min active:dirty page ratio per arena: %u:1\n", (1U << ssv)); } else { malloc_cprintf(write_cb, cbopaque, "Min active:dirty page ratio per arena: N/A\n"); } if (je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0) == 0) { malloc_cprintf(write_cb, cbopaque, "Maximum thread-cached size class: %zu\n", sv); } if (je_mallctl("opt.prof", &bv, &bsz, NULL, 0) == 0 && bv) { CTL_GET("prof.lg_sample", &sv, size_t); malloc_cprintf(write_cb, cbopaque, "Average profile sample interval: %"FMTu64 " (2^%zu)\n", (((uint64_t)1U) << sv), sv); CTL_GET("opt.lg_prof_interval", &ssv, ssize_t); if (ssv >= 0) { malloc_cprintf(write_cb, cbopaque, "Average profile dump interval: %"FMTu64 " (2^%zd)\n", (((uint64_t)1U) << ssv), ssv); } else { malloc_cprintf(write_cb, cbopaque, "Average profile dump interval: N/A\n"); } } CTL_GET("opt.lg_chunk", &sv, size_t); malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n", (ZU(1) << sv), sv); } if (config_stats) { size_t *cactive; size_t allocated, active, metadata, resident, mapped; CTL_GET("stats.cactive", &cactive, size_t *); CTL_GET("stats.allocated", &allocated, size_t); CTL_GET("stats.active", &active, size_t); CTL_GET("stats.metadata", &metadata, size_t); CTL_GET("stats.resident", &resident, size_t); CTL_GET("stats.mapped", &mapped, size_t); malloc_cprintf(write_cb, cbopaque, 
"Allocated: %zu, active: %zu, metadata: %zu," " resident: %zu, mapped: %zu\n", allocated, active, metadata, resident, mapped); malloc_cprintf(write_cb, cbopaque, "Current active ceiling: %zu\n", atomic_read_z(cactive)); if (merged) { unsigned narenas; CTL_GET("arenas.narenas", &narenas, unsigned); { VARIABLE_ARRAY(bool, initialized, narenas); size_t isz; unsigned i, ninitialized; isz = sizeof(bool) * narenas; xmallctl("arenas.initialized", initialized, &isz, NULL, 0); for (i = ninitialized = 0; i < narenas; i++) { if (initialized[i]) ninitialized++; } if (ninitialized > 1 || !unmerged) { /* Print merged arena stats. */ malloc_cprintf(write_cb, cbopaque, "\nMerged arenas stats:\n"); stats_arena_print(write_cb, cbopaque, narenas, bins, large, huge); } } } if (unmerged) { unsigned narenas; /* Print stats for each arena. */ CTL_GET("arenas.narenas", &narenas, unsigned); { VARIABLE_ARRAY(bool, initialized, narenas); size_t isz; unsigned i; isz = sizeof(bool) * narenas; xmallctl("arenas.initialized", initialized, &isz, NULL, 0); for (i = 0; i < narenas; i++) { if (initialized[i]) { malloc_cprintf(write_cb, cbopaque, "\narenas[%u]:\n", i); stats_arena_print(write_cb, cbopaque, i, bins, large, huge); } } } } } malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n"); } disque-1.0-rc1/deps/jemalloc/src/tcache.c000066400000000000000000000321631264176561100202500ustar00rootroot00000000000000#define JEMALLOC_TCACHE_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ /* Data. */ bool opt_tcache = true; ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT; tcache_bin_info_t *tcache_bin_info; static unsigned stack_nelms; /* Total stack elms per tcache. */ size_t nhbins; size_t tcache_maxclass; tcaches_t *tcaches; /* Index of first element within tcaches that has never been used. */ static unsigned tcaches_past; /* Head of singly linked list tracking available tcaches elements. */ static tcaches_t *tcaches_avail; /******************************************************************************/ size_t tcache_salloc(const void *ptr) { return (arena_salloc(ptr, false)); } void tcache_event_hard(tsd_t *tsd, tcache_t *tcache) { szind_t binind = tcache->next_gc_bin; tcache_bin_t *tbin = &tcache->tbins[binind]; tcache_bin_info_t *tbin_info = &tcache_bin_info[binind]; if (tbin->low_water > 0) { /* * Flush (ceiling) 3/4 of the objects below the low water mark. */ if (binind < NBINS) { tcache_bin_flush_small(tsd, tcache, tbin, binind, tbin->ncached - tbin->low_water + (tbin->low_water >> 2)); } else { tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached - tbin->low_water + (tbin->low_water >> 2), tcache); } /* * Reduce fill count by 2X. Limit lg_fill_div such that the * fill count is always at least 1. */ if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1) tbin->lg_fill_div++; } else if (tbin->low_water < 0) { /* * Increase fill count by 2X. Make sure lg_fill_div stays * greater than 0. */ if (tbin->lg_fill_div > 1) tbin->lg_fill_div--; } tbin->low_water = tbin->ncached; tcache->next_gc_bin++; if (tcache->next_gc_bin == nhbins) tcache->next_gc_bin = 0; tcache->ev_cnt = 0; } void * tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache, tcache_bin_t *tbin, szind_t binind) { void *ret; arena_tcache_fill_small(arena, tbin, binind, config_prof ? 
tcache->prof_accumbytes : 0); if (config_prof) tcache->prof_accumbytes = 0; ret = tcache_alloc_easy(tbin); return (ret); } void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, szind_t binind, unsigned rem) { arena_t *arena; void *ptr; unsigned i, nflush, ndeferred; bool merged_stats = false; assert(binind < NBINS); assert(rem <= tbin->ncached); arena = arena_choose(tsd, NULL); assert(arena != NULL); for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { /* Lock the arena bin associated with the first object. */ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( tbin->avail[0]); arena_t *bin_arena = extent_node_arena_get(&chunk->node); arena_bin_t *bin = &bin_arena->bins[binind]; if (config_prof && bin_arena == arena) { if (arena_prof_accum(arena, tcache->prof_accumbytes)) prof_idump(); tcache->prof_accumbytes = 0; } malloc_mutex_lock(&bin->lock); if (config_stats && bin_arena == arena) { assert(!merged_stats); merged_stats = true; bin->stats.nflushes++; bin->stats.nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; } ndeferred = 0; for (i = 0; i < nflush; i++) { ptr = tbin->avail[i]; assert(ptr != NULL); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (extent_node_arena_get(&chunk->node) == bin_arena) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; arena_chunk_map_bits_t *bitselm = arena_bitselm_get(chunk, pageind); arena_dalloc_bin_junked_locked(bin_arena, chunk, ptr, bitselm); } else { /* * This object was allocated via a different * arena bin than the one that is currently * locked. Stash the object, so that it can be * handled in a future pass. */ tbin->avail[ndeferred] = ptr; ndeferred++; } } malloc_mutex_unlock(&bin->lock); } if (config_stats && !merged_stats) { /* * The flush loop didn't happen to flush to this thread's * arena, so the stats didn't get merged. Manually do so now. */ arena_bin_t *bin = &arena->bins[binind]; malloc_mutex_lock(&bin->lock); bin->stats.nflushes++; bin->stats.nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; malloc_mutex_unlock(&bin->lock); } memmove(tbin->avail, &tbin->avail[tbin->ncached - rem], rem * sizeof(void *)); tbin->ncached = rem; if ((int)tbin->ncached < tbin->low_water) tbin->low_water = tbin->ncached; } void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, unsigned rem, tcache_t *tcache) { arena_t *arena; void *ptr; unsigned i, nflush, ndeferred; bool merged_stats = false; assert(binind < nhbins); assert(rem <= tbin->ncached); arena = arena_choose(tsd, NULL); assert(arena != NULL); for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { /* Lock the arena associated with the first object. 
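 * [Editor's aside, not in the original source: a tcache bin may hold
 * objects from several arenas, but each pass of this loop takes only
 * the bin lock of the arena owning the first object. Objects that
 * belong to a different arena are stashed back into tbin->avail and
 * counted in ndeferred, to be flushed by a later pass under that
 * arena's lock. Flushing {A, B, A} therefore takes two passes: both A
 * objects under A's lock, then B under B's.]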
*/ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( tbin->avail[0]); arena_t *locked_arena = extent_node_arena_get(&chunk->node); UNUSED bool idump; if (config_prof) idump = false; malloc_mutex_lock(&locked_arena->lock); if ((config_prof || config_stats) && locked_arena == arena) { if (config_prof) { idump = arena_prof_accum_locked(arena, tcache->prof_accumbytes); tcache->prof_accumbytes = 0; } if (config_stats) { merged_stats = true; arena->stats.nrequests_large += tbin->tstats.nrequests; arena->stats.lstats[binind - NBINS].nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; } } ndeferred = 0; for (i = 0; i < nflush; i++) { ptr = tbin->avail[i]; assert(ptr != NULL); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (extent_node_arena_get(&chunk->node) == locked_arena) { arena_dalloc_large_junked_locked(locked_arena, chunk, ptr); } else { /* * This object was allocated via a different * arena than the one that is currently locked. * Stash the object, so that it can be handled * in a future pass. */ tbin->avail[ndeferred] = ptr; ndeferred++; } } malloc_mutex_unlock(&locked_arena->lock); if (config_prof && idump) prof_idump(); } if (config_stats && !merged_stats) { /* * The flush loop didn't happen to flush to this thread's * arena, so the stats didn't get merged. Manually do so now. */ malloc_mutex_lock(&arena->lock); arena->stats.nrequests_large += tbin->tstats.nrequests; arena->stats.lstats[binind - NBINS].nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; malloc_mutex_unlock(&arena->lock); } memmove(tbin->avail, &tbin->avail[tbin->ncached - rem], rem * sizeof(void *)); tbin->ncached = rem; if ((int)tbin->ncached < tbin->low_water) tbin->low_water = tbin->ncached; } void tcache_arena_associate(tcache_t *tcache, arena_t *arena) { if (config_stats) { /* Link into list of extant tcaches. */ malloc_mutex_lock(&arena->lock); ql_elm_new(tcache, link); ql_tail_insert(&arena->tcache_ql, tcache, link); malloc_mutex_unlock(&arena->lock); } } void tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena) { tcache_arena_dissociate(tcache, oldarena); tcache_arena_associate(tcache, newarena); } void tcache_arena_dissociate(tcache_t *tcache, arena_t *arena) { if (config_stats) { /* Unlink from list of extant tcaches. */ malloc_mutex_lock(&arena->lock); if (config_debug) { bool in_ql = false; tcache_t *iter; ql_foreach(iter, &arena->tcache_ql, link) { if (iter == tcache) { in_ql = true; break; } } assert(in_ql); } ql_remove(&arena->tcache_ql, tcache, link); tcache_stats_merge(tcache, arena); malloc_mutex_unlock(&arena->lock); } } tcache_t * tcache_get_hard(tsd_t *tsd) { arena_t *arena; if (!tcache_enabled_get()) { if (tsd_nominal(tsd)) tcache_enabled_set(false); /* Memoize. */ return (NULL); } arena = arena_choose(tsd, NULL); if (unlikely(arena == NULL)) return (NULL); return (tcache_create(tsd, arena)); } tcache_t * tcache_create(tsd_t *tsd, arena_t *arena) { tcache_t *tcache; size_t size, stack_offset; unsigned i; size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins); /* Naturally align the pointer stacks. */ size = PTR_CEILING(size); stack_offset = size; size += stack_nelms * sizeof(void *); /* Avoid false cacheline sharing. 
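 * [Editor's aside, not in the original source: the computation just
 * above sizes a single block holding the tcache_t header, nhbins
 * tcache_bin_t descriptors, and every bin's pointer stack laid out
 * back to back:
 *
 *     size = offsetof(tcache_t, tbins) + sizeof(tcache_bin_t) * nhbins;
 *     size = PTR_CEILING(size);              // align the first stack
 *     stack_offset = size;
 *     size += stack_nelms * sizeof(void *);  // all stacks, contiguous
 *
 * The per-bin loop below then points each tbins[i].avail at its slice
 * of that region via stack_offset.]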
*/ size = sa2u(size, CACHELINE); tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, a0get()); if (tcache == NULL) return (NULL); tcache_arena_associate(tcache, arena); assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0); for (i = 0; i < nhbins; i++) { tcache->tbins[i].lg_fill_div = 1; tcache->tbins[i].avail = (void **)((uintptr_t)tcache + (uintptr_t)stack_offset); stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); } return (tcache); } static void tcache_destroy(tsd_t *tsd, tcache_t *tcache) { arena_t *arena; unsigned i; arena = arena_choose(tsd, NULL); tcache_arena_dissociate(tcache, arena); for (i = 0; i < NBINS; i++) { tcache_bin_t *tbin = &tcache->tbins[i]; tcache_bin_flush_small(tsd, tcache, tbin, i, 0); if (config_stats && tbin->tstats.nrequests != 0) { arena_bin_t *bin = &arena->bins[i]; malloc_mutex_lock(&bin->lock); bin->stats.nrequests += tbin->tstats.nrequests; malloc_mutex_unlock(&bin->lock); } } for (; i < nhbins; i++) { tcache_bin_t *tbin = &tcache->tbins[i]; tcache_bin_flush_large(tsd, tbin, i, 0, tcache); if (config_stats && tbin->tstats.nrequests != 0) { malloc_mutex_lock(&arena->lock); arena->stats.nrequests_large += tbin->tstats.nrequests; arena->stats.lstats[i - NBINS].nrequests += tbin->tstats.nrequests; malloc_mutex_unlock(&arena->lock); } } if (config_prof && tcache->prof_accumbytes > 0 && arena_prof_accum(arena, tcache->prof_accumbytes)) prof_idump(); idalloctm(tsd, tcache, false, true); } void tcache_cleanup(tsd_t *tsd) { tcache_t *tcache; if (!config_tcache) return; if ((tcache = tsd_tcache_get(tsd)) != NULL) { tcache_destroy(tsd, tcache); tsd_tcache_set(tsd, NULL); } } void tcache_enabled_cleanup(tsd_t *tsd) { /* Do nothing. */ } /* Caller must own arena->lock. */ void tcache_stats_merge(tcache_t *tcache, arena_t *arena) { unsigned i; cassert(config_stats); /* Merge and reset tcache stats. */ for (i = 0; i < NBINS; i++) { arena_bin_t *bin = &arena->bins[i]; tcache_bin_t *tbin = &tcache->tbins[i]; malloc_mutex_lock(&bin->lock); bin->stats.nrequests += tbin->tstats.nrequests; malloc_mutex_unlock(&bin->lock); tbin->tstats.nrequests = 0; } for (; i < nhbins; i++) { malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS]; tcache_bin_t *tbin = &tcache->tbins[i]; arena->stats.nrequests_large += tbin->tstats.nrequests; lstats->nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; } } bool tcaches_create(tsd_t *tsd, unsigned *r_ind) { tcache_t *tcache; tcaches_t *elm; if (tcaches == NULL) { tcaches = base_alloc(sizeof(tcache_t *) * (MALLOCX_TCACHE_MAX+1)); if (tcaches == NULL) return (true); } if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) return (true); tcache = tcache_create(tsd, a0get()); if (tcache == NULL) return (true); if (tcaches_avail != NULL) { elm = tcaches_avail; tcaches_avail = tcaches_avail->next; elm->tcache = tcache; *r_ind = elm - tcaches; } else { elm = &tcaches[tcaches_past]; elm->tcache = tcache; *r_ind = tcaches_past; tcaches_past++; } return (false); } static void tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm) { if (elm->tcache == NULL) return; tcache_destroy(tsd, elm->tcache); elm->tcache = NULL; } void tcaches_flush(tsd_t *tsd, unsigned ind) { tcaches_elm_flush(tsd, &tcaches[ind]); } void tcaches_destroy(tsd_t *tsd, unsigned ind) { tcaches_t *elm = &tcaches[ind]; tcaches_elm_flush(tsd, elm); elm->next = tcaches_avail; tcaches_avail = elm; } bool tcache_boot(void) { unsigned i; /* * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is * known. 
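 * [Editor's aside, not in the original source: the clamp below keeps
 * tcache_maxclass within [SMALL_MAXCLASS, large_maxclass]. With the
 * usual default of opt_lg_tcache_max = 15 (an assumption here), every
 * request up to 1 << 15 = 32768 bytes is eligible for thread caching;
 * a value below the small-class boundary degenerates to caching small
 * classes only, and anything above large_maxclass is pulled back down
 * to it.]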
*/ if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS) tcache_maxclass = SMALL_MAXCLASS; else if ((1U << opt_lg_tcache_max) > large_maxclass) tcache_maxclass = large_maxclass; else tcache_maxclass = (1U << opt_lg_tcache_max); nhbins = size2index(tcache_maxclass) + 1; /* Initialize tcache_bin_info. */ tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins * sizeof(tcache_bin_info_t)); if (tcache_bin_info == NULL) return (true); stack_nelms = 0; for (i = 0; i < NBINS; i++) { if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) { tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_SMALL_MIN; } else if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) { tcache_bin_info[i].ncached_max = (arena_bin_info[i].nregs << 1); } else { tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_SMALL_MAX; } stack_nelms += tcache_bin_info[i].ncached_max; } for (; i < nhbins; i++) { tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE; stack_nelms += tcache_bin_info[i].ncached_max; } return (false); } disque-1.0-rc1/deps/jemalloc/src/tsd.c000066400000000000000000000074011264176561100176100ustar00rootroot00000000000000#define JEMALLOC_TSD_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ /* Data. */ static unsigned ncleanups; static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX]; malloc_tsd_data(, , tsd_t, TSD_INITIALIZER) /******************************************************************************/ void * malloc_tsd_malloc(size_t size) { return (a0malloc(CACHELINE_CEILING(size))); } void malloc_tsd_dalloc(void *wrapper) { a0dalloc(wrapper); } void malloc_tsd_no_cleanup(void *arg) { not_reached(); } #if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32) #ifndef _WIN32 JEMALLOC_EXPORT #endif void _malloc_thread_cleanup(void) { bool pending[MALLOC_TSD_CLEANUPS_MAX], again; unsigned i; for (i = 0; i < ncleanups; i++) pending[i] = true; do { again = false; for (i = 0; i < ncleanups; i++) { if (pending[i]) { pending[i] = cleanups[i](); if (pending[i]) again = true; } } } while (again); } #endif void malloc_tsd_cleanup_register(bool (*f)(void)) { assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX); cleanups[ncleanups] = f; ncleanups++; } void tsd_cleanup(void *arg) { tsd_t *tsd = (tsd_t *)arg; switch (tsd->state) { case tsd_state_uninitialized: /* Do nothing. */ break; case tsd_state_nominal: #define O(n, t) \ n##_cleanup(tsd); MALLOC_TSD #undef O tsd->state = tsd_state_purgatory; tsd_set(tsd); break; case tsd_state_purgatory: /* * The previous time this destructor was called, we set the * state to tsd_state_purgatory so that other destructors * wouldn't cause re-creation of the tsd. This time, do * nothing, and do not request another callback. */ break; case tsd_state_reincarnated: /* * Another destructor deallocated memory after this destructor * was called. Reset state to tsd_state_purgatory and request * another callback. 
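 * [Editor's aside, not in the original source: this switch walks a
 * small state machine driven by repeated pthread destructor callbacks:
 *     uninitialized -> nothing to do;
 *     nominal       -> run per-field cleanups, enter purgatory, and
 *                      re-register via tsd_set() for one more pass;
 *     purgatory     -> final pass, no re-registration;
 *     reincarnated  -> another destructor allocated after us, so fall
 *                      back to purgatory and request one more pass.]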
*/ tsd->state = tsd_state_purgatory; tsd_set(tsd); break; default: not_reached(); } } bool malloc_tsd_boot0(void) { ncleanups = 0; if (tsd_boot0()) return (true); *tsd_arenas_cache_bypassp_get(tsd_fetch()) = true; return (false); } void malloc_tsd_boot1(void) { tsd_boot1(); *tsd_arenas_cache_bypassp_get(tsd_fetch()) = false; } #ifdef _WIN32 static BOOL WINAPI _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) { switch (fdwReason) { #ifdef JEMALLOC_LAZY_LOCK case DLL_THREAD_ATTACH: isthreaded = true; break; #endif case DLL_THREAD_DETACH: _malloc_thread_cleanup(); break; default: break; } return (true); } #ifdef _MSC_VER # ifdef _M_IX86 # pragma comment(linker, "/INCLUDE:__tls_used") # else # pragma comment(linker, "/INCLUDE:_tls_used") # endif # pragma section(".CRT$XLY",long,read) #endif JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used) static BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) = _tls_callback; #endif #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ !defined(_WIN32)) void * tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) { pthread_t self = pthread_self(); tsd_init_block_t *iter; /* Check whether this thread has already inserted into the list. */ malloc_mutex_lock(&head->lock); ql_foreach(iter, &head->blocks, link) { if (iter->thread == self) { malloc_mutex_unlock(&head->lock); return (iter->data); } } /* Insert block into list. */ ql_elm_new(block, link); block->thread = self; ql_tail_insert(&head->blocks, block, link); malloc_mutex_unlock(&head->lock); return (NULL); } void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) { malloc_mutex_lock(&head->lock); ql_remove(&head->blocks, block, link); malloc_mutex_unlock(&head->lock); } #endif disque-1.0-rc1/deps/jemalloc/src/util.c000066400000000000000000000331651264176561100200010ustar00rootroot00000000000000#define assert(e) do { \ if (config_debug && !(e)) { \ malloc_write("<jemalloc>: Failed assertion\n"); \ abort(); \ } \ } while (0) #define not_reached() do { \ if (config_debug) { \ malloc_write("<jemalloc>: Unreachable code reached\n"); \ abort(); \ } \ } while (0) #define not_implemented() do { \ if (config_debug) { \ malloc_write("<jemalloc>: Not implemented\n"); \ abort(); \ } \ } while (0) #define JEMALLOC_UTIL_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ /* Function prototypes for non-inline static functions. */ static void wrtmessage(void *cbopaque, const char *s); #define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1) static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p); #define D2S_BUFSIZE (1 + U2S_BUFSIZE) static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p); #define O2S_BUFSIZE (1 + U2S_BUFSIZE) static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p); #define X2S_BUFSIZE (2 + U2S_BUFSIZE) static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p); /******************************************************************************/ /* malloc_message() setup. */ static void wrtmessage(void *cbopaque, const char *s) { #ifdef SYS_write /* * Use syscall(2) rather than write(2) when possible in order to avoid * the possibility of memory allocation within libc. This is necessary * on FreeBSD; most operating systems do not have this problem though.
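 * [Editor's aside, not in the original source: je_malloc_message (the
 * public malloc_message hook) can be pointed at a custom sink so that
 * diagnostics bypass stderr entirely; a minimal sketch, assuming the
 * host application provides a log_line() helper (hypothetical):
 *
 *     static void my_sink(void *opaque, const char *s) {
 *         log_line(s);    // hypothetical application logger
 *     }
 *     // ... at startup: je_malloc_message = my_sink;
 *
 * malloc_write() below then routes every message through the hook.]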
*/ UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s)); #else UNUSED int result = write(STDERR_FILENO, s, strlen(s)); #endif } JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s); /* * Wrapper around malloc_message() that avoids the need for * je_malloc_message(...) throughout the code. */ void malloc_write(const char *s) { if (je_malloc_message != NULL) je_malloc_message(NULL, s); else wrtmessage(NULL, s); } /* * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so * provide a wrapper. */ int buferror(int err, char *buf, size_t buflen) { #ifdef _WIN32 FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0, (LPSTR)buf, buflen, NULL); return (0); #elif defined(__GLIBC__) && defined(_GNU_SOURCE) char *b = strerror_r(err, buf, buflen); if (b != buf) { strncpy(buf, b, buflen); buf[buflen-1] = '\0'; } return (0); #else return (strerror_r(err, buf, buflen)); #endif } uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) { uintmax_t ret, digit; unsigned b; bool neg; const char *p, *ns; p = nptr; if (base < 0 || base == 1 || base > 36) { ns = p; set_errno(EINVAL); ret = UINTMAX_MAX; goto label_return; } b = base; /* Swallow leading whitespace and get sign, if any. */ neg = false; while (true) { switch (*p) { case '\t': case '\n': case '\v': case '\f': case '\r': case ' ': p++; break; case '-': neg = true; /* Fall through. */ case '+': p++; /* Fall through. */ default: goto label_prefix; } } /* Get prefix, if any. */ label_prefix: /* * Note where the first non-whitespace/sign character is so that it is * possible to tell whether any digits are consumed (e.g., " 0" vs. * " -x"). */ ns = p; if (*p == '0') { switch (p[1]) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': if (b == 0) b = 8; if (b == 8) p++; break; case 'X': case 'x': switch (p[2]) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': if (b == 0) b = 16; if (b == 16) p += 2; break; default: break; } break; default: p++; ret = 0; goto label_return; } } if (b == 0) b = 10; /* Convert. */ ret = 0; while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b) || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b) || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) { uintmax_t pret = ret; ret *= b; ret += digit; if (ret < pret) { /* Overflow. */ set_errno(ERANGE); ret = UINTMAX_MAX; goto label_return; } p++; } if (neg) ret = -ret; if (p == ns) { /* No conversion performed. */ set_errno(EINVAL); ret = UINTMAX_MAX; goto label_return; } label_return: if (endptr != NULL) { if (p == ns) { /* No characters were converted. */ *endptr = (char *)nptr; } else *endptr = (char *)p; } return (ret); } static char * u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) { unsigned i; i = U2S_BUFSIZE - 1; s[i] = '\0'; switch (base) { case 10: do { i--; s[i] = "0123456789"[x % (uint64_t)10]; x /= (uint64_t)10; } while (x > 0); break; case 16: { const char *digits = (uppercase) ? "0123456789ABCDEF" : "0123456789abcdef"; do { i--; s[i] = digits[x & 0xf]; x >>= 4; } while (x > 0); break; } default: { const char *digits = (uppercase) ? 
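/* [Editor's note: an illustrative sketch, not part of the original
 * source. malloc_strtoumax() above mirrors strtoumax(3): base 0
 * auto-detects "0x" and leading-zero prefixes, endptr lands on the
 * first unconsumed character, and overflow is caught by the
 * "ret < pret" wraparound test:
 *
 *     char *end;
 *     uintmax_t v = malloc_strtoumax("  0x1fZ", &end, 0);
 *     // v == 31, *end == 'Z'
 *     v = malloc_strtoumax("077", &end, 0);
 *     // v == 63, parsed as octal via the leading zero
 * ] */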
"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" : "0123456789abcdefghijklmnopqrstuvwxyz"; assert(base >= 2 && base <= 36); do { i--; s[i] = digits[x % (uint64_t)base]; x /= (uint64_t)base; } while (x > 0); }} *slen_p = U2S_BUFSIZE - 1 - i; return (&s[i]); } static char * d2s(intmax_t x, char sign, char *s, size_t *slen_p) { bool neg; if ((neg = (x < 0))) x = -x; s = u2s(x, 10, false, s, slen_p); if (neg) sign = '-'; switch (sign) { case '-': if (!neg) break; /* Fall through. */ case ' ': case '+': s--; (*slen_p)++; *s = sign; break; default: not_reached(); } return (s); } static char * o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) { s = u2s(x, 8, false, s, slen_p); if (alt_form && *s != '0') { s--; (*slen_p)++; *s = '0'; } return (s); } static char * x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) { s = u2s(x, 16, uppercase, s, slen_p); if (alt_form) { s -= 2; (*slen_p) += 2; memcpy(s, uppercase ? "0X" : "0x", 2); } return (s); } int malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { int ret; size_t i; const char *f; #define APPEND_C(c) do { \ if (i < size) \ str[i] = (c); \ i++; \ } while (0) #define APPEND_S(s, slen) do { \ if (i < size) { \ size_t cpylen = (slen <= size - i) ? slen : size - i; \ memcpy(&str[i], s, cpylen); \ } \ i += slen; \ } while (0) #define APPEND_PADDED_S(s, slen, width, left_justify) do { \ /* Left padding. */ \ size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \ (size_t)width - slen : 0); \ if (!left_justify && pad_len != 0) { \ size_t j; \ for (j = 0; j < pad_len; j++) \ APPEND_C(' '); \ } \ /* Value. */ \ APPEND_S(s, slen); \ /* Right padding. */ \ if (left_justify && pad_len != 0) { \ size_t j; \ for (j = 0; j < pad_len; j++) \ APPEND_C(' '); \ } \ } while (0) #define GET_ARG_NUMERIC(val, len) do { \ switch (len) { \ case '?': \ val = va_arg(ap, int); \ break; \ case '?' | 0x80: \ val = va_arg(ap, unsigned int); \ break; \ case 'l': \ val = va_arg(ap, long); \ break; \ case 'l' | 0x80: \ val = va_arg(ap, unsigned long); \ break; \ case 'q': \ val = va_arg(ap, long long); \ break; \ case 'q' | 0x80: \ val = va_arg(ap, unsigned long long); \ break; \ case 'j': \ val = va_arg(ap, intmax_t); \ break; \ case 'j' | 0x80: \ val = va_arg(ap, uintmax_t); \ break; \ case 't': \ val = va_arg(ap, ptrdiff_t); \ break; \ case 'z': \ val = va_arg(ap, ssize_t); \ break; \ case 'z' | 0x80: \ val = va_arg(ap, size_t); \ break; \ case 'p': /* Synthetic; used for %p. */ \ val = va_arg(ap, uintptr_t); \ break; \ default: \ not_reached(); \ val = 0; \ } \ } while (0) i = 0; f = format; while (true) { switch (*f) { case '\0': goto label_out; case '%': { bool alt_form = false; bool left_justify = false; bool plus_space = false; bool plus_plus = false; int prec = -1; int width = -1; unsigned char len = '?'; f++; /* Flags. */ while (true) { switch (*f) { case '#': assert(!alt_form); alt_form = true; break; case '-': assert(!left_justify); left_justify = true; break; case ' ': assert(!plus_space); plus_space = true; break; case '+': assert(!plus_plus); plus_plus = true; break; default: goto label_width; } f++; } /* Width. 
*/ label_width: switch (*f) { case '*': width = va_arg(ap, int); f++; if (width < 0) { left_justify = true; width = -width; } break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { uintmax_t uwidth; set_errno(0); uwidth = malloc_strtoumax(f, (char **)&f, 10); assert(uwidth != UINTMAX_MAX || get_errno() != ERANGE); width = (int)uwidth; break; } default: break; } /* Width/precision separator. */ if (*f == '.') f++; else goto label_length; /* Precision. */ switch (*f) { case '*': prec = va_arg(ap, int); f++; break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { uintmax_t uprec; set_errno(0); uprec = malloc_strtoumax(f, (char **)&f, 10); assert(uprec != UINTMAX_MAX || get_errno() != ERANGE); prec = (int)uprec; break; } default: break; } /* Length. */ label_length: switch (*f) { case 'l': f++; if (*f == 'l') { len = 'q'; f++; } else len = 'l'; break; case 'q': case 'j': case 't': case 'z': len = *f; f++; break; default: break; } /* Conversion specifier. */ switch (*f) { char *s; size_t slen; case '%': /* %% */ APPEND_C(*f); f++; break; case 'd': case 'i': { intmax_t val JEMALLOC_CC_SILENCE_INIT(0); char buf[D2S_BUFSIZE]; GET_ARG_NUMERIC(val, len); s = d2s(val, (plus_plus ? '+' : (plus_space ? ' ' : '-')), buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } case 'o': { uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); char buf[O2S_BUFSIZE]; GET_ARG_NUMERIC(val, len | 0x80); s = o2s(val, alt_form, buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } case 'u': { uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); char buf[U2S_BUFSIZE]; GET_ARG_NUMERIC(val, len | 0x80); s = u2s(val, 10, false, buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } case 'x': case 'X': { uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); char buf[X2S_BUFSIZE]; GET_ARG_NUMERIC(val, len | 0x80); s = x2s(val, alt_form, *f == 'X', buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } case 'c': { unsigned char val; char buf[2]; assert(len == '?' || len == 'l'); assert_not_implemented(len != 'l'); val = va_arg(ap, int); buf[0] = val; buf[1] = '\0'; APPEND_PADDED_S(buf, 1, width, left_justify); f++; break; } case 's': assert(len == '?' || len == 'l'); assert_not_implemented(len != 'l'); s = va_arg(ap, char *); slen = (prec < 0) ? strlen(s) : (size_t)prec; APPEND_PADDED_S(s, slen, width, left_justify); f++; break; case 'p': { uintmax_t val; char buf[X2S_BUFSIZE]; GET_ARG_NUMERIC(val, 'p'); s = x2s(val, true, false, buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } default: not_reached(); } break; } default: { APPEND_C(*f); f++; break; }} } label_out: if (i < size) str[i] = '\0'; else str[size - 1] = '\0'; ret = i; #undef APPEND_C #undef APPEND_S #undef APPEND_PADDED_S #undef GET_ARG_NUMERIC return (ret); } JEMALLOC_FORMAT_PRINTF(3, 4) int malloc_snprintf(char *str, size_t size, const char *format, ...) { int ret; va_list ap; va_start(ap, format); ret = malloc_vsnprintf(str, size, format, ap); va_end(ap); return (ret); } void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, const char *format, va_list ap) { char buf[MALLOC_PRINTF_BUFSIZE]; if (write_cb == NULL) { /* * The caller did not provide an alternate write_cb callback * function, so use the default one. malloc_write() is an * inline function, so use malloc_message() directly here. */ write_cb = (je_malloc_message != NULL) ? 
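/* [Editor's note: an illustrative sketch, not part of the original
 * source. malloc_snprintf() supports the printf subset implemented
 * above, including %zu and width handling, without touching the heap:
 *
 *     char buf[64];
 *     size_t pages = 42;
 *     int n = malloc_snprintf(buf, sizeof(buf),
 *         "mapped: %12zu\n", pages);
 *     // n == 21; like snprintf(3), n is the untruncated length, so
 *     // n >= (int)sizeof(buf) signals truncation
 * ] */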
je_malloc_message : wrtmessage; cbopaque = NULL; } malloc_vsnprintf(buf, sizeof(buf), format, ap); write_cb(cbopaque, buf); } /* * Print to a callback function in such a way as to (hopefully) avoid memory * allocation. */ JEMALLOC_FORMAT_PRINTF(3, 4) void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, const char *format, ...) { va_list ap; va_start(ap, format); malloc_vcprintf(write_cb, cbopaque, format, ap); va_end(ap); } /* Print to stderr in such a way as to avoid memory allocation. */ JEMALLOC_FORMAT_PRINTF(1, 2) void malloc_printf(const char *format, ...) { va_list ap; va_start(ap, format); malloc_vcprintf(NULL, NULL, format, ap); va_end(ap); } disque-1.0-rc1/deps/jemalloc/src/valgrind.c000066400000000000000000000011051264176561100206210ustar00rootroot00000000000000#include "jemalloc/internal/jemalloc_internal.h" #ifndef JEMALLOC_VALGRIND # error "This source file is for Valgrind integration." #endif #include <valgrind/memcheck.h> void valgrind_make_mem_noaccess(void *ptr, size_t usize) { VALGRIND_MAKE_MEM_NOACCESS(ptr, usize); } void valgrind_make_mem_undefined(void *ptr, size_t usize) { VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize); } void valgrind_make_mem_defined(void *ptr, size_t usize) { VALGRIND_MAKE_MEM_DEFINED(ptr, usize); } void valgrind_freelike_block(void *ptr, size_t usize) { VALGRIND_FREELIKE_BLOCK(ptr, usize); } disque-1.0-rc1/deps/jemalloc/src/zone.c000066400000000000000000000170011264176561100177660ustar00rootroot00000000000000#include "jemalloc/internal/jemalloc_internal.h" #ifndef JEMALLOC_ZONE # error "This source file is for zones on Darwin (OS X)." #endif /* * The malloc_default_purgeable_zone function is only available on >= 10.6. * We need to check whether it is present at runtime, thus the weak_import. */ extern malloc_zone_t *malloc_default_purgeable_zone(void) JEMALLOC_ATTR(weak_import); /******************************************************************************/ /* Data. */ static malloc_zone_t zone; static struct malloc_introspection_t zone_introspect; /******************************************************************************/ /* Function prototypes for non-inline static functions. */ static size_t zone_size(malloc_zone_t *zone, void *ptr); static void *zone_malloc(malloc_zone_t *zone, size_t size); static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size); static void *zone_valloc(malloc_zone_t *zone, size_t size); static void zone_free(malloc_zone_t *zone, void *ptr); static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size); #if (JEMALLOC_ZONE_VERSION >= 5) static void *zone_memalign(malloc_zone_t *zone, size_t alignment, #endif #if (JEMALLOC_ZONE_VERSION >= 6) size_t size); static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size); #endif static void *zone_destroy(malloc_zone_t *zone); static size_t zone_good_size(malloc_zone_t *zone, size_t size); static void zone_force_lock(malloc_zone_t *zone); static void zone_force_unlock(malloc_zone_t *zone); /******************************************************************************/ /* * Functions. */ static size_t zone_size(malloc_zone_t *zone, void *ptr) { /* * There appear to be places within Darwin (such as setenv(3)) that * cause calls to this function with pointers that *no* zone owns. If * we knew that all pointers were owned by *some* zone, we could split * our zone into two parts, and use one as the default allocator and * the other as the default deallocator/reallocator.
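 * [Editor's aside, not in the original source: returning 0 here is how
 * Darwin's malloc layer decides ownership, so ivsalloc() doubles as an
 * "is this pointer ours?" test. zone_free() further below leans on the
 * same trick: a nonzero ivsalloc() routes the pointer to je_free(),
 * anything else falls through to the system free().]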
Since that will * not work in practice, we must check all pointers to assure that they * reside within a mapped chunk before determining size. */ return (ivsalloc(ptr, config_prof)); } static void * zone_malloc(malloc_zone_t *zone, size_t size) { return (je_malloc(size)); } static void * zone_calloc(malloc_zone_t *zone, size_t num, size_t size) { return (je_calloc(num, size)); } static void * zone_valloc(malloc_zone_t *zone, size_t size) { void *ret = NULL; /* Assignment avoids useless compiler warning. */ je_posix_memalign(&ret, PAGE, size); return (ret); } static void zone_free(malloc_zone_t *zone, void *ptr) { if (ivsalloc(ptr, config_prof) != 0) { je_free(ptr); return; } free(ptr); } static void * zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) { if (ivsalloc(ptr, config_prof) != 0) return (je_realloc(ptr, size)); return (realloc(ptr, size)); } #if (JEMALLOC_ZONE_VERSION >= 5) static void * zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) { void *ret = NULL; /* Assignment avoids useless compiler warning. */ je_posix_memalign(&ret, alignment, size); return (ret); } #endif #if (JEMALLOC_ZONE_VERSION >= 6) static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) { if (ivsalloc(ptr, config_prof) != 0) { assert(ivsalloc(ptr, config_prof) == size); je_free(ptr); return; } free(ptr); } #endif static void * zone_destroy(malloc_zone_t *zone) { /* This function should never be called. */ not_reached(); return (NULL); } static size_t zone_good_size(malloc_zone_t *zone, size_t size) { if (size == 0) size = 1; return (s2u(size)); } static void zone_force_lock(malloc_zone_t *zone) { if (isthreaded) jemalloc_prefork(); } static void zone_force_unlock(malloc_zone_t *zone) { if (isthreaded) jemalloc_postfork_parent(); } JEMALLOC_ATTR(constructor) void register_zone(void) { /* * If something else replaced the system default zone allocator, don't * register jemalloc's. */ malloc_zone_t *default_zone = malloc_default_zone(); malloc_zone_t *purgeable_zone = NULL; if (!default_zone->zone_name || strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) { return; } zone.size = (void *)zone_size; zone.malloc = (void *)zone_malloc; zone.calloc = (void *)zone_calloc; zone.valloc = (void *)zone_valloc; zone.free = (void *)zone_free; zone.realloc = (void *)zone_realloc; zone.destroy = (void *)zone_destroy; zone.zone_name = "jemalloc_zone"; zone.batch_malloc = NULL; zone.batch_free = NULL; zone.introspect = &zone_introspect; zone.version = JEMALLOC_ZONE_VERSION; #if (JEMALLOC_ZONE_VERSION >= 5) zone.memalign = zone_memalign; #endif #if (JEMALLOC_ZONE_VERSION >= 6) zone.free_definite_size = zone_free_definite_size; #endif #if (JEMALLOC_ZONE_VERSION >= 8) zone.pressure_relief = NULL; #endif zone_introspect.enumerator = NULL; zone_introspect.good_size = (void *)zone_good_size; zone_introspect.check = NULL; zone_introspect.print = NULL; zone_introspect.log = NULL; zone_introspect.force_lock = (void *)zone_force_lock; zone_introspect.force_unlock = (void *)zone_force_unlock; zone_introspect.statistics = NULL; #if (JEMALLOC_ZONE_VERSION >= 6) zone_introspect.zone_locked = NULL; #endif #if (JEMALLOC_ZONE_VERSION >= 7) zone_introspect.enable_discharge_checking = NULL; zone_introspect.disable_discharge_checking = NULL; zone_introspect.discharge = NULL; #ifdef __BLOCKS__ zone_introspect.enumerate_discharged_pointers = NULL; #else zone_introspect.enumerate_unavailable_without_blocks = NULL; #endif #endif /* * The default purgeable zone is created lazily by OSX's libc. 
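 * [Editor's aside, not in the original source: the registration dance
 * at the end of this function repeats three steps until jemalloc's
 * zone is the default: (1) the old default zone is unregistered and
 * immediately re-registered, which on OSX >= 10.6 promotes the
 * last-registered zone, i.e. ours, into the default slot; (2) the
 * purgeable zone, if present, is unregistered and re-registered so it
 * stays behind the default zone; (3) the loop re-checks
 * malloc_default_zone() and retries if something raced with us.]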
It uses * the default zone when it is created for "small" allocations * (< 15 KiB), but assumes the default zone is a scalable_zone. This * obviously fails when the default zone is the jemalloc zone, so * malloc_default_purgeable_zone is called beforehand so that the * default purgeable zone is created when the default zone is still * a scalable_zone. As purgeable zones only exist on >= 10.6, we need * to check for the existence of malloc_default_purgeable_zone() at * run time. */ if (malloc_default_purgeable_zone != NULL) purgeable_zone = malloc_default_purgeable_zone(); /* Register the custom zone. At this point it won't be the default. */ malloc_zone_register(&zone); do { default_zone = malloc_default_zone(); /* * Unregister and reregister the default zone. On OSX >= 10.6, * unregistering takes the last registered zone and places it * at the location of the specified zone. Unregistering the * default zone thus makes the last registered one the default. * On OSX < 10.6, unregistering shifts all registered zones. * The first registered zone then becomes the default. */ malloc_zone_unregister(default_zone); malloc_zone_register(default_zone); /* * On OSX 10.6, having the default purgeable zone appear before * the default zone makes some things crash because it thinks it * owns the default zone allocated pointers. We thus * unregister/re-register it in order to ensure it's always * after the default zone. On OSX < 10.6, there is no purgeable * zone, so this does nothing. On OSX >= 10.6, unregistering * replaces the purgeable zone with the last registered zone * above, i.e. the default zone. Registering it again then puts * it at the end, obviously after the default zone. */ if (purgeable_zone) { malloc_zone_unregister(purgeable_zone); malloc_zone_register(purgeable_zone); } } while (malloc_default_zone() != &zone); } disque-1.0-rc1/deps/jemalloc/test/000077500000000000000000000000001264176561100170405ustar00rootroot00000000000000disque-1.0-rc1/deps/jemalloc/test/include/000077500000000000000000000000001264176561100204635ustar00rootroot00000000000000disque-1.0-rc1/deps/jemalloc/test/include/test/000077500000000000000000000000001264176561100214425ustar00rootroot00000000000000disque-1.0-rc1/deps/jemalloc/test/include/test/SFMT-alti.h000066400000000000000000000134411264176561100233160ustar00rootroot00000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file SFMT-alti.h * * @brief SIMD oriented Fast Mersenne Twister(SFMT) * pseudorandom number generator * * @author Mutsuo Saito (Hiroshima University) * @author Makoto Matsumoto (Hiroshima University) * * Copyright (C) 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * The new BSD License is applied to this software. * see LICENSE.txt */ #ifndef SFMT_ALTI_H #define SFMT_ALTI_H /** * This function represents the recursion formula in AltiVec and BIG ENDIAN. * @param a a 128-bit part of the internal state array * @param b a 128-bit part of the internal state array * @param c a 128-bit part of the internal state array * @param d a 128-bit part of the internal state array * @return output */ JEMALLOC_ALWAYS_INLINE vector unsigned int vec_recursion(vector unsigned int a, vector unsigned int b, vector unsigned int c, vector unsigned int d) { const vector unsigned int sl1 = ALTI_SL1; const vector unsigned int sr1 = ALTI_SR1; #ifdef ONLY64 const vector unsigned int mask = ALTI_MSK64; const vector unsigned char perm_sl = ALTI_SL2_PERM64; const vector unsigned char perm_sr = ALTI_SR2_PERM64; #else const vector unsigned int mask = ALTI_MSK; const vector unsigned char perm_sl = ALTI_SL2_PERM; const vector unsigned char perm_sr = ALTI_SR2_PERM; #endif vector unsigned int v, w, x, y, z; x = vec_perm(a, (vector unsigned int)perm_sl, perm_sl); v = a; y = vec_sr(b, sr1); z = vec_perm(c, (vector unsigned int)perm_sr, perm_sr); w = vec_sl(d, sl1); z = vec_xor(z, w); y = vec_and(y, mask); v = vec_xor(v, x); z = vec_xor(z, y); z = vec_xor(z, v); return z; } /** * This function fills the internal state array with pseudorandom * integers. */ JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) { int i; vector unsigned int r, r1, r2; r1 = ctx->sfmt[N - 2].s; r2 = ctx->sfmt[N - 1].s; for (i = 0; i < N - POS1; i++) { r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2); ctx->sfmt[i].s = r; r1 = r2; r2 = r; } for (; i < N; i++) { r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1 - N].s, r1, r2); ctx->sfmt[i].s = r; r1 = r2; r2 = r; } } /** * This function fills the user-specified array with pseudorandom * integers. * * @param array a 128-bit array to be filled by pseudorandom numbers. * @param size number of 128-bit pseudorandom numbers to be generated.
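 * [Editor's aside, not in the original source: vec_recursion() above
 * computes the SFMT update, roughly
 *     r = a ^ (a <<128 (SL2*8)) ^ ((b >> SR1) & MSK)
 *           ^ (c >>128 (SR2*8)) ^ (d << SL1)
 * where <<128 / >>128 are whole-register byte shifts done with
 * vec_perm, and the lane-wise shifts use vec_sl/vec_sr. gen_rand_all()
 * walks the state with r1/r2 trailing two positions behind the write
 * cursor, exactly as this function does for an external array.]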
*/ JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { int i, j; vector unsigned int r, r1, r2; r1 = ctx->sfmt[N - 2].s; r2 = ctx->sfmt[N - 1].s; for (i = 0; i < N - POS1; i++) { r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2); array[i].s = r; r1 = r2; r2 = r; } for (; i < N; i++) { r = vec_recursion(ctx->sfmt[i].s, array[i + POS1 - N].s, r1, r2); array[i].s = r; r1 = r2; r2 = r; } /* main loop */ for (; i < size - N; i++) { r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2); array[i].s = r; r1 = r2; r2 = r; } for (j = 0; j < 2 * N - size; j++) { ctx->sfmt[j].s = array[j + size - N].s; } for (; i < size; i++) { r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2); array[i].s = r; ctx->sfmt[j++].s = r; r1 = r2; r2 = r; } } #ifndef ONLY64 #if defined(__APPLE__) #define ALTI_SWAP (vector unsigned char) \ (4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11) #else #define ALTI_SWAP {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11} #endif /** * This function swaps the high and low 32 bits of the 64-bit integers in the * user-specified array. * * @param array a 128-bit array to be swapped. * @param size size of the 128-bit array. */ JEMALLOC_INLINE void swap(w128_t *array, int size) { int i; const vector unsigned char perm = ALTI_SWAP; for (i = 0; i < size; i++) { array[i].s = vec_perm(array[i].s, (vector unsigned int)perm, perm); } } #endif #endif disque-1.0-rc1/deps/jemalloc/test/include/test/SFMT-params.h000066400000000000000000000102761264176561100236530ustar00rootroot00000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS_H #define SFMT_PARAMS_H #if !defined(MEXP) #ifdef __GNUC__ #warning "MEXP is not defined. I assume MEXP is 19937."
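/* [Editor's note: illustrative arithmetic, not part of the original
 * source. The state sizes defined below all follow from MEXP; for the
 * default MEXP = 19937:
 *     N   = MEXP / 128 + 1 = 155 + 1 = 156   // 128-bit words of state
 *     N32 = N * 4 = 624                      // viewed as 32-bit ints
 *     N64 = N * 2 = 312                      // viewed as 64-bit ints
 * giving a period that is a multiple of 2^19937 - 1.] */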
#endif #define MEXP 19937 #endif /*----------------- BASIC DEFINITIONS -----------------*/ /** Mersenne Exponent. The period of the sequence * is a multiple of 2^MEXP-1. * #define MEXP 19937 */ /** SFMT generator has an internal state array of 128-bit integers, * and N is its size. */ #define N (MEXP / 128 + 1) /** N32 is the size of internal state array when regarded as an array * of 32-bit integers.*/ #define N32 (N * 4) /** N64 is the size of internal state array when regarded as an array * of 64-bit integers.*/ #define N64 (N * 2) /*---------------------- the parameters of SFMT following definitions are in paramsXXXX.h file. ----------------------*/ /** the pick up position of the array. #define POS1 122 */ /** the parameter of shift left as four 32-bit registers. #define SL1 18 */ /** the parameter of shift left as one 128-bit register. * The 128-bit integer is shifted by (SL2 * 8) bits. #define SL2 1 */ /** the parameter of shift right as four 32-bit registers. #define SR1 11 */ /** the parameter of shift right as one 128-bit register. * The 128-bit integer is shifted by (SL2 * 8) bits. #define SR2 1 */ /** A bitmask, used in the recursion. These parameters are introduced * to break symmetry of SIMD. #define MSK1 0xdfffffefU #define MSK2 0xddfecb7fU #define MSK3 0xbffaffffU #define MSK4 0xbffffff6U */ /** These definitions are part of a 128-bit period certification vector. #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0xc98e126aU */ #if MEXP == 607 #include "test/SFMT-params607.h" #elif MEXP == 1279 #include "test/SFMT-params1279.h" #elif MEXP == 2281 #include "test/SFMT-params2281.h" #elif MEXP == 4253 #include "test/SFMT-params4253.h" #elif MEXP == 11213 #include "test/SFMT-params11213.h" #elif MEXP == 19937 #include "test/SFMT-params19937.h" #elif MEXP == 44497 #include "test/SFMT-params44497.h" #elif MEXP == 86243 #include "test/SFMT-params86243.h" #elif MEXP == 132049 #include "test/SFMT-params132049.h" #elif MEXP == 216091 #include "test/SFMT-params216091.h" #else #ifdef __GNUC__ #error "MEXP is not valid." #undef MEXP #else #undef MEXP #endif #endif #endif /* SFMT_PARAMS_H */ disque-1.0-rc1/deps/jemalloc/test/include/test/SFMT-params11213.h000066400000000000000000000067561264176561100242530ustar00rootroot00000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS11213_H #define SFMT_PARAMS11213_H #define POS1 68 #define SL1 14 #define SL2 3 #define SR1 7 #define SR2 3 #define MSK1 0xeffff7fbU #define MSK2 0xffffffefU #define MSK3 0xdfdfbfffU #define MSK4 0x7fffdbfdU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0xe8148000U #define PARITY4 0xd0c7afa3U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) #define ALTI_SR2_PERM64 \ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} #endif /* For OSX */ #define IDSTR "SFMT-11213:68-14-3-7-3:effff7fb-ffffffef-dfdfbfff-7fffdbfd" #endif /* SFMT_PARAMS11213_H */ disque-1.0-rc1/deps/jemalloc/test/include/test/SFMT-params1279.h000066400000000000000000000067401264176561100241770ustar00rootroot00000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS1279_H #define SFMT_PARAMS1279_H #define POS1 7 #define SL1 14 #define SL2 3 #define SR1 5 #define SR2 1 #define MSK1 0xf7fefffdU #define MSK2 0x7fefcfffU #define MSK3 0xaff3ef3fU #define MSK4 0xb5ffff7fU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0x20000000U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-1279:7-14-3-5-1:f7fefffd-7fefcfff-aff3ef3f-b5ffff7f" #endif /* SFMT_PARAMS1279_H */ disque-1.0-rc1/deps/jemalloc/test/include/test/SFMT-params132049.h000066400000000000000000000067541264176561100243440ustar00rootroot00000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS132049_H #define SFMT_PARAMS132049_H #define POS1 110 #define SL1 19 #define SL2 1 #define SR1 21 #define SR2 1 #define MSK1 0xffffbb5fU #define MSK2 0xfb6ebf95U #define MSK3 0xfffefffaU #define MSK4 0xcff77fffU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0xcb520000U #define PARITY4 0xc7e91c7dU /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) #define ALTI_SL2_PERM64 \ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-132049:110-19-1-21-1:ffffbb5f-fb6ebf95-fffefffa-cff77fff" #endif /* SFMT_PARAMS132049_H */ disque-1.0-rc1/deps/jemalloc/test/include/test/SFMT-params19937.h000066400000000000000000000067501264176561100242720ustar00rootroot00000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS19937_H #define SFMT_PARAMS19937_H #define POS1 122 #define SL1 18 #define SL2 1 #define SR1 11 #define SR2 1 #define MSK1 0xdfffffefU #define MSK2 0xddfecb7fU #define MSK3 0xbffaffffU #define MSK4 0xbffffff6U #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0x13c9e684U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) #define ALTI_SL2_PERM64 \ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-19937:122-18-1-11-1:dfffffef-ddfecb7f-bffaffff-bffffff6" #endif /* SFMT_PARAMS19937_H */ disque-1.0-rc1/deps/jemalloc/test/include/test/SFMT-params216091.h000066400000000000000000000067561264176561100243460ustar00rootroot00000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS216091_H #define SFMT_PARAMS216091_H #define POS1 627 #define SL1 11 #define SL2 3 #define SR1 10 #define SR2 1 #define MSK1 0xbff7bff7U #define MSK2 0xbfffffffU #define MSK3 0xbffffa7fU #define MSK4 0xffddfbfbU #define PARITY1 0xf8000001U #define PARITY2 0x89e80709U #define PARITY3 0x3bd2b64bU #define PARITY4 0x0c64b1e4U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-216091:627-11-3-10-1:bff7bff7-bfffffff-bffffa7f-ffddfbfb" #endif /* SFMT_PARAMS216091_H */ disque-1.0-rc1/deps/jemalloc/test/include/test/SFMT-params2281.h000066400000000000000000000067401264176561100241710ustar00rootroot00000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS2281_H #define SFMT_PARAMS2281_H #define POS1 12 #define SL1 19 #define SL2 1 #define SR1 5 #define SR2 1 #define MSK1 0xbff7ffbfU #define MSK2 0xfdfffffeU #define MSK3 0xf7ffef7fU #define MSK4 0xf2f7cbbfU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0x41dfa600U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) #define ALTI_SL2_PERM64 \ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-2281:12-19-1-5-1:bff7ffbf-fdfffffe-f7ffef7f-f2f7cbbf" #endif /* SFMT_PARAMS2281_H */ disque-1.0-rc1/deps/jemalloc/test/include/test/SFMT-params4253.h000066400000000000000000000067401264176561100241720ustar00rootroot00000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS4253_H #define SFMT_PARAMS4253_H #define POS1 17 #define SL1 20 #define SL2 1 #define SR1 7 #define SR2 1 #define MSK1 0x9f7bffffU #define MSK2 0x9fffff5fU #define MSK3 0x3efffffbU #define MSK4 0xfffff7bbU #define PARITY1 0xa8000001U #define PARITY2 0xaf5390a3U #define PARITY3 0xb740b3f8U #define PARITY4 0x6c11486dU /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) #define ALTI_SL2_PERM64 \ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-4253:17-20-1-7-1:9f7bffff-9fffff5f-3efffffb-fffff7bb" #endif /* SFMT_PARAMS4253_H */ disque-1.0-rc1/deps/jemalloc/test/include/test/SFMT-params44497.h000066400000000000000000000067561264176561100242770ustar00rootroot00000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS44497_H #define SFMT_PARAMS44497_H #define POS1 330 #define SL1 5 #define SL2 3 #define SR1 9 #define SR2 3 #define MSK1 0xeffffffbU #define MSK2 0xdfbebfffU #define MSK3 0xbfbf7befU #define MSK4 0x9ffd7bffU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0xa3ac4000U #define PARITY4 0xecc1327aU /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) #define ALTI_SR2_PERM64 \ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} #endif /* For OSX */ #define IDSTR "SFMT-44497:330-5-3-9-3:effffffb-dfbebfff-bfbf7bef-9ffd7bff" #endif /* SFMT_PARAMS44497_H */ disque-1.0-rc1/deps/jemalloc/test/include/test/SFMT-params607.h000066400000000000000000000067461264176561100241170ustar00rootroot00000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS607_H #define SFMT_PARAMS607_H #define POS1 2 #define SL1 15 #define SL2 3 #define SR1 13 #define SR2 3 #define MSK1 0xfdff37ffU #define MSK2 0xef7f3f7dU #define MSK3 0xff777b7dU #define MSK4 0x7ff7fb2fU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0x5986f054U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) #define ALTI_SR2_PERM64 \ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} #endif /* For OSX */ #define IDSTR "SFMT-607:2-15-3-13-3:fdff37ff-ef7f3f7d-ff777b7d-7ff7fb2f" #endif /* SFMT_PARAMS607_H */ disque-1.0-rc1/deps/jemalloc/test/include/test/SFMT-params86243.h000066400000000000000000000067541264176561100242700ustar00rootroot00000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS86243_H #define SFMT_PARAMS86243_H #define POS1 366 #define SL1 6 #define SL2 7 #define SR1 19 #define SR2 1 #define MSK1 0xfdbffbffU #define MSK2 0xbff7ff3fU #define MSK3 0xfd77efffU #define MSK4 0xbf9ff3ffU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0xe9528d85U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6) #define ALTI_SL2_PERM64 \ (vector unsigned char)(7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6} #define ALTI_SL2_PERM64 {7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-86243:366-6-7-19-1:fdbffbff-bff7ff3f-fd77efff-bf9ff3ff" #endif /* SFMT_PARAMS86243_H */ disque-1.0-rc1/deps/jemalloc/test/include/test/SFMT-sse2.h000066400000000000000000000121371264176561100232420ustar00rootroot00000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file SFMT-sse2.h * @brief SIMD oriented Fast Mersenne Twister(SFMT) for Intel SSE2 * * @author Mutsuo Saito (Hiroshima University) * @author Makoto Matsumoto (Hiroshima University) * * @note We assume LITTLE ENDIAN in this file * * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * The new BSD License is applied to this software, see LICENSE.txt */ #ifndef SFMT_SSE2_H #define SFMT_SSE2_H /** * This function represents the recursion formula. * @param a a 128-bit part of the internal state array * @param b a 128-bit part of the internal state array * @param c a 128-bit part of the internal state array * @param d a 128-bit part of the internal state array * @param mask 128-bit mask * @return output */ JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b, __m128i c, __m128i d, __m128i mask) { __m128i v, x, y, z; x = _mm_load_si128(a); y = _mm_srli_epi32(*b, SR1); z = _mm_srli_si128(c, SR2); v = _mm_slli_epi32(d, SL1); z = _mm_xor_si128(z, x); z = _mm_xor_si128(z, v); x = _mm_slli_si128(x, SL2); y = _mm_and_si128(y, mask); z = _mm_xor_si128(z, x); z = _mm_xor_si128(z, y); return z; } /** * This function fills the internal state array with pseudorandom * integers. */ JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) { int i; __m128i r, r1, r2, mask; mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); r1 = _mm_load_si128(&ctx->sfmt[N - 2].si); r2 = _mm_load_si128(&ctx->sfmt[N - 1].si); for (i = 0; i < N - POS1; i++) { r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2, mask); _mm_store_si128(&ctx->sfmt[i].si, r); r1 = r2; r2 = r; } for (; i < N; i++) { r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1 - N].si, r1, r2, mask); _mm_store_si128(&ctx->sfmt[i].si, r); r1 = r2; r2 = r; } } /** * This function fills the user-specified array with pseudorandom * integers. * * @param array a 128-bit array to be filled by pseudorandom numbers. * @param size number of 128-bit pseudorandom numbers to be generated.
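 *
 * @note A usage sketch, with the constraints as documented by the
 * upstream SFMT sources (this wrapper-level detail is an assumption,
 * not something enforced in this header): callers normally reach this
 * routine via the fill_array32()/fill_array64() wrappers declared in
 * SFMT.h, which expect a 16-byte aligned destination of at least
 * get_min_array_size32()/get_min_array_size64() elements and, for
 * fill_array32(), a size that is a multiple of 4:
 * @verbatim
   sfmt_t *ctx = init_gen_rand(12345);
   uint32_t buf[2048];
   assert(2048 >= get_min_array_size32());
   fill_array32(ctx, buf, 2048);
   fini_gen_rand(ctx);
   @endverbatim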
*/ JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { int i, j; __m128i r, r1, r2, mask; mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); r1 = _mm_load_si128(&ctx->sfmt[N - 2].si); r2 = _mm_load_si128(&ctx->sfmt[N - 1].si); for (i = 0; i < N - POS1; i++) { r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2, mask); _mm_store_si128(&array[i].si, r); r1 = r2; r2 = r; } for (; i < N; i++) { r = mm_recursion(&ctx->sfmt[i].si, &array[i + POS1 - N].si, r1, r2, mask); _mm_store_si128(&array[i].si, r); r1 = r2; r2 = r; } /* main loop */ for (; i < size - N; i++) { r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2, mask); _mm_store_si128(&array[i].si, r); r1 = r2; r2 = r; } for (j = 0; j < 2 * N - size; j++) { r = _mm_load_si128(&array[j + size - N].si); _mm_store_si128(&ctx->sfmt[j].si, r); } for (; i < size; i++) { r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2, mask); _mm_store_si128(&array[i].si, r); _mm_store_si128(&ctx->sfmt[j++].si, r); r1 = r2; r2 = r; } } #endif disque-1.0-rc1/deps/jemalloc/test/include/test/SFMT.h000066400000000000000000000132551264176561100223720ustar00rootroot00000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file SFMT.h * * @brief SIMD oriented Fast Mersenne Twister(SFMT) pseudorandom * number generator * * @author Mutsuo Saito (Hiroshima University) * @author Makoto Matsumoto (Hiroshima University) * * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * The new BSD License is applied to this software. * see LICENSE.txt * * @note We assume that your system has inttypes.h. 
If your system * doesn't have inttypes.h, you have to typedef uint32_t and uint64_t, * and you have to define PRIu64 and PRIx64 in this file as follows: * @verbatim typedef unsigned int uint32_t typedef unsigned long long uint64_t #define PRIu64 "llu" #define PRIx64 "llx" @endverbatim * uint32_t must be exactly 32-bit unsigned integer type (no more, no * less), and uint64_t must be exactly 64-bit unsigned integer type. * PRIu64 and PRIx64 are used for printf function to print 64-bit * unsigned int and 64-bit unsigned int in hexadecimal format. */ #ifndef SFMT_H #define SFMT_H typedef struct sfmt_s sfmt_t; uint32_t gen_rand32(sfmt_t *ctx); uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit); uint64_t gen_rand64(sfmt_t *ctx); uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit); void fill_array32(sfmt_t *ctx, uint32_t *array, int size); void fill_array64(sfmt_t *ctx, uint64_t *array, int size); sfmt_t *init_gen_rand(uint32_t seed); sfmt_t *init_by_array(uint32_t *init_key, int key_length); void fini_gen_rand(sfmt_t *ctx); const char *get_idstring(void); int get_min_array_size32(void); int get_min_array_size64(void); #ifndef JEMALLOC_ENABLE_INLINE double to_real1(uint32_t v); double genrand_real1(sfmt_t *ctx); double to_real2(uint32_t v); double genrand_real2(sfmt_t *ctx); double to_real3(uint32_t v); double genrand_real3(sfmt_t *ctx); double to_res53(uint64_t v); double to_res53_mix(uint32_t x, uint32_t y); double genrand_res53(sfmt_t *ctx); double genrand_res53_mix(sfmt_t *ctx); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(SFMT_C_)) /* These real versions are due to Isaku Wada */ /** generates a random number on [0,1]-real-interval */ JEMALLOC_INLINE double to_real1(uint32_t v) { return v * (1.0/4294967295.0); /* divided by 2^32-1 */ } /** generates a random number on [0,1]-real-interval */ JEMALLOC_INLINE double genrand_real1(sfmt_t *ctx) { return to_real1(gen_rand32(ctx)); } /** generates a random number on [0,1)-real-interval */ JEMALLOC_INLINE double to_real2(uint32_t v) { return v * (1.0/4294967296.0); /* divided by 2^32 */ } /** generates a random number on [0,1)-real-interval */ JEMALLOC_INLINE double genrand_real2(sfmt_t *ctx) { return to_real2(gen_rand32(ctx)); } /** generates a random number on (0,1)-real-interval */ JEMALLOC_INLINE double to_real3(uint32_t v) { return (((double)v) + 0.5)*(1.0/4294967296.0); /* divided by 2^32 */ } /** generates a random number on (0,1)-real-interval */ JEMALLOC_INLINE double genrand_real3(sfmt_t *ctx) { return to_real3(gen_rand32(ctx)); } /** These real versions are due to Isaku Wada */ /** generates a random number on [0,1) with 53-bit resolution*/ JEMALLOC_INLINE double to_res53(uint64_t v) { return v * (1.0/18446744073709551616.0L); } /** generates a random number on [0,1) with 53-bit resolution from two * 32 bit integers */ JEMALLOC_INLINE double to_res53_mix(uint32_t x, uint32_t y) { return to_res53(x | ((uint64_t)y << 32)); } /** generates a random number on [0,1) with 53-bit resolution */ JEMALLOC_INLINE double genrand_res53(sfmt_t *ctx) { return to_res53(gen_rand64(ctx)); } /** generates a random number on [0,1) with 53-bit resolution using 32bit integer. */ JEMALLOC_INLINE double genrand_res53_mix(sfmt_t *ctx) { uint32_t x, y; x = gen_rand32(ctx); y = gen_rand32(ctx); return to_res53_mix(x, y); } #endif #endif disque-1.0-rc1/deps/jemalloc/test/include/test/btalloc.h000066400000000000000000000014711264176561100232360ustar00rootroot00000000000000/* btalloc() provides a mechanism for allocating via permuted backtraces. 
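 *
 * A sketch of the recursion spelled out by btalloc_n_gen() below:
 * each generated frame inspects the low bit of bits to choose whether
 * btalloc_0() or btalloc_1() is called next, shifts bits right by
 * one, and calls mallocx(size, 0) once bits reaches 0.  For example,
 * bits == 0x5 (binary 101) selects, in order, btalloc_1(),
 * btalloc_0(), btalloc_1(), and then mallocx(), so each distinct bit
 * pattern yields a distinct (permuted) backtrace.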
*/ void *btalloc(size_t size, unsigned bits); #define btalloc_n_proto(n) \ void *btalloc_##n(size_t size, unsigned bits); btalloc_n_proto(0) btalloc_n_proto(1) #define btalloc_n_gen(n) \ void * \ btalloc_##n(size_t size, unsigned bits) \ { \ void *p; \ \ if (bits == 0) \ p = mallocx(size, 0); \ else { \ switch (bits & 0x1U) { \ case 0: \ p = (btalloc_0(size, bits >> 1)); \ break; \ case 1: \ p = (btalloc_1(size, bits >> 1)); \ break; \ default: not_reached(); \ } \ } \ /* Intentionally sabotage tail call optimization. */ \ assert_ptr_not_null(p, "Unexpected mallocx() failure"); \ return (p); \ } disque-1.0-rc1/deps/jemalloc/test/include/test/jemalloc_test.h.in000066400000000000000000000102541264176561100250470ustar00rootroot00000000000000#include #ifndef SIZE_T_MAX # define SIZE_T_MAX SIZE_MAX #endif #include #include #include #include #include #include #ifdef _WIN32 # include "msvc_compat/strings.h" #endif #include #ifdef _WIN32 # include # include "msvc_compat/windows_extra.h" #else # include #endif /******************************************************************************/ /* * Define always-enabled assertion macros, so that test assertions execute even * if assertions are disabled in the library code. These definitions must * exist prior to including "jemalloc/internal/util.h". */ #define assert(e) do { \ if (!(e)) { \ malloc_printf( \ ": %s:%d: Failed assertion: \"%s\"\n", \ __FILE__, __LINE__, #e); \ abort(); \ } \ } while (0) #define not_reached() do { \ malloc_printf( \ ": %s:%d: Unreachable code reached\n", \ __FILE__, __LINE__); \ abort(); \ } while (0) #define not_implemented() do { \ malloc_printf(": %s:%d: Not implemented\n", \ __FILE__, __LINE__); \ abort(); \ } while (0) #define assert_not_implemented(e) do { \ if (!(e)) \ not_implemented(); \ } while (0) #include "test/jemalloc_test_defs.h" #ifdef JEMALLOC_OSSPIN # include #endif #if defined(HAVE_ALTIVEC) && !defined(__APPLE__) # include #endif #ifdef HAVE_SSE2 # include #endif /******************************************************************************/ /* * For unit tests, expose all public and private interfaces. */ #ifdef JEMALLOC_UNIT_TEST # define JEMALLOC_JET # define JEMALLOC_MANGLE # include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ /* * For integration tests, expose the public jemalloc interfaces, but only * expose the minimum necessary internal utility code (to avoid re-implementing * essentially identical code within the test infrastructure). */ #elif defined(JEMALLOC_INTEGRATION_TEST) # define JEMALLOC_MANGLE # include "jemalloc/jemalloc@install_suffix@.h" # include "jemalloc/internal/jemalloc_internal_defs.h" # include "jemalloc/internal/jemalloc_internal_macros.h" # define JEMALLOC_N(n) @private_namespace@##n # include "jemalloc/internal/private_namespace.h" # define JEMALLOC_H_TYPES # define JEMALLOC_H_STRUCTS # define JEMALLOC_H_EXTERNS # define JEMALLOC_H_INLINES # include "jemalloc/internal/util.h" # include "jemalloc/internal/qr.h" # include "jemalloc/internal/ql.h" # undef JEMALLOC_H_TYPES # undef JEMALLOC_H_STRUCTS # undef JEMALLOC_H_EXTERNS # undef JEMALLOC_H_INLINES /******************************************************************************/ /* * For stress tests, expose the public jemalloc interfaces with name mangling * so that they can be tested as e.g. malloc() and free(). 
Also expose the * public jemalloc interfaces with jet_ prefixes, so that stress tests can use * a separate allocator for their internal data structures. */ #elif defined(JEMALLOC_STRESS_TEST) # include "jemalloc/jemalloc@install_suffix@.h" # include "jemalloc/jemalloc_protos_jet.h" # define JEMALLOC_JET # include "jemalloc/internal/jemalloc_internal.h" # include "jemalloc/internal/public_unnamespace.h" # undef JEMALLOC_JET # include "jemalloc/jemalloc_rename.h" # define JEMALLOC_MANGLE # ifdef JEMALLOC_STRESS_TESTLIB # include "jemalloc/jemalloc_mangle_jet.h" # else # include "jemalloc/jemalloc_mangle.h" # endif /******************************************************************************/ /* * This header does dangerous things, the effects of which only test code * should be subject to. */ #else # error "This header cannot be included outside a testing context" #endif /******************************************************************************/ /* * Common test utilities. */ #include "test/btalloc.h" #include "test/math.h" #include "test/mtx.h" #include "test/mq.h" #include "test/test.h" #include "test/timer.h" #include "test/thd.h" #define MEXP 19937 #include "test/SFMT.h" disque-1.0-rc1/deps/jemalloc/test/include/test/jemalloc_test_defs.h.in000066400000000000000000000004421264176561100260460ustar00rootroot00000000000000#include "jemalloc/internal/jemalloc_internal_defs.h" #include "jemalloc/internal/jemalloc_internal_decls.h" /* * For use by SFMT. configure.ac doesn't actually define HAVE_SSE2 because its * dependencies are notoriously unportable in practice. */ #undef HAVE_SSE2 #undef HAVE_ALTIVEC disque-1.0-rc1/deps/jemalloc/test/include/test/math.h000066400000000000000000000177541264176561100225620ustar00rootroot00000000000000#ifndef JEMALLOC_ENABLE_INLINE double ln_gamma(double x); double i_gamma(double x, double p, double ln_gamma_p); double pt_norm(double p); double pt_chi2(double p, double df, double ln_gamma_df_2); double pt_gamma(double p, double shape, double scale, double ln_gamma_shape); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(MATH_C_)) /* * Compute the natural log of Gamma(x), accurate to 10 decimal places. * * This implementation is based on: * * Pike, M.C., I.D. Hill (1966) Algorithm 291: Logarithm of Gamma function * [S14]. Communications of the ACM 9(9):684. */ JEMALLOC_INLINE double ln_gamma(double x) { double f, z; assert(x > 0.0); if (x < 7.0) { f = 1.0; z = x; while (z < 7.0) { f *= z; z += 1.0; } x = z; f = -log(f); } else f = 0.0; z = 1.0 / (x * x); return (f + (x-0.5) * log(x) - x + 0.918938533204673 + (((-0.000595238095238 * z + 0.000793650793651) * z - 0.002777777777778) * z + 0.083333333333333) / x); } /* * Compute the incomplete Gamma ratio for [0..x], where p is the shape * parameter, and ln_gamma_p is ln_gamma(p). * * This implementation is based on: * * Bhattacharjee, G.P. (1970) Algorithm AS 32: The incomplete Gamma integral. * Applied Statistics 19:285-287. */ JEMALLOC_INLINE double i_gamma(double x, double p, double ln_gamma_p) { double acu, factor, oflo, gin, term, rn, a, b, an, dif; double pn[6]; unsigned i; assert(p > 0.0); assert(x >= 0.0); if (x == 0.0) return (0.0); acu = 1.0e-10; oflo = 1.0e30; gin = 0.0; factor = exp(p * log(x) - x - ln_gamma_p); if (x <= 1.0 || x < p) { /* Calculation by series expansion. */ gin = 1.0; term = 1.0; rn = p; while (true) { rn += 1.0; term *= x / rn; gin += term; if (term <= acu) { gin *= factor / p; return (gin); } } } else { /* Calculation by continued fraction. 
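 * The pn[] array below holds the numerators and denominators of
 * successive convergents of this continued fraction (the branch of
 * AS 32 taken when x > 1.0 && x >= p); iteration stops once two
 * consecutive convergents agree to within the tolerance acu, and the
 * lower-tail value is then recovered as 1.0 - factor * gin.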
*/ a = 1.0 - p; b = a + x + 1.0; term = 0.0; pn[0] = 1.0; pn[1] = x; pn[2] = x + 1.0; pn[3] = x * b; gin = pn[2] / pn[3]; while (true) { a += 1.0; b += 2.0; term += 1.0; an = a * term; for (i = 0; i < 2; i++) pn[i+4] = b * pn[i+2] - an * pn[i]; if (pn[5] != 0.0) { rn = pn[4] / pn[5]; dif = fabs(gin - rn); if (dif <= acu && dif <= acu * rn) { gin = 1.0 - factor * gin; return (gin); } gin = rn; } for (i = 0; i < 4; i++) pn[i] = pn[i+2]; if (fabs(pn[4]) >= oflo) { for (i = 0; i < 4; i++) pn[i] /= oflo; } } } } /* * Given a value p in [0..1] of the lower tail area of the normal distribution, * compute the limit on the definite integral from [-inf..z] that satisfies p, * accurate to 16 decimal places. * * This implementation is based on: * * Wichura, M.J. (1988) Algorithm AS 241: The percentage points of the normal * distribution. Applied Statistics 37(3):477-484. */ JEMALLOC_INLINE double pt_norm(double p) { double q, r, ret; assert(p > 0.0 && p < 1.0); q = p - 0.5; if (fabs(q) <= 0.425) { /* p close to 1/2. */ r = 0.180625 - q * q; return (q * (((((((2.5090809287301226727e3 * r + 3.3430575583588128105e4) * r + 6.7265770927008700853e4) * r + 4.5921953931549871457e4) * r + 1.3731693765509461125e4) * r + 1.9715909503065514427e3) * r + 1.3314166789178437745e2) * r + 3.3871328727963666080e0) / (((((((5.2264952788528545610e3 * r + 2.8729085735721942674e4) * r + 3.9307895800092710610e4) * r + 2.1213794301586595867e4) * r + 5.3941960214247511077e3) * r + 6.8718700749205790830e2) * r + 4.2313330701600911252e1) * r + 1.0)); } else { if (q < 0.0) r = p; else r = 1.0 - p; assert(r > 0.0); r = sqrt(-log(r)); if (r <= 5.0) { /* p neither close to 1/2 nor 0 or 1. */ r -= 1.6; ret = ((((((((7.74545014278341407640e-4 * r + 2.27238449892691845833e-2) * r + 2.41780725177450611770e-1) * r + 1.27045825245236838258e0) * r + 3.64784832476320460504e0) * r + 5.76949722146069140550e0) * r + 4.63033784615654529590e0) * r + 1.42343711074968357734e0) / (((((((1.05075007164441684324e-9 * r + 5.47593808499534494600e-4) * r + 1.51986665636164571966e-2) * r + 1.48103976427480074590e-1) * r + 6.89767334985100004550e-1) * r + 1.67638483018380384940e0) * r + 2.05319162663775882187e0) * r + 1.0)); } else { /* p near 0 or 1. */ r -= 5.0; ret = ((((((((2.01033439929228813265e-7 * r + 2.71155556874348757815e-5) * r + 1.24266094738807843860e-3) * r + 2.65321895265761230930e-2) * r + 2.96560571828504891230e-1) * r + 1.78482653991729133580e0) * r + 5.46378491116411436990e0) * r + 6.65790464350110377720e0) / (((((((2.04426310338993978564e-15 * r + 1.42151175831644588870e-7) * r + 1.84631831751005468180e-5) * r + 7.86869131145613259100e-4) * r + 1.48753612908506148525e-2) * r + 1.36929880922735805310e-1) * r + 5.99832206555887937690e-1) * r + 1.0)); } if (q < 0.0) ret = -ret; return (ret); } } /* * Given a value p in [0..1] of the lower tail area of the Chi^2 distribution * with df degrees of freedom, where ln_gamma_df_2 is ln_gamma(df/2.0), compute * the upper limit on the definite integral from [0..z] that satisfies p, * accurate to 12 decimal places. * * This implementation is based on: * * Best, D.J., D.E. Roberts (1975) Algorithm AS 91: The percentage points of * the Chi^2 distribution. Applied Statistics 24(3):385-388. * * Shea, B.L. (1991) Algorithm AS R85: A remark on AS 91: The percentage * points of the Chi^2 distribution. Applied Statistics 40(1):233-235. 
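 *
 * For example, pt_chi2(0.95, 10.0, ln_gamma(10.0 / 2.0)) should
 * return approximately 18.307, the 95th percentile of the Chi^2
 * distribution with 10 degrees of freedom.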
*/ JEMALLOC_INLINE double pt_chi2(double p, double df, double ln_gamma_df_2) { double e, aa, xx, c, ch, a, q, p1, p2, t, x, b, s1, s2, s3, s4, s5, s6; unsigned i; assert(p >= 0.0 && p < 1.0); assert(df > 0.0); e = 5.0e-7; aa = 0.6931471805; xx = 0.5 * df; c = xx - 1.0; if (df < -1.24 * log(p)) { /* Starting approximation for small Chi^2. */ ch = pow(p * xx * exp(ln_gamma_df_2 + xx * aa), 1.0 / xx); if (ch - e < 0.0) return (ch); } else { if (df > 0.32) { x = pt_norm(p); /* * Starting approximation using Wilson and Hilferty * estimate. */ p1 = 0.222222 / df; ch = df * pow(x * sqrt(p1) + 1.0 - p1, 3.0); /* Starting approximation for p tending to 1. */ if (ch > 2.2 * df + 6.0) { ch = -2.0 * (log(1.0 - p) - c * log(0.5 * ch) + ln_gamma_df_2); } } else { ch = 0.4; a = log(1.0 - p); while (true) { q = ch; p1 = 1.0 + ch * (4.67 + ch); p2 = ch * (6.73 + ch * (6.66 + ch)); t = -0.5 + (4.67 + 2.0 * ch) / p1 - (6.73 + ch * (13.32 + 3.0 * ch)) / p2; ch -= (1.0 - exp(a + ln_gamma_df_2 + 0.5 * ch + c * aa) * p2 / p1) / t; if (fabs(q / ch - 1.0) - 0.01 <= 0.0) break; } } } for (i = 0; i < 20; i++) { /* Calculation of seven-term Taylor series. */ q = ch; p1 = 0.5 * ch; if (p1 < 0.0) return (-1.0); p2 = p - i_gamma(p1, xx, ln_gamma_df_2); t = p2 * exp(xx * aa + ln_gamma_df_2 + p1 - c * log(ch)); b = t / ch; a = 0.5 * t - b * c; s1 = (210.0 + a * (140.0 + a * (105.0 + a * (84.0 + a * (70.0 + 60.0 * a))))) / 420.0; s2 = (420.0 + a * (735.0 + a * (966.0 + a * (1141.0 + 1278.0 * a)))) / 2520.0; s3 = (210.0 + a * (462.0 + a * (707.0 + 932.0 * a))) / 2520.0; s4 = (252.0 + a * (672.0 + 1182.0 * a) + c * (294.0 + a * (889.0 + 1740.0 * a))) / 5040.0; s5 = (84.0 + 264.0 * a + c * (175.0 + 606.0 * a)) / 2520.0; s6 = (120.0 + c * (346.0 + 127.0 * c)) / 5040.0; ch += t * (1.0 + 0.5 * t * s1 - b * c * (s1 - b * (s2 - b * (s3 - b * (s4 - b * (s5 - b * s6)))))); if (fabs(q / ch - 1.0) <= e) break; } return (ch); } /* * Given a value p in [0..1] and Gamma distribution shape and scale parameters, * compute the upper limit on the definite integral from [0..z] that satisfies * p. */ JEMALLOC_INLINE double pt_gamma(double p, double shape, double scale, double ln_gamma_shape) { return (pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale); } #endif disque-1.0-rc1/deps/jemalloc/test/include/test/mq.h000066400000000000000000000055261264176561100222400ustar00rootroot00000000000000void mq_nanosleep(unsigned ns); /* * Simple templated message queue implementation that relies on only mutexes for * synchronization (which reduces portability issues). Given the following * setup: * * typedef struct mq_msg_s mq_msg_t; * struct mq_msg_s { * mq_msg(mq_msg_t) link; * [message data] * }; * mq_gen(, mq_, mq_t, mq_msg_t, link) * * The API is as follows: * * bool mq_init(mq_t *mq); * void mq_fini(mq_t *mq); * unsigned mq_count(mq_t *mq); * mq_msg_t *mq_tryget(mq_t *mq); * mq_msg_t *mq_get(mq_t *mq); * void mq_put(mq_t *mq, mq_msg_t *msg); * * The message queue linkage embedded in each message is to be treated as * externally opaque (no need to initialize or clean up externally). mq_fini() * does not perform any cleanup of messages, since it knows nothing of their * payloads. 
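 *
 * A minimal usage sketch, given the setup above (error checking is
 * elided, and the mallocx()/dallocx() calls are only illustrative,
 * since mq_fini() never frees messages and cleanup stays with the
 * caller):
 *
 *   mq_t mq;
 *   mq_msg_t *msg;
 *
 *   mq_init(&mq);
 *   msg = (mq_msg_t *)mallocx(sizeof(mq_msg_t), 0);
 *   mq_put(&mq, msg);
 *   msg = mq_get(&mq);    (blocks, sleeping via mq_nanosleep(), until
 *                          a message is available)
 *   dallocx(msg, 0);
 *   mq_fini(&mq);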
*/ #define mq_msg(a_mq_msg_type) ql_elm(a_mq_msg_type) #define mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field) \ typedef struct { \ mtx_t lock; \ ql_head(a_mq_msg_type) msgs; \ unsigned count; \ } a_mq_type; \ a_attr bool \ a_prefix##init(a_mq_type *mq) { \ \ if (mtx_init(&mq->lock)) \ return (true); \ ql_new(&mq->msgs); \ mq->count = 0; \ return (false); \ } \ a_attr void \ a_prefix##fini(a_mq_type *mq) \ { \ \ mtx_fini(&mq->lock); \ } \ a_attr unsigned \ a_prefix##count(a_mq_type *mq) \ { \ unsigned count; \ \ mtx_lock(&mq->lock); \ count = mq->count; \ mtx_unlock(&mq->lock); \ return (count); \ } \ a_attr a_mq_msg_type * \ a_prefix##tryget(a_mq_type *mq) \ { \ a_mq_msg_type *msg; \ \ mtx_lock(&mq->lock); \ msg = ql_first(&mq->msgs); \ if (msg != NULL) { \ ql_head_remove(&mq->msgs, a_mq_msg_type, a_field); \ mq->count--; \ } \ mtx_unlock(&mq->lock); \ return (msg); \ } \ a_attr a_mq_msg_type * \ a_prefix##get(a_mq_type *mq) \ { \ a_mq_msg_type *msg; \ unsigned ns; \ \ msg = a_prefix##tryget(mq); \ if (msg != NULL) \ return (msg); \ \ ns = 1; \ while (true) { \ mq_nanosleep(ns); \ msg = a_prefix##tryget(mq); \ if (msg != NULL) \ return (msg); \ if (ns < 1000*1000*1000) { \ /* Double sleep time, up to max 1 second. */ \ ns <<= 1; \ if (ns > 1000*1000*1000) \ ns = 1000*1000*1000; \ } \ } \ } \ a_attr void \ a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) \ { \ \ mtx_lock(&mq->lock); \ ql_elm_new(msg, a_field); \ ql_tail_insert(&mq->msgs, msg, a_field); \ mq->count++; \ mtx_unlock(&mq->lock); \ } disque-1.0-rc1/deps/jemalloc/test/include/test/mtx.h000066400000000000000000000010101264176561100224130ustar00rootroot00000000000000/* * mtx is a slightly simplified version of malloc_mutex. This code duplication * is unfortunate, but there are allocator bootstrapping considerations that * would leak into the test infrastructure if malloc_mutex were used directly * in tests. */ typedef struct { #ifdef _WIN32 CRITICAL_SECTION lock; #elif (defined(JEMALLOC_OSSPIN)) OSSpinLock lock; #else pthread_mutex_t lock; #endif } mtx_t; bool mtx_init(mtx_t *mtx); void mtx_fini(mtx_t *mtx); void mtx_lock(mtx_t *mtx); void mtx_unlock(mtx_t *mtx); disque-1.0-rc1/deps/jemalloc/test/include/test/test.h000066400000000000000000000315671264176561100226060ustar00rootroot00000000000000#define ASSERT_BUFSIZE 256 #define assert_cmp(t, a, b, cmp, neg_cmp, pri, ...) do { \ t a_ = (a); \ t b_ = (b); \ if (!(a_ cmp b_)) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) "#cmp" (%s) --> " \ "%"pri" "#neg_cmp" %"pri": ", \ __func__, __FILE__, __LINE__, \ #a, #b, a_, b_); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_ptr_eq(a, b, ...) assert_cmp(void *, a, b, ==, \ !=, "p", __VA_ARGS__) #define assert_ptr_ne(a, b, ...) assert_cmp(void *, a, b, !=, \ ==, "p", __VA_ARGS__) #define assert_ptr_null(a, ...) assert_cmp(void *, a, NULL, ==, \ !=, "p", __VA_ARGS__) #define assert_ptr_not_null(a, ...) assert_cmp(void *, a, NULL, !=, \ ==, "p", __VA_ARGS__) #define assert_c_eq(a, b, ...) assert_cmp(char, a, b, ==, !=, "c", __VA_ARGS__) #define assert_c_ne(a, b, ...) assert_cmp(char, a, b, !=, ==, "c", __VA_ARGS__) #define assert_c_lt(a, b, ...) assert_cmp(char, a, b, <, >=, "c", __VA_ARGS__) #define assert_c_le(a, b, ...) assert_cmp(char, a, b, <=, >, "c", __VA_ARGS__) #define assert_c_ge(a, b, ...) 
assert_cmp(char, a, b, >=, <, "c", __VA_ARGS__) #define assert_c_gt(a, b, ...) assert_cmp(char, a, b, >, <=, "c", __VA_ARGS__) #define assert_x_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__) #define assert_x_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__) #define assert_x_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "#x", __VA_ARGS__) #define assert_x_le(a, b, ...) assert_cmp(int, a, b, <=, >, "#x", __VA_ARGS__) #define assert_x_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "#x", __VA_ARGS__) #define assert_x_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "#x", __VA_ARGS__) #define assert_d_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "d", __VA_ARGS__) #define assert_d_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "d", __VA_ARGS__) #define assert_d_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "d", __VA_ARGS__) #define assert_d_le(a, b, ...) assert_cmp(int, a, b, <=, >, "d", __VA_ARGS__) #define assert_d_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "d", __VA_ARGS__) #define assert_d_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "d", __VA_ARGS__) #define assert_u_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "u", __VA_ARGS__) #define assert_u_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "u", __VA_ARGS__) #define assert_u_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "u", __VA_ARGS__) #define assert_u_le(a, b, ...) assert_cmp(int, a, b, <=, >, "u", __VA_ARGS__) #define assert_u_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "u", __VA_ARGS__) #define assert_u_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "u", __VA_ARGS__) #define assert_ld_eq(a, b, ...) assert_cmp(long, a, b, ==, \ !=, "ld", __VA_ARGS__) #define assert_ld_ne(a, b, ...) assert_cmp(long, a, b, !=, \ ==, "ld", __VA_ARGS__) #define assert_ld_lt(a, b, ...) assert_cmp(long, a, b, <, \ >=, "ld", __VA_ARGS__) #define assert_ld_le(a, b, ...) assert_cmp(long, a, b, <=, \ >, "ld", __VA_ARGS__) #define assert_ld_ge(a, b, ...) assert_cmp(long, a, b, >=, \ <, "ld", __VA_ARGS__) #define assert_ld_gt(a, b, ...) assert_cmp(long, a, b, >, \ <=, "ld", __VA_ARGS__) #define assert_lu_eq(a, b, ...) assert_cmp(unsigned long, \ a, b, ==, !=, "lu", __VA_ARGS__) #define assert_lu_ne(a, b, ...) assert_cmp(unsigned long, \ a, b, !=, ==, "lu", __VA_ARGS__) #define assert_lu_lt(a, b, ...) assert_cmp(unsigned long, \ a, b, <, >=, "lu", __VA_ARGS__) #define assert_lu_le(a, b, ...) assert_cmp(unsigned long, \ a, b, <=, >, "lu", __VA_ARGS__) #define assert_lu_ge(a, b, ...) assert_cmp(unsigned long, \ a, b, >=, <, "lu", __VA_ARGS__) #define assert_lu_gt(a, b, ...) assert_cmp(unsigned long, \ a, b, >, <=, "lu", __VA_ARGS__) #define assert_qd_eq(a, b, ...) assert_cmp(long long, a, b, ==, \ !=, "qd", __VA_ARGS__) #define assert_qd_ne(a, b, ...) assert_cmp(long long, a, b, !=, \ ==, "qd", __VA_ARGS__) #define assert_qd_lt(a, b, ...) assert_cmp(long long, a, b, <, \ >=, "qd", __VA_ARGS__) #define assert_qd_le(a, b, ...) assert_cmp(long long, a, b, <=, \ >, "qd", __VA_ARGS__) #define assert_qd_ge(a, b, ...) assert_cmp(long long, a, b, >=, \ <, "qd", __VA_ARGS__) #define assert_qd_gt(a, b, ...) assert_cmp(long long, a, b, >, \ <=, "qd", __VA_ARGS__) #define assert_qu_eq(a, b, ...) assert_cmp(unsigned long long, \ a, b, ==, !=, "qu", __VA_ARGS__) #define assert_qu_ne(a, b, ...) assert_cmp(unsigned long long, \ a, b, !=, ==, "qu", __VA_ARGS__) #define assert_qu_lt(a, b, ...) assert_cmp(unsigned long long, \ a, b, <, >=, "qu", __VA_ARGS__) #define assert_qu_le(a, b, ...) assert_cmp(unsigned long long, \ a, b, <=, >, "qu", __VA_ARGS__) #define assert_qu_ge(a, b, ...) 
assert_cmp(unsigned long long, \ a, b, >=, <, "qu", __VA_ARGS__) #define assert_qu_gt(a, b, ...) assert_cmp(unsigned long long, \ a, b, >, <=, "qu", __VA_ARGS__) #define assert_jd_eq(a, b, ...) assert_cmp(intmax_t, a, b, ==, \ !=, "jd", __VA_ARGS__) #define assert_jd_ne(a, b, ...) assert_cmp(intmax_t, a, b, !=, \ ==, "jd", __VA_ARGS__) #define assert_jd_lt(a, b, ...) assert_cmp(intmax_t, a, b, <, \ >=, "jd", __VA_ARGS__) #define assert_jd_le(a, b, ...) assert_cmp(intmax_t, a, b, <=, \ >, "jd", __VA_ARGS__) #define assert_jd_ge(a, b, ...) assert_cmp(intmax_t, a, b, >=, \ <, "jd", __VA_ARGS__) #define assert_jd_gt(a, b, ...) assert_cmp(intmax_t, a, b, >, \ <=, "jd", __VA_ARGS__) #define assert_ju_eq(a, b, ...) assert_cmp(uintmax_t, a, b, ==, \ !=, "ju", __VA_ARGS__) #define assert_ju_ne(a, b, ...) assert_cmp(uintmax_t, a, b, !=, \ ==, "ju", __VA_ARGS__) #define assert_ju_lt(a, b, ...) assert_cmp(uintmax_t, a, b, <, \ >=, "ju", __VA_ARGS__) #define assert_ju_le(a, b, ...) assert_cmp(uintmax_t, a, b, <=, \ >, "ju", __VA_ARGS__) #define assert_ju_ge(a, b, ...) assert_cmp(uintmax_t, a, b, >=, \ <, "ju", __VA_ARGS__) #define assert_ju_gt(a, b, ...) assert_cmp(uintmax_t, a, b, >, \ <=, "ju", __VA_ARGS__) #define assert_zd_eq(a, b, ...) assert_cmp(ssize_t, a, b, ==, \ !=, "zd", __VA_ARGS__) #define assert_zd_ne(a, b, ...) assert_cmp(ssize_t, a, b, !=, \ ==, "zd", __VA_ARGS__) #define assert_zd_lt(a, b, ...) assert_cmp(ssize_t, a, b, <, \ >=, "zd", __VA_ARGS__) #define assert_zd_le(a, b, ...) assert_cmp(ssize_t, a, b, <=, \ >, "zd", __VA_ARGS__) #define assert_zd_ge(a, b, ...) assert_cmp(ssize_t, a, b, >=, \ <, "zd", __VA_ARGS__) #define assert_zd_gt(a, b, ...) assert_cmp(ssize_t, a, b, >, \ <=, "zd", __VA_ARGS__) #define assert_zu_eq(a, b, ...) assert_cmp(size_t, a, b, ==, \ !=, "zu", __VA_ARGS__) #define assert_zu_ne(a, b, ...) assert_cmp(size_t, a, b, !=, \ ==, "zu", __VA_ARGS__) #define assert_zu_lt(a, b, ...) assert_cmp(size_t, a, b, <, \ >=, "zu", __VA_ARGS__) #define assert_zu_le(a, b, ...) assert_cmp(size_t, a, b, <=, \ >, "zu", __VA_ARGS__) #define assert_zu_ge(a, b, ...) assert_cmp(size_t, a, b, >=, \ <, "zu", __VA_ARGS__) #define assert_zu_gt(a, b, ...) assert_cmp(size_t, a, b, >, \ <=, "zu", __VA_ARGS__) #define assert_d32_eq(a, b, ...) assert_cmp(int32_t, a, b, ==, \ !=, FMTd32, __VA_ARGS__) #define assert_d32_ne(a, b, ...) assert_cmp(int32_t, a, b, !=, \ ==, FMTd32, __VA_ARGS__) #define assert_d32_lt(a, b, ...) assert_cmp(int32_t, a, b, <, \ >=, FMTd32, __VA_ARGS__) #define assert_d32_le(a, b, ...) assert_cmp(int32_t, a, b, <=, \ >, FMTd32, __VA_ARGS__) #define assert_d32_ge(a, b, ...) assert_cmp(int32_t, a, b, >=, \ <, FMTd32, __VA_ARGS__) #define assert_d32_gt(a, b, ...) assert_cmp(int32_t, a, b, >, \ <=, FMTd32, __VA_ARGS__) #define assert_u32_eq(a, b, ...) assert_cmp(uint32_t, a, b, ==, \ !=, FMTu32, __VA_ARGS__) #define assert_u32_ne(a, b, ...) assert_cmp(uint32_t, a, b, !=, \ ==, FMTu32, __VA_ARGS__) #define assert_u32_lt(a, b, ...) assert_cmp(uint32_t, a, b, <, \ >=, FMTu32, __VA_ARGS__) #define assert_u32_le(a, b, ...) assert_cmp(uint32_t, a, b, <=, \ >, FMTu32, __VA_ARGS__) #define assert_u32_ge(a, b, ...) assert_cmp(uint32_t, a, b, >=, \ <, FMTu32, __VA_ARGS__) #define assert_u32_gt(a, b, ...) assert_cmp(uint32_t, a, b, >, \ <=, FMTu32, __VA_ARGS__) #define assert_d64_eq(a, b, ...) assert_cmp(int64_t, a, b, ==, \ !=, FMTd64, __VA_ARGS__) #define assert_d64_ne(a, b, ...) assert_cmp(int64_t, a, b, !=, \ ==, FMTd64, __VA_ARGS__) #define assert_d64_lt(a, b, ...) 
assert_cmp(int64_t, a, b, <, \ >=, FMTd64, __VA_ARGS__) #define assert_d64_le(a, b, ...) assert_cmp(int64_t, a, b, <=, \ >, FMTd64, __VA_ARGS__) #define assert_d64_ge(a, b, ...) assert_cmp(int64_t, a, b, >=, \ <, FMTd64, __VA_ARGS__) #define assert_d64_gt(a, b, ...) assert_cmp(int64_t, a, b, >, \ <=, FMTd64, __VA_ARGS__) #define assert_u64_eq(a, b, ...) assert_cmp(uint64_t, a, b, ==, \ !=, FMTu64, __VA_ARGS__) #define assert_u64_ne(a, b, ...) assert_cmp(uint64_t, a, b, !=, \ ==, FMTu64, __VA_ARGS__) #define assert_u64_lt(a, b, ...) assert_cmp(uint64_t, a, b, <, \ >=, FMTu64, __VA_ARGS__) #define assert_u64_le(a, b, ...) assert_cmp(uint64_t, a, b, <=, \ >, FMTu64, __VA_ARGS__) #define assert_u64_ge(a, b, ...) assert_cmp(uint64_t, a, b, >=, \ <, FMTu64, __VA_ARGS__) #define assert_u64_gt(a, b, ...) assert_cmp(uint64_t, a, b, >, \ <=, FMTu64, __VA_ARGS__) #define assert_b_eq(a, b, ...) do { \ bool a_ = (a); \ bool b_ = (b); \ if (!(a_ == b_)) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) == (%s) --> %s != %s: ", \ __func__, __FILE__, __LINE__, \ #a, #b, a_ ? "true" : "false", \ b_ ? "true" : "false"); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_b_ne(a, b, ...) do { \ bool a_ = (a); \ bool b_ = (b); \ if (!(a_ != b_)) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) != (%s) --> %s == %s: ", \ __func__, __FILE__, __LINE__, \ #a, #b, a_ ? "true" : "false", \ b_ ? "true" : "false"); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_true(a, ...) assert_b_eq(a, true, __VA_ARGS__) #define assert_false(a, ...) assert_b_eq(a, false, __VA_ARGS__) #define assert_str_eq(a, b, ...) do { \ if (strcmp((a), (b))) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) same as (%s) --> " \ "\"%s\" differs from \"%s\": ", \ __func__, __FILE__, __LINE__, #a, #b, a, b); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_str_ne(a, b, ...) do { \ if (!strcmp((a), (b))) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) differs from (%s) --> " \ "\"%s\" same as \"%s\": ", \ __func__, __FILE__, __LINE__, #a, #b, a, b); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_not_reached(...) do { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Unreachable code reached: ", \ __func__, __FILE__, __LINE__); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } while (0) /* * If this enum changes, corresponding changes in test/test.sh.in are also * necessary. */ typedef enum { test_status_pass = 0, test_status_skip = 1, test_status_fail = 2, test_status_count = 3 } test_status_t; typedef void (test_t)(void); #define TEST_BEGIN(f) \ static void \ f(void) \ { \ p_test_init(#f); #define TEST_END \ goto label_test_end; \ label_test_end: \ p_test_fini(); \ } #define test(...) 
\ p_test(__VA_ARGS__, NULL) #define test_skip_if(e) do { \ if (e) { \ test_skip("%s:%s:%d: Test skipped: (%s)", \ __func__, __FILE__, __LINE__, #e); \ goto label_test_end; \ } \ } while (0) void test_skip(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); void test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); /* For private use by macros. */ test_status_t p_test(test_t *t, ...); void p_test_init(const char *name); void p_test_fini(void); void p_test_fail(const char *prefix, const char *message); disque-1.0-rc1/deps/jemalloc/test/include/test/thd.h000066400000000000000000000003401264176561100223670ustar00rootroot00000000000000/* Abstraction layer for threading in tests. */ #ifdef _WIN32 typedef HANDLE thd_t; #else typedef pthread_t thd_t; #endif void thd_create(thd_t *thd, void *(*proc)(void *), void *arg); void thd_join(thd_t thd, void **ret); disque-1.0-rc1/deps/jemalloc/test/include/test/timer.h000066400000000000000000000011471264176561100227360ustar00rootroot00000000000000/* Simple timer, for use in benchmark reporting. */ #include #include #define JEMALLOC_CLOCK_GETTIME defined(_POSIX_MONOTONIC_CLOCK) \ && _POSIX_MONOTONIC_CLOCK >= 0 typedef struct { #ifdef _WIN32 FILETIME ft0; FILETIME ft1; #elif JEMALLOC_CLOCK_GETTIME struct timespec ts0; struct timespec ts1; int clock_id; #else struct timeval tv0; struct timeval tv1; #endif } timedelta_t; void timer_start(timedelta_t *timer); void timer_stop(timedelta_t *timer); uint64_t timer_usec(const timedelta_t *timer); void timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen); disque-1.0-rc1/deps/jemalloc/test/integration/000077500000000000000000000000001264176561100213635ustar00rootroot00000000000000disque-1.0-rc1/deps/jemalloc/test/integration/MALLOCX_ARENA.c000066400000000000000000000026311264176561100235360ustar00rootroot00000000000000#include "test/jemalloc_test.h" #define NTHREADS 10 static bool have_dss = #ifdef JEMALLOC_DSS true #else false #endif ; void * thd_start(void *arg) { unsigned thread_ind = (unsigned)(uintptr_t)arg; unsigned arena_ind; void *p; size_t sz; sz = sizeof(arena_ind); assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0, "Error in arenas.extend"); if (thread_ind % 4 != 3) { size_t mib[3]; size_t miblen = sizeof(mib) / sizeof(size_t); const char *dss_precs[] = {"disabled", "primary", "secondary"}; unsigned prec_ind = thread_ind % (sizeof(dss_precs)/sizeof(char*)); const char *dss = dss_precs[prec_ind]; int expected_err = (have_dss || prec_ind == 0) ? 
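/* Only the "disabled" precedence (prec_ind == 0) is valid on builds without DSS support, so the mallctl below is expected to fail with EFAULT for "primary"/"secondary" unless have_dss is true. */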
0 : EFAULT; assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0, "Error in mallctlnametomib()"); mib[1] = arena_ind; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss, sizeof(const char *)), expected_err, "Error in mallctlbymib()"); } p = mallocx(1, MALLOCX_ARENA(arena_ind)); assert_ptr_not_null(p, "Unexpected mallocx() error"); dallocx(p, 0); return (NULL); } TEST_BEGIN(test_MALLOCX_ARENA) { thd_t thds[NTHREADS]; unsigned i; for (i = 0; i < NTHREADS; i++) { thd_create(&thds[i], thd_start, (void *)(uintptr_t)i); } for (i = 0; i < NTHREADS; i++) thd_join(thds[i], NULL); } TEST_END int main(void) { return (test( test_MALLOCX_ARENA)); } disque-1.0-rc1/deps/jemalloc/test/integration/aligned_alloc.c000066400000000000000000000053101264176561100243030ustar00rootroot00000000000000#include "test/jemalloc_test.h" #define CHUNK 0x400000 /* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */ #define MAXALIGN ((size_t)0x2000000LU) #define NITER 4 TEST_BEGIN(test_alignment_errors) { size_t alignment; void *p; alignment = 0; set_errno(0); p = aligned_alloc(alignment, 1); assert_false(p != NULL || get_errno() != EINVAL, "Expected error for invalid alignment %zu", alignment); for (alignment = sizeof(size_t); alignment < MAXALIGN; alignment <<= 1) { set_errno(0); p = aligned_alloc(alignment + 1, 1); assert_false(p != NULL || get_errno() != EINVAL, "Expected error for invalid alignment %zu", alignment + 1); } } TEST_END TEST_BEGIN(test_oom_errors) { size_t alignment, size; void *p; #if LG_SIZEOF_PTR == 3 alignment = UINT64_C(0x8000000000000000); size = UINT64_C(0x8000000000000000); #else alignment = 0x80000000LU; size = 0x80000000LU; #endif set_errno(0); p = aligned_alloc(alignment, size); assert_false(p != NULL || get_errno() != ENOMEM, "Expected error for aligned_alloc(%zu, %zu)", alignment, size); #if LG_SIZEOF_PTR == 3 alignment = UINT64_C(0x4000000000000000); size = UINT64_C(0xc000000000000001); #else alignment = 0x40000000LU; size = 0xc0000001LU; #endif set_errno(0); p = aligned_alloc(alignment, size); assert_false(p != NULL || get_errno() != ENOMEM, "Expected error for aligned_alloc(%zu, %zu)", alignment, size); alignment = 0x10LU; #if LG_SIZEOF_PTR == 3 size = UINT64_C(0xfffffffffffffff0); #else size = 0xfffffff0LU; #endif set_errno(0); p = aligned_alloc(alignment, size); assert_false(p != NULL || get_errno() != ENOMEM, "Expected error for aligned_alloc(&p, %zu, %zu)", alignment, size); } TEST_END TEST_BEGIN(test_alignment_and_size) { size_t alignment, size, total; unsigned i; void *ps[NITER]; for (i = 0; i < NITER; i++) ps[i] = NULL; for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) { total = 0; for (size = 1; size < 3 * alignment && size < (1U << 31); size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { for (i = 0; i < NITER; i++) { ps[i] = aligned_alloc(alignment, size); if (ps[i] == NULL) { char buf[BUFERROR_BUF]; buferror(get_errno(), buf, sizeof(buf)); test_fail( "Error for alignment=%zu, " "size=%zu (%#zx): %s", alignment, size, size, buf); } total += malloc_usable_size(ps[i]); if (total >= (MAXALIGN << 1)) break; } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { free(ps[i]); ps[i] = NULL; } } } } } TEST_END int main(void) { return (test( test_alignment_errors, test_oom_errors, test_alignment_and_size)); } disque-1.0-rc1/deps/jemalloc/test/integration/allocated.c000066400000000000000000000056551264176561100234720ustar00rootroot00000000000000#include "test/jemalloc_test.h" static const bool config_stats = #ifdef JEMALLOC_STATS true #else false #endif ; void * 
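/* Worker body shared by the tests below: check that the "thread.allocated" and "thread.deallocated" counters (and their "p" pointer variants) advance by at least the usable size of each explicit malloc()/free(). */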
thd_start(void *arg) { int err; void *p; uint64_t a0, a1, d0, d1; uint64_t *ap0, *ap1, *dp0, *dp1; size_t sz, usize; sz = sizeof(a0); if ((err = mallctl("thread.allocated", &a0, &sz, NULL, 0))) { if (err == ENOENT) goto label_ENOENT; test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } sz = sizeof(ap0); if ((err = mallctl("thread.allocatedp", &ap0, &sz, NULL, 0))) { if (err == ENOENT) goto label_ENOENT; test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } assert_u64_eq(*ap0, a0, "\"thread.allocatedp\" should provide a pointer to internal " "storage"); sz = sizeof(d0); if ((err = mallctl("thread.deallocated", &d0, &sz, NULL, 0))) { if (err == ENOENT) goto label_ENOENT; test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } sz = sizeof(dp0); if ((err = mallctl("thread.deallocatedp", &dp0, &sz, NULL, 0))) { if (err == ENOENT) goto label_ENOENT; test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } assert_u64_eq(*dp0, d0, "\"thread.deallocatedp\" should provide a pointer to internal " "storage"); p = malloc(1); assert_ptr_not_null(p, "Unexpected malloc() error"); sz = sizeof(a1); mallctl("thread.allocated", &a1, &sz, NULL, 0); sz = sizeof(ap1); mallctl("thread.allocatedp", &ap1, &sz, NULL, 0); assert_u64_eq(*ap1, a1, "Dereferenced \"thread.allocatedp\" value should equal " "\"thread.allocated\" value"); assert_ptr_eq(ap0, ap1, "Pointer returned by \"thread.allocatedp\" should not change"); usize = malloc_usable_size(p); assert_u64_le(a0 + usize, a1, "Allocated memory counter should increase by at least the amount " "explicitly allocated"); free(p); sz = sizeof(d1); mallctl("thread.deallocated", &d1, &sz, NULL, 0); sz = sizeof(dp1); mallctl("thread.deallocatedp", &dp1, &sz, NULL, 0); assert_u64_eq(*dp1, d1, "Dereferenced \"thread.deallocatedp\" value should equal " "\"thread.deallocated\" value"); assert_ptr_eq(dp0, dp1, "Pointer returned by \"thread.deallocatedp\" should not change"); assert_u64_le(d0 + usize, d1, "Deallocated memory counter should increase by at least the amount " "explicitly deallocated"); return (NULL); label_ENOENT: assert_false(config_stats, "ENOENT should only be returned if stats are disabled"); test_skip("\"thread.allocated\" mallctl not available"); return (NULL); } TEST_BEGIN(test_main_thread) { thd_start(NULL); } TEST_END TEST_BEGIN(test_subthread) { thd_t thd; thd_create(&thd, thd_start, NULL); thd_join(thd, NULL); } TEST_END int main(void) { /* Run tests multiple times to check for bad interactions. */ return (test( test_main_thread, test_subthread, test_main_thread, test_subthread, test_main_thread)); } disque-1.0-rc1/deps/jemalloc/test/integration/chunk.c000066400000000000000000000207411264176561100226430ustar00rootroot00000000000000#include "test/jemalloc_test.h" #ifdef JEMALLOC_FILL const char *malloc_conf = "junk:false"; #endif static chunk_hooks_t orig_hooks; static chunk_hooks_t old_hooks; static bool do_dalloc = true; static bool do_decommit; static bool did_alloc; static bool did_dalloc; static bool did_commit; static bool did_decommit; static bool did_purge; static bool did_split; static bool did_merge; #if 0 # define TRACE_HOOK(fmt, ...) malloc_printf(fmt, __VA_ARGS__) #else # define TRACE_HOOK(fmt, ...) #endif void * chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, unsigned arena_ind) { TRACE_HOOK("%s(new_addr=%p, size=%zu, alignment=%zu, *zero=%s, " "*commit=%s, arena_ind=%u)\n", __func__, new_addr, size, alignment, *zero ? "true" : "false", *commit ? 
"true" : "false", arena_ind); did_alloc = true; return (old_hooks.alloc(new_addr, size, alignment, zero, commit, arena_ind)); } bool chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind) { TRACE_HOOK("%s(chunk=%p, size=%zu, committed=%s, arena_ind=%u)\n", __func__, chunk, size, committed ? "true" : "false", arena_ind); did_dalloc = true; if (!do_dalloc) return (true); return (old_hooks.dalloc(chunk, size, committed, arena_ind)); } bool chunk_commit(void *chunk, size_t size, size_t offset, size_t length, unsigned arena_ind) { bool err; TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu, " "arena_ind=%u)\n", __func__, chunk, size, offset, length, arena_ind); err = old_hooks.commit(chunk, size, offset, length, arena_ind); did_commit = !err; return (err); } bool chunk_decommit(void *chunk, size_t size, size_t offset, size_t length, unsigned arena_ind) { bool err; TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu, " "arena_ind=%u)\n", __func__, chunk, size, offset, length, arena_ind); if (!do_decommit) return (true); err = old_hooks.decommit(chunk, size, offset, length, arena_ind); did_decommit = !err; return (err); } bool chunk_purge(void *chunk, size_t size, size_t offset, size_t length, unsigned arena_ind) { TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu " "arena_ind=%u)\n", __func__, chunk, size, offset, length, arena_ind); did_purge = true; return (old_hooks.purge(chunk, size, offset, length, arena_ind)); } bool chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { TRACE_HOOK("%s(chunk=%p, size=%zu, size_a=%zu, size_b=%zu, " "committed=%s, arena_ind=%u)\n", __func__, chunk, size, size_a, size_b, committed ? "true" : "false", arena_ind); did_split = true; return (old_hooks.split(chunk, size, size_a, size_b, committed, arena_ind)); } bool chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, bool committed, unsigned arena_ind) { TRACE_HOOK("%s(chunk_a=%p, size_a=%zu, chunk_b=%p size_b=%zu, " "committed=%s, arena_ind=%u)\n", __func__, chunk_a, size_a, chunk_b, size_b, committed ? "true" : "false", arena_ind); did_merge = true; return (old_hooks.merge(chunk_a, size_a, chunk_b, size_b, committed, arena_ind)); } TEST_BEGIN(test_chunk) { void *p; size_t old_size, new_size, large0, large1, huge0, huge1, huge2, sz; chunk_hooks_t new_hooks = { chunk_alloc, chunk_dalloc, chunk_commit, chunk_decommit, chunk_purge, chunk_split, chunk_merge }; bool xallocx_success_a, xallocx_success_b, xallocx_success_c; /* Install custom chunk hooks. */ old_size = sizeof(chunk_hooks_t); new_size = sizeof(chunk_hooks_t); assert_d_eq(mallctl("arena.0.chunk_hooks", &old_hooks, &old_size, &new_hooks, new_size), 0, "Unexpected chunk_hooks error"); orig_hooks = old_hooks; assert_ptr_ne(old_hooks.alloc, chunk_alloc, "Unexpected alloc error"); assert_ptr_ne(old_hooks.dalloc, chunk_dalloc, "Unexpected dalloc error"); assert_ptr_ne(old_hooks.commit, chunk_commit, "Unexpected commit error"); assert_ptr_ne(old_hooks.decommit, chunk_decommit, "Unexpected decommit error"); assert_ptr_ne(old_hooks.purge, chunk_purge, "Unexpected purge error"); assert_ptr_ne(old_hooks.split, chunk_split, "Unexpected split error"); assert_ptr_ne(old_hooks.merge, chunk_merge, "Unexpected merge error"); /* Get large size classes. 
*/ sz = sizeof(size_t); assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0, "Unexpected arenas.lrun.0.size failure"); assert_d_eq(mallctl("arenas.lrun.1.size", &large1, &sz, NULL, 0), 0, "Unexpected arenas.lrun.1.size failure"); /* Get huge size classes. */ assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0, "Unexpected arenas.hchunk.0.size failure"); assert_d_eq(mallctl("arenas.hchunk.1.size", &huge1, &sz, NULL, 0), 0, "Unexpected arenas.hchunk.1.size failure"); assert_d_eq(mallctl("arenas.hchunk.2.size", &huge2, &sz, NULL, 0), 0, "Unexpected arenas.hchunk.2.size failure"); /* Test dalloc/decommit/purge cascade. */ do_dalloc = false; do_decommit = false; p = mallocx(huge0 * 2, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); did_dalloc = false; did_decommit = false; did_purge = false; did_split = false; xallocx_success_a = (xallocx(p, huge0, 0, 0) == huge0); assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected arena.0.purge error"); if (xallocx_success_a) { assert_true(did_dalloc, "Expected dalloc"); assert_false(did_decommit, "Unexpected decommit"); assert_true(did_purge, "Expected purge"); } assert_true(did_split, "Expected split"); dallocx(p, 0); do_dalloc = true; /* Test decommit/commit and observe split/merge. */ do_dalloc = false; do_decommit = true; p = mallocx(huge0 * 2, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); did_decommit = false; did_commit = false; did_split = false; did_merge = false; xallocx_success_b = (xallocx(p, huge0, 0, 0) == huge0); assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected arena.0.purge error"); if (xallocx_success_b) assert_true(did_split, "Expected split"); xallocx_success_c = (xallocx(p, huge0 * 2, 0, 0) == huge0 * 2); assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match"); if (xallocx_success_b && xallocx_success_c) assert_true(did_merge, "Expected merge"); dallocx(p, 0); do_dalloc = true; do_decommit = false; /* Test purge for partial-chunk huge allocations. */ if (huge0 * 2 > huge2) { /* * There are at least four size classes per doubling, so a * successful xallocx() from size=huge2 to size=huge1 is * guaranteed to leave trailing purgeable memory. */ p = mallocx(huge2, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); did_purge = false; assert_zu_eq(xallocx(p, huge1, 0, 0), huge1, "Unexpected xallocx() failure"); assert_true(did_purge, "Expected purge"); dallocx(p, 0); } /* Test decommit for large allocations. */ do_decommit = true; p = mallocx(large1, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected arena.0.purge error"); did_decommit = false; assert_zu_eq(xallocx(p, large0, 0, 0), large0, "Unexpected xallocx() failure"); assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected arena.0.purge error"); did_commit = false; assert_zu_eq(xallocx(p, large1, 0, 0), large1, "Unexpected xallocx() failure"); assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match"); dallocx(p, 0); do_decommit = false; /* Make sure non-huge allocation succeeds. */ p = mallocx(42, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); dallocx(p, 0); /* Restore chunk hooks. 
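Writing old_hooks back through the mallctl reinstalls the original hooks; the read-back that follows verifies that every hook pointer once again matches the saved orig_hooks.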
*/ assert_d_eq(mallctl("arena.0.chunk_hooks", NULL, NULL, &old_hooks, new_size), 0, "Unexpected chunk_hooks error"); assert_d_eq(mallctl("arena.0.chunk_hooks", &old_hooks, &old_size, NULL, 0), 0, "Unexpected chunk_hooks error"); assert_ptr_eq(old_hooks.alloc, orig_hooks.alloc, "Unexpected alloc error"); assert_ptr_eq(old_hooks.dalloc, orig_hooks.dalloc, "Unexpected dalloc error"); assert_ptr_eq(old_hooks.commit, orig_hooks.commit, "Unexpected commit error"); assert_ptr_eq(old_hooks.decommit, orig_hooks.decommit, "Unexpected decommit error"); assert_ptr_eq(old_hooks.purge, orig_hooks.purge, "Unexpected purge error"); assert_ptr_eq(old_hooks.split, orig_hooks.split, "Unexpected split error"); assert_ptr_eq(old_hooks.merge, orig_hooks.merge, "Unexpected merge error"); } TEST_END int main(void) { return (test(test_chunk)); } disque-1.0-rc1/deps/jemalloc/test/integration/mallocx.c000066400000000000000000000100241264176561100231630ustar00rootroot00000000000000#include "test/jemalloc_test.h" static unsigned get_nsizes_impl(const char *cmd) { unsigned ret; size_t z; z = sizeof(unsigned); assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0, "Unexpected mallctl(\"%s\", ...) failure", cmd); return (ret); } static unsigned get_nhuge(void) { return (get_nsizes_impl("arenas.nhchunks")); } static size_t get_size_impl(const char *cmd, size_t ind) { size_t ret; size_t z; size_t mib[4]; size_t miblen = 4; z = sizeof(size_t); assert_d_eq(mallctlnametomib(cmd, mib, &miblen), 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = ind; z = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); return (ret); } static size_t get_huge_size(size_t ind) { return (get_size_impl("arenas.hchunk.0.size", ind)); } TEST_BEGIN(test_oom) { size_t hugemax, size, alignment; hugemax = get_huge_size(get_nhuge()-1); /* * It should be impossible to allocate two objects that each consume * more than half the virtual address space. 
*/ { void *p; p = mallocx(hugemax, 0); if (p != NULL) { assert_ptr_null(mallocx(hugemax, 0), "Expected OOM for mallocx(size=%#zx, 0)", hugemax); dallocx(p, 0); } } #if LG_SIZEOF_PTR == 3 size = ZU(0x8000000000000000); alignment = ZU(0x8000000000000000); #else size = ZU(0x80000000); alignment = ZU(0x80000000); #endif assert_ptr_null(mallocx(size, MALLOCX_ALIGN(alignment)), "Expected OOM for mallocx(size=%#zx, MALLOCX_ALIGN(%#zx)", size, alignment); } TEST_END TEST_BEGIN(test_basic) { #define MAXSZ (((size_t)1) << 26) size_t sz; for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) { size_t nsz, rsz; void *p; nsz = nallocx(sz, 0); assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); p = mallocx(sz, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); rsz = sallocx(p, 0); assert_zu_ge(rsz, sz, "Real size smaller than expected"); assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch"); dallocx(p, 0); p = mallocx(sz, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); dallocx(p, 0); nsz = nallocx(sz, MALLOCX_ZERO); assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); p = mallocx(sz, MALLOCX_ZERO); assert_ptr_not_null(p, "Unexpected mallocx() error"); rsz = sallocx(p, 0); assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch"); dallocx(p, 0); } #undef MAXSZ } TEST_END TEST_BEGIN(test_alignment_and_size) { #define MAXALIGN (((size_t)1) << 25) #define NITER 4 size_t nsz, rsz, sz, alignment, total; unsigned i; void *ps[NITER]; for (i = 0; i < NITER; i++) ps[i] = NULL; for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) { total = 0; for (sz = 1; sz < 3 * alignment && sz < (1U << 31); sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { for (i = 0; i < NITER; i++) { nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO); assert_zu_ne(nsz, 0, "nallocx() error for alignment=%zu, " "size=%zu (%#zx)", alignment, sz, sz); ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO); assert_ptr_not_null(ps[i], "mallocx() error for alignment=%zu, " "size=%zu (%#zx)", alignment, sz, sz); rsz = sallocx(ps[i], 0); assert_zu_ge(rsz, sz, "Real size smaller than expected for " "alignment=%zu, size=%zu", alignment, sz); assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch for " "alignment=%zu, size=%zu", alignment, sz); assert_ptr_null( (void *)((uintptr_t)ps[i] & (alignment-1)), "%p inadequately aligned for" " alignment=%zu, size=%zu", ps[i], alignment, sz); total += rsz; if (total >= (MAXALIGN << 1)) break; } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { dallocx(ps[i], 0); ps[i] = NULL; } } } } #undef MAXALIGN #undef NITER } TEST_END int main(void) { return (test( test_oom, test_basic, test_alignment_and_size)); } disque-1.0-rc1/deps/jemalloc/test/integration/overflow.c000066400000000000000000000025151264176561100233750ustar00rootroot00000000000000#include "test/jemalloc_test.h" TEST_BEGIN(test_overflow) { unsigned nhchunks; size_t mib[4]; size_t sz, miblen, max_size_class; void *p; sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0, "Unexpected mallctl() error"); miblen = sizeof(mib) / sizeof(size_t); assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib() error"); mib[2] = nhchunks - 1; sz = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0, "Unexpected mallctlbymib() error"); assert_ptr_null(malloc(max_size_class + 1), "Expected OOM due to over-sized allocation request"); assert_ptr_null(malloc(SIZE_T_MAX), "Expected OOM due to over-sized allocation request"); 
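/* The same over-size rejection is expected from calloc() and, below, from realloc() on an existing allocation. */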
assert_ptr_null(calloc(1, max_size_class + 1), "Expected OOM due to over-sized allocation request"); assert_ptr_null(calloc(1, SIZE_T_MAX), "Expected OOM due to over-sized allocation request"); p = malloc(1); assert_ptr_not_null(p, "Unexpected malloc() OOM"); assert_ptr_null(realloc(p, max_size_class + 1), "Expected OOM due to over-sized allocation request"); assert_ptr_null(realloc(p, SIZE_T_MAX), "Expected OOM due to over-sized allocation request"); free(p); } TEST_END int main(void) { return (test( test_overflow)); } disque-1.0-rc1/deps/jemalloc/test/integration/posix_memalign.c000066400000000000000000000050531264176561100245450ustar00rootroot00000000000000#include "test/jemalloc_test.h" #define CHUNK 0x400000 /* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */ #define MAXALIGN ((size_t)0x2000000LU) #define NITER 4 TEST_BEGIN(test_alignment_errors) { size_t alignment; void *p; for (alignment = 0; alignment < sizeof(void *); alignment++) { assert_d_eq(posix_memalign(&p, alignment, 1), EINVAL, "Expected error for invalid alignment %zu", alignment); } for (alignment = sizeof(size_t); alignment < MAXALIGN; alignment <<= 1) { assert_d_ne(posix_memalign(&p, alignment + 1, 1), 0, "Expected error for invalid alignment %zu", alignment + 1); } } TEST_END TEST_BEGIN(test_oom_errors) { size_t alignment, size; void *p; #if LG_SIZEOF_PTR == 3 alignment = UINT64_C(0x8000000000000000); size = UINT64_C(0x8000000000000000); #else alignment = 0x80000000LU; size = 0x80000000LU; #endif assert_d_ne(posix_memalign(&p, alignment, size), 0, "Expected error for posix_memalign(&p, %zu, %zu)", alignment, size); #if LG_SIZEOF_PTR == 3 alignment = UINT64_C(0x4000000000000000); size = UINT64_C(0xc000000000000001); #else alignment = 0x40000000LU; size = 0xc0000001LU; #endif assert_d_ne(posix_memalign(&p, alignment, size), 0, "Expected error for posix_memalign(&p, %zu, %zu)", alignment, size); alignment = 0x10LU; #if LG_SIZEOF_PTR == 3 size = UINT64_C(0xfffffffffffffff0); #else size = 0xfffffff0LU; #endif assert_d_ne(posix_memalign(&p, alignment, size), 0, "Expected error for posix_memalign(&p, %zu, %zu)", alignment, size); } TEST_END TEST_BEGIN(test_alignment_and_size) { size_t alignment, size, total; unsigned i; int err; void *ps[NITER]; for (i = 0; i < NITER; i++) ps[i] = NULL; for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) { total = 0; for (size = 1; size < 3 * alignment && size < (1U << 31); size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { for (i = 0; i < NITER; i++) { err = posix_memalign(&ps[i], alignment, size); if (err) { char buf[BUFERROR_BUF]; buferror(get_errno(), buf, sizeof(buf)); test_fail( "Error for alignment=%zu, " "size=%zu (%#zx): %s", alignment, size, size, buf); } total += malloc_usable_size(ps[i]); if (total >= (MAXALIGN << 1)) break; } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { free(ps[i]); ps[i] = NULL; } } } } } TEST_END int main(void) { return (test( test_alignment_errors, test_oom_errors, test_alignment_and_size)); } disque-1.0-rc1/deps/jemalloc/test/integration/rallocx.c000066400000000000000000000104461264176561100232000ustar00rootroot00000000000000#include "test/jemalloc_test.h" TEST_BEGIN(test_grow_and_shrink) { void *p, *q; size_t tsz; #define NCYCLES 3 unsigned i, j; #define NSZS 2500 size_t szs[NSZS]; #define MAXSZ ZU(12 * 1024 * 1024) p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); szs[0] = sallocx(p, 0); for (i = 0; i < NCYCLES; i++) { for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) { q = rallocx(p, szs[j-1]+1, 0); 
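/* Requesting one byte more than the previous usable size makes each iteration step up to the next size class. */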
assert_ptr_not_null(q, "Unexpected rallocx() error for size=%zu-->%zu", szs[j-1], szs[j-1]+1); szs[j] = sallocx(q, 0); assert_zu_ne(szs[j], szs[j-1]+1, "Expected size to be at least: %zu", szs[j-1]+1); p = q; } for (j--; j > 0; j--) { q = rallocx(p, szs[j-1], 0); assert_ptr_not_null(q, "Unexpected rallocx() error for size=%zu-->%zu", szs[j], szs[j-1]); tsz = sallocx(q, 0); assert_zu_eq(tsz, szs[j-1], "Expected size=%zu, got size=%zu", szs[j-1], tsz); p = q; } } dallocx(p, 0); #undef MAXSZ #undef NSZS #undef NCYCLES } TEST_END static bool validate_fill(const void *p, uint8_t c, size_t offset, size_t len) { bool ret = false; const uint8_t *buf = (const uint8_t *)p; size_t i; for (i = 0; i < len; i++) { uint8_t b = buf[offset+i]; if (b != c) { test_fail("Allocation at %p (len=%zu) contains %#x " "rather than %#x at offset %zu", p, len, b, c, offset+i); ret = true; } } return (ret); } TEST_BEGIN(test_zero) { void *p, *q; size_t psz, qsz, i, j; size_t start_sizes[] = {1, 3*1024, 63*1024, 4095*1024}; #define FILL_BYTE 0xaaU #define RANGE 2048 for (i = 0; i < sizeof(start_sizes)/sizeof(size_t); i++) { size_t start_size = start_sizes[i]; p = mallocx(start_size, MALLOCX_ZERO); assert_ptr_not_null(p, "Unexpected mallocx() error"); psz = sallocx(p, 0); assert_false(validate_fill(p, 0, 0, psz), "Expected zeroed memory"); memset(p, FILL_BYTE, psz); assert_false(validate_fill(p, FILL_BYTE, 0, psz), "Expected filled memory"); for (j = 1; j < RANGE; j++) { q = rallocx(p, start_size+j, MALLOCX_ZERO); assert_ptr_not_null(q, "Unexpected rallocx() error"); qsz = sallocx(q, 0); if (q != p || qsz != psz) { assert_false(validate_fill(q, FILL_BYTE, 0, psz), "Expected filled memory"); assert_false(validate_fill(q, 0, psz, qsz-psz), "Expected zeroed memory"); } if (psz != qsz) { memset((void *)((uintptr_t)q+psz), FILL_BYTE, qsz-psz); psz = qsz; } p = q; } assert_false(validate_fill(p, FILL_BYTE, 0, psz), "Expected filled memory"); dallocx(p, 0); } #undef FILL_BYTE } TEST_END TEST_BEGIN(test_align) { void *p, *q; size_t align; #define MAX_ALIGN (ZU(1) << 25) align = ZU(1); p = mallocx(1, MALLOCX_ALIGN(align)); assert_ptr_not_null(p, "Unexpected mallocx() error"); for (align <<= 1; align <= MAX_ALIGN; align <<= 1) { q = rallocx(p, 1, MALLOCX_ALIGN(align)); assert_ptr_not_null(q, "Unexpected rallocx() error for align=%zu", align); assert_ptr_null( (void *)((uintptr_t)q & (align-1)), "%p inadequately aligned for align=%zu", q, align); p = q; } dallocx(p, 0); #undef MAX_ALIGN } TEST_END TEST_BEGIN(test_lg_align_and_zero) { void *p, *q; size_t lg_align, sz; #define MAX_LG_ALIGN 25 #define MAX_VALIDATE (ZU(1) << 22) lg_align = ZU(0); p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); assert_ptr_not_null(p, "Unexpected mallocx() error"); for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) { q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); assert_ptr_not_null(q, "Unexpected rallocx() error for lg_align=%zu", lg_align); assert_ptr_null( (void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)), "%p inadequately aligned for lg_align=%zu", q, lg_align); sz = sallocx(q, 0); if ((sz << 1) <= MAX_VALIDATE) { assert_false(validate_fill(q, 0, 0, sz), "Expected zeroed memory"); } else { assert_false(validate_fill(q, 0, 0, MAX_VALIDATE), "Expected zeroed memory"); assert_false(validate_fill( (void *)((uintptr_t)q+sz-MAX_VALIDATE), 0, 0, MAX_VALIDATE), "Expected zeroed memory"); } p = q; } dallocx(p, 0); #undef MAX_VALIDATE #undef MAX_LG_ALIGN } TEST_END int main(void) { return (test( test_grow_and_shrink, test_zero, 
test_align, test_lg_align_and_zero)); } disque-1.0-rc1/deps/jemalloc/test/integration/sdallocx.c000066400000000000000000000020011264176561100233310ustar00rootroot00000000000000#include "test/jemalloc_test.h" #define MAXALIGN (((size_t)1) << 25) #define NITER 4 TEST_BEGIN(test_basic) { void *ptr = mallocx(64, 0); sdallocx(ptr, 64, 0); } TEST_END TEST_BEGIN(test_alignment_and_size) { size_t nsz, sz, alignment, total; unsigned i; void *ps[NITER]; for (i = 0; i < NITER; i++) ps[i] = NULL; for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) { total = 0; for (sz = 1; sz < 3 * alignment && sz < (1U << 31); sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { for (i = 0; i < NITER; i++) { nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO); ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO); total += nsz; if (total >= (MAXALIGN << 1)) break; } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { sdallocx(ps[i], sz, MALLOCX_ALIGN(alignment)); ps[i] = NULL; } } } } } TEST_END int main(void) { return (test( test_basic, test_alignment_and_size)); } disque-1.0-rc1/deps/jemalloc/test/integration/thread_arena.c000066400000000000000000000030061264176561100241430ustar00rootroot00000000000000#include "test/jemalloc_test.h" #define NTHREADS 10 void * thd_start(void *arg) { unsigned main_arena_ind = *(unsigned *)arg; void *p; unsigned arena_ind; size_t size; int err; p = malloc(1); assert_ptr_not_null(p, "Error in malloc()"); free(p); size = sizeof(arena_ind); if ((err = mallctl("thread.arena", &arena_ind, &size, &main_arena_ind, sizeof(main_arena_ind)))) { char buf[BUFERROR_BUF]; buferror(err, buf, sizeof(buf)); test_fail("Error in mallctl(): %s", buf); } size = sizeof(arena_ind); if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) { char buf[BUFERROR_BUF]; buferror(err, buf, sizeof(buf)); test_fail("Error in mallctl(): %s", buf); } assert_u_eq(arena_ind, main_arena_ind, "Arena index should be same as for main thread"); return (NULL); } TEST_BEGIN(test_thread_arena) { void *p; unsigned arena_ind; size_t size; int err; thd_t thds[NTHREADS]; unsigned i; p = malloc(1); assert_ptr_not_null(p, "Error in malloc()"); size = sizeof(arena_ind); if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) { char buf[BUFERROR_BUF]; buferror(err, buf, sizeof(buf)); test_fail("Error in mallctl(): %s", buf); } for (i = 0; i < NTHREADS; i++) { thd_create(&thds[i], thd_start, (void *)&arena_ind); } for (i = 0; i < NTHREADS; i++) { intptr_t join_ret; thd_join(thds[i], (void *)&join_ret); assert_zd_eq(join_ret, 0, "Unexpected thread join error"); } } TEST_END int main(void) { return (test( test_thread_arena)); } disque-1.0-rc1/deps/jemalloc/test/integration/thread_tcache_enabled.c000066400000000000000000000047471264176561100257730ustar00rootroot00000000000000#include "test/jemalloc_test.h" static const bool config_tcache = #ifdef JEMALLOC_TCACHE true #else false #endif ; void * thd_start(void *arg) { int err; size_t sz; bool e0, e1; sz = sizeof(bool); if ((err = mallctl("thread.tcache.enabled", &e0, &sz, NULL, 0))) { if (err == ENOENT) { assert_false(config_tcache, "ENOENT should only be returned if tcache is " "disabled"); } goto label_ENOENT; } if (e0) { e1 = false; assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); } e1 = true; assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, "Unexpected mallctl() error"); assert_false(e0, "tcache should be disabled"); e1 = true; 
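/* Each write to "thread.tcache.enabled" installs e1 and returns the previous setting through e0, so a single mallctl() both toggles the tcache and lets the old state be asserted. */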
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); e1 = false; assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); e1 = false; assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, "Unexpected mallctl() error"); assert_false(e0, "tcache should be disabled"); free(malloc(1)); e1 = true; assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, "Unexpected mallctl() error"); assert_false(e0, "tcache should be disabled"); free(malloc(1)); e1 = true; assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); free(malloc(1)); e1 = false; assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); free(malloc(1)); e1 = false; assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, "Unexpected mallctl() error"); assert_false(e0, "tcache should be disabled"); free(malloc(1)); return (NULL); label_ENOENT: test_skip("\"thread.tcache.enabled\" mallctl not available"); return (NULL); } TEST_BEGIN(test_main_thread) { thd_start(NULL); } TEST_END TEST_BEGIN(test_subthread) { thd_t thd; thd_create(&thd, thd_start, NULL); thd_join(thd, NULL); } TEST_END int main(void) { /* Run tests multiple times to check for bad interactions. */ return (test( test_main_thread, test_subthread, test_main_thread, test_subthread, test_main_thread)); } disque-1.0-rc1/deps/jemalloc/test/integration/xallocx.c000066400000000000000000000270251264176561100232070ustar00rootroot00000000000000#include "test/jemalloc_test.h" TEST_BEGIN(test_same_size) { void *p; size_t sz, tsz; p = mallocx(42, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); sz = sallocx(p, 0); tsz = xallocx(p, sz, 0, 0); assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); dallocx(p, 0); } TEST_END TEST_BEGIN(test_extra_no_move) { void *p; size_t sz, tsz; p = mallocx(42, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); sz = sallocx(p, 0); tsz = xallocx(p, sz, sz-42, 0); assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); dallocx(p, 0); } TEST_END TEST_BEGIN(test_no_move_fail) { void *p; size_t sz, tsz; p = mallocx(42, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); sz = sallocx(p, 0); tsz = xallocx(p, sz + 5, 0, 0); assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); dallocx(p, 0); } TEST_END static unsigned get_nsizes_impl(const char *cmd) { unsigned ret; size_t z; z = sizeof(unsigned); assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0, "Unexpected mallctl(\"%s\", ...) failure", cmd); return (ret); } static unsigned get_nsmall(void) { return (get_nsizes_impl("arenas.nbins")); } static unsigned get_nlarge(void) { return (get_nsizes_impl("arenas.nlruns")); } static unsigned get_nhuge(void) { return (get_nsizes_impl("arenas.nhchunks")); } static size_t get_size_impl(const char *cmd, size_t ind) { size_t ret; size_t z; size_t mib[4]; size_t miblen = 4; z = sizeof(size_t); assert_d_eq(mallctlnametomib(cmd, mib, &miblen), 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = ind; z = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) 
failure", cmd, ind); return (ret); } static size_t get_small_size(size_t ind) { return (get_size_impl("arenas.bin.0.size", ind)); } static size_t get_large_size(size_t ind) { return (get_size_impl("arenas.lrun.0.size", ind)); } static size_t get_huge_size(size_t ind) { return (get_size_impl("arenas.hchunk.0.size", ind)); } TEST_BEGIN(test_size) { size_t small0, hugemax; void *p; /* Get size classes. */ small0 = get_small_size(0); hugemax = get_huge_size(get_nhuge()-1); p = mallocx(small0, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); /* Test smallest supported size. */ assert_zu_eq(xallocx(p, 1, 0, 0), small0, "Unexpected xallocx() behavior"); /* Test largest supported size. */ assert_zu_le(xallocx(p, hugemax, 0, 0), hugemax, "Unexpected xallocx() behavior"); /* Test size overflow. */ assert_zu_le(xallocx(p, hugemax+1, 0, 0), hugemax, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), hugemax, "Unexpected xallocx() behavior"); dallocx(p, 0); } TEST_END TEST_BEGIN(test_size_extra_overflow) { size_t small0, hugemax; void *p; /* Get size classes. */ small0 = get_small_size(0); hugemax = get_huge_size(get_nhuge()-1); p = mallocx(small0, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); /* Test overflows that can be resolved by clamping extra. */ assert_zu_le(xallocx(p, hugemax-1, 2, 0), hugemax, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, hugemax, 1, 0), hugemax, "Unexpected xallocx() behavior"); /* Test overflow such that hugemax-size underflows. */ assert_zu_le(xallocx(p, hugemax+1, 2, 0), hugemax, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, hugemax+2, 3, 0), hugemax, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), hugemax, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), hugemax, "Unexpected xallocx() behavior"); dallocx(p, 0); } TEST_END TEST_BEGIN(test_extra_small) { size_t small0, small1, hugemax; void *p; /* Get size classes. */ small0 = get_small_size(0); small1 = get_small_size(1); hugemax = get_huge_size(get_nhuge()-1); p = mallocx(small0, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_zu_eq(xallocx(p, small1, 0, 0), small0, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, small1, 0, 0), small0, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, small0, small1 - small0, 0), small0, "Unexpected xallocx() behavior"); /* Test size+extra overflow. */ assert_zu_eq(xallocx(p, small0, hugemax - small0 + 1, 0), small0, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0, "Unexpected xallocx() behavior"); dallocx(p, 0); } TEST_END TEST_BEGIN(test_extra_large) { size_t smallmax, large0, large1, large2, huge0, hugemax; void *p; /* Get size classes. */ smallmax = get_small_size(get_nsmall()-1); large0 = get_large_size(0); large1 = get_large_size(1); large2 = get_large_size(2); huge0 = get_huge_size(0); hugemax = get_huge_size(get_nhuge()-1); p = mallocx(large2, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_zu_eq(xallocx(p, large2, 0, 0), large2, "Unexpected xallocx() behavior"); /* Test size decrease with zero extra. */ assert_zu_eq(xallocx(p, large0, 0, 0), large0, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, smallmax, 0, 0), large0, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, large2, 0, 0), large2, "Unexpected xallocx() behavior"); /* Test size decrease with non-zero extra. 
*/ assert_zu_eq(xallocx(p, large0, large2 - large0, 0), large2, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, large1, large2 - large1, 0), large2, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, large0, large1 - large0, 0), large1, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, smallmax, large0 - smallmax, 0), large0, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, large0, 0, 0), large0, "Unexpected xallocx() behavior"); /* Test size increase with zero extra. */ assert_zu_eq(xallocx(p, large2, 0, 0), large2, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, huge0, 0, 0), large2, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, large0, 0, 0), large0, "Unexpected xallocx() behavior"); /* Test size increase with non-zero extra. */ assert_zu_lt(xallocx(p, large0, huge0 - large0, 0), huge0, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, large0, 0, 0), large0, "Unexpected xallocx() behavior"); /* Test size increase with non-zero extra. */ assert_zu_eq(xallocx(p, large0, large2 - large0, 0), large2, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, large2, 0, 0), large2, "Unexpected xallocx() behavior"); /* Test size+extra overflow. */ assert_zu_lt(xallocx(p, large2, hugemax - large2 + 1, 0), huge0, "Unexpected xallocx() behavior"); dallocx(p, 0); } TEST_END TEST_BEGIN(test_extra_huge) { size_t largemax, huge0, huge1, huge2, hugemax; void *p; /* Get size classes. */ largemax = get_large_size(get_nlarge()-1); huge0 = get_huge_size(0); huge1 = get_huge_size(1); huge2 = get_huge_size(2); hugemax = get_huge_size(get_nhuge()-1); p = mallocx(huge2, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_zu_eq(xallocx(p, huge2, 0, 0), huge2, "Unexpected xallocx() behavior"); /* Test size decrease with zero extra. */ assert_zu_ge(xallocx(p, huge0, 0, 0), huge0, "Unexpected xallocx() behavior"); assert_zu_ge(xallocx(p, largemax, 0, 0), huge0, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, huge2, 0, 0), huge2, "Unexpected xallocx() behavior"); /* Test size decrease with non-zero extra. */ assert_zu_eq(xallocx(p, huge0, huge2 - huge0, 0), huge2, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, huge1, huge2 - huge1, 0), huge2, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, huge0, huge1 - huge0, 0), huge1, "Unexpected xallocx() behavior"); assert_zu_ge(xallocx(p, largemax, huge0 - largemax, 0), huge0, "Unexpected xallocx() behavior"); assert_zu_ge(xallocx(p, huge0, 0, 0), huge0, "Unexpected xallocx() behavior"); /* Test size increase with zero extra. */ assert_zu_le(xallocx(p, huge2, 0, 0), huge2, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, hugemax+1, 0, 0), huge2, "Unexpected xallocx() behavior"); assert_zu_ge(xallocx(p, huge0, 0, 0), huge0, "Unexpected xallocx() behavior"); /* Test size increase with non-zero extra. */ assert_zu_le(xallocx(p, huge0, SIZE_T_MAX - huge0, 0), hugemax, "Unexpected xallocx() behavior"); assert_zu_ge(xallocx(p, huge0, 0, 0), huge0, "Unexpected xallocx() behavior"); /* Test size increase with non-zero extra. */ assert_zu_le(xallocx(p, huge0, huge2 - huge0, 0), huge2, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, huge2, 0, 0), huge2, "Unexpected xallocx() behavior"); /* Test size+extra overflow. 
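When size+extra would exceed the maximum size class, extra is expected to be clamped internally so that the resulting allocation never grows past hugemax.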
*/ assert_zu_le(xallocx(p, huge2, hugemax - huge2 + 1, 0), hugemax, "Unexpected xallocx() behavior"); dallocx(p, 0); } TEST_END static void print_filled_extents(const void *p, uint8_t c, size_t len) { const uint8_t *pc = (const uint8_t *)p; size_t i, range0; uint8_t c0; malloc_printf(" p=%p, c=%#x, len=%zu:", p, c, len); range0 = 0; c0 = pc[0]; for (i = 0; i < len; i++) { if (pc[i] != c0) { malloc_printf(" %#x[%zu..%zu)", c0, range0, i); range0 = i; c0 = pc[i]; } } malloc_printf(" %#x[%zu..%zu)\n", c0, range0, i); } static bool validate_fill(const void *p, uint8_t c, size_t offset, size_t len) { const uint8_t *pc = (const uint8_t *)p; bool err; size_t i; for (i = offset, err = false; i < offset+len; i++) { if (pc[i] != c) err = true; } if (err) print_filled_extents(p, c, offset + len); return (err); } static void test_zero(size_t szmin, size_t szmax) { size_t sz, nsz; void *p; #define FILL_BYTE 0x7aU sz = szmax; p = mallocx(sz, MALLOCX_ZERO); assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_false(validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu", sz); /* * Fill with non-zero so that non-debug builds are more likely to detect * errors. */ memset(p, FILL_BYTE, sz); assert_false(validate_fill(p, FILL_BYTE, 0, sz), "Memory not filled: sz=%zu", sz); /* Shrink in place so that we can expect growing in place to succeed. */ sz = szmin; assert_zu_eq(xallocx(p, sz, 0, MALLOCX_ZERO), sz, "Unexpected xallocx() error"); assert_false(validate_fill(p, FILL_BYTE, 0, sz), "Memory not filled: sz=%zu", sz); for (sz = szmin; sz < szmax; sz = nsz) { nsz = nallocx(sz+1, MALLOCX_ZERO); assert_zu_eq(xallocx(p, sz+1, 0, MALLOCX_ZERO), nsz, "Unexpected xallocx() failure"); assert_false(validate_fill(p, FILL_BYTE, 0, sz), "Memory not filled: sz=%zu", sz); assert_false(validate_fill(p, 0x00, sz, nsz-sz), "Memory not filled: sz=%zu, nsz-sz=%zu", sz, nsz-sz); memset((void *)((uintptr_t)p + sz), FILL_BYTE, nsz-sz); assert_false(validate_fill(p, FILL_BYTE, 0, nsz), "Memory not filled: nsz=%zu", nsz); } dallocx(p, 0); } TEST_BEGIN(test_zero_large) { size_t large0, largemax; /* Get size classes. */ large0 = get_large_size(0); largemax = get_large_size(get_nlarge()-1); test_zero(large0, largemax); } TEST_END TEST_BEGIN(test_zero_huge) { size_t huge0, huge1; /* Get size classes. */ huge0 = get_huge_size(0); huge1 = get_huge_size(1); test_zero(huge1, huge0 * 2); } TEST_END int main(void) { return (test( test_same_size, test_extra_no_move, test_no_move_fail, test_size, test_size_extra_overflow, test_extra_small, test_extra_large, test_extra_huge, test_zero_large, test_zero_huge)); } disque-1.0-rc1/deps/jemalloc/test/src/000077500000000000000000000000001264176561100176275ustar00rootroot00000000000000disque-1.0-rc1/deps/jemalloc/test/src/SFMT.c000066400000000000000000000503271264176561100205530ustar00rootroot00000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file SFMT.c * @brief SIMD oriented Fast Mersenne Twister(SFMT) * * @author Mutsuo Saito (Hiroshima University) * @author Makoto Matsumoto (Hiroshima University) * * Copyright (C) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * The new BSD License is applied to this software, see LICENSE.txt */ #define SFMT_C_ #include "test/jemalloc_test.h" #include "test/SFMT-params.h" #if defined(JEMALLOC_BIG_ENDIAN) && !defined(BIG_ENDIAN64) #define BIG_ENDIAN64 1 #endif #if defined(__BIG_ENDIAN__) && !defined(__amd64) && !defined(BIG_ENDIAN64) #define BIG_ENDIAN64 1 #endif #if defined(HAVE_ALTIVEC) && !defined(BIG_ENDIAN64) #define BIG_ENDIAN64 1 #endif #if defined(ONLY64) && !defined(BIG_ENDIAN64) #if defined(__GNUC__) #error "-DONLY64 must be specified with -DBIG_ENDIAN64" #endif #undef ONLY64 #endif /*------------------------------------------------------ 128-bit SIMD data type for Altivec, SSE2 or standard C ------------------------------------------------------*/ #if defined(HAVE_ALTIVEC) /** 128-bit data structure */ union W128_T { vector unsigned int s; uint32_t u[4]; }; /** 128-bit data type */ typedef union W128_T w128_t; #elif defined(HAVE_SSE2) /** 128-bit data structure */ union W128_T { __m128i si; uint32_t u[4]; }; /** 128-bit data type */ typedef union W128_T w128_t; #else /** 128-bit data structure */ struct W128_T { uint32_t u[4]; }; /** 128-bit data type */ typedef struct W128_T w128_t; #endif struct sfmt_s { /** the 128-bit internal state array */ w128_t sfmt[N]; /** index counter to the 32-bit internal state array */ int idx; /** a flag: it is 0 if and only if the internal state is not yet * initialized. 
*/ int initialized; }; /*-------------------------------------- FILE GLOBAL VARIABLES internal state, index counter and flag --------------------------------------*/ /** a parity check vector which certificate the period of 2^{MEXP} */ static uint32_t parity[4] = {PARITY1, PARITY2, PARITY3, PARITY4}; /*---------------- STATIC FUNCTIONS ----------------*/ JEMALLOC_INLINE_C int idxof(int i); #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift); JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift); #endif JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx); JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size); JEMALLOC_INLINE_C uint32_t func1(uint32_t x); JEMALLOC_INLINE_C uint32_t func2(uint32_t x); static void period_certification(sfmt_t *ctx); #if defined(BIG_ENDIAN64) && !defined(ONLY64) JEMALLOC_INLINE_C void swap(w128_t *array, int size); #endif #if defined(HAVE_ALTIVEC) #include "test/SFMT-alti.h" #elif defined(HAVE_SSE2) #include "test/SFMT-sse2.h" #endif /** * This function simulate a 64-bit index of LITTLE ENDIAN * in BIG ENDIAN machine. */ #ifdef ONLY64 JEMALLOC_INLINE_C int idxof(int i) { return i ^ 1; } #else JEMALLOC_INLINE_C int idxof(int i) { return i; } #endif /** * This function simulates SIMD 128-bit right shift by the standard C. * The 128-bit integer given in in is shifted by (shift * 8) bits. * This function simulates the LITTLE ENDIAN SIMD. * @param out the output of this function * @param in the 128-bit data to be shifted * @param shift the shift value */ #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) #ifdef ONLY64 JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) { uint64_t th, tl, oh, ol; th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]); oh = th >> (shift * 8); ol = tl >> (shift * 8); ol |= th << (64 - shift * 8); out->u[0] = (uint32_t)(ol >> 32); out->u[1] = (uint32_t)ol; out->u[2] = (uint32_t)(oh >> 32); out->u[3] = (uint32_t)oh; } #else JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) { uint64_t th, tl, oh, ol; th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]); oh = th >> (shift * 8); ol = tl >> (shift * 8); ol |= th << (64 - shift * 8); out->u[1] = (uint32_t)(ol >> 32); out->u[0] = (uint32_t)ol; out->u[3] = (uint32_t)(oh >> 32); out->u[2] = (uint32_t)oh; } #endif /** * This function simulates SIMD 128-bit left shift by the standard C. * The 128-bit integer given in in is shifted by (shift * 8) bits. * This function simulates the LITTLE ENDIAN SIMD. 
* @param out the output of this function * @param in the 128-bit data to be shifted * @param shift the shift value */ #ifdef ONLY64 JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) { uint64_t th, tl, oh, ol; th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]); oh = th << (shift * 8); ol = tl << (shift * 8); oh |= tl >> (64 - shift * 8); out->u[0] = (uint32_t)(ol >> 32); out->u[1] = (uint32_t)ol; out->u[2] = (uint32_t)(oh >> 32); out->u[3] = (uint32_t)oh; } #else JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) { uint64_t th, tl, oh, ol; th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]); oh = th << (shift * 8); ol = tl << (shift * 8); oh |= tl >> (64 - shift * 8); out->u[1] = (uint32_t)(ol >> 32); out->u[0] = (uint32_t)ol; out->u[3] = (uint32_t)(oh >> 32); out->u[2] = (uint32_t)oh; } #endif #endif /** * This function represents the recursion formula. * @param r output * @param a a 128-bit part of the internal state array * @param b a 128-bit part of the internal state array * @param c a 128-bit part of the internal state array * @param d a 128-bit part of the internal state array */ #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) #ifdef ONLY64 JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, w128_t *d) { w128_t x; w128_t y; lshift128(&x, a, SL2); rshift128(&y, c, SR2); r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0] ^ (d->u[0] << SL1); r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1] ^ (d->u[1] << SL1); r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2] ^ (d->u[2] << SL1); r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3] ^ (d->u[3] << SL1); } #else JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, w128_t *d) { w128_t x; w128_t y; lshift128(&x, a, SL2); rshift128(&y, c, SR2); r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0] ^ (d->u[0] << SL1); r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1] ^ (d->u[1] << SL1); r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2] ^ (d->u[2] << SL1); r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3] ^ (d->u[3] << SL1); } #endif #endif #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) /** * This function fills the internal state array with pseudorandom * integers. */ JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx) { int i; w128_t *r1, *r2; r1 = &ctx->sfmt[N - 2]; r2 = &ctx->sfmt[N - 1]; for (i = 0; i < N - POS1; i++) { do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2); r1 = r2; r2 = &ctx->sfmt[i]; } for (; i < N; i++) { do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1 - N], r1, r2); r1 = r2; r2 = &ctx->sfmt[i]; } } /** * This function fills the user-specified array with pseudorandom * integers. * * @param array an 128-bit array to be filled by pseudorandom numbers. * @param size number of 128-bit pseudorandom numbers to be generated. 
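* @note (clarifying addition, not in the upstream comment) size must be
* at least N 128-bit words; the final loops copy the tail of the
* generated array back into ctx->sfmt, so the internal state remains
* consistent for subsequent calls.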
*/ JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { int i, j; w128_t *r1, *r2; r1 = &ctx->sfmt[N - 2]; r2 = &ctx->sfmt[N - 1]; for (i = 0; i < N - POS1; i++) { do_recursion(&array[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2); r1 = r2; r2 = &array[i]; } for (; i < N; i++) { do_recursion(&array[i], &ctx->sfmt[i], &array[i + POS1 - N], r1, r2); r1 = r2; r2 = &array[i]; } for (; i < size - N; i++) { do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2); r1 = r2; r2 = &array[i]; } for (j = 0; j < 2 * N - size; j++) { ctx->sfmt[j] = array[j + size - N]; } for (; i < size; i++, j++) { do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2); r1 = r2; r2 = &array[i]; ctx->sfmt[j] = array[i]; } } #endif #if defined(BIG_ENDIAN64) && !defined(ONLY64) && !defined(HAVE_ALTIVEC) JEMALLOC_INLINE_C void swap(w128_t *array, int size) { int i; uint32_t x, y; for (i = 0; i < size; i++) { x = array[i].u[0]; y = array[i].u[2]; array[i].u[0] = array[i].u[1]; array[i].u[2] = array[i].u[3]; array[i].u[1] = x; array[i].u[3] = y; } } #endif /** * This function represents a function used in the initialization * by init_by_array * @param x 32-bit integer * @return 32-bit integer */ static uint32_t func1(uint32_t x) { return (x ^ (x >> 27)) * (uint32_t)1664525UL; } /** * This function represents a function used in the initialization * by init_by_array * @param x 32-bit integer * @return 32-bit integer */ static uint32_t func2(uint32_t x) { return (x ^ (x >> 27)) * (uint32_t)1566083941UL; } /** * This function certifies the period of 2^{MEXP} */ static void period_certification(sfmt_t *ctx) { int inner = 0; int i, j; uint32_t work; uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; for (i = 0; i < 4; i++) inner ^= psfmt32[idxof(i)] & parity[i]; for (i = 16; i > 0; i >>= 1) inner ^= inner >> i; inner &= 1; /* check OK */ if (inner == 1) { return; } /* check NG, and modification */ for (i = 0; i < 4; i++) { work = 1; for (j = 0; j < 32; j++) { if ((work & parity[i]) != 0) { psfmt32[idxof(i)] ^= work; return; } work = work << 1; } } } /*---------------- PUBLIC FUNCTIONS ----------------*/ /** * This function returns the identification string. * The string shows the word size, the Mersenne exponent, * and all parameters of this generator. */ const char *get_idstring(void) { return IDSTR; } /** * This function returns the minimum size of array used for \b * fill_array32() function. * @return minimum size of array used for fill_array32() function. */ int get_min_array_size32(void) { return N32; } /** * This function returns the minimum size of array used for \b * fill_array64() function. * @return minimum size of array used for fill_array64() function. */ int get_min_array_size64(void) { return N64; } #ifndef ONLY64 /** * This function generates and returns a 32-bit pseudorandom number. * init_gen_rand or init_by_array must be called before this function. * @return 32-bit pseudorandom number */ uint32_t gen_rand32(sfmt_t *ctx) { uint32_t r; uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; assert(ctx->initialized); if (ctx->idx >= N32) { gen_rand_all(ctx); ctx->idx = 0; } r = psfmt32[ctx->idx++]; return r; } /* Generate a random integer in [0..limit). */ uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit) { uint32_t ret, above; above = 0xffffffffU - (0xffffffffU % limit); while (1) { ret = gen_rand32(ctx); if (ret < above) { ret %= limit; break; } } return ret; } #endif /** * This function generates and returns a 64-bit pseudorandom number. 
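* (A minimal usage sketch, illustrative only and not part of the
* upstream SFMT distribution; the seed is arbitrary and the NULL check
* on init_gen_rand() is omitted:
*
*     sfmt_t *ctx = init_gen_rand(12345);
*     uint64_t x = gen_rand64(ctx);
*     fini_gen_rand(ctx);
*
* Note also that gen_rand32_range() above and gen_rand64_range() below
* avoid modulo bias by rejection sampling: only draws below the largest
* exact multiple of limit are accepted and reduced with %, so every
* value in [0..limit) is equally likely.)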
* init_gen_rand or init_by_array must be called before this function. * The function gen_rand64 should not be called after gen_rand32, * unless an initialization is again executed. * @return 64-bit pseudorandom number */ uint64_t gen_rand64(sfmt_t *ctx) { #if defined(BIG_ENDIAN64) && !defined(ONLY64) uint32_t r1, r2; uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; #else uint64_t r; uint64_t *psfmt64 = (uint64_t *)&ctx->sfmt[0].u[0]; #endif assert(ctx->initialized); assert(ctx->idx % 2 == 0); if (ctx->idx >= N32) { gen_rand_all(ctx); ctx->idx = 0; } #if defined(BIG_ENDIAN64) && !defined(ONLY64) r1 = psfmt32[ctx->idx]; r2 = psfmt32[ctx->idx + 1]; ctx->idx += 2; return ((uint64_t)r2 << 32) | r1; #else r = psfmt64[ctx->idx / 2]; ctx->idx += 2; return r; #endif } /* Generate a random integer in [0..limit). */ uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit) { uint64_t ret, above; above = KQU(0xffffffffffffffff) - (KQU(0xffffffffffffffff) % limit); while (1) { ret = gen_rand64(ctx); if (ret < above) { ret %= limit; break; } } return ret; } #ifndef ONLY64 /** * This function generates pseudorandom 32-bit integers in the * specified array[] by one call. The number of pseudorandom integers * is specified by the argument size, which must be at least 624 and a * multiple of four. The generation by this function is much faster * than the following gen_rand function. * * For initialization, init_gen_rand or init_by_array must be called * before the first call of this function. This function can not be * used after calling gen_rand function, without initialization. * * @param array an array where pseudorandom 32-bit integers are filled * by this function. The pointer to the array must be \b "aligned" * (namely, must be a multiple of 16) in the SIMD version, since it * refers to the address of a 128-bit integer. In the standard C * version, the pointer is arbitrary. * * @param size the number of 32-bit pseudorandom integers to be * generated. size must be a multiple of 4, and greater than or equal * to (MEXP / 128 + 1) * 4. * * @note \b memalign or \b posix_memalign is available to get aligned * memory. Mac OSX doesn't have these functions, but \b malloc of OSX * returns the pointer to the aligned memory block. */ void fill_array32(sfmt_t *ctx, uint32_t *array, int size) { assert(ctx->initialized); assert(ctx->idx == N32); assert(size % 4 == 0); assert(size >= N32); gen_rand_array(ctx, (w128_t *)array, size / 4); ctx->idx = N32; } #endif /** * This function generates pseudorandom 64-bit integers in the * specified array[] by one call. The number of pseudorandom integers * is specified by the argument size, which must be at least 312 and a * multiple of two. The generation by this function is much faster * than the following gen_rand function. * * For initialization, init_gen_rand or init_by_array must be called * before the first call of this function. This function can not be * used after calling gen_rand function, without initialization. * * @param array an array where pseudorandom 64-bit integers are filled * by this function. The pointer to the array must be "aligned" * (namely, must be a multiple of 16) in the SIMD version, since it * refers to the address of a 128-bit integer. In the standard C * version, the pointer is arbitrary. * * @param size the number of 64-bit pseudorandom integers to be * generated. size must be a multiple of 2, and greater than or equal * to (MEXP / 128 + 1) * 2 * * @note \b memalign or \b posix_memalign is available to get aligned * memory. 
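* (Illustrative sketch, not from the upstream comment, of one portable
* way to obtain a suitably aligned buffer on a freshly initialized
* context; error checking omitted:
*
*     uint64_t *buf; int n = get_min_array_size64();
*     posix_memalign((void **)&buf, 16, n * sizeof(uint64_t));
*     fill_array64(ctx, buf, n);
* )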
Mac OSX doesn't have these functions, but \b malloc of OSX * returns the pointer to the aligned memory block. */ void fill_array64(sfmt_t *ctx, uint64_t *array, int size) { assert(ctx->initialized); assert(ctx->idx == N32); assert(size % 2 == 0); assert(size >= N64); gen_rand_array(ctx, (w128_t *)array, size / 2); ctx->idx = N32; #if defined(BIG_ENDIAN64) && !defined(ONLY64) swap((w128_t *)array, size /2); #endif } /** * This function initializes the internal state array with a 32-bit * integer seed. * * @param seed a 32-bit integer used as the seed. */ sfmt_t *init_gen_rand(uint32_t seed) { void *p; sfmt_t *ctx; int i; uint32_t *psfmt32; if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) { return NULL; } ctx = (sfmt_t *)p; psfmt32 = &ctx->sfmt[0].u[0]; psfmt32[idxof(0)] = seed; for (i = 1; i < N32; i++) { psfmt32[idxof(i)] = 1812433253UL * (psfmt32[idxof(i - 1)] ^ (psfmt32[idxof(i - 1)] >> 30)) + i; } ctx->idx = N32; period_certification(ctx); ctx->initialized = 1; return ctx; } /** * This function initializes the internal state array, * with an array of 32-bit integers used as the seeds * @param init_key the array of 32-bit integers, used as a seed. * @param key_length the length of init_key. */ sfmt_t *init_by_array(uint32_t *init_key, int key_length) { void *p; sfmt_t *ctx; int i, j, count; uint32_t r; int lag; int mid; int size = N * 4; uint32_t *psfmt32; if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) { return NULL; } ctx = (sfmt_t *)p; psfmt32 = &ctx->sfmt[0].u[0]; if (size >= 623) { lag = 11; } else if (size >= 68) { lag = 7; } else if (size >= 39) { lag = 5; } else { lag = 3; } mid = (size - lag) / 2; memset(ctx->sfmt, 0x8b, sizeof(ctx->sfmt)); if (key_length + 1 > N32) { count = key_length + 1; } else { count = N32; } r = func1(psfmt32[idxof(0)] ^ psfmt32[idxof(mid)] ^ psfmt32[idxof(N32 - 1)]); psfmt32[idxof(mid)] += r; r += key_length; psfmt32[idxof(mid + lag)] += r; psfmt32[idxof(0)] = r; count--; for (i = 1, j = 0; (j < count) && (j < key_length); j++) { r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] ^ psfmt32[idxof((i + N32 - 1) % N32)]); psfmt32[idxof((i + mid) % N32)] += r; r += init_key[j] + i; psfmt32[idxof((i + mid + lag) % N32)] += r; psfmt32[idxof(i)] = r; i = (i + 1) % N32; } for (; j < count; j++) { r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] ^ psfmt32[idxof((i + N32 - 1) % N32)]); psfmt32[idxof((i + mid) % N32)] += r; r += i; psfmt32[idxof((i + mid + lag) % N32)] += r; psfmt32[idxof(i)] = r; i = (i + 1) % N32; } for (j = 0; j < N32; j++) { r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)] + psfmt32[idxof((i + N32 - 1) % N32)]); psfmt32[idxof((i + mid) % N32)] ^= r; r -= i; psfmt32[idxof((i + mid + lag) % N32)] ^= r; psfmt32[idxof(i)] = r; i = (i + 1) % N32; } ctx->idx = N32; period_certification(ctx); ctx->initialized = 1; return ctx; } void fini_gen_rand(sfmt_t *ctx) { assert(ctx != NULL); ctx->initialized = 0; free(ctx); } disque-1.0-rc1/deps/jemalloc/test/src/btalloc.c000066400000000000000000000001621264176561100214120ustar00rootroot00000000000000#include "test/jemalloc_test.h" void * btalloc(size_t size, unsigned bits) { return (btalloc_0(size, bits)); } disque-1.0-rc1/deps/jemalloc/test/src/btalloc_0.c000066400000000000000000000000621264176561100216300ustar00rootroot00000000000000#include "test/jemalloc_test.h" btalloc_n_gen(0) disque-1.0-rc1/deps/jemalloc/test/src/btalloc_1.c000066400000000000000000000000621264176561100216310ustar00rootroot00000000000000#include "test/jemalloc_test.h" 
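/* Clarifying note (an assumption inferred from btalloc.c above, which calls btalloc_0()): btalloc_n_gen(1) presumably expands to the definition of btalloc_1(), giving the tests a family of allocation helpers with distinct call paths. */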
btalloc_n_gen(1) disque-1.0-rc1/deps/jemalloc/test/src/math.c000066400000000000000000000000601264176561100207200ustar00rootroot00000000000000#define MATH_C_ #include "test/jemalloc_test.h" disque-1.0-rc1/deps/jemalloc/test/src/mq.c000066400000000000000000000007131264176561100204110ustar00rootroot00000000000000#include "test/jemalloc_test.h" /* * Sleep for approximately ns nanoseconds. No lower *nor* upper bound on sleep * time is guaranteed. */ void mq_nanosleep(unsigned ns) { assert(ns <= 1000*1000*1000); #ifdef _WIN32 Sleep(ns / 1000); #else { struct timespec timeout; if (ns < 1000*1000*1000) { timeout.tv_sec = 0; timeout.tv_nsec = ns; } else { timeout.tv_sec = 1; timeout.tv_nsec = 0; } nanosleep(&timeout, NULL); } #endif } disque-1.0-rc1/deps/jemalloc/test/src/mtx.c000066400000000000000000000021201264176561100205760ustar00rootroot00000000000000#include "test/jemalloc_test.h" #ifndef _CRT_SPINCOUNT #define _CRT_SPINCOUNT 4000 #endif bool mtx_init(mtx_t *mtx) { #ifdef _WIN32 if (!InitializeCriticalSectionAndSpinCount(&mtx->lock, _CRT_SPINCOUNT)) return (true); #elif (defined(JEMALLOC_OSSPIN)) mtx->lock = 0; #else pthread_mutexattr_t attr; if (pthread_mutexattr_init(&attr) != 0) return (true); pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT); if (pthread_mutex_init(&mtx->lock, &attr) != 0) { pthread_mutexattr_destroy(&attr); return (true); } pthread_mutexattr_destroy(&attr); #endif return (false); } void mtx_fini(mtx_t *mtx) { #ifdef _WIN32 #elif (defined(JEMALLOC_OSSPIN)) #else pthread_mutex_destroy(&mtx->lock); #endif } void mtx_lock(mtx_t *mtx) { #ifdef _WIN32 EnterCriticalSection(&mtx->lock); #elif (defined(JEMALLOC_OSSPIN)) OSSpinLockLock(&mtx->lock); #else pthread_mutex_lock(&mtx->lock); #endif } void mtx_unlock(mtx_t *mtx) { #ifdef _WIN32 LeaveCriticalSection(&mtx->lock); #elif (defined(JEMALLOC_OSSPIN)) OSSpinLockUnlock(&mtx->lock); #else pthread_mutex_unlock(&mtx->lock); #endif } disque-1.0-rc1/deps/jemalloc/test/src/test.c000066400000000000000000000044061264176561100207560ustar00rootroot00000000000000#include "test/jemalloc_test.h" static unsigned test_count = 0; static test_status_t test_counts[test_status_count] = {0, 0, 0}; static test_status_t test_status = test_status_pass; static const char * test_name = ""; JEMALLOC_FORMAT_PRINTF(1, 2) void test_skip(const char *format, ...) { va_list ap; va_start(ap, format); malloc_vcprintf(NULL, NULL, format, ap); va_end(ap); malloc_printf("\n"); test_status = test_status_skip; } JEMALLOC_FORMAT_PRINTF(1, 2) void test_fail(const char *format, ...) { va_list ap; va_start(ap, format); malloc_vcprintf(NULL, NULL, format, ap); va_end(ap); malloc_printf("\n"); test_status = test_status_fail; } static const char * test_status_string(test_status_t test_status) { switch (test_status) { case test_status_pass: return "pass"; case test_status_skip: return "skip"; case test_status_fail: return "fail"; default: not_reached(); } } void p_test_init(const char *name) { test_count++; test_status = test_status_pass; test_name = name; } void p_test_fini(void) { test_counts[test_status]++; malloc_printf("%s: %s\n", test_name, test_status_string(test_status)); } test_status_t p_test(test_t *t, ...) { test_status_t ret; va_list ap; /* * Make sure initialization occurs prior to running tests. Tests are * special because they may use internal facilities prior to triggering * initialization as a side effect of calling into the public API. This * is a final safety that works even if jemalloc_constructor() doesn't * run, as for MSVC builds. 
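* (Clarifying note, not in the upstream comment: nallocx(1, 0) is used
* as the probe below because it forces initialization without actually
* allocating, and it returns 0 only on failure.)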
*/ if (nallocx(1, 0) == 0) { malloc_printf("Initialization error"); return (test_status_fail); } ret = test_status_pass; va_start(ap, t); for (; t != NULL; t = va_arg(ap, test_t *)) { t(); if (test_status > ret) ret = test_status; } va_end(ap); malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n", test_status_string(test_status_pass), test_counts[test_status_pass], test_count, test_status_string(test_status_skip), test_counts[test_status_skip], test_count, test_status_string(test_status_fail), test_counts[test_status_fail], test_count); return (ret); } void p_test_fail(const char *prefix, const char *message) { malloc_cprintf(NULL, NULL, "%s%s\n", prefix, message); test_status = test_status_fail; } disque-1.0-rc1/deps/jemalloc/test/src/thd.c000066400000000000000000000013601264176561100205520ustar00rootroot00000000000000#include "test/jemalloc_test.h" #ifdef _WIN32 void thd_create(thd_t *thd, void *(*proc)(void *), void *arg) { LPTHREAD_START_ROUTINE routine = (LPTHREAD_START_ROUTINE)proc; *thd = CreateThread(NULL, 0, routine, arg, 0, NULL); if (*thd == NULL) test_fail("Error in CreateThread()\n"); } void thd_join(thd_t thd, void **ret) { if (WaitForSingleObject(thd, INFINITE) == WAIT_OBJECT_0 && ret) { DWORD exit_code; GetExitCodeThread(thd, (LPDWORD) &exit_code); *ret = (void *)(uintptr_t)exit_code; } } #else void thd_create(thd_t *thd, void *(*proc)(void *), void *arg) { if (pthread_create(thd, NULL, proc, arg) != 0) test_fail("Error in pthread_create()\n"); } void thd_join(thd_t thd, void **ret) { pthread_join(thd, ret); } #endif disque-1.0-rc1/deps/jemalloc/test/src/timer.c000066400000000000000000000034261264176561100211200ustar00rootroot00000000000000#include "test/jemalloc_test.h" void timer_start(timedelta_t *timer) { #ifdef _WIN32 GetSystemTimeAsFileTime(&timer->ft0); #elif JEMALLOC_CLOCK_GETTIME if (sysconf(_SC_MONOTONIC_CLOCK) <= 0) timer->clock_id = CLOCK_REALTIME; else timer->clock_id = CLOCK_MONOTONIC; clock_gettime(timer->clock_id, &timer->ts0); #else gettimeofday(&timer->tv0, NULL); #endif } void timer_stop(timedelta_t *timer) { #ifdef _WIN32 GetSystemTimeAsFileTime(&timer->ft1); #elif JEMALLOC_CLOCK_GETTIME clock_gettime(timer->clock_id, &timer->ts1); #else gettimeofday(&timer->tv1, NULL); #endif } uint64_t timer_usec(const timedelta_t *timer) { #ifdef _WIN32 uint64_t t0, t1; t0 = (((uint64_t)timer->ft0.dwHighDateTime) << 32) | timer->ft0.dwLowDateTime; t1 = (((uint64_t)timer->ft1.dwHighDateTime) << 32) | timer->ft1.dwLowDateTime; return ((t1 - t0) / 10); #elif JEMALLOC_CLOCK_GETTIME return (((timer->ts1.tv_sec - timer->ts0.tv_sec) * 1000000) + (timer->ts1.tv_nsec - timer->ts0.tv_nsec) / 1000); #else return (((timer->tv1.tv_sec - timer->tv0.tv_sec) * 1000000) + timer->tv1.tv_usec - timer->tv0.tv_usec); #endif } void timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen) { uint64_t t0 = timer_usec(a); uint64_t t1 = timer_usec(b); uint64_t mult; unsigned i = 0; unsigned j; int n; /* Whole. */ n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, t0 / t1); i += n; if (i >= buflen) return; mult = 1; for (j = 0; j < n; j++) mult *= 10; /* Decimal. */ n = malloc_snprintf(&buf[i], buflen-i, "."); i += n; /* Fraction. */ while (i < buflen-1) { uint64_t round = (i+1 == buflen-1 && ((t0 * mult * 10 / t1) % 10 >= 5)) ? 
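/* Round the final fractional digit to nearest: when emitting the last digit that fits in buf, look one digit ahead and carry 1 if it is 5 or more; earlier digits are simply truncated. */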
1 : 0; n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, (t0 * mult / t1) % 10 + round); i += n; mult *= 10; } } disque-1.0-rc1/deps/jemalloc/test/stress/000077500000000000000000000000001264176561100203635ustar00rootroot00000000000000disque-1.0-rc1/deps/jemalloc/test/stress/microbench.c000066400000000000000000000061411264176561100226420ustar00rootroot00000000000000#include "test/jemalloc_test.h" JEMALLOC_INLINE_C void time_func(timedelta_t *timer, uint64_t nwarmup, uint64_t niter, void (*func)(void)) { uint64_t i; for (i = 0; i < nwarmup; i++) func(); timer_start(timer); for (i = 0; i < niter; i++) func(); timer_stop(timer); } void compare_funcs(uint64_t nwarmup, uint64_t niter, const char *name_a, void (*func_a)(void), const char *name_b, void (*func_b)(void)) { timedelta_t timer_a, timer_b; char ratio_buf[6]; void *p; p = mallocx(1, 0); if (p == NULL) { test_fail("Unexpected mallocx() failure"); return; } time_func(&timer_a, nwarmup, niter, func_a); time_func(&timer_b, nwarmup, niter, func_b); timer_ratio(&timer_a, &timer_b, ratio_buf, sizeof(ratio_buf)); malloc_printf("%"FMTu64" iterations, %s=%"FMTu64"us, " "%s=%"FMTu64"us, ratio=1:%s\n", niter, name_a, timer_usec(&timer_a), name_b, timer_usec(&timer_b), ratio_buf); dallocx(p, 0); } static void malloc_free(void) { /* The compiler can optimize away free(malloc(1))! */ void *p = malloc(1); if (p == NULL) { test_fail("Unexpected malloc() failure"); return; } free(p); } static void mallocx_free(void) { void *p = mallocx(1, 0); if (p == NULL) { test_fail("Unexpected mallocx() failure"); return; } free(p); } TEST_BEGIN(test_malloc_vs_mallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "malloc", malloc_free, "mallocx", mallocx_free); } TEST_END static void malloc_dallocx(void) { void *p = malloc(1); if (p == NULL) { test_fail("Unexpected malloc() failure"); return; } dallocx(p, 0); } static void malloc_sdallocx(void) { void *p = malloc(1); if (p == NULL) { test_fail("Unexpected malloc() failure"); return; } sdallocx(p, 1, 0); } TEST_BEGIN(test_free_vs_dallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "free", malloc_free, "dallocx", malloc_dallocx); } TEST_END TEST_BEGIN(test_dallocx_vs_sdallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "dallocx", malloc_dallocx, "sdallocx", malloc_sdallocx); } TEST_END static void malloc_mus_free(void) { void *p; p = malloc(1); if (p == NULL) { test_fail("Unexpected malloc() failure"); return; } malloc_usable_size(p); free(p); } static void malloc_sallocx_free(void) { void *p; p = malloc(1); if (p == NULL) { test_fail("Unexpected malloc() failure"); return; } if (sallocx(p, 0) < 1) test_fail("Unexpected sallocx() failure"); free(p); } TEST_BEGIN(test_mus_vs_sallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "malloc_usable_size", malloc_mus_free, "sallocx", malloc_sallocx_free); } TEST_END static void malloc_nallocx_free(void) { void *p; p = malloc(1); if (p == NULL) { test_fail("Unexpected malloc() failure"); return; } if (nallocx(1, 0) < 1) test_fail("Unexpected nallocx() failure"); free(p); } TEST_BEGIN(test_sallocx_vs_nallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "sallocx", malloc_sallocx_free, "nallocx", malloc_nallocx_free); } TEST_END int main(void) { return (test( test_malloc_vs_mallocx, test_free_vs_dallocx, test_dallocx_vs_sdallocx, test_mus_vs_sallocx, test_sallocx_vs_nallocx)); } disque-1.0-rc1/deps/jemalloc/test/test.sh.in000066400000000000000000000017561264176561100207670ustar00rootroot00000000000000#!/bin/sh case @abi@ in macho) export DYLD_FALLBACK_LIBRARY_PATH="@objroot@lib" ;; 
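# Clarifying note (assumption): @abi@, @objroot@, @abs_srcroot@, @abs_objroot@ and @exe@ are placeholders substituted by the configure script, and each branch of this case makes the freshly built jemalloc shared library findable by the test binaries on that platform.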
pecoff) export PATH="${PATH}:@objroot@lib" ;; *) ;; esac # Corresponds to test_status_t. pass_code=0 skip_code=1 fail_code=2 pass_count=0 skip_count=0 fail_count=0 for t in $@; do if [ $pass_count -ne 0 -o $skip_count -ne 0 -o $fail_count != 0 ] ; then echo fi echo "=== ${t} ===" ${t}@exe@ @abs_srcroot@ @abs_objroot@ result_code=$? case ${result_code} in ${pass_code}) pass_count=$((pass_count+1)) ;; ${skip_code}) skip_count=$((skip_count+1)) ;; ${fail_code}) fail_count=$((fail_count+1)) ;; *) echo "Test harness error" 1>&2 exit 1 esac done total_count=`expr ${pass_count} + ${skip_count} + ${fail_count}` echo echo "Test suite summary: pass: ${pass_count}/${total_count}, skip: ${skip_count}/${total_count}, fail: ${fail_count}/${total_count}" if [ ${fail_count} -eq 0 ] ; then exit 0 else exit 1 fi disque-1.0-rc1/deps/jemalloc/test/unit/000077500000000000000000000000001264176561100200175ustar00rootroot00000000000000disque-1.0-rc1/deps/jemalloc/test/unit/SFMT.c000066400000000000000000002530731264176561100207460ustar00rootroot00000000000000/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "test/jemalloc_test.h" #define BLOCK_SIZE 10000 #define BLOCK_SIZE64 (BLOCK_SIZE / 2) #define COUNT_1 1000 #define COUNT_2 700 static const uint32_t init_gen_rand_32_expected[] = { 3440181298U, 1564997079U, 1510669302U, 2930277156U, 1452439940U, 3796268453U, 423124208U, 2143818589U, 3827219408U, 2987036003U, 2674978610U, 1536842514U, 2027035537U, 2534897563U, 1686527725U, 545368292U, 1489013321U, 1370534252U, 4231012796U, 3994803019U, 1764869045U, 824597505U, 862581900U, 2469764249U, 812862514U, 359318673U, 116957936U, 3367389672U, 2327178354U, 1898245200U, 3206507879U, 2378925033U, 1040214787U, 2524778605U, 3088428700U, 1417665896U, 964324147U, 2282797708U, 2456269299U, 313400376U, 2245093271U, 1015729427U, 2694465011U, 3246975184U, 1992793635U, 463679346U, 3721104591U, 3475064196U, 856141236U, 1499559719U, 3522818941U, 3721533109U, 1954826617U, 1282044024U, 1543279136U, 1301863085U, 2669145051U, 4221477354U, 3896016841U, 3392740262U, 462466863U, 1037679449U, 1228140306U, 922298197U, 1205109853U, 1872938061U, 3102547608U, 2742766808U, 1888626088U, 4028039414U, 157593879U, 1136901695U, 4038377686U, 3572517236U, 4231706728U, 2997311961U, 1189931652U, 3981543765U, 2826166703U, 87159245U, 1721379072U, 3897926942U, 1790395498U, 2569178939U, 1047368729U, 2340259131U, 3144212906U, 2301169789U, 2442885464U, 3034046771U, 3667880593U, 3935928400U, 2372805237U, 1666397115U, 2460584504U, 513866770U, 3810869743U, 2147400037U, 2792078025U, 2941761810U, 3212265810U, 984692259U, 346590253U, 1804179199U, 3298543443U, 750108141U, 2880257022U, 243310542U, 1869036465U, 1588062513U, 2983949551U, 1931450364U, 4034505847U, 2735030199U, 1628461061U, 2539522841U, 127965585U, 3992448871U, 913388237U, 559130076U, 1202933193U, 4087643167U, 2590021067U, 2256240196U, 1746697293U, 1013913783U, 1155864921U, 2715773730U, 915061862U, 1948766573U, 2322882854U, 3761119102U, 1343405684U, 3078711943U, 3067431651U, 3245156316U, 3588354584U, 3484623306U, 3899621563U, 4156689741U, 3237090058U, 3880063844U, 862416318U, 4039923869U, 2303788317U, 3073590536U, 701653667U, 2131530884U, 3169309950U, 2028486980U, 747196777U, 3620218225U, 432016035U, 1449580595U, 2772266392U, 444224948U, 1662832057U, 3184055582U, 3028331792U, 1861686254U, 1104864179U, 342430307U, 1350510923U, 3024656237U, 1028417492U, 2870772950U, 290847558U, 3675663500U, 508431529U, 4264340390U, 2263569913U, 1669302976U, 519511383U, 2706411211U, 3764615828U, 3883162495U, 4051445305U, 2412729798U, 3299405164U, 3991911166U, 2348767304U, 2664054906U, 3763609282U, 593943581U, 3757090046U, 2075338894U, 2020550814U, 4287452920U, 4290140003U, 1422957317U, 2512716667U, 2003485045U, 2307520103U, 2288472169U, 3940751663U, 4204638664U, 2892583423U, 1710068300U, 3904755993U, 2363243951U, 3038334120U, 547099465U, 771105860U, 3199983734U, 4282046461U, 2298388363U, 934810218U, 2837827901U, 3952500708U, 2095130248U, 3083335297U, 26885281U, 3932155283U, 1531751116U, 1425227133U, 495654159U, 3279634176U, 3855562207U, 3957195338U, 4159985527U, 893375062U, 1875515536U, 1327247422U, 3754140693U, 1028923197U, 1729880440U, 805571298U, 448971099U, 2726757106U, 2749436461U, 2485987104U, 175337042U, 3235477922U, 3882114302U, 2020970972U, 943926109U, 2762587195U, 1904195558U, 3452650564U, 108432281U, 3893463573U, 3977583081U, 2636504348U, 1110673525U, 3548479841U, 4258854744U, 980047703U, 4057175418U, 3890008292U, 145653646U, 3141868989U, 3293216228U, 1194331837U, 1254570642U, 3049934521U, 2868313360U, 2886032750U, 1110873820U, 279553524U, 3007258565U, 1104807822U, 3186961098U, 
315764646U, 2163680838U, 3574508994U, 3099755655U, 191957684U, 3642656737U, 3317946149U, 3522087636U, 444526410U, 779157624U, 1088229627U, 1092460223U, 1856013765U, 3659877367U, 368270451U, 503570716U, 3000984671U, 2742789647U, 928097709U, 2914109539U, 308843566U, 2816161253U, 3667192079U, 2762679057U, 3395240989U, 2928925038U, 1491465914U, 3458702834U, 3787782576U, 2894104823U, 1296880455U, 1253636503U, 989959407U, 2291560361U, 2776790436U, 1913178042U, 1584677829U, 689637520U, 1898406878U, 688391508U, 3385234998U, 845493284U, 1943591856U, 2720472050U, 222695101U, 1653320868U, 2904632120U, 4084936008U, 1080720688U, 3938032556U, 387896427U, 2650839632U, 99042991U, 1720913794U, 1047186003U, 1877048040U, 2090457659U, 517087501U, 4172014665U, 2129713163U, 2413533132U, 2760285054U, 4129272496U, 1317737175U, 2309566414U, 2228873332U, 3889671280U, 1110864630U, 3576797776U, 2074552772U, 832002644U, 3097122623U, 2464859298U, 2679603822U, 1667489885U, 3237652716U, 1478413938U, 1719340335U, 2306631119U, 639727358U, 3369698270U, 226902796U, 2099920751U, 1892289957U, 2201594097U, 3508197013U, 3495811856U, 3900381493U, 841660320U, 3974501451U, 3360949056U, 1676829340U, 728899254U, 2047809627U, 2390948962U, 670165943U, 3412951831U, 4189320049U, 1911595255U, 2055363086U, 507170575U, 418219594U, 4141495280U, 2692088692U, 4203630654U, 3540093932U, 791986533U, 2237921051U, 2526864324U, 2956616642U, 1394958700U, 1983768223U, 1893373266U, 591653646U, 228432437U, 1611046598U, 3007736357U, 1040040725U, 2726180733U, 2789804360U, 4263568405U, 829098158U, 3847722805U, 1123578029U, 1804276347U, 997971319U, 4203797076U, 4185199713U, 2811733626U, 2343642194U, 2985262313U, 1417930827U, 3759587724U, 1967077982U, 1585223204U, 1097475516U, 1903944948U, 740382444U, 1114142065U, 1541796065U, 1718384172U, 1544076191U, 1134682254U, 3519754455U, 2866243923U, 341865437U, 645498576U, 2690735853U, 1046963033U, 2493178460U, 1187604696U, 1619577821U, 488503634U, 3255768161U, 2306666149U, 1630514044U, 2377698367U, 2751503746U, 3794467088U, 1796415981U, 3657173746U, 409136296U, 1387122342U, 1297726519U, 219544855U, 4270285558U, 437578827U, 1444698679U, 2258519491U, 963109892U, 3982244073U, 3351535275U, 385328496U, 1804784013U, 698059346U, 3920535147U, 708331212U, 784338163U, 785678147U, 1238376158U, 1557298846U, 2037809321U, 271576218U, 4145155269U, 1913481602U, 2763691931U, 588981080U, 1201098051U, 3717640232U, 1509206239U, 662536967U, 3180523616U, 1133105435U, 2963500837U, 2253971215U, 3153642623U, 1066925709U, 2582781958U, 3034720222U, 1090798544U, 2942170004U, 4036187520U, 686972531U, 2610990302U, 2641437026U, 1837562420U, 722096247U, 1315333033U, 2102231203U, 3402389208U, 3403698140U, 1312402831U, 2898426558U, 814384596U, 385649582U, 1916643285U, 1924625106U, 2512905582U, 2501170304U, 4275223366U, 2841225246U, 1467663688U, 3563567847U, 2969208552U, 884750901U, 102992576U, 227844301U, 3681442994U, 3502881894U, 4034693299U, 1166727018U, 1697460687U, 1737778332U, 1787161139U, 1053003655U, 1215024478U, 2791616766U, 2525841204U, 1629323443U, 3233815U, 2003823032U, 3083834263U, 2379264872U, 3752392312U, 1287475550U, 3770904171U, 3004244617U, 1502117784U, 918698423U, 2419857538U, 3864502062U, 1751322107U, 2188775056U, 4018728324U, 983712955U, 440071928U, 3710838677U, 2001027698U, 3994702151U, 22493119U, 3584400918U, 3446253670U, 4254789085U, 1405447860U, 1240245579U, 1800644159U, 1661363424U, 3278326132U, 3403623451U, 67092802U, 2609352193U, 3914150340U, 1814842761U, 3610830847U, 591531412U, 3880232807U, 1673505890U, 2585326991U, 
1678544474U, 3148435887U, 3457217359U, 1193226330U, 2816576908U, 154025329U, 121678860U, 1164915738U, 973873761U, 269116100U, 52087970U, 744015362U, 498556057U, 94298882U, 1563271621U, 2383059628U, 4197367290U, 3958472990U, 2592083636U, 2906408439U, 1097742433U, 3924840517U, 264557272U, 2292287003U, 3203307984U, 4047038857U, 3820609705U, 2333416067U, 1839206046U, 3600944252U, 3412254904U, 583538222U, 2390557166U, 4140459427U, 2810357445U, 226777499U, 2496151295U, 2207301712U, 3283683112U, 611630281U, 1933218215U, 3315610954U, 3889441987U, 3719454256U, 3957190521U, 1313998161U, 2365383016U, 3146941060U, 1801206260U, 796124080U, 2076248581U, 1747472464U, 3254365145U, 595543130U, 3573909503U, 3758250204U, 2020768540U, 2439254210U, 93368951U, 3155792250U, 2600232980U, 3709198295U, 3894900440U, 2971850836U, 1578909644U, 1443493395U, 2581621665U, 3086506297U, 2443465861U, 558107211U, 1519367835U, 249149686U, 908102264U, 2588765675U, 1232743965U, 1001330373U, 3561331654U, 2259301289U, 1564977624U, 3835077093U, 727244906U, 4255738067U, 1214133513U, 2570786021U, 3899704621U, 1633861986U, 1636979509U, 1438500431U, 58463278U, 2823485629U, 2297430187U, 2926781924U, 3371352948U, 1864009023U, 2722267973U, 1444292075U, 437703973U, 1060414512U, 189705863U, 910018135U, 4077357964U, 884213423U, 2644986052U, 3973488374U, 1187906116U, 2331207875U, 780463700U, 3713351662U, 3854611290U, 412805574U, 2978462572U, 2176222820U, 829424696U, 2790788332U, 2750819108U, 1594611657U, 3899878394U, 3032870364U, 1702887682U, 1948167778U, 14130042U, 192292500U, 947227076U, 90719497U, 3854230320U, 784028434U, 2142399787U, 1563449646U, 2844400217U, 819143172U, 2883302356U, 2328055304U, 1328532246U, 2603885363U, 3375188924U, 933941291U, 3627039714U, 2129697284U, 2167253953U, 2506905438U, 1412424497U, 2981395985U, 1418359660U, 2925902456U, 52752784U, 3713667988U, 3924669405U, 648975707U, 1145520213U, 4018650664U, 3805915440U, 2380542088U, 2013260958U, 3262572197U, 2465078101U, 1114540067U, 3728768081U, 2396958768U, 590672271U, 904818725U, 4263660715U, 700754408U, 1042601829U, 4094111823U, 4274838909U, 2512692617U, 2774300207U, 2057306915U, 3470942453U, 99333088U, 1142661026U, 2889931380U, 14316674U, 2201179167U, 415289459U, 448265759U, 3515142743U, 3254903683U, 246633281U, 1184307224U, 2418347830U, 2092967314U, 2682072314U, 2558750234U, 2000352263U, 1544150531U, 399010405U, 1513946097U, 499682937U, 461167460U, 3045570638U, 1633669705U, 851492362U, 4052801922U, 2055266765U, 635556996U, 368266356U, 2385737383U, 3218202352U, 2603772408U, 349178792U, 226482567U, 3102426060U, 3575998268U, 2103001871U, 3243137071U, 225500688U, 1634718593U, 4283311431U, 4292122923U, 3842802787U, 811735523U, 105712518U, 663434053U, 1855889273U, 2847972595U, 1196355421U, 2552150115U, 4254510614U, 3752181265U, 3430721819U, 3828705396U, 3436287905U, 3441964937U, 4123670631U, 353001539U, 459496439U, 3799690868U, 1293777660U, 2761079737U, 498096339U, 3398433374U, 4080378380U, 2304691596U, 2995729055U, 4134660419U, 3903444024U, 3576494993U, 203682175U, 3321164857U, 2747963611U, 79749085U, 2992890370U, 1240278549U, 1772175713U, 2111331972U, 2655023449U, 1683896345U, 2836027212U, 3482868021U, 2489884874U, 756853961U, 2298874501U, 4013448667U, 4143996022U, 2948306858U, 4132920035U, 1283299272U, 995592228U, 3450508595U, 1027845759U, 1766942720U, 3861411826U, 1446861231U, 95974993U, 3502263554U, 1487532194U, 601502472U, 4129619129U, 250131773U, 2050079547U, 3198903947U, 3105589778U, 4066481316U, 3026383978U, 2276901713U, 365637751U, 2260718426U, 1394775634U, 
1791172338U, 2690503163U, 2952737846U, 1568710462U, 732623190U, 2980358000U, 1053631832U, 1432426951U, 3229149635U, 1854113985U, 3719733532U, 3204031934U, 735775531U, 107468620U, 3734611984U, 631009402U, 3083622457U, 4109580626U, 159373458U, 1301970201U, 4132389302U, 1293255004U, 847182752U, 4170022737U, 96712900U, 2641406755U, 1381727755U, 405608287U, 4287919625U, 1703554290U, 3589580244U, 2911403488U, 2166565U, 2647306451U, 2330535117U, 1200815358U, 1165916754U, 245060911U, 4040679071U, 3684908771U, 2452834126U, 2486872773U, 2318678365U, 2940627908U, 1837837240U, 3447897409U, 4270484676U, 1495388728U, 3754288477U, 4204167884U, 1386977705U, 2692224733U, 3076249689U, 4109568048U, 4170955115U, 4167531356U, 4020189950U, 4261855038U, 3036907575U, 3410399885U, 3076395737U, 1046178638U, 144496770U, 230725846U, 3349637149U, 17065717U, 2809932048U, 2054581785U, 3608424964U, 3259628808U, 134897388U, 3743067463U, 257685904U, 3795656590U, 1562468719U, 3589103904U, 3120404710U, 254684547U, 2653661580U, 3663904795U, 2631942758U, 1063234347U, 2609732900U, 2332080715U, 3521125233U, 1180599599U, 1935868586U, 4110970440U, 296706371U, 2128666368U, 1319875791U, 1570900197U, 3096025483U, 1799882517U, 1928302007U, 1163707758U, 1244491489U, 3533770203U, 567496053U, 2757924305U, 2781639343U, 2818420107U, 560404889U, 2619609724U, 4176035430U, 2511289753U, 2521842019U, 3910553502U, 2926149387U, 3302078172U, 4237118867U, 330725126U, 367400677U, 888239854U, 545570454U, 4259590525U, 134343617U, 1102169784U, 1647463719U, 3260979784U, 1518840883U, 3631537963U, 3342671457U, 1301549147U, 2083739356U, 146593792U, 3217959080U, 652755743U, 2032187193U, 3898758414U, 1021358093U, 4037409230U, 2176407931U, 3427391950U, 2883553603U, 985613827U, 3105265092U, 3423168427U, 3387507672U, 467170288U, 2141266163U, 3723870208U, 916410914U, 1293987799U, 2652584950U, 769160137U, 3205292896U, 1561287359U, 1684510084U, 3136055621U, 3765171391U, 639683232U, 2639569327U, 1218546948U, 4263586685U, 3058215773U, 2352279820U, 401870217U, 2625822463U, 1529125296U, 2981801895U, 1191285226U, 4027725437U, 3432700217U, 4098835661U, 971182783U, 2443861173U, 3881457123U, 3874386651U, 457276199U, 2638294160U, 4002809368U, 421169044U, 1112642589U, 3076213779U, 3387033971U, 2499610950U, 3057240914U, 1662679783U, 461224431U, 1168395933U }; static const uint32_t init_by_array_32_expected[] = { 2920711183U, 3885745737U, 3501893680U, 856470934U, 1421864068U, 277361036U, 1518638004U, 2328404353U, 3355513634U, 64329189U, 1624587673U, 3508467182U, 2481792141U, 3706480799U, 1925859037U, 2913275699U, 882658412U, 384641219U, 422202002U, 1873384891U, 2006084383U, 3924929912U, 1636718106U, 3108838742U, 1245465724U, 4195470535U, 779207191U, 1577721373U, 1390469554U, 2928648150U, 121399709U, 3170839019U, 4044347501U, 953953814U, 3821710850U, 3085591323U, 3666535579U, 3577837737U, 2012008410U, 3565417471U, 4044408017U, 433600965U, 1637785608U, 1798509764U, 860770589U, 3081466273U, 3982393409U, 2451928325U, 3437124742U, 4093828739U, 3357389386U, 2154596123U, 496568176U, 2650035164U, 2472361850U, 3438299U, 2150366101U, 1577256676U, 3802546413U, 1787774626U, 4078331588U, 3706103141U, 170391138U, 3806085154U, 1680970100U, 1961637521U, 3316029766U, 890610272U, 1453751581U, 1430283664U, 3051057411U, 3597003186U, 542563954U, 3796490244U, 1690016688U, 3448752238U, 440702173U, 347290497U, 1121336647U, 2540588620U, 280881896U, 2495136428U, 213707396U, 15104824U, 2946180358U, 659000016U, 566379385U, 2614030979U, 2855760170U, 334526548U, 2315569495U, 2729518615U, 564745877U, 
1263517638U, 3157185798U, 1604852056U, 1011639885U, 2950579535U, 2524219188U, 312951012U, 1528896652U, 1327861054U, 2846910138U, 3966855905U, 2536721582U, 855353911U, 1685434729U, 3303978929U, 1624872055U, 4020329649U, 3164802143U, 1642802700U, 1957727869U, 1792352426U, 3334618929U, 2631577923U, 3027156164U, 842334259U, 3353446843U, 1226432104U, 1742801369U, 3552852535U, 3471698828U, 1653910186U, 3380330939U, 2313782701U, 3351007196U, 2129839995U, 1800682418U, 4085884420U, 1625156629U, 3669701987U, 615211810U, 3294791649U, 4131143784U, 2590843588U, 3207422808U, 3275066464U, 561592872U, 3957205738U, 3396578098U, 48410678U, 3505556445U, 1005764855U, 3920606528U, 2936980473U, 2378918600U, 2404449845U, 1649515163U, 701203563U, 3705256349U, 83714199U, 3586854132U, 922978446U, 2863406304U, 3523398907U, 2606864832U, 2385399361U, 3171757816U, 4262841009U, 3645837721U, 1169579486U, 3666433897U, 3174689479U, 1457866976U, 3803895110U, 3346639145U, 1907224409U, 1978473712U, 1036712794U, 980754888U, 1302782359U, 1765252468U, 459245755U, 3728923860U, 1512894209U, 2046491914U, 207860527U, 514188684U, 2288713615U, 1597354672U, 3349636117U, 2357291114U, 3995796221U, 945364213U, 1893326518U, 3770814016U, 1691552714U, 2397527410U, 967486361U, 776416472U, 4197661421U, 951150819U, 1852770983U, 4044624181U, 1399439738U, 4194455275U, 2284037669U, 1550734958U, 3321078108U, 1865235926U, 2912129961U, 2664980877U, 1357572033U, 2600196436U, 2486728200U, 2372668724U, 1567316966U, 2374111491U, 1839843570U, 20815612U, 3727008608U, 3871996229U, 824061249U, 1932503978U, 3404541726U, 758428924U, 2609331364U, 1223966026U, 1299179808U, 648499352U, 2180134401U, 880821170U, 3781130950U, 113491270U, 1032413764U, 4185884695U, 2490396037U, 1201932817U, 4060951446U, 4165586898U, 1629813212U, 2887821158U, 415045333U, 628926856U, 2193466079U, 3391843445U, 2227540681U, 1907099846U, 2848448395U, 1717828221U, 1372704537U, 1707549841U, 2294058813U, 2101214437U, 2052479531U, 1695809164U, 3176587306U, 2632770465U, 81634404U, 1603220563U, 644238487U, 302857763U, 897352968U, 2613146653U, 1391730149U, 4245717312U, 4191828749U, 1948492526U, 2618174230U, 3992984522U, 2178852787U, 3596044509U, 3445573503U, 2026614616U, 915763564U, 3415689334U, 2532153403U, 3879661562U, 2215027417U, 3111154986U, 2929478371U, 668346391U, 1152241381U, 2632029711U, 3004150659U, 2135025926U, 948690501U, 2799119116U, 4228829406U, 1981197489U, 4209064138U, 684318751U, 3459397845U, 201790843U, 4022541136U, 3043635877U, 492509624U, 3263466772U, 1509148086U, 921459029U, 3198857146U, 705479721U, 3835966910U, 3603356465U, 576159741U, 1742849431U, 594214882U, 2055294343U, 3634861861U, 449571793U, 3246390646U, 3868232151U, 1479156585U, 2900125656U, 2464815318U, 3960178104U, 1784261920U, 18311476U, 3627135050U, 644609697U, 424968996U, 919890700U, 2986824110U, 816423214U, 4003562844U, 1392714305U, 1757384428U, 2569030598U, 995949559U, 3875659880U, 2933807823U, 2752536860U, 2993858466U, 4030558899U, 2770783427U, 2775406005U, 2777781742U, 1931292655U, 472147933U, 3865853827U, 2726470545U, 2668412860U, 2887008249U, 408979190U, 3578063323U, 3242082049U, 1778193530U, 27981909U, 2362826515U, 389875677U, 1043878156U, 581653903U, 3830568952U, 389535942U, 3713523185U, 2768373359U, 2526101582U, 1998618197U, 1160859704U, 3951172488U, 1098005003U, 906275699U, 3446228002U, 2220677963U, 2059306445U, 132199571U, 476838790U, 1868039399U, 3097344807U, 857300945U, 396345050U, 2835919916U, 1782168828U, 1419519470U, 4288137521U, 819087232U, 596301494U, 872823172U, 1526888217U, 805161465U, 
1116186205U, 2829002754U, 2352620120U, 620121516U, 354159268U, 3601949785U, 209568138U, 1352371732U, 2145977349U, 4236871834U, 1539414078U, 3558126206U, 3224857093U, 4164166682U, 3817553440U, 3301780278U, 2682696837U, 3734994768U, 1370950260U, 1477421202U, 2521315749U, 1330148125U, 1261554731U, 2769143688U, 3554756293U, 4235882678U, 3254686059U, 3530579953U, 1215452615U, 3574970923U, 4057131421U, 589224178U, 1000098193U, 171190718U, 2521852045U, 2351447494U, 2284441580U, 2646685513U, 3486933563U, 3789864960U, 1190528160U, 1702536782U, 1534105589U, 4262946827U, 2726686826U, 3584544841U, 2348270128U, 2145092281U, 2502718509U, 1027832411U, 3571171153U, 1287361161U, 4011474411U, 3241215351U, 2419700818U, 971242709U, 1361975763U, 1096842482U, 3271045537U, 81165449U, 612438025U, 3912966678U, 1356929810U, 733545735U, 537003843U, 1282953084U, 884458241U, 588930090U, 3930269801U, 2961472450U, 1219535534U, 3632251943U, 268183903U, 1441240533U, 3653903360U, 3854473319U, 2259087390U, 2548293048U, 2022641195U, 2105543911U, 1764085217U, 3246183186U, 482438805U, 888317895U, 2628314765U, 2466219854U, 717546004U, 2322237039U, 416725234U, 1544049923U, 1797944973U, 3398652364U, 3111909456U, 485742908U, 2277491072U, 1056355088U, 3181001278U, 129695079U, 2693624550U, 1764438564U, 3797785470U, 195503713U, 3266519725U, 2053389444U, 1961527818U, 3400226523U, 3777903038U, 2597274307U, 4235851091U, 4094406648U, 2171410785U, 1781151386U, 1378577117U, 654643266U, 3424024173U, 3385813322U, 679385799U, 479380913U, 681715441U, 3096225905U, 276813409U, 3854398070U, 2721105350U, 831263315U, 3276280337U, 2628301522U, 3984868494U, 1466099834U, 2104922114U, 1412672743U, 820330404U, 3491501010U, 942735832U, 710652807U, 3972652090U, 679881088U, 40577009U, 3705286397U, 2815423480U, 3566262429U, 663396513U, 3777887429U, 4016670678U, 404539370U, 1142712925U, 1140173408U, 2913248352U, 2872321286U, 263751841U, 3175196073U, 3162557581U, 2878996619U, 75498548U, 3836833140U, 3284664959U, 1157523805U, 112847376U, 207855609U, 1337979698U, 1222578451U, 157107174U, 901174378U, 3883717063U, 1618632639U, 1767889440U, 4264698824U, 1582999313U, 884471997U, 2508825098U, 3756370771U, 2457213553U, 3565776881U, 3709583214U, 915609601U, 460833524U, 1091049576U, 85522880U, 2553251U, 132102809U, 2429882442U, 2562084610U, 1386507633U, 4112471229U, 21965213U, 1981516006U, 2418435617U, 3054872091U, 4251511224U, 2025783543U, 1916911512U, 2454491136U, 3938440891U, 3825869115U, 1121698605U, 3463052265U, 802340101U, 1912886800U, 4031997367U, 3550640406U, 1596096923U, 610150600U, 431464457U, 2541325046U, 486478003U, 739704936U, 2862696430U, 3037903166U, 1129749694U, 2611481261U, 1228993498U, 510075548U, 3424962587U, 2458689681U, 818934833U, 4233309125U, 1608196251U, 3419476016U, 1858543939U, 2682166524U, 3317854285U, 631986188U, 3008214764U, 613826412U, 3567358221U, 3512343882U, 1552467474U, 3316162670U, 1275841024U, 4142173454U, 565267881U, 768644821U, 198310105U, 2396688616U, 1837659011U, 203429334U, 854539004U, 4235811518U, 3338304926U, 3730418692U, 3852254981U, 3032046452U, 2329811860U, 2303590566U, 2696092212U, 3894665932U, 145835667U, 249563655U, 1932210840U, 2431696407U, 3312636759U, 214962629U, 2092026914U, 3020145527U, 4073039873U, 2739105705U, 1308336752U, 855104522U, 2391715321U, 67448785U, 547989482U, 854411802U, 3608633740U, 431731530U, 537375589U, 3888005760U, 696099141U, 397343236U, 1864511780U, 44029739U, 1729526891U, 1993398655U, 2010173426U, 2591546756U, 275223291U, 1503900299U, 4217765081U, 2185635252U, 1122436015U, 3550155364U, 
681707194U, 3260479338U, 933579397U, 2983029282U, 2505504587U, 2667410393U, 2962684490U, 4139721708U, 2658172284U, 2452602383U, 2607631612U, 1344296217U, 3075398709U, 2949785295U, 1049956168U, 3917185129U, 2155660174U, 3280524475U, 1503827867U, 674380765U, 1918468193U, 3843983676U, 634358221U, 2538335643U, 1873351298U, 3368723763U, 2129144130U, 3203528633U, 3087174986U, 2691698871U, 2516284287U, 24437745U, 1118381474U, 2816314867U, 2448576035U, 4281989654U, 217287825U, 165872888U, 2628995722U, 3533525116U, 2721669106U, 872340568U, 3429930655U, 3309047304U, 3916704967U, 3270160355U, 1348884255U, 1634797670U, 881214967U, 4259633554U, 174613027U, 1103974314U, 1625224232U, 2678368291U, 1133866707U, 3853082619U, 4073196549U, 1189620777U, 637238656U, 930241537U, 4042750792U, 3842136042U, 2417007212U, 2524907510U, 1243036827U, 1282059441U, 3764588774U, 1394459615U, 2323620015U, 1166152231U, 3307479609U, 3849322257U, 3507445699U, 4247696636U, 758393720U, 967665141U, 1095244571U, 1319812152U, 407678762U, 2640605208U, 2170766134U, 3663594275U, 4039329364U, 2512175520U, 725523154U, 2249807004U, 3312617979U, 2414634172U, 1278482215U, 349206484U, 1573063308U, 1196429124U, 3873264116U, 2400067801U, 268795167U, 226175489U, 2961367263U, 1968719665U, 42656370U, 1010790699U, 561600615U, 2422453992U, 3082197735U, 1636700484U, 3977715296U, 3125350482U, 3478021514U, 2227819446U, 1540868045U, 3061908980U, 1087362407U, 3625200291U, 361937537U, 580441897U, 1520043666U, 2270875402U, 1009161260U, 2502355842U, 4278769785U, 473902412U, 1057239083U, 1905829039U, 1483781177U, 2080011417U, 1207494246U, 1806991954U, 2194674403U, 3455972205U, 807207678U, 3655655687U, 674112918U, 195425752U, 3917890095U, 1874364234U, 1837892715U, 3663478166U, 1548892014U, 2570748714U, 2049929836U, 2167029704U, 697543767U, 3499545023U, 3342496315U, 1725251190U, 3561387469U, 2905606616U, 1580182447U, 3934525927U, 4103172792U, 1365672522U, 1534795737U, 3308667416U, 2841911405U, 3943182730U, 4072020313U, 3494770452U, 3332626671U, 55327267U, 478030603U, 411080625U, 3419529010U, 1604767823U, 3513468014U, 570668510U, 913790824U, 2283967995U, 695159462U, 3825542932U, 4150698144U, 1829758699U, 202895590U, 1609122645U, 1267651008U, 2910315509U, 2511475445U, 2477423819U, 3932081579U, 900879979U, 2145588390U, 2670007504U, 580819444U, 1864996828U, 2526325979U, 1019124258U, 815508628U, 2765933989U, 1277301341U, 3006021786U, 855540956U, 288025710U, 1919594237U, 2331223864U, 177452412U, 2475870369U, 2689291749U, 865194284U, 253432152U, 2628531804U, 2861208555U, 2361597573U, 1653952120U, 1039661024U, 2159959078U, 3709040440U, 3564718533U, 2596878672U, 2041442161U, 31164696U, 2662962485U, 3665637339U, 1678115244U, 2699839832U, 3651968520U, 3521595541U, 458433303U, 2423096824U, 21831741U, 380011703U, 2498168716U, 861806087U, 1673574843U, 4188794405U, 2520563651U, 2632279153U, 2170465525U, 4171949898U, 3886039621U, 1661344005U, 3424285243U, 992588372U, 2500984144U, 2993248497U, 3590193895U, 1535327365U, 515645636U, 131633450U, 3729760261U, 1613045101U, 3254194278U, 15889678U, 1493590689U, 244148718U, 2991472662U, 1401629333U, 777349878U, 2501401703U, 4285518317U, 3794656178U, 955526526U, 3442142820U, 3970298374U, 736025417U, 2737370764U, 1271509744U, 440570731U, 136141826U, 1596189518U, 923399175U, 257541519U, 3505774281U, 2194358432U, 2518162991U, 1379893637U, 2667767062U, 3748146247U, 1821712620U, 3923161384U, 1947811444U, 2392527197U, 4127419685U, 1423694998U, 4156576871U, 1382885582U, 3420127279U, 3617499534U, 2994377493U, 4038063986U, 1918458672U, 
2983166794U, 4200449033U, 353294540U, 1609232588U, 243926648U, 2332803291U, 507996832U, 2392838793U, 4075145196U, 2060984340U, 4287475136U, 88232602U, 2491531140U, 4159725633U, 2272075455U, 759298618U, 201384554U, 838356250U, 1416268324U, 674476934U, 90795364U, 141672229U, 3660399588U, 4196417251U, 3249270244U, 3774530247U, 59587265U, 3683164208U, 19392575U, 1463123697U, 1882205379U, 293780489U, 2553160622U, 2933904694U, 675638239U, 2851336944U, 1435238743U, 2448730183U, 804436302U, 2119845972U, 322560608U, 4097732704U, 2987802540U, 641492617U, 2575442710U, 4217822703U, 3271835300U, 2836418300U, 3739921620U, 2138378768U, 2879771855U, 4294903423U, 3121097946U, 2603440486U, 2560820391U, 1012930944U, 2313499967U, 584489368U, 3431165766U, 897384869U, 2062537737U, 2847889234U, 3742362450U, 2951174585U, 4204621084U, 1109373893U, 3668075775U, 2750138839U, 3518055702U, 733072558U, 4169325400U, 788493625U }; static const uint64_t init_gen_rand_64_expected[] = { KQU(16924766246869039260), KQU( 8201438687333352714), KQU( 2265290287015001750), KQU(18397264611805473832), KQU( 3375255223302384358), KQU( 6345559975416828796), KQU(18229739242790328073), KQU( 7596792742098800905), KQU( 255338647169685981), KQU( 2052747240048610300), KQU(18328151576097299343), KQU(12472905421133796567), KQU(11315245349717600863), KQU(16594110197775871209), KQU(15708751964632456450), KQU(10452031272054632535), KQU(11097646720811454386), KQU( 4556090668445745441), KQU(17116187693090663106), KQU(14931526836144510645), KQU( 9190752218020552591), KQU( 9625800285771901401), KQU(13995141077659972832), KQU( 5194209094927829625), KQU( 4156788379151063303), KQU( 8523452593770139494), KQU(14082382103049296727), KQU( 2462601863986088483), KQU( 3030583461592840678), KQU( 5221622077872827681), KQU( 3084210671228981236), KQU(13956758381389953823), KQU(13503889856213423831), KQU(15696904024189836170), KQU( 4612584152877036206), KQU( 6231135538447867881), KQU(10172457294158869468), KQU( 6452258628466708150), KQU(14044432824917330221), KQU( 370168364480044279), KQU(10102144686427193359), KQU( 667870489994776076), KQU( 2732271956925885858), KQU(18027788905977284151), KQU(15009842788582923859), KQU( 7136357960180199542), KQU(15901736243475578127), KQU(16951293785352615701), KQU(10551492125243691632), KQU(17668869969146434804), KQU(13646002971174390445), KQU( 9804471050759613248), KQU( 5511670439655935493), KQU(18103342091070400926), KQU(17224512747665137533), KQU(15534627482992618168), KQU( 1423813266186582647), KQU(15821176807932930024), KQU( 30323369733607156), KQU(11599382494723479403), KQU( 653856076586810062), KQU( 3176437395144899659), KQU(14028076268147963917), KQU(16156398271809666195), KQU( 3166955484848201676), KQU( 5746805620136919390), KQU(17297845208891256593), KQU(11691653183226428483), KQU(17900026146506981577), KQU(15387382115755971042), KQU(16923567681040845943), KQU( 8039057517199388606), KQU(11748409241468629263), KQU( 794358245539076095), KQU(13438501964693401242), KQU(14036803236515618962), KQU( 5252311215205424721), KQU(17806589612915509081), KQU( 6802767092397596006), KQU(14212120431184557140), KQU( 1072951366761385712), KQU(13098491780722836296), KQU( 9466676828710797353), KQU(12673056849042830081), KQU(12763726623645357580), KQU(16468961652999309493), KQU(15305979875636438926), KQU(17444713151223449734), KQU( 5692214267627883674), KQU(13049589139196151505), KQU( 880115207831670745), KQU( 1776529075789695498), KQU(16695225897801466485), KQU(10666901778795346845), KQU( 6164389346722833869), KQU( 2863817793264300475), 
KQU( 9464049921886304754), KQU( 3993566636740015468), KQU( 9983749692528514136), KQU(16375286075057755211), KQU(16042643417005440820), KQU(11445419662923489877), KQU( 7999038846885158836), KQU( 6721913661721511535), KQU( 5363052654139357320), KQU( 1817788761173584205), KQU(13290974386445856444), KQU( 4650350818937984680), KQU( 8219183528102484836), KQU( 1569862923500819899), KQU( 4189359732136641860), KQU(14202822961683148583), KQU( 4457498315309429058), KQU(13089067387019074834), KQU(11075517153328927293), KQU(10277016248336668389), KQU( 7070509725324401122), KQU(17808892017780289380), KQU(13143367339909287349), KQU( 1377743745360085151), KQU( 5749341807421286485), KQU(14832814616770931325), KQU( 7688820635324359492), KQU(10960474011539770045), KQU( 81970066653179790), KQU(12619476072607878022), KQU( 4419566616271201744), KQU(15147917311750568503), KQU( 5549739182852706345), KQU( 7308198397975204770), KQU(13580425496671289278), KQU(17070764785210130301), KQU( 8202832846285604405), KQU( 6873046287640887249), KQU( 6927424434308206114), KQU( 6139014645937224874), KQU(10290373645978487639), KQU(15904261291701523804), KQU( 9628743442057826883), KQU(18383429096255546714), KQU( 4977413265753686967), KQU( 7714317492425012869), KQU( 9025232586309926193), KQU(14627338359776709107), KQU(14759849896467790763), KQU(10931129435864423252), KQU( 4588456988775014359), KQU(10699388531797056724), KQU( 468652268869238792), KQU( 5755943035328078086), KQU( 2102437379988580216), KQU( 9986312786506674028), KQU( 2654207180040945604), KQU( 8726634790559960062), KQU( 100497234871808137), KQU( 2800137176951425819), KQU( 6076627612918553487), KQU( 5780186919186152796), KQU( 8179183595769929098), KQU( 6009426283716221169), KQU( 2796662551397449358), KQU( 1756961367041986764), KQU( 6972897917355606205), KQU(14524774345368968243), KQU( 2773529684745706940), KQU( 4853632376213075959), KQU( 4198177923731358102), KQU( 8271224913084139776), KQU( 2741753121611092226), KQU(16782366145996731181), KQU(15426125238972640790), KQU(13595497100671260342), KQU( 3173531022836259898), KQU( 6573264560319511662), KQU(18041111951511157441), KQU( 2351433581833135952), KQU( 3113255578908173487), KQU( 1739371330877858784), KQU(16046126562789165480), KQU( 8072101652214192925), KQU(15267091584090664910), KQU( 9309579200403648940), KQU( 5218892439752408722), KQU(14492477246004337115), KQU(17431037586679770619), KQU( 7385248135963250480), KQU( 9580144956565560660), KQU( 4919546228040008720), KQU(15261542469145035584), KQU(18233297270822253102), KQU( 5453248417992302857), KQU( 9309519155931460285), KQU(10342813012345291756), KQU(15676085186784762381), KQU(15912092950691300645), KQU( 9371053121499003195), KQU( 9897186478226866746), KQU(14061858287188196327), KQU( 122575971620788119), KQU(12146750969116317754), KQU( 4438317272813245201), KQU( 8332576791009527119), KQU(13907785691786542057), KQU(10374194887283287467), KQU( 2098798755649059566), KQU( 3416235197748288894), KQU( 8688269957320773484), KQU( 7503964602397371571), KQU(16724977015147478236), KQU( 9461512855439858184), KQU(13259049744534534727), KQU( 3583094952542899294), KQU( 8764245731305528292), KQU(13240823595462088985), KQU(13716141617617910448), KQU(18114969519935960955), KQU( 2297553615798302206), KQU( 4585521442944663362), KQU(17776858680630198686), KQU( 4685873229192163363), KQU( 152558080671135627), KQU(15424900540842670088), KQU(13229630297130024108), KQU(17530268788245718717), KQU(16675633913065714144), KQU( 3158912717897568068), KQU(15399132185380087288), KQU( 
7401418744515677872), KQU(13135412922344398535), KQU( 6385314346100509511), KQU(13962867001134161139), KQU(10272780155442671999), KQU(12894856086597769142), KQU(13340877795287554994), KQU(12913630602094607396), KQU(12543167911119793857), KQU(17343570372251873096), KQU(10959487764494150545), KQU( 6966737953093821128), KQU(13780699135496988601), KQU( 4405070719380142046), KQU(14923788365607284982), KQU( 2869487678905148380), KQU( 6416272754197188403), KQU(15017380475943612591), KQU( 1995636220918429487), KQU( 3402016804620122716), KQU(15800188663407057080), KQU(11362369990390932882), KQU(15262183501637986147), KQU(10239175385387371494), KQU( 9352042420365748334), KQU( 1682457034285119875), KQU( 1724710651376289644), KQU( 2038157098893817966), KQU( 9897825558324608773), KQU( 1477666236519164736), KQU(16835397314511233640), KQU(10370866327005346508), KQU(10157504370660621982), KQU(12113904045335882069), KQU(13326444439742783008), KQU(11302769043000765804), KQU(13594979923955228484), KQU(11779351762613475968), KQU( 3786101619539298383), KQU( 8021122969180846063), KQU(15745904401162500495), KQU(10762168465993897267), KQU(13552058957896319026), KQU(11200228655252462013), KQU( 5035370357337441226), KQU( 7593918984545500013), KQU( 5418554918361528700), KQU( 4858270799405446371), KQU( 9974659566876282544), KQU(18227595922273957859), KQU( 2772778443635656220), KQU(14285143053182085385), KQU( 9939700992429600469), KQU(12756185904545598068), KQU( 2020783375367345262), KQU( 57026775058331227), KQU( 950827867930065454), KQU( 6602279670145371217), KQU( 2291171535443566929), KQU( 5832380724425010313), KQU( 1220343904715982285), KQU(17045542598598037633), KQU(15460481779702820971), KQU(13948388779949365130), KQU(13975040175430829518), KQU(17477538238425541763), KQU(11104663041851745725), KQU(15860992957141157587), KQU(14529434633012950138), KQU( 2504838019075394203), KQU( 7512113882611121886), KQU( 4859973559980886617), KQU( 1258601555703250219), KQU(15594548157514316394), KQU( 4516730171963773048), KQU(11380103193905031983), KQU( 6809282239982353344), KQU(18045256930420065002), KQU( 2453702683108791859), KQU( 977214582986981460), KQU( 2006410402232713466), KQU( 6192236267216378358), KQU( 3429468402195675253), KQU(18146933153017348921), KQU(17369978576367231139), KQU( 1246940717230386603), KQU(11335758870083327110), KQU(14166488801730353682), KQU( 9008573127269635732), KQU(10776025389820643815), KQU(15087605441903942962), KQU( 1359542462712147922), KQU(13898874411226454206), KQU(17911176066536804411), KQU( 9435590428600085274), KQU( 294488509967864007), KQU( 8890111397567922046), KQU( 7987823476034328778), KQU(13263827582440967651), KQU( 7503774813106751573), KQU(14974747296185646837), KQU( 8504765037032103375), KQU(17340303357444536213), KQU( 7704610912964485743), KQU( 8107533670327205061), KQU( 9062969835083315985), KQU(16968963142126734184), KQU(12958041214190810180), KQU( 2720170147759570200), KQU( 2986358963942189566), KQU(14884226322219356580), KQU( 286224325144368520), KQU(11313800433154279797), KQU(18366849528439673248), KQU(17899725929482368789), KQU( 3730004284609106799), KQU( 1654474302052767205), KQU( 5006698007047077032), KQU( 8196893913601182838), KQU(15214541774425211640), KQU(17391346045606626073), KQU( 8369003584076969089), KQU( 3939046733368550293), KQU(10178639720308707785), KQU( 2180248669304388697), KQU( 62894391300126322), KQU( 9205708961736223191), KQU( 6837431058165360438), KQU( 3150743890848308214), KQU(17849330658111464583), KQU(12214815643135450865), KQU(13410713840519603402), 
KQU( 3200778126692046802), KQU(13354780043041779313), KQU( 800850022756886036), KQU(15660052933953067433), KQU( 6572823544154375676), KQU(11030281857015819266), KQU(12682241941471433835), KQU(11654136407300274693), KQU( 4517795492388641109), KQU( 9757017371504524244), KQU(17833043400781889277), KQU(12685085201747792227), KQU(10408057728835019573), KQU( 98370418513455221), KQU( 6732663555696848598), KQU(13248530959948529780), KQU( 3530441401230622826), KQU(18188251992895660615), KQU( 1847918354186383756), KQU( 1127392190402660921), KQU(11293734643143819463), KQU( 3015506344578682982), KQU(13852645444071153329), KQU( 2121359659091349142), KQU( 1294604376116677694), KQU( 5616576231286352318), KQU( 7112502442954235625), KQU(11676228199551561689), KQU(12925182803007305359), KQU( 7852375518160493082), KQU( 1136513130539296154), KQU( 5636923900916593195), KQU( 3221077517612607747), KQU(17784790465798152513), KQU( 3554210049056995938), KQU(17476839685878225874), KQU( 3206836372585575732), KQU( 2765333945644823430), KQU(10080070903718799528), KQU( 5412370818878286353), KQU( 9689685887726257728), KQU( 8236117509123533998), KQU( 1951139137165040214), KQU( 4492205209227980349), KQU(16541291230861602967), KQU( 1424371548301437940), KQU( 9117562079669206794), KQU(14374681563251691625), KQU(13873164030199921303), KQU( 6680317946770936731), KQU(15586334026918276214), KQU(10896213950976109802), KQU( 9506261949596413689), KQU( 9903949574308040616), KQU( 6038397344557204470), KQU( 174601465422373648), KQU(15946141191338238030), KQU(17142225620992044937), KQU( 7552030283784477064), KQU( 2947372384532947997), KQU( 510797021688197711), KQU( 4962499439249363461), KQU( 23770320158385357), KQU( 959774499105138124), KQU( 1468396011518788276), KQU( 2015698006852312308), KQU( 4149400718489980136), KQU( 5992916099522371188), KQU(10819182935265531076), KQU(16189787999192351131), KQU( 342833961790261950), KQU(12470830319550495336), KQU(18128495041912812501), KQU( 1193600899723524337), KQU( 9056793666590079770), KQU( 2154021227041669041), KQU( 4963570213951235735), KQU( 4865075960209211409), KQU( 2097724599039942963), KQU( 2024080278583179845), KQU(11527054549196576736), KQU(10650256084182390252), KQU( 4808408648695766755), KQU( 1642839215013788844), KQU(10607187948250398390), KQU( 7076868166085913508), KQU( 730522571106887032), KQU(12500579240208524895), KQU( 4484390097311355324), KQU(15145801330700623870), KQU( 8055827661392944028), KQU( 5865092976832712268), KQU(15159212508053625143), KQU( 3560964582876483341), KQU( 4070052741344438280), KQU( 6032585709886855634), KQU(15643262320904604873), KQU( 2565119772293371111), KQU( 318314293065348260), KQU(15047458749141511872), KQU( 7772788389811528730), KQU( 7081187494343801976), KQU( 6465136009467253947), KQU(10425940692543362069), KQU( 554608190318339115), KQU(14796699860302125214), KQU( 1638153134431111443), KQU(10336967447052276248), KQU( 8412308070396592958), KQU( 4004557277152051226), KQU( 8143598997278774834), KQU(16413323996508783221), KQU(13139418758033994949), KQU( 9772709138335006667), KQU( 2818167159287157659), KQU(17091740573832523669), KQU(14629199013130751608), KQU(18268322711500338185), KQU( 8290963415675493063), KQU( 8830864907452542588), KQU( 1614839084637494849), KQU(14855358500870422231), KQU( 3472996748392519937), KQU(15317151166268877716), KQU( 5825895018698400362), KQU(16730208429367544129), KQU(10481156578141202800), KQU( 4746166512382823750), KQU(12720876014472464998), KQU( 8825177124486735972), KQU(13733447296837467838), KQU( 6412293741681359625), 
KQU( 8313213138756135033), KQU(11421481194803712517), KQU( 7997007691544174032), KQU( 6812963847917605930), KQU( 9683091901227558641), KQU(14703594165860324713), KQU( 1775476144519618309), KQU( 2724283288516469519), KQU( 717642555185856868), KQU( 8736402192215092346), KQU(11878800336431381021), KQU( 4348816066017061293), KQU( 6115112756583631307), KQU( 9176597239667142976), KQU(12615622714894259204), KQU(10283406711301385987), KQU( 5111762509485379420), KQU( 3118290051198688449), KQU( 7345123071632232145), KQU( 9176423451688682359), KQU( 4843865456157868971), KQU(12008036363752566088), KQU(12058837181919397720), KQU( 2145073958457347366), KQU( 1526504881672818067), KQU( 3488830105567134848), KQU(13208362960674805143), KQU( 4077549672899572192), KQU( 7770995684693818365), KQU( 1398532341546313593), KQU(12711859908703927840), KQU( 1417561172594446813), KQU(17045191024194170604), KQU( 4101933177604931713), KQU(14708428834203480320), KQU(17447509264469407724), KQU(14314821973983434255), KQU(17990472271061617265), KQU( 5087756685841673942), KQU(12797820586893859939), KQU( 1778128952671092879), KQU( 3535918530508665898), KQU( 9035729701042481301), KQU(14808661568277079962), KQU(14587345077537747914), KQU(11920080002323122708), KQU( 6426515805197278753), KQU( 3295612216725984831), KQU(11040722532100876120), KQU(12305952936387598754), KQU(16097391899742004253), KQU( 4908537335606182208), KQU(12446674552196795504), KQU(16010497855816895177), KQU( 9194378874788615551), KQU( 3382957529567613384), KQU( 5154647600754974077), KQU( 9801822865328396141), KQU( 9023662173919288143), KQU(17623115353825147868), KQU( 8238115767443015816), KQU(15811444159859002560), KQU( 9085612528904059661), KQU( 6888601089398614254), KQU( 258252992894160189), KQU( 6704363880792428622), KQU( 6114966032147235763), KQU(11075393882690261875), KQU( 8797664238933620407), KQU( 5901892006476726920), KQU( 5309780159285518958), KQU(14940808387240817367), KQU(14642032021449656698), KQU( 9808256672068504139), KQU( 3670135111380607658), KQU(11211211097845960152), KQU( 1474304506716695808), KQU(15843166204506876239), KQU( 7661051252471780561), KQU(10170905502249418476), KQU( 7801416045582028589), KQU( 2763981484737053050), KQU( 9491377905499253054), KQU(16201395896336915095), KQU( 9256513756442782198), KQU( 5411283157972456034), KQU( 5059433122288321676), KQU( 4327408006721123357), KQU( 9278544078834433377), KQU( 7601527110882281612), KQU(11848295896975505251), KQU(12096998801094735560), KQU(14773480339823506413), KQU(15586227433895802149), KQU(12786541257830242872), KQU( 6904692985140503067), KQU( 5309011515263103959), KQU(12105257191179371066), KQU(14654380212442225037), KQU( 2556774974190695009), KQU( 4461297399927600261), KQU(14888225660915118646), KQU(14915459341148291824), KQU( 2738802166252327631), KQU( 6047155789239131512), KQU(12920545353217010338), KQU(10697617257007840205), KQU( 2751585253158203504), KQU(13252729159780047496), KQU(14700326134672815469), KQU(14082527904374600529), KQU(16852962273496542070), KQU(17446675504235853907), KQU(15019600398527572311), KQU(12312781346344081551), KQU(14524667935039810450), KQU( 5634005663377195738), KQU(11375574739525000569), KQU( 2423665396433260040), KQU( 5222836914796015410), KQU( 4397666386492647387), KQU( 4619294441691707638), KQU( 665088602354770716), KQU(13246495665281593610), KQU( 6564144270549729409), KQU(10223216188145661688), KQU( 3961556907299230585), KQU(11543262515492439914), KQU(16118031437285993790), KQU( 7143417964520166465), KQU(13295053515909486772), KQU( 
40434666004899675), KQU(17127804194038347164), KQU( 8599165966560586269), KQU( 8214016749011284903), KQU(13725130352140465239), KQU( 5467254474431726291), KQU( 7748584297438219877), KQU(16933551114829772472), KQU( 2169618439506799400), KQU( 2169787627665113463), KQU(17314493571267943764), KQU(18053575102911354912), KQU(11928303275378476973), KQU(11593850925061715550), KQU(17782269923473589362), KQU( 3280235307704747039), KQU( 6145343578598685149), KQU(17080117031114086090), KQU(18066839902983594755), KQU( 6517508430331020706), KQU( 8092908893950411541), KQU(12558378233386153732), KQU( 4476532167973132976), KQU(16081642430367025016), KQU( 4233154094369139361), KQU( 8693630486693161027), KQU(11244959343027742285), KQU(12273503967768513508), KQU(14108978636385284876), KQU( 7242414665378826984), KQU( 6561316938846562432), KQU( 8601038474994665795), KQU(17532942353612365904), KQU(17940076637020912186), KQU( 7340260368823171304), KQU( 7061807613916067905), KQU(10561734935039519326), KQU(17990796503724650862), KQU( 6208732943911827159), KQU( 359077562804090617), KQU(14177751537784403113), KQU(10659599444915362902), KQU(15081727220615085833), KQU(13417573895659757486), KQU(15513842342017811524), KQU(11814141516204288231), KQU( 1827312513875101814), KQU( 2804611699894603103), KQU(17116500469975602763), KQU(12270191815211952087), KQU(12256358467786024988), KQU(18435021722453971267), KQU( 671330264390865618), KQU( 476504300460286050), KQU(16465470901027093441), KQU( 4047724406247136402), KQU( 1322305451411883346), KQU( 1388308688834322280), KQU( 7303989085269758176), KQU( 9323792664765233642), KQU( 4542762575316368936), KQU(17342696132794337618), KQU( 4588025054768498379), KQU(13415475057390330804), KQU(17880279491733405570), KQU(10610553400618620353), KQU( 3180842072658960139), KQU(13002966655454270120), KQU( 1665301181064982826), KQU( 7083673946791258979), KQU( 190522247122496820), KQU(17388280237250677740), KQU( 8430770379923642945), KQU(12987180971921668584), KQU( 2311086108365390642), KQU( 2870984383579822345), KQU(14014682609164653318), KQU(14467187293062251484), KQU( 192186361147413298), KQU(15171951713531796524), KQU( 9900305495015948728), KQU(17958004775615466344), KQU(14346380954498606514), KQU(18040047357617407096), KQU( 5035237584833424532), KQU(15089555460613972287), KQU( 4131411873749729831), KQU( 1329013581168250330), KQU(10095353333051193949), KQU(10749518561022462716), KQU( 9050611429810755847), KQU(15022028840236655649), KQU( 8775554279239748298), KQU(13105754025489230502), KQU(15471300118574167585), KQU( 89864764002355628), KQU( 8776416323420466637), KQU( 5280258630612040891), KQU( 2719174488591862912), KQU( 7599309137399661994), KQU(15012887256778039979), KQU(14062981725630928925), KQU(12038536286991689603), KQU( 7089756544681775245), KQU(10376661532744718039), KQU( 1265198725901533130), KQU(13807996727081142408), KQU( 2935019626765036403), KQU( 7651672460680700141), KQU( 3644093016200370795), KQU( 2840982578090080674), KQU(17956262740157449201), KQU(18267979450492880548), KQU(11799503659796848070), KQU( 9942537025669672388), KQU(11886606816406990297), KQU( 5488594946437447576), KQU( 7226714353282744302), KQU( 3784851653123877043), KQU( 878018453244803041), KQU(12110022586268616085), KQU( 734072179404675123), KQU(11869573627998248542), KQU( 469150421297783998), KQU( 260151124912803804), KQU(11639179410120968649), KQU( 9318165193840846253), KQU(12795671722734758075), KQU(15318410297267253933), KQU( 691524703570062620), KQU( 5837129010576994601), KQU(15045963859726941052), KQU( 
5850056944932238169), KQU(12017434144750943807), KQU( 7447139064928956574), KQU( 3101711812658245019), KQU(16052940704474982954), KQU(18195745945986994042), KQU( 8932252132785575659), KQU(13390817488106794834), KQU(11582771836502517453), KQU( 4964411326683611686), KQU( 2195093981702694011), KQU(14145229538389675669), KQU(16459605532062271798), KQU( 866316924816482864), KQU( 4593041209937286377), KQU( 8415491391910972138), KQU( 4171236715600528969), KQU(16637569303336782889), KQU( 2002011073439212680), KQU(17695124661097601411), KQU( 4627687053598611702), KQU( 7895831936020190403), KQU( 8455951300917267802), KQU( 2923861649108534854), KQU( 8344557563927786255), KQU( 6408671940373352556), KQU(12210227354536675772), KQU(14294804157294222295), KQU(10103022425071085127), KQU(10092959489504123771), KQU( 6554774405376736268), KQU(12629917718410641774), KQU( 6260933257596067126), KQU( 2460827021439369673), KQU( 2541962996717103668), KQU( 597377203127351475), KQU( 5316984203117315309), KQU( 4811211393563241961), KQU(13119698597255811641), KQU( 8048691512862388981), KQU(10216818971194073842), KQU( 4612229970165291764), KQU(10000980798419974770), KQU( 6877640812402540687), KQU( 1488727563290436992), KQU( 2227774069895697318), KQU(11237754507523316593), KQU(13478948605382290972), KQU( 1963583846976858124), KQU( 5512309205269276457), KQU( 3972770164717652347), KQU( 3841751276198975037), KQU(10283343042181903117), KQU( 8564001259792872199), KQU(16472187244722489221), KQU( 8953493499268945921), KQU( 3518747340357279580), KQU( 4003157546223963073), KQU( 3270305958289814590), KQU( 3966704458129482496), KQU( 8122141865926661939), KQU(14627734748099506653), KQU(13064426990862560568), KQU( 2414079187889870829), KQU( 5378461209354225306), KQU(10841985740128255566), KQU( 538582442885401738), KQU( 7535089183482905946), KQU(16117559957598879095), KQU( 8477890721414539741), KQU( 1459127491209533386), KQU(17035126360733620462), KQU( 8517668552872379126), KQU(10292151468337355014), KQU(17081267732745344157), KQU(13751455337946087178), KQU(14026945459523832966), KQU( 6653278775061723516), KQU(10619085543856390441), KQU( 2196343631481122885), KQU(10045966074702826136), KQU(10082317330452718282), KQU( 5920859259504831242), KQU( 9951879073426540617), KQU( 7074696649151414158), KQU(15808193543879464318), KQU( 7385247772746953374), KQU( 3192003544283864292), KQU(18153684490917593847), KQU(12423498260668568905), KQU(10957758099756378169), KQU(11488762179911016040), KQU( 2099931186465333782), KQU(11180979581250294432), KQU( 8098916250668367933), KQU( 3529200436790763465), KQU(12988418908674681745), KQU( 6147567275954808580), KQU( 3207503344604030989), KQU(10761592604898615360), KQU( 229854861031893504), KQU( 8809853962667144291), KQU(13957364469005693860), KQU( 7634287665224495886), KQU(12353487366976556874), KQU( 1134423796317152034), KQU( 2088992471334107068), KQU( 7393372127190799698), KQU( 1845367839871058391), KQU( 207922563987322884), KQU(11960870813159944976), KQU(12182120053317317363), KQU(17307358132571709283), KQU(13871081155552824936), KQU(18304446751741566262), KQU( 7178705220184302849), KQU(10929605677758824425), KQU(16446976977835806844), KQU(13723874412159769044), KQU( 6942854352100915216), KQU( 1726308474365729390), KQU( 2150078766445323155), KQU(15345558947919656626), KQU(12145453828874527201), KQU( 2054448620739726849), KQU( 2740102003352628137), KQU(11294462163577610655), KQU( 756164283387413743), KQU(17841144758438810880), KQU(10802406021185415861), KQU( 8716455530476737846), KQU( 6321788834517649606), 
KQU(14681322910577468426), KQU(17330043563884336387), KQU(12701802180050071614), KQU(14695105111079727151), KQU( 5112098511654172830), KQU( 4957505496794139973), KQU( 8270979451952045982), KQU(12307685939199120969), KQU(12425799408953443032), KQU( 8376410143634796588), KQU(16621778679680060464), KQU( 3580497854566660073), KQU( 1122515747803382416), KQU( 857664980960597599), KQU( 6343640119895925918), KQU(12878473260854462891), KQU(10036813920765722626), KQU(14451335468363173812), KQU( 5476809692401102807), KQU(16442255173514366342), KQU(13060203194757167104), KQU(14354124071243177715), KQU(15961249405696125227), KQU(13703893649690872584), KQU( 363907326340340064), KQU( 6247455540491754842), KQU(12242249332757832361), KQU( 156065475679796717), KQU( 9351116235749732355), KQU( 4590350628677701405), KQU( 1671195940982350389), KQU(13501398458898451905), KQU( 6526341991225002255), KQU( 1689782913778157592), KQU( 7439222350869010334), KQU(13975150263226478308), KQU(11411961169932682710), KQU(17204271834833847277), KQU( 541534742544435367), KQU( 6591191931218949684), KQU( 2645454775478232486), KQU( 4322857481256485321), KQU( 8477416487553065110), KQU(12902505428548435048), KQU( 971445777981341415), KQU(14995104682744976712), KQU( 4243341648807158063), KQU( 8695061252721927661), KQU( 5028202003270177222), KQU( 2289257340915567840), KQU(13870416345121866007), KQU(13994481698072092233), KQU( 6912785400753196481), KQU( 2278309315841980139), KQU( 4329765449648304839), KQU( 5963108095785485298), KQU( 4880024847478722478), KQU(16015608779890240947), KQU( 1866679034261393544), KQU( 914821179919731519), KQU( 9643404035648760131), KQU( 2418114953615593915), KQU( 944756836073702374), KQU(15186388048737296834), KQU( 7723355336128442206), KQU( 7500747479679599691), KQU(18013961306453293634), KQU( 2315274808095756456), KQU(13655308255424029566), KQU(17203800273561677098), KQU( 1382158694422087756), KQU( 5090390250309588976), KQU( 517170818384213989), KQU( 1612709252627729621), KQU( 1330118955572449606), KQU( 300922478056709885), KQU(18115693291289091987), KQU(13491407109725238321), KQU(15293714633593827320), KQU( 5151539373053314504), KQU( 5951523243743139207), KQU(14459112015249527975), KQU( 5456113959000700739), KQU( 3877918438464873016), KQU(12534071654260163555), KQU(15871678376893555041), KQU(11005484805712025549), KQU(16353066973143374252), KQU( 4358331472063256685), KQU( 8268349332210859288), KQU(12485161590939658075), KQU(13955993592854471343), KQU( 5911446886848367039), KQU(14925834086813706974), KQU( 6590362597857994805), KQU( 1280544923533661875), KQU( 1637756018947988164), KQU( 4734090064512686329), KQU(16693705263131485912), KQU( 6834882340494360958), KQU( 8120732176159658505), KQU( 2244371958905329346), KQU(10447499707729734021), KQU( 7318742361446942194), KQU( 8032857516355555296), KQU(14023605983059313116), KQU( 1032336061815461376), KQU( 9840995337876562612), KQU( 9869256223029203587), KQU(12227975697177267636), KQU(12728115115844186033), KQU( 7752058479783205470), KQU( 729733219713393087), KQU(12954017801239007622) }; static const uint64_t init_by_array_64_expected[] = { KQU( 2100341266307895239), KQU( 8344256300489757943), KQU(15687933285484243894), KQU( 8268620370277076319), KQU(12371852309826545459), KQU( 8800491541730110238), KQU(18113268950100835773), KQU( 2886823658884438119), KQU( 3293667307248180724), KQU( 9307928143300172731), KQU( 7688082017574293629), KQU( 900986224735166665), KQU( 9977972710722265039), KQU( 6008205004994830552), KQU( 546909104521689292), KQU( 7428471521869107594), 
KQU(14777563419314721179), KQU(16116143076567350053), KQU( 5322685342003142329), KQU( 4200427048445863473), KQU( 4693092150132559146), KQU(13671425863759338582), KQU( 6747117460737639916), KQU( 4732666080236551150), KQU( 5912839950611941263), KQU( 3903717554504704909), KQU( 2615667650256786818), KQU(10844129913887006352), KQU(13786467861810997820), KQU(14267853002994021570), KQU(13767807302847237439), KQU(16407963253707224617), KQU( 4802498363698583497), KQU( 2523802839317209764), KQU( 3822579397797475589), KQU( 8950320572212130610), KQU( 3745623504978342534), KQU(16092609066068482806), KQU( 9817016950274642398), KQU(10591660660323829098), KQU(11751606650792815920), KQU( 5122873818577122211), KQU(17209553764913936624), KQU( 6249057709284380343), KQU(15088791264695071830), KQU(15344673071709851930), KQU( 4345751415293646084), KQU( 2542865750703067928), KQU(13520525127852368784), KQU(18294188662880997241), KQU( 3871781938044881523), KQU( 2873487268122812184), KQU(15099676759482679005), KQU(15442599127239350490), KQU( 6311893274367710888), KQU( 3286118760484672933), KQU( 4146067961333542189), KQU(13303942567897208770), KQU( 8196013722255630418), KQU( 4437815439340979989), KQU(15433791533450605135), KQU( 4254828956815687049), KQU( 1310903207708286015), KQU(10529182764462398549), KQU(14900231311660638810), KQU( 9727017277104609793), KQU( 1821308310948199033), KQU(11628861435066772084), KQU( 9469019138491546924), KQU( 3145812670532604988), KQU( 9938468915045491919), KQU( 1562447430672662142), KQU(13963995266697989134), KQU( 3356884357625028695), KQU( 4499850304584309747), KQU( 8456825817023658122), KQU(10859039922814285279), KQU( 8099512337972526555), KQU( 348006375109672149), KQU(11919893998241688603), KQU( 1104199577402948826), KQU(16689191854356060289), KQU(10992552041730168078), KQU( 7243733172705465836), KQU( 5668075606180319560), KQU(18182847037333286970), KQU( 4290215357664631322), KQU( 4061414220791828613), KQU(13006291061652989604), KQU( 7140491178917128798), KQU(12703446217663283481), KQU( 5500220597564558267), KQU(10330551509971296358), KQU(15958554768648714492), KQU( 5174555954515360045), KQU( 1731318837687577735), KQU( 3557700801048354857), KQU(13764012341928616198), KQU(13115166194379119043), KQU( 7989321021560255519), KQU( 2103584280905877040), KQU( 9230788662155228488), KQU(16396629323325547654), KQU( 657926409811318051), KQU(15046700264391400727), KQU( 5120132858771880830), KQU( 7934160097989028561), KQU( 6963121488531976245), KQU(17412329602621742089), KQU(15144843053931774092), KQU(17204176651763054532), KQU(13166595387554065870), KQU( 8590377810513960213), KQU( 5834365135373991938), KQU( 7640913007182226243), KQU( 3479394703859418425), KQU(16402784452644521040), KQU( 4993979809687083980), KQU(13254522168097688865), KQU(15643659095244365219), KQU( 5881437660538424982), KQU(11174892200618987379), KQU( 254409966159711077), KQU(17158413043140549909), KQU( 3638048789290376272), KQU( 1376816930299489190), KQU( 4622462095217761923), KQU(15086407973010263515), KQU(13253971772784692238), KQU( 5270549043541649236), KQU(11182714186805411604), KQU(12283846437495577140), KQU( 5297647149908953219), KQU(10047451738316836654), KQU( 4938228100367874746), KQU(12328523025304077923), KQU( 3601049438595312361), KQU( 9313624118352733770), KQU(13322966086117661798), KQU(16660005705644029394), KQU(11337677526988872373), KQU(13869299102574417795), KQU(15642043183045645437), KQU( 3021755569085880019), KQU( 4979741767761188161), KQU(13679979092079279587), KQU( 3344685842861071743), 
KQU(13947960059899588104), KQU( 305806934293368007), KQU( 5749173929201650029), KQU(11123724852118844098), KQU(15128987688788879802), KQU(15251651211024665009), KQU( 7689925933816577776), KQU(16732804392695859449), KQU(17087345401014078468), KQU(14315108589159048871), KQU( 4820700266619778917), KQU(16709637539357958441), KQU( 4936227875177351374), KQU( 2137907697912987247), KQU(11628565601408395420), KQU( 2333250549241556786), KQU( 5711200379577778637), KQU( 5170680131529031729), KQU(12620392043061335164), KQU( 95363390101096078), KQU( 5487981914081709462), KQU( 1763109823981838620), KQU( 3395861271473224396), KQU( 1300496844282213595), KQU( 6894316212820232902), KQU(10673859651135576674), KQU( 5911839658857903252), KQU(17407110743387299102), KQU( 8257427154623140385), KQU(11389003026741800267), KQU( 4070043211095013717), KQU(11663806997145259025), KQU(15265598950648798210), KQU( 630585789434030934), KQU( 3524446529213587334), KQU( 7186424168495184211), KQU(10806585451386379021), KQU(11120017753500499273), KQU( 1586837651387701301), KQU(17530454400954415544), KQU( 9991670045077880430), KQU( 7550997268990730180), KQU( 8640249196597379304), KQU( 3522203892786893823), KQU(10401116549878854788), KQU(13690285544733124852), KQU( 8295785675455774586), KQU(15535716172155117603), KQU( 3112108583723722511), KQU(17633179955339271113), KQU(18154208056063759375), KQU( 1866409236285815666), KQU(13326075895396412882), KQU( 8756261842948020025), KQU( 6281852999868439131), KQU(15087653361275292858), KQU(10333923911152949397), KQU( 5265567645757408500), KQU(12728041843210352184), KQU( 6347959327507828759), KQU( 154112802625564758), KQU(18235228308679780218), KQU( 3253805274673352418), KQU( 4849171610689031197), KQU(17948529398340432518), KQU(13803510475637409167), KQU(13506570190409883095), KQU(15870801273282960805), KQU( 8451286481299170773), KQU( 9562190620034457541), KQU( 8518905387449138364), KQU(12681306401363385655), KQU( 3788073690559762558), KQU( 5256820289573487769), KQU( 2752021372314875467), KQU( 6354035166862520716), KQU( 4328956378309739069), KQU( 449087441228269600), KQU( 5533508742653090868), KQU( 1260389420404746988), KQU(18175394473289055097), KQU( 1535467109660399420), KQU( 8818894282874061442), KQU(12140873243824811213), KQU(15031386653823014946), KQU( 1286028221456149232), KQU( 6329608889367858784), KQU( 9419654354945132725), KQU( 6094576547061672379), KQU(17706217251847450255), KQU( 1733495073065878126), KQU(16918923754607552663), KQU( 8881949849954945044), KQU(12938977706896313891), KQU(14043628638299793407), KQU(18393874581723718233), KQU( 6886318534846892044), KQU(14577870878038334081), KQU(13541558383439414119), KQU(13570472158807588273), KQU(18300760537910283361), KQU( 818368572800609205), KQU( 1417000585112573219), KQU(12337533143867683655), KQU(12433180994702314480), KQU( 778190005829189083), KQU(13667356216206524711), KQU( 9866149895295225230), KQU(11043240490417111999), KQU( 1123933826541378598), KQU( 6469631933605123610), KQU(14508554074431980040), KQU(13918931242962026714), KQU( 2870785929342348285), KQU(14786362626740736974), KQU(13176680060902695786), KQU( 9591778613541679456), KQU( 9097662885117436706), KQU( 749262234240924947), KQU( 1944844067793307093), KQU( 4339214904577487742), KQU( 8009584152961946551), KQU(16073159501225501777), KQU( 3335870590499306217), KQU(17088312653151202847), KQU( 3108893142681931848), KQU(16636841767202792021), KQU(10423316431118400637), KQU( 8008357368674443506), KQU(11340015231914677875), KQU(17687896501594936090), KQU(15173627921763199958), 
KQU( 542569482243721959), KQU(15071714982769812975), KQU( 4466624872151386956), KQU( 1901780715602332461), KQU( 9822227742154351098), KQU( 1479332892928648780), KQU( 6981611948382474400), KQU( 7620824924456077376), KQU(14095973329429406782), KQU( 7902744005696185404), KQU(15830577219375036920), KQU(10287076667317764416), KQU(12334872764071724025), KQU( 4419302088133544331), KQU(14455842851266090520), KQU(12488077416504654222), KQU( 7953892017701886766), KQU( 6331484925529519007), KQU( 4902145853785030022), KQU(17010159216096443073), KQU(11945354668653886087), KQU(15112022728645230829), KQU(17363484484522986742), KQU( 4423497825896692887), KQU( 8155489510809067471), KQU( 258966605622576285), KQU( 5462958075742020534), KQU( 6763710214913276228), KQU( 2368935183451109054), KQU(14209506165246453811), KQU( 2646257040978514881), KQU( 3776001911922207672), KQU( 1419304601390147631), KQU(14987366598022458284), KQU( 3977770701065815721), KQU( 730820417451838898), KQU( 3982991703612885327), KQU( 2803544519671388477), KQU(17067667221114424649), KQU( 2922555119737867166), KQU( 1989477584121460932), KQU(15020387605892337354), KQU( 9293277796427533547), KQU(10722181424063557247), KQU(16704542332047511651), KQU( 5008286236142089514), KQU(16174732308747382540), KQU(17597019485798338402), KQU(13081745199110622093), KQU( 8850305883842258115), KQU(12723629125624589005), KQU( 8140566453402805978), KQU(15356684607680935061), KQU(14222190387342648650), KQU(11134610460665975178), KQU( 1259799058620984266), KQU(13281656268025610041), KQU( 298262561068153992), KQU(12277871700239212922), KQU(13911297774719779438), KQU(16556727962761474934), KQU(17903010316654728010), KQU( 9682617699648434744), KQU(14757681836838592850), KQU( 1327242446558524473), KQU(11126645098780572792), KQU( 1883602329313221774), KQU( 2543897783922776873), KQU(15029168513767772842), KQU(12710270651039129878), KQU(16118202956069604504), KQU(15010759372168680524), KQU( 2296827082251923948), KQU(10793729742623518101), KQU(13829764151845413046), KQU(17769301223184451213), KQU( 3118268169210783372), KQU(17626204544105123127), KQU( 7416718488974352644), KQU(10450751996212925994), KQU( 9352529519128770586), KQU( 259347569641110140), KQU( 8048588892269692697), KQU( 1774414152306494058), KQU(10669548347214355622), KQU(13061992253816795081), KQU(18432677803063861659), KQU( 8879191055593984333), KQU(12433753195199268041), KQU(14919392415439730602), KQU( 6612848378595332963), KQU( 6320986812036143628), KQU(10465592420226092859), KQU( 4196009278962570808), KQU( 3747816564473572224), KQU(17941203486133732898), KQU( 2350310037040505198), KQU( 5811779859134370113), KQU(10492109599506195126), KQU( 7699650690179541274), KQU( 1954338494306022961), KQU(14095816969027231152), KQU( 5841346919964852061), KQU(14945969510148214735), KQU( 3680200305887550992), KQU( 6218047466131695792), KQU( 8242165745175775096), KQU(11021371934053307357), KQU( 1265099502753169797), KQU( 4644347436111321718), KQU( 3609296916782832859), KQU( 8109807992218521571), KQU(18387884215648662020), KQU(14656324896296392902), KQU(17386819091238216751), KQU(17788300878582317152), KQU( 7919446259742399591), KQU( 4466613134576358004), KQU(12928181023667938509), KQU(13147446154454932030), KQU(16552129038252734620), KQU( 8395299403738822450), KQU(11313817655275361164), KQU( 434258809499511718), KQU( 2074882104954788676), KQU( 7929892178759395518), KQU( 9006461629105745388), KQU( 5176475650000323086), KQU(11128357033468341069), KQU(12026158851559118955), KQU(14699716249471156500), KQU( 
448982497120206757), KQU( 4156475356685519900), KQU( 6063816103417215727), KQU(10073289387954971479), KQU( 8174466846138590962), KQU( 2675777452363449006), KQU( 9090685420572474281), KQU( 6659652652765562060), KQU(12923120304018106621), KQU(11117480560334526775), KQU( 937910473424587511), KQU( 1838692113502346645), KQU(11133914074648726180), KQU( 7922600945143884053), KQU(13435287702700959550), KQU( 5287964921251123332), KQU(11354875374575318947), KQU(17955724760748238133), KQU(13728617396297106512), KQU( 4107449660118101255), KQU( 1210269794886589623), KQU(11408687205733456282), KQU( 4538354710392677887), KQU(13566803319341319267), KQU(17870798107734050771), KQU( 3354318982568089135), KQU( 9034450839405133651), KQU(13087431795753424314), KQU( 950333102820688239), KQU( 1968360654535604116), KQU(16840551645563314995), KQU( 8867501803892924995), KQU(11395388644490626845), KQU( 1529815836300732204), KQU(13330848522996608842), KQU( 1813432878817504265), KQU( 2336867432693429560), KQU(15192805445973385902), KQU( 2528593071076407877), KQU( 128459777936689248), KQU( 9976345382867214866), KQU( 6208885766767996043), KQU(14982349522273141706), KQU( 3099654362410737822), KQU(13776700761947297661), KQU( 8806185470684925550), KQU( 8151717890410585321), KQU( 640860591588072925), KQU(14592096303937307465), KQU( 9056472419613564846), KQU(14861544647742266352), KQU(12703771500398470216), KQU( 3142372800384138465), KQU( 6201105606917248196), KQU(18337516409359270184), KQU(15042268695665115339), KQU(15188246541383283846), KQU(12800028693090114519), KQU( 5992859621101493472), KQU(18278043971816803521), KQU( 9002773075219424560), KQU( 7325707116943598353), KQU( 7930571931248040822), KQU( 5645275869617023448), KQU( 7266107455295958487), KQU( 4363664528273524411), KQU(14313875763787479809), KQU(17059695613553486802), KQU( 9247761425889940932), KQU(13704726459237593128), KQU( 2701312427328909832), KQU(17235532008287243115), KQU(14093147761491729538), KQU( 6247352273768386516), KQU( 8268710048153268415), KQU( 7985295214477182083), KQU(15624495190888896807), KQU( 3772753430045262788), KQU( 9133991620474991698), KQU( 5665791943316256028), KQU( 7551996832462193473), KQU(13163729206798953877), KQU( 9263532074153846374), KQU( 1015460703698618353), KQU(17929874696989519390), KQU(18257884721466153847), KQU(16271867543011222991), KQU( 3905971519021791941), KQU(16814488397137052085), KQU( 1321197685504621613), KQU( 2870359191894002181), KQU(14317282970323395450), KQU(13663920845511074366), KQU( 2052463995796539594), KQU(14126345686431444337), KQU( 1727572121947022534), KQU(17793552254485594241), KQU( 6738857418849205750), KQU( 1282987123157442952), KQU(16655480021581159251), KQU( 6784587032080183866), KQU(14726758805359965162), KQU( 7577995933961987349), KQU(12539609320311114036), KQU(10789773033385439494), KQU( 8517001497411158227), KQU(10075543932136339710), KQU(14838152340938811081), KQU( 9560840631794044194), KQU(17445736541454117475), KQU(10633026464336393186), KQU(15705729708242246293), KQU( 1117517596891411098), KQU( 4305657943415886942), KQU( 4948856840533979263), KQU(16071681989041789593), KQU(13723031429272486527), KQU( 7639567622306509462), KQU(12670424537483090390), KQU( 9715223453097197134), KQU( 5457173389992686394), KQU( 289857129276135145), KQU(17048610270521972512), KQU( 692768013309835485), KQU(14823232360546632057), KQU(18218002361317895936), KQU( 3281724260212650204), KQU(16453957266549513795), KQU( 8592711109774511881), KQU( 929825123473369579), KQU(15966784769764367791), KQU( 9627344291450607588), 
KQU(10849555504977813287), KQU( 9234566913936339275), KQU( 6413807690366911210), KQU(10862389016184219267), KQU(13842504799335374048), KQU( 1531994113376881174), KQU( 2081314867544364459), KQU(16430628791616959932), KQU( 8314714038654394368), KQU( 9155473892098431813), KQU(12577843786670475704), KQU( 4399161106452401017), KQU( 1668083091682623186), KQU( 1741383777203714216), KQU( 2162597285417794374), KQU(15841980159165218736), KQU( 1971354603551467079), KQU( 1206714764913205968), KQU( 4790860439591272330), KQU(14699375615594055799), KQU( 8374423871657449988), KQU(10950685736472937738), KQU( 697344331343267176), KQU(10084998763118059810), KQU(12897369539795983124), KQU(12351260292144383605), KQU( 1268810970176811234), KQU( 7406287800414582768), KQU( 516169557043807831), KQU( 5077568278710520380), KQU( 3828791738309039304), KQU( 7721974069946943610), KQU( 3534670260981096460), KQU( 4865792189600584891), KQU(16892578493734337298), KQU( 9161499464278042590), KQU(11976149624067055931), KQU(13219479887277343990), KQU(14161556738111500680), KQU(14670715255011223056), KQU( 4671205678403576558), KQU(12633022931454259781), KQU(14821376219869187646), KQU( 751181776484317028), KQU( 2192211308839047070), KQU(11787306362361245189), KQU(10672375120744095707), KQU( 4601972328345244467), KQU(15457217788831125879), KQU( 8464345256775460809), KQU(10191938789487159478), KQU( 6184348739615197613), KQU(11425436778806882100), KQU( 2739227089124319793), KQU( 461464518456000551), KQU( 4689850170029177442), KQU( 6120307814374078625), KQU(11153579230681708671), KQU( 7891721473905347926), KQU(10281646937824872400), KQU( 3026099648191332248), KQU( 8666750296953273818), KQU(14978499698844363232), KQU(13303395102890132065), KQU( 8182358205292864080), KQU(10560547713972971291), KQU(11981635489418959093), KQU( 3134621354935288409), KQU(11580681977404383968), KQU(14205530317404088650), KQU( 5997789011854923157), KQU(13659151593432238041), KQU(11664332114338865086), KQU( 7490351383220929386), KQU( 7189290499881530378), KQU(15039262734271020220), KQU( 2057217285976980055), KQU( 555570804905355739), KQU(11235311968348555110), KQU(13824557146269603217), KQU(16906788840653099693), KQU( 7222878245455661677), KQU( 5245139444332423756), KQU( 4723748462805674292), KQU(12216509815698568612), KQU(17402362976648951187), KQU(17389614836810366768), KQU( 4880936484146667711), KQU( 9085007839292639880), KQU(13837353458498535449), KQU(11914419854360366677), KQU(16595890135313864103), KQU( 6313969847197627222), KQU(18296909792163910431), KQU(10041780113382084042), KQU( 2499478551172884794), KQU(11057894246241189489), KQU( 9742243032389068555), KQU(12838934582673196228), KQU(13437023235248490367), KQU(13372420669446163240), KQU( 6752564244716909224), KQU( 7157333073400313737), KQU(12230281516370654308), KQU( 1182884552219419117), KQU( 2955125381312499218), KQU(10308827097079443249), KQU( 1337648572986534958), KQU(16378788590020343939), KQU( 108619126514420935), KQU( 3990981009621629188), KQU( 5460953070230946410), KQU( 9703328329366531883), KQU(13166631489188077236), KQU( 1104768831213675170), KQU( 3447930458553877908), KQU( 8067172487769945676), KQU( 5445802098190775347), KQU( 3244840981648973873), KQU(17314668322981950060), KQU( 5006812527827763807), KQU(18158695070225526260), KQU( 2824536478852417853), KQU(13974775809127519886), KQU( 9814362769074067392), KQU(17276205156374862128), KQU(11361680725379306967), KQU( 3422581970382012542), KQU(11003189603753241266), KQU(11194292945277862261), KQU( 6839623313908521348), 
KQU(11935326462707324634), KQU( 1611456788685878444), KQU(13112620989475558907), KQU( 517659108904450427), KQU(13558114318574407624), KQU(15699089742731633077), KQU( 4988979278862685458), KQU( 8111373583056521297), KQU( 3891258746615399627), KQU( 8137298251469718086), KQU(12748663295624701649), KQU( 4389835683495292062), KQU( 5775217872128831729), KQU( 9462091896405534927), KQU( 8498124108820263989), KQU( 8059131278842839525), KQU(10503167994254090892), KQU(11613153541070396656), KQU(18069248738504647790), KQU( 570657419109768508), KQU( 3950574167771159665), KQU( 5514655599604313077), KQU( 2908460854428484165), KQU(10777722615935663114), KQU(12007363304839279486), KQU( 9800646187569484767), KQU( 8795423564889864287), KQU(14257396680131028419), KQU( 6405465117315096498), KQU( 7939411072208774878), KQU(17577572378528990006), KQU(14785873806715994850), KQU(16770572680854747390), KQU(18127549474419396481), KQU(11637013449455757750), KQU(14371851933996761086), KQU( 3601181063650110280), KQU( 4126442845019316144), KQU(10198287239244320669), KQU(18000169628555379659), KQU(18392482400739978269), KQU( 6219919037686919957), KQU( 3610085377719446052), KQU( 2513925039981776336), KQU(16679413537926716955), KQU(12903302131714909434), KQU( 5581145789762985009), KQU(12325955044293303233), KQU(17216111180742141204), KQU( 6321919595276545740), KQU( 3507521147216174501), KQU( 9659194593319481840), KQU(11473976005975358326), KQU(14742730101435987026), KQU( 492845897709954780), KQU(16976371186162599676), KQU(17712703422837648655), KQU( 9881254778587061697), KQU( 8413223156302299551), KQU( 1563841828254089168), KQU( 9996032758786671975), KQU( 138877700583772667), KQU(13003043368574995989), KQU( 4390573668650456587), KQU( 8610287390568126755), KQU(15126904974266642199), KQU( 6703637238986057662), KQU( 2873075592956810157), KQU( 6035080933946049418), KQU(13382846581202353014), KQU( 7303971031814642463), KQU(18418024405307444267), KQU( 5847096731675404647), KQU( 4035880699639842500), KQU(11525348625112218478), KQU( 3041162365459574102), KQU( 2604734487727986558), KQU(15526341771636983145), KQU(14556052310697370254), KQU(12997787077930808155), KQU( 9601806501755554499), KQU(11349677952521423389), KQU(14956777807644899350), KQU(16559736957742852721), KQU(12360828274778140726), KQU( 6685373272009662513), KQU(16932258748055324130), KQU(15918051131954158508), KQU( 1692312913140790144), KQU( 546653826801637367), KQU( 5341587076045986652), KQU(14975057236342585662), KQU(12374976357340622412), KQU(10328833995181940552), KQU(12831807101710443149), KQU(10548514914382545716), KQU( 2217806727199715993), KQU(12627067369242845138), KQU( 4598965364035438158), KQU( 150923352751318171), KQU(14274109544442257283), KQU( 4696661475093863031), KQU( 1505764114384654516), KQU(10699185831891495147), KQU( 2392353847713620519), KQU( 3652870166711788383), KQU( 8640653276221911108), KQU( 3894077592275889704), KQU( 4918592872135964845), KQU(16379121273281400789), KQU(12058465483591683656), KQU(11250106829302924945), KQU( 1147537556296983005), KQU( 6376342756004613268), KQU(14967128191709280506), KQU(18007449949790627628), KQU( 9497178279316537841), KQU( 7920174844809394893), KQU(10037752595255719907), KQU(15875342784985217697), KQU(15311615921712850696), KQU( 9552902652110992950), KQU(14054979450099721140), KQU( 5998709773566417349), KQU(18027910339276320187), KQU( 8223099053868585554), KQU( 7842270354824999767), KQU( 4896315688770080292), KQU(12969320296569787895), KQU( 2674321489185759961), KQU( 4053615936864718439), 
KQU(11349775270588617578), KQU( 4743019256284553975), KQU( 5602100217469723769), KQU(14398995691411527813), KQU( 7412170493796825470), KQU( 836262406131744846), KQU( 8231086633845153022), KQU( 5161377920438552287), KQU( 8828731196169924949), KQU(16211142246465502680), KQU( 3307990879253687818), KQU( 5193405406899782022), KQU( 8510842117467566693), KQU( 6070955181022405365), KQU(14482950231361409799), KQU(12585159371331138077), KQU( 3511537678933588148), KQU( 2041849474531116417), KQU(10944936685095345792), KQU(18303116923079107729), KQU( 2720566371239725320), KQU( 4958672473562397622), KQU( 3032326668253243412), KQU(13689418691726908338), KQU( 1895205511728843996), KQU( 8146303515271990527), KQU(16507343500056113480), KQU( 473996939105902919), KQU( 9897686885246881481), KQU(14606433762712790575), KQU( 6732796251605566368), KQU( 1399778120855368916), KQU( 935023885182833777), KQU(16066282816186753477), KQU( 7291270991820612055), KQU(17530230393129853844), KQU(10223493623477451366), KQU(15841725630495676683), KQU(17379567246435515824), KQU( 8588251429375561971), KQU(18339511210887206423), KQU(17349587430725976100), KQU(12244876521394838088), KQU( 6382187714147161259), KQU(12335807181848950831), KQU(16948885622305460665), KQU(13755097796371520506), KQU(14806740373324947801), KQU( 4828699633859287703), KQU( 8209879281452301604), KQU(12435716669553736437), KQU(13970976859588452131), KQU( 6233960842566773148), KQU(12507096267900505759), KQU( 1198713114381279421), KQU(14989862731124149015), KQU(15932189508707978949), KQU( 2526406641432708722), KQU( 29187427817271982), KQU( 1499802773054556353), KQU(10816638187021897173), KQU( 5436139270839738132), KQU( 6659882287036010082), KQU( 2154048955317173697), KQU(10887317019333757642), KQU(16281091802634424955), KQU(10754549879915384901), KQU(10760611745769249815), KQU( 2161505946972504002), KQU( 5243132808986265107), KQU(10129852179873415416), KQU( 710339480008649081), KQU( 7802129453068808528), KQU(17967213567178907213), KQU(15730859124668605599), KQU(13058356168962376502), KQU( 3701224985413645909), KQU(14464065869149109264), KQU( 9959272418844311646), KQU(10157426099515958752), KQU(14013736814538268528), KQU(17797456992065653951), KQU(17418878140257344806), KQU(15457429073540561521), KQU( 2184426881360949378), KQU( 2062193041154712416), KQU( 8553463347406931661), KQU( 4913057625202871854), KQU( 2668943682126618425), KQU(17064444737891172288), KQU( 4997115903913298637), KQU(12019402608892327416), KQU(17603584559765897352), KQU(11367529582073647975), KQU( 8211476043518436050), KQU( 8676849804070323674), KQU(18431829230394475730), KQU(10490177861361247904), KQU( 9508720602025651349), KQU( 7409627448555722700), KQU( 5804047018862729008), KQU(11943858176893142594), KQU(11908095418933847092), KQU( 5415449345715887652), KQU( 1554022699166156407), KQU( 9073322106406017161), KQU( 7080630967969047082), KQU(18049736940860732943), KQU(12748714242594196794), KQU( 1226992415735156741), KQU(17900981019609531193), KQU(11720739744008710999), KQU( 3006400683394775434), KQU(11347974011751996028), KQU( 3316999628257954608), KQU( 8384484563557639101), KQU(18117794685961729767), KQU( 1900145025596618194), KQU(17459527840632892676), KQU( 5634784101865710994), KQU( 7918619300292897158), KQU( 3146577625026301350), KQU( 9955212856499068767), KQU( 1873995843681746975), KQU( 1561487759967972194), KQU( 8322718804375878474), KQU(11300284215327028366), KQU( 4667391032508998982), KQU( 9820104494306625580), KQU(17922397968599970610), KQU( 1784690461886786712), 
    KQU(14940365084341346821), KQU( 5348719575594186181),
    KQU(10720419084507855261), KQU(14210394354145143274),
    KQU( 2426468692164000131), KQU(16271062114607059202),
    KQU(14851904092357070247), KQU( 6524493015693121897),
    KQU( 9825473835127138531), KQU(14222500616268569578),
    KQU(15521484052007487468), KQU(14462579404124614699),
    KQU(11012375590820665520), KQU(11625327350536084927),
    KQU(14452017765243785417), KQU( 9989342263518766305),
    KQU( 3640105471101803790), KQU( 4749866455897513242),
    KQU(13963064946736312044), KQU(10007416591973223791),
    KQU(18314132234717431115), KQU( 3286596588617483450),
    KQU( 7726163455370818765), KQU( 7575454721115379328),
    KQU( 5308331576437663422), KQU(18288821894903530934),
    KQU( 8028405805410554106), KQU(15744019832103296628),
    KQU( 149765559630932100), KQU( 6137705557200071977),
    KQU(14513416315434803615), KQU(11665702820128984473),
    KQU( 218926670505601386), KQU( 6868675028717769519),
    KQU(15282016569441512302), KQU( 5707000497782960236),
    KQU( 6671120586555079567), KQU( 2194098052618985448),
    KQU(16849577895477330978), KQU(12957148471017466283),
    KQU( 1997805535404859393), KQU( 1180721060263860490),
    KQU(13206391310193756958), KQU(12980208674461861797),
    KQU( 3825967775058875366), KQU(17543433670782042631),
    KQU( 1518339070120322730), KQU(16344584340890991669),
    KQU( 2611327165318529819), KQU(11265022723283422529),
    KQU( 4001552800373196817), KQU(14509595890079346161),
    KQU( 3528717165416234562), KQU(18153222571501914072),
    KQU( 9387182977209744425), KQU(10064342315985580021),
    KQU(11373678413215253977), KQU( 2308457853228798099),
    KQU( 9729042942839545302), KQU( 7833785471140127746),
    KQU( 6351049900319844436), KQU(14454610627133496067),
    KQU(12533175683634819111), KQU(15570163926716513029),
    KQU(13356980519185762498)
};

TEST_BEGIN(test_gen_rand_32)
{
    uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
    uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
    int i;
    uint32_t r32;
    sfmt_t *ctx;

    assert_d_le(get_min_array_size32(), BLOCK_SIZE,
        "Array size too small");
    ctx = init_gen_rand(1234);
    fill_array32(ctx, array32, BLOCK_SIZE);
    fill_array32(ctx, array32_2, BLOCK_SIZE);
    fini_gen_rand(ctx);

    ctx = init_gen_rand(1234);
    for (i = 0; i < BLOCK_SIZE; i++) {
        if (i < COUNT_1) {
            assert_u32_eq(array32[i], init_gen_rand_32_expected[i],
                "Output mismatch for i=%d", i);
        }
        r32 = gen_rand32(ctx);
        assert_u32_eq(r32, array32[i],
            "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32);
    }
    for (i = 0; i < COUNT_2; i++) {
        r32 = gen_rand32(ctx);
        assert_u32_eq(r32, array32_2[i],
            "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i], r32);
    }
    fini_gen_rand(ctx);
}
TEST_END

TEST_BEGIN(test_by_array_32)
{
    uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
    uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
    int i;
    uint32_t ini[4] = {0x1234, 0x5678, 0x9abc, 0xdef0};
    uint32_t r32;
    sfmt_t *ctx;

    assert_d_le(get_min_array_size32(), BLOCK_SIZE,
        "Array size too small");
    ctx = init_by_array(ini, 4);
    fill_array32(ctx, array32, BLOCK_SIZE);
    fill_array32(ctx, array32_2, BLOCK_SIZE);
    fini_gen_rand(ctx);

    ctx = init_by_array(ini, 4);
    for (i = 0; i < BLOCK_SIZE; i++) {
        if (i < COUNT_1) {
            assert_u32_eq(array32[i], init_by_array_32_expected[i],
                "Output mismatch for i=%d", i);
        }
        r32 = gen_rand32(ctx);
        assert_u32_eq(r32, array32[i],
            "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32);
    }
    for (i = 0; i < COUNT_2; i++) {
        r32 = gen_rand32(ctx);
        assert_u32_eq(r32, array32_2[i],
            "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i], r32);
    }
    fini_gen_rand(ctx);
}
TEST_END

TEST_BEGIN(test_gen_rand_64)
{
    uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
    uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
    int i;
    uint64_t r;
    sfmt_t *ctx;

    assert_d_le(get_min_array_size64(), BLOCK_SIZE64,
        "Array size too small");
    ctx = init_gen_rand(4321);
    fill_array64(ctx, array64, BLOCK_SIZE64);
    fill_array64(ctx, array64_2, BLOCK_SIZE64);
    fini_gen_rand(ctx);

    ctx = init_gen_rand(4321);
    for (i = 0; i < BLOCK_SIZE64; i++) {
        if (i < COUNT_1) {
            assert_u64_eq(array64[i], init_gen_rand_64_expected[i],
                "Output mismatch for i=%d", i);
        }
        r = gen_rand64(ctx);
        assert_u64_eq(r, array64[i],
            "Mismatch at array64[%d]=%"FMTx64", gen=%"FMTx64, i,
            array64[i], r);
    }
    for (i = 0; i < COUNT_2; i++) {
        r = gen_rand64(ctx);
        assert_u64_eq(r, array64_2[i],
            "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64, i,
            array64_2[i], r);
    }
    fini_gen_rand(ctx);
}
TEST_END

TEST_BEGIN(test_by_array_64)
{
    uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
    uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
    int i;
    uint64_t r;
    uint32_t ini[] = {5, 4, 3, 2, 1};
    sfmt_t *ctx;

    assert_d_le(get_min_array_size64(), BLOCK_SIZE64,
        "Array size too small");
    ctx = init_by_array(ini, 5);
    fill_array64(ctx, array64, BLOCK_SIZE64);
    fill_array64(ctx, array64_2, BLOCK_SIZE64);
    fini_gen_rand(ctx);

    ctx = init_by_array(ini, 5);
    for (i = 0; i < BLOCK_SIZE64; i++) {
        if (i < COUNT_1) {
            assert_u64_eq(array64[i], init_by_array_64_expected[i],
                "Output mismatch for i=%d", i);
        }
        r = gen_rand64(ctx);
        assert_u64_eq(r, array64[i],
            "Mismatch at array64[%d]=%"FMTx64" gen=%"FMTx64, i,
            array64[i], r);
    }
    for (i = 0; i < COUNT_2; i++) {
        r = gen_rand64(ctx);
        assert_u64_eq(r, array64_2[i],
            "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64, i,
            array64_2[i], r);
    }
    fini_gen_rand(ctx);
}
TEST_END

int
main(void)
{
    return (test(
        test_gen_rand_32,
        test_by_array_32,
        test_gen_rand_64,
        test_by_array_64));
}

disque-1.0-rc1/deps/jemalloc/test/unit/atomic.c

#include "test/jemalloc_test.h"

#define TEST_STRUCT(p, t) \
struct p##_test_s { \
    t accum0; \
    t x; \
    t s; \
}; \
typedef struct p##_test_s p##_test_t;

#define TEST_BODY(p, t, tc, ta, FMT) do { \
    const p##_test_t tests[] = { \
        {(t)-1, (t)-1, (t)-2}, \
        {(t)-1, (t) 0, (t)-2}, \
        {(t)-1, (t) 1, (t)-2}, \
        \
        {(t) 0, (t)-1, (t)-2}, \
        {(t) 0, (t) 0, (t)-2}, \
        {(t) 0, (t) 1, (t)-2}, \
        \
        {(t) 1, (t)-1, (t)-2}, \
        {(t) 1, (t) 0, (t)-2}, \
        {(t) 1, (t) 1, (t)-2}, \
        \
        {(t)0, (t)-(1 << 22), (t)-2}, \
        {(t)0, (t)(1 << 22), (t)-2}, \
        {(t)(1 << 22), (t)-(1 << 22), (t)-2}, \
        {(t)(1 << 22), (t)(1 << 22), (t)-2} \
    }; \
    unsigned i; \
    \
    for (i = 0; i < sizeof(tests)/sizeof(p##_test_t); i++) { \
        bool err; \
        t accum = tests[i].accum0; \
        assert_##ta##_eq(atomic_read_##p(&accum), \
            tests[i].accum0, \
            "Erroneous read, i=%u", i); \
        \
        assert_##ta##_eq(atomic_add_##p(&accum, tests[i].x), \
            (t)((tc)tests[i].accum0 + (tc)tests[i].x), \
            "i=%u, accum=%"FMT", x=%"FMT, \
            i, tests[i].accum0, tests[i].x); \
        assert_##ta##_eq(atomic_read_##p(&accum), accum, \
            "Erroneous add, i=%u", i); \
        \
        accum = tests[i].accum0; \
        assert_##ta##_eq(atomic_sub_##p(&accum, tests[i].x), \
            (t)((tc)tests[i].accum0 - (tc)tests[i].x), \
            "i=%u, accum=%"FMT", x=%"FMT, \
            i, tests[i].accum0, tests[i].x); \
        assert_##ta##_eq(atomic_read_##p(&accum), accum, \
            "Erroneous sub, i=%u", i); \
        \
        accum = tests[i].accum0; \
        err = atomic_cas_##p(&accum, tests[i].x, tests[i].s); \
        assert_b_eq(err, tests[i].accum0 != tests[i].x, \
            "Erroneous cas success/failure result"); \
        assert_##ta##_eq(accum, err ? \
            tests[i].accum0 : \
            tests[i].s, "Erroneous cas effect, i=%u", i); \
        \
        accum = tests[i].accum0; \
        atomic_write_##p(&accum, tests[i].s); \
        assert_##ta##_eq(accum, tests[i].s, \
            "Erroneous write, i=%u", i); \
    } \
} while (0)

TEST_STRUCT(uint64, uint64_t)
TEST_BEGIN(test_atomic_uint64)
{
#if !(LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
    test_skip("64-bit atomic operations not supported");
#else
    TEST_BODY(uint64, uint64_t, uint64_t, u64, FMTx64);
#endif
}
TEST_END

TEST_STRUCT(uint32, uint32_t)
TEST_BEGIN(test_atomic_uint32)
{
    TEST_BODY(uint32, uint32_t, uint32_t, u32, "#"FMTx32);
}
TEST_END

TEST_STRUCT(p, void *)
TEST_BEGIN(test_atomic_p)
{
    TEST_BODY(p, void *, uintptr_t, ptr, "p");
}
TEST_END

TEST_STRUCT(z, size_t)
TEST_BEGIN(test_atomic_z)
{
    TEST_BODY(z, size_t, size_t, zu, "#zx");
}
TEST_END

TEST_STRUCT(u, unsigned)
TEST_BEGIN(test_atomic_u)
{
    TEST_BODY(u, unsigned, unsigned, u, "#x");
}
TEST_END

int
main(void)
{
    return (test(
        test_atomic_uint64,
        test_atomic_uint32,
        test_atomic_p,
        test_atomic_z,
        test_atomic_u));
}

disque-1.0-rc1/deps/jemalloc/test/unit/bitmap.c

#include "test/jemalloc_test.h"

TEST_BEGIN(test_bitmap_size)
{
    size_t i, prev_size;

    prev_size = 0;
    for (i = 1; i <= BITMAP_MAXBITS; i++) {
        size_t size = bitmap_size(i);
        assert_true(size >= prev_size,
            "Bitmap size is smaller than expected");
        prev_size = size;
    }
}
TEST_END

TEST_BEGIN(test_bitmap_init)
{
    size_t i;

    for (i = 1; i <= BITMAP_MAXBITS; i++) {
        bitmap_info_t binfo;
        bitmap_info_init(&binfo, i);
        {
            size_t j;
            bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) *
                bitmap_info_ngroups(&binfo));
            bitmap_init(bitmap, &binfo);

            for (j = 0; j < i; j++) {
                assert_false(bitmap_get(bitmap, &binfo, j),
                    "Bit should be unset");
            }
            free(bitmap);
        }
    }
}
TEST_END

TEST_BEGIN(test_bitmap_set)
{
    size_t i;

    for (i = 1; i <= BITMAP_MAXBITS; i++) {
        bitmap_info_t binfo;
        bitmap_info_init(&binfo, i);
        {
            size_t j;
            bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) *
                bitmap_info_ngroups(&binfo));
            bitmap_init(bitmap, &binfo);

            for (j = 0; j < i; j++)
                bitmap_set(bitmap, &binfo, j);
            assert_true(bitmap_full(bitmap, &binfo),
                "All bits should be set");
            free(bitmap);
        }
    }
}
TEST_END

TEST_BEGIN(test_bitmap_unset)
{
    size_t i;

    for (i = 1; i <= BITMAP_MAXBITS; i++) {
        bitmap_info_t binfo;
        bitmap_info_init(&binfo, i);
        {
            size_t j;
            bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) *
                bitmap_info_ngroups(&binfo));
            bitmap_init(bitmap, &binfo);

            for (j = 0; j < i; j++)
                bitmap_set(bitmap, &binfo, j);
            assert_true(bitmap_full(bitmap, &binfo),
                "All bits should be set");
            for (j = 0; j < i; j++)
                bitmap_unset(bitmap, &binfo, j);
            for (j = 0; j < i; j++)
                bitmap_set(bitmap, &binfo, j);
            assert_true(bitmap_full(bitmap, &binfo),
                "All bits should be set");
            free(bitmap);
        }
    }
}
TEST_END

TEST_BEGIN(test_bitmap_sfu)
{
    size_t i;

    for (i = 1; i <= BITMAP_MAXBITS; i++) {
        bitmap_info_t binfo;
        bitmap_info_init(&binfo, i);
        {
            ssize_t j;
            bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) *
                bitmap_info_ngroups(&binfo));
            bitmap_init(bitmap, &binfo);

            /* Iteratively set bits starting at the beginning. */
            for (j = 0; j < i; j++) {
                assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
                    "First unset bit should be just after "
                    "previous first unset bit");
            }
            assert_true(bitmap_full(bitmap, &binfo),
                "All bits should be set");

            /*
             * Iteratively unset bits starting at the end, and
             * verify that bitmap_sfu() reaches the unset bits.
             */
            for (j = i - 1; j >= 0; j--) {
                bitmap_unset(bitmap, &binfo, j);
                assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
                    "First unset bit should be the bit previously "
                    "unset");
                bitmap_unset(bitmap, &binfo, j);
            }
            assert_false(bitmap_get(bitmap, &binfo, 0),
                "Bit should be unset");

            /*
             * Iteratively set bits starting at the beginning, and
             * verify that bitmap_sfu() looks past them.
             */
            for (j = 1; j < i; j++) {
                bitmap_set(bitmap, &binfo, j - 1);
                assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
                    "First unset bit should be just after the "
                    "bit previously set");
                bitmap_unset(bitmap, &binfo, j);
            }
            assert_zd_eq(bitmap_sfu(bitmap, &binfo), i - 1,
                "First unset bit should be the last bit");
            assert_true(bitmap_full(bitmap, &binfo),
                "All bits should be set");
            free(bitmap);
        }
    }
}
TEST_END

int
main(void)
{
    return (test(
        test_bitmap_size,
        test_bitmap_init,
        test_bitmap_set,
        test_bitmap_unset,
        test_bitmap_sfu));
}

disque-1.0-rc1/deps/jemalloc/test/unit/ckh.c

#include "test/jemalloc_test.h"

TEST_BEGIN(test_new_delete)
{
    tsd_t *tsd;
    ckh_t ckh;

    tsd = tsd_fetch();

    assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
        ckh_string_keycomp), "Unexpected ckh_new() error");
    ckh_delete(tsd, &ckh);

    assert_false(ckh_new(tsd, &ckh, 3, ckh_pointer_hash,
        ckh_pointer_keycomp), "Unexpected ckh_new() error");
    ckh_delete(tsd, &ckh);
}
TEST_END

TEST_BEGIN(test_count_insert_search_remove)
{
    tsd_t *tsd;
    ckh_t ckh;
    const char *strs[] = {
        "a string",
        "A string",
        "a string.",
        "A string."
    };
    const char *missing = "A string not in the hash table.";
    size_t i;

    tsd = tsd_fetch();

    assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
        ckh_string_keycomp), "Unexpected ckh_new() error");
    assert_zu_eq(ckh_count(&ckh), 0,
        "ckh_count() should return %zu, but it returned %zu", ZU(0),
        ckh_count(&ckh));

    /* Insert. */
    for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
        ckh_insert(tsd, &ckh, strs[i], strs[i]);
        assert_zu_eq(ckh_count(&ckh), i+1,
            "ckh_count() should return %zu, but it returned %zu", i+1,
            ckh_count(&ckh));
    }

    /* Search. */
    for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
        union {
            void *p;
            const char *s;
        } k, v;
        void **kp, **vp;
        const char *ks, *vs;

        kp = (i & 1) ? &k.p : NULL;
        vp = (i & 2) ? &v.p : NULL;
        k.p = NULL;
        v.p = NULL;
        assert_false(ckh_search(&ckh, strs[i], kp, vp),
            "Unexpected ckh_search() error");

        ks = (i & 1) ? strs[i] : (const char *)NULL;
        vs = (i & 2) ? strs[i] : (const char *)NULL;
        assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
            i);
        assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
            i);
    }
    assert_true(ckh_search(&ckh, missing, NULL, NULL),
        "Unexpected ckh_search() success");

    /* Remove. */
    for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
        union {
            void *p;
            const char *s;
        } k, v;
        void **kp, **vp;
        const char *ks, *vs;

        kp = (i & 1) ? &k.p : NULL;
        vp = (i & 2) ? &v.p : NULL;
        k.p = NULL;
        v.p = NULL;
        assert_false(ckh_remove(tsd, &ckh, strs[i], kp, vp),
            "Unexpected ckh_remove() error");

        ks = (i & 1) ? strs[i] : (const char *)NULL;
        vs = (i & 2) ?
strs[i] : (const char *)NULL; assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu", i); assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu", i); assert_zu_eq(ckh_count(&ckh), sizeof(strs)/sizeof(const char *) - i - 1, "ckh_count() should return %zu, but it returned %zu", sizeof(strs)/sizeof(const char *) - i - 1, ckh_count(&ckh)); } ckh_delete(tsd, &ckh); } TEST_END TEST_BEGIN(test_insert_iter_remove) { #define NITEMS ZU(1000) tsd_t *tsd; ckh_t ckh; void **p[NITEMS]; void *q, *r; size_t i; tsd = tsd_fetch(); assert_false(ckh_new(tsd, &ckh, 2, ckh_pointer_hash, ckh_pointer_keycomp), "Unexpected ckh_new() error"); for (i = 0; i < NITEMS; i++) { p[i] = mallocx(i+1, 0); assert_ptr_not_null(p[i], "Unexpected mallocx() failure"); } for (i = 0; i < NITEMS; i++) { size_t j; for (j = i; j < NITEMS; j++) { assert_false(ckh_insert(tsd, &ckh, p[j], p[j]), "Unexpected ckh_insert() failure"); assert_false(ckh_search(&ckh, p[j], &q, &r), "Unexpected ckh_search() failure"); assert_ptr_eq(p[j], q, "Key pointer mismatch"); assert_ptr_eq(p[j], r, "Value pointer mismatch"); } assert_zu_eq(ckh_count(&ckh), NITEMS, "ckh_count() should return %zu, but it returned %zu", NITEMS, ckh_count(&ckh)); for (j = i + 1; j < NITEMS; j++) { assert_false(ckh_search(&ckh, p[j], NULL, NULL), "Unexpected ckh_search() failure"); assert_false(ckh_remove(tsd, &ckh, p[j], &q, &r), "Unexpected ckh_remove() failure"); assert_ptr_eq(p[j], q, "Key pointer mismatch"); assert_ptr_eq(p[j], r, "Value pointer mismatch"); assert_true(ckh_search(&ckh, p[j], NULL, NULL), "Unexpected ckh_search() success"); assert_true(ckh_remove(tsd, &ckh, p[j], &q, &r), "Unexpected ckh_remove() success"); } { bool seen[NITEMS]; size_t tabind; memset(seen, 0, sizeof(seen)); for (tabind = 0; !ckh_iter(&ckh, &tabind, &q, &r);) { size_t k; assert_ptr_eq(q, r, "Key and val not equal"); for (k = 0; k < NITEMS; k++) { if (p[k] == q) { assert_false(seen[k], "Item %zu already seen", k); seen[k] = true; break; } } } for (j = 0; j < i + 1; j++) assert_true(seen[j], "Item %zu not seen", j); for (; j < NITEMS; j++) assert_false(seen[j], "Item %zu seen", j); } } for (i = 0; i < NITEMS; i++) { assert_false(ckh_search(&ckh, p[i], NULL, NULL), "Unexpected ckh_search() failure"); assert_false(ckh_remove(tsd, &ckh, p[i], &q, &r), "Unexpected ckh_remove() failure"); assert_ptr_eq(p[i], q, "Key pointer mismatch"); assert_ptr_eq(p[i], r, "Value pointer mismatch"); assert_true(ckh_search(&ckh, p[i], NULL, NULL), "Unexpected ckh_search() success"); assert_true(ckh_remove(tsd, &ckh, p[i], &q, &r), "Unexpected ckh_remove() success"); dallocx(p[i], 0); } assert_zu_eq(ckh_count(&ckh), 0, "ckh_count() should return %zu, but it returned %zu", ZU(0), ckh_count(&ckh)); ckh_delete(tsd, &ckh); #undef NITEMS } TEST_END int main(void) { return (test( test_new_delete, test_count_insert_search_remove, test_insert_iter_remove)); } disque-1.0-rc1/deps/jemalloc/test/unit/hash.c000066400000000000000000000112121264176561100211030ustar00rootroot00000000000000/* * This file is based on code that is part of SMHasher * (https://code.google.com/p/smhasher/), and is subject to the MIT license * (http://www.opensource.org/licenses/mit-license.php). Both email addresses * associated with the source code's revision history belong to Austin Appleby, * and the revision history ranges from 2010 to 2012. 
Therefore the copyright * and license are here taken to be: * * Copyright (c) 2010-2012 Austin Appleby * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "test/jemalloc_test.h" typedef enum { hash_variant_x86_32, hash_variant_x86_128, hash_variant_x64_128 } hash_variant_t; static size_t hash_variant_bits(hash_variant_t variant) { switch (variant) { case hash_variant_x86_32: return (32); case hash_variant_x86_128: return (128); case hash_variant_x64_128: return (128); default: not_reached(); } } static const char * hash_variant_string(hash_variant_t variant) { switch (variant) { case hash_variant_x86_32: return ("hash_x86_32"); case hash_variant_x86_128: return ("hash_x86_128"); case hash_variant_x64_128: return ("hash_x64_128"); default: not_reached(); } } static void hash_variant_verify(hash_variant_t variant) { const size_t hashbytes = hash_variant_bits(variant) / 8; uint8_t key[256]; VARIABLE_ARRAY(uint8_t, hashes, hashbytes * 256); VARIABLE_ARRAY(uint8_t, final, hashbytes); unsigned i; uint32_t computed, expected; memset(key, 0, sizeof(key)); memset(hashes, 0, sizeof(hashes)); memset(final, 0, sizeof(final)); /* * Hash keys of the form {0}, {0,1}, {0,1,2}, ..., {0,1,...,255} as the * seed. */ for (i = 0; i < 256; i++) { key[i] = (uint8_t)i; switch (variant) { case hash_variant_x86_32: { uint32_t out; out = hash_x86_32(key, i, 256-i); memcpy(&hashes[i*hashbytes], &out, hashbytes); break; } case hash_variant_x86_128: { uint64_t out[2]; hash_x86_128(key, i, 256-i, out); memcpy(&hashes[i*hashbytes], out, hashbytes); break; } case hash_variant_x64_128: { uint64_t out[2]; hash_x64_128(key, i, 256-i, out); memcpy(&hashes[i*hashbytes], out, hashbytes); break; } default: not_reached(); } } /* Hash the result array. 
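 * The 256 per-key hashes computed above are concatenated and hashed one more time; only the first 32 bits of that final digest (assembled little-endian below) are compared against the variant's expected value.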
*/ switch (variant) { case hash_variant_x86_32: { uint32_t out = hash_x86_32(hashes, hashbytes*256, 0); memcpy(final, &out, sizeof(out)); break; } case hash_variant_x86_128: { uint64_t out[2]; hash_x86_128(hashes, hashbytes*256, 0, out); memcpy(final, out, sizeof(out)); break; } case hash_variant_x64_128: { uint64_t out[2]; hash_x64_128(hashes, hashbytes*256, 0, out); memcpy(final, out, sizeof(out)); break; } default: not_reached(); } computed = (final[0] << 0) | (final[1] << 8) | (final[2] << 16) | (final[3] << 24); switch (variant) { #ifdef JEMALLOC_BIG_ENDIAN case hash_variant_x86_32: expected = 0x6213303eU; break; case hash_variant_x86_128: expected = 0x266820caU; break; case hash_variant_x64_128: expected = 0xcc622b6fU; break; #else case hash_variant_x86_32: expected = 0xb0f57ee3U; break; case hash_variant_x86_128: expected = 0xb3ece62aU; break; case hash_variant_x64_128: expected = 0x6384ba69U; break; #endif default: not_reached(); } assert_u32_eq(computed, expected, "Hash mismatch for %s(): expected %#x but got %#x", hash_variant_string(variant), expected, computed); } TEST_BEGIN(test_hash_x86_32) { hash_variant_verify(hash_variant_x86_32); } TEST_END TEST_BEGIN(test_hash_x86_128) { hash_variant_verify(hash_variant_x86_128); } TEST_END TEST_BEGIN(test_hash_x64_128) { hash_variant_verify(hash_variant_x64_128); } TEST_END int main(void) { return (test( test_hash_x86_32, test_hash_x86_128, test_hash_x64_128)); } disque-1.0-rc1/deps/jemalloc/test/unit/junk.c000066400000000000000000000141441264176561100211360ustar00rootroot00000000000000#include "test/jemalloc_test.h" #ifdef JEMALLOC_FILL # ifndef JEMALLOC_TEST_JUNK_OPT # define JEMALLOC_TEST_JUNK_OPT "junk:true" # endif const char *malloc_conf = "abort:false,zero:false,redzone:true,quarantine:0," JEMALLOC_TEST_JUNK_OPT; #endif static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig; static arena_dalloc_junk_large_t *arena_dalloc_junk_large_orig; static huge_dalloc_junk_t *huge_dalloc_junk_orig; static void *watch_for_junking; static bool saw_junking; static void watch_junking(void *p) { watch_for_junking = p; saw_junking = false; } static void arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info) { size_t i; arena_dalloc_junk_small_orig(ptr, bin_info); for (i = 0; i < bin_info->reg_size; i++) { assert_c_eq(((char *)ptr)[i], 0x5a, "Missing junk fill for byte %zu/%zu of deallocated region", i, bin_info->reg_size); } if (ptr == watch_for_junking) saw_junking = true; } static void arena_dalloc_junk_large_intercept(void *ptr, size_t usize) { size_t i; arena_dalloc_junk_large_orig(ptr, usize); for (i = 0; i < usize; i++) { assert_c_eq(((char *)ptr)[i], 0x5a, "Missing junk fill for byte %zu/%zu of deallocated region", i, usize); } if (ptr == watch_for_junking) saw_junking = true; } static void huge_dalloc_junk_intercept(void *ptr, size_t usize) { huge_dalloc_junk_orig(ptr, usize); /* * The conditions under which junk filling actually occurs are nuanced * enough that it doesn't make sense to duplicate the decision logic in * test code, so don't actually check that the region is junk-filled. 
*/ if (ptr == watch_for_junking) saw_junking = true; } static void test_junk(size_t sz_min, size_t sz_max) { char *s; size_t sz_prev, sz, i; if (opt_junk_free) { arena_dalloc_junk_small_orig = arena_dalloc_junk_small; arena_dalloc_junk_small = arena_dalloc_junk_small_intercept; arena_dalloc_junk_large_orig = arena_dalloc_junk_large; arena_dalloc_junk_large = arena_dalloc_junk_large_intercept; huge_dalloc_junk_orig = huge_dalloc_junk; huge_dalloc_junk = huge_dalloc_junk_intercept; } sz_prev = 0; s = (char *)mallocx(sz_min, 0); assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); for (sz = sallocx(s, 0); sz <= sz_max; sz_prev = sz, sz = sallocx(s, 0)) { if (sz_prev > 0) { assert_c_eq(s[0], 'a', "Previously allocated byte %zu/%zu is corrupted", ZU(0), sz_prev); assert_c_eq(s[sz_prev-1], 'a', "Previously allocated byte %zu/%zu is corrupted", sz_prev-1, sz_prev); } for (i = sz_prev; i < sz; i++) { if (opt_junk_alloc) { assert_c_eq(s[i], 0xa5, "Newly allocated byte %zu/%zu isn't " "junk-filled", i, sz); } s[i] = 'a'; } if (xallocx(s, sz+1, 0, 0) == sz) { watch_junking(s); s = (char *)rallocx(s, sz+1, 0); assert_ptr_not_null((void *)s, "Unexpected rallocx() failure"); assert_true(!opt_junk_free || saw_junking, "Expected region of size %zu to be junk-filled", sz); } } watch_junking(s); dallocx(s, 0); assert_true(!opt_junk_free || saw_junking, "Expected region of size %zu to be junk-filled", sz); if (opt_junk_free) { arena_dalloc_junk_small = arena_dalloc_junk_small_orig; arena_dalloc_junk_large = arena_dalloc_junk_large_orig; huge_dalloc_junk = huge_dalloc_junk_orig; } } TEST_BEGIN(test_junk_small) { test_skip_if(!config_fill); test_junk(1, SMALL_MAXCLASS-1); } TEST_END TEST_BEGIN(test_junk_large) { test_skip_if(!config_fill); test_junk(SMALL_MAXCLASS+1, large_maxclass); } TEST_END TEST_BEGIN(test_junk_huge) { test_skip_if(!config_fill); test_junk(large_maxclass+1, chunksize*2); } TEST_END arena_ralloc_junk_large_t *arena_ralloc_junk_large_orig; static void *most_recently_trimmed; static size_t shrink_size(size_t size) { size_t shrink_size; for (shrink_size = size - 1; nallocx(shrink_size, 0) == size; shrink_size--) ; /* Do nothing. 
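 * (The empty loop body below is intentional: the loop header does all the work, walking downward from size-1 until nallocx() reports a smaller usable size, so shrink_size ends up as the largest request that falls into a smaller size class than the original allocation.)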
*/ return (shrink_size); } static void arena_ralloc_junk_large_intercept(void *ptr, size_t old_usize, size_t usize) { arena_ralloc_junk_large_orig(ptr, old_usize, usize); assert_zu_eq(old_usize, large_maxclass, "Unexpected old_usize"); assert_zu_eq(usize, shrink_size(large_maxclass), "Unexpected usize"); most_recently_trimmed = ptr; } TEST_BEGIN(test_junk_large_ralloc_shrink) { void *p1, *p2; p1 = mallocx(large_maxclass, 0); assert_ptr_not_null(p1, "Unexpected mallocx() failure"); arena_ralloc_junk_large_orig = arena_ralloc_junk_large; arena_ralloc_junk_large = arena_ralloc_junk_large_intercept; p2 = rallocx(p1, shrink_size(large_maxclass), 0); assert_ptr_eq(p1, p2, "Unexpected move during shrink"); arena_ralloc_junk_large = arena_ralloc_junk_large_orig; assert_ptr_eq(most_recently_trimmed, p1, "Expected trimmed portion of region to be junk-filled"); } TEST_END static bool detected_redzone_corruption; static void arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after, size_t offset, uint8_t byte) { detected_redzone_corruption = true; } TEST_BEGIN(test_junk_redzone) { char *s; arena_redzone_corruption_t *arena_redzone_corruption_orig; test_skip_if(!config_fill); test_skip_if(!opt_junk_alloc || !opt_junk_free); arena_redzone_corruption_orig = arena_redzone_corruption; arena_redzone_corruption = arena_redzone_corruption_replacement; /* Test underflow. */ detected_redzone_corruption = false; s = (char *)mallocx(1, 0); assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); s[-1] = 0xbb; dallocx(s, 0); assert_true(detected_redzone_corruption, "Did not detect redzone corruption"); /* Test overflow. */ detected_redzone_corruption = false; s = (char *)mallocx(1, 0); assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); s[sallocx(s, 0)] = 0xbb; dallocx(s, 0); assert_true(detected_redzone_corruption, "Did not detect redzone corruption"); arena_redzone_corruption = arena_redzone_corruption_orig; } TEST_END int main(void) { assert(!config_fill || opt_junk_alloc || opt_junk_free); return (test( test_junk_small, test_junk_large, test_junk_huge, test_junk_large_ralloc_shrink, test_junk_redzone)); } disque-1.0-rc1/deps/jemalloc/test/unit/junk_alloc.c000066400000000000000000000001341264176561100223020ustar00rootroot00000000000000#define JEMALLOC_TEST_JUNK_OPT "junk:alloc" #include "junk.c" #undef JEMALLOC_TEST_JUNK_OPT disque-1.0-rc1/deps/jemalloc/test/unit/junk_free.c000066400000000000000000000001331264176561100221300ustar00rootroot00000000000000#define JEMALLOC_TEST_JUNK_OPT "junk:free" #include "junk.c" #undef JEMALLOC_TEST_JUNK_OPT disque-1.0-rc1/deps/jemalloc/test/unit/lg_chunk.c000066400000000000000000000010001264176561100217440ustar00rootroot00000000000000#include "test/jemalloc_test.h" /* * Make sure that opt.lg_chunk clamping is sufficient. In practice, this test * program will fail a debug assertion during initialization and abort (rather * than the test soft-failing) if clamping is insufficient. 
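 * ("lg_chunk:0" requests 2^0 = 1-byte chunks, far below any usable chunk size, so initialization is expected to clamp the option up to the supported minimum rather than honor it literally.)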
*/ const char *malloc_conf = "lg_chunk:0"; TEST_BEGIN(test_lg_chunk_clamp) { void *p; p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); dallocx(p, 0); } TEST_END int main(void) { return (test( test_lg_chunk_clamp)); } disque-1.0-rc1/deps/jemalloc/test/unit/mallctl.c000066400000000000000000000451701264176561100216220ustar00rootroot00000000000000#include "test/jemalloc_test.h" TEST_BEGIN(test_mallctl_errors) { uint64_t epoch; size_t sz; assert_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT, "mallctl() should return ENOENT for non-existent names"); assert_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")), EPERM, "mallctl() should return EPERM on attempt to write " "read-only value"); assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)-1), EINVAL, "mallctl() should return EINVAL for input size mismatch"); assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)+1), EINVAL, "mallctl() should return EINVAL for input size mismatch"); sz = sizeof(epoch)-1; assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL, "mallctl() should return EINVAL for output size mismatch"); sz = sizeof(epoch)+1; assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL, "mallctl() should return EINVAL for output size mismatch"); } TEST_END TEST_BEGIN(test_mallctlnametomib_errors) { size_t mib[1]; size_t miblen; miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT, "mallctlnametomib() should return ENOENT for non-existent names"); } TEST_END TEST_BEGIN(test_mallctlbymib_errors) { uint64_t epoch; size_t sz; size_t mib[1]; size_t miblen; miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("version", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0", strlen("0.0.0")), EPERM, "mallctl() should return EPERM on " "attempt to write read-only value"); miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch, sizeof(epoch)-1), EINVAL, "mallctlbymib() should return EINVAL for input size mismatch"); assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch, sizeof(epoch)+1), EINVAL, "mallctlbymib() should return EINVAL for input size mismatch"); sz = sizeof(epoch)-1; assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL, "mallctlbymib() should return EINVAL for output size mismatch"); sz = sizeof(epoch)+1; assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL, "mallctlbymib() should return EINVAL for output size mismatch"); } TEST_END TEST_BEGIN(test_mallctl_read_write) { uint64_t old_epoch, new_epoch; size_t sz = sizeof(old_epoch); /* Blind. */ assert_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0, "Unexpected mallctl() failure"); assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); /* Read. */ assert_d_eq(mallctl("epoch", &old_epoch, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); /* Write. */ assert_d_eq(mallctl("epoch", NULL, NULL, &new_epoch, sizeof(new_epoch)), 0, "Unexpected mallctl() failure"); assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); /* Read+write. 
*/ assert_d_eq(mallctl("epoch", &old_epoch, &sz, &new_epoch, sizeof(new_epoch)), 0, "Unexpected mallctl() failure"); assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); } TEST_END TEST_BEGIN(test_mallctlnametomib_short_mib) { size_t mib[4]; size_t miblen; miblen = 3; mib[3] = 42; assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); assert_zu_eq(miblen, 3, "Unexpected mib output length"); assert_zu_eq(mib[3], 42, "mallctlnametomib() wrote past the end of the input mib"); } TEST_END TEST_BEGIN(test_mallctl_config) { #define TEST_MALLCTL_CONFIG(config) do { \ bool oldval; \ size_t sz = sizeof(oldval); \ assert_d_eq(mallctl("config."#config, &oldval, &sz, NULL, 0), \ 0, "Unexpected mallctl() failure"); \ assert_b_eq(oldval, config_##config, "Incorrect config value"); \ assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ } while (0) TEST_MALLCTL_CONFIG(cache_oblivious); TEST_MALLCTL_CONFIG(debug); TEST_MALLCTL_CONFIG(fill); TEST_MALLCTL_CONFIG(lazy_lock); TEST_MALLCTL_CONFIG(munmap); TEST_MALLCTL_CONFIG(prof); TEST_MALLCTL_CONFIG(prof_libgcc); TEST_MALLCTL_CONFIG(prof_libunwind); TEST_MALLCTL_CONFIG(stats); TEST_MALLCTL_CONFIG(tcache); TEST_MALLCTL_CONFIG(tls); TEST_MALLCTL_CONFIG(utrace); TEST_MALLCTL_CONFIG(valgrind); TEST_MALLCTL_CONFIG(xmalloc); #undef TEST_MALLCTL_CONFIG } TEST_END TEST_BEGIN(test_mallctl_opt) { bool config_always = true; #define TEST_MALLCTL_OPT(t, opt, config) do { \ t oldval; \ size_t sz = sizeof(oldval); \ int expected = config_##config ? 0 : ENOENT; \ int result = mallctl("opt."#opt, &oldval, &sz, NULL, 0); \ assert_d_eq(result, expected, \ "Unexpected mallctl() result for opt."#opt); \ assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ } while (0) TEST_MALLCTL_OPT(bool, abort, always); TEST_MALLCTL_OPT(size_t, lg_chunk, always); TEST_MALLCTL_OPT(const char *, dss, always); TEST_MALLCTL_OPT(size_t, narenas, always); TEST_MALLCTL_OPT(ssize_t, lg_dirty_mult, always); TEST_MALLCTL_OPT(bool, stats_print, always); TEST_MALLCTL_OPT(const char *, junk, fill); TEST_MALLCTL_OPT(size_t, quarantine, fill); TEST_MALLCTL_OPT(bool, redzone, fill); TEST_MALLCTL_OPT(bool, zero, fill); TEST_MALLCTL_OPT(bool, utrace, utrace); TEST_MALLCTL_OPT(bool, xmalloc, xmalloc); TEST_MALLCTL_OPT(bool, tcache, tcache); TEST_MALLCTL_OPT(size_t, lg_tcache_max, tcache); TEST_MALLCTL_OPT(bool, prof, prof); TEST_MALLCTL_OPT(const char *, prof_prefix, prof); TEST_MALLCTL_OPT(bool, prof_active, prof); TEST_MALLCTL_OPT(ssize_t, lg_prof_sample, prof); TEST_MALLCTL_OPT(bool, prof_accum, prof); TEST_MALLCTL_OPT(ssize_t, lg_prof_interval, prof); TEST_MALLCTL_OPT(bool, prof_gdump, prof); TEST_MALLCTL_OPT(bool, prof_final, prof); TEST_MALLCTL_OPT(bool, prof_leak, prof); #undef TEST_MALLCTL_OPT } TEST_END TEST_BEGIN(test_manpage_example) { unsigned nbins, i; size_t mib[4]; size_t len, miblen; len = sizeof(nbins); assert_d_eq(mallctl("arenas.nbins", &nbins, &len, NULL, 0), 0, "Unexpected mallctl() failure"); miblen = 4; assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); for (i = 0; i < nbins; i++) { size_t bin_size; mib[2] = i; len = sizeof(bin_size); assert_d_eq(mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0), 0, "Unexpected mallctlbymib() failure"); /* Do something with bin_size... */ } } TEST_END TEST_BEGIN(test_tcache_none) { void *p0, *q, *p1; test_skip_if(!config_tcache); /* Allocate p and q. 
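 * (Plan of the test: p0 is freed through the thread cache while q is freed with MALLOCX_TCACHE_NONE, which bypasses the cache and presumably returns q's region directly to the arena; a subsequent same-sized, tcache-backed mallocx() should therefore hand back p0, and only p0, from the cache.)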
*/ p0 = mallocx(42, 0); assert_ptr_not_null(p0, "Unexpected mallocx() failure"); q = mallocx(42, 0); assert_ptr_not_null(q, "Unexpected mallocx() failure"); /* Deallocate p and q, but bypass the tcache for q. */ dallocx(p0, 0); dallocx(q, MALLOCX_TCACHE_NONE); /* Make sure that tcache-based allocation returns p, not q. */ p1 = mallocx(42, 0); assert_ptr_not_null(p1, "Unexpected mallocx() failure"); assert_ptr_eq(p0, p1, "Expected tcache to allocate cached region"); /* Clean up. */ dallocx(p1, MALLOCX_TCACHE_NONE); } TEST_END TEST_BEGIN(test_tcache) { #define NTCACHES 10 unsigned tis[NTCACHES]; void *ps[NTCACHES]; void *qs[NTCACHES]; unsigned i; size_t sz, psz, qsz; test_skip_if(!config_tcache); psz = 42; qsz = nallocx(psz, 0) + 1; /* Create tcaches. */ for (i = 0; i < NTCACHES; i++) { sz = sizeof(unsigned); assert_d_eq(mallctl("tcache.create", &tis[i], &sz, NULL, 0), 0, "Unexpected mallctl() failure, i=%u", i); } /* Exercise tcache ID recycling. */ for (i = 0; i < NTCACHES; i++) { assert_d_eq(mallctl("tcache.destroy", NULL, NULL, &tis[i], sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", i); } for (i = 0; i < NTCACHES; i++) { sz = sizeof(unsigned); assert_d_eq(mallctl("tcache.create", &tis[i], &sz, NULL, 0), 0, "Unexpected mallctl() failure, i=%u", i); } /* Flush empty tcaches. */ for (i = 0; i < NTCACHES; i++) { assert_d_eq(mallctl("tcache.flush", NULL, NULL, &tis[i], sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", i); } /* Cache some allocations. */ for (i = 0; i < NTCACHES; i++) { ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i])); assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u", i); dallocx(ps[i], MALLOCX_TCACHE(tis[i])); qs[i] = mallocx(qsz, MALLOCX_TCACHE(tis[i])); assert_ptr_not_null(qs[i], "Unexpected mallocx() failure, i=%u", i); dallocx(qs[i], MALLOCX_TCACHE(tis[i])); } /* Verify that tcaches allocate cached regions. */ for (i = 0; i < NTCACHES; i++) { void *p0 = ps[i]; ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i])); assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u", i); assert_ptr_eq(ps[i], p0, "Expected mallocx() to allocate cached region, i=%u", i); } /* Verify that reallocation uses cached regions. */ for (i = 0; i < NTCACHES; i++) { void *q0 = qs[i]; qs[i] = rallocx(ps[i], qsz, MALLOCX_TCACHE(tis[i])); assert_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u", i); assert_ptr_eq(qs[i], q0, "Expected rallocx() to allocate cached region, i=%u", i); /* Avoid undefined behavior in case of test failure. */ if (qs[i] == NULL) qs[i] = ps[i]; } for (i = 0; i < NTCACHES; i++) dallocx(qs[i], MALLOCX_TCACHE(tis[i])); /* Flush some non-empty tcaches. */ for (i = 0; i < NTCACHES/2; i++) { assert_d_eq(mallctl("tcache.flush", NULL, NULL, &tis[i], sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", i); } /* Destroy tcaches. 
*/ for (i = 0; i < NTCACHES; i++) { assert_d_eq(mallctl("tcache.destroy", NULL, NULL, &tis[i], sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", i); } } TEST_END TEST_BEGIN(test_thread_arena) { unsigned arena_old, arena_new, narenas; size_t sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect"); arena_new = narenas - 1; assert_d_eq(mallctl("thread.arena", &arena_old, &sz, &arena_new, sizeof(unsigned)), 0, "Unexpected mallctl() failure"); arena_new = 0; assert_d_eq(mallctl("thread.arena", &arena_old, &sz, &arena_new, sizeof(unsigned)), 0, "Unexpected mallctl() failure"); } TEST_END TEST_BEGIN(test_arena_i_lg_dirty_mult) { ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult; size_t sz = sizeof(ssize_t); assert_d_eq(mallctl("arena.0.lg_dirty_mult", &orig_lg_dirty_mult, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); lg_dirty_mult = -2; assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL, &lg_dirty_mult, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); lg_dirty_mult = (sizeof(size_t) << 3); assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL, &lg_dirty_mult, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1; lg_dirty_mult < (ssize_t)(sizeof(size_t) << 3); prev_lg_dirty_mult = lg_dirty_mult, lg_dirty_mult++) { ssize_t old_lg_dirty_mult; assert_d_eq(mallctl("arena.0.lg_dirty_mult", &old_lg_dirty_mult, &sz, &lg_dirty_mult, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult, "Unexpected old arena.0.lg_dirty_mult"); } } TEST_END TEST_BEGIN(test_arena_i_purge) { unsigned narenas; size_t sz = sizeof(unsigned); size_t mib[3]; size_t miblen = 3; assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = narenas; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); } TEST_END TEST_BEGIN(test_arena_i_dss) { const char *dss_prec_old, *dss_prec_new; size_t sz = sizeof(dss_prec_old); size_t mib[3]; size_t miblen; miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0, "Unexpected mallctlnametomib() error"); dss_prec_new = "disabled"; assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new, sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); assert_str_ne(dss_prec_old, "primary", "Unexpected default for dss precedence"); assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old, sizeof(dss_prec_old)), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_str_ne(dss_prec_old, "primary", "Unexpected value for dss precedence"); mib[1] = narenas_total_get(); dss_prec_new = "disabled"; assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new, sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); assert_str_ne(dss_prec_old, "primary", "Unexpected default for dss precedence"); assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old, sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctlbymib(mib, 
miblen, &dss_prec_old, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_str_ne(dss_prec_old, "primary", "Unexpected value for dss precedence"); } TEST_END TEST_BEGIN(test_arenas_initialized) { unsigned narenas; size_t sz = sizeof(narenas); assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); { VARIABLE_ARRAY(bool, initialized, narenas); sz = narenas * sizeof(bool); assert_d_eq(mallctl("arenas.initialized", initialized, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); } } TEST_END TEST_BEGIN(test_arenas_lg_dirty_mult) { ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult; size_t sz = sizeof(ssize_t); assert_d_eq(mallctl("arenas.lg_dirty_mult", &orig_lg_dirty_mult, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); lg_dirty_mult = -2; assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL, &lg_dirty_mult, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); lg_dirty_mult = (sizeof(size_t) << 3); assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL, &lg_dirty_mult, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1; lg_dirty_mult < (ssize_t)(sizeof(size_t) << 3); prev_lg_dirty_mult = lg_dirty_mult, lg_dirty_mult++) { ssize_t old_lg_dirty_mult; assert_d_eq(mallctl("arenas.lg_dirty_mult", &old_lg_dirty_mult, &sz, &lg_dirty_mult, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult, "Unexpected old arenas.lg_dirty_mult"); } } TEST_END TEST_BEGIN(test_arenas_constants) { #define TEST_ARENAS_CONSTANT(t, name, expected) do { \ t name; \ size_t sz = sizeof(t); \ assert_d_eq(mallctl("arenas."#name, &name, &sz, NULL, 0), 0, \ "Unexpected mallctl() failure"); \ assert_zu_eq(name, expected, "Incorrect "#name" size"); \ } while (0) TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM); TEST_ARENAS_CONSTANT(size_t, page, PAGE); TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS); TEST_ARENAS_CONSTANT(unsigned, nlruns, nlclasses); TEST_ARENAS_CONSTANT(unsigned, nhchunks, nhclasses); #undef TEST_ARENAS_CONSTANT } TEST_END TEST_BEGIN(test_arenas_bin_constants) { #define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \ t name; \ size_t sz = sizeof(t); \ assert_d_eq(mallctl("arenas.bin.0."#name, &name, &sz, NULL, 0), \ 0, "Unexpected mallctl() failure"); \ assert_zu_eq(name, expected, "Incorrect "#name" size"); \ } while (0) TEST_ARENAS_BIN_CONSTANT(size_t, size, arena_bin_info[0].reg_size); TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, arena_bin_info[0].nregs); TEST_ARENAS_BIN_CONSTANT(size_t, run_size, arena_bin_info[0].run_size); #undef TEST_ARENAS_BIN_CONSTANT } TEST_END TEST_BEGIN(test_arenas_lrun_constants) { #define TEST_ARENAS_LRUN_CONSTANT(t, name, expected) do { \ t name; \ size_t sz = sizeof(t); \ assert_d_eq(mallctl("arenas.lrun.0."#name, &name, &sz, NULL, \ 0), 0, "Unexpected mallctl() failure"); \ assert_zu_eq(name, expected, "Incorrect "#name" size"); \ } while (0) TEST_ARENAS_LRUN_CONSTANT(size_t, size, LARGE_MINCLASS); #undef TEST_ARENAS_LRUN_CONSTANT } TEST_END TEST_BEGIN(test_arenas_hchunk_constants) { #define TEST_ARENAS_HCHUNK_CONSTANT(t, name, expected) do { \ t name; \ size_t sz = sizeof(t); \ assert_d_eq(mallctl("arenas.hchunk.0."#name, &name, &sz, NULL, \ 0), 0, "Unexpected mallctl() failure"); \ assert_zu_eq(name, expected, "Incorrect "#name" size"); \ } while (0) TEST_ARENAS_HCHUNK_CONSTANT(size_t, size, chunksize); #undef TEST_ARENAS_HCHUNK_CONSTANT } TEST_END TEST_BEGIN(test_arenas_extend) { unsigned 
narenas_before, arena, narenas_after; size_t sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.narenas", &narenas_before, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctl("arenas.extend", &arena, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctl("arenas.narenas", &narenas_after, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_u_eq(narenas_before+1, narenas_after, "Unexpected number of arenas before versus after extension"); assert_u_eq(arena, narenas_after-1, "Unexpected arena index"); } TEST_END TEST_BEGIN(test_stats_arenas) { #define TEST_STATS_ARENAS(t, name) do { \ t name; \ size_t sz = sizeof(t); \ assert_d_eq(mallctl("stats.arenas.0."#name, &name, &sz, NULL, \ 0), 0, "Unexpected mallctl() failure"); \ } while (0) TEST_STATS_ARENAS(const char *, dss); TEST_STATS_ARENAS(unsigned, nthreads); TEST_STATS_ARENAS(size_t, pactive); TEST_STATS_ARENAS(size_t, pdirty); #undef TEST_STATS_ARENAS } TEST_END int main(void) { return (test( test_mallctl_errors, test_mallctlnametomib_errors, test_mallctlbymib_errors, test_mallctl_read_write, test_mallctlnametomib_short_mib, test_mallctl_config, test_mallctl_opt, test_manpage_example, test_tcache_none, test_tcache, test_thread_arena, test_arena_i_lg_dirty_mult, test_arena_i_purge, test_arena_i_dss, test_arenas_initialized, test_arenas_lg_dirty_mult, test_arenas_constants, test_arenas_bin_constants, test_arenas_lrun_constants, test_arenas_hchunk_constants, test_arenas_extend, test_stats_arenas)); } disque-1.0-rc1/deps/jemalloc/test/unit/math.c000066400000000000000000000440201264176561100211140ustar00rootroot00000000000000#include "test/jemalloc_test.h" #define MAX_REL_ERR 1.0e-9 #define MAX_ABS_ERR 1.0e-9 #include #ifndef INFINITY #define INFINITY (DBL_MAX + DBL_MAX) #endif static bool double_eq_rel(double a, double b, double max_rel_err, double max_abs_err) { double rel_err; if (fabs(a - b) < max_abs_err) return (true); rel_err = (fabs(b) > fabs(a)) ? fabs((a-b)/b) : fabs((a-b)/a); return (rel_err < max_rel_err); } static uint64_t factorial(unsigned x) { uint64_t ret = 1; unsigned i; for (i = 2; i <= x; i++) ret *= (uint64_t)i; return (ret); } TEST_BEGIN(test_ln_gamma_factorial) { unsigned x; /* exp(ln_gamma(x)) == (x-1)! for integer x. */ for (x = 1; x <= 21; x++) { assert_true(double_eq_rel(exp(ln_gamma(x)), (double)factorial(x-1), MAX_REL_ERR, MAX_ABS_ERR), "Incorrect factorial result for x=%u", x); } } TEST_END /* Expected ln_gamma([0.0..100.0] increment=0.25). 
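 * Entry i holds ln_gamma(i * 0.25); the leading INFINITY entry is the x = 0.0 slot, where ln_gamma diverges, and the verification loop below accordingly starts at i = 1.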
*/ static const double ln_gamma_misc_expected[] = { INFINITY, 1.28802252469807743, 0.57236494292470008, 0.20328095143129538, 0.00000000000000000, -0.09827183642181320, -0.12078223763524518, -0.08440112102048555, 0.00000000000000000, 0.12487171489239651, 0.28468287047291918, 0.47521466691493719, 0.69314718055994529, 0.93580193110872523, 1.20097360234707429, 1.48681557859341718, 1.79175946922805496, 2.11445692745037128, 2.45373657084244234, 2.80857141857573644, 3.17805383034794575, 3.56137591038669710, 3.95781396761871651, 4.36671603662228680, 4.78749174278204581, 5.21960398699022932, 5.66256205985714178, 6.11591589143154568, 6.57925121201010121, 7.05218545073853953, 7.53436423675873268, 8.02545839631598312, 8.52516136106541467, 9.03318691960512332, 9.54926725730099690, 10.07315123968123949, 10.60460290274525086, 11.14340011995171231, 11.68933342079726856, 12.24220494005076176, 12.80182748008146909, 13.36802367147604720, 13.94062521940376342, 14.51947222506051816, 15.10441257307551943, 15.69530137706046524, 16.29200047656724237, 16.89437797963419285, 17.50230784587389010, 18.11566950571089407, 18.73434751193644843, 19.35823122022435427, 19.98721449566188468, 20.62119544270163018, 21.26007615624470048, 21.90376249182879320, 22.55216385312342098, 23.20519299513386002, 23.86276584168908954, 24.52480131594137802, 25.19122118273868338, 25.86194990184851861, 26.53691449111561340, 27.21604439872720604, 27.89927138384089389, 28.58652940490193828, 29.27775451504081516, 29.97288476399884871, 30.67186010608067548, 31.37462231367769050, 32.08111489594735843, 32.79128302226991565, 33.50507345013689076, 34.22243445715505317, 34.94331577687681545, 35.66766853819134298, 36.39544520803305261, 37.12659953718355865, 37.86108650896109395, 38.59886229060776230, 39.33988418719949465, 40.08411059791735198, 40.83150097453079752, 41.58201578195490100, 42.33561646075348506, 43.09226539146988699, 43.85192586067515208, 44.61456202863158893, 45.38013889847690052, 46.14862228684032885, 46.91997879580877395, 47.69417578616628361, 48.47118135183522014, 49.25096429545256882, 50.03349410501914463, 50.81874093156324790, 51.60667556776436982, 52.39726942748592364, 53.19049452616926743, 53.98632346204390586, 54.78472939811231157, 55.58568604486942633, 56.38916764371992940, 57.19514895105859864, 58.00360522298051080, 58.81451220059079787, 59.62784609588432261, 60.44358357816834371, 61.26170176100199427, 62.08217818962842927, 62.90499082887649962, 63.73011805151035958, 64.55753862700632340, 65.38723171073768015, 66.21917683354901385, 67.05335389170279825, 67.88974313718154008, 68.72832516833013017, 69.56908092082363737, 70.41199165894616385, 71.25703896716800045, 72.10420474200799390, 72.95347118416940191, 73.80482079093779646, 74.65823634883015814, 75.51370092648485866, 76.37119786778275454, 77.23071078519033961, 78.09222355331530707, 78.95572030266725960, 79.82118541361435859, 80.68860351052903468, 81.55795945611502873, 82.42923834590904164, 83.30242550295004378, 84.17750647261028973, 85.05446701758152983, 85.93329311301090456, 86.81397094178107920, 87.69648688992882057, 88.58082754219766741, 89.46697967771913795, 90.35493026581838194, 91.24466646193963015, 92.13617560368709292, 93.02944520697742803, 93.92446296229978486, 94.82121673107967297, 95.71969454214321615, 96.61988458827809723, 97.52177522288820910, 98.42535495673848800, 99.33061245478741341, 100.23753653310367895, 101.14611615586458981, 102.05634043243354370, 102.96819861451382394, 103.88168009337621811, 104.79677439715833032, 105.71347118823287303, 
106.63176026064346047, 107.55163153760463501, 108.47307506906540198, 109.39608102933323153, 110.32063971475740516, 111.24674154146920557, 112.17437704317786995, 113.10353686902013237, 114.03421178146170689, 114.96639265424990128, 115.90007047041454769, 116.83523632031698014, 117.77188139974506953, 118.70999700805310795, 119.64957454634490830, 120.59060551569974962, 121.53308151543865279, 122.47699424143097247, 123.42233548443955726, 124.36909712850338394, 125.31727114935689826, 126.26684961288492559, 127.21782467361175861, 128.17018857322420899, 129.12393363912724453, 130.07905228303084755, 131.03553699956862033, 131.99338036494577864, 132.95257503561629164, 133.91311374698926784, 134.87498931216194364, 135.83819462068046846, 136.80272263732638294, 137.76856640092901785, 138.73571902320256299, 139.70417368760718091, 140.67392364823425055, 141.64496222871400732, 142.61728282114600574, 143.59087888505104047, 144.56574394634486680, 145.54187159633210058, 146.51925549072063859, 147.49788934865566148, 148.47776695177302031, 149.45888214327129617, 150.44122882700193600, 151.42480096657754984, 152.40959258449737490, 153.39559776128982094, 154.38281063467164245, 155.37122539872302696, 156.36083630307879844, 157.35163765213474107, 158.34362380426921391, 159.33678917107920370, 160.33112821663092973, 161.32663545672428995, 162.32330545817117695, 163.32113283808695314, 164.32011226319519892, 165.32023844914485267, 166.32150615984036790, 167.32391020678358018, 168.32744544842768164, 169.33210678954270634, 170.33788918059275375, 171.34478761712384198, 172.35279713916281707, 173.36191283062726143, 174.37212981874515094, 175.38344327348534080, 176.39584840699734514, 177.40934047306160437, 178.42391476654847793, 179.43956662288721304, 180.45629141754378111, 181.47408456550741107, 182.49294152078630304, 183.51285777591152737, 184.53382886144947861, 185.55585034552262869, 186.57891783333786861, 187.60302696672312095, 188.62817342367162610, 189.65435291789341932, 190.68156119837468054, 191.70979404894376330, 192.73904728784492590, 193.76931676731820176, 194.80059837318714244, 195.83288802445184729, 196.86618167288995096, 197.90047530266301123, 198.93576492992946214, 199.97204660246373464, 201.00931639928148797, 202.04757043027063901, 203.08680483582807597, 204.12701578650228385, 205.16819948264117102, 206.21035215404597807, 207.25347005962987623, 208.29754948708190909, 209.34258675253678916, 210.38857820024875878, 211.43552020227099320, 212.48340915813977858, 213.53224149456323744, 214.58201366511514152, 215.63272214993284592, 216.68436345542014010, 217.73693411395422004, 218.79043068359703739, 219.84484974781133815, 220.90018791517996988, 221.95644181913033322, 223.01360811766215875, 224.07168349307951871, 225.13066465172661879, 226.19054832372759734, 227.25133126272962159, 228.31301024565024704, 229.37558207242807384, 230.43904356577689896, 231.50339157094342113, 232.56862295546847008, 233.63473460895144740, 234.70172344281823484, 235.76958639009222907, 236.83832040516844586, 237.90792246359117712, 238.97838956183431947, 240.04971871708477238, 241.12190696702904802, 242.19495136964280846, 243.26884900298270509, 244.34359696498191283, 245.41919237324782443, 246.49563236486270057, 247.57291409618682110, 248.65103474266476269, 249.72999149863338175, 250.80978157713354904, 251.89040220972316320, 252.97185064629374551, 254.05412415488834199, 255.13722002152300661, 256.22113555000953511, 257.30586806178126835, 258.39141489572085675, 259.47777340799029844, 260.56494097186322279, 261.65291497755913497, 
262.74169283208021852, 263.83127195904967266, 264.92164979855277807, 266.01282380697938379, 267.10479145686849733, 268.19755023675537586, 269.29109765101975427, 270.38543121973674488, 271.48054847852881721, 272.57644697842033565, 273.67312428569374561, 274.77057798174683967, 275.86880566295326389, 276.96780494052313770, 278.06757344036617496, 279.16810880295668085, 280.26940868320008349, 281.37147075030043197, 282.47429268763045229, 283.57787219260217171, 284.68220697654078322, 285.78729476455760050, 286.89313329542699194, 287.99972032146268930, 289.10705360839756395, 290.21513093526289140, 291.32395009427028754, 292.43350889069523646, 293.54380514276073200, 294.65483668152336350, 295.76660135076059532, 296.87909700685889902, 297.99232151870342022, 299.10627276756946458, 300.22094864701409733, 301.33634706277030091, 302.45246593264130297, 303.56930318639643929, 304.68685676566872189, 305.80512462385280514, 306.92410472600477078, 308.04379504874236773, 309.16419358014690033, 310.28529831966631036, 311.40710727801865687, 312.52961847709792664, 313.65282994987899201, 314.77673974032603610, 315.90134590329950015, 317.02664650446632777, 318.15263962020929966, 319.27932333753892635, 320.40669575400545455, 321.53475497761127144, 322.66349912672620803, 323.79292633000159185, 324.92303472628691452, 326.05382246454587403, 327.18528770377525916, 328.31742861292224234, 329.45024337080525356, 330.58373016603343331, 331.71788719692847280, 332.85271267144611329, 333.98820480709991898, 335.12436183088397001, 336.26118197919845443, 337.39866349777429377, 338.53680464159958774, 339.67560367484657036, 340.81505887079896411, 341.95516851178109619, 343.09593088908627578, 344.23734430290727460, 345.37940706226686416, 346.52211748494903532, 347.66547389743118401, 348.80947463481720661, 349.95411804077025408, 351.09940246744753267, 352.24532627543504759, 353.39188783368263103, 354.53908551944078908, 355.68691771819692349, 356.83538282361303118, 357.98447923746385868, 359.13420536957539753 }; TEST_BEGIN(test_ln_gamma_misc) { unsigned i; for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) { double x = (double)i * 0.25; assert_true(double_eq_rel(ln_gamma(x), ln_gamma_misc_expected[i], MAX_REL_ERR, MAX_ABS_ERR), "Incorrect ln_gamma result for i=%u", i); } } TEST_END /* Expected pt_norm([0.01..0.99] increment=0.01). 
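 * Entry i holds the standard normal quantile at p = i * 0.01; the leading -INFINITY entry is the (skipped) p = 0.0 slot, mirroring the layout of the ln_gamma table above.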
*/ static const double pt_norm_expected[] = { -INFINITY, -2.32634787404084076, -2.05374891063182252, -1.88079360815125085, -1.75068607125216946, -1.64485362695147264, -1.55477359459685305, -1.47579102817917063, -1.40507156030963221, -1.34075503369021654, -1.28155156554460081, -1.22652812003661049, -1.17498679206608991, -1.12639112903880045, -1.08031934081495606, -1.03643338949378938, -0.99445788320975281, -0.95416525314619416, -0.91536508784281390, -0.87789629505122846, -0.84162123357291418, -0.80642124701824025, -0.77219321418868492, -0.73884684918521371, -0.70630256284008752, -0.67448975019608171, -0.64334540539291685, -0.61281299101662701, -0.58284150727121620, -0.55338471955567281, -0.52440051270804067, -0.49585034734745320, -0.46769879911450812, -0.43991316567323380, -0.41246312944140462, -0.38532046640756751, -0.35845879325119373, -0.33185334643681652, -0.30548078809939738, -0.27931903444745404, -0.25334710313579978, -0.22754497664114931, -0.20189347914185077, -0.17637416478086135, -0.15096921549677725, -0.12566134685507399, -0.10043372051146975, -0.07526986209982976, -0.05015358346473352, -0.02506890825871106, 0.00000000000000000, 0.02506890825871106, 0.05015358346473366, 0.07526986209982990, 0.10043372051146990, 0.12566134685507413, 0.15096921549677739, 0.17637416478086146, 0.20189347914185105, 0.22754497664114931, 0.25334710313579978, 0.27931903444745404, 0.30548078809939738, 0.33185334643681652, 0.35845879325119373, 0.38532046640756762, 0.41246312944140484, 0.43991316567323391, 0.46769879911450835, 0.49585034734745348, 0.52440051270804111, 0.55338471955567303, 0.58284150727121620, 0.61281299101662701, 0.64334540539291685, 0.67448975019608171, 0.70630256284008752, 0.73884684918521371, 0.77219321418868492, 0.80642124701824036, 0.84162123357291441, 0.87789629505122879, 0.91536508784281423, 0.95416525314619460, 0.99445788320975348, 1.03643338949378938, 1.08031934081495606, 1.12639112903880045, 1.17498679206608991, 1.22652812003661049, 1.28155156554460081, 1.34075503369021654, 1.40507156030963265, 1.47579102817917085, 1.55477359459685394, 1.64485362695147308, 1.75068607125217102, 1.88079360815125041, 2.05374891063182208, 2.32634787404084076 }; TEST_BEGIN(test_pt_norm) { unsigned i; for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) { double p = (double)i * 0.01; assert_true(double_eq_rel(pt_norm(p), pt_norm_expected[i], MAX_REL_ERR, MAX_ABS_ERR), "Incorrect pt_norm result for i=%u", i); } } TEST_END /* * Expected pt_chi2(p=[0.01..0.99] increment=0.07, * df={0.1, 1.1, 10.1, 100.1, 1000.1}). 
*/ static const double pt_chi2_df[] = {0.1, 1.1, 10.1, 100.1, 1000.1}; static const double pt_chi2_expected[] = { 1.168926411457320e-40, 1.347680397072034e-22, 3.886980416666260e-17, 8.245951724356564e-14, 2.068936347497604e-11, 1.562561743309233e-09, 5.459543043426564e-08, 1.114775688149252e-06, 1.532101202364371e-05, 1.553884683726585e-04, 1.239396954915939e-03, 8.153872320255721e-03, 4.631183739647523e-02, 2.473187311701327e-01, 2.175254800183617e+00, 0.0003729887888876379, 0.0164409238228929513, 0.0521523015190650113, 0.1064701372271216612, 0.1800913735793082115, 0.2748704281195626931, 0.3939246282787986497, 0.5420727552260817816, 0.7267265822221973259, 0.9596554296000253670, 1.2607440376386165326, 1.6671185084541604304, 2.2604828984738705167, 3.2868613342148607082, 6.9298574921692139839, 2.606673548632508, 4.602913725294877, 5.646152813924212, 6.488971315540869, 7.249823275816285, 7.977314231410841, 8.700354939944047, 9.441728024225892, 10.224338321374127, 11.076435368801061, 12.039320937038386, 13.183878752697167, 14.657791935084575, 16.885728216339373, 23.361991680031817, 70.14844087392152, 80.92379498849355, 85.53325420085891, 88.94433120715347, 91.83732712857017, 94.46719943606301, 96.96896479994635, 99.43412843510363, 101.94074719829733, 104.57228644307247, 107.43900093448734, 110.71844673417287, 114.76616819871325, 120.57422505959563, 135.92318818757556, 899.0072447849649, 937.9271278858220, 953.8117189560207, 965.3079371501154, 974.8974061207954, 983.4936235182347, 991.5691170518946, 999.4334123954690, 1007.3391826856553, 1015.5445154999951, 1024.3777075619569, 1034.3538789836223, 1046.4872561869577, 1063.5717461999654, 1107.0741966053859 }; TEST_BEGIN(test_pt_chi2) { unsigned i, j; unsigned e = 0; for (i = 0; i < sizeof(pt_chi2_df)/sizeof(double); i++) { double df = pt_chi2_df[i]; double ln_gamma_df = ln_gamma(df * 0.5); for (j = 1; j < 100; j += 7) { double p = (double)j * 0.01; assert_true(double_eq_rel(pt_chi2(p, df, ln_gamma_df), pt_chi2_expected[e], MAX_REL_ERR, MAX_ABS_ERR), "Incorrect pt_chi2 result for i=%u, j=%u", i, j); e++; } } } TEST_END /* * Expected pt_gamma(p=[0.1..0.99] increment=0.07, * shape=[0.5..3.0] increment=0.5). 
*/ static const double pt_gamma_shape[] = {0.5, 1.0, 1.5, 2.0, 2.5, 3.0}; static const double pt_gamma_expected[] = { 7.854392895485103e-05, 5.043466107888016e-03, 1.788288957794883e-02, 3.900956150232906e-02, 6.913847560638034e-02, 1.093710833465766e-01, 1.613412523825817e-01, 2.274682115597864e-01, 3.114117323127083e-01, 4.189466220207417e-01, 5.598106789059246e-01, 7.521856146202706e-01, 1.036125427911119e+00, 1.532450860038180e+00, 3.317448300510606e+00, 0.01005033585350144, 0.08338160893905107, 0.16251892949777497, 0.24846135929849966, 0.34249030894677596, 0.44628710262841947, 0.56211891815354142, 0.69314718055994529, 0.84397007029452920, 1.02165124753198167, 1.23787435600161766, 1.51412773262977574, 1.89711998488588196, 2.52572864430825783, 4.60517018598809091, 0.05741590094955853, 0.24747378084860744, 0.39888572212236084, 0.54394139997444901, 0.69048812513915159, 0.84311389861296104, 1.00580622221479898, 1.18298694218766931, 1.38038096305861213, 1.60627736383027453, 1.87396970522337947, 2.20749220408081070, 2.65852391865854942, 3.37934630984842244, 5.67243336507218476, 0.1485547402532659, 0.4657458011640391, 0.6832386130709406, 0.8794297834672100, 1.0700752852474524, 1.2629614217350744, 1.4638400448580779, 1.6783469900166610, 1.9132338090606940, 2.1778589228618777, 2.4868823970010991, 2.8664695666264195, 3.3724415436062114, 4.1682658512758071, 6.6383520679938108, 0.2771490383641385, 0.7195001279643727, 0.9969081732265243, 1.2383497880608061, 1.4675206597269927, 1.6953064251816552, 1.9291243435606809, 2.1757300955477641, 2.4428032131216391, 2.7406534569230616, 3.0851445039665513, 3.5043101122033367, 4.0575997065264637, 4.9182956424675286, 7.5431362346944937, 0.4360451650782932, 0.9983600902486267, 1.3306365880734528, 1.6129750834753802, 1.8767241606994294, 2.1357032436097660, 2.3988853336865565, 2.6740603137235603, 2.9697561737517959, 3.2971457713883265, 3.6731795898504660, 4.1275751617770631, 4.7230515633946677, 5.6417477865306020, 8.4059469148854635 }; TEST_BEGIN(test_pt_gamma_shape) { unsigned i, j; unsigned e = 0; for (i = 0; i < sizeof(pt_gamma_shape)/sizeof(double); i++) { double shape = pt_gamma_shape[i]; double ln_gamma_shape = ln_gamma(shape); for (j = 1; j < 100; j += 7) { double p = (double)j * 0.01; assert_true(double_eq_rel(pt_gamma(p, shape, 1.0, ln_gamma_shape), pt_gamma_expected[e], MAX_REL_ERR, MAX_ABS_ERR), "Incorrect pt_gamma result for i=%u, j=%u", i, j); e++; } } } TEST_END TEST_BEGIN(test_pt_gamma_scale) { double shape = 1.0; double ln_gamma_shape = ln_gamma(shape); assert_true(double_eq_rel( pt_gamma(0.5, shape, 1.0, ln_gamma_shape) * 10.0, pt_gamma(0.5, shape, 10.0, ln_gamma_shape), MAX_REL_ERR, MAX_ABS_ERR), "Scale should be trivially equivalent to external multiplication"); } TEST_END int main(void) { return (test( test_ln_gamma_factorial, test_ln_gamma_misc, test_pt_norm, test_pt_chi2, test_pt_gamma_shape, test_pt_gamma_scale)); } disque-1.0-rc1/deps/jemalloc/test/unit/mq.c000066400000000000000000000034061264176561100206030ustar00rootroot00000000000000#include "test/jemalloc_test.h" #define NSENDERS 3 #define NMSGS 100000 typedef struct mq_msg_s mq_msg_t; struct mq_msg_s { mq_msg(mq_msg_t) link; }; mq_gen(static, mq_, mq_t, mq_msg_t, link) TEST_BEGIN(test_mq_basic) { mq_t mq; mq_msg_t msg; assert_false(mq_init(&mq), "Unexpected mq_init() failure"); assert_u_eq(mq_count(&mq), 0, "mq should be empty"); assert_ptr_null(mq_tryget(&mq), "mq_tryget() should fail when the queue is empty"); mq_put(&mq, &msg); assert_u_eq(mq_count(&mq), 1, "mq should contain one 
message"); assert_ptr_eq(mq_tryget(&mq), &msg, "mq_tryget() should return msg"); mq_put(&mq, &msg); assert_ptr_eq(mq_get(&mq), &msg, "mq_get() should return msg"); mq_fini(&mq); } TEST_END static void * thd_receiver_start(void *arg) { mq_t *mq = (mq_t *)arg; unsigned i; for (i = 0; i < (NSENDERS * NMSGS); i++) { mq_msg_t *msg = mq_get(mq); assert_ptr_not_null(msg, "mq_get() should never return NULL"); dallocx(msg, 0); } return (NULL); } static void * thd_sender_start(void *arg) { mq_t *mq = (mq_t *)arg; unsigned i; for (i = 0; i < NMSGS; i++) { mq_msg_t *msg; void *p; p = mallocx(sizeof(mq_msg_t), 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); msg = (mq_msg_t *)p; mq_put(mq, msg); } return (NULL); } TEST_BEGIN(test_mq_threaded) { mq_t mq; thd_t receiver; thd_t senders[NSENDERS]; unsigned i; assert_false(mq_init(&mq), "Unexpected mq_init() failure"); thd_create(&receiver, thd_receiver_start, (void *)&mq); for (i = 0; i < NSENDERS; i++) thd_create(&senders[i], thd_sender_start, (void *)&mq); thd_join(receiver, NULL); for (i = 0; i < NSENDERS; i++) thd_join(senders[i], NULL); mq_fini(&mq); } TEST_END int main(void) { return (test( test_mq_basic, test_mq_threaded)); } disque-1.0-rc1/deps/jemalloc/test/unit/mtx.c000066400000000000000000000017531264176561100210010ustar00rootroot00000000000000#include "test/jemalloc_test.h" #define NTHREADS 2 #define NINCRS 2000000 TEST_BEGIN(test_mtx_basic) { mtx_t mtx; assert_false(mtx_init(&mtx), "Unexpected mtx_init() failure"); mtx_lock(&mtx); mtx_unlock(&mtx); mtx_fini(&mtx); } TEST_END typedef struct { mtx_t mtx; unsigned x; } thd_start_arg_t; static void * thd_start(void *varg) { thd_start_arg_t *arg = (thd_start_arg_t *)varg; unsigned i; for (i = 0; i < NINCRS; i++) { mtx_lock(&arg->mtx); arg->x++; mtx_unlock(&arg->mtx); } return (NULL); } TEST_BEGIN(test_mtx_race) { thd_start_arg_t arg; thd_t thds[NTHREADS]; unsigned i; assert_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure"); arg.x = 0; for (i = 0; i < NTHREADS; i++) thd_create(&thds[i], thd_start, (void *)&arg); for (i = 0; i < NTHREADS; i++) thd_join(thds[i], NULL); assert_u_eq(arg.x, NTHREADS * NINCRS, "Race-related counter corruption"); } TEST_END int main(void) { return (test( test_mtx_basic, test_mtx_race)); } disque-1.0-rc1/deps/jemalloc/test/unit/prof_accum.c000066400000000000000000000035571264176561100223130ustar00rootroot00000000000000#include "test/jemalloc_test.h" #define NTHREADS 4 #define NALLOCS_PER_THREAD 50 #define DUMP_INTERVAL 1 #define BT_COUNT_CHECK_INTERVAL 5 #ifdef JEMALLOC_PROF const char *malloc_conf = "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0"; #endif static int prof_dump_open_intercept(bool propagate_err, const char *filename) { int fd; fd = open("/dev/null", O_WRONLY); assert_d_ne(fd, -1, "Unexpected open() failure"); return (fd); } static void * alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration) { return (btalloc(1, thd_ind*NALLOCS_PER_THREAD + iteration)); } static void * thd_start(void *varg) { unsigned thd_ind = *(unsigned *)varg; size_t bt_count_prev, bt_count; unsigned i_prev, i; i_prev = 0; bt_count_prev = 0; for (i = 0; i < NALLOCS_PER_THREAD; i++) { void *p = alloc_from_permuted_backtrace(thd_ind, i); dallocx(p, 0); if (i % DUMP_INTERVAL == 0) { assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), 0, "Unexpected error while dumping heap profile"); } if (i % BT_COUNT_CHECK_INTERVAL == 0 || i+1 == NALLOCS_PER_THREAD) { bt_count = prof_bt_count(); assert_zu_le(bt_count_prev+(i-i_prev), bt_count, "Expected 
larger backtrace count increase"); i_prev = i; bt_count_prev = bt_count; } } return (NULL); } TEST_BEGIN(test_idump) { bool active; thd_t thds[NTHREADS]; unsigned thd_args[NTHREADS]; unsigned i; test_skip_if(!config_prof); active = true; assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), 0, "Unexpected mallctl failure while activating profiling"); prof_dump_open = prof_dump_open_intercept; for (i = 0; i < NTHREADS; i++) { thd_args[i] = i; thd_create(&thds[i], thd_start, (void *)&thd_args[i]); } for (i = 0; i < NTHREADS; i++) thd_join(thds[i], NULL); } TEST_END int main(void) { return (test( test_idump)); } disque-1.0-rc1/deps/jemalloc/test/unit/prof_active.c000066400000000000000000000071351264176561100224720ustar00rootroot00000000000000#include "test/jemalloc_test.h" #ifdef JEMALLOC_PROF const char *malloc_conf = "prof:true,prof_thread_active_init:false,lg_prof_sample:0"; #endif static void mallctl_bool_get(const char *name, bool expected, const char *func, int line) { bool old; size_t sz; sz = sizeof(old); assert_d_eq(mallctl(name, &old, &sz, NULL, 0), 0, "%s():%d: Unexpected mallctl failure reading %s", func, line, name); assert_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line, name); } static void mallctl_bool_set(const char *name, bool old_expected, bool val_new, const char *func, int line) { bool old; size_t sz; sz = sizeof(old); assert_d_eq(mallctl(name, &old, &sz, &val_new, sizeof(val_new)), 0, "%s():%d: Unexpected mallctl failure reading/writing %s", func, line, name); assert_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func, line, name); } static void mallctl_prof_active_get_impl(bool prof_active_old_expected, const char *func, int line) { mallctl_bool_get("prof.active", prof_active_old_expected, func, line); } #define mallctl_prof_active_get(a) \ mallctl_prof_active_get_impl(a, __func__, __LINE__) static void mallctl_prof_active_set_impl(bool prof_active_old_expected, bool prof_active_new, const char *func, int line) { mallctl_bool_set("prof.active", prof_active_old_expected, prof_active_new, func, line); } #define mallctl_prof_active_set(a, b) \ mallctl_prof_active_set_impl(a, b, __func__, __LINE__) static void mallctl_thread_prof_active_get_impl(bool thread_prof_active_old_expected, const char *func, int line) { mallctl_bool_get("thread.prof.active", thread_prof_active_old_expected, func, line); } #define mallctl_thread_prof_active_get(a) \ mallctl_thread_prof_active_get_impl(a, __func__, __LINE__) static void mallctl_thread_prof_active_set_impl(bool thread_prof_active_old_expected, bool thread_prof_active_new, const char *func, int line) { mallctl_bool_set("thread.prof.active", thread_prof_active_old_expected, thread_prof_active_new, func, line); } #define mallctl_thread_prof_active_set(a, b) \ mallctl_thread_prof_active_set_impl(a, b, __func__, __LINE__) static void prof_sampling_probe_impl(bool expect_sample, const char *func, int line) { void *p; size_t expected_backtraces = expect_sample ? 
1 : 0; assert_zu_eq(prof_bt_count(), 0, "%s():%d: Expected 0 backtraces", func, line); p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_zu_eq(prof_bt_count(), expected_backtraces, "%s():%d: Unexpected backtrace count", func, line); dallocx(p, 0); } #define prof_sampling_probe(a) \ prof_sampling_probe_impl(a, __func__, __LINE__) TEST_BEGIN(test_prof_active) { test_skip_if(!config_prof); mallctl_prof_active_get(true); mallctl_thread_prof_active_get(false); mallctl_prof_active_set(true, true); mallctl_thread_prof_active_set(false, false); /* prof.active, !thread.prof.active. */ prof_sampling_probe(false); mallctl_prof_active_set(true, false); mallctl_thread_prof_active_set(false, false); /* !prof.active, !thread.prof.active. */ prof_sampling_probe(false); mallctl_prof_active_set(false, false); mallctl_thread_prof_active_set(false, true); /* !prof.active, thread.prof.active. */ prof_sampling_probe(false); mallctl_prof_active_set(false, true); mallctl_thread_prof_active_set(true, true); /* prof.active, thread.prof.active. */ prof_sampling_probe(true); /* Restore settings. */ mallctl_prof_active_set(true, true); mallctl_thread_prof_active_set(true, false); } TEST_END int main(void) { return (test( test_prof_active)); } disque-1.0-rc1/deps/jemalloc/test/unit/prof_gdump.c000066400000000000000000000036551264176561100223360ustar00rootroot00000000000000#include "test/jemalloc_test.h" #ifdef JEMALLOC_PROF const char *malloc_conf = "prof:true,prof_active:false,prof_gdump:true"; #endif static bool did_prof_dump_open; static int prof_dump_open_intercept(bool propagate_err, const char *filename) { int fd; did_prof_dump_open = true; fd = open("/dev/null", O_WRONLY); assert_d_ne(fd, -1, "Unexpected open() failure"); return (fd); } TEST_BEGIN(test_gdump) { bool active, gdump, gdump_old; void *p, *q, *r, *s; size_t sz; test_skip_if(!config_prof); active = true; assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), 0, "Unexpected mallctl failure while activating profiling"); prof_dump_open = prof_dump_open_intercept; did_prof_dump_open = false; p = mallocx(chunksize, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_true(did_prof_dump_open, "Expected a profile dump"); did_prof_dump_open = false; q = mallocx(chunksize, 0); assert_ptr_not_null(q, "Unexpected mallocx() failure"); assert_true(did_prof_dump_open, "Expected a profile dump"); gdump = false; sz = sizeof(gdump_old); assert_d_eq(mallctl("prof.gdump", &gdump_old, &sz, &gdump, sizeof(gdump)), 0, "Unexpected mallctl failure while disabling prof.gdump"); assert(gdump_old); did_prof_dump_open = false; r = mallocx(chunksize, 0); assert_ptr_not_null(r, "Unexpected mallocx() failure"); assert_false(did_prof_dump_open, "Unexpected profile dump"); gdump = true; sz = sizeof(gdump_old); assert_d_eq(mallctl("prof.gdump", &gdump_old, &sz, &gdump, sizeof(gdump)), 0, "Unexpected mallctl failure while enabling prof.gdump"); assert(!gdump_old); did_prof_dump_open = false; s = mallocx(chunksize, 0); assert_ptr_not_null(s, "Unexpected mallocx() failure"); assert_true(did_prof_dump_open, "Expected a profile dump"); dallocx(p, 0); dallocx(q, 0); dallocx(r, 0); dallocx(s, 0); } TEST_END int main(void) { return (test( test_gdump)); } disque-1.0-rc1/deps/jemalloc/test/unit/prof_idump.c000066400000000000000000000017111264176561100223270ustar00rootroot00000000000000#include "test/jemalloc_test.h" #ifdef JEMALLOC_PROF const char *malloc_conf = "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0,"
"lg_prof_interval:0"; #endif static bool did_prof_dump_open; static int prof_dump_open_intercept(bool propagate_err, const char *filename) { int fd; did_prof_dump_open = true; fd = open("/dev/null", O_WRONLY); assert_d_ne(fd, -1, "Unexpected open() failure"); return (fd); } TEST_BEGIN(test_idump) { bool active; void *p; test_skip_if(!config_prof); active = true; assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), 0, "Unexpected mallctl failure while activating profiling"); prof_dump_open = prof_dump_open_intercept; did_prof_dump_open = false; p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); dallocx(p, 0); assert_true(did_prof_dump_open, "Expected a profile dump"); } TEST_END int main(void) { return (test( test_idump)); } disque-1.0-rc1/deps/jemalloc/test/unit/prof_reset.c000066400000000000000000000165451264176561100223460ustar00rootroot00000000000000#include "test/jemalloc_test.h" #ifdef JEMALLOC_PROF const char *malloc_conf = "prof:true,prof_active:false,lg_prof_sample:0"; #endif static int prof_dump_open_intercept(bool propagate_err, const char *filename) { int fd; fd = open("/dev/null", O_WRONLY); assert_d_ne(fd, -1, "Unexpected open() failure"); return (fd); } static void set_prof_active(bool active) { assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), 0, "Unexpected mallctl failure"); } static size_t get_lg_prof_sample(void) { size_t lg_prof_sample; size_t sz = sizeof(size_t); assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0, "Unexpected mallctl failure while reading profiling sample rate"); return (lg_prof_sample); } static void do_prof_reset(size_t lg_prof_sample) { assert_d_eq(mallctl("prof.reset", NULL, NULL, &lg_prof_sample, sizeof(size_t)), 0, "Unexpected mallctl failure while resetting profile data"); assert_zu_eq(lg_prof_sample, get_lg_prof_sample(), "Expected profile sample rate change"); } TEST_BEGIN(test_prof_reset_basic) { size_t lg_prof_sample_orig, lg_prof_sample, lg_prof_sample_next; size_t sz; unsigned i; test_skip_if(!config_prof); sz = sizeof(size_t); assert_d_eq(mallctl("opt.lg_prof_sample", &lg_prof_sample_orig, &sz, NULL, 0), 0, "Unexpected mallctl failure while reading profiling sample rate"); assert_zu_eq(lg_prof_sample_orig, 0, "Unexpected profiling sample rate"); lg_prof_sample = get_lg_prof_sample(); assert_zu_eq(lg_prof_sample_orig, lg_prof_sample, "Unexpected disagreement between \"opt.lg_prof_sample\" and " "\"prof.lg_sample\""); /* Test simple resets. */ for (i = 0; i < 2; i++) { assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, "Unexpected mallctl failure while resetting profile data"); lg_prof_sample = get_lg_prof_sample(); assert_zu_eq(lg_prof_sample_orig, lg_prof_sample, "Unexpected profile sample rate change"); } /* Test resets with prof.lg_sample changes. */ lg_prof_sample_next = 1; for (i = 0; i < 2; i++) { do_prof_reset(lg_prof_sample_next); lg_prof_sample = get_lg_prof_sample(); assert_zu_eq(lg_prof_sample, lg_prof_sample_next, "Expected profile sample rate change"); lg_prof_sample_next = lg_prof_sample_orig; } /* Make sure the test code restored prof.lg_sample. 
*/ lg_prof_sample = get_lg_prof_sample(); assert_zu_eq(lg_prof_sample_orig, lg_prof_sample, "Unexpected disagreement between \"opt.lg_prof_sample\" and " "\"prof.lg_sample\""); } TEST_END bool prof_dump_header_intercepted = false; prof_cnt_t cnt_all_copy = {0, 0, 0, 0}; static bool prof_dump_header_intercept(bool propagate_err, const prof_cnt_t *cnt_all) { prof_dump_header_intercepted = true; memcpy(&cnt_all_copy, cnt_all, sizeof(prof_cnt_t)); return (false); } TEST_BEGIN(test_prof_reset_cleanup) { void *p; prof_dump_header_t *prof_dump_header_orig; test_skip_if(!config_prof); set_prof_active(true); assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces"); p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace"); prof_dump_header_orig = prof_dump_header; prof_dump_header = prof_dump_header_intercept; assert_false(prof_dump_header_intercepted, "Unexpected intercept"); assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), 0, "Unexpected error while dumping heap profile"); assert_true(prof_dump_header_intercepted, "Expected intercept"); assert_u64_eq(cnt_all_copy.curobjs, 1, "Expected 1 allocation"); assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, "Unexpected error while resetting heap profile data"); assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), 0, "Unexpected error while dumping heap profile"); assert_u64_eq(cnt_all_copy.curobjs, 0, "Expected 0 allocations"); assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace"); prof_dump_header = prof_dump_header_orig; dallocx(p, 0); assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces"); set_prof_active(false); } TEST_END #define NTHREADS 4 #define NALLOCS_PER_THREAD (1U << 13) #define OBJ_RING_BUF_COUNT 1531 #define RESET_INTERVAL (1U << 10) #define DUMP_INTERVAL 3677 static void * thd_start(void *varg) { unsigned thd_ind = *(unsigned *)varg; unsigned i; void *objs[OBJ_RING_BUF_COUNT]; memset(objs, 0, sizeof(objs)); for (i = 0; i < NALLOCS_PER_THREAD; i++) { if (i % RESET_INTERVAL == 0) { assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, "Unexpected error while resetting heap profile " "data"); } if (i % DUMP_INTERVAL == 0) { assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), 0, "Unexpected error while dumping heap profile"); } { void **pp = &objs[i % OBJ_RING_BUF_COUNT]; if (*pp != NULL) { dallocx(*pp, 0); *pp = NULL; } *pp = btalloc(1, thd_ind*NALLOCS_PER_THREAD + i); assert_ptr_not_null(*pp, "Unexpected btalloc() failure"); } } /* Clean up any remaining objects. 
*/ for (i = 0; i < OBJ_RING_BUF_COUNT; i++) { void **pp = &objs[i % OBJ_RING_BUF_COUNT]; if (*pp != NULL) { dallocx(*pp, 0); *pp = NULL; } } return (NULL); } TEST_BEGIN(test_prof_reset) { size_t lg_prof_sample_orig; thd_t thds[NTHREADS]; unsigned thd_args[NTHREADS]; unsigned i; size_t bt_count, tdata_count; test_skip_if(!config_prof); bt_count = prof_bt_count(); assert_zu_eq(bt_count, 0, "Unexpected pre-existing tdata structures"); tdata_count = prof_tdata_count(); lg_prof_sample_orig = get_lg_prof_sample(); do_prof_reset(5); set_prof_active(true); for (i = 0; i < NTHREADS; i++) { thd_args[i] = i; thd_create(&thds[i], thd_start, (void *)&thd_args[i]); } for (i = 0; i < NTHREADS; i++) thd_join(thds[i], NULL); assert_zu_eq(prof_bt_count(), bt_count, "Unexpected backtrace count change"); assert_zu_eq(prof_tdata_count(), tdata_count, "Unexpected remaining tdata structures"); set_prof_active(false); do_prof_reset(lg_prof_sample_orig); } TEST_END #undef NTHREADS #undef NALLOCS_PER_THREAD #undef OBJ_RING_BUF_COUNT #undef RESET_INTERVAL #undef DUMP_INTERVAL /* Test sampling at the same allocation site across resets. */ #define NITER 10 TEST_BEGIN(test_xallocx) { size_t lg_prof_sample_orig; unsigned i; void *ptrs[NITER]; test_skip_if(!config_prof); lg_prof_sample_orig = get_lg_prof_sample(); set_prof_active(true); /* Reset profiling. */ do_prof_reset(0); for (i = 0; i < NITER; i++) { void *p; size_t sz, nsz; /* Reset profiling. */ do_prof_reset(0); /* Allocate small object (which will be promoted). */ p = ptrs[i] = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); /* Reset profiling. */ do_prof_reset(0); /* Perform successful xallocx(). */ sz = sallocx(p, 0); assert_zu_eq(xallocx(p, sz, 0, 0), sz, "Unexpected xallocx() failure"); /* Perform unsuccessful xallocx(). */ nsz = nallocx(sz+1, 0); assert_zu_eq(xallocx(p, nsz, 0, 0), sz, "Unexpected xallocx() success"); } for (i = 0; i < NITER; i++) { /* dallocx. */ dallocx(ptrs[i], 0); } set_prof_active(false); do_prof_reset(lg_prof_sample_orig); } TEST_END #undef NITER int main(void) { /* Intercept dumping prior to running any tests.
*/ prof_dump_open = prof_dump_open_intercept; return (test( test_prof_reset_basic, test_prof_reset_cleanup, test_prof_reset, test_xallocx)); } disque-1.0-rc1/deps/jemalloc/test/unit/prof_thread_name.c000066400000000000000000000062511264176561100234640ustar00rootroot00000000000000#include "test/jemalloc_test.h" #ifdef JEMALLOC_PROF const char *malloc_conf = "prof:true,prof_active:false"; #endif static void mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func, int line) { const char *thread_name_old; size_t sz; sz = sizeof(thread_name_old); assert_d_eq(mallctl("thread.prof.name", &thread_name_old, &sz, NULL, 0), 0, "%s():%d: Unexpected mallctl failure reading thread.prof.name", func, line); assert_str_eq(thread_name_old, thread_name_expected, "%s():%d: Unexpected thread.prof.name value", func, line); } #define mallctl_thread_name_get(a) \ mallctl_thread_name_get_impl(a, __func__, __LINE__) static void mallctl_thread_name_set_impl(const char *thread_name, const char *func, int line) { assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name, sizeof(thread_name)), 0, "%s():%d: Unexpected mallctl failure reading thread.prof.name", func, line); mallctl_thread_name_get_impl(thread_name, func, line); } #define mallctl_thread_name_set(a) \ mallctl_thread_name_set_impl(a, __func__, __LINE__) TEST_BEGIN(test_prof_thread_name_validation) { const char *thread_name; test_skip_if(!config_prof); mallctl_thread_name_get(""); mallctl_thread_name_set("hi there"); /* NULL input shouldn't be allowed. */ thread_name = NULL; assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name, sizeof(thread_name)), EFAULT, "Unexpected mallctl result writing \"%s\" to thread.prof.name", thread_name); /* '\n' shouldn't be allowed. */ thread_name = "hi\nthere"; assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name, sizeof(thread_name)), EFAULT, "Unexpected mallctl result writing \"%s\" to thread.prof.name", thread_name); /* Simultaneous read/write shouldn't be allowed. */ { const char *thread_name_old; size_t sz; sz = sizeof(thread_name_old); assert_d_eq(mallctl("thread.prof.name", &thread_name_old, &sz, &thread_name, sizeof(thread_name)), EPERM, "Unexpected mallctl result writing \"%s\" to " "thread.prof.name", thread_name); } mallctl_thread_name_set(""); } TEST_END #define NTHREADS 4 #define NRESET 25 static void * thd_start(void *varg) { unsigned thd_ind = *(unsigned *)varg; char thread_name[16] = ""; unsigned i; malloc_snprintf(thread_name, sizeof(thread_name), "thread %u", thd_ind); mallctl_thread_name_get(""); mallctl_thread_name_set(thread_name); for (i = 0; i < NRESET; i++) { assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, "Unexpected error while resetting heap profile data"); mallctl_thread_name_get(thread_name); } mallctl_thread_name_set(thread_name); mallctl_thread_name_set(""); return (NULL); } TEST_BEGIN(test_prof_thread_name_threaded) { thd_t thds[NTHREADS]; unsigned thd_args[NTHREADS]; unsigned i; test_skip_if(!config_prof); for (i = 0; i < NTHREADS; i++) { thd_args[i] = i; thd_create(&thds[i], thd_start, (void *)&thd_args[i]); } for (i = 0; i < NTHREADS; i++) thd_join(thds[i], NULL); } TEST_END #undef NTHREADS #undef NRESET int main(void) { return (test( test_prof_thread_name_validation, test_prof_thread_name_threaded)); } disque-1.0-rc1/deps/jemalloc/test/unit/ql.c000066400000000000000000000106031264176561100205770ustar00rootroot00000000000000#include "test/jemalloc_test.h" /* Number of ring entries, in [2..26]. 
*/ #define NENTRIES 9 typedef struct list_s list_t; typedef ql_head(list_t) list_head_t; struct list_s { ql_elm(list_t) link; char id; }; static void test_empty_list(list_head_t *head) { list_t *t; unsigned i; assert_ptr_null(ql_first(head), "Unexpected element for empty list"); assert_ptr_null(ql_last(head, link), "Unexpected element for empty list"); i = 0; ql_foreach(t, head, link) { i++; } assert_u_eq(i, 0, "Unexpected element for empty list"); i = 0; ql_reverse_foreach(t, head, link) { i++; } assert_u_eq(i, 0, "Unexpected element for empty list"); } TEST_BEGIN(test_ql_empty) { list_head_t head; ql_new(&head); test_empty_list(&head); } TEST_END static void init_entries(list_t *entries, unsigned nentries) { unsigned i; for (i = 0; i < nentries; i++) { entries[i].id = 'a' + i; ql_elm_new(&entries[i], link); } } static void test_entries_list(list_head_t *head, list_t *entries, unsigned nentries) { list_t *t; unsigned i; assert_c_eq(ql_first(head)->id, entries[0].id, "Element id mismatch"); assert_c_eq(ql_last(head, link)->id, entries[nentries-1].id, "Element id mismatch"); i = 0; ql_foreach(t, head, link) { assert_c_eq(t->id, entries[i].id, "Element id mismatch"); i++; } i = 0; ql_reverse_foreach(t, head, link) { assert_c_eq(t->id, entries[nentries-i-1].id, "Element id mismatch"); i++; } for (i = 0; i < nentries-1; i++) { t = ql_next(head, &entries[i], link); assert_c_eq(t->id, entries[i+1].id, "Element id mismatch"); } assert_ptr_null(ql_next(head, &entries[nentries-1], link), "Unexpected element"); assert_ptr_null(ql_prev(head, &entries[0], link), "Unexpected element"); for (i = 1; i < nentries; i++) { t = ql_prev(head, &entries[i], link); assert_c_eq(t->id, entries[i-1].id, "Element id mismatch"); } } TEST_BEGIN(test_ql_tail_insert) { list_head_t head; list_t entries[NENTRIES]; unsigned i; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); for (i = 0; i < NENTRIES; i++) ql_tail_insert(&head, &entries[i], link); test_entries_list(&head, entries, NENTRIES); } TEST_END TEST_BEGIN(test_ql_tail_remove) { list_head_t head; list_t entries[NENTRIES]; unsigned i; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); for (i = 0; i < NENTRIES; i++) ql_tail_insert(&head, &entries[i], link); for (i = 0; i < NENTRIES; i++) { test_entries_list(&head, entries, NENTRIES-i); ql_tail_remove(&head, list_t, link); } test_empty_list(&head); } TEST_END TEST_BEGIN(test_ql_head_insert) { list_head_t head; list_t entries[NENTRIES]; unsigned i; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); for (i = 0; i < NENTRIES; i++) ql_head_insert(&head, &entries[NENTRIES-i-1], link); test_entries_list(&head, entries, NENTRIES); } TEST_END TEST_BEGIN(test_ql_head_remove) { list_head_t head; list_t entries[NENTRIES]; unsigned i; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); for (i = 0; i < NENTRIES; i++) ql_head_insert(&head, &entries[NENTRIES-i-1], link); for (i = 0; i < NENTRIES; i++) { test_entries_list(&head, &entries[i], NENTRIES-i); ql_head_remove(&head, list_t, link); } test_empty_list(&head); } TEST_END TEST_BEGIN(test_ql_insert) { list_head_t head; list_t entries[8]; list_t *a, *b, *c, *d, *e, *f, *g, *h; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); a = &entries[0]; b = &entries[1]; c = &entries[2]; d = &entries[3]; e = &entries[4]; f = &entries[5]; g = &entries[6]; h = &entries[7]; /* * ql_remove(), ql_before_insert(), and ql_after_insert() are used * internally by other macros that are already tested, so 
there's no * need to test them completely. However, insertion/deletion from the * middle of lists is not otherwise tested; do so here. */ ql_tail_insert(&head, f, link); ql_before_insert(&head, f, b, link); ql_before_insert(&head, f, c, link); ql_after_insert(f, h, link); ql_after_insert(f, g, link); ql_before_insert(&head, b, a, link); ql_after_insert(c, d, link); ql_before_insert(&head, f, e, link); test_entries_list(&head, entries, sizeof(entries)/sizeof(list_t)); } TEST_END int main(void) { return (test( test_ql_empty, test_ql_tail_insert, test_ql_tail_remove, test_ql_head_insert, test_ql_head_remove, test_ql_insert)); } disque-1.0-rc1/deps/jemalloc/test/unit/qr.c000066400000000000000000000120641264176561100206100ustar00rootroot00000000000000#include "test/jemalloc_test.h" /* Number of ring entries, in [2..26]. */ #define NENTRIES 9 /* Split index, in [1..NENTRIES). */ #define SPLIT_INDEX 5 typedef struct ring_s ring_t; struct ring_s { qr(ring_t) link; char id; }; static void init_entries(ring_t *entries) { unsigned i; for (i = 0; i < NENTRIES; i++) { qr_new(&entries[i], link); entries[i].id = 'a' + i; } } static void test_independent_entries(ring_t *entries) { ring_t *t; unsigned i, j; for (i = 0; i < NENTRIES; i++) { j = 0; qr_foreach(t, &entries[i], link) { j++; } assert_u_eq(j, 1, "Iteration over single-element ring should visit precisely " "one element"); } for (i = 0; i < NENTRIES; i++) { j = 0; qr_reverse_foreach(t, &entries[i], link) { j++; } assert_u_eq(j, 1, "Iteration over single-element ring should visit precisely " "one element"); } for (i = 0; i < NENTRIES; i++) { t = qr_next(&entries[i], link); assert_ptr_eq(t, &entries[i], "Next element in single-element ring should be same as " "current element"); } for (i = 0; i < NENTRIES; i++) { t = qr_prev(&entries[i], link); assert_ptr_eq(t, &entries[i], "Previous element in single-element ring should be same as " "current element"); } } TEST_BEGIN(test_qr_one) { ring_t entries[NENTRIES]; init_entries(entries); test_independent_entries(entries); } TEST_END static void test_entries_ring(ring_t *entries) { ring_t *t; unsigned i, j; for (i = 0; i < NENTRIES; i++) { j = 0; qr_foreach(t, &entries[i], link) { assert_c_eq(t->id, entries[(i+j) % NENTRIES].id, "Element id mismatch"); j++; } } for (i = 0; i < NENTRIES; i++) { j = 0; qr_reverse_foreach(t, &entries[i], link) { assert_c_eq(t->id, entries[(NENTRIES+i-j-1) % NENTRIES].id, "Element id mismatch"); j++; } } for (i = 0; i < NENTRIES; i++) { t = qr_next(&entries[i], link); assert_c_eq(t->id, entries[(i+1) % NENTRIES].id, "Element id mismatch"); } for (i = 0; i < NENTRIES; i++) { t = qr_prev(&entries[i], link); assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id, "Element id mismatch"); } } TEST_BEGIN(test_qr_after_insert) { ring_t entries[NENTRIES]; unsigned i; init_entries(entries); for (i = 1; i < NENTRIES; i++) qr_after_insert(&entries[i - 1], &entries[i], link); test_entries_ring(entries); } TEST_END TEST_BEGIN(test_qr_remove) { ring_t entries[NENTRIES]; ring_t *t; unsigned i, j; init_entries(entries); for (i = 1; i < NENTRIES; i++) qr_after_insert(&entries[i - 1], &entries[i], link); for (i = 0; i < NENTRIES; i++) { j = 0; qr_foreach(t, &entries[i], link) { assert_c_eq(t->id, entries[i+j].id, "Element id mismatch"); j++; } j = 0; qr_reverse_foreach(t, &entries[i], link) { assert_c_eq(t->id, entries[NENTRIES - 1 - j].id, "Element id mismatch"); j++; } qr_remove(&entries[i], link); } test_independent_entries(entries); } TEST_END TEST_BEGIN(test_qr_before_insert) { ring_t 
entries[NENTRIES]; ring_t *t; unsigned i, j; init_entries(entries); for (i = 1; i < NENTRIES; i++) qr_before_insert(&entries[i - 1], &entries[i], link); for (i = 0; i < NENTRIES; i++) { j = 0; qr_foreach(t, &entries[i], link) { assert_c_eq(t->id, entries[(NENTRIES+i-j) % NENTRIES].id, "Element id mismatch"); j++; } } for (i = 0; i < NENTRIES; i++) { j = 0; qr_reverse_foreach(t, &entries[i], link) { assert_c_eq(t->id, entries[(i+j+1) % NENTRIES].id, "Element id mismatch"); j++; } } for (i = 0; i < NENTRIES; i++) { t = qr_next(&entries[i], link); assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id, "Element id mismatch"); } for (i = 0; i < NENTRIES; i++) { t = qr_prev(&entries[i], link); assert_c_eq(t->id, entries[(i+1) % NENTRIES].id, "Element id mismatch"); } } TEST_END static void test_split_entries(ring_t *entries) { ring_t *t; unsigned i, j; for (i = 0; i < NENTRIES; i++) { j = 0; qr_foreach(t, &entries[i], link) { if (i < SPLIT_INDEX) { assert_c_eq(t->id, entries[(i+j) % SPLIT_INDEX].id, "Element id mismatch"); } else { assert_c_eq(t->id, entries[(i+j-SPLIT_INDEX) % (NENTRIES-SPLIT_INDEX) + SPLIT_INDEX].id, "Element id mismatch"); } j++; } } } TEST_BEGIN(test_qr_meld_split) { ring_t entries[NENTRIES]; unsigned i; init_entries(entries); for (i = 1; i < NENTRIES; i++) qr_after_insert(&entries[i - 1], &entries[i], link); qr_split(&entries[0], &entries[SPLIT_INDEX], link); test_split_entries(entries); qr_meld(&entries[0], &entries[SPLIT_INDEX], link); test_entries_ring(entries); qr_meld(&entries[0], &entries[SPLIT_INDEX], link); test_split_entries(entries); qr_split(&entries[0], &entries[SPLIT_INDEX], link); test_entries_ring(entries); qr_split(&entries[0], &entries[0], link); test_entries_ring(entries); qr_meld(&entries[0], &entries[0], link); test_entries_ring(entries); } TEST_END int main(void) { return (test( test_qr_one, test_qr_after_insert, test_qr_remove, test_qr_before_insert, test_qr_meld_split)); } disque-1.0-rc1/deps/jemalloc/test/unit/quarantine.c000066400000000000000000000050271264176561100223360ustar00rootroot00000000000000#include "test/jemalloc_test.h" #define QUARANTINE_SIZE 8192 #define STRINGIFY_HELPER(x) #x #define STRINGIFY(x) STRINGIFY_HELPER(x) #ifdef JEMALLOC_FILL const char *malloc_conf = "abort:false,junk:true,redzone:true,quarantine:" STRINGIFY(QUARANTINE_SIZE); #endif void quarantine_clear(void) { void *p; p = mallocx(QUARANTINE_SIZE*2, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); dallocx(p, 0); } TEST_BEGIN(test_quarantine) { #define SZ ZU(256) #define NQUARANTINED (QUARANTINE_SIZE/SZ) void *quarantined[NQUARANTINED+1]; size_t i, j; test_skip_if(!config_fill); assert_zu_eq(nallocx(SZ, 0), SZ, "SZ=%zu does not precisely equal a size class", SZ); quarantine_clear(); /* * Allocate enough regions to completely fill the quarantine, plus one * more. The last iteration occurs with a completely full quarantine, * but no regions should be drained from the quarantine until the last * deallocation occurs. Therefore no region recycling should occur * until after this loop completes. 
*/ for (i = 0; i < NQUARANTINED+1; i++) { void *p = mallocx(SZ, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); quarantined[i] = p; dallocx(p, 0); for (j = 0; j < i; j++) { assert_ptr_ne(p, quarantined[j], "Quarantined region recycled too early; " "i=%zu, j=%zu", i, j); } } #undef NQUARANTINED #undef SZ } TEST_END static bool detected_redzone_corruption; static void arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after, size_t offset, uint8_t byte) { detected_redzone_corruption = true; } TEST_BEGIN(test_quarantine_redzone) { char *s; arena_redzone_corruption_t *arena_redzone_corruption_orig; test_skip_if(!config_fill); arena_redzone_corruption_orig = arena_redzone_corruption; arena_redzone_corruption = arena_redzone_corruption_replacement; /* Test underflow. */ detected_redzone_corruption = false; s = (char *)mallocx(1, 0); assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); s[-1] = 0xbb; dallocx(s, 0); assert_true(detected_redzone_corruption, "Did not detect redzone corruption"); /* Test overflow. */ detected_redzone_corruption = false; s = (char *)mallocx(1, 0); assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); s[sallocx(s, 0)] = 0xbb; dallocx(s, 0); assert_true(detected_redzone_corruption, "Did not detect redzone corruption"); arena_redzone_corruption = arena_redzone_corruption_orig; } TEST_END int main(void) { return (test( test_quarantine, test_quarantine_redzone)); } disque-1.0-rc1/deps/jemalloc/test/unit/rb.c000066400000000000000000000165711264176561100206000ustar00rootroot00000000000000#include "test/jemalloc_test.h" #define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \ a_type *rbp_bh_t; \ for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; \ rbp_bh_t != &(a_rbt)->rbt_nil; \ rbp_bh_t = rbtn_left_get(a_type, a_field, rbp_bh_t)) { \ if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) { \ (r_height)++; \ } \ } \ } while (0) typedef struct node_s node_t; struct node_s { #define NODE_MAGIC 0x9823af7e uint32_t magic; rb_node(node_t) link; uint64_t key; }; static int node_cmp(node_t *a, node_t *b) { int ret; assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic"); assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic"); ret = (a->key > b->key) - (a->key < b->key); if (ret == 0) { /* * Duplicates are not allowed in the tree, so force an * arbitrary ordering for non-identical items with equal keys. */ ret = (((uintptr_t)a) > ((uintptr_t)b)) - (((uintptr_t)a) < ((uintptr_t)b)); } return (ret); } typedef rb_tree(node_t) tree_t; rb_gen(static, tree_, tree_t, node_t, link, node_cmp); TEST_BEGIN(test_rb_empty) { tree_t tree; node_t key; tree_new(&tree); assert_true(tree_empty(&tree), "Tree should be empty"); assert_ptr_null(tree_first(&tree), "Unexpected node"); assert_ptr_null(tree_last(&tree), "Unexpected node"); key.key = 0; key.magic = NODE_MAGIC; assert_ptr_null(tree_search(&tree, &key), "Unexpected node"); key.key = 0; key.magic = NODE_MAGIC; assert_ptr_null(tree_nsearch(&tree, &key), "Unexpected node"); key.key = 0; key.magic = NODE_MAGIC; assert_ptr_null(tree_psearch(&tree, &key), "Unexpected node"); } TEST_END static unsigned tree_recurse(node_t *node, unsigned black_height, unsigned black_depth, node_t *nil) { unsigned ret = 0; node_t *left_node = rbtn_left_get(node_t, link, node); node_t *right_node = rbtn_right_get(node_t, link, node); if (!rbtn_red_get(node_t, link, node)) black_depth++; /* Red nodes must be interleaved with black nodes. 
*/ if (rbtn_red_get(node_t, link, node)) { assert_false(rbtn_red_get(node_t, link, left_node), "Node should be black"); assert_false(rbtn_red_get(node_t, link, right_node), "Node should be black"); } if (node == nil) return (ret); /* Self. */ assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic"); /* Left subtree. */ if (left_node != nil) ret += tree_recurse(left_node, black_height, black_depth, nil); else ret += (black_depth != black_height); /* Right subtree. */ if (right_node != nil) ret += tree_recurse(right_node, black_height, black_depth, nil); else ret += (black_depth != black_height); return (ret); } static node_t * tree_iterate_cb(tree_t *tree, node_t *node, void *data) { unsigned *i = (unsigned *)data; node_t *search_node; assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic"); /* Test rb_search(). */ search_node = tree_search(tree, node); assert_ptr_eq(search_node, node, "tree_search() returned unexpected node"); /* Test rb_nsearch(). */ search_node = tree_nsearch(tree, node); assert_ptr_eq(search_node, node, "tree_nsearch() returned unexpected node"); /* Test rb_psearch(). */ search_node = tree_psearch(tree, node); assert_ptr_eq(search_node, node, "tree_psearch() returned unexpected node"); (*i)++; return (NULL); } static unsigned tree_iterate(tree_t *tree) { unsigned i; i = 0; tree_iter(tree, NULL, tree_iterate_cb, (void *)&i); return (i); } static unsigned tree_iterate_reverse(tree_t *tree) { unsigned i; i = 0; tree_reverse_iter(tree, NULL, tree_iterate_cb, (void *)&i); return (i); } static void node_remove(tree_t *tree, node_t *node, unsigned nnodes) { node_t *search_node; unsigned black_height, imbalances; tree_remove(tree, node); /* Test rb_nsearch(). */ search_node = tree_nsearch(tree, node); if (search_node != NULL) { assert_u64_ge(search_node->key, node->key, "Key ordering error"); } /* Test rb_psearch(). */ search_node = tree_psearch(tree, node); if (search_node != NULL) { assert_u64_le(search_node->key, node->key, "Key ordering error"); } node->magic = 0; rbtn_black_height(node_t, link, tree, black_height); imbalances = tree_recurse(tree->rbt_root, black_height, 0, &(tree->rbt_nil)); assert_u_eq(imbalances, 0, "Tree is unbalanced"); assert_u_eq(tree_iterate(tree), nnodes-1, "Unexpected node iteration count"); assert_u_eq(tree_iterate_reverse(tree), nnodes-1, "Unexpected node iteration count"); } static node_t * remove_iterate_cb(tree_t *tree, node_t *node, void *data) { unsigned *nnodes = (unsigned *)data; node_t *ret = tree_next(tree, node); node_remove(tree, node, *nnodes); return (ret); } static node_t * remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data) { unsigned *nnodes = (unsigned *)data; node_t *ret = tree_prev(tree, node); node_remove(tree, node, *nnodes); return (ret); } TEST_BEGIN(test_rb_random) { #define NNODES 25 #define NBAGS 250 #define SEED 42 sfmt_t *sfmt; uint64_t bag[NNODES]; tree_t tree; node_t nodes[NNODES]; unsigned i, j, k, black_height, imbalances; sfmt = init_gen_rand(SEED); for (i = 0; i < NBAGS; i++) { switch (i) { case 0: /* Insert in order. */ for (j = 0; j < NNODES; j++) bag[j] = j; break; case 1: /* Insert in reverse order. */ for (j = 0; j < NNODES; j++) bag[j] = NNODES - j - 1; break; default: for (j = 0; j < NNODES; j++) bag[j] = gen_rand64_range(sfmt, NNODES); } for (j = 1; j <= NNODES; j++) { /* Initialize tree and nodes. */ tree_new(&tree); tree.rbt_nil.magic = 0; for (k = 0; k < j; k++) { nodes[k].magic = NODE_MAGIC; nodes[k].key = bag[k]; } /* Insert nodes. 
*/ for (k = 0; k < j; k++) { tree_insert(&tree, &nodes[k]); rbtn_black_height(node_t, link, &tree, black_height); imbalances = tree_recurse(tree.rbt_root, black_height, 0, &(tree.rbt_nil)); assert_u_eq(imbalances, 0, "Tree is unbalanced"); assert_u_eq(tree_iterate(&tree), k+1, "Unexpected node iteration count"); assert_u_eq(tree_iterate_reverse(&tree), k+1, "Unexpected node iteration count"); assert_false(tree_empty(&tree), "Tree should not be empty"); assert_ptr_not_null(tree_first(&tree), "Tree should not be empty"); assert_ptr_not_null(tree_last(&tree), "Tree should not be empty"); tree_next(&tree, &nodes[k]); tree_prev(&tree, &nodes[k]); } /* Remove nodes. */ switch (i % 4) { case 0: for (k = 0; k < j; k++) node_remove(&tree, &nodes[k], j - k); break; case 1: for (k = j; k > 0; k--) node_remove(&tree, &nodes[k-1], k); break; case 2: { node_t *start; unsigned nnodes = j; start = NULL; do { start = tree_iter(&tree, start, remove_iterate_cb, (void *)&nnodes); nnodes--; } while (start != NULL); assert_u_eq(nnodes, 0, "Removal terminated early"); break; } case 3: { node_t *start; unsigned nnodes = j; start = NULL; do { start = tree_reverse_iter(&tree, start, remove_reverse_iterate_cb, (void *)&nnodes); nnodes--; } while (start != NULL); assert_u_eq(nnodes, 0, "Removal terminated early"); break; } default: not_reached(); } } } fini_gen_rand(sfmt); #undef NNODES #undef NBAGS #undef SEED } TEST_END int main(void) { return (test( test_rb_empty, test_rb_random)); } disque-1.0-rc1/deps/jemalloc/test/unit/rtree.c000066400000000000000000000073671264176561100213210ustar00rootroot00000000000000#include "test/jemalloc_test.h" static rtree_node_elm_t * node_alloc(size_t nelms) { return ((rtree_node_elm_t *)calloc(nelms, sizeof(rtree_node_elm_t))); } static void node_dalloc(rtree_node_elm_t *node) { free(node); } TEST_BEGIN(test_rtree_get_empty) { unsigned i; for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { rtree_t rtree; assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), "Unexpected rtree_new() failure"); assert_ptr_null(rtree_get(&rtree, 0, false), "rtree_get() should return NULL for empty tree"); rtree_delete(&rtree); } } TEST_END TEST_BEGIN(test_rtree_extrema) { unsigned i; extent_node_t node_a, node_b; for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { rtree_t rtree; assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), "Unexpected rtree_new() failure"); assert_false(rtree_set(&rtree, 0, &node_a), "Unexpected rtree_set() failure"); assert_ptr_eq(rtree_get(&rtree, 0, true), &node_a, "rtree_get() should return previously set value"); assert_false(rtree_set(&rtree, ~((uintptr_t)0), &node_b), "Unexpected rtree_set() failure"); assert_ptr_eq(rtree_get(&rtree, ~((uintptr_t)0), true), &node_b, "rtree_get() should return previously set value"); rtree_delete(&rtree); } } TEST_END TEST_BEGIN(test_rtree_bits) { unsigned i, j, k; for (i = 1; i < (sizeof(uintptr_t) << 3); i++) { uintptr_t keys[] = {0, 1, (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)) - 1}; extent_node_t node; rtree_t rtree; assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), "Unexpected rtree_new() failure"); for (j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) { assert_false(rtree_set(&rtree, keys[j], &node), "Unexpected rtree_set() failure"); for (k = 0; k < sizeof(keys)/sizeof(uintptr_t); k++) { assert_ptr_eq(rtree_get(&rtree, keys[k], true), &node, "rtree_get() should return " "previously set value and ignore " "insignificant key bits; i=%u, j=%u, k=%u, " "set key=%#"FMTxPTR", get key=%#"FMTxPTR, i, j, k, keys[j], 
keys[k]); } assert_ptr_null(rtree_get(&rtree, (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)), false), "Only leftmost rtree leaf should be set; " "i=%u, j=%u", i, j); assert_false(rtree_set(&rtree, keys[j], NULL), "Unexpected rtree_set() failure"); } rtree_delete(&rtree); } } TEST_END TEST_BEGIN(test_rtree_random) { unsigned i; sfmt_t *sfmt; #define NSET 16 #define SEED 42 sfmt = init_gen_rand(SEED); for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { uintptr_t keys[NSET]; extent_node_t node; unsigned j; rtree_t rtree; assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), "Unexpected rtree_new() failure"); for (j = 0; j < NSET; j++) { keys[j] = (uintptr_t)gen_rand64(sfmt); assert_false(rtree_set(&rtree, keys[j], &node), "Unexpected rtree_set() failure"); assert_ptr_eq(rtree_get(&rtree, keys[j], true), &node, "rtree_get() should return previously set value"); } for (j = 0; j < NSET; j++) { assert_ptr_eq(rtree_get(&rtree, keys[j], true), &node, "rtree_get() should return previously set value"); } for (j = 0; j < NSET; j++) { assert_false(rtree_set(&rtree, keys[j], NULL), "Unexpected rtree_set() failure"); assert_ptr_null(rtree_get(&rtree, keys[j], true), "rtree_get() should return previously set value"); } for (j = 0; j < NSET; j++) { assert_ptr_null(rtree_get(&rtree, keys[j], true), "rtree_get() should return previously set value"); } rtree_delete(&rtree); } fini_gen_rand(sfmt); #undef NSET #undef SEED } TEST_END int main(void) { return (test( test_rtree_get_empty, test_rtree_extrema, test_rtree_bits, test_rtree_random)); } disque-1.0-rc1/deps/jemalloc/test/unit/size_classes.c000066400000000000000000000055631264176561100226630ustar00rootroot00000000000000#include "test/jemalloc_test.h" static size_t get_max_size_class(void) { unsigned nhchunks; size_t mib[4]; size_t sz, miblen, max_size_class; sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0, "Unexpected mallctl() error"); miblen = sizeof(mib) / sizeof(size_t); assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib() error"); mib[2] = nhchunks - 1; sz = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0, "Unexpected mallctlbymib() error"); return (max_size_class); } TEST_BEGIN(test_size_classes) { size_t size_class, max_size_class; szind_t index, max_index; max_size_class = get_max_size_class(); max_index = size2index(max_size_class); for (index = 0, size_class = index2size(index); index < max_index || size_class < max_size_class; index++, size_class = index2size(index)) { assert_true(index < max_index, "Loop conditionals should be equivalent; index=%u, " "size_class=%zu (%#zx)", index, size_class, size_class); assert_true(size_class < max_size_class, "Loop conditionals should be equivalent; index=%u, " "size_class=%zu (%#zx)", index, size_class, size_class); assert_u_eq(index, size2index(size_class), "size2index() does not reverse index2size(): index=%u -->" " size_class=%zu --> index=%u --> size_class=%zu", index, size_class, size2index(size_class), index2size(size2index(size_class))); assert_zu_eq(size_class, index2size(size2index(size_class)), "index2size() does not reverse size2index(): index=%u -->" " size_class=%zu --> index=%u --> size_class=%zu", index, size_class, size2index(size_class), index2size(size2index(size_class))); assert_u_eq(index+1, size2index(size_class+1), "Next size_class does not round up properly"); assert_zu_eq(size_class, (index > 0) ? 
s2u(index2size(index-1)+1) : s2u(1), "s2u() does not round up to size class"); assert_zu_eq(size_class, s2u(size_class-1), "s2u() does not round up to size class"); assert_zu_eq(size_class, s2u(size_class), "s2u() does not compute same size class"); assert_zu_eq(s2u(size_class+1), index2size(index+1), "s2u() does not round up to next size class"); } assert_u_eq(index, size2index(index2size(index)), "size2index() does not reverse index2size()"); assert_zu_eq(max_size_class, index2size(size2index(max_size_class)), "index2size() does not reverse size2index()"); assert_zu_eq(size_class, s2u(index2size(index-1)+1), "s2u() does not round up to size class"); assert_zu_eq(size_class, s2u(size_class-1), "s2u() does not round up to size class"); assert_zu_eq(size_class, s2u(size_class), "s2u() does not compute same size class"); } TEST_END int main(void) { return (test( test_size_classes)); } disque-1.0-rc1/deps/jemalloc/test/unit/stats.c000066400000000000000000000334151264176561100213270ustar00rootroot00000000000000#include "test/jemalloc_test.h" TEST_BEGIN(test_stats_summary) { size_t *cactive; size_t sz, allocated, active, resident, mapped; int expected = config_stats ? 0 : ENOENT; sz = sizeof(cactive); assert_d_eq(mallctl("stats.cactive", &cactive, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.allocated", &allocated, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.active", &active, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.resident", &resident, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.mapped", &mapped, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_zu_le(active, *cactive, "active should be no larger than cactive"); assert_zu_le(allocated, active, "allocated should be no larger than active"); assert_zu_lt(active, resident, "active should be less than resident"); assert_zu_lt(active, mapped, "active should be less than mapped"); } } TEST_END TEST_BEGIN(test_stats_huge) { void *p; uint64_t epoch; size_t allocated; uint64_t nmalloc, ndalloc, nrequests; size_t sz; int expected = config_stats ? 0 : ENOENT; p = mallocx(large_maxclass+1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.huge.nrequests", &nrequests, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_zu_gt(allocated, 0, "allocated should be greater than zero"); assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); assert_u64_le(nmalloc, nrequests, "nmalloc should be no larger than nrequests"); } dallocx(p, 0); } TEST_END TEST_BEGIN(test_stats_arenas_summary) { unsigned arena; void *little, *large, *huge; uint64_t epoch; size_t sz; int expected = config_stats ?
0 : ENOENT; size_t mapped; uint64_t npurge, nmadvise, purged; arena = 0; assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), 0, "Unexpected mallctl() failure"); little = mallocx(SMALL_MAXCLASS, 0); assert_ptr_not_null(little, "Unexpected mallocx() failure"); large = mallocx(large_maxclass, 0); assert_ptr_not_null(large, "Unexpected mallocx() failure"); huge = mallocx(chunksize, 0); assert_ptr_not_null(huge, "Unexpected mallocx() failure"); assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.mapped", &mapped, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.nmadvise", &nmadvise, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.purged", &purged, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_u64_gt(npurge, 0, "At least one purge should have occurred"); assert_u64_le(nmadvise, purged, "nmadvise should be no greater than purged"); } dallocx(little, 0); dallocx(large, 0); dallocx(huge, 0); } TEST_END void * thd_start(void *arg) { return (NULL); } static void no_lazy_lock(void) { thd_t thd; thd_create(&thd, thd_start, NULL); thd_join(thd, NULL); } TEST_BEGIN(test_stats_arenas_small) { unsigned arena; void *p; size_t sz, allocated; uint64_t epoch, nmalloc, ndalloc, nrequests; int expected = config_stats ? 0 : ENOENT; no_lazy_lock(); /* Lazy locking would dodge tcache testing. */ arena = 0; assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), 0, "Unexpected mallctl() failure"); p = mallocx(SMALL_MAXCLASS, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), config_tcache ? 0 : ENOENT, "Unexpected mallctl() result"); assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.small.allocated", &allocated, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", &nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", &ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.small.nrequests", &nrequests, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_zu_gt(allocated, 0, "allocated should be greater than zero"); assert_u64_gt(nmalloc, 0, "nmalloc should be greater than zero"); assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); assert_u64_gt(nrequests, 0, "nrequests should be greater than zero"); } dallocx(p, 0); } TEST_END TEST_BEGIN(test_stats_arenas_large) { unsigned arena; void *p; size_t sz, allocated; uint64_t epoch, nmalloc, ndalloc, nrequests; int expected = config_stats ?
0 : ENOENT; arena = 0; assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), 0, "Unexpected mallctl() failure"); p = mallocx(large_maxclass, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.large.allocated", &allocated, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", &nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", &ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.large.nrequests", &nrequests, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_zu_gt(allocated, 0, "allocated should be greater than zero"); assert_zu_gt(nmalloc, 0, "nmalloc should be greater than zero"); assert_zu_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); assert_zu_gt(nrequests, 0, "nrequests should be greater than zero"); } dallocx(p, 0); } TEST_END TEST_BEGIN(test_stats_arenas_huge) { unsigned arena; void *p; size_t sz, allocated; uint64_t epoch, nmalloc, ndalloc; int expected = config_stats ? 0 : ENOENT; arena = 0; assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), 0, "Unexpected mallctl() failure"); p = mallocx(chunksize, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_zu_gt(allocated, 0, "allocated should be greater than zero"); assert_zu_gt(nmalloc, 0, "nmalloc should be greater than zero"); assert_zu_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); } dallocx(p, 0); } TEST_END TEST_BEGIN(test_stats_arenas_bins) { unsigned arena; void *p; size_t sz, curruns, curregs; uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes; uint64_t nruns, nreruns; int expected = config_stats ? 0 : ENOENT; arena = 0; assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), 0, "Unexpected mallctl() failure"); p = mallocx(arena_bin_info[0].reg_size, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), config_tcache ? 
0 : ENOENT, "Unexpected mallctl() result"); assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.bins.0.ndalloc", &ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests", &nrequests, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.bins.0.curregs", &curregs, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", &nfills, &sz, NULL, 0), config_tcache ? expected : ENOENT, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.bins.0.nflushes", &nflushes, &sz, NULL, 0), config_tcache ? expected : ENOENT, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.bins.0.nruns", &nruns, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.bins.0.nreruns", &nreruns, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.bins.0.curruns", &curruns, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_u64_gt(nmalloc, 0, "nmalloc should be greater than zero"); assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); assert_u64_gt(nrequests, 0, "nrequests should be greater than zero"); assert_zu_gt(curregs, 0, "allocated should be greater than zero"); if (config_tcache) { assert_u64_gt(nfills, 0, "At least one fill should have occurred"); assert_u64_gt(nflushes, 0, "At least one flush should have occurred"); } assert_u64_gt(nruns, 0, "At least one run should have been allocated"); assert_zu_gt(curruns, 0, "At least one run should be currently allocated"); } dallocx(p, 0); } TEST_END TEST_BEGIN(test_stats_arenas_lruns) { unsigned arena; void *p; uint64_t epoch, nmalloc, ndalloc, nrequests; size_t curruns, sz; int expected = config_stats ? 0 : ENOENT; arena = 0; assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), 0, "Unexpected mallctl() failure"); p = mallocx(LARGE_MINCLASS, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.lruns.0.nmalloc", &nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.lruns.0.ndalloc", &ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.lruns.0.nrequests", &nrequests, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.lruns.0.curruns", &curruns, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_u64_gt(nmalloc, 0, "nmalloc should be greater than zero"); assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); assert_u64_gt(nrequests, 0, "nrequests should be greater than zero"); assert_u64_gt(curruns, 0, "At least one run should be currently allocated"); } dallocx(p, 0); } TEST_END TEST_BEGIN(test_stats_arenas_hchunks) { unsigned arena; void *p; uint64_t epoch, nmalloc, ndalloc; size_t curhchunks, sz; int expected = config_stats ? 
0 : ENOENT; arena = 0; assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), 0, "Unexpected mallctl() failure"); p = mallocx(chunksize, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.hchunks.0.nmalloc", &nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.hchunks.0.ndalloc", &ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.hchunks.0.curhchunks", &curhchunks, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_u64_gt(nmalloc, 0, "nmalloc should be greater than zero"); assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); assert_u64_gt(curhchunks, 0, "At least one chunk should be currently allocated"); } dallocx(p, 0); } TEST_END int main(void) { return (test( test_stats_summary, test_stats_huge, test_stats_arenas_summary, test_stats_arenas_small, test_stats_arenas_large, test_stats_arenas_huge, test_stats_arenas_bins, test_stats_arenas_lruns, test_stats_arenas_hchunks)); } disque-1.0-rc1/deps/jemalloc/test/unit/tsd.c000066400000000000000000000037171264176561100207650ustar00rootroot00000000000000#include "test/jemalloc_test.h" #define THREAD_DATA 0x72b65c10 typedef unsigned int data_t; static bool data_cleanup_executed; malloc_tsd_types(data_, data_t) malloc_tsd_protos(, data_, data_t) void data_cleanup(void *arg) { data_t *data = (data_t *)arg; if (!data_cleanup_executed) { assert_x_eq(*data, THREAD_DATA, "Argument passed into cleanup function should match tsd " "value"); } data_cleanup_executed = true; /* * Allocate during cleanup for two rounds, in order to assure that * jemalloc's internal tsd reinitialization happens. 
*/ switch (*data) { case THREAD_DATA: *data = 1; data_tsd_set(data); break; case 1: *data = 2; data_tsd_set(data); break; case 2: return; default: not_reached(); } { void *p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); dallocx(p, 0); } } malloc_tsd_externs(data_, data_t) #define DATA_INIT 0x12345678 malloc_tsd_data(, data_, data_t, DATA_INIT) malloc_tsd_funcs(, data_, data_t, DATA_INIT, data_cleanup) static void * thd_start(void *arg) { data_t d = (data_t)(uintptr_t)arg; void *p; assert_x_eq(*data_tsd_get(), DATA_INIT, "Initial tsd get should return initialization value"); p = malloc(1); assert_ptr_not_null(p, "Unexpected malloc() failure"); data_tsd_set(&d); assert_x_eq(*data_tsd_get(), d, "After tsd set, tsd get should return value that was set"); d = 0; assert_x_eq(*data_tsd_get(), (data_t)(uintptr_t)arg, "Resetting local data should have no effect on tsd"); free(p); return (NULL); } TEST_BEGIN(test_tsd_main_thread) { thd_start((void *) 0xa5f3e329); } TEST_END TEST_BEGIN(test_tsd_sub_thread) { thd_t thd; data_cleanup_executed = false; thd_create(&thd, thd_start, (void *)THREAD_DATA); thd_join(thd, NULL); assert_true(data_cleanup_executed, "Cleanup function should have executed"); } TEST_END int main(void) { data_tsd_boot(); return (test( test_tsd_main_thread, test_tsd_sub_thread)); } disque-1.0-rc1/deps/jemalloc/test/unit/util.c000066400000000000000000000207751264176561100211510ustar00rootroot00000000000000#include "test/jemalloc_test.h" TEST_BEGIN(test_pow2_ceil) { unsigned i, pow2; size_t x; assert_zu_eq(pow2_ceil(0), 0, "Unexpected result"); for (i = 0; i < sizeof(size_t) * 8; i++) { assert_zu_eq(pow2_ceil(ZU(1) << i), ZU(1) << i, "Unexpected result"); } for (i = 2; i < sizeof(size_t) * 8; i++) { assert_zu_eq(pow2_ceil((ZU(1) << i) - 1), ZU(1) << i, "Unexpected result"); } for (i = 0; i < sizeof(size_t) * 8 - 1; i++) { assert_zu_eq(pow2_ceil((ZU(1) << i) + 1), ZU(1) << (i+1), "Unexpected result"); } for (pow2 = 1; pow2 < 25; pow2++) { for (x = (ZU(1) << (pow2-1)) + 1; x <= ZU(1) << pow2; x++) { assert_zu_eq(pow2_ceil(x), ZU(1) << pow2, "Unexpected result, x=%zu", x); } } } TEST_END TEST_BEGIN(test_malloc_strtoumax_no_endptr) { int err; set_errno(0); assert_ju_eq(malloc_strtoumax("0", NULL, 0), 0, "Unexpected result"); err = get_errno(); assert_d_eq(err, 0, "Unexpected failure"); } TEST_END TEST_BEGIN(test_malloc_strtoumax) { struct test_s { const char *input; const char *expected_remainder; int base; int expected_errno; const char *expected_errno_name; uintmax_t expected_x; }; #define ERR(e) e, #e #define KUMAX(x) ((uintmax_t)x##ULL) struct test_s tests[] = { {"0", "0", -1, ERR(EINVAL), UINTMAX_MAX}, {"0", "0", 1, ERR(EINVAL), UINTMAX_MAX}, {"0", "0", 37, ERR(EINVAL), UINTMAX_MAX}, {"", "", 0, ERR(EINVAL), UINTMAX_MAX}, {"+", "+", 0, ERR(EINVAL), UINTMAX_MAX}, {"++3", "++3", 0, ERR(EINVAL), UINTMAX_MAX}, {"-", "-", 0, ERR(EINVAL), UINTMAX_MAX}, {"42", "", 0, ERR(0), KUMAX(42)}, {"+42", "", 0, ERR(0), KUMAX(42)}, {"-42", "", 0, ERR(0), KUMAX(-42)}, {"042", "", 0, ERR(0), KUMAX(042)}, {"+042", "", 0, ERR(0), KUMAX(042)}, {"-042", "", 0, ERR(0), KUMAX(-042)}, {"0x42", "", 0, ERR(0), KUMAX(0x42)}, {"+0x42", "", 0, ERR(0), KUMAX(0x42)}, {"-0x42", "", 0, ERR(0), KUMAX(-0x42)}, {"0", "", 0, ERR(0), KUMAX(0)}, {"1", "", 0, ERR(0), KUMAX(1)}, {"42", "", 0, ERR(0), KUMAX(42)}, {" 42", "", 0, ERR(0), KUMAX(42)}, {"42 ", " ", 0, ERR(0), KUMAX(42)}, {"0x", "x", 0, ERR(0), KUMAX(0)}, {"42x", "x", 0, ERR(0), KUMAX(42)}, {"07", "", 0, ERR(0), KUMAX(7)}, {"010", "", 0,
ERR(0), KUMAX(8)}, {"08", "8", 0, ERR(0), KUMAX(0)}, {"0_", "_", 0, ERR(0), KUMAX(0)}, {"0x", "x", 0, ERR(0), KUMAX(0)}, {"0X", "X", 0, ERR(0), KUMAX(0)}, {"0xg", "xg", 0, ERR(0), KUMAX(0)}, {"0XA", "", 0, ERR(0), KUMAX(10)}, {"010", "", 10, ERR(0), KUMAX(10)}, {"0x3", "x3", 10, ERR(0), KUMAX(0)}, {"12", "2", 2, ERR(0), KUMAX(1)}, {"78", "8", 8, ERR(0), KUMAX(7)}, {"9a", "a", 10, ERR(0), KUMAX(9)}, {"9A", "A", 10, ERR(0), KUMAX(9)}, {"fg", "g", 16, ERR(0), KUMAX(15)}, {"FG", "G", 16, ERR(0), KUMAX(15)}, {"0xfg", "g", 16, ERR(0), KUMAX(15)}, {"0XFG", "G", 16, ERR(0), KUMAX(15)}, {"z_", "_", 36, ERR(0), KUMAX(35)}, {"Z_", "_", 36, ERR(0), KUMAX(35)} }; #undef ERR #undef KUMAX unsigned i; for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) { struct test_s *test = &tests[i]; int err; uintmax_t result; char *remainder; set_errno(0); result = malloc_strtoumax(test->input, &remainder, test->base); err = get_errno(); assert_d_eq(err, test->expected_errno, "Expected errno %s for \"%s\", base %d", test->expected_errno_name, test->input, test->base); assert_str_eq(remainder, test->expected_remainder, "Unexpected remainder for \"%s\", base %d", test->input, test->base); if (err == 0) { assert_ju_eq(result, test->expected_x, "Unexpected result for \"%s\", base %d", test->input, test->base); } } } TEST_END TEST_BEGIN(test_malloc_snprintf_truncated) { #define BUFLEN 15 char buf[BUFLEN]; int result; size_t len; #define TEST(expected_str_untruncated, ...) do { \ result = malloc_snprintf(buf, len, __VA_ARGS__); \ assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \ "Unexpected string inequality (\"%s\" vs \"%s\")", \ buf, expected_str_untruncated); \ assert_d_eq(result, strlen(expected_str_untruncated), \ "Unexpected result"); \ } while (0) for (len = 1; len < BUFLEN; len++) { TEST("012346789", "012346789"); TEST("a0123b", "a%sb", "0123"); TEST("a01234567", "a%s%s", "0123", "4567"); TEST("a0123 ", "a%-6s", "0123"); TEST("a 0123", "a%6s", "0123"); TEST("a 012", "a%6.3s", "0123"); TEST("a 012", "a%*.*s", 6, 3, "0123"); TEST("a 123b", "a% db", 123); TEST("a123b", "a%-db", 123); TEST("a-123b", "a%-db", -123); TEST("a+123b", "a%+db", 123); } #undef BUFLEN #undef TEST } TEST_END TEST_BEGIN(test_malloc_snprintf) { #define BUFLEN 128 char buf[BUFLEN]; int result; #define TEST(expected_str, ...) 
do { \ result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \ assert_str_eq(buf, expected_str, "Unexpected output"); \ assert_d_eq(result, strlen(expected_str), "Unexpected result"); \ } while (0) TEST("hello", "hello"); TEST("50%, 100%", "50%%, %d%%", 100); TEST("a0123b", "a%sb", "0123"); TEST("a 0123b", "a%5sb", "0123"); TEST("a 0123b", "a%*sb", 5, "0123"); TEST("a0123 b", "a%-5sb", "0123"); TEST("a0123b", "a%*sb", -1, "0123"); TEST("a0123 b", "a%*sb", -5, "0123"); TEST("a0123 b", "a%-*sb", -5, "0123"); TEST("a012b", "a%.3sb", "0123"); TEST("a012b", "a%.*sb", 3, "0123"); TEST("a0123b", "a%.*sb", -3, "0123"); TEST("a 012b", "a%5.3sb", "0123"); TEST("a 012b", "a%5.*sb", 3, "0123"); TEST("a 012b", "a%*.3sb", 5, "0123"); TEST("a 012b", "a%*.*sb", 5, 3, "0123"); TEST("a 0123b", "a%*.*sb", 5, -3, "0123"); TEST("_abcd_", "_%x_", 0xabcd); TEST("_0xabcd_", "_%#x_", 0xabcd); TEST("_1234_", "_%o_", 01234); TEST("_01234_", "_%#o_", 01234); TEST("_1234_", "_%u_", 1234); TEST("_1234_", "_%d_", 1234); TEST("_ 1234_", "_% d_", 1234); TEST("_+1234_", "_%+d_", 1234); TEST("_-1234_", "_%d_", -1234); TEST("_-1234_", "_% d_", -1234); TEST("_-1234_", "_%+d_", -1234); TEST("_-1234_", "_%d_", -1234); TEST("_1234_", "_%d_", 1234); TEST("_-1234_", "_%i_", -1234); TEST("_1234_", "_%i_", 1234); TEST("_01234_", "_%#o_", 01234); TEST("_1234_", "_%u_", 1234); TEST("_0x1234abc_", "_%#x_", 0x1234abc); TEST("_0X1234ABC_", "_%#X_", 0x1234abc); TEST("_c_", "_%c_", 'c'); TEST("_string_", "_%s_", "string"); TEST("_0x42_", "_%p_", ((void *)0x42)); TEST("_-1234_", "_%ld_", ((long)-1234)); TEST("_1234_", "_%ld_", ((long)1234)); TEST("_-1234_", "_%li_", ((long)-1234)); TEST("_1234_", "_%li_", ((long)1234)); TEST("_01234_", "_%#lo_", ((long)01234)); TEST("_1234_", "_%lu_", ((long)1234)); TEST("_0x1234abc_", "_%#lx_", ((long)0x1234abc)); TEST("_0X1234ABC_", "_%#lX_", ((long)0x1234ABC)); TEST("_-1234_", "_%lld_", ((long long)-1234)); TEST("_1234_", "_%lld_", ((long long)1234)); TEST("_-1234_", "_%lli_", ((long long)-1234)); TEST("_1234_", "_%lli_", ((long long)1234)); TEST("_01234_", "_%#llo_", ((long long)01234)); TEST("_1234_", "_%llu_", ((long long)1234)); TEST("_0x1234abc_", "_%#llx_", ((long long)0x1234abc)); TEST("_0X1234ABC_", "_%#llX_", ((long long)0x1234ABC)); TEST("_-1234_", "_%qd_", ((long long)-1234)); TEST("_1234_", "_%qd_", ((long long)1234)); TEST("_-1234_", "_%qi_", ((long long)-1234)); TEST("_1234_", "_%qi_", ((long long)1234)); TEST("_01234_", "_%#qo_", ((long long)01234)); TEST("_1234_", "_%qu_", ((long long)1234)); TEST("_0x1234abc_", "_%#qx_", ((long long)0x1234abc)); TEST("_0X1234ABC_", "_%#qX_", ((long long)0x1234ABC)); TEST("_-1234_", "_%jd_", ((intmax_t)-1234)); TEST("_1234_", "_%jd_", ((intmax_t)1234)); TEST("_-1234_", "_%ji_", ((intmax_t)-1234)); TEST("_1234_", "_%ji_", ((intmax_t)1234)); TEST("_01234_", "_%#jo_", ((intmax_t)01234)); TEST("_1234_", "_%ju_", ((intmax_t)1234)); TEST("_0x1234abc_", "_%#jx_", ((intmax_t)0x1234abc)); TEST("_0X1234ABC_", "_%#jX_", ((intmax_t)0x1234ABC)); TEST("_1234_", "_%td_", ((ptrdiff_t)1234)); TEST("_-1234_", "_%td_", ((ptrdiff_t)-1234)); TEST("_1234_", "_%ti_", ((ptrdiff_t)1234)); TEST("_-1234_", "_%ti_", ((ptrdiff_t)-1234)); TEST("_-1234_", "_%zd_", ((ssize_t)-1234)); TEST("_1234_", "_%zd_", ((ssize_t)1234)); TEST("_-1234_", "_%zi_", ((ssize_t)-1234)); TEST("_1234_", "_%zi_", ((ssize_t)1234)); TEST("_01234_", "_%#zo_", ((ssize_t)01234)); TEST("_1234_", "_%zu_", ((ssize_t)1234)); TEST("_0x1234abc_", "_%#zx_", ((ssize_t)0x1234abc)); TEST("_0X1234ABC_", "_%#zX_", 
((ssize_t)0x1234ABC)); #undef BUFLEN } TEST_END int main(void) { return (test( test_pow2_ceil, test_malloc_strtoumax_no_endptr, test_malloc_strtoumax, test_malloc_snprintf_truncated, test_malloc_snprintf)); } disque-1.0-rc1/deps/jemalloc/test/unit/zero.c000066400000000000000000000026611264176561100211470ustar00rootroot00000000000000#include "test/jemalloc_test.h" #ifdef JEMALLOC_FILL const char *malloc_conf = "abort:false,junk:false,zero:true,redzone:false,quarantine:0"; #endif static void test_zero(size_t sz_min, size_t sz_max) { char *s; size_t sz_prev, sz, i; sz_prev = 0; s = (char *)mallocx(sz_min, 0); assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); for (sz = sallocx(s, 0); sz <= sz_max; sz_prev = sz, sz = sallocx(s, 0)) { if (sz_prev > 0) { assert_c_eq(s[0], 'a', "Previously allocated byte %zu/%zu is corrupted", ZU(0), sz_prev); assert_c_eq(s[sz_prev-1], 'a', "Previously allocated byte %zu/%zu is corrupted", sz_prev-1, sz_prev); } for (i = sz_prev; i < sz; i++) { assert_c_eq(s[i], 0x0, "Newly allocated byte %zu/%zu isn't zero-filled", i, sz); s[i] = 'a'; } if (xallocx(s, sz+1, 0, 0) == sz) { s = (char *)rallocx(s, sz+1, 0); assert_ptr_not_null((void *)s, "Unexpected rallocx() failure"); } } dallocx(s, 0); } TEST_BEGIN(test_zero_small) { test_skip_if(!config_fill); test_zero(1, SMALL_MAXCLASS-1); } TEST_END TEST_BEGIN(test_zero_large) { test_skip_if(!config_fill); test_zero(SMALL_MAXCLASS+1, large_maxclass); } TEST_END TEST_BEGIN(test_zero_huge) { test_skip_if(!config_fill); test_zero(large_maxclass+1, chunksize*2); } TEST_END int main(void) { return (test( test_zero_small, test_zero_large, test_zero_huge)); } disque-1.0-rc1/deps/linenoise/000077500000000000000000000000001264176561100162605ustar00rootroot00000000000000disque-1.0-rc1/deps/linenoise/.gitignore000066400000000000000000000000451264176561100202470ustar00rootroot00000000000000linenoise_example *.dSYM history.txt disque-1.0-rc1/deps/linenoise/Makefile000066400000000000000000000004731264176561100177240ustar00rootroot00000000000000STD= WARN= -Wall OPT= -Os R_CFLAGS= $(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) R_LDFLAGS= $(LDFLAGS) DEBUG= -g R_CC=$(CC) $(R_CFLAGS) R_LD=$(CC) $(R_LDFLAGS) linenoise.o: linenoise.h linenoise.c linenoise_example: linenoise.o example.o $(R_LD) -o $@ $^ .c.o: $(R_CC) -c $< clean: rm -f linenoise_example *.o disque-1.0-rc1/deps/linenoise/README.markdown000066400000000000000000000061221264176561100207620ustar00rootroot00000000000000# Linenoise A minimal, zero-config, BSD licensed, readline replacement used in Redis, MongoDB, and Android. * Single and multi line editing mode with the usual key bindings implemented. * History handling. * Completion. * About 1,100 lines of BSD licensed source code. * Only uses a subset of VT100 escapes (ANSI.SYS compatible). ## Can a line editing library be 20k lines of code? Line editing with some support for history is a really important feature for command line utilities. Instead of retyping almost the same stuff again and again it's just much better to hit the up arrow and edit on syntax errors, or in order to try a slightly different command. But apparently code dealing with terminals is some sort of Black Magic: readline is 30k lines of code, libedit 20k. Is it reasonable to link small utilities to huge libraries just to get minimal support for line editing?
So what usually happens is either: * Large programs with configure scripts disabling line editing if readline is not present in the system, or not supporting it at all since readline is GPL licensed and libedit (the BSD clone) is not as known and available as readline is (Real world example of this problem: Tclsh). * Smaller programs not using a configure script not supporting line editing at all (A problem we had with Redis-cli for instance). The result is a pollution of binaries without line editing support. So I spent more or less two hours doing a reality check resulting in this little library: does a line editing library *really* need to be 20k lines of code? Apparently not, it is possible to get a very small, zero configuration, trivial to embed library that solves the problem. Smaller programs will just include this, supporting line editing out of the box. Larger programs may use this little library or just check with configure if readline/libedit is available and resort to linenoise if not. ## Terminals, in 2010. Apparently almost every terminal you can happen to use today has some kind of support for basic VT100 escape sequences. So I tried to write a lib using just very basic VT100 features. The resulting library appears to work everywhere I tried to use it, and now can work even on ANSI.SYS compatible terminals, since no VT220 specific sequences are used anymore. The library is currently about 1100 lines of code. In order to use it in your project just look at the *example.c* file in the source distribution, it is trivial. Linenoise is BSD code, so you can use it both in free software and commercial software. ## Tested with... * Linux text only console ($TERM = linux) * Linux KDE terminal application ($TERM = xterm) * Linux xterm ($TERM = xterm) * Linux Buildroot ($TERM = vt100) * Mac OS X iTerm ($TERM = xterm) * Mac OS X default Terminal.app ($TERM = xterm) * OpenBSD 4.5 through an OSX Terminal.app ($TERM = screen) * IBM AIX 6.1 * FreeBSD xterm ($TERM = xterm) * ANSI.SYS Please test it everywhere you can and report back! ## Let's push this forward! Patches should be provided in the spirit of the linenoise sensibility for small, easy to understand code. Send feedback to antirez at gmail disque-1.0-rc1/deps/linenoise/example.c000066400000000000000000000042361264176561100200640ustar00rootroot00000000000000#include <stdio.h> #include <stdlib.h> #include <string.h> #include "linenoise.h" void completion(const char *buf, linenoiseCompletions *lc) { if (buf[0] == 'h') { linenoiseAddCompletion(lc,"hello"); linenoiseAddCompletion(lc,"hello there"); } } int main(int argc, char **argv) { char *line; char *prgname = argv[0]; /* Parse options, with --multiline we enable multi line editing. */ while(argc > 1) { argc--; argv++; if (!strcmp(*argv,"--multiline")) { linenoiseSetMultiLine(1); printf("Multi-line mode enabled.\n"); } else if (!strcmp(*argv,"--keycodes")) { linenoisePrintKeyCodes(); exit(0); } else { fprintf(stderr, "Usage: %s [--multiline] [--keycodes]\n", prgname); exit(1); } } /* Set the completion callback. This will be called every time the * user uses the <tab> key. */ linenoiseSetCompletionCallback(completion); /* Load history from file. The history file is just a plain text file * where entries are separated by newlines. */ linenoiseHistoryLoad("history.txt"); /* Load the history at startup */ /* Now this is the main loop of the typical linenoise-based application. * The call to linenoise() will block as long as the user types something * and presses enter.
* * The typed string is returned as a malloc() allocated string by * linenoise, so the user needs to free() it. */ while((line = linenoise("hello> ")) != NULL) { /* Do something with the string. */ if (line[0] != '\0' && line[0] != '/') { printf("echo: '%s'\n", line); linenoiseHistoryAdd(line); /* Add to the history. */ linenoiseHistorySave("history.txt"); /* Save the history on disk. */ } else if (!strncmp(line,"/historylen",11)) { /* The "/historylen" command will change the history len. */ int len = atoi(line+11); linenoiseHistorySetMaxLen(len); } else if (line[0] == '/') { printf("Unrecognized command: %s\n", line); } free(line); } return 0; } disque-1.0-rc1/deps/linenoise/linenoise.c000066400000000000000000001057401264176561100204170ustar00rootroot00000000000000/* linenoise.c -- guerrilla line editing library against the idea that a * line editing lib needs to be 20,000 lines of C code. * * You can find the latest source code at: * * http://github.com/antirez/linenoise * * Makes a number of crazy assumptions that happen to be true in 99.9999% of * the 2010 UNIX computers around. * * ------------------------------------------------------------------------ * * Copyright (c) 2010-2013, Salvatore Sanfilippo <antirez at gmail dot com> * Copyright (c) 2010-2013, Pieter Noordhuis <pcnoordhuis at gmail dot com> * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * ------------------------------------------------------------------------ * * References: * - http://invisible-island.net/xterm/ctlseqs/ctlseqs.html * - http://www.3waylabs.com/nw/WWW/products/wizcon/vt220.html * * Todo list: * - Filter bogus Ctrl+<char> combinations. * - Win32 support * * Bloat: * - History search like Ctrl+r in readline? * * List of escape sequences used by this program, we do everything just * with three sequences. In order to be so cheap we may have some * flickering effect with some slow terminals, but the fewer the sequences * used, the more compatible.
* * EL (Erase Line) * Sequence: ESC [ n K * Effect: if n is 0 or missing, clear from cursor to end of line * Effect: if n is 1, clear from beginning of line to cursor * Effect: if n is 2, clear entire line * * CUF (CUrsor Forward) * Sequence: ESC [ n C * Effect: moves cursor forward n chars * * CUB (CUrsor Backward) * Sequence: ESC [ n D * Effect: moves cursor backward n chars * * The following is used to get the terminal width if getting * the width with the TIOCGWINSZ ioctl fails * * DSR (Device Status Report) * Sequence: ESC [ 6 n * Effect: reports the current cursor position as ESC [ n ; m R * where n is the row and m is the column * * When multi line mode is enabled, we also use an additional escape * sequence. However multi line editing is disabled by default. * * CUU (Cursor Up) * Sequence: ESC [ n A * Effect: moves cursor up by n chars. * * CUD (Cursor Down) * Sequence: ESC [ n B * Effect: moves cursor down by n chars. * * When linenoiseClearScreen() is called, two additional escape sequences * are used in order to clear the screen and position the cursor at home * position. * * CUP (Cursor position) * Sequence: ESC [ H * Effect: moves the cursor to upper left corner * * ED (Erase display) * Sequence: ESC [ 2 J * Effect: clear the whole screen * */ #include <termios.h> #include <unistd.h> #include <stdlib.h> #include <stdio.h> #include <errno.h> #include <string.h> #include <stdlib.h> #include <ctype.h> #include <sys/stat.h> #include <sys/types.h> #include <sys/ioctl.h> #include <unistd.h> #include "linenoise.h" #define LINENOISE_DEFAULT_HISTORY_MAX_LEN 100 #define LINENOISE_MAX_LINE 4096 static char *unsupported_term[] = {"dumb","cons25","emacs",NULL}; static linenoiseCompletionCallback *completionCallback = NULL; static struct termios orig_termios; /* In order to restore at exit.*/ static int rawmode = 0; /* For atexit() function to check if restore is needed*/ static int mlmode = 0; /* Multi line mode. Default is single line. */ static int atexit_registered = 0; /* Register atexit just 1 time. */ static int history_max_len = LINENOISE_DEFAULT_HISTORY_MAX_LEN; static int history_len = 0; static char **history = NULL; /* The linenoiseState structure represents the state during line editing. * We pass this state to functions implementing specific editing * functionalities. */ struct linenoiseState { int ifd; /* Terminal stdin file descriptor. */ int ofd; /* Terminal stdout file descriptor. */ char *buf; /* Edited line buffer. */ size_t buflen; /* Edited line buffer size. */ const char *prompt; /* Prompt to display. */ size_t plen; /* Prompt length. */ size_t pos; /* Current cursor position. */ size_t oldpos; /* Previous refresh cursor position. */ size_t len; /* Current edited line length. */ size_t cols; /* Number of columns in terminal. */ size_t maxrows; /* Maximum num of rows used so far (multiline mode) */ int history_index; /* The history index we are currently editing. */ }; enum KEY_ACTION{ KEY_NULL = 0, /* NULL */ CTRL_A = 1, /* Ctrl+a */ CTRL_B = 2, /* Ctrl-b */ CTRL_C = 3, /* Ctrl-c */ CTRL_D = 4, /* Ctrl-d */ CTRL_E = 5, /* Ctrl-e */ CTRL_F = 6, /* Ctrl-f */ CTRL_H = 8, /* Ctrl-h */ TAB = 9, /* Tab */ CTRL_K = 11, /* Ctrl+k */ CTRL_L = 12, /* Ctrl+l */ ENTER = 13, /* Enter */ CTRL_N = 14, /* Ctrl-n */ CTRL_P = 16, /* Ctrl-p */ CTRL_T = 20, /* Ctrl-t */ CTRL_U = 21, /* Ctrl+u */ CTRL_W = 23, /* Ctrl+w */ ESC = 27, /* Escape */ BACKSPACE = 127 /* Backspace */ }; static void linenoiseAtExit(void); int linenoiseHistoryAdd(const char *line); static void refreshLine(struct linenoiseState *l); /* Debugging macro. */ #if 0 FILE *lndebug_fp = NULL; #define lndebug(...)
\ do { \ if (lndebug_fp == NULL) { \ lndebug_fp = fopen("/tmp/lndebug.txt","a"); \ fprintf(lndebug_fp, \ "[%d %d %d] p: %d, rows: %d, rpos: %d, max: %d, oldmax: %d\n", \ (int)l->len,(int)l->pos,(int)l->oldpos,plen,rows,rpos, \ (int)l->maxrows,old_rows); \ } \ fprintf(lndebug_fp, ", " __VA_ARGS__); \ fflush(lndebug_fp); \ } while (0) #else #define lndebug(fmt, ...) #endif /* ======================= Low level terminal handling ====================== */ /* Set whether or not to use multi line mode. */ void linenoiseSetMultiLine(int ml) { mlmode = ml; } /* Return true if the terminal name is in the list of terminals we know are * not able to understand basic escape sequences. */ static int isUnsupportedTerm(void) { char *term = getenv("TERM"); int j; if (term == NULL) return 0; for (j = 0; unsupported_term[j]; j++) if (!strcasecmp(term,unsupported_term[j])) return 1; return 0; } /* Raw mode: 1960 magic shit. */ static int enableRawMode(int fd) { struct termios raw; if (!isatty(STDIN_FILENO)) goto fatal; if (!atexit_registered) { atexit(linenoiseAtExit); atexit_registered = 1; } if (tcgetattr(fd,&orig_termios) == -1) goto fatal; raw = orig_termios; /* modify the original mode */ /* input modes: no break, no CR to NL, no parity check, no strip char, * no start/stop output control. */ raw.c_iflag &= ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON); /* output modes - disable post processing */ raw.c_oflag &= ~(OPOST); /* control modes - set 8 bit chars */ raw.c_cflag |= (CS8); /* local modes - echoing off, canonical off, no extended functions, * no signal chars (^Z,^C) */ raw.c_lflag &= ~(ECHO | ICANON | IEXTEN | ISIG); /* control chars - set return condition: min number of bytes and timer. * We want read to return every single byte, without timeout. */ raw.c_cc[VMIN] = 1; raw.c_cc[VTIME] = 0; /* 1 byte, no timer */ /* put terminal in raw mode after flushing */ if (tcsetattr(fd,TCSAFLUSH,&raw) < 0) goto fatal; rawmode = 1; return 0; fatal: errno = ENOTTY; return -1; } static void disableRawMode(int fd) { /* Don't even check the return value as it's too late. */ if (rawmode && tcsetattr(fd,TCSAFLUSH,&orig_termios) != -1) rawmode = 0; } /* Use the ESC [6n escape sequence to query the horizontal cursor position * and return it. On error -1 is returned, on success the position of the * cursor. */ static int getCursorPosition(int ifd, int ofd) { char buf[32]; int cols, rows; unsigned int i = 0; /* Report cursor location */ if (write(ofd, "\x1b[6n", 4) != 4) return -1; /* Read the response: ESC [ rows ; cols R */ while (i < sizeof(buf)-1) { if (read(ifd,buf+i,1) != 1) break; if (buf[i] == 'R') break; i++; } buf[i] = '\0'; /* Parse it. */ if (buf[0] != ESC || buf[1] != '[') return -1; if (sscanf(buf+2,"%d;%d",&rows,&cols) != 2) return -1; return cols; } /* Try to get the number of columns in the current terminal, or assume 80 * if it fails. */ static int getColumns(int ifd, int ofd) { struct winsize ws; if (ioctl(1, TIOCGWINSZ, &ws) == -1 || ws.ws_col == 0) { /* ioctl() failed. Try to query the terminal itself. */ int start, cols; /* Get the initial position so we can restore it later. */ start = getCursorPosition(ifd,ofd); if (start == -1) goto failed; /* Go to right margin and get position. */ if (write(ofd,"\x1b[999C",6) != 6) goto failed; cols = getCursorPosition(ifd,ofd); if (cols == -1) goto failed; /* Restore position. */ if (cols > start) { char seq[32]; snprintf(seq,32,"\x1b[%dD",cols-start); if (write(ofd,seq,strlen(seq)) == -1) { /* Can't recover...
*/ } } return cols; } else { return ws.ws_col; } failed: return 80; } /* Clear the screen. Used to handle ctrl+l */ void linenoiseClearScreen(void) { if (write(STDOUT_FILENO,"\x1b[H\x1b[2J",7) <= 0) { /* nothing to do, just to avoid warning. */ } } /* Beep, used for completion when there is nothing to complete or when all * the choices were already shown. */ static void linenoiseBeep(void) { fprintf(stderr, "\x7"); fflush(stderr); } /* ============================== Completion ================================ */ /* Free a list of completion options populated by linenoiseAddCompletion(). */ static void freeCompletions(linenoiseCompletions *lc) { size_t i; for (i = 0; i < lc->len; i++) free(lc->cvec[i]); if (lc->cvec != NULL) free(lc->cvec); } /* This is a helper function for linenoiseEdit() and is called when the * user types the <tab> key in order to complete the string currently in the * input. * * The state of the editing is encapsulated into the pointed linenoiseState * structure as described in the structure definition. */ static int completeLine(struct linenoiseState *ls) { linenoiseCompletions lc = { 0, NULL }; int nread, nwritten; char c = 0; completionCallback(ls->buf,&lc); if (lc.len == 0) { linenoiseBeep(); } else { size_t stop = 0, i = 0; while(!stop) { /* Show completion or original buffer */ if (i < lc.len) { struct linenoiseState saved = *ls; ls->len = ls->pos = strlen(lc.cvec[i]); ls->buf = lc.cvec[i]; refreshLine(ls); ls->len = saved.len; ls->pos = saved.pos; ls->buf = saved.buf; } else { refreshLine(ls); } nread = read(ls->ifd,&c,1); if (nread <= 0) { freeCompletions(&lc); return -1; } switch(c) { case 9: /* tab */ i = (i+1) % (lc.len+1); if (i == lc.len) linenoiseBeep(); break; case 27: /* escape */ /* Re-show original buffer */ if (i < lc.len) refreshLine(ls); stop = 1; break; default: /* Update buffer and return */ if (i < lc.len) { nwritten = snprintf(ls->buf,ls->buflen,"%s",lc.cvec[i]); ls->len = ls->pos = nwritten; } stop = 1; break; } } } freeCompletions(&lc); return c; /* Return last read character */ } /* Register a callback function to be called for tab-completion. */ void linenoiseSetCompletionCallback(linenoiseCompletionCallback *fn) { completionCallback = fn; } /* This function is used by the callback function registered by the user * in order to add completion options given the input string when the * user typed <tab>. See the example.c source code for a very easy to * understand example. */ void linenoiseAddCompletion(linenoiseCompletions *lc, const char *str) { size_t len = strlen(str); char *copy, **cvec; copy = malloc(len+1); if (copy == NULL) return; memcpy(copy,str,len+1); cvec = realloc(lc->cvec,sizeof(char*)*(lc->len+1)); if (cvec == NULL) { free(copy); return; } lc->cvec = cvec; lc->cvec[lc->len++] = copy; } /* =========================== Line editing ================================= */ /* We define a very simple "append buffer" structure, that is a heap * allocated string we can append to. This is useful in order to * write all the escape sequences in a buffer and flush them to the standard * output in a single call, to avoid flickering effects. */ struct abuf { char *b; int len; }; static void abInit(struct abuf *ab) { ab->b = NULL; ab->len = 0; } static void abAppend(struct abuf *ab, const char *s, int len) { char *new = realloc(ab->b,ab->len+len); if (new == NULL) return; memcpy(new+ab->len,s,len); ab->b = new; ab->len += len; } static void abFree(struct abuf *ab) { free(ab->b); } /* Single line low level line refresh.
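 * * (Horizontal scrolling note: when the prompt plus the line exceed the terminal width, only the portion of the buffer around the cursor is drawn; the buffer is trimmed from the left until the cursor fits, then from the right so that prompt plus text never exceed the column count.)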
* * Rewrite the currently edited line according to the buffer content, * cursor position, and number of columns of the terminal. */ static void refreshSingleLine(struct linenoiseState *l) { char seq[64]; size_t plen = strlen(l->prompt); int fd = l->ofd; char *buf = l->buf; size_t len = l->len; size_t pos = l->pos; struct abuf ab; while((plen+pos) >= l->cols) { buf++; len--; pos--; } while (plen+len > l->cols) { len--; } abInit(&ab); /* Cursor to left edge */ snprintf(seq,64,"\r"); abAppend(&ab,seq,strlen(seq)); /* Write the prompt and the current buffer content */ abAppend(&ab,l->prompt,strlen(l->prompt)); abAppend(&ab,buf,len); /* Erase to right */ snprintf(seq,64,"\x1b[0K"); abAppend(&ab,seq,strlen(seq)); /* Move cursor to original position. */ snprintf(seq,64,"\r\x1b[%dC", (int)(pos+plen)); abAppend(&ab,seq,strlen(seq)); if (write(fd,ab.b,ab.len) == -1) {} /* Can't recover from write error. */ abFree(&ab); } /* Multi line low level line refresh. * * Rewrite the currently edited line according to the buffer content, * cursor position, and number of columns of the terminal. */ static void refreshMultiLine(struct linenoiseState *l) { char seq[64]; int plen = strlen(l->prompt); int rows = (plen+l->len+l->cols-1)/l->cols; /* rows used by current buf. */ int rpos = (plen+l->oldpos+l->cols)/l->cols; /* cursor relative row. */ int rpos2; /* rpos after refresh. */ int col; /* column position, zero-based. */ int old_rows = l->maxrows; int fd = l->ofd, j; struct abuf ab; /* Update maxrows if needed. */ if (rows > (int)l->maxrows) l->maxrows = rows; /* First step: clear all the lines used before. To do so start by * going to the last row. */ abInit(&ab); if (old_rows-rpos > 0) { lndebug("go down %d", old_rows-rpos); snprintf(seq,64,"\x1b[%dB", old_rows-rpos); abAppend(&ab,seq,strlen(seq)); } /* Now for every row clear it, go up. */ for (j = 0; j < old_rows-1; j++) { lndebug("clear+up"); snprintf(seq,64,"\r\x1b[0K\x1b[1A"); abAppend(&ab,seq,strlen(seq)); } /* Clean the top line. */ lndebug("clear"); snprintf(seq,64,"\r\x1b[0K"); abAppend(&ab,seq,strlen(seq)); /* Write the prompt and the current buffer content */ abAppend(&ab,l->prompt,strlen(l->prompt)); abAppend(&ab,l->buf,l->len); /* If we are at the very end of the screen with our prompt, we need to * emit a newline and move the prompt to the first column. */ if (l->pos && l->pos == l->len && (l->pos+plen) % l->cols == 0) { lndebug(""); abAppend(&ab,"\n",1); snprintf(seq,64,"\r"); abAppend(&ab,seq,strlen(seq)); rows++; if (rows > (int)l->maxrows) l->maxrows = rows; } /* Move cursor to right position. */ rpos2 = (plen+l->pos+l->cols)/l->cols; /* current cursor relative row. */ lndebug("rpos2 %d", rpos2); /* Go up till we reach the expected position. */ if (rows-rpos2 > 0) { lndebug("go-up %d", rows-rpos2); snprintf(seq,64,"\x1b[%dA", rows-rpos2); abAppend(&ab,seq,strlen(seq)); } /* Set column. */ col = (plen+(int)l->pos) % (int)l->cols; lndebug("set col %d", 1+col); if (col) snprintf(seq,64,"\r\x1b[%dC", col); else snprintf(seq,64,"\r"); abAppend(&ab,seq,strlen(seq)); lndebug("\n"); l->oldpos = l->pos; if (write(fd,ab.b,ab.len) == -1) {} /* Can't recover from write error. */ abFree(&ab); } /* Calls the two low level functions refreshSingleLine() or * refreshMultiLine() according to the selected mode. */ static void refreshLine(struct linenoiseState *l) { if (mlmode) refreshMultiLine(l); else refreshSingleLine(l); } /* Insert the character 'c' at the current cursor position. * * On error writing to the terminal -1 is returned, otherwise 0.
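 * * As a fast path, when the character is appended at the end of the line in single line mode and the result still fits in a single row, it is written directly to the terminal, avoiding a full line refresh.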
*/ int linenoiseEditInsert(struct linenoiseState *l, char c) { if (l->len < l->buflen) { if (l->len == l->pos) { l->buf[l->pos] = c; l->pos++; l->len++; l->buf[l->len] = '\0'; if ((!mlmode && l->plen+l->len < l->cols) /* || mlmode */) { /* Avoid a full update of the line in the * trivial case. */ if (write(l->ofd,&c,1) == -1) return -1; } else { refreshLine(l); } } else { memmove(l->buf+l->pos+1,l->buf+l->pos,l->len-l->pos); l->buf[l->pos] = c; l->len++; l->pos++; l->buf[l->len] = '\0'; refreshLine(l); } } return 0; } /* Move cursor on the left. */ void linenoiseEditMoveLeft(struct linenoiseState *l) { if (l->pos > 0) { l->pos--; refreshLine(l); } } /* Move cursor on the right. */ void linenoiseEditMoveRight(struct linenoiseState *l) { if (l->pos != l->len) { l->pos++; refreshLine(l); } } /* Move cursor to the start of the line. */ void linenoiseEditMoveHome(struct linenoiseState *l) { if (l->pos != 0) { l->pos = 0; refreshLine(l); } } /* Move cursor to the end of the line. */ void linenoiseEditMoveEnd(struct linenoiseState *l) { if (l->pos != l->len) { l->pos = l->len; refreshLine(l); } } /* Substitute the currently edited line with the next or previous history * entry as specified by 'dir'. */ #define LINENOISE_HISTORY_NEXT 0 #define LINENOISE_HISTORY_PREV 1 void linenoiseEditHistoryNext(struct linenoiseState *l, int dir) { if (history_len > 1) { /* Update the current history entry before * overwriting it with the next one. */ free(history[history_len - 1 - l->history_index]); history[history_len - 1 - l->history_index] = strdup(l->buf); /* Show the new entry */ l->history_index += (dir == LINENOISE_HISTORY_PREV) ? 1 : -1; if (l->history_index < 0) { l->history_index = 0; return; } else if (l->history_index >= history_len) { l->history_index = history_len-1; return; } strncpy(l->buf,history[history_len - 1 - l->history_index],l->buflen); l->buf[l->buflen-1] = '\0'; l->len = l->pos = strlen(l->buf); refreshLine(l); } } /* Delete the character at the right of the cursor without altering the cursor * position. Basically this is what happens with the "Delete" keyboard key. */ void linenoiseEditDelete(struct linenoiseState *l) { if (l->len > 0 && l->pos < l->len) { memmove(l->buf+l->pos,l->buf+l->pos+1,l->len-l->pos-1); l->len--; l->buf[l->len] = '\0'; refreshLine(l); } } /* Backspace implementation. */ void linenoiseEditBackspace(struct linenoiseState *l) { if (l->pos > 0 && l->len > 0) { memmove(l->buf+l->pos-1,l->buf+l->pos,l->len-l->pos); l->pos--; l->len--; l->buf[l->len] = '\0'; refreshLine(l); } } /* Delete the previous word, maintaining the cursor at the start of the * current word. */ void linenoiseEditDeletePrevWord(struct linenoiseState *l) { size_t old_pos = l->pos; size_t diff; while (l->pos > 0 && l->buf[l->pos-1] == ' ') l->pos--; while (l->pos > 0 && l->buf[l->pos-1] != ' ') l->pos--; diff = old_pos - l->pos; memmove(l->buf+l->pos,l->buf+old_pos,l->len-old_pos+1); l->len -= diff; refreshLine(l); } /* This function is the core of the line editing capability of linenoise. * It expects 'fd' to be already in "raw mode" so that every key pressed * will be returned ASAP to read(). * * The resulting string is put into 'buf' when the user types enter, or * when ctrl+d is typed. * * The function returns the length of the current buffer. */ static int linenoiseEdit(int stdin_fd, int stdout_fd, char *buf, size_t buflen, const char *prompt) { struct linenoiseState l; /* Populate the linenoise state that we pass to functions implementing * specific editing functionalities.
*/ l.ifd = stdin_fd; l.ofd = stdout_fd; l.buf = buf; l.buflen = buflen; l.prompt = prompt; l.plen = strlen(prompt); l.oldpos = l.pos = 0; l.len = 0; l.cols = getColumns(stdin_fd, stdout_fd); l.maxrows = 0; l.history_index = 0; /* Buffer starts empty. */ l.buf[0] = '\0'; l.buflen--; /* Make sure there is always space for the nulterm */ /* The latest history entry is always our current buffer, that * initially is just an empty string. */ linenoiseHistoryAdd(""); if (write(l.ofd,prompt,l.plen) == -1) return -1; while(1) { char c; int nread; char seq[3]; nread = read(l.ifd,&c,1); if (nread <= 0) return l.len; /* Only autocomplete when the callback is set. It returns < 0 when * there was an error reading from fd. Otherwise it will return the * character that should be handled next. */ if (c == 9 && completionCallback != NULL) { c = completeLine(&l); /* Return on errors */ if (c < 0) return l.len; /* Read next character when 0 */ if (c == 0) continue; } switch(c) { case ENTER: /* enter */ history_len--; free(history[history_len]); if (mlmode) linenoiseEditMoveEnd(&l); return (int)l.len; case CTRL_C: /* ctrl-c */ errno = EAGAIN; return -1; case BACKSPACE: /* backspace */ case 8: /* ctrl-h */ linenoiseEditBackspace(&l); break; case CTRL_D: /* ctrl-d, remove char at right of cursor, or if the line is empty, act as end-of-file. */ if (l.len > 0) { linenoiseEditDelete(&l); } else { history_len--; free(history[history_len]); return -1; } break; case CTRL_T: /* ctrl-t, swaps current character with previous. */ if (l.pos > 0 && l.pos < l.len) { int aux = buf[l.pos-1]; buf[l.pos-1] = buf[l.pos]; buf[l.pos] = aux; if (l.pos != l.len-1) l.pos++; refreshLine(&l); } break; case CTRL_B: /* ctrl-b */ linenoiseEditMoveLeft(&l); break; case CTRL_F: /* ctrl-f */ linenoiseEditMoveRight(&l); break; case CTRL_P: /* ctrl-p */ linenoiseEditHistoryNext(&l, LINENOISE_HISTORY_PREV); break; case CTRL_N: /* ctrl-n */ linenoiseEditHistoryNext(&l, LINENOISE_HISTORY_NEXT); break; case ESC: /* escape sequence */ /* Read the next two bytes representing the escape sequence. * Use two calls to handle slow terminals returning the two * chars at different times. */ if (read(l.ifd,seq,1) == -1) break; if (read(l.ifd,seq+1,1) == -1) break; /* ESC [ sequences. */ if (seq[0] == '[') { if (seq[1] >= '0' && seq[1] <= '9') { /* Extended escape, read additional byte. */ if (read(l.ifd,seq+2,1) == -1) break; if (seq[2] == '~') { switch(seq[1]) { case '3': /* Delete key. */ linenoiseEditDelete(&l); break; } } } else { switch(seq[1]) { case 'A': /* Up */ linenoiseEditHistoryNext(&l, LINENOISE_HISTORY_PREV); break; case 'B': /* Down */ linenoiseEditHistoryNext(&l, LINENOISE_HISTORY_NEXT); break; case 'C': /* Right */ linenoiseEditMoveRight(&l); break; case 'D': /* Left */ linenoiseEditMoveLeft(&l); break; case 'H': /* Home */ linenoiseEditMoveHome(&l); break; case 'F': /* End*/ linenoiseEditMoveEnd(&l); break; } } } /* ESC O sequences. */ else if (seq[0] == 'O') { switch(seq[1]) { case 'H': /* Home */ linenoiseEditMoveHome(&l); break; case 'F': /* End*/ linenoiseEditMoveEnd(&l); break; } } break; default: if (linenoiseEditInsert(&l,c)) return -1; break; case CTRL_U: /* Ctrl+u, delete the whole line. */ buf[0] = '\0'; l.pos = l.len = 0; refreshLine(&l); break; case CTRL_K: /* Ctrl+k, delete from current to end of line. 
*/ buf[l.pos] = '\0'; l.len = l.pos; refreshLine(&l); break; case CTRL_A: /* Ctrl+a, go to the start of the line */ linenoiseEditMoveHome(&l); break; case CTRL_E: /* ctrl+e, go to the end of the line */ linenoiseEditMoveEnd(&l); break; case CTRL_L: /* ctrl+l, clear screen */ linenoiseClearScreen(); refreshLine(&l); break; case CTRL_W: /* ctrl+w, delete previous word */ linenoiseEditDeletePrevWord(&l); break; } } return l.len; } /* This special mode is used by linenoise in order to print scan codes * on screen for debugging / development purposes. It is implemented * by the linenoise_example program using the --keycodes option. */ void linenoisePrintKeyCodes(void) { char quit[4]; printf("Linenoise key codes debugging mode.\n" "Press keys to see scan codes. Type 'quit' at any time to exit.\n"); if (enableRawMode(STDIN_FILENO) == -1) return; memset(quit,' ',4); while(1) { char c; int nread; nread = read(STDIN_FILENO,&c,1); if (nread <= 0) continue; memmove(quit,quit+1,sizeof(quit)-1); /* shift string to left. */ quit[sizeof(quit)-1] = c; /* Insert current char on the right. */ if (memcmp(quit,"quit",sizeof(quit)) == 0) break; printf("'%c' %02x (%d) (type quit to exit)\n", isprint(c) ? c : '?', (int)c, (int)c); printf("\r"); /* Go left edge manually, we are in raw mode. */ fflush(stdout); } disableRawMode(STDIN_FILENO); } /* This function calls the line editing function linenoiseEdit() using * the STDIN file descriptor set in raw mode. */ static int linenoiseRaw(char *buf, size_t buflen, const char *prompt) { int count; if (buflen == 0) { errno = EINVAL; return -1; } if (!isatty(STDIN_FILENO)) { /* Not a tty: read from file / pipe. */ if (fgets(buf, buflen, stdin) == NULL) return -1; count = strlen(buf); if (count && buf[count-1] == '\n') { count--; buf[count] = '\0'; } } else { /* Interactive editing. */ if (enableRawMode(STDIN_FILENO) == -1) return -1; count = linenoiseEdit(STDIN_FILENO, STDOUT_FILENO, buf, buflen, prompt); disableRawMode(STDIN_FILENO); printf("\n"); } return count; } /* The high level function that is the main API of the linenoise library. * This function checks if the terminal has basic capabilities, just checking * for a blacklist of stupid terminals, and later either calls the line * editing function or uses dummy fgets() so that you will be able to type * something even in the most desperate conditions. */ char *linenoise(const char *prompt) { char buf[LINENOISE_MAX_LINE]; int count; if (isUnsupportedTerm()) { size_t len; printf("%s",prompt); fflush(stdout); if (fgets(buf,LINENOISE_MAX_LINE,stdin) == NULL) return NULL; len = strlen(buf); while(len && (buf[len-1] == '\n' || buf[len-1] == '\r')) { len--; buf[len] = '\0'; } return strdup(buf); } else { count = linenoiseRaw(buf,LINENOISE_MAX_LINE,prompt); if (count == -1) return NULL; return strdup(buf); } } /* ================================ History ================================= */ /* Free the history, but does not reset it. Only used when we have to * exit() to avoid memory leaks being reported by valgrind & co. */ static void freeHistory(void) { if (history) { int j; for (j = 0; j < history_len; j++) free(history[j]); free(history); } } /* At exit we'll try to fix the terminal to the initial conditions. */ static void linenoiseAtExit(void) { disableRawMode(STDIN_FILENO); freeHistory(); } /* This is the API call to add a new entry in the linenoise history.
* It uses a fixed array of char pointers that are shifted (memmoved) * when the history max length is reached in order to remove the oldest * entry and make room for the new one, so it is not exactly suitable for huge * histories, but will work well for a few hundred entries. * * Using a circular buffer is smarter, but a bit more complex to handle. */ int linenoiseHistoryAdd(const char *line) { char *linecopy; if (history_max_len == 0) return 0; /* Initialization on first call. */ if (history == NULL) { history = malloc(sizeof(char*)*history_max_len); if (history == NULL) return 0; memset(history,0,(sizeof(char*)*history_max_len)); } /* Don't add duplicated lines. */ if (history_len && !strcmp(history[history_len-1], line)) return 0; /* Add a heap allocated copy of the line to the history. * If we reached the max length, remove the oldest line. */ linecopy = strdup(line); if (!linecopy) return 0; if (history_len == history_max_len) { free(history[0]); memmove(history,history+1,sizeof(char*)*(history_max_len-1)); history_len--; } history[history_len] = linecopy; history_len++; return 1; } /* Set the maximum length for the history. This function can be called even * if there is already some history, the function will make sure to retain * just the latest 'len' elements if the new history length value is smaller * than the amount of items already inside the history. */ int linenoiseHistorySetMaxLen(int len) { char **new; if (len < 1) return 0; if (history) { int tocopy = history_len; new = malloc(sizeof(char*)*len); if (new == NULL) return 0; /* If we can't copy everything, free the elements we'll not use. */ if (len < tocopy) { int j; for (j = 0; j < tocopy-len; j++) free(history[j]); tocopy = len; } memset(new,0,sizeof(char*)*len); memcpy(new,history+(history_len-tocopy), sizeof(char*)*tocopy); free(history); history = new; } history_max_len = len; if (history_len > history_max_len) history_len = history_max_len; return 1; } /* Save the history in the specified file. On success 0 is returned * otherwise -1 is returned. */ int linenoiseHistorySave(const char *filename) { FILE *fp = fopen(filename,"w"); int j; if (fp == NULL) return -1; for (j = 0; j < history_len; j++) fprintf(fp,"%s\n",history[j]); fclose(fp); return 0; } /* Load the history from the specified file. * * If the file exists and the operation succeeded 0 is returned, otherwise * on error (including the file not existing) -1 is returned. */ int linenoiseHistoryLoad(const char *filename) { FILE *fp = fopen(filename,"r"); char buf[LINENOISE_MAX_LINE]; if (fp == NULL) return -1; while (fgets(buf,LINENOISE_MAX_LINE,fp) != NULL) { char *p; p = strchr(buf,'\r'); if (!p) p = strchr(buf,'\n'); if (p) *p = '\0'; linenoiseHistoryAdd(buf); } fclose(fp); return 0; } disque-1.0-rc1/deps/linenoise/linenoise.h000066400000000000000000000047241264176561100204250ustar00rootroot00000000000000/* linenoise.h -- guerrilla line editing library against the idea that a * line editing lib needs to be 20,000 lines of C code. * * See linenoise.c for more information. * * ------------------------------------------------------------------------ * * Copyright (c) 2010, Salvatore Sanfilippo <antirez at gmail dot com> * Copyright (c) 2010, Pieter Noordhuis <pcnoordhuis at gmail dot com> * * All rights reserved.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef __LINENOISE_H #define __LINENOISE_H #ifdef __cplusplus extern "C" { #endif typedef struct linenoiseCompletions { size_t len; char **cvec; } linenoiseCompletions; typedef void(linenoiseCompletionCallback)(const char *, linenoiseCompletions *); void linenoiseSetCompletionCallback(linenoiseCompletionCallback *); void linenoiseAddCompletion(linenoiseCompletions *, const char *); char *linenoise(const char *prompt); int linenoiseHistoryAdd(const char *line); int linenoiseHistorySetMaxLen(int len); int linenoiseHistorySave(const char *filename); int linenoiseHistoryLoad(const char *filename); void linenoiseClearScreen(void); void linenoiseSetMultiLine(int ml); void linenoisePrintKeyCodes(void); #ifdef __cplusplus } #endif #endif /* __LINENOISE_H */ disque-1.0-rc1/deps/update-jemalloc.sh000077500000000000000000000004321264176561100176770ustar00rootroot00000000000000#!/bin/bash VER=$1 URL="http://www.canonware.com/download/jemalloc/jemalloc-${VER}.tar.bz2" echo "Downloading $URL" curl $URL > /tmp/jemalloc.tar.bz2 tar xvjf /tmp/jemalloc.tar.bz2 rm -rf jemalloc mv jemalloc-${VER} jemalloc echo "Use git status, add all files and commit changes." disque-1.0-rc1/disque.conf000066400000000000000000000434541264176561100155110ustar00rootroot00000000000000# Disque configuration file example # Note on units: when memory size is needed, it is possible to specify # it in the usual form of 1k 5GB 4M and so forth: # # 1k => 1000 bytes # 1kb => 1024 bytes # 1m => 1000000 bytes # 1mb => 1024*1024 bytes # 1g => 1000000000 bytes # 1gb => 1024*1024*1024 bytes # # units are case insensitive so 1GB 1Gb 1gB are all the same. ################################## INCLUDES ################################### # Include one or more other config files here. This is useful if you # have a standard template that goes to all Disque servers but also need # to customize a few per-server settings. Include files can include # other files, so use this wisely. # # Notice option "include" won't be rewritten by the "CONFIG REWRITE" # command. Since Disque always uses the last processed # line as the value of a configuration directive, you'd better put includes # at the beginning of this file to avoid overwriting config changes at runtime.
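# # For example, assuming a hypothetical local.conf that also sets "tcp-backlog": # # tcp-backlog 511 # include /path/to/local.conf # # the value from local.conf wins, because it is the last one processed.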
# # If instead you are interested in using includes to override configuration # options, it is better to use include as the last line. # # include /path/to/local.conf # include /path/to/other.conf ################################ GENERAL ##################################### # By default Disque does not run as a daemon. Use 'yes' if you need it. # Note that Disque will write a pid file in /var/run/disque.pid when daemonized. daemonize no # When running daemonized, Disque writes a pid file in /var/run/disque.pid by # default. You can specify a custom pid file location here. pidfile /var/run/disque.pid # Accept connections on the specified port, default is 7711. # If port 0 is specified Disque will not listen on a TCP socket. port 7711 # TCP listen() backlog. # # In high requests-per-second environments you need a high backlog in order # to avoid slow client connection issues. Note that the Linux kernel # will silently truncate it to the value of /proc/sys/net/core/somaxconn so # make sure to raise both the value of somaxconn and tcp_max_syn_backlog # in order to get the desired effect. tcp-backlog 511 # By default Disque listens for connections from all the network interfaces # available on the server. It is possible to listen to just one or multiple # interfaces using the "bind" configuration directive, followed by one or # more IP addresses. # # Examples: # # bind 192.168.1.100 10.0.0.1 # bind 127.0.0.1 # Specify the path for the Unix socket that will be used to listen for # incoming connections. There is no default, so Disque will not listen # on a unix socket when not specified. # # unixsocket /tmp/disque.sock # unixsocketperm 700 # Close the connection after a client is idle for N seconds (0 to disable) timeout 0 # TCP keepalive. # # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence # of communication. This is useful for two reasons: # # 1) Detect dead peers. # 2) Take the connection alive from the point of view of network # equipment in the middle. # # On Linux, the specified value (in seconds) is the period used to send ACKs. # Note that to close the connection the double of the time is needed. # On other kernels the period depends on the kernel configuration. # # A reasonable value for this option is 60 seconds. tcp-keepalive 0 # Specify the server verbosity level. # This can be one of: # debug (a lot of information, useful for development/testing) # verbose (lots of rarely useful info, but not a mess like the debug level) # notice (moderately verbose, what you want in production probably) # warning (only very important / critical messages are logged) loglevel notice # Specify the log file name. Also the empty string can be used to force # Disque to log on the standard output. Note that if you use standard # output for logging but daemonize, logs will be sent to /dev/null logfile "" # To enable logging to the system logger, just set 'syslog-enabled' to yes, # and optionally update the other syslog parameters to suit your needs. # syslog-enabled no # Specify the syslog identity. # syslog-ident disque # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. # syslog-facility local0 ############################# WORKING DIRECTORY ############################### # The working directory. # # Data and state files will be written inside this directory. # # The Append Only File will also be created inside this directory. # # Note that you must specify a directory here, not a file name.
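# # Example (a hypothetical absolute path; the default "./" below resolves to # the directory the server is started from): # # dir /var/lib/disque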
dir ./ ################################## SECURITY ################################### # Require clients to issue AUTH <PASSWORD> before processing any other # commands. This might be useful in environments in which you do not trust # others with access to the host running disque-server. # # This should stay commented out for backward compatibility and because most # people do not need auth (e.g. they run their own servers). # # Warning: since Disque is pretty fast an outside user can try up to # 150k passwords per second against a good box. This means that you should # use a very strong password otherwise it will be very easy to break. # # requirepass foobared # Command renaming. # # It is possible to change the name of dangerous commands in a shared # environment. For instance the CONFIG command may be renamed into something # hard to guess so that it will still be available for internal-use tools # but not available for general clients. # # Example: # # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 # # It is also possible to completely kill a command by renaming it into # an empty string: # # rename-command CONFIG "" # # Please note that changing the name of commands that are logged into the # AOF file or transmitted to slaves may cause problems. ################################### LIMITS #################################### # Set the max number of connected clients at the same time. By default # this limit is set to 10000 clients, however if the Disque server is not # able to configure the process file limit to allow for the specified limit # the max number of allowed clients is set to the current file limit # minus 32 (as Disque reserves a few file descriptors for internal uses). # # Once the limit is reached Disque will close all the new connections sending # an error 'max number of clients reached'. # # maxclients 10000 # Don't use more memory than the specified amount of bytes. # When the memory limit is reached Disque will try to remove ACKs # or other state that can be safely evicted according to the specified # policy. # # If Disque can't free memory according to the policy, or if the policy is # set to 'noeviction', Disque will start to reply with errors to commands # that would use more memory. # # maxmemory <bytes> # MAXMEMORY POLICY: how Disque will select what to remove when maxmemory # is reached. You can select among the following behaviors: # # "noeviction" -- Nothing is evicted, an error is returned to the user # on write operations. # "acks" -- Disque will try to remove jobs in ACKED state in order to # reclaim memory. # # The default is: # # maxmemory-policy acks ############################## APPEND ONLY MODE ############################### # The Append Only File is used when you want your server to be able to # persist data on disk and recover after a crash. appendonly no # The name of the append only file (default: "appendonly.aof") appendfilename "disque.aof" # The fsync() call tells the Operating System to actually write data on disk # instead of waiting for more data in the output buffer. Some OS will really flush # data on disk, some other OS will just try to do it ASAP. # # Disque supports three different modes: # # no: don't fsync, just let the OS flush the data when it wants. Faster. # always: fsync after every write to the append only log. Slow, Safest. # everysec: fsync only one time every second. Compromise. # # The default is "everysec", as that's usually the right compromise between # speed and data safety.
It's up to you to understand if you can relax this to # "no", which will let the operating system flush the output buffer when # it wants, for better performance (but only if you can live with the idea # of some data loss), or on the contrary use "always", which is very slow # but a bit safer than everysec. # # For more details please check the following article: # http://antirez.com/post/redis-persistence-demystified.html # # If unsure, use "everysec". # appendfsync always appendfsync everysec # appendfsync no # Automatic rewrite of the append only file. # Disque is able to automatically rewrite the log file implicitly calling # BGREWRITEAOF when the AOF log size grows by the specified percentage. # # This is how it works: Disque remembers the size of the AOF file after the # latest rewrite (if no rewrite has happened since the restart, the size of # the AOF at startup is used). # # This base size is compared to the current size. If the current size is # bigger than the specified percentage, the rewrite is triggered. Also # you need to specify a minimal size for the AOF file to be rewritten, this # is useful to avoid rewriting the AOF file even if the percentage increase # is reached but it is still pretty small. # # Specify a percentage of zero in order to disable the automatic AOF # rewrite feature. auto-aof-rewrite-percentage 100 auto-aof-rewrite-min-size 64mb # An AOF file may be found to be truncated at the end during the Disque # startup process, when the AOF data gets loaded back into memory. # This may happen when the system where Disque is running # crashes, especially when an ext4 filesystem is mounted without the # data=ordered option (however this can't happen when Disque itself # crashes or aborts but the operating system still works correctly). # # Disque can either exit with an error when this happens, or load as much # data as possible (the default now) and start if the AOF file is found # to be truncated at the end. The following option controls this behavior. # # If aof-load-truncated is set to yes, a truncated AOF file is loaded and # the Disque server starts emitting a log to inform the user of the event. # Otherwise if the option is set to no, the server aborts with an error # and refuses to start. When the option is set to no, the user is required # to fix the AOF file using the "disque-check-aof" utility before restarting # the server. # # Note that if the AOF file is found to be corrupted in the middle, # the server will still exit with an error. This option only applies when # Disque will try to read more data from the AOF file but not enough bytes # will be found. aof-load-truncated yes # Normally when Disque is restarted with AOF enabled, it only loads job # data without putting the job into the queue again automatically. # # This is the default for a few reasons: # 1) In the time the server was down, it is possible that another node # put the same job in its queue. Having it enqueued again after a # restart may cause unnecessary additional deliveries. # 2) If the job has retry set to 0, reloading the "queued" state may # violate the at-most-once contract. # 3) Anyway jobs are requeued automatically if not acknowledged. # # However, during server upgrades, loading the full state again after the # restart may be a good idea: it's a small controlled downtime. Moreover, # if the server rewrites the AOF from scratch before exiting, even # at-most-once jobs are safe to put back into the queue.
# # The "aof-enqueue-jobs-once" enables reloading the AOF with full queue # state for the next restart. Note that it also forces to load the AOF # even if the AOF is disabled. Once the AOF is loaded, the configuration # file is rewritten automatically to set the option back to 'no'. # # So in order to upgrade a server, it is possible to use the following # sequence of commands: # # CONFIG SET aof-enqueue-jobs-once yes # CONFIG REWRITE # SHUTDOWN REWRITE-AOF # # Then restart the server, that will load the AOF generated synchronously # via SHUTDOWN REWRITE-AOF, preserving the full state. # # Note that using this option when the AOF was not generated with # SHUTDOWN REWRITE-AOF is risky if you have at-most-once jobs, since the AOF # may not reflect the latest state of the server (the message could be # already delivered): use this option only in the way mentioned above. aof-enqueue-jobs-once no ################################### CLUSTER ################################## # Every cluster node has a cluster configuration file. This file is not # intended to be edited by hand. It is created and updated by Disque nodes. # Every Disque Cluster node requires a different cluster configuration file. # Make sure that instances running in the same system do not have # overlapping cluster configuration file names. # # cluster-config-file nodes-7711.conf # Cluster node timeout is the amount of milliseconds a node must be unreachable # for it to be considered in failure state. # Most other internal time limits are multiple of the node timeout. # # cluster-node-timeout 15000 ################################## SLOW LOG ################################### # The Disque Slow Log is a system to log queries that exceeded a specified # execution time. The execution time does not include the I/O operations # like talking with the client, sending the reply and so forth, # but just the time needed to actually execute the command (this is the only # stage of command execution where the thread is blocked and can not serve # other requests in the meantime). # # You can configure the slow log with two parameters: one tells Disque # what is the execution time, in microseconds, to exceed in order for the # command to get logged, and the other parameter is the length of the # slow log. When a new command is logged the oldest one is removed from the # queue of logged commands. # The following time is expressed in microseconds, so 1000000 is equivalent # to one second. Note that a negative number disables the slow log, while # a value of zero forces the logging of every command. slowlog-log-slower-than 10000 # There is no limit to this length. Just be aware that it will consume memory. # You can reclaim memory used by the slow log with SLOWLOG RESET. slowlog-max-len 128 ################################ LATENCY MONITOR ############################## # The Disque latency monitoring subsystem samples different operations # at runtime in order to collect data related to possible sources of # latency of a Disque instance. # # Via the LATENCY command this information is available to the user that can # print graphs and obtain reports. # # The system only logs operations that were performed in a time equal or # greater than the amount of milliseconds specified via the # latency-monitor-threshold configuration directive. When its value is set # to zero, the latency monitor is turned off. 
#
# By default latency monitoring is disabled since it is mostly not needed
# if you don't have latency issues, and collecting data has a performance
# impact that, while very small, can be measured under big load. Latency
# monitoring can easily be enabled at runtime using the command
# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
latency-monitor-threshold 0

############################### ADVANCED CONFIG ###############################

# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub client can't consume messages as fast as the
# publisher can produce them).
#
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients including MONITOR clients
# slave  -> slave clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
#
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
#
# A client is immediately disconnected once the hard limit is reached, or if
# the soft limit is reached and remains reached for the specified number of
# seconds (continuously).
# So for instance if the hard limit is 32 megabytes and the soft limit is
# 16 megabytes / 10 seconds, the client will get disconnected immediately
# if the size of the output buffers reaches 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously overcomes
# the limit for 10 seconds.
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than it can be read.
#
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
#
# Both the hard and the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0

# Disque calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Disque checks for
# tasks to perform according to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Disque is idle, but at the same time will make Disque more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10

# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
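# (Illustrative arithmetic, not a measured figure: with this option enabled,
# rewriting a 1 GB AOF issues roughly 1024/32 = 32 incremental fsync calls of
# 32 MB each, instead of leaving the whole file to be flushed in one shot at
# the end of the rewrite.)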
aof-rewrite-incremental-fsync yes disque-1.0-rc1/runtest000077500000000000000000000004201264176561100147660ustar00rootroot00000000000000#!/bin/sh TCL_VERSIONS="8.5 8.6" TCLSH="" for VERSION in $TCL_VERSIONS; do TCL=`which tclsh$VERSION 2>/dev/null` && TCLSH=$TCL done if [ -z $TCLSH ] then echo "You need tcl 8.5 or newer in order to run the Disque test" exit 1 fi $TCLSH tests/cluster/run.tcl $* disque-1.0-rc1/src/000077500000000000000000000000001264176561100141275ustar00rootroot00000000000000disque-1.0-rc1/src/.gitignore000066400000000000000000000000521264176561100161140ustar00rootroot00000000000000*.gcda *.gcno *.gcov redis.info lcov-html disque-1.0-rc1/src/00-RELEASENOTES000066400000000000000000000015201264176561100161560ustar00rootroot00000000000000Disque 1.0 release notes ======================= Upgrade urgency levels: * LOW: No need to upgrade unless there are new features you want to use. * MODERATE: Program an upgrade of the server, but it's not urgent. * HIGH: There is a critical bug that may affect a subset of users. Upgrade! * CRITICAL: There is a critical bug affecting MOST USERS. Upgrade ASAP. Disque 1.0 RC1 (2 Jan 2016) --- This is the first release candidate of Disque 1.0. Disque is a distributed message broker. * [NEW] Everything is new :-) Read the README file for more info about what Disque provides. Credits === Credits: For each release, a list of changes with the relative author is provided. Where not specified the implementation and design is done by Salvatore Sanfilippo. Commit messages may contain additional credits. Cheers, Salvatore disque-1.0-rc1/src/Makefile000066400000000000000000000144761264176561100156030ustar00rootroot00000000000000# Disque Makefile # Copyright (C) 2009-2014 Salvatore Sanfilippo # This file is released under the BSD license, see the COPYING file # # The Makefile composes the final FINAL_CFLAGS and FINAL_LDFLAGS using # what is needed for Disque plus the standard CFLAGS and LDFLAGS passed. # However when building the dependencies (Jemalloc, Hiredis, ...) # CFLAGS and LDFLAGS are propagated to the dependencies, so to pass # flags only to be used when compiling / linking Disque itself DISQUE_CFLAGS # and DISQUE_LDFLAGS are used instead (this is the case of 'make gcov'). # # Dependencies are stored in the Makefile.dep file. To rebuild this file # Just use 'make dep', but this is only needed by developers. 
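#
# For example (hypothetical flags, shown only to illustrate the split between
# CFLAGS and DISQUE_CFLAGS described above), one could build with:
#
#   make CFLAGS="-O0" DISQUE_CFLAGS="-DMY_EXPERIMENT"
#
# so that -O0 is also propagated to the bundled dependencies, while
# -DMY_EXPERIMENT is used only when compiling Disque itself.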
release_hdr := $(shell sh -c './mkreleasehdr.sh') uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not') OPTIMIZATION?=-O2 DEPENDENCY_TARGETS=hiredis linenoise # Default settings STD=-std=c99 -pedantic WARN=-Wall -W OPT=$(OPTIMIZATION) PREFIX?=/usr/local INSTALL_BIN=$(PREFIX)/bin INSTALL=install # Default allocator ifeq ($(uname_S),Linux) MALLOC=jemalloc else MALLOC=libc endif # Backwards compatibility for selecting an allocator ifeq ($(USE_TCMALLOC),yes) MALLOC=tcmalloc endif ifeq ($(USE_TCMALLOC_MINIMAL),yes) MALLOC=tcmalloc_minimal endif ifeq ($(USE_JEMALLOC),yes) MALLOC=jemalloc endif # Override default settings if possible -include .make-settings FINAL_CFLAGS=$(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) $(DISQUE_CFLAGS) FINAL_LDFLAGS=$(LDFLAGS) $(DISQUE_LDFLAGS) $(DEBUG) FINAL_LIBS=-lm DEBUG=-g -ggdb ifeq ($(uname_S),SunOS) # SunOS INSTALL=cp -pf FINAL_CFLAGS+= -D__EXTENSIONS__ -D_XPG6 FINAL_LIBS+= -ldl -lnsl -lsocket -lresolv -lpthread else ifeq ($(uname_S),Darwin) # Darwin (nothing to do) else ifeq ($(uname_S),AIX) # AIX FINAL_LDFLAGS+= -Wl,-bexpall FINAL_LIBS+= -pthread -lcrypt -lbsd else # All the other OSes (notably Linux) FINAL_LDFLAGS+= -rdynamic FINAL_LIBS+= -pthread endif endif endif # Include paths to dependencies FINAL_CFLAGS+= -I../deps/hiredis -I../deps/linenoise ifeq ($(MALLOC),tcmalloc) FINAL_CFLAGS+= -DUSE_TCMALLOC FINAL_LIBS+= -ltcmalloc endif ifeq ($(MALLOC),tcmalloc_minimal) FINAL_CFLAGS+= -DUSE_TCMALLOC FINAL_LIBS+= -ltcmalloc_minimal endif ifeq ($(MALLOC),jemalloc) DEPENDENCY_TARGETS+= jemalloc FINAL_CFLAGS+= -DUSE_JEMALLOC -I../deps/jemalloc/include FINAL_LIBS+= ../deps/jemalloc/lib/libjemalloc.a -ldl endif DISQUE_CC=$(QUIET_CC)$(CC) $(FINAL_CFLAGS) DISQUE_LD=$(QUIET_LINK)$(CC) $(FINAL_LDFLAGS) DISQUE_INSTALL=$(QUIET_INSTALL)$(INSTALL) CCCOLOR="\033[34m" LINKCOLOR="\033[34;1m" SRCCOLOR="\033[33m" BINCOLOR="\033[37;1m" MAKECOLOR="\033[32;1m" ENDCOLOR="\033[0m" ifndef V QUIET_CC = @printf ' %b %b\n' $(CCCOLOR)CC$(ENDCOLOR) $(SRCCOLOR)$@$(ENDCOLOR) 1>&2; QUIET_LINK = @printf ' %b %b\n' $(LINKCOLOR)LINK$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR) 1>&2; QUIET_INSTALL = @printf ' %b %b\n' $(LINKCOLOR)INSTALL$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR) 1>&2; endif DISQUE_SERVER_NAME=disque-server DISQUE_SERVER_OBJ=adlist.o ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o sha1.o release.o networking.o util.o object.o config.o aof.o debug.o syncio.o cluster.o endianconv.o slowlog.o bio.o memtest.o crc64.o setproctitle.o blocked.o latency.o sparkline.o rio.o job.o queue.o skiplist.o ack.o DISQUE_CLI_NAME=disque DISQUE_CLI_OBJ=anet.o adlist.o disque-cli.o zmalloc.o release.o anet.o ae.o crc64.o DISQUE_CHECK_AOF_NAME=disque-check-aof DISQUE_CHECK_AOF_OBJ=disque-check-aof.o all: $(DISQUE_SERVER_NAME) $(DISQUE_CLI_NAME) $(DISQUE_CHECK_AOF_NAME) @echo "" @echo "Hint: It's a good idea to run 'make test' ;)" @echo "" .PHONY: all # Deps (use make dep to generate this) include Makefile.dep dep: $(DISQUE_CC) -MM *.c > Makefile.dep .PHONY: dep persist-settings: distclean echo STD=$(STD) >> .make-settings echo WARN=$(WARN) >> .make-settings echo OPT=$(OPT) >> .make-settings echo MALLOC=$(MALLOC) >> .make-settings echo CFLAGS=$(CFLAGS) >> .make-settings echo LDFLAGS=$(LDFLAGS) >> .make-settings echo DISQUE_CFLAGS=$(DISQUE_CFLAGS) >> .make-settings echo DISQUE_LDFLAGS=$(DISQUE_LDFLAGS) >> .make-settings echo PREV_FINAL_CFLAGS=$(FINAL_CFLAGS) >> .make-settings echo PREV_FINAL_LDFLAGS=$(FINAL_LDFLAGS) >> .make-settings -(cd ../deps && $(MAKE) $(DEPENDENCY_TARGETS)) .PHONY: 
persist-settings # Prerequisites target .make-prerequisites: @touch $@ # Clean everything, persist settings and build dependencies if anything changed ifneq ($(strip $(PREV_FINAL_CFLAGS)), $(strip $(FINAL_CFLAGS))) .make-prerequisites: persist-settings endif ifneq ($(strip $(PREV_FINAL_LDFLAGS)), $(strip $(FINAL_LDFLAGS))) .make-prerequisites: persist-settings endif # disque-server $(DISQUE_SERVER_NAME): $(DISQUE_SERVER_OBJ) $(DISQUE_LD) -o $@ $^ $(FINAL_LIBS) # disque-cli $(DISQUE_CLI_NAME): $(DISQUE_CLI_OBJ) $(DISQUE_LD) -o $@ $^ ../deps/hiredis/libhiredis.a ../deps/linenoise/linenoise.o $(FINAL_LIBS) # disque-check-aof $(DISQUE_CHECK_AOF_NAME): $(DISQUE_CHECK_AOF_OBJ) $(DISQUE_LD) -o $@ $^ $(FINAL_LIBS) # Because the jemalloc.h header is generated as a part of the jemalloc build, # building it should complete before building any other object. Instead of # depending on a single artifact, build all dependencies first. %.o: %.c .make-prerequisites $(DISQUE_CC) -c $< clean: rm -rf $(DISQUE_SERVER_NAME) $(DISQUE_CLI_NAME) $(DISQUE_CHECK_AOF_NAME) *.o *.gcda *.gcno *.gcov disque.info lcov-html .PHONY: clean distclean: clean -(cd ../deps && $(MAKE) distclean) -(rm -f .make-*) .PHONY: distclean test: $(DISQUE_SERVER_NAME) $(DISQUE_CHECK_AOF_NAME) @(cd ..; ./runtest) check: test lcov: $(MAKE) gcov @(set -e; cd ..; ./runtest --clients 1) @geninfo -o disque.info . @genhtml --legend -o lcov-html disque.info .PHONY: lcov 32bit: @echo "" @echo "WARNING: if it fails under Linux you probably need to install libc6-dev-i386" @echo "" $(MAKE) CFLAGS="-m32" LDFLAGS="-m32" gcov: $(MAKE) DISQUE_CFLAGS="-fprofile-arcs -ftest-coverage -DCOVERAGE_TEST" DISQUE_LDFLAGS="-fprofile-arcs -ftest-coverage" noopt: $(MAKE) OPTIMIZATION="-O0" valgrind: $(MAKE) OPTIMIZATION="-O0" MALLOC="libc" src/help.h: @../utils/generate-command-help.rb > help.h install: all @mkdir -p $(INSTALL_BIN) $(DISQUE_INSTALL) $(DISQUE_SERVER_NAME) $(INSTALL_BIN) $(DISQUE_INSTALL) $(DISQUE_CLI_NAME) $(INSTALL_BIN) $(DISQUE_INSTALL) $(DISQUE_CHECK_AOF_NAME) $(INSTALL_BIN) disque-1.0-rc1/src/Makefile.dep000066400000000000000000000076731264176561100163530ustar00rootroot00000000000000ack.o: ack.c server.h fmacros.h config.h ae.h sds.h dict.h adlist.h \ skiplist.h zmalloc.h anet.h version.h util.h latency.h sparkline.h \ rio.h cluster.h job.h queue.h adlist.o: adlist.c adlist.h zmalloc.h ae.o: ae.c ae.h zmalloc.h config.h ae_kqueue.c ae_kqueue.o: ae_kqueue.c ae_select.o: ae_select.c anet.o: anet.c fmacros.h anet.h aof.o: aof.c server.h fmacros.h config.h ae.h sds.h dict.h adlist.h \ skiplist.h zmalloc.h anet.h version.h util.h latency.h sparkline.h \ rio.h bio.h job.h bio.o: bio.c server.h fmacros.h config.h ae.h sds.h dict.h adlist.h \ skiplist.h zmalloc.h anet.h version.h util.h latency.h sparkline.h \ rio.h bio.h blocked.o: blocked.c server.h fmacros.h config.h ae.h sds.h dict.h \ adlist.h skiplist.h zmalloc.h anet.h version.h util.h latency.h \ sparkline.h rio.h job.h queue.h cluster.o: cluster.c server.h fmacros.h config.h ae.h sds.h dict.h \ adlist.h skiplist.h zmalloc.h anet.h version.h util.h latency.h \ sparkline.h rio.h cluster.h job.h queue.h endianconv.h ack.h config.o: config.c server.h fmacros.h config.h ae.h sds.h dict.h adlist.h \ skiplist.h zmalloc.h anet.h version.h util.h latency.h sparkline.h \ rio.h cluster.h job.h queue.h crc16.o: crc16.c server.h fmacros.h config.h ae.h sds.h dict.h adlist.h \ skiplist.h zmalloc.h anet.h version.h util.h latency.h sparkline.h \ rio.h crc64.o: crc64.c debug.o: debug.c server.h fmacros.h config.h 
ae.h sds.h dict.h adlist.h \ skiplist.h zmalloc.h anet.h version.h util.h latency.h sparkline.h \ rio.h sha1.h crc64.h job.h queue.h bio.h dict.o: dict.c fmacros.h dict.h zmalloc.h disqueassert.h disque-check-aof.o: disque-check-aof.c fmacros.h config.h disque-cli.o: disque-cli.c fmacros.h version.h ../deps/hiredis/hiredis.h \ sds.h zmalloc.h ../deps/linenoise/linenoise.h help.h anet.h ae.h disque.o: disque.c server.h fmacros.h config.h ae.h sds.h dict.h adlist.h \ skiplist.h zmalloc.h anet.h version.h util.h latency.h sparkline.h \ rio.h cluster.h job.h queue.h slowlog.h bio.h asciilogo.h endianconv.o: endianconv.c job.o: job.c server.h fmacros.h config.h ae.h sds.h dict.h adlist.h \ skiplist.h zmalloc.h anet.h version.h util.h latency.h sparkline.h \ rio.h cluster.h job.h queue.h ack.h sha1.h endianconv.h latency.o: latency.c server.h fmacros.h config.h ae.h sds.h dict.h \ adlist.h skiplist.h zmalloc.h anet.h version.h util.h latency.h \ sparkline.h rio.h lzf_c.o: lzf_c.c lzfP.h lzf_d.o: lzf_d.c lzfP.h memtest.o: memtest.c config.h networking.o: networking.c server.h fmacros.h config.h ae.h sds.h dict.h \ adlist.h skiplist.h zmalloc.h anet.h version.h util.h latency.h \ sparkline.h rio.h object.o: object.c server.h fmacros.h config.h ae.h sds.h dict.h adlist.h \ skiplist.h zmalloc.h anet.h version.h util.h latency.h sparkline.h \ rio.h pqsort.o: pqsort.c queue.o: queue.c server.h fmacros.h config.h ae.h sds.h dict.h adlist.h \ skiplist.h zmalloc.h anet.h version.h util.h latency.h sparkline.h \ rio.h cluster.h job.h queue.h release.o: release.c release.h version.h crc64.h rio.o: rio.c fmacros.h rio.h sds.h util.h crc64.h config.h server.h ae.h \ dict.h adlist.h skiplist.h zmalloc.h anet.h version.h latency.h \ sparkline.h sds.o: sds.c sds.h zmalloc.h setproctitle.o: setproctitle.c sha1.o: sha1.c sha1.h config.h skiplist.o: skiplist.c server.h fmacros.h config.h ae.h sds.h dict.h \ adlist.h skiplist.h zmalloc.h anet.h version.h util.h latency.h \ sparkline.h rio.h slowlog.o: slowlog.c server.h fmacros.h config.h ae.h sds.h dict.h \ adlist.h skiplist.h zmalloc.h anet.h version.h util.h latency.h \ sparkline.h rio.h slowlog.h sparkline.o: sparkline.c server.h fmacros.h config.h ae.h sds.h dict.h \ adlist.h skiplist.h zmalloc.h anet.h version.h util.h latency.h \ sparkline.h rio.h syncio.o: syncio.c server.h fmacros.h config.h ae.h sds.h dict.h adlist.h \ skiplist.h zmalloc.h anet.h version.h util.h latency.h sparkline.h \ rio.h util.o: util.c fmacros.h util.h sds.h sha1.h zmalloc.o: zmalloc.c config.h zmalloc.h disque-1.0-rc1/src/ack.c000066400000000000000000000277251264176561100150460ustar00rootroot00000000000000/* Acknowledges handling and commands. * * Copyright (c) 2014, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. 
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE. */

#include "server.h"
#include "cluster.h"
#include "job.h"
#include "queue.h"

/* ------------------------- Low level ack functions ------------------------ */

/* Change job state as acknowledged. If it is already in that state, the
 * function does nothing. */
void acknowledgeJob(job *job) {
    if (job->state == JOB_STATE_ACKED) return;

    dequeueJob(job);
    job->state = JOB_STATE_ACKED;
    /* Remove the nodes_confirmed hash table if it exists.
     * tryJobGC() will take care of creating a new one used for the GC
     * process. */
    if (job->nodes_confirmed) {
        dictRelease(job->nodes_confirmed);
        job->nodes_confirmed = NULL;
    }
    updateJobAwakeTime(job,0); /* Make sure we'll schedule a job GC. */
    AOFAckJob(job); /* Change job state in AOF. */
}

/* ------------------------- Garbage collection ----------------------------- */

/* Return the unix time in milliseconds at which the next GC attempt for
 * this job should be performed. */
mstime_t getNextGCRetryTime(job *job) {
    mstime_t period = JOB_GC_RETRY_MIN_PERIOD * (1 << job->gc_retry);
    if (period > JOB_GC_RETRY_MAX_PERIOD) period = JOB_GC_RETRY_MAX_PERIOD;
    /* Desynchronize the GC process a bit: it is a waste of resources for
     * multiple nodes to try to GC at the same time. */
    return server.mstime + period + randomTimeError(500);
}

/* Try to garbage collect the job. */
void tryJobGC(job *job) {
    if (job->state != JOB_STATE_ACKED) return;

    int dummy_ack = dictSize(job->nodes_delivered) == 0;
    serverLog(LL_VERBOSE,"GC %.*s",JOB_ID_LEN,job->id);

    /* Don't overflow the count, it's only useful for the exponential delay.
     * Actually we'll keep trying forever. */
    if (job->gc_retry != JOB_GC_RETRY_COUNT_MAX) job->gc_retry++;

    /* nodes_confirmed is used in order to store all the nodes that have the
     * job in ACKed state, so that the job can be evicted when we are
     * confident the job will not be reissued. */
    if (job->nodes_confirmed == NULL) {
        job->nodes_confirmed = dictCreate(&clusterNodesDictType,NULL);
        dictAdd(job->nodes_confirmed,myself->name,myself);
    }

    /* Check ASAP if we already reached all the nodes. This special case
     * here is mainly useful when the job replication factor is 1, so
     * there is no SETACK to send, nor GOTACK to receive.
     *
     * Also check if this is a dummy ACK but the cluster size is now 1:
     * in such a case we don't have other nodes to send SETACK to, we can
     * just remove the ACK. Note that dummy ACKs are not created at all
     * if the cluster size is 1, but this code path may be entered as a result
     * of the cluster getting resized to a single node.
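     *
     * (Illustrative walk-through, added for clarity: suppose a job was
     * replicated to nodes A, B and C. Once nodes_confirmed contains those
     * same three names, the two dictSize() calls below match and the job
     * can be deleted everywhere. The names A, B and C are hypothetical.)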
     */
    int all_nodes_reached =
        (!dummy_ack) &&
        (dictSize(job->nodes_delivered) == dictSize(job->nodes_confirmed));
    int dummy_ack_single_node = dummy_ack && server.cluster->size == 1;

    if (all_nodes_reached || dummy_ack_single_node) {
        serverLog(LL_VERBOSE,
            "Deleting %.48s: all nodes reached in tryJobGC()",
            job->id);
        unregisterJob(job);
        freeJob(job);
        return;
    }

    /* Send a SETACK message to all the nodes that may have a message but are
     * still not listed in the nodes_confirmed hash table. However if this
     * is a dummy ACK (created by ACKJOB command acknowledging a job we don't
     * know) we have to broadcast the SETACK to everybody in search of the
     * owner. */
    dict *targets = dictSize(job->nodes_delivered) == 0 ?
                    server.cluster->nodes : job->nodes_delivered;
    dictForeach(targets,de)
        clusterNode *node = dictGetVal(de);
        if (dictFind(job->nodes_confirmed,node->name) == NULL)
            clusterSendSetAck(node,job);
    dictEndForeach
}

/* This function is called by cluster.c every time we receive a GOTACK message
 * about a job we know. */
void gotAckReceived(clusterNode *sender, job *job, int known) {
    /* A dummy ACK is an acknowledged job that we created just because a
     * client sent us ACKJOB about a job we were not aware of. */
    int dummy_ack = dictSize(job->nodes_delivered) == 0;

    serverLog(LL_VERBOSE,"RECEIVED GOTACK FROM %.40s FOR JOB %.48s",
        sender->name, job->id);

    /* We should never receive a GOTACK for a job which is not acknowledged,
     * but it is more robust to handle it explicitly. */
    if (job->state != JOB_STATE_ACKED) return;

    /* If this is a dummy ACK, and we reached a node that knows about this
     * job, it's up to that node to perform the garbage collection, so we can
     * forget about this job and reclaim memory. */
    if (dummy_ack && known) {
        serverLog(LL_VERBOSE,"Deleting %.48s: authoritative node reached",
            job->id);
        unregisterJob(job);
        freeJob(job);
        return;
    }

    /* If the sender knows about the job, or if we have the sender in the list
     * of nodes that may have the job (even if it no longer remembers the
     * job), we do two things:
     *
     * 1) Add the node to the list of nodes_delivered. It is likely already
     *    there... so this should be useless, but is a good invariant
     *    to enforce.
     * 2) Add the node to the list of nodes that acknowledged the ACK. */
    if (known || dictFind(job->nodes_delivered,sender->name) != NULL) {
        dictAdd(job->nodes_delivered,sender->name,sender);
        /* job->nodes_confirmed exists if we started a job garbage collection,
         * but we may receive GOTACK messages in other conditions sometimes,
         * since we reply with SETACK to QUEUED and WILLQUEUE if the job is
         * acknowledged but we have not yet started the GC. So we need to
         * test if the hash table actually exists. */
        if (job->nodes_confirmed)
            dictAdd(job->nodes_confirmed,sender->name,sender);
    }

    /* If our job is actually a dummy ACK, we are still interested in
     * collecting all the nodes in the cluster that reported they don't have
     * a clue: eventually if everybody in the cluster has no clue, we can
     * safely remove the dummy ACK. */
    if (!known && dummy_ack) {
        dictAdd(job->nodes_confirmed,sender->name,sender);
        if (dictSize(job->nodes_confirmed) >= dictSize(server.cluster->nodes))
        {
            serverLog(LL_VERBOSE,
                "Deleting %.48s: dummy ACK not known cluster-wide",
                job->id);
            unregisterJob(job);
            freeJob(job);
            return;
        }
    }

    /* Finally the common case: our SETACK reached everybody. Broadcast
     * a DELJOB to all the nodes involved, and delete the job.
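     *
     * (A sketch of the message flow, for orientation: this node sends SETACK
     * to every node that may hold a copy; each of them replies with GOTACK;
     * once every delivered node is also confirmed, a DELJOB broadcast retires
     * the job cluster-wide. This summary paraphrases the surrounding code.)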
     */
    if (!dummy_ack && job->nodes_confirmed &&
        dictSize(job->nodes_confirmed) >= dictSize(job->nodes_delivered))
    {
        serverLog(LL_VERBOSE,
            "Deleting %.48s: All nodes involved acknowledged the job",
            job->id);
        clusterBroadcastDelJob(job);
        unregisterJob(job);
        freeJob(job);
        return;
    }
}

/* -------------------------- Acks related commands ------------------------ */

/* ACKJOB jobid_1 jobid_2 ... jobid_N
 *
 * Set the job state as acknowledged. If the job does not exist, a fake job
 * is created just to hold the acknowledge.
 *
 * As a result of a job being acknowledged, the system tries to garbage
 * collect it, that is, to remove the job from every node of the system
 * in order to both avoid multiple deliveries of the same job, and to
 * release resources.
 *
 * If a job was already acknowledged, the ACKJOB command still has the
 * effect of forcing a GC attempt ASAP.
 *
 * The command returns the number of jobs that were already known and were
 * not yet in the ACKED state. */
void ackjobCommand(client *c) {
    int j, known = 0;

    if (validateJobIDs(c,c->argv+1,c->argc-1) == C_ERR) return;

    /* Perform the appropriate action for each job. */
    for (j = 1; j < c->argc; j++) {
        job *job = lookupJob(c->argv[j]->ptr);
        /* Case 1: No such job. Create one just to hold the ACK. However
         * if the cluster is composed by a single node we are sure the job
         * does not exist in the whole cluster, so do this only if the
         * cluster size is greater than one. */
        if (job == NULL && server.cluster->size > 1 && !myselfLeaving()) {
            char *id = c->argv[j]->ptr;
            int ttl = getRawTTLFromJobID(id);

            /* TTL is even for "at most once" jobs. In this case we
             * don't need to create a dummy ack. */
            if (ttl & 1) {
                job = createJob(id,JOB_STATE_ACKED,0,0);
                setJobTTLFromID(job);
                serverAssert(registerJob(job) == C_OK);
            }
        }
        /* Case 2: Job exists and is not acknowledged. Change state. */
        else if (job && job->state != JOB_STATE_ACKED) {
            dequeueJob(job); /* Safe to call if job is not queued. */
            acknowledgeJob(job);
            known++;
        }

        /* Anyway... start a GC attempt on the acked job. */
        if (job) tryJobGC(job);
    }
    addReplyLongLong(c,known);
}

/* FASTACK jobid_1 jobid_2 ... jobid_N
 *
 * Performs a fast acknowledge of the specified jobs.
 * A fast acknowledge does not really attempt to make sure all the nodes
 * that may have a copy receive the ack. The job is just discarded and
 * a best-effort DELJOB is sent to all the nodes that may have a copy,
 * without caring whether or not they receive the message.
 *
 * This command will more likely result in duplicated message delivery
 * during network partitions, but uses fewer messages compared to ACKJOB.
 *
 * If a job is not known, a cluster-wide DELJOB is broadcast.
 *
 * The command returns the number of jobs that are deleted from the local
 * node as a result of receiving the command. */
void fastackCommand(client *c) {
    int j, known = 0;

    if (validateJobIDs(c,c->argv+1,c->argc-1) == C_ERR) return;

    /* Perform the appropriate action for each job. */
    for (j = 1; j < c->argc; j++) {
        job *job = lookupJob(c->argv[j]->ptr);
        if (job == NULL) {
            /* Job not known, just broadcast the DELJOB message to
             * everybody. */
            clusterBroadcastJobIDMessage(server.cluster->nodes,c->argv[j]->ptr,
                                         CLUSTERMSG_TYPE_DELJOB,0,
                                         CLUSTERMSG_NOFLAGS);
        } else {
            clusterBroadcastDelJob(job);
            unregisterJob(job);
            freeJob(job);
            known++;
        }
    }
    addReplyLongLong(c,known);
}
disque-1.0-rc1/src/ack.h000066400000000000000000000034311264176561100150370ustar00rootroot00000000000000/*
 * Copyright (c) 2014, Salvatore Sanfilippo
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of Disque nor the names of its contributors may be
 *     used to endorse or promote products derived from this software without
 *     specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __DISQUE_ACK_H
#define __DISQUE_ACK_H

void acknowledgeJob(job *j);
void tryJobGC(job *j);
void gotAckReceived(clusterNode *sender, job *job, int known);
mstime_t getNextGCRetryTime(job *job);

#endif
disque-1.0-rc1/src/adlist.c000066400000000000000000000233101264176561100155520ustar00rootroot00000000000000/* adlist.c - A generic doubly linked list implementation
 *
 * Copyright (c) 2006-2010, Salvatore Sanfilippo
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of Disque nor the names of its contributors may be
 *     used to endorse or promote products derived from this software without
 *     specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdlib.h>
#include "adlist.h"
#include "zmalloc.h"

/* Create a new list.
 * The created list can be freed with listRelease(), but the private value
 * of every node needs to be freed by the user before calling listRelease().
 *
 * On error, NULL is returned. Otherwise the pointer to the new list. */
list *listCreate(void)
{
    struct list *list;

    if ((list = zmalloc(sizeof(*list))) == NULL)
        return NULL;
    list->head = list->tail = NULL;
    list->len = 0;
    list->dup = NULL;
    list->free = NULL;
    list->match = NULL;
    return list;
}

/* Free the whole list.
 *
 * This function can't fail. */
void listRelease(list *list)
{
    unsigned long len;
    listNode *current, *next;

    current = list->head;
    len = list->len;
    while(len--) {
        next = current->next;
        if (list->free) list->free(current->value);
        zfree(current);
        current = next;
    }
    zfree(list);
}

/* Add a new node to the list, to head, containing the specified 'value'
 * pointer as value.
 *
 * On error, NULL is returned and no operation is performed (i.e. the
 * list remains unaltered).
 * On success the 'list' pointer you pass to the function is returned. */
list *listAddNodeHead(list *list, void *value)
{
    listNode *node;

    if ((node = zmalloc(sizeof(*node))) == NULL)
        return NULL;
    node->value = value;
    if (list->len == 0) {
        list->head = list->tail = node;
        node->prev = node->next = NULL;
    } else {
        node->prev = NULL;
        node->next = list->head;
        list->head->prev = node;
        list->head = node;
    }
    list->len++;
    return list;
}

/* Add a new node to the list, to tail, containing the specified 'value'
 * pointer as value.
 *
 * On error, NULL is returned and no operation is performed (i.e. the
 * list remains unaltered).
 * On success the 'list' pointer you pass to the function is returned. */
list *listAddNodeTail(list *list, void *value)
{
    listNode *node;

    if ((node = zmalloc(sizeof(*node))) == NULL)
        return NULL;
    node->value = value;
    if (list->len == 0) {
        list->head = list->tail = node;
        node->prev = node->next = NULL;
    } else {
        node->prev = list->tail;
        node->next = NULL;
        list->tail->next = node;
        list->tail = node;
    }
    list->len++;
    return list;
}

list *listInsertNode(list *list, listNode *old_node, void *value, int after) {
    listNode *node;

    if ((node = zmalloc(sizeof(*node))) == NULL)
        return NULL;
    node->value = value;
    if (after) {
        node->prev = old_node;
        node->next = old_node->next;
        if (list->tail == old_node) {
            list->tail = node;
        }
    } else {
        node->next = old_node;
        node->prev = old_node->prev;
        if (list->head == old_node) {
            list->head = node;
        }
    }
    if (node->prev != NULL) {
        node->prev->next = node;
    }
    if (node->next != NULL) {
        node->next->prev = node;
    }
    list->len++;
    return list;
}

/* Remove the specified node from the specified list.
 * It's up to the caller to free the private value of the node.
 *
 * This function can't fail. */
void listDelNode(list *list, listNode *node)
{
    if (node->prev)
        node->prev->next = node->next;
    else
        list->head = node->next;
    if (node->next)
        node->next->prev = node->prev;
    else
        list->tail = node->prev;
    if (list->free) list->free(node->value);
    zfree(node);
    list->len--;
}

/* Returns a list iterator 'iter'. After the initialization every
 * call to listNext() will return the next element of the list.
 *
 * This function can't fail.
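 *
 * (For example, listGetIterator(list,AL_START_TAIL) returns an iterator
 * that walks the list backwards, from tail to head.)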
 */
listIter *listGetIterator(list *list, int direction)
{
    listIter *iter;

    if ((iter = zmalloc(sizeof(*iter))) == NULL) return NULL;
    if (direction == AL_START_HEAD)
        iter->next = list->head;
    else
        iter->next = list->tail;
    iter->direction = direction;
    return iter;
}

/* Release the iterator memory */
void listReleaseIterator(listIter *iter) {
    zfree(iter);
}

/* Create an iterator in the list private iterator structure */
void listRewind(list *list, listIter *li) {
    li->next = list->head;
    li->direction = AL_START_HEAD;
}

void listRewindTail(list *list, listIter *li) {
    li->next = list->tail;
    li->direction = AL_START_TAIL;
}

/* Return the next element of an iterator.
 * It's valid to remove the currently returned element using
 * listDelNode(), but not to remove other elements.
 *
 * The function returns a pointer to the next element of the list,
 * or NULL if there are no more elements, so the classical usage
 * pattern is:
 *
 * iter = listGetIterator(list,<direction>);
 * while ((node = listNext(iter)) != NULL) {
 *     doSomethingWith(listNodeValue(node));
 * }
 *
 * */
listNode *listNext(listIter *iter)
{
    listNode *current = iter->next;

    if (current != NULL) {
        if (iter->direction == AL_START_HEAD)
            iter->next = current->next;
        else
            iter->next = current->prev;
    }
    return current;
}

/* Duplicate the whole list. On out of memory NULL is returned.
 * On success a copy of the original list is returned.
 *
 * The 'Dup' method set with the listSetDupMethod() function is used
 * to copy the node value. Otherwise the same pointer value of
 * the original node is used as value of the copied node.
 *
 * The original list, both on success and on error, is never modified. */
list *listDup(list *orig)
{
    list *copy;
    listIter *iter;
    listNode *node;

    if ((copy = listCreate()) == NULL)
        return NULL;
    copy->dup = orig->dup;
    copy->free = orig->free;
    copy->match = orig->match;
    iter = listGetIterator(orig, AL_START_HEAD);
    while((node = listNext(iter)) != NULL) {
        void *value;

        if (copy->dup) {
            value = copy->dup(node->value);
            if (value == NULL) {
                listRelease(copy);
                listReleaseIterator(iter);
                return NULL;
            }
        } else
            value = node->value;
        if (listAddNodeTail(copy, value) == NULL) {
            listRelease(copy);
            listReleaseIterator(iter);
            return NULL;
        }
    }
    listReleaseIterator(iter);
    return copy;
}

/* Search the list for a node matching a given key.
 * The match is performed using the 'match' method
 * set with listSetMatchMethod(). If no 'match' method
 * is set, the 'value' pointer of every node is directly
 * compared with the 'key' pointer.
 *
 * On success the first matching node pointer is returned
 * (search starts from head). If no matching node exists
 * NULL is returned. */
listNode *listSearchKey(list *list, void *key)
{
    listIter *iter;
    listNode *node;

    iter = listGetIterator(list, AL_START_HEAD);
    while((node = listNext(iter)) != NULL) {
        if (list->match) {
            if (list->match(node->value, key)) {
                listReleaseIterator(iter);
                return node;
            }
        } else {
            if (key == node->value) {
                listReleaseIterator(iter);
                return node;
            }
        }
    }
    listReleaseIterator(iter);
    return NULL;
}

/* Return the element at the specified zero-based index
 * where 0 is the head, 1 is the element next to head
 * and so on. Negative integers are used in order to count
 * from the tail, -1 is the last element, -2 the penultimate
 * and so on. If the index is out of range NULL is returned.
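 *
 * (For example: in a three-element list, listIndex(list, 0) returns the
 * head, listIndex(list, -1) returns the tail, and listIndex(list, 3)
 * returns NULL.)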
*/ listNode *listIndex(list *list, long index) { listNode *n; if (index < 0) { index = (-index)-1; n = list->tail; while(index-- && n) n = n->prev; } else { n = list->head; while(index-- && n) n = n->next; } return n; } /* Rotate the list removing the tail node and inserting it to the head. */ void listRotate(list *list) { listNode *tail = list->tail; if (listLength(list) <= 1) return; /* Detach current tail */ list->tail = tail->prev; list->tail->next = NULL; /* Move it as head */ list->head->prev = tail; tail->prev = NULL; tail->next = list->head; list->head = tail; } disque-1.0-rc1/src/adlist.h000066400000000000000000000065741264176561100155740ustar00rootroot00000000000000/* adlist.h - A generic doubly linked list implementation * * Copyright (c) 2006-2012, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef __ADLIST_H__ #define __ADLIST_H__ /* Node, List, and Iterator are the only data structures used currently. 
*/ typedef struct listNode { struct listNode *prev; struct listNode *next; void *value; } listNode; typedef struct listIter { listNode *next; int direction; } listIter; typedef struct list { listNode *head; listNode *tail; void *(*dup)(void *ptr); void (*free)(void *ptr); int (*match)(void *ptr, void *key); unsigned long len; } list; /* Functions implemented as macros */ #define listLength(l) ((l)->len) #define listFirst(l) ((l)->head) #define listLast(l) ((l)->tail) #define listPrevNode(n) ((n)->prev) #define listNextNode(n) ((n)->next) #define listNodeValue(n) ((n)->value) #define listSetDupMethod(l,m) ((l)->dup = (m)) #define listSetFreeMethod(l,m) ((l)->free = (m)) #define listSetMatchMethod(l,m) ((l)->match = (m)) #define listGetDupMethod(l) ((l)->dup) #define listGetFree(l) ((l)->free) #define listGetMatchMethod(l) ((l)->match) /* Prototypes */ list *listCreate(void); void listRelease(list *list); list *listAddNodeHead(list *list, void *value); list *listAddNodeTail(list *list, void *value); list *listInsertNode(list *list, listNode *old_node, void *value, int after); void listDelNode(list *list, listNode *node); listIter *listGetIterator(list *list, int direction); listNode *listNext(listIter *iter); void listReleaseIterator(listIter *iter); list *listDup(list *orig); listNode *listSearchKey(list *list, void *key); listNode *listIndex(list *list, long index); void listRewind(list *list, listIter *li); void listRewindTail(list *list, listIter *li); void listRotate(list *list); /* Directions for iterators */ #define AL_START_HEAD 0 #define AL_START_TAIL 1 #endif /* __ADLIST_H__ */ disque-1.0-rc1/src/ae.c000066400000000000000000000367611264176561100146750ustar00rootroot00000000000000/* A simple event-driven programming library. Originally I wrote this code * for the Jim's event-loop (Jim is a Tcl interpreter) but later translated * it in form of a library for easy reuse. * * Copyright (c) 2006-2010, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
 */

#include <stdio.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>
#include <poll.h>
#include <string.h>
#include <time.h>
#include <errno.h>

#include "ae.h"
#include "zmalloc.h"
#include "config.h"

/* Include the best multiplexing layer supported by this system.
 * The following should be ordered by performance, descending. */
#ifdef HAVE_EVPORT
#include "ae_evport.c"
#else
    #ifdef HAVE_EPOLL
    #include "ae_epoll.c"
    #else
        #ifdef HAVE_KQUEUE
        #include "ae_kqueue.c"
        #else
        #include "ae_select.c"
        #endif
    #endif
#endif

aeEventLoop *aeCreateEventLoop(int setsize) {
    aeEventLoop *eventLoop;
    int i;

    if ((eventLoop = zmalloc(sizeof(*eventLoop))) == NULL) goto err;
    eventLoop->events = zmalloc(sizeof(aeFileEvent)*setsize);
    eventLoop->fired = zmalloc(sizeof(aeFiredEvent)*setsize);
    if (eventLoop->events == NULL || eventLoop->fired == NULL) goto err;
    eventLoop->setsize = setsize;
    eventLoop->lastTime = time(NULL);
    eventLoop->timeEventHead = NULL;
    eventLoop->timeEventNextId = 0;
    eventLoop->stop = 0;
    eventLoop->maxfd = -1;
    eventLoop->beforesleep = NULL;
    if (aeApiCreate(eventLoop) == -1) goto err;
    /* Events with mask == AE_NONE are not set. So let's initialize the
     * vector with it. */
    for (i = 0; i < setsize; i++)
        eventLoop->events[i].mask = AE_NONE;
    return eventLoop;

err:
    if (eventLoop) {
        zfree(eventLoop->events);
        zfree(eventLoop->fired);
        zfree(eventLoop);
    }
    return NULL;
}

/* Return the current set size. */
int aeGetSetSize(aeEventLoop *eventLoop) {
    return eventLoop->setsize;
}

/* Resize the maximum set size of the event loop.
 * If the requested set size is smaller than the current set size, but
 * there is already a file descriptor in use that is >= the requested
 * set size minus one, AE_ERR is returned and the operation is not
 * performed at all.
 *
 * Otherwise AE_OK is returned and the operation is successful. */
int aeResizeSetSize(aeEventLoop *eventLoop, int setsize) {
    int i;

    if (setsize == eventLoop->setsize) return AE_OK;
    if (eventLoop->maxfd >= setsize) return AE_ERR;
    if (aeApiResize(eventLoop,setsize) == -1) return AE_ERR;

    eventLoop->events = zrealloc(eventLoop->events,sizeof(aeFileEvent)*setsize);
    eventLoop->fired = zrealloc(eventLoop->fired,sizeof(aeFiredEvent)*setsize);
    eventLoop->setsize = setsize;

    /* Make sure that if we created new slots, they are initialized with
     * an AE_NONE mask.
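     *
     * (Illustration: growing setsize from 1024 to 2048 leaves the new slots
     * with AE_NONE until aeCreateFileEvent() registers them, while shrinking
     * below a file descriptor currently in use is refused with AE_ERR above.)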
     */
    for (i = eventLoop->maxfd+1; i < setsize; i++)
        eventLoop->events[i].mask = AE_NONE;
    return AE_OK;
}

void aeDeleteEventLoop(aeEventLoop *eventLoop) {
    aeApiFree(eventLoop);
    zfree(eventLoop->events);
    zfree(eventLoop->fired);
    zfree(eventLoop);
}

void aeStop(aeEventLoop *eventLoop) {
    eventLoop->stop = 1;
}

int aeCreateFileEvent(aeEventLoop *eventLoop, int fd, int mask,
        aeFileProc *proc, void *clientData)
{
    if (fd >= eventLoop->setsize) {
        errno = ERANGE;
        return AE_ERR;
    }
    aeFileEvent *fe = &eventLoop->events[fd];

    if (aeApiAddEvent(eventLoop, fd, mask) == -1)
        return AE_ERR;
    fe->mask |= mask;
    if (mask & AE_READABLE) fe->rfileProc = proc;
    if (mask & AE_WRITABLE) fe->wfileProc = proc;
    fe->clientData = clientData;
    if (fd > eventLoop->maxfd)
        eventLoop->maxfd = fd;
    return AE_OK;
}

void aeDeleteFileEvent(aeEventLoop *eventLoop, int fd, int mask)
{
    if (fd >= eventLoop->setsize) return;
    aeFileEvent *fe = &eventLoop->events[fd];
    if (fe->mask == AE_NONE) return;

    aeApiDelEvent(eventLoop, fd, mask);
    fe->mask = fe->mask & (~mask);
    if (fd == eventLoop->maxfd && fe->mask == AE_NONE) {
        /* Update the max fd */
        int j;

        for (j = eventLoop->maxfd-1; j >= 0; j--)
            if (eventLoop->events[j].mask != AE_NONE) break;
        eventLoop->maxfd = j;
    }
}

int aeGetFileEvents(aeEventLoop *eventLoop, int fd) {
    if (fd >= eventLoop->setsize) return 0;
    aeFileEvent *fe = &eventLoop->events[fd];

    return fe->mask;
}

static void aeGetTime(long *seconds, long *milliseconds)
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    *seconds = tv.tv_sec;
    *milliseconds = tv.tv_usec/1000;
}

static void aeAddMillisecondsToNow(long long milliseconds, long *sec, long *ms) {
    long cur_sec, cur_ms, when_sec, when_ms;

    aeGetTime(&cur_sec, &cur_ms);
    when_sec = cur_sec + milliseconds/1000;
    when_ms = cur_ms + milliseconds%1000;
    if (when_ms >= 1000) {
        when_sec ++;
        when_ms -= 1000;
    }
    *sec = when_sec;
    *ms = when_ms;
}

long long aeCreateTimeEvent(aeEventLoop *eventLoop, long long milliseconds,
        aeTimeProc *proc, void *clientData,
        aeEventFinalizerProc *finalizerProc)
{
    long long id = eventLoop->timeEventNextId++;
    aeTimeEvent *te;

    te = zmalloc(sizeof(*te));
    if (te == NULL) return AE_ERR;
    te->id = id;
    aeAddMillisecondsToNow(milliseconds,&te->when_sec,&te->when_ms);
    te->timeProc = proc;
    te->finalizerProc = finalizerProc;
    te->clientData = clientData;
    te->next = eventLoop->timeEventHead;
    eventLoop->timeEventHead = te;
    return id;
}

int aeDeleteTimeEvent(aeEventLoop *eventLoop, long long id)
{
    aeTimeEvent *te, *prev = NULL;

    te = eventLoop->timeEventHead;
    while(te) {
        if (te->id == id) {
            if (prev == NULL)
                eventLoop->timeEventHead = te->next;
            else
                prev->next = te->next;
            if (te->finalizerProc)
                te->finalizerProc(eventLoop, te->clientData);
            zfree(te);
            return AE_OK;
        }
        prev = te;
        te = te->next;
    }
    return AE_ERR; /* NO event with the specified ID found */
}

/* Search the first timer to fire.
 * This operation is useful to know for how long the select can be put
 * to sleep without delaying any event.
 * If there are no timers NULL is returned.
 *
 * Note that's O(N) since time events are unsorted.
 * Possible optimizations (not needed by Disque so far, but...):
 * 1) Insert the event in order, so that the nearest is just the head.
 *    Much better but still insertion or deletion of timers is O(N).
 * 2) Use a skiplist to have this operation as O(1) and insertion as O(log(N)).
 */
static aeTimeEvent *aeSearchNearestTimer(aeEventLoop *eventLoop)
{
    aeTimeEvent *te = eventLoop->timeEventHead;
    aeTimeEvent *nearest = NULL;

    while(te) {
        if (!nearest || te->when_sec < nearest->when_sec ||
                (te->when_sec == nearest->when_sec &&
                 te->when_ms < nearest->when_ms))
            nearest = te;
        te = te->next;
    }
    return nearest;
}

/* Process time events */
static int processTimeEvents(aeEventLoop *eventLoop) {
    int processed = 0;
    aeTimeEvent *te;
    long long maxId;
    time_t now = time(NULL);

    /* If the system clock is moved to the future, and then set back to the
     * right value, time events may be delayed in a random way. Often this
     * means that scheduled operations will not be performed soon enough.
     *
     * Here we try to detect system clock skews, and force all the time
     * events to be processed ASAP when this happens: the idea is that
     * processing events earlier is less dangerous than delaying them
     * indefinitely, and practice suggests it is. */
    if (now < eventLoop->lastTime) {
        te = eventLoop->timeEventHead;
        while(te) {
            te->when_sec = 0;
            te = te->next;
        }
    }
    eventLoop->lastTime = now;

    te = eventLoop->timeEventHead;
    maxId = eventLoop->timeEventNextId-1;
    while(te) {
        long now_sec, now_ms;
        long long id;

        if (te->id > maxId) {
            te = te->next;
            continue;
        }
        aeGetTime(&now_sec, &now_ms);
        if (now_sec > te->when_sec ||
            (now_sec == te->when_sec && now_ms >= te->when_ms))
        {
            int retval;

            id = te->id;
            retval = te->timeProc(eventLoop, id, te->clientData);
            processed++;
            /* After an event is processed our time event list may
             * no longer be the same, so we restart from head.
             * Still we make sure not to process events registered
             * by event handlers themselves, in order not to loop
             * forever. To do so we saved the max ID we want to handle.
             *
             * FUTURE OPTIMIZATIONS:
             * Note that this is NOT great algorithmically. Disque uses
             * a single time event so it's not a problem but the right
             * way to do this is to add the new elements on head, and
             * to flag deleted elements in a special way for later
             * deletion (putting references to the nodes to delete into
             * another linked list). */
            if (retval != AE_NOMORE) {
                aeAddMillisecondsToNow(retval,&te->when_sec,&te->when_ms);
            } else {
                aeDeleteTimeEvent(eventLoop, id);
            }
            te = eventLoop->timeEventHead;
        } else {
            te = te->next;
        }
    }
    return processed;
}

/* Process every pending time event, then every pending file event
 * (that may be registered by time event callbacks just processed).
 * Without special flags the function sleeps until some file event
 * fires, or when the next time event occurs (if any).
 *
 * If flags is 0, the function does nothing and returns.
 * if flags has AE_ALL_EVENTS set, all the kind of events are processed.
 * if flags has AE_FILE_EVENTS set, file events are processed.
 * if flags has AE_TIME_EVENTS set, time events are processed.
 * if flags has AE_DONT_WAIT set, the function returns ASAP once all
 * the events that can be processed without waiting are processed.
 *
 * The function returns the number of events processed. */
int aeProcessEvents(aeEventLoop *eventLoop, int flags)
{
    int processed = 0, numevents;

    /* Nothing to do? return ASAP */
    if (!(flags & AE_TIME_EVENTS) && !(flags & AE_FILE_EVENTS)) return 0;

    /* Note that we want to call select() even if there are no
     * file events to process as long as we want to process time
     * events, in order to sleep until the next time event is ready
     * to fire.
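     *
     * (Usage illustration: a caller that only wants to drain whatever is
     * immediately ready, without blocking, can invoke
     * aeProcessEvents(eventLoop, AE_FILE_EVENTS|AE_DONT_WAIT); the flag
     * combinations are the ones documented above.)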
     */
    if (eventLoop->maxfd != -1 ||
        ((flags & AE_TIME_EVENTS) && !(flags & AE_DONT_WAIT))) {
        int j;
        aeTimeEvent *shortest = NULL;
        struct timeval tv, *tvp;

        if (flags & AE_TIME_EVENTS && !(flags & AE_DONT_WAIT))
            shortest = aeSearchNearestTimer(eventLoop);
        if (shortest) {
            long now_sec, now_ms;

            /* Calculate the time missing for the nearest
             * timer to fire. */
            aeGetTime(&now_sec, &now_ms);
            tvp = &tv;
            tvp->tv_sec = shortest->when_sec - now_sec;
            if (shortest->when_ms < now_ms) {
                tvp->tv_usec = ((shortest->when_ms+1000) - now_ms)*1000;
                tvp->tv_sec --;
            } else {
                tvp->tv_usec = (shortest->when_ms - now_ms)*1000;
            }
            if (tvp->tv_sec < 0) tvp->tv_sec = 0;
            if (tvp->tv_usec < 0) tvp->tv_usec = 0;
        } else {
            /* If we have to check for events but need to return
             * ASAP because of AE_DONT_WAIT we need to set the timeout
             * to zero */
            if (flags & AE_DONT_WAIT) {
                tv.tv_sec = tv.tv_usec = 0;
                tvp = &tv;
            } else {
                /* Otherwise we can block */
                tvp = NULL; /* wait forever */
            }
        }

        numevents = aeApiPoll(eventLoop, tvp);
        for (j = 0; j < numevents; j++) {
            aeFileEvent *fe = &eventLoop->events[eventLoop->fired[j].fd];
            int mask = eventLoop->fired[j].mask;
            int fd = eventLoop->fired[j].fd;
            int rfired = 0;

            /* note the fe->mask & mask & ... code: maybe an already processed
             * event removed an element that fired and we still didn't
             * process it, so we check if the event is still valid. */
            if (fe->mask & mask & AE_READABLE) {
                rfired = 1;
                fe->rfileProc(eventLoop,fd,fe->clientData,mask);
            }
            if (fe->mask & mask & AE_WRITABLE) {
                if (!rfired || fe->wfileProc != fe->rfileProc)
                    fe->wfileProc(eventLoop,fd,fe->clientData,mask);
            }
            processed++;
        }
    }
    /* Check time events */
    if (flags & AE_TIME_EVENTS)
        processed += processTimeEvents(eventLoop);

    return processed; /* return the number of processed file/time events */
}

/* Wait for milliseconds until the given file descriptor becomes
 * writable/readable/exception */
int aeWait(int fd, int mask, long long milliseconds) {
    struct pollfd pfd;
    int retmask = 0, retval;

    memset(&pfd, 0, sizeof(pfd));
    pfd.fd = fd;
    if (mask & AE_READABLE) pfd.events |= POLLIN;
    if (mask & AE_WRITABLE) pfd.events |= POLLOUT;

    if ((retval = poll(&pfd, 1, milliseconds))== 1) {
        if (pfd.revents & POLLIN) retmask |= AE_READABLE;
        if (pfd.revents & POLLOUT) retmask |= AE_WRITABLE;
        if (pfd.revents & POLLERR) retmask |= AE_WRITABLE;
        if (pfd.revents & POLLHUP) retmask |= AE_WRITABLE;
        return retmask;
    } else {
        return retval;
    }
}

void aeMain(aeEventLoop *eventLoop) {
    eventLoop->stop = 0;
    while (!eventLoop->stop) {
        if (eventLoop->beforesleep != NULL)
            eventLoop->beforesleep(eventLoop);
        aeProcessEvents(eventLoop, AE_ALL_EVENTS);
    }
}

char *aeGetApiName(void) {
    return aeApiName();
}

void aeSetBeforeSleepProc(aeEventLoop *eventLoop, aeBeforeSleepProc *beforesleep) {
    eventLoop->beforesleep = beforesleep;
}
disque-1.0-rc1/src/ae.h000066400000000000000000000110301264176561100146610ustar00rootroot00000000000000/* A simple event-driven programming library. Originally I wrote this code
 * for the Jim's event-loop (Jim is a Tcl interpreter) but later translated
 * it in form of a library for easy reuse.
 *
 * Copyright (c) 2006-2012, Salvatore Sanfilippo
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef __AE_H__ #define __AE_H__ #define AE_OK 0 #define AE_ERR -1 #define AE_NONE 0 #define AE_READABLE 1 #define AE_WRITABLE 2 #define AE_FILE_EVENTS 1 #define AE_TIME_EVENTS 2 #define AE_ALL_EVENTS (AE_FILE_EVENTS|AE_TIME_EVENTS) #define AE_DONT_WAIT 4 #define AE_NOMORE -1 /* Macros */ #define AE_NOTUSED(V) ((void) V) struct aeEventLoop; /* Types and data structures */ typedef void aeFileProc(struct aeEventLoop *eventLoop, int fd, void *clientData, int mask); typedef int aeTimeProc(struct aeEventLoop *eventLoop, long long id, void *clientData); typedef void aeEventFinalizerProc(struct aeEventLoop *eventLoop, void *clientData); typedef void aeBeforeSleepProc(struct aeEventLoop *eventLoop); /* File event structure */ typedef struct aeFileEvent { int mask; /* one of AE_(READABLE|WRITABLE) */ aeFileProc *rfileProc; aeFileProc *wfileProc; void *clientData; } aeFileEvent; /* Time event structure */ typedef struct aeTimeEvent { long long id; /* time event identifier. 
*/ long when_sec; /* seconds */ long when_ms; /* milliseconds */ aeTimeProc *timeProc; aeEventFinalizerProc *finalizerProc; void *clientData; struct aeTimeEvent *next; } aeTimeEvent; /* A fired event */ typedef struct aeFiredEvent { int fd; int mask; } aeFiredEvent; /* State of an event based program */ typedef struct aeEventLoop { int maxfd; /* highest file descriptor currently registered */ int setsize; /* max number of file descriptors tracked */ long long timeEventNextId; time_t lastTime; /* Used to detect system clock skew */ aeFileEvent *events; /* Registered events */ aeFiredEvent *fired; /* Fired events */ aeTimeEvent *timeEventHead; int stop; void *apidata; /* This is used for polling API specific data */ aeBeforeSleepProc *beforesleep; } aeEventLoop; /* Prototypes */ aeEventLoop *aeCreateEventLoop(int setsize); void aeDeleteEventLoop(aeEventLoop *eventLoop); void aeStop(aeEventLoop *eventLoop); int aeCreateFileEvent(aeEventLoop *eventLoop, int fd, int mask, aeFileProc *proc, void *clientData); void aeDeleteFileEvent(aeEventLoop *eventLoop, int fd, int mask); int aeGetFileEvents(aeEventLoop *eventLoop, int fd); long long aeCreateTimeEvent(aeEventLoop *eventLoop, long long milliseconds, aeTimeProc *proc, void *clientData, aeEventFinalizerProc *finalizerProc); int aeDeleteTimeEvent(aeEventLoop *eventLoop, long long id); int aeProcessEvents(aeEventLoop *eventLoop, int flags); int aeWait(int fd, int mask, long long milliseconds); void aeMain(aeEventLoop *eventLoop); char *aeGetApiName(void); void aeSetBeforeSleepProc(aeEventLoop *eventLoop, aeBeforeSleepProc *beforesleep); int aeGetSetSize(aeEventLoop *eventLoop); int aeResizeSetSize(aeEventLoop *eventLoop, int setsize); #endif disque-1.0-rc1/src/ae_epoll.c000066400000000000000000000114151264176561100160550ustar00rootroot00000000000000/* Linux epoll(2) based ae.c module * * Copyright (c) 2009-2012, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include <sys/epoll.h> typedef struct aeApiState { int epfd; struct epoll_event *events; } aeApiState; static int aeApiCreate(aeEventLoop *eventLoop) { aeApiState *state = zmalloc(sizeof(aeApiState)); if (!state) return -1; state->events = zmalloc(sizeof(struct epoll_event)*eventLoop->setsize); if (!state->events) { zfree(state); return -1; } state->epfd = epoll_create(1024); /* 1024 is just a hint for the kernel */ if (state->epfd == -1) { zfree(state->events); zfree(state); return -1; } eventLoop->apidata = state; return 0; } static int aeApiResize(aeEventLoop *eventLoop, int setsize) { aeApiState *state = eventLoop->apidata; state->events = zrealloc(state->events, sizeof(struct epoll_event)*setsize); return 0; } static void aeApiFree(aeEventLoop *eventLoop) { aeApiState *state = eventLoop->apidata; close(state->epfd); zfree(state->events); zfree(state); } static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) { aeApiState *state = eventLoop->apidata; struct epoll_event ee; /* If the fd was already monitored for some event, we need a MOD * operation. Otherwise we need an ADD operation. */ int op = eventLoop->events[fd].mask == AE_NONE ? EPOLL_CTL_ADD : EPOLL_CTL_MOD; ee.events = 0; mask |= eventLoop->events[fd].mask; /* Merge old events */ if (mask & AE_READABLE) ee.events |= EPOLLIN; if (mask & AE_WRITABLE) ee.events |= EPOLLOUT; ee.data.u64 = 0; /* avoid valgrind warning */ ee.data.fd = fd; if (epoll_ctl(state->epfd,op,fd,&ee) == -1) return -1; return 0; } static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int delmask) { aeApiState *state = eventLoop->apidata; struct epoll_event ee; int mask = eventLoop->events[fd].mask & (~delmask); ee.events = 0; if (mask & AE_READABLE) ee.events |= EPOLLIN; if (mask & AE_WRITABLE) ee.events |= EPOLLOUT; ee.data.u64 = 0; /* avoid valgrind warning */ ee.data.fd = fd; if (mask != AE_NONE) { epoll_ctl(state->epfd,EPOLL_CTL_MOD,fd,&ee); } else { /* Note, Kernel < 2.6.9 requires a non null event pointer even for * EPOLL_CTL_DEL. */ epoll_ctl(state->epfd,EPOLL_CTL_DEL,fd,&ee); } } static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) { aeApiState *state = eventLoop->apidata; int retval, numevents = 0; retval = epoll_wait(state->epfd,state->events,eventLoop->setsize, tvp ? (tvp->tv_sec*1000 + tvp->tv_usec/1000) : -1); if (retval > 0) { int j; numevents = retval; for (j = 0; j < numevents; j++) { int mask = 0; struct epoll_event *e = state->events+j; if (e->events & EPOLLIN) mask |= AE_READABLE; if (e->events & EPOLLOUT) mask |= AE_WRITABLE; if (e->events & EPOLLERR) mask |= AE_WRITABLE; if (e->events & EPOLLHUP) mask |= AE_WRITABLE; eventLoop->fired[j].fd = e->data.fd; eventLoop->fired[j].mask = mask; } } return numevents; } static char *aeApiName(void) { return "epoll"; } disque-1.0-rc1/src/ae_evport.c000066400000000000000000000252741264176561100162670ustar00rootroot00000000000000/* ae.c module for illumos event ports. * * Copyright (c) 2012, Joyent, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution.
* * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <assert.h> #include <errno.h> #include <port.h> #include <poll.h> #include <sys/types.h> #include <sys/time.h> #include <stdio.h> static int evport_debug = 0; /* * This file implements the ae API using event ports, present on Solaris-based * systems since Solaris 10. Using the event port interface, we associate file * descriptors with the port. Each association also includes the set of poll(2) * events that the consumer is interested in (e.g., POLLIN and POLLOUT). * * There's one tricky piece to this implementation: when we return events via * aeApiPoll, the corresponding file descriptors become dissociated from the * port. This is necessary because poll events are level-triggered, so if the * fd didn't become dissociated, it would immediately fire another event since * the underlying state hasn't changed yet. We must re-associate the file * descriptor, but only after we know that our caller has actually read from it. * The ae API does not tell us exactly when that happens, but we do know that * it must happen by the time aeApiPoll is called again. Our solution is to * keep track of the last fds returned by aeApiPoll and re-associate them next * time aeApiPoll is invoked. * * To summarize, in this module, each fd association is EITHER (a) represented * only via the in-kernel association OR (b) represented by pending_fds and * pending_masks. (b) is only true for the last fds we returned from aeApiPoll, * and only until we enter aeApiPoll again (at which point we restore the * in-kernel association). */ #define MAX_EVENT_BATCHSZ 512 typedef struct aeApiState { int portfd; /* event port */ int npending; /* # of pending fds */ int pending_fds[MAX_EVENT_BATCHSZ]; /* pending fds */ int pending_masks[MAX_EVENT_BATCHSZ]; /* pending fds' masks */ } aeApiState; static int aeApiCreate(aeEventLoop *eventLoop) { int i; aeApiState *state = zmalloc(sizeof(aeApiState)); if (!state) return -1; state->portfd = port_create(); if (state->portfd == -1) { zfree(state); return -1; } state->npending = 0; for (i = 0; i < MAX_EVENT_BATCHSZ; i++) { state->pending_fds[i] = -1; state->pending_masks[i] = AE_NONE; } eventLoop->apidata = state; return 0; } static int aeApiResize(aeEventLoop *eventLoop, int setsize) { /* Nothing to resize here. */ return 0; } static void aeApiFree(aeEventLoop *eventLoop) { aeApiState *state = eventLoop->apidata; close(state->portfd); zfree(state); } static int aeApiLookupPending(aeApiState *state, int fd) { int i; for (i = 0; i < state->npending; i++) { if (state->pending_fds[i] == fd) return (i); } return (-1); } /* * Helper function to invoke port_associate for the given fd and mask.
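 *
 * For example (an illustrative note, not from the original sources): a
 * mask of AE_READABLE|AE_WRITABLE is translated below into POLLIN|POLLOUT
 * before being handed to port_associate(), and the original ae mask is
 * passed as the user data pointer so that aeApiPoll() can recover it
 * later from portev_user.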
*/ static int aeApiAssociate(const char *where, int portfd, int fd, int mask) { int events = 0; int rv, err; if (mask & AE_READABLE) events |= POLLIN; if (mask & AE_WRITABLE) events |= POLLOUT; if (evport_debug) fprintf(stderr, "%s: port_associate(%d, 0x%x) = ", where, fd, events); rv = port_associate(portfd, PORT_SOURCE_FD, fd, events, (void *)(uintptr_t)mask); err = errno; if (evport_debug) fprintf(stderr, "%d (%s)\n", rv, rv == 0 ? "no error" : strerror(err)); if (rv == -1) { fprintf(stderr, "%s: port_associate: %s\n", where, strerror(err)); if (err == EAGAIN) fprintf(stderr, "aeApiAssociate: event port limit exceeded."); } return rv; } static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) { aeApiState *state = eventLoop->apidata; int fullmask, pfd; if (evport_debug) fprintf(stderr, "aeApiAddEvent: fd %d mask 0x%x\n", fd, mask); /* * Since port_associate's "events" argument replaces any existing events, we * must be sure to include whatever events are already associated when * we call port_associate() again. */ fullmask = mask | eventLoop->events[fd].mask; pfd = aeApiLookupPending(state, fd); if (pfd != -1) { /* * This fd was recently returned from aeApiPoll. It should be safe to * assume that the consumer has processed that poll event, but we play * it safer by simply updating pending_mask. The fd will be * re-associated as usual when aeApiPoll is called again. */ if (evport_debug) fprintf(stderr, "aeApiAddEvent: adding to pending fd %d\n", fd); state->pending_masks[pfd] |= fullmask; return 0; } return (aeApiAssociate("aeApiAddEvent", state->portfd, fd, fullmask)); } static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int mask) { aeApiState *state = eventLoop->apidata; int fullmask, pfd; if (evport_debug) fprintf(stderr, "del fd %d mask 0x%x\n", fd, mask); pfd = aeApiLookupPending(state, fd); if (pfd != -1) { if (evport_debug) fprintf(stderr, "deleting event from pending fd %d\n", fd); /* * This fd was just returned from aeApiPoll, so it's not currently * associated with the port. All we need to do is update * pending_mask appropriately. */ state->pending_masks[pfd] &= ~mask; if (state->pending_masks[pfd] == AE_NONE) state->pending_fds[pfd] = -1; return; } /* * The fd is currently associated with the port. Like with the add case * above, we must look at the full mask for the file descriptor before * updating that association. We don't have a good way of knowing what the * events are without looking into the eventLoop state directly. We rely on * the fact that our caller has already updated the mask in the eventLoop. */ fullmask = eventLoop->events[fd].mask; if (fullmask == AE_NONE) { /* * We're removing *all* events, so use port_dissociate to remove the * association completely. Failure here indicates a bug. */ if (evport_debug) fprintf(stderr, "aeApiDelEvent: port_dissociate(%d)\n", fd); if (port_dissociate(state->portfd, PORT_SOURCE_FD, fd) != 0) { perror("aeApiDelEvent: port_dissociate"); abort(); /* will not return */ } } else if (aeApiAssociate("aeApiDelEvent", state->portfd, fd, fullmask) != 0) { /* * ENOMEM is a potentially transient condition, but the kernel won't * generally return it unless things are really bad. EAGAIN indicates * we've reached a resource limit, for which it doesn't make sense to * retry (counter-intuitively). All other errors indicate a bug. In any * of these cases, the best we can do is to abort.
*/ abort(); /* will not return */ } } static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) { aeApiState *state = eventLoop->apidata; struct timespec timeout, *tsp; int mask, i; uint_t nevents; port_event_t event[MAX_EVENT_BATCHSZ]; /* * If we've returned fd events before, we must re-associate them with the * port now, before calling port_get(). See the block comment at the top of * this file for an explanation of why. */ for (i = 0; i < state->npending; i++) { if (state->pending_fds[i] == -1) /* This fd has since been deleted. */ continue; if (aeApiAssociate("aeApiPoll", state->portfd, state->pending_fds[i], state->pending_masks[i]) != 0) { /* See aeApiDelEvent for why this case is fatal. */ abort(); } state->pending_masks[i] = AE_NONE; state->pending_fds[i] = -1; } state->npending = 0; if (tvp != NULL) { timeout.tv_sec = tvp->tv_sec; timeout.tv_nsec = tvp->tv_usec * 1000; tsp = &timeout; } else { tsp = NULL; } /* * port_getn can return with errno == ETIME having returned some events (!). * So if we get ETIME, we check nevents, too. */ nevents = 1; if (port_getn(state->portfd, event, MAX_EVENT_BATCHSZ, &nevents, tsp) == -1 && (errno != ETIME || nevents == 0)) { if (errno == ETIME || errno == EINTR) return 0; /* Any other error indicates a bug. */ perror("aeApiPoll: port_get"); abort(); } state->npending = nevents; for (i = 0; i < nevents; i++) { mask = 0; if (event[i].portev_events & POLLIN) mask |= AE_READABLE; if (event[i].portev_events & POLLOUT) mask |= AE_WRITABLE; eventLoop->fired[i].fd = event[i].portev_object; eventLoop->fired[i].mask = mask; if (evport_debug) fprintf(stderr, "aeApiPoll: fd %d mask 0x%x\n", (int)event[i].portev_object, mask); state->pending_fds[i] = event[i].portev_object; state->pending_masks[i] = (uintptr_t)event[i].portev_user; } return nevents; } static char *aeApiName(void) { return "evport"; } disque-1.0-rc1/src/ae_kqueue.c000066400000000000000000000107301264176561100162400ustar00rootroot00000000000000/* Kqueue(2)-based ae.c module * * Copyright (C) 2009 Harish Mallipeddi - harish.mallipeddi@gmail.com * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <sys/types.h> #include <sys/event.h> #include <sys/time.h> typedef struct aeApiState { int kqfd; struct kevent *events; } aeApiState; static int aeApiCreate(aeEventLoop *eventLoop) { aeApiState *state = zmalloc(sizeof(aeApiState)); if (!state) return -1; state->events = zmalloc(sizeof(struct kevent)*eventLoop->setsize); if (!state->events) { zfree(state); return -1; } state->kqfd = kqueue(); if (state->kqfd == -1) { zfree(state->events); zfree(state); return -1; } eventLoop->apidata = state; return 0; } static int aeApiResize(aeEventLoop *eventLoop, int setsize) { aeApiState *state = eventLoop->apidata; state->events = zrealloc(state->events, sizeof(struct kevent)*setsize); return 0; } static void aeApiFree(aeEventLoop *eventLoop) { aeApiState *state = eventLoop->apidata; close(state->kqfd); zfree(state->events); zfree(state); } static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) { aeApiState *state = eventLoop->apidata; struct kevent ke; if (mask & AE_READABLE) { EV_SET(&ke, fd, EVFILT_READ, EV_ADD, 0, 0, NULL); if (kevent(state->kqfd, &ke, 1, NULL, 0, NULL) == -1) return -1; } if (mask & AE_WRITABLE) { EV_SET(&ke, fd, EVFILT_WRITE, EV_ADD, 0, 0, NULL); if (kevent(state->kqfd, &ke, 1, NULL, 0, NULL) == -1) return -1; } return 0; } static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int mask) { aeApiState *state = eventLoop->apidata; struct kevent ke; if (mask & AE_READABLE) { EV_SET(&ke, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL); kevent(state->kqfd, &ke, 1, NULL, 0, NULL); } if (mask & AE_WRITABLE) { EV_SET(&ke, fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL); kevent(state->kqfd, &ke, 1, NULL, 0, NULL); } } static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) { aeApiState *state = eventLoop->apidata; int retval, numevents = 0; if (tvp != NULL) { struct timespec timeout; timeout.tv_sec = tvp->tv_sec; timeout.tv_nsec = tvp->tv_usec * 1000; retval = kevent(state->kqfd, NULL, 0, state->events, eventLoop->setsize, &timeout); } else { retval = kevent(state->kqfd, NULL, 0, state->events, eventLoop->setsize, NULL); } if (retval > 0) { int j; numevents = retval; for(j = 0; j < numevents; j++) { int mask = 0; struct kevent *e = state->events+j; if (e->filter == EVFILT_READ) mask |= AE_READABLE; if (e->filter == EVFILT_WRITE) mask |= AE_WRITABLE; eventLoop->fired[j].fd = e->ident; eventLoop->fired[j].mask = mask; } } return numevents; } static char *aeApiName(void) { return "kqueue"; } disque-1.0-rc1/src/ae_select.c000066400000000000000000000073351264176561100162270ustar00rootroot00000000000000/* Select()-based ae.c module. * * Copyright (c) 2009-2012, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <sys/select.h> typedef struct aeApiState { fd_set rfds, wfds; /* We need to have a copy of the fd sets as it's not safe to reuse * FD sets after select(). */ fd_set _rfds, _wfds; } aeApiState; static int aeApiCreate(aeEventLoop *eventLoop) { aeApiState *state = zmalloc(sizeof(aeApiState)); if (!state) return -1; FD_ZERO(&state->rfds); FD_ZERO(&state->wfds); eventLoop->apidata = state; return 0; } static int aeApiResize(aeEventLoop *eventLoop, int setsize) { /* Just ensure we have enough room in the fd_set type. */ if (setsize >= FD_SETSIZE) return -1; return 0; } static void aeApiFree(aeEventLoop *eventLoop) { zfree(eventLoop->apidata); } static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) { aeApiState *state = eventLoop->apidata; if (mask & AE_READABLE) FD_SET(fd,&state->rfds); if (mask & AE_WRITABLE) FD_SET(fd,&state->wfds); return 0; } static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int mask) { aeApiState *state = eventLoop->apidata; if (mask & AE_READABLE) FD_CLR(fd,&state->rfds); if (mask & AE_WRITABLE) FD_CLR(fd,&state->wfds); } static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) { aeApiState *state = eventLoop->apidata; int retval, j, numevents = 0; memcpy(&state->_rfds,&state->rfds,sizeof(fd_set)); memcpy(&state->_wfds,&state->wfds,sizeof(fd_set)); retval = select(eventLoop->maxfd+1, &state->_rfds,&state->_wfds,NULL,tvp); if (retval > 0) { for (j = 0; j <= eventLoop->maxfd; j++) { int mask = 0; aeFileEvent *fe = &eventLoop->events[j]; if (fe->mask == AE_NONE) continue; if (fe->mask & AE_READABLE && FD_ISSET(j,&state->_rfds)) mask |= AE_READABLE; if (fe->mask & AE_WRITABLE && FD_ISSET(j,&state->_wfds)) mask |= AE_WRITABLE; eventLoop->fired[numevents].fd = j; eventLoop->fired[numevents].mask = mask; numevents++; } } return numevents; } static char *aeApiName(void) { return "select"; } disque-1.0-rc1/src/anet.c000066400000000000000000000502071264176561100152260ustar00rootroot00000000000000/* anet.c -- Basic TCP socket stuff made a bit less boring * * Copyright (c) 2006-2012, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "fmacros.h" #include <sys/types.h> #include <sys/socket.h> #include <sys/stat.h> #include <sys/un.h> #include <netinet/in.h> #include <netinet/tcp.h> #include <arpa/inet.h> #include <unistd.h> #include <fcntl.h> #include <string.h> #include <netdb.h> #include <errno.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include "anet.h" static void anetSetError(char *err, const char *fmt, ...) { va_list ap; if (!err) return; va_start(ap, fmt); vsnprintf(err, ANET_ERR_LEN, fmt, ap); va_end(ap); } int anetSetBlock(char *err, int fd, int non_block) { int flags; /* Set the socket blocking (if non_block is zero) or non-blocking. * Note that fcntl(2) for F_GETFL and F_SETFL can't be * interrupted by a signal. */ if ((flags = fcntl(fd, F_GETFL)) == -1) { anetSetError(err, "fcntl(F_GETFL): %s", strerror(errno)); return ANET_ERR; } if (non_block) flags |= O_NONBLOCK; else flags &= ~O_NONBLOCK; if (fcntl(fd, F_SETFL, flags) == -1) { anetSetError(err, "fcntl(F_SETFL,O_NONBLOCK): %s", strerror(errno)); return ANET_ERR; } return ANET_OK; } int anetNonBlock(char *err, int fd) { return anetSetBlock(err,fd,1); } int anetBlock(char *err, int fd) { return anetSetBlock(err,fd,0); } /* Set TCP keep alive option to detect dead peers. The interval option * is only used for Linux as we are using Linux-specific APIs to set * the probe send time, interval, and count. */ int anetKeepAlive(char *err, int fd, int interval) { int val = 1; if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &val, sizeof(val)) == -1) { anetSetError(err, "setsockopt SO_KEEPALIVE: %s", strerror(errno)); return ANET_ERR; } #ifdef __linux__ /* Default settings are more or less garbage, with the keepalive time * set to 7200 by default on Linux. Modify settings to make the feature * actually useful. */ /* Send first probe after interval. */ val = interval; if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &val, sizeof(val)) < 0) { anetSetError(err, "setsockopt TCP_KEEPIDLE: %s\n", strerror(errno)); return ANET_ERR; } /* Send next probes after the specified interval. Note that we set the * delay as interval / 3, as we send three probes before detecting * an error (see the next setsockopt call). */ val = interval/3; if (val == 0) val = 1; if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &val, sizeof(val)) < 0) { anetSetError(err, "setsockopt TCP_KEEPINTVL: %s\n", strerror(errno)); return ANET_ERR; } /* Consider the socket in error state after we send three ACK * probes without getting a reply.
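 *
 * As a worked example (illustrative numbers, not from the original
 * sources): with interval = 30 the calls above set TCP_KEEPIDLE = 30 and
 * TCP_KEEPINTVL = 10, and TCP_KEEPCNT = 3 below, so a dead peer is
 * detected roughly 30 + 3*10 = 60 seconds after the link goes silent.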
*/ val = 3; if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &val, sizeof(val)) < 0) { anetSetError(err, "setsockopt TCP_KEEPCNT: %s\n", strerror(errno)); return ANET_ERR; } #else ((void) interval); /* Avoid unused var warning for non Linux systems. */ #endif return ANET_OK; } static int anetSetTcpNoDelay(char *err, int fd, int val) { if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val)) == -1) { anetSetError(err, "setsockopt TCP_NODELAY: %s", strerror(errno)); return ANET_ERR; } return ANET_OK; } int anetEnableTcpNoDelay(char *err, int fd) { return anetSetTcpNoDelay(err, fd, 1); } int anetDisableTcpNoDelay(char *err, int fd) { return anetSetTcpNoDelay(err, fd, 0); } int anetSetSendBuffer(char *err, int fd, int buffsize) { if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &buffsize, sizeof(buffsize)) == -1) { anetSetError(err, "setsockopt SO_SNDBUF: %s", strerror(errno)); return ANET_ERR; } return ANET_OK; } int anetTcpKeepAlive(char *err, int fd) { int yes = 1; if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &yes, sizeof(yes)) == -1) { anetSetError(err, "setsockopt SO_KEEPALIVE: %s", strerror(errno)); return ANET_ERR; } return ANET_OK; } /* Set the socket send timeout (SO_SNDTIMEO socket option) to the specified * number of milliseconds, or disable it if the 'ms' argument is zero. */ int anetSendTimeout(char *err, int fd, long long ms) { struct timeval tv; tv.tv_sec = ms/1000; tv.tv_usec = (ms%1000)*1000; if (setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)) == -1) { anetSetError(err, "setsockopt SO_SNDTIMEO: %s", strerror(errno)); return ANET_ERR; } return ANET_OK; } /* anetGenericResolve() is called by anetResolve() and anetResolveIP() to * do the actual work. It resolves the hostname "host" and sets the string * representation of the IP address into the buffer pointed to by "ipbuf". * * If flags is set to ANET_IP_ONLY the function only resolves hostnames * that are actually already IPv4 or IPv6 addresses. This turns the function * into a validating / normalizing function.
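 *
 * A hypothetical usage sketch (the variable names are invented for
 * illustration, this snippet is not part of the original sources):
 *
 *     char err[ANET_ERR_LEN], ip[INET6_ADDRSTRLEN];
 *     if (anetResolveIP(err, "127.0.0.1", ip, sizeof(ip)) == ANET_OK) {
 *         // 'ip' now holds the validated, normalized address.
 *     }
 *
 * Under ANET_IP_ONLY a hostname such as "example.com" fails instead,
 * since AI_NUMERICHOST makes getaddrinfo() reject anything that is not
 * already a numeric IPv4 or IPv6 address.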
*/ int anetGenericResolve(char *err, char *host, char *ipbuf, size_t ipbuf_len, int flags) { struct addrinfo hints, *info; int rv; memset(&hints,0,sizeof(hints)); if (flags & ANET_IP_ONLY) hints.ai_flags = AI_NUMERICHOST; hints.ai_family = AF_UNSPEC; hints.ai_socktype = SOCK_STREAM; /* specify socktype to avoid dups */ if ((rv = getaddrinfo(host, NULL, &hints, &info)) != 0) { anetSetError(err, "%s", gai_strerror(rv)); return ANET_ERR; } if (info->ai_family == AF_INET) { struct sockaddr_in *sa = (struct sockaddr_in *)info->ai_addr; inet_ntop(AF_INET, &(sa->sin_addr), ipbuf, ipbuf_len); } else { struct sockaddr_in6 *sa = (struct sockaddr_in6 *)info->ai_addr; inet_ntop(AF_INET6, &(sa->sin6_addr), ipbuf, ipbuf_len); } freeaddrinfo(info); return ANET_OK; } int anetResolve(char *err, char *host, char *ipbuf, size_t ipbuf_len) { return anetGenericResolve(err,host,ipbuf,ipbuf_len,ANET_NONE); } int anetResolveIP(char *err, char *host, char *ipbuf, size_t ipbuf_len) { return anetGenericResolve(err,host,ipbuf,ipbuf_len,ANET_IP_ONLY); } static int anetSetReuseAddr(char *err, int fd) { int yes = 1; /* Make sure connection-intensive things like the redis benchmark * will be able to close/open sockets a zillion times */ if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)) == -1) { anetSetError(err, "setsockopt SO_REUSEADDR: %s", strerror(errno)); return ANET_ERR; } return ANET_OK; } static int anetCreateSocket(char *err, int domain) { int s; if ((s = socket(domain, SOCK_STREAM, 0)) == -1) { anetSetError(err, "creating socket: %s", strerror(errno)); return ANET_ERR; } /* Make sure connection-intensive things like the redis benchmark * will be able to close/open sockets a zillion times */ if (anetSetReuseAddr(err,s) == ANET_ERR) { close(s); return ANET_ERR; } return s; } #define ANET_CONNECT_NONE 0 #define ANET_CONNECT_NONBLOCK 1 #define ANET_CONNECT_BE_BINDING 2 /* Best effort binding. */ static int anetTcpGenericConnect(char *err, char *addr, int port, char *source_addr, int flags) { int s = ANET_ERR, rv; char portstr[6]; /* strlen("65535") + 1; */ struct addrinfo hints, *servinfo, *bservinfo, *p, *b; snprintf(portstr,sizeof(portstr),"%d",port); memset(&hints,0,sizeof(hints)); hints.ai_family = AF_UNSPEC; hints.ai_socktype = SOCK_STREAM; if ((rv = getaddrinfo(addr,portstr,&hints,&servinfo)) != 0) { anetSetError(err, "%s", gai_strerror(rv)); return ANET_ERR; } for (p = servinfo; p != NULL; p = p->ai_next) { /* Try to create the socket and to connect it. * If we fail in the socket() call, or on connect(), we retry with * the next entry in servinfo. */ if ((s = socket(p->ai_family,p->ai_socktype,p->ai_protocol)) == -1) continue; if (anetSetReuseAddr(err,s) == ANET_ERR) goto error; if (flags & ANET_CONNECT_NONBLOCK && anetNonBlock(err,s) != ANET_OK) goto error; if (source_addr) { int bound = 0; /* Using getaddrinfo saves us from self-determining IPv4 vs IPv6 */ if ((rv = getaddrinfo(source_addr, NULL, &hints, &bservinfo)) != 0) { anetSetError(err, "%s", gai_strerror(rv)); goto error; } for (b = bservinfo; b != NULL; b = b->ai_next) { if (bind(s,b->ai_addr,b->ai_addrlen) != -1) { bound = 1; break; } } freeaddrinfo(bservinfo); if (!bound) { anetSetError(err, "bind: %s", strerror(errno)); goto error; } } if (connect(s,p->ai_addr,p->ai_addrlen) == -1) { /* If the socket is non-blocking, it is ok for connect() to * return an EINPROGRESS error here.
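 *
 * A sketch of how a caller might complete such a non-blocking connect
 * (illustrative only, not code from this file): wait for the socket to
 * become writable, for instance with aeWait(fd, AE_WRITABLE, timeout),
 * then fetch the final status with
 *
 *     int err = 0; socklen_t len = sizeof(err);
 *     getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
 *
 * where a zero 'err' means the connection was established.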
*/ if (errno == EINPROGRESS && flags & ANET_CONNECT_NONBLOCK) goto end; close(s); s = ANET_ERR; continue; } /* If we ended an iteration of the for loop without errors, we * have a connected socket. Let's return to the caller. */ goto end; } if (p == NULL) anetSetError(err, "creating socket: %s", strerror(errno)); error: if (s != ANET_ERR) { close(s); s = ANET_ERR; } end: freeaddrinfo(servinfo); /* Handle best effort binding: if a binding address was used, but it is * not possible to create a socket, try again without a binding address. */ if (s == ANET_ERR && source_addr && (flags & ANET_CONNECT_BE_BINDING)) { return anetTcpGenericConnect(err,addr,port,NULL,flags); } else { return s; } } int anetTcpConnect(char *err, char *addr, int port) { return anetTcpGenericConnect(err,addr,port,NULL,ANET_CONNECT_NONE); } int anetTcpNonBlockConnect(char *err, char *addr, int port) { return anetTcpGenericConnect(err,addr,port,NULL,ANET_CONNECT_NONBLOCK); } int anetTcpNonBlockBindConnect(char *err, char *addr, int port, char *source_addr) { return anetTcpGenericConnect(err,addr,port,source_addr, ANET_CONNECT_NONBLOCK); } int anetTcpNonBlockBestEffortBindConnect(char *err, char *addr, int port, char *source_addr) { return anetTcpGenericConnect(err,addr,port,source_addr, ANET_CONNECT_NONBLOCK|ANET_CONNECT_BE_BINDING); } int anetUnixGenericConnect(char *err, char *path, int flags) { int s; struct sockaddr_un sa; if ((s = anetCreateSocket(err,AF_LOCAL)) == ANET_ERR) return ANET_ERR; sa.sun_family = AF_LOCAL; strncpy(sa.sun_path,path,sizeof(sa.sun_path)-1); if (flags & ANET_CONNECT_NONBLOCK) { if (anetNonBlock(err,s) != ANET_OK) return ANET_ERR; } if (connect(s,(struct sockaddr*)&sa,sizeof(sa)) == -1) { if (errno == EINPROGRESS && flags & ANET_CONNECT_NONBLOCK) return s; anetSetError(err, "connect: %s", strerror(errno)); close(s); return ANET_ERR; } return s; } int anetUnixConnect(char *err, char *path) { return anetUnixGenericConnect(err,path,ANET_CONNECT_NONE); } int anetUnixNonBlockConnect(char *err, char *path) { return anetUnixGenericConnect(err,path,ANET_CONNECT_NONBLOCK); } /* Like read(2) but make sure 'count' is read before returning * (unless an error or EOF condition is encountered) */ int anetRead(int fd, char *buf, int count) { ssize_t nread, totlen = 0; while(totlen != count) { nread = read(fd,buf,count-totlen); if (nread == 0) return totlen; if (nread == -1) return -1; totlen += nread; buf += nread; } return totlen; } /* Like write(2) but make sure 'count' is written before returning * (unless an error is encountered) */ int anetWrite(int fd, char *buf, int count) { ssize_t nwritten, totlen = 0; while(totlen != count) { nwritten = write(fd,buf,count-totlen); if (nwritten == 0) return totlen; if (nwritten == -1) return -1; totlen += nwritten; buf += nwritten; } return totlen; } static int anetListen(char *err, int s, struct sockaddr *sa, socklen_t len, int backlog) { if (bind(s,sa,len) == -1) { anetSetError(err, "bind: %s", strerror(errno)); close(s); return ANET_ERR; } if (listen(s, backlog) == -1) { anetSetError(err, "listen: %s", strerror(errno)); close(s); return ANET_ERR; } return ANET_OK; } static int anetV6Only(char *err, int s) { int yes = 1; if (setsockopt(s,IPPROTO_IPV6,IPV6_V6ONLY,&yes,sizeof(yes)) == -1) { anetSetError(err, "setsockopt: %s", strerror(errno)); close(s); return ANET_ERR; } return ANET_OK; } static int _anetTcpServer(char *err, int port, char *bindaddr, int af, int backlog) { int s, rv; char _port[6]; /* strlen("65535") + 1 */ struct addrinfo hints, *servinfo, *p;
snprintf(_port,6,"%d",port); memset(&hints,0,sizeof(hints)); hints.ai_family = af; hints.ai_socktype = SOCK_STREAM; hints.ai_flags = AI_PASSIVE; /* No effect if bindaddr != NULL */ if ((rv = getaddrinfo(bindaddr,_port,&hints,&servinfo)) != 0) { anetSetError(err, "%s", gai_strerror(rv)); return ANET_ERR; } for (p = servinfo; p != NULL; p = p->ai_next) { if ((s = socket(p->ai_family,p->ai_socktype,p->ai_protocol)) == -1) continue; if (af == AF_INET6 && anetV6Only(err,s) == ANET_ERR) goto error; if (anetSetReuseAddr(err,s) == ANET_ERR) goto error; if (anetListen(err,s,p->ai_addr,p->ai_addrlen,backlog) == ANET_ERR) goto error; goto end; } if (p == NULL) { anetSetError(err, "unable to bind socket"); goto error; } error: s = ANET_ERR; end: freeaddrinfo(servinfo); return s; } int anetTcpServer(char *err, int port, char *bindaddr, int backlog) { return _anetTcpServer(err, port, bindaddr, AF_INET, backlog); } int anetTcp6Server(char *err, int port, char *bindaddr, int backlog) { return _anetTcpServer(err, port, bindaddr, AF_INET6, backlog); } int anetUnixServer(char *err, char *path, mode_t perm, int backlog) { int s; struct sockaddr_un sa; if ((s = anetCreateSocket(err,AF_LOCAL)) == ANET_ERR) return ANET_ERR; memset(&sa,0,sizeof(sa)); sa.sun_family = AF_LOCAL; strncpy(sa.sun_path,path,sizeof(sa.sun_path)-1); if (anetListen(err,s,(struct sockaddr*)&sa,sizeof(sa),backlog) == ANET_ERR) return ANET_ERR; if (perm) chmod(sa.sun_path, perm); return s; } static int anetGenericAccept(char *err, int s, struct sockaddr *sa, socklen_t *len) { int fd; while(1) { fd = accept(s,sa,len); if (fd == -1) { if (errno == EINTR) continue; else { anetSetError(err, "accept: %s", strerror(errno)); return ANET_ERR; } } break; } return fd; } int anetTcpAccept(char *err, int s, char *ip, size_t ip_len, int *port) { int fd; struct sockaddr_storage sa; socklen_t salen = sizeof(sa); if ((fd = anetGenericAccept(err,s,(struct sockaddr*)&sa,&salen)) == -1) return ANET_ERR; if (sa.ss_family == AF_INET) { struct sockaddr_in *s = (struct sockaddr_in *)&sa; if (ip) inet_ntop(AF_INET,(void*)&(s->sin_addr),ip,ip_len); if (port) *port = ntohs(s->sin_port); } else { struct sockaddr_in6 *s = (struct sockaddr_in6 *)&sa; if (ip) inet_ntop(AF_INET6,(void*)&(s->sin6_addr),ip,ip_len); if (port) *port = ntohs(s->sin6_port); } return fd; } int anetUnixAccept(char *err, int s) { int fd; struct sockaddr_un sa; socklen_t salen = sizeof(sa); if ((fd = anetGenericAccept(err,s,(struct sockaddr*)&sa,&salen)) == -1) return ANET_ERR; return fd; } int anetPeerToString(int fd, char *ip, size_t ip_len, int *port) { struct sockaddr_storage sa; socklen_t salen = sizeof(sa); if (getpeername(fd,(struct sockaddr*)&sa,&salen) == -1) goto error; if (ip_len == 0) goto error; if (sa.ss_family == AF_INET) { struct sockaddr_in *s = (struct sockaddr_in *)&sa; if (ip) inet_ntop(AF_INET,(void*)&(s->sin_addr),ip,ip_len); if (port) *port = ntohs(s->sin_port); } else if (sa.ss_family == AF_INET6) { struct sockaddr_in6 *s = (struct sockaddr_in6 *)&sa; if (ip) inet_ntop(AF_INET6,(void*)&(s->sin6_addr),ip,ip_len); if (port) *port = ntohs(s->sin6_port); } else if (sa.ss_family == AF_UNIX) { if (ip) strncpy(ip,"/unixsocket",ip_len); if (port) *port = 0; } else { goto error; } return 0; error: if (ip) { if (ip_len >= 2) { ip[0] = '?'; ip[1] = '\0'; } else if (ip_len == 1) { ip[0] = '\0'; } } if (port) *port = 0; return -1; } /* Format an IP,port pair into something easy to parse. If IP is IPv6 * (matches for ":"), the ip is surrounded by []. 
IP and port are just * separated by colons. This is the standard way to display addresses within Redis. */ int anetFormatAddr(char *buf, size_t buf_len, char *ip, int port) { return snprintf(buf,buf_len, strchr(ip,':') ? "[%s]:%d" : "%s:%d", ip, port); } /* Like anetFormatAddr() but extracts ip and port from the socket's peer. */ int anetFormatPeer(int fd, char *buf, size_t buf_len) { char ip[INET6_ADDRSTRLEN]; int port; anetPeerToString(fd,ip,sizeof(ip),&port); return anetFormatAddr(buf, buf_len, ip, port); } int anetSockName(int fd, char *ip, size_t ip_len, int *port) { struct sockaddr_storage sa; socklen_t salen = sizeof(sa); if (getsockname(fd,(struct sockaddr*)&sa,&salen) == -1) { if (port) *port = 0; ip[0] = '?'; ip[1] = '\0'; return -1; } if (sa.ss_family == AF_INET) { struct sockaddr_in *s = (struct sockaddr_in *)&sa; if (ip) inet_ntop(AF_INET,(void*)&(s->sin_addr),ip,ip_len); if (port) *port = ntohs(s->sin_port); } else { struct sockaddr_in6 *s = (struct sockaddr_in6 *)&sa; if (ip) inet_ntop(AF_INET6,(void*)&(s->sin6_addr),ip,ip_len); if (port) *port = ntohs(s->sin6_port); } return 0; } int anetFormatSock(int fd, char *fmt, size_t fmt_len) { char ip[INET6_ADDRSTRLEN]; int port; anetSockName(fd,ip,sizeof(ip),&port); return anetFormatAddr(fmt, fmt_len, ip, port); } disque-1.0-rc1/src/anet.h000066400000000000000000000067221264176561100152360ustar00rootroot00000000000000/* anet.c -- Basic TCP socket stuff made a bit less boring * * Copyright (c) 2006-2012, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef ANET_H #define ANET_H #define ANET_OK 0 #define ANET_ERR -1 #define ANET_ERR_LEN 256 /* Flags used with certain functions.
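 * For example (illustrative inputs, not from the original sources):
 * with ANET_IP_ONLY, anetGenericResolve() accepts "10.0.0.1" unchanged
 * but rejects a name such as "localhost", while ANET_NONE performs a
 * full DNS lookup; this is how anetResolveIP() and anetResolve() differ.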
*/ #define ANET_NONE 0 #define ANET_IP_ONLY (1<<0) #if defined(__sun) || defined(_AIX) #define AF_LOCAL AF_UNIX #endif #ifdef _AIX #undef ip_len #endif int anetTcpConnect(char *err, char *addr, int port); int anetTcpNonBlockConnect(char *err, char *addr, int port); int anetTcpNonBlockBindConnect(char *err, char *addr, int port, char *source_addr); int anetTcpNonBlockBestEffortBindConnect(char *err, char *addr, int port, char *source_addr); int anetUnixConnect(char *err, char *path); int anetUnixNonBlockConnect(char *err, char *path); int anetRead(int fd, char *buf, int count); int anetResolve(char *err, char *host, char *ipbuf, size_t ipbuf_len); int anetResolveIP(char *err, char *host, char *ipbuf, size_t ipbuf_len); int anetTcpServer(char *err, int port, char *bindaddr, int backlog); int anetTcp6Server(char *err, int port, char *bindaddr, int backlog); int anetUnixServer(char *err, char *path, mode_t perm, int backlog); int anetTcpAccept(char *err, int serversock, char *ip, size_t ip_len, int *port); int anetUnixAccept(char *err, int serversock); int anetWrite(int fd, char *buf, int count); int anetNonBlock(char *err, int fd); int anetBlock(char *err, int fd); int anetEnableTcpNoDelay(char *err, int fd); int anetDisableTcpNoDelay(char *err, int fd); int anetTcpKeepAlive(char *err, int fd); int anetSendTimeout(char *err, int fd, long long ms); int anetPeerToString(int fd, char *ip, size_t ip_len, int *port); int anetKeepAlive(char *err, int fd, int interval); int anetSockName(int fd, char *ip, size_t ip_len, int *port); int anetFormatAddr(char *fmt, size_t fmt_len, char *ip, int port); int anetFormatPeer(int fd, char *fmt, size_t fmt_len); int anetFormatSock(int fd, char *fmt, size_t fmt_len); #endif disque-1.0-rc1/src/aof.c000066400000000000000000001342201264176561100150420ustar00rootroot00000000000000/* * Copyright (c) 2009-2012, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "server.h" #include "bio.h" #include "rio.h" #include "job.h" #include <signal.h> #include <fcntl.h> #include <sys/stat.h> #include <sys/types.h> #include <sys/time.h> #include <sys/resource.h> #include <sys/wait.h> void aofUpdateCurrentSize(void); void aofClosePipes(void); /* Mark that we are loading in the global state and set up the fields * needed to provide loading stats. */ void startLoading(FILE *fp) { struct stat sb; /* Load the DB */ server.loading = 1; server.loading_start_time = time(NULL); if (fstat(fileno(fp), &sb) == -1) { server.loading_total_bytes = 1; /* just to avoid division by zero */ } else { server.loading_total_bytes = sb.st_size; } } /* Refresh the loading progress info */ void loadingProgress(off_t pos) { server.loading_loaded_bytes = pos; if (server.stat_peak_memory < zmalloc_used_memory()) server.stat_peak_memory = zmalloc_used_memory(); } /* Loading finished */ void stopLoading(void) { server.loading = 0; } /* Get the AOF state as string for INFO reporting. */ char *aofGetStateString(void) { switch(server.aof_state) { case AOF_OFF: return "off"; case AOF_ON: return "on"; case AOF_WAIT_REWRITE: return "wait-rewrite"; default: return "unknown"; } } /* ---------------------------------------------------------------------------- * AOF rewrite buffer implementation. * * The following code implements a simple buffer used in order to accumulate * changes while the background process is rewriting the AOF file. * * We only need to append, but can't just use realloc with a large block * because 'huge' reallocs are not always handled as one could expect * (via remapping of pages at OS level) but may involve copying data. * * For this reason we use a list of blocks, every block is * AOF_RW_BUF_BLOCK_SIZE bytes. * ------------------------------------------------------------------------- */ #define AOF_RW_BUF_BLOCK_SIZE (1024*1024*10) /* 10 MB per block */ typedef struct aofrwblock { unsigned long used, free; char buf[AOF_RW_BUF_BLOCK_SIZE]; } aofrwblock; /* This function frees the old AOF rewrite buffer if needed, and initializes * a fresh new one. It tests for server.aof_rewrite_buf_blocks equal to NULL * so it can be used for the first initialization as well. */ void aofRewriteBufferReset(void) { if (server.aof_rewrite_buf_blocks) listRelease(server.aof_rewrite_buf_blocks); server.aof_rewrite_buf_blocks = listCreate(); listSetFreeMethod(server.aof_rewrite_buf_blocks,zfree); } /* Return the current size of the AOF rewrite buffer. */ unsigned long aofRewriteBufferSize(void) { listNode *ln; listIter li; unsigned long size = 0; listRewind(server.aof_rewrite_buf_blocks,&li); while((ln = listNext(&li))) { aofrwblock *block = listNodeValue(ln); size += block->used; } return size; } /* Event handler used to send data to the child process doing the AOF * rewrite. We send pieces of our AOF differences buffer so that the final * write when the child finishes the rewrite will be small. */ void aofChildWriteDiffData(aeEventLoop *el, int fd, void *privdata, int mask) { listNode *ln; aofrwblock *block; ssize_t nwritten; UNUSED(el); UNUSED(fd); UNUSED(privdata); UNUSED(mask); while(1) { ln = listFirst(server.aof_rewrite_buf_blocks); block = ln ? ln->value : NULL; if (server.aof_stop_sending_diff || !block) { aeDeleteFileEvent(server.el,server.aof_pipe_write_data_to_child, AE_WRITABLE); return; } if (block->used > 0) { nwritten = write(server.aof_pipe_write_data_to_child, block->buf,block->used); if (nwritten <= 0) return; memmove(block->buf,block->buf+nwritten,block->used-nwritten); block->used -= nwritten; } if (block->used == 0) listDelNode(server.aof_rewrite_buf_blocks,ln); } } /* Append data to the AOF rewrite buffer, allocating new blocks if needed. */ void aofRewriteBufferAppend(unsigned char *s, unsigned long len) { listNode *ln = listLast(server.aof_rewrite_buf_blocks); aofrwblock *block = ln ? ln->value : NULL; while(len) { /* If we already have at least one allocated block, try appending * at least some piece into it. */ if (block) { unsigned long thislen = (block->free < len) ? block->free : len; if (thislen) { /* The current block is not already full. */ memcpy(block->buf+block->used, s, thislen); block->used += thislen; block->free -= thislen; s += thislen; len -= thislen; } } if (len) { /* First block to allocate, or need another block. */ int numblocks; block = zmalloc(sizeof(*block)); block->free = AOF_RW_BUF_BLOCK_SIZE; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks,block); /* Log every time we cross another 10 or 100 blocks, respectively * as a notice or a warning. */ numblocks = listLength(server.aof_rewrite_buf_blocks); if (((numblocks+1) % 10) == 0) { int level = ((numblocks+1) % 100) == 0 ? LL_WARNING : LL_NOTICE; serverLog(level,"Background AOF buffer size: %lu MB", aofRewriteBufferSize()/(1024*1024)); } } } /* Install a file event to send data to the rewrite child if there is * not one already. */ if (aeGetFileEvents(server.el,server.aof_pipe_write_data_to_child) == 0) { aeCreateFileEvent(server.el, server.aof_pipe_write_data_to_child, AE_WRITABLE, aofChildWriteDiffData, NULL); } } /* Write the buffer (possibly composed of multiple blocks) into the specified * fd. If a short write or any other error happens -1 is returned, * otherwise the number of bytes written is returned. */ ssize_t aofRewriteBufferWrite(int fd) { listNode *ln; listIter li; ssize_t count = 0; listRewind(server.aof_rewrite_buf_blocks,&li); while((ln = listNext(&li))) { aofrwblock *block = listNodeValue(ln); ssize_t nwritten; if (block->used) { nwritten = write(fd,block->buf,block->used); if (nwritten != (ssize_t)block->used) { if (nwritten == 0) errno = EIO; return -1; } count += nwritten; } } return count; } /* ---------------------------------------------------------------------------- * AOF file implementation * ------------------------------------------------------------------------- */ /* Starts a background task that performs fsync() against the specified * file descriptor (the one of the AOF file) in another thread. */ void aof_background_fsync(int fd) { bioCreateBackgroundJob(BIO_AOF_FSYNC,(void*)(long)fd,NULL,NULL); } /* Called when the user switches from "appendonly yes" to "appendonly no" * at runtime using the CONFIG command. */ void stopAppendOnly(void) { serverAssert(server.aof_state != AOF_OFF); flushAppendOnlyFile(1); aof_fsync(server.aof_fd); close(server.aof_fd); server.aof_fd = -1; server.aof_selected_db = -1; server.aof_state = AOF_OFF; /* Is a rewrite operation in progress?
Kill it and wait for the child to exit. */ if (server.aof_child_pid != -1) { int statloc; serverLog(LL_NOTICE,"Killing running AOF rewrite child: %ld", (long) server.aof_child_pid); if (kill(server.aof_child_pid,SIGUSR1) != -1) wait3(&statloc,0,NULL); /* reset the buffer accumulating changes while the child saves */ aofRewriteBufferReset(); aofRemoveTempFile(server.aof_child_pid); server.aof_child_pid = -1; server.aof_rewrite_time_start = -1; /* close pipes used for IPC between the two processes. */ aofClosePipes(); } } /* Called when the user switches from "appendonly no" to "appendonly yes" * at runtime using the CONFIG command. */ int startAppendOnly(void) { server.aof_last_fsync = server.unixtime; server.aof_fd = open(server.aof_filename,O_WRONLY|O_APPEND|O_CREAT,0644); serverAssert(server.aof_state == AOF_OFF); if (server.aof_fd == -1) { serverLog(LL_WARNING,"Disque needs to enable the AOF but can't open the append only file: %s",strerror(errno)); return C_ERR; } if (rewriteAppendOnlyFileBackground() == C_ERR) { close(server.aof_fd); serverLog(LL_WARNING,"Disque needs to enable the AOF but can't trigger a background AOF rewrite operation. Check the above logs for more info about the error."); return C_ERR; } /* We correctly switched on AOF, now wait for the rewrite to be complete * in order to append data on disk. */ server.aof_state = AOF_WAIT_REWRITE; return C_OK; } /* Write the append only file buffer on disk. * * Since we are required to write the AOF before replying to the client, * and the only way the client socket can get a write is when we re-enter * the event loop, we accumulate all the AOF writes in a memory * buffer and write it on disk using this function just before entering * the event loop again. * * About the 'force' argument: * * When the fsync policy is set to 'everysec' we may delay the flush if there * is still an fsync() going on in the background thread, since for instance * on Linux write(2) will be blocked by the background fsync anyway. * When this happens we remember that there is some aof buffer to be * flushed ASAP, and will try to do that in the serverCron() function. * * However if force is set to 1 we'll write regardless of the background * fsync. */ #define AOF_WRITE_LOG_ERROR_RATE 30 /* Seconds between error logging. */ void flushAppendOnlyFile(int force) { ssize_t nwritten; int sync_in_progress = 0; mstime_t latency; if (sdslen(server.aof_buf) == 0) return; if (server.aof_fsync == AOF_FSYNC_EVERYSEC) sync_in_progress = bioPendingJobsOfType(BIO_AOF_FSYNC) != 0; if (server.aof_fsync == AOF_FSYNC_EVERYSEC && !force) { /* With this append fsync policy we do background fsyncing. * If the fsync is still in progress we can try to delay * the write for a couple of seconds. */ if (sync_in_progress) { if (server.aof_flush_postponed_start == 0) { /* No previous write postponing, remember that we are * postponing the flush and return. */ server.aof_flush_postponed_start = server.unixtime; return; } else if (server.unixtime - server.aof_flush_postponed_start < 2) { /* We were already waiting for fsync to finish, but for less * than two seconds this is still ok. Postpone again. */ return; } /* Otherwise fall through, and go write since we can't wait * over two seconds. */ server.aof_delayed_fsync++; serverLog(LL_NOTICE,"Asynchronous AOF fsync is taking too long (disk is busy?). Writing the AOF buffer without waiting for fsync to complete, this may slow down Disque."); } } /* We want to perform a single write.
This should be guaranteed atomic * at least if the filesystem we are writing is a real physical one. * While this will save us against the server being killed I don't think * there is much to do about the whole server stopping for power problems * or alike */ latencyStartMonitor(latency); nwritten = write(server.aof_fd,server.aof_buf,sdslen(server.aof_buf)); latencyEndMonitor(latency); /* We want to capture different events for delayed writes: * when the delay happens with a pending fsync, or with a saving child * active, and when the above two conditions are missing. * We also use an additional event name to save all samples which is * useful for graphing / monitoring purposes. */ if (sync_in_progress) { latencyAddSampleIfNeeded("aof-write-pending-fsync",latency); } else if (server.aof_child_pid != -1) { latencyAddSampleIfNeeded("aof-write-active-child",latency); } else { latencyAddSampleIfNeeded("aof-write-alone",latency); } latencyAddSampleIfNeeded("aof-write",latency); /* We performed the write so reset the postponed flush sentinel to zero. */ server.aof_flush_postponed_start = 0; if (nwritten != (signed)sdslen(server.aof_buf)) { static time_t last_write_error_log = 0; int can_log = 0; /* Limit logging rate to 1 line per AOF_WRITE_LOG_ERROR_RATE seconds. */ if ((server.unixtime - last_write_error_log) > AOF_WRITE_LOG_ERROR_RATE) { can_log = 1; last_write_error_log = server.unixtime; } /* Log the AOF write error and record the error code. */ if (nwritten == -1) { if (can_log) { serverLog(LL_WARNING,"Error writing to the AOF file: %s", strerror(errno)); server.aof_last_write_errno = errno; } } else { if (can_log) { serverLog(LL_WARNING,"Short write while writing to " "the AOF file: (nwritten=%lld, " "expected=%lld)", (long long)nwritten, (long long)sdslen(server.aof_buf)); } if (ftruncate(server.aof_fd, server.aof_current_size) == -1) { if (can_log) { serverLog(LL_WARNING, "Could not remove short write " "from the append-only file. Disque may refuse " "to load the AOF the next time it starts. " "ftruncate: %s", strerror(errno)); } } else { /* If the ftruncate() succeeded we can set nwritten to * -1 since there is no longer partial data into the AOF. */ nwritten = -1; } server.aof_last_write_errno = ENOSPC; } /* Handle the AOF write error. */ if (server.aof_fsync == AOF_FSYNC_ALWAYS) { /* We can't recover when the fsync policy is ALWAYS since the * reply for the client is already in the output buffers, and we * have the contract with the user that on acknowledged write data * is synced on disk. */ serverLog(LL_WARNING,"Can't recover from AOF write error when the AOF fsync policy is 'always'. Exiting..."); exit(1); } else { /* Recover from failed write leaving data into the buffer. However * set an error to stop accepting writes as long as the error * condition is not cleared. */ server.aof_last_write_status = C_ERR; /* Trim the sds buffer if there was a partial write, and there * was no way to undo it with ftruncate(2). */ if (nwritten > 0) { server.aof_current_size += nwritten; sdsrange(server.aof_buf,nwritten,-1); } return; /* We'll try again on the next call... */ } } else { /* Successful write(2). If AOF was in error state, restore the * OK state and log the event. */ if (server.aof_last_write_status == C_ERR) { serverLog(LL_WARNING, "AOF write error looks solved, Disque can write again."); server.aof_last_write_status = C_OK; } } server.aof_current_size += nwritten; /* Re-use AOF buffer when it is small enough. 
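* (Note: sdsclear() simply resets the length to zero, keeping the existing allocation around for reuse, while bigger buffers are freed and recreated empty so the memory goes back to the allocator.)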
The maximum comes from the * arena size of 4k minus some overhead (but is otherwise arbitrary). */ if ((sdslen(server.aof_buf)+sdsavail(server.aof_buf)) < 4000) { sdsclear(server.aof_buf); } else { sdsfree(server.aof_buf); server.aof_buf = sdsempty(); } /* Don't fsync if no-appendfsync-on-rewrite is set to yes and there are * children doing I/O in the background. */ if (server.aof_no_fsync_on_rewrite && server.aof_child_pid != -1) return; /* Perform the fsync if needed. */ if (server.aof_fsync == AOF_FSYNC_ALWAYS) { /* aof_fsync is defined as fdatasync() for Linux in order to avoid * flushing metadata. */ latencyStartMonitor(latency); aof_fsync(server.aof_fd); /* Let's try to get this data on the disk */ latencyEndMonitor(latency); latencyAddSampleIfNeeded("aof-fsync-always",latency); server.aof_last_fsync = server.unixtime; } else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC && server.unixtime > server.aof_last_fsync)) { if (!sync_in_progress) aof_background_fsync(server.aof_fd); server.aof_last_fsync = server.unixtime; } } sds catAppendOnlyGenericCommand(sds dst, int argc, robj **argv) { char buf[32]; int len, j; robj *o; buf[0] = '*'; len = 1+ll2string(buf+1,sizeof(buf)-1,argc); buf[len++] = '\r'; buf[len++] = '\n'; dst = sdscatlen(dst,buf,len); for (j = 0; j < argc; j++) { o = getDecodedObject(argv[j]); buf[0] = '$'; len = 1+ll2string(buf+1,sizeof(buf)-1,sdslen(o->ptr)); buf[len++] = '\r'; buf[len++] = '\n'; dst = sdscatlen(dst,buf,len); dst = sdscatlen(dst,o->ptr,sdslen(o->ptr)); dst = sdscatlen(dst,"\r\n",2); decrRefCount(o); } return dst; } void feedAppendOnlyFile(robj **argv, int argc) { sds buf = sdsempty(); buf = catAppendOnlyGenericCommand(buf,argc,argv); /* Append to the AOF buffer. This will be flushed on disk just before * of re-entering the event loop, so before the client will get a * positive reply about the operation performed. */ if (server.aof_state == AOF_ON) server.aof_buf = sdscatlen(server.aof_buf,buf,sdslen(buf)); /* If a background append only file rewriting is in progress we want to * accumulate the differences between the child DB and the current one * in a buffer, so that when the child process will do its work we * can append the differences to the new append only file. */ if (server.aof_child_pid != -1) aofRewriteBufferAppend((unsigned char*)buf,sdslen(buf)); sdsfree(buf); } /* ---------------------------------------------------------------------------- * AOF loading * ------------------------------------------------------------------------- */ /* In Disque commands are always executed in the context of a client, so in * order to load the append only file we need to create a fake client. */ struct client *createFakeClient(void) { struct client *c = zmalloc(sizeof(*c)); c->fd = -1; c->name = NULL; c->querybuf = sdsempty(); c->querybuf_peak = 0; c->argc = 0; c->argv = NULL; c->bufpos = 0; c->flags = CLIENT_AOF_CLIENT; c->btype = BLOCKED_NONE; c->reply = listCreate(); c->reply_bytes = 0; c->obuf_soft_limit_reached_time = 0; c->peerid = NULL; listSetFreeMethod(c->reply,decrRefCountVoid); listSetDupMethod(c->reply,dupClientReplyValue); return c; } void freeFakeClientArgv(struct client *c) { int j; for (j = 0; j < c->argc; j++) decrRefCount(c->argv[j]); zfree(c->argv); } void freeFakeClient(struct client *c) { sdsfree(c->querybuf); listRelease(c->reply); zfree(c); } /* Replay the append log file. On success C_OK is returned. On non fatal * error (the append only file is zero-length) C_ERR is returned. 
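* (For reference, the file itself is just a sequence of commands in the format produced by catAppendOnlyGenericCommand() above; e.g. a two argument LOADJOB command is stored as "*2\r\n$7\r\nLOADJOB\r\n" followed by the serialized job as a "$<len>\r\n...\r\n" bulk string.)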
On * fatal error an error message is logged and the program exits. */ int loadAppendOnlyFile(char *filename) { struct client *fakeClient; FILE *fp = fopen(filename,"r"); struct disque_stat sb; int old_aof_state = server.aof_state; long loops = 0; off_t valid_up_to = 0; /* Offset of the latest well-formed command loaded. */ if (fp && disque_fstat(fileno(fp),&sb) != -1 && sb.st_size == 0) { server.aof_current_size = 0; fclose(fp); return C_ERR; } if (fp == NULL) { serverLog(LL_WARNING,"Fatal error: can't open the append log file for reading: %s",strerror(errno)); exit(1); } /* Temporarily disable AOF, to prevent the commands we are going to * replay from being appended to the same file we're about to read. */ server.aof_state = AOF_OFF; fakeClient = createFakeClient(); startLoading(fp); while(1) { int argc, j; unsigned long len; robj **argv; char buf[128]; sds argsds; struct serverCommand *cmd; /* Serve the clients from time to time */ if (!(loops++ % 1000)) { loadingProgress(ftello(fp)); processEventsWhileBlocked(); } if (fgets(buf,sizeof(buf),fp) == NULL) { if (feof(fp)) break; else goto readerr; } if (buf[0] != '*') goto fmterr; if (buf[1] == '\0') goto readerr; argc = atoi(buf+1); if (argc < 1) goto fmterr; argv = zmalloc(sizeof(robj*)*argc); fakeClient->argc = argc; fakeClient->argv = argv; for (j = 0; j < argc; j++) { if (fgets(buf,sizeof(buf),fp) == NULL) { fakeClient->argc = j; /* Free up to j-1. */ freeFakeClientArgv(fakeClient); goto readerr; } if (buf[0] != '$') goto fmterr; len = strtol(buf+1,NULL,10); argsds = sdsnewlen(NULL,len); if (len && fread(argsds,len,1,fp) == 0) { sdsfree(argsds); fakeClient->argc = j; /* Free up to j-1. */ freeFakeClientArgv(fakeClient); goto readerr; } argv[j] = createObject(OBJ_STRING,argsds); if (fread(buf,2,1,fp) == 0) { fakeClient->argc = j+1; /* Free up to j. */ freeFakeClientArgv(fakeClient); goto readerr; /* discard CRLF */ } } /* Command lookup */ cmd = lookupCommand(argv[0]->ptr); if (!cmd) { serverLog(LL_WARNING,"Unknown command '%s' reading the append only file", (char*)argv[0]->ptr); exit(1); } /* Run the command in the context of a fake client */ cmd->proc(fakeClient); /* The fake client should not have a reply */ serverAssert(fakeClient->bufpos == 0 && listLength(fakeClient->reply) == 0); /* The fake client should never get blocked */ serverAssert((fakeClient->flags & CLIENT_BLOCKED) == 0); /* Clean up. Command code may have changed argv/argc so we use the * argv/argc of the client instead of the local variables. */ freeFakeClientArgv(fakeClient); if (server.aof_load_truncated) valid_up_to = ftello(fp); } loaded_ok: /* DB loaded, cleanup and return C_OK to the caller. */ fclose(fp); freeFakeClient(fakeClient); server.aof_state = old_aof_state; stopLoading(); aofUpdateCurrentSize(); server.aof_rewrite_base_size = server.aof_current_size; return C_OK; readerr: /* Read error. If feof(fp) is true, fall through to unexpected EOF. */ if (!feof(fp)) { serverLog(LL_WARNING,"Unrecoverable error reading the append only file: %s", strerror(errno)); exit(1); } /* Unexpected AOF end of file. */ if (server.aof_load_truncated) { serverLog(LL_WARNING,"!!! Warning: short read while loading the AOF file !!!"); serverLog(LL_WARNING,"!!!
Truncating the AOF at offset %llu !!!", (unsigned long long) valid_up_to); if (valid_up_to == -1 || truncate(filename,valid_up_to) == -1) { if (valid_up_to == -1) { serverLog(LL_WARNING,"Last valid command offset is invalid"); } else { serverLog(LL_WARNING,"Error truncating the AOF file: %s", strerror(errno)); } } else { /* Make sure the AOF file descriptor points to the end of the * file after the truncate call. */ if (server.aof_fd != -1 && lseek(server.aof_fd,0,SEEK_END) == -1) { serverLog(LL_WARNING,"Can't seek the end of the AOF file: %s", strerror(errno)); } else { serverLog(LL_WARNING, "AOF loaded anyway because aof-load-truncated is enabled"); goto loaded_ok; } } } serverLog(LL_WARNING,"Unexpected end of file reading the append only file. You can: 1) Make a backup of your AOF file, then use ./disque-check-aof --fix <filename>. 2) Alternatively you can set the 'aof-load-truncated' configuration option to yes and restart the server."); exit(1); fmterr: /* Format error. */ serverLog(LL_WARNING,"Bad file format reading the append only file: make a backup of your AOF file, then use ./disque-check-aof --fix <filename>"); exit(1); } /* ---------------------------------------------------------------------------- * AOF rewrite * ------------------------------------------------------------------------- */ /* Delegate writing an object to writing a bulk string or bulk long long. * This is not placed in rio.c since that adds the disque.h dependency. */ int rioWriteBulkObject(rio *r, robj *obj) { /* Avoid using getDecodedObject to help copy-on-write (we are often * in a child process when this function is called). */ if (obj->encoding == OBJ_ENCODING_INT) { return rioWriteBulkLongLong(r,(long)obj->ptr); } else if (sdsEncodedObject(obj)) { return rioWriteBulkString(r,obj->ptr,sdslen(obj->ptr)); } else { serverPanic("Unknown string encoding"); } } /* This function is called by the child rewriting the AOF file to read * the difference accumulated from the parent into a buffer, that is * concatenated at the end of the rewrite. */ ssize_t aofReadDiffFromParent(void) { char buf[65536]; /* Default pipe buffer size on most Linux systems. */ ssize_t nread, total = 0; while ((nread = read(server.aof_pipe_read_data_from_parent,buf,sizeof(buf))) > 0) { server.aof_child_diff = sdscatlen(server.aof_child_diff,buf,nread); total += nread; } return total; } /* Write a sequence of commands able to fully rebuild the dataset into * "filename". Used both by BGREWRITEAOF and SHUTDOWN REWRITE. * * If 'background' is non-zero, the function assumes to be running in the * context of a child process, so it will attempt to read the accumulated * difference buffer from the parent process (all the writes happening in the * parent side while we rewrite the AOF). Otherwise if 'background' is zero * the function assumes we are just doing a foreground rewrite, for example * via the SHUTDOWN REWRITE command, and no parent diff is read. * * Return value: on success C_OK, on error C_ERR. */ int rewriteAppendOnlyFile(char *filename, int background) { dictIterator *di = NULL; rio aof; FILE *fp; char tmpfile[256]; char byte; /* Note that we have to use a different temp name here compared to the * one used by rewriteAppendOnlyFileBackground() function.
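* (This path uses "temp-rewriteaof-<pid>.aof" while the background variant below uses "temp-rewriteaof-bg-<pid>.aof", so a foreground rewrite can never collide with the temp file of an in-progress background one.)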
*/ snprintf(tmpfile,256,"temp-rewriteaof-%d.aof", (int) getpid()); fp = fopen(tmpfile,"w"); if (!fp) { serverLog(LL_WARNING, "Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s", strerror(errno)); return C_ERR; } if (background) server.aof_child_diff = sdsempty(); rioInitWithFile(&aof,fp); if (server.aof_rewrite_incremental_fsync) rioSetAutoSync(&aof,AOF_AUTOSYNC_BYTES); /* Rewrite jobs that are in interesting states, active or queued, * as LOADJOB commands. */ di = dictGetIterator(server.jobs); struct dictEntry *de; while((de = dictNext(di)) != NULL) { job *job = dictGetKey(de); if (job->state != JOB_STATE_ACTIVE && job->state != JOB_STATE_QUEUED) continue; char cmd[] = "*2\r\n$7\r\nLOADJOB\r\n"; sds serialized = serializeJob(sdsempty(),job,SER_STORAGE); if (rioWrite(&aof,cmd,sizeof(cmd)-1) == 0) goto werr; if (rioWriteBulkString(&aof,serialized,sdslen(serialized)) == 0) goto werr; sdsfree(serialized); } dictReleaseIterator(di); di = NULL; /* Don't free it at end. */ /* If this is a synchronous rewrite, skip all the child-parent * handshake about the difference buffer. */ if (!background) goto flush_and_rename; /* Do an initial slow fsync here while the parent is still sending * data, in order to make the next final fsync faster. */ if (fflush(fp) == EOF) goto werr; if (fsync(fileno(fp)) == -1) goto werr; /* Read again a few times to get more data from the parent. * We can't read forever (the server may receive data from clients * faster than it is able to send data to the child), so we try to read * some more data in a loop as soon as there is a good chance more data * will come. If it looks like we are wasting time, we abort (this * happens after 20 ms without new data). */ int nodata = 0; mstime_t start = mstime(); while(mstime()-start < 1000 && nodata < 20) { if (aeWait(server.aof_pipe_read_data_from_parent, AE_READABLE, 1) <= 0) { nodata++; continue; } nodata = 0; /* Start counting from zero, we stop on N *contiguous* timeouts. */ aofReadDiffFromParent(); } /* Ask the parent to stop sending diffs. */ if (write(server.aof_pipe_write_ack_to_parent,"!",1) != 1) goto werr; if (anetNonBlock(NULL,server.aof_pipe_read_ack_from_parent) != ANET_OK) goto werr; /* We read the ACK from the parent using a 5 seconds timeout. Normally * it should reply ASAP, but just in case we lose its reply, we are sure * the child will eventually get terminated. */ if (syncRead(server.aof_pipe_read_ack_from_parent,&byte,1,5000) != 1 || byte != '!') goto werr; serverLog(LL_NOTICE,"Parent agreed to stop sending diffs. Finalizing AOF..."); /* Read the final diff if any. */ aofReadDiffFromParent(); /* Write the received diff to the file. */ serverLog(LL_NOTICE, "Concatenating %.2f MB of AOF diff received from parent.", (double) sdslen(server.aof_child_diff) / (1024*1024)); if (rioWrite(&aof,server.aof_child_diff,sdslen(server.aof_child_diff)) == 0) goto werr; flush_and_rename: /* Make sure data will not remain on the OS's output buffers */ if (fflush(fp) == EOF) goto werr; if (fsync(fileno(fp)) == -1) goto werr; if (fclose(fp) == EOF) goto werr; /* Use RENAME to make sure the AOF file is changed atomically only * if the generated AOF file is ok.
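* (rename(2) over an existing path is atomic on POSIX filesystems: readers observe either the old AOF or the complete new one, never a mix of the two.)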
*/ if (rename(tmpfile,filename) == -1) { serverLog(LL_WARNING,"Error moving temp append only file on the final destination: %s", strerror(errno)); unlink(tmpfile); return C_ERR; } serverLog(LL_NOTICE,"SYNC append only file rewrite performed"); return C_OK; werr: serverLog(LL_WARNING,"Write error writing append only file on disk: %s", strerror(errno)); fclose(fp); unlink(tmpfile); if (di) dictReleaseIterator(di); return C_ERR; } /* ---------------------------------------------------------------------------- * AOF rewrite pipes for IPC * -------------------------------------------------------------------------- */ /* This event handler is called when the AOF rewriting child sends us a * single '!' char to signal we should stop sending buffer diffs. The * parent sends a '!' as well to acknowledge. */ void aofChildPipeReadable(aeEventLoop *el, int fd, void *privdata, int mask) { char byte; UNUSED(el); UNUSED(privdata); UNUSED(mask); if (read(fd,&byte,1) == 1 && byte == '!') { serverLog(LL_NOTICE,"AOF rewrite child asks to stop sending diffs."); server.aof_stop_sending_diff = 1; if (write(server.aof_pipe_write_ack_to_child,"!",1) != 1) { /* If we can't send the ack, inform the user, but don't try again * since on the other side the child will use a timeout if the * kernel can't buffer our write, or the child was * terminated. */ serverLog(LL_WARNING,"Can't send ACK to AOF child: %s", strerror(errno)); } } /* Remove the handler since this can be called only one time during a * rewrite. */ aeDeleteFileEvent(server.el,server.aof_pipe_read_ack_from_child,AE_READABLE); } /* Create the pipes used for parent - child process IPC during rewrite. * We have a data pipe used to send AOF incremental diffs to the child, * a second pipe used by the child to signal it finished with * the rewrite so no more data should be written, and a third for the * parent to acknowledge it understood this new condition. */ int aofCreatePipes(void) { int fds[6] = {-1, -1, -1, -1, -1, -1}; int j; if (pipe(fds) == -1) goto error; /* parent -> children data. */ if (pipe(fds+2) == -1) goto error; /* children -> parent ack. */ if (pipe(fds+4) == -1) goto error; /* parent -> children ack. */ /* Parent -> children data is non blocking.
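* (The parent must never stall while feeding diffs: when the pipe is full the write() in aofChildWriteDiffData() simply fails and is retried from the event loop on the next writable event.)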
*/ if (anetNonBlock(NULL,fds[0]) != ANET_OK) goto error; if (anetNonBlock(NULL,fds[1]) != ANET_OK) goto error; if (aeCreateFileEvent(server.el, fds[2], AE_READABLE, aofChildPipeReadable, NULL) == AE_ERR) goto error; server.aof_pipe_write_data_to_child = fds[1]; server.aof_pipe_read_data_from_parent = fds[0]; server.aof_pipe_write_ack_to_parent = fds[3]; server.aof_pipe_read_ack_from_child = fds[2]; server.aof_pipe_write_ack_to_child = fds[5]; server.aof_pipe_read_ack_from_parent = fds[4]; server.aof_stop_sending_diff = 0; return C_OK; error: serverLog(LL_WARNING,"Error opening/setting AOF rewrite IPC pipes: %s", strerror(errno)); for (j = 0; j < 6; j++) if(fds[j] != -1) close(fds[j]); return C_ERR; } void aofClosePipes(void) { aeDeleteFileEvent(server.el,server.aof_pipe_read_ack_from_child,AE_READABLE); aeDeleteFileEvent(server.el,server.aof_pipe_write_data_to_child,AE_WRITABLE); close(server.aof_pipe_write_data_to_child); close(server.aof_pipe_read_data_from_parent); close(server.aof_pipe_write_ack_to_parent); close(server.aof_pipe_read_ack_from_child); close(server.aof_pipe_write_ack_to_child); close(server.aof_pipe_read_ack_from_parent); } /* ---------------------------------------------------------------------------- * AOF background rewrite * ------------------------------------------------------------------------- */ /* This is how rewriting of the append only file in background works: * * 1) The user calls BGREWRITEAOF * 2) Disque calls this function, that forks(): * 2a) the child rewrites the append only file in a temp file. * 2b) the parent accumulates differences in server.aof_rewrite_buf. * 3) When the child finishes step '2a', it exits. * 4) The parent will trap the exit code and, if it's OK, will append the * data accumulated into server.aof_rewrite_buf to the temp file, and * finally will rename(2) the temp file to the actual file name. * Then the new file is reopened as the new append only file. Profit! */ int rewriteAppendOnlyFileBackground(void) { pid_t childpid; long long start; if (server.aof_child_pid != -1) return C_ERR; if (aofCreatePipes() != C_OK) return C_ERR; start = ustime(); if ((childpid = fork()) == 0) { char tmpfile[256]; /* Child */ closeListeningSockets(0); serverSetProcTitle("disque-aof-rewrite"); snprintf(tmpfile,256,"temp-rewriteaof-bg-%d.aof", (int) getpid()); if (rewriteAppendOnlyFile(tmpfile,1) == C_OK) { size_t private_dirty = zmalloc_get_private_dirty(); if (private_dirty) { serverLog(LL_NOTICE, "AOF rewrite: %zu MB of memory used by copy-on-write", private_dirty/(1024*1024)); } exitFromChild(0); } else { exitFromChild(1); } } else { /* Parent */ server.stat_fork_time = ustime()-start; server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_time / (1024*1024*1024); /* GB per second. */ latencyAddSampleIfNeeded("fork",server.stat_fork_time/1000); if (childpid == -1) { serverLog(LL_WARNING, "Can't rewrite append only file in background: fork: %s", strerror(errno)); return C_ERR; } serverLog(LL_NOTICE, "Background append only file rewriting started by pid %d",childpid); server.aof_rewrite_scheduled = 0; server.aof_rewrite_time_start = time(NULL); server.aof_child_pid = childpid; updateDictResizePolicy(); /* We set appendseldb to -1 in order to force the next call to the * feedAppendOnlyFile() to issue a SELECT command, so the differences * accumulated by the parent into server.aof_rewrite_buf will start * with a SELECT statement and it will be safe to merge.
*/ server.aof_selected_db = -1; return C_OK; } return C_OK; /* unreached */ } void bgrewriteaofCommand(client *c) { if (server.aof_child_pid != -1) { addReplyError(c,"Background append only file rewriting already in progress"); } else if (rewriteAppendOnlyFileBackground() == C_OK) { addReplyStatus(c,"Background append only file rewriting started"); } else { addReply(c,shared.err); } } void aofRemoveTempFile(pid_t childpid) { char tmpfile[256]; snprintf(tmpfile,256,"temp-rewriteaof-bg-%d.aof", (int) childpid); unlink(tmpfile); } /* Update the server.aof_current_size field explicitly using stat(2) * to check the size of the file. This is useful after a rewrite or after * a restart, normally the size is updated just adding the write length * to the current length, that is much faster. */ void aofUpdateCurrentSize(void) { struct disque_stat sb; mstime_t latency; latencyStartMonitor(latency); if (disque_fstat(server.aof_fd,&sb) == -1) { serverLog(LL_WARNING,"Unable to obtain the AOF file length. stat: %s", strerror(errno)); } else { server.aof_current_size = sb.st_size; } latencyEndMonitor(latency); latencyAddSampleIfNeeded("aof-fstat",latency); } /* A background append only file rewriting (BGREWRITEAOF) terminated its work. * Handle this. */ void backgroundRewriteDoneHandler(int exitcode, int bysignal) { if (!bysignal && exitcode == 0) { int newfd, oldfd; char tmpfile[256]; long long now = ustime(); mstime_t latency; serverLog(LL_NOTICE, "Background AOF rewrite terminated with success"); /* Flush the differences accumulated by the parent to the * rewritten AOF. */ latencyStartMonitor(latency); snprintf(tmpfile,256,"temp-rewriteaof-bg-%d.aof", (int)server.aof_child_pid); newfd = open(tmpfile,O_WRONLY|O_APPEND); if (newfd == -1) { serverLog(LL_WARNING, "Unable to open the temporary AOF produced by the child: %s", strerror(errno)); goto cleanup; } if (aofRewriteBufferWrite(newfd) == -1) { serverLog(LL_WARNING, "Error trying to flush the parent diff to the rewritten AOF: %s", strerror(errno)); close(newfd); goto cleanup; } latencyEndMonitor(latency); latencyAddSampleIfNeeded("aof-rewrite-diff-write",latency); serverLog(LL_NOTICE, "Residual parent diff successfully flushed to the rewritten AOF (%.2f MB)", (double) aofRewriteBufferSize() / (1024*1024)); /* The only remaining thing to do is to rename the temporary file to * the configured file and switch the file descriptor used to do AOF * writes. We don't want close(2) or rename(2) calls to block the * server on old file deletion. * * There are two possible scenarios: * * 1) AOF is DISABLED and this was a one time rewrite. The temporary * file will be renamed to the configured file. When this file already * exists, it will be unlinked, which may block the server. * * 2) AOF is ENABLED and the rewritten AOF will immediately start * receiving writes. After the temporary file is renamed to the * configured file, the original AOF file descriptor will be closed. * Since this will be the last reference to that file, closing it * causes the underlying file to be unlinked, which may block the * server. * * To mitigate the blocking effect of the unlink operation (either * caused by rename(2) in scenario 1, or by close(2) in scenario 2), we * use a background thread to take care of this. First, we * make scenario 1 identical to scenario 2 by opening the target file * when it exists. The unlink operation after the rename(2) will then * be executed upon calling close(2) for its descriptor. 
Everything to * guarantee atomicity for this switch has already happened by then, so * we don't care what the outcome or duration of that close operation * is, as long as the file descriptor is released again. */ if (server.aof_fd == -1) { /* AOF disabled */ /* Don't care if this fails: oldfd will be -1 and we handle that. * One notable case of -1 return is if the old file does * not exist. */ oldfd = open(server.aof_filename,O_RDONLY|O_NONBLOCK); } else { /* AOF enabled */ oldfd = -1; /* We'll set this to the current AOF filedes later. */ } /* Rename the temporary file. This will not unlink the target file if * it exists, because we reference it with "oldfd". */ latencyStartMonitor(latency); if (rename(tmpfile,server.aof_filename) == -1) { serverLog(LL_WARNING, "Error trying to rename the temporary AOF file: %s", strerror(errno)); close(newfd); if (oldfd != -1) close(oldfd); goto cleanup; } latencyEndMonitor(latency); latencyAddSampleIfNeeded("aof-rename",latency); if (server.aof_fd == -1) { /* AOF disabled, we don't need to set the AOF file descriptor * to this new file, so we can close it. */ close(newfd); } else { /* AOF enabled, replace the old fd with the new one. */ oldfd = server.aof_fd; server.aof_fd = newfd; if (server.aof_fsync == AOF_FSYNC_ALWAYS) aof_fsync(newfd); else if (server.aof_fsync == AOF_FSYNC_EVERYSEC) aof_background_fsync(newfd); server.aof_selected_db = -1; /* Make sure SELECT is re-issued */ aofUpdateCurrentSize(); server.aof_rewrite_base_size = server.aof_current_size; /* Clear regular AOF buffer since its contents was just written to * the new AOF from the background rewrite buffer. */ sdsfree(server.aof_buf); server.aof_buf = sdsempty(); } server.aof_lastbgrewrite_status = C_OK; serverLog(LL_NOTICE, "Background AOF rewrite finished successfully"); /* Change state from WAIT_REWRITE to ON if needed */ if (server.aof_state == AOF_WAIT_REWRITE) server.aof_state = AOF_ON; /* Asynchronously close the overwritten AOF. */ if (oldfd != -1) bioCreateBackgroundJob(BIO_CLOSE_FILE,(void*)(long)oldfd,NULL,NULL); serverLog(LL_VERBOSE, "Background AOF rewrite signal handler took %lldus", ustime()-now); } else if (!bysignal && exitcode != 0) { server.aof_lastbgrewrite_status = C_ERR; serverLog(LL_WARNING, "Background AOF rewrite terminated with error"); } else { server.aof_lastbgrewrite_status = C_ERR; serverLog(LL_WARNING, "Background AOF rewrite terminated by signal %d", bysignal); } cleanup: aofClosePipes(); aofRewriteBufferReset(); aofRemoveTempFile(server.aof_child_pid); server.aof_child_pid = -1; server.aof_rewrite_time_last = time(NULL)-server.aof_rewrite_time_start; server.aof_rewrite_time_start = -1; /* Schedule a new rewrite if we are waiting for it to switch the AOF ON. */ if (server.aof_state == AOF_WAIT_REWRITE) server.aof_rewrite_scheduled = 1; } disque-1.0-rc1/src/asciilogo.h000066400000000000000000000040711264176561100162530ustar00rootroot00000000000000/* * Copyright (c) 2009-2012, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ char *ascii_logo = " Disque %s (%s/%d) %s bit\n" " _ - \n" " . Port: %d\n" " . o . PID: %ld\n" " . \n" " - http://disque.io \n" " \n\n"; disque-1.0-rc1/src/atomicvar.h000066400000000000000000000070351264176561100162720ustar00rootroot00000000000000/* This file implements atomic counters using __atomic or __sync macros if * available, otherwise synchronizing different threads using a mutex. * * The exported interface is composed of three macros: * * atomicIncr(var,count,mutex) -- Increment the atomic counter * atomicDecr(var,count,mutex) -- Decrement the atomic counter * atomicGet(var,dstvar,mutex) -- Fetch the atomic counter value * * If atomic primitives are available (tested in config.h) the mutex * is not used. * * Never use the return value of the macros. To update and get use instead: * * atomicIncr(mycounter,...); * atomicGet(mycounter,newvalue); * doSomethingWith(newvalue); * * ---------------------------------------------------------------------------- * * Copyright (c) 2015, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE.
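* * Usage sketch (illustrative only, not part of the original header; 'mycounter' * and 'mycounter_mutex' are hypothetical names a caller would define -- the * mutex is only touched by the pthread-based fallback): * * static long long mycounter = 0; * static pthread_mutex_t mycounter_mutex = PTHREAD_MUTEX_INITIALIZER; * * atomicIncr(mycounter,1,mycounter_mutex); * long long current; * atomicGet(mycounter,current,mycounter_mutex);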
*/ #include <pthread.h> /* Needed by the mutex-based fallback below; header name reconstructed. */ #ifndef __ATOMIC_VAR_H #define __ATOMIC_VAR_H #if defined(__ATOMIC_RELAXED) /* Implementation using __atomic macros. */ #define atomicIncr(var,count,mutex) __atomic_add_fetch(&var,(count),__ATOMIC_RELAXED) #define atomicDecr(var,count,mutex) __atomic_sub_fetch(&var,(count),__ATOMIC_RELAXED) #define atomicGet(var,dstvar,mutex) do { \ dstvar = __atomic_load_n(&var,__ATOMIC_RELAXED); \ } while(0) #elif defined(HAVE_ATOMIC) /* Implementation using __sync macros. */ #define atomicIncr(var,count,mutex) __sync_add_and_fetch(&var,(count)) #define atomicDecr(var,count,mutex) __sync_sub_and_fetch(&var,(count)) #define atomicGet(var,dstvar,mutex) do { \ dstvar = __sync_sub_and_fetch(&var,0); \ } while(0) #else /* Implementation using pthread mutex. */ #define atomicIncr(var,count,mutex) do { \ pthread_mutex_lock(&mutex); \ var += (count); \ pthread_mutex_unlock(&mutex); \ } while(0) #define atomicDecr(var,count,mutex) do { \ pthread_mutex_lock(&mutex); \ var -= (count); \ pthread_mutex_unlock(&mutex); \ } while(0) #define atomicGet(var,dstvar,mutex) do { \ pthread_mutex_lock(&mutex); \ dstvar = var; \ pthread_mutex_unlock(&mutex); \ } while(0) #endif #endif /* __ATOMIC_VAR_H */ disque-1.0-rc1/src/bio.c000066400000000000000000000230371264176561100150510ustar00rootroot00000000000000/* Background I/O service for Disque. * * This file implements operations that we need to perform in the background. * Currently there are two such operations: a background close(2) system call * and a background fsync against the AOF file descriptor. The background * close(2) is needed as when the process is the last owner of a * reference to a file closing it means unlinking it, and the deletion of the * file is slow, blocking the server. * * In the future we'll either continue implementing new things we need or * we'll switch to libeio. However there are probably long term uses for this * file as we may want to put here Disque specific background tasks. * * DESIGN * ------ * * The design is trivial, we have a structure representing a job to perform * and a different thread and job queue for every job type. * Every thread waits for new jobs in its queue, and processes every job * sequentially. * * Jobs of the same type are guaranteed to be processed from the least * recently inserted to the most recently inserted (older jobs processed * first). * * Currently there is no way for the creator of the job to be notified about * the completion of the operation, this will only be added when/if needed. * * ---------------------------------------------------------------------------- * * Copyright (c) 2009-2012, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "server.h" #include "bio.h" static pthread_t bio_threads[BIO_NUM_OPS]; static pthread_mutex_t bio_mutex[BIO_NUM_OPS]; static pthread_cond_t bio_newjob_cond[BIO_NUM_OPS]; static pthread_cond_t bio_step_cond[BIO_NUM_OPS]; static list *bio_jobs[BIO_NUM_OPS]; /* The following array is used to hold the number of pending jobs for every * OP type. This allows us to export the bioPendingJobsOfType() API that is * useful when the main thread wants to perform some operation that may involve * objects shared with the background thread. The main thread will just wait * that there are no longer jobs of this type to be executed before performing * the sensible operation. This data is also useful for reporting. */ static unsigned long long bio_pending[BIO_NUM_OPS]; /* This structure represents a background Job. It is only used locally to this * file as the API does not expose the internals at all. */ struct bio_job { time_t time; /* Time at which the job was created. */ /* Job specific arguments pointers. If we need to pass more than three * arguments we can just pass a pointer to a structure or alike. */ void *arg1, *arg2, *arg3; }; void *bioProcessBackgroundJobs(void *arg); /* Make sure we have enough stack to perform all the things we do in the * main thread. */ #define DISQUE_THREAD_STACK_SIZE (1024*1024*4) /* Initialize the background system, spawning the thread. */ void bioInit(void) { pthread_attr_t attr; pthread_t thread; size_t stacksize; int j; /* Initialization of state vars and objects */ for (j = 0; j < BIO_NUM_OPS; j++) { pthread_mutex_init(&bio_mutex[j],NULL); pthread_cond_init(&bio_newjob_cond[j],NULL); pthread_cond_init(&bio_step_cond[j],NULL); bio_jobs[j] = listCreate(); bio_pending[j] = 0; } /* Set the stack size as by default it may be small in some system */ pthread_attr_init(&attr); pthread_attr_getstacksize(&attr,&stacksize); if (!stacksize) stacksize = 1; /* The world is full of Solaris Fixes */ while (stacksize < DISQUE_THREAD_STACK_SIZE) stacksize *= 2; pthread_attr_setstacksize(&attr, stacksize); /* Ready to spawn our threads. We use the single argument the thread * function accepts in order to pass the job ID the thread is * responsible of. 
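* (The ID is smuggled through the single void* argument by casting the integer to a pointer here, and cast back to an unsigned long at the top of bioProcessBackgroundJobs().)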
*/ for (j = 0; j < BIO_NUM_OPS; j++) { void *arg = (void*)(unsigned long) j; if (pthread_create(&thread,&attr,bioProcessBackgroundJobs,arg) != 0) { serverLog(LL_WARNING,"Fatal: Can't initialize Background Jobs."); exit(1); } bio_threads[j] = thread; } } void bioCreateBackgroundJob(int type, void *arg1, void *arg2, void *arg3) { struct bio_job *job = zmalloc(sizeof(*job)); job->time = time(NULL); job->arg1 = arg1; job->arg2 = arg2; job->arg3 = arg3; pthread_mutex_lock(&bio_mutex[type]); listAddNodeTail(bio_jobs[type],job); bio_pending[type]++; pthread_cond_signal(&bio_newjob_cond[type]); pthread_mutex_unlock(&bio_mutex[type]); } void *bioProcessBackgroundJobs(void *arg) { struct bio_job *job; unsigned long type = (unsigned long) arg; sigset_t sigset; /* Make the thread killable at any time, so that bioKillThreads() * can work reliably. */ pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); pthread_mutex_lock(&bio_mutex[type]); /* Block SIGALRM so we are sure that only the main thread will * receive the watchdog signal. */ sigemptyset(&sigset); sigaddset(&sigset, SIGALRM); if (pthread_sigmask(SIG_BLOCK, &sigset, NULL)) serverLog(LL_WARNING, "Warning: can't mask SIGALRM in bio.c thread: %s", strerror(errno)); while(1) { listNode *ln; /* The loop always starts with the lock held. */ if (listLength(bio_jobs[type]) == 0) { pthread_cond_wait(&bio_newjob_cond[type],&bio_mutex[type]); continue; } /* Pop the job from the queue. */ ln = listFirst(bio_jobs[type]); job = ln->value; /* It is now possible to unlock the background system as we now have * a stand-alone job structure to process.*/ pthread_mutex_unlock(&bio_mutex[type]); /* Process the job according to its type. */ if (type == BIO_CLOSE_FILE) { close((long)job->arg1); } else if (type == BIO_AOF_FSYNC) { aof_fsync((long)job->arg1); } else { serverPanic("Wrong job type in bioProcessBackgroundJobs()."); } zfree(job); /* Unblock threads blocked on bioWaitStepOfType() if any. */ pthread_cond_broadcast(&bio_step_cond[type]); /* Lock again before reiterating the loop, if there are no longer * jobs to process we'll block again in pthread_cond_wait(). */ pthread_mutex_lock(&bio_mutex[type]); listDelNode(bio_jobs[type],ln); bio_pending[type]--; } } /* Return the number of pending jobs of the specified type. */ unsigned long long bioPendingJobsOfType(int type) { unsigned long long val; pthread_mutex_lock(&bio_mutex[type]); val = bio_pending[type]; pthread_mutex_unlock(&bio_mutex[type]); return val; } /* If there are pending jobs for the specified type, the function blocks * and waits until the next job is processed. Otherwise the function * does not block and returns ASAP. * * The function returns the number of jobs still to process of the * requested type. * * This function is useful when, from another thread, we want to wait for * a bio.c thread to do more work in a blocking way. */ unsigned long long bioWaitStepOfType(int type) { unsigned long long val; pthread_mutex_lock(&bio_mutex[type]); val = bio_pending[type]; if (val != 0) { pthread_cond_wait(&bio_step_cond[type],&bio_mutex[type]); val = bio_pending[type]; } pthread_mutex_unlock(&bio_mutex[type]); return val; } /* Kill the running bio threads in an unclean way. This function should be * used only when it's critical to stop the threads for some reason. * Currently Disque does this only on crash (for instance on SIGSEGV) in order * to perform a fast memory check without other threads messing with memory.
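* (This relies on bioProcessBackgroundJobs() enabling asynchronous cancellation as the very first thing it does, so pthread_cancel() can interrupt the threads at any point.)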
*/ void bioKillThreads(void) { int err, j; for (j = 0; j < BIO_NUM_OPS; j++) { if (pthread_cancel(bio_threads[j]) == 0) { if ((err = pthread_join(bio_threads[j],NULL)) != 0) { serverLog(LL_WARNING, "Bio thread for job type #%d can not be joined: %s", j, strerror(err)); } else { serverLog(LL_WARNING, "Bio thread for job type #%d terminated",j); } } } } disque-1.0-rc1/src/bio.h000066400000000000000000000040151264176561100150510ustar00rootroot00000000000000/* * Copyright (c) 2009-2012, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission.
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * --------------------------------------------------------------------------- * * API: * * getTimeoutFromObjectOrReply() is just a utility function to parse a * timeout argument since blocking operations usually require a timeout. * * blockClient() sets the CLIENT_BLOCKED flag in the client, and sets the * specified block type 'btype' field to one of the BLOCKED_* macros. * * unblockClient() unblocks the client doing the following: * 1) It calls the btype-specific function to cleanup the state. * 2) It unblocks the client by unsetting the CLIENT_BLOCKED flag. * 3) It puts the client into a list of just unblocked clients that are * processed ASAP in the beforeSleep() event loop callback, so that * if there is some query buffer to process, we do it. This is also * required because otherwise there is no 'readable' event fired, as we * already read the pending commands. We also set the CLIENT_UNBLOCKED * flag to remember the client is in the unblocked_clients list. * * processUnblockedClients() is called inside the beforeSleep() function * to process the query buffer from unblocked clients and remove the clients * from the blocked_clients queue. * * replyToBlockedClientTimedOut() is called by the cron function when * a blocked client reaches the specified timeout (if the timeout is set * to 0, no timeout is processed). * It usually just needs to send a reply to the client. * * When implementing a new type of blocking operation, the implementation * should modify unblockClient() and replyToBlockedClientTimedOut() in order * to handle the btype-specific behavior of these two functions. */ #include "server.h" #include "job.h" #include "queue.h" /* Get a timeout value from an object and store it into 'timeout'. * The final timeout is always stored as milliseconds as a time where the * timeout will expire, however the parsing is performed according to * the 'unit' that can be seconds or milliseconds. * * Note that if the timeout is zero (usually from the point of view of * commands API this means no timeout) the value stored into 'timeout' * is zero. */ int getTimeoutFromObjectOrReply(client *c, robj *object, mstime_t *timeout, int unit) { long long tval; if (getLongLongFromObjectOrReply(c,object,&tval, "timeout is not an integer or out of range") != C_OK) return C_ERR; if (tval < 0) { addReplyError(c,"timeout is negative"); return C_ERR; } if (tval > 0) { if (unit == UNIT_SECONDS) tval *= 1000; tval += mstime(); } *timeout = tval; return C_OK; } /* Block a client for the specific operation type. Once the CLIENT_BLOCKED * flag is set the client query buffer is no longer processed, but accumulated, * and will be processed when the client is unblocked.
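* (For example, a GETJOB with no jobs available blocks the client with btype BLOCKED_GETJOB; later, when a job arrives or the timeout fires, unblockClient() or replyToBlockedClientTimedOut() run, and the client is drained from server.unblocked_clients in beforeSleep().)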
*/ void blockClient(client *c, int btype) { c->flags |= CLIENT_BLOCKED; c->btype = btype; server.bpop_blocked_clients++; } /* This function is called in the beforeSleep() function of the event loop * in order to process the pending input buffer of clients that were * unblocked after a blocking operation. */ void processUnblockedClients(void) { listNode *ln; client *c; while (listLength(server.unblocked_clients)) { ln = listFirst(server.unblocked_clients); serverAssert(ln != NULL); c = ln->value; listDelNode(server.unblocked_clients,ln); c->flags &= ~CLIENT_UNBLOCKED; /* Note that the client may be blocked again at this point, since * a new blocking command was processed. In that case we just remove * it from the unblocked clients list without actually processing * its pending query buffer. */ if (!(c->flags & CLIENT_BLOCKED)) { c->btype = BLOCKED_NONE; /* Process remaining data in the input buffer. */ if (c->querybuf && sdslen(c->querybuf) > 0) { processInputBuffer(c); } } } } /* Unblock a client by calling the right function depending on the kind * of operation the client is blocking for. */ void unblockClient(client *c) { if (c->btype == BLOCKED_JOB_REPL) { unblockClientWaitingJobRepl(c); } else if (c->btype == BLOCKED_GETJOB) { unblockClientBlockedForJobs(c); } else { serverPanic("Unknown btype in unblockClient()."); } /* Clear the flags, and put the client in the unblocked list so that * we'll process new commands in its query buffer ASAP. */ c->flags &= ~CLIENT_BLOCKED; c->btype = BLOCKED_NONE; server.bpop_blocked_clients--; /* The client may already be in the unblocked list because of a previous * blocking operation, don't add it back into the list multiple times. */ if (!(c->flags & CLIENT_UNBLOCKED)) { c->flags |= CLIENT_UNBLOCKED; listAddNodeTail(server.unblocked_clients,c); } } /* This function gets called when a blocked client times out in order to * send it a reply of some kind. */ void replyToBlockedClientTimedOut(client *c) { if (c->btype == BLOCKED_JOB_REPL) { addReplySds(c, sdsnew("-NOREPL Timeout reached before replicating to " "the requested number of nodes\r\n")); return; } else if (c->btype == BLOCKED_GETJOB) { addReply(c,shared.nullmultibulk); } else { serverPanic("Unknown btype in replyToBlockedClientTimedOut()."); } } disque-1.0-rc1/src/cluster.c000066400000000000000000003134201264176561100157570ustar00rootroot00000000000000/* Disque Cluster implementation. * * Copyright (c) 2009-2012, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "server.h" #include "cluster.h" #include "endianconv.h" #include "ack.h" /* NOTE: the names of the system headers below were lost in this copy; the list is reconstructed from what this file uses (sockets, flock(), fstat(), open(), errno, math) and may not match the original exactly. */ #include <sys/types.h> #include <sys/socket.h> #include <arpa/inet.h> #include <fcntl.h> #include <unistd.h> #include <sys/stat.h> #include <sys/file.h> #include <errno.h> #include <math.h> /* A global reference to myself is handy to make code more clear. * Myself always points to server.cluster->myself, that is, the clusterNode * that represents this node. */ clusterNode *myself = NULL; clusterNode *createClusterNode(char *nodename, int flags); int clusterAddNode(clusterNode *node); void clusterAcceptHandler(aeEventLoop *el, int fd, void *privdata, int mask); void clusterReadHandler(aeEventLoop *el, int fd, void *privdata, int mask); void clusterSendPing(clusterLink *link, int type); void clusterSendFail(char *nodename); void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request); void clusterUpdateState(void); int clusterNodeGetSlotBit(clusterNode *n, int slot); sds clusterGenNodesDescription(int filter); int clusterNodeAddSlave(clusterNode *master, clusterNode *slave); void clusterSetMaster(clusterNode *n); void clusterDoBeforeSleep(int flags); void clusterSendUpdate(clusterLink *link, clusterNode *node); void clusterSetNodeAsMaster(clusterNode *n); void clusterDelNode(clusterNode *delnode); void clusterShuffleReachableNodes(void); void clusterSendGotJob(clusterNode *node, job *j); void clusterSendGotAck(clusterNode *node, char *jobid, int known); void clusterBroadcastQueued(job *j, unsigned char flags); void clusterBroadcastDelJob(job *j); sds representClusterNodeFlags(sds ci, uint16_t flags); /* ----------------------------------------------------------------------------- * Initialization * -------------------------------------------------------------------------- */ /* Load the cluster config from 'filename'. * * If the file does not exist or is zero-length (this may happen because * when we lock the nodes.conf file, we create a zero-length one for the * sake of locking if it does not already exist), C_ERR is returned. * If the configuration was loaded from the file, C_OK is returned. */ int clusterLoadConfig(char *filename) { FILE *fp = fopen(filename,"r"); struct stat sb; char *line; int maxline, j; if (fp == NULL) { if (errno == ENOENT) { return C_ERR; } else { serverLog(LL_WARNING, "Loading the cluster node config from %s: %s", filename, strerror(errno)); exit(1); } } /* Check if the file is zero-length: if so return C_ERR to signal * we have to write the config. */ if (fstat(fileno(fp),&sb) != -1 && sb.st_size == 0) { fclose(fp); return C_ERR; } /* Parse the file. */ maxline = 1024; line = zmalloc(maxline); while(fgets(line,maxline,fp) != NULL) { int argc; sds *argv; clusterNode *n; char *p, *s; /* Skip blank lines, they can be created either by users manually * editing nodes.conf or by the config writing process if stopped * before the truncate() call. */ if (line[0] == '\n') continue; /* Split the line into arguments for processing. */ argv = sdssplitargs(line,&argc); if (argv == NULL) goto fmterr; /* Handle the special "vars" line.
Don't pretend it is the last * line even if it actually is when generated by Disque. */ if (strcasecmp(argv[0],"vars") == 0) { for (j = 1; j < argc; j += 2) { if (strcasecmp(argv[j],"someVarNameHere") == 0) { /* TODO: currently not used. */ } else { serverLog(LL_WARNING, "Skipping unknown cluster config variable '%s'", argv[j]); } } sdsfreesplitres(argv,argc); continue; } /* Regular config lines have exactly six fields */ if (argc != 6) goto fmterr; /* Create this node if it does not exist */ if (strlen(argv[0]) != CLUSTER_NAMELEN) goto fmterr; n = clusterLookupNode(argv[0]); if (!n) { n = createClusterNode(argv[0],0); clusterAddNode(n); } /* Address and port */ if ((p = strrchr(argv[1],':')) == NULL) goto fmterr; *p = '\0'; memcpy(n->ip,argv[1],strlen(argv[1])+1); n->port = atoi(p+1); /* Parse flags */ p = s = argv[2]; while(p) { p = strchr(s,','); if (p) *p = '\0'; if (!strcasecmp(s,"myself")) { serverAssert(server.cluster->myself == NULL); myself = server.cluster->myself = n; n->flags |= CLUSTER_NODE_MYSELF; } else if (!strcasecmp(s,"fail?")) { n->flags |= CLUSTER_NODE_PFAIL; } else if (!strcasecmp(s,"fail")) { n->flags |= CLUSTER_NODE_FAIL; n->fail_time = mstime(); } else if (!strcasecmp(s,"handshake")) { n->flags |= CLUSTER_NODE_HANDSHAKE; } else if (!strcasecmp(s,"noaddr")) { n->flags |= CLUSTER_NODE_NOADDR; } else if (!strcasecmp(s,"leaving")) { n->flags |= CLUSTER_NODE_LEAVING; } else if (!strcasecmp(s,"noflags")) { /* nothing to do */ } else { serverPanic("Unknown flag in disque cluster config file"); } if (p) s = p+1; } /* Set ping sent / pong received timestamps */ if (atoi(argv[3])) n->ping_sent = mstime(); if (atoi(argv[4])) n->pong_received = mstime(); sdsfreesplitres(argv,argc); } /* Config sanity check */ if (server.cluster->myself == NULL) goto fmterr; zfree(line); fclose(fp); serverLog(LL_NOTICE,"Node configuration loaded, I'm %.40s", myself->name); return C_OK; fmterr: serverLog(LL_WARNING, "Unrecoverable error: corrupted cluster config file."); zfree(line); if (fp) fclose(fp); exit(1); } /* Cluster node configuration is exactly the same as CLUSTER NODES output. * * This function writes the node config and returns 0, on error -1 * is returned. * * Note: we need to write the file in an atomic way from the point of view * of the POSIX filesystem semantics, so that if the server is stopped * or crashes during the write, we'll end with either the old file or the * new one. Since we have the full payload to write available we can use * a single write to write the whole file. If the pre-existing file was * bigger we pad our payload with newlines that are anyway ignored and truncate * the file afterward. */ int clusterSaveConfig(int do_fsync) { sds ci; size_t content_size; struct stat sb; int fd; server.cluster->todo_before_sleep &= ~CLUSTER_TODO_SAVE_CONFIG; /* Get the nodes description and concatenate our "vars" directive to * save other persistent state. */ ci = clusterGenNodesDescription(CLUSTER_NODE_HANDSHAKE); #if 0 /* TODO: check if we are going to use persistent vars. */ ci = sdscatprintf(ci,"vars currentEpoch %llu lastVoteEpoch %llu\n", (unsigned long long) server.cluster->currentEpoch, (unsigned long long) server.cluster->lastVoteEpoch); #endif content_size = sdslen(ci); if ((fd = open(server.cluster_configfile,O_WRONLY|O_CREAT,0644)) == -1) goto err; /* Pad the new payload if the existing file length is greater.
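* (Writing a payload at least as large as the previous file in a single write(2), then chopping the newline padding off with ftruncate(), means a crash at any point leaves either the old content or the new one on disk, never a shorter, half-updated file.)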
*/ if (fstat(fd,&sb) != -1) { if (sb.st_size > (off_t)content_size) { ci = sdsgrowzero(ci,sb.st_size); memset(ci+content_size,'\n',sb.st_size-content_size); } } if (write(fd,ci,sdslen(ci)) != (ssize_t)sdslen(ci)) goto err; if (do_fsync) { server.cluster->todo_before_sleep &= ~CLUSTER_TODO_FSYNC_CONFIG; fsync(fd); } /* Truncate the file if needed to remove the final \n padding that * is just garbage. */ if (content_size != sdslen(ci) && ftruncate(fd,content_size) == -1) { /* ftruncate() failing is not a critical error. */ } close(fd); sdsfree(ci); return 0; err: if (fd != -1) close(fd); sdsfree(ci); return -1; } void clusterSaveConfigOrDie(int do_fsync) { if (clusterSaveConfig(do_fsync) == -1) { serverLog(LL_WARNING,"Fatal: can't update cluster config file."); exit(1); } } /* Lock the cluster config using flock(), and leak the file descriptor used to * acquire the lock so that the file will be locked forever. * * This works because we always update nodes.conf with a new version * in-place, reopening the file, and writing to it in place (later adjusting * the length with ftruncate()). * * On success C_OK is returned, otherwise an error is logged and * the function returns C_ERR to signal a lock was not acquired. */ int clusterLockConfig(char *filename) { /* flock() does not exist on Solaris * and an fcntl-based solution won't help, as we constantly re-open that file, * which will release _all_ locks anyway */ #if !defined(__sun) /* To lock it, we need to open the file in a way that creates it if * it does not exist, otherwise there is a race condition with other * processes. */ int fd = open(filename,O_WRONLY|O_CREAT,0644); if (fd == -1) { serverLog(LL_WARNING, "Can't open %s in order to acquire a lock: %s", filename, strerror(errno)); return C_ERR; } if (flock(fd,LOCK_EX|LOCK_NB) == -1) { if (errno == EWOULDBLOCK) { serverLog(LL_WARNING, "Sorry, the cluster configuration file %s is already used " "by a different Disque node. Please make sure that " "different nodes use different cluster configuration " "files.", filename); } else { serverLog(LL_WARNING, "Impossible to lock %s: %s", filename, strerror(errno)); } close(fd); return C_ERR; } /* Lock acquired: leak the 'fd' by not closing it, so that we'll retain the * lock to the file as long as the process exists. */ #endif /* __sun */ return C_OK; } void clusterInit(void) { int saveconf = 0; server.cluster = zmalloc(sizeof(clusterState)); server.cluster->myself = NULL; server.cluster->state = CLUSTER_OK; server.cluster->size = 1; server.cluster->todo_before_sleep = 0; server.cluster->nodes = dictCreate(&clusterNodesDictType,NULL); server.cluster->deleted_nodes = dictCreate(&clusterNodesDictType,NULL); server.cluster->nodes_black_list = dictCreate(&clusterNodesBlackListDictType,NULL); server.cluster->stats_bus_messages_sent = 0; server.cluster->stats_bus_messages_received = 0; server.cluster->reachable_nodes = NULL; server.cluster->reachable_nodes_count = 0; /* Lock the cluster config file to make sure every node uses * its own nodes.conf. */ if (clusterLockConfig(server.cluster_configfile) == C_ERR) exit(1); /* Load or create a new nodes configuration. */ if (clusterLoadConfig(server.cluster_configfile) == C_ERR) { /* No configuration found. We will just use the random name provided * by the createClusterNode() function. 
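Node names are CLUSTER_NAMELEN random hex characters, so the node introduces itself with an ID looking like (hypothetical example) 97a3a64667477371c4479320d683e4c8db5858b1, the 40-character form printed by the %.40s logs below. 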
*/ myself = server.cluster->myself = createClusterNode(NULL,CLUSTER_NODE_MYSELF); serverLog(LL_NOTICE,"No cluster configuration found, I'm %.40s", myself->name); clusterAddNode(myself); saveconf = 1; } if (saveconf) clusterSaveConfigOrDie(1); /* We need a listening TCP port for our cluster messaging needs. */ server.cfd_count = 0; /* Port sanity check II * The other handshake port check is triggered too late to stop * us from trying to use a too-high cluster port number. */ if (server.port > (65535-CLUSTER_PORT_INCR)) { serverLog(LL_WARNING, "Disque port number too high. " "Cluster communication port is 10,000 port " "numbers higher than your Disque node port. " "Your Disque node port number must be " "lower than 55535."); exit(1); } if (listenToPort(server.port+CLUSTER_PORT_INCR, server.cfd,&server.cfd_count) == C_ERR) { exit(1); } else { int j; for (j = 0; j < server.cfd_count; j++) { if (aeCreateFileEvent(server.el, server.cfd[j], AE_READABLE, clusterAcceptHandler, NULL) == AE_ERR) serverPanic("Unrecoverable error creating Disque Cluster " "Bus file event."); } } /* Set myself->port to my listening port, we'll just need to discover * the IP address via MEET messages. */ myself->port = server.port; /* Update state and list of reachable nodes. */ clusterUpdateState(); clusterUpdateReachableNodes(); } /* Reset a node performing a soft or hard reset: * * 1) All other nodes are forgotten. * 2) Only for hard reset: a new Node ID is generated. * 3) The new configuration is saved and the cluster state updated. * 4) Flush away all the jobs. */ void clusterReset(int hard) { dictIterator *di; dictEntry *de; /* Forget all the nodes but myself. */ di = dictGetSafeIterator(server.cluster->nodes); while((de = dictNext(di)) != NULL) { clusterNode *node = dictGetVal(de); if (node == myself) continue; clusterDelNode(node); } dictReleaseIterator(di); /* Information about reachable nodes is no longer valid. Update it. */ clusterUpdateReachableNodes(); /* Hard reset only: change node ID. */ if (hard) { sds oldname; /* To change the Node ID we need to remove the old name from the * nodes table, change the ID, and re-add it back with the new name. */ oldname = sdsnewlen(myself->name, CLUSTER_NAMELEN); dictDelete(server.cluster->nodes,oldname); sdsfree(oldname); getRandomHexChars(myself->name, CLUSTER_NAMELEN); clusterAddNode(myself); } flushServerData(); /* TODO: flush the deleted nodes hash table and the deleted nodes * entries. There are no longer jobs or queues that may reference * them. */ /* Make sure to persist the new config and update the state. */ clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG| CLUSTER_TODO_UPDATE_STATE| CLUSTER_TODO_FSYNC_CONFIG); } /* ----------------------------------------------------------------------------- * CLUSTER communication link * -------------------------------------------------------------------------- */ clusterLink *createClusterLink(clusterNode *node) { clusterLink *link = zmalloc(sizeof(*link)); link->ctime = mstime(); link->sndbuf = sdsempty(); link->rcvbuf = sdsempty(); link->node = node; link->fd = -1; return link; } /* Free a cluster link, without freeing the associated node of course. * This function will just make sure that the original node associated * with this link will have the 'link' field set to NULL. 
*/ void freeClusterLink(clusterLink *link) { if (link->fd != -1) { aeDeleteFileEvent(server.el, link->fd, AE_WRITABLE); aeDeleteFileEvent(server.el, link->fd, AE_READABLE); close(link->fd); } sdsfree(link->sndbuf); sdsfree(link->rcvbuf); if (link->node) link->node->link = NULL; zfree(link); } #define MAX_CLUSTER_ACCEPTS_PER_CALL 1000 void clusterAcceptHandler(aeEventLoop *el, int fd, void *privdata, int mask) { int cport, cfd; int max = MAX_CLUSTER_ACCEPTS_PER_CALL; char cip[NET_IP_STR_LEN]; clusterLink *link; UNUSED(el); UNUSED(mask); UNUSED(privdata); while(max--) { cfd = anetTcpAccept(server.neterr, fd, cip, sizeof(cip), &cport); if (cfd == ANET_ERR) { if (errno != EWOULDBLOCK) serverLog(LL_VERBOSE, "Accepting cluster node: %s", server.neterr); return; } anetNonBlock(NULL,cfd); anetEnableTcpNoDelay(NULL,cfd); /* Use non-blocking I/O for cluster messages. */ serverLog(LL_VERBOSE,"Accepted cluster node %s:%d", cip, cport); /* Create a link object we use to handle the connection. * It gets passed to the readable handler when data is available. * Initially the link->node pointer is set to NULL as we don't know * which node it is; the right node is referenced once we know the * node identity. */ link = createClusterLink(NULL); link->fd = cfd; aeCreateFileEvent(server.el,cfd,AE_READABLE,clusterReadHandler,link); } } /* ----------------------------------------------------------------------------- * CLUSTER node API * -------------------------------------------------------------------------- */ /* Create a new cluster node, with the specified flags. * If "nodename" is NULL this is considered a first handshake and a random * node name is assigned to this node (it will be fixed later when we * receive the first pong). * * The node is created and returned to the user, but it is not automatically * added to the nodes hash table. */ clusterNode *createClusterNode(char *nodename, int flags) { clusterNode *node = zmalloc(sizeof(*node)); if (nodename) memcpy(node->name, nodename, CLUSTER_NAMELEN); else getRandomHexChars(node->name, CLUSTER_NAMELEN); node->ctime = mstime(); node->flags = flags; node->ping_sent = node->pong_received = 0; node->fail_time = 0; node->link = NULL; memset(node->ip,0,sizeof(node->ip)); node->port = 0; node->fail_reports = listCreate(); listSetFreeMethod(node->fail_reports,zfree); return node; } /* This function is called every time we get a failure report from a node. * The side effect is to populate the fail_reports list (or to update * the timestamp of an existing report). * * 'failing' is the node that is in failure state according to the * 'sender' node. * * The function returns 0 if it just updates a timestamp of an existing * failure report from the same sender. 1 is returned if a new failure * report is created. */ int clusterNodeAddFailureReport(clusterNode *failing, clusterNode *sender) { list *l = failing->fail_reports; listNode *ln; listIter li; clusterNodeFailReport *fr; /* If a failure report from the same sender already exists, just update * the timestamp. */ listRewind(l,&li); while ((ln = listNext(&li)) != NULL) { fr = ln->value; if (fr->node == sender) { fr->time = mstime(); return 0; } } /* Otherwise create a new report. */ fr = zmalloc(sizeof(*fr)); fr->node = sender; fr->time = mstime(); listAddNodeTail(l,fr); return 1; } /* Remove failure reports that are too old, where too old means reasonably * older than the global node timeout. 
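For example, assuming a node timeout of 30000 milliseconds and a validity multiplier of 2 (hypothetical values), reports older than 60 seconds would be removed by the function below. 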
Note that in any case, for a node to be * flagged as FAIL we need to have a local PFAIL state that is at least * older than the global node timeout, so we don't just trust the number * of failure reports from other nodes. */ void clusterNodeCleanupFailureReports(clusterNode *node) { list *l = node->fail_reports; listNode *ln; listIter li; clusterNodeFailReport *fr; mstime_t maxtime = server.cluster_node_timeout * CLUSTER_FAIL_REPORT_VALIDITY_MULT; mstime_t now = mstime(); listRewind(l,&li); while ((ln = listNext(&li)) != NULL) { fr = ln->value; if (now - fr->time > maxtime) listDelNode(l,ln); } } /* Remove the failing report for 'node' if it was previously considered * failing by 'sender'. This function is called when a node informs us via * gossip that a node is OK from its point of view (no FAIL or PFAIL flags). * * Note that this function is called relatively often as it gets called even * when there are no nodes failing, and is O(N), however when the cluster is * fine the failure reports list is empty so the function runs in constant * time. * * The function returns 1 if the failure report was found and removed. * Otherwise 0 is returned. */ int clusterNodeDelFailureReport(clusterNode *node, clusterNode *sender) { list *l = node->fail_reports; listNode *ln; listIter li; clusterNodeFailReport *fr; /* Search for a failure report from this sender. */ listRewind(l,&li); while ((ln = listNext(&li)) != NULL) { fr = ln->value; if (fr->node == sender) break; } if (!ln) return 0; /* No failure report from this sender. */ /* Remove the failure report. */ listDelNode(l,ln); clusterNodeCleanupFailureReports(node); return 1; } /* Return the number of external nodes that believe 'node' is failing. * This count does not include this node itself, which may also have a PFAIL * or FAIL state for the node. */ int clusterNodeFailureReportsCount(clusterNode *node) { clusterNodeCleanupFailureReports(node); return listLength(node->fail_reports); } /* Free a node, however if the node was part of the cluster (no handshake * flag is set), the node is actually just disconnected, moved into * a deleted_nodes hash table, and flagged as deleted: we may still have * references to it in jobs and other places, and a full cleanup would be * a useless and complex effort. * * Note that this is the low level part of freeing a node, and should only * be called by clusterDelNode() that also handles the higher level logic * of removing a node from the cluster. */ void freeClusterNode(clusterNode *n) { serverAssert(dictDelete(server.cluster->nodes,n->name) == DICT_OK); if (n->link) freeClusterLink(n->link); /* We can free nodes in handshake state, but we can't free nodes * that were part of the cluster: they may still be referenced * by jobs. */ if (nodeInHandshake(n)) { listRelease(n->fail_reports); zfree(n); } else { dictAdd(server.cluster->deleted_nodes,n->name,n); n->flags |= CLUSTER_NODE_DELETED; } } /* Add a node to the nodes hash table */ int clusterAddNode(clusterNode *node) { int retval; retval = dictAdd(server.cluster->nodes, node->name, node); return (retval == DICT_OK) ? C_OK : C_ERR; } /* Remove a node from the cluster: * 1) Remove all the failure reports sent by this node. * 2) Free the node using freeClusterNode() that will remove it from * the hash table and actually free the node resources. */ void clusterDelNode(clusterNode *delnode) { dictIterator *di; dictEntry *de; /* 1) Remove failure reports. 
*/ di = dictGetSafeIterator(server.cluster->nodes); while((de = dictNext(di)) != NULL) { clusterNode *node = dictGetVal(de); if (node == delnode) continue; clusterNodeDelFailureReport(node,delnode); } dictReleaseIterator(di); /* 2) Free the node, unlinking it from the cluster. */ freeClusterNode(delnode); } /* Node lookup by name */ clusterNode *clusterLookupNode(char *name) { dictEntry *de = dictFind(server.cluster->nodes,name); if (de == NULL) return NULL; return dictGetVal(de); } /* This is only used after the handshake. When we connect a given IP/PORT * as a result of CLUSTER MEET we don't have the node name yet, so we * pick a random one, and will fix it when we receive the PONG reply using * this function. */ void clusterRenameNode(clusterNode *node, char *newname) { int retval; sds s = sdsnewlen(node->name, CLUSTER_NAMELEN); serverLog(LL_DEBUG,"Renaming node %.40s into %.40s", node->name, newname); retval = dictDelete(server.cluster->nodes, s); sdsfree(s); serverAssert(retval == DICT_OK); memcpy(node->name, newname, CLUSTER_NAMELEN); clusterAddNode(node); } /* ----------------------------------------------------------------------------- * CLUSTER nodes blacklist * * The nodes blacklist is just a way to ensure that a given node with a given * Node ID is not re-added before some time has elapsed (this time is specified * in seconds in CLUSTER_BLACKLIST_TTL). * * This is useful when we want to remove a node from the cluster completely: * when CLUSTER FORGET is called, it also puts the node into the blacklist so * that even if we receive gossip messages from other nodes that still remember * about the node we want to remove, we don't re-add it before some time. * * Currently the CLUSTER_BLACKLIST_TTL is set to 1 minute; this means * that disque-trib has 60 seconds to send CLUSTER FORGET messages to nodes * in the cluster without dealing with the problem of other nodes re-adding * the node to nodes we already sent the FORGET command to. * * The data structure used is a hash table with an sds string representing * the node ID as key, and the time when it is ok to re-add the node as * value. * -------------------------------------------------------------------------- */ #define CLUSTER_BLACKLIST_TTL 60 /* 1 minute. */ /* Before the addNode() or Exists() operations we always remove expired * entries from the black list. This is an O(N) operation but it is not a * problem since add / exists operations are called very infrequently and * the hash table is supposed to contain very few elements at most. * However without the cleanup during long uptimes and with some automated * node add/removal procedures, entries could accumulate. */ void clusterBlacklistCleanup(void) { dictIterator *di; dictEntry *de; di = dictGetSafeIterator(server.cluster->nodes_black_list); while((de = dictNext(di)) != NULL) { int64_t expire = dictGetUnsignedIntegerVal(de); if (expire < server.unixtime) dictDelete(server.cluster->nodes_black_list,dictGetKey(de)); } dictReleaseIterator(di); } /* Cleanup the blacklist and add a new node ID to the black list. */ void clusterBlacklistAddNode(clusterNode *node) { dictEntry *de; sds id = sdsnewlen(node->name,CLUSTER_NAMELEN); clusterBlacklistCleanup(); if (dictAdd(server.cluster->nodes_black_list,id,NULL) == DICT_OK) { /* If the key was added, duplicate the sds string representation of * the key for the next lookup. We'll free it at the end. 
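(On success dictAdd() stored 'id' itself as the hash table key, taking ownership of it: the sdsdup() below gives us a private copy to use for the dictFind() lookup, and it is this copy that the final sdsfree() releases.) 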
*/ id = sdsdup(id); } de = dictFind(server.cluster->nodes_black_list,id); dictSetUnsignedIntegerVal(de,time(NULL)+CLUSTER_BLACKLIST_TTL); sdsfree(id); } /* Return non-zero if the specified node ID exists in the blacklist. * You don't need to pass an sds string here, any pointer to 40 bytes * will work. */ int clusterBlacklistExists(char *nodeid) { sds id = sdsnewlen(nodeid,CLUSTER_NAMELEN); int retval; clusterBlacklistCleanup(); retval = dictFind(server.cluster->nodes_black_list,id) != NULL; sdsfree(id); return retval; } /* ----------------------------------------------------------------------------- * CLUSTER messages exchange - PING/PONG and gossip * -------------------------------------------------------------------------- */ /* This function checks if a given node should be marked as FAIL. * It happens if the following conditions are met: * * 1) We received enough failure reports from other nodes via gossip. * Enough means that the majority of the nodes signaled the node is * down recently. * 2) We believe this node is in PFAIL state. * * If a failure is detected we also inform the whole cluster about this * event trying to force every other node to set the FAIL flag for the node. * * Note that the form of agreement used here is weak, as we collect the majority * of the nodes' state over some time, and even if we force agreement by * propagating the FAIL message, because of partitions we may not reach every * node. However: * * 1) Either we reach the majority and eventually the FAIL state will propagate * to all the cluster. * 2) Or there is no majority, and the FAIL flag will simply be cleared after * some time. */ void markNodeAsFailingIfNeeded(clusterNode *node) { int failures; int needed_quorum = (server.cluster->size / 2) + 1; if (!nodeTimedOut(node)) return; /* We can reach it. */ if (nodeFailed(node)) return; /* Already FAILing. */ failures = clusterNodeFailureReportsCount(node); /* Also count myself as a voter. */ failures++; if (failures < needed_quorum) return; /* No weak agreement from the majority. */ serverLog(LL_VERBOSE, "Marking node %.40s as failing (quorum reached).", node->name); /* Mark the node as failing. */ node->flags &= ~CLUSTER_NODE_PFAIL; node->flags |= CLUSTER_NODE_FAIL; node->fail_time = mstime(); /* Broadcast the failing node name to everybody, forcing all the other * reachable nodes to flag the node as FAIL. */ clusterSendFail(node->name); clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE|CLUSTER_TODO_SAVE_CONFIG); } /* This function is called only if a node is marked as FAIL, but we are able * to reach it again. It checks whether the conditions exist to undo the FAIL * state. */ void clearNodeFailureIfNeeded(clusterNode *node) { serverAssert(nodeFailed(node)); /* We always clear the FAIL flag if we can contact the node again. */ serverLog(LL_VERBOSE, "Clear FAIL state for node %.40s: it is reachable again.", node->name); node->flags &= ~CLUSTER_NODE_FAIL; clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE|CLUSTER_TODO_SAVE_CONFIG); } /* Return true if we already have a node in HANDSHAKE state matching the * specified ip address and port number. This function is used in order to * avoid adding a new handshake node for the same address multiple times. 
*/ int clusterHandshakeInProgress(char *ip, int port) { dictIterator *di; dictEntry *de; di = dictGetSafeIterator(server.cluster->nodes); while((de = dictNext(di)) != NULL) { clusterNode *node = dictGetVal(de); if (!nodeInHandshake(node)) continue; if (!strcasecmp(node->ip,ip) && node->port == port) break; } dictReleaseIterator(di); return de != NULL; } /* Start a handshake with the specified address if there is not one * already in progress. Returns non-zero if the handshake was actually * started. On error zero is returned and errno is set to one of the * following values: * * EAGAIN - There is already a handshake in progress for this address. * EINVAL - IP or port are not valid. */ int clusterStartHandshake(char *ip, int port) { clusterNode *n; char norm_ip[NET_IP_STR_LEN]; struct sockaddr_storage sa; /* IP sanity check */ if (inet_pton(AF_INET,ip, &(((struct sockaddr_in *)&sa)->sin_addr))) { sa.ss_family = AF_INET; } else if (inet_pton(AF_INET6,ip, &(((struct sockaddr_in6 *)&sa)->sin6_addr))) { sa.ss_family = AF_INET6; } else { errno = EINVAL; return 0; } /* Port sanity check */ if (port <= 0 || port > (65535-CLUSTER_PORT_INCR)) { errno = EINVAL; return 0; } /* Set norm_ip as the normalized string representation of the node * IP address. */ memset(norm_ip,0,NET_IP_STR_LEN); if (sa.ss_family == AF_INET) inet_ntop(AF_INET, (void*)&(((struct sockaddr_in *)&sa)->sin_addr), norm_ip,NET_IP_STR_LEN); else inet_ntop(AF_INET6, (void*)&(((struct sockaddr_in6 *)&sa)->sin6_addr), norm_ip,NET_IP_STR_LEN); if (clusterHandshakeInProgress(norm_ip,port)) { errno = EAGAIN; return 0; } /* Add the node with a random name (NULL as first argument to * createClusterNode()). Everything will be fixed during the * handshake. */ n = createClusterNode(NULL,CLUSTER_NODE_HANDSHAKE|CLUSTER_NODE_MEET); memcpy(n->ip,norm_ip,sizeof(n->ip)); n->port = port; clusterAddNode(n); return 1; } /* Process the gossip section of PING or PONG packets. * Note that this function assumes that the packet is already sanity-checked * by the caller, not in the content of the gossip section, but in the * length. */ void clusterProcessGossipSection(clusterMsg *hdr, clusterLink *link) { uint16_t count = ntohs(hdr->count); clusterMsgDataGossip *g = (clusterMsgDataGossip*) hdr->data.ping.gossip; clusterNode *sender = link->node ? link->node : clusterLookupNode(hdr->sender); while(count--) { uint16_t flags = ntohs(g->flags); clusterNode *node; sds ci; ci = representClusterNodeFlags(sdsempty(), flags); serverLog(LL_DEBUG,"GOSSIP %.40s %s:%d %s", g->nodename, g->ip, ntohs(g->port), ci); sdsfree(ci); /* Update our state according to the gossip sections */ node = clusterLookupNode(g->nodename); if (node) { /* We already know this node. */ if (sender && node != myself) { if (flags & (CLUSTER_NODE_FAIL|CLUSTER_NODE_PFAIL)) { if (clusterNodeAddFailureReport(node,sender)) { serverLog(LL_VERBOSE, "Node %.40s reported node %.40s as not reachable.", sender->name, node->name); } markNodeAsFailingIfNeeded(node); } else { if (clusterNodeDelFailureReport(node,sender)) { serverLog(LL_VERBOSE, "Node %.40s reported node %.40s is back online.", sender->name, node->name); } } } /* If we already know this node, but it is not reachable, and * we see a different address in the gossip section, start a * handshake with the (possibly) new address: this will result * in a node address update if the handshake is * successful. 
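For example (hypothetical scenario), if a failed node was restarted on the same host but on a different port, gossip entries from other nodes will advertise the new ip:port pair, and the handshake started here lets us learn the updated address. 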
*/ if (node->flags & (CLUSTER_NODE_FAIL|CLUSTER_NODE_PFAIL) && (strcasecmp(node->ip,g->ip) || node->port != ntohs(g->port))) { clusterStartHandshake(g->ip,ntohs(g->port)); } } else { /* If it's not in NOADDR state and we don't have it, we * start a handshake process with this IP/PORT pair. * * Note that we require that the sender of this gossip message * is a well-known node in our cluster, otherwise we risk * joining another cluster. */ if (sender && !(flags & CLUSTER_NODE_NOADDR) && !clusterBlacklistExists(g->nodename)) { clusterStartHandshake(g->ip,ntohs(g->port)); } } /* Next node */ g++; } } /* IP -> string conversion. 'buf' is supposed to be at least 46 bytes. */ void nodeIp2String(char *buf, clusterLink *link) { anetPeerToString(link->fd, buf, NET_IP_STR_LEN, NULL); } /* Update the node address to the IP address that can be extracted * from link->fd, and at the specified port. * Also disconnect the node link so that we'll connect again to the new * address. * * If the ip/port pair is already correct no operation is performed at * all. * * The function returns 0 if the node address is still the same, * otherwise 1 is returned. */ int nodeUpdateAddressIfNeeded(clusterNode *node, clusterLink *link, int port) { char ip[NET_IP_STR_LEN] = {0}; /* We don't proceed if the link is the same as the sender link, as this * function is designed to see if the node link is consistent with the * symmetric link that is used to receive PINGs from the node. * * As a side effect this function never frees the passed 'link', so * it is safe to call during packet processing. */ if (link == node->link) return 0; nodeIp2String(ip,link); if (node->port == port && strcmp(ip,node->ip) == 0) return 0; /* IP / port is different, update it. */ memcpy(node->ip,ip,sizeof(ip)); node->port = port; if (node->link) freeClusterLink(node->link); node->flags &= ~CLUSTER_NODE_NOADDR; serverLog(LL_WARNING,"Address updated for node %.40s, now %s:%d", node->name, node->ip, node->port); return 1; } /* When this function is called, there is a packet to process starting * at link->rcvbuf. Releasing the buffer is up to the caller, so this * function should just handle the higher level stuff of processing the * packet, modifying the cluster state if needed. * * The function returns 1 if the link is still valid after the packet * was processed, otherwise 0 if the link was freed since the packet * processing led to some inconsistency error (for instance a PONG * received from the wrong sender ID). */ int clusterProcessPacket(clusterLink *link) { clusterMsg *hdr = (clusterMsg*) link->rcvbuf; uint32_t totlen = ntohl(hdr->totlen); uint16_t type = ntohs(hdr->type); clusterNode *sender; server.cluster->stats_bus_messages_received++; serverLog(LL_DEBUG,"--- Processing packet of type %d, %lu bytes", type, (unsigned long) totlen); /* Perform sanity checks */ if (totlen < 16) return 1; /* At least signature, version, totlen, count. 
*/ if (ntohs(hdr->ver) != CLUSTER_PROTO_VER) return 1; /* Can't handle versions other than the current one. */ if (totlen > sdslen(link->rcvbuf)) return 1; if (type == CLUSTERMSG_TYPE_PING || type == CLUSTERMSG_TYPE_PONG || type == CLUSTERMSG_TYPE_MEET) { uint16_t count = ntohs(hdr->count); uint32_t explen; /* expected length of this packet */ explen = sizeof(clusterMsg)-sizeof(union clusterMsgData); explen += (sizeof(clusterMsgDataGossip)*count); if (totlen != explen) return 1; } else if (type == CLUSTERMSG_TYPE_FAIL) { uint32_t explen = sizeof(clusterMsg)-sizeof(union clusterMsgData); explen += sizeof(clusterMsgDataFail); if (totlen != explen) return 1; } else if (type == CLUSTERMSG_TYPE_REPLJOB || type == CLUSTERMSG_TYPE_YOURJOBS) { uint32_t explen = sizeof(clusterMsg)-sizeof(union clusterMsgData); explen += sizeof(clusterMsgDataJob) - sizeof(hdr->data.jobs.serialized.jobs_data) + ntohl(hdr->data.jobs.serialized.datasize); if (totlen != explen) return 1; } else if (type == CLUSTERMSG_TYPE_GOTJOB || type == CLUSTERMSG_TYPE_ENQUEUE || type == CLUSTERMSG_TYPE_QUEUED || type == CLUSTERMSG_TYPE_WORKING || type == CLUSTERMSG_TYPE_SETACK || type == CLUSTERMSG_TYPE_GOTACK || type == CLUSTERMSG_TYPE_DELJOB || type == CLUSTERMSG_TYPE_WILLQUEUE) { uint32_t explen = sizeof(clusterMsg)-sizeof(union clusterMsgData); explen += sizeof(clusterMsgDataJobID); if (totlen != explen) return 1; } else if (type == CLUSTERMSG_TYPE_NEEDJOBS || type == CLUSTERMSG_TYPE_PAUSE) { uint32_t explen = sizeof(clusterMsg)-sizeof(union clusterMsgData); explen += sizeof(clusterMsgDataQueueOp) - 8; if (totlen < explen) return 1; explen += ntohl(hdr->data.queueop.about.qnamelen); if (totlen != explen) return 1; } /* Check if the sender is a known node. */ sender = clusterLookupNode(hdr->sender); /* Initial processing of PING and MEET requests replying with a PONG. */ if (type == CLUSTERMSG_TYPE_PING || type == CLUSTERMSG_TYPE_MEET) { serverLog(LL_DEBUG,"Ping packet received: %p", (void*)link->node); /* We use incoming MEET messages in order to set the address * for 'myself', since only other cluster nodes will send us * MEET messages on handshakes, when the cluster joins, or * later if we changed address, and those nodes will use our * official address to connect to us. So obtaining this address * from the socket is a simple way to discover / update our own * address in the cluster without it being hardcoded in the config. * * However if we don't have an address at all, we update the address * even with a normal PING packet. If it's wrong it will be fixed * by MEET later. */ if (type == CLUSTERMSG_TYPE_MEET || myself->ip[0] == '\0') { char ip[NET_IP_STR_LEN]; if (anetSockName(link->fd,ip,sizeof(ip),NULL) != -1 && strcmp(ip,myself->ip)) { memcpy(myself->ip,ip,NET_IP_STR_LEN); serverLog(LL_WARNING,"IP address for this node updated to %s", myself->ip); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG); } } /* Add this node if it is new for us and the msg type is MEET. * In this stage we don't try to add the node with the right * flags and so forth, as these details will be * resolved when we receive PONGs from the node. 
*/ if (!sender && type == CLUSTERMSG_TYPE_MEET) { clusterNode *node; node = createClusterNode(NULL,CLUSTER_NODE_HANDSHAKE); nodeIp2String(node->ip,link); node->port = ntohs(hdr->port); clusterAddNode(node); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG); } /* If this is a MEET packet from an unknown node, we still process * the gossip section here since we have to trust the sender because * of the message type. */ if (!sender && type == CLUSTERMSG_TYPE_MEET) clusterProcessGossipSection(hdr,link); /* Anyway reply with a PONG */ clusterSendPing(link,CLUSTERMSG_TYPE_PONG); } /* PING, PONG, MEET: process config information. */ if (type == CLUSTERMSG_TYPE_PING || type == CLUSTERMSG_TYPE_PONG || type == CLUSTERMSG_TYPE_MEET) { serverLog(LL_DEBUG,"%s packet received: %p", type == CLUSTERMSG_TYPE_PING ? "ping" : "pong", (void*)link->node); if (link->node) { if (nodeInHandshake(link->node)) { /* If we already have this node, try to change the * IP/port of the node with the new one. */ if (sender) { serverLog(LL_VERBOSE, "Handshake: we already know node %.40s, " "updating the address if needed.", sender->name); if (nodeUpdateAddressIfNeeded(sender,link,ntohs(hdr->port))) { clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG| CLUSTER_TODO_UPDATE_STATE); } /* Free this node as we already have it. This will * cause the link to be freed as well. */ clusterDelNode(link->node); return 0; } /* The first thing to do is to replace the random name with the * right node name, if this was a handshake stage. */ clusterRenameNode(link->node, hdr->sender); serverLog(LL_DEBUG,"Handshake with node %.40s completed.", link->node->name); link->node->flags &= ~CLUSTER_NODE_HANDSHAKE; clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE| CLUSTER_TODO_SAVE_CONFIG); } else if (memcmp(link->node->name,hdr->sender, CLUSTER_NAMELEN) != 0) { /* If the reply has a non-matching node ID we * disconnect this node and set it as not having an associated * address. */ serverLog(LL_DEBUG,"PONG contains mismatching sender ID"); link->node->flags |= CLUSTER_NODE_NOADDR; link->node->ip[0] = '\0'; link->node->port = 0; freeClusterLink(link); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG); return 0; } } /* Update the node address if it changed. */ if (sender && type == CLUSTERMSG_TYPE_PING && !nodeInHandshake(sender) && nodeUpdateAddressIfNeeded(sender,link,ntohs(hdr->port))) { clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG| CLUSTER_TODO_UPDATE_STATE); } /* Update our info about the node */ if (link->node && type == CLUSTERMSG_TYPE_PONG) { link->node->pong_received = mstime(); link->node->ping_sent = 0; /* The PFAIL condition can be reversed without external * help if it is momentary (that is, if it does not * turn into a FAIL state). * * The FAIL condition is also reversible under specific * conditions detected by clearNodeFailureIfNeeded(). */ if (nodeTimedOut(link->node)) { link->node->flags &= ~CLUSTER_NODE_PFAIL; clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG| CLUSTER_TODO_UPDATE_STATE); } else if (nodeFailed(link->node)) { clearNodeFailureIfNeeded(link->node); } } /* Copy certain flags from what the node publishes. */ if (sender) { int old_flags = sender->flags; int reported_flags = ntohs(hdr->flags); int flags_to_copy = CLUSTER_NODE_LEAVING; sender->flags &= ~flags_to_copy; sender->flags |= (reported_flags & flags_to_copy); /* Currently we just save the config without FSYNC or * updating the cluster state, since the only flag we update * here is LEAVING, which is non-critical to persist. 
*/ if (sender->flags != old_flags) clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG); } /* Get info from the gossip section */ if (sender) clusterProcessGossipSection(hdr,link); } else if (type == CLUSTERMSG_TYPE_FAIL) { clusterNode *failing; if (!sender) return 1; failing = clusterLookupNode(hdr->data.fail.about.nodename); if (failing && !(failing->flags & (CLUSTER_NODE_FAIL|CLUSTER_NODE_MYSELF))) { serverLog(LL_VERBOSE, "FAIL message received from %.40s about %.40s", hdr->sender, hdr->data.fail.about.nodename); failing->flags |= CLUSTER_NODE_FAIL; failing->fail_time = mstime(); failing->flags &= ~CLUSTER_NODE_PFAIL; clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG| CLUSTER_TODO_UPDATE_STATE); } } else if (type == CLUSTERMSG_TYPE_REPLJOB) { uint32_t numjobs = ntohl(hdr->data.jobs.serialized.numjobs); uint32_t datasize = ntohl(hdr->data.jobs.serialized.datasize); job *j; /* Only accept jobs replicated by known nodes. */ if (!sender || numjobs != 1) return 1; /* Don't replicate jobs if we already have memory issues or if we * are leaving the cluster. */ if (getMemoryWarningLevel() > 0 || myselfLeaving()) return 1; j = deserializeJob(hdr->data.jobs.serialized.jobs_data,datasize,NULL,SER_MESSAGE); if (j == NULL) { serverLog(LL_WARNING, "Received corrupted job description from node %.40s", hdr->sender); } else { /* Don't replicate jobs for queues paused in input. */ queue *q = lookupQueue(j->queue); if (q && q->flags & QUEUE_FLAG_PAUSED_IN) { freeJob(j); return 1; } j->flags |= JOB_FLAG_BCAST_QUEUED; j->state = JOB_STATE_ACTIVE; int retval = registerJob(j); if (retval == C_ERR) { /* The job already exists. Just update the list of nodes * that may have a copy. */ updateJobNodes(j); } else { AOFLoadJob(j); } /* Reply with a GOTJOB message, even if we already did in the past. * The node receiving ADDJOB may try multiple times, and our * GOTJOB messages may get lost. */ if (!(hdr->mflags[0] & CLUSTERMSG_FLAG0_NOREPLY)) clusterSendGotJob(sender,j); /* Release the job if we already had it. */ if (retval == C_ERR) freeJob(j); } } else if (type == CLUSTERMSG_TYPE_YOURJOBS) { uint32_t numjobs = ntohl(hdr->data.jobs.serialized.numjobs); uint32_t datasize = ntohl(hdr->data.jobs.serialized.datasize); if (!sender || numjobs == 0) return 1; receiveYourJobs(sender,numjobs,hdr->data.jobs.serialized.jobs_data,datasize); } else if (type == CLUSTERMSG_TYPE_GOTJOB) { if (!sender) return 1; job *j = lookupJob(hdr->data.jobid.job.id); if (j && j->state == JOB_STATE_WAIT_REPL) { dictAdd(j->nodes_confirmed,sender->name,sender); if (dictSize(j->nodes_confirmed) == j->repl) jobReplicationAchieved(j); } } else if (type == CLUSTERMSG_TYPE_SETACK) { if (!sender) return 1; uint32_t mayhave = ntohl(hdr->data.jobid.job.aux); serverLog(LL_VERBOSE,"RECEIVED SETACK(%d) FROM %.40s FOR JOB %.*s", (int) mayhave, sender->name, JOB_ID_LEN, hdr->data.jobid.job.id); job *j = lookupJob(hdr->data.jobid.job.id); /* If we have the job, change the state to acknowledged. */ if (j) { if (j->state == JOB_STATE_WAIT_REPL) { /* The job was acknowledged before ADDJOB achieved the * replication level requested! Unblock the client and * change the job state to active. */ if (jobReplicationAchieved(j) == C_ERR) { /* The job was externally replicated and deleted from * this node. Nothing to do... */ return 1; } } /* ACK it if not already acked. */ acknowledgeJob(j); } /* Reply according to the job's exact state. 
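As a concrete (hypothetical) example: if the sender's 'mayhave' is 2 and we track 3 nodes that may have a copy, we don't send GOTACK (unless mayhave is 0) and instead take over the garbage collection; if we track 2 or fewer nodes, we reply with GOTACK so that the sender can carry on. 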
*/ if (j == NULL || dictSize(j->nodes_delivered) <= mayhave) { /* If we don't know the job, or our set of nodes that may have * the job is not larger than the sender's, reply with GOTACK. */ int known = j ? 1 : 0; clusterSendGotAck(sender,hdr->data.jobid.job.id,known); } else { /* If we are here, we have the job and we know more nodes that may * have it than the sender does. Don't reply with GOTACK unless * mayhave is 0 (the sender just received an ACK from the client about * a job it does not know), in order to let the sender delete it. */ if (mayhave == 0) clusterSendGotAck(sender,hdr->data.jobid.job.id,1); /* Anyway, start a GC about this job. Because one of the two is * true: * 1) mayhave in the sender is 0, so it's up to us to start a GC * because the sender has just a dummy ACK. * 2) Or, we prevented the sender from finishing the GC since * we are not replying with GOTACK, and we need to take * responsibility for evicting the job in the cluster. */ tryJobGC(j); } } else if (type == CLUSTERMSG_TYPE_GOTACK) { if (!sender) return 1; uint32_t known = ntohl(hdr->data.jobid.job.aux); job *j = lookupJob(hdr->data.jobid.job.id); if (j) gotAckReceived(sender,j,known); } else if (type == CLUSTERMSG_TYPE_DELJOB) { if (!sender) return 1; job *j = lookupJob(hdr->data.jobid.job.id); if (j) { serverLog(LL_VERBOSE,"RECEIVED DELJOB FOR JOB %.*s",JOB_ID_LEN,j->id); unregisterJob(j); freeJob(j); } } else if (type == CLUSTERMSG_TYPE_ENQUEUE) { if (!sender) return 1; uint32_t delay = ntohl(hdr->data.jobid.job.aux); job *j = lookupJob(hdr->data.jobid.job.id); if (j && j->state < JOB_STATE_QUEUED) { /* Discard this message if the queue is paused in input. */ queue *q = lookupQueue(j->queue); if (q == NULL || !(q->flags & QUEUE_FLAG_PAUSED_IN)) { /* We received an ENQUEUE message: consider this node as the * first to queue the job, so no need to broadcast a QUEUED * message the first time we queue it. */ j->flags &= ~JOB_FLAG_BCAST_QUEUED; serverLog(LL_VERBOSE,"RECEIVED ENQUEUE FOR JOB %.*s", JOB_ID_LEN,j->id); if (delay == 0) { enqueueJob(j,0); } else { updateJobRequeueTime(j,server.mstime+delay*1000); } } } } else if (type == CLUSTERMSG_TYPE_QUEUED || type == CLUSTERMSG_TYPE_WORKING) { if (!sender) return 1; job *j = lookupJob(hdr->data.jobid.job.id); if (j && j->state <= JOB_STATE_QUEUED) { serverLog(LL_VERBOSE,"UPDATING QTIME FOR JOB %.*s",JOB_ID_LEN,j->id); /* Move the time we'll re-queue this job into the future. Moreover, * if the sender has a Node ID greater than our node ID, and we * have the message queued as well, dequeue it, to avoid a * useless multiple delivery. * * If the message is of type WORKING we always dequeue, regardless * of the sender name, since this is the explicit semantics of * WORKING: there is a client claiming to work on the message. */ if (j->state == JOB_STATE_QUEUED && (type == CLUSTERMSG_TYPE_WORKING || compareNodeIDsByJob(sender,myself,j) > 0)) { dequeueJob(j); } /* Update the time at which this node will attempt to enqueue * the message again. */ if (j->retry) { j->flags |= JOB_FLAG_BCAST_WILLQUEUE; updateJobRequeueTime(j,server.mstime+ j->retry*1000+ randomTimeError(DISQUE_TIME_ERR)); } /* Update multiple deliveries counters. */ if (type == CLUSTERMSG_TYPE_QUEUED) { if (hdr->mflags[0] & CLUSTERMSG_FLAG0_INCR_NACKS) j->num_nacks++; if (hdr->mflags[0] & CLUSTERMSG_FLAG0_INCR_DELIV) j->num_deliv++; } } else if (j && j->state == JOB_STATE_ACKED) { /* Some other node queued a message that we already have * as acknowledged. 
Try to force it to drop it. */ clusterSendSetAck(sender,j); } } else if (type == CLUSTERMSG_TYPE_WILLQUEUE) { if (!sender) return 1; job *j = lookupJob(hdr->data.jobid.job.id); if (j) { if (j->state == JOB_STATE_ACTIVE || j->state == JOB_STATE_QUEUED) { dictAdd(j->nodes_delivered,sender->name,sender); } if (j->state == JOB_STATE_QUEUED) clusterBroadcastQueued(j,CLUSTERMSG_NOFLAGS); else if (j->state == JOB_STATE_ACKED) clusterSendSetAck(sender,j); } } else if (type == CLUSTERMSG_TYPE_NEEDJOBS || type == CLUSTERMSG_TYPE_PAUSE) { if (!sender) return 1; uint32_t qnamelen = ntohl(hdr->data.queueop.about.qnamelen); uint32_t count = ntohl(hdr->data.queueop.about.aux); robj *qname = createStringObject(hdr->data.queueop.about.qname, qnamelen); serverLog(LL_VERBOSE,"RECEIVED %s FOR QUEUE %s (%d)", (type == CLUSTERMSG_TYPE_NEEDJOBS) ? "NEEDJOBS" : "PAUSE", (char*)qname->ptr,count); if (type == CLUSTERMSG_TYPE_NEEDJOBS) receiveNeedJobs(sender,qname,count); else receivePauseQueue(qname,count); decrRefCount(qname); } else { serverLog(LL_WARNING,"Received unknown packet type: %d", type); } return 1; } /* This function is called when we detect the link with this node is lost. We set the node as no longer connected. The Cluster Cron will detect the missing connection and will try to establish it again. If instead the node is a temporary node used to accept a query, we completely free the node on error. */ void handleLinkIOError(clusterLink *link) { freeClusterLink(link); } /* Send data. This is handled using a trivial send buffer that gets * consumed by write(). We don't try to optimize this for speed too much * as this is a very low traffic channel. */ void clusterWriteHandler(aeEventLoop *el, int fd, void *privdata, int mask) { clusterLink *link = (clusterLink*) privdata; ssize_t nwritten; UNUSED(el); UNUSED(mask); nwritten = write(fd, link->sndbuf, sdslen(link->sndbuf)); if (nwritten <= 0) { serverLog(LL_DEBUG,"I/O error writing to node link: %s", strerror(errno)); handleLinkIOError(link); return; } sdsrange(link->sndbuf,nwritten,-1); if (sdslen(link->sndbuf) == 0) aeDeleteFileEvent(server.el, link->fd, AE_WRITABLE); } /* Read data. Try to read the first field of the header first to check the * full length of the packet. When a whole packet is in memory this function * will call the function to process the packet. And so forth. */ void clusterReadHandler(aeEventLoop *el, int fd, void *privdata, int mask) { char buf[sizeof(clusterMsg)]; ssize_t nread; clusterMsg *hdr; clusterLink *link = (clusterLink*) privdata; unsigned int readlen, rcvbuflen; UNUSED(el); UNUSED(mask); while(1) { /* Read as long as there is data to read. */ rcvbuflen = sdslen(link->rcvbuf); if (rcvbuflen < 8) { /* First, obtain the first 8 bytes to get the full message * length. */ readlen = 8 - rcvbuflen; } else { /* Finally read the full message. */ hdr = (clusterMsg*) link->rcvbuf; if (rcvbuflen == 8) { /* Perform some sanity check on the message signature * and length. */ if (memcmp(hdr->sig,"DbuZ",4) != 0 || ntohl(hdr->totlen) < CLUSTERMSG_MIN_LEN) { serverLog(LL_WARNING, "Bad message length or signature received " "from Cluster bus."); handleLinkIOError(link); return; } } readlen = ntohl(hdr->totlen) - rcvbuflen; if (readlen > sizeof(buf)) readlen = sizeof(buf); } nread = read(fd,buf,readlen); if (nread == -1 && errno == EAGAIN) return; /* No more data ready. */ if (nread <= 0) { /* I/O error... */ serverLog(LL_DEBUG,"I/O error reading from node link: %s", (nread == 0) ? 
"connection closed" : strerror(errno)); handleLinkIOError(link); return; } else { /* Read data and recast the pointer to the new buffer. */ link->rcvbuf = sdscatlen(link->rcvbuf,buf,nread); hdr = (clusterMsg*) link->rcvbuf; rcvbuflen += nread; } /* Total length obtained? Process this packet. */ if (rcvbuflen >= 8 && rcvbuflen == ntohl(hdr->totlen)) { if (clusterProcessPacket(link)) { sdsfree(link->rcvbuf); link->rcvbuf = sdsempty(); } else { return; /* Link no longer valid. */ } } } } /* Put stuff into the send buffer. * * It is guaranteed that this function will never have as a side effect * the link to be invalidated, so it is safe to call this function * from event handlers that will do stuff with the same link later. */ void clusterSendMessage(clusterLink *link, unsigned char *msg, size_t msglen) { if (sdslen(link->sndbuf) == 0 && msglen != 0) aeCreateFileEvent(server.el,link->fd,AE_WRITABLE, clusterWriteHandler,link); link->sndbuf = sdscatlen(link->sndbuf, msg, msglen); server.cluster->stats_bus_messages_sent++; } /* Send a message to all the nodes in the specified dictionary of nodes, that * are part of the cluster and have a connected link. * * It is guaranteed that this function will never have as a side effect * some node->link to be invalidated, so it is safe to call this function * from event handlers that will do stuff with node links later. * * Note that this function expects 'nodes' to be stored as keys, so the * value can be anything. This is possible since node->name is both the * node key, and the pointer to the start of the structure. */ void clusterBroadcastMessage(dict *nodes, void *buf, size_t len) { dictIterator *di; dictEntry *de; di = dictGetSafeIterator(nodes); while((de = dictNext(di)) != NULL) { clusterNode *node = dictGetKey(de); if (!node->link) continue; if (node->flags & (CLUSTER_NODE_MYSELF|CLUSTER_NODE_HANDSHAKE)) continue; clusterSendMessage(node->link,buf,len); } dictReleaseIterator(di); } /* Build the message header */ void clusterBuildMessageHdr(clusterMsg *hdr, int type) { int totlen = 0; memset(hdr,0,sizeof(*hdr)); hdr->ver = htons(CLUSTER_PROTO_VER); hdr->sig[0] = 'D'; hdr->sig[1] = 'b'; hdr->sig[2] = 'u'; hdr->sig[3] = 'Z'; hdr->type = htons(type); memcpy(hdr->sender,myself->name,CLUSTER_NAMELEN); hdr->port = htons(server.port); hdr->flags = htons(myself->flags); hdr->state = server.cluster->state; /* Compute the message length for certain messages. For other messages * this is up to the caller. */ if (type == CLUSTERMSG_TYPE_FAIL) { totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData); totlen += sizeof(clusterMsgDataFail); } else if (type == CLUSTERMSG_TYPE_GOTJOB || type == CLUSTERMSG_TYPE_ENQUEUE || type == CLUSTERMSG_TYPE_QUEUED || type == CLUSTERMSG_TYPE_WORKING || type == CLUSTERMSG_TYPE_SETACK || type == CLUSTERMSG_TYPE_GOTACK || type == CLUSTERMSG_TYPE_DELJOB || type == CLUSTERMSG_TYPE_WILLQUEUE) { totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData); totlen += sizeof(clusterMsgDataJobID); } hdr->totlen = htonl(totlen); /* For PING, PONG, and MEET, fixing the totlen field is up to the caller. */ } /* Send a PING or PONG packet to the specified node, making sure to add enough * gossip informations. */ void clusterSendPing(clusterLink *link, int type) { unsigned char *buf; clusterMsg *hdr; int gossipcount = 0; /* Number of gossip sections added so far. */ int wanted; /* Number of gossip sections we want to append if possible. */ int totlen; /* Total packet length. 
*/ /* freshnodes is the max number of nodes we can hope to append at all: * nodes available minus two (ourself and the node we are sending the * message to). However in practice there may be fewer valid nodes, since * nodes in handshake state, or disconnected, are not considered. */ int freshnodes = dictSize(server.cluster->nodes)-2; /* How many gossip sections do we want to add? 1/10 of the number of nodes, * and anyway at least 3. Why 1/10? * * If we have N nodes, with N/10 entries, and we consider that in * node_timeout we exchange with each other node at least 4 packets * (we ping in the worst case in node_timeout/2 time, and we also * receive two pings from the host), we have a total of 8 packets * in the node_timeout*2 failure reports validity time. So we have * that, for a single PFAIL node, we can expect to receive the following * number of failure reports (in the specified window of time): * * PROB * GOSSIP_ENTRIES_PER_PACKET * TOTAL_PACKETS: * * PROB = probability of being featured in a single gossip entry, * which is 1 / NUM_OF_NODES. * ENTRIES = NUM_OF_NODES/10. * TOTAL_PACKETS = 2 * 4 * NUM_OF_NODES. * * With 1/10 of the nodes per packet we always get over the majority, and * specifically 80% of the number of nodes, to account for many nodes * failing at the same time. * * Since some entries are skipped (nodes in handshake state, NOADDR, or * disconnected nodes), which lowers the probability of a node being * featured, we set the number of entries per packet as * 10% of the total nodes we have. */ wanted = floor(dictSize(server.cluster->nodes)/10); if (wanted < 3) wanted = 3; if (wanted > freshnodes) wanted = freshnodes; /* Compute the maximum totlen to allocate our buffer. We'll fix the totlen * later according to the number of gossip sections we really were able * to put inside the packet. */ totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData); totlen += (sizeof(clusterMsgDataGossip)*wanted); /* Note: clusterBuildMessageHdr() expects the buffer to always be at least * sizeof(clusterMsg) or more. */ if (totlen < (int)sizeof(clusterMsg)) totlen = sizeof(clusterMsg); buf = zcalloc(totlen); hdr = (clusterMsg*) buf; /* Populate the header. */ if (link->node && type == CLUSTERMSG_TYPE_PING) link->node->ping_sent = mstime(); clusterBuildMessageHdr(hdr,type); /* Populate the gossip fields */ int maxiterations = wanted*3; while(freshnodes > 0 && gossipcount < wanted && maxiterations--) { dictEntry *de = dictGetRandomKey(server.cluster->nodes); clusterNode *this = dictGetVal(de); clusterMsgDataGossip *gossip; int j; /* Don't include this node: the whole packet header is about us * already, so we just gossip about other nodes. */ if (this == myself) continue; /* Give a bias to FAIL/PFAIL nodes. */ if (maxiterations > wanted*2 && !(this->flags & (CLUSTER_NODE_PFAIL|CLUSTER_NODE_FAIL))) continue; /* In the gossip section don't include: * 1) Nodes in HANDSHAKE state. * 2) Nodes with the NOADDR flag set. * 3) Disconnected nodes. */ if (this->flags & (CLUSTER_NODE_HANDSHAKE|CLUSTER_NODE_NOADDR) || this->link == NULL) { freshnodes--; /* Technically not correct, but saves CPU. 
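We decrement freshnodes even though no gossip entry was actually added; this makes the loop bound slightly pessimistic but avoids further dictGetRandomKey() calls for nodes we would skip again anyway. 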
*/ continue; } /* Check if we already added this node */ for (j = 0; j < gossipcount; j++) { if (memcmp(hdr->data.ping.gossip[j].nodename,this->name, CLUSTER_NAMELEN) == 0) break; } if (j != gossipcount) continue; /* Add it */ freshnodes--; gossip = &(hdr->data.ping.gossip[gossipcount]); memcpy(gossip->nodename,this->name,CLUSTER_NAMELEN); gossip->ping_sent = htonl(this->ping_sent); gossip->pong_received = htonl(this->pong_received); memcpy(gossip->ip,this->ip,sizeof(this->ip)); gossip->port = htons(this->port); gossip->flags = htons(this->flags); gossip->notused1 = 0; gossip->notused2 = 0; gossipcount++; } /* Ready to send... fix the totlen field and queue the message in the * output buffer. */ totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData); totlen += (sizeof(clusterMsgDataGossip)*gossipcount); hdr->count = htons(gossipcount); hdr->totlen = htonl(totlen); clusterSendMessage(link,buf,totlen); zfree(buf); } /* Send a FAIL message to all the nodes we are able to contact. * The FAIL message is sent when we detect that a node is failing * (CLUSTER_NODE_PFAIL) and we also receive a gossip confirmation of this: * we switch the node state to CLUSTER_NODE_FAIL and ask all the other * nodes to do the same ASAP. */ void clusterSendFail(char *nodename) { unsigned char buf[sizeof(clusterMsg)]; clusterMsg *hdr = (clusterMsg*) buf; clusterBuildMessageHdr(hdr,CLUSTERMSG_TYPE_FAIL); memcpy(hdr->data.fail.about.nodename,nodename,CLUSTER_NAMELEN); clusterBroadcastMessage(server.cluster->nodes,buf,ntohl(hdr->totlen)); } /* ----------------------------------------------------------------------------- * CLUSTER job related messages * -------------------------------------------------------------------------- */ /* Broadcast a REPLJOB message to 'repl' nodes, and populates the set of nodes * that may have the job. * * If there are already nodes that received the message, additional * 'repl' nodes will be added to the list (if possible), and the message * broadcast again to all the nodes that may already have the message, * plus the new ones. * * However for nodes for which we already have the GOTJOB reply, we flag * the message with the NOREPLY flag. If the 'noreply' argument of the function * is true we also flag the message with NOREPLY, regardless of whether the * node we are sending REPLJOB to already replied. * * Nodes are selected from the server.cluster->reachable_nodes list. * The function returns the number of new (additional) nodes that MAY have * received the message. */ int clusterReplicateJob(job *j, int repl, int noreply) { int i, added = 0; if (repl <= 0) return 0; /* Add the specified number of nodes to the list of receivers. */ clusterShuffleReachableNodes(); for (i = 0; i < server.cluster->reachable_nodes_count; i++) { clusterNode *node = server.cluster->reachable_nodes[i]; if (node->link == NULL) continue; /* No link, no party... */ if (dictAdd(j->nodes_delivered,node->name,node) == DICT_OK) { /* Only count non-duplicated nodes. */ added++; if (--repl == 0) break; } } /* Resend the message to all the past and new nodes. 
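For example (hypothetical): if nodes A and B already received the job and 'repl' is 2, two more reachable nodes C and D are added to j->nodes_delivered, and the REPLJOB message is sent to A, B, C and D, with the NOREPLY flag set for any node that already replied with GOTJOB. 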
*/ unsigned char buf[sizeof(clusterMsg)], *payload; clusterMsg *hdr = (clusterMsg*) buf; uint32_t totlen; sds serialized = serializeJob(sdsempty(),j,SER_MESSAGE); totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData); totlen += sizeof(clusterMsgDataJob) - sizeof(hdr->data.jobs.serialized.jobs_data) + sdslen(serialized); clusterBuildMessageHdr(hdr,CLUSTERMSG_TYPE_REPLJOB); hdr->data.jobs.serialized.numjobs = htonl(1); hdr->data.jobs.serialized.datasize = htonl(sdslen(serialized)); hdr->totlen = htonl(totlen); if (totlen < sizeof(buf)) { payload = buf; } else { payload = zmalloc(totlen); memcpy(payload,buf,sizeof(clusterMsg)); hdr = (clusterMsg*) payload; } memcpy(hdr->data.jobs.serialized.jobs_data,serialized,sdslen(serialized)); sdsfree(serialized); /* Actual delivery of the message to the list of nodes. */ dictIterator *di = dictGetIterator(j->nodes_delivered); dictEntry *de; while((de = dictNext(di)) != NULL) { clusterNode *node = dictGetVal(de); if (node == myself) continue; /* We ask for reply only if 'noreply' is false and the target node * did not already reply with GOTJOB. Note that nodes_confirmed is * keyed by node name. */ int acked = j->nodes_confirmed && dictFind(j->nodes_confirmed,node->name) != NULL; if (noreply || acked) { hdr->mflags[0] |= CLUSTERMSG_FLAG0_NOREPLY; } else { hdr->mflags[0] &= ~CLUSTERMSG_FLAG0_NOREPLY; } /* If the target node acknowledged the message already, send it again * only if there are additional nodes. We want the target node to refresh * its list of receivers. */ if (node->link && !(acked && added == 0)) clusterSendMessage(node->link,payload,totlen); } dictReleaseIterator(di); if (payload != buf) zfree(payload); return added; } /* Helper function to send all the messages that have just a type, * a Job ID, and an optional 'aux' additional value. */ void clusterSendJobIDMessage(int type, clusterNode *node, char *id, int aux) { unsigned char buf[sizeof(clusterMsg)]; clusterMsg *hdr = (clusterMsg*) buf; if (node->link == NULL) return; /* This is a best effort message. */ clusterBuildMessageHdr(hdr,type); memcpy(hdr->data.jobid.job.id,id,JOB_ID_LEN); hdr->data.jobid.job.aux = htonl(aux); clusterSendMessage(node->link,buf,ntohl(hdr->totlen)); } /* Like clusterSendJobIDMessage(), but sends the message to the specified set * of nodes (excluding myself if included in the set of nodes). */ void clusterBroadcastJobIDMessage(dict *nodes, char *id, int type, uint32_t aux, unsigned char flags) { dictIterator *di = dictGetIterator(nodes); dictEntry *de; unsigned char buf[sizeof(clusterMsg)]; clusterMsg *hdr = (clusterMsg*) buf; /* Build the message one time, send the same to everybody. */ clusterBuildMessageHdr(hdr,type); memcpy(hdr->data.jobid.job.id,id,JOB_ID_LEN); hdr->data.jobid.job.aux = htonl(aux); hdr->mflags[0] = flags; while((de = dictNext(di)) != NULL) { clusterNode *node = dictGetVal(de); if (node == myself) continue; if (node->link) clusterSendMessage(node->link,buf,ntohl(hdr->totlen)); } dictReleaseIterator(di); } /* Send a GOTJOB message to the specified node, if connected. * GOTJOB messages only contain the ID of the job, and are acknowledgments * that the job was replicated to a target node. The receiver of the message * will be able to reply to the client that the job was accepted by the * system when enough nodes have a copy of the job. */ void clusterSendGotJob(clusterNode *node, job *j) { clusterSendJobIDMessage(CLUSTERMSG_TYPE_GOTJOB,node,j->id,0); } /* Force the receiver to queue a job, if it has that job in an active * state. 
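The 'delay' value, in seconds, travels in the message 'aux' field: with a delay of 0 the receiver enqueues the job right away, otherwise it just postpones the job requeue time (see the ENQUEUE handler in clusterProcessPacket()). 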
*/ void clusterSendEnqueue(clusterNode *node, job *j, uint32_t delay) { clusterSendJobIDMessage(CLUSTERMSG_TYPE_ENQUEUE,node,j->id,delay); } /* Tell the receiver that the specified job was just re-queued by the * sender, so it should move its qtime into the future, and, if the job * is also queued by the receiving node, drop it from the queue if the * sender has a greater node ID (so that we try, in a best-effort way, to * avoid useless multiple deliveries). * * This message is sent to all the nodes we believe may have a copy * of the message and are reachable. */ void clusterBroadcastQueued(job *j, unsigned char flags) { serverLog(LL_VERBOSE,"BCAST QUEUED: %.*s",JOB_ID_LEN,j->id); clusterBroadcastJobIDMessage(j->nodes_delivered,j->id, CLUSTERMSG_TYPE_QUEUED,0,flags); } /* WORKING is like QUEUED, but will always force the receiver to dequeue. */ void clusterBroadcastWorking(job *j) { serverLog(LL_VERBOSE,"BCAST WORKING: %.*s",JOB_ID_LEN,j->id); clusterBroadcastJobIDMessage(j->nodes_delivered,j->id, CLUSTERMSG_TYPE_WORKING,0,CLUSTERMSG_NOFLAGS); } /* Send a DELJOB message to all the nodes that may have a copy. */ void clusterBroadcastDelJob(job *j) { serverLog(LL_VERBOSE,"BCAST DELJOB: %.*s",JOB_ID_LEN,j->id); clusterBroadcastJobIDMessage(j->nodes_delivered,j->id, CLUSTERMSG_TYPE_DELJOB,0,CLUSTERMSG_NOFLAGS); } /* Tell the receiver to reply with a QUEUED message if it has the job * already queued, to prevent us from queueing it in the next few * milliseconds. */ void clusterSendWillQueue(job *j) { serverLog(LL_VERBOSE,"BCAST WILLQUEUE: %.*s",JOB_ID_LEN,j->id); clusterBroadcastJobIDMessage(j->nodes_delivered,j->id, CLUSTERMSG_TYPE_WILLQUEUE,0,CLUSTERMSG_NOFLAGS); } /* Force the receiver to acknowledge the job as delivered. */ void clusterSendSetAck(clusterNode *node, job *j) { uint32_t maxowners = dictSize(j->nodes_delivered); clusterSendJobIDMessage(CLUSTERMSG_TYPE_SETACK,node,j->id,maxowners); } /* Acknowledge a SETACK message. */ void clusterSendGotAck(clusterNode *node, char *jobid, int known) { clusterSendJobIDMessage(CLUSTERMSG_TYPE_GOTACK,node,jobid,known); } /* Send a NEEDJOBS message to the specified set of nodes. */ void clusterSendNeedJobs(robj *qname, int numjobs, dict *nodes) { uint32_t totlen, qnamelen = sdslen(qname->ptr); uint32_t alloclen; clusterMsg *hdr; serverLog(LL_VERBOSE,"Sending NEEDJOBS for %s %d, %d nodes", (char*)qname->ptr, (int)numjobs, (int)dictSize(nodes)); totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData); totlen += sizeof(clusterMsgDataQueueOp) - 8 + qnamelen; alloclen = totlen; if (alloclen < (int)sizeof(clusterMsg)) alloclen = sizeof(clusterMsg); hdr = zmalloc(alloclen); clusterBuildMessageHdr(hdr,CLUSTERMSG_TYPE_NEEDJOBS); hdr->data.queueop.about.aux = htonl(numjobs); hdr->data.queueop.about.qnamelen = htonl(qnamelen); memcpy(hdr->data.queueop.about.qname, qname->ptr, qnamelen); hdr->totlen = htonl(totlen); clusterBroadcastMessage(nodes,hdr,totlen); zfree(hdr); } /* Send a PAUSE message to the specified set of nodes. 
*/ void clusterSendPause(robj *qname, uint32_t flags, dict *nodes) { uint32_t totlen, qnamelen = sdslen(qname->ptr); uint32_t alloclen; clusterMsg *hdr; serverLog(LL_VERBOSE,"Sending PAUSE for %s flags=%d, %d nodes", (char*)qname->ptr, (int)flags, (int)dictSize(nodes)); totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData); totlen += sizeof(clusterMsgDataQueueOp) - 8 + qnamelen; alloclen = totlen; if (alloclen < (int)sizeof(clusterMsg)) alloclen = sizeof(clusterMsg); hdr = zmalloc(alloclen); clusterBuildMessageHdr(hdr,CLUSTERMSG_TYPE_PAUSE); hdr->data.queueop.about.aux = htonl(flags); hdr->data.queueop.about.qnamelen = htonl(qnamelen); memcpy(hdr->data.queueop.about.qname, qname->ptr, qnamelen); hdr->totlen = htonl(totlen); clusterBroadcastMessage(nodes,hdr,totlen); zfree(hdr); } /* Same as clusterSendPause() but broadcasts the message to the * whole cluster. */ void clusterBroadcastPause(robj *qname, uint32_t flags) { clusterSendPause(qname,flags,server.cluster->nodes); } /* Send a YOURJOBS message to the specified node, with a serialized copy of * the jobs referenced by the 'jobs' array and containing 'count' jobs. */ void clusterSendYourJobs(clusterNode *node, job **jobs, uint32_t count) { unsigned char buf[sizeof(clusterMsg)], *payload; clusterMsg *hdr = (clusterMsg*) buf; uint32_t totlen, j; if (!node->link) return; serverLog(LL_VERBOSE,"Sending %d jobs to %.40s", (int)count,node->name); totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData); totlen += sizeof(clusterMsgDataJob) - sizeof(hdr->data.jobs.serialized.jobs_data); sds serialized = sdsempty(); for (j = 0; j < count; j++) serialized = serializeJob(serialized,jobs[j],SER_MESSAGE); totlen += sdslen(serialized); clusterBuildMessageHdr(hdr,CLUSTERMSG_TYPE_YOURJOBS); hdr->data.jobs.serialized.numjobs = htonl(count); hdr->data.jobs.serialized.datasize = htonl(sdslen(serialized)); hdr->totlen = htonl(totlen); if (totlen < sizeof(buf)) { payload = buf; } else { payload = zmalloc(totlen); memcpy(payload,buf,sizeof(clusterMsg)); hdr = (clusterMsg*) payload; } memcpy(hdr->data.jobs.serialized.jobs_data,serialized,sdslen(serialized)); sdsfree(serialized); clusterSendMessage(node->link,payload,totlen); if (payload != buf) zfree(payload); } /* ----------------------------------------------------------------------------- * CLUSTER cron job * -------------------------------------------------------------------------- */ /* This is executed 10 times every second */ void clusterCron(void) { dictIterator *di; dictEntry *de; int update_state = 0; mstime_t min_pong = 0, now = mstime(); clusterNode *min_pong_node = NULL; static unsigned long long iteration = 0; mstime_t handshake_timeout; iteration++; /* Number of times this function was called so far. */ /* The handshake timeout is the time after which a handshake node that was * not turned into a normal node is removed from the nodes. Usually it is * just the NODE_TIMEOUT value, but when NODE_TIMEOUT is too small we use * the value of 1 second. */ handshake_timeout = server.cluster_node_timeout; if (handshake_timeout < 1000) handshake_timeout = 1000; /* Check if we have disconnected nodes and re-establish the connection. */ di = dictGetSafeIterator(server.cluster->nodes); while((de = dictNext(di)) != NULL) { clusterNode *node = dictGetVal(de); if (node->flags & (CLUSTER_NODE_MYSELF|CLUSTER_NODE_NOADDR)) continue; /* A node in HANDSHAKE state has a limited lifespan equal to the * configured node timeout. 
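*
* For example (editorial note): with the default
* CLUSTER_DEFAULT_NODE_TIMEOUT of 15000 milliseconds a handshake may last
* up to 15 seconds, while a node timeout of 500 milliseconds would be
* raised to the 1000 millisecond floor computed above.
*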
*/ if (nodeInHandshake(node) && now - node->ctime > handshake_timeout) { clusterDelNode(node); continue; } if (node->link == NULL) { int fd; mstime_t old_ping_sent; clusterLink *link; fd = anetTcpNonBlockBindConnect(server.neterr, node->ip, node->port+CLUSTER_PORT_INCR, NET_FIRST_BIND_ADDR); if (fd == -1) { /* We got a synchronous error from connect before * clusterSendPing() had a chance to be called. * If node->ping_sent is zero, failure detection can't work, * so we claim we actually sent a ping now (that will * be really sent as soon as the link is obtained). */ if (node->ping_sent == 0) node->ping_sent = mstime(); serverLog(LL_DEBUG, "Unable to connect to " "Cluster Node [%s]:%d -> %s", node->ip, node->port+CLUSTER_PORT_INCR, server.neterr); continue; } anetEnableTcpNoDelay(NULL,fd); link = createClusterLink(node); link->fd = fd; node->link = link; aeCreateFileEvent(server.el,link->fd,AE_READABLE, clusterReadHandler,link); /* Queue a PING in the new connection ASAP: this is crucial * to avoid false positives in failure detection. * * If the node is flagged as MEET, we send a MEET message instead * of a PING one, to force the receiver to add us to its node * table. */ old_ping_sent = node->ping_sent; clusterSendPing(link, node->flags & CLUSTER_NODE_MEET ? CLUSTERMSG_TYPE_MEET : CLUSTERMSG_TYPE_PING); if (old_ping_sent) { /* If there was an active ping before the link was * disconnected, we want to restore the ping time, otherwise * it would be replaced by the clusterSendPing() call. */ node->ping_sent = old_ping_sent; } /* We can clear the flag after the first packet is sent. * If we'll never receive a PONG, we'll never send new packets * to this node. Instead after the PONG is received and we * are no longer in meet/handshake status, we want to send * normal PING packets. */ node->flags &= ~CLUSTER_NODE_MEET; serverLog(LL_DEBUG,"Connecting with Node %.40s at %s:%d", node->name, node->ip, node->port+CLUSTER_PORT_INCR); } } dictReleaseIterator(di); /* Ping some random node once every 10 iterations, so that we usually ping * one random node every second. */ if (!(iteration % 10)) { int j; /* Check a few random nodes and ping the one with the oldest * pong_received time. */ for (j = 0; j < 5; j++) { de = dictGetRandomKey(server.cluster->nodes); clusterNode *this = dictGetVal(de); /* Don't ping disconnected nodes or nodes with a ping currently * active. */ if (this->link == NULL || this->ping_sent != 0) continue; if (this->flags & (CLUSTER_NODE_MYSELF|CLUSTER_NODE_HANDSHAKE)) continue; if (min_pong_node == NULL || min_pong > this->pong_received) { min_pong_node = this; min_pong = this->pong_received; } } if (min_pong_node) { serverLog(LL_DEBUG,"Pinging node %.40s", min_pong_node->name); clusterSendPing(min_pong_node->link, CLUSTERMSG_TYPE_PING); } } /* Iterate nodes to check if we need to flag something as failing. */ di = dictGetSafeIterator(server.cluster->nodes); while((de = dictNext(di)) != NULL) { clusterNode *node = dictGetVal(de); now = mstime(); /* Use an updated time at every iteration. */ mstime_t delay; if (node->flags & (CLUSTER_NODE_MYSELF|CLUSTER_NODE_NOADDR|CLUSTER_NODE_HANDSHAKE)) continue; /* If we are waiting for the PONG more than half the cluster * timeout, reconnect the link: maybe there is a connection * issue even if the node is alive. 
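*
* Worked example (editorial note): with a 15000 millisecond node timeout,
* a link older than 15 seconds on which a ping has been pending for more
* than 7.5 seconds is dropped here and re-created by the connect logic
* above.
*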
*/ if (node->link && /* is connected */ now - node->link->ctime > server.cluster_node_timeout && /* was not already reconnected */ node->ping_sent && /* we already sent a ping */ node->pong_received < node->ping_sent && /* still waiting pong */ /* and we are waiting for the pong more than timeout/2 */ now - node->ping_sent > server.cluster_node_timeout/2) { /* Disconnect the link, it will be reconnected automatically. */ freeClusterLink(node->link); } /* If we have currently no active ping in this instance, and the * received PONG is older than half the cluster timeout, send * a new ping now, to ensure all the nodes are pinged without * too big a delay. */ if (node->link && node->ping_sent == 0 && (now - node->pong_received) > server.cluster_node_timeout/2) { clusterSendPing(node->link, CLUSTERMSG_TYPE_PING); continue; } /* Check only if we have an active ping for this instance. */ if (node->ping_sent == 0) continue; /* Compute the delay of the PONG. Note that if we already received * the PONG, then node->ping_sent is zero, so we can't reach this * code at all. */ delay = now - node->ping_sent; if (delay > server.cluster_node_timeout) { /* Timeout reached. Set the node as possibly failing if it is * not already in this state. */ if (!(node->flags & (CLUSTER_NODE_PFAIL|CLUSTER_NODE_FAIL))) { serverLog(LL_DEBUG,"*** NODE %.40s possibly failing", node->name); node->flags |= CLUSTER_NODE_PFAIL; update_state = 1; } } } dictReleaseIterator(di); if (update_state || server.cluster->state == CLUSTER_FAIL) clusterUpdateState(); clusterUpdateReachableNodes(); } /* This function is called before the event handler returns to sleep for * events. It is useful to perform operations that must be done ASAP in * reaction to events fired but that are not safe to perform inside event * handlers, or to perform potentially expensive tasks that we need to do * a single time before replying to clients. */ void clusterBeforeSleep(void) { /* Update the cluster state. */ if (server.cluster->todo_before_sleep & CLUSTER_TODO_UPDATE_STATE) clusterUpdateState(); /* Save the config, possibly using fsync. */ if (server.cluster->todo_before_sleep & CLUSTER_TODO_SAVE_CONFIG) { int fsync = server.cluster->todo_before_sleep & CLUSTER_TODO_FSYNC_CONFIG; clusterSaveConfigOrDie(fsync); } /* Reset our flags (not strictly needed since every single function * called for flags set should be able to clear its flag). */ server.cluster->todo_before_sleep = 0; } void clusterDoBeforeSleep(int flags) { server.cluster->todo_before_sleep |= flags; } /* ----------------------------------------------------------------------------- * Cluster state evaluation function * -------------------------------------------------------------------------- */ /* The following are defines that are only used in the evaluation function * and are based on heuristics. Actually the main point about the rejoin and * writable delay is that they should be a few orders of magnitude larger * than the network latency. */ #define CLUSTER_MAX_REJOIN_DELAY 5000 #define CLUSTER_MIN_REJOIN_DELAY 500 #define CLUSTER_WRITABLE_DELAY 2000 void clusterUpdateState(void) { server.cluster->todo_before_sleep &= ~CLUSTER_TODO_UPDATE_STATE; /* Compute the cluster size, that is the number of nodes * actually participating in the cluster (so nodes in handshake * state are excluded). * * Disque does not use any quorum, so this is only useful in order to * mark nodes from PFAIL to FAIL when we have failure reports from the * majority. 
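(For example, editorial
* note: with five reachable nodes, at least three failure reports are
* needed before PFAIL is promoted to FAIL.)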
The notion of failure is only used for optimizations, * for example if a node appears to be failing we don't try to send * it ACKs or replicate messages to it. */ { dictIterator *di; dictEntry *de; server.cluster->size = 0; di = dictGetSafeIterator(server.cluster->nodes); while((de = dictNext(di)) != NULL) { clusterNode *node = dictGetVal(de); if (!(node->flags & CLUSTER_NODE_HANDSHAKE)) server.cluster->size++; } dictReleaseIterator(di); } } /* ----------------------------------------------------------------------------- * Nodes to string representation functions. * -------------------------------------------------------------------------- */ struct disqueNodeFlags { uint16_t flag; char *name; }; static struct disqueNodeFlags disqueNodeFlagsTable[] = { {CLUSTER_NODE_MYSELF, "myself,"}, {CLUSTER_NODE_PFAIL, "fail?,"}, {CLUSTER_NODE_FAIL, "fail,"}, {CLUSTER_NODE_HANDSHAKE, "handshake,"}, {CLUSTER_NODE_NOADDR, "noaddr,"}, {CLUSTER_NODE_LEAVING, "leaving,"} }; /* Concatenate the comma separated list of node flags to the given SDS * string 'ci'. */ sds representClusterNodeFlags(sds ci, uint16_t flags) { if (flags == 0) { ci = sdscat(ci,"noflags,"); } else { int i, size = sizeof(disqueNodeFlagsTable)/sizeof(struct disqueNodeFlags); for (i = 0; i < size; i++) { struct disqueNodeFlags *nodeflag = disqueNodeFlagsTable + i; if (flags & nodeflag->flag) ci = sdscat(ci, nodeflag->name); } } sdsIncrLen(ci,-1); /* Remove trailing comma. */ return ci; } /* Generate a csv-alike representation of the specified cluster node. * See clusterGenNodesDescription() top comment for more information. * * The function returns the string representation as an SDS string. */ sds clusterGenNodeDescription(clusterNode *node) { sds ci; /* Node coordinates */ ci = sdscatprintf(sdsempty(),"%.40s %s:%d ", node->name, node->ip, node->port); /* Flags */ ci = representClusterNodeFlags(ci, node->flags); /* Latency from the POV of this node, link status */ ci = sdscatprintf(ci," %lld %lld %s", (long long) node->ping_sent, (long long) node->pong_received, (node->link || node->flags & CLUSTER_NODE_MYSELF) ? "connected" : "disconnected"); return ci; } /* Generate a csv-alike representation of the nodes we are aware of, * including the "myself" node, and return an SDS string containing the * representation (it is up to the caller to free it). * * All the nodes matching at least one of the node flags specified in * "filter" are excluded from the output, so using zero as a filter will * include all the known nodes in the representation, including nodes in * the HANDSHAKE state. * * The representation obtained using this function is used for the output * of the CLUSTER NODES function, and as format for the cluster * configuration file (nodes.conf) for a given node. */ sds clusterGenNodesDescription(int filter) { sds ci = sdsempty(), ni; dictIterator *di; dictEntry *de; di = dictGetSafeIterator(server.cluster->nodes); while((de = dictNext(di)) != NULL) { clusterNode *node = dictGetVal(de); if (node->flags & filter) continue; ni = clusterGenNodeDescription(node); ci = sdscatsds(ci,ni); sdsfree(ni); ci = sdscatlen(ci,"\n",1); } dictReleaseIterator(di); return ci; } /* ----------------------------------------------------------------------------- * CLUSTER utility functions * -------------------------------------------------------------------------- */ /* Update server.reachable_nodes and server.reachable_nodes_count with * a list of reachable nodes (not in PFAIL state), excluding this node. 
* Note that nodes in handshake are skipped as not yet part of the cluster, * this means that handshake nodes are not referenced by jobs, and are nodes * that we can safely free. */ void clusterUpdateReachableNodes(void) { dictIterator *di; dictEntry *de; int maxsize = dictSize(server.cluster->nodes) * sizeof(clusterNode*); server.cluster->reachable_nodes = zrealloc(server.cluster->reachable_nodes,maxsize); server.cluster->reachable_nodes_count = 0; di = dictGetSafeIterator(server.cluster->nodes); while((de = dictNext(di)) != NULL) { clusterNode *node = dictGetVal(de); if (node->flags & (CLUSTER_NODE_MYSELF| CLUSTER_NODE_HANDSHAKE| CLUSTER_NODE_LEAVING| CLUSTER_NODE_PFAIL| CLUSTER_NODE_FAIL)) continue; server.cluster->reachable_nodes[server.cluster->reachable_nodes_count++] = node; } dictReleaseIterator(di); } /* Shuffle the array of reachable nodes using the Fisher Yates method so the * caller can just pick the first N to send messages to N random nodes. * */ void clusterShuffleReachableNodes(void) { int r, i; clusterNode *tmp; for(i = server.cluster->reachable_nodes_count - 1; i > 0; i--) { r = rand() % (i + 1); tmp = server.cluster->reachable_nodes[r]; server.cluster->reachable_nodes[r] = server.cluster->reachable_nodes[i]; server.cluster->reachable_nodes[i] = tmp; } } /* ----------------------------------------------------------------------------- * CLUSTER command * -------------------------------------------------------------------------- */ void clusterCommand(client *c) { if (!strcasecmp(c->argv[1]->ptr,"meet") && c->argc == 4) { long long port; if (getLongLongFromObject(c->argv[3], &port) != C_OK) { addReplyErrorFormat(c,"Invalid TCP port specified: %s", (char*)c->argv[3]->ptr); return; } if (clusterStartHandshake(c->argv[2]->ptr,port) == 0 && errno == EINVAL) { addReplyErrorFormat(c,"Invalid node address specified: %s:%s", (char*)c->argv[2]->ptr, (char*)c->argv[3]->ptr); } else { addReply(c,shared.ok); } } else if (!strcasecmp(c->argv[1]->ptr,"nodes") && c->argc == 2) { /* CLUSTER NODES */ robj *o; sds ci = clusterGenNodesDescription(0); o = createObject(OBJ_STRING,ci); addReplyBulk(c,o); decrRefCount(o); } else if (!strcasecmp(c->argv[1]->ptr,"myid") && c->argc == 2) { /* CLUSTER MYID */ addReplyBulkCBuffer(c,myself->name, CLUSTER_NAMELEN); } else if (!strcasecmp(c->argv[1]->ptr,"info") && c->argc == 2) { /* CLUSTER INFO */ char *statestr[] = {"ok","fail","needhelp"}; sds info = sdscatprintf(sdsempty(), "cluster_state:%s\r\n" "cluster_known_nodes:%lu\r\n" "cluster_reachable_nodes:%d\r\n" "cluster_size:%d\r\n" "cluster_stats_messages_sent:%lld\r\n" "cluster_stats_messages_received:%lld\r\n" , statestr[server.cluster->state], dictSize(server.cluster->nodes), server.cluster->reachable_nodes_count, server.cluster->size, server.cluster->stats_bus_messages_sent, server.cluster->stats_bus_messages_received ); addReplyBulkSds(c, info); } else if (!strcasecmp(c->argv[1]->ptr,"saveconfig") && c->argc == 2) { int retval = clusterSaveConfig(1); if (retval == 0) addReply(c,shared.ok); else addReplyErrorFormat(c,"error saving the cluster node config: %s", strerror(errno)); } else if (!strcasecmp(c->argv[1]->ptr,"forget") && c->argc == 3) { /* CLUSTER FORGET */ clusterNode *n; if (sdslen(c->argv[2]->ptr) != CLUSTER_NAMELEN) { addReplyError(c,"Invalid node identifier"); return; } n = clusterLookupNode(c->argv[2]->ptr); if (!n) { addReplyErrorFormat(c,"Unknown node %s", (char*)c->argv[2]->ptr); return; } else if (n == myself) { addReplyError(c,"I tried hard but I can't forget myself..."); return; 
} clusterBlacklistAddNode(n); clusterDelNode(n); clusterUpdateReachableNodes(); clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE| CLUSTER_TODO_SAVE_CONFIG); addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"reset") && (c->argc == 2 || c->argc == 3)) { /* CLUSTER RESET [SOFT|HARD] */ int hard = 0; /* Parse soft/hard argument. Default is soft. */ if (c->argc == 3) { if (!strcasecmp(c->argv[2]->ptr,"hard")) { hard = 1; } else if (!strcasecmp(c->argv[2]->ptr,"soft")) { hard = 0; } else { addReply(c,shared.syntaxerr); return; } } clusterReset(hard); addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"leaving") && (c->argc == 2 || c->argc == 3)) { /* CLUSTER LEAVING [yes|no] */ if (c->argc == 3) { int oldflags = myself->flags; if (!strcasecmp(c->argv[2]->ptr,"yes")) { myself->flags |= CLUSTER_NODE_LEAVING; addReply(c,shared.ok); } else if (!strcasecmp(c->argv[2]->ptr,"no")) { myself->flags &= ~CLUSTER_NODE_LEAVING; addReply(c,shared.ok); } else { addReplyError(c, "Wrong argument for CLUSTER LEAVING. Use 'yes' or 'no'"); } if (oldflags != myself->flags) clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG); } else { addReplySds(c, (myself->flags & CLUSTER_NODE_LEAVING) ? sdsnew("+yes\r\n") : sdsnew("+no\r\n") ); } } else { addReplyError(c,"Wrong CLUSTER subcommand or number of arguments"); } } /* HELLO handshake command. Returns an array with: * 1) Reply version. 1 for this version. * 2) This node ID. * The following elements list all the available nodes with * ID, IP, port, priority. A smaller priority means better node * in terms of availability / latency. */ void helloCommand(client *c) { addReplyMultiBulkLen(c,2+dictSize(server.cluster->nodes)); addReplyLongLong(c,1); /* Version. */ addReplyBulkCBuffer(c,myself->name,CLUSTER_NAMELEN); /* My ID. */ dictForeach(server.cluster->nodes,de) clusterNode *node = dictGetVal(de); int priority = 1; if (node->link == NULL && node != server.cluster->myself) priority = 5; if (node->flags & CLUSTER_NODE_PFAIL) priority = 10; if (node->flags & (CLUSTER_NODE_FAIL|CLUSTER_NODE_LEAVING)) priority = 100; addReplyMultiBulkLen(c,4); addReplyBulkCBuffer(c,node->name,CLUSTER_NAMELEN); /* ID. */ addReplyBulkCString(c,node->ip); /* IP address. */ addReplyBulkLongLong(c,node->port); /* TCP port. */ addReplyBulkLongLong(c,priority); /* Priority. */ dictEndForeach } disque-1.0-rc1/src/cluster.h000066400000000000000000000257721264176561100157760ustar00rootroot00000000000000#ifndef __CLUSTER_H #define __CLUSTER_H #include "job.h" #include "queue.h" /*----------------------------------------------------------------------------- * Disque cluster data structures, defines, exported API. *----------------------------------------------------------------------------*/ #define CLUSTER_OK 0 /* Everything looks ok */ #define CLUSTER_FAIL 1 /* The cluster can't work */ #define CLUSTER_NAMELEN 40 /* sha1 hex length */ #define CLUSTER_PORT_INCR 10000 /* Cluster port = baseport + PORT_INCR */ /* The following defines are amount of time, sometimes expressed as * multiplicators of the node timeout value (when ending with MULT). */ #define CLUSTER_DEFAULT_NODE_TIMEOUT 15000 #define CLUSTER_FAIL_REPORT_VALIDITY_MULT 2 /* Fail report validity. */ #define CLUSTER_FAIL_UNDO_TIME_MULT 2 /* Undo fail if master is back. */ struct clusterNode; /* clusterLink encapsulates everything needed to talk with a remote node. 
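*
* Illustrative sketch (editorial addition) of how a link is created and
* wired into the event loop, mirroring the connect path in clusterCron():
*
*   clusterLink *link = createClusterLink(node);
*   link->fd = fd;
*   node->link = link;
*   aeCreateFileEvent(server.el, link->fd, AE_READABLE,
*                     clusterReadHandler, link);
*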
*/ typedef struct clusterLink { mstime_t ctime; /* Link creation time */ int fd; /* TCP socket file descriptor */ sds sndbuf; /* Packet send buffer */ sds rcvbuf; /* Packet reception buffer */ struct clusterNode *node; /* Node related to this link if any, or NULL */ } clusterLink; /* Cluster node flags and macros. */ #define CLUSTER_NODE_PFAIL (1<<0) /* Failure? Need acknowledge */ #define CLUSTER_NODE_FAIL (1<<1) /* The node is believed to be malfunctioning */ #define CLUSTER_NODE_MYSELF (1<<2) /* This node is myself */ #define CLUSTER_NODE_HANDSHAKE (1<<3) /* Node in handshake state. */ #define CLUSTER_NODE_NOADDR (1<<4) /* Node address unknown */ #define CLUSTER_NODE_MEET (1<<5) /* Send a MEET message to this node */ #define CLUSTER_NODE_DELETED (1<<6) /* Node no longer part of the cluster */ #define CLUSTER_NODE_LEAVING (1<<7) /* Node is leaving the cluster. */ #define CLUSTER_NODE_NULL_NAME "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" #define nodeInHandshake(n) ((n)->flags & CLUSTER_NODE_HANDSHAKE) #define nodeHasAddr(n) (!((n)->flags & CLUSTER_NODE_NOADDR)) #define nodeWithoutAddr(n) ((n)->flags & CLUSTER_NODE_NOADDR) #define nodeTimedOut(n) ((n)->flags & CLUSTER_NODE_PFAIL) #define nodeFailed(n) ((n)->flags & CLUSTER_NODE_FAIL) #define nodeLeaving(n) ((n)->flags & CLUSTER_NODE_LEAVING) #define myselfLeaving() nodeLeaving(server.cluster->myself) /* This structure represents elements of node->fail_reports. */ typedef struct clusterNodeFailReport { struct clusterNode *node; /* Node reporting the failure condition. */ mstime_t time; /* Time of the last report from this node. */ } clusterNodeFailReport; typedef struct clusterNode { char name[CLUSTER_NAMELEN]; /* Node name, hex string, sha1-size */ mstime_t ctime; /* Node object creation time. */ int flags; /* CLUSTER_NODE_... */ mstime_t ping_sent; /* Unix time we sent latest ping */ mstime_t pong_received; /* Unix time we received the pong */ mstime_t fail_time; /* Unix time when FAIL flag was set */ char ip[NET_IP_STR_LEN]; /* Latest known IP address of this node */ int port; /* Latest known port of this node */ clusterLink *link; /* TCP/IP link with this node */ list *fail_reports; /* List of nodes signaling this as failing */ } clusterNode; typedef struct clusterState { clusterNode *myself; /* This node */ int state; /* CLUSTER_OK, CLUSTER_FAIL, ... */ int size; /* Num of known cluster nodes */ dict *nodes; /* Hash table of name -> clusterNode structures */ dict *deleted_nodes; /* Nodes removed from the cluster. */ dict *nodes_black_list; /* Nodes we don't re-add for a few seconds. */ int todo_before_sleep; /* Things to do in clusterBeforeSleep(). */ /* Reachable nodes array. This array only lists reachable nodes * excluding our own node, and is used in order to quickly select * random receivers of messages to populate. */ clusterNode **reachable_nodes; int reachable_nodes_count; /* Statistics. */ long long stats_bus_messages_sent; /* Num of msg sent via cluster bus. */ long long stats_bus_messages_received; /* Num of msg rcvd via cluster bus.*/ } clusterState; /* clusterState todo_before_sleep flags. */ #define CLUSTER_TODO_UPDATE_STATE (1<<0) #define CLUSTER_TODO_SAVE_CONFIG (1<<1) #define CLUSTER_TODO_FSYNC_CONFIG (1<<2) /* Disque cluster messages header */ /* Note that the PING, PONG and MEET messages are actually the same exact * kind of packet. 
PONG is the reply to ping, in the exact format as a PING, * while MEET is a special PING that forces the receiver to add the sender * as a node (if it is not already in the list). */ #define CLUSTERMSG_TYPE_PING 0 /* Ping. */ #define CLUSTERMSG_TYPE_PONG 1 /* Reply to Ping. */ #define CLUSTERMSG_TYPE_MEET 2 /* Meet "let's join" message. */ #define CLUSTERMSG_TYPE_FAIL 3 /* Mark node xxx as failing. */ #define CLUSTERMSG_TYPE_REPLJOB 4 /* Add a job to receiver. */ #define CLUSTERMSG_TYPE_GOTJOB 5 /* REPLJOB received acknowledge. */ #define CLUSTERMSG_TYPE_ENQUEUE 6 /* Enqueue the specified job. */ #define CLUSTERMSG_TYPE_QUEUED 7 /* Update your job qtime. */ #define CLUSTERMSG_TYPE_SETACK 8 /* Move job state as ACKed. */ #define CLUSTERMSG_TYPE_WILLQUEUE 9 /* I'll queue this job, ok? */ #define CLUSTERMSG_TYPE_GOTACK 10 /* Acknowledge SETACK. */ #define CLUSTERMSG_TYPE_DELJOB 11 /* Delete the specified job. */ #define CLUSTERMSG_TYPE_NEEDJOBS 12 /* I need jobs for some queue. */ #define CLUSTERMSG_TYPE_YOURJOBS 13 /* NEEDJOBS reply with jobs. */ #define CLUSTERMSG_TYPE_WORKING 14 /* Postpone re-queueing & dequeue */ #define CLUSTERMSG_TYPE_PAUSE 15 /* Change queue paused state. */ /* Initially we don't know our "name", but we'll find it once we connect * to the first node, using the getsockname() function. Then we'll use this * address for all the next messages. */ typedef struct { char nodename[CLUSTER_NAMELEN]; uint32_t ping_sent; uint32_t pong_received; char ip[NET_IP_STR_LEN]; /* IP address last time it was seen */ uint16_t port; /* port last time it was seen */ uint16_t flags; /* node->flags copy */ uint16_t notused1; /* Some room for future improvements. */ uint32_t notused2; } clusterMsgDataGossip; typedef struct { char nodename[CLUSTER_NAMELEN]; } clusterMsgDataFail; /* This data section is used in different message types where we need to * transmit one or multiple full jobs. * * Used by: REPLJOB, YOURJOBS. */ typedef struct { uint32_t numjobs; /* Number of jobs stored here. */ uint32_t datasize; /* Number of bytes following to describe jobs. */ /* The variable data section here is composed of 4 bytes little endian * prefixed length + serialized job data for each job: * [4 bytes len] + [serialized job] + [4 bytes len] + [serialized job] ... * For a total of exactly 'datasize' bytes. */ unsigned char jobs_data[8]; /* Defined as 8 bytes just for alignment. */ } clusterMsgDataJob; /* This data section is used when we need to send just a job ID. * * Used by: GOTJOB, SETACK, and many more. */ typedef struct { char id[JOB_ID_LEN]; uint32_t aux; /* Optional field: For SETACK: Number of nodes that may have this message. For ENQUEUE: Delay starting from msg reception. */ } clusterMsgDataJobID; /* This data section is used by NEEDJOBS to specify in what queue we need * a job, and how many jobs we request. The same format is also used by * the PAUSE command to specify the queue to change the paused state. */ typedef struct { uint32_t aux; /* For NEEDJOBS, how many jobs we request. * For PAUSE, the pause flags to set on the queue. */ uint32_t qnamelen; /* Queue name total length. */ char qname[8]; /* Defined as 8 bytes just for alignment. */ } clusterMsgDataQueueOp; union clusterMsgData { /* PING, MEET and PONG. */ struct { /* Array of N clusterMsgDataGossip structures */ clusterMsgDataGossip gossip[1]; } ping; /* FAIL. */ struct { clusterMsgDataFail about; } fail; /* Messages with one or more full jobs. */ struct { clusterMsgDataJob serialized; } jobs; /* Messages with a single Job ID. 
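*
* Editorial sketch: a sender fills this member exactly as
* clusterSendJobIDMessage() does in cluster.c:
*
*   memcpy(hdr->data.jobid.job.id, id, JOB_ID_LEN);
*   hdr->data.jobid.job.aux = htonl(aux);
*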
*/ struct { clusterMsgDataJobID job; } jobid; /* Messages requesting jobs (NEEDJOBS). */ struct { clusterMsgDataQueueOp about; } queueop; }; #define CLUSTER_PROTO_VER 1 /* Cluster bus protocol version. */ typedef struct { char sig[4]; /* Siganture "DbuZ" (Disque Cluster message bus). */ uint32_t totlen; /* Total length of this message */ uint16_t ver; /* Protocol version, currently set to 0. */ uint16_t notused0; /* 2 bytes not used. */ uint16_t type; /* Message type */ uint16_t count; /* Only used for some kind of messages. */ char sender[CLUSTER_NAMELEN]; /* Name of the sender node */ char myip[NET_IP_STR_LEN]; /* My IP, if not all zeroed. */ char notused1[34]; /* 34 bytes reserved for future usage. */ uint16_t port; /* Sender TCP base port */ uint16_t flags; /* Sender node flags */ unsigned char state; /* Cluster state from the POV of the sender */ unsigned char mflags[3]; /* Message flags: CLUSTERMSG_FLAG[012]_... */ union clusterMsgData data; } clusterMsg; #define CLUSTERMSG_MIN_LEN (sizeof(clusterMsg)-sizeof(union clusterMsgData)) /* Message flags better specify the packet content or are used to * provide some information about the node state. */ #define CLUSTERMSG_NOFLAGS 0 #define CLUSTERMSG_FLAG0_NOREPLY (1<<0) /* Don't reply to this message. */ #define CLUSTERMSG_FLAG0_INCR_NACKS (1<<1) /* Increment job nacks counter. */ #define CLUSTERMSG_FLAG0_INCR_DELIV (1<<2) /* Increment job delivers counter. */ /*----------------------------------------------------------------------------- * Exported API. *----------------------------------------------------------------------------*/ extern clusterNode *myself; clusterNode *clusterLookupNode(char *name); void clusterUpdateReachableNodes(void); int clusterReplicateJob(job *j, int repl, int noreply); void clusterSendEnqueue(clusterNode *node, job *j, uint32_t delay); void clusterBroadcastQueued(job *j, unsigned char flags); void clusterBroadcastWorking(job *j); void clusterBroadcastDelJob(job *j); void clusterSendWillQueue(job *j); void clusterSendSetAck(clusterNode *node, job *j); void clusterSendNeedJobs(robj *qname, int numjobs, dict *nodes); void clusterSendYourJobs(clusterNode *node, job **jobs, uint32_t count); void clusterBroadcastJobIDMessage(dict *nodes, char *id, int type, uint32_t aux, unsigned char flags); void clusterBroadcastPause(robj *qname, uint32_t flags); #endif /* __CLUSTER_H */ disque-1.0-rc1/src/config.c000066400000000000000000001563011264176561100155460ustar00rootroot00000000000000/* Configuration file parsing and CONFIG GET/SET commands implementation. * * Copyright (c) 2009-2012, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "server.h" #include "cluster.h" #include <fcntl.h> #include <sys/stat.h> /*----------------------------------------------------------------------------- * Config file name-value maps. *----------------------------------------------------------------------------*/ typedef struct configEnum { const char *name; const int val; } configEnum; configEnum maxmemory_policy_enum[] = { {"acks",MAXMEMORY_ACKS}, {"noeviction",MAXMEMORY_NO_EVICTION}, {NULL, 0} }; configEnum syslog_facility_enum[] = { {"user", LOG_USER}, {"local0", LOG_LOCAL0}, {"local1", LOG_LOCAL1}, {"local2", LOG_LOCAL2}, {"local3", LOG_LOCAL3}, {"local4", LOG_LOCAL4}, {"local5", LOG_LOCAL5}, {"local6", LOG_LOCAL6}, {"local7", LOG_LOCAL7}, {NULL, 0} }; configEnum loglevel_enum[] = { {"debug", LL_DEBUG}, {"verbose", LL_VERBOSE}, {"notice", LL_NOTICE}, {"warning", LL_WARNING}, {NULL,0} }; configEnum aof_fsync_enum[] = { {"everysec", AOF_FSYNC_EVERYSEC}, {"always", AOF_FSYNC_ALWAYS}, {"no", AOF_FSYNC_NO}, {NULL, 0} }; /* Output buffer limits presets. */ clientBufferLimitsConfig clientBufferLimitsDefaults[CLIENT_TYPE_COUNT] = { {0, 0, 0} /* normal */ }; /*----------------------------------------------------------------------------- * Enum access functions *----------------------------------------------------------------------------*/ /* Get enum value from name. If there is no match INT_MIN is returned. */ int configEnumGetValue(configEnum *ce, char *name) { while(ce->name != NULL) { if (!strcasecmp(ce->name,name)) return ce->val; ce++; } return INT_MIN; } /* Get enum name from value. If no match is found NULL is returned. */ const char *configEnumGetName(configEnum *ce, int val) { while(ce->name != NULL) { if (ce->val == val) return ce->name; ce++; } return NULL; } /* Wrapper for configEnumGetName() returning "unknown" instead of NULL if * there is no match. */ const char *configEnumGetNameOrUnknown(configEnum *ce, int val) { const char *name = configEnumGetName(ce,val); return name ? name : "unknown"; } /* Used for INFO generation. 
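For example (editorial
* note): with maxmemory-policy set to "acks" this returns the string
* "acks", while an unrecognized value yields "unknown".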
*/ const char *maxmemoryToString(void) { return configEnumGetNameOrUnknown(maxmemory_policy_enum,server.maxmemory_policy); } /*----------------------------------------------------------------------------- * Config file parsing *----------------------------------------------------------------------------*/ int yesnotoi(char *s) { if (!strcasecmp(s,"yes")) return 1; else if (!strcasecmp(s,"no")) return 0; else return -1; } void loadServerConfigFromString(char *config) { char *err = NULL; int linenum = 0, totlines, i; sds *lines; lines = sdssplitlen(config,strlen(config),"\n",1,&totlines); for (i = 0; i < totlines; i++) { sds *argv; int argc; linenum = i+1; lines[i] = sdstrim(lines[i]," \t\r\n"); /* Skip comments and blank lines */ if (lines[i][0] == '#' || lines[i][0] == '\0') continue; /* Split into arguments */ argv = sdssplitargs(lines[i],&argc); if (argv == NULL) { err = "Unbalanced quotes in configuration line"; goto loaderr; } /* Skip this line if the resulting command vector is empty. */ if (argc == 0) { sdsfreesplitres(argv,argc); continue; } sdstolower(argv[0]); /* Execute config directives */ if (!strcasecmp(argv[0],"timeout") && argc == 2) { server.maxidletime = atoi(argv[1]); if (server.maxidletime < 0) { err = "Invalid timeout value"; goto loaderr; } } else if (!strcasecmp(argv[0],"tcp-keepalive") && argc == 2) { server.tcpkeepalive = atoi(argv[1]); if (server.tcpkeepalive < 0) { err = "Invalid tcp-keepalive value"; goto loaderr; } } else if (!strcasecmp(argv[0],"port") && argc == 2) { server.port = atoi(argv[1]); if (server.port < 0 || server.port > 65535) { err = "Invalid port"; goto loaderr; } } else if (!strcasecmp(argv[0],"tcp-backlog") && argc == 2) { server.tcp_backlog = atoi(argv[1]); if (server.tcp_backlog < 0) { err = "Invalid backlog value"; goto loaderr; } } else if (!strcasecmp(argv[0],"bind") && argc >= 2) { int j, addresses = argc-1; if (addresses > CONFIG_BINDADDR_MAX) { err = "Too many bind addresses specified"; goto loaderr; } for (j = 0; j < addresses; j++) server.bindaddr[j] = zstrdup(argv[j+1]); server.bindaddr_count = addresses; } else if (!strcasecmp(argv[0],"unixsocket") && argc == 2) { server.unixsocket = zstrdup(argv[1]); } else if (!strcasecmp(argv[0],"unixsocketperm") && argc == 2) { errno = 0; server.unixsocketperm = (mode_t)strtol(argv[1], NULL, 8); if (errno || server.unixsocketperm > 0777) { err = "Invalid socket file permissions"; goto loaderr; } } else if (!strcasecmp(argv[0],"dir") && argc == 2) { if (chdir(argv[1]) == -1) { serverLog(LL_WARNING,"Can't chdir to '%s': %s", argv[1], strerror(errno)); exit(1); } } else if (!strcasecmp(argv[0],"loglevel") && argc == 2) { server.verbosity = configEnumGetValue(loglevel_enum,argv[1]); if (server.verbosity == INT_MIN) { err = "Invalid log level. " "Must be one of debug, verbose, notice, warning"; goto loaderr; } } else if (!strcasecmp(argv[0],"logfile") && argc == 2) { FILE *logfp; zfree(server.logfile); server.logfile = zstrdup(argv[1]); if (server.logfile[0] != '\0') { /* Test if we are able to open the file. The server will not * be able to abort just for this problem later... 
*/ logfp = fopen(server.logfile,"a"); if (logfp == NULL) { err = sdscatprintf(sdsempty(), "Can't open the log file: %s", strerror(errno)); goto loaderr; } fclose(logfp); } } else if (!strcasecmp(argv[0],"syslog-enabled") && argc == 2) { if ((server.syslog_enabled = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"syslog-ident") && argc == 2) { if (server.syslog_ident) zfree(server.syslog_ident); server.syslog_ident = zstrdup(argv[1]); } else if (!strcasecmp(argv[0],"syslog-facility") && argc == 2) { server.syslog_facility = configEnumGetValue(syslog_facility_enum,argv[1]); if (server.syslog_facility == INT_MIN) { err = "Invalid log facility. Must be one of USER or between LOCAL0-LOCAL7"; goto loaderr; } } else if (!strcasecmp(argv[0],"databases") && argc == 2) { server.dbnum = atoi(argv[1]); if (server.dbnum < 1) { err = "Invalid number of databases"; goto loaderr; } } else if (!strcasecmp(argv[0],"include") && argc == 2) { loadServerConfig(argv[1],NULL); } else if (!strcasecmp(argv[0],"maxclients") && argc == 2) { server.maxclients = atoi(argv[1]); if (server.maxclients < 1) { err = "Invalid max clients limit"; goto loaderr; } } else if (!strcasecmp(argv[0],"maxmemory") && argc == 2) { /* To check for negative values, we need a long long first */ long long maxmemory = memtoll(argv[1],NULL); server.maxmemory = maxmemory; if (maxmemory <= 0) { err = "You need to specify a memory limit greater than zero"; goto loaderr; } } else if (!strcasecmp(argv[0],"maxmemory-policy") && argc == 2) { server.maxmemory_policy = configEnumGetValue(maxmemory_policy_enum,argv[1]); if (server.maxmemory_policy == INT_MIN) { err = "Invalid maxmemory policy"; goto loaderr; } } else if (!strcasecmp(argv[0],"maxmemory-samples") && argc == 2) { server.maxmemory_samples = atoi(argv[1]); if (server.maxmemory_samples <= 0) { err = "maxmemory-samples must be 1 or greater"; goto loaderr; } } else if (!strcasecmp(argv[0],"activerehashing") && argc == 2) { if ((server.activerehashing = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"daemonize") && argc == 2) { if ((server.daemonize = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"hz") && argc == 2) { server.hz = atoi(argv[1]); if (server.hz < CONFIG_MIN_HZ) server.hz = CONFIG_MIN_HZ; if (server.hz > CONFIG_MAX_HZ) server.hz = CONFIG_MAX_HZ; } else if (!strcasecmp(argv[0],"appendonly") && argc == 2) { int yes; if ((yes = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } server.aof_state = yes ? 
AOF_ON : AOF_OFF; } else if (!strcasecmp(argv[0],"appendfilename") && argc == 2) { if (!pathIsBaseName(argv[1])) { err = "appendfilename can't be a path, just a filename"; goto loaderr; } zfree(server.aof_filename); server.aof_filename = zstrdup(argv[1]); } else if (!strcasecmp(argv[0],"no-appendfsync-on-rewrite") && argc == 2) { if ((server.aof_no_fsync_on_rewrite= yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"appendfsync") && argc == 2) { server.aof_fsync = configEnumGetValue(aof_fsync_enum,argv[1]); if (server.aof_fsync == INT_MIN) { err = "argument must be 'no', 'always' or 'everysec'"; goto loaderr; } } else if (!strcasecmp(argv[0],"auto-aof-rewrite-percentage") && argc == 2) { server.aof_rewrite_perc = atoi(argv[1]); if (server.aof_rewrite_perc < 0) { err = "Invalid negative percentage for AOF auto rewrite"; goto loaderr; } } else if (!strcasecmp(argv[0],"auto-aof-rewrite-min-size") && argc == 2) { server.aof_rewrite_min_size = memtoll(argv[1],NULL); } else if (!strcasecmp(argv[0],"aof-rewrite-incremental-fsync") && argc == 2) { if ((server.aof_rewrite_incremental_fsync = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"aof-load-truncated") && argc == 2) { if ((server.aof_load_truncated = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"aof-enqueue-jobs-once") && argc == 2) { if ((server.aof_enqueue_jobs_once = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"requirepass") && argc == 2) { if (strlen(argv[1]) > CONFIG_AUTHPASS_MAX_LEN) { err = "Password is longer than CONFIG_AUTHPASS_MAX_LEN"; goto loaderr; } server.requirepass = zstrdup(argv[1]); } else if (!strcasecmp(argv[0],"pidfile") && argc == 2) { zfree(server.pidfile); server.pidfile = zstrdup(argv[1]); } else if (!strcasecmp(argv[0],"rename-command") && argc == 3) { struct serverCommand *cmd = lookupCommand(argv[1]); int retval; if (!cmd) { err = "No such command in rename-command"; goto loaderr; } /* If the target command name is the empty string we just * remove it from the command table. */ retval = dictDelete(server.commands, argv[1]); serverAssert(retval == DICT_OK); /* Otherwise we re-add the command under a different name. 
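For example (editorial
* note), either of the following directives is valid in the config file:
*
*   rename-command CONFIG ""            (hides the command entirely)
*   rename-command CONFIG secretconfig  (exposes it under a new name)
*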
*/ if (sdslen(argv[2]) != 0) { sds copy = sdsdup(argv[2]); retval = dictAdd(server.commands, copy, cmd); if (retval != DICT_OK) { sdsfree(copy); err = "Target command name already exists"; goto loaderr; } } } else if (!strcasecmp(argv[0],"cluster-config-file") && argc == 2) { zfree(server.cluster_configfile); server.cluster_configfile = zstrdup(argv[1]); } else if (!strcasecmp(argv[0],"cluster-node-timeout") && argc == 2) { server.cluster_node_timeout = strtoll(argv[1],NULL,10); if (server.cluster_node_timeout <= 0) { err = "cluster node timeout must be 1 or greater"; goto loaderr; } } else if (!strcasecmp(argv[0],"slowlog-log-slower-than") && argc == 2) { server.slowlog_log_slower_than = strtoll(argv[1],NULL,10); } else if (!strcasecmp(argv[0],"latency-monitor-threshold") && argc == 2) { server.latency_monitor_threshold = strtoll(argv[1],NULL,10); if (server.latency_monitor_threshold < 0) { err = "The latency threshold can't be negative"; goto loaderr; } } else if (!strcasecmp(argv[0],"slowlog-max-len") && argc == 2) { server.slowlog_max_len = strtoll(argv[1],NULL,10); } else if (!strcasecmp(argv[0],"client-output-buffer-limit") && argc == 5) { int class = getClientTypeByName(argv[1]); unsigned long long hard, soft; int soft_seconds; if (class == -1) { err = "Unrecognized client limit class"; goto loaderr; } hard = memtoll(argv[2],NULL); soft = memtoll(argv[3],NULL); soft_seconds = atoi(argv[4]); if (soft_seconds < 0) { err = "Negative number of seconds in soft limit is invalid"; goto loaderr; } server.client_obuf_limits[class].hard_limit_bytes = hard; server.client_obuf_limits[class].soft_limit_bytes = soft; server.client_obuf_limits[class].soft_limit_seconds = soft_seconds; } else { err = "Bad directive or wrong number of arguments"; goto loaderr; } sdsfreesplitres(argv,argc); } sdsfreesplitres(lines,totlines); return; loaderr: fprintf(stderr, "\n*** FATAL CONFIG FILE ERROR ***\n"); fprintf(stderr, "Reading the configuration file, at line %d\n", linenum); fprintf(stderr, ">>> '%s'\n", lines[i]); fprintf(stderr, "%s\n", err); exit(1); } /* Load the server configuration from the specified filename. * The function appends the additional configuration directives stored * in the 'options' string to the config file before loading. * * Both filename and options can be NULL, in such a case they are considered * empty. This way loadServerConfig can be used to just load a file or * just load a string. 
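*
* Usage sketch (editorial addition; path and directives are made up for
* the example):
*
*   loadServerConfig("/etc/disque/disque.conf",
*                    "port 7711\nloglevel verbose");
*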
*/ void loadServerConfig(char *filename, char *options) { sds config = sdsempty(); char buf[CONFIG_MAX_LINE+1]; /* Load the file content */ if (filename) { FILE *fp; if (filename[0] == '-' && filename[1] == '\0') { fp = stdin; } else { if ((fp = fopen(filename,"r")) == NULL) { serverLog(LL_WARNING, "Fatal error, can't open config file '%s'", filename); exit(1); } } while(fgets(buf,CONFIG_MAX_LINE+1,fp) != NULL) config = sdscat(config,buf); if (fp != stdin) fclose(fp); } /* Append the additional options */ if (options) { config = sdscat(config,"\n"); config = sdscat(config,options); } loadServerConfigFromString(config); sdsfree(config); } /*----------------------------------------------------------------------------- * CONFIG SET implementation *----------------------------------------------------------------------------*/ #define config_set_bool_field(_name,_var) \ } else if (!strcasecmp(c->argv[2]->ptr,_name)) { \ int yn = yesnotoi(o->ptr); \ if (yn == -1) goto badfmt; \ _var = yn; #define config_set_numerical_field(_name,_var,min,max) \ } else if (!strcasecmp(c->argv[2]->ptr,_name)) { \ if (getLongLongFromObject(o,&ll) == C_ERR || ll < 0) goto badfmt; \ if (min != LLONG_MIN && ll < min) goto badfmt; \ if (max != LLONG_MAX && ll > max) goto badfmt; \ _var = ll; #define config_set_memory_field(_name,_var) \ } else if (!strcasecmp(c->argv[2]->ptr,_name)) { \ ll = memtoll(o->ptr,&err); \ if (err || ll < 0) goto badfmt; \ _var = ll; #define config_set_enum_field(_name,_var,_enumvar) \ } else if (!strcasecmp(c->argv[2]->ptr,_name)) { \ int enumval = configEnumGetValue(_enumvar,o->ptr); \ if (enumval == INT_MIN) goto badfmt; \ _var = enumval; #define config_set_special_field(_name) \ } else if (!strcasecmp(c->argv[2]->ptr,_name)) { #define config_set_else } else void configSetCommand(client *c) { robj *o; long long ll; int err; serverAssertWithInfo(c,c->argv[2],sdsEncodedObject(c->argv[2])); serverAssertWithInfo(c,c->argv[3],sdsEncodedObject(c->argv[3])); o = c->argv[3]; if (0) { /* this starts the config_set macros else-if chain. */ /* Special fields that can't be handled with general macros. */ config_set_special_field("requirepass") { if (sdslen(o->ptr) > CONFIG_AUTHPASS_MAX_LEN) goto badfmt; zfree(server.requirepass); server.requirepass = ((char*)o->ptr)[0] ? zstrdup(o->ptr) : NULL; } config_set_special_field("maxclients") { int orig_value = server.maxclients; if (getLongLongFromObject(o,&ll) == C_ERR || ll < 1) goto badfmt; /* Try to check if the OS is capable of supporting so many FDs. */ server.maxclients = ll; if (ll > orig_value) { adjustOpenFilesLimit(); if (server.maxclients != ll) { addReplyErrorFormat(c,"The operating system is not able to handle the specified number of clients, try with %d", server.maxclients); server.maxclients = orig_value; return; } if ((unsigned int) aeGetSetSize(server.el) < server.maxclients + CONFIG_FDSET_INCR) { if (aeResizeSetSize(server.el, server.maxclients + CONFIG_FDSET_INCR) == AE_ERR) { addReplyError(c,"The event loop API used by Disque is not able to handle the specified number of clients"); server.maxclients = orig_value; return; } } } } config_set_special_field("appendonly") { int enable = yesnotoi(o->ptr); if (enable == -1) goto badfmt; if (enable == 0 && server.aof_state != AOF_OFF) { stopAppendOnly(); } else if (enable && server.aof_state == AOF_OFF) { if (startAppendOnly() == C_ERR) { addReplyError(c, "Unable to turn on AOF. 
Check server logs."); return; } } } config_set_special_field("dir") { if (chdir((char*)o->ptr) == -1) { addReplyErrorFormat(c,"Changing directory: %s", strerror(errno)); return; } } config_set_special_field("client-output-buffer-limit") { int vlen, j; sds *v = sdssplitlen(o->ptr,sdslen(o->ptr)," ",1,&vlen); /* We need a multiple of 4: */ if (vlen % 4) { sdsfreesplitres(v,vlen); goto badfmt; } /* Sanity check of single arguments, so that we either refuse the * whole configuration string or accept it all, even if a single * error in a single client class is present. */ for (j = 0; j < vlen; j++) { char *eptr; long val; if ((j % 4) == 0) { if (getClientTypeByName(v[j]) == -1) { sdsfreesplitres(v,vlen); goto badfmt; } } else { val = strtoll(v[j], &eptr, 10); if (eptr[0] != '\0' || val < 0) { sdsfreesplitres(v,vlen); goto badfmt; } } } /* Finally set the new config */ for (j = 0; j < vlen; j += 4) { int class; unsigned long long hard, soft; int soft_seconds; class = getClientTypeByName(v[j]); hard = strtoll(v[j+1],NULL,10); soft = strtoll(v[j+2],NULL,10); soft_seconds = strtoll(v[j+3],NULL,10); server.client_obuf_limits[class].hard_limit_bytes = hard; server.client_obuf_limits[class].soft_limit_bytes = soft; server.client_obuf_limits[class].soft_limit_seconds = soft_seconds; } sdsfreesplitres(v,vlen); /* Boolean fields. * config_set_bool_field(name,var). */ } config_set_bool_field( "aof-rewrite-incremental-fsync",server.aof_rewrite_incremental_fsync) { } config_set_bool_field( "aof-load-truncated",server.aof_load_truncated) { } config_set_bool_field( "activerehashing",server.activerehashing) { } config_set_bool_field( "tcp-keepalive",server.tcpkeepalive) { /* Numerical fields. * config_set_numerical_field(name,var,min,max) */ } config_set_numerical_field( "maxmemory-samples",server.maxmemory_samples,1,LLONG_MAX) { } config_set_numerical_field( "timeout",server.maxidletime,0,LONG_MAX) { } config_set_numerical_field( "auto-aof-rewrite-percentage",server.aof_rewrite_perc,0,LLONG_MAX){ } config_set_numerical_field( "auto-aof-rewrite-min-size",server.aof_rewrite_min_size,0,LLONG_MAX) { } config_set_numerical_field( "slowlog-log-slower-than",server.slowlog_log_slower_than,0,LLONG_MAX) { } config_set_numerical_field( "slowlog-max-len",ll,0,LLONG_MAX) { /* Cast to unsigned. */ server.slowlog_max_len = (unsigned)ll; } config_set_numerical_field( "latency-monitor-threshold",server.latency_monitor_threshold,0,LLONG_MAX){ } config_set_numerical_field( "cluster-node-timeout",server.cluster_node_timeout,0,LLONG_MAX) { } config_set_numerical_field( "hz",server.hz,0,LLONG_MAX) { /* Hz is more an hint from the user, so we accept values out of range * but cap them to reasonable values. */ if (server.hz < CONFIG_MIN_HZ) server.hz = CONFIG_MIN_HZ; if (server.hz > CONFIG_MAX_HZ) server.hz = CONFIG_MAX_HZ; } config_set_numerical_field( "watchdog-period",ll,0,LLONG_MAX) { if (ll) enableWatchdog(ll); else disableWatchdog(); /* Memory fields. * config_set_memory_field(name,var) */ } config_set_memory_field("maxmemory",server.maxmemory) { if (server.maxmemory) { if (server.maxmemory < zmalloc_used_memory()) { serverLog(LL_WARNING,"WARNING: the new maxmemory value set via CONFIG SET is smaller than the current memory usage. This will result in jobs eviction and/or inability to accept new jobs depending on the maxmemory-policy."); } freeMemoryIfNeeded(); } /* Enumeration fields. 
* config_set_enum_field(name,var,enum_var) */ } config_set_enum_field( "loglevel",server.verbosity,loglevel_enum) { } config_set_enum_field( "maxmemory-policy",server.maxmemory_policy,maxmemory_policy_enum) { } config_set_enum_field( "appendfsync",server.aof_fsync,aof_fsync_enum) { /* Everything else is an error... */ } config_set_else { addReplyErrorFormat(c,"Unsupported CONFIG parameter: %s", (char*)c->argv[2]->ptr); return; } /* On success we just return a generic OK for all the options. */ addReply(c,shared.ok); return; badfmt: /* Bad format errors */ addReplyErrorFormat(c,"Invalid argument '%s' for CONFIG SET '%s'", (char*)o->ptr, (char*)c->argv[2]->ptr); } /*----------------------------------------------------------------------------- * CONFIG GET implementation *----------------------------------------------------------------------------*/ #define config_get_string_field(_name,_var) do { \ if (stringmatch(pattern,_name,0)) { \ addReplyBulkCString(c,_name); \ addReplyBulkCString(c,_var ? _var : ""); \ matches++; \ } \ } while(0); #define config_get_bool_field(_name,_var) do { \ if (stringmatch(pattern,_name,0)) { \ addReplyBulkCString(c,_name); \ addReplyBulkCString(c,_var ? "yes" : "no"); \ matches++; \ } \ } while(0); #define config_get_numerical_field(_name,_var) do { \ if (stringmatch(pattern,_name,0)) { \ ll2string(buf,sizeof(buf),_var); \ addReplyBulkCString(c,_name); \ addReplyBulkCString(c,buf); \ matches++; \ } \ } while(0); #define config_get_enum_field(_name,_var,_enumvar) do { \ if (stringmatch(pattern,_name,0)) { \ addReplyBulkCString(c,_name); \ addReplyBulkCString(c,configEnumGetNameOrUnknown(_enumvar,_var)); \ matches++; \ } \ } while(0); void configGetCommand(client *c) { robj *o = c->argv[2]; void *replylen = addDeferredMultiBulkLength(c); char *pattern = o->ptr; char buf[128]; int matches = 0; serverAssertWithInfo(c,o,sdsEncodedObject(o)); /* String values */ config_get_string_field("requirepass",server.requirepass); config_get_string_field("unixsocket",server.unixsocket); config_get_string_field("logfile",server.logfile); config_get_string_field("pidfile",server.pidfile); /* Numerical values */ config_get_numerical_field("maxmemory",server.maxmemory); config_get_numerical_field("maxmemory-samples",server.maxmemory_samples); config_get_numerical_field("timeout",server.maxidletime); config_get_numerical_field("auto-aof-rewrite-percentage", server.aof_rewrite_perc); config_get_numerical_field("auto-aof-rewrite-min-size", server.aof_rewrite_min_size); config_get_numerical_field("slowlog-log-slower-than", server.slowlog_log_slower_than); config_get_numerical_field("latency-monitor-threshold", server.latency_monitor_threshold); config_get_numerical_field("slowlog-max-len", server.slowlog_max_len); config_get_numerical_field("port",server.port); config_get_numerical_field("tcp-backlog",server.tcp_backlog); config_get_numerical_field("databases",server.dbnum); config_get_numerical_field("maxclients",server.maxclients); config_get_numerical_field("watchdog-period",server.watchdog_period); config_get_numerical_field("hz",server.hz); config_get_numerical_field("cluster-node-timeout",server.cluster_node_timeout); /* Bool (yes/no) values */ config_get_bool_field("tcp-keepalive",server.tcpkeepalive); config_get_bool_field("no-appendfsync-on-rewrite", server.aof_no_fsync_on_rewrite); config_get_bool_field("daemonize", server.daemonize); config_get_bool_field("activerehashing", server.activerehashing); config_get_bool_field("aof-rewrite-incremental-fsync", 
server.aof_rewrite_incremental_fsync); config_get_bool_field("aof-load-truncated", server.aof_load_truncated); config_get_bool_field("aof-enqueue-jobs-once", server.aof_enqueue_jobs_once); /* Enum values */ config_get_enum_field("maxmemory-policy", server.maxmemory_policy,maxmemory_policy_enum); config_get_enum_field("loglevel", server.verbosity,loglevel_enum); config_get_enum_field("appendfsync", server.aof_fsync,aof_fsync_enum); config_get_enum_field("syslog-facility", server.syslog_facility,syslog_facility_enum); /* Everything we can't handle with macros follows. */ if (stringmatch(pattern,"appendonly",0)) { addReplyBulkCString(c,"appendonly"); addReplyBulkCString(c,server.aof_state == AOF_OFF ? "no" : "yes"); matches++; } if (stringmatch(pattern,"dir",0)) { char buf[1024]; if (getcwd(buf,sizeof(buf)) == NULL) buf[0] = '\0'; addReplyBulkCString(c,"dir"); addReplyBulkCString(c,buf); matches++; } if (stringmatch(pattern,"client-output-buffer-limit",0)) { sds buf = sdsempty(); int j; for (j = 0; j < CLIENT_TYPE_COUNT; j++) { buf = sdscatprintf(buf,"%s %llu %llu %ld", getClientTypeName(j), server.client_obuf_limits[j].hard_limit_bytes, server.client_obuf_limits[j].soft_limit_bytes, (long) server.client_obuf_limits[j].soft_limit_seconds); if (j != CLIENT_TYPE_COUNT-1) buf = sdscatlen(buf," ",1); } addReplyBulkCString(c,"client-output-buffer-limit"); addReplyBulkCString(c,buf); sdsfree(buf); matches++; } if (stringmatch(pattern,"unixsocketperm",0)) { char buf[32]; snprintf(buf,sizeof(buf),"%o",server.unixsocketperm); addReplyBulkCString(c,"unixsocketperm"); addReplyBulkCString(c,buf); matches++; } if (stringmatch(pattern,"bind",0)) { sds aux = sdsjoin(server.bindaddr,server.bindaddr_count," "); addReplyBulkCString(c,"bind"); addReplyBulkCString(c,aux); sdsfree(aux); matches++; } setDeferredMultiBulkLength(c,replylen,matches*2); } /*----------------------------------------------------------------------------- * CONFIG REWRITE implementation *----------------------------------------------------------------------------*/ #define DISQUE_CONFIG_REWRITE_SIGNATURE "# Generated by CONFIG REWRITE" /* We use the following dictionary type to store where a configuration * option is mentioned in the old configuration file, so it's * like "maxmemory" -> list of line numbers (first line is zero). */ unsigned int dictSdsCaseHash(const void *key); int dictSdsKeyCaseCompare(void *privdata, const void *key1, const void *key2); void dictSdsDestructor(void *privdata, void *val); void dictListDestructor(void *privdata, void *val); dictType optionToLineDictType = { dictSdsCaseHash, /* hash function */ NULL, /* key dup */ NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ dictListDestructor /* val destructor */ }; dictType optionSetDictType = { dictSdsCaseHash, /* hash function */ NULL, /* key dup */ NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ NULL /* val destructor */ }; /* The config rewrite state. */ struct rewriteConfigState { dict *option_to_line; /* Option -> list of config file lines map */ dict *rewritten; /* Dictionary of already processed options */ int numlines; /* Number of lines in current config */ sds *lines; /* Current lines as an array of sds strings */ int has_tail; /* True if we already added directives that were not present in the original config file. */ }; /* Append the new line to the current configuration state. 
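*
* Sketch (editorial addition): appending a directive grows the lines
* array by one slot and stores the sds string there, e.g.:
*
*   rewriteConfigAppendLine(state, sdsnew("maxmemory 100mb"));
*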
*/ void rewriteConfigAppendLine(struct rewriteConfigState *state, sds line) { state->lines = zrealloc(state->lines, sizeof(char*) * (state->numlines+1)); state->lines[state->numlines++] = line; } /* Populate the option -> list of line numbers map. */ void rewriteConfigAddLineNumberToOption(struct rewriteConfigState *state, sds option, int linenum) { list *l = dictFetchValue(state->option_to_line,option); if (l == NULL) { l = listCreate(); dictAdd(state->option_to_line,sdsdup(option),l); } listAddNodeTail(l,(void*)(long)linenum); } /* Add the specified option to the set of processed options. * This is useful as only unused lines of processed options will be blanked * in the config file, while options the rewrite process does not understand * remain untouched. */ void rewriteConfigMarkAsProcessed(struct rewriteConfigState *state, const char *option) { sds opt = sdsnew(option); if (dictAdd(state->rewritten,opt,NULL) != DICT_OK) sdsfree(opt); } /* Read the old file, split it into lines to populate a newly created * config rewrite state, and return it to the caller. * * If it is impossible to read the old file, NULL is returned. * If the old file does not exist at all, an empty state is returned. */ struct rewriteConfigState *rewriteConfigReadOldFile(char *path) { FILE *fp = fopen(path,"r"); struct rewriteConfigState *state = zmalloc(sizeof(*state)); char buf[CONFIG_MAX_LINE+1]; int linenum = -1; if (fp == NULL && errno != ENOENT) return NULL; state->option_to_line = dictCreate(&optionToLineDictType,NULL); state->rewritten = dictCreate(&optionSetDictType,NULL); state->numlines = 0; state->lines = NULL; state->has_tail = 0; if (fp == NULL) return state; /* Read the old file line by line, populate the state. */ while(fgets(buf,CONFIG_MAX_LINE+1,fp) != NULL) { int argc; sds *argv; sds line = sdstrim(sdsnew(buf),"\r\n\t "); linenum++; /* Zero based, so we init at -1 */ /* Handle comments and empty lines. */ if (line[0] == '#' || line[0] == '\0') { if (!state->has_tail && !strcmp(line,DISQUE_CONFIG_REWRITE_SIGNATURE)) state->has_tail = 1; rewriteConfigAppendLine(state,line); continue; } /* Not a comment, split into arguments. */ argv = sdssplitargs(line,&argc); if (argv == NULL) { /* Apparently the line is unparsable for some reason, for * instance it may have unbalanced quotes. Load it as a * comment. */ sds aux = sdsnew("# ??? "); aux = sdscatsds(aux,line); sdsfree(line); rewriteConfigAppendLine(state,aux); continue; } sdstolower(argv[0]); /* We only want lowercase config directives. */ /* Now we populate the state according to the content of this line. * Append the line and populate the option -> line numbers map. */ rewriteConfigAppendLine(state,line); rewriteConfigAddLineNumberToOption(state,argv[0],linenum); sdsfreesplitres(argv,argc); } fclose(fp); return state; } /* Rewrite the specified configuration option with the new "line". * It progressively uses lines of the file that were already used for the same * configuration option in the old version of the file, removing that line from * the map of options -> line numbers. * * If there are lines associated with a given configuration option and * "force" is non-zero, the line is appended to the configuration file. * Usually "force" is true when an option has not its default value, so it * must be rewritten even if not present previously. * * The first time a line is appended into a configuration file, a comment * is added to show that starting from that point the config file was generated * by CONFIG REWRITE. 
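 *
 * As a sketch of the resulting decision table (illustrative only, for a
 * generic option "opt"):
 *
 *   an old "opt" line exists            -> that line is replaced in place
 *   no old line and force == 0          -> "line" is discarded (default value)
 *   no old line and force != 0          -> "line" is appended, after the
 *                                          CONFIG REWRITE signature comment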
* * "line" is either used, or freed, so the caller does not need to free it * in any way. */ void rewriteConfigRewriteLine(struct rewriteConfigState *state, const char *option, sds line, int force) { sds o = sdsnew(option); list *l = dictFetchValue(state->option_to_line,o); rewriteConfigMarkAsProcessed(state,option); if (!l && !force) { /* Option not used previously, and we are not forced to use it. */ sdsfree(line); sdsfree(o); return; } if (l) { listNode *ln = listFirst(l); int linenum = (long) ln->value; /* There are still lines in the old configuration file we can reuse * for this option. Replace the line with the new one. */ listDelNode(l,ln); if (listLength(l) == 0) dictDelete(state->option_to_line,o); sdsfree(state->lines[linenum]); state->lines[linenum] = line; } else { /* Append a new line. */ if (!state->has_tail) { rewriteConfigAppendLine(state, sdsnew(DISQUE_CONFIG_REWRITE_SIGNATURE)); state->has_tail = 1; } rewriteConfigAppendLine(state,line); } sdsfree(o); } /* Write the long long 'bytes' value as a string in a way that is parsable * inside disque.conf. If possible uses the GB, MB, KB notation. */ int rewriteConfigFormatMemory(char *buf, size_t len, long long bytes) { int gb = 1024*1024*1024; int mb = 1024*1024; int kb = 1024; if (bytes && (bytes % gb) == 0) { return snprintf(buf,len,"%lldgb",bytes/gb); } else if (bytes && (bytes % mb) == 0) { return snprintf(buf,len,"%lldmb",bytes/mb); } else if (bytes && (bytes % kb) == 0) { return snprintf(buf,len,"%lldkb",bytes/kb); } else { return snprintf(buf,len,"%lld",bytes); } } /* Rewrite a simple "option-name " configuration option. */ void rewriteConfigBytesOption(struct rewriteConfigState *state, char *option, long long value, long long defvalue) { char buf[64]; int force = value != defvalue; sds line; rewriteConfigFormatMemory(buf,sizeof(buf),value); line = sdscatprintf(sdsempty(),"%s %s",option,buf); rewriteConfigRewriteLine(state,option,line,force); } /* Rewrite a yes/no option. */ void rewriteConfigYesNoOption(struct rewriteConfigState *state, char *option, int value, int defvalue) { int force = value != defvalue; sds line = sdscatprintf(sdsempty(),"%s %s",option, value ? "yes" : "no"); rewriteConfigRewriteLine(state,option,line,force); } /* Rewrite a string option. */ void rewriteConfigStringOption(struct rewriteConfigState *state, char *option, char *value, char *defvalue) { int force = 1; sds line; /* String options set to NULL need to be not present at all in the * configuration file to be set to NULL again at the next reboot. */ if (value == NULL) { rewriteConfigMarkAsProcessed(state,option); return; } /* Set force to zero if the value is set to its default. */ if (defvalue && strcmp(value,defvalue) == 0) force = 0; line = sdsnew(option); line = sdscatlen(line, " ", 1); line = sdscatrepr(line, value, strlen(value)); rewriteConfigRewriteLine(state,option,line,force); } /* Rewrite a numerical (long long range) option. */ void rewriteConfigNumericalOption(struct rewriteConfigState *state, char *option, long long value, long long defvalue) { int force = value != defvalue; sds line = sdscatprintf(sdsempty(),"%s %lld",option,value); rewriteConfigRewriteLine(state,option,line,force); } /* Rewrite a octal option. */ void rewriteConfigOctalOption(struct rewriteConfigState *state, char *option, int value, int defvalue) { int force = value != defvalue; sds line = sdscatprintf(sdsempty(),"%s %o",option,value); rewriteConfigRewriteLine(state,option,line,force); } /* Rewrite an enumeration option. 
It takes as usually state and option name, * and in addition the enumeration array and the default value for the * option. */ void rewriteConfigEnumOption(struct rewriteConfigState *state, char *option, int value, configEnum *ce, int defval) { sds line; const char *name = configEnumGetNameOrUnknown(ce,value); int force = value != defval; line = sdscatprintf(sdsempty(),"%s %s",option,name); rewriteConfigRewriteLine(state,option,line,force); } /* Rewrite the syslog-facility option. */ void rewriteConfigSyslogfacilityOption(struct rewriteConfigState *state) { int value = server.syslog_facility; int force = value != LOG_LOCAL0; const char *name = NULL, *option = "syslog-facility"; sds line; name = configEnumGetNameOrUnknown(syslog_facility_enum,value); line = sdscatprintf(sdsempty(),"%s %s",option,name); rewriteConfigRewriteLine(state,option,line,force); } /* Rewrite the dir option, always using absolute paths.*/ void rewriteConfigDirOption(struct rewriteConfigState *state) { char cwd[1024]; if (getcwd(cwd,sizeof(cwd)) == NULL) { rewriteConfigMarkAsProcessed(state,"dir"); return; /* no rewrite on error. */ } rewriteConfigStringOption(state,"dir",cwd,NULL); } /* Rewrite the client-output-buffer-limit option. */ void rewriteConfigClientoutputbufferlimitOption(struct rewriteConfigState *state) { int j; char *option = "client-output-buffer-limit"; for (j = 0; j < CLIENT_TYPE_COUNT; j++) { int force = (server.client_obuf_limits[j].hard_limit_bytes != clientBufferLimitsDefaults[j].hard_limit_bytes) || (server.client_obuf_limits[j].soft_limit_bytes != clientBufferLimitsDefaults[j].soft_limit_bytes) || (server.client_obuf_limits[j].soft_limit_seconds != clientBufferLimitsDefaults[j].soft_limit_seconds); sds line; char hard[64], soft[64]; rewriteConfigFormatMemory(hard,sizeof(hard), server.client_obuf_limits[j].hard_limit_bytes); rewriteConfigFormatMemory(soft,sizeof(soft), server.client_obuf_limits[j].soft_limit_bytes); line = sdscatprintf(sdsempty(),"%s %s %s %s %ld", option, getClientTypeName(j), hard, soft, (long) server.client_obuf_limits[j].soft_limit_seconds); rewriteConfigRewriteLine(state,option,line,force); } } /* Rewrite the bind option. */ void rewriteConfigBindOption(struct rewriteConfigState *state) { int force = 1; sds line, addresses; char *option = "bind"; /* Nothing to rewrite if we don't have bind addresses. */ if (server.bindaddr_count == 0) { rewriteConfigMarkAsProcessed(state,option); return; } /* Rewrite as bind ... */ addresses = sdsjoin(server.bindaddr,server.bindaddr_count," "); line = sdsnew(option); line = sdscatlen(line, " ", 1); line = sdscatsds(line, addresses); sdsfree(addresses); rewriteConfigRewriteLine(state,option,line,force); } /* Glue together the configuration lines in the current configuration * rewrite state into a single string, stripping multiple empty lines. */ sds rewriteConfigGetContentFromState(struct rewriteConfigState *state) { sds content = sdsempty(); int j, was_empty = 0; for (j = 0; j < state->numlines; j++) { /* Every cluster of empty lines is turned into a single empty line. */ if (sdslen(state->lines[j]) == 0) { if (was_empty) continue; was_empty = 1; } else { was_empty = 0; } content = sdscatsds(content,state->lines[j]); content = sdscatlen(content,"\n",1); } return content; } /* Free the configuration rewrite state. 
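 *
 * Every sds string reachable from state->lines is owned by the state, so a
 * typical caller (rewriteConfig() below is the real one) pairs the read
 * with a single release once the content has been extracted:
 *
 *   sds content = rewriteConfigGetContentFromState(state);
 *   rewriteConfigReleaseState(state);
 *
 * The extracted content is an independent copy and survives the release.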
*/ void rewriteConfigReleaseState(struct rewriteConfigState *state) { sdsfreesplitres(state->lines,state->numlines); dictRelease(state->option_to_line); dictRelease(state->rewritten); zfree(state); } /* At the end of the rewrite process the state contains the remaining * map between "option name" => "lines in the original config file". * Lines used by the rewrite process were removed by the function * rewriteConfigRewriteLine(), all the other lines are "orphaned" and * should be replaced by empty lines. * * This function does just this, iterating all the option names and * blanking all the lines still associated. */ void rewriteConfigRemoveOrphaned(struct rewriteConfigState *state) { dictIterator *di = dictGetIterator(state->option_to_line); dictEntry *de; while((de = dictNext(di)) != NULL) { list *l = dictGetVal(de); sds option = dictGetKey(de); /* Don't blank lines about options the rewrite process * don't understand. */ if (dictFind(state->rewritten,option) == NULL) { serverLog(LL_DEBUG,"Not rewritten option: %s", option); continue; } while(listLength(l)) { listNode *ln = listFirst(l); int linenum = (long) ln->value; sdsfree(state->lines[linenum]); state->lines[linenum] = sdsempty(); listDelNode(l,ln); } } dictReleaseIterator(di); } /* This function overwrites the old configuration file with the new content. * * 1) The old file length is obtained. * 2) If the new content is smaller, padding is added. * 3) A single write(2) call is used to replace the content of the file. * 4) Later the file is truncated to the length of the new content. * * This way we are sure the file is left in a consistent state even if the * process is stopped between any of the four operations. * * The function returns 0 on success, otherwise -1 is returned and errno * set accordingly. */ int rewriteConfigOverwriteFile(char *configfile, sds content) { int retval = 0; int fd = open(configfile,O_RDWR|O_CREAT,0644); int content_size = sdslen(content), padding = 0; struct stat sb; sds content_padded; /* 1) Open the old file (or create a new one if it does not * exist), get the size. */ if (fd == -1) return -1; /* errno set by open(). */ if (fstat(fd,&sb) == -1) { close(fd); return -1; /* errno set by fstat(). */ } /* 2) Pad the content at least match the old file size. */ content_padded = sdsdup(content); if (content_size < sb.st_size) { /* If the old file was bigger, pad the content with * a newline plus as many "#" chars as required. */ padding = sb.st_size - content_size; content_padded = sdsgrowzero(content_padded,sb.st_size); content_padded[content_size] = '\n'; memset(content_padded+content_size+1,'#',padding-1); } /* 3) Write the new content using a single write(2). */ if (write(fd,content_padded,strlen(content_padded)) == -1) { retval = -1; goto cleanup; } /* 4) Truncate the file to the right length if we used padding. */ if (padding) { if (ftruncate(fd,content_size) == -1) { /* Non critical error... */ } } cleanup: sdsfree(content_padded); close(fd); return retval; } /* Rewrite the configuration file at "path". * If the configuration file already exists, we try at best to retain comments * and overall structure. * * Configuration parameters that are at their default value, unless already * explicitly included in the old configuration file, are not rewritten. * * On error -1 is returned and errno is set accordingly, otherwise 0. */ int rewriteConfig(char *path) { struct rewriteConfigState *state; sds newcontent; int retval; /* Step 1: read the old config into our rewrite state. 
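 * (The state maps every option name to the line numbers where it appears,
 * so Step 2 can rewrite options in place. For instance, for an old file
 * containing the two lines
 *
 *   maxmemory 100mb
 *   maxmemory 200mb
 *
 * the map holds "maxmemory" -> [0, 1]: the rewrite consumes line 0, and
 * Step 3 blanks the duplicate at line 1.)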
*/ if ((state = rewriteConfigReadOldFile(path)) == NULL) return -1; /* Step 2: rewrite every single option, replacing or appending it inside * the rewrite state. */ rewriteConfigYesNoOption(state,"daemonize",server.daemonize,0); rewriteConfigStringOption(state,"pidfile",server.pidfile,CONFIG_DEFAULT_PID_FILE); rewriteConfigNumericalOption(state,"port",server.port,CONFIG_DEFAULT_SERVER_PORT); rewriteConfigNumericalOption(state,"tcp-backlog",server.tcp_backlog,CONFIG_DEFAULT_TCP_BACKLOG); rewriteConfigBindOption(state); rewriteConfigStringOption(state,"unixsocket",server.unixsocket,NULL); rewriteConfigOctalOption(state,"unixsocketperm",server.unixsocketperm,CONFIG_DEFAULT_UNIX_SOCKET_PERM); rewriteConfigNumericalOption(state,"timeout",server.maxidletime,CONFIG_DEFAULT_CLIENT_TIMEOUT); rewriteConfigNumericalOption(state,"tcp-keepalive",server.tcpkeepalive,CONFIG_DEFAULT_TCP_KEEPALIVE); rewriteConfigEnumOption(state,"loglevel",server.verbosity,loglevel_enum,CONFIG_DEFAULT_VERBOSITY); rewriteConfigStringOption(state,"logfile",server.logfile,CONFIG_DEFAULT_LOGFILE); rewriteConfigYesNoOption(state,"syslog-enabled",server.syslog_enabled,CONFIG_DEFAULT_SYSLOG_ENABLED); rewriteConfigStringOption(state,"syslog-ident",server.syslog_ident,CONFIG_DEFAULT_SYSLOG_IDENT); rewriteConfigSyslogfacilityOption(state); rewriteConfigDirOption(state); rewriteConfigStringOption(state,"requirepass",server.requirepass,NULL); rewriteConfigNumericalOption(state,"maxclients",server.maxclients,CONFIG_DEFAULT_MAX_CLIENTS); rewriteConfigBytesOption(state,"maxmemory",server.maxmemory,CONFIG_DEFAULT_MAXMEMORY); rewriteConfigEnumOption(state,"maxmemory-policy",server.maxmemory_policy,maxmemory_policy_enum,CONFIG_DEFAULT_MAXMEMORY_POLICY); rewriteConfigNumericalOption(state,"maxmemory-samples",server.maxmemory_samples,CONFIG_DEFAULT_MAXMEMORY_SAMPLES); rewriteConfigYesNoOption(state,"appendonly",server.aof_state != AOF_OFF,0); rewriteConfigStringOption(state,"appendfilename",server.aof_filename,CONFIG_DEFAULT_AOF_FILENAME); rewriteConfigEnumOption(state,"appendfsync",server.aof_fsync,aof_fsync_enum,CONFIG_DEFAULT_AOF_FSYNC); rewriteConfigYesNoOption(state,"no-appendfsync-on-rewrite",server.aof_no_fsync_on_rewrite,CONFIG_DEFAULT_AOF_NO_FSYNC_ON_REWRITE); rewriteConfigNumericalOption(state,"auto-aof-rewrite-percentage",server.aof_rewrite_perc,AOF_REWRITE_PERC); rewriteConfigBytesOption(state,"auto-aof-rewrite-min-size",server.aof_rewrite_min_size,AOF_REWRITE_MIN_SIZE); rewriteConfigStringOption(state,"cluster-config-file",server.cluster_configfile,CONFIG_DEFAULT_CLUSTER_CONFIG_FILE); rewriteConfigNumericalOption(state,"cluster-node-timeout",server.cluster_node_timeout,CLUSTER_DEFAULT_NODE_TIMEOUT); rewriteConfigNumericalOption(state,"slowlog-log-slower-than",server.slowlog_log_slower_than,CONFIG_DEFAULT_SLOWLOG_LOG_SLOWER_THAN); rewriteConfigNumericalOption(state,"latency-monitor-threshold",server.latency_monitor_threshold,CONFIG_DEFAULT_LATENCY_MONITOR_THRESHOLD); rewriteConfigNumericalOption(state,"slowlog-max-len",server.slowlog_max_len,CONFIG_DEFAULT_SLOWLOG_MAX_LEN); rewriteConfigYesNoOption(state,"activerehashing",server.activerehashing,CONFIG_DEFAULT_ACTIVE_REHASHING); rewriteConfigClientoutputbufferlimitOption(state); rewriteConfigNumericalOption(state,"hz",server.hz,CONFIG_DEFAULT_HZ); rewriteConfigYesNoOption(state,"aof-rewrite-incremental-fsync",server.aof_rewrite_incremental_fsync,CONFIG_DEFAULT_AOF_REWRITE_INCREMENTAL_FSYNC); 
rewriteConfigYesNoOption(state,"aof-load-truncated",server.aof_load_truncated,CONFIG_DEFAULT_AOF_LOAD_TRUNCATED); rewriteConfigYesNoOption(state,"aof-enqueue-jobs-once",server.aof_enqueue_jobs_once,CONFIG_DEFAULT_AOF_ENQUEUE_JOBS_ONCE); /* Step 3: remove all the orphaned lines in the old file, that is, lines * that were used by a config option and are no longer used, like in case * of multiple "save" options or duplicated options. */ rewriteConfigRemoveOrphaned(state); /* Step 4: generate a new configuration file from the modified state * and write it into the original file. */ newcontent = rewriteConfigGetContentFromState(state); retval = rewriteConfigOverwriteFile(server.configfile,newcontent); sdsfree(newcontent); rewriteConfigReleaseState(state); return retval; } /*----------------------------------------------------------------------------- * CONFIG command entry point *----------------------------------------------------------------------------*/ void configCommand(client *c) { if (!strcasecmp(c->argv[1]->ptr,"set")) { if (c->argc != 4) goto badarity; configSetCommand(c); } else if (!strcasecmp(c->argv[1]->ptr,"get")) { if (c->argc != 3) goto badarity; configGetCommand(c); } else if (!strcasecmp(c->argv[1]->ptr,"resetstat")) { if (c->argc != 2) goto badarity; resetServerStats(); resetCommandTableStats(); addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"rewrite")) { if (c->argc != 2) goto badarity; if (server.configfile == NULL) { addReplyError(c,"The server is running without a config file"); return; } if (rewriteConfig(server.configfile) == -1) { serverLog(LL_WARNING,"CONFIG REWRITE failed: %s", strerror(errno)); addReplyErrorFormat(c,"Rewriting config file: %s", strerror(errno)); } else { serverLog(LL_WARNING,"CONFIG REWRITE executed with success."); addReply(c,shared.ok); } } else { addReplyError(c, "CONFIG subcommand must be one of GET, SET, RESETSTAT, REWRITE"); } return; badarity: addReplyErrorFormat(c,"Wrong number of arguments for CONFIG %s", (char*) c->argv[1]->ptr); } disque-1.0-rc1/src/config.h000066400000000000000000000144131264176561100155500ustar00rootroot00000000000000/* * Copyright (c) 2009-2012, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef __CONFIG_H #define __CONFIG_H #ifdef __APPLE__ #include #endif /* Define disque_fstat to fstat or fstat64() */ #if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_6) #define disque_fstat fstat64 #define disque_stat stat64 #else #define disque_fstat fstat #define disque_stat stat #endif /* Test for proc filesystem */ #ifdef __linux__ #define HAVE_PROC_STAT 1 #define HAVE_PROC_MAPS 1 #define HAVE_PROC_SMAPS 1 #define HAVE_PROC_SOMAXCONN 1 #endif /* Test for task_info() */ #if defined(__APPLE__) #define HAVE_TASKINFO 1 #endif /* Test for backtrace() */ #if defined(__APPLE__) || defined(__linux__) #define HAVE_BACKTRACE 1 #endif /* Test for polling API */ #ifdef __linux__ #define HAVE_EPOLL 1 #endif #if (defined(__APPLE__) && defined(MAC_OS_X_VERSION_10_6)) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined (__NetBSD__) #define HAVE_KQUEUE 1 #endif #ifdef __sun #include #ifdef _DTRACE_VERSION #define HAVE_EVPORT 1 #endif #endif /* Define aof_fsync to fdatasync() in Linux and fsync() for all the rest */ #ifdef __linux__ #define aof_fsync fdatasync #else #define aof_fsync fsync #endif /* Define rdb_fsync_range to sync_file_range() on Linux, otherwise we use * the plain fsync() call. */ #ifdef __linux__ #include #include #if defined(__GLIBC__) && defined(__GLIBC_PREREQ) #if (LINUX_VERSION_CODE >= 0x020611 && __GLIBC_PREREQ(2, 6)) #define HAVE_SYNC_FILE_RANGE 1 #endif #else #if (LINUX_VERSION_CODE >= 0x020611) #define HAVE_SYNC_FILE_RANGE 1 #endif #endif #endif #ifdef HAVE_SYNC_FILE_RANGE #define rdb_fsync_range(fd,off,size) sync_file_range(fd,off,size,SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE) #else #define rdb_fsync_range(fd,off,size) fsync(fd) #endif /* Check if we can use setproctitle(). * BSD systems have support for it, we provide an implementation for * Linux and osx. 
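 *
 * A minimal usage sketch (hypothetical startup code, with argument and
 * port handling elided) of the replacement declared below:
 *
 *   #ifdef INIT_SETPROCTITLE_REPLACEMENT
 *   spt_init(argc, argv);
 *   #endif
 *   setproctitle("disque-server *:%d", port);
 *
 * On Linux and OS X spt_init() must run before any title is set, since
 * the replacement works by overwriting the original argv area.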
*/ #if (defined __NetBSD__ || defined __FreeBSD__ || defined __OpenBSD__) #define USE_SETPROCTITLE #endif #if (defined __linux || defined __APPLE__) #define USE_SETPROCTITLE #define INIT_SETPROCTITLE_REPLACEMENT void spt_init(int argc, char *argv[]); void setproctitle(const char *fmt, ...); #endif /* Byte ordering detection */ #include /* This will likely define BYTE_ORDER */ #ifndef BYTE_ORDER #if (BSD >= 199103) # include #else #if defined(linux) || defined(__linux__) # include #else #define LITTLE_ENDIAN 1234 /* least-significant byte first (vax, pc) */ #define BIG_ENDIAN 4321 /* most-significant byte first (IBM, net) */ #define PDP_ENDIAN 3412 /* LSB first in word, MSW first in long (pdp)*/ #if defined(__i386__) || defined(__x86_64__) || defined(__amd64__) || \ defined(vax) || defined(ns32000) || defined(sun386) || \ defined(MIPSEL) || defined(_MIPSEL) || defined(BIT_ZERO_ON_RIGHT) || \ defined(__alpha__) || defined(__alpha) #define BYTE_ORDER LITTLE_ENDIAN #endif #if defined(sel) || defined(pyr) || defined(mc68000) || defined(sparc) || \ defined(is68k) || defined(tahoe) || defined(ibm032) || defined(ibm370) || \ defined(MIPSEB) || defined(_MIPSEB) || defined(_IBMR2) || defined(DGUX) ||\ defined(apollo) || defined(__convex__) || defined(_CRAY) || \ defined(__hppa) || defined(__hp9000) || \ defined(__hp9000s300) || defined(__hp9000s700) || \ defined (BIT_ZERO_ON_LEFT) || defined(m68k) || defined(__sparc) #define BYTE_ORDER BIG_ENDIAN #endif #endif /* linux */ #endif /* BSD */ #endif /* BYTE_ORDER */ /* Sometimes after including an OS-specific header that defines the * endianess we end with __BYTE_ORDER but not with BYTE_ORDER that is what * the Disque code uses. In this case let's define everything without the * underscores. */ #ifndef BYTE_ORDER #ifdef __BYTE_ORDER #if defined(__LITTLE_ENDIAN) && defined(__BIG_ENDIAN) #ifndef LITTLE_ENDIAN #define LITTLE_ENDIAN __LITTLE_ENDIAN #endif #ifndef BIG_ENDIAN #define BIG_ENDIAN __BIG_ENDIAN #endif #if (__BYTE_ORDER == __LITTLE_ENDIAN) #define BYTE_ORDER LITTLE_ENDIAN #else #define BYTE_ORDER BIG_ENDIAN #endif #endif #endif #endif #if !defined(BYTE_ORDER) || \ (BYTE_ORDER != BIG_ENDIAN && BYTE_ORDER != LITTLE_ENDIAN) /* you must determine what the correct bit order is for * your compiler - the next line is an intentional error * which will force your compiles to bomb until you fix * the above macros. */ #error "Undefined or invalid BYTE_ORDER" #endif #if (__i386 || __amd64 || __powerpc__) && __GNUC__ #define GNUC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) #if defined(__clang__) #define HAVE_ATOMIC #endif #if (defined(__GLIBC__) && defined(__GLIBC_PREREQ)) #if (GNUC_VERSION >= 40100 && __GLIBC_PREREQ(2, 6)) #define HAVE_ATOMIC #endif #endif #endif #endif disque-1.0-rc1/src/crc64.c000066400000000000000000000246061264176561100152240ustar00rootroot00000000000000/* Disque uses the CRC64 variant with "Jones" coefficients and init value of 0. * * Specification of this CRC64 variant follows: * Name: crc-64-jones * Width: 64 bites * Poly: 0xad93d23594c935a9 * Reflected In: True * Xor_In: 0xffffffffffffffff * Reflected_Out: True * Xor_Out: 0x0 * Check("123456789"): 0xe9c6d914c4b8d9ca * * Copyright (c) 2012, Salvatore Sanfilippo * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include static const uint64_t crc64_tab[256] = { UINT64_C(0x0000000000000000), UINT64_C(0x7ad870c830358979), UINT64_C(0xf5b0e190606b12f2), UINT64_C(0x8f689158505e9b8b), UINT64_C(0xc038e5739841b68f), UINT64_C(0xbae095bba8743ff6), UINT64_C(0x358804e3f82aa47d), UINT64_C(0x4f50742bc81f2d04), UINT64_C(0xab28ecb46814fe75), UINT64_C(0xd1f09c7c5821770c), UINT64_C(0x5e980d24087fec87), UINT64_C(0x24407dec384a65fe), UINT64_C(0x6b1009c7f05548fa), UINT64_C(0x11c8790fc060c183), UINT64_C(0x9ea0e857903e5a08), UINT64_C(0xe478989fa00bd371), UINT64_C(0x7d08ff3b88be6f81), UINT64_C(0x07d08ff3b88be6f8), UINT64_C(0x88b81eabe8d57d73), UINT64_C(0xf2606e63d8e0f40a), UINT64_C(0xbd301a4810ffd90e), UINT64_C(0xc7e86a8020ca5077), UINT64_C(0x4880fbd87094cbfc), UINT64_C(0x32588b1040a14285), UINT64_C(0xd620138fe0aa91f4), UINT64_C(0xacf86347d09f188d), UINT64_C(0x2390f21f80c18306), UINT64_C(0x594882d7b0f40a7f), UINT64_C(0x1618f6fc78eb277b), UINT64_C(0x6cc0863448deae02), UINT64_C(0xe3a8176c18803589), UINT64_C(0x997067a428b5bcf0), UINT64_C(0xfa11fe77117cdf02), UINT64_C(0x80c98ebf2149567b), UINT64_C(0x0fa11fe77117cdf0), UINT64_C(0x75796f2f41224489), UINT64_C(0x3a291b04893d698d), UINT64_C(0x40f16bccb908e0f4), UINT64_C(0xcf99fa94e9567b7f), UINT64_C(0xb5418a5cd963f206), UINT64_C(0x513912c379682177), UINT64_C(0x2be1620b495da80e), UINT64_C(0xa489f35319033385), UINT64_C(0xde51839b2936bafc), UINT64_C(0x9101f7b0e12997f8), UINT64_C(0xebd98778d11c1e81), UINT64_C(0x64b116208142850a), UINT64_C(0x1e6966e8b1770c73), UINT64_C(0x8719014c99c2b083), UINT64_C(0xfdc17184a9f739fa), UINT64_C(0x72a9e0dcf9a9a271), UINT64_C(0x08719014c99c2b08), UINT64_C(0x4721e43f0183060c), UINT64_C(0x3df994f731b68f75), UINT64_C(0xb29105af61e814fe), UINT64_C(0xc849756751dd9d87), UINT64_C(0x2c31edf8f1d64ef6), UINT64_C(0x56e99d30c1e3c78f), UINT64_C(0xd9810c6891bd5c04), UINT64_C(0xa3597ca0a188d57d), UINT64_C(0xec09088b6997f879), UINT64_C(0x96d1784359a27100), UINT64_C(0x19b9e91b09fcea8b), UINT64_C(0x636199d339c963f2), UINT64_C(0xdf7adabd7a6e2d6f), UINT64_C(0xa5a2aa754a5ba416), UINT64_C(0x2aca3b2d1a053f9d), UINT64_C(0x50124be52a30b6e4), 
UINT64_C(0x1f423fcee22f9be0), UINT64_C(0x659a4f06d21a1299), UINT64_C(0xeaf2de5e82448912), UINT64_C(0x902aae96b271006b), UINT64_C(0x74523609127ad31a), UINT64_C(0x0e8a46c1224f5a63), UINT64_C(0x81e2d7997211c1e8), UINT64_C(0xfb3aa75142244891), UINT64_C(0xb46ad37a8a3b6595), UINT64_C(0xceb2a3b2ba0eecec), UINT64_C(0x41da32eaea507767), UINT64_C(0x3b024222da65fe1e), UINT64_C(0xa2722586f2d042ee), UINT64_C(0xd8aa554ec2e5cb97), UINT64_C(0x57c2c41692bb501c), UINT64_C(0x2d1ab4dea28ed965), UINT64_C(0x624ac0f56a91f461), UINT64_C(0x1892b03d5aa47d18), UINT64_C(0x97fa21650afae693), UINT64_C(0xed2251ad3acf6fea), UINT64_C(0x095ac9329ac4bc9b), UINT64_C(0x7382b9faaaf135e2), UINT64_C(0xfcea28a2faafae69), UINT64_C(0x8632586aca9a2710), UINT64_C(0xc9622c4102850a14), UINT64_C(0xb3ba5c8932b0836d), UINT64_C(0x3cd2cdd162ee18e6), UINT64_C(0x460abd1952db919f), UINT64_C(0x256b24ca6b12f26d), UINT64_C(0x5fb354025b277b14), UINT64_C(0xd0dbc55a0b79e09f), UINT64_C(0xaa03b5923b4c69e6), UINT64_C(0xe553c1b9f35344e2), UINT64_C(0x9f8bb171c366cd9b), UINT64_C(0x10e3202993385610), UINT64_C(0x6a3b50e1a30ddf69), UINT64_C(0x8e43c87e03060c18), UINT64_C(0xf49bb8b633338561), UINT64_C(0x7bf329ee636d1eea), UINT64_C(0x012b592653589793), UINT64_C(0x4e7b2d0d9b47ba97), UINT64_C(0x34a35dc5ab7233ee), UINT64_C(0xbbcbcc9dfb2ca865), UINT64_C(0xc113bc55cb19211c), UINT64_C(0x5863dbf1e3ac9dec), UINT64_C(0x22bbab39d3991495), UINT64_C(0xadd33a6183c78f1e), UINT64_C(0xd70b4aa9b3f20667), UINT64_C(0x985b3e827bed2b63), UINT64_C(0xe2834e4a4bd8a21a), UINT64_C(0x6debdf121b863991), UINT64_C(0x1733afda2bb3b0e8), UINT64_C(0xf34b37458bb86399), UINT64_C(0x8993478dbb8deae0), UINT64_C(0x06fbd6d5ebd3716b), UINT64_C(0x7c23a61ddbe6f812), UINT64_C(0x3373d23613f9d516), UINT64_C(0x49aba2fe23cc5c6f), UINT64_C(0xc6c333a67392c7e4), UINT64_C(0xbc1b436e43a74e9d), UINT64_C(0x95ac9329ac4bc9b5), UINT64_C(0xef74e3e19c7e40cc), UINT64_C(0x601c72b9cc20db47), UINT64_C(0x1ac40271fc15523e), UINT64_C(0x5594765a340a7f3a), UINT64_C(0x2f4c0692043ff643), UINT64_C(0xa02497ca54616dc8), UINT64_C(0xdafce7026454e4b1), UINT64_C(0x3e847f9dc45f37c0), UINT64_C(0x445c0f55f46abeb9), UINT64_C(0xcb349e0da4342532), UINT64_C(0xb1eceec59401ac4b), UINT64_C(0xfebc9aee5c1e814f), UINT64_C(0x8464ea266c2b0836), UINT64_C(0x0b0c7b7e3c7593bd), UINT64_C(0x71d40bb60c401ac4), UINT64_C(0xe8a46c1224f5a634), UINT64_C(0x927c1cda14c02f4d), UINT64_C(0x1d148d82449eb4c6), UINT64_C(0x67ccfd4a74ab3dbf), UINT64_C(0x289c8961bcb410bb), UINT64_C(0x5244f9a98c8199c2), UINT64_C(0xdd2c68f1dcdf0249), UINT64_C(0xa7f41839ecea8b30), UINT64_C(0x438c80a64ce15841), UINT64_C(0x3954f06e7cd4d138), UINT64_C(0xb63c61362c8a4ab3), UINT64_C(0xcce411fe1cbfc3ca), UINT64_C(0x83b465d5d4a0eece), UINT64_C(0xf96c151de49567b7), UINT64_C(0x76048445b4cbfc3c), UINT64_C(0x0cdcf48d84fe7545), UINT64_C(0x6fbd6d5ebd3716b7), UINT64_C(0x15651d968d029fce), UINT64_C(0x9a0d8ccedd5c0445), UINT64_C(0xe0d5fc06ed698d3c), UINT64_C(0xaf85882d2576a038), UINT64_C(0xd55df8e515432941), UINT64_C(0x5a3569bd451db2ca), UINT64_C(0x20ed197575283bb3), UINT64_C(0xc49581ead523e8c2), UINT64_C(0xbe4df122e51661bb), UINT64_C(0x3125607ab548fa30), UINT64_C(0x4bfd10b2857d7349), UINT64_C(0x04ad64994d625e4d), UINT64_C(0x7e7514517d57d734), UINT64_C(0xf11d85092d094cbf), UINT64_C(0x8bc5f5c11d3cc5c6), UINT64_C(0x12b5926535897936), UINT64_C(0x686de2ad05bcf04f), UINT64_C(0xe70573f555e26bc4), UINT64_C(0x9ddd033d65d7e2bd), UINT64_C(0xd28d7716adc8cfb9), UINT64_C(0xa85507de9dfd46c0), UINT64_C(0x273d9686cda3dd4b), UINT64_C(0x5de5e64efd965432), UINT64_C(0xb99d7ed15d9d8743), UINT64_C(0xc3450e196da80e3a), 
UINT64_C(0x4c2d9f413df695b1), UINT64_C(0x36f5ef890dc31cc8), UINT64_C(0x79a59ba2c5dc31cc), UINT64_C(0x037deb6af5e9b8b5), UINT64_C(0x8c157a32a5b7233e), UINT64_C(0xf6cd0afa9582aa47), UINT64_C(0x4ad64994d625e4da), UINT64_C(0x300e395ce6106da3), UINT64_C(0xbf66a804b64ef628), UINT64_C(0xc5bed8cc867b7f51), UINT64_C(0x8aeeace74e645255), UINT64_C(0xf036dc2f7e51db2c), UINT64_C(0x7f5e4d772e0f40a7), UINT64_C(0x05863dbf1e3ac9de), UINT64_C(0xe1fea520be311aaf), UINT64_C(0x9b26d5e88e0493d6), UINT64_C(0x144e44b0de5a085d), UINT64_C(0x6e963478ee6f8124), UINT64_C(0x21c640532670ac20), UINT64_C(0x5b1e309b16452559), UINT64_C(0xd476a1c3461bbed2), UINT64_C(0xaeaed10b762e37ab), UINT64_C(0x37deb6af5e9b8b5b), UINT64_C(0x4d06c6676eae0222), UINT64_C(0xc26e573f3ef099a9), UINT64_C(0xb8b627f70ec510d0), UINT64_C(0xf7e653dcc6da3dd4), UINT64_C(0x8d3e2314f6efb4ad), UINT64_C(0x0256b24ca6b12f26), UINT64_C(0x788ec2849684a65f), UINT64_C(0x9cf65a1b368f752e), UINT64_C(0xe62e2ad306bafc57), UINT64_C(0x6946bb8b56e467dc), UINT64_C(0x139ecb4366d1eea5), UINT64_C(0x5ccebf68aecec3a1), UINT64_C(0x2616cfa09efb4ad8), UINT64_C(0xa97e5ef8cea5d153), UINT64_C(0xd3a62e30fe90582a), UINT64_C(0xb0c7b7e3c7593bd8), UINT64_C(0xca1fc72bf76cb2a1), UINT64_C(0x45775673a732292a), UINT64_C(0x3faf26bb9707a053), UINT64_C(0x70ff52905f188d57), UINT64_C(0x0a2722586f2d042e), UINT64_C(0x854fb3003f739fa5), UINT64_C(0xff97c3c80f4616dc), UINT64_C(0x1bef5b57af4dc5ad), UINT64_C(0x61372b9f9f784cd4), UINT64_C(0xee5fbac7cf26d75f), UINT64_C(0x9487ca0fff135e26), UINT64_C(0xdbd7be24370c7322), UINT64_C(0xa10fceec0739fa5b), UINT64_C(0x2e675fb4576761d0), UINT64_C(0x54bf2f7c6752e8a9), UINT64_C(0xcdcf48d84fe75459), UINT64_C(0xb71738107fd2dd20), UINT64_C(0x387fa9482f8c46ab), UINT64_C(0x42a7d9801fb9cfd2), UINT64_C(0x0df7adabd7a6e2d6), UINT64_C(0x772fdd63e7936baf), UINT64_C(0xf8474c3bb7cdf024), UINT64_C(0x829f3cf387f8795d), UINT64_C(0x66e7a46c27f3aa2c), UINT64_C(0x1c3fd4a417c62355), UINT64_C(0x935745fc4798b8de), UINT64_C(0xe98f353477ad31a7), UINT64_C(0xa6df411fbfb21ca3), UINT64_C(0xdc0731d78f8795da), UINT64_C(0x536fa08fdfd90e51), UINT64_C(0x29b7d047efec8728), }; uint64_t crc64(uint64_t crc, const unsigned char *s, uint64_t l) { uint64_t j; for (j = 0; j < l; j++) { uint8_t byte = s[j]; crc = crc64_tab[(uint8_t)crc ^ byte] ^ (crc >> 8); } return crc; } /* Test main */ #ifdef TEST_MAIN #include int main(void) { printf("e9c6d914c4b8d9ca == %016llx\n", (unsigned long long) crc64(0,(unsigned char*)"123456789",9)); return 0; } #endif disque-1.0-rc1/src/crc64.h000066400000000000000000000002001264176561100152110ustar00rootroot00000000000000#ifndef CRC64_H #define CRC64_H #include uint64_t crc64(uint64_t crc, const unsigned char *s, uint64_t l); #endif disque-1.0-rc1/src/debug.c000066400000000000000000000611451264176561100153700ustar00rootroot00000000000000/* * Copyright (c) 2009-2012, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "server.h" #include "sha1.h" /* SHA1 is used for DEBUG DIGEST */ #include "crc64.h" #include "job.h" #include "queue.h" #include #include #ifdef HAVE_BACKTRACE #include #include #include #include "bio.h" #endif /* HAVE_BACKTRACE */ #ifdef __CYGWIN__ #ifndef SA_ONSTACK #define SA_ONSTACK 0x08000000 #endif #endif /* ================================= Debugging ============================== */ void debugCommand(client *c) { if (!strcasecmp(c->argv[1]->ptr,"segfault")) { *((char*)-1) = 'x'; } else if (!strcasecmp(c->argv[1]->ptr,"restart") || !strcasecmp(c->argv[1]->ptr,"crash-and-recover")) { long long delay = 0; if (c->argc >= 3) { if (getLongLongFromObjectOrReply(c, c->argv[2], &delay, NULL) != C_OK) return; if (delay < 0) delay = 0; } int flags = !strcasecmp(c->argv[1]->ptr,"restart") ? (RESTART_SERVER_GRACEFULLY|RESTART_SERVER_CONFIG_REWRITE) : RESTART_SERVER_NONE; restartServer(flags,delay); addReplyError(c,"failed to restart the server. Check server logs."); } else if (!strcasecmp(c->argv[1]->ptr,"oom")) { void *ptr = zmalloc(ULONG_MAX); /* Should trigger an out of memory. */ zfree(ptr); addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"assert")) { if (c->argc >= 3) c->argv[2] = tryObjectEncoding(c->argv[2]); serverAssertWithInfo(c,c->argv[0],1 == 2); } else if (!strcasecmp(c->argv[1]->ptr,"flushall")) { flushServerData(); addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"loadaof")) { if (server.aof_state == AOF_ON) flushAppendOnlyFile(1); flushServerData(); if (loadAppendOnlyFile(server.aof_filename) != C_OK) { addReply(c,shared.err); return; } serverLog(LL_WARNING,"Append Only File loaded by DEBUG LOADAOF"); addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"sleep") && c->argc == 3) { double dtime = strtod(c->argv[2]->ptr,NULL); long long utime = dtime*1000000; struct timespec tv; tv.tv_sec = utime / 1000000; tv.tv_nsec = (utime % 1000000) * 1000; nanosleep(&tv, NULL); addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"error") && c->argc == 3) { sds errstr = sdsnewlen("-",1); errstr = sdscatsds(errstr,c->argv[2]->ptr); errstr = sdsmapchars(errstr,"\n\r"," ",2); /* no newlines in errors. 
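 * Both '\n' and '\r' are rewritten to spaces so that an arbitrary
 * user-supplied message cannot break the protocol framing; the real
 * CRLF terminator is appended just below.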
*/ errstr = sdscatlen(errstr,"\r\n",2); addReplySds(c,errstr); } else if (!strcasecmp(c->argv[1]->ptr,"structsize") && c->argc == 2) { sds sizes = sdsempty(); sizes = sdscatprintf(sizes,"bits:%d ",(sizeof(void*) == 8)?64:32); sizes = sdscatprintf(sizes,"job:%d ", (int)sizeof(job)); sizes = sdscatprintf(sizes,"queue:%d ", (int)sizeof(queue)); sizes = sdscatprintf(sizes,"robj:%d ",(int)sizeof(robj)); sizes = sdscatprintf(sizes,"dictentry:%d ",(int)sizeof(dictEntry)); sizes = sdscatprintf(sizes,"sdshdr5:%d ",(int)sizeof(struct sdshdr5)); sizes = sdscatprintf(sizes,"sdshdr8:%d ",(int)sizeof(struct sdshdr8)); sizes = sdscatprintf(sizes,"sdshdr16:%d ",(int)sizeof(struct sdshdr16)); sizes = sdscatprintf(sizes,"sdshdr32:%d ",(int)sizeof(struct sdshdr32)); sizes = sdscatprintf(sizes,"sdshdr64:%d ",(int)sizeof(struct sdshdr64)); addReplyBulkSds(c,sizes); } else { addReplyErrorFormat(c, "Unknown DEBUG subcommand or wrong number of arguments for '%s'", (char*)c->argv[1]->ptr); } } /* =========================== Crash handling ============================== */ void _serverAssert(char *estr, char *file, int line) { bugReportStart(); serverLog(LL_WARNING,"=== ASSERTION FAILED ==="); serverLog(LL_WARNING,"==> %s:%d '%s' is not true",file,line,estr); #ifdef HAVE_BACKTRACE server.assert_failed = estr; server.assert_file = file; server.assert_line = line; serverLog(LL_WARNING,"(forcing SIGSEGV to print the bug report.)"); #endif *((char*)-1) = 'x'; } void _serverAssertPrintClientInfo(client *c) { int j; bugReportStart(); serverLog(LL_WARNING,"=== ASSERTION FAILED CLIENT CONTEXT ==="); serverLog(LL_WARNING,"client->flags = %d", c->flags); serverLog(LL_WARNING,"client->fd = %d", c->fd); serverLog(LL_WARNING,"client->argc = %d", c->argc); for (j=0; j < c->argc; j++) { char buf[128]; char *arg; if (c->argv[j]->type == OBJ_STRING && sdsEncodedObject(c->argv[j])) { arg = (char*) c->argv[j]->ptr; } else { snprintf(buf,sizeof(buf),"Object type: %u, encoding: %u", c->argv[j]->type, c->argv[j]->encoding); arg = buf; } serverLog(LL_WARNING,"client->argv[%d] = \"%s\" (refcount: %d)", j, arg, c->argv[j]->refcount); } } void _serverAssertPrintObject(robj *o) { bugReportStart(); serverLog(LL_WARNING,"=== ASSERTION FAILED OBJECT CONTEXT ==="); serverLogObjectDebugInfo(o); } void serverLogObjectDebugInfo(robj *o) { serverLog(LL_WARNING,"Object type: %d", o->type); serverLog(LL_WARNING,"Object encoding: %d", o->encoding); serverLog(LL_WARNING,"Object refcount: %d", o->refcount); if (o->type == OBJ_STRING && sdsEncodedObject(o)) { serverLog(LL_WARNING,"Object raw string len: %zu", sdslen(o->ptr)); if (sdslen(o->ptr) < 4096) { sds repr = sdscatrepr(sdsempty(),o->ptr,sdslen(o->ptr)); serverLog(LL_WARNING,"Object raw string content: %s", repr); sdsfree(repr); } } } void _serverAssertWithInfo(client *c, robj *o, char *estr, char *file, int line) { if (c) _serverAssertPrintClientInfo(c); if (o) _serverAssertPrintObject(o); _serverAssert(estr,file,line); } void _serverPanic(char *msg, char *file, int line) { bugReportStart(); serverLog(LL_WARNING,"------------------------------------------------"); serverLog(LL_WARNING,"!!! Software Failure. 
Press left mouse button to continue"); serverLog(LL_WARNING,"Guru Meditation: %s #%s:%d",msg,file,line); #ifdef HAVE_BACKTRACE serverLog(LL_WARNING,"(forcing SIGSEGV in order to print the stack trace)"); #endif serverLog(LL_WARNING,"------------------------------------------------"); *((char*)-1) = 'x'; } void bugReportStart(void) { if (server.bug_report_start == 0) { serverLog(LL_WARNING, "\n\n=== DISQUE BUG REPORT START: Cut & paste starting from here ==="); server.bug_report_start = 1; } } #ifdef HAVE_BACKTRACE static void *getMcontextEip(ucontext_t *uc) { #if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_6) /* OSX < 10.6 */ #if defined(__x86_64__) return (void*) uc->uc_mcontext->__ss.__rip; #elif defined(__i386__) return (void*) uc->uc_mcontext->__ss.__eip; #else return (void*) uc->uc_mcontext->__ss.__srr0; #endif #elif defined(__APPLE__) && defined(MAC_OS_X_VERSION_10_6) /* OSX >= 10.6 */ #if defined(_STRUCT_X86_THREAD_STATE64) && !defined(__i386__) return (void*) uc->uc_mcontext->__ss.__rip; #else return (void*) uc->uc_mcontext->__ss.__eip; #endif #elif defined(__linux__) /* Linux */ #if defined(__i386__) return (void*) uc->uc_mcontext.gregs[14]; /* Linux 32 */ #elif defined(__X86_64__) || defined(__x86_64__) return (void*) uc->uc_mcontext.gregs[16]; /* Linux 64 */ #elif defined(__ia64__) /* Linux IA64 */ return (void*) uc->uc_mcontext.sc_ip; #endif #else return NULL; #endif } void logStackContent(void **sp) { int i; for (i = 15; i >= 0; i--) { unsigned long addr = (unsigned long) sp+i; unsigned long val = (unsigned long) sp[i]; if (sizeof(long) == 4) serverLog(LL_WARNING, "(%08lx) -> %08lx", addr, val); else serverLog(LL_WARNING, "(%016lx) -> %016lx", addr, val); } } void logRegisters(ucontext_t *uc) { serverLog(LL_WARNING, "--- REGISTERS"); /* OSX */ #if defined(__APPLE__) && defined(MAC_OS_X_VERSION_10_6) /* OSX AMD64 */ #if defined(_STRUCT_X86_THREAD_STATE64) && !defined(__i386__) serverLog(LL_WARNING, "\n" "RAX:%016lx RBX:%016lx\nRCX:%016lx RDX:%016lx\n" "RDI:%016lx RSI:%016lx\nRBP:%016lx RSP:%016lx\n" "R8 :%016lx R9 :%016lx\nR10:%016lx R11:%016lx\n" "R12:%016lx R13:%016lx\nR14:%016lx R15:%016lx\n" "RIP:%016lx EFL:%016lx\nCS :%016lx FS:%016lx GS:%016lx", (unsigned long) uc->uc_mcontext->__ss.__rax, (unsigned long) uc->uc_mcontext->__ss.__rbx, (unsigned long) uc->uc_mcontext->__ss.__rcx, (unsigned long) uc->uc_mcontext->__ss.__rdx, (unsigned long) uc->uc_mcontext->__ss.__rdi, (unsigned long) uc->uc_mcontext->__ss.__rsi, (unsigned long) uc->uc_mcontext->__ss.__rbp, (unsigned long) uc->uc_mcontext->__ss.__rsp, (unsigned long) uc->uc_mcontext->__ss.__r8, (unsigned long) uc->uc_mcontext->__ss.__r9, (unsigned long) uc->uc_mcontext->__ss.__r10, (unsigned long) uc->uc_mcontext->__ss.__r11, (unsigned long) uc->uc_mcontext->__ss.__r12, (unsigned long) uc->uc_mcontext->__ss.__r13, (unsigned long) uc->uc_mcontext->__ss.__r14, (unsigned long) uc->uc_mcontext->__ss.__r15, (unsigned long) uc->uc_mcontext->__ss.__rip, (unsigned long) uc->uc_mcontext->__ss.__rflags, (unsigned long) uc->uc_mcontext->__ss.__cs, (unsigned long) uc->uc_mcontext->__ss.__fs, (unsigned long) uc->uc_mcontext->__ss.__gs ); logStackContent((void**)uc->uc_mcontext->__ss.__rsp); #else /* OSX x86 */ serverLog(LL_WARNING, "\n" "EAX:%08lx EBX:%08lx ECX:%08lx EDX:%08lx\n" "EDI:%08lx ESI:%08lx EBP:%08lx ESP:%08lx\n" "SS:%08lx EFL:%08lx EIP:%08lx CS :%08lx\n" "DS:%08lx ES:%08lx FS :%08lx GS :%08lx", (unsigned long) uc->uc_mcontext->__ss.__eax, (unsigned long) uc->uc_mcontext->__ss.__ebx, (unsigned long) 
uc->uc_mcontext->__ss.__ecx, (unsigned long) uc->uc_mcontext->__ss.__edx, (unsigned long) uc->uc_mcontext->__ss.__edi, (unsigned long) uc->uc_mcontext->__ss.__esi, (unsigned long) uc->uc_mcontext->__ss.__ebp, (unsigned long) uc->uc_mcontext->__ss.__esp, (unsigned long) uc->uc_mcontext->__ss.__ss, (unsigned long) uc->uc_mcontext->__ss.__eflags, (unsigned long) uc->uc_mcontext->__ss.__eip, (unsigned long) uc->uc_mcontext->__ss.__cs, (unsigned long) uc->uc_mcontext->__ss.__ds, (unsigned long) uc->uc_mcontext->__ss.__es, (unsigned long) uc->uc_mcontext->__ss.__fs, (unsigned long) uc->uc_mcontext->__ss.__gs ); logStackContent((void**)uc->uc_mcontext->__ss.__esp); #endif /* Linux */ #elif defined(__linux__) /* Linux x86 */ #if defined(__i386__) serverLog(LL_WARNING, "\n" "EAX:%08lx EBX:%08lx ECX:%08lx EDX:%08lx\n" "EDI:%08lx ESI:%08lx EBP:%08lx ESP:%08lx\n" "SS :%08lx EFL:%08lx EIP:%08lx CS:%08lx\n" "DS :%08lx ES :%08lx FS :%08lx GS:%08lx", (unsigned long) uc->uc_mcontext.gregs[11], (unsigned long) uc->uc_mcontext.gregs[8], (unsigned long) uc->uc_mcontext.gregs[10], (unsigned long) uc->uc_mcontext.gregs[9], (unsigned long) uc->uc_mcontext.gregs[4], (unsigned long) uc->uc_mcontext.gregs[5], (unsigned long) uc->uc_mcontext.gregs[6], (unsigned long) uc->uc_mcontext.gregs[7], (unsigned long) uc->uc_mcontext.gregs[18], (unsigned long) uc->uc_mcontext.gregs[17], (unsigned long) uc->uc_mcontext.gregs[14], (unsigned long) uc->uc_mcontext.gregs[15], (unsigned long) uc->uc_mcontext.gregs[3], (unsigned long) uc->uc_mcontext.gregs[2], (unsigned long) uc->uc_mcontext.gregs[1], (unsigned long) uc->uc_mcontext.gregs[0] ); logStackContent((void**)uc->uc_mcontext.gregs[7]); #elif defined(__X86_64__) || defined(__x86_64__) /* Linux AMD64 */ serverLog(LL_WARNING, "\n" "RAX:%016lx RBX:%016lx\nRCX:%016lx RDX:%016lx\n" "RDI:%016lx RSI:%016lx\nRBP:%016lx RSP:%016lx\n" "R8 :%016lx R9 :%016lx\nR10:%016lx R11:%016lx\n" "R12:%016lx R13:%016lx\nR14:%016lx R15:%016lx\n" "RIP:%016lx EFL:%016lx\nCSGSFS:%016lx", (unsigned long) uc->uc_mcontext.gregs[13], (unsigned long) uc->uc_mcontext.gregs[11], (unsigned long) uc->uc_mcontext.gregs[14], (unsigned long) uc->uc_mcontext.gregs[12], (unsigned long) uc->uc_mcontext.gregs[8], (unsigned long) uc->uc_mcontext.gregs[9], (unsigned long) uc->uc_mcontext.gregs[10], (unsigned long) uc->uc_mcontext.gregs[15], (unsigned long) uc->uc_mcontext.gregs[0], (unsigned long) uc->uc_mcontext.gregs[1], (unsigned long) uc->uc_mcontext.gregs[2], (unsigned long) uc->uc_mcontext.gregs[3], (unsigned long) uc->uc_mcontext.gregs[4], (unsigned long) uc->uc_mcontext.gregs[5], (unsigned long) uc->uc_mcontext.gregs[6], (unsigned long) uc->uc_mcontext.gregs[7], (unsigned long) uc->uc_mcontext.gregs[16], (unsigned long) uc->uc_mcontext.gregs[17], (unsigned long) uc->uc_mcontext.gregs[18] ); logStackContent((void**)uc->uc_mcontext.gregs[15]); #endif #else serverLog(LL_WARNING, " Dumping of registers not supported for this OS/arch"); #endif } /* Logs the stack trace using the backtrace() call. This function is designed * to be called from signal handlers safely. */ void logStackTrace(ucontext_t *uc) { void *trace[100]; int trace_size = 0, fd; int log_to_stdout = server.logfile[0] == '\0'; /* Open the log file in append mode. */ fd = log_to_stdout ? 
STDOUT_FILENO : open(server.logfile, O_APPEND|O_CREAT|O_WRONLY, 0644); if (fd == -1) return; /* Generate the stack trace */ trace_size = backtrace(trace, 100); /* overwrite sigaction with caller's address */ if (getMcontextEip(uc) != NULL) trace[1] = getMcontextEip(uc); /* Write symbols to log file */ backtrace_symbols_fd(trace, trace_size, fd); /* Cleanup */ if (!log_to_stdout) close(fd); } /* Log information about the "current" client, that is, the client that is * currently being served by Disque. May be NULL if Disque is not serving a * client right now. */ void logCurrentClient(void) { if (server.current_client == NULL) return; client *cc = server.current_client; sds client; int j; serverLog(LL_WARNING, "--- CURRENT CLIENT INFO"); client = catClientInfoString(sdsempty(),cc); serverLog(LL_WARNING,"client: %s", client); sdsfree(client); for (j = 0; j < cc->argc; j++) { robj *decoded; decoded = getDecodedObject(cc->argv[j]); serverLog(LL_WARNING,"argv[%d]: '%s'", j, (char*)decoded->ptr); decrRefCount(decoded); } } #if defined(HAVE_PROC_MAPS) void memtest_non_destructive_invert(void *addr, size_t size); void memtest_non_destructive_swap(void *addr, size_t size); #define MEMTEST_MAX_REGIONS 128 int memtest_test_linux_anonymous_maps(void) { FILE *fp = fopen("/proc/self/maps","r"); char line[1024]; size_t start_addr, end_addr, size; size_t start_vect[MEMTEST_MAX_REGIONS]; size_t size_vect[MEMTEST_MAX_REGIONS]; int regions = 0, j; uint64_t crc1 = 0, crc2 = 0, crc3 = 0; while(fgets(line,sizeof(line),fp) != NULL) { char *start, *end, *p = line; start = p; p = strchr(p,'-'); if (!p) continue; *p++ = '\0'; end = p; p = strchr(p,' '); if (!p) continue; *p++ = '\0'; if (strstr(p,"stack") || strstr(p,"vdso") || strstr(p,"vsyscall")) continue; if (!strstr(p,"00:00")) continue; if (!strstr(p,"rw")) continue; start_addr = strtoul(start,NULL,16); end_addr = strtoul(end,NULL,16); size = end_addr-start_addr; start_vect[regions] = start_addr; size_vect[regions] = size; printf("Testing %lx %lu\n", (unsigned long) start_vect[regions], (unsigned long) size_vect[regions]); regions++; } /* Test all the regions as an unique sequential region. * 1) Take the CRC64 of the memory region. */ for (j = 0; j < regions; j++) { crc1 = crc64(crc1,(void*)start_vect[j],size_vect[j]); } /* 2) Invert bits, swap adjacent words, swap again, invert bits. * This is the error amplification step. */ for (j = 0; j < regions; j++) memtest_non_destructive_invert((void*)start_vect[j],size_vect[j]); for (j = 0; j < regions; j++) memtest_non_destructive_swap((void*)start_vect[j],size_vect[j]); for (j = 0; j < regions; j++) memtest_non_destructive_swap((void*)start_vect[j],size_vect[j]); for (j = 0; j < regions; j++) memtest_non_destructive_invert((void*)start_vect[j],size_vect[j]); /* 3) Take the CRC64 sum again. */ for (j = 0; j < regions; j++) crc2 = crc64(crc2,(void*)start_vect[j],size_vect[j]); /* 4) Swap + Swap again */ for (j = 0; j < regions; j++) memtest_non_destructive_swap((void*)start_vect[j],size_vect[j]); for (j = 0; j < regions; j++) memtest_non_destructive_swap((void*)start_vect[j],size_vect[j]); /* 5) Take the CRC64 sum again. */ for (j = 0; j < regions; j++) crc3 = crc64(crc3,(void*)start_vect[j],size_vect[j]); /* NOTE: It is very important to close the file descriptor only now * because closing it before may result into unmapping of some memory * region that we are testing. */ fclose(fp); /* If the two CRC are not the same, we trapped a memory error. 
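 * Here crc1 covers the original content, crc2 is taken after an
 * invert/swap/swap/invert sequence that should be a no-op on healthy
 * memory, and crc3 after one more swap/swap round trip; a stuck or
 * unstable bit breaks at least one of the round trips, so any
 * disagreement between the three sums is reported as an error.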
*/ return crc1 != crc2 || crc2 != crc3; } #endif void sigsegvHandler(int sig, siginfo_t *info, void *secret) { ucontext_t *uc = (ucontext_t*) secret; sds infostring, clients; struct sigaction act; UNUSED(info); bugReportStart(); serverLog(LL_WARNING, " Disque %s crashed by signal: %d", DISQUE_VERSION, sig); serverLog(LL_WARNING, " Failed assertion: %s (%s:%d)", server.assert_failed, server.assert_file, server.assert_line); /* Log the stack trace */ serverLog(LL_WARNING, "--- STACK TRACE"); logStackTrace(uc); /* Log INFO and CLIENT LIST */ serverLog(LL_WARNING, "--- INFO OUTPUT"); infostring = genDisqueInfoString("all"); infostring = sdscatprintf(infostring, "hash_init_value: %u\n", dictGetHashFunctionSeed()); serverLogRaw(LL_WARNING, infostring); serverLog(LL_WARNING, "--- CLIENT LIST OUTPUT"); clients = getAllClientsInfoString(); serverLogRaw(LL_WARNING, clients); sdsfree(infostring); sdsfree(clients); /* Log the current client */ logCurrentClient(); /* Log dump of processor registers */ logRegisters(uc); #if defined(HAVE_PROC_MAPS) /* Test memory */ serverLog(LL_WARNING, "--- FAST MEMORY TEST"); bioKillThreads(); if (memtest_test_linux_anonymous_maps()) { serverLog(LL_WARNING, "!!! MEMORY ERROR DETECTED! Check your memory ASAP !!!"); } else { serverLog(LL_WARNING, "Fast memory test PASSED, however your memory can still be broken. Please run a memory test for several hours if possible."); } #endif serverLog(LL_WARNING, "\n=== DISQUE BUG REPORT END. Make sure to include from START to END. ===\n\n" " Please report the crash by opening an issue on github:\n\n" " http://github.com/antirez/disque/issues\n\n" " Suspect RAM error? Use disque-server --test-memory to verify it.\n\n" ); /* free(messages); Don't call free() with possibly corrupted memory. */ if (server.daemonize) unlink(server.pidfile); /* Make sure we exit with the right signal at the end. So for instance * the core will be dumped if enabled. */ sigemptyset (&act.sa_mask); act.sa_flags = SA_NODEFER | SA_ONSTACK | SA_RESETHAND; act.sa_handler = SIG_DFL; sigaction (sig, &act, NULL); kill(getpid(),sig); } #endif /* HAVE_BACKTRACE */ /* ==================== Logging functions for debugging ===================== */ void serverLogHexDump(int level, char *descr, void *value, size_t len) { char buf[65], *b; unsigned char *v = value; char charset[] = "0123456789abcdef"; serverLog(level,"%s (hexdump):", descr); b = buf; while(len) { b[0] = charset[(*v)>>4]; b[1] = charset[(*v)&0xf]; b[2] = '\0'; b += 2; len--; v++; if (b-buf == 64 || len == 0) { serverLogRaw(level|LL_RAW,buf); b = buf; } } serverLogRaw(level|LL_RAW,"\n"); } /* =========================== Software Watchdog ============================ */ #include void watchdogSignalHandler(int sig, siginfo_t *info, void *secret) { #ifdef HAVE_BACKTRACE ucontext_t *uc = (ucontext_t*) secret; #endif UNUSED(info); UNUSED(sig); serverLogFromHandler(LL_WARNING,"\n--- WATCHDOG TIMER EXPIRED ---"); #ifdef HAVE_BACKTRACE logStackTrace(uc); #else serverLogFromHandler(LL_WARNING,"Sorry: no support for backtrace()."); #endif serverLogFromHandler(LL_WARNING,"--------\n"); } /* Schedule a SIGALRM delivery after the specified period in milliseconds. * If a timer is already scheduled, this function will re-schedule it to the * specified time. If period is 0 the current timer is disabled. */ void watchdogScheduleSignal(int period) { struct itimerval it; /* Will stop the timer if period is 0. */ it.it_value.tv_sec = period/1000; it.it_value.tv_usec = (period%1000)*1000; /* Don't automatically restart. 
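 * Leaving it_interval zeroed makes the ITIMER_REAL timer one-shot: the
 * kernel delivers a single SIGALRM once it_value elapses and then
 * disarms the timer, so the watchdog has to be re-armed periodically
 * by the caller.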
*/ it.it_interval.tv_sec = 0; it.it_interval.tv_usec = 0; setitimer(ITIMER_REAL, &it, NULL); } /* Enable the software watchdog with the specified period in milliseconds. */ void enableWatchdog(int period) { int min_period; if (server.watchdog_period == 0) { struct sigaction act; /* Watchdog was actually disabled, so we have to setup the signal * handler. */ sigemptyset(&act.sa_mask); act.sa_flags = SA_ONSTACK | SA_SIGINFO; act.sa_sigaction = watchdogSignalHandler; sigaction(SIGALRM, &act, NULL); } /* If the configured period is smaller than twice the timer period, it is * too short for the software watchdog to work reliably. Fix it now * if needed. */ min_period = (1000/server.hz)*2; if (period < min_period) period = min_period; watchdogScheduleSignal(period); /* Adjust the current timer. */ server.watchdog_period = period; } /* Disable the software watchdog. */ void disableWatchdog(void) { struct sigaction act; if (server.watchdog_period == 0) return; /* Already disabled. */ watchdogScheduleSignal(0); /* Stop the current timer. */ /* Set the signal handler to SIG_IGN, this will also remove pending * signals from the queue. */ sigemptyset(&act.sa_mask); act.sa_flags = 0; act.sa_handler = SIG_IGN; sigaction(SIGALRM, &act, NULL); server.watchdog_period = 0; } disque-1.0-rc1/src/dict.c000066400000000000000000001105201264176561100152150ustar00rootroot00000000000000/* Hash Tables Implementation. * * This file implements in memory hash tables with insert/del/replace/find/ * get-random-element operations. Hash tables will auto resize if needed * tables of power of two in size are used, collisions are handled by * chaining. See the source code for more information... :) * * Copyright (c) 2006-2012, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "fmacros.h" #include #include #include #include #include #include #include #include "dict.h" #include "zmalloc.h" #include "disqueassert.h" /* Using dictEnableResize() / dictDisableResize() we make possible to * enable/disable resizing of the hash table as needed. 
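 * (A hypothetical caller protecting a fork-based save could bracket the
 * critical region like this:
 *
 *   dictDisableResize();
 *   start_background_save();    <- forks a child sharing pages
 *   ... and once the child has terminated ...
 *   dictEnableResize();
 *
 * so that rehashing does not move buckets while pages are shared.)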
This is very important * for Disque, as we use copy-on-write and don't want to move too much memory * around when there is a child performing saving operations. * * Note that even when dict_can_resize is set to 0, not all resizes are * prevented: a hash table is still allowed to grow if the ratio between * the number of elements and the buckets > dict_force_resize_ratio. */ static int dict_can_resize = 1; static unsigned int dict_force_resize_ratio = 5; /* -------------------------- private prototypes ---------------------------- */ static int _dictExpandIfNeeded(dict *ht); static unsigned long _dictNextPower(unsigned long size); static int _dictKeyIndex(dict *ht, const void *key); static int _dictInit(dict *ht, dictType *type, void *privDataPtr); /* -------------------------- hash functions -------------------------------- */ /* Thomas Wang's 32 bit Mix Function */ unsigned int dictIntHashFunction(unsigned int key) { key += ~(key << 15); key ^= (key >> 10); key += (key << 3); key ^= (key >> 6); key += ~(key << 11); key ^= (key >> 16); return key; } static uint32_t dict_hash_function_seed = 5381; void dictSetHashFunctionSeed(uint32_t seed) { dict_hash_function_seed = seed; } uint32_t dictGetHashFunctionSeed(void) { return dict_hash_function_seed; } /* MurmurHash2, by Austin Appleby * Note - This code makes a few assumptions about how your machine behaves - * 1. We can read a 4-byte value from any address without crashing * 2. sizeof(int) == 4 * * And it has a few limitations - * * 1. It will not work incrementally. * 2. It will not produce the same results on little-endian and big-endian * machines. */ unsigned int dictGenHashFunction(const void *key, int len) { /* 'm' and 'r' are mixing constants generated offline. They're not really 'magic', they just happen to work well. */ uint32_t seed = dict_hash_function_seed; const uint32_t m = 0x5bd1e995; const int r = 24; /* Initialize the hash to a 'random' value */ uint32_t h = seed ^ len; /* Mix 4 bytes at a time into the hash */ const unsigned char *data = (const unsigned char *)key; while(len >= 4) { uint32_t k = *(uint32_t*)data; k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; data += 4; len -= 4; } /* Handle the last few bytes of the input array */ switch(len) { case 3: h ^= data[2] << 16; case 2: h ^= data[1] << 8; case 1: h ^= data[0]; h *= m; }; /* Do a few final mixes of the hash to ensure the last few * bytes are well-incorporated. */ h ^= h >> 13; h *= m; h ^= h >> 15; return (unsigned int)h; } /* And a case insensitive hash function (based on djb hash) */ unsigned int dictGenCaseHashFunction(const unsigned char *buf, int len) { unsigned int hash = (unsigned int)dict_hash_function_seed; while (len--) hash = ((hash << 5) + hash) + (tolower(*buf++)); /* hash * 33 + c */ return hash; } /* ----------------------------- API implementation ------------------------- */ /* Reset a hash table already initialized with ht_init(). * NOTE: This function should only be called by ht_destroy(). 
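 * (In this file the corresponding entry points are _dictInit() and
 * _dictClear()/dictRelease(); the ht_init()/ht_destroy() names appear to be
 * left over from an earlier revision of this comment.)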
*/ static void _dictReset(dictht *ht) { ht->table = NULL; ht->size = 0; ht->sizemask = 0; ht->used = 0; } /* Create a new hash table */ dict *dictCreate(dictType *type, void *privDataPtr) { dict *d = zmalloc(sizeof(*d)); _dictInit(d,type,privDataPtr); return d; } /* Initialize the hash table */ int _dictInit(dict *d, dictType *type, void *privDataPtr) { _dictReset(&d->ht[0]); _dictReset(&d->ht[1]); d->type = type; d->privdata = privDataPtr; d->rehashidx = -1; d->iterators = 0; return DICT_OK; } /* Resize the table to the minimal size that contains all the elements, * but with the invariant of a USED/BUCKETS ratio near to <= 1 */ int dictResize(dict *d) { int minimal; if (!dict_can_resize || dictIsRehashing(d)) return DICT_ERR; minimal = d->ht[0].used; if (minimal < DICT_HT_INITIAL_SIZE) minimal = DICT_HT_INITIAL_SIZE; return dictExpand(d, minimal); } /* Expand or create the hash table */ int dictExpand(dict *d, unsigned long size) { dictht n; /* the new hash table */ unsigned long realsize = _dictNextPower(size); /* the size is invalid if it is smaller than the number of * elements already inside the hash table */ if (dictIsRehashing(d) || d->ht[0].used > size) return DICT_ERR; /* Rehashing to the same table size is not useful. */ if (realsize == d->ht[0].size) return DICT_ERR; /* Allocate the new hash table and initialize all pointers to NULL */ n.size = realsize; n.sizemask = realsize-1; n.table = zcalloc(realsize*sizeof(dictEntry*)); n.used = 0; /* Is this the first initialization? If so it's not really a rehashing * we just set the first hash table so that it can accept keys. */ if (d->ht[0].table == NULL) { d->ht[0] = n; return DICT_OK; } /* Prepare a second hash table for incremental rehashing */ d->ht[1] = n; d->rehashidx = 0; return DICT_OK; } /* Performs N steps of incremental rehashing. Returns 1 if there are still * keys to move from the old to the new hash table, otherwise 0 is returned. * * Note that a rehashing step consists in moving a bucket (that may have more * than one key as we use chaining) from the old to the new hash table, however * since part of the hash table may be composed of empty spaces, it is not * guaranteed that this function will rehash even a single bucket, since it * will visit at max N*10 empty buckets in total, otherwise the amount of * work it does would be unbound and the function may block for a long time. */ int dictRehash(dict *d, int n) { int empty_visits = n*10; /* Max number of empty buckets to visit. */ if (!dictIsRehashing(d)) return 0; while(n-- && d->ht[0].used != 0) { dictEntry *de, *nextde; /* Note that rehashidx can't overflow as we are sure there are more * elements because ht[0].used != 0 */ assert(d->ht[0].size > (unsigned long)d->rehashidx); while(d->ht[0].table[d->rehashidx] == NULL) { d->rehashidx++; if (--empty_visits == 0) return 1; } de = d->ht[0].table[d->rehashidx]; /* Move all the keys in this bucket from the old to the new hash HT */ while(de) { unsigned int h; nextde = de->next; /* Get the index in the new hash table */ h = dictHashKey(d, de->key) & d->ht[1].sizemask; de->next = d->ht[1].table[h]; d->ht[1].table[h] = de; d->ht[0].used--; d->ht[1].used++; de = nextde; } d->ht[0].table[d->rehashidx] = NULL; d->rehashidx++; } /* Check if we already rehashed the whole table... */ if (d->ht[0].used == 0) { zfree(d->ht[0].table); d->ht[0] = d->ht[1]; _dictReset(&d->ht[1]); d->rehashidx = -1; return 0; } /* More to rehash... 
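 * (callers are expected to keep invoking dictRehash() until it returns 0;
 * dictRehashMilliseconds() below is the time-bounded driver built on top)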
*/ return 1; } long long timeInMilliseconds(void) { struct timeval tv; gettimeofday(&tv,NULL); return (((long long)tv.tv_sec)*1000)+(tv.tv_usec/1000); } /* Rehash for an amount of time between ms milliseconds and ms+1 milliseconds */ int dictRehashMilliseconds(dict *d, int ms) { long long start = timeInMilliseconds(); int rehashes = 0; while(dictRehash(d,100)) { rehashes += 100; if (timeInMilliseconds()-start > ms) break; } return rehashes; } /* This function performs just a step of rehashing, and only if there are * no safe iterators bound to our hash table. When we have iterators in the * middle of a rehashing we can't mess with the two hash tables, otherwise * some elements can be missed or duplicated. * * This function is called by common lookup or update operations in the * dictionary so that the hash table automatically migrates from H1 to H2 * while it is actively used. */ static void _dictRehashStep(dict *d) { if (d->iterators == 0) dictRehash(d,1); } /* Add an element to the target hash table */ int dictAdd(dict *d, void *key, void *val) { dictEntry *entry = dictAddRaw(d,key); if (!entry) return DICT_ERR; dictSetVal(d, entry, val); return DICT_OK; } /* Low level add. This function adds the entry but instead of setting * a value returns the dictEntry structure to the user, who will make * sure to fill the value field as they wish. * * This function is also directly exposed to the user API to be called * mainly in order to store non-pointers inside the hash value, for example: * * entry = dictAddRaw(dict,mykey); * if (entry != NULL) dictSetSignedIntegerVal(entry,1000); * * Return values: * * If the key already exists NULL is returned. * If the key was added, the hash entry is returned to be manipulated by the caller. */ dictEntry *dictAddRaw(dict *d, void *key) { int index; dictEntry *entry; dictht *ht; if (dictIsRehashing(d)) _dictRehashStep(d); /* Get the index of the new element, or -1 if * the element already exists. */ if ((index = _dictKeyIndex(d, key)) == -1) return NULL; /* Allocate the memory and store the new entry */ ht = dictIsRehashing(d) ? &d->ht[1] : &d->ht[0]; entry = zmalloc(sizeof(*entry)); entry->next = ht->table[index]; ht->table[index] = entry; ht->used++; /* Set the hash entry fields. */ dictSetKey(d, entry, key); return entry; } /* Add an element, discarding the old one if the key already exists. * Return 1 if the key was added from scratch, 0 if there was already an * element with such a key and dictReplace() just performed a value update * operation. */ int dictReplace(dict *d, void *key, void *val) { dictEntry *entry, auxentry; /* Try to add the element. If the key * does not exist, dictAdd() will succeed. */ if (dictAdd(d, key, val) == DICT_OK) return 1; /* It already exists, get the entry */ entry = dictFind(d, key); /* Set the new value and free the old one. Note that it is important * to do that in this order, as the value may just be exactly the same * as the previous one. In this context, think of reference counting: * you want to increment (set), and then decrement (free), and not the * reverse. */ auxentry = *entry; dictSetVal(d, entry, val); dictFreeVal(d, &auxentry); return 0; } /* dictReplaceRaw() is simply a version of dictAddRaw() that always * returns the hash entry of the specified key, even if the key already * exists and can't be added (in that case the entry of the already * existing key is returned). * * See dictAddRaw() for more information. */ dictEntry *dictReplaceRaw(dict *d, void *key) { dictEntry *entry = dictFind(d,key); return entry ?
entry : dictAddRaw(d,key); } /* Search and remove an element */ static int dictGenericDelete(dict *d, const void *key, int nofree) { unsigned int h, idx; dictEntry *he, *prevHe; int table; if (d->ht[0].size == 0) return DICT_ERR; /* d->ht[0].table is NULL */ if (dictIsRehashing(d)) _dictRehashStep(d); h = dictHashKey(d, key); for (table = 0; table <= 1; table++) { idx = h & d->ht[table].sizemask; he = d->ht[table].table[idx]; prevHe = NULL; while(he) { if (dictCompareKeys(d, key, he->key)) { /* Unlink the element from the list */ if (prevHe) prevHe->next = he->next; else d->ht[table].table[idx] = he->next; if (!nofree) { dictFreeKey(d, he); dictFreeVal(d, he); } zfree(he); d->ht[table].used--; return DICT_OK; } prevHe = he; he = he->next; } if (!dictIsRehashing(d)) break; } return DICT_ERR; /* not found */ } int dictDelete(dict *ht, const void *key) { return dictGenericDelete(ht,key,0); } int dictDeleteNoFree(dict *ht, const void *key) { return dictGenericDelete(ht,key,1); } /* Destroy an entire dictionary */ int _dictClear(dict *d, dictht *ht, void(callback)(void *)) { unsigned long i; /* Free all the elements */ for (i = 0; i < ht->size && ht->used > 0; i++) { dictEntry *he, *nextHe; if (callback && (i & 65535) == 0) callback(d->privdata); if ((he = ht->table[i]) == NULL) continue; while(he) { nextHe = he->next; dictFreeKey(d, he); dictFreeVal(d, he); zfree(he); ht->used--; he = nextHe; } } /* Free the table and the allocated cache structure */ zfree(ht->table); /* Re-initialize the table */ _dictReset(ht); return DICT_OK; /* never fails */ } /* Clear & Release the hash table */ void dictRelease(dict *d) { _dictClear(d,&d->ht[0],NULL); _dictClear(d,&d->ht[1],NULL); zfree(d); } dictEntry *dictFind(dict *d, const void *key) { dictEntry *he; unsigned int h, idx, table; if (d->ht[0].size == 0) return NULL; /* We don't have a table at all */ if (dictIsRehashing(d)) _dictRehashStep(d); h = dictHashKey(d, key); for (table = 0; table <= 1; table++) { idx = h & d->ht[table].sizemask; he = d->ht[table].table[idx]; while(he) { if (dictCompareKeys(d, key, he->key)) return he; he = he->next; } if (!dictIsRehashing(d)) return NULL; } return NULL; } void *dictFetchValue(dict *d, const void *key) { dictEntry *he; he = dictFind(d,key); return he ? dictGetVal(he) : NULL; } /* A fingerprint is a 64 bit number that represents the state of the dictionary * at a given time, it's just a few dict properties xored together. * When an unsafe iterator is initialized, we get the dict fingerprint, and check * the fingerprint again when the iterator is released. * If the two fingerprints are different it means that the user of the iterator * performed forbidden operations against the dictionary while iterating. */ long long dictFingerprint(dict *d) { long long integers[6], hash = 0; int j; integers[0] = (long) d->ht[0].table; integers[1] = d->ht[0].size; integers[2] = d->ht[0].used; integers[3] = (long) d->ht[1].table; integers[4] = d->ht[1].size; integers[5] = d->ht[1].used; /* We hash N integers by summing every successive integer with the integer * hashing of the previous sum. Basically: * * Result = hash(hash(hash(int1)+int2)+int3) ... * * This way the same set of integers in a different order will (likely) hash * to a different number. */ for (j = 0; j < 6; j++) { hash += integers[j]; /* For the hashing step we use Tomas Wang's 64 bit integer hash. 
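 * (dictNext() records this fingerprint when an unsafe iterator takes its
 * first step, and dictReleaseIterator() asserts it is unchanged, which is
 * how forbidden writes performed during unsafe iteration are caught.)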
*/ hash = (~hash) + (hash << 21); // hash = (hash << 21) - hash - 1; hash = hash ^ (hash >> 24); hash = (hash + (hash << 3)) + (hash << 8); // hash * 265 hash = hash ^ (hash >> 14); hash = (hash + (hash << 2)) + (hash << 4); // hash * 21 hash = hash ^ (hash >> 28); hash = hash + (hash << 31); } return hash; } dictIterator *dictGetIterator(dict *d) { dictIterator *iter = zmalloc(sizeof(*iter)); iter->d = d; iter->table = 0; iter->index = -1; iter->safe = 0; iter->entry = NULL; iter->nextEntry = NULL; return iter; } dictIterator *dictGetSafeIterator(dict *d) { dictIterator *i = dictGetIterator(d); i->safe = 1; return i; } dictEntry *dictNext(dictIterator *iter) { while (1) { if (iter->entry == NULL) { dictht *ht = &iter->d->ht[iter->table]; if (iter->index == -1 && iter->table == 0) { if (iter->safe) iter->d->iterators++; else iter->fingerprint = dictFingerprint(iter->d); } iter->index++; if (iter->index >= (long) ht->size) { if (dictIsRehashing(iter->d) && iter->table == 0) { iter->table++; iter->index = 0; ht = &iter->d->ht[1]; } else { break; } } iter->entry = ht->table[iter->index]; } else { iter->entry = iter->nextEntry; } if (iter->entry) { /* We need to save the 'next' here, the iterator user * may delete the entry we are returning. */ iter->nextEntry = iter->entry->next; return iter->entry; } } return NULL; } void dictReleaseIterator(dictIterator *iter) { if (!(iter->index == -1 && iter->table == 0)) { if (iter->safe) iter->d->iterators--; else assert(iter->fingerprint == dictFingerprint(iter->d)); } zfree(iter); } /* Return a random entry from the hash table. Useful to * implement randomized algorithms */ dictEntry *dictGetRandomKey(dict *d) { dictEntry *he, *orighe; unsigned int h; int listlen, listele; if (dictSize(d) == 0) return NULL; if (dictIsRehashing(d)) _dictRehashStep(d); if (dictIsRehashing(d)) { do { /* We are sure there are no elements in indexes from 0 * to rehashidx-1 */ h = d->rehashidx + (random() % (d->ht[0].size + d->ht[1].size - d->rehashidx)); he = (h >= d->ht[0].size) ? d->ht[1].table[h - d->ht[0].size] : d->ht[0].table[h]; } while(he == NULL); } else { do { h = random() & d->ht[0].sizemask; he = d->ht[0].table[h]; } while(he == NULL); } /* Now we found a non empty bucket, but it is a linked * list and we need to get a random element from the list. * The only sane way to do so is counting the elements and * select a random index. */ listlen = 0; orighe = he; while(he) { he = he->next; listlen++; } listele = random() % listlen; he = orighe; while(listele--) he = he->next; return he; } /* This function samples the dictionary to return a few keys from random * locations. * * It does not guarantee to return all the keys specified in 'count', nor * it does guarantee to return non-duplicated elements, however it will make * some effort to do both things. * * Returned pointers to hash table entries are stored into 'des' that * points to an array of dictEntry pointers. The array must have room for * at least 'count' elements, that is the argument we pass to the function * to tell how many random elements we need. * * The function returns the number of items stored into 'des', that may * be less than 'count' if the hash table has less than 'count' elements * inside, or if not enough elements were found in a reasonable amount of * steps. * * Note that this function is not suitable when you need a good distribution * of the returned items, but only when you need to "sample" a given number * of continuous elements to run some kind of algorithm or to produce * statistics. 
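 * A minimal usage sketch (the array size is chosen only for illustration):
 *
 *     dictEntry *samples[16];
 *     unsigned int got = dictGetSomeKeys(d, samples, 16);
 *
 * Only samples[0..got-1] are valid afterwards; 'got' may be smaller than
 * requested and the same entry may appear more than once.
 *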
However the function is much faster than dictGetRandomKey() * at producing N elements. */ unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count) { unsigned int j; /* internal hash table id, 0 or 1. */ unsigned int tables; /* 1 or 2 tables? */ unsigned int stored = 0, maxsizemask; unsigned int maxsteps; if (dictSize(d) < count) count = dictSize(d); maxsteps = count*10; /* Try to do a rehashing work proportional to 'count'. */ for (j = 0; j < count; j++) { if (dictIsRehashing(d)) _dictRehashStep(d); else break; } tables = dictIsRehashing(d) ? 2 : 1; maxsizemask = d->ht[0].sizemask; if (tables > 1 && maxsizemask < d->ht[1].sizemask) maxsizemask = d->ht[1].sizemask; /* Pick a random point inside the larger table. */ unsigned int i = random() & maxsizemask; unsigned int emptylen = 0; /* Continuous empty entries so far. */ while(stored < count && maxsteps--) { for (j = 0; j < tables; j++) { /* Invariant of the dict.c rehashing: up to the indexes already * visited in ht[0] during the rehashing, there are no populated * buckets, so we can skip ht[0] for indexes between 0 and idx-1. */ if (tables == 2 && j == 0 && i < d->rehashidx) { /* Moreover, if we are currently out of range in the second * table, there will be no elements in both tables up to * the current rehashing index, so we jump if possible. * (this happens when going from big to small table). */ if (i >= d->ht[1].size) i = d->rehashidx; continue; } if (i >= d->ht[j].size) continue; /* Out of range for this table. */ dictEntry *he = d->ht[j].table[i]; /* Count contiguous empty buckets, and jump to other * locations if they reach 'count' (with a minimum of 5). */ if (he == NULL) { emptylen++; if (emptylen >= 5 && emptylen > count) { i = random() & maxsizemask; emptylen = 0; } } else { emptylen = 0; while (he) { /* Collect all the elements of the buckets found non * empty while iterating. */ *des = he; des++; he = he->next; stored++; if (stored == count) return stored; } } } i = (i+1) & maxsizemask; } return stored; } /* Function to reverse bits. Algorithm from: * http://graphics.stanford.edu/~seander/bithacks.html#ReverseParallel */ static unsigned long rev(unsigned long v) { unsigned long s = 8 * sizeof(v); // bit size; must be power of 2 unsigned long mask = ~0; while ((s >>= 1) > 0) { mask ^= (mask << s); v = ((v >> s) & mask) | ((v << s) & ~mask); } return v; } /* dictScan() is used to iterate over the elements of a dictionary. * * Iterating works the following way: * * 1) Initially you call the function using a cursor (v) value of 0. * 2) The function performs one step of the iteration, and returns the * new cursor value you must use in the next call. * 3) When the returned cursor is 0, the iteration is complete. * * The function guarantees all elements present in the * dictionary get returned between the start and end of the iteration. * However it is possible some elements get returned multiple times. * * For every element returned, the callback argument 'fn' is * called with 'privdata' as first argument and the dictionary entry * 'de' as second argument. * * HOW IT WORKS. * * The iteration algorithm was designed by Pieter Noordhuis. * The main idea is to increment a cursor starting from the higher order * bits. That is, instead of incrementing the cursor normally, the bits * of the cursor are reversed, then the cursor is incremented, and finally * the bits are reversed again. * * This strategy is needed because the hash table may be resized between * iteration calls. 
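 * As a worked illustration of the reversed increment: with a table of 8
 * buckets the cursor visits them in the order 0, 4, 2, 6, 1, 5, 3, 7
 * (binary 000, 100, 010, 110, 001, 101, 011, 111) and then wraps back to
 * zero, which is the caller's signal that the scan is complete.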
* * dict.c hash tables are always power of two in size, and they * use chaining, so the position of an element in a given table is given * by computing the bitwise AND between Hash(key) and SIZE-1 * (where SIZE-1 is always the mask that is equivalent to taking the rest * of the division between the Hash of the key and SIZE). * * For example if the current hash table size is 16, the mask is * (in binary) 1111. The position of a key in the hash table will always be * the last four bits of the hash output, and so forth. * * WHAT HAPPENS IF THE TABLE CHANGES IN SIZE? * * If the hash table grows, elements can go anywhere in one multiple of * the old bucket: for example let's say we already iterated with * a 4 bit cursor 1100 (the mask is 1111 because hash table size = 16). * * If the hash table will be resized to 64 elements, then the new mask will * be 111111. The new buckets you obtain by substituting in ??1100 * with either 0 or 1 can be targeted only by keys we already visited * when scanning the bucket 1100 in the smaller hash table. * * By iterating the higher bits first, because of the inverted counter, the * cursor does not need to restart if the table size gets bigger. It will * continue iterating using cursors without '1100' at the end, and also * without any other combination of the final 4 bits already explored. * * Similarly when the table size shrinks over time, for example going from * 16 to 8, if a combination of the lower three bits (the mask for size 8 * is 111) were already completely explored, it would not be visited again * because we are sure we tried, for example, both 0111 and 1111 (all the * variations of the higher bit) so we don't need to test it again. * * WAIT... YOU HAVE *TWO* TABLES DURING REHASHING! * * Yes, this is true, but we always iterate the smaller table first, then * we test all the expansions of the current cursor into the larger * table. For example if the current cursor is 101 and we also have a * larger table of size 16, we also test (0)101 and (1)101 inside the larger * table. This reduces the problem back to having only one table, where * the larger one, if it exists, is just an expansion of the smaller one. * * LIMITATIONS * * This iterator is completely stateless, and this is a huge advantage, * including no additional memory used. * * The disadvantages resulting from this design are: * * 1) It is possible we return elements more than once. However this is usually * easy to deal with in the application level. * 2) The iterator must return multiple elements per call, as it needs to always * return all the keys chained in a given bucket, and all the expansions, so * we are sure we don't miss keys moving during rehashing. * 3) The reverse cursor is somewhat hard to understand at first, but this * comment is supposed to help. 
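 * Putting it together, a typical driver loop looks like this (the callback
 * name is hypothetical; it must match the dictScanFunction signature):
 *
 *     unsigned long cursor = 0;
 *     do {
 *         cursor = dictScan(d, cursor, myScanCallback, privdata);
 *     } while (cursor != 0);
 *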
*/ unsigned long dictScan(dict *d, unsigned long v, dictScanFunction *fn, void *privdata) { dictht *t0, *t1; const dictEntry *de; unsigned long m0, m1; if (dictSize(d) == 0) return 0; if (!dictIsRehashing(d)) { t0 = &(d->ht[0]); m0 = t0->sizemask; /* Emit entries at cursor */ de = t0->table[v & m0]; while (de) { fn(privdata, de); de = de->next; } } else { t0 = &d->ht[0]; t1 = &d->ht[1]; /* Make sure t0 is the smaller and t1 is the bigger table */ if (t0->size > t1->size) { t0 = &d->ht[1]; t1 = &d->ht[0]; } m0 = t0->sizemask; m1 = t1->sizemask; /* Emit entries at cursor */ de = t0->table[v & m0]; while (de) { fn(privdata, de); de = de->next; } /* Iterate over indices in larger table that are the expansion * of the index pointed to by the cursor in the smaller table */ do { /* Emit entries at cursor */ de = t1->table[v & m1]; while (de) { fn(privdata, de); de = de->next; } /* Increment bits not covered by the smaller mask */ v = (((v | m0) + 1) & ~m0) | (v & m0); /* Continue while bits covered by mask difference is non-zero */ } while (v & (m0 ^ m1)); } /* Set unmasked bits so incrementing the reversed cursor * operates on the masked bits of the smaller table */ v |= ~m0; /* Increment the reverse cursor */ v = rev(v); v++; v = rev(v); return v; } /* ------------------------- private functions ------------------------------ */ /* Expand the hash table if needed */ static int _dictExpandIfNeeded(dict *d) { /* Incremental rehashing already in progress. Return. */ if (dictIsRehashing(d)) return DICT_OK; /* If the hash table is empty expand it to the initial size. */ if (d->ht[0].size == 0) return dictExpand(d, DICT_HT_INITIAL_SIZE); /* If we reached the 1:1 ratio, and we are allowed to resize the hash * table (global setting) or we should avoid it but the ratio between * elements/buckets is over the "safe" threshold, we resize doubling * the number of buckets. */ if (d->ht[0].used >= d->ht[0].size && (dict_can_resize || d->ht[0].used/d->ht[0].size > dict_force_resize_ratio)) { return dictExpand(d, d->ht[0].used*2); } return DICT_OK; } /* Our hash table capability is a power of two */ static unsigned long _dictNextPower(unsigned long size) { unsigned long i = DICT_HT_INITIAL_SIZE; if (size >= LONG_MAX) return LONG_MAX; while(1) { if (i >= size) return i; i *= 2; } } /* Returns the index of a free slot that can be populated with * a hash entry for the given 'key'. * If the key already exists, -1 is returned. * * Note that if we are in the process of rehashing the hash table, the * index is always returned in the context of the second (new) hash table. */ static int _dictKeyIndex(dict *d, const void *key) { unsigned int h, idx, table; dictEntry *he; /* Expand the hash table if needed */ if (_dictExpandIfNeeded(d) == DICT_ERR) return -1; /* Compute the key hash value */ h = dictHashKey(d, key); for (table = 0; table <= 1; table++) { idx = h & d->ht[table].sizemask; /* Search if this slot does not already contain the given key */ he = d->ht[table].table[idx]; while(he) { if (dictCompareKeys(d, key, he->key)) return -1; he = he->next; } if (!dictIsRehashing(d)) break; } return idx; } void dictEmpty(dict *d, void(callback)(void*)) { _dictClear(d,&d->ht[0],callback); _dictClear(d,&d->ht[1],callback); d->rehashidx = -1; d->iterators = 0; } void dictEnableResize(void) { dict_can_resize = 1; } void dictDisableResize(void) { dict_can_resize = 0; } #if 0 /* The following is code that we don't use for Disque currently, but that is * part of the library. 
*/ /* ----------------------- Debugging ------------------------*/ #define DICT_STATS_VECTLEN 50 static void _dictPrintStatsHt(dictht *ht) { unsigned long i, slots = 0, chainlen, maxchainlen = 0; unsigned long totchainlen = 0; unsigned long clvector[DICT_STATS_VECTLEN]; if (ht->used == 0) { printf("No stats available for empty dictionaries\n"); return; } for (i = 0; i < DICT_STATS_VECTLEN; i++) clvector[i] = 0; for (i = 0; i < ht->size; i++) { dictEntry *he; if (ht->table[i] == NULL) { clvector[0]++; continue; } slots++; /* For each hash entry on this slot... */ chainlen = 0; he = ht->table[i]; while(he) { chainlen++; he = he->next; } clvector[(chainlen < DICT_STATS_VECTLEN) ? chainlen : (DICT_STATS_VECTLEN-1)]++; if (chainlen > maxchainlen) maxchainlen = chainlen; totchainlen += chainlen; } printf("Hash table stats:\n"); printf(" table size: %ld\n", ht->size); printf(" number of elements: %ld\n", ht->used); printf(" different slots: %ld\n", slots); printf(" max chain length: %ld\n", maxchainlen); printf(" avg chain length (counted): %.02f\n", (float)totchainlen/slots); printf(" avg chain length (computed): %.02f\n", (float)ht->used/slots); printf(" Chain length distribution:\n"); for (i = 0; i < DICT_STATS_VECTLEN-1; i++) { if (clvector[i] == 0) continue; printf(" %s%ld: %ld (%.02f%%)\n",(i == DICT_STATS_VECTLEN-1)?">= ":"", i, clvector[i], ((float)clvector[i]/ht->size)*100); } } void dictPrintStats(dict *d) { _dictPrintStatsHt(&d->ht[0]); if (dictIsRehashing(d)) { printf("-- Rehashing into ht[1]:\n"); _dictPrintStatsHt(&d->ht[1]); } } /* ----------------------- StringCopy Hash Table Type ------------------------*/ static unsigned int _dictStringCopyHTHashFunction(const void *key) { return dictGenHashFunction(key, strlen(key)); } static void *_dictStringDup(void *privdata, const void *key) { int len = strlen(key); char *copy = zmalloc(len+1); DICT_NOTUSED(privdata); memcpy(copy, key, len); copy[len] = '\0'; return copy; } static int _dictStringCopyHTKeyCompare(void *privdata, const void *key1, const void *key2) { DICT_NOTUSED(privdata); return strcmp(key1, key2) == 0; } static void _dictStringDestructor(void *privdata, void *key) { DICT_NOTUSED(privdata); zfree(key); } dictType dictTypeHeapStringCopyKey = { _dictStringCopyHTHashFunction, /* hash function */ _dictStringDup, /* key dup */ NULL, /* val dup */ _dictStringCopyHTKeyCompare, /* key compare */ _dictStringDestructor, /* key destructor */ NULL /* val destructor */ }; /* This is like StringCopy but does not auto-duplicate the key. * It's used for intepreter's shared strings. */ dictType dictTypeHeapStrings = { _dictStringCopyHTHashFunction, /* hash function */ NULL, /* key dup */ NULL, /* val dup */ _dictStringCopyHTKeyCompare, /* key compare */ _dictStringDestructor, /* key destructor */ NULL /* val destructor */ }; /* This is like StringCopy but also automatically handle dynamic * allocated C strings as values. */ dictType dictTypeHeapStringCopyKeyValue = { _dictStringCopyHTHashFunction, /* hash function */ _dictStringDup, /* key dup */ _dictStringDup, /* val dup */ _dictStringCopyHTKeyCompare, /* key compare */ _dictStringDestructor, /* key destructor */ _dictStringDestructor, /* val destructor */ }; #endif disque-1.0-rc1/src/dict.h000066400000000000000000000162401264176561100152260ustar00rootroot00000000000000/* Hash Tables Implementation. * * This file implements in-memory hash tables with insert/del/replace/find/ * get-random-element operations. 
Hash tables will auto-resize if needed * tables of power of two in size are used, collisions are handled by * chaining. See the source code for more information... :) * * Copyright (c) 2006-2012, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include #ifndef __DICT_H #define __DICT_H #define DICT_OK 0 #define DICT_ERR 1 /* Unused arguments generate annoying warnings... */ #define DICT_NOTUSED(V) ((void) V) typedef struct dictEntry { void *key; union { void *val; uint64_t u64; int64_t s64; double d; } v; struct dictEntry *next; } dictEntry; typedef struct dictType { unsigned int (*hashFunction)(const void *key); void *(*keyDup)(void *privdata, const void *key); void *(*valDup)(void *privdata, const void *obj); int (*keyCompare)(void *privdata, const void *key1, const void *key2); void (*keyDestructor)(void *privdata, void *key); void (*valDestructor)(void *privdata, void *obj); } dictType; /* This is our hash table structure. Every dictionary has two of this as we * implement incremental rehashing, for the old to the new table. */ typedef struct dictht { dictEntry **table; unsigned long size; unsigned long sizemask; unsigned long used; } dictht; typedef struct dict { dictType *type; void *privdata; dictht ht[2]; long rehashidx; /* rehashing not in progress if rehashidx == -1 */ int iterators; /* number of iterators currently running */ } dict; /* If safe is set to 1 this is a safe iterator, that means, you can call * dictAdd, dictFind, and other functions against the dictionary even while * iterating. Otherwise it is a non safe iterator, and only dictNext() * should be called while iterating. */ typedef struct dictIterator { dict *d; long index; int table, safe; dictEntry *entry, *nextEntry; /* unsafe iterator fingerprint for misuse detection. 
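 * For reference, a safe-iteration sketch using the API declared below:
 *
 *     dictIterator *it = dictGetSafeIterator(d);
 *     dictEntry *de;
 *     while ((de = dictNext(it)) != NULL) {
 *         // use dictGetKey(de) / dictGetVal(de); writes to d are allowed
 *     }
 *     dictReleaseIterator(it);
 *
 * (the dictForeach/dictSafeForeach macros further down wrap this pattern)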
*/ long long fingerprint; } dictIterator; typedef void (dictScanFunction)(void *privdata, const dictEntry *de); /* This is the initial size of every hash table */ #define DICT_HT_INITIAL_SIZE 4 /* ------------------------------- Macros ------------------------------------*/ #define dictFreeVal(d, entry) \ if ((d)->type->valDestructor) \ (d)->type->valDestructor((d)->privdata, (entry)->v.val) #define dictSetVal(d, entry, _val_) do { \ if ((d)->type->valDup) \ entry->v.val = (d)->type->valDup((d)->privdata, _val_); \ else \ entry->v.val = (_val_); \ } while(0) #define dictSetSignedIntegerVal(entry, _val_) \ do { entry->v.s64 = _val_; } while(0) #define dictSetUnsignedIntegerVal(entry, _val_) \ do { entry->v.u64 = _val_; } while(0) #define dictSetDoubleVal(entry, _val_) \ do { entry->v.d = _val_; } while(0) #define dictFreeKey(d, entry) \ if ((d)->type->keyDestructor) \ (d)->type->keyDestructor((d)->privdata, (entry)->key) #define dictSetKey(d, entry, _key_) do { \ if ((d)->type->keyDup) \ entry->key = (d)->type->keyDup((d)->privdata, _key_); \ else \ entry->key = (_key_); \ } while(0) #define dictCompareKeys(d, key1, key2) \ (((d)->type->keyCompare) ? \ (d)->type->keyCompare((d)->privdata, key1, key2) : \ (key1) == (key2)) #define dictHashKey(d, key) (d)->type->hashFunction(key) #define dictGetKey(he) ((he)->key) #define dictGetVal(he) ((he)->v.val) #define dictGetSignedIntegerVal(he) ((he)->v.s64) #define dictGetUnsignedIntegerVal(he) ((he)->v.u64) #define dictGetDoubleVal(he) ((he)->v.d) #define dictSlots(d) ((d)->ht[0].size+(d)->ht[1].size) #define dictSize(d) ((d)->ht[0].used+(d)->ht[1].used) #define dictIsRehashing(d) ((d)->rehashidx != -1) /* Foreach macro. */ #define dictForeach(_d,_devar) { \ dictIterator *_diter = dictGetIterator(_d); \ dictEntry *_devar; \ while((_devar = dictNext(_diter)) != NULL) { #define dictSafeForeach(_d,_devar) { \ dictIterator *_diter = dictGetSafeIterator(_d); \ dictEntry *_devar; \ while((_devar = dictNext(_diter)) != NULL) { #define dictEndForeach } dictReleaseIterator(_diter); } /* API */ dict *dictCreate(dictType *type, void *privDataPtr); int dictExpand(dict *d, unsigned long size); int dictAdd(dict *d, void *key, void *val); dictEntry *dictAddRaw(dict *d, void *key); int dictReplace(dict *d, void *key, void *val); dictEntry *dictReplaceRaw(dict *d, void *key); int dictDelete(dict *d, const void *key); int dictDeleteNoFree(dict *d, const void *key); void dictRelease(dict *d); dictEntry * dictFind(dict *d, const void *key); void *dictFetchValue(dict *d, const void *key); int dictResize(dict *d); dictIterator *dictGetIterator(dict *d); dictIterator *dictGetSafeIterator(dict *d); dictEntry *dictNext(dictIterator *iter); void dictReleaseIterator(dictIterator *iter); dictEntry *dictGetRandomKey(dict *d); unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count); void dictPrintStats(dict *d); unsigned int dictGenHashFunction(const void *key, int len); unsigned int dictGenCaseHashFunction(const unsigned char *buf, int len); void dictEmpty(dict *d, void(callback)(void*)); void dictEnableResize(void); void dictDisableResize(void); int dictRehash(dict *d, int n); int dictRehashMilliseconds(dict *d, int ms); void dictSetHashFunctionSeed(unsigned int initval); unsigned int dictGetHashFunctionSeed(void); unsigned long dictScan(dict *d, unsigned long v, dictScanFunction *fn, void *privdata); /* Hash table types */ extern dictType dictTypeHeapStringCopyKey; extern dictType dictTypeHeapStrings; extern dictType dictTypeHeapStringCopyKeyValue; #endif 
/* __DICT_H */ disque-1.0-rc1/src/disque-check-aof.c000066400000000000000000000142731264176561100174120ustar00rootroot00000000000000/* * Copyright (c) 2009-2012, Pieter Noordhuis * Copyright (c) 2009-2012, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "fmacros.h" #include #include #include #include #include #include "config.h" #define ERROR(...) 
{ \ char __buf[1024]; \ sprintf(__buf, __VA_ARGS__); \ sprintf(error, "0x%16llx: %s", (long long)epos, __buf); \ } static char error[1024]; static off_t epos; int consumeNewline(char *buf) { if (strncmp(buf,"\r\n",2) != 0) { ERROR("Expected \\r\\n, got: %02x%02x",buf[0],buf[1]); return 0; } return 1; } int readLong(FILE *fp, char prefix, long *target) { char buf[128], *eptr; epos = ftello(fp); if (fgets(buf,sizeof(buf),fp) == NULL) { return 0; } if (buf[0] != prefix) { ERROR("Expected prefix '%c', got: '%c'",buf[0],prefix); return 0; } *target = strtol(buf+1,&eptr,10); return consumeNewline(eptr); } int readBytes(FILE *fp, char *target, long length) { long real; epos = ftello(fp); real = fread(target,1,length,fp); if (real != length) { ERROR("Expected to read %ld bytes, got %ld bytes",length,real); return 0; } return 1; } int readString(FILE *fp, char** target) { long len; *target = NULL; if (!readLong(fp,'$',&len)) { return 0; } /* Increase length to also consume \r\n */ len += 2; *target = (char*)malloc(len); if (!readBytes(fp,*target,len)) { return 0; } if (!consumeNewline(*target+len-2)) { return 0; } (*target)[len-2] = '\0'; return 1; } int readArgc(FILE *fp, long *target) { return readLong(fp,'*',target); } off_t process(FILE *fp) { long argc; off_t pos = 0; int i, multi = 0; char *str; while(1) { if (!multi) pos = ftello(fp); if (!readArgc(fp, &argc)) break; for (i = 0; i < argc; i++) { if (!readString(fp,&str)) break; if (i == 0) { if (strcasecmp(str, "multi") == 0) { if (multi++) { ERROR("Unexpected MULTI"); break; } } else if (strcasecmp(str, "exec") == 0) { if (--multi) { ERROR("Unexpected EXEC"); break; } } } free(str); } /* Stop if the loop did not finish */ if (i < argc) { if (str) free(str); break; } } if (feof(fp) && multi && strlen(error) == 0) { ERROR("Reached EOF before reading EXEC for MULTI"); } if (strlen(error) > 0) { printf("%s\n", error); } return pos; } int main(int argc, char **argv) { char *filename; int fix = 0; if (argc < 2) { printf("Usage: %s [--fix] \n", argv[0]); exit(1); } else if (argc == 2) { filename = argv[1]; } else if (argc == 3) { if (strcmp(argv[1],"--fix") != 0) { printf("Invalid argument: %s\n", argv[1]); exit(1); } filename = argv[2]; fix = 1; } else { printf("Invalid arguments\n"); exit(1); } FILE *fp = fopen(filename,"r+"); if (fp == NULL) { printf("Cannot open file: %s\n", filename); exit(1); } struct disque_stat sb; if (disque_fstat(fileno(fp),&sb) == -1) { printf("Cannot stat file: %s\n", filename); exit(1); } off_t size = sb.st_size; if (size == 0) { printf("Empty file: %s\n", filename); exit(1); } off_t pos = process(fp); off_t diff = size-pos; printf("AOF analyzed: size=%lld, ok_up_to=%lld, diff=%lld\n", (long long) size, (long long) pos, (long long) diff); if (diff > 0) { if (fix) { char buf[2]; printf("This will shrink the AOF from %lld bytes, with %lld bytes, to %lld bytes\n",(long long)size,(long long)diff,(long long)pos); printf("Continue? [y/N]: "); if (fgets(buf,sizeof(buf),stdin) == NULL || strncasecmp(buf,"y",1) != 0) { printf("Aborting...\n"); exit(1); } if (ftruncate(fileno(fp), pos) == -1) { printf("Failed to truncate AOF\n"); exit(1); } else { printf("Successfully truncated AOF\n"); } } else { printf("AOF is not valid\n"); exit(1); } } else { printf("AOF is valid\n"); } fclose(fp); return 0; } disque-1.0-rc1/src/disque-cli.c000066400000000000000000001756071264176561100163520ustar00rootroot00000000000000/* Disque CLI (command line interface) * * Copyright (c) 2009-2012, Salvatore Sanfilippo * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "fmacros.h" #include "version.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* use sds.h from hiredis, so that only one set of sds functions will be present in the binary */ #include "zmalloc.h" #include "linenoise.h" #include "help.h" #include "anet.h" #include "ae.h" #define UNUSED(V) ((void) V) #define OUTPUT_STANDARD 0 #define OUTPUT_RAW 1 #define OUTPUT_CSV 2 #define DISQUE_CLI_KEEPALIVE_INTERVAL 15 /* seconds */ #define DISQUE_CLI_DEFAULT_PIPE_TIMEOUT 30 /* seconds */ static redisContext *context; static struct config { char *hostip; int hostport; char *hostsocket; long repeat; long interval; int dbnum; int interactive; int shutdown; int monitor_mode; int pubsub_mode; int latency_mode; int latency_history; int cluster_mode; int cluster_reissue_command; int slave_mode; int pipe_mode; int pipe_timeout; int getrdb_mode; int stat_mode; int scan_mode; int intrinsic_latency_mode; int intrinsic_latency_duration; char *pattern; char *rdb_filename; int bigkeys; int stdinarg; /* get last arg from stdin. (-x option) */ char *auth; int output; /* output mode, see OUTPUT_* defines */ sds mb_delim; char prompt[128]; char *eval; int last_cmd_type; } config; static volatile sig_atomic_t force_cancel_loop = 0; static void usage(void); static void slaveMode(void); char *disqueGitSHA1(void); char *disqueGitDirty(void); /*------------------------------------------------------------------------------ * Utility functions *--------------------------------------------------------------------------- */ static long long ustime(void) { struct timeval tv; long long ust; gettimeofday(&tv, NULL); ust = ((long long)tv.tv_sec)*1000000; ust += tv.tv_usec; return ust; } static long long mstime(void) { return ustime()/1000; } static void cliRefreshPrompt(void) { int len; if (config.hostsocket != NULL) len = snprintf(config.prompt,sizeof(config.prompt),"disque %s", config.hostsocket); else len = snprintf(config.prompt,sizeof(config.prompt), strchr(config.hostip,':') ? 
"[%s]:%d" : "%s:%d", config.hostip, config.hostport); /* Add [dbnum] if needed */ if (config.dbnum != 0 && config.last_cmd_type != REDIS_REPLY_ERROR) len += snprintf(config.prompt+len,sizeof(config.prompt)-len,"[%d]", config.dbnum); snprintf(config.prompt+len,sizeof(config.prompt)-len,"> "); } /*------------------------------------------------------------------------------ * Help functions *--------------------------------------------------------------------------- */ #define CLI_HELP_COMMAND 1 #define CLI_HELP_GROUP 2 typedef struct { int type; int argc; sds *argv; sds full; /* Only used for help on commands */ struct commandHelp *org; } helpEntry; static helpEntry *helpEntries; static int helpEntriesLen; static sds cliVersion(void) { sds version; version = sdscatprintf(sdsempty(), "%s", DISQUE_VERSION); /* Add git commit and working tree status when available */ if (strtoll(disqueGitSHA1(),NULL,16)) { version = sdscatprintf(version, " (git:%s", disqueGitSHA1()); if (strtoll(disqueGitDirty(),NULL,10)) version = sdscatprintf(version, "-dirty"); version = sdscat(version, ")"); } return version; } static void cliInitHelp(void) { int commandslen = sizeof(commandHelp)/sizeof(struct commandHelp); int groupslen = sizeof(commandGroups)/sizeof(char*); int i, len, pos = 0; helpEntry tmp; helpEntriesLen = len = commandslen+groupslen; helpEntries = malloc(sizeof(helpEntry)*len); for (i = 0; i < groupslen; i++) { tmp.argc = 1; tmp.argv = malloc(sizeof(sds)); tmp.argv[0] = sdscatprintf(sdsempty(),"@%s",commandGroups[i]); tmp.full = tmp.argv[0]; tmp.type = CLI_HELP_GROUP; tmp.org = NULL; helpEntries[pos++] = tmp; } for (i = 0; i < commandslen; i++) { tmp.argv = sdssplitargs(commandHelp[i].name,&tmp.argc); tmp.full = sdsnew(commandHelp[i].name); tmp.type = CLI_HELP_COMMAND; tmp.org = &commandHelp[i]; helpEntries[pos++] = tmp; } } /* Output command help to stdout. */ static void cliOutputCommandHelp(struct commandHelp *help, int group) { printf("\r\n \x1b[1m%s\x1b[0m \x1b[90m%s\x1b[0m\r\n", help->name, help->params); printf(" \x1b[33msummary:\x1b[0m %s\r\n", help->summary); printf(" \x1b[33msince:\x1b[0m %s\r\n", help->since); if (group) { printf(" \x1b[33mgroup:\x1b[0m %s\r\n", commandGroups[help->group]); } } /* Print generic help. */ static void cliOutputGenericHelp(void) { sds version = cliVersion(); printf( "disque %s\r\n" "Type: \"help @\" to get a list of commands in \r\n" " \"help \" for help on \r\n" " \"help \" to get a list of possible help topics\r\n" " \"quit\" to exit\r\n", version ); sdsfree(version); } /* Output all command help, filtering by group or command name. 
*/ static void cliOutputHelp(int argc, char **argv) { int i, j, len; int group = -1; helpEntry *entry; struct commandHelp *help; if (argc == 0) { cliOutputGenericHelp(); return; } else if (argc > 0 && argv[0][0] == '@') { len = sizeof(commandGroups)/sizeof(char*); for (i = 0; i < len; i++) { if (strcasecmp(argv[0]+1,commandGroups[i]) == 0) { group = i; break; } } } assert(argc > 0); for (i = 0; i < helpEntriesLen; i++) { entry = &helpEntries[i]; if (entry->type != CLI_HELP_COMMAND) continue; help = entry->org; if (group == -1) { /* Compare all arguments */ if (argc == entry->argc) { for (j = 0; j < argc; j++) { if (strcasecmp(argv[j],entry->argv[j]) != 0) break; } if (j == argc) { cliOutputCommandHelp(help,1); } } } else { if (group == help->group) { cliOutputCommandHelp(help,0); } } } printf("\r\n"); } static void completionCallback(const char *buf, linenoiseCompletions *lc) { size_t startpos = 0; int mask; int i; size_t matchlen; sds tmp; if (strncasecmp(buf,"help ",5) == 0) { startpos = 5; while (isspace(buf[startpos])) startpos++; mask = CLI_HELP_COMMAND | CLI_HELP_GROUP; } else { mask = CLI_HELP_COMMAND; } for (i = 0; i < helpEntriesLen; i++) { if (!(helpEntries[i].type & mask)) continue; matchlen = strlen(buf+startpos); if (strncasecmp(buf+startpos,helpEntries[i].full,matchlen) == 0) { tmp = sdsnewlen(buf,startpos); tmp = sdscat(tmp,helpEntries[i].full); linenoiseAddCompletion(lc,tmp); sdsfree(tmp); } } } /*------------------------------------------------------------------------------ * Networking / parsing *--------------------------------------------------------------------------- */ /* Send AUTH command to the server */ static int cliAuth() { redisReply *reply; if (config.auth == NULL) return REDIS_OK; reply = redisCommand(context,"AUTH %s",config.auth); if (reply != NULL) { freeReplyObject(reply); return REDIS_OK; } return REDIS_ERR; } /* Send SELECT dbnum to the server */ static int cliSelect() { redisReply *reply; if (config.dbnum == 0) return REDIS_OK; reply = redisCommand(context,"SELECT %d",config.dbnum); if (reply != NULL) { int result = REDIS_OK; if (reply->type == REDIS_REPLY_ERROR) result = REDIS_ERR; freeReplyObject(reply); return result; } return REDIS_ERR; } /* Connect to the server. If force is not zero the connection is performed * even if there is already a connected socket. */ static int cliConnect(int force) { if (context == NULL || force) { if (context != NULL) redisFree(context); if (config.hostsocket == NULL) { context = redisConnect(config.hostip,config.hostport); } else { context = redisConnectUnix(config.hostsocket); } if (context->err) { fprintf(stderr,"Could not connect to Disque at "); if (config.hostsocket == NULL) fprintf(stderr,"%s:%d: %s\n",config.hostip,config.hostport,context->errstr); else fprintf(stderr,"%s: %s\n",config.hostsocket,context->errstr); redisFree(context); context = NULL; return REDIS_ERR; } /* Set aggressive KEEP_ALIVE socket option in the Disque context socket * in order to prevent timeouts caused by the execution of long * commands. At the same time this improves the detection of real * errors. */ anetKeepAlive(NULL, context->fd, DISQUE_CLI_KEEPALIVE_INTERVAL); /* Do AUTH and select the right DB. 
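 * Both are no-ops unless -a or -n were given on the command line: cliAuth()
 * returns immediately when config.auth is NULL, and cliSelect() when
 * config.dbnum is 0.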
*/ if (cliAuth() != REDIS_OK) return REDIS_ERR; if (cliSelect() != REDIS_OK) return REDIS_ERR; } return REDIS_OK; } static void cliPrintContextError(void) { if (context == NULL) return; fprintf(stderr,"Error: %s\n",context->errstr); } static sds cliFormatReplyTTY(redisReply *r, char *prefix) { sds out = sdsempty(); switch (r->type) { case REDIS_REPLY_ERROR: out = sdscatprintf(out,"(error) %s\n", r->str); break; case REDIS_REPLY_STATUS: out = sdscat(out,r->str); out = sdscat(out,"\n"); break; case REDIS_REPLY_INTEGER: out = sdscatprintf(out,"(integer) %lld\n",r->integer); break; case REDIS_REPLY_STRING: /* If you are producing output for the standard output we want * a more interesting output with quoted characters and so forth */ out = sdscatrepr(out,r->str,r->len); out = sdscat(out,"\n"); break; case REDIS_REPLY_NIL: out = sdscat(out,"(nil)\n"); break; case REDIS_REPLY_ARRAY: if (r->elements == 0) { out = sdscat(out,"(empty list or set)\n"); } else { unsigned int i, idxlen = 0; char _prefixlen[16]; char _prefixfmt[16]; sds _prefix; sds tmp; /* Calculate chars needed to represent the largest index */ i = r->elements; do { idxlen++; i /= 10; } while(i); /* Prefix for nested multi bulks should grow with idxlen+2 spaces */ memset(_prefixlen,' ',idxlen+2); _prefixlen[idxlen+2] = '\0'; _prefix = sdscat(sdsnew(prefix),_prefixlen); /* Setup prefix format for every entry */ snprintf(_prefixfmt,sizeof(_prefixfmt),"%%s%%%ud) ",idxlen); for (i = 0; i < r->elements; i++) { /* Don't use the prefix for the first element, as the parent * caller already prepended the index number. */ out = sdscatprintf(out,_prefixfmt,i == 0 ? "" : prefix,i+1); /* Format the multi bulk entry */ tmp = cliFormatReplyTTY(r->element[i],_prefix); out = sdscatlen(out,tmp,sdslen(tmp)); sdsfree(tmp); } sdsfree(_prefix); } break; default: fprintf(stderr,"Unknown reply type: %d\n", r->type); exit(1); } return out; } static sds cliFormatReplyRaw(redisReply *r) { sds out = sdsempty(), tmp; size_t i; switch (r->type) { case REDIS_REPLY_NIL: /* Nothing... 
*/ break; case REDIS_REPLY_ERROR: out = sdscatlen(out,r->str,r->len); out = sdscatlen(out,"\n",1); break; case REDIS_REPLY_STATUS: case REDIS_REPLY_STRING: out = sdscatlen(out,r->str,r->len); break; case REDIS_REPLY_INTEGER: out = sdscatprintf(out,"%lld",r->integer); break; case REDIS_REPLY_ARRAY: for (i = 0; i < r->elements; i++) { if (i > 0) out = sdscat(out,config.mb_delim); tmp = cliFormatReplyRaw(r->element[i]); out = sdscatlen(out,tmp,sdslen(tmp)); sdsfree(tmp); } break; default: fprintf(stderr,"Unknown reply type: %d\n", r->type); exit(1); } return out; } static sds cliFormatReplyCSV(redisReply *r) { unsigned int i; sds out = sdsempty(); switch (r->type) { case REDIS_REPLY_ERROR: out = sdscat(out,"ERROR,"); out = sdscatrepr(out,r->str,strlen(r->str)); break; case REDIS_REPLY_STATUS: out = sdscatrepr(out,r->str,r->len); break; case REDIS_REPLY_INTEGER: out = sdscatprintf(out,"%lld",r->integer); break; case REDIS_REPLY_STRING: out = sdscatrepr(out,r->str,r->len); break; case REDIS_REPLY_NIL: out = sdscat(out,"NIL\n"); break; case REDIS_REPLY_ARRAY: for (i = 0; i < r->elements; i++) { sds tmp = cliFormatReplyCSV(r->element[i]); out = sdscatlen(out,tmp,sdslen(tmp)); if (i != r->elements-1) out = sdscat(out,","); sdsfree(tmp); } break; default: fprintf(stderr,"Unknown reply type: %d\n", r->type); exit(1); } return out; } static int cliReadReply(int output_raw_strings) { void *_reply; redisReply *reply; sds out = NULL; int output = 1; if (redisGetReply(context,&_reply) != REDIS_OK) { if (config.shutdown) { redisFree(context); context = NULL; return REDIS_OK; } if (config.interactive) { /* Filter cases where we should reconnect */ if (context->err == REDIS_ERR_IO && (errno == ECONNRESET || errno == EPIPE)) return REDIS_ERR; if (context->err == REDIS_ERR_EOF) return REDIS_ERR; } cliPrintContextError(); exit(1); return REDIS_ERR; /* avoid compiler warning */ } reply = (redisReply*)_reply; config.last_cmd_type = reply->type; /* Check if we need to connect to a different node and reissue the * request. 
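 * A redirection error carries a payload such as "MOVED 3999 127.0.0.1:6381";
 * the pointer arithmetic below splits out the slot number and the target
 * host:port, as the [S]/[P] position comments illustrate.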
*/ if (config.cluster_mode && reply->type == REDIS_REPLY_ERROR && (!strncmp(reply->str,"MOVED",5) || !strcmp(reply->str,"ASK"))) { char *p = reply->str, *s; int slot; output = 0; /* Comments show the position of the pointer as: * * [S] for pointer 's' * [P] for pointer 'p' */ s = strchr(p,' '); /* MOVED[S]3999 127.0.0.1:6381 */ p = strchr(s+1,' '); /* MOVED[S]3999[P]127.0.0.1:6381 */ *p = '\0'; slot = atoi(s+1); s = strchr(p+1,':'); /* MOVED 3999[P]127.0.0.1[S]6381 */ *s = '\0'; sdsfree(config.hostip); config.hostip = sdsnew(p+1); config.hostport = atoi(s+1); if (config.interactive) printf("-> Redirected to slot [%d] located at %s:%d\n", slot, config.hostip, config.hostport); config.cluster_reissue_command = 1; cliRefreshPrompt(); } if (output) { if (output_raw_strings) { out = cliFormatReplyRaw(reply); } else { if (config.output == OUTPUT_RAW) { out = cliFormatReplyRaw(reply); out = sdscat(out,"\n"); } else if (config.output == OUTPUT_STANDARD) { out = cliFormatReplyTTY(reply,""); } else if (config.output == OUTPUT_CSV) { out = cliFormatReplyCSV(reply); out = sdscat(out,"\n"); } } fwrite(out,sdslen(out),1,stdout); sdsfree(out); } freeReplyObject(reply); return REDIS_OK; } static int cliSendCommand(int argc, char **argv, int repeat) { char *command = argv[0]; size_t *argvlen; int j, output_raw; if (!strcasecmp(command,"help") || !strcasecmp(command,"?")) { cliOutputHelp(--argc, ++argv); return REDIS_OK; } if (context == NULL) return REDIS_ERR; output_raw = 0; if (!strcasecmp(command,"info") || (argc == 2 && !strcasecmp(command,"cluster") && (!strcasecmp(argv[1],"nodes") || !strcasecmp(argv[1],"info"))) || (argc == 2 && !strcasecmp(command,"client") && !strcasecmp(argv[1],"list")) || (argc == 3 && !strcasecmp(command,"latency") && !strcasecmp(argv[1],"graph")) || (argc == 2 && !strcasecmp(command,"latency") && !strcasecmp(argv[1],"doctor"))) { output_raw = 1; } if (!strcasecmp(command,"shutdown")) config.shutdown = 1; if (!strcasecmp(command,"monitor")) config.monitor_mode = 1; if (!strcasecmp(command,"subscribe") || !strcasecmp(command,"psubscribe")) config.pubsub_mode = 1; if (!strcasecmp(command,"sync") || !strcasecmp(command,"psync")) config.slave_mode = 1; /* Setup argument length */ argvlen = malloc(argc*sizeof(size_t)); for (j = 0; j < argc; j++) argvlen[j] = sdslen(argv[j]); while(repeat--) { redisAppendCommandArgv(context,argc,(const char**)argv,argvlen); while (config.monitor_mode) { if (cliReadReply(output_raw) != REDIS_OK) exit(1); fflush(stdout); } if (config.pubsub_mode) { if (config.output != OUTPUT_RAW) printf("Reading messages... (press Ctrl-C to quit)\n"); while (1) { if (cliReadReply(output_raw) != REDIS_OK) exit(1); } } if (config.slave_mode) { printf("Entering slave output mode... (press Ctrl-C to quit)\n"); slaveMode(); config.slave_mode = 0; free(argvlen); return REDIS_ERR; /* Error = slaveMode lost connection to master */ } if (cliReadReply(output_raw) != REDIS_OK) { free(argvlen); return REDIS_ERR; } else { /* Store database number when SELECT was successfully executed. */ if (!strcasecmp(command,"select") && argc == 2) { config.dbnum = atoi(argv[1]); cliRefreshPrompt(); } else if (!strcasecmp(command,"auth") && argc == 2) { cliSelect(); } } if (config.interval) usleep(config.interval); fflush(stdout); /* Make it grep friendly */ } free(argvlen); return REDIS_OK; } /* Send the INFO command, reconnecting the link if needed. 
*/ static redisReply *reconnectingInfo(void) { redisContext *c = context; redisReply *reply = NULL; int tries = 0; assert(!c->err); while(reply == NULL) { while (c->err & (REDIS_ERR_IO | REDIS_ERR_EOF)) { printf("Reconnecting (%d)...\r", ++tries); fflush(stdout); redisFree(c); c = redisConnect(config.hostip,config.hostport); usleep(1000000); } reply = redisCommand(c,"INFO"); if (c->err && !(c->err & (REDIS_ERR_IO | REDIS_ERR_EOF))) { fprintf(stderr, "Error: %s\n", c->errstr); exit(1); } else if (tries > 0) { printf("\n"); } } context = c; return reply; } /*------------------------------------------------------------------------------ * User interface *--------------------------------------------------------------------------- */ static int parseOptions(int argc, char **argv) { int i; for (i = 1; i < argc; i++) { int lastarg = i==argc-1; if (!strcmp(argv[i],"-h") && !lastarg) { sdsfree(config.hostip); config.hostip = sdsnew(argv[++i]); } else if (!strcmp(argv[i],"-h") && lastarg) { usage(); } else if (!strcmp(argv[i],"--help")) { usage(); } else if (!strcmp(argv[i],"-x")) { config.stdinarg = 1; } else if (!strcmp(argv[i],"-p") && !lastarg) { config.hostport = atoi(argv[++i]); } else if (!strcmp(argv[i],"-s") && !lastarg) { config.hostsocket = argv[++i]; } else if (!strcmp(argv[i],"-r") && !lastarg) { config.repeat = strtoll(argv[++i],NULL,10); } else if (!strcmp(argv[i],"-i") && !lastarg) { double seconds = atof(argv[++i]); config.interval = seconds*1000000; } else if (!strcmp(argv[i],"-n") && !lastarg) { config.dbnum = atoi(argv[++i]); } else if (!strcmp(argv[i],"-a") && !lastarg) { config.auth = argv[++i]; } else if (!strcmp(argv[i],"--raw")) { config.output = OUTPUT_RAW; } else if (!strcmp(argv[i],"--no-raw")) { config.output = OUTPUT_STANDARD; } else if (!strcmp(argv[i],"--csv")) { config.output = OUTPUT_CSV; } else if (!strcmp(argv[i],"--latency")) { config.latency_mode = 1; } else if (!strcmp(argv[i],"--latency-history")) { config.latency_mode = 1; config.latency_history = 1; } else if (!strcmp(argv[i],"--slave")) { config.slave_mode = 1; } else if (!strcmp(argv[i],"--stat")) { config.stat_mode = 1; } else if (!strcmp(argv[i],"--scan")) { config.scan_mode = 1; } else if (!strcmp(argv[i],"--pattern") && !lastarg) { config.pattern = argv[++i]; } else if (!strcmp(argv[i],"--intrinsic-latency") && !lastarg) { config.intrinsic_latency_mode = 1; config.intrinsic_latency_duration = atoi(argv[++i]); } else if (!strcmp(argv[i],"--rdb") && !lastarg) { config.getrdb_mode = 1; config.rdb_filename = argv[++i]; } else if (!strcmp(argv[i],"--pipe")) { config.pipe_mode = 1; } else if (!strcmp(argv[i],"--pipe-timeout") && !lastarg) { config.pipe_timeout = atoi(argv[++i]); } else if (!strcmp(argv[i],"--bigkeys")) { config.bigkeys = 1; } else if (!strcmp(argv[i],"--eval") && !lastarg) { config.eval = argv[++i]; } else if (!strcmp(argv[i],"-c")) { config.cluster_mode = 1; } else if (!strcmp(argv[i],"-d") && !lastarg) { sdsfree(config.mb_delim); config.mb_delim = sdsnew(argv[++i]); } else if (!strcmp(argv[i],"-v") || !strcmp(argv[i], "--version")) { sds version = cliVersion(); printf("disque %s\n", version); sdsfree(version); exit(0); } else { if (argv[i][0] == '-') { fprintf(stderr, "Unrecognized option or bad number of args for: '%s'\n", argv[i]); exit(1); } else { /* Likely the command name, stop here. 
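*/
/* Worked example for the option parsing above: with "disque -r 100 -i 0.1
 * ping", "-r" sets config.repeat to 100 via strtoll(), and "-i" is parsed
 * with atof(), so
 *
 *     config.interval = 0.1 * 1000000 = 100000 microseconds
 *
 * which cliSendCommand() later honors as usleep(config.interval), i.e. a
 * 100 ms pause between each of the 100 PING commands.
 */
/*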
*/ break; } } } return i; } static sds readArgFromStdin(void) { char buf[1024]; sds arg = sdsempty(); while(1) { int nread = read(fileno(stdin),buf,1024); if (nread == 0) break; else if (nread == -1) { perror("Reading from standard input"); exit(1); } arg = sdscatlen(arg,buf,nread); } return arg; } static void usage(void) { sds version = cliVersion(); fprintf(stderr, "disque %s\n" "\n" "Usage: disque [OPTIONS] [cmd [arg [arg ...]]]\n" " -h Server hostname (default: 127.0.0.1).\n" " -p Server port (default: 7711).\n" " -s Server socket (overrides hostname and port).\n" " -a Password to use when connecting to the server.\n" " -r Execute specified command N times.\n" " -i When -r is used, waits seconds per command.\n" " It is possible to specify sub-second times like -i 0.1.\n" " -n Database number.\n" " -x Read last argument from STDIN.\n" " -d Multi-bulk delimiter in for raw formatting (default: \\n).\n" " -c Enable cluster mode (follow -ASK and -MOVED redirections).\n" " --raw Use raw formatting for replies (default when STDOUT is\n" " not a tty).\n" " --no-raw Force formatted output even when STDOUT is not a tty.\n" " --csv Output in CSV format.\n" " --latency Enter a special mode continuously sampling latency.\n" " --latency-history Like --latency but tracking latency changes over time.\n" " Default time interval is 15 sec. Change it using -i.\n" " --slave Simulate a slave showing commands received from the master.\n" " --rdb Transfer an RDB dump from remote server to local file.\n" " --pipe Transfer raw Disque protocol from stdin to server.\n" " --pipe-timeout In --pipe mode, abort with error if after sending all data.\n" " no reply is received within seconds.\n" " Default timeout: %d. Use 0 to wait forever.\n" " --bigkeys Sample Disque keys looking for big keys.\n" " --scan List all keys using the SCAN command.\n" " --pattern Useful with --scan to specify a SCAN pattern.\n" " --intrinsic-latency Run a test to measure intrinsic system latency.\n" " The test will run for the specified amount of seconds.\n" " --eval Send an EVAL command using the Lua script at .\n" " --help Output this help and exit.\n" " --version Output version and exit.\n" "\n" "Examples:\n" " cat /etc/passwd | disque -x set mypasswd\n" " disque get mypasswd\n" " disque -r 100 lpush mylist x\n" " disque -r 100 -i 1 info | grep used_memory_human:\n" " disque --eval myscript.lua key1 key2 , arg1 arg2 arg3\n" " disque --scan --pattern '*:12345*'\n" "\n" " (Note: when using --eval the comma separates KEYS[] from ARGV[] items)\n" "\n" "When no command is given, disque starts in interactive mode.\n" "Type \"help\" in interactive mode for information on available commands.\n" "\n", version, DISQUE_CLI_DEFAULT_PIPE_TIMEOUT); sdsfree(version); exit(1); } /* Turn the plain C strings into Sds strings */ static char **convertToSds(int count, char** args) { int j; char **sds = zmalloc(sizeof(char*)*count); for(j = 0; j < count; j++) sds[j] = sdsnew(args[j]); return sds; } static void repl(void) { sds historyfile = NULL; int history = 0; char *line; int argc; sds *argv; config.interactive = 1; linenoiseSetMultiLine(1); linenoiseSetCompletionCallback(completionCallback); /* Only use history when stdin is a tty. */ if (isatty(fileno(stdin))) { history = 1; if (getenv("HOME") != NULL) { historyfile = sdscatprintf(sdsempty(),"%s/.disque_history",getenv("HOME")); linenoiseHistoryLoad(historyfile); } } cliRefreshPrompt(); while((line = linenoise(context ? 
config.prompt : "not connected> ")) != NULL) { if (line[0] != '\0') { argv = sdssplitargs(line,&argc); if (history) linenoiseHistoryAdd(line); if (historyfile) linenoiseHistorySave(historyfile); if (argv == NULL) { printf("Invalid argument(s)\n"); free(line); continue; } else if (argc > 0) { if (strcasecmp(argv[0],"quit") == 0 || strcasecmp(argv[0],"exit") == 0) { exit(0); } else if (argc == 3 && !strcasecmp(argv[0],"connect")) { sdsfree(config.hostip); config.hostip = sdsnew(argv[1]); config.hostport = atoi(argv[2]); cliRefreshPrompt(); cliConnect(1); } else if (argc == 1 && !strcasecmp(argv[0],"clear")) { linenoiseClearScreen(); } else { long long start_time = mstime(), elapsed; int repeat, skipargs = 0; repeat = atoi(argv[0]); if (argc > 1 && repeat) { skipargs = 1; } else { repeat = 1; } while (1) { config.cluster_reissue_command = 0; if (cliSendCommand(argc-skipargs,argv+skipargs,repeat) != REDIS_OK) { cliConnect(1); /* If we still cannot send the command print error. * We'll try to reconnect the next time. */ if (cliSendCommand(argc-skipargs,argv+skipargs,repeat) != REDIS_OK) cliPrintContextError(); } /* Issue the command again if we got redirected in cluster mode */ if (config.cluster_mode && config.cluster_reissue_command) { cliConnect(1); } else { break; } } elapsed = mstime()-start_time; if (elapsed >= 500) { printf("(%.2fs)\n",(double)elapsed/1000); } } } /* Free the argument vector */ sdsfreesplitres(argv,argc); } /* linenoise() returns malloc-ed lines like readline() */ free(line); } exit(0); } static int noninteractive(int argc, char **argv) { int retval = 0; if (config.stdinarg) { argv = zrealloc(argv, (argc+1)*sizeof(char*)); argv[argc] = readArgFromStdin(); retval = cliSendCommand(argc+1, argv, config.repeat); } else { /* stdin is probably a tty, can be tested with S_ISCHR(s.st_mode) */ retval = cliSendCommand(argc, argv, config.repeat); } return retval; } /*------------------------------------------------------------------------------ * Eval mode *--------------------------------------------------------------------------- */ static int evalMode(int argc, char **argv) { sds script = sdsempty(); FILE *fp; char buf[1024]; size_t nread; char **argv2; int j, got_comma = 0, keys = 0; /* Load the script from the file, as an sds string. */ fp = fopen(config.eval,"r"); if (!fp) { fprintf(stderr, "Can't open file '%s': %s\n", config.eval, strerror(errno)); exit(1); } while((nread = fread(buf,1,sizeof(buf),fp)) != 0) { script = sdscatlen(script,buf,nread); } fclose(fp); /* Create our argument vector */ argv2 = zmalloc(sizeof(sds)*(argc+3)); argv2[0] = sdsnew("EVAL"); argv2[1] = script; for (j = 0; j < argc; j++) { if (!got_comma && argv[j][0] == ',' && argv[j][1] == 0) { got_comma = 1; continue; } argv2[j+3-got_comma] = sdsnew(argv[j]); if (!got_comma) keys++; } argv2[2] = sdscatprintf(sdsempty(),"%d",keys); /* Call it */ return cliSendCommand(argc+3-got_comma, argv2, config.repeat); } /*------------------------------------------------------------------------------ * Latency and latency history modes *--------------------------------------------------------------------------- */ #define LATENCY_SAMPLE_RATE 10 /* milliseconds. */ #define LATENCY_HISTORY_DEFAULT_INTERVAL 15000 /* milliseconds. */ static void latencyMode(void) { redisReply *reply; long long start, latency, min = 0, max = 0, tot = 0, count = 0; long long history_interval = config.interval ? 
config.interval/1000 : LATENCY_HISTORY_DEFAULT_INTERVAL; double avg; long long history_start = mstime(); if (!context) exit(1); while(1) { start = mstime(); reply = redisCommand(context,"PING"); if (reply == NULL) { fprintf(stderr,"\nI/O error\n"); exit(1); } latency = mstime()-start; freeReplyObject(reply); count++; if (count == 1) { min = max = tot = latency; avg = (double) latency; } else { if (latency < min) min = latency; if (latency > max) max = latency; tot += latency; avg = (double) tot/count; } printf("\x1b[0G\x1b[2Kmin: %lld, max: %lld, avg: %.2f (%lld samples)", min, max, avg, count); fflush(stdout); if (config.latency_history && mstime()-history_start > history_interval) { printf(" -- %.2f seconds range\n", (float)(mstime()-history_start)/1000); history_start = mstime(); min = max = tot = count = 0; } usleep(LATENCY_SAMPLE_RATE * 1000); } } /*------------------------------------------------------------------------------ * Slave mode *--------------------------------------------------------------------------- */ /* Sends SYNC and reads the number of bytes in the payload. Used both by * slaveMode() and getRDB(). */ unsigned long long sendSync(int fd) { /* To start we need to send the SYNC command and return the payload. * The hidisque client lib does not understand this part of the protocol * and we don't want to mess with its buffers, so everything is performed * using direct low-level I/O. */ char buf[4096], *p; ssize_t nread; /* Send the SYNC command. */ if (write(fd,"SYNC\r\n",6) != 6) { fprintf(stderr,"Error writing to master\n"); exit(1); } /* Read $\r\n, making sure to read just up to "\n" */ p = buf; while(1) { nread = read(fd,p,1); if (nread <= 0) { fprintf(stderr,"Error reading bulk length while SYNCing\n"); exit(1); } if (*p == '\n' && p != buf) break; if (*p != '\n') p++; } *p = '\0'; if (buf[0] == '-') { printf("SYNC with master failed: %s\n", buf); exit(1); } return strtoull(buf+1,NULL,10); } static void slaveMode(void) { int fd = context->fd; unsigned long long payload = sendSync(fd); char buf[1024]; int original_output = config.output; fprintf(stderr,"SYNC with master, discarding %llu " "bytes of bulk transfer...\n", payload); /* Discard the payload. */ while(payload) { ssize_t nread; nread = read(fd,buf,(payload > sizeof(buf)) ? sizeof(buf) : payload); if (nread <= 0) { fprintf(stderr,"Error reading RDB payload while SYNCing\n"); exit(1); } payload -= nread; } fprintf(stderr,"SYNC done. Logging commands from master.\n"); /* Now we can use hidisque to read the incoming protocol. */ config.output = OUTPUT_CSV; while (cliReadReply(0) == REDIS_OK); config.output = original_output; } /*------------------------------------------------------------------------------ * RDB transfer mode *--------------------------------------------------------------------------- */ /* This function implements --rdb, so it uses the replication protocol in order * to fetch the RDB file from a remote server. */ static void getRDB(void) { int s = context->fd; int fd; unsigned long long payload = sendSync(s); char buf[4096]; fprintf(stderr,"SYNC sent to master, writing %llu bytes to '%s'\n", payload, config.rdb_filename); /* Write to file. */ if (!strcmp(config.rdb_filename,"-")) { fd = STDOUT_FILENO; } else { fd = open(config.rdb_filename, O_CREAT|O_WRONLY, 0644); if (fd == -1) { fprintf(stderr, "Error opening '%s': %s\n", config.rdb_filename, strerror(errno)); exit(1); } } while(payload) { ssize_t nread, nwritten; nread = read(s,buf,(payload > sizeof(buf)) ? 
sizeof(buf) : payload); if (nread <= 0) { fprintf(stderr,"I/O Error reading RDB payload from socket\n"); exit(1); } nwritten = write(fd, buf, nread); if (nwritten != nread) { fprintf(stderr,"Error writing data to file: %s\n", strerror(errno)); exit(1); } payload -= nread; } close(s); /* Close the file descriptor ASAP as fsync() may take time. */ fsync(fd); fprintf(stderr,"Transfer finished with success.\n"); exit(0); } /*------------------------------------------------------------------------------ * Bulk import (pipe) mode *--------------------------------------------------------------------------- */ static void pipeMode(void) { int fd = context->fd; long long errors = 0, replies = 0, obuf_len = 0, obuf_pos = 0; char ibuf[1024*16], obuf[1024*16]; /* Input and output buffers */ char aneterr[ANET_ERR_LEN]; redisReader *reader = redisReaderCreate(); redisReply *reply; int eof = 0; /* True once we consumed all the standard input. */ int done = 0; char magic[20]; /* Special reply we recognize. */ time_t last_read_time = time(NULL); srand(time(NULL)); /* Use non blocking I/O. */ if (anetNonBlock(aneterr,fd) == ANET_ERR) { fprintf(stderr, "Can't set the socket in non blocking mode: %s\n", aneterr); exit(1); } /* Transfer raw protocol and read replies from the server at the same * time. */ while(!done) { int mask = AE_READABLE; if (!eof || obuf_len != 0) mask |= AE_WRITABLE; mask = aeWait(fd,mask,1000); /* Handle the readable state: we can read replies from the server. */ if (mask & AE_READABLE) { ssize_t nread; /* Read from socket and feed the hidisque reader. */ do { nread = read(fd,ibuf,sizeof(ibuf)); if (nread == -1 && errno != EAGAIN && errno != EINTR) { fprintf(stderr, "Error reading from the server: %s\n", strerror(errno)); exit(1); } if (nread > 0) { redisReaderFeed(reader,ibuf,nread); last_read_time = time(NULL); } } while(nread > 0); /* Consume replies. */ do { if (redisReaderGetReply(reader,(void**)&reply) == REDIS_ERR) { fprintf(stderr, "Error reading replies from server\n"); exit(1); } if (reply) { if (reply->type == REDIS_REPLY_ERROR) { fprintf(stderr,"%s\n", reply->str); errors++; } else if (eof && reply->type == REDIS_REPLY_STRING && reply->len == 20) { /* Check if this is the reply to our final ECHO * command. If so everything was received * from the server. */ if (memcmp(reply->str,magic,20) == 0) { printf("Last reply received from server.\n"); done = 1; replies--; } } replies++; freeReplyObject(reply); } } while(reply); } /* Handle the writable state: we can send protocol to the server. */ if (mask & AE_WRITABLE) { while(1) { /* Transfer current buffer to server. */ if (obuf_len != 0) { ssize_t nwritten = write(fd,obuf+obuf_pos,obuf_len); if (nwritten == -1) { if (errno != EAGAIN && errno != EINTR) { fprintf(stderr, "Error writing to the server: %s\n", strerror(errno)); exit(1); } else { nwritten = 0; } } obuf_len -= nwritten; obuf_pos += nwritten; if (obuf_len != 0) break; /* Can't accept more data. */ } /* If buffer is empty, load from stdin. */ if (obuf_len == 0 && !eof) { ssize_t nread = read(STDIN_FILENO,obuf,sizeof(obuf)); if (nread == 0) { /* The ECHO sequence starts with a "\r\n" so that if there * is garbage in the protocol we read from stdin, the ECHO * will likely still be properly formatted. * CRLF is ignored by Disque, so it has no effects. 
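*/
/* Byte-level layout of the sentinel request built below (RESP-style
 * framing; offsets are zero-based):
 *
 *   "\r\n"              offset  0: leading CRLF, resynchronizes the stream
 *                                  if stdin ended with partial garbage
 *   "*2\r\n"            offset  2: array of two elements
 *   "$4\r\nECHO\r\n"    offset  6: bulk string, the command name
 *   "$20\r\n"           offset 16: header of the 20 byte binary payload
 *   <20 magic bytes>    offset 21: overwritten via memcpy(echo+21,magic,20)
 *   "\r\n"              offset 41: trailing CRLF
 *
 * Because the payload is 20 random bytes, a 20 byte string reply can be
 * matched against the magic with memcmp() to conclude, with overwhelming
 * probability, that every prior command was answered.
 */
/*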
*/ char echo[] = "\r\n*2\r\n$4\r\nECHO\r\n$20\r\n01234567890123456789\r\n"; int j; eof = 1; /* Everything transferred, so we queue a special * ECHO command that we can match in the replies * to make sure everything was read from the server. */ for (j = 0; j < 20; j++) magic[j] = rand() & 0xff; memcpy(echo+21,magic,20); memcpy(obuf,echo,sizeof(echo)-1); obuf_len = sizeof(echo)-1; obuf_pos = 0; printf("All data transferred. Waiting for the last reply...\n"); } else if (nread == -1) { fprintf(stderr, "Error reading from stdin: %s\n", strerror(errno)); exit(1); } else { obuf_len = nread; obuf_pos = 0; } } if (obuf_len == 0 && eof) break; } } /* Handle timeout, that is, we reached EOF, and we are not getting * replies from the server for a few seconds, nor the final ECHO is * received. */ if (eof && config.pipe_timeout > 0 && time(NULL)-last_read_time > config.pipe_timeout) { fprintf(stderr,"No replies for %d seconds: exiting.\n", config.pipe_timeout); errors++; break; } } redisReaderFree(reader); printf("errors: %lld, replies: %lld\n", errors, replies); if (errors) exit(1); else exit(0); } /*------------------------------------------------------------------------------ * Find big keys *--------------------------------------------------------------------------- */ #define TYPE_STRING 0 #define TYPE_LIST 1 #define TYPE_SET 2 #define TYPE_HASH 3 #define TYPE_ZSET 4 #define TYPE_NONE 5 static redisReply *sendScan(unsigned long long *it) { redisReply *reply = redisCommand(context, "SCAN %llu", *it); /* Handle any error conditions */ if(reply == NULL) { fprintf(stderr, "\nI/O error\n"); exit(1); } else if(reply->type == REDIS_REPLY_ERROR) { fprintf(stderr, "SCAN error: %s\n", reply->str); exit(1); } else if(reply->type != REDIS_REPLY_ARRAY) { fprintf(stderr, "Non ARRAY response from SCAN!\n"); exit(1); } else if(reply->elements != 2) { fprintf(stderr, "Invalid element count from SCAN!\n"); exit(1); } /* Validate our types are correct */ assert(reply->element[0]->type == REDIS_REPLY_STRING); assert(reply->element[1]->type == REDIS_REPLY_ARRAY); /* Update iterator */ *it = atoi(reply->element[0]->str); return reply; } static int getDbSize(void) { redisReply *reply; int size; reply = redisCommand(context, "DBSIZE"); if(reply == NULL || reply->type != REDIS_REPLY_INTEGER) { fprintf(stderr, "Couldn't determine DBSIZE!\n"); exit(1); } /* Grab the number of keys and free our reply */ size = reply->integer; freeReplyObject(reply); return size; } static int toIntType(char *key, char *type) { if(!strcmp(type, "string")) { return TYPE_STRING; } else if(!strcmp(type, "list")) { return TYPE_LIST; } else if(!strcmp(type, "set")) { return TYPE_SET; } else if(!strcmp(type, "hash")) { return TYPE_HASH; } else if(!strcmp(type, "zset")) { return TYPE_ZSET; } else if(!strcmp(type, "none")) { return TYPE_NONE; } else { fprintf(stderr, "Unknown type '%s' for key '%s'\n", type, key); exit(1); } } static void getKeyTypes(redisReply *keys, int *types) { redisReply *reply; unsigned int i; /* Pipeline TYPE commands */ for(i=0;ielements;i++) { redisAppendCommand(context, "TYPE %s", keys->element[i]->str); } /* Retrieve types */ for(i=0;ielements;i++) { if(redisGetReply(context, (void**)&reply)!=REDIS_OK) { fprintf(stderr, "Error getting type for key '%s' (%d: %s)\n", keys->element[i]->str, context->err, context->errstr); exit(1); } else if(reply->type != REDIS_REPLY_STATUS) { fprintf(stderr, "Invalid reply type (%d) for TYPE on key '%s'!\n", reply->type, keys->element[i]->str); exit(1); } types[i] = 
toIntType(keys->element[i]->str, reply->str); freeReplyObject(reply); } } static void getKeySizes(redisReply *keys, int *types, unsigned long long *sizes) { redisReply *reply; char *sizecmds[] = {"STRLEN","LLEN","SCARD","HLEN","ZCARD"}; unsigned int i; /* Pipeline size commands */ for(i=0;ielements;i++) { /* Skip keys that were deleted */ if(types[i]==TYPE_NONE) continue; redisAppendCommand(context, "%s %s", sizecmds[types[i]], keys->element[i]->str); } /* Retreive sizes */ for(i=0;ielements;i++) { /* Skip keys that dissapeared between SCAN and TYPE */ if(types[i] == TYPE_NONE) { sizes[i] = 0; continue; } /* Retreive size */ if(redisGetReply(context, (void**)&reply)!=REDIS_OK) { fprintf(stderr, "Error getting size for key '%s' (%d: %s)\n", keys->element[i]->str, context->err, context->errstr); exit(1); } else if(reply->type != REDIS_REPLY_INTEGER) { /* Theoretically the key could have been removed and * added as a different type between TYPE and SIZE */ fprintf(stderr, "Warning: %s on '%s' failed (may have changed type)\n", sizecmds[types[i]], keys->element[i]->str); sizes[i] = 0; } else { sizes[i] = reply->integer; } freeReplyObject(reply); } } static void findBigKeys(void) { unsigned long long biggest[5] = {0}, counts[5] = {0}, totalsize[5] = {0}; unsigned long long sampled = 0, total_keys, totlen=0, *sizes=NULL, it=0; sds maxkeys[5] = {0}; char *typename[] = {"string","list","set","hash","zset"}; char *typeunit[] = {"bytes","items","members","fields","members"}; redisReply *reply, *keys; unsigned int arrsize=0, i; int type, *types=NULL; double pct; /* Total keys pre scanning */ total_keys = getDbSize(); /* Status message */ printf("\n# Scanning the entire keyspace to find biggest keys as well as\n"); printf("# average sizes per key type. You can use -i 0.1 to sleep 0.1 sec\n"); printf("# per 100 SCAN commands (not usually needed).\n\n"); /* New up sds strings to keep track of overall biggest per type */ for(i=0;ielement[1]; /* Reallocate our type and size array if we need to */ if(keys->elements > arrsize) { types = zrealloc(types, sizeof(int)*keys->elements); sizes = zrealloc(sizes, sizeof(unsigned long long)*keys->elements); if(!types || !sizes) { fprintf(stderr, "Failed to allocate storage for keys!\n"); exit(1); } arrsize = keys->elements; } /* Retreive types and then sizes */ getKeyTypes(keys, types); getKeySizes(keys, types, sizes); /* Now update our stats */ for(i=0;ielements;i++) { if((type = types[i]) == TYPE_NONE) continue; totalsize[type] += sizes[i]; counts[type]++; totlen += keys->element[i]->len; sampled++; if(biggest[type]element[i]->str, sizes[i], typeunit[type]); /* Keep track of biggest key name for this type */ maxkeys[type] = sdscpy(maxkeys[type], keys->element[i]->str); if(!maxkeys[type]) { fprintf(stderr, "Failed to allocate memory for key!\n"); exit(1); } /* Keep track of the biggest size for this type */ biggest[type] = sizes[i]; } /* Update overall progress */ if(sampled % 1000000 == 0) { printf("[%05.2f%%] Sampled %llu keys so far\n", pct, sampled); } } /* Sleep if we've been directed to do so */ if(sampled && (sampled %100) == 0 && config.interval) { usleep(config.interval); } freeReplyObject(reply); } while(it != 0); if(types) zfree(types); if(sizes) zfree(sizes); /* We're done */ printf("\n-------- summary -------\n\n"); printf("Sampled %llu keys in the keyspace!\n", sampled); printf("Total key length in bytes is %llu (avg len %.2f)\n\n", totlen, totlen ? 
(double)totlen/sampled : 0); /* Output the biggest keys we found, for types we did find */ for(i=0;i0) { printf("Biggest %6s found '%s' has %llu %s\n", typename[i], maxkeys[i], biggest[i], typeunit[i]); } } printf("\n"); for(i=0;itype == REDIS_REPLY_ERROR) { printf("ERROR: %s\n", reply->str); exit(1); } if ((i++ % 20) == 0) { printf( "------- data ------ --------------------- load -------------------- - child -\n" "keys mem clients blocked requests connections \n"); } /* Keys */ aux = 0; for (j = 0; j < 20; j++) { long k; sprintf(buf,"db%d:keys",j); k = getLongInfoField(reply->str,buf); if (k == LONG_MIN) continue; aux += k; } sprintf(buf,"%ld",aux); printf("%-11s",buf); /* Used memory */ aux = getLongInfoField(reply->str,"used_memory"); bytesToHuman(buf,aux); printf("%-8s",buf); /* Clients */ aux = getLongInfoField(reply->str,"connected_clients"); sprintf(buf,"%ld",aux); printf(" %-8s",buf); /* Blocked (BLPOPPING) Clients */ aux = getLongInfoField(reply->str,"blocked_clients"); sprintf(buf,"%ld",aux); printf("%-8s",buf); /* Requets */ aux = getLongInfoField(reply->str,"total_commands_processed"); sprintf(buf,"%ld (+%ld)",aux,requests == 0 ? 0 : aux-requests); printf("%-19s",buf); requests = aux; /* Connections */ aux = getLongInfoField(reply->str,"total_connections_received"); sprintf(buf,"%ld",aux); printf(" %-12s",buf); /* Children */ aux = getLongInfoField(reply->str,"bgsave_in_progress"); aux |= getLongInfoField(reply->str,"aof_rewrite_in_progress") << 1; switch(aux) { case 0: break; case 1: printf("SAVE"); break; case 2: printf("AOF"); break; case 3: printf("SAVE+AOF"); break; } printf("\n"); freeReplyObject(reply); usleep(config.interval); } } /*------------------------------------------------------------------------------ * Scan mode *--------------------------------------------------------------------------- */ static void scanMode(void) { redisReply *reply; unsigned long long cur = 0; do { if (config.pattern) reply = redisCommand(context,"SCAN %llu MATCH %s", cur,config.pattern); else reply = redisCommand(context,"SCAN %llu",cur); if (reply == NULL) { printf("I/O error\n"); exit(1); } else if (reply->type == REDIS_REPLY_ERROR) { printf("ERROR: %s\n", reply->str); exit(1); } else { unsigned int j; cur = strtoull(reply->element[0]->str,NULL,10); for (j = 0; j < reply->element[1]->elements; j++) printf("%s\n", reply->element[1]->element[j]->str); } freeReplyObject(reply); } while(cur != 0); exit(0); } /*------------------------------------------------------------------------------ * Intrisic latency mode. * * Measure max latency of a running process that does not result from * syscalls. Basically this software should provide an hint about how much * time the kernel leaves the process without a chance to run. *--------------------------------------------------------------------------- */ /* This is just some computation the compiler can't optimize out. * Should run in less than 100-200 microseconds even using very * slow hardware. Runs in less than 10 microseconds in modern HW. 
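*/
/* The measurement idea, condensed (a sketch of what intrinsicLatencyMode()
 * below does; the iteration count here is arbitrary, the real loop is time
 * bounded): time a fixed, tiny unit of CPU work and watch for spikes. Since
 * the work itself is constant, any outlier means the process was not
 * running, i.e. scheduler or hypervisor induced latency.
 *
 *   long long t0, dt, max = 0;
 *   for (long long i = 0; i < 1000000; i++) {
 *       t0 = ustime();
 *       compute_something_fast();
 *       dt = ustime() - t0;
 *       if (dt > max) max = dt;   // spike: the kernel preempted us
 *   }
 *
 * When reporting, note that 1 microsecond is 1e3 nanoseconds, so an average
 * of avg_us microseconds per run is avg_us * 1e3 nanoseconds.
 */
/*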
*/ unsigned long compute_something_fast(void) { unsigned char s[256], i, j, t; int count = 1000, k; unsigned long output = 0; for (k = 0; k < 256; k++) s[k] = k; i = 0; j = 0; while(count--) { i++; j = j + s[i]; t = s[i]; s[i] = s[j]; s[j] = t; output += s[(s[i]+s[j])&255]; } return output; } static void intrinsicLatencyModeStop(int s) { UNUSED(s); force_cancel_loop = 1; } static void intrinsicLatencyMode(void) { long long test_end, run_time, max_latency = 0, runs = 0; run_time = config.intrinsic_latency_duration*1000000; test_end = ustime() + run_time; signal(SIGINT, intrinsicLatencyModeStop); while(1) { long long start, end, latency; start = ustime(); compute_something_fast(); end = ustime(); latency = end-start; runs++; if (latency <= 0) continue; /* Reporting */ if (latency > max_latency) { max_latency = latency; printf("Max latency so far: %lld microseconds.\n", max_latency); } double avg_us = (double)run_time/runs; double avg_ns = avg_us * 10e3; if (force_cancel_loop || end > test_end) { printf("\n%lld total runs " "(avg latency: " "%.4f microseconds / %.2f nanoseconds per run).\n", runs, avg_us, avg_ns); printf("Worst run took %.0fx longer than the average latency.\n", max_latency / avg_us); exit(0); } } } /*------------------------------------------------------------------------------ * Program main() *--------------------------------------------------------------------------- */ int main(int argc, char **argv) { int firstarg; config.hostip = sdsnew("127.0.0.1"); config.hostport = 7711; config.hostsocket = NULL; config.repeat = 1; config.interval = 0; config.dbnum = 0; config.interactive = 0; config.shutdown = 0; config.monitor_mode = 0; config.pubsub_mode = 0; config.latency_mode = 0; config.latency_history = 0; config.cluster_mode = 0; config.slave_mode = 0; config.getrdb_mode = 0; config.stat_mode = 0; config.scan_mode = 0; config.intrinsic_latency_mode = 0; config.pattern = NULL; config.rdb_filename = NULL; config.pipe_mode = 0; config.pipe_timeout = DISQUE_CLI_DEFAULT_PIPE_TIMEOUT; config.bigkeys = 0; config.stdinarg = 0; config.auth = NULL; config.eval = NULL; config.last_cmd_type = -1; if (!isatty(fileno(stdout)) && (getenv("FAKETTY") == NULL)) config.output = OUTPUT_RAW; else config.output = OUTPUT_STANDARD; config.mb_delim = sdsnew("\n"); cliInitHelp(); firstarg = parseOptions(argc,argv); argc -= firstarg; argv += firstarg; signal(SIGPIPE, SIG_IGN); /* Latency mode */ if (config.latency_mode) { if (cliConnect(0) == REDIS_ERR) exit(1); latencyMode(); } /* Slave mode */ if (config.slave_mode) { if (cliConnect(0) == REDIS_ERR) exit(1); slaveMode(); } /* Get RDB mode. */ if (config.getrdb_mode) { if (cliConnect(0) == REDIS_ERR) exit(1); getRDB(); } /* Pipe mode */ if (config.pipe_mode) { if (cliConnect(0) == REDIS_ERR) exit(1); pipeMode(); } /* Find big keys */ if (config.bigkeys) { if (cliConnect(0) == REDIS_ERR) exit(1); findBigKeys(); } /* Stat mode */ if (config.stat_mode) { if (cliConnect(0) == REDIS_ERR) exit(1); if (config.interval == 0) config.interval = 1000000; statMode(); } /* Scan mode */ if (config.scan_mode) { if (cliConnect(0) == REDIS_ERR) exit(1); scanMode(); } /* Intrinsic latency mode */ if (config.intrinsic_latency_mode) intrinsicLatencyMode(); /* Start interactive mode when no command is provided */ if (argc == 0 && !config.eval) { /* Note that in repl mode we don't abort on connection error. * A new attempt will be performed for every command send. 
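*/
/* Illustration of that policy (condensing the logic in repl() above): each
 * interactive command gets exactly one immediate reconnection attempt,
 * roughly:
 *
 *   if (cliSendCommand(argc, argv, 1) != REDIS_OK) {
 *       cliConnect(1);                  // force a fresh connection
 *       if (cliSendCommand(argc, argv, 1) != REDIS_OK)
 *           cliPrintContextError();     // report; retry at the next command
 *   }
 */
/*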
*/ cliConnect(0); repl(); } /* Otherwise, we have some arguments to execute */ if (cliConnect(0) != REDIS_OK) exit(1); if (config.eval) { return evalMode(argc,argv); } else { return noninteractive(argc,convertToSds(argc,argv)); } } disque-1.0-rc1/src/disqueassert.h000066400000000000000000000043541264176561100170220ustar00rootroot00000000000000/* disqueassert.h -- Drop in replacemnet assert.h that prints the stack trace * in the Disque logs. * * This file should be included instead of "assert.h" inside libraries used by * Disque that are using assertions, so instead of Disque disappearing with * SIGABORT, we get the details and stack trace inside the log file. * * ---------------------------------------------------------------------------- * * Copyright (c) 2006-2012, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef __DISQUE_ASSERT_H__ #define __DISQUE_ASSERT_H__ #include /* for _exit() */ #define assert(_e) ((_e)?(void)0 : (_serverAssert(#_e,__FILE__,__LINE__),_exit(1))) void _serverAssert(char *estr, char *file, int line); #endif disque-1.0-rc1/src/endianconv.c000066400000000000000000000071441264176561100164250ustar00rootroot00000000000000/* endinconv.c -- Endian conversions utilities. * * This functions are never called directly, but always using the macros * defined into endianconv.h, this way we define everything is a non-operation * if the arch is already little endian. * * Disque tries to encode everything as little endian (but a few things that need * to be backward compatible are still in big endian) because most of the * production environments are little endian, and we have a lot of conversions * in a few places because ziplists, intsets, zipmaps, need to be endian-neutral * even in memory, since they are serialied on RDB files directly with a single * write(2) without other additional steps. * * ---------------------------------------------------------------------------- * * Copyright (c) 2011-2012, Salvatore Sanfilippo * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include /* Toggle the 16 bit unsigned integer pointed by *p from little endian to * big endian */ void memrev16(void *p) { unsigned char *x = p, t; t = x[0]; x[0] = x[1]; x[1] = t; } /* Toggle the 32 bit unsigned integer pointed by *p from little endian to * big endian */ void memrev32(void *p) { unsigned char *x = p, t; t = x[0]; x[0] = x[3]; x[3] = t; t = x[1]; x[1] = x[2]; x[2] = t; } /* Toggle the 64 bit unsigned integer pointed by *p from little endian to * big endian */ void memrev64(void *p) { unsigned char *x = p, t; t = x[0]; x[0] = x[7]; x[7] = t; t = x[1]; x[1] = x[6]; x[6] = t; t = x[2]; x[2] = x[5]; x[5] = t; t = x[3]; x[3] = x[4]; x[4] = t; } uint16_t intrev16(uint16_t v) { memrev16(&v); return v; } uint32_t intrev32(uint32_t v) { memrev32(&v); return v; } uint64_t intrev64(uint64_t v) { memrev64(&v); return v; } #ifdef TESTMAIN #include int main(void) { char buf[32]; sprintf(buf,"ciaoroma"); memrev16(buf); printf("%s\n", buf); sprintf(buf,"ciaoroma"); memrev32(buf); printf("%s\n", buf); sprintf(buf,"ciaoroma"); memrev64(buf); printf("%s\n", buf); return 0; } #endif disque-1.0-rc1/src/endianconv.h000066400000000000000000000054201264176561100164250ustar00rootroot00000000000000/* See endianconv.c top comments for more information * * ---------------------------------------------------------------------------- * * Copyright (c) 2011-2012, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef __ENDIANCONV_H #define __ENDIANCONV_H #include "config.h" #include void memrev16(void *p); void memrev32(void *p); void memrev64(void *p); uint16_t intrev16(uint16_t v); uint32_t intrev32(uint32_t v); uint64_t intrev64(uint64_t v); /* variants of the function doing the actual convertion only if the target * host is big endian */ #if (BYTE_ORDER == LITTLE_ENDIAN) #define memrev16ifbe(p) #define memrev32ifbe(p) #define memrev64ifbe(p) #define intrev16ifbe(v) (v) #define intrev32ifbe(v) (v) #define intrev64ifbe(v) (v) #else #define memrev16ifbe(p) memrev16(p) #define memrev32ifbe(p) memrev32(p) #define memrev64ifbe(p) memrev64(p) #define intrev16ifbe(v) intrev16(v) #define intrev32ifbe(v) intrev32(v) #define intrev64ifbe(v) intrev64(v) #endif /* The functions htonu64() and ntohu64() convert the specified value to * network byte ordering and back. In big endian systems they are no-ops. */ #if (BYTE_ORDER == BIG_ENDIAN) #define htonu64(v) (v) #define ntohu64(v) (v) #else #define htonu64(v) intrev64(v) #define ntohu64(v) intrev64(v) #endif #endif disque-1.0-rc1/src/fmacros.h000066400000000000000000000041461264176561100157370ustar00rootroot00000000000000/* * Copyright (c) 2009-2012, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
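*/
/* Usage note (general C practice, not specific to this file): feature test
 * macros only take effect if they are defined before the first system
 * header is read, so translation units should include this header first:
 *
 *   #include "fmacros.h"   // must precede any system header
 *   #include <stdio.h>
 *   #include <unistd.h>
 *
 * Defining _FILE_OFFSET_BITS to 64 after <stdio.h> has already been
 * included, for example, could silently leave off_t at 32 bits on some
 * platforms.
 */
/*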
*/ #ifndef _DISQUE_FMACRO_H #define _DISQUE_FMACRO_H #define _BSD_SOURCE #if defined(__linux__) #define _GNU_SOURCE #define _DEFAULT_SOURCE #endif #if defined(_AIX) #define _ALL_SOURCE #endif #if defined(__linux__) || defined(__OpenBSD__) #define _XOPEN_SOURCE 700 /* * On NetBSD, _XOPEN_SOURCE undefines _NETBSD_SOURCE and * thus hides inet_aton etc. */ #elif !defined(__NetBSD__) #define _XOPEN_SOURCE #endif #if defined(__sun) #define _POSIX_C_SOURCE 199506L #endif #define _LARGEFILE_SOURCE #define _FILE_OFFSET_BITS 64 #endif disque-1.0-rc1/src/help.h000066400000000000000000000101241264176561100152260ustar00rootroot00000000000000/* Automatically generated by utils/generate-command-help.rb, do not edit. */ #ifndef __DISQUE_HELP_H #define __DISQUE_HELP_H static char *commandGroups[] = { "connection", /* 0 */ "server", /* 1 */ "cluster", /* 2 */ "queues", /* 3 */ "jobs" /* 4 */ }; struct commandHelp { char *name; char *params; char *summary; int group; char *since; } commandHelp[] = { { "AUTH", "password", "Authenticate to the server", 0, "1.0.0" }, { "PING", "-", "Ping the server", 0, "1.0.0" }, { "INFO", "[section]", "Get information and statistics about the server", 1, "1.0.0" }, { "SHUTDOWN", "[NOSAVE] [SAVE]", "Synchronously save the dataset to disk and then shut down the server", 1, "1.0.0" }, { "MONITOR", "-", "Listen for all requests received by the server in real time", 0, "1.0.0" }, { "CLUSTER", " [OPTIONS...]", "Cluster management command", 2, "1.0.0" }, { "CLIENT GETNAME", "-", "Get the current connection name", 0, "1.0.0" }, { "CLIENT KILL", "ip:port", "Kill the connection of a client", 0, "1.0.0" }, { "CLIENT LIST", "-", "Get the list of client connections", 0, "1.0.0" }, { "CLIENT PAUSE", "timeout", "Stop processing commands from clients for some time", 0, "1.0.0" }, { "CLIENT SETNAME", "connection-name", "Set the current connection name", 0, "1.0.0" }, { "CONFIG GET", "parameter", "Get the value of a configuration parameter", 1, "1.0.0" }, { "CONFIG RESETSTAT", "-", "Reset the stats returned by INFO", 1, "1.0.0" }, { "CONFIG REWRITE", "-", "Rewrite the configuration file with the in memory configuration", 1, "1.0.0" }, { "CONFIG SET", "parameter value", "Set a configuration parameter to the given value", 1, "1.0.0" }, { "BGREWRITEAOF", "-", "Asynchronously rewrite the append-only file", 1, "1.0.0" }, { "DEBUG SEGFAULT", "-", "Make the server crash", 1, "1.0.0" }, { "DEBUG FLUSHALL", "-", "Delete all jobs and queues", 1, "1.0.0" }, { "QUIT", "-", "Close the connection", 0, "1.0.0" }, { "LATENCY", "]", "Latency monitoring", 1, "1.0.0" }, { "HELLO", "-", "Get list of nodes for clients connection management", 2, "1.0.0" }, { "ADDJOB", " [REPLICATE ] [DELAY ] [RETRY ] [TTL ] [MAXLEN ] [ASYNC]", "Create a new job", 4, "1.0.0" }, { "GETJOB", "[NOHANG] [TIMEOUT ] [COUNT ] [WITHCOUNTERS] FROM queue1 queue2 ... 
queueN", "Fetch jobs from queues", 4, "1.0.0" }, { "ACKJOB", " [ ...]", "Acknowledge jobs as delivered", 4, "1.0.0" }, { "FASTACK", " [ ...]", "Acknowledge jobs as delivered deleting the job from the reachable nodes ASAP", 4, "1.0.0" }, { "WORKING", "", "Attempt to postpone next delivery of the specified job", 4, "1.0.0" }, { "NACK", " [ ...]", "Negative acknowledge: increment the NACK counter for the job and ask for a next delivery ASAP", 4, "1.0.0" }, { "QLEN", "", "Return number of queued jobs in the specified queue in the local node", 3, "1.0.0" }, { "QSTAT", "", "Return local node queue statistics", 3, "1.0.0" }, { "QPEEK", " ", "Inspect jobs into a queue without actually fetching them", 3, "1.0.0" }, { "ENQUEUE", " [ ...]", "Force local node to put the specified jobs back into the queue", 3, "1.0.0" }, { "DEQUEUE", " [ ...]", "Force local node to remove the specified jobs out of the queue", 3, "1.0.0" }, { "DELJOB", " [ ...]", "Remove job from local node", 4, "1.0.0" }, { "SHOW", "", "Show info about the specified job", 4, "1.0.0" }, { "QSCAN", "[COUNT ] [BUSYLOOP] [MINLEN ] [MAXLEN ] [IMPORTRATE ]", "Iterate local node queues", 3, "1.0.0" }, { "JSCAN", "[] [COUNT ] [BUSYLOOP] [QUEUE ] [STATE STATE ... STATE ] [REPLY all|id]", "Iterate local node jobs", 4, "1.0.0" }, { "PAUSE", " option1 [option2 ... optionN]", "Change queue paused state", 3, "1.0.0" } }; #endif disque-1.0-rc1/src/job.c000066400000000000000000001712651264176561100150610ustar00rootroot00000000000000/* Jobs handling and commands. * * Copyright (c) 2014, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "server.h" #include "cluster.h" #include "job.h" #include "queue.h" #include "ack.h" #include "sha1.h" #include "endianconv.h" #include /* ------------------------- Low level jobs functions ----------------------- */ /* Generate a new Job ID and writes it to the string pointed by 'id' * (NOT including a null term), that must be JOB_ID_LEN or more. 
 *
 * An ID is a 40 bytes string composed as such:
 *
 * +--+-----------------+-+------------------------------+-+-----+
 * |D-| 8 bytes Node ID |-| 144-bit ID (base64: 24 bytes)|-| TTL |
 * +--+-----------------+-+------------------------------+-+-----+
 *
 * "D-" is just a fixed string. All Disque job IDs start with these
 * two bytes.
 *
 * Node ID is the first 8 bytes of the hexadecimal Node ID where the
 * message was created. The main use for this is that a consumer receiving
 * messages from a given queue can collect stats about where the producers
 * are connected, and switch to improve the cluster efficiency.
 *
 * The 144 bit ID is the unique message ID, encoded in base 64 with
 * the standard charset "A-Za-z0-9+/".
 *
 * The TTL is a big endian 16 bit unsigned number capped to 2^16-1
 * if greater than that, and is only used in order to expire ACKs
 * when the job is no longer available. It represents the TTL of the
 * original job in *minutes*, not seconds, and is encoded as a
 * 4 digit hexadecimal number.
 *
 * The TTL is even if the job retry value is 0 (at most once jobs),
 * otherwise it is odd, so the actual precision of the value is 2 minutes.
 * This is useful since the receiver of an ACKJOB command can avoid
 * creating a "dummy ack" for unknown job IDs of at most once jobs. */
void generateJobID(char *id, int ttl, int retry) {
    char *b64cset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                    "abcdefghijklmnopqrstuvwxyz"
                    "0123456789+/";
    char *hexcset = "0123456789abcdef";
    SHA1_CTX ctx;
    unsigned char ttlbytes[2], hash[20];
    int j;
    static uint64_t counter;

    /* Get the pseudo random bytes using SHA1 in counter mode. */
    counter++;
    SHA1Init(&ctx);
    SHA1Update(&ctx,(unsigned char*)server.jobid_seed,CONFIG_RUN_ID_SIZE);
    SHA1Update(&ctx,(unsigned char*)&counter,sizeof(counter));
    SHA1Final(hash,&ctx);

    ttl /= 60; /* Store TTL in minutes. */
    if (ttl > 65535) ttl = 65535;
    if (ttl < 0) ttl = 1;

    /* Force the TTL to be odd if retry > 0, and even if retry == 0. */
    ttl = (retry > 0) ? (ttl|1) : (ttl & ~1);

    ttlbytes[0] = (ttl&0xff00)>>8;
    ttlbytes[1] = ttl&0xff;

    *id++ = 'D';
    *id++ = '-';

    /* 8 bytes from Node ID + separator */
    for (j = 0; j < 8; j++) *id++ = server.cluster->myself->name[j];
    *id++ = '-';

    /* Pseudorandom Message ID + separator. We encode 4 base64 chars
     * per loop (3 digest bytes), and each char encodes 6 bits, so we have
     * to loop 6 times to encode all the 144 bits into 24 destination
     * chars. */
    unsigned char *h = hash;
    for (j = 0; j < 6; j++) {
        id[0] = b64cset[h[0]>>2];
        id[1] = b64cset[(h[0]<<4|h[1]>>4)&63];
        id[2] = b64cset[(h[1]<<2|h[2]>>6)&63];
        id[3] = b64cset[h[3]&63];
        id += 4;
        h += 3;
    }
    *id++ = '-';

    /* 4 TTL bytes in hex. */
    id[0] = hexcset[(ttlbytes[0]&0xf0)>>4];
    id[1] = hexcset[ttlbytes[0]&0xf];
    id[2] = hexcset[(ttlbytes[1]&0xf0)>>4];
    id[3] = hexcset[ttlbytes[1]&0xf];
    id += 4;
}

/* Helper function for setJobTTLFromID() in order to extract the TTL stored
 * as a hex big endian number in the Job ID. The function is only used for
 * this but is more generic. 'p' points to the first digit of 'count' hex
 * digits. The number is assumed to be stored in big endian format. For each
 * byte the first hex char is the most significant. If invalid digits are
 * found they are considered to be zero, however errno is set to EINVAL if
 * this happens.
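*/
/* Worked example (the ID tail is hypothetical): the last four characters
 * of a Job ID encode the TTL in minutes as big endian hex, one digit per
 * nibble. For an ID ending in "05a1":
 *
 *   hexToInt("05a1", 4) == 0x05a1 == 1441 minutes (about 24 hours)
 *
 * 1441 is odd, so the job was created with retry > 0; an even value such
 * as 0x05a0 would mark an at-most-once job (retry == 0).
 */
/*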
*/ uint64_t hexToInt(char *p, size_t count) { uint64_t value = 0; char *charset = "0123456789abcdef"; errno = 0; while(count--) { int c = tolower(*p++); char *pos = strchr(charset,c); int v; if (!pos) { errno = EINVAL; v = 0; } else { v = pos-charset; } value = (value << 4) | v; } return value; } /* Disque aims to avoid to deliver duplicated message whenever possible, so * it is always desirable that a given message is not queued by multiple owners * at the same time. This cannot be guaranteed because of partitions, but one * of the best-effort things we do is that, when a QUEUED message is received * by a node about a job, the node IDs of the sender and the receiver are * compared: If the sender has a greater node ID, we drop the message from our * queue (but retain a copy of the message to retry again later). * * However comparing nodes just by node ID means that a given node is always * greater than the other. So before comparing the node IDs, we mix the IDs * with the pseudorandom part of the Job ID, using the XOR function. This way * the comparision depends on the job. */ int compareNodeIDsByJob(clusterNode *nodea, clusterNode *nodeb, job *j) { int i; char ida[CLUSTER_NAMELEN], idb[CLUSTER_NAMELEN]; memcpy(ida,nodea->name,CLUSTER_NAMELEN); memcpy(idb,nodeb->name,CLUSTER_NAMELEN); for (i = 0; i < CLUSTER_NAMELEN; i++) { /* The Job ID has 24 bytes of pseudo random bits starting at * offset 11. */ ida[i] ^= j->id[11 + i%24]; idb[i] ^= j->id[11 + i%24]; } return memcmp(ida,idb,CLUSTER_NAMELEN); } /* Return the raw TTL (in minutes) from a well-formed Job ID. * The caller should do sanity check on the job ID before calling this * function. Note that the 'id' field of a a job structure is always valid. */ int getRawTTLFromJobID(char *id) { return hexToInt(id+36,4); } /* Set the job ttl from the encoded ttl in its ID. This is useful when we * create a new job just to store the fact it's acknowledged. Thanks to * the TTL encoded in the ID we are able to set the expire time for the job * regardless of the fact we have no info about the job. */ void setJobTTLFromID(job *job) { int expire_minutes = getRawTTLFromJobID(job->id); /* Convert back to absolute unix time. */ job->etime = server.unixtime + expire_minutes*60; } /* Validate the string 'id' as a job ID. 'len' is the number of bytes the * string is composed of. The function just checks length and prefix/suffix. * It's pretty pointless to use more CPU to validate it better since anyway * the lookup will fail. */ int validateJobID(char *id, size_t len) { if (len != JOB_ID_LEN) return C_ERR; if (id[0] != 'D' || id[1] != '-' || id[10] != '-' || id[35] != '-') return C_ERR; return C_OK; } /* Like validateJobID() but if the ID is invalid an error message is sent * to the client 'c' if not NULL. */ int validateJobIdOrReply(client *c, char *id, size_t len) { int retval = validateJobID(id,len); if (retval == C_ERR && c) addReplySds(c,sdsnew("-BADID Invalid Job ID format.\r\n")); return retval; } /* Create a new job in a given state. If 'id' is NULL, a new ID will be * created as assigned, otherwise the specified ID is used. * The 'ttl' and 'retry' arguments are only used if 'id' is not NULL. * * This function only creates the job without any body, the only populated * fields are the ID and the state. */ job *createJob(char *id, int state, int ttl, int retry) { job *j = zmalloc(sizeof(job)); /* Generate a new Job ID if not specified by the caller. 
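*/
/* Sketch of how this constructor combines with the helpers defined below
 * (internal API; error handling simplified, and fields such as j->retry,
 * j->etime and j->qtime are filled in by the real call sites):
 *
 *   job *j = createJob(NULL, JOB_STATE_ACTIVE, 3600, 30);
 *   j->body = sdsnew("some payload");
 *   if (registerJob(j) == C_OK) {
 *       job *found = lookupJob(j->id);  // returns the same object
 *   } else {
 *       freeJob(j);                     // duplicate ID, drop the copy
 *   }
 */
/*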
*/
    if (id == NULL)
        generateJobID(j->id,ttl,retry);
    else
        memcpy(j->id,id,JOB_ID_LEN);
    j->queue = NULL;
    j->state = state;
    j->gc_retry = 0;
    j->flags = 0;
    j->body = NULL;
    j->nodes_delivered = dictCreate(&clusterNodesDictType,NULL);
    j->nodes_confirmed = NULL; /* Only created later on-demand. */
    j->awakeme = 0; /* Not yet registered in awakeme skiplist. */
    /* Number of NACKs and additional deliveries start at zero and
     * are incremented as QUEUED messages are received or sent. */
    j->num_nacks = 0;
    j->num_deliv = 0;
    return j;
}

/* Free a job. Does not automatically unregister it. */
void freeJob(job *j) {
    if (j == NULL) return;
    if (j->queue) decrRefCount(j->queue);
    sdsfree(j->body);
    if (j->nodes_delivered) dictRelease(j->nodes_delivered);
    if (j->nodes_confirmed) dictRelease(j->nodes_confirmed);
    zfree(j);
}

/* Add the job to the jobs hash table, so that we can use lookupJob()
 * (by job ID) later. If a node knows about a job, the job must be
 * registered and can be retrieved via lookupJob(), regardless of its state.
 *
 * On success C_OK is returned. If there is already a job with the
 * specified ID, no operation is performed and the function returns
 * C_ERR. */
int registerJob(job *j) {
    int retval = dictAdd(server.jobs, j->id, NULL);
    if (retval == DICT_ERR) return C_ERR;
    updateJobAwakeTime(j,0);
    return C_OK;
}

/* Lookup a job by ID. */
job *lookupJob(char *id) {
    struct dictEntry *de = dictFind(server.jobs, id);
    return de ? dictGetKey(de) : NULL;
}

/* Remove job references from the system, without freeing the job itself.
 * If the job was already unregistered, C_ERR is returned, otherwise
 * C_OK is returned. */
int unregisterJob(job *j) {
    j = lookupJob(j->id);
    if (!j) return C_ERR;

    /* Emit a DELJOB command for all the job states but WAITREPL (no
     * ADDJOB emitted yet), and ACKED (DELJOB already emitted). */
    if (j->state >= JOB_STATE_ACTIVE && j->state != JOB_STATE_ACKED)
        AOFDelJob(j);

    /* Remove from awake skip list. */
    if (j->awakeme) serverAssert(skiplistDelete(server.awakeme,j));

    /* If the job is queued, remove from queue. */
    if (j->state == JOB_STATE_QUEUED) dequeueJob(j);

    /* If there is a client blocked for this job, inform it that the job
     * got deleted, and unblock it. This should only happen when the job
     * gets expired before the requested replication level is reached. */
    if (j->state == JOB_STATE_WAIT_REPL) {
        client *c = jobGetAssociatedValue(j);
        setJobAssociatedValue(j,NULL);
        addReplySds(c,
            sdsnew("-NOREPL job removed (expired?) before the requested "
                   "replication level was achieved\r\n"));
        /* Change the job state, otherwise unblockClientWaitingJobRepl()
         * will try to remove the job itself. */
        j->state = JOB_STATE_ACTIVE;
        clusterBroadcastDelJob(j);
        unblockClient(c);
    }

    /* Remove the job from the jobs hash table. */
    dictDelete(server.jobs, j->id);
    return C_OK;
}

/* We use the server.jobs hash table in a space efficient way by storing the
 * job only at the 'key' pointer, so the 'value' pointer is free to be used
 * for state specific associated information.
 *
 * When the job state is JOB_STATE_WAIT_REPL, the value is set to the client
 * that is waiting for synchronous replication of the job. */
void setJobAssociatedValue(job *j, void *val) {
    struct dictEntry *de = dictFind(server.jobs, j->id);
    if (de) dictSetVal(server.jobs,de,val);
}

/* See setJobAssociatedValue() top comment. */
void *jobGetAssociatedValue(job *j) {
    struct dictEntry *de = dictFind(server.jobs, j->id);
    return de ? dictGetVal(de) : NULL;
}

/* Return the job state as a C string pointer. This is mainly useful for
 * reporting / debugging tasks.
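*/
/* Round-trip examples for the two converters below:
 *
 *   jobStateToString(JOB_STATE_QUEUED)    -> "queued"
 *   jobStateFromString("queued")          -> JOB_STATE_QUEUED
 *   jobStateFromString("no-such-state")   -> -1
 *   jobStateToString(42)                  -> "unknown"
 */
/*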
*/ char *jobStateToString(int state) { char *states[] = {"wait-repl","active","queued","acked"}; if (state < 0 || state > JOB_STATE_ACKED) return "unknown"; return states[state]; } /* Return the state number for the specified C string, or -1 if * there is no match. */ int jobStateFromString(char *state) { if (!strcasecmp(state,"wait-repl")) return JOB_STATE_WAIT_REPL; else if (!strcasecmp(state,"active")) return JOB_STATE_ACTIVE; else if (!strcasecmp(state,"queued")) return JOB_STATE_QUEUED; else if (!strcasecmp(state,"acked")) return JOB_STATE_ACKED; else return -1; } /* ----------------------------- Awakeme list ------------------------------ * Disque needs to perform periodic tasks on registered jobs, for example * we need to remove expired jobs (TTL reached), requeue existing jobs that * where not acknowledged in time, schedule the job garbage collection after * the job is acknowledged, and so forth. * * To simplify the handling of periodic operations without adding multiple * timers for each job, jobs are put into a skip list that order jobs for * the unix time we need to take some action about them. * * Every registered job is into this list. After we update some job field * that is related to scheduled operations on the job, or when it's state * is updated, we need to call updateJobAwakeTime() again in order to move * the job into the appropriate place in the awakeme skip list. * * processJobs() takes care of handling the part of the awakeme list which * has an awakeme time <= to the current time. As a result of processing a * job, we expect it to likely be updated to be processed in the future * again, or deleted at all. */ /* Ask the system to update the time the job will be called again as an * argument of awakeJob() in order to handle delayed tasks for this job. * If 'at' is zero, the function computes the next time we should check * the job status based on the next quee time (qtime), expire time, garbage * collection if it's an ACK, and so forth. * * Otherwise if 'at' is non-zero, it's up to the caller to set the time * at which the job will be awake again. */ void updateJobAwakeTime(job *j, mstime_t at) { if (at == 0) { /* Best case is to handle it for eviction. One second more is added * in order to make sure when the job is processed we found it to * be already expired. */ at = (mstime_t)j->etime*1000+1000; if (j->state == JOB_STATE_ACKED) { /* Try to garbage collect this ACKed job again in the future. */ mstime_t retry_gc_again = getNextGCRetryTime(j); if (retry_gc_again < at) at = retry_gc_again; } else if ((j->state == JOB_STATE_ACTIVE || j->state == JOB_STATE_QUEUED) && j->qtime) { /* Schedule the job to be queued, and if the job is flagged * BCAST_WILLQUEUE, make sure to awake the job a bit earlier * to broadcast a WILLQUEUE message. */ mstime_t qtime = j->qtime; if (j->flags & JOB_FLAG_BCAST_WILLQUEUE) qtime -= JOB_WILLQUEUE_ADVANCE; if (qtime < at) at = qtime; } } /* Only update the job position into the skiplist if needed. */ if (at != j->awakeme) { /* Remove from skip list. */ if (j->awakeme) { serverAssert(skiplistDelete(server.awakeme,j)); } /* Insert it back again in the skip list with the new awake time. */ j->awakeme = at; skiplistInsert(server.awakeme,j); } } /* Set the specified unix time at which a job will be queued again * in the local node. */ void updateJobRequeueTime(job *j, mstime_t qtime) { /* Don't violate at-most-once (retry == 0) contract in case of bugs. 
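/* Editor's note: a worked example, not part of the original source. Consider
 * an ACTIVE job with expire time T (unix seconds) and queue time Q (unix
 * milliseconds), with the JOB_FLAG_BCAST_WILLQUEUE flag set. Calling
 * updateJobAwakeTime(j,0) computes:
 *
 *   at        = T*1000 + 1000              (eviction check, 1s after expire)
 *   candidate = Q - JOB_WILLQUEUE_ADVANCE  (wake 500 ms early for WILLQUEUE)
 *   at        = min(at, candidate)
 *
 * So a job expiring in one hour but due to be re-queued in 30 seconds is
 * awakened 29.5 seconds from now, early enough to broadcast WILLQUEUE
 * before actually re-queueing it. */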
/* Set the specified unix time at which a job will be queued again
 * in the local node. */
void updateJobRequeueTime(job *j, mstime_t qtime) {
    /* Don't violate the at-most-once (retry == 0) contract in case of
     * bugs. */
    if (j->retry == 0 || j->qtime == 0) return;
    j->qtime = qtime;
    updateJobAwakeTime(j,0);
}

/* Job comparison inside the awakeme skiplist: by awakeme time. If that is
 * the same, jobs are compared by ctime. If the same again, by job ID. */
int skiplistCompareJobsToAwake(const void *a, const void *b) {
    const job *ja = a, *jb = b;

    if (ja->awakeme > jb->awakeme) return 1;
    if (jb->awakeme > ja->awakeme) return -1;
    if (ja->ctime > jb->ctime) return 1;
    if (jb->ctime > ja->ctime) return -1;
    return memcmp(ja->id,jb->id,JOB_ID_LEN);
}

/* Used to show jobs info for debugging or under unexpected conditions. */
void logJobsDebugInfo(int level, char *msg, job *j) {
    serverLog(level,
        "%s %.*s: state=%d retry=%d delay=%d replicate=%d flags=%d now=%lld cached_now=%lld awake=%lld (%lld) qtime=%lld etime=%lld",
        msg,
        JOB_ID_LEN, j->id,
        (int)j->state,
        (int)j->retry,
        (int)j->delay,
        (int)j->repl,
        (int)j->flags,
        (long long)mstime(),
        (long long)server.mstime,
        (long long)j->awakeme-mstime(),
        (long long)j->awakeme,
        (long long)j->qtime-mstime(),
        (long long)j->etime*1000-mstime()
    );
}

/* Process the specified job to perform asynchronous operations on it.
 * Check processJobs() for more info. */
void processJob(job *j) {
    mstime_t old_awakeme = j->awakeme;

    logJobsDebugInfo(LL_VERBOSE,"PROCESSING",j);

    /* Remove expired jobs. */
    if (j->etime <= server.unixtime) {
        serverLog(LL_VERBOSE,"EVICT %.*s", JOB_ID_LEN, j->id);
        unregisterJob(j);
        freeJob(j);
        return;
    }

    /* Broadcast WILLQUEUE to inform other nodes we are going to re-queue
     * the job shortly. */
    if ((j->state == JOB_STATE_ACTIVE ||
         j->state == JOB_STATE_QUEUED) &&
         j->flags & JOB_FLAG_BCAST_WILLQUEUE &&
         j->qtime-JOB_WILLQUEUE_ADVANCE <= server.mstime)
    {
        if (j->state != JOB_STATE_QUEUED) clusterSendWillQueue(j);
        /* Clear the WILLQUEUE flag, so that the job will be rescheduled
         * for when we need to queue it (otherwise it is scheduled
         * JOB_WILLQUEUE_ADVANCE milliseconds before). */
        j->flags &= ~JOB_FLAG_BCAST_WILLQUEUE;
        updateJobAwakeTime(j,0);
    }

    /* Requeue the job if needed. This will also take care of putting the
     * job into the queue for the first time for delayed jobs, including
     * the ones with retry=0. */
    if (j->state == JOB_STATE_ACTIVE && j->qtime <= server.mstime) {
        queue *q;

        /* We need to check if the queue is paused in input. If that's
         * the case, we do the following:
         *
         * If retry != 0, postpone the enqueue-time by "retry" seconds.
         *
         * If retry == 0 (at most once job), this is a job with a delay that
         * will never be queued again, and we are the only owner.
         * In such a case, put it into the queue, or the job will be
         * leaked. */
        if (j->retry != 0 &&
            (q = lookupQueue(j->queue)) != NULL &&
            q->flags & QUEUE_FLAG_PAUSED_IN)
        {
            updateJobRequeueTime(j,server.mstime+
                                   j->retry*1000+
                                   randomTimeError(DISQUE_TIME_ERR));
        } else {
            enqueueJob(j,0);
        }
    }

    /* Update the job re-queue time if the job is already queued. */
    if (j->state == JOB_STATE_QUEUED && j->qtime <= server.mstime &&
        j->retry)
    {
        j->flags |= JOB_FLAG_BCAST_WILLQUEUE;
        j->qtime = server.mstime +
                   j->retry*1000 +
                   randomTimeError(DISQUE_TIME_ERR);
        updateJobAwakeTime(j,0);
    }

    /* Try a job garbage collection. */
    if (j->state == JOB_STATE_ACKED) {
        tryJobGC(j);
        updateJobAwakeTime(j,0);
    }

    if (old_awakeme == j->awakeme)
        logJobsDebugInfo(LL_WARNING, "~~~WARNING~~~ NOT PROCESSABLE JOB", j);
}

int processJobs(struct aeEventLoop *eventLoop, long long id, void *clientData) {
    int period = 100; /* 100 ms default period. */
    int max = 10000;  /* 10k jobs * up to 1000 calls/sec = 10M jobs/sec max. */
    mstime_t now = mstime(), latency;
    skiplistNode *current, *next;
    UNUSED(eventLoop);
    UNUSED(id);
    UNUSED(clientData);

#ifdef DEBUG_SCHEDULER
    static time_t last_log = 0;
    int canlog = 0;
    if (server.port == 25000 && time(NULL) != last_log) {
        last_log = time(NULL);
        canlog = 1;
    }

    if (canlog) printf("--- LEN: %d ---\n",
        (int) skiplistLength(server.awakeme));
#endif

    latencyStartMonitor(latency);
    server.mstime = now; /* Update it since it's used by processJob(). */
    current = server.awakeme->header->level[0].forward;
    while(current && max--) {
        job *j = current->obj;

#ifdef DEBUG_SCHEDULER
        if (canlog) {
            printf("%.*s %d (in %d) [%s]\n",
                JOB_ID_LEN, j->id,
                (int) j->awakeme,
                (int) (j->awakeme-server.mstime),
                jobStateToString(j->state));
        }
#endif

        if (j->awakeme > now) break;
        next = current->level[0].forward;
        processJob(j);
        current = next;
    }

    /* Try to block between 1 and 100 milliseconds depending on how near
     * in time the next async event to process is. Note that because of
     * received commands or state changes jobs may be modified, so we set
     * a max period of 100 milliseconds to wake up anyway. */
    current = server.awakeme->header->level[0].forward;
    if (current) {
        job *j = current->obj;
        period = j->awakeme-server.mstime;
        if (period < 1) period = 1;
        else if (period > 100) period = 100;
    }
    latencyEndMonitor(latency);
    latencyAddSampleIfNeeded("jobs-processing",latency);

#ifdef DEBUG_SCHEDULER
    if (canlog) printf("---\n\n");
#endif

    return period;
}

/* --------------------------- Jobs serialization -------------------------- */

/* Serialize an SDS string as a little endian 32 bit count followed
 * by the bytes representing the string. The serialized string is
 * written to the memory pointed to by 'p'. The return value of the function
 * is the original 'p' advanced by 4 + sdslen(s) bytes, in order to
 * be ready to store the next value to serialize. */
char *serializeSdsString(char *p, sds s) {
    size_t len = s ? sdslen(s) : 0;
    uint32_t count = intrev32ifbe(len);

    memcpy(p,&count,sizeof(count));
    if (s) memcpy(p+sizeof(count),s,len);
    return p + sizeof(count) + len;
}
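/* Editor's note: an illustrative sketch, not part of the original source.
 * The framing produced by serializeSdsString() for the 5 byte string
 * "hello" is, byte by byte:
 *
 *   05 00 00 00 68 65 6c 6c 6f
 *   \---------/ \------------/
 *    uint32 len   string bytes
 *    (little endian)
 *
 * A NULL sds is serialized as just "00 00 00 00", exactly like an empty
 * string, so the receiving side treats both cases the same way. */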
/* Serialize the job pointed to by 'j', appending the serialized version of
 * the job to the passed SDS string 'jobs'.
 *
 * The serialization may be performed in two slightly different ways
 * depending on the 'sertype' argument:
 *
 * If the type is SER_MESSAGE the expire time field is serialized using
 * the relative TTL still remaining for the job. This serialization format
 * is suitable for sending messages to other nodes that may have non
 * synchronized clocks. If instead SER_STORAGE is used as type, the expire
 * time field is serialized using an absolute unix time (as it is normally
 * in the job structure representation). This makes the job suitable to be
 * loaded at a later time from disk, and is used in order to emit
 * LOADJOB commands in the AOF file.
 *
 * Moreover if SER_MESSAGE is used, the JOB_FLAG_DELIVERED flag is cleared
 * before the serialization, since this is a local node flag and should not
 * be propagated.
 *
 * When the job is deserialized with the deserializeJob() function call, the
 * appropriate type must be passed, depending on how the job was serialized.
 *
 * Serialization format
 * --------------------
 *
 * len | struct | queuename | job | nodes
 *
 * len: The first 4 bytes are a little endian 32 bit unsigned
 * integer that announces the full size of the serialized job.
 *
 * struct: JOB_STRUCT_SER_LEN bytes of the 'job' structure
 * with fields fixed to be little endian regardless of the arch of the
 * system.
 *
 * queuename: uint32_t little endian len + actual bytes of the queue
 * name string.
 *
 * job: uint32_t little endian len + actual bytes of the job body.
 *
 * nodes: List of nodes that may have a copy of the message. uint32_t
 * little endian with the count of N node names following. Then N
 * fixed length node names of CLUSTER_NAMELEN characters each.
 *
 * The message is concatenated to the existing sds string 'jobs'.
 * Just use sdsempty() as first argument to get a single job serialized.
 *
 * ----------------------------------------------------------------------
 *
 * Since each job has a prefixed length it is possible to glue multiple
 * jobs one after the other in a single string. */
sds serializeJob(sds jobs, job *j, int sertype) {
    size_t len;
    struct job *sj;
    char *p, *msg;
    uint32_t count;

    /* Compute the total length of the serialized job. */
    len = 4;                   /* Prefixed length of the serialized bytes. */
    len += JOB_STRUCT_SER_LEN; /* Structure header directly serializable. */
    len += 4;                  /* Queue name length field. */
    len += j->queue ? sdslen(j->queue->ptr) : 0; /* Queue name bytes. */
    len += 4;                  /* Body length field. */
    len += j->body ? sdslen(j->body) : 0; /* Body bytes. */
    len += 4;                  /* Node IDs (that may have a copy) count. */
    len += dictSize(j->nodes_delivered) * CLUSTER_NAMELEN;

    /* Make room at the end of the SDS buffer to hold our message. */
    jobs = sdsMakeRoomFor(jobs,len);
    msg = jobs + sdslen(jobs); /* Concatenate to the end of buffer. */
    sdsIncrLen(jobs,len); /* Adjust SDS string final length. */

    /* Total serialized length prefix, not including the length itself. */
    count = intrev32ifbe(len-4);
    memcpy(msg,&count,sizeof(count));

    /* The serializable part of the job structure is copied, and fields
     * fixed to be little endian (no op in little endian CPUs). */
    sj = (job*) (msg+4);
    memcpy(sj,j,JOB_STRUCT_SER_LEN);
    memrev16ifbe(&sj->repl);
    memrev64ifbe(&sj->ctime);
    /* Use a relative expire time for serialization, but only for the
     * type SER_MESSAGE. When we target storage, it's better to use
     * absolute times in every field. */
    if (sertype == SER_MESSAGE) {
        if (sj->etime >= server.unixtime)
            sj->etime = sj->etime - server.unixtime + 1;
        else
            sj->etime = 1;
        sj->flags &= ~JOB_FLAG_DELIVERED;
    }
    memrev32ifbe(&sj->etime);
    memrev32ifbe(&sj->delay);
    memrev32ifbe(&sj->retry);
    memrev16ifbe(&sj->num_nacks);
    memrev16ifbe(&sj->num_deliv);

    /* p now points to the start of the variable part of the serialization. */
    p = msg + 4 + JOB_STRUCT_SER_LEN;

    /* Queue name is 4 bytes prefixed len in little endian + actual bytes. */
    p = serializeSdsString(p,j->queue ? j->queue->ptr : NULL);

    /* Body is 4 bytes prefixed len in little endian + actual bytes. */
    p = serializeSdsString(p,j->body);

    /* Node IDs that may have a copy of the message: 4 bytes count in little
     * endian plus (count * CLUSTER_NAMELEN) bytes. */
    count = intrev32ifbe(dictSize(j->nodes_delivered));
    memcpy(p,&count,sizeof(count));
    p += sizeof(count);

    dictIterator *di = dictGetSafeIterator(j->nodes_delivered);
    dictEntry *de;
    while((de = dictNext(di)) != NULL) {
        clusterNode *node = dictGetVal(de);
        memcpy(p,node->name,CLUSTER_NAMELEN);
        p += CLUSTER_NAMELEN;
    }
    dictReleaseIterator(di);

    /* Make sure we wrote exactly the intended number of bytes. */
    serverAssert(len == (size_t)(p-msg));
    return jobs;
}
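/* Editor's note: a worked example, not part of the original source, assuming
 * CLUSTER_NAMELEN is 40 as in the Redis Cluster naming scheme. For a job
 * with a 5 byte queue name, a 12 byte body and 3 nodes in nodes_delivered,
 * serializeJob() produces:
 *
 *   4 (len prefix) + 68 (JOB_STRUCT_SER_LEN, fixed header)
 *   + 4 + 5    (queue name)
 *   + 4 + 12   (body)
 *   + 4 + 3*40 (node IDs)
 *   = 221 bytes total, with the length prefix storing 221-4 = 217. */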
/* Deserialize a job serialized with serializeJob(). Note that this only
 * deserializes the first job even if the input buffer contains multiple
 * jobs, but it stores the pointer to the next job (if any) into
 * '*next'. If there are no more jobs, '*next' is set to NULL.
 * '*next' is not updated if 'next' is a NULL pointer.
 *
 * The return value is the job structure populated with all the fields
 * present in the serialized structure. On deserialization error (wrong
 * format) NULL is returned.
 *
 * Arguments: 'p' is the pointer to the start of the job (the 4 bytes
 * where the job serialized length is stored), while 'len' is the total
 * number of bytes the buffer contains (that may be larger than the
 * serialized job 'p' is pointing to).
 *
 * The 'sertype' argument specifies the serialization type the job was
 * serialized with by the serializeJob() call.
 *
 * When the serialization type is SER_STORAGE, the job state is loaded
 * as it is, otherwise when SER_MESSAGE is used, the job state is set
 * to JOB_STATE_ACTIVE.
 *
 * In both cases the gc retry field is reset to 0. */
job *deserializeJob(unsigned char *p, size_t len, unsigned char **next, int sertype) {
    job *j = zcalloc(sizeof(*j));
    unsigned char *start = p; /* To check total processed bytes later. */
    uint32_t joblen, aux;

    /* Min len is: 4 (joblen) + JOB_STRUCT_SER_LEN + 4 (queue name len) +
     * 4 (body len) + 4 (Node IDs count) */
    if (len < 4+JOB_STRUCT_SER_LEN+4+4+4) goto fmterr;

    /* Get the total length. */
    memcpy(&joblen,p,sizeof(joblen));
    p += sizeof(joblen);
    len -= sizeof(joblen);
    joblen = intrev32ifbe(joblen);
    if (len < joblen) goto fmterr;

    /* Deserialize the static part just copying and fixing endianness. */
    memcpy(j,p,JOB_STRUCT_SER_LEN);
    memrev16ifbe(&j->repl);
    memrev64ifbe(&j->ctime);
    memrev32ifbe(&j->etime);
    if (sertype == SER_MESSAGE) {
        /* Convert back to an absolute time if needed. */
        j->etime = server.unixtime + j->etime;
    }
    memrev32ifbe(&j->delay);
    memrev32ifbe(&j->retry);
    memrev16ifbe(&j->num_nacks);
    memrev16ifbe(&j->num_deliv);
    p += JOB_STRUCT_SER_LEN;
    len -= JOB_STRUCT_SER_LEN;

    /* GC attempts are always reset, while the state will likely be set by
     * the caller; otherwise, we assume the job is active if this message
     * was received from another node. When loading a message from disk
     * instead (SER_STORAGE serialization type), the state is left
     * untouched. */
    if (sertype == SER_MESSAGE) j->state = JOB_STATE_ACTIVE;
    j->gc_retry = 0;

    /* Compute the next queue time from the known parameters. */
    if (j->retry) {
        j->flags |= JOB_FLAG_BCAST_WILLQUEUE;
        j->qtime = server.mstime +
                   j->delay*1000 +
                   j->retry*1000 +
                   randomTimeError(DISQUE_TIME_ERR);
    } else {
        j->qtime = 0;
    }

    /* Queue name. */
    memcpy(&aux,p,sizeof(aux));
    p += sizeof(aux);
    len -= sizeof(aux);
    aux = intrev32ifbe(aux);

    if (len < aux) goto fmterr;
    j->queue = createStringObject((char*)p,aux);
    p += aux;
    len -= aux;

    /* Job body. */
    memcpy(&aux,p,sizeof(aux));
    p += sizeof(aux);
    len -= sizeof(aux);
    aux = intrev32ifbe(aux);

    if (len < aux) goto fmterr;
    j->body = sdsnewlen(p,aux);
    p += aux;
    len -= aux;

    /* Node IDs. */
    memcpy(&aux,p,sizeof(aux));
    p += sizeof(aux);
    len -= sizeof(aux);
    aux = intrev32ifbe(aux);

    if (len < aux*CLUSTER_NAMELEN) goto fmterr;
    j->nodes_delivered = dictCreate(&clusterNodesDictType,NULL);
    while(aux--) {
        clusterNode *node = clusterLookupNode((char*)p);
        if (node) dictAdd(j->nodes_delivered,node->name,node);
        p += CLUSTER_NAMELEN;
        len -= CLUSTER_NAMELEN;
    }

    if ((uint32_t)(p-start)-sizeof(joblen) != joblen) goto fmterr;
    if (next) *next = len ? p : NULL;
    return j;

fmterr:
    freeJob(j);
    return NULL;
}

/* This function is called when the job with the ID of 'j' may be duplicated
 * and we likely already have the job, but we want to update the list of
 * nodes that may have the message by taking the union of our list with
 * the list of the job 'j'. */
void updateJobNodes(job *j) {
    job *old = lookupJob(j->id);
    if (!old) return;

    dictIterator *di = dictGetIterator(j->nodes_delivered);
    dictEntry *de;
    while((de = dictNext(di)) != NULL) {
        clusterNode *node = dictGetVal(de);
        dictAdd(old->nodes_delivered,node->name,node);
    }
    dictReleaseIterator(di);
}

/* ------------------------- Jobs cluster functions ------------------------ */

/* This function sends a DELJOB message to all the nodes that may have
 * a copy of the job, in order to trigger the deletion of the job.
 * It is used when an ADDJOB command times out, to unregister (in a best
 * effort way, without guarantees) the job, and in the ACKs garbage
 * collection procedure.
 *
 * This function also unregisters and releases the job from the local
 * node.
 *
 * The function is best effort, and does not *guarantee* that after it gets
 * called no copy of the job is found on the cluster. It just attempts to
 * avoid useless multiple deliveries, and to free the memory of jobs that
 * are already processed or that were never confirmed to the producer. */
void deleteJobFromCluster(job *j) {
    clusterBroadcastDelJob(j);
    unregisterJob(j);
    freeJob(j);
}

/* ---------------------------- Utility functions -------------------------- */

/* Validate a set of job IDs. Return C_OK if all the IDs are valid,
 * otherwise C_ERR is returned.
 *
 * When C_ERR is returned, an error is sent to the client 'c' if not
 * NULL. */
int validateJobIDs(client *c, robj **ids, int count) {
    int j;

    /* Mass-validate the Job IDs, so if we have to stop with an error,
     * nothing at all is processed. */
    for (j = 0; j < count; j++) {
        if (validateJobIdOrReply(c,ids[j]->ptr,sdslen(ids[j]->ptr)) == C_ERR)
            return C_ERR;
    }
    return C_OK;
}

/* ---------------------------------- AOF ---------------------------------- */

/* Emit a LOADJOB command into the AOF, which is used explicitly to load
 * serialized jobs from disk: LOADJOB <serialized-job>. */
void AOFLoadJob(job *job) {
    if (server.aof_state == AOF_OFF) return;

    sds serialized = serializeJob(sdsempty(),job,SER_STORAGE);
    robj *serobj = createObject(OBJ_STRING,serialized);
    robj *argv[2] = {shared.loadjob, serobj};
    feedAppendOnlyFile(argv,2);
    decrRefCount(serobj);
}
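/* Editor's note: an illustrative sketch, not part of the original source.
 * The AOF of a job that is added, replicated and finally acknowledged on
 * this node would contain something like:
 *
 *   LOADJOB <serialized-job>   (emitted by AOFLoadJob() when the job is
 *                               queued or its replication is achieved)
 *   DELJOB D-...               (emitted by AOFAckJob() via AOFDelJob()
 *                               when the job is acknowledged)
 *
 * so replaying the file leaves no trace of the completed job, while a
 * crash between the two commands makes the job load again as active,
 * which at worst causes an extra delivery, never a lost job. */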
/* Emit a DELJOB command into the AOF. This function is called in the
 * following two cases:
 *
 * 1) As a side effect of the job being acknowledged, when AOFAckJob()
 *    is called.
 * 2) When the server evicts a job from memory, but only if the state is
 *    one of active or queued. Jobs not yet replicated are not written
 *    into the AOF, so there is no need to send a DELJOB, while already
 *    acknowledged jobs are handled by point "1". */
void AOFDelJob(job *job) {
    if (server.aof_state == AOF_OFF) return;

    robj *jobid = createStringObject(job->id,JOB_ID_LEN);
    robj *argv[2] = {shared.deljob, jobid};
    feedAppendOnlyFile(argv,2);
    decrRefCount(jobid);
}

/* Emit a DELJOB command, since this is how we handle acknowledged jobs from
 * the point of view of AOF. We are not interested in loading back
 * acknowledged jobs, nor do we include them in AOF rewrites, since ACKs
 * garbage collection works anyway if nodes forget about ACKs, and dropping
 * ACKs is not a safety violation: it may just result in multiple deliveries
 * of the same message.
 *
 * However we keep the API separated, so it will be simple if we change our
 * mind or we want to have a feature to persist ACKs. */
void AOFAckJob(job *job) {
    if (server.aof_state == AOF_OFF) return;
    AOFDelJob(job);
}

/* The LOADJOB command is emitted in the AOF to load serialized jobs at
 * restart, and is only processed while loading AOFs. Clients calling this
 * command get an error. */
void loadjobCommand(client *c) {
    if (!(c->flags & CLIENT_AOF_CLIENT)) {
        addReplyError(c,"LOADJOB is a special command only processed from AOF");
        return;
    }
    job *job = deserializeJob(c->argv[1]->ptr,sdslen(c->argv[1]->ptr),NULL,SER_STORAGE);

    /* We expect to be able to read back what we serialized. */
    if (job == NULL) {
        serverLog(LL_WARNING,
            "Unrecoverable error loading AOF: corrupted LOADJOB data.");
        exit(1);
    }

    int enqueue_job = 0;
    if (job->state == JOB_STATE_QUEUED) {
        if (server.aof_enqueue_jobs_once) enqueue_job = 1;
        job->state = JOB_STATE_ACTIVE;
    }

    /* Check if the job expired before registering it. */
    if (job->etime <= server.unixtime) {
        freeJob(job);
        return;
    }

    /* Register the job, and if needed enqueue it: we put jobs back into
     * queues only if the enqueue-jobs-at-next-restart option is set, that
     * is, when a controlled restart happens. */
    if (registerJob(job) == C_OK && enqueue_job)
        enqueueJob(job,0);
}

/* -------------------------- Jobs related commands ------------------------ */

/* This is called by unblockClient() to perform the cleanup of a client
 * blocked by ADDJOB. Never call it directly, call unblockClient()
 * instead. */
void unblockClientWaitingJobRepl(client *c) {
    /* If the job is still waiting for synchronous replication, but the
     * client waiting for it gets freed or reaches the timeout, we unblock
     * the client and forget about the job. */
    if (c->bpop.job->state == JOB_STATE_WAIT_REPL) {
        /* Set the job as active before calling deleteJobFromCluster() since
         * otherwise unregistering the job will, in turn, unblock the client,
         * which we are already doing here. */
        c->bpop.job->state = JOB_STATE_ACTIVE;
        deleteJobFromCluster(c->bpop.job);
    }
    c->bpop.job = NULL;
}

/* Return a simple string reply with the Job ID. */
void addReplyJobID(client *c, job *j) {
    addReplyStatusLength(c,j->id,JOB_ID_LEN);
}

/* This function is called by cluster.c when the job was replicated
 * and the replication acknowledged at least job->repl times.
 *
 * Here we need to queue the job, and unblock the client waiting for the job
 * if it still exists.
 *
 * This function is only called if the job is in JOB_STATE_WAIT_REPL.
 * The function can also assume that there is a client waiting to be
 * unblocked if this function is called, since if the blocked client is
 * released, the job is deleted (and a best effort attempt is made to remove
 * copies from other nodes), to avoid, when possible, that non acknowledged
 * jobs remain active.
 *
 * Return value: if the job is retained after the function is called
 * (normal replication) then C_OK is returned. Otherwise, if the
 * function removes the job from the node, since the job is externally
 * replicated, C_ERR is returned, in order to signal to the caller that
 * further accesses to the job are not allowed. */
int jobReplicationAchieved(job *j) {
    serverLog(LL_VERBOSE,"Replication ACHIEVED %.*s",JOB_ID_LEN,j->id);

    /* Change the job state to active. This is critical to avoid the job
     * being freed by unblockClient() if found still in the old state. */
    j->state = JOB_STATE_ACTIVE;

    /* Reply to the blocked client with the Job ID and unblock the client. */
    client *c = jobGetAssociatedValue(j);
    setJobAssociatedValue(j,NULL);
    addReplyJobID(c,j);
    unblockClient(c);

    /* If the job was externally replicated, send a QUEUE message to one of
     * the nodes that acknowledged to have a copy, and forget about it
     * ASAP. */
    if (dictFind(j->nodes_delivered,myself->name) == NULL) {
        dictEntry *de = dictGetRandomKey(j->nodes_confirmed);
        if (de) {
            clusterNode *n = dictGetVal(de);
            clusterSendEnqueue(n,j,j->delay);
        }
        unregisterJob(j);
        freeJob(j);
        return C_ERR;
    }

    /* If set, cleanup nodes_confirmed to free memory. We'll reuse this
     * hash table again for ACKs tracking in order to garbage collect the
     * job once processed. */
    if (j->nodes_confirmed) {
        dictRelease(j->nodes_confirmed);
        j->nodes_confirmed = NULL;
    }

    /* Queue the job locally. */
    if (j->delay == 0)
        enqueueJob(j,0); /* Will change the job state. */
    else
        updateJobAwakeTime(j,0); /* Queue with delay. */

    AOFLoadJob(j);
    return C_OK;
}

/* This function is called periodically by clientsCron(). Its goal is to
 * check if a client blocked waiting for a job synchronous replication
 * is taking too much time, and in that case to add a new node to the set
 * of nodes contacted in order to replicate the job. This way, if some of
 * the nodes initially contacted are not reachable, are slow, or are out of
 * memory (and so are not accepting our job), we have a chance to make the
 * ADDJOB call succeed using other nodes.
 *
 * The function always returns 0 since it never terminates the client. */
#define DELAYED_JOB_ADD_NODE_MIN_PERIOD 50 /* 50 milliseconds. */
int clientsCronHandleDelayedJobReplication(client *c) {
    /* Return ASAP if this client is not blocked for job replication. */
    if (!(c->flags & CLIENT_BLOCKED) || c->btype != BLOCKED_JOB_REPL)
        return 0;

    /* Note that clientsCronHandleDelayedJobReplication() is called after
     * refreshing server.mstime, so there is no need to call mstime() again
     * here, we can use the cached value. However we use a fresh timestamp
     * if we have to set added_node_time again. */
    mstime_t elapsed = server.mstime - c->bpop.added_node_time;
    if (elapsed >= DELAYED_JOB_ADD_NODE_MIN_PERIOD) {
        if (clusterReplicateJob(c->bpop.job, 1, 0) > 0)
            c->bpop.added_node_time = mstime();
    }
    return 0;
}
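/* Editor's note: a worked example, not part of the original source. With
 * DELAYED_JOB_ADD_NODE_MIN_PERIOD at 50 ms, a client blocked on
 * "ADDJOB myqueue body 1000 REPLICATE 3" that is not receiving its two
 * remote acknowledgments contacts one additional candidate node roughly
 * every 50 ms, so within the 1000 ms timeout up to ~20 extra nodes may be
 * tried before the command finally fails with a timeout. */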
/* ADDJOB queue job timeout [REPLICATE <count>] [TTL <sec>] [RETRY <sec>]
 *        [DELAY <sec>] [MAXLEN <count>] [ASYNC]
 *
 * The function changes replication strategy if the memory warning level
 * is greater than zero.
 *
 * When there is no memory pressure:
 * 1) A copy of the job is replicated locally.
 * 2) The job is queued locally.
 * 3) W-1 copies of the job are replicated to other nodes, synchronously
 *    or asynchronously if ASYNC is provided.
 *
 * When there is memory pressure:
 * 1) The job is replicated only to W external nodes.
 * 2) The job is queued to a random external node sending a QUEUE message.
 * 3) QUEUE is sent ASAP for asynchronous jobs, while for synchronous jobs
 *    QUEUE is sent by jobReplicationAchieved() to one of the nodes that
 *    acknowledged to have a copy of the job.
 * 4) The job is discarded by the local node ASAP, that is, when the
 *    selected replication level is achieved or before returning to
 *    the caller for asynchronous jobs. */
void addjobCommand(client *c) {
    long long replicate = server.cluster->size > 3 ? 3 : server.cluster->size;
    long long ttl = 3600*24;
    long long retry = -1;
    long long delay = 0;
    long long maxlen = 0; /* Max queue length for the job to be accepted. */
    mstime_t timeout;
    int j, retval;
    int async = 0;  /* Asynchronous request? */
    int extrepl = getMemoryWarningLevel() > 0; /* Replicate externally? */
    int leaving = myselfLeaving();
    static uint64_t prev_ctime = 0;

    /* Another case for external replication, other than memory pressure, is
     * if this node is leaving the cluster. In this case we don't want to
     * create new messages here. */
    if (leaving) extrepl = 1;

    /* Parse args. */
    for (j = 4; j < c->argc; j++) {
        char *opt = c->argv[j]->ptr;
        int lastarg = j == c->argc-1;
        if (!strcasecmp(opt,"replicate") && !lastarg) {
            retval = getLongLongFromObject(c->argv[j+1],&replicate);
            if (retval != C_OK || replicate <= 0 || replicate > 65535) {
                addReplyError(c,"REPLICATE must be between 1 and 65535");
                return;
            }
            j++;
        } else if (!strcasecmp(opt,"ttl") && !lastarg) {
            retval = getLongLongFromObject(c->argv[j+1],&ttl);
            if (retval != C_OK || ttl <= 0) {
                addReplyError(c,"TTL must be a number > 0");
                return;
            }
            j++;
        } else if (!strcasecmp(opt,"retry") && !lastarg) {
            retval = getLongLongFromObject(c->argv[j+1],&retry);
            if (retval != C_OK || retry < 0) {
                addReplyError(c,"RETRY time must be a non negative number");
                return;
            }
            j++;
        } else if (!strcasecmp(opt,"delay") && !lastarg) {
            retval = getLongLongFromObject(c->argv[j+1],&delay);
            if (retval != C_OK || delay < 0) {
                addReplyError(c,"DELAY time must be a non negative number");
                return;
            }
            j++;
        } else if (!strcasecmp(opt,"maxlen") && !lastarg) {
            retval = getLongLongFromObject(c->argv[j+1],&maxlen);
            if (retval != C_OK || maxlen <= 0) {
                addReplyError(c,"MAXLEN must be a positive number");
                return;
            }
            j++;
        } else if (!strcasecmp(opt,"async")) {
            async = 1;
        } else {
            addReply(c,shared.syntaxerr);
            return;
        }
    }

    /* Parse the timeout argument. */
    if (getTimeoutFromObjectOrReply(c,c->argv[3],&timeout,UNIT_MILLISECONDS)
        != C_OK) return;

    /* REPLICATE > 1 and RETRY set to 0 does not make sense: why replicate
     * the job if it will never be re-queued in case the job processing
     * is not acknowledged? */
    if (replicate > 1 && retry == 0) {
        addReplyError(c,"With RETRY set to 0 please explicitly set "
                        "REPLICATE to 1 (at-most-once delivery)");
        return;
    }

    /* DELAY greater or equal to TTL is silly. */
    if (delay >= ttl) {
        addReplyError(c,"The specified DELAY is greater than TTL. Job refused "
                        "since it would never be delivered");
        return;
    }

    /* When retry is not specified, it defaults to 1/10 of the TTL, with
     * a hard limit of JOB_DEFAULT_RETRY_MAX seconds (5 minutes normally). */
    if (retry == -1) {
        retry = ttl/10;
        if (retry > JOB_DEFAULT_RETRY_MAX) retry = JOB_DEFAULT_RETRY_MAX;
        if (retry == 0) retry = 1;
    }

    /* Check if REPLICATE can't be honoured at all. */
    int additional_nodes = extrepl ? replicate : replicate-1;
    if (additional_nodes > server.cluster->reachable_nodes_count) {
        if (extrepl &&
            additional_nodes-1 == server.cluster->reachable_nodes_count)
        {
            addReplySds(c,
                sdscatprintf(sdsempty(),
                    "-NOREPL Not enough reachable nodes "
                    "for the requested replication level, since I'm unable "
                    "to hold a copy of the message for the following "
                    "reason: %s\r\n",
                    leaving ? "I'm leaving the cluster" :
                              "I'm out of memory"));
        } else {
            addReplySds(c,
                sdsnew("-NOREPL Not enough reachable nodes "
                       "for the requested replication level\r\n"));
        }
        return;
    }

    /* Lookup the queue by name, in order to perform the checks for
     * MAXLEN and for a paused queue. */
    queue *q = lookupQueue(c->argv[1]);

    /* If MAXLEN was specified, check that the local queue length is
     * within the requested limits. */
    if (maxlen && q && queueLength(q) >= (unsigned long) maxlen) {
        addReplySds(c,
            sdsnew("-MAXLEN Queue is already longer than "
                   "the specified MAXLEN count\r\n"));
        return;
    }

    /* If the queue is paused in input, refuse the job. */
    if (q && q->flags & QUEUE_FLAG_PAUSED_IN) {
        addReplySds(c,
            sdsnew("-PAUSED Queue paused in input, try later\r\n"));
        return;
    }

    /* Are we going to discard the local copy before returning to the caller?
     * This happens when the job is at the same time asynchronously
     * replicated AND, because of the memory warning level, we are going to
     * replicate externally without taking a copy. */
    int discard_local_copy = async && extrepl;

    /* Create a new job. */
    job *job = createJob(NULL,JOB_STATE_WAIT_REPL,ttl,retry);
    job->queue = c->argv[1];
    incrRefCount(c->argv[1]);
    job->repl = replicate;

    /* If no external replication is used, add myself to the list of nodes
     * that have a copy of the job. */
    if (!extrepl)
        dictAdd(job->nodes_delivered,myself->name,myself);

    /* Job ctime is milliseconds * 1000000. Jobs created in the same
     * millisecond get an incremental ctime. The ctime is used to sort
     * queues, so we have some weak sorting semantics for jobs: non-requeued
     * jobs are delivered roughly in the order they are added into a given
     * node. */
    job->ctime = mstime()*1000000;
    if (job->ctime <= prev_ctime) job->ctime = prev_ctime+1;
    prev_ctime = job->ctime;

    job->etime = server.unixtime + ttl;
    job->delay = delay;
    job->retry = retry;
    job->body = sdsdup(c->argv[2]->ptr);

    /* Set the next time the job will be queued. Note that once we call
     * enqueueJob() the first time, this will be set to 0 (never queue
     * again) for jobs that have a zero retry value (at most once jobs). */
    if (delay) {
        job->qtime = server.mstime + delay*1000;
    } else {
        /* This will be updated anyway by enqueueJob(). */
        job->qtime = server.mstime + retry*1000;
    }

    /* Register the job locally, unless we are going to remove it locally. */
    if (!discard_local_copy && registerJob(job) == C_ERR) {
        /* A job ID with the same name? Practically impossible but
         * let's handle it to trap possible bugs in a cleaner way. */
        serverLog(LL_WARNING,"ID already existing in ADDJOB command!");
        freeJob(job);
        addReplyError(c,"Internal error creating the job, check server logs");
        return;
    }

    /* For replicated messages where the ASYNC option was not given, block
     * the client and wait for the acks. Otherwise, if no synchronous
     * replication is used or the ASYNC option was enabled, we just queue
     * the job and return to the client ASAP.
     *
     * Note that for REPLICATE > 1 and ASYNC the replication process is
     * best effort. */
    if (replicate > 1 && !async) {
        c->bpop.timeout = timeout;
        c->bpop.job = job;
        c->bpop.added_node_time = mstime();
        blockClient(c,BLOCKED_JOB_REPL);
        setJobAssociatedValue(job,c);
        /* Create the nodes_confirmed dictionary only if we actually need
         * it for synchronous replication. It will be released later
         * when we move away from JOB_STATE_WAIT_REPL. */
        job->nodes_confirmed = dictCreate(&clusterNodesDictType,NULL);
        /* Confirm itself as an acknowledged receiver if this node will
         * retain a copy of the job. */
        if (!extrepl) dictAdd(job->nodes_confirmed,myself->name,myself);
    } else {
        if (job->delay == 0) {
            if (!extrepl) enqueueJob(job,0); /* Will change the job state. */
        } else {
            /* Delayed jobs that don't wait for replication can move
             * forward to ACTIVE state ASAP, and get scheduled for
             * queueing. */
            job->state = JOB_STATE_ACTIVE;
            if (!discard_local_copy) updateJobAwakeTime(job,0);
        }
        addReplyJobID(c,job);
        if (!extrepl) AOFLoadJob(job);
    }

    /* If the replication factor is > 1, send REPLJOB messages to
     * REPLICATE-1 nodes. */
    if (additional_nodes > 0)
        clusterReplicateJob(job, additional_nodes, async);

    /* If the job is asynchronously and externally replicated at the same
     * time, send an ENQUEUE message ASAP to one random node, and delete the
     * job from this node right now. */
    if (discard_local_copy) {
        dictEntry *de = dictGetRandomKey(job->nodes_delivered);
        if (de) {
            clusterNode *n = dictGetVal(de);
            clusterSendEnqueue(n,job,job->delay);
        }
        /* We don't have to unregister the job since we did not register
         * it if it's async + extrepl. */
        freeJob(job);
    }
}

/* Client reply function for SHOW and JSCAN. */
void addReplyJobInfo(client *c, job *j) {
    addReplyMultiBulkLen(c,30);
    addReplyBulkCString(c,"id");
    addReplyBulkCBuffer(c,j->id,JOB_ID_LEN);
    addReplyBulkCString(c,"queue");
    if (j->queue)
        addReplyBulk(c,j->queue);
    else
        addReply(c,shared.nullbulk);
    addReplyBulkCString(c,"state");
    addReplyBulkCString(c,jobStateToString(j->state));
    addReplyBulkCString(c,"repl");
    addReplyLongLong(c,j->repl);

    int64_t ttl = j->etime - time(NULL);
    if (ttl < 0) ttl = 0;
    addReplyBulkCString(c,"ttl");
    addReplyLongLong(c,ttl);

    addReplyBulkCString(c,"ctime");
    addReplyLongLong(c,j->ctime);
    addReplyBulkCString(c,"delay");
    addReplyLongLong(c,j->delay);
    addReplyBulkCString(c,"retry");
    addReplyLongLong(c,j->retry);
    addReplyBulkCString(c,"nacks");
    addReplyLongLong(c,j->num_nacks);
    addReplyBulkCString(c,"additional-deliveries");
    addReplyLongLong(c,j->num_deliv);

    addReplyBulkCString(c,"nodes-delivered");
    if (j->nodes_delivered) {
        addReplyMultiBulkLen(c,dictSize(j->nodes_delivered));
        dictForeach(j->nodes_delivered,de)
            addReplyBulkCBuffer(c,dictGetKey(de),CLUSTER_NAMELEN);
        dictEndForeach
    } else {
        addReplyMultiBulkLen(c,0);
    }

    addReplyBulkCString(c,"nodes-confirmed");
    if (j->nodes_confirmed) {
        addReplyMultiBulkLen(c,dictSize(j->nodes_confirmed));
        dictForeach(j->nodes_confirmed,de)
            addReplyBulkCBuffer(c,dictGetKey(de),CLUSTER_NAMELEN);
        dictEndForeach
    } else {
        addReplyMultiBulkLen(c,0);
    }

    mstime_t next_requeue = j->qtime - mstime();
    if (next_requeue < 0) next_requeue = 0;
    addReplyBulkCString(c,"next-requeue-within");
    if (j->qtime == 0)
        addReply(c,shared.nullbulk);
    else
        addReplyLongLong(c,next_requeue);

    mstime_t next_awake = j->awakeme - mstime();
    if (next_awake < 0) next_awake = 0;
    addReplyBulkCString(c,"next-awake-within");
    if (j->awakeme == 0)
        addReply(c,shared.nullbulk);
    else
        addReplyLongLong(c,next_awake);

    addReplyBulkCString(c,"body");
    if (j->body)
        addReplyBulkCBuffer(c,j->body,sdslen(j->body));
    else
        addReply(c,shared.nullbulk);
}

/* SHOW <job-id> */
void showCommand(client *c) {
    if (validateJobIdOrReply(c,c->argv[1]->ptr,sdslen(c->argv[1]->ptr))
        == C_ERR) return;
    job *j = lookupJob(c->argv[1]->ptr);
    if (!j) {
        addReply(c,shared.nullbulk);
        return;
    }
    addReplyJobInfo(c,j);
}
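/* Editor's note: an illustrative client session, not part of the original
 * source; the job ID shown is made up. From the point of view of a client
 * addjobCommand() and showCommand() behave like this:
 *
 *   > ADDJOB myqueue "hello" 0 REPLICATE 2 TTL 300
 *   D-f1a2b3c4-aaaaaaaaaaaaaaaaaaaaaaaa-0005
 *   > SHOW D-f1a2b3c4-aaaaaaaaaaaaaaaaaaaaaaaa-0005
 *   (the 30 element field/value reply built by addReplyJobInfo(),
 *    starting with "id" and ending with "body" "hello")
 *
 * Note that the TTL of 300 seconds shows up in the ID suffix as 0005, the
 * TTL in minutes, and that the ADDJOB reply arrives only once the job is
 * replicated to 2 nodes, unless ASYNC is given. */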
/* DELJOB jobid_1 jobid_2 ... jobid_N
 *
 * Evict (and possibly remove from the queue) all the jobs in memory
 * matching the specified job IDs. Jobs are evicted whatever their state
 * is, since this command is mostly used inside the AOF or for debugging
 * purposes.
 *
 * The return value is the number of jobs evicted. */
void deljobCommand(client *c) {
    int j, evicted = 0;

    if (validateJobIDs(c,c->argv+1,c->argc-1) == C_ERR) return;

    /* Perform the appropriate action for each job. */
    for (j = 1; j < c->argc; j++) {
        job *job = lookupJob(c->argv[j]->ptr);
        if (job == NULL) continue;
        unregisterJob(job);
        freeJob(job);
        evicted++;
    }
    addReplyLongLong(c,evicted);
}

/* JSCAN [<cursor>] [COUNT <count>] [BUSYLOOP] [QUEUE <queue>]
 *       [STATE <state1> STATE <state2> ... STATE <stateN>]
 *       [REPLY all|id]
 *
 * The command provides an interface to iterate all the existing jobs in
 * the local node, providing a cursor in the form of an integer that is
 * passed to the next command invocation. During the first call the cursor
 * must be 0, in the next calls the cursor returned in the previous call is
 * used. The iterator guarantees to return all the elements but may return
 * duplicated elements.
 *
 * Options:
 *
 * COUNT <count>  -- A hint about how much work to do per iteration.
 * BUSYLOOP       -- Block and return all the elements in a busy loop.
 * QUEUE <queue>  -- Return only jobs in the specified queue.
 * STATE <state>  -- Return jobs in the specified state.
 *                   Can be used multiple times for a logical OR.
 * REPLY <type>   -- Job reply type. Default is to report just the job
 *                   ID. If "all" is specified the full job state is
 *                   returned like for the SHOW command.
 *
 * The cursor argument can be in any place: the first non matching option
 * that has the valid cursor form of an unsigned number is taken as the
 * cursor. */

/* JSCAN reply type. */
#define JSCAN_REPLY_ID 0  /* Just report the Job ID. */
#define JSCAN_REPLY_ALL 1 /* Reply with the full job info like SHOW. */

/* The structure used to pass the filter options to the callback. */
struct jscanFilter {
    int state[16];  /* Every state to return is set to non-zero. */
    int numstates;  /* Number of states set. 0 = match all states. */
    robj *queue;    /* Queue name, or NULL to return jobs of any queue. */
};

/* Callback for the dictionary scan used by JSCAN. */
void jscanCallback(void *privdata, const dictEntry *de) {
    void **pd = (void**)privdata;
    list *list = pd[0];
    struct jscanFilter *filter = pd[1];
    job *job = dictGetKey(de);

    /* Skip dummy jobs created by the ACK command when the job ID is
     * unknown. */
    if (dictSize(job->nodes_delivered) == 0) return;

    /* Don't add the item if it does not satisfy our filter. */
    if (filter->queue && !equalStringObjects(job->queue,filter->queue))
        return;
    if (filter->numstates && !filter->state[job->state]) return;

    /* Otherwise put the job into the list that will be returned to the
     * client later. */
    listAddNodeTail(list,job);
}

#define JSCAN_DEFAULT_COUNT 100
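/* Editor's note: an illustrative client session, not part of the original
 * source; the cursor value shown is made up. A full scan of the queued
 * jobs of queue "myqueue", fetching the whole job state, looks like:
 *
 *   > JSCAN 0 COUNT 100 QUEUE myqueue STATE queued REPLY all
 *   1) "3332"              (cursor to pass to the next call)
 *   2) (one addReplyJobInfo() style entry per matching job)
 *
 * The scan is complete when the returned cursor is 0 again. The guarantees
 * are the usual dictScan() ones: every element present for the whole
 * duration of the scan is returned at least once, but duplicates are
 * possible. */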
void jscanCommand(client *c) {
    struct jscanFilter filter;
    int busyloop = 0; /* If true return all the jobs in a blocking way. */
    long count = JSCAN_DEFAULT_COUNT;
    long maxiterations;
    unsigned long cursor = 0;
    int cursor_set = 0, j;
    int reply_type = JSCAN_REPLY_ID;

    memset(&filter,0,sizeof(filter));

    /* Parse arguments and the cursor if any. */
    for (j = 1; j < c->argc; j++) {
        int remaining = c->argc - j - 1;
        char *opt = c->argv[j]->ptr;

        if (!strcasecmp(opt,"count") && remaining) {
            if (getLongFromObjectOrReply(c, c->argv[j+1], &count, NULL)
                != C_OK) return;
            j++;
        } else if (!strcasecmp(opt,"busyloop")) {
            busyloop = 1;
        } else if (!strcasecmp(opt,"queue") && remaining) {
            filter.queue = c->argv[j+1];
            j++;
        } else if (!strcasecmp(opt,"state") && remaining) {
            int jobstate = jobStateFromString(c->argv[j+1]->ptr);
            if (jobstate == -1) {
                addReplyError(c,"Invalid job state name");
                return;
            }
            filter.state[jobstate] = 1;
            filter.numstates++;
            j++;
        } else if (!strcasecmp(opt,"reply") && remaining) {
            if (!strcasecmp(c->argv[j+1]->ptr,"id")) {
                reply_type = JSCAN_REPLY_ID;
            } else if (!strcasecmp(c->argv[j+1]->ptr,"all")) {
                reply_type = JSCAN_REPLY_ALL;
            } else {
                addReplyError(c,"Invalid REPLY type, try ID or ALL");
                return;
            }
            j++;
        } else {
            if (cursor_set != 0) {
                addReply(c,shared.syntaxerr);
                return;
            }
            if (parseScanCursorOrReply(c,c->argv[j],&cursor) == C_ERR)
                return;
            cursor_set = 1;
        }
    }

    /* Scan the hash table to retrieve the elements. */
    maxiterations = count*10; /* Put a bound on the work we'll do. */

    /* We pass two pointers to the callback: the list to append matching
     * elements to, and the filter structure, so that the callback will
     * refuse to add non matching elements. */
    void *privdata[2];
    list *list = listCreate();
    privdata[0] = list;
    privdata[1] = &filter;
    do {
        cursor = dictScan(server.jobs,cursor,jscanCallback,privdata);
    } while (cursor &&
             (busyloop || /* If it's a busyloop, don't check iterations & len */
              (maxiterations-- &&
               listLength(list) < (unsigned long)count)));

    /* Provide the reply to the client. */
    addReplyMultiBulkLen(c, 2);
    addReplyBulkLongLong(c,cursor);
    addReplyMultiBulkLen(c, listLength(list));

    listNode *node;
    while ((node = listFirst(list)) != NULL) {
        job *j = listNodeValue(node);
        if (reply_type == JSCAN_REPLY_ID) addReplyJobID(c,j);
        else if (reply_type == JSCAN_REPLY_ALL) addReplyJobInfo(c,j);
        else serverPanic("Unknown JSCAN reply type");
        listDelNode(list, node);
    }
    listRelease(list);
}
disque-1.0-rc1/src/job.h000066400000000000000000000215571264176561100150620ustar00rootroot00000000000000/*
 * Copyright (c) 2014, Salvatore Sanfilippo
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of Disque nor the names of its contributors may be used
 *     to endorse or promote products derived from this software without
 *     specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __DISQUE_JOB_H
#define __DISQUE_JOB_H

/* A Job ID is 40 bytes, check generateJobID() inside job.c for more info. */
#define JOB_ID_LEN 40

/* This represents a Job across the system.
 *
 * The Job ID is the unique identifier of the job, both in the client
 * protocol and in the cluster messages between nodes.
 *
 * Times:
 *
 * When the expire time is reached, the job can be destroyed even if it
 * was not successfully processed. The requeue time is the amount of time
 * that should elapse for this job to be queued again (put into an active
 * queue), if it was not yet processed. The queue time is the unix time at
 * which the job was queued last time.
 *
 * Note that nodes receiving the job from other nodes via REPLJOB messages
 * set their local time as ctime and etime (they recompute the expire time
 * doing etime-ctime in the received fields).
 *
 * List of nodes and ACKs garbage collection:
 *
 * We keep a list of nodes that *may* have the message (nodes_delivered hash),
 * so that the creating node is able to garbage collect ACKs even if not all
 * the nodes in the cluster are reachable, but only the nodes that may have a
 * copy of this job. The list includes nodes that we sent the message to but
 * never received the confirmation, this is why we can have more listed nodes
 * than the 'repl' count.
 *
 * This optimized GC is possible when a client ACKs the message or when we
 * receive a SETACK message from another node. Nodes that have just the
 * ACK but not a copy of the job instead need to follow the usual path for
 * ACKs GC, which requires a confirmation from all the nodes.
 *
 * Body:
 *
 * The body can be anything, including the empty string. Disque is
 * totally content-agnostic. When the 'body' field is set to NULL, the
 * job structure just represents an ACK without other job information.
 * Jobs that are actually just ACKs are created when a client sends a
 * node an ACK about an unknown Job ID, or when a SETACK message is received
 * about an unknown job. */

#define JOB_STATE_WAIT_REPL 0 /* Waiting to be replicated enough times. */
#define JOB_STATE_ACTIVE    1 /* Not acked, not queued, still active job. */
#define JOB_STATE_QUEUED    2 /* Not acked, but queued in this node. */
#define JOB_STATE_ACKED     3 /* Acked, no longer active, to garbage collect.*/

#define JOB_FLAG_BCAST_QUEUED (1<<0) /* Broadcast msg when re-queued. */
#define JOB_FLAG_BCAST_WILLQUEUE (1<<1) /* Broadcast msg before re-queueing. */
#define JOB_FLAG_DELIVERED (1<<2) /* This node delivered this job >= 1 times. */

#define JOB_WILLQUEUE_ADVANCE 500 /* Milliseconds of WILLQUEUE advance. */
#define JOB_GC_RETRY_MIN_PERIOD 1000 /* Initial GC retry time is 1 second. */
#define JOB_GC_RETRY_MAX_PERIOD (60000*3) /* Exponentially up to 3 minutes... */
#define JOB_DEFAULT_RETRY_MAX (60*5) /* Maximum default retry value. */
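/* Editor's note: an illustrative summary, not part of the original source.
 * On the node that created a job, the states above normally progress like
 * this:
 *
 *   WAIT_REPL --(replication acks received)--> ACTIVE
 *   ACTIVE    --(qtime reached)--------------> QUEUED
 *   QUEUED    --(fetched by a consumer)------> ACTIVE (re-queued on retry)
 *   any state --(ACKJOB / SETACK)------------> ACKED  --(GC)--> freed
 *
 * WAIT_REPL jobs are invisible to consumers, and ACKED jobs exist only to
 * drive the garbage collection of the remaining copies. */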
/* In the job structure we have a counter for the GC attempts. The only use
 * for this is to calculate an exponential time for the retry, starting from
 * JOB_GC_RETRY_MIN_PERIOD but without exceeding JOB_GC_RETRY_MAX_PERIOD.
 * However we don't want the counter to overflow, so after reaching
 * JOB_GC_RETRY_COUNT_MAX we don't count anymore, since we already reached
 * the maximum retry period for sure (2^10 multiplied by the MIN value). */
#define JOB_GC_RETRY_COUNT_MAX 10

/* Job representation in memory. */
typedef struct job {
    char id[JOB_ID_LEN];    /* Job ID. */
    unsigned int state:4;   /* Job state: one of JOB_STATE_* states. */
    unsigned int gc_retry:4;/* GC attempts counter, for exponential delay. */
    uint8_t flags;          /* Job flags. */
    uint16_t repl;          /* Replication factor. */
    uint32_t etime;         /* Job expire time. */
    uint64_t ctime;         /* Job creation time, local node at creation.
                               ctime is time in milliseconds * 1000000, each
                               job created in the same millisecond in the
                               same node gets prev job ctime + 1. */
    uint32_t delay;         /* Delay before queueing this job the 1st time. */
    uint32_t retry;         /* Job re-queue time: re-queue period in seconds. */
    uint16_t num_nacks;     /* Number of NACKs this node observed. */
    uint16_t num_deliv;     /* Number of deliveries this node observed. */

    /* --------------------------------------------------------------------
     * Up to this point we use the structure for on-wire serialization,
     * before here all the fields should be naturally aligned, and pointers
     * should only be present after. Integer values are stored in the host
     * native endianness, and only normalized during serialization.
     * -------------------------------------------------------------------- */

    robj *queue;            /* Job queue name. */
    sds body;               /* Body, or NULL if the job is just an ACK. */
    dict *nodes_delivered;  /* Nodes we delivered the job to for replication. */
    dict *nodes_confirmed;  /* Nodes that confirmed to have a copy. If the job
                               state is ACKED, this is a list of nodes that
                               confirmed to have the job in acknowledged
                               state. */

    /* Note: qtime and awakeme are in milliseconds because we need to
     * desync different nodes in an effective way to avoid useless multiple
     * deliveries when jobs are re-queued. */
    mstime_t qtime;         /* Next queue time: local unix time the job will
                               be requeued in this node if not ACKed before.
                               Qtime is updated when we receive QUEUED
                               messages to avoid re-queueing if other nodes
                               did. When qtime is set to zero for a job, it
                               never gets re-queued again. */
    mstime_t awakeme;       /* Time at which we need to take actions about
                               this job in this node. All the registered jobs
                               are ordered by awakeme time in the
                               server.awakeme skip list, unless awakeme is
                               set to zero. */
} job;

/* Number of bytes of directly serializable fields in the job structure. */
#define JOB_STRUCT_SER_LEN (JOB_ID_LEN+1+1+2+4+8+4+4+2+2)
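/* Editor's note: a worked check, not part of the original source. The value
 * of JOB_STRUCT_SER_LEN is the byte-per-byte sum of the serializable prefix
 * of the structure above:
 *
 *   JOB_ID_LEN (40)  id
 *   + 1              state (4 bits) + gc_retry (4 bits), one packed byte
 *   + 1              flags
 *   + 2              repl
 *   + 4              etime
 *   + 8              ctime
 *   + 4              delay
 *   + 4              retry
 *   + 2              num_nacks
 *   + 2              num_deliv
 *   = 68 bytes
 *
 * which is exactly what serializeJob() and deserializeJob() memcpy() as the
 * fixed size header of the wire format. */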
/* Serialization types for serializeJob() and deserializeJob(). */
#define SER_MESSAGE 0
#define SER_STORAGE 1

struct clusterNode;

job *createJob(char *id, int state, int ttl, int retry);
int compareNodeIDsByJob(struct clusterNode *nodea, struct clusterNode *nodeb, job *j);
void deleteJobFromCluster(job *j);
sds serializeJob(sds msg, job *j, int sertype);
job *deserializeJob(unsigned char *p, size_t len, unsigned char **next, int sertype);
void fixForeingJobTimes(job *j);
void updateJobNodes(job *j);
int registerJob(job *j);
int unregisterJob(job *j);
void freeJob(job *j);
int jobReplicationAchieved(job *j);
job *lookupJob(char *id);
void updateJobAwakeTime(job *j, mstime_t at);
void updateJobRequeueTime(job *j, mstime_t qtime);
int getRawTTLFromJobID(char *id);
void setJobTTLFromID(job *job);
int validateJobIdOrReply(client *c, char *id, size_t len);
void setJobAssociatedValue(job *j, void *val);
void *jobGetAssociatedValue(job *j);
char *jobStateToString(int state);
int validateJobIDs(client *c, robj **ids, int count);
void AOFLoadJob(job *j);
void AOFDelJob(job *j);
void AOFAckJob(job *j);

#endif
disque-1.0-rc1/src/latency.c000066400000000000000000000576321264176561100157420ustar00rootroot00000000000000/* The latency monitor makes it easy to observe the sources of latency
 * in a Disque instance using the LATENCY command. Different latency
 * sources are monitored, like disk I/O, execution of commands, the fork
 * system call, and so forth.
 *
 * ----------------------------------------------------------------------------
 *
 * Copyright (c) 2014, Salvatore Sanfilippo
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of Disque nor the names of its contributors may be used
 *     to endorse or promote products derived from this software without
 *     specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "server.h"

/* Dictionary type for latency events. */
int dictStringKeyCompare(void *privdata, const void *key1, const void *key2) {
    UNUSED(privdata);
    return strcmp(key1,key2) == 0;
}

unsigned int dictStringHash(const void *key) {
    return dictGenHashFunction(key, strlen(key));
}

void dictVanillaFree(void *privdata, void *val);

dictType latencyTimeSeriesDictType = {
    dictStringHash,             /* hash function */
    NULL,                       /* key dup */
    NULL,                       /* val dup */
    dictStringKeyCompare,       /* key compare */
    dictVanillaFree,            /* key destructor */
    dictVanillaFree             /* val destructor */
};

/* ---------------------------- Latency API --------------------------------- */

/* Latency monitor initialization. We just need to create the dictionary
 * of time series; each time series is created on demand in order to avoid
 * having a fixed list to maintain. */
void latencyMonitorInit(void) {
    server.latency_events = dictCreate(&latencyTimeSeriesDictType,NULL);
}

/* Add the specified sample to the specified time series "event".
 * This function is usually called via latencyAddSampleIfNeeded(), a
 * macro that only adds the sample if the latency is higher than
 * server.latency_monitor_threshold. */
void latencyAddSample(char *event, mstime_t latency) {
    struct latencyTimeSeries *ts = dictFetchValue(server.latency_events,event);
    time_t now = time(NULL);
    int prev;

    /* Create the time series if it does not exist. */
    if (ts == NULL) {
        ts = zmalloc(sizeof(*ts));
        ts->idx = 0;
        ts->max = 0;
        memset(ts->samples,0,sizeof(ts->samples));
        dictAdd(server.latency_events,zstrdup(event),ts);
    }

    /* If the previous sample is in the same second, we update our old sample
     * if this latency is > the old one, or just return. */
    prev = (ts->idx + LATENCY_TS_LEN - 1) % LATENCY_TS_LEN;
    if (ts->samples[prev].time == now) {
        if (latency > ts->samples[prev].latency)
            ts->samples[prev].latency = latency;
        return;
    }

    ts->samples[ts->idx].time = time(NULL);
    ts->samples[ts->idx].latency = latency;
    if (latency > ts->max) ts->max = latency;

    ts->idx++;
    if (ts->idx == LATENCY_TS_LEN) ts->idx = 0;
}

/* Reset data for the specified event, or all the events data if 'event' is
 * NULL.
 *
 * Note: this is O(N) even when event_to_reset is not NULL because it makes
 * the code simpler and we have a small fixed max number of events. */
int latencyResetEvent(char *event_to_reset) {
    dictIterator *di;
    dictEntry *de;
    int resets = 0;

    di = dictGetSafeIterator(server.latency_events);
    while((de = dictNext(di)) != NULL) {
        char *event = dictGetKey(de);

        if (event_to_reset == NULL || strcasecmp(event,event_to_reset) == 0) {
            dictDelete(server.latency_events, event);
            resets++;
        }
    }
    dictReleaseIterator(di);
    return resets;
}
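/* Editor's note: a worked example, not part of the original source. With
 * latency-monitor-threshold set to 100, a code path that measures a 250 ms
 * pause calls latencyAddSampleIfNeeded("event",250), which lands in
 * latencyAddSample() above. Within the same second only the worst sample
 * is kept:
 *
 *   t=10 latency=250  -> stored as a new sample at samples[idx]
 *   t=10 latency=120  -> ignored (same second, smaller)
 *   t=10 latency=300  -> overwrites the 250 of the same second
 *   t=11 latency=150  -> idx advances, stored as a new sample
 *
 * The samples array is a circular buffer of LATENCY_TS_LEN entries, so old
 * spikes are eventually overwritten, while ts->max remembers the worst
 * latency ever appended as a new sample. */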
/* ------------------------ Latency reporting (doctor) ---------------------- */

/* Analyze the samples available for a given event and return a structure
 * populated with the different metrics: average, MAD, min, max, and so
 * forth. Check the latency.h definition of struct latencyStats for more
 * info. If the specified event has no elements the structure is populated
 * with zero values. */
void analyzeLatencyForEvent(char *event, struct latencyStats *ls) {
    struct latencyTimeSeries *ts = dictFetchValue(server.latency_events,event);
    int j;
    uint64_t sum;

    ls->all_time_high = ts ? ts->max : 0;
    ls->avg = 0;
    ls->min = 0;
    ls->max = 0;
    ls->mad = 0;
    ls->samples = 0;
    ls->period = 0;
    if (!ts) return;

    /* First pass, populate everything but the MAD. */
    sum = 0;
    for (j = 0; j < LATENCY_TS_LEN; j++) {
        if (ts->samples[j].time == 0) continue;
        ls->samples++;
        if (ls->samples == 1) {
            ls->min = ls->max = ts->samples[j].latency;
        } else {
            if (ls->min > ts->samples[j].latency)
                ls->min = ts->samples[j].latency;
            if (ls->max < ts->samples[j].latency)
                ls->max = ts->samples[j].latency;
        }
        sum += ts->samples[j].latency;

        /* Track the oldest event time in ls->period. */
        if (ls->period == 0 || ts->samples[j].time < ls->period)
            ls->period = ts->samples[j].time;
    }

    /* So far avg is actually the sum of the latencies, and period is
     * the oldest event time. We need to make the first an average and
     * the second a range of seconds. */
    if (ls->samples) {
        ls->avg = sum / ls->samples;
        ls->period = time(NULL) - ls->period;
        if (ls->period == 0) ls->period = 1;
    }

    /* Second pass, compute the MAD. */
    sum = 0;
    for (j = 0; j < LATENCY_TS_LEN; j++) {
        int64_t delta;

        if (ts->samples[j].time == 0) continue;
        delta = (int64_t)ls->avg - ts->samples[j].latency;
        if (delta < 0) delta = -delta;
        sum += delta;
    }
    if (ls->samples) ls->mad = sum / ls->samples;
}
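/* Editor's note: a worked example, not part of the original source. For
 * samples of 10, 12 and 20 ms, analyzeLatencyForEvent() computes:
 *
 *   avg = (10+12+20)/3 = 14 (integer division)
 *   mad = (|14-10| + |14-12| + |14-20|)/3 = (4+2+6)/3 = 4
 *
 * The mean absolute deviation is used instead of the standard deviation
 * because it is cheap to compute and robust enough to hint at how spiky
 * the event is around its average. */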
Worst all time event %lums.", eventnum, event, ls.samples, (unsigned long) ls.avg, (unsigned long) ls.mad, (double) ls.period/ls.samples, (unsigned long) ts->max); /* Fork */ if (!strcasecmp(event,"fork")) { char *fork_quality; if (server.stat_fork_rate < 10) { fork_quality = "terrible"; advise_better_vm = 1; advices++; } else if (server.stat_fork_rate < 25) { fork_quality = "poor"; advise_better_vm = 1; advices++; } else if (server.stat_fork_rate < 100) { fork_quality = "good"; } else { fork_quality = "excellent"; } report = sdscatprintf(report, " Fork rate is %.2f GB/sec (%s).", server.stat_fork_rate, fork_quality); } /* Potentially slow commands. */ if (!strcasecmp(event,"command")) { if (server.slowlog_log_slower_than == 0) { advise_slowlog_enabled = 1; advices++; } else if (server.slowlog_log_slower_than/1000 > server.latency_monitor_threshold) { advise_slowlog_tuning = 1; advices++; } advise_slowlog_inspect = 1; advise_large_objects = 1; advices += 2; } /* fast-command. */ if (!strcasecmp(event,"fast-command")) { advise_scheduler = 1; advices++; } /* AOF and I/O. */ if (!strcasecmp(event,"aof-write-pending-fsync")) { advise_local_disk = 1; advise_disk_contention = 1; advise_ssd = 1; advise_data_writeback = 1; advices += 4; } if (!strcasecmp(event,"aof-write-active-child")) { advise_no_appendfsync = 1; advise_data_writeback = 1; advise_ssd = 1; advices += 3; } if (!strcasecmp(event,"aof-write-alone")) { advise_local_disk = 1; advise_data_writeback = 1; advise_ssd = 1; advices += 3; } if (!strcasecmp(event,"aof-fsync-always")) { advise_relax_fsync_policy = 1; advices++; } if (!strcasecmp(event,"aof-fstat") || !strcasecmp(event,"rdb-unlink-temp-file")) { advise_disk_contention = 1; advise_local_disk = 1; advices += 2; } if (!strcasecmp(event,"aof-rewrite-diff-write") || !strcasecmp(event,"aof-rename")) { advise_write_load_info = 1; advise_data_writeback = 1; advise_ssd = 1; advise_local_disk = 1; advices += 4; } /* Expire cycle. */ if (!strcasecmp(event,"expire-cycle")) { advise_hz = 1; advise_large_objects = 1; advices += 2; } /* Eviction cycle. */ if (!strcasecmp(event,"eviction-cycle")) { advise_large_objects = 1; advices++; } report = sdscatlen(report,"\n",1); } dictReleaseIterator(di); if (eventnum == 0) { report = sdscat(report,"Dave, no latency spike was observed during the lifetime of this Disque instance, not in the slightest bit. I honestly think you ought to sit down calmly, take a stress pill, and think things over.\n"); } else if (advices == 0) { report = sdscat(report,"\nWhile there are latency events logged, I'm not able to suggest any easy fix. Please use the Disque community to get some help, providing this report in your help request.\n"); } else { /* Add all the suggestions accumulated so far. */ /* Better VM. */ report = sdscat(report,"\nI have a few pieces of advice for you:\n\n"); if (advise_better_vm) { report = sdscat(report,"- If you are using a virtual machine, consider upgrading it with a faster one using a hypervisor that provides less latency during fork() calls. Xen is known to have poor fork() performance. Even in the context of the same VM provider, certain kinds of instances can execute fork faster than others.\n"); } /* Slow log. */ if (advise_slowlog_enabled) { report = sdscatprintf(report,"- There are latency issues with potentially slow commands you are using. Try to enable the Slow Log Disque feature using the command 'CONFIG SET slowlog-log-slower-than %llu'.
If the Slow log is disabled Disque is not able to log slow command execution for you.\n", (unsigned long long)server.latency_monitor_threshold*1000); } if (advise_slowlog_tuning) { report = sdscatprintf(report,"- Your current Slow Log configuration only logs events that are slower than your configured latency monitor threshold. Please use 'CONFIG SET slowlog-log-slower-than %llu'.\n", (unsigned long long)server.latency_monitor_threshold*1000); } if (advise_slowlog_inspect) { report = sdscat(report,"- Check your Slow Log to understand which of the commands you are running are too slow to execute. Please check http://disque.io/commands/slowlog for more information.\n"); } /* Intrinsic latency. */ if (advise_scheduler) { report = sdscat(report,"- The system is slow to execute Disque code paths not containing system calls. This usually means the system does not provide Disque CPU time to run for long periods. You should try to:\n" " 1) Lower the system load.\n" " 2) Use a computer / VM just for Disque if you are running other software in the same system.\n" " 3) Check if you have a \"noisy neighbour\" problem.\n" " 4) Check with 'disque-cli --intrinsic-latency 100' what is the intrinsic latency in your system.\n" " 5) Check if the problem is allocator-related by recompiling Disque with MALLOC=libc, if you are using Jemalloc. However this may create fragmentation problems.\n"); } /* AOF / Disk latency. */ if (advise_local_disk) { report = sdscat(report,"- It is strongly advised to use local disks for persistence, especially if you are using AOF. Remote disks provided by platform-as-a-service providers are known to be slow.\n"); } if (advise_ssd) { report = sdscat(report,"- SSD disks are able to reduce fsync latency, and total time needed for snapshotting and AOF log rewriting (resulting in smaller memory usage and smaller final AOF rewrite buffer flushes). With extremely high write load SSD disks can be a good option. However Disque should perform reasonably well under high load using normal disks. Use this advice as a last resort.\n"); } if (advise_data_writeback) { report = sdscat(report,"- Mounting ext3/4 filesystems with data=writeback can provide a performance boost compared to data=ordered, however this mode of operation provides fewer guarantees, and sometimes it can happen that after a hard crash the AOF file will have a half-written command at the end and will need to be repaired before Disque restarts.\n"); } if (advise_disk_contention) { report = sdscat(report,"- Try to lower the disk contention. This is often caused by other disk intensive processes running in the same computer (including other Disque instances).\n"); } if (advise_no_appendfsync) { report = sdscat(report,"- Assuming from the point of view of data safety this is viable in your environment, you could try to enable the 'no-appendfsync-on-rewrite' option, so that fsync will not be performed while there is a child rewriting the AOF file or producing an RDB file (the moment where there is high disk contention).\n"); } if (advise_relax_fsync_policy && server.aof_fsync == AOF_FSYNC_ALWAYS) { report = sdscat(report,"- Your fsync policy is set to 'always'.
It is very hard to get good performance with such a setup, if possible try to relax the fsync policy to 'everysec'.\n"); } if (advise_write_load_info) { report = sdscat(report,"- Latency during the AOF atomic rename operation, or when the final difference is flushed to the AOF file at the end of the rewrite, is sometimes caused by a very high write load, causing the AOF buffer to get very large. If possible try to send fewer commands to accomplish the same work, or use Lua scripts to group multiple operations into a single EVALSHA call.\n"); } if (advise_hz && server.hz < 100) { report = sdscat(report,"- In order to make the Disque key expiration process more incremental, try to set the 'hz' configuration parameter to 100 using 'CONFIG SET hz 100'.\n"); } if (advise_large_objects) { report = sdscat(report,"- Deleting, expiring or evicting (because of maxmemory policy) large objects is a blocking operation. If you have very large objects that are often deleted, expired, or evicted, try to fragment those objects into multiple smaller objects.\n"); } } return report; } /* ---------------------- Latency command implementation -------------------- */ /* latencyCommand() helper to produce a time-delay reply for all the samples * in memory for the specified time series. */ void latencyCommandReplyWithSamples(client *c, struct latencyTimeSeries *ts) { void *replylen = addDeferredMultiBulkLength(c); int samples = 0, j; for (j = 0; j < LATENCY_TS_LEN; j++) { int i = (ts->idx + j) % LATENCY_TS_LEN; if (ts->samples[i].time == 0) continue; addReplyMultiBulkLen(c,2); addReplyLongLong(c,ts->samples[i].time); addReplyLongLong(c,ts->samples[i].latency); samples++; } setDeferredMultiBulkLength(c,replylen,samples); } /* latencyCommand() helper to produce the reply for the LATEST subcommand, * listing the last latency sample for every event type registered so far. */ void latencyCommandReplyWithLatestEvents(client *c) { dictIterator *di; dictEntry *de; addReplyMultiBulkLen(c,dictSize(server.latency_events)); di = dictGetIterator(server.latency_events); while((de = dictNext(di)) != NULL) { char *event = dictGetKey(de); struct latencyTimeSeries *ts = dictGetVal(de); int last = (ts->idx + LATENCY_TS_LEN - 1) % LATENCY_TS_LEN; addReplyMultiBulkLen(c,4); addReplyBulkCString(c,event); addReplyLongLong(c,ts->samples[last].time); addReplyLongLong(c,ts->samples[last].latency); addReplyLongLong(c,ts->max); } dictReleaseIterator(di); } #define LATENCY_GRAPH_COLS 80 sds latencyCommandGenSparkeline(char *event, struct latencyTimeSeries *ts) { int j; struct sequence *seq = createSparklineSequence(); sds graph = sdsempty(); uint32_t min = 0, max = 0; for (j = 0; j < LATENCY_TS_LEN; j++) { int i = (ts->idx + j) % LATENCY_TS_LEN; int elapsed; char *label; char buf[64]; if (ts->samples[i].time == 0) continue; /* Update min and max. */ if (seq->length == 0) { min = max = ts->samples[i].latency; } else { if (ts->samples[i].latency > max) max = ts->samples[i].latency; if (ts->samples[i].latency < min) min = ts->samples[i].latency; } /* Use as label the number of seconds / minutes / hours / days * ago the event happened.
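* For example a sample taken 130 seconds ago is labeled "2m", and one
* taken two days ago is labeled "2d" (plain integer division of the
* 'elapsed' value computed below).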
*/ elapsed = time(NULL) - ts->samples[i].time; if (elapsed < 60) snprintf(buf,sizeof(buf),"%ds",elapsed); else if (elapsed < 3600) snprintf(buf,sizeof(buf),"%dm",elapsed/60); else if (elapsed < 3600*24) snprintf(buf,sizeof(buf),"%dh",elapsed/3600); else snprintf(buf,sizeof(buf),"%dd",elapsed/(3600*24)); label = zstrdup(buf); sparklineSequenceAddSample(seq,ts->samples[i].latency,label); } graph = sdscatprintf(graph, "%s - high %lu ms, low %lu ms (all time high %lu ms)\n", event, (unsigned long) max, (unsigned long) min, (unsigned long) ts->max); for (j = 0; j < LATENCY_GRAPH_COLS; j++) graph = sdscatlen(graph,"-",1); graph = sdscatlen(graph,"\n",1); graph = sparklineRender(graph,seq,LATENCY_GRAPH_COLS,4,SPARKLINE_FILL); freeSparklineSequence(seq); return graph; } /* LATENCY command implementations. * * LATENCY HISTORY: return time-latency samples for the specified event. * LATENCY LATEST: return the latest latency for all the events classes. * LATENCY DOCTOR: returns a human readable analysis of instance latency. * LATENCY GRAPH: provide an ASCII graph of the latency of the specified event. * LATENCY RESET: reset the samples of the specified events, or all of them. */ void latencyCommand(client *c) { struct latencyTimeSeries *ts; if (!strcasecmp(c->argv[1]->ptr,"history") && c->argc == 3) { /* LATENCY HISTORY */ ts = dictFetchValue(server.latency_events,c->argv[2]->ptr); if (ts == NULL) { addReplyMultiBulkLen(c,0); } else { latencyCommandReplyWithSamples(c,ts); } } else if (!strcasecmp(c->argv[1]->ptr,"graph") && c->argc == 3) { /* LATENCY GRAPH */ sds graph; dictEntry *de; char *event; de = dictFind(server.latency_events,c->argv[2]->ptr); if (de == NULL) goto nodataerr; ts = dictGetVal(de); event = dictGetKey(de); graph = latencyCommandGenSparkeline(event,ts); addReplyBulkCString(c,graph); sdsfree(graph); } else if (!strcasecmp(c->argv[1]->ptr,"latest") && c->argc == 2) { /* LATENCY LATEST */ latencyCommandReplyWithLatestEvents(c); } else if (!strcasecmp(c->argv[1]->ptr,"doctor") && c->argc == 2) { /* LATENCY DOCTOR */ sds report = createLatencyReport(); addReplyBulkCBuffer(c,report,sdslen(report)); sdsfree(report); } else if (!strcasecmp(c->argv[1]->ptr,"reset") && c->argc >= 2) { /* LATENCY RESET */ if (c->argc == 2) { addReplyLongLong(c,latencyResetEvent(NULL)); } else { int j, resets = 0; for (j = 2; j < c->argc; j++) resets += latencyResetEvent(c->argv[j]->ptr); addReplyLongLong(c,resets); } } else { addReply(c,shared.syntaxerr); } return; nodataerr: /* Common error when the user asks for an event we have no latency * information about. */ addReplyErrorFormat(c, "No samples available for event '%s'", (char*) c->argv[2]->ptr); } disque-1.0-rc1/src/latency.h000066400000000000000000000072651264176561100157470ustar00rootroot00000000000000/* latency.h -- latency monitor API header file * See latency.c for more information. * * ---------------------------------------------------------------------------- * * Copyright (c) 2014, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution.
* * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef __LATENCY_H #define __LATENCY_H #define LATENCY_TS_LEN 160 /* History length for every monitored event. */ /* Representation of a latency sample: the sampling time and the latency * observed in milliseconds. */ struct latencySample { int32_t time; /* We don't use time_t to force 4 bytes usage everywhere. */ uint32_t latency; /* Latency in milliseconds. */ }; /* The latency time series for a given event. */ struct latencyTimeSeries { int idx; /* Index of the next sample to store. */ uint32_t max; /* Max latency observed for this event. */ struct latencySample samples[LATENCY_TS_LEN]; /* Latest history. */ }; /* Latency statistics structure. */ struct latencyStats { uint32_t all_time_high; /* Absolute max observed since latest reset. */ uint32_t avg; /* Average of current samples. */ uint32_t min; /* Min of current samples. */ uint32_t max; /* Max of current samples. */ uint32_t mad; /* Mean absolute deviation. */ uint32_t samples; /* Number of non-zero samples. */ time_t period; /* Number of seconds since first event and now. */ }; void latencyMonitorInit(void); void latencyAddSample(char *event, mstime_t latency); /* Latency monitoring macros. */ /* Start monitoring an event. We just set the current time. */ #define latencyStartMonitor(var) if (server.latency_monitor_threshold) { \ var = mstime(); \ } else { \ var = 0; \ } /* End monitoring an event, compute the difference with the current time * to check the amount of time elapsed. */ #define latencyEndMonitor(var) if (server.latency_monitor_threshold) { \ var = mstime() - var; \ } /* Add the sample only if the elapsed time is >= to the configured threshold. */ #define latencyAddSampleIfNeeded(event,var) \ if (server.latency_monitor_threshold && \ (var) >= server.latency_monitor_threshold) \ latencyAddSample((event),(var)); #endif /* __LATENCY_H */ disque-1.0-rc1/src/lzf.h000066400000000000000000000104671264176561100151030ustar00rootroot00000000000000/* * Copyright (c) 2000-2008 Marc Alexander Lehmann * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ #ifndef LZF_H #define LZF_H /*********************************************************************** ** ** lzf -- an extremely fast/free compression/decompression-method ** http://liblzf.plan9.de/ ** ** This algorithm is believed to be patent-free. ** ***********************************************************************/ #define LZF_VERSION 0x0105 /* 1.5, API version */ /* * Compress in_len bytes stored at the memory block starting at * in_data and write the result to out_data, up to a maximum length * of out_len bytes. * * If the output buffer is not large enough or any error occurs return 0, * otherwise return the number of bytes used, which might be considerably * more than in_len (but less than 104% of the original size), so it * makes sense to always use out_len == in_len - 1, to ensure _some_ * compression, and store the data uncompressed otherwise (with a flag, of * course). * * lzf_compress might use different algorithms on different systems and * even different runs, thus might result in different compressed strings * depending on the phase of the moon or similar factors. However, all * these strings are architecture-independent and will result in the * original data when decompressed using lzf_decompress. * * The buffers must not be overlapping. * * If the option LZF_STATE_ARG is enabled, an extra argument must be * supplied which is not reflected in this header file. Refer to lzfP.h * and lzf_c.c. * */ unsigned int lzf_compress (const void *const in_data, unsigned int in_len, void *out_data, unsigned int out_len); /* * Decompress data compressed with some version of the lzf_compress * function and stored at location in_data and length in_len. The result * will be stored at out_data up to a maximum of out_len characters. * * If the output buffer is not large enough to hold the decompressed * data, a 0 is returned and errno is set to E2BIG. Otherwise the number * of decompressed bytes (i.e. the original length of the data) is * returned. * * If an error in the compressed data is detected, a zero is returned and * errno is set to EINVAL. * * This function is very fast, about as fast as a copying loop.
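* As a quick illustration (a sketch only, not part of this API), a
* round-trip through the two functions could look like this, using the
* out_len == in_len - 1 trick suggested above so that a successful
* lzf_compress() call is guaranteed to have saved space:
*
*   unsigned char buf[4096], packed[4096], out[4096];
*   unsigned int clen, olen;
*
*   clen = lzf_compress(buf, sizeof(buf), packed, sizeof(buf) - 1);
*   if (clen == 0) {
*       ... incompressible: store buf verbatim, flagged as plain ...
*   } else {
*       olen = lzf_decompress(packed, clen, out, sizeof(out));
*       ... olen == sizeof(buf) on success, 0 with errno set on error ...
*   }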
*/ unsigned int lzf_decompress (const void *const in_data, unsigned int in_len, void *out_data, unsigned int out_len); #endif disque-1.0-rc1/src/lzfP.h000066400000000000000000000120361264176561100152150ustar00rootroot00000000000000/* * Copyright (c) 2000-2007 Marc Alexander Lehmann * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ #ifndef LZFP_h #define LZFP_h #define STANDALONE 1 /* at the moment, this is ok. */ #ifndef STANDALONE # include "lzf.h" #endif /* * Size of hashtable is (1 << HLOG) * sizeof (char *) * decompression is independent of the hash table size * the difference between 15 and 14 is very small * for small blocks (and 14 is usually a bit faster). * For a low-memory/faster configuration, use HLOG == 13; * For best compression, use 15 or 16 (or more, up to 23). */ #ifndef HLOG # define HLOG 16 #endif /* * Sacrifice very little compression quality in favour of compression speed. * This gives almost the same compression as the default code, and is * (very roughly) 15% faster. This is the preferred mode of operation. */ #ifndef VERY_FAST # define VERY_FAST 1 #endif /* * Sacrifice some more compression quality in favour of compression speed. * (roughly 1-2% worse compression for large blocks and * 9-10% for small, redundant, blocks and >>20% better speed in both cases) * In short: when in need for speed, enable this for binary data, * possibly disable this for text data. 
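* Like the other tunables in this file, this default only applies when
* the macro is not already defined, so presumably a build can override it
* from the compiler command line without editing this header, e.g.:
*
*   cc -DULTRA_FAST=1 -c lzf_c.c lzf_d.c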
*/ #ifndef ULTRA_FAST # define ULTRA_FAST 0 #endif /* * Unconditionally aligning does not cost very much, so do it if unsure */ #ifndef STRICT_ALIGN # define STRICT_ALIGN !(defined(__i386) || defined (__amd64)) #endif /* * You may choose to pre-set the hash table (might be faster on some * modern cpus and large (>>64k) blocks, and also makes compression * deterministic/repeatable when the configuration otherwise is the same). */ #ifndef INIT_HTAB # define INIT_HTAB 0 #endif /* * Avoid assigning values to errno variable? for some embedding purposes * (linux kernel for example), this is necessary. NOTE: this breaks * the documentation in lzf.h. */ #ifndef AVOID_ERRNO # define AVOID_ERRNO 0 #endif /* * Whether to pass the LZF_STATE variable as argument, or allocate it * on the stack. For small-stack environments, define this to 1. * NOTE: this breaks the prototype in lzf.h. */ #ifndef LZF_STATE_ARG # define LZF_STATE_ARG 0 #endif /* * Whether to add extra checks for input validity in lzf_decompress * and return EINVAL if the input stream has been corrupted. This * only shields against overflowing the input buffer and will not * detect most corrupted streams. * This check is not normally noticeable on modern hardware * (<1% slowdown), but might slow down older cpus considerably. */ #ifndef CHECK_INPUT # define CHECK_INPUT 1 #endif /*****************************************************************************/ /* nothing should be changed below */ typedef unsigned char u8; typedef const u8 *LZF_STATE[1 << (HLOG)]; #if !STRICT_ALIGN /* for unaligned accesses we need a 16 bit datatype. */ # include <limits.h> # if USHRT_MAX == 65535 typedef unsigned short u16; # elif UINT_MAX == 65535 typedef unsigned int u16; # else # undef STRICT_ALIGN # define STRICT_ALIGN 1 # endif #endif #if ULTRA_FAST # if defined(VERY_FAST) # undef VERY_FAST # endif #endif #if INIT_HTAB # ifdef __cplusplus # include <cstring> # else # include <string.h> # endif #endif #endif disque-1.0-rc1/src/lzf_c.c000066400000000000000000000212271264176561100153740ustar00rootroot00000000000000/* * Copyright (c) 2000-2008 Marc Alexander Lehmann * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE.
* * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ #include "lzfP.h" #define HSIZE (1 << (HLOG)) /* * don't play with this unless you benchmark! * decompression is not dependent on the hash function * the hashing function might seem strange, just believe me * it works ;) */ #ifndef FRST # define FRST(p) (((p[0]) << 8) | p[1]) # define NEXT(v,p) (((v) << 8) | p[2]) # if ULTRA_FAST # define IDX(h) ((( h >> (3*8 - HLOG)) - h ) & (HSIZE - 1)) # elif VERY_FAST # define IDX(h) ((( h >> (3*8 - HLOG)) - h*5) & (HSIZE - 1)) # else # define IDX(h) ((((h ^ (h << 5)) >> (3*8 - HLOG)) - h*5) & (HSIZE - 1)) # endif #endif /* * IDX works because it is very similar to a multiplicative hash, e.g. * ((h * 57321 >> (3*8 - HLOG)) & (HSIZE - 1)) * the latter is also quite fast on newer CPUs, and compresses similarly. * * the next one is also quite good, albeit slow ;) * (int)(cos(h & 0xffffff) * 1e6) */ #if 0 /* original lzv-like hash function, much worse and thus slower */ # define FRST(p) (p[0] << 5) ^ p[1] # define NEXT(v,p) ((v) << 5) ^ p[2] # define IDX(h) ((h) & (HSIZE - 1)) #endif #define MAX_LIT (1 << 5) #define MAX_OFF (1 << 13) #define MAX_REF ((1 << 8) + (1 << 3)) #if __GNUC__ >= 3 # define expect(expr,value) __builtin_expect ((expr),(value)) # define inline inline #else # define expect(expr,value) (expr) # define inline static #endif #define expect_false(expr) expect ((expr) != 0, 0) #define expect_true(expr) expect ((expr) != 0, 1) /* * compressed format * * 000LLLLL ; literal * LLLooooo oooooooo ; backref L * 111ooooo LLLLLLLL oooooooo ; backref L+7 * */ unsigned int lzf_compress (const void *const in_data, unsigned int in_len, void *out_data, unsigned int out_len #if LZF_STATE_ARG , LZF_STATE htab #endif ) { #if !LZF_STATE_ARG LZF_STATE htab; #endif const u8 **hslot; const u8 *ip = (const u8 *)in_data; u8 *op = (u8 *)out_data; const u8 *in_end = ip + in_len; u8 *out_end = op + out_len; const u8 *ref; /* off requires a type wide enough to hold a general pointer difference. * ISO C doesn't have that (size_t might not be enough and ptrdiff_t only * works for differences within a single object). We also assume that no * bit pattern traps. Since the only platform that is both non-POSIX * and fails to support both assumptions is windows 64 bit, we make a * special workaround for it.
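* (Background for the workaround: on LP64 Unix systems 'unsigned long' is
* 64 bits wide and can represent any pointer difference, while 64 bit
* Windows uses the LLP64 model where 'long' stays at 32 bits, hence the
* unsigned _int64 fallback below.)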
*/ #if defined (WIN32) && defined (_M_X64) unsigned _int64 off; /* workaround for missing POSIX compliance */ #else unsigned long off; #endif unsigned int hval; int lit; if (!in_len || !out_len) return 0; #if INIT_HTAB memset (htab, 0, sizeof (htab)); # if 0 for (hslot = htab; hslot < htab + HSIZE; hslot++) *hslot++ = ip; # endif #endif lit = 0; op++; /* start run */ hval = FRST (ip); while (ip < in_end - 2) { hval = NEXT (hval, ip); hslot = htab + IDX (hval); ref = *hslot; *hslot = ip; if (1 #if INIT_HTAB && ref < ip /* the next test will actually take care of this, but this is faster */ #endif && (off = ip - ref - 1) < MAX_OFF && ip + 4 < in_end && ref > (u8 *)in_data #if STRICT_ALIGN && ref[0] == ip[0] && ref[1] == ip[1] && ref[2] == ip[2] #else && *(u16 *)ref == *(u16 *)ip && ref[2] == ip[2] #endif ) { /* match found at *ref++ */ unsigned int len = 2; unsigned int maxlen = in_end - ip - len; maxlen = maxlen > MAX_REF ? MAX_REF : maxlen; op [- lit - 1] = lit - 1; /* stop run */ op -= !lit; /* undo run if length is zero */ if (expect_false (op + 3 + 1 >= out_end)) return 0; for (;;) { if (expect_true (maxlen > 16)) { len++; if (ref [len] != ip [len]) break; len++; if (ref [len] != ip [len]) break; len++; if (ref [len] != ip [len]) break; len++; if (ref [len] != ip [len]) break; len++; if (ref [len] != ip [len]) break; len++; if (ref [len] != ip [len]) break; len++; if (ref [len] != ip [len]) break; len++; if (ref [len] != ip [len]) break; len++; if (ref [len] != ip [len]) break; len++; if (ref [len] != ip [len]) break; len++; if (ref [len] != ip [len]) break; len++; if (ref [len] != ip [len]) break; len++; if (ref [len] != ip [len]) break; len++; if (ref [len] != ip [len]) break; len++; if (ref [len] != ip [len]) break; len++; if (ref [len] != ip [len]) break; } do len++; while (len < maxlen && ref[len] == ip[len]); break; } len -= 2; /* len is now #octets - 1 */ ip++; if (len < 7) { *op++ = (off >> 8) + (len << 5); } else { *op++ = (off >> 8) + ( 7 << 5); *op++ = len - 7; } *op++ = off; lit = 0; op++; /* start run */ ip += len + 1; if (expect_false (ip >= in_end - 2)) break; #if ULTRA_FAST || VERY_FAST --ip; # if VERY_FAST && !ULTRA_FAST --ip; # endif hval = FRST (ip); hval = NEXT (hval, ip); htab[IDX (hval)] = ip; ip++; # if VERY_FAST && !ULTRA_FAST hval = NEXT (hval, ip); htab[IDX (hval)] = ip; ip++; # endif #else ip -= len + 1; do { hval = NEXT (hval, ip); htab[IDX (hval)] = ip; ip++; } while (len--); #endif } else { /* one more literal byte we must copy */ if (expect_false (op >= out_end)) return 0; lit++; *op++ = *ip++; if (expect_false (lit == MAX_LIT)) { op [- lit - 1] = lit - 1; /* stop run */ lit = 0; op++; /* start run */ } } } if (op + 3 > out_end) /* at most 3 bytes can be missing here */ return 0; while (ip < in_end) { lit++; *op++ = *ip++; if (expect_false (lit == MAX_LIT)) { op [- lit - 1] = lit - 1; /* stop run */ lit = 0; op++; /* start run */ } } op [- lit - 1] = lit - 1; /* end run */ op -= !lit; /* undo run if length is zero */ return op - (u8 *)out_data; } disque-1.0-rc1/src/lzf_d.c000066400000000000000000000103131264176561100153670ustar00rootroot00000000000000/* * Copyright (c) 2000-2007 Marc Alexander Lehmann * * Redistribution and use in source and binary forms, with or without modifica- * tion, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, the contents of this file may be used under the terms of * the GNU General Public License ("GPL") version 2 or any later version, * in which case the provisions of the GPL are applicable instead of * the above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the BSD license, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file under * either the BSD or the GPL. */ #include "lzfP.h" #if AVOID_ERRNO # define SET_ERRNO(n) #else # include <errno.h> # define SET_ERRNO(n) errno = (n) #endif /* #if (__i386 || __amd64) && __GNUC__ >= 3 # define lzf_movsb(dst, src, len) \ asm ("rep movsb" \ : "=D" (dst), "=S" (src), "=c" (len) \ : "0" (dst), "1" (src), "2" (len)); #endif */ unsigned int lzf_decompress (const void *const in_data, unsigned int in_len, void *out_data, unsigned int out_len) { u8 const *ip = (const u8 *)in_data; u8 *op = (u8 *)out_data; u8 const *const in_end = ip + in_len; u8 *const out_end = op + out_len; do { unsigned int ctrl = *ip++; if (ctrl < (1 << 5)) /* literal run */ { ctrl++; if (op + ctrl > out_end) { SET_ERRNO (E2BIG); return 0; } #if CHECK_INPUT if (ip + ctrl > in_end) { SET_ERRNO (EINVAL); return 0; } #endif #ifdef lzf_movsb lzf_movsb (op, ip, ctrl); #else do *op++ = *ip++; while (--ctrl); #endif } else /* back reference */ { unsigned int len = ctrl >> 5; u8 *ref = op - ((ctrl & 0x1f) << 8) - 1; #if CHECK_INPUT if (ip >= in_end) { SET_ERRNO (EINVAL); return 0; } #endif if (len == 7) { len += *ip++; #if CHECK_INPUT if (ip >= in_end) { SET_ERRNO (EINVAL); return 0; } #endif } ref -= *ip++; if (op + len + 2 > out_end) { SET_ERRNO (E2BIG); return 0; } if (ref < (u8 *)out_data) { SET_ERRNO (EINVAL); return 0; } #ifdef lzf_movsb len += 2; lzf_movsb (op, ref, len); #else *op++ = *ref++; *op++ = *ref++; do *op++ = *ref++; while (--len); #endif } } while (ip < in_end); return op - (u8 *)out_data; } disque-1.0-rc1/src/memtest.c000066400000000000000000000223621264176561100157560ustar00rootroot00000000000000/* * Copyright (c) 2009-2012, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <string.h> #include <assert.h> #include <limits.h> #include <errno.h> #include <sys/ioctl.h> #include "config.h" #if (ULONG_MAX == 4294967295UL) #define MEMTEST_32BIT #elif (ULONG_MAX == 18446744073709551615ULL) #define MEMTEST_64BIT #else #error "ULONG_MAX value not supported." #endif #ifdef MEMTEST_32BIT #define ULONG_ONEZERO 0xaaaaaaaaUL #define ULONG_ZEROONE 0x55555555UL #else #define ULONG_ONEZERO 0xaaaaaaaaaaaaaaaaUL #define ULONG_ZEROONE 0x5555555555555555UL #endif static struct winsize ws; size_t progress_printed; /* Printed chars in screen-wide progress bar. */ size_t progress_full; /* How many chars to write to fill the progress bar. */ void memtest_progress_start(char *title, int pass) { int j; printf("\x1b[H\x1b[2J"); /* Cursor home, clear screen. */ /* Fill with dots. */ for (j = 0; j < ws.ws_col*(ws.ws_row-2); j++) printf("."); printf("Please keep the test running several minutes per GB of memory.\n"); printf("Also check http://www.memtest86.com/ and http://pyropus.ca/software/memtester/"); printf("\x1b[H\x1b[2K"); /* Cursor home, clear current line. */ printf("%s [%d]\n", title, pass); /* Print title. */ progress_printed = 0; progress_full = ws.ws_col*(ws.ws_row-3); fflush(stdout); } void memtest_progress_end(void) { printf("\x1b[H\x1b[2J"); /* Cursor home, clear screen. */ } void memtest_progress_step(size_t curr, size_t size, char c) { size_t chars = ((unsigned long long)curr*progress_full)/size, j; for (j = 0; j < chars-progress_printed; j++) printf("%c",c); progress_printed = chars; fflush(stdout); } /* Test that addressing is fine. Every location is populated with its own * address, and finally verified. This test is very fast and may quickly * detect big issues with the memory subsystem.
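* For example, if an address line is stuck, two different locations such
* as 0x100000 and 0x104000 alias to the same physical cell: the second
* write overwrites the first, so one of the two readbacks returns the
* other location's address and the test aborts with an error. A uniform
* fill pattern could not tell the two locations apart.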
*/ void memtest_addressing(unsigned long *l, size_t bytes) { unsigned long words = bytes/sizeof(unsigned long); unsigned long j, *p; /* Fill */ p = l; for (j = 0; j < words; j++) { *p = (unsigned long)p; p++; if ((j & 0xffff) == 0) memtest_progress_step(j,words*2,'A'); } /* Test */ p = l; for (j = 0; j < words; j++) { if (*p != (unsigned long)p) { printf("\n*** MEMORY ADDRESSING ERROR: %p contains %lu\n", (void*) p, *p); exit(1); } p++; if ((j & 0xffff) == 0) memtest_progress_step(j+words,words*2,'A'); } } /* Fill words stepping a single page at every write, so we continue to * touch all the pages in the smallest amount of time reducing the * effectiveness of caches, and making it hard for the OS to transfer * pages on the swap. */ void memtest_fill_random(unsigned long *l, size_t bytes) { unsigned long step = 4096/sizeof(unsigned long); unsigned long words = bytes/sizeof(unsigned long)/2; unsigned long iwords = words/step; /* words per iteration */ unsigned long off, w, *l1, *l2; assert((bytes & 4095) == 0); for (off = 0; off < step; off++) { l1 = l+off; l2 = l1+words; for (w = 0; w < iwords; w++) { #ifdef MEMTEST_32BIT *l1 = *l2 = ((unsigned long) (rand()&0xffff)) | (((unsigned long) (rand()&0xffff)) << 16); #else *l1 = *l2 = ((unsigned long) (rand()&0xffff)) | (((unsigned long) (rand()&0xffff)) << 16) | (((unsigned long) (rand()&0xffff)) << 32) | (((unsigned long) (rand()&0xffff)) << 48); #endif l1 += step; l2 += step; if ((w & 0xffff) == 0) memtest_progress_step(w+iwords*off,words,'R'); } } } /* Like memtest_fill_random() but uses the two specified values to fill * memory, in an alternated way (v1|v2|v1|v2|...) */ void memtest_fill_value(unsigned long *l, size_t bytes, unsigned long v1, unsigned long v2, char sym) { unsigned long step = 4096/sizeof(unsigned long); unsigned long words = bytes/sizeof(unsigned long)/2; unsigned long iwords = words/step; /* words per iteration */ unsigned long off, w, *l1, *l2, v; assert((bytes & 4095) == 0); for (off = 0; off < step; off++) { l1 = l+off; l2 = l1+words; v = (off & 1) ? 
v2 : v1; for (w = 0; w < iwords; w++) { #ifdef MEMTEST_32BIT *l1 = *l2 = ((unsigned long) v) | (((unsigned long) v) << 16); #else *l1 = *l2 = ((unsigned long) v) | (((unsigned long) v) << 16) | (((unsigned long) v) << 32) | (((unsigned long) v) << 48); #endif l1 += step; l2 += step; if ((w & 0xffff) == 0) memtest_progress_step(w+iwords*off,words,sym); } } } void memtest_compare(unsigned long *l, size_t bytes) { unsigned long words = bytes/sizeof(unsigned long)/2; unsigned long w, *l1, *l2; assert((bytes & 4095) == 0); l1 = l; l2 = l1+words; for (w = 0; w < words; w++) { if (*l1 != *l2) { printf("\n*** MEMORY ERROR DETECTED: %p != %p (%lu vs %lu)\n", (void*)l1, (void*)l2, *l1, *l2); exit(1); } l1 ++; l2 ++; if ((w & 0xffff) == 0) memtest_progress_step(w,words,'='); } } void memtest_compare_times(unsigned long *m, size_t bytes, int pass, int times) { int j; for (j = 0; j < times; j++) { memtest_progress_start("Compare",pass); memtest_compare(m,bytes); memtest_progress_end(); } } void memtest_test(size_t megabytes, int passes) { size_t bytes = megabytes*1024*1024; unsigned long *m = malloc(bytes); int pass = 0; if (m == NULL) { fprintf(stderr,"Unable to allocate %zu megabytes: %s", megabytes, strerror(errno)); exit(1); } while (pass != passes) { pass++; memtest_progress_start("Addressing test",pass); memtest_addressing(m,bytes); memtest_progress_end(); memtest_progress_start("Random fill",pass); memtest_fill_random(m,bytes); memtest_progress_end(); memtest_compare_times(m,bytes,pass,4); memtest_progress_start("Solid fill",pass); memtest_fill_value(m,bytes,0,(unsigned long)-1,'S'); memtest_progress_end(); memtest_compare_times(m,bytes,pass,4); memtest_progress_start("Checkerboard fill",pass); memtest_fill_value(m,bytes,ULONG_ONEZERO,ULONG_ZEROONE,'C'); memtest_progress_end(); memtest_compare_times(m,bytes,pass,4); } free(m); } void memtest_non_destructive_invert(void *addr, size_t size) { volatile unsigned long *p = addr; size_t words = size / sizeof(unsigned long); size_t j; /* Invert */ for (j = 0; j < words; j++) p[j] = ~p[j]; } void memtest_non_destructive_swap(void *addr, size_t size) { volatile unsigned long *p = addr; size_t words = size / sizeof(unsigned long); size_t j; /* Swap */ for (j = 0; j < words; j += 2) { unsigned long a, b; a = p[j]; b = p[j+1]; p[j] = b; p[j+1] = a; } } void memtest(size_t megabytes, int passes) { if (ioctl(1, TIOCGWINSZ, &ws) == -1) { ws.ws_col = 80; ws.ws_row = 20; } memtest_test(megabytes,passes); printf("\nYour memory passed this test.\n"); printf("Please if you are still in doubt use the following two tools:\n"); printf("1) memtest86: http://www.memtest86.com/\n"); printf("2) memtester: http://pyropus.ca/software/memtester/\n"); exit(0); } disque-1.0-rc1/src/mkreleasehdr.sh000077500000000000000000000010711264176561100171330ustar00rootroot00000000000000#!/bin/sh GIT_SHA1=`(git show-ref --head --hash=8 2> /dev/null || echo 00000000) | head -n1` GIT_DIRTY=`git diff --no-ext-diff 2> /dev/null | wc -l` BUILD_ID=`uname -n`"-"`date +%s` test -f release.h || touch release.h (cat release.h | grep SHA1 | grep $GIT_SHA1) && \ (cat release.h | grep DIRTY | grep $GIT_DIRTY) && exit 0 # Already up-to-date echo "#define DISQUE_GIT_SHA1 \"$GIT_SHA1\"" > release.h echo "#define DISQUE_GIT_DIRTY \"$GIT_DIRTY\"" >> release.h echo "#define DISQUE_BUILD_ID \"$BUILD_ID\"" >> release.h touch release.c # Force recompile of release.c disque-1.0-rc1/src/networking.c000066400000000000000000001632001264176561100164640ustar00rootroot00000000000000/* * Copyright (c) 2009-2012, 
Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Disque nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "server.h" #include <sys/uio.h> /* for writev() */ #include <math.h> /* for isinf() */ static void setProtocolError(client *c, int pos); /* Return the size consumed from the allocator, for the specified SDS string, * including internal fragmentation. This function is used in order to compute * the client output buffer size. */ size_t sdsZmallocSize(sds s) { void *sh = sdsAllocPtr(s); return zmalloc_size(sh); } /* Return the amount of memory used by the sds string at object->ptr * for a string object. */ size_t getStringObjectSdsUsedMemory(robj *o) { serverAssertWithInfo(NULL,o,o->type == OBJ_STRING); switch(o->encoding) { case OBJ_ENCODING_RAW: return sdsZmallocSize(o->ptr); case OBJ_ENCODING_EMBSTR: return zmalloc_size(o)-sizeof(robj); default: return 0; /* Just integer encoding for now. */ } } /* Client.reply list dup and free methods. */ void *dupClientReplyValue(void *o) { return sdsdup(o); } void freeClientReplyValue(void *o) { sdsfree(o); } int listMatchObjects(void *a, void *b) { return equalStringObjects(a,b); } client *createClient(int fd) { client *c = zmalloc(sizeof(client)); /* Passing -1 as fd it is possible to create a non connected client. * This is useful since all the commands need to be executed * in the context of a client. When commands are executed in other * contexts (for instance a Lua script) we need a non connected client.
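* A typical user of such a client is the code that replays the AOF at
* startup, which conceptually does something like the following (a sketch
* only, the real loading code lives elsewhere and may build the client
* differently):
*
*   client *fake = createClient(-1);
*   fake->flags |= CLIENT_AOF_CLIENT;
*   ... build fake->argv / fake->argc and call the command proc ...
*
* Replies produced while replaying are then simply discarded, see the
* CLIENT_AOF_CLIENT check in prepareClientToWrite() below.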
*/ if (fd != -1) { anetNonBlock(NULL,fd); anetEnableTcpNoDelay(NULL,fd); if (server.tcpkeepalive) anetKeepAlive(NULL,fd,server.tcpkeepalive); if (aeCreateFileEvent(server.el,fd,AE_READABLE, readQueryFromClient, c) == AE_ERR) { close(fd); zfree(c); return NULL; } } c->id = server.next_client_id++; c->fd = fd; c->name = NULL; c->bufpos = 0; c->querybuf = sdsempty(); c->querybuf_peak = 0; c->reqtype = 0; c->argc = 0; c->argv = NULL; c->cmd = c->lastcmd = NULL; c->multibulklen = 0; c->bulklen = -1; c->sentlen = 0; c->flags = 0; c->ctime = c->lastinteraction = server.unixtime; c->authenticated = 0; c->reply = listCreate(); c->reply_bytes = 0; c->obuf_soft_limit_reached_time = 0; listSetFreeMethod(c->reply,freeClientReplyValue); listSetDupMethod(c->reply,dupClientReplyValue); c->btype = BLOCKED_NONE; c->bpop.timeout = 0; c->bpop.flags = 0; c->bpop.job = NULL; c->bpop.queues = dictCreate(&setDictType,NULL); c->peerid = NULL; if (fd != -1) listAddNodeTail(server.clients,c); return c; } /* This function is called every time we are going to transmit new data * to the client. The behavior is the following: * * If the client should receive new data (normal clients will) the function * returns C_OK, and makes sure to install the write handler in our event * loop so that when the socket is writable new data gets written. * * If the client should not receive new data, because it is a fake client * (used to load AOF in memory) or because the setup of the write * handler failed, the function returns C_ERR. * * It typically gets called every time a reply is built, before adding more * data to the client's output buffers. If the function returns C_ERR no * data should be appended to the output buffers. */ int prepareClientToWrite(client *c) { /* CLIENT REPLY OFF / SKIP handling: don't send replies. */ if (c->flags & (CLIENT_REPLY_OFF|CLIENT_REPLY_SKIP)) return C_ERR; if (c->flags & CLIENT_AOF_CLIENT) return C_ERR; if (c->fd <= 0) return C_ERR; /* Fake client */ /* Schedule the client to write the output buffers to the socket only * if not already done (there were no pending writes already and the client * was not yet flagged), and, for slaves, if the slave can actually * receive writes at this stage. */ if (!clientHasPendingReplies(c) && !(c->flags & CLIENT_PENDING_WRITE)) { /* Here instead of installing the write handler, we just flag the * client and put it into a list of clients that have something * to write to the socket. This way before re-entering the event * loop, we can try to directly write to the client sockets avoiding * a system call. We'll only really install the write handler if * we'll not be able to write the whole reply at once. */ c->flags |= CLIENT_PENDING_WRITE; listAddNodeHead(server.clients_pending_write,c); } /* Authorize the caller to queue in the output buffer of this client. */ return C_OK; } /* ----------------------------------------------------------------------------- * Low level functions to add more data to output buffers. * -------------------------------------------------------------------------- */ int _addReplyToBuffer(client *c, const char *s, size_t len) { size_t available = sizeof(c->buf)-c->bufpos; if (c->flags & CLIENT_CLOSE_AFTER_REPLY) return C_OK; /* If there already are entries in the reply list, we cannot * add anything more to the static buffer. */ if (listLength(c->reply) > 0) return C_ERR; /* Check that the buffer has enough space available for this string.
*/ if (len > available) return C_ERR; memcpy(c->buf+c->bufpos,s,len); c->bufpos+=len; return C_OK; } void _addReplyObjectToList(client *c, robj *o) { if (c->flags & CLIENT_CLOSE_AFTER_REPLY) return; if (listLength(c->reply) == 0) { sds s = sdsdup(o->ptr); listAddNodeTail(c->reply,s); c->reply_bytes += sdslen(s); } else { listNode *ln = listLast(c->reply); sds tail = listNodeValue(ln); /* Append to this object when possible. If tail == NULL it was * set via addDeferredMultiBulkLength(). */ if (tail && sdslen(tail)+sdslen(o->ptr) <= PROTO_REPLY_CHUNK_BYTES) { tail = sdscatsds(tail,o->ptr); listNodeValue(ln) = tail; c->reply_bytes += sdslen(o->ptr); } else { sds s = sdsdup(o->ptr); listAddNodeTail(c->reply,s); c->reply_bytes += sdslen(s); } } asyncCloseClientOnOutputBufferLimitReached(c); } /* This method takes responsibility over the sds. When it is no longer * needed it will be free'd, otherwise it ends up in a robj. */ void _addReplySdsToList(client *c, sds s) { if (c->flags & CLIENT_CLOSE_AFTER_REPLY) { sdsfree(s); return; } if (listLength(c->reply) == 0) { listAddNodeTail(c->reply,s); c->reply_bytes += sdslen(s); } else { listNode *ln = listLast(c->reply); sds tail = listNodeValue(ln); /* Append to this object when possible. If tail == NULL it was * set via addDeferredMultiBulkLength(). */ if (tail && sdslen(tail)+sdslen(s) <= PROTO_REPLY_CHUNK_BYTES) { tail = sdscatsds(tail,s); listNodeValue(ln) = tail; c->reply_bytes += sdslen(s); sdsfree(s); } else { listAddNodeTail(c->reply,s); c->reply_bytes += sdslen(s); } } asyncCloseClientOnOutputBufferLimitReached(c); } void _addReplyStringToList(client *c, const char *s, size_t len) { if (c->flags & CLIENT_CLOSE_AFTER_REPLY) return; if (listLength(c->reply) == 0) { sds node = sdsnewlen(s,len); listAddNodeTail(c->reply,node); c->reply_bytes += len; } else { listNode *ln = listLast(c->reply); sds tail = listNodeValue(ln); /* Append to this object when possible. If tail == NULL it was * set via addDeferredMultiBulkLength(). */ if (tail && sdslen(tail)+len <= PROTO_REPLY_CHUNK_BYTES) { tail = sdscatlen(tail,s,len); listNodeValue(ln) = tail; c->reply_bytes += len; } else { sds node = sdsnewlen(s,len); listAddNodeTail(c->reply,node); c->reply_bytes += len; } } asyncCloseClientOnOutputBufferLimitReached(c); } /* ----------------------------------------------------------------------------- * Higher level functions to queue data on the client output buffer. * The following functions are the ones that commands implementations will call. * -------------------------------------------------------------------------- */ void addReply(client *c, robj *obj) { if (prepareClientToWrite(c) != C_OK) return; /* This is an important place where we can avoid copy-on-write * when there is a saving child running, avoiding touching the * refcount field of the object if it's not needed. * * If the encoding is RAW and there is room in the static buffer * we'll be able to send the object to the client without * messing with its page. */ if (sdsEncodedObject(obj)) { if (_addReplyToBuffer(c,obj->ptr,sdslen(obj->ptr)) != C_OK) _addReplyObjectToList(c,obj); } else if (obj->encoding == OBJ_ENCODING_INT) { /* Optimization: if there is room in the static buffer for 32 bytes * (more than the max chars a 64 bit integer can take as string) we * avoid decoding the object and go for the lower level approach. 
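* For instance an integer-encoded object holding 12345 is rendered by
* ll2string() directly into the static output buffer as the string
* "12345", skipping the temporary string object that getDecodedObject()
* would otherwise allocate.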
*/ if (listLength(c->reply) == 0 && (sizeof(c->buf) - c->bufpos) >= 32) { char buf[32]; int len; len = ll2string(buf,sizeof(buf),(long)obj->ptr); if (_addReplyToBuffer(c,buf,len) == C_OK) return; /* else... continue with the normal code path, but should never * happen actually since we verified there is room. */ } obj = getDecodedObject(obj); if (_addReplyToBuffer(c,obj->ptr,sdslen(obj->ptr)) != C_OK) _addReplyObjectToList(c,obj); decrRefCount(obj); } else { serverPanic("Wrong obj->encoding in addReply()"); } } void addReplySds(client *c, sds s) { if (prepareClientToWrite(c) != C_OK) { /* The caller expects the sds to be free'd. */ sdsfree(s); return; } if (_addReplyToBuffer(c,s,sdslen(s)) == C_OK) { sdsfree(s); } else { /* This method free's the sds when it is no longer needed. */ _addReplySdsToList(c,s); } } void addReplyString(client *c, const char *s, size_t len) { if (prepareClientToWrite(c) != C_OK) return; if (_addReplyToBuffer(c,s,len) != C_OK) _addReplyStringToList(c,s,len); } void addReplyErrorLength(client *c, const char *s, size_t len) { addReplyString(c,"-ERR ",5); addReplyString(c,s,len); addReplyString(c,"\r\n",2); } void addReplyError(client *c, const char *err) { addReplyErrorLength(c,err,strlen(err)); } void addReplyErrorFormat(client *c, const char *fmt, ...) { size_t l, j; va_list ap; va_start(ap,fmt); sds s = sdscatvprintf(sdsempty(),fmt,ap); va_end(ap); /* Make sure there are no newlines in the string, otherwise invalid protocol * is emitted. */ l = sdslen(s); for (j = 0; j < l; j++) { if (s[j] == '\r' || s[j] == '\n') s[j] = ' '; } addReplyErrorLength(c,s,sdslen(s)); sdsfree(s); } void addReplyStatusLength(client *c, const char *s, size_t len) { addReplyString(c,"+",1); addReplyString(c,s,len); addReplyString(c,"\r\n",2); } void addReplyStatus(client *c, const char *status) { addReplyStatusLength(c,status,strlen(status)); } void addReplyStatusFormat(client *c, const char *fmt, ...) { va_list ap; va_start(ap,fmt); sds s = sdscatvprintf(sdsempty(),fmt,ap); va_end(ap); addReplyStatusLength(c,s,sdslen(s)); sdsfree(s); } /* Adds an empty object to the reply list that will contain the multi bulk * length, which is not known when this function is called. */ void *addDeferredMultiBulkLength(client *c) { /* Note that we install the write event here even if the object is not * ready to be sent, since we are sure that before returning to the * event loop setDeferredMultiBulkLength() will be called. */ if (prepareClientToWrite(c) != C_OK) return NULL; listAddNodeTail(c->reply,NULL); /* NULL is our placeholder. */ return listLast(c->reply); } /* Populate the length object and try gluing it to the next chunk. */ void setDeferredMultiBulkLength(client *c, void *node, long length) { listNode *ln = (listNode*)node; sds len, next; /* Abort when *node is NULL: when the client should not accept writes * we return NULL in addDeferredMultiBulkLength() */ if (node == NULL) return; len = sdscatprintf(sdsnewlen("*",1),"%ld\r\n",length); listNodeValue(ln) = len; c->reply_bytes += sdslen(len); if (ln->next != NULL) { next = listNodeValue(ln->next); /* Only glue when the next node is non-NULL (an sds in this case) */ if (next != NULL) { len = sdscatsds(len,next); listDelNode(c->reply,ln->next); listNodeValue(ln) = len; /* No need to update c->reply_bytes: we are just moving the same * amount of bytes from one node to another. 
             */
        }
    }
    asyncCloseClientOnOutputBufferLimitReached(c);
}

/* Add a double as a bulk reply */
void addReplyDouble(client *c, double d) {
    char dbuf[128], sbuf[128];
    int dlen, slen;
    if (isinf(d)) {
        /* Libc in odd systems (Hi Solaris!) will format infinity in a
         * different way, so better to handle it explicitly. */
        addReplyBulkCString(c, d > 0 ? "inf" : "-inf");
    } else {
        dlen = snprintf(dbuf,sizeof(dbuf),"%.17g",d);
        slen = snprintf(sbuf,sizeof(sbuf),"$%d\r\n%s\r\n",dlen,dbuf);
        addReplyString(c,sbuf,slen);
    }
}

/* Add a long long as integer reply or bulk len / multi bulk count.
 * Basically this is used to output <prefix><long long><crlf>. */
void addReplyLongLongWithPrefix(client *c, long long ll, char prefix) {
    char buf[128];
    int len;

    /* Things like $3\r\n or *2\r\n are emitted very often by the protocol
     * so we have a few shared objects to use if the integer is small
     * like it is most of the time. */
    if (prefix == '*' && ll < OBJ_SHARED_BULKHDR_LEN) {
        addReply(c,shared.mbulkhdr[ll]);
        return;
    } else if (prefix == '$' && ll < OBJ_SHARED_BULKHDR_LEN) {
        addReply(c,shared.bulkhdr[ll]);
        return;
    }

    buf[0] = prefix;
    len = ll2string(buf+1,sizeof(buf)-1,ll);
    buf[len+1] = '\r';
    buf[len+2] = '\n';
    addReplyString(c,buf,len+3);
}

void addReplyLongLong(client *c, long long ll) {
    if (ll == 0)
        addReply(c,shared.czero);
    else if (ll == 1)
        addReply(c,shared.cone);
    else
        addReplyLongLongWithPrefix(c,ll,':');
}

void addReplyMultiBulkLen(client *c, long length) {
    if (length < OBJ_SHARED_BULKHDR_LEN)
        addReply(c,shared.mbulkhdr[length]);
    else
        addReplyLongLongWithPrefix(c,length,'*');
}

/* Create the length prefix of a bulk reply, example: $2234 */
void addReplyBulkLen(client *c, robj *obj) {
    size_t len;

    if (sdsEncodedObject(obj)) {
        len = sdslen(obj->ptr);
    } else {
        long n = (long)obj->ptr;

        /* Compute how many bytes this integer will take as a radix 10
         * string. */
        len = 1;
        if (n < 0) {
            len++;
            n = -n;
        }
        while((n = n/10) != 0) {
            len++;
        }
    }

    if (len < OBJ_SHARED_BULKHDR_LEN)
        addReply(c,shared.bulkhdr[len]);
    else
        addReplyLongLongWithPrefix(c,len,'$');
}

/* Add a Disque Object as a bulk reply */
void addReplyBulk(client *c, robj *obj) {
    addReplyBulkLen(c,obj);
    addReply(c,obj);
    addReply(c,shared.crlf);
}

/* Add a C buffer as bulk reply */
void addReplyBulkCBuffer(client *c, const void *p, size_t len) {
    addReplyLongLongWithPrefix(c,len,'$');
    addReplyString(c,p,len);
    addReply(c,shared.crlf);
}

/* Add sds to reply (takes ownership of sds and frees it) */
void addReplyBulkSds(client *c, sds s) {
    addReplySds(c,sdscatfmt(sdsempty(),"$%u\r\n",
        (unsigned long)sdslen(s)));
    addReplySds(c,s);
    addReply(c,shared.crlf);
}

/* Add a C nul term string as bulk reply */
void addReplyBulkCString(client *c, const char *s) {
    if (s == NULL) {
        addReply(c,shared.nullbulk);
    } else {
        addReplyBulkCBuffer(c,s,strlen(s));
    }
}

/* Add a long long as a bulk reply */
void addReplyBulkLongLong(client *c, long long ll) {
    char buf[64];
    int len;

    len = ll2string(buf,64,ll);
    addReplyBulkCBuffer(c,buf,len);
}

/* Copy 'src' client output buffers into 'dst' client output buffers.
 * The function takes care of freeing the old output buffers of the
 * destination client. */
void copyClientOutputBuffer(client *dst, client *src) {
    listRelease(dst->reply);
    dst->reply = listDup(src->reply);
    memcpy(dst->buf,src->buf,src->bufpos);
    dst->bufpos = src->bufpos;
    dst->reply_bytes = src->reply_bytes;
}

/* Return true if the specified client has pending reply buffers to write to
 * the socket.
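 *
 * "Pending" covers both the fixed c->buf accumulator (bufpos > 0) and
 * any overflow chunks still queued on the c->reply list.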
 */
int clientHasPendingReplies(client *c) {
    return c->bufpos || listLength(c->reply);
}

#define MAX_ACCEPTS_PER_CALL 1000
static void acceptCommonHandler(int fd, int flags) {
    client *c;
    if ((c = createClient(fd)) == NULL) {
        serverLog(LL_WARNING,
            "Error registering fd event for the new client: %s (fd=%d)",
            strerror(errno),fd);
        close(fd); /* May be already closed, just ignore errors */
        return;
    }
    /* If the maxclients directive is set and this is one client more...
     * close the connection. Note that we create the client first and
     * check this condition afterwards, since by then the socket is already
     * in non-blocking mode and we can send the error essentially for free
     * using the kernel I/O buffers. */
    if (listLength(server.clients) > server.maxclients) {
        char *err = "-ERR max number of clients reached\r\n";

        /* That's a best effort error message, don't check write errors */
        if (write(c->fd,err,strlen(err)) == -1) {
            /* Nothing to do, just to avoid the warning... */
        }
        server.stat_rejected_conn++;
        freeClient(c);
        return;
    }
    server.stat_numconnections++;
    c->flags |= flags;
}

void acceptTcpHandler(aeEventLoop *el, int fd, void *privdata, int mask) {
    int cport, cfd, max = MAX_ACCEPTS_PER_CALL;
    char cip[NET_IP_STR_LEN];
    UNUSED(el);
    UNUSED(mask);
    UNUSED(privdata);

    while(max--) {
        cfd = anetTcpAccept(server.neterr, fd, cip, sizeof(cip), &cport);
        if (cfd == ANET_ERR) {
            if (errno != EWOULDBLOCK)
                serverLog(LL_WARNING,
                    "Accepting client connection: %s", server.neterr);
            return;
        }
        serverLog(LL_VERBOSE,"Accepted %s:%d", cip, cport);
        acceptCommonHandler(cfd,0);
    }
}

void acceptUnixHandler(aeEventLoop *el, int fd, void *privdata, int mask) {
    int cfd, max = MAX_ACCEPTS_PER_CALL;
    UNUSED(el);
    UNUSED(mask);
    UNUSED(privdata);

    while(max--) {
        cfd = anetUnixAccept(server.neterr, fd);
        if (cfd == ANET_ERR) {
            if (errno != EWOULDBLOCK)
                serverLog(LL_WARNING,
                    "Accepting client connection: %s", server.neterr);
            return;
        }
        serverLog(LL_VERBOSE,"Accepted connection to %s", server.unixsocket);
        acceptCommonHandler(cfd,CLIENT_UNIX_SOCKET);
    }
}

static void freeClientArgv(client *c) {
    int j;
    for (j = 0; j < c->argc; j++)
        decrRefCount(c->argv[j]);
    c->argc = 0;
    c->cmd = NULL;
}

/* Remove the specified client from global lists where the client could
 * be referenced. This is used by freeClient(). */
void unlinkClient(client *c) {
    listNode *ln;

    /* If this is marked as current client unset it. */
    if (server.current_client == c) server.current_client = NULL;

    /* Certain operations must be done only if the client has an active socket.
     * If the client was already unlinked or if it's a "fake client" the
     * fd is already set to -1. */
    if (c->fd != -1) {
        /* Remove from the list of active clients. */
        ln = listSearchKey(server.clients,c);
        serverAssert(ln != NULL);
        listDelNode(server.clients,ln);

        /* Unregister async I/O handlers and close the socket. */
        aeDeleteFileEvent(server.el,c->fd,AE_READABLE);
        aeDeleteFileEvent(server.el,c->fd,AE_WRITABLE);
        close(c->fd);
        c->fd = -1;
    }

    /* Remove from the list of pending writes if needed. */
    if (c->flags & CLIENT_PENDING_WRITE) {
        ln = listSearchKey(server.clients_pending_write,c);
        serverAssert(ln != NULL);
        listDelNode(server.clients_pending_write,ln);
    }

    /* When client was just unblocked because of a blocking operation,
     * remove it from the list of unblocked clients.
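     *
     * A client sits on server.unblocked_clients between the moment it is
     * unblocked and the next event loop iteration, so it must be removed
     * here or the list would be left with a dangling pointer once the
     * client is freed.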
     */
    if (c->flags & CLIENT_UNBLOCKED) {
        ln = listSearchKey(server.unblocked_clients,c);
        serverAssert(ln != NULL);
        listDelNode(server.unblocked_clients,ln);
    }
}

void freeClient(client *c) {
    listNode *ln;

    /* Free the query buffer */
    sdsfree(c->querybuf);
    c->querybuf = NULL;

    /* Deallocate structures used to block on blocking ops. */
    if (c->flags & CLIENT_BLOCKED) unblockClient(c);
    dictRelease(c->bpop.queues);

    /* Free data structures. */
    listRelease(c->reply);
    freeClientArgv(c);

    /* Unlink the client: this will close the socket, remove the I/O
     * handlers, and remove references to the client from different
     * places where active clients may be referenced. */
    unlinkClient(c);

    /* Monitors cleanup. */
    if (c->flags & CLIENT_MONITOR) {
        ln = listSearchKey(server.monitors,c);
        serverAssert(ln != NULL);
        listDelNode(server.monitors,ln);
    }

    /* If this client was scheduled for async freeing we need to remove it
     * from the queue. */
    if (c->flags & CLIENT_CLOSE_ASAP) {
        ln = listSearchKey(server.clients_to_close,c);
        serverAssert(ln != NULL);
        listDelNode(server.clients_to_close,ln);
    }

    /* Release other dynamically allocated client structure fields,
     * and finally release the client structure itself. */
    if (c->name) decrRefCount(c->name);
    zfree(c->argv);
    sdsfree(c->peerid);
    zfree(c);
}

/* Schedule a client to free it at a safe time in the serverCron() function.
 * This function is useful when we need to terminate a client but we are in
 * a context where calling freeClient() is not possible, because the client
 * should be valid for the continuation of the flow of the program. */
void freeClientAsync(client *c) {
    if (c->flags & CLIENT_CLOSE_ASAP) return;
    c->flags |= CLIENT_CLOSE_ASAP;
    listAddNodeTail(server.clients_to_close,c);
}

void freeClientsInAsyncFreeQueue(void) {
    while (listLength(server.clients_to_close)) {
        listNode *ln = listFirst(server.clients_to_close);
        client *c = listNodeValue(ln);

        c->flags &= ~CLIENT_CLOSE_ASAP;
        freeClient(c);
        listDelNode(server.clients_to_close,ln);
    }
}

/* Write data in output buffers to client. Return C_OK if the client
 * is still valid after the call, C_ERR if it was freed. */
int writeToClient(int fd, client *c, int handler_installed) {
    ssize_t nwritten = 0, totwritten = 0;
    size_t objlen;
    sds o;

    while(clientHasPendingReplies(c)) {
        if (c->bufpos > 0) {
            nwritten = write(fd,c->buf+c->sentlen,c->bufpos-c->sentlen);
            if (nwritten <= 0) break;
            c->sentlen += nwritten;
            totwritten += nwritten;

            /* If the buffer was sent, set bufpos to zero to continue with
             * the remainder of the reply. */
            if (c->sentlen == c->bufpos) {
                c->bufpos = 0;
                c->sentlen = 0;
            }
        } else {
            o = listNodeValue(listFirst(c->reply));
            objlen = sdslen(o);

            if (objlen == 0) {
                listDelNode(c->reply,listFirst(c->reply));
                continue;
            }

            nwritten = write(fd, o + c->sentlen, objlen - c->sentlen);
            if (nwritten <= 0) break;
            c->sentlen += nwritten;
            totwritten += nwritten;

            /* If we fully sent the object on head go to the next one */
            if (c->sentlen == objlen) {
                listDelNode(c->reply,listFirst(c->reply));
                c->sentlen = 0;
                c->reply_bytes -= objlen;
            }
        }
        /* Note that we avoid sending more than NET_MAX_WRITES_PER_EVENT
         * bytes: in a single threaded server it's a good idea to serve
         * other clients as well, even if a very large reply is going to a
         * super fast link that is always able to accept data (in a real
         * world scenario think about 'KEYS *' against the loopback
         * interface).
         *
         * However if we are over the maxmemory limit we ignore that and
         * just deliver as much data as it is possible to deliver.
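         *
         * Rationale for the exception: output buffers are memory we want
         * to reclaim as fast as possible when over the limit, so draining
         * them takes precedence over fairness among clients.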
         */
        server.stat_net_output_bytes += totwritten;
        if (totwritten > NET_MAX_WRITES_PER_EVENT &&
            (server.maxmemory == 0 ||
             zmalloc_used_memory() < server.maxmemory)) break;
    }
    if (nwritten == -1) {
        if (errno == EAGAIN) {
            nwritten = 0;
        } else {
            serverLog(LL_VERBOSE,
                "Error writing to client: %s", strerror(errno));
            freeClient(c);
            return C_ERR;
        }
    }
    if (totwritten > 0) c->lastinteraction = server.unixtime;
    if (!clientHasPendingReplies(c)) {
        c->sentlen = 0;
        if (handler_installed) aeDeleteFileEvent(server.el,c->fd,AE_WRITABLE);

        /* Close connection after entire reply has been sent. */
        if (c->flags & CLIENT_CLOSE_AFTER_REPLY) {
            freeClient(c);
            return C_ERR;
        }
    }
    return C_OK;
}

/* Write event handler. Just send data to the client. */
void sendReplyToClient(aeEventLoop *el, int fd, void *privdata, int mask) {
    UNUSED(el);
    UNUSED(mask);
    writeToClient(fd,privdata,1);
}

/* This function is called just before entering the event loop, in the hope
 * we can just write the replies to the client output buffer without any
 * need to use a syscall in order to install the writable event handler,
 * get it called, and so forth. */
int handleClientsWithPendingWrites(void) {
    listIter li;
    listNode *ln;
    int processed = listLength(server.clients_pending_write);

    listRewind(server.clients_pending_write,&li);
    while((ln = listNext(&li))) {
        client *c = listNodeValue(ln);
        c->flags &= ~CLIENT_PENDING_WRITE;
        listDelNode(server.clients_pending_write,ln);

        /* Try to write buffers to the client socket. */
        if (writeToClient(c->fd,c,0) == C_ERR) continue;

        /* If there is nothing left, do nothing. Otherwise install
         * the write handler. */
        if (clientHasPendingReplies(c) &&
            aeCreateFileEvent(server.el, c->fd, AE_WRITABLE,
                sendReplyToClient, c) == AE_ERR)
        {
            freeClientAsync(c);
        }
    }
    return processed;
}

/* resetClient prepares the client to process the next command */
void resetClient(client *c) {
    freeClientArgv(c);
    c->reqtype = 0;
    c->multibulklen = 0;
    c->bulklen = -1;

    /* Remove the CLIENT_REPLY_SKIP flag if any so that the reply
     * to the next command will be sent, but set the flag if the command
     * we just processed was "CLIENT REPLY SKIP". */
    c->flags &= ~CLIENT_REPLY_SKIP;
    if (c->flags & CLIENT_REPLY_SKIP_NEXT) {
        c->flags |= CLIENT_REPLY_SKIP;
        c->flags &= ~CLIENT_REPLY_SKIP_NEXT;
    }
}

int processInlineBuffer(client *c) {
    char *newline;
    int argc, j;
    sds *argv, aux;
    size_t querylen;

    /* Search for end of line */
    newline = strchr(c->querybuf,'\n');

    /* Nothing to do without a newline */
    if (newline == NULL) {
        if (sdslen(c->querybuf) > PROTO_INLINE_MAX_SIZE) {
            addReplyError(c,"Protocol error: too big inline request");
            setProtocolError(c,0);
        }
        return C_ERR;
    }

    /* Handle the \r\n case. */
    if (newline && newline != c->querybuf && *(newline-1) == '\r')
        newline--;

    /* Split the input buffer up to the \r\n */
    querylen = newline-(c->querybuf);
    aux = sdsnewlen(c->querybuf,querylen);
    argv = sdssplitargs(aux,&argc);
    sdsfree(aux);
    if (argv == NULL) {
        addReplyError(c,"Protocol error: unbalanced quotes in request");
        setProtocolError(c,0);
        return C_ERR;
    }

    /* Leave data after the first line of the query in the buffer */
    sdsrange(c->querybuf,querylen+2,-1);

    /* Setup argv array on client structure */
    if (c->argv) zfree(c->argv);
    c->argv = zmalloc(sizeof(robj*)*argc);

    /* Create Disque objects for all arguments. */
    for (c->argc = 0, j = 0; j < argc; j++) {
        if (sdslen(argv[j])) {
            c->argv[c->argc] = createObject(OBJ_STRING,argv[j]);
            c->argc++;
        } else {
            sdsfree(argv[j]);
        }
    }
    zfree(argv);
    return C_OK;
}

/* Helper function.
 * Trims the query buffer to make the function that processes
 * multi bulk requests idempotent. */
static void setProtocolError(client *c, int pos) {
    if (server.verbosity >= LL_VERBOSE) {
        sds client = catClientInfoString(sdsempty(),c);
        serverLog(LL_VERBOSE,
            "Protocol error from client: %s", client);
        sdsfree(client);
    }
    c->flags |= CLIENT_CLOSE_AFTER_REPLY;
    sdsrange(c->querybuf,pos,-1);
}

int processMultibulkBuffer(client *c) {
    char *newline = NULL;
    int pos = 0, ok;
    long long ll;

    if (c->multibulklen == 0) {
        /* The client should have been reset */
        serverAssertWithInfo(c,NULL,c->argc == 0);

        /* Multi bulk length cannot be read without a \r\n */
        newline = strchr(c->querybuf,'\r');
        if (newline == NULL) {
            if (sdslen(c->querybuf) > PROTO_INLINE_MAX_SIZE) {
                addReplyError(c,"Protocol error: too big mbulk count string");
                setProtocolError(c,0);
            }
            return C_ERR;
        }

        /* Buffer should also contain \n */
        if (newline-(c->querybuf) > ((signed)sdslen(c->querybuf)-2))
            return C_ERR;

        /* We know for sure there is a whole line since newline != NULL,
         * so go ahead and find out the multi bulk length. */
        serverAssertWithInfo(c,NULL,c->querybuf[0] == '*');
        ok = string2ll(c->querybuf+1,newline-(c->querybuf+1),&ll);
        if (!ok || ll > 1024*1024) {
            addReplyError(c,"Protocol error: invalid multibulk length");
            setProtocolError(c,pos);
            return C_ERR;
        }

        pos = (newline-c->querybuf)+2;
        if (ll <= 0) {
            sdsrange(c->querybuf,pos,-1);
            return C_OK;
        }

        c->multibulklen = ll;

        /* Setup argv array on client structure */
        if (c->argv) zfree(c->argv);
        c->argv = zmalloc(sizeof(robj*)*c->multibulklen);
    }

    serverAssertWithInfo(c,NULL,c->multibulklen > 0);
    while(c->multibulklen) {
        /* Read bulk length if unknown */
        if (c->bulklen == -1) {
            newline = strchr(c->querybuf+pos,'\r');
            if (newline == NULL) {
                if (sdslen(c->querybuf) > PROTO_INLINE_MAX_SIZE) {
                    addReplyError(c,
                        "Protocol error: too big bulk count string");
                    setProtocolError(c,0);
                    return C_ERR;
                }
                break;
            }

            /* Buffer should also contain \n */
            if (newline-(c->querybuf) > ((signed)sdslen(c->querybuf)-2))
                break;

            if (c->querybuf[pos] != '$') {
                addReplyErrorFormat(c,
                    "Protocol error: expected '$', got '%c'",
                    c->querybuf[pos]);
                setProtocolError(c,pos);
                return C_ERR;
            }

            ok = string2ll(c->querybuf+pos+1,newline-(c->querybuf+pos+1),&ll);
            if (!ok || ll < 0 || ll > 512*1024*1024) {
                addReplyError(c,"Protocol error: invalid bulk length");
                setProtocolError(c,pos);
                return C_ERR;
            }

            pos += newline-(c->querybuf+pos)+2;
            if (ll >= PROTO_MBULK_BIG_ARG) {
                size_t qblen;

                /* If we are going to read a large object from the network
                 * try to make it likely that it will start at the
                 * c->querybuf boundary so that we can optimize object
                 * creation avoiding a large copy of data. */
                sdsrange(c->querybuf,pos,-1);
                pos = 0;
                qblen = sdslen(c->querybuf);
                /* Hint the sds library about the number of bytes this
                 * string is going to contain. */
                if (qblen < (size_t)ll+2)
                    c->querybuf = sdsMakeRoomFor(c->querybuf,ll+2-qblen);
            }
            c->bulklen = ll;
        }

        /* Read bulk argument */
        if (sdslen(c->querybuf)-pos < (unsigned)(c->bulklen+2)) {
            /* Not enough data (+2 == trailing \r\n) */
            break;
        } else {
            /* Optimization: if the buffer contains JUST our bulk element
             * instead of creating a new object by *copying* the sds we
             * just use the current sds string. */
            if (pos == 0 &&
                c->bulklen >= PROTO_MBULK_BIG_ARG &&
                (signed) sdslen(c->querybuf) == c->bulklen+2)
            {
                c->argv[c->argc++] = createObject(OBJ_STRING,c->querybuf);
                sdsIncrLen(c->querybuf,-2); /* remove CRLF */
                c->querybuf = sdsempty();
                /* Assume that if we saw a fat argument we'll likely see
                 * another one...
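                 *
                 * Pre-sizing the fresh buffer to bulklen+2 bytes means a
                 * following fat argument can again be carved out of
                 * c->querybuf without a reallocation and copy.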
                 */
                c->querybuf = sdsMakeRoomFor(c->querybuf,c->bulklen+2);
                pos = 0;
            } else {
                c->argv[c->argc++] =
                    createStringObject(c->querybuf+pos,c->bulklen);
                pos += c->bulklen+2;
            }
            c->bulklen = -1;
            c->multibulklen--;
        }
    }

    /* Trim to pos */
    if (pos) sdsrange(c->querybuf,pos,-1);

    /* We're done when c->multibulklen == 0 */
    if (c->multibulklen == 0) return C_OK;

    /* Still not ready to process the command */
    return C_ERR;
}

void processInputBuffer(client *c) {
    server.current_client = c;
    /* Keep processing while there is something in the input buffer */
    while(sdslen(c->querybuf)) {
        /* Return if clients are paused. */
        if (clientsArePaused()) break;

        /* Immediately abort if the client is in the middle of something. */
        if (c->flags & CLIENT_BLOCKED) break;

        /* CLIENT_CLOSE_AFTER_REPLY closes the connection once the reply is
         * written to the client. Make sure to not let the reply grow after
         * this flag has been set (i.e. don't process more commands). */
        if (c->flags & CLIENT_CLOSE_AFTER_REPLY) break;

        /* Determine request type when unknown. */
        if (!c->reqtype) {
            if (c->querybuf[0] == '*') {
                c->reqtype = PROTO_REQ_MULTIBULK;
            } else {
                c->reqtype = PROTO_REQ_INLINE;
            }
        }

        if (c->reqtype == PROTO_REQ_INLINE) {
            if (processInlineBuffer(c) != C_OK) break;
        } else if (c->reqtype == PROTO_REQ_MULTIBULK) {
            if (processMultibulkBuffer(c) != C_OK) break;
        } else {
            serverPanic("Unknown request type");
        }

        /* Multibulk processing could see a <= 0 length. */
        if (c->argc == 0) {
            resetClient(c);
        } else {
            /* Only reset the client when the command was executed. */
            if (processCommand(c) == C_OK)
                resetClient(c);
        }
    }
    server.current_client = NULL;
}

void readQueryFromClient(aeEventLoop *el, int fd, void *privdata, int mask) {
    client *c = (client*) privdata;
    int nread, readlen;
    size_t qblen;
    UNUSED(el);
    UNUSED(mask);

    readlen = PROTO_IOBUF_LEN;
    /* If this is a multi bulk request, and we are processing a bulk argument
     * that is large enough, try to maximize the probability that the query
     * buffer contains exactly the SDS string representing the object, even
     * at the risk of requiring more read(2) calls. This way the function
     * processMultibulkBuffer() can avoid copying buffers to create the
     * Disque Object representing the argument.
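     *
     * Example: for a 1 MB job body the read below is clamped to the bytes
     * still missing from the current bulk argument, so the payload ends up
     * alone at offset 0 of c->querybuf and can be adopted as the argument
     * object without a memcpy.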
     */
    if (c->reqtype == PROTO_REQ_MULTIBULK && c->multibulklen &&
        c->bulklen != -1 && c->bulklen >= PROTO_MBULK_BIG_ARG)
    {
        int remaining = (unsigned)(c->bulklen+2)-sdslen(c->querybuf);

        if (remaining < readlen) readlen = remaining;
    }

    qblen = sdslen(c->querybuf);
    if (c->querybuf_peak < qblen) c->querybuf_peak = qblen;
    c->querybuf = sdsMakeRoomFor(c->querybuf, readlen);
    nread = read(fd, c->querybuf+qblen, readlen);
    if (nread == -1) {
        if (errno == EAGAIN) {
            return;
        } else {
            serverLog(LL_VERBOSE, "Reading from client: %s",strerror(errno));
            freeClient(c);
            return;
        }
    } else if (nread == 0) {
        serverLog(LL_VERBOSE, "Client closed connection");
        freeClient(c);
        return;
    }

    sdsIncrLen(c->querybuf,nread);
    c->lastinteraction = server.unixtime;
    server.stat_net_input_bytes += nread;
    if (sdslen(c->querybuf) > server.client_max_querybuf_len) {
        sds ci = catClientInfoString(sdsempty(),c), bytes = sdsempty();

        bytes = sdscatrepr(bytes,c->querybuf,64);
        serverLog(LL_WARNING,
            "Closing client that reached max query buffer length: %s "
            "(qbuf initial bytes: %s)", ci, bytes);
        sdsfree(ci);
        sdsfree(bytes);
        freeClient(c);
        return;
    }
    processInputBuffer(c);
}

void getClientsMaxBuffers(unsigned long *longest_output_list,
                          unsigned long *biggest_input_buffer) {
    client *c;
    listNode *ln;
    listIter li;
    unsigned long lol = 0, bib = 0;

    listRewind(server.clients,&li);
    while ((ln = listNext(&li)) != NULL) {
        c = listNodeValue(ln);

        if (listLength(c->reply) > lol) lol = listLength(c->reply);
        if (sdslen(c->querybuf) > bib) bib = sdslen(c->querybuf);
    }
    *longest_output_list = lol;
    *biggest_input_buffer = bib;
}

/* This is a helper function for genClientPeerID().
 * It writes the specified ip/port to "peerid" as a null terminated string
 * in the form ip:port if ip does not contain ":" itself, otherwise
 * [ip]:port format is used (for IPv6 addresses basically). */
void formatPeerID(char *peerid, size_t peerid_len, char *ip, int port) {
    if (strchr(ip,':'))
        snprintf(peerid,peerid_len,"[%s]:%d",ip,port);
    else
        snprintf(peerid,peerid_len,"%s:%d",ip,port);
}

/* A Disque "Peer ID" is a colon separated ip:port pair.
 * For IPv4 it's in the form x.y.z.k:port, example: "127.0.0.1:1234".
 * For IPv6 addresses we use [] around the IP part, like in "[::1]:1234".
 * For Unix sockets we use path:0, like in "/tmp/disque:0".
 *
 * A Peer ID always fits inside a buffer of NET_PEER_ID_LEN bytes, including
 * the null term.
 *
 * The function returns C_OK on success, and C_ERR on failure.
 *
 * On failure the function still populates 'peerid' with the "?:0" string
 * in case you want to relax error checking or need to display something
 * anyway (see anetPeerToString implementation for more info). */
int genClientPeerID(client *client, char *peerid, size_t peerid_len) {
    char ip[NET_IP_STR_LEN];
    int port;

    if (client->flags & CLIENT_UNIX_SOCKET) {
        /* Unix socket client. */
        snprintf(peerid,peerid_len,"%s:0",server.unixsocket);
        return C_OK;
    } else {
        /* TCP client. */
        int retval = anetPeerToString(client->fd,ip,sizeof(ip),&port);
        formatPeerID(peerid,peerid_len,ip,port);
        return (retval == -1) ? C_ERR : C_OK;
    }
}

/* This function returns the client peer id, by creating and caching it
 * if client->peerid is NULL, otherwise returning the cached value.
 * The Peer ID never changes during the life of the client, however it
 * is expensive to compute.
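 *
 * Caching is cheap: the sds copy lives on the client structure and is
 * released in freeClient() along with the other per-client fields.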
 */
char *getClientPeerId(client *c) {
    char peerid[NET_PEER_ID_LEN];

    if (c->peerid == NULL) {
        genClientPeerID(c,peerid,sizeof(peerid));
        c->peerid = sdsnew(peerid);
    }
    return c->peerid;
}

/* Concatenate a string representing the state of a client in a human
 * readable format, into the sds string 's'. */
sds catClientInfoString(sds s, client *client) {
    char flags[16], events[3], *p;
    int emask;

    p = flags;
    if (client->flags & CLIENT_MONITOR) *p++ = 'O';
    if (client->flags & CLIENT_BLOCKED) *p++ = 'b';
    if (client->flags & CLIENT_CLOSE_AFTER_REPLY) *p++ = 'c';
    if (client->flags & CLIENT_UNBLOCKED) *p++ = 'u';
    if (client->flags & CLIENT_CLOSE_ASAP) *p++ = 'A';
    if (client->flags & CLIENT_UNIX_SOCKET) *p++ = 'U';
    if (p == flags) *p++ = 'N';
    *p++ = '\0';

    emask = client->fd == -1 ? 0 : aeGetFileEvents(server.el,client->fd);
    p = events;
    if (emask & AE_READABLE) *p++ = 'r';
    if (emask & AE_WRITABLE) *p++ = 'w';
    *p = '\0';
    return sdscatfmt(s,
        "id=%U addr=%s fd=%i name=%s age=%I idle=%I flags=%s qbuf=%U qbuf-free=%U obl=%U oll=%U omem=%U events=%s cmd=%s",
        (unsigned long long) client->id,
        getClientPeerId(client),
        client->fd,
        client->name ? (char*)client->name->ptr : "",
        (long long)(server.unixtime - client->ctime),
        (long long)(server.unixtime - client->lastinteraction),
        flags,
        (unsigned long long) sdslen(client->querybuf),
        (unsigned long long) sdsavail(client->querybuf),
        (unsigned long long) client->bufpos,
        (unsigned long long) listLength(client->reply),
        (unsigned long long) getClientOutputBufferMemoryUsage(client),
        events,
        client->lastcmd ? client->lastcmd->name : "NULL");
}

sds getAllClientsInfoString(void) {
    listNode *ln;
    listIter li;
    client *client;
    sds o = sdsempty();

    o = sdsMakeRoomFor(o,200*listLength(server.clients));
    listRewind(server.clients,&li);
    while ((ln = listNext(&li)) != NULL) {
        client = listNodeValue(ln);
        o = catClientInfoString(o,client);
        o = sdscatlen(o,"\n",1);
    }
    return o;
}

void clientCommand(client *c) {
    listNode *ln;
    listIter li;
    client *client;

    if (!strcasecmp(c->argv[1]->ptr,"list") && c->argc == 2) {
        /* CLIENT LIST */
        sds o = getAllClientsInfoString();
        addReplyBulkCBuffer(c,o,sdslen(o));
        sdsfree(o);
    } else if (!strcasecmp(c->argv[1]->ptr,"reply") && c->argc == 3) {
        /* CLIENT REPLY ON|OFF|SKIP */
        if (!strcasecmp(c->argv[2]->ptr,"on")) {
            c->flags &= ~(CLIENT_REPLY_SKIP|CLIENT_REPLY_OFF);
            addReply(c,shared.ok);
        } else if (!strcasecmp(c->argv[2]->ptr,"off")) {
            c->flags |= CLIENT_REPLY_OFF;
        } else if (!strcasecmp(c->argv[2]->ptr,"skip")) {
            if (!(c->flags & CLIENT_REPLY_OFF))
                c->flags |= CLIENT_REPLY_SKIP_NEXT;
        } else {
            addReply(c,shared.syntaxerr);
            return;
        }
    } else if (!strcasecmp(c->argv[1]->ptr,"kill")) {
        /* CLIENT KILL <ip:port>
         * CLIENT KILL