pax_global_header00006660000000000000000000000064145433300420014510gustar00rootroot0000000000000052 comment=85fdce08bdb752eb81f5954e1dffae185b5a43c6 bup-0.33.3/000077500000000000000000000000001454333004200123645ustar00rootroot00000000000000bup-0.33.3/.cirrus.yml000066400000000000000000000034521454333004200145000ustar00rootroot00000000000000 task: name: debian check/lint root container: image: debian:bullseye cpu: 4 memory: 2 script: | set -xe dev/prep-for-debianish-build python3 dev/system-info BUP_PYTHON_CONFIG=python3-config ./configure --with-pylint=yes make -j6 check task: name: debian long-check container: image: debian:bullseye cpu: 4 memory: 2 script: | set -xe dev/prep-for-debianish-build python3 DEBIAN_FRONTEND=noninteractive apt-get -y install bup export BUP_TEST_OTHER_BUP="$(command -v bup)" "$BUP_TEST_OTHER_BUP" version dev/system-info adduser --disabled-password --gecos '' bup chown -R bup:bup . printf "make -j6 -C %q BUP_PYTHON_CONFIG=python3-config long-check" \ "$(pwd)" | su -l bup task: name: debian check container: image: debian:bullseye cpu: 4 memory: 2 script: | set -xe dev/prep-for-debianish-build python3 dev/system-info adduser --disabled-password --gecos '' bup chown -R bup:bup . 
printf "make -j6 -C %q BUP_PYTHON_CONFIG=python3-config check" \ "$(pwd)" | su -l bup task: name: freebsd check freebsd_instance: image: freebsd-12-4-release-amd64 cpu: 4 memory: 4 script: | set -xe dev/prep-for-freebsd-build python3 dev/system-info BUP_PYTHON_CONFIG=python3.9-config make -j6 check task: name: macos check macos_instance: # https://cirrus-ci.org/guide/macOS/ image: ghcr.io/cirruslabs/macos-monterey-base:latest script: | set -xe dev/prep-for-macos-build python3 brew install bup export BUP_TEST_OTHER_BUP="$(command -v bup)" "$BUP_TEST_OTHER_BUP" version export PKG_CONFIG_PATH=/usr/local/opt/readline/lib/pkgconfig dev/system-info make -j6 BUP_PYTHON_CONFIG=python3-config LDFLAGS=-L/usr/local/lib check bup-0.33.3/.dir-locals.el000066400000000000000000000004331454333004200150150ustar00rootroot00000000000000((nil . ()) (python-mode . ((indent-tabs-mode . nil) (python-indent-offset . 4))) (sh-mode . ((indent-tabs-mode . nil) (sh-basic-offset . 4))) (c-mode . ((indent-tabs-mode . nil) (c-basic-offset . 4) (c-file-style . "BSD")))) bup-0.33.3/.github/000077500000000000000000000000001454333004200137245ustar00rootroot00000000000000bup-0.33.3/.github/CONTRIBUTING.md000066400000000000000000000000721454333004200161540ustar00rootroot00000000000000 Please see https://github.com/bup/bup/blob/main/HACKING bup-0.33.3/.github/PULL_REQUEST_TEMPLATE000066400000000000000000000015661454333004200171360ustar00rootroot00000000000000 We discuss code changes on the mailing list bup-list@googlegroups.com, but if you'd prefer to begin the process with a pull request, that's just fine. We're happy to have the help. In any case, please make sure each commit includes a Signed-off-by: Someone line in the commit message that matches the "Author" line so that we'll be able to include your work in the project. 
See ./SIGNED-OFF-BY for the meaning: https://github.com/bup/bup/blob/main/SIGNED-OFF-BY After you submit the pull request, someone will eventually redirect it to the list for review, and you will of course be included in the conversation there. On the other hand, if you're comfortable with "git send-email" (or the equivalent), please post your patches to the list as described in the "Submitting Patches" section in ./HACKING: https://github.com/bup/bup/blob/main/HACKING bup-0.33.3/.gitignore000066400000000000000000000007521454333004200143600ustar00rootroot00000000000000*.swp *~ /config/bin/make /config/config.h.tmp /config/finished /dev/bup-exec /dev/bup-exec.d /dev/bup-python /dev/bup-python.d /dev/python /dev/python-proposed /dev/python-proposed.d /lib/bup/_helpers.d /lib/bup/_helpers.dll /lib/bup/_helpers.so /lib/bup/checkout_info.py /lib/cmd/bup /lib/cmd/bup.d /nbproject/ /test/int/__init__.pyc /test/lib/__init__.pyc /test/lib/buptest/__init__.pyc /test/lib/buptest/vfs.pyc /test/lib/wvpytest.pyc /test/sampledata/var/ /test/tmp/ \#*# __pycache__/ bup-0.33.3/.pylintrc000066400000000000000000000005251454333004200142330ustar00rootroot00000000000000# -*-conf-*- [GENERAL OPTIONS] [MESSAGES CONTROL] disable=all enable= syntax-error, catching-non-exception, consider-using-in, inconsistent-return-statements, return-in-init, trailing-whitespace, undefined-variable, unidiomatic-typecheck, unused-import, unused-wildcard-import, useless-return, super-init-not-called bup-0.33.3/CODINGSTYLE000066400000000000000000000016331454333004200140760ustar00rootroot00000000000000.. -*-rst-*- C = The C implementations should follow the `kernel/git coding style `_. Python ====== Python code follows `PEP8 `_ with regard to coding style and `PEP257 `_ with regard to docstring style. Multi-line docstrings should have one short summary line, followed by a blank line and a series of paragraphs. The last paragraph should be followed by a line that closes the docstring (no blank line in between). 
Here's an example from ``lib/bup/helpers.py``:: def unlink(f): """Delete a file at path 'f' if it currently exists. Unlike os.unlink(), does not throw an exception if the file didn't already exist. """ ... Module-level docstrings follow exactly the same guidelines but without the blank line between the summary and the details. bup-0.33.3/DESIGN000066400000000000000000001121631454333004200132640ustar00rootroot00000000000000 The Crazy Hacker's Crazy Guide to Bup Craziness =============================================== Despite what you might have heard, bup is not that crazy, and neither are you if you're trying to figure out how it works. But it's also (as of this writing) rather new and the source code doesn't have a lot of comments, so it can be a little confusing at first glance. This document is designed to make it easier for you to get started if you want to add a new feature, fix a bug, or just understand how it all works. Bup Source Code Layout ---------------------- As you're reading this, you might want to look at different parts of the bup source code to follow along and see what we're talking about. bup's code is written primarily in python with a bit of C code in speed-sensitive places. Here are the most important things to know: - The main program is a fairly small C program that mostly just initializes the correct Python interpreter and then runs bup.main.main(). This arrangement was chosen in order to give us more flexibility. For example: - It allows us to avoid [crashing on some Unicode-unfriendly command line arguments](https://bugs.python.org/issue35883) which is critical, given that paths can be arbitrary byte sequences. - It allows more flexibility in dealing with upstream changes like the breakage of our ability to manipulate the processes arguement list on platforms that support it during the Python 3.9 series. - It means that we'll no longer be affected by any changes to the `#!/...` path, i.e. 
if `/usr/bin/python`, or `/usr/bin/python3`, or whatever we'd previously selected during `./configure` were to change from 2 to 3, or 3.5 to 3.20. The version of python bup uses is determined by the `python-config` program selected by `./configure`. It tries to find a suitable default unless `BUP_PYTHON_CONFIG` is set in the environment. - bup supports both internal and external subcommands. The former are the most common, and are all located in lib/bup/cmd/. They must be python modules named lib/bup/cmd/COMMAND.py, and must contain a `main(argv)` function that will be passed the *binary* command line arguments (bytes, not strings). The filename must have underscores for any dashes in the subcommand name. The external subcommands are in lib/cmd/. - The python code is all in lib/bup. - lib/bup/\*.py contains the python code (modules) that bup depends on. That directory name seems a little silly (and worse, redundant) but there seemed to be no better way to let programs write "from bup import index" and have it work. Putting bup in the top level conflicted with the 'bup' command; calling it anything other than 'bup' was fundamentally wrong, and doesn't work when you install bup on your system in /usr/lib somewhere. So we get the annoyingly long paths. Repository Structure ==================== Before you can talk about how bup works, we need to first address what it does. The purpose of bup is essentially to let you "replicate" data between two main data structures: 1. Your computer's filesystem; 2. A bup repository. (Yes, we know, that part also resides in your filesystem. Stop trying to confuse yourself. Don't worry, we'll be plenty confusing enough as it is.) Essentially, copying data from the filesystem to your repository is called "backing stuff up," which is what bup specializes in. Normally you initiate a backup using the 'bup save' command, but that's getting ahead of ourselves. For the inverse operation, ie. 
copying from the repository to your filesystem, you have several choices; the main ones are 'bup restore', 'bup ftp', 'bup fuse', and 'bup web'. Now, those are the basics of backups. In other words, we just spent about half a page telling you that bup backs up and restores data. Are we having fun yet? The next thing you'll want to know is the format of the bup repository, because hacking on bup is rather impossible unless you understand that part. In short, a bup repository is a git repository. If you don't know about git, you'll want to read about it now. A really good article to read is "Git for Computer Scientists" - you can find it in Google. Go read it now. We'll wait. Got it? Okay, so now you're an expert in blobs, trees, commits, and refs, the four building blocks of a git repository. bup uses these four things, and they're formatted in exactly the same way as git does it, so you can use git to manipulate the bup repository if you want, and you probably won't break anything. It's also a comfort to know you can squeeze data out using git, just in case bup fails you, and as a developer, git offers some nice tools (like 'git rev-list' and 'git log' and 'git diff' and 'git show' and so on) that allow you to explore your repository and help debug when things go wrong. Now, bup does use these tools a little bit differently than plain git. We need to do this in order to address two deficiencies in git when used for large backups, namely a) git bogs down and crashes if you give it really large files; b) git is too slow when you give it too many files; and c) git doesn't store detailed filesystem metadata. Let's talk about each of those problems in turn. Handling large files (cmd/split, hashsplit.split_to_blob_or_tree) -------------------- The primary reason git can't handle huge files is that it runs them through xdelta, which generally means it tries to load the entire contents of a file into memory at once. 
If it didn't do this, it would have to store the entire contents of every single revision of every single file, even if you only changed a few bytes of that file. That would be a terribly inefficient use of disk space, and git is well known for its amazingly efficient repository format. Unfortunately, xdelta works great for small files and gets amazingly slow and memory-hungry for large files. For git's main purpose, ie. managing your source code, this isn't a problem. But when backing up your filesystem, you're going to have at least a few large files, and so it's a non-starter. bup has to do something totally different. What bup does instead of xdelta is what we call "hashsplitting." We wanted a general-purpose way to efficiently back up *any* large file that might change in small ways, without storing the entire file every time. In fact, the original versions of bup could only store a single file at a time; surprisingly enough, this was enough to give us a large part of bup's functionality. If you just take your entire filesystem and put it in a giant tarball each day, then send that tarball to bup, bup will be able to efficiently store only the changes to that tarball from one day to the next. For small files, bup's compression won't be as good as xdelta's, but for anything over a few megabytes in size, bup's compression will actually *work*, which is a big advantage over xdelta. How does hashsplitting work? It's deceptively simple. We read through the file one byte at a time, calculating a rolling checksum of the last 64 bytes. (Why 64? No reason. Literally. We picked it out of the air. Probably some other number is better. Feel free to join the mailing list and tell us which one and why.) (The rolling checksum idea is actually stolen from rsync and xdelta, although we use it differently. And they use some kind of variable window size based on a formula we don't totally understand.) 
The original rolling checksum algorithm we used was called "stupidsum," because it was based on the only checksum Avery remembered how to calculate at the time. He also remembered that it was the introductory checksum algorithm in a whole article about how to make good checksums that he read about 15 years ago, and it was thoroughly discredited in that article for being very stupid. But, as so often happens, Avery couldn't remember any better algorithms from the article. So what we got is stupidsum. Since then, we have replaced the stupidsum algorithm with what we call "rollsum," based on code in librsync. It's essentially the same as what rsync does, except we use a fixed window size. (If you're a computer scientist and can demonstrate that some other rolling checksum would be faster and/or better and/or have fewer screwy edge cases, we need your help! Avery's out of control! Join our mailing list! Please! Save us! ... oh boy, I sure hope he doesn't read this) In any case, rollsum seems to do pretty well at its job. You can find it in bupsplit.c. Basically, it converts the last 64 bytes read into a 32-bit integer. What we then do is take the lowest 13 bits of the rollsum, and if they're all 1's, we consider that to be the end of a chunk. This happens on average once every 2^13 = 8192 bytes, so the average chunk size is 8192 bytes. (Why 13 bits? Well, we picked the number at random and... eugh. You're getting the idea, right? Join the mailing list and tell us why we're wrong.) (Incidentally, even though the average chunk size is 8192 bytes, the actual probability distribution of block sizes ends up being non-uniform; if we remember our stats classes correctly, which we probably don't, it's probably an "exponential distribution." The idea is that for each byte in the block, the probability that it's the last block is one in 8192. Thus, the block sizes end up being skewed toward the smaller end. That's not necessarily for the best, but maybe it is. 
Computer science to the rescue? You know the drill.) Anyway, so we're dividing up those files into chunks based on the rolling checksum. Then we store each chunk separately (indexed by its sha1sum) as a git blob. Why do we split this way? Well, because the results are actually really nice. Let's imagine you have a big mysql database dump (produced by mysqldump) and it's basically 100 megs of SQL text. Tomorrow's database dump adds 100 rows to the middle of the file somewhere, soo it's 100.01 megs of text. A naive block splitting algorithm - for example, just dividing the file into 8192-byte blocks - would be a disaster. After the first bit of text has changed, every block after that would have a different boundary, so most of the blocks in the new backup would be different from the previous ones, and you'd have to store the same data all over again. But with hashsplitting, no matter how much data you add, modify, or remove in the middle of the file, all the chunks *before* and *after* the affected chunk are absolutely the same. All that matters to the hashsplitting algorithm is the "separator" sequence, and a single change can only affect, at most, one separator sequence or the bytes between two separator sequences. And because of rollsum, about one in 8192 possible 64-byte sequences is a separator sequence. Like magic, the hashsplit chunking algorithm will chunk your file the same way every time, even without knowing how it had chunked it previously. The next problem is less obvious: after you store your series of chunks as git blobs, how do you store their sequence? Each blob has a 20-byte sha1 identifier, which means the simple list of blobs is going to be 20/8192 = 0.25% of the file length. For a 200GB file, that's 488 megs of just sequence data. As an overhead percentage, 0.25% basically doesn't matter. 488 megs sounds like a lot, but compared to the 200GB you have to store anyway, it's irrelevant. 
What *is* relevant is that 488 megs is a lot of memory you have to use in order to keep track of the list. Worse, if you back up an almost-identical file tomorrow, you'll have *another* 488 meg blob to keep track of, and it'll be almost but not quite the same as last time. Hmm, big files, each one almost the same as the last... you know where this is going, right? Actually no! Ha! We didn't split this list in the same way. We could have, in fact, but it wouldn't have been very "git-like", since we'd like to store the list as a git 'tree' object in order to make sure git's refcounting and reachability analysis doesn't get confused. Never mind the fact that we want you to be able to 'git checkout' your data without any special tools. What we do instead is we extend the hashsplit algorithm a little further using what we call "fanout." Instead of checking just the last 13 bits of the checksum, we use additional checksum bits to produce additional splits. Note that (most likely due to an implementation bug), the next higher bit after the 13 bits (marked 'x'): ...... '..x1'1111'1111'1111 is actually ignored next. Now, let's say we use a 4-bit fanout. That means we'll break a series of chunks into its own tree object whenever the next 4 bits of the rolling checksum are 1, in addition to the 13 lowest ones. Since the 13 lowest bits already have to be 1, the boundary of a group of chunks is necessarily also always the boundary of a particular chunk. And so on. Eventually you'll have too many chunk groups, but you can group them into supergroups by using another 4 bits, and continue from there. What you end up with is an actual tree of blobs - which git 'tree' objects are ideal to represent. And if you think about it, just like the original list of chunks, the tree itself is pretty stable across file modifications. Any one modification will only affect the chunks actually containing the modifications, thus only the groups containing those chunks, and so on up the tree. 
Essentially, the number of changed git objects is O(log n) where n is the number of chunks. Since log 200 GB, using a base of 16 or so, is not a very big number, this is pretty awesome. Remember, any git object we *don't* change in a new backup is one we can reuse from last time, so the deduplication effect is pretty awesome. Better still, the hashsplit-tree format is good for a) random instead of sequential access to data (which you can see in action with 'bup fuse'); and b) quickly showing the differences between huge files (which we haven't really implemented because we don't need it, but you can try 'git diff -M -C -C backup1 backup2 -- filename' for a good start). So now we've split out 200 GB file into about 24 million pieces. That brings us to git limitation number 2. Handling huge numbers of files (git.PackWriter) ------------------------------ git is designed for handling reasonably-sized repositories that change relatively infrequently. (You might think you change your source code "frequently" and that git handles much more frequent changes than, say, svn can handle. But that's not the same kind of "frequently" we're talking about. Imagine you're backing up all the files on your disk, and one of those files is a 100 GB database file with hundreds of daily users. Your disk changes so frequently you can't even back up all the revisions even if you were backing stuff up 24 hours a day. That's "frequently.") git's way of doing things works really nicely for the way software developers write software, but it doesn't really work so well for everything else. The #1 killer is the way it adds new objects to the repository: it creates one file per blob. Then you later run 'git gc' and combine those files into a single file (using highly efficient xdelta compression, and ignoring any files that are no longer relevant). 
'git gc' is slow, but for source code repositories, the resulting super-efficient storage (and associated really fast access to the stored files) is worth it. For backups, it's not; you almost never access your backed-up data, so storage time is paramount, and retrieval time is mostly unimportant. To back up that 200 GB file with git and hashsplitting, you'd have to create 24 million little 8k files, then copy them into a 200 GB packfile, then delete the 24 million files again. That would take about 400 GB of disk space to run, require lots of random disk seeks, and require you to go through your data twice. So bup doesn't do that. It just writes packfiles directly. Luckily, these packfiles are still git-formatted, so git can happily access them once they're written. But that leads us to our next problem. Huge numbers of huge packfiles (midx.py, bloom.py, cmd/midx, cmd/bloom) ------------------------------ Git isn't actually designed to handle super-huge repositories. Most git repositories are small enough that it's reasonable to merge them all into a single packfile, which 'git gc' usually does eventually. The problematic part of large packfiles isn't the packfiles themselves - git is designed to expect the total size of all packs to be larger than available memory, and once it can handle that, it can handle virtually any amount of data about equally efficiently. The problem is the packfile indexes (.idx) files. In bup we call these idx (pronounced "idix") files instead of using the word "index," because the word index is already used for something totally different in git (and thus bup) and we'll become hopelessly confused otherwise. Anyway, each packfile (*.pack) in git has an associated idx (*.idx) that's a sorted list of git object hashes and file offsets. 
If you're looking for a particular object based on its sha1, you open the idx, binary search it to find the right hash, then take the associated file offset, seek to that offset in the packfile, and read the object contents. The performance of the binary search is about O(log n) with the number of hashes in the pack, with an optimized first step (you can read about it elsewhere) that somewhat improves it to O(log(n)-7). Unfortunately, this breaks down a bit when you have *lots* of packs. Say you have 24 million objects (containing around 200 GB of data) spread across 200 packfiles of 1GB each. To look for an object requires you search through about 122000 objects per pack; ceil(log2(122000)-7) = 10, so you'll have to search 10 times. About 7 of those searches will be confined to a single 4k memory page, so you'll probably have to page in about 3-4 pages per file, times 200 files, which makes 600-800 4k pages (2.4-3.6 megs)... every single time you want to look for an object. This brings us to another difference between git's and bup's normal use case. With git, there's a simple optimization possible here: when looking for an object, always search the packfiles in MRU (most recently used) order. Related objects are usually clusted together in a single pack, so you'll usually end up searching around 3 pages instead of 600, which is a tremendous improvement. (And since you'll quickly end up swapping in all the pages in a particular idx file this way, it isn't long before searching for a nearby object doesn't involve any swapping at all.) bup isn't so lucky. git users spend most of their time examining existing objects (looking at logs, generating diffs, checking out branches), which lends itself to the above optimization. bup, on the other hand, spends most of its time looking for *nonexistent* objects in the repository so that it can back them up. 
When you're looking for objects that aren't in the repository, there's no good way to optimize; you have to exhaustively check all the packs, one by one, to ensure that none of them contain the data you want. To improve performance of this sort of operation, bup introduces midx (pronounced "midix" and short for "multi-idx") files. As the name implies, they index multiple packs at a time. Imagine you had a midx file for your 200 packs. midx files are a lot like idx files; they have a lookup table at the beginning that narrows down the initial search, followed by a binary search. Then unlike idx files (which have a fixed-size 256-entry lookup table) midx tables have a variably-sized table that makes sure the entire binary search can be contained to a single page of the midx file. Basically, the lookup table tells you which page to load, and then you binary search inside that page. A typical search thus only requires the kernel to swap in two pages, which is better than results with even a single large idx file. And if you have lots of RAM, eventually the midx lookup table (at least) will end up cached in memory, so only a single page should be needed for each lookup. You generate midx files with 'bup midx'. The downside of midx files is that generating one takes a while, and you have to regenerate it every time you add a few packs. UPDATE: Brandon Low contributed an implementation of "bloom filters", which have even better characteristics than midx for certain uses. Look it up in Wikipedia. He also massively sped up both midx and bloom by rewriting the key parts in C. The nicest thing about bloom filters is we can update them incrementally every time we get a new idx, without regenerating from scratch. That makes the update phase much faster, and means we can also get away with generating midxes less often. midx files are a bup-specific optimization and git doesn't know what to do with them. 
However, since they're stored as separate files, they don't interfere with git's ability to read the repository. Detailed Metadata ----------------- So that's the basic structure of a bup repository, which is also a git repository. There's just one more thing we have to deal with: filesystem metadata. Git repositories are really only intended to store file contents with a small bit of extra information, like symlink targets and executable bits, so we have to store the rest some other way. Bup stores more complete metadata in the VFS in a file named .bupm in each tree. This file contains one entry for each file in the tree object, sorted in the same order as the tree. The first .bupm entry is for the directory itself, i.e. ".", and its name is the empty string, "". Each .bupm entry contains a variable length sequence of records containing the metadata for the corresponding path. Each record records one type of metadata. Current types include a common record type (containing the normal stat information), a symlink target type, a hardlink target type, a POSIX1e ACL type, etc. See metadata.py for the complete list. The .bupm file is optional, and when it's missing, bup will behave as it did before the addition of metadata, and restore files using the tree information. The nice thing about this design is that you can walk through each file in a tree just by opening the tree and the .bupm contents, and iterating through both at the same time. Since the contents of any .bupm file should match the state of the filesystem when it was *indexed*, bup must record the detailed metadata in the index. To do this, bup records four values in the index, the atime, mtime, and ctime (as timespecs), and an integer offset into a secondary "metadata store" which has the same name as the index, but with ".meta" appended. This secondary store contains the encoded Metadata object corresponding to each path in the index. 
Currently, in order to decrease the storage required for the metadata store, bup only writes unique values there, reusing offsets when appropriate across the index. The effectiveness of this approach relies on the expectation that there will be many duplicate metadata records. Storing the full timestamps in the index is intended to make that more likely, because it makes it unnecessary to record those values in the secondary store. So bup clears them before encoding the Metadata objects destined for the index, and timestamp differences don't contribute to the uniqueness of the metadata. Bup supports recording and restoring hardlinks, and it does so by tracking sets of paths that correspond to the same dev/inode pair when indexing. This information is stored in an optional file with the same name as the index, but ending with ".hlink". If there are multiple index runs, and the hardlinks change, bup will notice this (within whatever subtree it is asked to reindex) and update the .hlink information accordingly. The current hardlink implementation will refuse to link to any file that resides outside the restore tree, and if the restore tree spans a different set of filesystems than the save tree, complete sets of hardlinks may not be restored. Filesystem Interaction ====================== Storing data is just half of the problem of making a backup; figuring out what to store is the other half. At the most basic level, piping the output of 'tar' into 'bup split' is an easy way to offload that decision; just let tar do all the hard stuff. And if you like tar files, that's a perfectly acceptable way to do it. But we can do better. Backing up with tarballs would totally be the way to go, except for two serious problems: 1. The result isn't easily "seekable." Tar files have no index, so if (as commonly happens) you only want to restore one file in a 200 GB backup, you'll have to read up to 200 GB before you can get to the beginning of that file. 
tar is short for "tape archive"; on a tape, there was no better way to do it anyway, so they didn't try. But on a disk, random file access is much, much better when you can figure out how. 2. tar doesn't remember which files it backed up last time, so it has to read through the entire file contents again in order to generate the tarball, large parts of which will then be skipped by bup since they've already been stored. This is much slower than necessary. (The second point isn't entirely true for all versions of tar. For example, GNU tar has an "incremental" mode that can somewhat mitigate this problem, if you're smart enough to know how to use it without hurting yourself. But you still have to decide which backups are "incremental" and which ones will be "full" and so on, so even when it works, it's more error-prone than bup.) bup divides the backup process into two major steps: a) indexing the filesystem, and b) saving file contents into the repository. Let's look at those steps in detail. Indexing the filesystem (cmd/drecurse, cmd/index, index.py) ----------------------- Splitting the filesystem indexing phase into its own program is nontraditional, but it gives us several advantages. The first advantage is trivial, but might be the most important: you can index files a lot faster than you can back them up. That means we can generate the index (.bup/bupindex) first, then have a nice, reliable, non-lying completion bar that tells you how much of your filesystem remains to be backed up. The alternative would be annoying failures like counting the number of *files* remaining (as rsync does), even though one of the files is a virtual machine image of 80 GB, and the 1000 other files are each under 10k. With bup, the percentage complete is the *real* percentage complete, which is very pleasant. Secondly, it makes it easier to debug and test; you can play with the index without actually backing up any files. 
Thirdly, you can replace the 'bup index' command with something else and not have to change anything about the 'bup save' command. The current 'bup index' implementation just blindly walks the whole filesystem looking for files that have changed since the last time it was indexed; this works fine, but something using inotify instead would be orders of magnitude faster. Windows and MacOS both have inotify-like services too, but they're totally different; if we want to support them, we can simply write new bup commands that do the job, and they'll never interfere with each other. And fourthly, git does it that way, and git is awesome, so who are we to argue? So let's look at how the index file works. First of all, note that the ".bup/bupindex" file is not the same as git's ".git/index" file. The latter isn't used in bup; as far as git is concerned, your bup repository is a "bare" git repository and doesn't have a working tree, and thus it doesn't have an index either. However, the bupindex file actually serves exactly the same purpose as git's index file, which is why we still call it "the index." We just had to redesign it for the usual bup-vs-git reasons, mostly that git just isn't designed to handle millions of files in a single repository. (The only way to find a file in git's index is to search it linearly; that's very fast in git-sized repositories, but very slow in bup-sized ones.) Let's not worry about the exact format of the bupindex file; it's still not optimal, and will probably change again. The most important things to know about bupindex are: - You can iterate through it much faster than you can iterate through the "real" filesystem (using something like the 'find' command). - If you delete it, you can get it back just by reindexing your filesystem (although that can be annoying to wait for); it's not critical to the repository itself. - You can iterate through only particular subtrees if you want. 
- There is no need to have more than one index for a particular filesystem, since it doesn't store anything about backups; it just stores file metadata. It's really just a cache (or 'index') of your filesystem's existing metadata. You could share the bupindex between repositories, or between multiple users on the same computer. If you back up your filesystem to multiple remote repositories to be extra safe, you can still use the same bupindex file across all of them, because it's the same filesystem every time. - Filenames in the bupindex are absolute paths, because that's the best way to ensure that you only need one bupindex file and that they're interchangeable. A note on file "dirtiness" -------------------------- The concept on which 'bup save' operates is simple enough; it reads through the index and backs up any file that is "dirty," that is, doesn't already exist in the repository. Determination of dirtiness is a little more complicated than it sounds. The most dirtiness-relevant flag in the bupindex is IX_HASHVALID; if this flag is reset, the file *definitely* is dirty and needs to be backed up. But a file may be dirty even if IX_HASHVALID is set, and that's the confusing part. The index stores a listing of files, their attributes, and their git object ids (sha1 hashes), if known. The "if known" is what IX_HASHVALID is about. When 'bup save' backs up a file, it sets the sha1 and sets IX_HASHVALID; when 'bup index' sees that a file has changed, it leaves the sha1 alone and resets IX_HASHVALID. Remember that the index can be shared between users, repositories, and backups. So IX_HASHVALID doesn't mean your repository *has* that sha1 in it; it only means that if you *do* have it, that you don't need to back up the file. Thus, 'bup save' needs to check every file in the index to make sure its hash exists, not just that it's valid. 
There's an optimization possible, however: if you know a particular tree's hash is valid and exists (say /usr), then you don't need to check the validity of all its children; because of the way git trees and blobs work, if your repository is valid and you have a tree object, then you have all the blobs it points to. You won't back up a tree object without backing up its blobs first, so you don't need to double check it next time. (If you really want to double check this, it belongs in a tool like 'bup fsck' or 'git fsck'.) So in short, 'bup save' on a "clean" index (all files are marked IX_HASHVALID) can be very fast; we just check our repository and see if the top level IX_HASHVALID sha1 exists. If it does, then we're done. Similarly, if not the entire index is valid, you can still avoid recursing into subtrees if those particular subtrees are IX_HASHVALID and their sha1s are in the repository. The net result is that, as long as you never lose your index, 'bup save' can always run very fast. Another interesting trick is that you can skip backing up files even if IX_HASHVALID *isn't* set, as long as you have that file's sha1 in the repository. What that means is you've chosen not to backup the latest version of that file; instead, your new backup set just contains the most-recently-known valid version of that file. This is a good trick if you want to do frequent backups of smallish files and infrequent backups of large ones. Each of your backups will be "complete," in that they contain all the small files and the large ones, but intermediate ones will just contain out-of-date copies of the large files. Note that this isn't done right now, and 'bup save --smaller' doesn't store bigger files _at all_. A final game we can play with the bupindex involves restoring: when you restore a directory from a previous backup, you can update the bupindex right away. 
Then, if you want to restore a different backup on top, you can compare the files in the index against the ones in the backup set, and update only the ones that have changed. (Even more interesting things happen if people are using the files on the restored system and you haven't updated the index yet; the net result would be an automated merge of all non-conflicting files.) This would be a poor man's distributed filesystem. The only catch is that nobody has written this feature for 'bup restore' yet. Someday! How 'bup save' works (cmd/save) -------------------- This section is too boring and has been omitted. Once you understand the index, there's nothing special about bup save. Retrieving backups: the bup vfs layer (vfs.py, cmd/ls, cmd/ftp, cmd/fuse) ===================================== One of the neat things about bup's storage format, at least compared to most backup tools, is it's easy to read a particular file, or even part of a file. That means a read-only virtual filesystem is easy to generate and it'll have good performance characteristics. Because of git's commit structure, you could even use branching and merging to make a transactional read-write filesystem... but that's probably getting a little out of bup's scope. Who knows what the future might bring, though? Read-only filesystems are well within our reach today, however. The 'bup ls', 'bup ftp', and 'bup fuse' commands all use a "VFS" (virtual filesystem) layer to let you access your repositories. Feel free to explore the source code for these tools and vfs.py - they're pretty straightforward. Some things to note: - None of these use the bupindex for anything. - For user-friendliness, they present your refs/commits/trees as a single hierarchy (ie. a filesystem), which isn't really how git repositories are formatted. So don't get confused! Handling Python 3's insistence on strings ========================================= In Python 2 strings were bytes, and bup used them for all kinds of data. 
Python 3 made a pervasive backward-incompatible change to treat all strings as Unicode, i.e. in Python 2 'foo' and b'foo' were the same thing, while u'foo' was a Unicode string. In Python 3 'foo' became synonymous with u'foo', completely changing the type and potential content, depending on the locale. In addition, and particularly bad for bup, Python 3 also (initially) insisted that all kinds of things were strings that just aren't (at least not on many platforms), i.e. user names, groups, filesystem paths, etc. There's no guarantee that any of those are always representable in Unicode. Over the years, Python 3 has gradually backed away from that initial position, adding alternate interfaces like os.environb or allowing bytes arguments to many functions like open(b'foo'...), so that in those cases it's at least possible to accurately retrieve the system data. After a while, they devised the concept of [bytesmuggling](https://www.python.org/dev/peps/pep-0383/) as a more comprehensive solution. In theory, this might be sufficient, but our initial randomized testing discovered that some binary arguments would crash Python during startup[1]. Eventually Johannes Berg tracked down the [cause](https://sourceware.org/bugzilla/show_bug.cgi?id=26034), and we hope that the problem will be fixed eventually in glibc or worked around by Python, but in either case, it will be a long time before any fix is widely available. Before we tracked down that bug we were pursuing an approach that would let us side step the issue entirely by manipulating the LC_CTYPE, but that approach was somewhat complicated, and once we understood what was causing the crashes, we decided to just let Python 3 operate "normally", and work around the issues. Consequently, we've had to wrap a number of things ourselves that incorrectly return Unicode strings (libacl, libreadline, hostname, etc.) 
and we've had to come up with a way to avoid the fatal crashes caused by some command line arguments (sys.argv) described above. To fix the latter, for the time being, we just use a trivial sh wrapper to redirect all of the command line arguments through the environment in BUP_ARGV_{0,1,2,...} variables, since the variables are unaffected, and we can access them directly in Python 3 via environb. [1] Our randomized argv testing found that the byte smuggling approach was not working correctly for some values (initially discovered in Python 3.7, and observed in other versions). The interpreter would just crash while starting up like this: Fatal Python error: _PyMainInterpreterConfig_Read: memory allocation failed ValueError: character U+134bd2 is not in range [U+0000; U+10ffff] Current thread 0x00007f2f0e1d8740 (most recent call first): Traceback (most recent call last): File "t/test-argv", line 28, in out = check_output(cmd) File "/usr/lib/python3.7/subprocess.py", line 395, in check_output **kwargs).stdout File "/usr/lib/python3.7/subprocess.py", line 487, in run output=stdout, stderr=stderr) We hope you'll enjoy bup. Looking forward to your patches! -- apenwarr and the rest of the bup team Local Variables: mode: markdown End: bup-0.33.3/Documentation/000077500000000000000000000000001454333004200151755ustar00rootroot00000000000000bup-0.33.3/Documentation/.gitignore000066400000000000000000000000321454333004200171600ustar00rootroot00000000000000*.[0-9] *.html /substvars bup-0.33.3/Documentation/bup-bloom.md000066400000000000000000000027351454333004200174220ustar00rootroot00000000000000% bup-bloom(1) Bup %BUP_VERSION% % Brandon Low % %BUP_DATE% # NAME bup-bloom - generates, regenerates, updates bloom filters # SYNOPSIS bup bloom [-d dir] [-o outfile] [-k hashes] [-c idxfile] [-f] [\--ruin] # DESCRIPTION `bup bloom` builds a bloom filter file for a bup repository. If one already exists, it checks the filter and updates or regenerates it as needed. 
# OPTIONS \--ruin : destroy bloom filters by setting the whole bitmask to zeros. you really want to know what you are doing if run this and you want to delete the resulting bloom when you are done with it. -f, \--force : don't update the existing bloom file; generate a new one from scratch. -d, \--dir=*directory* : the directory, containing `.idx` files, to process. Defaults to $BUP_DIR/objects/pack -o, \--outfile=*outfile* : the file to write the bloom filter to. defaults to $dir/bup.bloom -k, \--hashes=*hashes* : number of hash functions to use only 4 and 5 are valid. defaults to 5 for repositories < 2 TiB, or 4 otherwise. See comments in git.py for more on this value. -c, \--check=*idxfile* : checks the bloom file (counterintuitively outfile) against the specified `.idx` file, first checks that the bloom filter is claiming to contain the `.idx`, then checks that it does actually contain all of the objects in the `.idx`. Does not write anything and ignores the `-k` option. # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-cat-file.md000066400000000000000000000025571454333004200200000ustar00rootroot00000000000000% bup-cat-file(1) Bup %BUP_VERSION% % Rob Browning % %BUP_DATE% # NAME bup-cat-file - extract archive content (low-level) # SYNOPSIS bup cat-file [\--meta|\--bupm] <*path*> # DESCRIPTION `bup cat-file` extracts content associated with *path* from the archive and dumps it to standard output. If nothing special is requested, the actual data contained by *path* (which must be a regular file) will be dumped. # OPTIONS \--meta : retrieve the metadata entry associated with *path*. Note that currently this does not return the raw bytes for the entry recorded in the relevant .bupm in the archive, but rather a decoded and then re-encoded version. When that matters, it should be possible (though awkward) to use `--bupm` on the parent directory and then find the relevant entry in the output. 
\--bupm : retrieve the .bupm file associated with *path*, which must be a directory. # EXAMPLES # Retrieve the content of somefile. $ bup cat-file /foo/latest/somefile > somefile-content # Examine the metadata associated with something. $ bup cat-file --meta /foo/latest/something | bup meta -tvvf - # Examine the metadata for somedir, including the items it contains. $ bup cat-file --bupm /foo/latest/somedir | bup meta -tvvf - # SEE ALSO `bup-join`(1), `bup-meta`(1) # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-daemon.md000066400000000000000000000007541454333004200175540ustar00rootroot00000000000000% bup-daemon(1) Bup %BUP_VERSION% % Brandon Low % %BUP_DATE% # NAME bup-daemon - listens for connections and runs `bup server` # SYNOPSIS bup daemon [-l address] [-p port] # DESCRIPTION `bup daemon` is a simple bup server which listens on a socket and forks connections to `bup mux server` children. # OPTIONS -l, \--listen=*address* : the address or hostname to listen on -p, \--port=*port* : the port to listen on # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-damage.md000066400000000000000000000064411454333004200175260ustar00rootroot00000000000000% bup-damage(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-damage - randomly destroy blocks of a file # SYNOPSIS bup damage [-n count] [-s maxsize] [\--percent pct] [-S seed] [\--equal] \ # DESCRIPTION Use `bup damage` to deliberately destroy blocks in a `.pack` or `.idx` file (from `.bup/objects/pack`) to test the recovery features of `bup-fsck`(1) or other programs. *THIS PROGRAM IS EXTREMELY DANGEROUS AND WILL DESTROY YOUR DATA* `bup damage` is primarily useful for automated or manual tests of data recovery tools, to reassure yourself that the tools actually work. Note that the details of the current behavior may change (particularly the details not documented here). 
For example the moment, the damage is strictly probabilistic, and so may or may not actually alter any given block. With a block size of 1, there should be a 1/256 chance that the block won't actually change. This behavior may change. # OPTIONS -n, \--num=*numblocks* : the number of separate blocks to damage in each file (default 10). Note that it's possible for more than one damaged segment to fall in the same `bup-fsck`(1) recovery block, so you might not damage as many recovery blocks as you expect. If this is a problem, use `--equal`. -s, \--size=*maxblocksize* : the maximum size, in bytes, of each damaged block (default 1 unless `--percent` is specified). Note that because of the way `bup-fsck`(1) works, a multi-byte block could fall on the boundary between two recovery blocks, and thus damaging two separate recovery blocks. In small files, it's also possible for a damaged block to be larger than a recovery block. If these issues might be a problem, you should use the default damage size of one byte. \--percent=*maxblockpercent* : the maximum size, in percent of the original file, of each damaged block. If both `--size` and `--percent` are given, the maximum block size is the minimum of the two restrictions. You can use this to ensure that a given block will never damage more than one or two `git-fsck`(1) recovery blocks. -S, \--seed=*randomseed* : seed the random number generator with the given value. If you use this option, your tests will be repeatable, since the damaged block offsets, sizes, and contents will be the same every time. By default, the random numbers are different every time (so you can run tests in a loop and repeatedly test with different damage each time). \--equal : instead of choosing random offsets for each damaged block, space the blocks equally throughout the file, starting at offset 0. 
If you also choose a correct maximum block size, this can guarantee that any given damage block never damages more than one `git-fsck`(1) recovery block. (This is also guaranteed if you use `-s 1`.) # EXAMPLES # make a backup in case things go horribly wrong cp -pPR ~/.bup/objects/pack ~/bup-packs.bak # generate recovery blocks for all packs bup fsck -g # deliberately damage the packs bup damage -n 10 -s 1 -S 0 ~/.bup/objects/pack/*.{pack,idx} # recover from the damage bup fsck -r # SEE ALSO `bup-fsck`(1), `par2`(1) # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-drecurse.md000066400000000000000000000037641454333004200201310ustar00rootroot00000000000000% bup-drecurse(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-drecurse - recursively list files in your filesystem # SYNOPSIS bup drecurse [-x] [-q] [\--exclude *path*] \ [\--exclude-from *filename*] [\--exclude-rx *pattern*] \ [\--exclude-rx-from *filename*] [\--profile] \ # DESCRIPTION `bup drecurse` traverses files in the filesystem in a way similar to `find`(1). In most cases, you should use `find`(1) instead. This program is useful mainly for testing the file traversal algorithm used in `bup-index`(1). Note that filenames are returned in reverse alphabetical order, as in `bup-index`(1). This is important because you can't generate the hash of a parent directory until you have generated the hashes of all its children. When listing files in reverse order, the parent directory will come after its children, making this easy. # OPTIONS -x, \--xdev, \--one-file-system : don't cross filesystem boundaries -- though as with tar and rsync, the mount points themselves will still be reported. -q, \--quiet : don't print filenames as they are encountered. Useful when testing performance of the traversal algorithms. \--exclude=*path* : exclude *path* from the backup (may be repeated). \--exclude-from=*filename* : read --exclude paths from *filename*, one path per-line (may be repeated). 
Ignore completely empty lines. \--exclude-rx=*pattern* : exclude any path matching *pattern*. See `bup-index`(1) for details, but note that unlike index, drecurse will produce relative paths if the drecurse target is a relative path. (may be repeated). \--exclude-rx-from=*filename* : read --exclude-rx patterns from *filename*, one pattern per-line (may be repeated). Ignore completely empty lines. \--profile : print profiling information upon completion. Useful when testing performance of the traversal algorithms. # EXAMPLES bup drecurse -x / # SEE ALSO `bup-index`(1) # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-features.md000066400000000000000000000014331454333004200201220ustar00rootroot00000000000000% bup-features(1) Bup %BUP_VERSION% % Rob Browning % %BUP_DATE% # NAME bup-features - report the current status and capabilities of bup itself # SYNOPSIS bup features # DESCRIPTION `bup features` reports information about the current bup installation, for example, which version of the Python interpreter is used, whether command line editing is supported by `bup ftp`, or POSIX ACLs can be saved and restored. # EXAMPLES $ bup features bup 0.31~a7ff2d5b8c12b24b97858aad1251d28c18f8c1e1 source a7ff2d5b8c12b24b97858aad1251d28c18f8c1e1 2020-07-05 14:54:06 -0500 Python: 3.7.3 Command line editing (e.g. bup ftp): yes Saving and restoring POSIX ACLs: yes .... # SEE ALSO `bup-version`(1) # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-fsck.md000066400000000000000000000070131454333004200172320ustar00rootroot00000000000000% bup-fsck(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-fsck - verify or repair a bup repository # SYNOPSIS bup fsck [-r] [-g] [-v] [\--quick] [-j *jobs*] [\--par2-ok] [\--disable-par2] [filenames...] # DESCRIPTION `bup fsck` is a tool for validating bup repositories in the same way that `git fsck` validates git repositories. 
It can also generate and/or use "recovery blocks" using the `par2`(1) tool (if you have it installed). This allows you to recover from damaged blocks covering up to 5% of your `.pack` files. In a normal backup system, damaged blocks are less important, because there tends to be enough data duplicated between backup sets that a single damaged backup set is non-critical. In a deduplicating backup system like bup, however, no block is ever stored more than once, even if it is used in every single backup. If that block were to be unrecoverable, *all* your backup sets would be damaged at once. Thus, it's important to be able to verify the integrity of your backups and recover from disk errors if they occur. *WARNING*: bup fsck's recovery features are not available unless you have the free `par2`(1) package installed on your bup server. *WARNING*: bup fsck obviously cannot recover from a complete disk failure. If your backups are important, you need to carefully consider redundancy (such as using RAID for multi-disk redundancy, or making off-site backups for site redundancy). # OPTIONS -r, \--repair : attempt to repair any damaged packs using existing recovery blocks. (Requires `par2`(1).) -g, \--generate : generate recovery blocks for any packs that don't already have them. (Requires `par2`(1).) -v, \--verbose : increase verbosity (can be used more than once). \--quick : don't run a full `git verify-pack` on each pack file; instead just check the final checksum. This can cause a significant speedup with no obvious decrease in reliability. However, you may want to avoid this option if you're paranoid. Has no effect on packs that already have recovery information. -j, \--jobs=*numjobs* : maximum number of pack verifications to run at a time. The optimal value for this option depends how fast your CPU can verify packs vs. your disk throughput. 
If you run too many jobs at once, your disk will get saturated by seeking back and forth between files and performance will actually decrease, even if *numjobs* is less than the number of CPU cores on your system. You can experiment with this option to find the optimal value. \--par2-ok : immediately return 0 if `par2`(1) is installed and working, or 1 otherwise. Do not actually check anything. \--disable-par2 : pretend that `par2`(1) is not installed, and ignore all recovery blocks. # EXAMPLES # generate recovery blocks for all packs that don't # have them bup fsck -g # generate recovery blocks for a particular pack bup fsck -g ~/.bup/objects/pack/153a1420cb1c8*.pack # check all packs for correctness (can be very slow!) bup fsck # check all packs for correctness and recover any # damaged ones bup fsck -r # check a particular pack for correctness and recover # it if damaged bup fsck -r ~/.bup/objects/pack/153a1420cb1c8*.pack # check if recovery blocks are available on this system if bup fsck --par2-ok; then echo "par2 is ok" fi # SEE ALSO `bup-damage`(1), `fsck`(1), `git-fsck`(1) # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-ftp.md000066400000000000000000000037111454333004200170760ustar00rootroot00000000000000% bup-ftp(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-ftp - ftp-like client for navigating bup repositories # SYNOPSIS bup ftp # DESCRIPTION `bup ftp` is a command-line tool for navigating bup repositories. It has commands similar to the Unix `ftp`(1) command. The file hierarchy is the same as that shown by `bup-fuse`(1) and `bup-ls`(1). Note: if your system has the python-readline library installed, you can use the \ key to complete filenames while navigating your backup data. This will save you a lot of typing. # COMMANDS The following commands are available inside `bup ftp`: ls [-s] [-a] [*path*] : print the contents of a directory. If no path argument is given, the current directory's contents are listed. 
If -a is given, also include hidden files (files which start with a `.` character). If -s is given, each file is displayed with its hash from the bup archive to its left. cd *dirname* : change to a different working directory pwd : print the path of the current working directory cat *filenames...* : print the contents of one or more files to stdout get *filename* *localname* : download the contents of *filename* and save it to disk as *localname*. If *localname* is omitted, uses *filename* as the local name. mget *filenames...* : download the contents of the given *filenames* and stores them to disk under the same names. The filenames may contain Unix filename globs (`*`, `?`, etc.) help : print a list of available commands quit : exit the `bup ftp` client # EXAMPLES $ bup ftp bup> ls mybackup/ yourbackup/ bup> cd mybackup/ bup> ls 2010-02-05-185507@ 2010-02-05-185508@ latest@ bup> cd latest/ bup> ls (...etc...) bup> get myfile Saving 'myfile' bup> quit # SEE ALSO `bup-fuse`(1), `bup-ls`(1), `bup-save`(1), `bup-restore`(1) # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-fuse.md000066400000000000000000000032631454333004200172510ustar00rootroot00000000000000% bup-fuse(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-fuse - mount a bup repository as a filesystem # SYNOPSIS bup fuse [-d] [-f] [-o] \ # DESCRIPTION `bup fuse` opens a bup repository and exports it as a `fuse`(7) userspace filesystem. This feature is only available on systems (such as Linux) which support FUSE. **WARNING**: bup fuse is still experimental and does not enforce any file permissions! All files will be readable by all users. When you're done accessing the mounted fuse filesystem, you should unmount it with `umount`(8). # OPTIONS -d, \--debug : run in the foreground and print FUSE debug information for each request. -f, \--foreground : run in the foreground and exit only when the filesystem is unmounted. 
-o, \--allow-other : permit other users to access the filesystem. Necessary for exporting the filesystem via Samba, for example. \--meta : report some of the original metadata (when available) for the mounted paths (currently the uid, gid, mode, and timestamps). Without this, only generic values will be presented. This option is not yet enabled by default because it may negatively affect performance, and note that any timestamps before 1970-01-01 UTC (i.e. before the Unix epoch) will be presented as 1970-01-01 UTC. -v, \--verbose : increase verbosity (can be used more than once). # EXAMPLES rm -rf /tmp/buptest mkdir /tmp/buptest sudo bup fuse -d /tmp/buptest ls /tmp/buptest/*/latest ... umount /tmp/buptest # SEE ALSO `fuse`(7), `fusermount`(1), `bup-ls`(1), `bup-ftp`(1), `bup-restore`(1), `bup-web`(1) # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-gc.md000066400000000000000000000036411454333004200167000ustar00rootroot00000000000000% bup-gc(1) Bup %BUP_VERSION% % Rob Browning % %BUP_DATE% # NAME bup-gc - remove unreferenced, unneeded data # SYNOPSIS bup gc [-#|\--verbose] <*branch*|*save*...> # DESCRIPTION `bup gc` removes (permanently deletes) unreachable data from the repository, data that isn't referred to directly or indirectly by the current set of branches (backup sets) and tags. But bear in mind that given deduplication, deleting a save and running the garbage collector might or might not actually delete anything (or reclaim any space). With the current, proababilistic implementation, some fraction of the unreachable data may be retained. In exchange, the garbage collection should require much less RAM than might by some more precise approaches. Typically, the garbage collector would be invoked after some set of invocations of `bup rm`. WARNING: This is one of the few bup commands that modifies your archive in intentionally destructive ways. 
Though if an attempt to `join` or `restore` the data you still care about after a `gc` succeeds, that's a fairly encouraging sign that the commands worked correctly. (The `dev/compare-trees` command in the source tree can be used to help test before/after results.) # OPTIONS \--threshold=N : only rewrite a packfile if it's over N percent garbage; otherwise leave it alone. The default threshold is 10%. -v, \--verbose : increase verbosity (can be used more than once). With one -v, bup prints every directory name as it gets backed up. With two -v, it also prints every filename. -*#*, \--compress=*#* : set the compression level to # (a value from 0-9, where 9 is the highest and 0 is no compression). The default is 1 (fast, loose compression). # EXAMPLES # Remove all saves of "home" and most of the otherwise unreferenced data. $ bup rm home $ bup gc # SEE ALSO `bup-rm`(1) and `bup-fsck`(1) # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-get.md000066400000000000000000000162611454333004200170700ustar00rootroot00000000000000% bup-get(1) Bup %BUP_VERSION% % Rob Browning % %BUP_DATE% # NAME bup-get - copy repository items (CAUTION: EXPERIMENTAL) # SYNOPSIS bup get \[-s *source-path*\] \[-r *host*:*path*\] OPTIONS \<(METHOD *ref* [*dest*])\>... # DESCRIPTION `bup get` copies the indicated *ref*s from the source repository to the destination repository (respecting `--bup-dir` and `BUP_DIR`), according to the specified METHOD, which may be one of `--ff`, `--ff:`, `--append`, `--append:`, `--pick`, `--pick:`, `--force-pick`, `--force-pick:`, `--new-tag`, `--new-tag:`, `--replace`, `--replace:`, or `--unnamed`. See the EXAMPLES below for a quick introduction. The *ref* is the source repository reference of the object to be fetched, and the *dest* is the optional destination reference. A *dest* may only be specified for a METHOD whose name ends in a colon. 
For example: bup get -s /source/repo --ff foo bup get -s /source/repo --ff: foo/latest bar bup get -s /source/repo --pick: foo/2010-10-10-101010 .tag/bar As a special case, if *ref* names the "latest" save symlink, then bup will act exactly as if the save that "latest" points to had been specified, rather than the "latest" symlink itself, so `bup get foo/latest` will actually be interpreted as something like `bup get foo/2013-01-01-030405`. In some situations `bup get` will evaluate a branch operation according to whether or not it will be a "fast-forward" (which requires that any existing destination branch be an ancestor of the source). An existing destination tag can only be overwritten by a `--replace` or `--force-pick`. When a new commit is created (i.e. via `--append`, `--pick`, etc.), it will have the same author, author date, and message as the original, but a committer and committer date corresponding to the current user and time. If requested by the appropriate options, bup will print the commit, tree, or tag hash for each destination reference updated. When relevant, the tree hash will be printed before the commit hash. Local *ref*s can be pushed to a remote repository with the `--remote` option, and remote *ref*s can be pulled into a local repository via "bup on HOST get ...". See `bup-on`(1) and the EXAMPLES below for further information. WARNING: This is one of the few bup commands that can modify your archives in intentionally destructive ways. Though if an attempt to join or restore the data you still care about succeeds after you've run this command, then that's a fairly encouraging sign that it worked correctly. (The dev/compare-trees command in the source tree can be used to help test before/after results.) # METHODS \--ff *ref*, \--ff: *ref* *dest* : fast-forward *dest* to match *ref*. If *dest* is not specified and *ref* names a save, set *dest* to the save's branch. 
If *dest* is not specified and *ref* names a branch or a tag, use the same name for *dest*. \--append *ref*, \--append: *ref* *dest* : append all of the commits represented by *ref* to *dest* as new commits. If *ref* names a directory/tree, append a new commit for that tree. If *dest* is not specified and *ref* names a save or branch, set *dest* to the *ref* branch name. If *dest* is not specified and *ref* names a tag, use the same name for *dest*. \--pick *ref*, \--pick: *ref* *dest* : append the single commit named by *ref* to *dest* as a new commit. If *dest* is not specified and *ref* names a save, set *dest* to the *ref* branch name. If *dest* is not specified and *ref* names a tag, use the same name for *dest*. \--force-pick *ref*, \--force-pick: *ref* *dest* : do the same thing as `--pick`, but don't refuse to overwrite an existing tag. \--new-tag *ref*, \--new-tag: *ref* *dest* : create a *dest* tag for *ref*, but refuse to overwrite an existing tag. If *dest* is not specified and *ref* names a tag, use the same name for *dest*. \--replace *ref*, \--replace: *ref* *dest* : clobber *dest* with *ref*, overwriting any existing tag, or replacing any existing branch. If *dest* is not specified and *ref* names a branch or tag, use the same name for *dest*. \--unnamed *ref* : copy *ref* into the destination repository, without any name, leaving a potentially dangling reference until/unless the object named by *ref* is referred to some other way (cf. `bup tag`). # OPTIONS -s, \--source=*path* : use *path* as the source repository, instead of the default. -r, \--remote=*host*:*path* : store the indicated items on the given remote server. If *path* is omitted, uses the default path on the remote server (you still need to include the ':'). The connection to the remote server is made with SSH. If you'd like to specify which port, user or private key to use for the SSH connection, we recommend you use the `~/.ssh/config` file. 
-c, \--print-commits : for each updated branch, print the new git commit id. -t, \--print-trees : for each updated branch, print the new git tree id of the filesystem root. \--print-tags : for each updated tag, print the new git id. -v, \--verbose : increase verbosity (can be used more than once). With `-v`, print the name of every item fetched, with `-vv` add directory names, and with `-vvv` add every filename. \--bwlimit=*bytes/sec* : don't transmit more than *bytes/sec* bytes per second to the server. This can help avoid sucking up all your network bandwidth. Use a suffix like k, M, or G to specify multiples of 1024, 1024\*1024, 1024\*1024\*1024 respectively. -*#*, \--compress=*#* : set the compression level to # (a value from 0-9, where 9 is the highest and 0 is no compression). The default is 1 (fast, loose compression) # EXAMPLES # Update or copy the archives branch in src-repo to the local repository. $ bup get -s src-repo --ff archives # Append a particular archives save to the pruned-archives branch. $ bup get -s src-repo --pick: archives/2013-01-01-030405 pruned-archives # Update or copy the archives branch on remotehost to the local # repository. $ bup on remotehost get --ff archives # Update or copy the local branch archives to remotehost. $ bup get -r remotehost: --ff archives # Update or copy the archives branch in src-repo to remotehost. $ bup get -s src-repo -r remotehost: --ff archives # Update the archives-2 branch on remotehost to match archives. # If archives-2 exists and is not an ancestor of archives, bup # will refuse. $ bup get -r remotehost: --ff: archives archives-2 # Replace the contents of branch y with those of x. $ bup get --replace: x y # Copy the latest local save from the archives branch to the # remote tag foo. $ bup get -r remotehost: --pick: archives/latest .tag/foo # Or if foo already exists: $ bup get -r remotehost: --force-pick: archives/latest .tag/foo # Append foo (from above) to the local other-archives branch. 
$ bup on remotehost get --append: .tag/foo other-archives # Append only the /home directory from archives/latest to only-home. $ bup get -s "$BUP_DIR" --append: archives/latest/home only-home # SEE ALSO `bup-on`(1), `bup-tag`(1), `ssh_config`(5) # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-help.md000066400000000000000000000010641454333004200172340ustar00rootroot00000000000000% bup-help(1) Bup %BUP_VERSION% % Avery Pennarun <apenwarr@gmail.com> % %BUP_DATE% # NAME bup-help - open the documentation for a given bup command # SYNOPSIS bup help \<command\> # DESCRIPTION `bup help <command>` opens the documentation for the given command. This is currently equivalent to typing `man bup-<command>`. # EXAMPLES $ bup help help (Imagine that this man page was pasted below, recursively. Since that would cause an endless loop we include this silly remark instead. Chicken.) # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-import-duplicity.md000066400000000000000000000027741454333004200216300ustar00rootroot00000000000000% bup-import-duplicity(1) Bup %BUP_VERSION% % Zoran Zaric <zz@zoranzaric.de>, Rob Browning <rlb@defaultvalue.org> % %BUP_DATE% # NAME bup-import-duplicity - import duplicity backups # WARNING bup-import-duplicity is **EXPERIMENTAL** (proceed with caution) # SYNOPSIS bup import-duplicity [-n] \<source-url\> \<save-name\> # DESCRIPTION `bup import-duplicity` imports all of the duplicity backups at `source-url` into `bup` via `bup save -n save-name`. The bup saves will have the same timestamps (via `bup save --date`) as the original backups. Because this command operates by restoring each duplicity backup to a temporary directory, the extent to which the metadata is preserved will depend on the characteristics of the underlying filesystem, whether or not you run `import-duplicity` as root (or under `fakeroot`(1)), etc. Note that this command will use [`mkdtemp`][mkdtemp] to create temporary directories, which means that it should respect any `TMPDIR`, `TEMP`, or `TMP` environment variable settings.
Make sure that the relevant filesystem has enough space for the largest duplicity backup being imported. Since all invocations of duplicity use a temporary `--archive-dir`, `import-duplicity` should not affect ~/.cache/duplicity. # OPTIONS -n, \--dry-run : don't do anything; just print out what would be done # EXAMPLES $ bup import-duplicity file:///duplicity/src/ legacy-duplicity # BUP Part of the `bup`(1) suite. [mkdtemp]: https://docs.python.org/3/library/tempfile.html#tempfile.mkdtemp bup-0.33.3/Documentation/bup-import-rdiff-backup.md000066400000000000000000000011601454333004200221460ustar00rootroot00000000000000% bup-import-rdiff-backup(1) Bup %BUP_VERSION% % Zoran Zaric <zz@zoranzaric.de> % %BUP_DATE% # NAME bup-import-rdiff-backup - import an rdiff-backup archive # SYNOPSIS bup import-rdiff-backup [-n] \<path to rdiff-backup root\> \<backup name\> # DESCRIPTION `bup import-rdiff-backup` imports an rdiff-backup archive. The timestamps for the backups are preserved and the path to the rdiff-backup archive is stripped from the paths. # OPTIONS -n, \--dry-run : don't do anything; just print out what would be done # EXAMPLES $ bup import-rdiff-backup /.snapshots legacy-rdiff-backup # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-import-rsnapshot.md000066400000000000000000000013421454333004200216340ustar00rootroot00000000000000% bup-import-rsnapshot(1) Bup %BUP_VERSION% % Zoran Zaric <zz@zoranzaric.de> % %BUP_DATE% # NAME bup-import-rsnapshot - import an rsnapshot archive # SYNOPSIS bup import-rsnapshot [-n] \<path to snapshot disk\> [\<backuptarget\>] # DESCRIPTION `bup import-rsnapshot` imports an rsnapshot archive. The timestamps for the backups are preserved and the path to the rsnapshot archive is stripped from the paths. `bup import-rsnapshot` either imports the whole archive or imports all backups only for a given backuptarget. # OPTIONS -n, \--dry-run : don't do anything; just print out what would be done # EXAMPLES $ bup import-rsnapshot /.snapshots $ bup import-rsnapshot /.snapshots host1 # BUP Part of the `bup`(1) suite.
bup-0.33.3/Documentation/bup-index.md000066400000000000000000000166151454333004200174230ustar00rootroot00000000000000% bup-index(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-index - print and/or update the bup filesystem index # SYNOPSIS bup index \<-p|-m|-s|-u|\--clear|\--check\> [-H] [-l] [-x] [\--fake-valid] [\--no-check-device] [\--fake-invalid] [-f *indexfile*] [\--exclude *path*] [\--exclude-from *filename*] [\--exclude-rx *pattern*] [\--exclude-rx-from *filename*] [-v] \ # DESCRIPTION `bup index` manipulates the filesystem index, which is a cache of absolute paths and their metadata (attributes, SHA-1 hashes, etc.). The bup index is similar in function to the `git`(1) index, and the default index can be found in `$BUP_DIR/bupindex`. Creating a backup in bup consists of two steps: updating the index with `bup index`, then actually backing up the files (or a subset of the files) with `bup save`. The separation exists for these reasons: 1. There is more than one way to generate a list of files that need to be backed up. For example, you might want to use `inotify`(7) or `dnotify`(7). 2. Even if you back up files to multiple destinations (for added redundancy), the file names, attributes, and hashes will be the same each time. Thus, you can save the trouble of repeatedly re-generating the list of files for each backup set. 3. You may want to use the data tracked by bup index for other purposes (such as speeding up other programs that need the same information). # NOTES At the moment, bup will ignore Linux attributes (cf. chattr(1) and lsattr(1)) on some systems (any big-endian systems where sizeof(long) < sizeof(int)). This is because the Linux kernel and FUSE currently disagree over the type of the attr system call arguments, and so on big-endian systems there's no way to get the results without the risk of stack corruption (http://lwn.net/Articles/575846/). 
In these situations, bup will print a warning the first time Linux attrs are relevant during any index/save/restore operation. bup makes accommodations for the expected "worst-case" filesystem timestamp resolution -- currently one second; examples include VFAT, ext2, ext3, small ext4, etc. Since bup cannot know the filesystem timestamp resolution, and could be traversing multiple filesystems during any given run, it always assumes that the resolution may be no better than one second. As a practical matter, this means that index updates are a bit imprecise, and so `bup save` may occasionally record filesystem changes that you didn't expect. That's because, during an index update, if bup encounters a path whose actual timestamps are more recent than one second before the update started, bup will set the index timestamps for that path (mtime and ctime) to exactly one second before the run, -- effectively capping those values. This ensures that no subsequent changes to those paths can result in timestamps that are identical to those in the index. If that were possible, bup could overlook the modifications. You can see the effect of this behavior in this example (assume that less than one second elapses between the initial file creation and first index run): $ touch src/1 src/2 # A "sleep 1" here would avoid the unexpected save. $ bup index src $ bup save -n src src # Saves 1 and 2. $ date > src/1 $ bup index src $ date > src/2 # Not indexed. $ bup save -n src src # But src/2 is saved anyway. Strictly speaking, bup should not notice the change to src/2, but it does, due to the accommodations described above. # MODES -u, \--update : recursively update the index for the given paths and their descendants. One or more paths must be specified, and if a path ends with a symbolic link, the link itself will be indexed, not the target. 
If no mode option is given, `--update` is the default, and paths may be excluded by the `--exclude`, `--exclude-rx`, and `--one-file-system` options. -p, \--print : print the contents of the index. If paths are given, shows the given entries and their descendants. If no paths are given, shows the entries starting at the current working directory (.). -m, \--modified : prints only files which are marked as modified (ie. changed since the most recent backup) in the index. Implies `-p`. -s, \--status : prepend a status code (A, M, D, or space) before each path. Implies `-p`. The codes mean, respectively, that a file is marked in the index as added, modified, deleted, or unchanged since the last backup. \--check : carefully check index file integrity before and after updating. Mostly useful for automated tests. \--clear : clear the default index. # OPTIONS -H, \--hash : for each file printed, prepend the most recently recorded hash code. The hash code is normally generated by `bup save`. For objects which have not yet been backed up, the hash code will be 0000000000000000000000000000000000000000. Note that the hash code is printed even if the file is known to be modified or deleted in the index (ie. the file on the filesystem no longer matches the recorded hash). If this is a problem for you, use `--status`. -l, \--long : print more information about each file, in a similar format to the `-l` option to `ls`(1). -x, \--xdev, \--one-file-system : don't cross filesystem boundaries when traversing the filesystem -- though as with tar and rsync, the mount points themselves will still be indexed. Only applicable if you're using `-u`. \--fake-valid : mark specified paths as up-to-date even if they aren't. This can be useful for testing, or to avoid unnecessarily backing up files that you know are boring. \--fake-invalid : mark specified paths as not up-to-date, forcing the next "bup save" run to re-check their contents. 
-f, \--indexfile=*indexfile* : use a different index filename instead of `$BUP_DIR/bupindex`. \--exclude=*path* : exclude *path* from the backup (may be repeated). \--exclude-from=*filename* : read --exclude paths from *filename*, one path per-line (may be repeated). Ignore completely empty lines. \--exclude-rx=*pattern* : exclude any path matching *pattern*, which must be a Python regular expression (http://docs.python.org/library/re.html). The pattern will be compared against the full path, without anchoring, so "x/y" will match "ox/yard" or "box/yards". To exclude the contents of /tmp, but not the directory itself, use "^/tmp/.". (may be repeated) Examples: * '/foo$' - exclude any file named foo * '/foo/$' - exclude any directory named foo * '/foo/.' - exclude the content of any directory named foo * '^/tmp/.' - exclude root-level /tmp's content, but not /tmp itself \--exclude-rx-from=*filename* : read --exclude-rx patterns from *filename*, one pattern per-line (may be repeated). Ignore completely empty lines. \--no-check-device : don't mark an entry invalid if the device number (stat(2) st_dev) changes. This can be useful when indexing remote, automounted, or snapshot filesystems (LVM, Btrfs, etc.), where the device number isn't fixed. -v, \--verbose : increase log output during update (can be used more than once). With one `-v`, print each directory as it is updated; with two `-v`, print each file too. # EXAMPLES bup index -vux /etc /var /usr # SEE ALSO `bup-save`(1), `bup-drecurse`(1), `bup-on`(1) # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-init.md000066400000000000000000000015551454333004200172540ustar00rootroot00000000000000% bup-init(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-init - initialize a bup repository # SYNOPSIS [BUP_DIR=*localpath*] bup init [-r *host*:*path*] # DESCRIPTION `bup init` initializes your local bup repository. By default, BUP_DIR is `~/.bup`. 
# OPTIONS -r, \--remote=*host*:*path* : Initialize not only the local repository, but also the remote repository given by the *host* and *path*. This is not necessary if you intend to back up to the default location on the server (ie. a blank *path*). The connection to the remote server is made with SSH. If you'd like to specify which port, user or private key to use for the SSH connection, we recommend you use the `~/.ssh/config` file. # EXAMPLES bup init # SEE ALSO `bup-fsck`(1), `ssh_config`(5) # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-join.md000066400000000000000000000031531454333004200172440ustar00rootroot00000000000000% bup-join(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-join - concatenate files from a bup repository # SYNOPSIS bup join [-r *host*:*path*] [refs or hashes...] # DESCRIPTION `bup join` is roughly the opposite operation to `bup-split`(1). You can use it to retrieve the contents of a file from a local or remote bup repository. The supplied list of refs or hashes can be in any format accepted by `git`(1), including branch names, commit ids, tree ids, or blob ids. If no refs or hashes are given on the command line, `bup join` reads them from stdin instead. # OPTIONS -r, \--remote=*host*:*path* : Retrieves objects from the given remote repository instead of the local one. *path* may be blank, in which case the default remote repository is used. The connection to the remote server is made with SSH. If you'd like to specify which port, user or private key to use for the SSH connection, we recommend you use the `~/.ssh/config` file. Even though the data source is remote, a local bup repository is still required. # EXAMPLES # split and then rejoin a file using its tree id TREE=$(tar -cvf - /etc | bup split -t) bup join $TREE | tar -tf - # make two backups, then get the second-most-recent. # mybackup~1 is git(1) notation for the second most # recent commit on the branch named mybackup. 
tar -cvf - /etc | bup split -n mybackup tar -cvf - /etc | bup split -n mybackup bup join mybackup~1 | tar -tf - # SEE ALSO `bup-split`(1), `bup-save`(1), `bup-cat-file`, `ssh_config`(5) # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-ls.md000066400000000000000000000051271454333004200167260ustar00rootroot00000000000000% bup-ls(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-ls - list the contents of a bup repository # SYNOPSIS bup ls [-r *host*:[*path*]] [OPTION...] \ # DESCRIPTION `bup ls` lists files and directories in your bup repository using the same directory hierarchy as they would have with `bup-fuse`(1). The top level directory contains the branch (corresponding to the `-n` option in `bup save`), the next level is the date of the backup, and subsequent levels correspond to files in the backup. When `bup ls` is asked to output on a tty, and `-l` is not specified, it formats the output in columns so it can list as much as possible in as few lines as possible. However, when `-l` is specified or bup is asked to output to something other than a tty (say you pipe the output to another command, or you redirect it to a file), it will print one file name per line. This makes the listing easier to parse with external tools. Note that `bup ls` doesn't show hidden files by default and one needs to use the `-a` option to show them. Files are hidden when their name begins with a dot. For example, on the topmost level, the special directories named `.commit` and `.tag` are hidden directories. Once you have identified the file you want using `bup ls`, you can view its contents using `bup join` or `git show`. # OPTIONS -r, \--remote=*host*:[*path*] : list information for the repository at *path* on the indicated *host*. If *path* is omitted, uses the default path on the remote server (you still need to include the ':'). The connection to the remote server will be made by SSH. 
If you'd like to specify the port, user, or private key, we recommend you use the `~/.ssh/config` file (`ssh_config(5)`). -s, \--hash : show hash for each file/directory. -a, \--all : show hidden files. -A, \--almost-all : show hidden files, except "." and "..". -d, \--directory : show information about directories themselves, rather than their contents, and don't follow symlinks. -l : provide a detailed, long listing for each item. -F, \--classify : append type indicator: dir/, symlink@, fifo|, socket=, and executable*. \--file-type : append type indicator: dir/, symlink@, fifo|, socket=. \--human-readable : print human readable file sizes (i.e. 3.9K, 4.7M). \--numeric-ids : display numeric IDs (user, group, etc.) rather than names. # EXAMPLES bup ls /myserver/latest/etc/profile bup ls -a / # SEE ALSO `bup-join`(1), `bup-fuse`(1), `bup-ftp`(1), `bup-save`(1), `git-show`(1), `ssh_config`(5) # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-margin.md000066400000000000000000000043321454333004200175620ustar00rootroot00000000000000% bup-margin(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-margin - figure out your deduplication safety margin # SYNOPSIS bup margin [options...] # DESCRIPTION `bup margin` iterates through all objects in your bup repository, calculating the largest number of prefix bits shared between any two entries. This number, `n`, identifies the longest subset of SHA-1 you could use and still encounter a collision between your object ids. For example, one system that was tested had a collection of 11 million objects (70 GB), and `bup margin` returned 45. That means a 46-bit hash would be sufficient to avoid all collisions among that set of objects; each object in that repository could be uniquely identified by its first 46 bits. The number of bits needed seems to increase by about 1 or 2 for every doubling of the number of objects. Since SHA-1 hashes have 160 bits, that leaves 115 bits of margin. 
Of course, because SHA-1 hashes are essentially random, it's theoretically possible to use many more bits with far fewer objects. If you're paranoid about the possibility of SHA-1 collisions, you can monitor your repository by running `bup margin` occasionally to see if you're getting dangerously close to 160 bits. # OPTIONS \--predict : Guess the offset into each index file where a particular object will appear, and report the maximum deviation of the correct answer from the guess. This is potentially useful for tuning an interpolation search algorithm. \--ignore-midx : don't use `.midx` files, use only `.idx` files. This is only really useful when used with `--predict`. # EXAMPLES $ bup margin Reading indexes: 100.00% (1612581/1612581), done. 40 40 matching prefix bits 1.94 bits per doubling 120 bits (61.86 doublings) remaining 4.19338e+18 times larger is possible Everyone on earth could have 625878182 data sets like yours, all in one repository, and we would expect 1 object collision. $ bup margin --predict PackIdxList: using 1 index. Reading indexes: 100.00% (1612581/1612581), done. 915 of 1612581 (0.057%) # SEE ALSO `bup-midx`(1), `bup-save`(1) # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-memtest.md000066400000000000000000000113421454333004200177620ustar00rootroot00000000000000% bup-memtest(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-memtest - test bup memory usage statistics # SYNOPSIS bup memtest [options...] # DESCRIPTION `bup memtest` opens the list of pack indexes in your bup repository, then searches the list for a series of nonexistent objects, printing memory usage statistics after each cycle. Because of the way Unix systems work, the output will usually show a large (and unchanging) value in the VmSize column, because mapping the index files in the first place takes a certain amount of virtual address space. However, this virtual memory usage is entirely virtual; it doesn't take any of your RAM. 
Over time, bup uses *parts* of the indexes, which need to be loaded from disk, and this is what causes an increase in the VmRSS column. # OPTIONS -n, \--number=*number* : set the number of objects to search for during each cycle (ie. before printing a line of output) -c, \--cycles=*cycles* : set the number of cycles (ie. the number of lines of output after the first). The first line of output is always 0 (ie. the baseline before searching for any objects). \--ignore-midx : ignore any `.midx` files created by `bup midx`. This allows you to compare memory performance with and without using midx. \--existing : search for existing objects instead of searching for random nonexistent ones. This can greatly affect memory usage and performance. Note that most of the time, `bup save` spends most of its time searching for nonexistent objects, since existing ones are probably in unmodified files that we won't be trying to back up anyway. So the default behaviour reflects real bup performance more accurately. But you might want this option anyway just to make sure you haven't made searching for existing objects much worse than before. # EXAMPLES $ bup memtest -n300 -c5 PackIdxList: using 1 index. VmSize VmRSS VmData VmStk 0 20824 kB 4528 kB 1980 kB 84 kB 300 20828 kB 5828 kB 1984 kB 84 kB 600 20828 kB 6844 kB 1984 kB 84 kB 900 20828 kB 7836 kB 1984 kB 84 kB 1200 20828 kB 8736 kB 1984 kB 84 kB 1500 20828 kB 9452 kB 1984 kB 84 kB $ bup memtest -n300 -c5 --ignore-midx PackIdxList: using 361 indexes. VmSize VmRSS VmData VmStk 0 27444 kB 6552 kB 2516 kB 84 kB 300 27448 kB 15832 kB 2520 kB 84 kB 600 27448 kB 17220 kB 2520 kB 84 kB 900 27448 kB 18012 kB 2520 kB 84 kB 1200 27448 kB 18388 kB 2520 kB 84 kB 1500 27448 kB 18556 kB 2520 kB 84 kB # DISCUSSION When optimizing bup indexing, the first goal is to keep the VmRSS reasonably low. 
However, it might eventually be necessary to swap in all the indexes, simply because you're searching for a lot of objects, and this will cause your RSS to grow as large as VmSize eventually. The key word here is *eventually*. As long as VmRSS grows reasonably slowly, the amount of disk activity caused by accessing pack indexes is reasonably small. If it grows quickly, bup will probably spend most of its time swapping index data from disk instead of actually running your backup, so backups will run very slowly. The purpose of `bup memtest` is to give you an idea of how fast your memory usage is growing, and to help in optimizing bup for better memory use. If you have memory problems you might be asked to send the output of `bup memtest` to help diagnose the problems. Tip: try using `bup midx -a` or `bup midx -f` to see if it helps reduce your memory usage. Trivia: index memory usage in bup (or git) is only really a problem when adding a large number of previously unseen objects. This is because for each object, we need to absolutely confirm that it isn't already in the database, which requires us to search through *all* the existing pack indexes to ensure that none of them contain the object in question. In the more obvious case of searching for objects that *do* exist, the objects being searched for are typically related in some way, which means they probably all exist in a small number of packfiles, so memory usage will be constrained to just those packfile indexes. Since git users typically don't add a lot of files in a single run, git doesn't really need a program like `bup midx`. bup, on the other hand, spends most of its time backing up files it hasn't seen before, so its memory usage patterns are different. # SEE ALSO `bup-midx`(1) # BUP Part of the `bup`(1) suite. 
bup-0.33.3/Documentation/bup-meta.md000066400000000000000000000107231454333004200172340ustar00rootroot00000000000000% bup-meta(1) Bup %BUP_VERSION% % Rob Browning % %BUP_DATE% # NAME bup-meta - create or extract a metadata archive # SYNOPSIS bup meta \--create ~ [-R] [-v] [-q] [\--no-symlinks] [\--no-paths] [-f *file*] \<*paths*...\> bup meta \--list ~ [-v] [-q] [-f *file*] bup meta \--extract ~ [-v] [-q] [\--numeric-ids] [\--no-symlinks] [-f *file*] bup meta \--start-extract ~ [-v] [-q] [\--numeric-ids] [\--no-symlinks] [-f *file*] bup meta \--finish-extract ~ [-v] [-q] [\--numeric-ids] [-f *file*] bup meta \--edit ~ [\--set-uid *uid* | \--set-gid *gid* | \--set-user *user* | \--set-group *group* | ...] \<*paths*...\> # DESCRIPTION `bup meta` creates, extracts, or otherwise manipulates metadata archives. A metadata archive contains the metadata information (timestamps, ownership, access permissions, etc.) for a set of filesystem paths. See `bup-restore`(1) for a description of the way ownership metadata is restored. # OPTIONS -c, \--create : Create a metadata archive for the specified *path*s. Write the archive to standard output unless `--file` is specified. -t, \--list : Display information about the metadata in an archive. Read the archive from standard input unless `--file` is specified. -x, \--extract : Extract a metadata archive. Conceptually, perform `--start-extract` followed by `--finish-extract`. Read the archive from standard input unless `--file` is specified. \--start-extract : Build a filesystem tree matching the paths stored in a metadata archive. By itself, this command does not produce a full restoration of the metadata. For a full restoration, this command must be followed by a call to `--finish-extract`. Once this command has finished, all of the normal files described by the metadata will exist and be empty. Restoring the data in those files, and then calling `--finish-extract` should restore the original tree. 
The archive will be read from standard input unless `--file` is specified. \--finish-extract : Finish applying the metadata stored in an archive to the filesystem. Normally, this command should follow a call to `--start-extract`. The archive will be read from standard input unless `--file` is specified. \--edit : Edit metadata archives. The result will be written to standard output unless `--file` is specified. -f, \--file=*filename* : Read the metadata archive from *filename* or write it to *filename* as appropriate. If *filename* is "-", then read from standard input or write to standard output. -R, \--recurse : Recursively descend into subdirectories during `--create`. \--xdev, \--one-file-system : don't cross filesystem boundaries -- though as with tar and rsync, the mount points themselves will still be handled. \--numeric-ids : Apply numeric IDs (user, group, etc.) rather than names during `--extract` or `--finish-extract`. \--symlinks : Record symbolic link targets when creating an archive, or restore symbolic links when extracting an archive (during `--extract` or `--start-extract`). This option is enabled by default. Specify `--no-symlinks` to disable it. \--paths : Record pathnames when creating an archive. This option is enabled by default. Specify `--no-paths` to disable it. \--set-uid=*uid* : Set the metadata uid to the integer *uid* during `--edit`. \--set-gid=*gid* : Set the metadata gid to the integer *gid* during `--edit`. \--set-user=*user* : Set the metadata user to *user* during `--edit`. \--unset-user : Remove the metadata user during `--edit`. \--set-group=*group* : Set the metadata user to *group* during `--edit`. \--unset-group : Remove the metadata group during `--edit`. -v, \--verbose : Be more verbose (can be used more than once). -q, \--quiet : Be quiet. # EXAMPLES # Create a metadata archive for /etc. $ bup meta -cRf etc.meta /etc bup: removing leading "/" from "/etc" # Extract the etc.meta archive (files will be empty). 
$ mkdir tmp && cd tmp $ bup meta -xf ../etc.meta $ ls etc # Restore /etc completely. $ mkdir tmp && cd tmp $ bup meta --start-extract -f ../etc.meta ...fill in all regular file contents using some other tool... $ bup meta --finish-extract -f ../etc.meta # Change user/uid to root. $ bup meta --edit --set-uid 0 --set-user root \ src.meta > dest.meta # BUGS Hard links are not handled yet. # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-midx.md000066400000000000000000000064571454333004200172600ustar00rootroot00000000000000% bup-midx(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-midx - create a multi-index (`.midx`) file from several `.idx` files # SYNOPSIS bup midx [-o *outfile*] \<-a|-f|*idxnames*...\> # DESCRIPTION `bup midx` creates a multi-index (`.midx`) file from one or more git pack index (`.idx`) files. Note: you should no longer need to run this command by hand. It gets run automatically by `bup-save`(1) and similar commands. # OPTIONS -o, \--output=*filename.midx* : use the given output filename for the `.midx` file. Default is auto-generated. -a, \--auto : automatically generate new `.midx` files for any `.idx` files where it would be appropriate. -f, \--force : force generation of a single new `.midx` file containing *all* your `.idx` files, even if other `.midx` files already exist. This will result in the fastest backup performance, but may take a long time to run. \--dir=*packdir* : specify the directory containing the `.idx`/`.midx` files to work with. The default is `$BUP_DIR/objects/pack`. \--max-files : maximum number of `.idx` files to open at a time. You can use this if you have an especially small number of file descriptors available, so that midx can complete (though possibly non-optimally) even if it can't open all your `.idx` files at once. The default value of this option should be fine for most people. 
\--check : validate a `.midx` file by ensuring that all objects in its contained `.idx` files exist inside the `.midx`. May be useful for debugging. # EXAMPLES $ bup midx -a Merging 21 indexes (2278559 objects). Table size: 524288 (17 bits) Reading indexes: 100.00% (2278559/2278559), done. midx-b66d7c9afc4396187218f2936a87b865cf342672.midx # DISCUSSION By default, bup uses git-formatted pack files, which consist of a pack file (containing objects) and an idx file (containing a sorted list of object names and their offsets in the .pack file). Normal idx files are convenient because it means you can use `git`(1) to access your backup datasets. However, idx files can get slow when you have a lot of very large packs (which git typically doesn't have, but bup often does). bup `.midx` files consist of a single sorted list of all the objects contained in all the .pack files it references. This list can be binary searched in about log2(m) steps, where m is the total number of objects. To further speed up the search, midx files also have a variable-sized fanout table that reduces the first n steps of the binary search. With the help of this fanout table, bup can narrow down which page of the midx file a given object id would be in (if it exists) with a single lookup. Thus, typical searches will only need to swap in two pages: one for the fanout table, and one for the object id. midx files are most useful when creating new backups, since searching for a nonexistent object in the repository necessarily requires searching through *all* the index files to ensure that it does not exist. (Searching for objects that *do* exist can be optimized; for example, consecutive objects are often stored in the same pack, so we can search that one first using an MRU algorithm.) # SEE ALSO `bup-save`(1), `bup-margin`(1), `bup-memtest`(1) # BUP Part of the `bup`(1) suite. 
bup-0.33.3/Documentation/bup-mux.md000066400000000000000000000010211454333004200171060ustar00rootroot00000000000000% bup-mux(1) Bup %BUP_VERSION% % Brandon Low % %BUP_DATE% # NAME bup-mux - multiplexes data and error streams over a connection # SYNOPSIS bup mux \ [options...] # DESCRIPTION `bup mux` is used in the bup client-server protocol to send both data and debugging/error output over the single connection stream. `bup mux bup server` might be used in an inetd server setup. # OPTIONS command : the command to run options : options for the command # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-on.md000066400000000000000000000045571454333004200167320ustar00rootroot00000000000000% bup-on(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-on - run a bup server locally and client remotely # SYNOPSIS bup on \ index ... bup on \ save ... bup on \ split ... bup on \ get ... # DESCRIPTION `bup on` runs the given bup command on the given host using ssh. It runs a bup server on the local machine, so that commands like `bup save` on the remote machine can back up to the local machine. (You don't need to provide a `--remote` option to `bup save` in order for this to work.) See `bup-index`(1), `bup-save`(1), and so on for details of how each subcommand works. This 'reverse mode' operation is useful when the machine being backed up isn't supposed to be able to ssh into the backup server. For example, your backup server can be hidden behind a one-way firewall on a private or dynamic IP address; using an ssh key, it can be authorized to ssh into each of your important machines. After connecting to each destination machine, it initiates a backup, receiving the resulting data and storing in its local repository. For example, if you run several virtual private Linux machines on a remote hosting provider, you could back them up to a local (much less expensive) computer in your basement. 
# EXAMPLES # First index the files on the remote server $ bup on myserver index -vux /etc bup server: reading from stdin. Indexing: 2465, done. bup: merging indexes (186668/186668), done. bup server: done # Now save the files from the remote server to the # local $BUP_DIR $ bup on myserver save -n myserver-backup /etc bup server: reading from stdin. bup server: command: 'list-indexes' PackIdxList: using 7 indexes. Saving: 100.00% (241/241k, 648/648 files), done. bup server: received 55 objects. Indexing objects: 100% (55/55), done. bup server: command: 'quit' bup server: done # Now we can look at the resulting repo on the local # machine $ bup ftp 'cat /myserver-backup/latest/etc/passwd' root:x:0:0:root:/root:/bin/bash daemon:x:1:1:daemon:/usr/sbin:/bin/sh bin:x:2:2:bin:/bin:/bin/sh sys:x:3:3:sys:/dev:/bin/sh sync:x:4:65534:sync:/bin:/bin/sync ... # SEE ALSO `bup-index`(1), `bup-save`(1), `bup-split`(1), `bup-get`(1) # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-prune-older.md000066400000000000000000000075541454333004200205520ustar00rootroot00000000000000% bup-prune-older(1) bup %BUP_VERSION% | bup %BUP_VERSION% % Rob Browning % %BUP_DATE% # NAME bup-prune-older - remove older saves # SYNOPSIS bup prune-older [options...] <*branch*...> # DESCRIPTION `bup prune-older` removes (permanently deletes) all saves except those preserved by the various keep arguments detailed below. At least one keep argument must be specified. This command is equivalent to a suitable `bup rm` invocation followed by `bup gc`. WARNING: This is one of the few bup commands that modifies your archive in intentionally destructive ways. Though if an attempt to `join` or `restore` the data you still care about after a `prune-older` succeeds, that's a fairly encouraging sign that the commands worked correctly. (The `dev/compare-trees` command in the source tree can be used to help test before/after results.) 
# KEEP PERIODS A `--keep` PERIOD (as required below) must be an integer followed by a scale, or "forever". For example, 12y specifies a PERIOD of twelve years. Here are the valid scales: - s indicates seconds - min indicates minutes (60s) - h indicates hours (60min) - d indicates days (24h) - w indicates weeks (7d) - m indicates months (31d) - y indicates years (366d) - forever is infinitely far in the past As indicated, the PERIODS are computed with respect to the current time, or the `--wrt` value if specified, and do not respect any calendar, so `--keep-dailies-for 5d` means a period starting exactly 5 * 24 * 60 * 60 seconds before the starting point. # OPTIONS \--keep-all-for PERIOD : when no smaller time scale `--keep` option applies, retain all saves within the given period. \--keep-dailies-for PERIOD : when no smaller time scale `--keep` option applies, retain the newest save for any day within the given period. \--keep-monthlies-for PERIOD : when no smaller time scale `--keep` option applies, retain the newest save for any month within the given period. \--keep-yearlies-for PERIOD : when no smaller time scale `--keep` option applies, retain the newest save for any year within the given period. \--wrt UTC_SECONDS : when computing a keep period, place the most recent end of the range at UTC\_SECONDS, and any saves newer than this will be kept. \--pretend : don't do anything, just list the actions that would be taken to standard output, one action per line like this: - SAVE + SAVE ... \--gc : garbage collect the repository after removing the relevant saves. This is the default behavior, but it can be avoided with `--no-gc`. \--gc-threshold N : only rewrite a packfile if it's over N percent garbage; otherwise leave it alone. The default threshold is 10%. -*#*, \--compress *#* : set the compression level when rewriting archive data to # (a value from 0-9, where 9 is the highest and 0 is no compression). The default is 1 (fast, loose compression)
-v, \--verbose : increase verbosity (can be specified more than once). # NOTES When `--verbose` is specified, the save periods will be summarized to standard error with lines like this: keeping monthlies since 1969-07-20-201800 keeping all yearlies ... It's possible that the current implementation might not be able to format the date if, for example, it is far enough back in time. In that case, you will see something like this: keeping yearlies since -30109891477 seconds before 1969-12-31-180000 ... # EXAMPLES # Keep all saves for the past month, and any newer monthlies for # the past year. Delete everything else. $ bup prune-older --keep-all-for 1m --keep-monthlies-for 1y # Keep all saves for the past 6 months and delete everything else, # but only on the semester branch. $ bup prune-older --keep-all-for 6m semester # SEE ALSO `bup-rm`(1), `bup-gc`(1), and `bup-fsck`(1) # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-random.md000066400000000000000000000043261454333004200175700ustar00rootroot00000000000000% bup-random(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-random - generate a stream of random output # SYNOPSIS bup random [-S seed] [-fv] \ # DESCRIPTION `bup random` produces a stream of pseudorandom output bytes to stdout. Note: the bytes are *not* generated using a cryptographic algorithm and should never be used for security. Note that the stream of random bytes will be identical every time `bup random` is run, unless you provide a different `seed` value. This is intentional: the purpose of this program is to be able to run repeatable tests on large amounts of data, so we want identical data every time. `bup random` generates about 240 megabytes per second on a modern test system (Intel Core2), which is faster than you could achieve by reading data from most disks. Thus, it can be helpful when running microbenchmarks. # OPTIONS \ : the number of bytes of data to generate. 
Can be used with the suffixes `k`, `M`, or `G` to indicate kilobytes, megabytes, or gigabytes, respectively. -S, \--seed=*seed* : use the given value to seed the pseudorandom number generator. The generated output stream will be identical for every stream seeded with the same value. The default seed is 1. A seed value of 0 is equivalent to 1. -f, \--force : generate output even if stdout is a tty. (Generating random data to a tty is generally considered ill-advised, but you can do it if you really want.) -v, \--verbose : print a progress message showing the number of bytes that has been output so far. # EXAMPLES $ bup random 1k | sha1sum 2108c55d0a2687c8dacf9192677c58437a55db71 - $ bup random -S1 1k | sha1sum 2108c55d0a2687c8dacf9192677c58437a55db71 - $ bup random -S2 1k | sha1sum f71acb90e135d98dad7efc136e8d2cc30573e71a - $ time bup random 1G >/dev/null Random: 1024 Mbytes, done. real 0m4.261s user 0m4.048s sys 0m0.172s $ bup random 1G | bup split -t --bench Random: 1024 Mbytes, done. bup: 1048576.00kbytes in 18.59 secs = 56417.78 kbytes/sec 1092599b9c7b2909652ef1e6edac0796bfbfc573 # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-restore.md000066400000000000000000000233241454333004200177720ustar00rootroot00000000000000% bup-restore(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-restore - extract files from a backup set # SYNOPSIS bup restore [-r *host*:[*path*]] [\--outdir=*outdir*] [\--exclude-rx *pattern*] [\--exclude-rx-from *filename*] [-v] [-q] \<paths...\> # DESCRIPTION `bup restore` extracts files from a backup set (created with `bup-save`(1)) to the local filesystem. The specified *paths* are of the form /_branch_/_revision_/_some/where_. The components of the path are as follows: branch : the name of the backup set to restore from; this corresponds to the `--name` (`-n`) option to `bup save`. revision : the revision of the backup set to restore. The revision *latest* is always the most recent backup on the given branch.
You can discover other revisions using `bup ls /branch`. some/where : the previously saved path (after any stripping/grafting) that you want to restore. For example, `etc/passwd`. If _some/where_ names a directory, `bup restore` will restore that directory and then recursively restore its contents. If _some/where_ names a directory and ends with a slash (ie. path/to/dir/), `bup restore` will restore the children of that directory directly to the current directory (or the `--outdir`). If _some/where_ does not end in a slash, the children will be restored to a subdirectory of the current directory. If _some/where_ names a directory and ends in '/.' (ie. path/to/dir/.), `bup restore` will do exactly what it would have done for path/to/dir, and then restore _dir_'s metadata to the current directory (or the `--outdir`). See the EXAMPLES section. As a special case, if _some/where_ names the "latest" symlink, e.g. `bup restore /foo/latest`, then bup will act exactly as if the save that "latest" points to had been specified, and restore that, rather than the "latest" symlink itself. Whenever path metadata is available, `bup restore` will attempt to restore it. When restoring ownership, bup implements tar/rsync-like semantics. It will normally prefer user and group names to uids and gids when they're available, but it will not try to restore the user unless running as root, and it will fall back to the numeric uid or gid whenever the metadata contains a user or group name that doesn't exist on the current system. The use of user and group names can be disabled via `--numeric-ids` (which can be important when restoring a chroot, for example), and as a special case, a uid or gid of 0 will never be remapped by name. Additionally, some systems don't allow setting a uid/gid that doesn't correspond with a known user/group. On those systems, bup will log an error for each relevant path. 
The `--map-user`, `--map-group`, `--map-uid`, `--map-gid` options may be used to adjust the available ownership information before any of the rules above are applied, but note that due to those rules, `--map-uid` and `--map-gid` will have no effect whenever a path has a valid user or group. In those cases, either `--numeric-ids` must be specified, or the user or group must be cleared by a suitable `--map-user foo=` or `--map-group foo=`. Hardlinks will also be restored when possible, but at least currently, no links will be made to targets outside the restore tree, and if the restore tree spans a different arrangement of filesystems from the save tree, some hardlink sets may not be completely restored. Also note that changing hardlink sets on disk between index and save may produce unexpected results. With the current implementation, bup will attempt to recreate any given hardlink set as it existed at index time, even if all of the files in the set weren't still hardlinked (but were otherwise identical) at save time. Note that during the restoration process, access to data within the restore tree may be more permissive than it was in the original source. Unless security is irrelevant, you must restore to a private subdirectory, and then move the resulting tree to its final position. See the EXAMPLES section for a demonstration. # OPTIONS -r, \--remote=*host*:*path* : restore the backup set from the given remote server. If *path* is omitted, uses the default path on the remote server (you still need to include the ':'). The connection to the remote server is made with SSH. If you'd like to specify which port, user or private key to use for the SSH connection, we recommend you use the `~/.ssh/config` file. -C, \--outdir=*outdir* : create and change to directory *outdir* before extracting the files. \--numeric-ids : restore numeric IDs (user, group, etc.) rather than names. 
\--exclude-rx=*pattern* : exclude any path matching *pattern*, which must be a Python regular expression (http://docs.python.org/library/re.html). The pattern will be compared against the full path rooted at the top of the restore tree, without anchoring, so "x/y" will match "ox/yard" or "box/yards". To exclude the contents of /tmp, but not the directory itself, use "^/tmp/.". (can be specified more than once) Note that the root of the restore tree (which matches '^/') is the top of the archive tree being restored, and has nothing to do with the filesystem destination. Given "restore ... /foo/latest/etc/", the pattern '^/passwd$' would match if a file named passwd had been saved as '/foo/latest/etc/passwd'. Examples: * '/foo$' - exclude any file named foo * '/foo/$' - exclude any directory named foo * '/foo/.' - exclude the content of any directory named foo * '^/tmp/.' - exclude root-level /tmp's content, but not /tmp itself \--exclude-rx-from=*filename* : read --exclude-rx patterns from *filename*, one pattern per-line (may be repeated). Ignore completely empty lines. \--sparse : write output data sparsely when reasonable. Currently, reasonable just means "at least whenever there are 512 or more consecutive zeroes". \--map-user *old*=*new* : for every path, restore the *old* (saved) user name as *new*. Specifying "" for *new* will clear the user. For example "--map-user foo=" will allow the uid to take effect for any path that originally had a user of "foo", unless countermanded by a subsequent "--map-user foo=..." specification. See DESCRIPTION above for further information. \--map-group *old*=*new* : for every path, restore the *old* (saved) group name as *new*. Specifying "" for *new* will clear the group. For example "--map-group foo=" will allow the gid to take effect for any path that originally had a group of "foo", unless countermanded by a subsequent "--map-group foo=..." specification. See DESCRIPTION above for further information. 
\--map-uid *old*=*new* : for every path, restore the *old* (saved) uid as *new*, unless countermanded by a subsequent "--map-uid *old*=..." option. Note that the uid will only be relevant for paths with no user. See DESCRIPTION above for further information. \--map-gid *old*=*new* : for every path, restore the *old* (saved) gid as *new*, unless countermanded by a subsequent "--map-gid *old*=..." option. Note that the gid will only be relevant for paths with no group. See DESCRIPTION above for further information. -v, \--verbose : increase log output. Given once, prints every directory as it is restored; given twice, prints every file and directory. -q, \--quiet : suppress output, including the progress meter. Normally, if stderr is a tty, a progress meter displays the total number of files restored. # EXAMPLES Create a simple test backup set: $ bup index -u /etc $ bup save -n mybackup /etc/passwd /etc/profile Restore just one file: $ bup restore /mybackup/latest/etc/passwd Restoring: 1, done. $ ls -l passwd -rw-r--r-- 1 apenwarr apenwarr 1478 2010-09-08 03:06 passwd Restore etc to test (no trailing slash): $ bup restore -C test /mybackup/latest/etc Restoring: 3, done. $ find test test test/etc test/etc/passwd test/etc/profile Restore the contents of etc to test (trailing slash): $ bup restore -C test /mybackup/latest/etc/ Restoring: 2, done. $ find test test test/passwd test/profile Restore the contents of etc and etc's metadata to test (trailing "/."): $ bup restore -C test /mybackup/latest/etc/. Restoring: 2, done. # At this point test and etc's metadata will match. $ find test test test/passwd test/profile Restore a tree without risk of unauthorized access: # mkdir --mode 0700 restore-tmp # bup restore -C restore-tmp /somebackup/latest/foo Restoring: 42, done.
# mv restore-tmp/foo somewhere # rmdir restore-tmp Restore a tree, remapping an old user and group to a new user and group: # ls -l /original/y -rw-r----- 1 foo baz 3610 Nov 4 11:31 y # bup restore -C dest --map-user foo=bar --map-group baz=bax /x/latest/y Restoring: 42, done. # ls -l dest/y -rw-r----- 1 bar bax 3610 Nov 4 11:31 y Restore a tree, remapping an old uid to a new uid. Note that the old user must be erased so that bup won't prefer it over the uid: # ls -l /original/y -rw-r----- 1 foo baz 3610 Nov 4 11:31 y # ls -ln /original/y -rw-r----- 1 1000 1007 3610 Nov 4 11:31 y # bup restore -C dest --map-user foo= --map-uid 1000=1042 /x/latest/y Restoring: 97, done. # ls -ln dest/y -rw-r----- 1 1042 1007 3610 Nov 4 11:31 y An alternate way to do the same by quashing users/groups universally with `--numeric-ids`: # bup restore -C dest --numeric-ids --map-uid 1000=1042 /x/latest/y Restoring: 97, done. # SEE ALSO `bup-save`(1), `bup-ftp`(1), `bup-fuse`(1), `bup-web`(1) # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-rm.md000066400000000000000000000024561454333004200167300ustar00rootroot00000000000000% bup-rm(1) Bup %BUP_VERSION% % Rob Browning % %BUP_DATE% # NAME bup-rm - remove references to archive content # SYNOPSIS bup rm [-#|\--verbose] <*branch*|*save*...> # DESCRIPTION `bup rm` removes the indicated *branch*es (backup sets) and *save*s. By itself, this command does not delete any actual data (nor recover any storage space), but it may make it very difficult or impossible to refer to the deleted items, unless there are other references to them (e.g. tags). A subsequent garbage collection, either by a `bup gc`, or by a normal `git gc`, may permanently delete data that is no longer reachable from the remaining branches or tags, and reclaim the related storage space. WARNING: This is one of the few bup commands that modifies your archive in intentionally destructive ways. # OPTIONS -v, \--verbose : increase verbosity (can be used more than once). 
-*#*, \--compress=*#* : set the compression level to # (a value from 0-9, where 9 is the highest and 0 is no compression). The default is 6. Note that `bup rm` may only write new commits. # EXAMPLES # Delete the backup set (branch) foo and a save in bar. $ bup rm /foo /bar/2014-10-21-214720 # SEE ALSO `bup-gc`(1), `bup-save`(1), `bup-fsck`(1), and `bup-tag`(1) # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-save.md000066400000000000000000000132431454333004200172440ustar00rootroot00000000000000% bup-save(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-save - create a new bup backup set # SYNOPSIS bup save [-r *host*:*path*] \<-t|-c|-n *name*\> [-#] [-f *indexfile*] [-v] [-q] [\--smaller=*maxsize*] \; # DESCRIPTION `bup save` saves the contents of the given files or paths into a new backup set and optionally names that backup set. Note that in order to refer to your backup set later (i.e. for restoration), you must either specify `--name` (the normal case), or record the tree or commit id printed by `--tree` or `--commit`. Before trying to save files using `bup save`, you should first update the index using `bup index`. The reasons for separating the two steps are described in the man page for `bup-index`(1). By default, metadata will be saved for every path, and the metadata for any unindexed parent directories of indexed paths will be taken directly from the filesystem. However, if `--strip`, `--strip-path`, or `--graft` is specified, metadata will not be saved for the root directory (*/*). See `bup-restore`(1) for more information about the handling of metadata. # OPTIONS -r, \--remote=*host*:*path* : save the backup set to the given remote server. If *path* is omitted, uses the default path on the remote server (you still need to include the ':'). The connection to the remote server is made with SSH. If you'd like to specify which port, user or private key to use for the SSH connection, we recommend you use the `~/.ssh/config` file. 
-t, \--tree : after creating the backup set, print out the git tree id of the resulting backup. -c, \--commit : after creating the backup set, print out the git commit id of the resulting backup. -n, \--name=*name* : after creating the backup set, create a git branch named *name* so that the backup can be accessed using that name. If *name* already exists, the new backup will be considered a descendant of the old *name*. (Thus, you can continually create new backup sets with the same name, and later view the history of that backup set to see how files have changed over time.) -d, \--date=*date* : specify the date of the backup, in seconds since the epoch, instead of the current time. -f, \--indexfile=*indexfile* : use a different index filename instead of `$BUP_DIR/bupindex`. -v, \--verbose : increase verbosity (can be used more than once). With one -v, prints every directory name as it gets backed up. With two -v, also prints every filename. -q, \--quiet : disable progress messages. \--smaller=*maxsize* : don't back up files >= *maxsize* bytes. You can use this to run frequent incremental backups of your small files, which can usually be backed up quickly, and skip over large ones (like virtual machine images) which take longer. Then you can back up the large files less frequently. Use a suffix like k, M, or G to specify multiples of 1024, 1024\*1024, 1024\*1024\*1024 respectively. \--bwlimit=*bytes/sec* : don't transmit more than *bytes/sec* bytes per second to the server. This is good for making your backups not suck up all your network bandwidth. Use a suffix like k, M, or G to specify multiples of 1024, 1024\*1024, 1024\*1024\*1024 respectively. \--strip : strips the path that is given from all files and directories. A directory */root/chroot/etc* saved with "bup save -n chroot \--strip /root/chroot" would be saved as */etc*. Note that currently, metadata will not be saved for the root directory (*/*) when this option is specified. 
\--strip-path=*path-prefix* : strips the given path prefix *path-prefix* from all files and directories. A directory */root/chroot/webserver/etc* saved with "bup save -n webserver \--strip-path=/root/chroot /root/chroot/webserver/etc" would be saved as */webserver/etc*. Note that currently, metadata will not be saved for the root directory (*/*) when this option is specified. \--graft=*old_path*=*new_path* : a graft point *old_path*=*new_path* (can be used more than once). A directory */root/chroot/a/etc* saved with "bup save -n chroot \--graft /root/chroot/a=/chroot/a" would be saved as */chroot/a/etc*. Note that currently, metadata will not be saved for the root directory (*/*) when this option is specified. -*#*, \--compress=*#* : set the compression level to # (a value from 0-9, where 9 is the highest and 0 is no compression). The default is 1 (fast, loose compression) # EXAMPLES $ bup index -ux /etc Indexing: 1981, done. $ bup save -r myserver: -n my-pc-backup --bwlimit=50k /etc Reading index: 1981, done. Saving: 100.00% (998/998k, 1981/1981 files), done. $ ls /home/joe/chroot/httpd bin var $ bup index -ux /home/joe/chroot/httpd Indexing: 1337, done. $ bup save --strip -n joes-httpd-chroot /home/joe/chroot/httpd Reading index: 1337, done. Saving: 100.00% (998/998k, 1337/1337 files), done. $ bup ls joes-httpd-chroot/latest/ bin/ var/ $ bup save --strip-path=/home/joe/chroot -n joes-chroot \ /home/joe/chroot/httpd Reading index: 1337, done. Saving: 100.00% (998/998k, 1337/1337 files), done. $ bup ls joes-chroot/latest/ httpd/ $ bup save --graft /home/joe/chroot/httpd=/http-chroot \ -n joe /home/joe/chroot/httpd Reading index: 1337, done. Saving: 100.00% (998/998k, 1337/1337 files), done. $ bup ls joe/latest/ http-chroot/ # SEE ALSO `bup-index`(1), `bup-split`(1), `bup-on`(1), `bup-restore`(1), `ssh_config`(5) # BUP Part of the `bup`(1) suite. 
bup-0.33.3/Documentation/bup-server.md000066400000000000000000000025141454333004200176130ustar00rootroot00000000000000% bup-server(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-server - the server side of the bup client-server relationship # SYNOPSIS bup server # DESCRIPTION `bup server` is the server side of a remote bup session. If you use `bup-split`(1) or `bup-save`(1) with the `-r` option, they will ssh to the remote server and run `bup server` to receive the transmitted objects. There is normally no reason to run `bup server` yourself. # MODES smart : In this mode, the server checks each incoming object against the idx files in its repository. If any object already exists, it tells the client about the idx file it was found in, allowing the client to download that idx and avoid sending duplicate data. This is `bup-server`'s default mode. dumb : In this mode, the server will not check its local index before writing an object. To avoid writing duplicate objects, the server will tell the client to download all of its `.idx` files at the start of the session. This mode is useful on low powered server hardware (ie router/slow NAS). # FILES $BUP_DIR/bup-dumb-server : Activate dumb server mode, as discussed above. This file is not created by default in new repositories. # SEE ALSO `bup-save`(1), `bup-split`(1) # BUP Part of the `bup`(1) suite. 
bup-0.33.3/Documentation/bup-split.md000066400000000000000000000142011454333004200174340ustar00rootroot00000000000000% bup-split(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-split - save individual files to bup backup sets # SYNOPSIS bup split \[-t\] \[-c\] \[-n *name*\] COMMON\_OPTIONS bup split -b COMMON\_OPTIONS bup split --copy COMMON\_OPTIONS bup split --noop \[-t|-b\] COMMON\_OPTIONS COMMON\_OPTIONS ~ \[-r *host*:*path*\] \[-v\] \[-q\] \[-d *seconds-since-epoch*\] \[\--bench\] \[\--max-pack-size=*bytes*\] \[-#\] \[\--bwlimit=*bytes*\] \[\--max-pack-objects=*n*\] \[\--fanout=*count*\] \[\--keep-boundaries\] \[\--git-ids | filenames...\] # DESCRIPTION `bup split` concatenates the contents of the given files (or if no filenames are given, reads from stdin), splits the content into chunks of around 8k using a rolling checksum algorithm, and saves the chunks into a bup repository. Chunks which have previously been stored are not stored again (ie. they are 'deduplicated'). Because of the way the rolling checksum works, chunks tend to be very stable across changes to a given file, including adding, deleting, and changing bytes. For example, if you use `bup split` to back up an XML dump of a database, and the XML file changes slightly from one run to the next, nearly all the data will still be deduplicated and the size of each backup after the first will typically be quite small. Another technique is to pipe the output of the `tar`(1) or `cpio`(1) programs to `bup split`. When individual files in the tarball change slightly or are added or removed, bup still processes the remainder of the tarball efficiently. (Note that `bup save` is usually a more efficient way to accomplish this, however.) To get the data back, use `bup-join`(1). # MODES These options select the primary behavior of the command, with -n being the most likely choice. 
-n, \--name=*name* : after creating the dataset, create a git branch named *name* so that it can be accessed using that name. If *name* already exists, the new dataset will be considered a descendant of the old *name*. (Thus, you can continually create new datasets with the same name, and later view the history of that dataset to see how it has changed over time.) The original data will also be available as a top-level file named "data" in the VFS, accessible via `bup fuse`, `bup ftp`, etc. -t, \--tree : output the git tree id of the resulting dataset. -c, \--commit : output the git commit id of the resulting dataset. -b, \--blobs : output a series of git blob ids that correspond to the chunks in the dataset. Incompatible with -n, -t, and -c. \--noop : read the data and split it into blocks based on the "bupsplit" rolling checksum algorithm, but don't store anything in the repo. Can be combined with -b or -t to compute (but not store) the git blobs or tree ids for the dataset. This is mostly useful for benchmarking and validating the bupsplit algorithm. Incompatible with -n and -c. \--copy : like `--noop`, but also write the data to stdout. This can be useful for benchmarking the speed of read+bupsplit+write for large amounts of data. Incompatible with -n, -t, -c, and -b. # OPTIONS -r, \--remote=*host*:*path* : save the backup set to the given remote server. If *path* is omitted, uses the default path on the remote server (you still need to include the ':'). The connection to the remote server is made with SSH. If you'd like to specify which port, user or private key to use for the SSH connection, we recommend you use the `~/.ssh/config` file. Even though the destination is remote, a local bup repository is still required. -d, \--date=*seconds-since-epoch* : specify the date inscribed in the commit (seconds since 1970-01-01). -q, \--quiet : disable progress messages. -v, \--verbose : increase verbosity (can be used more than once). 
\--git-ids : stdin is a list of git object ids instead of raw data. `bup split` will read the contents of each named git object (if it exists in the bup repository) and split it. This might be useful for converting a git repository with large binary files to use bup-style hashsplitting instead. This option is probably most useful when combined with `--keep-boundaries`. \--keep-boundaries : if multiple filenames are given on the command line, they are normally concatenated together as if the content all came from a single file. That is, the set of blobs/trees produced is identical to what it would have been if there had been a single input file. However, if you use `--keep-boundaries`, each file is split separately. You still only get a single tree or commit or series of blobs, but each blob comes from only one of the files; the end of one of the input files always ends a blob. \--bench : print benchmark timings to stderr. \--max-pack-size=*bytes* : never create git packfiles larger than the given number of bytes. Default is 1 billion bytes. Usually there is no reason to change this. \--max-pack-objects=*numobjs* : never create git packfiles with more than the given number of objects. Default is 200 thousand objects. Usually there is no reason to change this. \--fanout=*numobjs* : when splitting very large files, try and keep the number of elements in trees to an average of *numobjs*. \--bwlimit=*bytes/sec* : don't transmit more than *bytes/sec* bytes per second to the server. This is good for making your backups not suck up all your network bandwidth. Use a suffix like k, M, or G to specify multiples of 1024, 1024\*1024, 1024\*1024\*1024 respectively. -*#*, \--compress=*#* : set the compression level to # (a value from 0-9, where 9 is the highest and 0 is no compression). The default is 1 (fast, loose compression) # EXAMPLES $ tar -cf - /etc | bup split -r myserver: -n mybackup-tar tar: Removing leading /' from member names Indexing objects: 100% (196/196), done. 
$ bup join -r myserver: mybackup-tar | tar -tf - | wc -l 1961 # SEE ALSO `bup-join`(1), `bup-index`(1), `bup-save`(1), `bup-on`(1), `ssh_config`(5) # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-tag.md000066400000000000000000000027401454333004200170610ustar00rootroot00000000000000% bup-tag(1) Bup %BUP_VERSION% % Gabriel Filion % %BUP_DATE% # NAME bup-tag - tag a commit in the bup repository # SYNOPSIS bup tag bup tag [-f] \ \ bup tag -d [-f] \ # DESCRIPTION `bup tag` lists, creates or deletes a tag in the bup repository. A tag is an easy way to retrieve a specific commit. It can be used to mark a specific backup for easier retrieval later. When called without any arguments, the command lists all tags that can be found in the repository. When called with a tag name and a commit ID or ref name, it creates a new tag with the given name, if it doesn't already exist, that points to the commit given in the second argument. When called with '-d' and a tag name, it removes the given tag, if it exists. bup exposes the contents of backups with current tags, via any command that lists or shows backups. They can be found under the /.tag directory. For example, the 'ftp' command will show the tag named 'tag1' under /.tag/tag1. # OPTIONS -d, \--delete : delete a tag -f, \--force : Overwrite the named tag even if it already exists. With -f, don't report a missing tag as an error. # EXAMPLES $ bup tag new-puppet-version hostx-backup $ bup tag new-puppet-version $ bup ftp "ls /.tag/new-puppet-version" files.. $ bup tag -d new-puppet-version # SEE ALSO `bup-save`(1), `bup-split`(1), `bup-ftp`(1), `bup-fuse`(1), `bup-web`(1) # BUP Part of the `bup`(1) suite. 
bup-0.33.3/Documentation/bup-tick.md000066400000000000000000000012451454333004200172370ustar00rootroot00000000000000% bup-tick(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-tick - wait for up to one second # SYNOPSIS bup tick # DESCRIPTION `bup tick` waits until `time`(2) returns a different value than it originally did. Since time() has a granularity of one second, this can cause a delay of up to one second. This program is useful for writing tests that need to ensure a file date will be seen as modified. It is slightly better than `sleep`(1) since it sometimes waits for less than one second. # EXAMPLES $ date; bup tick; date Sat Feb 6 16:59:58 EST 2010 Sat Feb 6 16:59:59 EST 2010 # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup-web.md000066400000000000000000000031541454333004200170630ustar00rootroot00000000000000% bup-ftp(1) Bup %BUP_VERSION% % Joe Beda % %BUP_DATE% # NAME bup-web - Start web server to browse bup repositiory # SYNOPSIS bup web [[hostname]:port] bup web unix://path # DESCRIPTION `bup web` starts a web server that can browse bup repositories. The file hierarchy is the same as that shown by `bup-fuse`(1), `bup-ls`(1) and `bup-ftp`(1). `hostname` and `port` default to 127.0.0.1 and 8080, respectively, and hence `bup web` will only offer up the web server to locally running clients. If you'd like to expose the web server to anyone on your network (dangerous!) you can omit the bind address to bind to all available interfaces: `:8080`. When `unix://path` is specified, the server will listen on the filesystem socket at `path` rather than a network socket. A `SIGTERM` signal may be sent to the server to request an orderly shutdown. # OPTIONS \--human-readable : display human readable file sizes (i.e. 3.9K, 4.7M) \--browser : open the site in the default browser # EXAMPLES $ bup web Serving HTTP on 127.0.0.1:8080... ^C Interrupted. $ bup web :8080 Serving HTTP on 0.0.0.0:8080... ^C Interrupted. 
$ bup web unix://socket & Serving HTTP on filesystem socket 'socket' $ curl --unix-socket ./socket http://localhost/ $ fg bup web unix://socket ^C Interrupted. $ bup web & [1] 30980 Serving HTTP on 127.0.0.1:8080... $ kill -s TERM 30980 Shutdown requested $ wait 30980 $ echo $? 0 # SEE ALSO `bup-fuse`(1), `bup-ls`(1), `bup-ftp`(1), `bup-restore`(1), `kill`(1) # BUP Part of the `bup`(1) suite. bup-0.33.3/Documentation/bup.md000066400000000000000000000052071454333004200163110ustar00rootroot00000000000000% bup(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup - Backup program using rolling checksums and git file formats # SYNOPSIS bup [global options...] \ [options...] # DESCRIPTION `bup` is a program for making backups of your files using the git file format. Unlike `git`(1) itself, bup is optimized for handling huge data sets including individual very large files (such a virtual machine images). However, once a backup set is created, it can still be accessed using git tools. Subcommands are described in separate man pages. For example `bup-init`(1) covers `bup init`. # GLOBAL OPTIONS \--version : print bup's version number. Equivalent to `bup version`. -d, \--bup-dir=*BUP_DIR* : use the given BUP_DIR parameter as the bup repository location, instead of reading it from the $BUP_DIR environment variable or using the default `~/.bup` location. 
# COMMONLY USED SUBCOMMANDS `bup-fsck`(1) : Check backup sets for damage and add redundancy information `bup-ftp`(1) : Browse backup sets using an ftp-like client `bup-fuse`(1) : Mount your backup sets as a filesystem `bup-help`(1) : Print detailed help for the given command `bup-index`(1) : Create or display the index of files to back up `bup-on`(1) : Backup a remote machine to the local one `bup-restore`(1) : Extract files from a backup set `bup-save`(1) : Save files into a backup set (note: run "bup index" first) `bup-web`(1) : Launch a web server to examine backup sets # RARELY USED SUBCOMMANDS `bup-damage`(1) : Deliberately destroy data `bup-drecurse`(1) : Recursively list files in your filesystem `bup-init`(1) : Initialize a bup repository `bup-join`(1) : Retrieve a file backed up using `bup-split`(1) `bup-ls`(1) : Browse the files in your backup sets `bup-margin`(1) : Determine how close your bup repository is to armageddon `bup-memtest`(1) : Test bup memory usage statistics `bup-midx`(1) : Index objects to speed up future backups `bup-newliner`(1) : Make sure progress messages don't overlap with output `bup-random`(1) : Generate a stream of random output `bup-server`(1) : The server side of the bup client-server relationship `bup-split`(1) : Split a single file into its own backup set `bup-tick`(1) : Wait for up to one second. `bup-version`(1) : Report the version number of your copy of bup. # ENVIRONMENT `BUP_ASSUME_GIT_VERSION_IS_FINE` : If set to `true`, `yes`, or `1`, assume the version of `git` in the path is acceptable. # SEE ALSO `git`(1) and the *README* file from the bup distribution. The home of bup is at . 
bup-0.33.3/GNUmakefile000066400000000000000000000227761454333004200144540ustar00rootroot00000000000000 MAKEFLAGS += --warn-undefined-variables SHELL := bash .DEFAULT_GOAL := all # So where possible we can make tests more reproducible export BUP_TEST_RANDOM_SEED ?= $(shell echo "$$RANDOM") # Guard against accidentally using/testing a local bup export PATH := $(CURDIR)/dev/shadow-bin:$(PATH) clean_paths := generated_dependencies := # See config/config.vars.in (sets bup_python_config, among other things) include config/config.vars -include $(generated_dependencies) pf := set -o pipefail define isok && echo " ok" || echo " no" endef # If ok, strip trailing " ok" and return the output, otherwise, error define shout $(if $(subst ok,,$(lastword $(1))),$(error $(2)),$(shell x="$(1)"; echo $${x%???})) endef sampledata_rev := $(shell dev/configure-sampledata --revision $(isok)) sampledata_rev := \ $(call shout,$(sampledata_rev),Could not parse sampledata revision) current_sampledata := test/sampledata/var/rev/v$(sampledata_rev) os := $(shell ($(pf); uname | sed 's/[-_].*//') $(isok)) os := $(call shout,$(os),Unable to determine OS) # CFLAGS CPPFLAGS LDFLAGS are handled vis config/config.vars.in # Satisfy --warn-undefined-variables DESTDIR ?= TARGET_ARCH ?= bup_shared_cflags := -O2 -Wall -Werror -Wformat=2 -MMD -MP bup_shared_cflags := -Wno-unknown-pragmas -Wsign-compare $(bup_shared_cflags) bup_shared_cflags := -D_FILE_OFFSET_BITS=64 $(bup_shared_cflags) bup_shared_cflags := $(bup_config_cflags) $(bup_shared_cflags) bup_shared_ldflags := soext := .so ifeq ($(os),CYGWIN) soext := .dll endif ifdef TMPDIR test_tmp := $(TMPDIR) else test_tmp := $(CURDIR)/test/tmp endif initial_setup := $(shell dev/update-checkout-info lib/bup/checkout_info.py $(isok)) initial_setup := $(call shout,$(initial_setup),update-checkout-info failed)) clean_paths += lib/bup/checkout_info.py # Dependency changes here should be mirrored in Makefile config/config.vars: configure config/configure 
config/configure.inc config/*.in MAKE="$(MAKE)" ./configure # On some platforms, Python.h and readline.h fight over the # _XOPEN_SOURCE version, i.e. -Werror crashes on a mismatch, so for # now, we're just going to let Python's version win. helpers_cflags = $(bup_python_cflags) $(bup_shared_cflags) -I$(CURDIR)/src helpers_ldflags := $(bup_python_ldflags) $(bup_shared_ldflags) ifneq ($(strip $(bup_readline_cflags)),) readline_cflags += $(bup_readline_cflags) readline_xopen := $(filter -D_XOPEN_SOURCE=%,$(readline_cflags)) readline_xopen := $(subst -D_XOPEN_SOURCE=,,$(readline_xopen)) readline_cflags := $(filter-out -D_XOPEN_SOURCE=%,$(readline_cflags)) readline_cflags += $(addprefix -DBUP_RL_EXPECTED_XOPEN_SOURCE=,$(readline_xopen)) helpers_cflags += $(readline_cflags) endif helpers_ldflags += $(bup_readline_ldflags) ifeq ($(bup_have_libacl),1) helpers_cflags += $(bup_libacl_cflags) helpers_ldflags += $(bup_libacl_ldflags) endif bup_ext_cmds := lib/cmd/bup-import-rdiff-backup lib/cmd/bup-import-rsnapshot bup_deps := lib/bup/_helpers$(soext) lib/cmd/bup all: dev/bup-exec dev/bup-python dev/python $(bup_deps) Documentation/all \ $(current_sampledata) $(current_sampledata): dev/configure-sampledata --setup PANDOC ?= $(shell type -p pandoc) ifeq (,$(PANDOC)) $(shell echo "Warning: pandoc not found; skipping manpage generation" 1>&2) man_md := else man_md := $(wildcard Documentation/*.md) endif man_roff := $(patsubst %.md,%.1,$(man_md)) man_html := $(patsubst %.md,%.html,$(man_md)) INSTALL=install PREFIX=/usr/local MANDIR=$(PREFIX)/share/man DOCDIR=$(PREFIX)/share/doc/bup BINDIR=$(PREFIX)/bin LIBDIR=$(PREFIX)/lib/bup dest_mandir := $(DESTDIR)$(MANDIR) dest_docdir := $(DESTDIR)$(DOCDIR) dest_bindir := $(DESTDIR)$(BINDIR) dest_libdir := $(DESTDIR)$(LIBDIR) install: all $(INSTALL) -d $(dest_bindir) $(dest_libdir)/bup/cmd $(dest_libdir)/cmd \ $(dest_libdir)/web/static test -z "$(man_roff)" || install -d $(dest_mandir)/man1 test -z "$(man_roff)" || $(INSTALL) -m 0644 
$(man_roff) $(dest_mandir)/man1 test -z "$(man_html)" || install -d $(dest_docdir) test -z "$(man_html)" || $(INSTALL) -m 0644 $(man_html) $(dest_docdir) $(INSTALL) -pm 0755 lib/cmd/bup "$(dest_libdir)/cmd/bup" $(INSTALL) -pm 0755 $(bup_ext_cmds) "$(dest_libdir)/cmd/" cd "$(dest_bindir)" && \ ln -sf "$$($(CURDIR)/dev/python -c 'import os; print(os.path.relpath("$(abspath $(dest_libdir))/cmd/bup"))')" \ . set -e; \ $(INSTALL) -pm 0644 lib/bup/*.py $(dest_libdir)/bup/ $(INSTALL) -pm 0644 lib/bup/cmd/*.py $(dest_libdir)/bup/cmd/ $(INSTALL) -pm 0755 \ lib/bup/*$(soext) \ $(dest_libdir)/bup $(INSTALL) -pm 0644 \ lib/web/static/* \ $(dest_libdir)/web/static/ $(INSTALL) -pm 0644 \ lib/web/*.html \ $(dest_libdir)/web/ if test -e lib/bup/checkout_info.py; then \ $(INSTALL) -pm 0644 lib/bup/checkout_info.py \ $(dest_libdir)/bup/source_info.py; \ else \ ! grep -qF '$$Format' lib/bup/source_info.py; \ $(INSTALL) -pm 0644 lib/bup/source_info.py $(dest_libdir)/bup/; \ fi embed_cflags = $(bup_python_cflags_embed) $(bup_shared_cflags) -I$(CURDIR)/src embed_ldflags := $(bup_python_ldflags_embed) $(bup_shared_ldflags) config/config.h: config/config.vars clean_paths += config/config.h.tmp cc_bin = $(CC) $(embed_cflags) -I src $(CPPFLAGS) $(CFLAGS) $^ \ $(embed_ldflags) $(LDFLAGS) -fPIE -o $@ clean_paths += dev/python-proposed generated_dependencies += dev/python-proposed.d dev/python-proposed: dev/python.c src/bup/compat.c src/bup/io.c rm -f dev/python $(cc_bin) clean_paths += dev/python dev/python: dev/python-proposed dev/validate-python $@-proposed cp -R -p $@-proposed $@ clean_paths += dev/bup-exec generated_dependencies += dev/bup-exec.d dev/bup-exec: bup_shared_cflags += -D BUP_DEV_BUP_EXEC=1 dev/bup-exec: lib/cmd/bup.c src/bup/compat.c src/bup/io.c $(cc_bin) clean_paths += dev/bup-python generated_dependencies += dev/bup-python.d dev/bup-python: bup_shared_cflags += -D BUP_DEV_BUP_PYTHON=1 dev/bup-python: lib/cmd/bup.c src/bup/compat.c src/bup/io.c $(cc_bin) clean_paths += 
lib/cmd/bup generated_dependencies += lib/cmd/bup.d lib/cmd/bup: lib/cmd/bup.c src/bup/compat.c src/bup/io.c $(cc_bin) clean_paths += lib/bup/_helpers$(soext) generated_dependencies += lib/bup/_helpers.d lib/bup/_helpers$(soext): lib/bup/_helpers.c lib/bup/bupsplit.c $(CC) $(helpers_cflags) $(CPPFLAGS) $(CFLAGS) $^ \ $(helpers_ldflags) $(LDFLAGS) -o $@ test/tmp: mkdir test/tmp # MAKEFLAGS must not be in an immediate := assignment parallel_opt = $(lastword $(filter -j%,$(MAKEFLAGS))) get_parallel_n = $(patsubst -j%,%,$(parallel_opt)) maybe_specific_n = $(if $(filter -j%,$(parallel_opt)),-n$(get_parallel_n)) xdist_opt = $(if $(filter -j,$(parallel_opt)),-nauto,$(maybe_specific_n)) lint: dev/bup-exec dev/bup-python ./pylint test: all test/tmp dev/python lint ! bup version # Ensure we can't test the local bup (cf. dev/shadow-bin) ./bup features if test yes = "$$(dev/python -c 'import xdist; print("yes")' 2>/dev/null)"; then \ (set -x; ./pytest $(xdist_opt);) \ else \ (set -x; ./pytest;) \ fi stupid: PATH=/bin:/usr/bin $(MAKE) test check: test distcheck: all if test yes = $$(dev/python -c "import xdist; print('yes')" 2>/dev/null); then \ (set -x; ./pytest $(xdist_opt) -m release;) \ else \ (set -x; ./pytest -m release;) \ fi long-test: export BUP_TEST_LEVEL=11 long-test: test long-check: export BUP_TEST_LEVEL=11 long-check: check .PHONY: check-py3 check-py3: $(MAKE) clean && BUP_PYTHON_CONFIG=python3-config $(MAKE) check .PHONY: Documentation/all Documentation/all: $(man_roff) $(man_html) Documentation/substvars: $(bup_deps) # FIXME: real temp file set -e; bup_ver=$$(./bup version); \ echo "s,%BUP_VERSION%,$$bup_ver,g" > $@.tmp; \ echo "s,%BUP_DATE%,$$bup_ver,g" >> $@.tmp mv $@.tmp $@ Documentation/%.1: Documentation/%.md Documentation/substvars $(pf); sed -f Documentation/substvars $< \ | "$(PANDOC)" -s -r markdown -w man -o $@ Documentation/%.html: Documentation/%.md Documentation/substvars $(pf); sed -f Documentation/substvars $< \ | "$(PANDOC)" -s -r markdown -w 
html -o $@ .PHONY: Documentation/clean Documentation/clean: cd Documentation && rm -f *~ .*~ *.[0-9] *.html substvars # Note: this adds commits containing the current manpages in roff and # html format to the man and html branches respectively. The version # is determined by "git describe --always". .PHONY: update-doc-branches update-doc-branches: Documentation/all dev/update-doc-branches refs/heads/man refs/heads/html # push the pregenerated doc files to origin/man and origin/html push-docs: export-docs git push origin man html # import pregenerated doc files from origin/man and origin/html, in case you # don't have pandoc but still want to be able to install the docs. import-docs: Documentation/clean $(pf); git archive origin/html | (cd Documentation && tar -xvf -) $(pf); git archive origin/man | (cd Documentation && tar -xvf -) clean: Documentation/clean cd config && rm -rf finished bin config.var # Clean up the mounts first, so that find, etc. won't crash later if test -e test/mnt; then dev/cleanup-mounts-under test/mnt; fi if test -e test/mnt; then rm -r test/mnt; fi if test -e test/tmp; then dev/cleanup-mounts-under test/tmp; fi # FIXME: migrate these to test/mnt/ if test -e test/int/testfs; \ then umount test/int/testfs || true; fi rm -rf test/int/testfs test/int/testfs.img testfs.img cd config && rm -f \ ${CONFIGURE_DETRITUS} ${CONFIGURE_FILES} ${GENERATED_FILES} rm -rf $(clean_paths) .pytest_cache rm -f $(generated_dependencies) find . -name __pycache__ -exec rm -rf {} + if test -e test/tmp; then dev/force-delete test/tmp; fi dev/configure-sampledata --clean bup-0.33.3/HACKING000066400000000000000000000121561454333004200133600ustar00rootroot00000000000000 Conventions? Are you kidding? OK fine. Code Branching Model ==================== The main branch is the development branch, and stable releases are tagged either from there, or from `VERSION.x` branches, created as needed, for example `0.33.x`. 
Any branch with a "tmp/" prefix might be rebased (often), so keep that in mind when using or depending on one. Any branch with a "tmp/review/" prefix corresponds to a patchset submitted to the mailing list. We try to maintain these branches to make the review process easier for those not as familiar with patches via email. Current Trajectory ================== Now that we've finished the 0.33 release, we're working on 0.34, and although we're not certain which new features will be included, we're considering: - Migrating hashsplitting to C. - Automatically splitting trees to avoid having to save large tree objects for large directories even if only a few files have changed or been added (e.g. maildirs). - Moving all of the compoents of the index to sqlite. Right now the main index is an mmapped file, and the hard link and metadata databases are pickled. As a result the index isn't transactional and suffers from bugs caused by "skew" across the components. - Better VFS performance for large repositories (i.e. fuse, ls, web...). - Better VFS caching. - Index improvements. - Incremental indexing via inotify. - Smarter (and quieter) handling of cross-filesystem metadata. - Encryption. - Support for alternate remote storage APIs. If you have the time and inclination, please help review patches posted to the list, or post your own. (See "ways to help" below.) More specific ways to help ========================== Testing -- yes please. With respect to patches, bup development is handled via the mailing list, and all patches should be sent to the list for review (see "Submitting Patches" below). In most cases, we try to wait until we have at least one or two "Reviewed-by:" replies to a patch posted to the list before incorporating it into main, so reviews are an important way to help. We also love a good "Tested-by:" -- the more the merrier. 
Testing ======= Individual tests can be run via ./pytest TEST For example: ./pytest test/int/test_git.py ./pytest test/ext/test-ftp If you have the xdist module installed, then you can specify its `-n` option to run the tests in parallel (e.g. `./pytest -nauto ...`), or you can specify `-j` to make, which will be translated to xdist with `-j` becoming `-nauto` and `-jN` becoming `-nN`. Internal tests that test bup's code directly are located in test/int, and external tests that test bup from the outside, typically by running the executable, are located in test/ext. Currently, all pytests must be located in either test/ext or test/int. Internal test filenames must match test_*.py, and external tests must be located in text/ext and their filenames must match test-* (see test/ext/conftest.py for the handling of the latter). Any paths matching those criteria will be automatically collected by pytest. Some aspects of the environment are automatically restored after each test via fixtures in conftest.py, including the state of the environment variables and the working directory; the latter is reset to the top of the source tree. Submitting patches ================== As mentioned, all patches should be posted to the mailing list for review, and must be "signed off" by the author before official inclusion (see ./SIGNED-OFF-BY). You can create a "signed off" set of patches in ./patches, ready for submission to the list, like this: git format-patch -s -o patches origin/main which will include all of the patches since origin/main on your current branch. Then you can send them to the list like this: git send-email --to bup-list@googlegroups.com --compose patches/* The use of --compose will cause git to ask you to edit a cover letter that will be sent as the first message. 
It's also possible to handle everything in one step: git send-email -s --to bup-list@googlegroups.com --compose origin/main and you can add --annotate if you'd like to review or edit each patch before it's sent. For single patches, this might be easier: git send-email -s --to bup-list@googlegroups.com --annotate -n1 HEAD which will send the top patch on the current branch, and will stop to allow you to add comments. You can add comments to the section with the diffstat without affecting the commit message. Of course, unless your machine is set up to handle outgoing mail locally, you may need to configure git to be able to send mail. See git-send-email(1) for further details. Oh, and we do have a ./CODINGSTYLE, hobgoblins and all, though don't let that scare you off. We're not all that fierce. Even More Generally =================== It's not like we have a lot of hard and fast rules, but some of the ideas here aren't altogether terrible: http://www.kernel.org/doc/Documentation/SubmittingPatches In particular, we've been paying at least some attention to the bits regarding Acked-by:, Reported-by:, Tested-by: and Reviewed-by:. bup-0.33.3/LICENSE000066400000000000000000000620771454333004200134050ustar00rootroot00000000000000 Unless otherwise stated below, the files in this project may be distributed under the terms of the following license. (The LGPL version 2.) In addition, bupsplit.c, bupsplit.h, and options.py may be redistributed according to the separate (BSD-style) license written inside those files. GNU LIBRARY GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1991 Free Software Foundation, Inc. 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the library GPL. It is numbered 2 because it goes with version 2 of the ordinary GPL.] 
Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Library General Public License, applies to some specially designated Free Software Foundation software, and to any other libraries whose authors decide to use it. You can use it for your libraries, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library, or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link a program with the library, you must provide complete object files to the recipients so that they can relink them with the library, after making changes to the library and recompiling it. And you must show them these terms so they know their rights. Our method of protecting your rights has two steps: (1) copyright the library, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the library. Also, for each distributor's protection, we want to make certain that everyone understands that there is no warranty for this free library. 
If the library is modified by someone else and passed on, we want its recipients to know that what they have is not the original version, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that companies distributing free software will individually obtain patent licenses, thus in effect transforming the program into proprietary software. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License, which was designed for utility programs. This license, the GNU Library General Public License, applies to certain designated libraries. This license is quite different from the ordinary one; be sure to read it in full, and don't assume that anything in it is the same as in the ordinary license. The reason we have a separate public license for some libraries is that they blur the distinction we usually make between modifying or adding to a program and simply using it. Linking a program with a library, without changing the library, is in some sense simply using the library, and is analogous to running a utility program or application program. However, in a textual and legal sense, the linked executable is a combined work, a derivative of the original library, and the ordinary General Public License treats it as such. Because of this blurred distinction, using the ordinary General Public License for libraries did not effectively promote software sharing, because most developers did not use the libraries. We concluded that weaker conditions might promote sharing better. However, unrestricted linking of non-free programs would deprive the users of those programs of all benefit from the free status of the libraries themselves. 
This Library General Public License is intended to permit developers of non-free programs to use free libraries, while preserving your freedom as a user of such programs to change the free libraries that are incorporated in them. (We have not seen how to achieve this as regards changes in header files, but we have achieved it as regards changes in the actual functions of the Library.) The hope is that this will lead to faster development of free libraries. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, while the latter only works together with the library. Note that it is possible for a library to be covered by the ordinary General Public License rather than by this special one. GNU LIBRARY GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Library General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) 
"Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. 
d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. 
However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also compile or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. 
If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. c) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. d) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. 
It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. 
Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. 
It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Library General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. 
If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. 
You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. , 1 April 1990 Ty Coon, President of Vice That's all there is to it! 
bup-0.33.3/Makefile000066400000000000000000000004551454333004200140300ustar00rootroot00000000000000 # Redirect to GNU make .SUFFIXES: default: config/finished config/bin/make .DEFAULT: $(MAKE) config/finished config/bin/make $(.TARGETS) # Dependency changes here should be mirrored in GNUmakefile config/finished: configure config/configure config/configure.inc config/*.in MAKE= ./configure bup-0.33.3/README000077700000000000000000000000001454333004200145162README.mdustar00rootroot00000000000000bup-0.33.3/README.md000066400000000000000000000547061454333004200136570ustar00rootroot00000000000000bup: It backs things up ======================= bup is a program that backs things up. It's short for "backup." Can you believe that nobody else has named an open source program "bup" after all this time? Me neither. Despite its unassuming name, bup is pretty cool. To give you an idea of just how cool it is, I wrote you this poem: Bup is teh awesome What rhymes with awesome? I guess maybe possum But that's irrelevant. Hmm. Did that help? Maybe prose is more useful after all. Reasons bup is awesome ---------------------- bup has a few advantages over other backup software: - It uses a rolling checksum algorithm (similar to rsync) to split large files into chunks. The most useful result of this is you can backup huge virtual machine (VM) disk images, databases, and XML files incrementally, even though they're typically all in one huge file, and not use tons of disk space for multiple versions. - It uses the packfile format from git (the open source version control system), so you can access the stored data even if you don't like bup's user interface. - Unlike git, it writes packfiles *directly* (instead of having a separate garbage collection / repacking stage) so it's fast even with gratuitously huge amounts of data. bup's improved index formats also allow you to track far more filenames than git (millions) and keep track of far more objects (hundreds or thousands of gigabytes). 
- Data is "automagically" shared between incremental backups without having to know which backup is based on which other one - even if the backups are made from two different computers that don't even know about each other. You just tell bup to back stuff up, and it saves only the minimum amount of data needed. - You can back up directly to a remote bup server, without needing tons of temporary disk space on the computer being backed up. And if your backup is interrupted halfway through, the next run will pick up where you left off. And it's easy to set up a bup server: just install bup on any machine where you have ssh access. - Bup can use "par2" redundancy to recover corrupted backups even if your disk has undetected bad sectors. - Even when a backup is incremental, you don't have to worry about restoring the full backup, then each of the incrementals in turn; an incremental backup *acts* as if it's a full backup, it just takes less disk space. - You can mount your bup repository as a FUSE filesystem and access the content that way, and even export it over Samba. - It's written in python (with some C parts to make it faster) so it's easy for you to extend and maintain. Reasons you might want to avoid bup ----------------------------------- - It's not remotely as well tested as something like tar, so it's more likely to eat your data. It's also missing some probably-critical features, though fewer than it used to be. - It requires python 3.7 or newer, a C compiler, and an installed git version >= 1.5.6. It also requires par2 if you want fsck to be able to generate the information needed to recover from some types of corruption. - It currently only works on Linux, FreeBSD, NetBSD, OS X >= 10.4, Solaris, or Windows (with Cygwin, and WSL). Patches to support other platforms are welcome. - Any items in "Things that are stupid" below. 
Notable changes introduced by a release ======================================= - Changes in 0.33.3 as compared to 0.33.2 - Changes in 0.33.2 as compared to 0.33.1 - Changes in 0.33.1 as compared to 0.33 - Changes in 0.33 as compared to 0.32 - Changes in 0.32.2 as compared to 0.32.1 - Changes in 0.32.1 as compared to 0.32 - Changes in 0.32 as compared to 0.31 - Changes in 0.31 as compared to 0.30.1 - Changes in 0.30.1 as compared to 0.30 - Changes in 0.30 as compared to 0.29.3 - Changes in 0.29.3 as compared to 0.29.2 - Changes in 0.29.2 as compared to 0.29.1 - Changes in 0.29.1 as compared to 0.29 - Changes in 0.29 as compared to 0.28.1 - Changes in 0.28.1 as compared to 0.28 - Changes in 0.28 as compared to 0.27.1 - Changes in 0.27.1 as compared to 0.27 Test status =========== | main | |--------| | [![main branch test status](https://api.cirrus-ci.com/github/bup/bup.svg?branch=main)](https://cirrus-ci.com/github/bup/bup) | Getting started =============== From source ----------- - Check out the bup source code using git: ```sh git clone https://github.com/bup/bup ``` - This will leave you on the main branch, which is perfect if you would like to help with development, but if you'd just like to use bup, please check out the latest stable release like this: ```sh git checkout 0.33.3 ``` You can see the latest stable release here: https://github.com/bup/bup/tags - Install the required python libraries (including the development libraries). For `bup fuse` you will need to install [python-fuse](https://github.com/libfuse/python-fuse) rather than [fusepy](https://github.com/fusepy/fusepy). For example, in Debian, install python3-fuse rather than python3-fusepy. 
On very recent Debian/Ubuntu versions, this may be sufficient (run as root): ```sh apt-get build-dep bup ``` Otherwise try this: ```sh apt-get install python3-dev python3-fuse apt-get install python3-pyxattr python3-pytest apt-get install python3-distutils apt-get install pkg-config linux-libc-dev libacl1-dev apt-get install gcc make acl attr rsync apt-get isntall python3-pytest-xdist # optional (parallel tests) apt-get install par2 # optional (error correction) apt-get install libreadline-dev # optional (bup ftp) apt-get install python3-tornado # optional (bup web) ``` On Cygwin, install python, make, rsync, and gcc4. If you would like to use the optional bup web server on systems without a tornado package, you may want to try this: ```sh pip install tornado ``` - Build: ```sh make ``` At the moment the build treats compiler warnings as errors. If the build fails as a result, try this: ```sh CFLAGS=-Wno-error ./configure make ``` - Run the tests: ```sh make long-check ``` or if you're in a bit more of a hurry: ```sh make check ``` If you have the Python xdist module installed, then you can probably run the tests faster by adding the make -j option (see ./HACKING for additional information): ```sh make -j check ``` The tests should pass (with some skipped tests that weren't applicable in your environment). If they don't pass for you, stop here and send an email to bup-list@googlegroups.com. Though if there are symbolic links along the current working directory path, the tests may fail. Running something like this before "make test" should sidestep the problem: ```sh cd "$(pwd -P)" ``` - You can install bup via "make install", and override the default destination with DESTDIR and PREFIX. Files are normally installed to "$DESTDIR/$PREFIX" where DESTDIR is empty by default, and PREFIX is set to /usr/local. 
So if you wanted to install bup to /opt/bup, you might do something like this: ```sh make install DESTDIR=/opt/bup PREFIX='' ``` - The Python version that bup will use is determined by the `python-config` program chosen by `./configure`, which will search for a reasonable version unless `BUP_PYTHON_CONFIG` is set in the environment. You can see which Python executable was chosen by looking at the configure output, or examining `config/config.var/bup-python-config`, and you can change the selection by re-running `./configure`. - If you want to specify your own `CPPFLAGS`, `CFLAGS`, or `LDFLAGS`, you can set them for individual `make` invocations, e.g. `make CFLAGS=-O0 check`, or persistently via `./configure` with `CFLAGS=-O0 ./configure`. At the moment, `make clean` clears the configuration, but we may change that at some point, perhaps by adding and requiring a `make distclean` to clear the configuration. From binary packages -------------------- Binary packages of bup are known to be built for the following OSes: - [Debian](https://packages.debian.org/bup) - [Ubuntu](https://packages.ubuntu.com/bup) - [pkgsrc.se (NetBSD, Dragonfly, and others)](https://pkgsrc.se/sysutils/bup) - [NetBSD](https://cvsweb.netbsd.org/bsdweb.cgi/pkgsrc/sysutils/bup/) - [Arch Linux](https://www.archlinux.org/packages/?sort=&q=bup) - [macOS (Homebrew)](https://formulae.brew.sh/formula/bup) Using bup --------- - Get help for any bup command: ```sh bup help bup help init bup help index bup help save bup help restore ... 
``` - Initialize the default bup repository (~/.bup -- you can choose another by either specifying `bup -d DIR ...` or setting the `BUP_DIR` environment variable for a command): ```sh bup init ``` - Make a local backup (-v or -vv will increase the verbosity): ```sh bup index /etc bup save -n local-etc /etc ``` - Restore a local backup to ./dest: ```sh bup restore -C ./dest local-etc/latest/etc ls -l dest/etc ``` - Look at how much disk space your backup took: ```sh du -s ~/.bup ``` - Make another backup (which should be mostly identical to the last one; notice that you don't have to *specify* that this backup is incremental, it just saves space automatically): ```sh bup index /etc bup save -n local-etc /etc ``` - Look how little extra space your second backup used (on top of the first): ```sh du -s ~/.bup ``` - Get a list of your previous backups: ```sh bup ls local-etc ``` - Restore your first backup again: ```sh bup restore -C ./dest-2 local-etc/2013-11-23-11195/etc ``` - Make a backup to a remote server which must already have the 'bup' command somewhere in its PATH (see /etc/profile, etc/environment, ~/.profile, or ~/.bashrc), and be accessible via ssh. Make sure to replace SERVERNAME with the actual hostname of your server: ```sh bup init -r SERVERNAME:path/to/remote-bup-dir bup index /etc bup save -r SERVERNAME:path/to/remote-bup-dir -n local-etc /etc ``` - Make a remote backup to ~/.bup on SERVER: ```sh bup index /etc bup save -r SERVER: -n local-etc /etc ``` - See what saves are available in ~/.bup on SERVER: ```sh bup ls -r SERVER: ``` - Restore the remote backup to ./dest: ```sh bup restore -r SERVER: -C ./dest local-etc/latest/etc ls -l dest/etc ``` - Defend your backups from death rays (OK fine, more likely from the occasional bad disk block). 
This writes parity information (currently via par2) for all of the existing data so that bup may be able to recover from some amount of repository corruption: ```sh bup fsck -g ``` - Use split/join instead of index/save/restore. Try making a local backup using tar: ```sh tar -cvf - /etc | bup split -n local-etc -vv ``` - Try restoring the tarball: ```sh bup join local-etc | tar -tf - ``` - Look at how much disk space your backup took: ```sh du -s ~/.bup ``` - Make another tar backup: ```sh tar -cvf - /etc | bup split -n local-etc -vv ``` - Look at how little extra space your second backup used on top of the first: ```sh du -s ~/.bup ``` - Restore the first tar backup again (the ~1 is git notation for "one older than the most recent"): ```sh bup join local-etc~1 | tar -tf - ``` - Get a list of your previous split-based backups: ```sh GIT_DIR=~/.bup git log local-etc ``` - Save a tar archive to a remote server (without tar -z to facilitate deduplication): ```sh tar -cvf - /etc | bup split -r SERVERNAME: -n local-etc -vv ``` - Restore the archive: ```sh bup join -r SERVERNAME: local-etc | tar -tf - ``` That's all there is to it! Notes on FreeBSD ---------------- - In order to compile the code, run tests and install bup, you need to install GNU Make from the `gmake` port. - Python's development headers are automatically installed with the 'python' port so there's no need to install them separately. - To use the 'bup fuse' command, you need to install the fuse kernel module from the 'fusefs-kmod' port in the 'sysutils' section and the libraries from the port named 'py-fusefs' in the 'devel' section. - The 'par2' command can be found in the port named 'par2cmdline'. - In order to compile the documentation, you need pandoc which can be found in the port named 'hs-pandoc' in the 'textproc' section. Notes on NetBSD/pkgsrc ---------------------- - See pkgsrc/sysutils/bup, which should be the most recent stable release and includes man pages. 
It also has a reasonable set of dependencies (git, par2, py-fuse-bindings). - The "fuse-python" package referred to is hard to locate, and is a separate tarball for the python language binding distributed by the fuse project on sourceforge. It is available as pkgsrc/filesystems/py-fuse-bindings and on NetBSD 5, "bup fuse" works with it. - "bup fuse" presents every directory/file as inode 0. The directory traversal code ("fts") in NetBSD's libc will interpret this as a cycle and error out, so "ls -R" and "find" will not work. - There is no support for ACLs. If/when some enterprising person fixes this, adjust dev/compare-trees. Notes on Cygwin --------------- - There is no support for ACLs. If/when some enterprising person fixes this, adjust dev/compare-trees. - In test/ext/test-misc, two tests have been disabled. These tests check to see that repeated saves produce identical trees and that an intervening index doesn't change the SHA1. Apparently Cygwin has some unusual behaviors with respect to access times (that probably warrant further investigation). Possibly related: http://cygwin.com/ml/cygwin/2007-06/msg00436.html Notes on OS X ------------- - There is no support for ACLs. If/when some enterprising person fixes this, adjust dev/compare-trees. How it works ============ Basic storage: -------------- bup stores its data in a git-formatted repository. Unfortunately, git itself doesn't actually behave very well for bup's use case (huge numbers of files, files with huge sizes, retaining file permissions/ownership are important), so we mostly don't use git's *code* except for a few helper programs. For example, bup has its own git packfile writer written in python. Basically, 'bup split' reads the data on stdin (or from files specified on the command line), breaks it into chunks using a rolling checksum (similar to rsync), and saves those chunks into a new git packfile. There is at least one git packfile per backup. 
When deciding whether to write a particular chunk into the new packfile, bup first checks all the other packfiles that exist to see if they already have that chunk. If they do, the chunk is skipped. git packs come in two parts: the pack itself (*.pack) and the index (*.idx). The index is pretty small, and contains a list of all the objects in the pack. Thus, when generating a remote backup, we don't have to have a copy of the packfiles from the remote server: the local end just downloads a copy of the server's *index* files, and compares objects against those when generating the new pack, which it sends directly to the server. The "-n" option to 'bup split' and 'bup save' is the name of the backup you want to create, but it's actually implemented as a git branch. So you can do cute things like checkout a particular branch using git, and receive a bunch of chunk files corresponding to the file you split. If you use '-b' or '-t' or '-c' instead of '-n', bup split will output a list of blobs, a tree containing that list of blobs, or a commit containing that tree, respectively, to stdout. You can use this to construct your own scripts that do something with those values. The bup index: -------------- 'bup index' walks through your filesystem and updates a file (whose name is, by default, ~/.bup/bupindex) to contain the name, attributes, and an optional git SHA1 (blob id) of each file and directory. 'bup save' basically just runs the equivalent of 'bup split' a whole bunch of times, once per file in the index, and assembles a git tree that contains all the resulting objects. Among other things, that makes 'git diff' much more useful (compared to splitting a tarball, which is essentially a big binary blob). However, since bup splits large files into smaller chunks, the resulting tree structure doesn't *exactly* correspond to what git itself would have stored. 
Also, the tree format used by 'bup save' will probably change in the future to support storing file ownership, more complex file permissions, and so on. If a file has previously been written by 'bup save', then its git blob/tree id is stored in the index. This lets 'bup save' avoid reading that file to produce future incremental backups, which means it can go *very* fast unless a lot of files have changed. Things that are stupid for now but which we'll fix later ======================================================== Help with any of these problems, or others, is very welcome. Join the mailing list (see below) if you'd like to help. - 'bup save' and 'bup restore' have immature metadata support. On the plus side, they actually do have support now, but it's new, and not remotely as well tested as tar/rsync/whatever's. However, you have to start somewhere, and as of 0.25, we think it's ready for more general use. Please let us know if you have any trouble. Also, if any strip or graft-style options are specified to 'bup save', then no metadata will be written for the root directory. That's obviously less than ideal. - bup is overly optimistic about mmap. Right now bup just assumes that it can mmap as large a block as it likes, and that mmap will never fail. Yeah, right... If nothing else, this has failed on 32-bit architectures (and 31-bit is even worse -- looking at you, s390). To fix this, we might just implement a FakeMmap[1] class that uses normal file IO and handles all of the mmap methods[2] that bup actually calls. Then we'd swap in one of those whenever mmap fails. This would also require implementing some of the methods needed to support "[]" array access, probably at a minimum __getitem__, __setitem__, and __setslice__ [3]. [1] http://comments.gmane.org/gmane.comp.sysutils.backup.bup/613 [2] http://docs.python.org/3/library/mmap.html [3] http://docs.python.org/3/reference/datamodel.html#emulating-container-types - 'bup index' is slower than it should be. 
It's still rather fast: it can iterate through all the filenames on my 600,000 file filesystem in a few seconds. But it still needs to rewrite the entire index file just to add a single filename, which is pretty nasty; it should just leave the new files in a second "extra index" file or something. - bup could use inotify for *really* efficient incremental backups. You could even have your system doing "continuous" backups: whenever a file changes, we immediately send an image of it to the server. We could give the continuous-backup process a really low CPU and I/O priority so you wouldn't even know it was running. - bup has never been tested on anything but Linux, FreeBSD, NetBSD, OS X, and Windows+Cygwin. There's nothing that makes it *inherently* non-portable, though, so that's mostly a matter of someone putting in some effort. (For a "native" Windows port, the most annoying thing is the absence of ssh in a default Windows installation.) - bup needs better documentation. According to an article about bup in Linux Weekly News (https://lwn.net/Articles/380983/), "it's a bit short on examples and a user guide would be nice." Documentation is the sort of thing that will never be great unless someone from outside contributes it (since the developers can never remember which parts are hard to understand). - bup is "relatively speedy" and has "pretty good" compression. ...according to the same LWN article. Clearly neither of those is good enough. We should have awe-inspiring speed and crazy-good compression. Must work on that. Writing more parts in C might help with the speed. - bup has no GUI. Actually, that's not stupid, but you might consider it a limitation. See the ["Related Projects"](https://bup.github.io/) list for some possible options. More Documentation ================== bup has an extensive set of man pages. Try using 'bup help' to get started, or use 'bup help SUBCOMMAND' for any bup subcommand (like split, join, index, save, etc.) 
to get details on that command. For further technical details, please see ./DESIGN. How you can help ================ bup is a work in progress and there are many ways it can still be improved. If you'd like to contribute patches, ideas, or bug reports, please join the bup mailing list: You can find the mailing list archives here: http://groups.google.com/group/bup-list and you can subscribe by sending a message to: bup-list+subscribe@googlegroups.com You can also reach us via the \#bup IRC channel at ircs://irc.libera.chat:6697/bup on the [libera.chat](https://libera.chat/) network or via this [web interface](https://web.libera.chat/?channels=bup). Please see ./HACKING for additional information, i.e. how to submit patches (hint - no pull requests), how we handle branches, etc. Have fun, Avery bup-0.33.3/SIGNED-OFF-BY000066400000000000000000000004521454333004200142010ustar00rootroot00000000000000 Patches to bup should have a Signed-off-by: header. Including this header in your patches signifies that you are licensing your changes under the terms described in the LICENSE file residing in the top-level directory of the source tree, the directory that also contains this SIGNED-OFF-BY file. 
bup-0.33.3/bin/000077500000000000000000000000001454333004200131345ustar00rootroot00000000000000bup-0.33.3/bin/bup000077700000000000000000000000001454333004200160752../lib/cmd/bupustar00rootroot00000000000000bup-0.33.3/bup000077700000000000000000000000001454333004200151122lib/cmd/bupustar00rootroot00000000000000bup-0.33.3/config/000077500000000000000000000000001454333004200136315ustar00rootroot00000000000000bup-0.33.3/config/.gitignore000066400000000000000000000001271454333004200156210ustar00rootroot00000000000000config.cmd config.h config.log config.mak config.md config.sub config.var/ config.vars bup-0.33.3/config/config.vars.in000066400000000000000000000012211454333004200163740ustar00rootroot00000000000000CONFIGURE_FILES=@CONFIGURE_FILES@ GENERATED_FILES=@GENERATED_FILES@ CC = @CC@ CFLAGS = @CFLAGS@ CPPFLAGS = @CPPFLAGS@ LDFLAGS = @LDFLAGS@ bup_config_cflags = @bup_config_cflags@ bup_make=@bup_make@ bup_python_config=@bup_python_config@ bup_python_cflags=@bup_python_cflags@ bup_python_ldflags=@bup_python_ldflags@ bup_python_cflags_embed=@bup_python_cflags_embed@ bup_python_ldflags_embed=@bup_python_ldflags_embed@ bup_have_libacl=@bup_have_libacl@ bup_libacl_cflags=@bup_libacl_cflags@ bup_libacl_ldflags=@bup_libacl_ldflags@ bup_have_readline=@bup_have_readline@ bup_readline_cflags=@bup_readline_cflags@ bup_readline_ldflags=@bup_readline_ldflags@ bup-0.33.3/config/configure000077500000000000000000000250351454333004200155450ustar00rootroot00000000000000#!/usr/bin/env bash ac_help="--with-pylint[=yes|no|maybe] require and run pylint (maybe)" bup_find_prog() { # Prints prog path to stdout or nothing. local name="$1" result="$2" TLOGN "checking for $name" if ! 
[ "$result" ]; then result=`acLookFor "$name"` fi TLOG " ($result)" echo "$result" } bup_try_c_code() { local code="$1" tmpdir rc cflags='' if test -z "$code"; then AC_FAIL "No code provided to test compile" fi case "$#" in 1) ;; 2) cflags="$2" ;; *) AC_FAIL "Invald call to bup_try_c_code" "$@" ;; esac tmpdir="$(mktemp -d "bup-try-c-compile-XXXXXXX")" || exit $? echo "$code" > "$tmpdir/test.c" || exit $? $AC_CC -Wall -Werror $cflags -c -o "$tmpdir/test" "$tmpdir/test.c" rc=$? rm -r "$tmpdir" || exit $? return $rc } bup_config_cflags=() bup-add-cflag-if-supported() { local opt="$1" if test -z "$opt"; then AC_FAIL 'No option to check' fi TLOGN "checking for $AC_CC $opt support" if bup_try_c_code \ "int main(int argc, char**argv) { return 0; }" \ "$opt"; then bup_config_cflags+="$opt" TLOG ' (found)' else TLOG ' (not found)' fi } TARGET=bup argv=() with_pylint=maybe while test $# -gt 0; do case "$1" in --with-pylint=yes) with_pylint=yes; shift;; --with-pylint=maybe) with_pylint=maybe; shift;; --with-pylint=no) with_pylint=no; shift;; *) argv+=("$1"); shift;; esac done # Set $@ to the adjusted args set - "${argv[@]}" . ./configure.inc # FIXME: real tmpdir rm -rf finished config/bin config.var config.var.tmp config.vars AC_INIT $TARGET if ! AC_PROG_CC; then LOG " You need to have a functional C compiler to build $TARGET" exit 1 fi bup-add-cflag-if-supported -Wno-unused-command-line-argument # Since ./configure changes pwd, fix MAKE if it's relative case "$MAKE" in /*) ;; */*) MAKE="../../$MAKE";; esac for make_candidate in make gmake; do found_make="$(bup_find_prog "$make_candidate" "$MAKE")" if test "$found_make" \ && ("$found_make" --version | grep "GNU Make"); then MAKE="$found_make" break; fi done if ! test "$MAKE"; then AC_FAIL "ERROR: unable to find GNU make as make or gmake" fi MAKE_VERSION=`$MAKE --version | grep "GNU Make" | awk '{print $3}'` if [ -z "$MAKE_VERSION" ]; then AC_FAIL "ERROR: $MAKE --version does not return sensible output?" 
fi expr "$MAKE_VERSION" '>=' '3.81' || AC_FAIL "ERROR: $MAKE must be >= version 3.81" AC_SUB bup_make "$MAKE" # Haven't seen a documented way to determine the python version via # python-config right now, so we'll defer version checking until # later. if test "$BUP_PYTHON_CONFIG"; then bup_python_config="$(type -p "$BUP_PYTHON_CONFIG")" if test -z "$bup_python_config"; then AC_FAIL $(printf "ERROR: BUP_PYTHON_CONFIG value %q appears invalid" \ "$BUP_PYTHON_CONFIG") fi else for py_min_ver in 11 10 9 8 7; do bup_python_config="$(bup_find_prog "python3.$py_min_ver-config" '')" test -z "$bup_python_config" || break done test -z "$bup_python_config" \ && bup_python_config="$(bup_find_prog python3-config '')" if test -z "$bup_python_config"; then AC_FAIL "ERROR: unable to find a suitable python-config" fi fi bup_python_cflags=$("$bup_python_config" --cflags) || exit $? bup_python_ldflags=$("$bup_python_config" --ldflags) || exit $? bup_python_cflags_embed=$("$bup_python_config" --cflags --embed) if test $? -eq 0; then bup_python_ldflags_embed=$("$bup_python_config" --ldflags --embed) || exit $? else # Earlier versions didn't support --embed bup_python_cflags_embed=$("$bup_python_config" --cflags) || exit $? bup_python_ldflags_embed=$("$bup_python_config" --ldflags) || exit $? fi bup_python_cflags="$bup_python_cflags -fPIC" case "$OSTYPE" in darwin*) # For at least 10.3+ (2003+) bup_python_ldflags="$bup_python_ldflags -bundle -undefined dynamic_lookup" ;; *) bup_python_ldflags="$bup_python_ldflags -shared" ;; esac AC_SUB bup_python_config "$bup_python_config" AC_SUB bup_python_cflags "$bup_python_cflags" AC_SUB bup_python_ldflags "$bup_python_ldflags" AC_SUB bup_python_cflags_embed "$bup_python_cflags_embed" AC_SUB bup_python_ldflags_embed "$bup_python_ldflags_embed" bup_git="$(bup_find_prog git '')" if test -z "$bup_git"; then AC_FAIL "ERROR: unable to find git" fi # For stat. AC_CHECK_HEADERS sys/stat.h AC_CHECK_HEADERS sys/types.h # For stat and mincore. 
AC_CHECK_HEADERS unistd.h # For mincore. AC_CHECK_HEADERS sys/mman.h # For FS_IOC_GETFLAGS and FS_IOC_SETFLAGS. AC_CHECK_HEADERS linux/fs.h AC_CHECK_HEADERS sys/ioctl.h # On GNU/kFreeBSD utimensat is defined in GNU libc, but won't work. if [ -z "$OS_GNU_KFREEBSD" ]; then AC_CHECK_FUNCS utimensat fi AC_CHECK_FUNCS utimes AC_CHECK_FUNCS lutimes AC_CHECK_FUNCS mincore mincore_incore_code=" #if 0$ac_defined_HAVE_UNISTD_H #include #endif #if 0$ac_defined_HAVE_SYS_MMAN_H #include #endif int main(int argc, char **argv) { if (MINCORE_INCORE) return 0; } " mincore_buf_type_code() { local vec_type="$1" echo " #include int main(int argc, char **argv) { void *x = 0; $vec_type *buf = 0; return mincore(x, 0, buf); }" || exit $? } if test "$ac_defined_HAVE_MINCORE"; then TLOGN "checking for MINCORE_INCORE" if bup_try_c_code "$mincore_incore_code"; then AC_DEFINE BUP_HAVE_MINCORE_INCORE 1 TLOG ' (found)' else TLOG ' (not found)' fi TLOGN "checking mincore buf type" if bup_try_c_code "$(mincore_buf_type_code char)"; then AC_DEFINE BUP_MINCORE_BUF_TYPE 'char' TLOG ' (char)' elif bup_try_c_code "$(mincore_buf_type_code 'unsigned char')"; then AC_DEFINE BUP_MINCORE_BUF_TYPE 'unsigned char' TLOG ' (unsigned char)' else AC_FAIL "ERROR: unexpected mincore definition; please notify bup-list@googlegroups.com" fi fi TLOGN "checking for readline" bup_have_readline='' bup_readline_includes_in_subdir='' bup_readline_via_pkg_config='' # We test this specific thing because it should work everywhere and it was # a particulary problem on macos (we'd get the wrong includes if we just # tested that the includes work). readline_test_code=' static char *on_completion_entry(const char *text, int state) { return NULL; } void bup_test(void) { rl_completion_entry_function = on_completion_entry; } ' if pkg-config readline; then bup_readline_cflags="$(pkg-config readline --cflags)" || exit $? bup_readline_ldflags="$(pkg-config readline --libs)" || exit $? 
# It looks like it's not uncommon for pkg-config to provide a -I # that doesn't support the documentation's specified #include # . See what's really going on. if bup_try_c_code "#include // required by unpatched readline #include $readline_test_code" \ "$bup_readline_cflags" then bup_have_readline=1 bup_readline_includes_in_subdir=1 elif bup_try_c_code "#include // required by unpatched readline #include $readline_test_code" \ "$bup_readline_cflags" then bup_have_readline=1 fi if test "$bup_have_readline"; then bup_readline_via_pkg_config=1 else bup_readline_cflags='' bup_readline_ldflags='' fi fi if ! test "$bup_have_readline"; then if bup_try_c_code "#include $readline_test_code"; then bup_readline_ldflags=-lreadline bup_have_readline=1 bup_readline_includes_in_subdir=1 elif bup_try_c_code "#include $readline_test_code"; then bup_readline_ldflags=-lreadline bup_have_readline=1 fi fi if test "$bup_have_readline"; then AC_DEFINE BUP_HAVE_READLINE 1 if test "$bup_readline_includes_in_subdir"; then AC_DEFINE BUP_READLINE_INCLUDES_IN_SUBDIR 1 fi if test "$bup_readline_via_pkg_config"; then TLOG ' (yes, pkg-config)' else TLOG ' (yes)' fi fi AC_SUB bup_readline_cflags "$bup_readline_cflags" AC_SUB bup_readline_ldflags "$bup_readline_ldflags" AC_SUB bup_have_readline "$bup_have_readline" AC_CHECK_FIELD stat st_atim sys/types.h sys/stat.h unistd.h AC_CHECK_FIELD stat st_mtim sys/types.h sys/stat.h unistd.h AC_CHECK_FIELD stat st_ctim sys/types.h sys/stat.h unistd.h AC_CHECK_FIELD stat st_atimensec sys/types.h sys/stat.h unistd.h AC_CHECK_FIELD stat st_mtimensec sys/types.h sys/stat.h unistd.h AC_CHECK_FIELD stat st_ctimensec sys/types.h sys/stat.h unistd.h AC_CHECK_FIELD tm tm_gmtoff time.h orig_ac_cc="$AC_CC" orig_libs="$LIBS" TLOGN "checking for libacl" if pkg-config libacl; then bup_libacl_cflags="$(pkg-config libacl --cflags)" bup_libacl_ldflags="$(pkg-config libacl --libs)" TLOG ' (yes, pkg-config)' else bup_libacl_cflags= bup_libacl_ldflags='-lacl' TLOG ' (yes)' fi 
AC_CC="$AC_CC${bup_libacl_cflags:+ $bup_libacl_cflags}" LIBS="$bup_libacl_ldflags" AC_CHECK_HEADERS sys/acl.h AC_CHECK_HEADERS acl/libacl.h AC_CHECK_FUNCS acl_get_file AC_CHECK_FUNCS acl_from_text AC_CHECK_FUNCS acl_set_file # Note: These are linux specific, but we need them (for now?) AC_CHECK_FUNCS acl_extended_file AC_CHECK_FUNCS acl_to_any_text TLOGN "checking for complete acl support" if test "$ac_defined_HAVE_ACL_EXTENDED_FILE"; then bup_have_libacl=1 AC_SUB bup_libacl_cflags "$bup_libacl_cflags" AC_SUB bup_libacl_ldflags "$bup_libacl_ldflags" TLOG ' (yes)' else bup_have_libacl= AC_SUB bup_have_libacl '' TLOG ' (no)' fi AC_SUB bup_have_libacl "$bup_have_libacl" AC_CC="$orig_ac_cc" LIBS="$orig_libs" AC_SUB bup_config_cflags "$bup_config_cflags" AC_OUTPUT config.vars set -euo pipefail # FIXME: real tmpdir mkdir -p config.var.tmp echo -n "$MAKE" > config.var.tmp/bup-make echo -n "$bup_python_config" > config.var.tmp/bup-python-config echo -n "$with_pylint" > config.var.tmp/with-pylint mv config.var.tmp config.var if test -e bin; then rm -r bin; fi mkdir -p bin (cd bin && ln -s "$MAKE" make) touch finished printf " found: python-config (%q) found: git (%q, ($("$bup_git" --version)) " \ "$bup_python_config" \ "$bup_git" \ 1>&5 summarize() { local found="$1" shift if test "$found"; then TLOG found: "$@" else TLOG not found: "$@" fi } summarize "$bup_have_readline" 'readline support (e.g. bup ftp)' summarize "$bup_have_libacl" 'POSIX ACL support' TLOG bup-0.33.3/config/configure.inc000066400000000000000000000667251454333004200163250ustar00rootroot00000000000000# -*-shell-script-*- # @(#) configure.inc 1.40@(#) # Copyright (c) 1999-2007 David Parsons. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # 3. My name may not be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY DAVID PARSONS ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DAVID # PARSONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED # TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING # IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF # THE POSSIBILITY OF SUCH DAMAGE. # # # this preamble code is executed when this file is sourced and it picks # interesting things off the command line. # ac_default_path="/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin:/usr/X11R6/bin" ac_standard="--src=DIR where the source lives (.) 
--prefix=DIR where to install the final product (/usr/local) --execdir=DIR where to put executables (prefix/bin) --sbindir=DIR where to put static executables (prefix/sbin) --confdir=DIR where to put configuration information (/etc) --libdir=DIR where to put libraries (prefix/lib) --libexecdir=DIR where to put private executables --mandir=DIR where to put manpages" __fail=exit __ac_tmpdir='' __ac_clean_up () { if test "$__ac_tmpdir"; then rm -rf "$__ac_tmpdir" fi } trap __ac_clean_up EXIT if dirname B/A 2>/dev/null >/dev/null; then __ac_dirname() { dirname "$1" } else __ac_dirname() { echo "$1" | sed -e 's:/[^/]*$::' } fi ac_progname=$0 ac_configure_command= Q=\' for x in "$@"; do ac_configure_command="$ac_configure_command $Q$x$Q" done # ac_configure_command="$*" __d=`__ac_dirname "$ac_progname"` if [ "$__d" = "$ac_progname" ]; then AC_SRCDIR=`pwd` else AC_SRCDIR=`cd $__d;pwd` fi __ac_dir() { if test -d "$1"; then (cd "$1";pwd) else echo "$1"; fi } while [ $# -gt 0 ]; do unset matched case X"$1" in X--src|X--srcdir) AC_SRCDIR=`__ac_dir "$2"` _set_srcdir=1 shift 2;; X--src=*|X--srcdir=*) __d=`echo "$1" | sed -e 's/^[^=]*=//'` AC_SRCDIR=`__ac_dir "$__d"` _set_srcdir=1 shift 1 ;; X--prefix) AC_PREFIX=`__ac_dir "$2"` _set_prefix=1 shift 2;; X--prefix=*) __d=`echo "$1"| sed -e 's/^[^=]*=//'` AC_PREFIX=`__ac_dir "$__d"` _set_prefix=1 shift 1;; X--confdir) AC_CONFDIR=`__ac_dir "$2"` _set_confdir=1 shift 2;; X--confdir=*) __d=`echo "$1" | sed -e 's/^[^=]*=//'` AC_CONFDIR=`__ac_dir "$__d"` _set_confdir=1 shift 1;; X--libexec|X--libexecdir) AC_LIBEXEC=`__ac_dir "$2"` _set_libexec=1 shift 2;; X--libexec=*|X--libexecdir=*) __d=`echo "$1" | sed -e 's/^[^=]*=//'` AC_LIBEXEC=`__ac_dir "$__d"` _set_libexec=1 shift 1;; X--lib|X--libdir) AC_LIBDIR=`__ac_dir "$2"` _set_libdir=1 shift 2;; X--lib=*|X--libdir=*) __d=`echo "$1" | sed -e 's/^[^=]*=//'` AC_LIBDIR=`__ac_dir "$__d"` _set_libdir=1 shift 1;; X--exec|X--execdir) AC_EXECDIR=`__ac_dir "$2"` _set_execdir=1 shift 2;; 
X--exec=*|X--execdir=*) __d=`echo "$1" | sed -e 's/^[^=]*=//'` AC_EXECDIR=`__ac_dir "$__d"` _set_execdir=1 shift 1;; X--sbin|X--sbindir) AC_SBINDIR=`__ac_dir "$2"` _set_sbindir=1 shift 2;; X--sbin=*|X--sbindir=*) __d=`echo "$1" | sed -e 's/^[^=]*=//'` AC_SBINDIR=`__ac_dir "$__d"` _set_sbindir=1 shift 1;; X--man|X--mandir) AC_MANDIR=`__ac_dir "$2"` _set_mandir=1 shift 2;; X--man=*|X--mandir=*) __d=`echo "$1" | sed -e 's/^[^=]*=//'` AC_MANDIR=`__ac_dir "$__d"` _set_mandir=1 shift 1;; X--use-*=*) _var=`echo "$1"| sed -n 's/^--use-\([A-Za-z][-A-Za-z0-9_]*\)=.*$/\1/p'` if [ "$_var" ]; then _val=`echo "$1" | sed -e 's/^--use-[^=]*=\(.*\)$/\1/'` _v=`echo $_var | tr '[a-z]' '[A-Z]' | tr '-' '_'` case X"$_val" in X[Yy][Ee][Ss]|X[Tt][Rr][Uu][Ee]) eval USE_${_v}=T ;; X[Nn][Oo]|X[Ff][Aa][Ll][Ss][Ee]) eval unset USE_${_v} ;; *) echo "Bad value for --use-$_var ; must be yes or no" exit 1 ;; esac else echo "Bad option $1. Use --help to show options" 1>&2 exit 1 fi shift 1 ;; X--use-*) _var=`echo "$1"|sed -n 's/^--use-\([A-Za-z][-A-Za-z0-9_]*\)$/\1/p'` _v=`echo $_var | tr '[a-z]' '[A-Z]' | tr '-' '_'` eval USE_${_v}=T shift 1;; X--with-*=*) _var=`echo "$1"| sed -n 's/^--with-\([A-Za-z][-A-Za-z0-9_]*\)=.*$/\1/p'` if [ "$_var" ]; then _val=`echo "$1" | sed -e 's/^--with-[^=]*=\(.*\)$/\1/'` _v=`echo $_var | tr '[a-z]' '[A-Z]' | tr '-' '_'` eval WITH_${_v}=\"$_val\" else echo "Bad option $1. Use --help to show options" 1>&2 exit 1 fi shift 1 ;; X--with-*) _var=`echo "$1" | sed -n 's/^--with-\([A-Za-z][A-Za-z0-9_-]*\)$/\1/p'` if [ "$_var" ]; then _v=`echo $_var | tr '[a-z]' '[A-Z]' | tr '-' '_'` eval WITH_${_v}=1 else echo "Bad option $1. Use --help to show options" 1>&2 exit 1 fi shift 1 ;; X--help) echo "$ac_standard" test "$ac_help" && echo "$ac_help" exit 0;; *) if [ "$LOCAL_AC_OPTIONS" ]; then eval "$LOCAL_AC_OPTIONS" else ac_error=T fi if [ "$ac_error" ]; then echo "Bad option $1. 
Use --help to show options" 1>&2 exit 1 fi ;; esac done # # echo w/o newline # echononl() { ${ac_echo:-echo} "${@}$ac_echo_nonl" } # # log something to the terminal and to a logfile. # LOG () { echo "$@" echo "$@" 1>&5 } # # log something to the terminal without a newline, and to a logfile with # a newline # LOGN () { echononl "$@" 1>&5 echo "$@" } # # log something to the terminal # TLOG () { echo "$@" 1>&5 } # # log something to the terminal, no newline # TLOGN () { echononl "$@" 1>&5 } # # AC_CONTINUE tells configure not to bomb if something fails, but to # continue blithely along # AC_CONTINUE () { __fail="return" } # # Emulate gnu autoconf's AC_CHECK_HEADERS() function # AC_CHECK_HEADERS () { AC_PROG_CC echo "/* AC_CHECK_HEADERS */" > "$__ac_tmpdir/ngc$$.c" for hdr in $*; do echo "#include <$hdr>" >> "$__ac_tmpdir/ngc$$.c" done echo "main() { }" >> "$__ac_tmpdir/ngc$$.c" LOGN "checking for header $hdr" if $AC_CC -o "$__ac_tmpdir/ngc$$" "$__ac_tmpdir/ngc$$.c"; then AC_DEFINE 'HAVE_'`echo $hdr | tr 'a-z' 'A-Z' | tr './' '_'` 1 TLOG " (found)" rc=0 else TLOG " (not found)" rc=1 fi rm -f "$__ac_tmpdir/ngc$$.c" "$__ac_tmpdir/ngc$$" return $rc } # # emulate GNU autoconf's AC_CHECK_FUNCS function # AC_CHECK_FUNCS () { AC_PROG_CC F=$1 shift rm -f "$__ac_tmpdir/ngc$$.c" while [ "$1" ]; do echo "#include <$1>" >> "$__ac_tmpdir/ngc$$.c" shift done cat >> "$__ac_tmpdir/ngc$$.c" << EOF main() { $F(); } EOF LOGN "checking for the $F function" if $AC_CC -o "$__ac_tmpdir/ngc$$" "$__ac_tmpdir/ngc$$.c" $LIBS; then AC_DEFINE `echo ${2:-HAVE_$F} | tr 'a-z' 'A-Z'` 1 TLOG " (found)" rc=0 else echo "offending command was:" cat "$__ac_tmpdir/ngc$$.c" echo "$AC_CC -o $__ac_tmpdir/ngc$$ $__ac_tmpdir/ngc$$.c $LIBS" TLOG " (not found)" rc=1 fi rm -f "$__ac_tmpdir/ngc$$.c" "$__ac_tmpdir/ngc$$" return $rc } # # check to see if some structure exists # # usage: AC_CHECK_STRUCT structure {include ...} # AC_CHECK_STRUCT () { AC_PROG_CC struct=$1 shift rm -f "$__ac_tmpdir/ngc$$.c" for include 
in $*; do echo "#include <$include>" >> "$__ac_tmpdir/ngc$$.c" done cat >> "$__ac_tmpdir/ngc$$.c" << EOF main() { struct $struct foo; } EOF LOGN "checking for struct $struct" if $AC_CC -o "$__ac_tmpdir/ngc$$" "$__ac_tmpdir/ngc$$.c" $AC_LIBS 2>>config.log; then AC_DEFINE HAVE_STRUCT_`echo ${struct} | tr 'a-z' 'A-Z'` TLOG " (found)" rc=0 else TLOG " (not found)" rc=1 fi rm -f "$__ac_tmpdir/ngc$$.c" "$__ac_tmpdir/ngc$$" return $rc } # # check to see if some structure contains a field # # usage: AC_CHECK_FIELD structure field {include ...} # AC_CHECK_FIELD () { AC_PROG_CC struct=$1 field=$2 shift 2 rm -f "$__ac_tmpdir/ngc$$.c" for include in $*;do echo "#include <$include>" >> "$__ac_tmpdir/ngc$$.c" done cat >> "$__ac_tmpdir/ngc$$.c" << EOF main() { struct $struct foo; foo.$field; } EOF LOGN "checking that struct $struct has a $field field" if $AC_CC -o "$__ac_tmpdir/ngc$$" "$__ac_tmpdir/ngc$$.c" $AC_LIBS 2>>config.log; then AC_DEFINE HAVE_`echo ${struct}_$field | tr 'a-z' 'A-Z'` TLOG " (yes)" rc=0 else TLOG " (no)" rc=1 fi rm -f "$__ac_tmpdir/ngc$$.c" "$__ac_tmpdir/ngc$$" return $rc } # # check that the C compiler works # AC_PROG_CC () { test "$AC_CC" && return 0 cat > "$__ac_tmpdir/ngc$$.c" << \EOF #include main() { puts("hello, sailor"); } EOF TLOGN "checking the C compiler" unset AC_CFLAGS AC_LDFLAGS if [ "$CC" ] ; then AC_CC="$CC" elif [ "$WITH_PATH" ]; then AC_CC=`acLookFor cc` elif [ "`acLookFor cc`" ]; then # don't specify the full path if the user is looking in their $PATH # for a C compiler. AC_CC=cc fi # finally check for POSIX c89 test "$AC_CC" || AC_CC=`acLookFor c89` if [ ! "$AC_CC" ]; then TLOG " (no C compiler found)" $__fail 1 fi echo "checking out the C compiler" $AC_CC -o "$__ac_tmpdir/ngc$$" "$__ac_tmpdir/ngc$$.c" status=$? 
TLOGN " ($AC_CC)" if [ $status -eq 0 ]; then TLOG " ok" # check that the CFLAGS and LDFLAGS aren't bogus unset AC_CFLAGS AC_LDFLAGS if [ "$CFLAGS" ]; then test "$CFLAGS" && echo "validating CFLAGS=${CFLAGS}" if $AC_CC $CFLAGS -o "$__ac_tmpdir/ngc$$" "$__ac_tmpdir/ngc$$.c" ; then AC_CFLAGS=${CFLAGS:-"-g"} test "$CFLAGS" && echo "CFLAGS=\"${CFLAGS}\" are okay" elif [ "$CFLAGS" ]; then echo "ignoring bogus CFLAGS=\"${CFLAGS}\"" fi else AC_CFLAGS=-g fi if [ "$LDFLAGS" ]; then echo "validating LDFLAGS=${LDFLAGS}" $AC_CC $AC_CFLAGS -c -o "$__ac_tmpdir/ngc$$.o" "$__ac_tmpdir/ngc$$.c" if $AC_CC $AC_CFLAGS $LDFLAGS -o "$__ac_tmpdir/ngc$$" "$__ac_tmpdir/ngc$$.o"; then AC_LDFLAGS=${LDFLAGS:-"-g"} test "$LDFLAGS" && TLOG "LDFLAGS=\"${LDFLAGS}\" are okay" elif [ "$LDFLAGS" ]; then TLOG "ignoring bogus LDFLAGS=\"${LDFLAGS}\"" fi else AC_LDFLAGS=${CFLAGS:-"-g"} fi else AC_FAIL " does not compile code properly" fi AC_SUB 'CC' "$AC_CC" rm -f "$__ac_tmpdir/ngc$$" "$__ac_tmpdir/ngc$$.c" "$__ac_tmpdir/ngc$$.o" return $status } # # acLookFor actually looks for a program, without setting anything. 
# acLookFor () { path="${AC_PATH:-$ac_default_path}" case "X$1" in X-[rx]) __mode=$1 shift ;; *) __mode=-x ;; esac oldifs="$IFS" for program in $*; do IFS=":" for x in $path; do if [ $__mode "$x/$program" -a -f "$x/$program" ]; then echo "$x/$program" break 2 fi done done IFS="$oldifs" unset __mode } # # check that a program exists and set its path # MF_PATH_INCLUDE () { SYM=$1; shift case X$1 in X-[rx]) __mode=$1 shift ;; *) unset __mode ;; esac TLOGN "looking for $1" DEST=`acLookFor $__mode $*` __sym=`echo "$SYM" | tr '[a-z]' '[A-Z]'` if [ "$DEST" ]; then TLOG " ($DEST)" echo "$1 is $DEST" AC_MAK $SYM AC_DEFINE PATH_$__sym \""$DEST"\" AC_SUB $__sym "$DEST" eval CF_$SYM="'$DEST'" return 0 else #AC_SUB $__sym '' echo "$1 is not found" TLOG " (not found)" return 1 fi } # # AC_INIT starts the ball rolling # # After AC_INIT, fd's 1 and 2 point to config.log # and fd 5 points to what used to be fd 1 # AC_INIT () { __ac_tmpdir=$(mktemp -d configure-XXXXXX) __config_files="config.cmd config.sub config.h config.mak config.log" __config_detritus="config.h.tmp" rm -f $__config_files $__config_detritus __cwd=`pwd` exec 5>&1 1>$__cwd/config.log 2>&1 AC_CONFIGURE_FOR=__AC_`echo $1 | sed -e 's/\..$//' | tr 'a-z' 'A-Z' | tr ' ' '_'`_D # check to see whether to use echo -n or echo ...\c # echo -n hello > $$ echo world >> $$ if grep "helloworld" $$ >/dev/null; then ac_echo="echo -n" echo "[echo -n] works" else ac_echo="echo" echo 'hello\c' > $$ echo 'world' >> $$ if grep "helloworld" $$ >/dev/null; then ac_echo_nonl='\c' echo "[echo ...\\c] works" fi fi rm -f $$ LOG "Configuring for [$1]" rm -f $__cwd/config.h cat > $__cwd/config.h.tmp << EOF /* * configuration for $1${2:+" ($2)"}, generated `date` * by ${LOGNAME:-`whoami`}@`hostname` */ #ifndef $AC_CONFIGURE_FOR #define $AC_CONFIGURE_FOR 1 EOF unset __share if [ -d $AC_PREFIX/share/man ]; then for t in 1 2 3 4 5 6 7 8 9; do if [ -d $AC_PREFIX/share/man/man$t ]; then __share=/share elif [ -d $AC_PREFIX/share/man/cat$t ]; then 
__share=/share fi done else __share= fi if [ -d $AC_PREFIX/libexec ]; then __libexec=libexec else __libexec=lib fi AC_PREFIX=${AC_PREFIX:-/usr/local} AC_EXECDIR=${AC_EXECDIR:-$AC_PREFIX/bin} AC_SBINDIR=${AC_SBINDIR:-$AC_PREFIX/sbin} AC_LIBDIR=${AC_LIBDIR:-$AC_PREFIX/lib} AC_MANDIR=${AC_MANDIR:-$AC_PREFIX$__share/man} AC_LIBEXEC=${AC_LIBEXEC:-$AC_PREFIX/$__libexec} AC_CONFDIR=${AC_CONFDIR:-/etc} AC_PATH=${WITH_PATH:-$PATH} AC_PROG_CPP AC_PROG_INSTALL ac_os=`uname -s | sed 's/[-_].*//; s/[^a-zA-Z0-9]/_/g'` _os=`echo $ac_os | tr '[a-z]' '[A-Z]'` AC_DEFINE OS_$_os 1 eval OS_${_os}=1 unset _os } # # AC_LIBRARY checks to see if a given library exists and contains the # given function. # usage: AC_LIBRARY function library [alternate ...] # AC_LIBRARY() { SRC=$1 shift __acllibs= __aclhdrs= for x in "$@"; do case X"$x" in X-l*) __acllibs="$__acllibs $x" ;; *) __aclhdrs="$__aclhdrs $x" ;; esac done # first see if the function can be found in any of the # current libraries AC_QUIET AC_CHECK_FUNCS $SRC $__aclhdrs && return 0 # then search through the list of libraries __libs="$LIBS" for x in $__acllibs; do LIBS="$__libs $x" if AC_QUIET AC_CHECK_FUNCS $SRC $__aclhdrs; then AC_LIBS="$AC_LIBS $x" return 0 fi done return 1 } # # AC_PROG_LEX checks to see if LEX exists, and if it's lex or flex. # AC_PROG_LEX() { TLOGN "looking for lex " DEST=`acLookFor lex` if [ "$DEST" ]; then AC_MAK LEX AC_DEFINE PATH_LEX \"$DEST\" AC_SUB 'LEX' "$DEST" echo "lex is $DEST" else DEST=`acLookFor flex` if [ "$DEST" ]; then AC_MAK FLEX AC_DEFINE 'LEX' \"$DEST\" AC_SUB 'LEX', "$DEST" echo "lex is $DEST" else AC_SUB LEX '' echo "neither lex or flex found" TLOG " (not found)" return 1 fi fi if AC_LIBRARY yywrap -ll -lfl; then TLOG "($DEST)" return 0 fi TLOG "(no lex library found)" return 1 } # # AC_PROG_YACC checks to see if YACC exists, and if it's bison or # not. 
# AC_PROG_YACC () { TLOGN "looking for yacc " DEST=`acLookFor yacc` if [ "$DEST" ]; then AC_MAK YACC AC_DEFINE PATH_YACC \"$DEST\" AC_SUB 'YACC' "$DEST" TLOG "($DEST)" echo "yacc is $DEST" else DEST=`acLookFor bison` if [ "$DEST" ]; then AC_MAK BISON AC_DEFINE 'YACC' \"$DEST\" AC_SUB 'YACC' "$DEST -y" echo "yacc is $DEST -y" TLOG "($DEST -y)" else AC_SUB 'YACC' '' echo "neither yacc or bison found" TLOG " (not found)" return 1 fi fi return 0 } # # AC_PROG_LN_S checks to see if ln exists, and, if so, if ln -s works # AC_PROG_LN_S () { test "$AC_FIND_PROG" || AC_PROG_FIND test "$AC_FIND_PROG" || return 1 TLOGN "looking for \"ln -s\"" DEST=`acLookFor ln` if [ "$DEST" ]; then rm -f "$__ac_tmpdir/b$$" "$DEST" -s "$__ac_tmpdir/a$$" "$__ac_tmpdir/b$$" if [ "`$AC_FIND_PROG $__ac_tmpdir/b$$ -type l -print`" ]; then TLOG " ($DEST)" echo "$DEST exists, and ln -s works" AC_SUB 'LN_S' "$DEST -s" rm -f "$__ac_tmpdir/b$$" else AC_SUB 'LN_S' '' TLOG " ($DEST exists, but -s does not seem to work)" echo "$DEST exists, but ln -s doesn't seem to work" rm -f "$__ac_tmpdir/b$$" return 1 fi else AC_SUB 'LN_S' '' echo "ln not found" TLOG " (not found)" return 1 fi } # # AC_PROG_FIND looks for the find program and sets the FIND environment # variable # AC_PROG_FIND () { if test -z "$AC_FIND_PROG"; then MF_PATH_INCLUDE FIND find rc=$? AC_FIND_PROG="$DEST" return $rc fi return 0 } # # AC_PROG_AWK looks for the awk program and sets the AWK environment # variable # AC_PROG_AWK () { if test -z "$AC_AWK_PROG"; then MF_PATH_INCLUDE AWK awk rc=$? AC_AWK_PROG="$DEST" return $rc fi return 0 } # # AC_PROG_SED looks for the sed program and sets the SED environment # variable # AC_PROG_SED () { if test -z "$AC_SED_PROG"; then MF_PATH_INCLUDE SED sed rc=$? 
AC_SED_PROG="$DEST" return $rc fi return 0 } # # AC_HEADER_SYS_WAIT looks for sys/wait.h # AC_HEADER_SYS_WAIT () { AC_CHECK_HEADERS sys/wait.h || return 1 } # # AC_TYPE_PID_T checks to see if the pid_t type exists # AC_TYPE_PID_T () { cat > "$__ac_tmpdir/pd$$.c" << EOF #include main() { pid_t me; } EOF LOGN "checking for pid_t" if $AC_CC -c "$__ac_tmpdir/pd$$.c" -o "$__ac_tmpdir/pd$$.o"; then TLOG " (found)" rc=0 else echo "typedef int pid_t;" >> $__cwd/config.h.tmp TLOG " (not found)" rc=1 fi rm -f "$__ac_tmpdir/pd$$.o" "$__ac_tmpdir/pd$$.c" return $rc } # # AC_C_CONST checks to see if the compiler supports the const keyword # AC_C_CONST () { cat > "$__ac_tmpdir/pd$$.c" << EOF const char me=1; EOF LOGN "checking for \"const\" keyword" if $AC_CC -c "$__ac_tmpdir/pd$$.c" -o "$__ac_tmpdir/pd$$.o"; then TLOG " (yes)" rc=0 else AC_DEFINE 'const' '/**/' TLOG " (no)" rc=1 fi rm -f "$__ac_tmpdir/pd$$.o" "$__ac_tmpdir/pd$$.c" return $rc } # # AC_SCALAR_TYPES checks to see if the compiler can generate 2 and 4 byte ints. 
# AC_SCALAR_TYPES () { cat > "$__ac_tmpdir/pd$$.c" << EOF #include main() { unsigned long v_long; unsigned int v_int; unsigned short v_short; if (sizeof v_long == 4) puts("#define DWORD unsigned long"); else if (sizeof v_int == 4) puts("#define DWORD unsigned int"); else exit(1); if (sizeof v_int == 2) puts("#define WORD unsigned int"); else if (sizeof v_short == 2) puts("#define WORD unsigned short"); else exit(2); puts("#define BYTE unsigned char"); exit(0); } EOF rc=1 LOGN "defining WORD & DWORD scalar types" if $AC_CC "$__ac_tmpdir/pd$$.c" -o "$__ac_tmpdir/pd$$"; then if "$__ac_tmpdir/pd$$" >> $__cwd/config.h.tmp; then rc=0 fi fi case "$rc" in 0) TLOG "" ;; *) TLOG " ** FAILED **" ;; esac rm -f "$__ac_tmpdir/pd$$" "$__ac_tmpdir/pd$$.c" } # # AC_OUTPUT generates makefiles from makefile.in's # AC_OUTPUT () { cd $__cwd AC_SUB 'LIBS' "$AC_LIBS" AC_SUB 'CONFIGURE_FILES' "$__config_files" AC_SUB 'CONFIGURE_DETRITUS' "$__config_detritus" AC_SUB 'GENERATED_FILES' "$*" AC_SUB 'CFLAGS' "$AC_CFLAGS" AC_SUB 'FCFLAGS' "$AC_FCFLAGS" AC_SUB 'CPPFLAGS' "$AC_CPPFLAGS" AC_SUB 'CXXFLAGS' "$AC_CXXFLAGS" AC_SUB 'LDFLAGS' "$AC_LDFLAGS" AC_SUB 'srcdir' "$AC_SRCDIR" AC_SUB 'prefix' "$AC_PREFIX" AC_SUB 'exedir' "$AC_EXECDIR" AC_SUB 'sbindir' "$AC_SBINDIR" AC_SUB 'libdir' "$AC_LIBDIR" AC_SUB 'libexec' "$AC_LIBEXEC" AC_SUB 'confdir' "$AC_CONFDIR" AC_SUB 'mandir' "$AC_MANDIR" if [ -r config.sub ]; then test "$AC_SED_PROG" || AC_PROG_SED test "$AC_SED_PROG" || return 1 echo >> config.h.tmp echo "#endif/* ${AC_CONFIGURE_FOR} */" >> config.h.tmp rm -f config.cmd Q=\' cat - > config.cmd << EOF #! 
/bin/sh ${CXX:+CXX=${Q}${CXX}${Q}} ${CPPFLAGS:+CPPFLAGS=${Q}${CPPFLAGS}${Q}} ${CXXFLAGS:+CXXFLAGS=${Q}${CXXFLAGS}${Q}} ${FC:+FC=${Q}${FC}${Q}} ${FCFLAGS:+FCFLAGS=${Q}${FCFLAGS}${Q}} ${CC:+CC=${Q}${CC}${Q}} ${CFLAGS:+CFLAGS=${Q}${CFLAGS}${Q}} $ac_progname $ac_configure_command EOF chmod +x config.cmd __d=$AC_SRCDIR for makefile in $*;do if test -r $__d/${makefile}.in; then LOG "generating $makefile" ./config.md `__ac_dirname ./$makefile` 2>/dev/null $AC_SED_PROG -f config.sub < $__d/${makefile}.in > $makefile __config_files="$__config_files $makefile" else LOG "WARNING: ${makefile}.in does not exist!" fi done unset __d else echo fi cp $__cwd/config.h.tmp $__cwd/config.h } # # AC_CHECK_FLOCK checks to see if flock() exists and if the LOCK_NB argument # works properly. # AC_CHECK_FLOCK() { AC_CHECK_HEADERS sys/types.h sys/file.h fcntl.h cat << EOF > $$.c #include #include #include #include main() { int x = open("$$.c", O_RDWR, 0666); int y = open("$$.c", O_RDWR, 0666); if (flock(x, LOCK_EX) != 0) exit(1); if (flock(y, LOCK_EX|LOCK_NB) == 0) exit(1); exit(0); } EOF LOGN "checking for flock()" HAS_FLOCK=0 if $AC_CC -o flock $$.c ; then if ./flock ; then LOG " (found)" HAS_FLOCK=1 AC_DEFINE HAS_FLOCK else LOG " (bad)" fi else LOG " (no)" fi rm -f flock $$.c case "$HAS_FLOCK" in 0) return 1 ;; *) return 0 ;; esac } # # AC_CHECK_RESOLVER finds out whether the berkeley resolver is # present on this system. # AC_CHECK_RESOLVER () { AC_PROG_CC TLOGN "checking for the Berkeley resolver library" cat > "$__ac_tmpdir/ngc$$.c" << EOF #include #include #include #include main() { char bfr[256]; res_init(); res_query("hello", C_IN, T_A, bfr, sizeof bfr); } EOF # first see if res_init() and res_query() actually exist... 
if $AC_CC -o "$__ac_tmpdir/ngc$$" "$__ac_tmpdir/ngc$$.c"; then __extralib= elif $AC_CC -o "$__ac_tmpdir/ngc$$" "$__ac_tmpdir/ngc$$.c" -lresolv; then __extralib=-lresolv AC_LIBS="$AC_LIBS -lresolv" else TLOG " (not found)" rm -f "$__ac_tmpdir/ngc$$.c" return 1 fi # if res_init() and res_query() actually exist, check to # see if the HEADER structure is defined ... cat > "$__ac_tmpdir/ngc$$.c" << EOF #include #include #include #include main() { HEADER hhh; res_init(); } EOF if $AC_CC -o "$__ac_tmpdir/ngc$$" "$__ac_tmpdir/ngc$$.c" $__extralib; then TLOG " (found)" elif $AC_CC -DBIND_8_COMPAT -o "$__ac_tmpdir/ngc$$" "$__ac_tmpdir/ngc$$.c" $__extralib; then TLOG " (bind9 with BIND_8_COMPAT)" AC_DEFINE BIND_8_COMPAT 1 else TLOG " (broken)" rm -f "$__ac_tmpdir/ngc$$.c" return 1 fi rm -f "$__ac_tmpdir/ngc$$.c" return 0 } # # AC_PROG_INSTALL finds the install program and guesses whether it's a # Berkeley or GNU install program # AC_PROG_INSTALL () { DEST=`acLookFor install` LOGN "checking for install" unset IS_BSD if [ "$DEST" ]; then # BSD install or GNU install? Let's find out... touch "$__ac_tmpdir/a$$" "$DEST" "$__ac_tmpdir/a$$" "$__ac_tmpdir/b$$" if test -r "$__ac_tmpdir/a$$"; then LOG " ($DEST)" else IS_BSD=1 LOG " ($DEST) bsd install" fi rm -f "$__ac_tmpdir/a$$" "$__ac_tmpdir/b$$" else DEST=`acLookFor ginstall` if [ "$DEST" ]; then LOG " ($DEST)" else DEST="false" LOG " (not found)" fi fi if [ "$IS_BSD" ]; then PROG_INSTALL="$DEST -c" else PROG_INSTALL="$DEST" fi AC_SUB 'INSTALL' "$PROG_INSTALL" AC_SUB 'INSTALL_PROGRAM' "$PROG_INSTALL -s -m 755" AC_SUB 'INSTALL_DATA' "$PROG_INSTALL -m 444" # finally build a little directory installer # if mkdir -p works, use that, otherwise use install -d, # otherwise build a script to do it by hand. # in every case, test to see if the directory exists before # making it. if mkdir -p $$a/b; then # I like this method best. 
__mkdir="mkdir -p" rmdir $$a/b rmdir $$a elif $PROG_INSTALL -d $$a/b; then __mkdir="$PROG_INSTALL -d" rmdir $$a/b rmdir $$a fi __config_files="$__config_files config.md" AC_SUB 'INSTALL_DIR' "$__cwd/config.md" echo "#! /bin/sh" > $__cwd/config.md echo "# script generated" `date` "by configure.sh" >> $__cwd/config.md echo >> $__cwd/config.md if [ "$__mkdir" ]; then echo "test -d \"\$1\" || $__mkdir \"\$1\"" >> $__cwd/config.md echo "exit $?" >> $__cwd/config.md else cat - >> $__cwd/config.md << \EOD pieces=`IFS=/; for x in $1; do echo $x; done` dir= for x in $pieces; do dir="$dir$x" mkdir $dir || exit 1 dir="$dir/" done exit 0 EOD fi chmod +x $__cwd/config.md } # # acCheckCPP is a local that runs a C preprocessor with a given set of # compiler options # acCheckCPP () { cat > "$__ac_tmpdir/ngc$$.c" << EOF #define FOO BAR FOO EOF if $1 $2 "$__ac_tmpdir/ngc$$.c" > "$__ac_tmpdir/ngc$$.o"; then if grep -v '#define' "$__ac_tmpdir/ngc$$.o" | grep -s BAR >/dev/null; then echo "CPP=[$1], CPPFLAGS=[$2]" AC_SUB 'CPP' "$1" AC_SUB 'CPPFLAGS' "$2" rm "$__ac_tmpdir/ngc$$.c" "$__ac_tmpdir/ngc$$.o" return 0 fi fi rm "$__ac_tmpdir/ngc$$.c" "$__ac_tmpdir/ngc$$.o" return 1 } # # AC_PROG_CPP checks for cpp, then checks to see which CPPFLAGS are needed # to run it as a filter. 
# AC_PROG_CPP () { if [ "$AC_CPP_PROG" ]; then DEST="$AC_CPP_PROG" else __ac_path="$AC_PATH" AC_PATH="/lib:/usr/lib:${__ac_path:-$ac_default_path}" DEST=`acLookFor cpp` AC_PATH="$__ac_path" fi unset fail LOGN "Looking for cpp" if [ "$DEST" ]; then TLOGN " ($DEST)" acCheckCPP "$DEST" "$CPPFLAGS" || \ acCheckCPP "$DEST" -traditional-cpp -E || \ acCheckCPP "$DEST" -E || \ acCheckCPP "$DEST" -traditional-cpp -pipe || \ acCheckCPP "$DEST" -pipe || fail=1 if [ "$fail" ]; then AC_FAIL " (can't run cpp as a pipeline)" else TLOG " ok" return 0 fi fi AC_FAIL " (not found)" } # # AC_FAIL spits out an error message, then __fail's AC_FAIL() { LOG "$*" $__fail 1 } # # AC_SUB writes a substitution into config.sub AC_SUB() { ( echononl "s;@$1@;" _subst=`echo $2 | sed -e 's/;/\\;/g'` echononl "$_subst" echo ';g' ) >> $__cwd/config.sub } # # AC_MAK writes a define into config.mak AC_MAK() { echo "HAVE_$1 = 1" >> $__cwd/config.mak } # # AC_DEFINE adds a #define to config.h AC_DEFINE() { local name="$1" value="${2:-1}" if ! 
printf -v "ac_defined_$name" '%s' "$value"; then AC_FATAL 'AC_DEFINE unable to set "ac_defined_$name" to "$value"' fi echo "#define $name $value" >> $__cwd/config.h.tmp } # # AC_INCLUDE adds a #include to config.h AC_INCLUDE() { echo "#include \"$1\"" >> $__cwd/config.h.tmp } # # AC_CONFIG adds a configuration setting to all the config files AC_CONFIG() { AC_DEFINE "PATH_$1" \""$2"\" AC_MAK "$1" AC_SUB "$1" "$2" } # # AC_QUIET does something quietly AC_QUIET() { eval $* 5>/dev/null } bup-0.33.3/configure000077500000000000000000000000631454333004200142720ustar00rootroot00000000000000#!/bin/sh set -e cd config exec ./configure "$@" bup-0.33.3/conftest.py000066400000000000000000000062501454333004200145660ustar00rootroot00000000000000 from __future__ import absolute_import, print_function from os.path import basename, dirname, realpath, relpath from time import tzset from traceback import extract_stack import errno import os import pytest import re import subprocess import sys import tempfile sys.path[:0] = ['lib'] from bup import helpers from bup.compat import environ, fsencode _bup_src_top = realpath(dirname(fsencode(__file__))) # The "pwd -P" here may not be appropriate in the long run, but we # need it until we settle the relevant drecurse/exclusion questions: # https://groups.google.com/forum/#!topic/bup-list/9ke-Mbp10Q0 os.chdir(realpath(os.getcwd())) # Make the test results available to fixtures @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(item, call): other_hooks = yield report = other_hooks.get_result() bup = item.__dict__.setdefault('bup', {}) bup[report.when + '-report'] = report # setup, call, teardown item.bup = bup def bup_test_sort_order(item): # Pull some slower tests forward to speed parallel runs if item.fspath.basename in ('test_get.py', 'test-index.sh'): return (0, str(item.fspath)) return (1, str(item.fspath)) def pytest_collection_modifyitems(session, config, items): items.sort(key=bup_test_sort_order) 
@pytest.fixture(autouse=True) def no_lingering_errors(): def fail_if_errors(): if helpers.saved_errors: bt = extract_stack() src_file, src_line, src_func, src_txt = bt[-4] msg = 'saved_errors ' + repr(helpers.saved_errors) assert False, '%s:%-4d %s' % (basename(src_file), src_line, msg) fail_if_errors() helpers.clear_errors() yield None fail_if_errors() helpers.clear_errors() @pytest.fixture(autouse=True) def ephemeral_env_changes(): orig_env = environ.copy() yield None for k, orig_v in orig_env.items(): v = environ.get(k) if v is not orig_v: environ[k] = orig_v if k == b'TZ': tzset() for k in environ.keys(): if k not in orig_env: del environ[k] if k == b'TZ': tzset() os.chdir(_bup_src_top) # Assumes (of course) this file is at the top-level of the source tree _bup_test_dir = realpath(dirname(fsencode(__file__))) + b'/test' _bup_tmp = _bup_test_dir + b'/tmp' try: os.makedirs(_bup_tmp) except OSError as e: if e.errno != errno.EEXIST: raise _safe_path_rx = re.compile(br'[^a-zA-Z0-9_-]') @pytest.fixture() def tmpdir(request): if sys.version_info[0] > 2: rp = realpath(fsencode(request.fspath)) else: rp = realpath(str(request.fspath)) rp = relpath(rp, _bup_test_dir) if request.function: rp += b'-' + fsencode(request.function.__name__) safe = _safe_path_rx.sub(b'-', rp) tmpdir = tempfile.mkdtemp(dir=_bup_tmp, prefix=safe) yield tmpdir if request.node.bup['call-report'].failed: print('\nPreserving:', b'test/' + relpath(tmpdir, _bup_test_dir), file=sys.stderr) else: subprocess.call(['chmod', '-R', 'u+rwX', tmpdir]) subprocess.call(['rm', '-rf', tmpdir]) bup-0.33.3/dev/000077500000000000000000000000001454333004200131425ustar00rootroot00000000000000bup-0.33.3/dev/checksum000077500000000000000000000022621454333004200146740ustar00rootroot00000000000000#!/usr/bin/env bash set -ueo pipefail usage() { echo "Usage: checksum -t [--] [PATH]"; } misuse() { usage 1>&2; exit 2; } kind='' while test $# -gt 0; do case "$1" in --) shift break ;; -t) shift test $# -gt 0 || misuse kind="$1" 
case "$kind" in sha1|sha256) ;; *) misuse ;; esac shift ;; -*) misuse ;; *) break ;; esac done test "$kind" || misuse src='' case $# in 0) ;; 1) src="$1" ;; *) misuse ;; esac # Use KINDsum if available, else KIND (e.g. sha1sum or sha1). Assumes # the former is compatible with the coreutils version, and the latter # is compatible with the FreeBSD version. if command -v "$kind"sum > /dev/null; then if test "$src"; then result=$("$kind"sum "$src") else result=$("$kind"sum) fi echo "${result%% *}" elif command -v "$kind" > /dev/null; then if test "$src"; then "$kind" -q "$src" else "$kind" -q fi else echo "Can't find sha1sum or sha1" 1>&2 exit 2 fi bup-0.33.3/dev/cleanup-mounts-under000077500000000000000000000033061454333004200171570ustar00rootroot00000000000000#!/bin/sh """": # -*-python-*- # This command is used by "make clean", so don't rely on ./configure set -e for python in \ python3 \ python3.10 \ python3.9 \ python3.8 \ python3.7 \ python3.6 \ python; \ do \ if command -v "$python"; then exec "$python" "$0" "$@" fi done echo "error: unable to find suitable python executable; please report" 1>&2 exit 2 """ from __future__ import absolute_import, print_function from sys import stderr import os.path, re, subprocess, sys def mntent_unescape(x): def replacement(m): unescapes = { "\\\\" : "\\", "\\011" : "\t", "\\012" : "\n", "\\040" : " " } return unescapes.get(m.group(0)) return re.sub(r'(\\\\|\\011|\\012|\\040)', replacement, x) targets = sys.argv[1:] if not os.path.exists('/proc/mounts'): print('No /proc/mounts; skipping mount cleanup in', repr(targets), file=stderr) sys.exit(0) exit_status = 0 for target in targets: if not os.path.isdir(target): print(repr(target), 'is not a directory', file=stderr) exit_status = 1 continue top = os.path.realpath(target) proc_mounts = open('/proc/mounts', 'r') for line in proc_mounts: _, point, fstype, _ = line.split(' ', 3) point = mntent_unescape(point) if top == point or os.path.commonprefix((top + '/', point)) == top + '/': 
if fstype.startswith('fuse'): if subprocess.call(['fusermount', '-uz', point]) != 0: exit_status = 1 else: if subprocess.call(['umount', '-l', point]) != 0: exit_status = 1 sys.exit(exit_status) bup-0.33.3/dev/compare-trees000077500000000000000000000056401454333004200156430ustar00rootroot00000000000000#!/usr/bin/env bash set -euo pipefail # Test that src and dest trees are as identical as bup is capable of # making them. For now, use rsync -niaHAX ... usage() { cat <&2; exit 2;; -c) verify_content=" --checksum"; shift;; -x) verify_content=""; shift;; --times) verify_times=' --times'; shift;; --no-times) verify_times=''; shift;; --) shift; break;; [^-]*) break;; esac done esac if test $# -ne 2 -a -z "$show_features"; then usage 1>&2 exit 2 fi src="${1:-}" dest="${2:-}" rsync_opts="-rlpgoD" # --archive, without --times rsync_opts="$rsync_opts -niH --delete" rsync_opts="$rsync_opts$verify_content" rsync_opts="$rsync_opts$verify_times" comparing_acls='' rsync_version=$(rsync --version) if [[ ! "$rsync_version" =~ "ACLs" ]] || [[ "$rsync_version" =~ "no ACLs" ]]; then echo "Not comparing ACLs (not supported by available rsync)" 1>&2 else case "$OSTYPE" in cygwin|darwin|netbsd) echo "Not comparing ACLs (not yet supported on $OSTYPE)" 1>&2 ;; *) comparing_acls=yes rsync_opts="$rsync_opts -A" ;; esac fi xattrs_available='' if [[ ! "$rsync_version" =~ "xattrs" ]] || [[ "$rsync_version" =~ "no xattrs" ]]; then echo "Not comparing xattrs (not supported by available rsync)" 1>&2 else xattrs_available=yes fi if test "$show_features"; then echo "POSIX ACLs: ${comparing_acls:-no}" echo "Extended attributes (xattrs): ${xattrs_available:-no}" fi if test "$show_features"; then exit 0 fi tmpfile="$(mktemp /tmp/bup-test-XXXXXXX)" || exit $? trap "rm -rf '$tmpfile'" EXIT || exit $? # Even in dry-run mode, rsync may fail if -X is specified and the # filesystems don't support xattrs. if test "$xattrs_available"; then rsync $rsync_opts -X "$src" "$dest" > "$tmpfile" if test $? 
-ne 0; then # Try again without -X rsync $rsync_opts "$src" "$dest" > "$tmpfile" || exit $? fi else rsync $rsync_opts "$src" "$dest" > "$tmpfile" || exit $? fi if test $(wc -l < "$tmpfile") != 0; then echo "Differences between $src and $dest" 1>&2 cat "$tmpfile" exit 1 fi exit 0 bup-0.33.3/dev/configure-sampledata000077500000000000000000000034641454333004200171710ustar00rootroot00000000000000#!/usr/bin/env bash set -o pipefail # NOTE: any relevant changes to var/ must be accompanied by an # increment to the revision. revision=3 top="$(pwd)" || exit $? usage() { echo 'Usage: dev/configure-sampledata [--setup | --clean | --revision]' } if test "$#" -ne 1; then usage 1>&2; exit 1 fi rm_symlinks() { for p in "$@"; do # test -e is false for dangling symlinks. if test -h "$p" -o -e "$p"; then rm "$p" || exit $?; fi done } clean() ( cd test/sampledata || exit $? if test -e var; then rm -r var || exit $?; fi # Remove legacy content (before everything moved to var/). rm_symlinks abs-symlink b c etc ) case "$1" in --setup) ( clean mkdir -p test/sampledata/var/rev || exit $? cd test/sampledata/var || exit $? ln -sf a b || exit $? ln -sf b c || exit $? ln -sf "$(pwd)/abs-symlink-target" abs-symlink || exit $? mkfifo fifo mkdir -p cmd doc lib/bup || exit $? cp -pP "$top"/lib/bup/*.py lib/bup/ || exit $? cp -pP "$top"/Documentation/*.md doc/ || exit $? cp -pP "$top"/lib/bup/*.py lib/bup || exit $? mkdir path-zoo || exit $? if test "$BUP_TEST_RANDOMIZED_SAMPLEDATA_PATHS"; then "$top"/dev/make-random-paths 3000 path-zoo || exit $? fi # The "v" ensures that if "configure-sampledata # --revision" and/or the setup above fails somehow, # callers like make will be looking for a file that won't # exist. touch rev/v$revision || exit $? ) || exit $? ;; --clean) clean ;; --revision) echo "$revision" || exit $? 
;; *) usage 1>&2; exit 1 ;; esac bup-0.33.3/dev/data-size000077500000000000000000000012111454333004200147440ustar00rootroot00000000000000#!/bin/sh """": # -*-python-*- bup_exec="$(dirname "$0")/bup-exec" || exit $? exec "$bup_exec" "$0" ${1+"$@"} """ from __future__ import absolute_import, print_function from os.path import getsize, isdir from sys import stderr import os from bup.compat import get_argvb def listdir_failure(ex): raise ex def usage(): print('Usage: data-size PATH ...', file=sys.stderr) total = 0 for path in get_argvb()[1:]: if isdir(path): for root, dirs, files in os.walk(path, onerror=listdir_failure): total += sum(getsize(os.path.join(root, name)) for name in files) else: total += getsize(path) print(total) bup-0.33.3/dev/echo-argv-bytes000077500000000000000000000006131454333004200160670ustar00rootroot00000000000000#!/bin/sh """": # -*-python-*- bup_exec="$(dirname "$0")/bup-exec" || exit $? exec "$bup_exec" "$0" ${1+"$@"} """ from __future__ import absolute_import, print_function from os.path import abspath, dirname from sys import stdout import os, sys from bup import compat for arg in compat.get_argvb(): os.write(stdout.fileno(), arg) os.write(stdout.fileno(), b'\0\n') stdout.flush() bup-0.33.3/dev/force-delete000077500000000000000000000011651454333004200154310ustar00rootroot00000000000000#!/usr/bin/env bash set -o pipefail # Try *hard* to delete $@. Among other things, some systems have # r-xr-xr-x for root and other system dirs. rc=0 rm -rf "$@" # Maybe we'll get lucky. 
for f in "$@"; do test -e "$f" || continue if test "$(type -p setfacl)"; then setfacl -Rb "$f" fi if test "$(type -p chattr)"; then chattr -R -aisu "$f" fi chmod -R u+rwX "$f" rm -r "$f" if test -e "$f"; then rc=1 find "$f" -ls lsattr -aR "$f" getfacl -R "$f" fi done if test "$rc" -ne 0; then echo "Failed to delete everything" 1>&2 fi exit "$rc" bup-0.33.3/dev/git-cat-tree000077500000000000000000000022071454333004200153560ustar00rootroot00000000000000#!/usr/bin/env bash # Recursively dump all blobs in the subtree identified by ID. set -o pipefail usage() { cat <&2 exit 1 ;; esac } case $# in 1) ;; 3) if test "$1" != --git-dir; then usage 1>&2 exit 1 fi export GIT_DIR="$2" shift 2 ;; *) usage 1>&2 exit 1 ;; esac top="$1" type=$(git cat-file -t "$top") || exit $? cat-item "$top" "$type" bup-0.33.3/dev/hardlink-sets000077500000000000000000000030651454333004200156440ustar00rootroot00000000000000#!/bin/sh """": # -*-python-*- bup_exec="$(dirname "$0")/bup-exec" || exit $? exec "$bup_exec" "$0" ${1+"$@"} """ from __future__ import absolute_import, print_function import os, stat, sys from bup.compat import get_argvb from bup.io import byte_stream # Print the full paths of all the files in each hardlink set # underneath one of the paths. Separate sets with a blank line, sort # the paths within each set, and sort the sets by their first path. def usage(): print("Usage: hardlink-sets ", file=sys.stderr) argvb = get_argvb() if len(argvb) < 2: usage() sys.exit(1) def on_walk_error(e): raise e sys.stdout.flush() out = byte_stream(sys.stdout) hardlink_set = {} for p in argvb[1:]: for root, dirs, files in os.walk(p, onerror = on_walk_error): for filename in files: full_path = os.path.join(root, filename) st = os.lstat(full_path) if not stat.S_ISDIR(st.st_mode): node = b'%d:%d' % (st.st_dev, st.st_ino) link_paths = hardlink_set.get(node) if link_paths: link_paths.append(full_path) else: hardlink_set[node] = [full_path] # Sort the link sets. 
for node, link_paths in hardlink_set.items(): link_paths.sort() first_set = True for link_paths in sorted(hardlink_set.values(), key = lambda x : x[0]): if len(link_paths) > 1: if first_set: first_set = False else: out.write(b'\n') for p in sorted(link_paths): out.write(p + b'\n') sys.exit(0) bup-0.33.3/dev/have-pylint000077500000000000000000000005031454333004200153260ustar00rootroot00000000000000#!/bin/sh """": # -*-python-*- bup_exec="$(dirname "$0")/bup-exec" || exit $? exec "$bup_exec" "$0" ${1+"$@"} """ from __future__ import absolute_import, print_function import sys try: import pylint except ImportError as ex: sys.exit(1) except BaseException as ex: print(ex, file=sys.stderr) sys.exit(2) bup-0.33.3/dev/id-other-than000077500000000000000000000022601454333004200155330ustar00rootroot00000000000000#!/bin/sh """": # -*-python-*- bup_exec="$(dirname "$0")/bup-exec" || exit $? exec "$bup_exec" "$0" ${1+"$@"} """ from __future__ import absolute_import, print_function import grp import pwd import sys from bup.compat import get_argv, get_argvb def usage(): print('Usage: id-other-than <--user|--group> ID [ID ...]', file=sys.stderr) argvb = get_argvb() if len(argvb) < 2: usage() sys.exit(1) def is_integer(x): try: int(x) return True except ValueError as e: return False excluded_ids = set(int(x) for x in argvb[2:] if is_integer(x)) excluded_names = (x for x in get_argv()[2:] if not is_integer(x)) if argvb[1] == b'--user': for x in excluded_names: excluded_ids.add(pwd.getpwnam(x).pw_uid) for x in pwd.getpwall(): if x.pw_uid not in excluded_ids: print(x.pw_name + ':' + str(x.pw_uid)) sys.exit(0) elif argvb[1] == b'--group': for x in excluded_names: excluded_ids.add(grp.getgrnam(x).gr_gid) for x in grp.getgrall(): if x.gr_gid not in excluded_ids: print(x.gr_name + ':' + str(x.gr_gid)) sys.exit(0) else: usage() sys.exit(1) bup-0.33.3/dev/lib.sh000066400000000000000000000024561454333004200142530ustar00rootroot00000000000000# Assumes shell is Bash, and pipefail is set. 
# Assumes this is always loaded while pwd is still the source tree root bup_dev_lib_top=$(pwd) || exit $? bup-cfg-py() { "$bup_dev_lib_top/dev/python" "$@"; } bup-python() { "$bup_dev_lib_top/dev/bup-python" "$@"; } force-delete() { "$bup_dev_lib_top/dev/force-delete" "$@" } resolve-parent() { test "$#" -eq 1 || return $? echo "$1" | \ bup-python \ -c "import sys, bup.helpers; print(bup.helpers.resolve_parent(sys.stdin.readline()))" \ || return $? } current-filesystem() { local kernel="$(uname -s)" || return $? case "$kernel" in NetBSD) df -G . | sed -En 's/.* ([^ ]*) fstype.*/\1/p' ;; SunOS) df -g . | sed -En 's/.* ([^ ]*) fstype.*/\1/p' ;; *) df -T . | awk 'END{print $2}' esac } path-filesystems() ( # Return filesystem for each dir from $1 to /. # Perhaps for /foo/bar, "ext4\next4\nbtrfs\n". test "$#" -eq 1 || exit $? cd "$1" || exit $? current-filesystem || exit $? dir="$(pwd)" || exit $? while test "$dir" != /; do cd .. || exit $? dir="$(pwd)" || exit $? current-filesystem || exit $? done exit 0 ) escape-erx() { sed 's/[][\.|$(){?+*^]/\\&/g' <<< "$*" } bup-0.33.3/dev/make-random-paths000077500000000000000000000023341454333004200164020ustar00rootroot00000000000000#!/bin/sh """": # -*-python-*- bup_exec="$(dirname "$0")/bup-exec" || exit $? 
exec "$bup_exec" "$0" ${1+"$@"} """ from __future__ import absolute_import, print_function from os.path import abspath, dirname from random import randint from sys import stderr, stdout import errno, re, sys from bup.compat import fsencode, get_argv, get_argvb argv = get_argv() def usage(out=stdout): print('Usage:', argv[0], 'NUM', 'DEST_DIR', file=out) def misuse(): usage(stderr) exit(2) if sys.version_info[0] >= 3: def bytes_from_ints(ints): return bytes(ints) else: def bytes_from_ints(ints): return ''.join([chr(x) for x in ints]) invalid_fragments = re.compile(br'(\x00|[./]|\.\.)') def random_filename(): n = randint(1, 32) def random_candidate(): return invalid_fragments.sub(b'', bytes_from_ints([randint(1, 255) for x in range(n)])) candidate = random_candidate() while not candidate: candidate = random_candidate() return candidate if len(argv) != 3: misuse() count, dest = get_argvb()[1:] count = int(count) i = 0 while i < count: with open(dest + b'/' + random_filename(), 'w') as _: i += 1 bup-0.33.3/dev/mksock000077500000000000000000000004401454333004200143550ustar00rootroot00000000000000#!/bin/sh """": # -*-python-*- bup_exec="$(dirname "$0")/bup-exec" || exit $? exec "$bup_exec" "$0" ${1+"$@"} """ from __future__ import absolute_import import socket, sys from bup.compat import get_argvb s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0) s.bind(get_argvb()[1]) bup-0.33.3/dev/ns-timestamp-resolutions000077500000000000000000000021161454333004200200750ustar00rootroot00000000000000#!/bin/sh """": # -*-python-*- bup_exec="$(dirname "$0")/bup-exec" || exit $? 
exec "$bup_exec" "$0" ${1+"$@"} """ from __future__ import absolute_import import os.path, sys from bup.compat import argv_bytes, get_argvb from bup.helpers import handle_ctrl_c, saved_errors from bup.io import byte_stream from bup import compat, metadata, options import bup.xstat as xstat optspec = """ ns-timestamp-resolutions TEST_FILE_NAME -- """ handle_ctrl_c() o = options.Options(optspec) opt, flags, extra = o.parse_bytes(get_argvb()[1:]) sys.stdout.flush() out = byte_stream(sys.stdout) if len(extra) != 1: o.fatal('must specify a test file name') target = argv_bytes(extra[0]) open(target, 'w').close() xstat.utime(target, (123456789, 123456789)) meta = metadata.from_path(target) def ns_resolution(x): n = 1; while n < 10**9 and x % 10 == 0: x /= 10 n *= 10 return n out.write(b'%d %d\n' % (ns_resolution(meta.atime), ns_resolution(meta.mtime))) if saved_errors: log('warning: %d errors encountered\n' % len(saved_errors)) sys.exit(1) bup-0.33.3/dev/perf-glance000077500000000000000000000022231454333004200152520ustar00rootroot00000000000000#!/usr/bin/env bash set -ueo pipefail if test $# -lt 1; then echo "Usage: perf-glance SRC_DATA_PATH..." 
1>&2 exit 1 fi set -x src_data=("$@") top="$(pwd)" script_name="$(basename $0)" mkdir -p "$top/test/tmp" tmpdir="$(mktemp -d "$top/test/tmp/$script_name-XXXXXXX")" export BUP_DIR="$tmpdir/bup" bup() { "$top/bup" "$@" } get-time() { python -c 'import time; print(time.time())' } rm -rf "$BUP_DIR" all_start="$(get-time)" init_start="$(get-time)" bup init init_finish="$(get-time)" index_start="$(get-time)" bup index "${src_data[@]}" index_finish="$(get-time)" save_start="$(get-time)" bup save -t -n data "${src_data[@]}" save_finish="$(get-time)" mkdir "$tmpdir/restore" restore_start="$(get-time)" bup restore -C "$tmpdir/restore" "/data/latest/" restore_finish="$(get-time)" all_finish="$(get-time)" set +x cat <&2 exit 2 ;; esac bup-0.33.3/dev/prep-for-freebsd-build000077500000000000000000000010161454333004200173250ustar00rootroot00000000000000#!/bin/sh set -exu usage() { echo "Usage: prep-for-freebsd-build [python3]" } pyver="${1:-python3}" # Install build deps export ASSUME_ALWAYS_YES=yes pkg update # https://reviews.freebsd.org/D24816 pkg install rdiff-backup || true pkgs='gmake git bash rsync curl par2cmdline readline duplicity' pkgs="$pkgs rsnapshot" case "$pyver" in python3) pkgs="$pkgs python39 py39-tornado py39-pytest py39-pytest-xdist" pkg install $pkgs ;; *) usage 1>&2 exit 2 ;; esac bup-0.33.3/dev/prep-for-macos-build000077500000000000000000000012621454333004200170200ustar00rootroot00000000000000#!/usr/bin/env bash set -exu usage() { echo "Usage: prep-for-macos-build [python3]" } pyver="${1:-python3}" if ! command -v brew; then /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" fi brew install par2 readline rsync pkg-config md5sha1sum # This avoid's macos interference, i.e. without this it looks like we # won't actually be able to use the readline we just installed above. 
brew link --force readline # "brew unlink readline" will undo this hack case "$pyver" in python3) brew install python pip3 install --user pytest pytest-xdist ;; *) usage 1>&2 exit 2 ;; esac bup-0.33.3/dev/python.c000066400000000000000000000010451454333004200146270ustar00rootroot00000000000000#define _LARGEFILE64_SOURCE 1 #define PY_SSIZE_T_CLEAN 1 #undef NDEBUG #include "../config/config.h" // According to Python, its header has to go first: // http://docs.python.org/3/c-api/intro.html#include-files #include #include "bup/compat.h" #if PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION < 8 # define bup_py_main bup_py_bytes_main #elif PY_MAJOR_VERSION > 2 # define bup_py_main Py_BytesMain #else # define bup_py_main Py_Main #endif int main(int argc, char **argv) { assert(argc > 0); return bup_py_main (argc, argv); } bup-0.33.3/dev/refresh000077500000000000000000000022141454333004200145250ustar00rootroot00000000000000#!/usr/bin/env bash set -ueo pipefail # Related: https://joeyh.name/code/moreutils/ sponge usage() { echo 'Usage: refresh [-a] [-v] DEST' echo ' refresh [-a] [-v] -- DEST' } append='' verbose='' while test $# -gt 0; do case $1 in -a) append=true; shift;; -v) verbose=true; shift;; --) if test "$#" -ne 2; then usage 1>&2 exit 2 fi dest="$2" shift 2 ;; -*) usage 1>&2 exit 2 ;; *) if test "$#" -ne 1; then usage 1>&2 exit 2 fi dest="$1" shift ;; esac done tmpfile="$(mktemp "$dest.sponge-XXXXXXX")" clean-up() { rm -f "$tmpfile" } trap clean-up EXIT # Inefficient, but should clone the permissions if test -e "$dest"; then cp -Lp "$dest" "$tmpfile" fi if test "$append"; then cat >> "$tmpfile" else cat > "$tmpfile" fi if ! cmp -s "$tmpfile" "$dest"; then if test "$verbose"; then echo "Refreshed $dest" 1>&2 fi mv "$tmpfile" "$dest" fi bup-0.33.3/dev/root-status000077500000000000000000000007751454333004200154050ustar00rootroot00000000000000#!/bin/sh """": # -*-python-*- python="$(dirname "$0")/python" || exit $? 
exec "$python" "$0" ${1+"$@"} """ from __future__ import absolute_import, print_function import os, sys if sys.platform.startswith('cygwin'): groups = os.getgroups() if 544 in groups or 0 in groups: print('root') else: print('none') else: if os.environ.get('FAKEROOTKEY'): print('fake') else: if os.geteuid() == 0: print('root') else: print('none') bup-0.33.3/dev/shadow-bin/000077500000000000000000000000001454333004200151755ustar00rootroot00000000000000bup-0.33.3/dev/shadow-bin/bup000077500000000000000000000001401454333004200157040ustar00rootroot00000000000000#!/bin/sh cat <&2 exit 2 ;; esac printf %s\\n "$src" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/'/" bup-0.33.3/dev/sort-z000077500000000000000000000001501454333004200143220ustar00rootroot00000000000000#!/bin/sh set -e if test "$(uname -s)" = NetBSD; then exec sort -R 000 "$@" fi exec sort -z "$@" bup-0.33.3/dev/sparse-test-data000077500000000000000000000054641454333004200162620ustar00rootroot00000000000000#!/bin/sh """": # -*-python-*- bup_exec="$(dirname "$0")/bup-exec" || exit $? 
exec "$bup_exec" "$0" ${1+"$@"} """ from __future__ import absolute_import, print_function from random import randint from sys import stderr, stdout import os, sys from bup.compat import get_argvb from bup.io import byte_stream def smaller_region(max_offset): start = randint(0, max_offset) return (start, min(max_offset, randint(start + 1, start + 5))) def possibly_larger_region(max_offset, min_sparse_len): start = randint(0, max_offset) return (start, min(max_offset, randint(start + 1, start + 3 * min_sparse_len))) def initial_region(max_offset, min_sparse_len): start = 0 return (start, min(max_offset, randint(start + 1, start + 3 * min_sparse_len))) def final_region(max_offset, min_sparse_len): start = max(0, randint(max_offset - 3 * min_sparse_len, max_offset - 1)) return (start, max_offset) def region_around_min_len(max_offset, min_sparse_len): start = randint(0, max_offset) return (start, min(max_offset, randint(start + min_sparse_len - 5, start + min_sparse_len + 5))) generators = [] def random_region(): global generators return generators[randint(0, len(generators) - 1)]() argv = get_argvb() if len(argv) == 0: stdout.flush() out = byte_stream(stdout) if len(argv) == 2: out = open(argv[1], 'wb') else: print('Usage: sparse-test-data [FILE]', file=stderr) sys.exit(2) bup_read_size = 2 ** 16 bup_min_sparse_len = 512 out_size = randint(0, bup_read_size * 10) generators = (lambda : smaller_region(out_size), lambda : possibly_larger_region(out_size, bup_min_sparse_len), lambda : initial_region(out_size, bup_min_sparse_len), lambda : final_region(out_size, bup_min_sparse_len), lambda : region_around_min_len(out_size, bup_min_sparse_len)) sparse = [] sparse.append(random_region()) sparse.append(random_region()) # Handle overlaps if sparse[1][0] < sparse[0][0]: sparse[0], sparse[1] = sparse[1], sparse[0] sparse_offsets = [] sparse_offsets.append(sparse[0][0]) if sparse[1][0] <= sparse[0][1]: sparse_offsets.append(max(sparse[0][1], sparse[1][1])) else: 
sparse_offsets.extend((sparse[0][1], sparse[1][0], sparse[1][1])) if sparse[1][1] != out_size: sparse_offsets.append(out_size) # Now sparse_offsets indicates where to start/stop zero runs data = b'x' pos = 0 print('offsets:', sparse_offsets, file=stderr) for offset in sparse_offsets: count = offset - pos print('write:', 'x' if data == 'x' else '0', count, file=stderr) out.write(data * (offset - pos)) pos += count data = b'\0' if data == b'x' else b'x' out.close() bup-0.33.3/dev/subtree-hash000077500000000000000000000024161454333004200154650ustar00rootroot00000000000000#!/bin/sh """": # -*-python-*- bup_exec="$(dirname "$0")/bup-exec" || exit $? exec "$bup_exec" "$0" ${1+"$@"} """ from __future__ import absolute_import, print_function import os.path, sys from bup.compat import argv_bytes, get_argvb from bup.helpers import handle_ctrl_c, readpipe from bup.io import byte_stream from bup import options optspec = """ subtree-hash ROOT_HASH [PATH_ITEM...] -- """ handle_ctrl_c() o = options.Options(optspec) opt, flags, extra = o.parse_bytes(get_argvb()[1:]) if len(extra) < 1: o.fatal('must specify a root hash') tree_hash = argv_bytes(extra[0]) path = [argv_bytes(x) for x in extra[1:]] while path: target_name = path[0] subtree_items = readpipe([b'git', b'ls-tree', b'-z', tree_hash]) target_hash = None for entry in subtree_items.split(b'\0'): if not entry: break info, name = entry.split(b'\t', 1) if name == target_name: _, _, target_hash = info.split(b' ') break if not target_hash: print("Can't find %r in %s" % (target_name, tree_hash.decode('ascii')), file=sys.stderr) break tree_hash = target_hash path = path[1:] if path: sys.exit(1) sys.stdout.flush() out = byte_stream(sys.stdout) out.write(tree_hash + b'\n') bup-0.33.3/dev/sync-tree000077500000000000000000000025471454333004200150110ustar00rootroot00000000000000#!/usr/bin/env bash set -u usage() { cat <&2; exit 1;; esac done shift $(($OPTIND - 1)) || exit $? if ! 
test $# -eq 2 then usage 1>&2 exit 1 fi src="$1" dest="$2" rsync_opts="-aH --delete" rsync_version=$(rsync --version) if [[ ! "$rsync_version" =~ "ACLs" ]] || [[ "$rsync_version" =~ "no ACLs" ]]; then echo "Not syncing ACLs (not supported by available rsync)" 1>&2 else case $OSTYPE in cygwin|darwin|netbsd) echo "Not syncing ACLs (not yet supported on $OSTYPE)" 1>&2 ;; *) rsync_opts="$rsync_opts -A" ;; esac fi xattrs_available='' if [[ ! "$rsync_version" =~ "xattrs" ]] || [[ "$rsync_version" =~ "no xattrs" ]]; then echo "Not syncing xattrs (not supported by available rsync)" 1>&2 else xattrs_available=yes fi # rsync may fail if -X is specified and the filesystems don't support # xattrs. if test "$xattrs_available"; then rsync $rsync_opts -X "$src" "$dest" if test $? -ne 0; then # Try again without -X exec rsync $rsync_opts "$src" "$dest" fi else exec rsync $rsync_opts "$src" "$dest" fi bup-0.33.3/dev/system-info000077500000000000000000000010151454333004200153420ustar00rootroot00000000000000#!/usr/bin/env bash set -e uname -a case "$OSTYPE" in linux*) cat /proc/cpuinfo cat /proc/meminfo ;; freebsd*) sysctl hw.machine hw.machine_arch hw.model hw.ncpu sysctl hw.{phys,user,real}mem ;; darwin*) system_profiler SPHardwareDataType ;; esac set -x git --version rsync --version # Older versions of par2 don't support -V, but it'll still show the # version when it fails. if command -v par2; then par2 -V || true; fi df -h mount id pwd bup-0.33.3/dev/unknown-owner000077500000000000000000000011171454333004200157170ustar00rootroot00000000000000#!/bin/sh """": # -*-python-*- python="$(dirname "$0")/python" || exit $? 
exec "$python" "$0" ${1+"$@"} """ from __future__ import absolute_import, print_function import grp import pwd import sys def usage(): print("Usage: unknown-owner (--user | --group)", file=sys.stderr) if len(sys.argv) != 2: usage() sys.exit(1) if sys.argv[1] == '--user': max_name_len = max([len(x.pw_name) for x in pwd.getpwall()]) elif sys.argv[1] == '--group': max_name_len = max([len(x.gr_name) for x in grp.getgrall()]) else: usage() sys.exit(1) print('x' * (max_name_len + 1)) bup-0.33.3/dev/update-checkout-info000077500000000000000000000011061454333004200171040ustar00rootroot00000000000000#!/usr/bin/env bash set -euo pipefail top="$(pwd -P)" usage() { echo 'Usage: update-checkout-info DEST'; } if test "$#" -ne 1; then usage 1>&2; exit 1 fi dest="$1" if ! test -f lib/bup/bupsplit.c; then echo 'error: cannot find bup source tree' 1>&2 exit 1 fi if ! test -e .git; then # Not building from a git tree rm -f "$dest" exit 0 fi local_changes=$(git status --porcelain -uno) (git log -1 --pretty="commit='%H'%ndate='%ci'" echo -n 'modified=' if test "$local_changes"; then echo True; else echo False; fi) \ | dev/refresh -v -- "$dest" bup-0.33.3/dev/update-doc-branches000077500000000000000000000024761454333004200167110ustar00rootroot00000000000000#!/usr/bin/env bash # Ensures that the working tree is clean, and Documentation/ is up to # date, and then commits Documentation/*.1 to the man branch, and # Documentation/*.html to the html branch. set -uexo pipefail test "$#" -eq 2 # Must be full ref name, i.e. refs/heads/man, etc. 
man_ref="$1" html_ref="$2" git diff-index --quiet HEAD -- # no uncommitted changes git rev-parse --verify "$man_ref" git rev-parse --verify "$html_ref" echo "$man_ref" | grep -qE '^refs/heads' echo "$html_ref" | grep -qE '^refs/heads' "${MAKE:-make}" mkdir -p t/tmp tmpdir="$(mktemp -d "t/tmp/update-doc-branches-XXXXXX")" trap "$(printf 'rm -rf %q' "$tmpdir")" EXIT tmpidx="$tmpdir/git-index.tmp" for fmt in man html; do rm -f "$tmpidx" for f in $(git ls-files 'Documentation/*.md'); do base="$(basename "$f" .md)" if test "$fmt" = man; then ref="$man_ref" GIT_INDEX_FILE="$tmpidx" git add -f "Documentation/$base.1" else ref="$html_ref" GIT_INDEX_FILE="$tmpidx" git add -f "Documentation/$base.html" fi done msg="Update $fmt pages for $(git describe --always)" tree=$(GIT_INDEX_FILE="$tmpidx" git write-tree --prefix=Documentation) commit=$(echo "$msg" | git commit-tree "$tree" -p refs/heads/"$fmt") git update-ref "$ref" "$commit" done bup-0.33.3/dev/validate-python000077500000000000000000000011141454333004200161750ustar00rootroot00000000000000#!/usr/bin/env bash set -ueo pipefail die () { echo "Usage: validate-python PYTHON_EXECUTABLE"; } test $# -eq 1 || { usage 1>&2 ; exit 2; } python="$1" majver=$("$python" -c 'import sys; print(sys.version_info[0])') minver=$("$python" -c 'import sys; print(sys.version_info[1])') # May not be correct yet, i.e. actual requirement may be higher. 
if test "$majver" -lt 3 || test "$majver" -eq 3 && test "$minver" -lt 7; then # utime follow_symlinks >= 3.3 bup_version_str=$("$python" --version 2>&1) echo "ERROR: found $bup_version_str (must be >= 3.7)" 1>&2 exit 2 fi bup-0.33.3/lib/000077500000000000000000000000001454333004200131325ustar00rootroot00000000000000bup-0.33.3/lib/__init__.py000066400000000000000000000000001454333004200152310ustar00rootroot00000000000000bup-0.33.3/lib/bup/000077500000000000000000000000001454333004200137205ustar00rootroot00000000000000bup-0.33.3/lib/bup/.gitattributes000066400000000000000000000000351454333004200166110ustar00rootroot00000000000000source_info.py export-subst bup-0.33.3/lib/bup/__init__.py000066400000000000000000000000001454333004200160170ustar00rootroot00000000000000bup-0.33.3/lib/bup/_helpers.c000066400000000000000000002215761454333004200157020ustar00rootroot00000000000000#define _LARGEFILE64_SOURCE 1 #define PY_SSIZE_T_CLEAN 1 #undef NDEBUG #include "../../config/config.h" // According to Python, its header has to go first: // http://docs.python.org/3/c-api/intro.html#include-files #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_SYS_MMAN_H #include #endif #ifdef HAVE_SYS_TYPES_H #include #endif #ifdef HAVE_SYS_STAT_H #include #endif #ifdef HAVE_UNISTD_H #include #endif #ifdef HAVE_SYS_TIME_H #include #endif #ifdef HAVE_LINUX_FS_H #include #endif #ifdef HAVE_SYS_IOCTL_H #include #endif #ifdef HAVE_TM_TM_GMTOFF #include #endif #if defined(BUP_RL_EXPECTED_XOPEN_SOURCE) \ && (!defined(_XOPEN_SOURCE) || _XOPEN_SOURCE < BUP_RL_EXPECTED_XOPEN_SOURCE) # warning "_XOPEN_SOURCE version is incorrect for readline" #endif #ifdef BUP_HAVE_READLINE # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wstrict-prototypes" # ifdef BUP_READLINE_INCLUDES_IN_SUBDIR # include # include # else # include # include # endif # pragma GCC diagnostic pop #endif #include "bupsplit.h" #include "bup/intprops.h" #if 
defined(FS_IOC_GETFLAGS) && defined(FS_IOC_SETFLAGS) #define BUP_HAVE_FILE_ATTRS 1 #endif #if PY_MAJOR_VERSION > 2 # define BUP_USE_PYTHON_UTIME 1 #endif #ifndef BUP_USE_PYTHON_UTIME // just for Python 2 now /* * Check for incomplete UTIMENSAT support (NetBSD 6), and if so, * pretend we don't have it. */ #if !defined(AT_FDCWD) || !defined(AT_SYMLINK_NOFOLLOW) #undef HAVE_UTIMENSAT #endif #endif // defined BUP_USE_PYTHON_UTIME #ifndef FS_NOCOW_FL // Of course, this assumes it's a bitfield value. #define FS_NOCOW_FL 0 #endif typedef unsigned char byte; typedef struct { int istty2; } state_t; // cstr_argf: for byte vectors without null characters (e.g. paths) // rbuf_argf: for read-only byte vectors // wbuf_argf: for mutable byte vectors #define get_state(x) ((state_t *) PyModule_GetState(x)) #define cstr_argf "y" #define rbuf_argf "y#" #define wbuf_argf "y*" static void *checked_calloc(size_t n, size_t size) { void *result = calloc(n, size); if (!result) PyErr_NoMemory(); return result; } static void *checked_malloc(size_t n, size_t size) { size_t total; if (!INT_MULTIPLY_OK(n, size, &total)) { PyErr_Format(PyExc_OverflowError, "request to allocate %zu items of size %zu is too large", n, size); return NULL; } void *result = malloc(total); if (!result) return PyErr_NoMemory(); return result; } #ifndef htonll // This function should technically be macro'd out if it's going to be used // more than ocasionally. As of this writing, it'll actually never be called // in real world bup scenarios (because our packs are < MAX_INT bytes). static uint64_t htonll(uint64_t value) { static const int endian_test = 42; if (*(char *)&endian_test == endian_test) // LSB-MSB return ((uint64_t)htonl(value & 0xFFFFFFFF) << 32) | htonl(value >> 32); return value; // already in network byte order MSB-LSB } #endif #define INTEGRAL_ASSIGNMENT_FITS(dest, src) INT_ADD_OK(src, 0, dest) #define INTEGER_TO_PY(x) \ EXPR_SIGNED(x) ? 
PyLong_FromLongLong(x) : PyLong_FromUnsignedLongLong(x) static int bup_ulong_from_py(unsigned long *x, PyObject *py, const char *name) { if (!PyLong_Check(py)) { PyErr_Format(PyExc_TypeError, "expected integer %s", name); return 0; } const unsigned long tmp = PyLong_AsUnsignedLong(py); if (PyErr_Occurred()) { if (PyErr_ExceptionMatches(PyExc_OverflowError)) PyErr_Format(PyExc_OverflowError, "%s too big for unsigned long", name); return 0; } *x = tmp; return 1; } static int bup_uint_from_py(unsigned int *x, PyObject *py, const char *name) { unsigned long tmp; if (!bup_ulong_from_py(&tmp, py, name)) return 0; if (tmp > UINT_MAX) { PyErr_Format(PyExc_OverflowError, "%s too big for unsigned int", name); return 0; } *x = (unsigned int) tmp; return 1; } static int bup_ullong_from_py(unsigned PY_LONG_LONG *x, PyObject *py, const char *name) { if (!PyLong_Check(py)) { PyErr_Format(PyExc_TypeError, "integer argument expected for %s", name); return 0; } const unsigned PY_LONG_LONG tmp = PyLong_AsUnsignedLongLong(py); if (tmp == (unsigned long long) -1 && PyErr_Occurred()) { if (PyErr_ExceptionMatches(PyExc_OverflowError)) PyErr_Format(PyExc_OverflowError, "%s too big for unsigned long long", name); return 0; } *x = tmp; return 1; } static PyObject *bup_bytescmp(PyObject *self, PyObject *args) { PyObject *py_s1, *py_s2; // This is really a PyBytes/PyString if (!PyArg_ParseTuple(args, "SS", &py_s1, &py_s2)) return NULL; char *s1, *s2; Py_ssize_t s1_len, s2_len; if (PyBytes_AsStringAndSize(py_s1, &s1, &s1_len) == -1) return NULL; if (PyBytes_AsStringAndSize(py_s2, &s2, &s2_len) == -1) return NULL; const Py_ssize_t n = (s1_len < s2_len) ? s1_len : s2_len; const int cmp = memcmp(s1, s2, n); if (cmp != 0) return PyLong_FromLong(cmp); if (s1_len == s2_len) return PyLong_FromLong(0);; return PyLong_FromLong((s1_len < s2_len) ? 
-1 : 1); } static PyObject *bup_cat_bytes(PyObject *self, PyObject *args) { unsigned char *bufx = NULL, *bufy = NULL; Py_ssize_t bufx_len, bufx_ofs, bufx_n; Py_ssize_t bufy_len, bufy_ofs, bufy_n; if (!PyArg_ParseTuple(args, rbuf_argf "nn" rbuf_argf "nn", &bufx, &bufx_len, &bufx_ofs, &bufx_n, &bufy, &bufy_len, &bufy_ofs, &bufy_n)) return NULL; if (bufx_ofs < 0) return PyErr_Format(PyExc_ValueError, "negative x offset"); if (bufx_n < 0) return PyErr_Format(PyExc_ValueError, "negative x extent"); if (bufx_ofs > bufx_len) return PyErr_Format(PyExc_ValueError, "x offset greater than length"); if (bufx_n > bufx_len - bufx_ofs) return PyErr_Format(PyExc_ValueError, "x extent past end of buffer"); if (bufy_ofs < 0) return PyErr_Format(PyExc_ValueError, "negative y offset"); if (bufy_n < 0) return PyErr_Format(PyExc_ValueError, "negative y extent"); if (bufy_ofs > bufy_len) return PyErr_Format(PyExc_ValueError, "y offset greater than length"); if (bufy_n > bufy_len - bufy_ofs) return PyErr_Format(PyExc_ValueError, "y extent past end of buffer"); if (bufy_n > PY_SSIZE_T_MAX - bufx_n) return PyErr_Format(PyExc_OverflowError, "result length too long"); PyObject *result = PyBytes_FromStringAndSize(NULL, bufx_n + bufy_n); if (!result) return PyErr_NoMemory(); char *buf = PyBytes_AS_STRING(result); memcpy(buf, bufx + bufx_ofs, bufx_n); memcpy(buf + bufx_n, bufy + bufy_ofs, bufy_n); return result; } static int write_all(int fd, const void *buf, const size_t count) { size_t written = 0; while (written < count) { const ssize_t rc = write(fd, buf + written, count - written); if (rc == -1) return -1; written += rc; } return 0; } static inline int uadd(unsigned long long *dest, const unsigned long long x, const unsigned long long y) { return INT_ADD_OK(x, y, dest); } static PyObject *append_sparse_region(const int fd, unsigned long long n) { while (n) { off_t new_off; if (!INTEGRAL_ASSIGNMENT_FITS(&new_off, n)) new_off = INT_MAX; const off_t off = lseek(fd, new_off, SEEK_CUR); if (off 
== (off_t) -1) return PyErr_SetFromErrno(PyExc_IOError); n -= new_off; } return NULL; } static PyObject *record_sparse_zeros(unsigned long long *new_pending, const int fd, unsigned long long prev_pending, const unsigned long long count) { // Add count additional sparse zeros to prev_pending and store the // result in new_pending, or if the total won't fit in // new_pending, write some of the zeros to fd sparsely, and store // the remaining sum in new_pending. if (!uadd(new_pending, prev_pending, count)) { PyObject *err = append_sparse_region(fd, prev_pending); if (err != NULL) return err; *new_pending = count; } return NULL; } static byte* find_not_zero(const byte * const start, const byte * const end) { // Return a pointer to first non-zero byte between start and end, // or end if there isn't one. assert(start <= end); const unsigned char *cur = start; while (cur < end && *cur == 0) cur++; return (byte *) cur; } static byte* find_trailing_zeros(const byte * const start, const byte * const end) { // Return a pointer to the start of any trailing run of zeros, or // end if there isn't one. assert(start <= end); if (start == end) return (byte *) end; const byte * cur = end; while (cur > start && *--cur == 0) {} if (*cur == 0) return (byte *) cur; else return (byte *) (cur + 1); } static byte *find_non_sparse_end(const byte * const start, const byte * const end, const ptrdiff_t min_len) { // Return the first pointer to a min_len sparse block in [start, // end) if there is one, otherwise a pointer to the start of any // trailing run of zeros. If there are no trailing zeros, return // end. if (start == end) return (byte *) end; assert(start < end); assert(min_len); // Probe in min_len jumps, searching backward from the jump // destination for a non-zero byte. If such a byte is found, move // just past it and try again. 
const byte *candidate = start; // End of any run of zeros, starting at candidate, that we've already seen const byte *end_of_known_zeros = candidate; while (end - candidate >= min_len) // Handle all min_len candidate blocks { const byte * const probe_end = candidate + min_len; const byte * const trailing_zeros = find_trailing_zeros(end_of_known_zeros, probe_end); if (trailing_zeros == probe_end) end_of_known_zeros = candidate = probe_end; else if (trailing_zeros == end_of_known_zeros) { assert(candidate >= start); assert(candidate <= end); assert(*candidate == 0); return (byte *) candidate; } else { candidate = trailing_zeros; end_of_known_zeros = probe_end; } } if (candidate == end) return (byte *) end; // No min_len sparse run found, search backward from end const byte * const trailing_zeros = find_trailing_zeros(end_of_known_zeros, end); if (trailing_zeros == end_of_known_zeros) { assert(candidate >= start); assert(candidate < end); assert(*candidate == 0); assert(end - candidate < min_len); return (byte *) candidate; } if (trailing_zeros == end) { assert(*(end - 1) != 0); return (byte *) end; } assert(end - trailing_zeros < min_len); assert(trailing_zeros >= start); assert(trailing_zeros < end); assert(*trailing_zeros == 0); return (byte *) trailing_zeros; } static PyObject *bup_write_sparsely(PyObject *self, PyObject *args) { int fd; unsigned char *buf = NULL; Py_ssize_t sbuf_len; PyObject *py_min_sparse_len, *py_prev_sparse_len; if (!PyArg_ParseTuple(args, "i" rbuf_argf "OO", &fd, &buf, &sbuf_len, &py_min_sparse_len, &py_prev_sparse_len)) return NULL; ptrdiff_t min_sparse_len; unsigned long long prev_sparse_len, buf_len, ul_min_sparse_len; if (!bup_ullong_from_py(&ul_min_sparse_len, py_min_sparse_len, "min_sparse_len")) return NULL; if (!INTEGRAL_ASSIGNMENT_FITS(&min_sparse_len, ul_min_sparse_len)) return PyErr_Format(PyExc_OverflowError, "min_sparse_len too large"); if (!bup_ullong_from_py(&prev_sparse_len, py_prev_sparse_len, "prev_sparse_len")) return 
NULL; if (sbuf_len < 0) return PyErr_Format(PyExc_ValueError, "negative bufer length"); if (!INTEGRAL_ASSIGNMENT_FITS(&buf_len, sbuf_len)) return PyErr_Format(PyExc_OverflowError, "buffer length too large"); const byte * block = buf; // Start of pending block const byte * const end = buf + buf_len; unsigned long long zeros = prev_sparse_len; while (1) { assert(block <= end); if (block == end) return PyLong_FromUnsignedLongLong(zeros); if (*block != 0) { // Look for the end of block, i.e. the next sparse run of // at least min_sparse_len zeros, or the end of the // buffer. const byte * const probe = find_non_sparse_end(block + 1, end, min_sparse_len); // Either at end of block, or end of non-sparse; write pending data PyObject *err = append_sparse_region(fd, zeros); if (err != NULL) return err; int rc = write_all(fd, block, probe - block); if (rc) return PyErr_SetFromErrno(PyExc_IOError); if (end - probe < min_sparse_len) zeros = end - probe; else zeros = min_sparse_len; block = probe + zeros; } else // *block == 0 { // Should be in the first loop iteration, a sparse run of // zeros, or nearly at the end of the block (within // min_sparse_len). 
const byte * const zeros_end = find_not_zero(block, end); PyObject *err = record_sparse_zeros(&zeros, fd, zeros, zeros_end - block); if (err != NULL) return err; assert(block <= zeros_end); block = zeros_end; } } } static PyObject *selftest(PyObject *self, PyObject *args) { if (!PyArg_ParseTuple(args, "")) return NULL; return Py_BuildValue("i", !bupsplit_selftest()); } static PyObject *blobbits(PyObject *self, PyObject *args) { if (!PyArg_ParseTuple(args, "")) return NULL; return Py_BuildValue("i", BUP_BLOBBITS); } static PyObject *splitbuf(PyObject *self, PyObject *args) { int out = 0, bits = -1; Py_buffer buf; if (!PyArg_ParseTuple(args, "y*", &buf)) return NULL; assert(buf.len <= INT_MAX); out = bupsplit_find_ofs(buf.buf, buf.len, &bits); PyBuffer_Release(&buf); if (out) assert(bits >= BUP_BLOBBITS); return Py_BuildValue("ii", out, bits); } static PyObject *bitmatch(PyObject *self, PyObject *args) { unsigned char *buf1 = NULL, *buf2 = NULL; Py_ssize_t len1 = 0, len2 = 0; Py_ssize_t byte; int bit; if (!PyArg_ParseTuple(args, rbuf_argf rbuf_argf, &buf1, &len1, &buf2, &len2)) return NULL; bit = 0; for (byte = 0; byte < len1 && byte < len2; byte++) { int b1 = buf1[byte], b2 = buf2[byte]; if (b1 != b2) { for (bit = 0; bit < 8; bit++) if ( (b1 & (0x80 >> bit)) != (b2 & (0x80 >> bit)) ) break; break; } } Py_ssize_t result; if (!INT_MULTIPLY_OK(byte, 8, &result) || !INT_ADD_OK(result, bit, &result)) { PyErr_Format(PyExc_OverflowError, "bitmatch bit count too large"); return NULL; } return PyLong_FromSsize_t(result); } static PyObject *firstword(PyObject *self, PyObject *args) { unsigned char *buf = NULL; Py_ssize_t len = 0; uint32_t v; if (!PyArg_ParseTuple(args, rbuf_argf, &buf, &len)) return NULL; if (len < 4) return NULL; v = ntohl(*(uint32_t *)buf); return PyLong_FromUnsignedLong(v); } #define BLOOM2_HEADERLEN 16 static void to_bloom_address_bitmask4(const unsigned char *buf, const int nbits, uint64_t *v, unsigned char *bitmask) { int bit; uint32_t high; uint64_t 
raw, mask; memcpy(&high, buf, 4); mask = (1<> (37-nbits)) & 0x7; *v = (raw >> (40-nbits)) & mask; *bitmask = 1 << bit; } static void to_bloom_address_bitmask5(const unsigned char *buf, const int nbits, uint32_t *v, unsigned char *bitmask) { int bit; uint32_t high; uint32_t raw, mask; memcpy(&high, buf, 4); mask = (1<> (29-nbits)) & 0x7; *v = (raw >> (32-nbits)) & mask; *bitmask = 1 << bit; } #define BLOOM_SET_BIT(name, address, otype) \ static void name(unsigned char *bloom, const unsigned char *buf, const int nbits)\ {\ unsigned char bitmask;\ otype v;\ address(buf, nbits, &v, &bitmask);\ bloom[BLOOM2_HEADERLEN+v] |= bitmask;\ } BLOOM_SET_BIT(bloom_set_bit4, to_bloom_address_bitmask4, uint64_t) BLOOM_SET_BIT(bloom_set_bit5, to_bloom_address_bitmask5, uint32_t) #define BLOOM_GET_BIT(name, address, otype) \ static int name(const unsigned char *bloom, const unsigned char *buf, const int nbits)\ {\ unsigned char bitmask;\ otype v;\ address(buf, nbits, &v, &bitmask);\ return bloom[BLOOM2_HEADERLEN+v] & bitmask;\ } BLOOM_GET_BIT(bloom_get_bit4, to_bloom_address_bitmask4, uint64_t) BLOOM_GET_BIT(bloom_get_bit5, to_bloom_address_bitmask5, uint32_t) static PyObject *bloom_add(PyObject *self, PyObject *args) { Py_buffer bloom, sha; int nbits = 0, k = 0; if (!PyArg_ParseTuple(args, wbuf_argf wbuf_argf "ii", &bloom, &sha, &nbits, &k)) return NULL; PyObject *result = NULL; if (bloom.len < 16+(1< 29) goto clean_and_return; unsigned char *cur = sha.buf; unsigned char *end; for (end = cur + sha.len; cur < end; cur += 20/k) bloom_set_bit5(bloom.buf, cur, nbits); } else if (k == 4) { if (nbits > 37) goto clean_and_return; unsigned char *cur = sha.buf; unsigned char *end = cur + sha.len; for (; cur < end; cur += 20/k) bloom_set_bit4(bloom.buf, cur, nbits); } else goto clean_and_return; result = Py_BuildValue("n", sha.len / 20); clean_and_return: PyBuffer_Release(&bloom); PyBuffer_Release(&sha); return result; } static PyObject *bloom_contains(PyObject *self, PyObject *args) { 
Py_buffer bloom; unsigned char *sha = NULL; Py_ssize_t len = 0; int nbits = 0, k = 0; if (!PyArg_ParseTuple(args, wbuf_argf rbuf_argf "ii", &bloom, &sha, &len, &nbits, &k)) return NULL; PyObject *result = NULL; if (len != 20) goto clean_and_return; if (k == 5) { if (nbits > 29) goto clean_and_return; int steps; unsigned char *end; for (steps = 1, end = sha + 20; sha < end; sha += 20/k, steps++) if (!bloom_get_bit5(bloom.buf, sha, nbits)) { result = Py_BuildValue("Oi", Py_None, steps); goto clean_and_return; } } else if (k == 4) { if (nbits > 37) goto clean_and_return; int steps; unsigned char *end; for (steps = 1, end = sha + 20; sha < end; sha += 20/k, steps++) if (!bloom_get_bit4(bloom.buf, sha, nbits)) { result = Py_BuildValue("Oi", Py_None, steps); goto clean_and_return; } } else goto clean_and_return; result = Py_BuildValue("ii", 1, k); clean_and_return: PyBuffer_Release(&bloom); return result; } static uint32_t _extract_bits(unsigned char *buf, int nbits) { uint32_t v, mask; mask = (1<> (32-nbits)) & mask; return v; } static PyObject *extract_bits(PyObject *self, PyObject *args) { unsigned char *buf = NULL; Py_ssize_t len = 0; int nbits = 0; if (!PyArg_ParseTuple(args, rbuf_argf "i", &buf, &len, &nbits)) return NULL; if (len < 4) return NULL; return PyLong_FromUnsignedLong(_extract_bits(buf, nbits)); } struct sha { unsigned char bytes[20]; }; static inline int _cmp_sha(const struct sha *sha1, const struct sha *sha2) { return memcmp(sha1->bytes, sha2->bytes, sizeof(sha1->bytes)); } struct idx { unsigned char *map; struct sha *cur; struct sha *end; uint32_t *cur_name; Py_ssize_t bytes; int name_base; }; static void _fix_idx_order(struct idx **idxs, Py_ssize_t *last_i) { struct idx *idx; Py_ssize_t low, mid, high; int c = 0; idx = idxs[*last_i]; if (idxs[*last_i]->cur >= idxs[*last_i]->end) { idxs[*last_i] = NULL; PyMem_Free(idx); --*last_i; return; } if (*last_i == 0) return; low = *last_i-1; mid = *last_i; high = 0; while (low >= high) { mid = (low + high) / 
2; c = _cmp_sha(idx->cur, idxs[mid]->cur); if (c < 0) high = mid + 1; else if (c > 0) low = mid - 1; else break; } if (c < 0) ++mid; if (mid == *last_i) return; memmove(&idxs[mid+1], &idxs[mid], (*last_i-mid)*sizeof(struct idx *)); idxs[mid] = idx; } static uint32_t _get_idx_i(struct idx *idx) { if (idx->cur_name == NULL) return idx->name_base; return ntohl(*idx->cur_name) + idx->name_base; } #define MIDX4_HEADERLEN 12 static PyObject *merge_into(PyObject *self, PyObject *args) { struct sha *sha_ptr, *sha_start = NULL; uint32_t *table_ptr, *name_ptr, *name_start; int i; unsigned int total; uint32_t count, prefix; Py_buffer fmap; int bits;; PyObject *py_total, *ilist = NULL; if (!PyArg_ParseTuple(args, wbuf_argf "iOO", &fmap, &bits, &py_total, &ilist)) return NULL; PyObject *result = NULL; struct idx **idxs = NULL; Py_ssize_t num_i = 0; int *idx_buf_init = NULL; Py_buffer *idx_buf = NULL; if (!bup_uint_from_py(&total, py_total, "total")) goto clean_and_return; num_i = PyList_Size(ilist); if (!(idxs = checked_malloc(num_i, sizeof(struct idx *)))) goto clean_and_return; if (!(idx_buf_init = checked_calloc(num_i, sizeof(int)))) goto clean_and_return; if (!(idx_buf = checked_malloc(num_i, sizeof(Py_buffer)))) goto clean_and_return; for (i = 0; i < num_i; i++) { long len, sha_ofs, name_map_ofs; if (!(idxs[i] = checked_malloc(1, sizeof(struct idx)))) goto clean_and_return; PyObject *itup = PyList_GetItem(ilist, i); if (!PyArg_ParseTuple(itup, wbuf_argf "llli", &(idx_buf[i]), &len, &sha_ofs, &name_map_ofs, &idxs[i]->name_base)) return NULL; idx_buf_init[i] = 1; idxs[i]->map = idx_buf[i].buf; idxs[i]->bytes = idx_buf[i].len; idxs[i]->cur = (struct sha *)&idxs[i]->map[sha_ofs]; idxs[i]->end = &idxs[i]->cur[len]; if (name_map_ofs) idxs[i]->cur_name = (uint32_t *)&idxs[i]->map[name_map_ofs]; else idxs[i]->cur_name = NULL; } table_ptr = (uint32_t *) &((unsigned char *) fmap.buf)[MIDX4_HEADERLEN]; sha_start = sha_ptr = (struct sha *)&table_ptr[1<= 0) { struct idx *idx; uint32_t 
new_prefix; if (count % 102424 == 0 && get_state(self)->istty2) fprintf(stderr, "midx: writing %.2f%% (%d/%d)\r", count*100.0/total, count, total); idx = idxs[last_i]; new_prefix = _extract_bits((unsigned char *)idx->cur, bits); while (prefix < new_prefix) table_ptr[prefix++] = htonl(count); memcpy(sha_ptr++, idx->cur, sizeof(struct sha)); *name_ptr++ = htonl(_get_idx_i(idx)); ++idx->cur; if (idx->cur_name != NULL) ++idx->cur_name; _fix_idx_order(idxs, &last_i); ++count; } while (prefix < ((uint32_t) 1 << bits)) table_ptr[prefix++] = htonl(count); assert(count == total); assert(prefix == ((uint32_t) 1 << bits)); assert(sha_ptr == sha_start+count); assert(name_ptr == name_start+count); result = PyLong_FromUnsignedLong(count); clean_and_return: if (idx_buf_init) { for (i = 0; i < num_i; i++) if (idx_buf_init[i]) PyBuffer_Release(&(idx_buf[i])); free(idx_buf_init); free(idx_buf); } if (idxs) { for (i = 0; i < num_i; i++) free(idxs[i]); free(idxs); } PyBuffer_Release(&fmap); return result; } #define FAN_ENTRIES 256 static PyObject *write_idx(PyObject *self, PyObject *args) { char *filename = NULL; PyObject *py_total, *idx = NULL; PyObject *part; unsigned int total = 0; uint32_t count; int i; uint32_t *fan_ptr, *crc_ptr, *ofs_ptr; uint64_t *ofs64_ptr; struct sha *sha_ptr; Py_buffer fmap; if (!PyArg_ParseTuple(args, cstr_argf wbuf_argf "OO", &filename, &fmap, &idx, &py_total)) return NULL; PyObject *result = NULL; if (!bup_uint_from_py(&total, py_total, "total")) goto clean_and_return; if (PyList_Size (idx) != FAN_ENTRIES) // Check for list of the right length. 
{ result = PyErr_Format (PyExc_TypeError, "idx must contain %d entries", FAN_ENTRIES); goto clean_and_return; } const char idx_header[] = "\377tOc\0\0\0\002"; memcpy (fmap.buf, idx_header, sizeof(idx_header) - 1); fan_ptr = (uint32_t *)&((unsigned char *)fmap.buf)[sizeof(idx_header) - 1]; sha_ptr = (struct sha *)&fan_ptr[FAN_ENTRIES]; crc_ptr = (uint32_t *)&sha_ptr[total]; ofs_ptr = (uint32_t *)&crc_ptr[total]; ofs64_ptr = (uint64_t *)&ofs_ptr[total]; count = 0; uint32_t ofs64_count = 0; for (i = 0; i < FAN_ENTRIES; ++i) { part = PyList_GET_ITEM(idx, i); PyList_Sort(part); uint32_t plen; if (!INTEGRAL_ASSIGNMENT_FITS(&plen, PyList_GET_SIZE(part)) || UINT32_MAX - count < plen) { PyErr_Format(PyExc_OverflowError, "too many objects in index part"); goto clean_and_return; } count += plen; *fan_ptr++ = htonl(count); uint32_t j; for (j = 0; j < plen; ++j) { unsigned char *sha = NULL; Py_ssize_t sha_len = 0; PyObject *crc_py, *ofs_py; unsigned int crc; unsigned PY_LONG_LONG ofs_ull; uint64_t ofs; if (!PyArg_ParseTuple(PyList_GET_ITEM(part, j), rbuf_argf "OO", &sha, &sha_len, &crc_py, &ofs_py)) goto clean_and_return; if(!bup_uint_from_py(&crc, crc_py, "crc")) goto clean_and_return; if(!bup_ullong_from_py(&ofs_ull, ofs_py, "ofs")) goto clean_and_return; assert(crc <= UINT32_MAX); assert(ofs_ull <= UINT64_MAX); ofs = ofs_ull; if (sha_len != sizeof(struct sha)) goto clean_and_return; memcpy(sha_ptr++, sha, sizeof(struct sha)); *crc_ptr++ = htonl(crc); if (ofs > 0x7fffffff) { *ofs64_ptr++ = htonll(ofs); ofs = 0x80000000 | ofs64_count++; } *ofs_ptr++ = htonl((uint32_t)ofs); } } int rc = msync(fmap.buf, fmap.len, MS_ASYNC); if (rc != 0) { result = PyErr_SetFromErrnoWithFilename(PyExc_IOError, filename); goto clean_and_return; } result = PyLong_FromUnsignedLong(count); clean_and_return: PyBuffer_Release(&fmap); return result; } // I would have made this a lower-level function that just fills in a buffer // with random values, and then written those values from python. 
But that's // about 20% slower in my tests, and since we typically generate random // numbers for benchmarking other parts of bup, any slowness in generating // random bytes will make our benchmarks inaccurate. Plus nobody wants // pseudorandom bytes much except for this anyway. static PyObject *write_random(PyObject *self, PyObject *args) { uint32_t buf[1024/4]; int fd = -1, seed = 0, verbose = 0; ssize_t ret; long long len = 0, kbytes = 0, written = 0; if (!PyArg_ParseTuple(args, "iLii", &fd, &len, &seed, &verbose)) return NULL; srandom(seed); for (kbytes = 0; kbytes < len/1024; kbytes++) { unsigned i; for (i = 0; i < sizeof(buf)/sizeof(buf[0]); i++) buf[i] = (uint32_t) random(); ret = write(fd, buf, sizeof(buf)); if (ret < 0) ret = 0; written += ret; if (ret < (int)sizeof(buf)) break; if (verbose && kbytes/1024 > 0 && !(kbytes%1024)) fprintf(stderr, "Random: %lld Mbytes\r", kbytes/1024); } // handle non-multiples of 1024 if (len % 1024) { unsigned i; for (i = 0; i < sizeof(buf)/sizeof(buf[0]); i++) buf[i] = (uint32_t) random(); ret = write(fd, buf, len % 1024); if (ret < 0) ret = 0; written += ret; } if (kbytes/1024 > 0) fprintf(stderr, "Random: %lld Mbytes, done.\n", kbytes/1024); return Py_BuildValue("L", written); } static PyObject *random_sha(PyObject *self, PyObject *args) { static int seeded = 0; uint32_t shabuf[20/4]; int i; if (!seeded) { assert(sizeof(shabuf) == 20); srandom((unsigned int) time(NULL)); seeded = 1; } if (!PyArg_ParseTuple(args, "")) return NULL; memset(shabuf, 0, sizeof(shabuf)); for (i=0; i < 20/4; i++) shabuf[i] = (uint32_t) random(); return Py_BuildValue(rbuf_argf, shabuf, 20); } static int _open_noatime(const char *filename, int attrs) { int attrs_noatime, fd; attrs |= O_RDONLY; #ifdef O_NOFOLLOW attrs |= O_NOFOLLOW; #endif #ifdef O_LARGEFILE attrs |= O_LARGEFILE; #endif attrs_noatime = attrs; #ifdef O_NOATIME attrs_noatime |= O_NOATIME; #endif fd = open(filename, attrs_noatime); if (fd < 0 && errno == EPERM) { // older Linux kernels 
would return EPERM if you used O_NOATIME // and weren't the file's owner. This pointless restriction was // relaxed eventually, but we have to handle it anyway. // (VERY old kernels didn't recognized O_NOATIME, but they would // just harmlessly ignore it, so this branch won't trigger) fd = open(filename, attrs); } return fd; } static PyObject *open_noatime(PyObject *self, PyObject *args) { char *filename = NULL; int fd; if (!PyArg_ParseTuple(args, cstr_argf, &filename)) return NULL; fd = _open_noatime(filename, 0); if (fd < 0) return PyErr_SetFromErrnoWithFilename(PyExc_OSError, filename); return Py_BuildValue("i", fd); } static PyObject *fadvise_done(PyObject *self, PyObject *args) { int fd = -1; long long llofs, lllen = 0; if (!PyArg_ParseTuple(args, "iLL", &fd, &llofs, &lllen)) return NULL; off_t ofs, len; if (!INTEGRAL_ASSIGNMENT_FITS(&ofs, llofs)) return PyErr_Format(PyExc_OverflowError, "fadvise offset overflows off_t"); if (!INTEGRAL_ASSIGNMENT_FITS(&len, lllen)) return PyErr_Format(PyExc_OverflowError, "fadvise length overflows off_t"); #ifdef POSIX_FADV_DONTNEED posix_fadvise(fd, ofs, len, POSIX_FADV_DONTNEED); #endif return Py_BuildValue(""); } // Currently the Linux kernel and FUSE disagree over the type for // FS_IOC_GETFLAGS and FS_IOC_SETFLAGS. The kernel actually uses int, // but FUSE chose long (matching the declaration in linux/fs.h). So // if you use int, and then traverse a FUSE filesystem, you may // corrupt the stack. But if you use long, then you may get invalid // results on big-endian systems. // // For now, we just use long, and then disable Linux attrs entirely // (with a warning) in helpers.py on systems that are affected. 
#ifdef BUP_HAVE_FILE_ATTRS static PyObject *bup_get_linux_file_attr(PyObject *self, PyObject *args) { int rc; unsigned long attr; char *path; int fd; if (!PyArg_ParseTuple(args, cstr_argf, &path)) return NULL; fd = _open_noatime(path, O_NONBLOCK); if (fd == -1) return PyErr_SetFromErrnoWithFilename(PyExc_OSError, path); attr = 0; // Handle int/long mismatch (see above) rc = ioctl(fd, FS_IOC_GETFLAGS, &attr); if (rc == -1) { close(fd); return PyErr_SetFromErrnoWithFilename(PyExc_OSError, path); } close(fd); assert(attr <= UINT_MAX); // Kernel type is actually int return PyLong_FromUnsignedLong(attr); } #endif /* def BUP_HAVE_FILE_ATTRS */ #ifdef BUP_HAVE_FILE_ATTRS static PyObject *bup_set_linux_file_attr(PyObject *self, PyObject *args) { int rc; unsigned long orig_attr; unsigned int attr; char *path; PyObject *py_attr; int fd; if (!PyArg_ParseTuple(args, cstr_argf "O", &path, &py_attr)) return NULL; if (!bup_uint_from_py(&attr, py_attr, "attr")) return NULL; fd = open(path, O_RDONLY | O_NONBLOCK | O_LARGEFILE | O_NOFOLLOW); if (fd == -1) return PyErr_SetFromErrnoWithFilename(PyExc_OSError, path); // Restrict attr to modifiable flags acdeijstuADST -- see // chattr(1) and the e2fsprogs source. Letter to flag mapping is // in pf.c flags_array[]. attr &= FS_APPEND_FL | FS_COMPR_FL | FS_NODUMP_FL | FS_EXTENT_FL | FS_IMMUTABLE_FL | FS_JOURNAL_DATA_FL | FS_SECRM_FL | FS_NOTAIL_FL | FS_UNRM_FL | FS_NOATIME_FL | FS_DIRSYNC_FL | FS_SYNC_FL | FS_TOPDIR_FL | FS_NOCOW_FL; // The extents flag can't be removed, so don't (see chattr(1) and chattr.c). 
orig_attr = 0; // Handle int/long mismatch (see above) rc = ioctl(fd, FS_IOC_GETFLAGS, &orig_attr); if (rc == -1) { close(fd); return PyErr_SetFromErrnoWithFilename(PyExc_OSError, path); } assert(orig_attr <= UINT_MAX); // Kernel type is actually int attr |= ((unsigned int) orig_attr) & FS_EXTENT_FL; rc = ioctl(fd, FS_IOC_SETFLAGS, &attr); if (rc == -1) { close(fd); return PyErr_SetFromErrnoWithFilename(PyExc_OSError, path); } close(fd); return Py_BuildValue("O", Py_None); } #endif /* def BUP_HAVE_FILE_ATTRS */ #ifndef BUP_USE_PYTHON_UTIME // just for Python 2 now #ifndef HAVE_UTIMENSAT #ifndef HAVE_UTIMES #error "cannot find utimensat or utimes()" #endif #ifndef HAVE_LUTIMES #error "cannot find utimensat or lutimes()" #endif #endif #endif // defined BUP_USE_PYTHON_UTIME #define ASSIGN_PYLONG_TO_INTEGRAL(dest, pylong, overflow) \ ({ \ int result = 0; \ *(overflow) = 0; \ const long long lltmp = PyLong_AsLongLong(pylong); \ if (lltmp == -1 && PyErr_Occurred()) \ { \ if (PyErr_ExceptionMatches(PyExc_OverflowError)) \ { \ const unsigned long long ulltmp = PyLong_AsUnsignedLongLong(pylong); \ if (ulltmp == (unsigned long long) -1 && PyErr_Occurred()) \ { \ if (PyErr_ExceptionMatches(PyExc_OverflowError)) \ { \ PyErr_Clear(); \ *(overflow) = 1; \ } \ } \ if (INTEGRAL_ASSIGNMENT_FITS((dest), ulltmp)) \ result = 1; \ else \ *(overflow) = 1; \ } \ } \ else \ { \ if (INTEGRAL_ASSIGNMENT_FITS((dest), lltmp)) \ result = 1; \ else \ *(overflow) = 1; \ } \ result; \ }) #ifndef BUP_USE_PYTHON_UTIME // just for Python 2 now #ifdef HAVE_UTIMENSAT static PyObject *bup_utimensat(PyObject *self, PyObject *args) { int rc; int fd, flag; char *path; PyObject *access_py, *modification_py; struct timespec ts[2]; if (!PyArg_ParseTuple(args, "i" cstr_argf "((Ol)(Ol))i", &fd, &path, &access_py, &(ts[0].tv_nsec), &modification_py, &(ts[1].tv_nsec), &flag)) return NULL; int overflow; if (!ASSIGN_PYLONG_TO_INTEGRAL(&(ts[0].tv_sec), access_py, &overflow)) { if (overflow) 
PyErr_SetString(PyExc_ValueError, "unable to convert access time seconds for utimensat"); return NULL; } if (!ASSIGN_PYLONG_TO_INTEGRAL(&(ts[1].tv_sec), modification_py, &overflow)) { if (overflow) PyErr_SetString(PyExc_ValueError, "unable to convert modification time seconds for utimensat"); return NULL; } rc = utimensat(fd, path, ts, flag); if (rc != 0) return PyErr_SetFromErrnoWithFilename(PyExc_OSError, path); return Py_BuildValue("O", Py_None); } #endif /* def HAVE_UTIMENSAT */ #if defined(HAVE_UTIMES) || defined(HAVE_LUTIMES) static int bup_parse_xutimes_args(char **path, struct timeval tv[2], PyObject *args) { PyObject *access_py, *modification_py; long long access_us, modification_us; // POSIX guarantees tv_usec is signed. if (!PyArg_ParseTuple(args, cstr_argf "((OL)(OL))", path, &access_py, &access_us, &modification_py, &modification_us)) return 0; int overflow; if (!ASSIGN_PYLONG_TO_INTEGRAL(&(tv[0].tv_sec), access_py, &overflow)) { if (overflow) PyErr_SetString(PyExc_ValueError, "unable to convert access time seconds to timeval"); return 0; } if (!INTEGRAL_ASSIGNMENT_FITS(&(tv[0].tv_usec), access_us)) { PyErr_SetString(PyExc_ValueError, "unable to convert access time nanoseconds to timeval"); return 0; } if (!ASSIGN_PYLONG_TO_INTEGRAL(&(tv[1].tv_sec), modification_py, &overflow)) { if (overflow) PyErr_SetString(PyExc_ValueError, "unable to convert modification time seconds to timeval"); return 0; } if (!INTEGRAL_ASSIGNMENT_FITS(&(tv[1].tv_usec), modification_us)) { PyErr_SetString(PyExc_ValueError, "unable to convert modification time nanoseconds to timeval"); return 0; } return 1; } #endif /* defined(HAVE_UTIMES) || defined(HAVE_LUTIMES) */ #ifdef HAVE_UTIMES static PyObject *bup_utimes(PyObject *self, PyObject *args) { char *path; struct timeval tv[2]; if (!bup_parse_xutimes_args(&path, tv, args)) return NULL; int rc = utimes(path, tv); if (rc != 0) return PyErr_SetFromErrnoWithFilename(PyExc_OSError, path); return Py_BuildValue("O", Py_None); } #endif 
/* def HAVE_UTIMES */ #ifdef HAVE_LUTIMES static PyObject *bup_lutimes(PyObject *self, PyObject *args) { char *path; struct timeval tv[2]; if (!bup_parse_xutimes_args(&path, tv, args)) return NULL; int rc = lutimes(path, tv); if (rc != 0) return PyErr_SetFromErrnoWithFilename(PyExc_OSError, path); return Py_BuildValue("O", Py_None); } #endif /* def HAVE_LUTIMES */ #endif // defined BUP_USE_PYTHON_UTIME #ifdef HAVE_STAT_ST_ATIM # define BUP_STAT_ATIME_NS(st) (st)->st_atim.tv_nsec # define BUP_STAT_MTIME_NS(st) (st)->st_mtim.tv_nsec # define BUP_STAT_CTIME_NS(st) (st)->st_ctim.tv_nsec #elif defined HAVE_STAT_ST_ATIMENSEC # define BUP_STAT_ATIME_NS(st) (st)->st_atimespec.tv_nsec # define BUP_STAT_MTIME_NS(st) (st)->st_mtimespec.tv_nsec # define BUP_STAT_CTIME_NS(st) (st)->st_ctimespec.tv_nsec #else # define BUP_STAT_ATIME_NS(st) 0 # define BUP_STAT_MTIME_NS(st) 0 # define BUP_STAT_CTIME_NS(st) 0 #endif static PyObject *stat_struct_to_py(const struct stat *st, const char *filename, int fd) { // We can check the known (via POSIX) signed and unsigned types at // compile time, but not (easily) the unspecified types, so handle // those via INTEGER_TO_PY(). Assumes ns values will fit in a // long. 
return Py_BuildValue("NKNNNNNL(Nl)(Nl)(Nl)", INTEGER_TO_PY(st->st_mode), (unsigned PY_LONG_LONG) st->st_ino, INTEGER_TO_PY(st->st_dev), INTEGER_TO_PY(st->st_nlink), INTEGER_TO_PY(st->st_uid), INTEGER_TO_PY(st->st_gid), INTEGER_TO_PY(st->st_rdev), (PY_LONG_LONG) st->st_size, INTEGER_TO_PY(st->st_atime), (long) BUP_STAT_ATIME_NS(st), INTEGER_TO_PY(st->st_mtime), (long) BUP_STAT_MTIME_NS(st), INTEGER_TO_PY(st->st_ctime), (long) BUP_STAT_CTIME_NS(st)); } static PyObject *bup_stat(PyObject *self, PyObject *args) { int rc; char *filename; if (!PyArg_ParseTuple(args, cstr_argf, &filename)) return NULL; struct stat st; rc = stat(filename, &st); if (rc != 0) return PyErr_SetFromErrnoWithFilename(PyExc_OSError, filename); return stat_struct_to_py(&st, filename, 0); } static PyObject *bup_lstat(PyObject *self, PyObject *args) { int rc; char *filename; if (!PyArg_ParseTuple(args, cstr_argf, &filename)) return NULL; struct stat st; rc = lstat(filename, &st); if (rc != 0) return PyErr_SetFromErrnoWithFilename(PyExc_OSError, filename); return stat_struct_to_py(&st, filename, 0); } static PyObject *bup_fstat(PyObject *self, PyObject *args) { int rc, fd; if (!PyArg_ParseTuple(args, "i", &fd)) return NULL; struct stat st; rc = fstat(fd, &st); if (rc != 0) return PyErr_SetFromErrno(PyExc_OSError); return stat_struct_to_py(&st, NULL, fd); } #ifdef HAVE_TM_TM_GMTOFF static PyObject *bup_localtime(PyObject *self, PyObject *args) { long long lltime; time_t ttime; if (!PyArg_ParseTuple(args, "L", &lltime)) return NULL; if (!INTEGRAL_ASSIGNMENT_FITS(&ttime, lltime)) return PyErr_Format(PyExc_OverflowError, "time value too large"); struct tm tm; tzset(); if(localtime_r(&ttime, &tm) == NULL) return PyErr_SetFromErrno(PyExc_OSError); // Match the Python struct_time values. 
return Py_BuildValue("[i,i,i,i,i,i,i,i,i,i,s]", 1900 + tm.tm_year, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec, tm.tm_wday, tm.tm_yday + 1, tm.tm_isdst, tm.tm_gmtoff, tm.tm_zone); } #endif /* def HAVE_TM_TM_GMTOFF */ #ifdef BUP_MINCORE_BUF_TYPE static PyObject *bup_mincore(PyObject *self, PyObject *args) { Py_buffer src, dest; PyObject *py_src_n, *py_src_off, *py_dest_off; if (!PyArg_ParseTuple(args, cstr_argf "*OOw*O", &src, &py_src_n, &py_src_off, &dest, &py_dest_off)) return NULL; PyObject *result = NULL; unsigned long long src_n, src_off, dest_off; if (!(bup_ullong_from_py(&src_n, py_src_n, "src_n") && bup_ullong_from_py(&src_off, py_src_off, "src_off") && bup_ullong_from_py(&dest_off, py_dest_off, "dest_off"))) goto clean_and_return; unsigned long long src_region_end; if (!uadd(&src_region_end, src_off, src_n)) { result = PyErr_Format(PyExc_OverflowError, "(src_off + src_n) too large"); goto clean_and_return; } assert(src.len >= 0); if (src_region_end > (unsigned long long) src.len) { result = PyErr_Format(PyExc_OverflowError, "region runs off end of src"); goto clean_and_return; } unsigned long long dest_size; if (!INTEGRAL_ASSIGNMENT_FITS(&dest_size, dest.len)) { result = PyErr_Format(PyExc_OverflowError, "invalid dest size"); goto clean_and_return; } if (dest_off > dest_size) { result = PyErr_Format(PyExc_OverflowError, "region runs off end of dest"); goto clean_and_return; } size_t length; if (!INTEGRAL_ASSIGNMENT_FITS(&length, src_n)) { result = PyErr_Format(PyExc_OverflowError, "src_n overflows size_t"); goto clean_and_return; } int rc = mincore((void *)(src.buf + src_off), length, (BUP_MINCORE_BUF_TYPE *) (dest.buf + dest_off)); if (rc != 0) { result = PyErr_SetFromErrno(PyExc_OSError); goto clean_and_return; } result = Py_BuildValue("O", Py_None); clean_and_return: PyBuffer_Release(&src); PyBuffer_Release(&dest); return result; } #endif /* def BUP_MINCORE_BUF_TYPE */ static unsigned int vuint_encode(long long val, char *buf) { unsigned 
int len = 0; if (val < 0) { PyErr_SetString(PyExc_Exception, "vuints must not be negative"); return 0; } do { buf[len] = val & 0x7f; val >>= 7; if (val) buf[len] |= 0x80; len++; } while (val); return len; } static unsigned int vint_encode(long long val, char *buf) { unsigned int len = 1; char sign = 0; if (val < 0) { sign = 0x40; val = -val; } buf[0] = (val & 0x3f) | sign; val >>= 6; if (val) buf[0] |= 0x80; while (val) { buf[len] = val & 0x7f; val >>= 7; if (val) buf[len] |= 0x80; len++; } return len; } static PyObject *bup_vuint_encode(PyObject *self, PyObject *args) { long long val; // size the buffer appropriately - need 8 bits to encode each 7 char buf[(sizeof(val) + 1) / 7 * 8]; if (!PyArg_ParseTuple(args, "L", &val)) return NULL; unsigned int len = vuint_encode(val, buf); if (!len) return NULL; return PyBytes_FromStringAndSize(buf, len); } static PyObject *bup_vint_encode(PyObject *self, PyObject *args) { long long val; // size the buffer appropriately - need 8 bits to encode each 7 char buf[(sizeof(val) + 1) / 7 * 8]; if (!PyArg_ParseTuple(args, "L", &val)) return NULL; return PyBytes_FromStringAndSize(buf, vint_encode(val, buf)); } static PyObject *tuple_from_cstrs(char **cstrs) { // Assumes list is null terminated size_t n = 0; while(cstrs[n] != NULL) n++; Py_ssize_t sn; if (!INTEGRAL_ASSIGNMENT_FITS(&sn, n)) return PyErr_Format(PyExc_OverflowError, "string array too large"); PyObject *result = PyTuple_New(sn); Py_ssize_t i = 0; for (i = 0; i < sn; i++) { PyObject *gname = Py_BuildValue(cstr_argf, cstrs[i]); if (gname == NULL) { Py_DECREF(result); return NULL; } PyTuple_SET_ITEM(result, i, gname); } return result; } static PyObject *appropriate_errno_ex(void) { switch (errno) { case ENOMEM: return PyErr_NoMemory(); case EIO: case EMFILE: case ENFILE: // In 3.3 IOError was merged into OSError. 
return PyErr_SetFromErrno(PyExc_IOError); default: return PyErr_SetFromErrno(PyExc_OSError); } } static PyObject *pwd_struct_to_py(const struct passwd *pwd) { // We can check the known (via POSIX) signed and unsigned types at // compile time, but not (easily) the unspecified types, so handle // those via INTEGER_TO_PY(). if (pwd == NULL) Py_RETURN_NONE; return Py_BuildValue(cstr_argf cstr_argf "OO" cstr_argf cstr_argf cstr_argf, pwd->pw_name, pwd->pw_passwd, INTEGER_TO_PY(pwd->pw_uid), INTEGER_TO_PY(pwd->pw_gid), pwd->pw_gecos, pwd->pw_dir, pwd->pw_shell); } static PyObject *bup_getpwuid(PyObject *self, PyObject *args) { unsigned long long py_uid; if (!PyArg_ParseTuple(args, "K", &py_uid)) return NULL; uid_t uid; if (!INTEGRAL_ASSIGNMENT_FITS(&uid, py_uid)) return PyErr_Format(PyExc_OverflowError, "uid too large for uid_t"); errno = 0; struct passwd *pwd = getpwuid(uid); if (!pwd && errno) return appropriate_errno_ex(); return pwd_struct_to_py(pwd); } static PyObject *bup_getpwnam(PyObject *self, PyObject *args) { PyObject *py_name; if (!PyArg_ParseTuple(args, "S", &py_name)) return NULL; char *name = PyBytes_AS_STRING(py_name); errno = 0; struct passwd *pwd = getpwnam(name); if (!pwd && errno) return appropriate_errno_ex(); return pwd_struct_to_py(pwd); } static PyObject *grp_struct_to_py(const struct group *grp) { // We can check the known (via POSIX) signed and unsigned types at // compile time, but not (easily) the unspecified types, so handle // those via INTEGER_TO_PY(). 
if (grp == NULL) Py_RETURN_NONE; PyObject *members = tuple_from_cstrs(grp->gr_mem); if (members == NULL) return NULL; return Py_BuildValue(cstr_argf cstr_argf "OO", grp->gr_name, grp->gr_passwd, INTEGER_TO_PY(grp->gr_gid), members); } static PyObject *bup_getgrgid(PyObject *self, PyObject *args) { unsigned long long py_gid; if (!PyArg_ParseTuple(args, "K", &py_gid)) return NULL; gid_t gid; if (!INTEGRAL_ASSIGNMENT_FITS(&gid, py_gid)) return PyErr_Format(PyExc_OverflowError, "gid too large for gid_t"); errno = 0; struct group *grp = getgrgid(gid); if (!grp && errno) return appropriate_errno_ex(); return grp_struct_to_py(grp); } static PyObject *bup_getgrnam(PyObject *self, PyObject *args) { PyObject *py_name; if (!PyArg_ParseTuple(args, "S", &py_name)) return NULL; char *name = PyBytes_AS_STRING(py_name); errno = 0; struct group *grp = getgrnam(name); if (!grp && errno) return appropriate_errno_ex(); return grp_struct_to_py(grp); } static PyObject *bup_gethostname(PyObject *mod, PyObject *ignore) { #ifdef HOST_NAME_MAX char buf[HOST_NAME_MAX + 1] = {}; #else /* 'SUSv2 guarantees that "Host names are limited to 255 bytes".' 
*/ char buf[256] = {}; #endif if (gethostname(buf, sizeof(buf) - 1)) return PyErr_SetFromErrno(PyExc_IOError); buf[sizeof(buf) - 1] = 0; return PyBytes_FromString(buf); } #ifdef BUP_HAVE_READLINE static char *cstr_from_bytes(PyObject *bytes) { char *buf; Py_ssize_t length; int rc = PyBytes_AsStringAndSize(bytes, &buf, &length); if (rc == -1) return NULL; size_t c_len; if (!INT_ADD_OK(length, 1, &c_len)) { PyErr_Format(PyExc_OverflowError, "Cannot convert ssize_t sized bytes object (%zd) to C string", length); return NULL; } char *result = checked_malloc(c_len, sizeof(char)); if (!result) return NULL; memcpy(result, buf, length); result[length] = 0; return result; } static char **cstrs_from_seq(PyObject *seq) { char **result = NULL; seq = PySequence_Fast(seq, "Cannot convert sequence items to C strings"); if (!seq) return NULL; const Py_ssize_t len = PySequence_Fast_GET_SIZE(seq); if (len > PY_SSIZE_T_MAX - 1) { PyErr_Format(PyExc_OverflowError, "Sequence length %zd too large for conversion to C array", len); goto finish; } result = checked_malloc(len + 1, sizeof(char *)); if (!result) goto finish; Py_ssize_t i = 0; for (i = 0; i < len; i++) { PyObject *item = PySequence_Fast_GET_ITEM(seq, i); if (!item) goto abandon_result; result[i] = cstr_from_bytes(item); if (!result[i]) { i--; goto abandon_result; } } result[len] = NULL; goto finish; abandon_result: if (result) { for (; i > 0; i--) free(result[i]); free(result); result = NULL; } finish: Py_DECREF(seq); return result; } static char* our_word_break_chars = NULL; static PyObject * bup_set_completer_word_break_characters(PyObject *self, PyObject *args) { char *bytes; if (!PyArg_ParseTuple(args, cstr_argf, &bytes)) return NULL; char *prev = our_word_break_chars; char *next = strdup(bytes); if (!next) return PyErr_NoMemory(); our_word_break_chars = next; rl_completer_word_break_characters = next; if (prev) free(prev); Py_RETURN_NONE; } static PyObject * bup_get_completer_word_break_characters(PyObject *self, PyObject 
*args) { return PyBytes_FromString(rl_completer_word_break_characters); } static PyObject *bup_get_line_buffer(PyObject *self, PyObject *args) { return PyBytes_FromString(rl_line_buffer); } static PyObject * bup_parse_and_bind(PyObject *self, PyObject *args) { char *bytes; if (!PyArg_ParseTuple(args, cstr_argf ":parse_and_bind", &bytes)) return NULL; char *tmp = strdup(bytes); // Because it may modify the arg if (!tmp) return PyErr_NoMemory(); int rc = rl_parse_and_bind(tmp); free(tmp); if (rc != 0) return PyErr_Format(PyExc_OSError, "system rl_parse_and_bind failed (%d)", rc); Py_RETURN_NONE; } static PyObject *py_on_attempted_completion; static char **prev_completions; static char **on_attempted_completion(const char *text, int start, int end) { if (!py_on_attempted_completion) return NULL; char **result = NULL; PyObject *py_result = PyObject_CallFunction(py_on_attempted_completion, cstr_argf "ii", text, start, end); if (!py_result) return NULL; if (py_result != Py_None) { result = cstrs_from_seq(py_result); free(prev_completions); prev_completions = result; } Py_DECREF(py_result); return result; } static PyObject * bup_set_attempted_completion_function(PyObject *self, PyObject *args) { PyObject *completer; if (!PyArg_ParseTuple(args, "O", &completer)) return NULL; PyObject *prev = py_on_attempted_completion; if (completer == Py_None) { py_on_attempted_completion = NULL; rl_attempted_completion_function = NULL; } else { py_on_attempted_completion = completer; rl_attempted_completion_function = on_attempted_completion; Py_INCREF(completer); } Py_XDECREF(prev); Py_RETURN_NONE; } static PyObject *py_on_completion_entry; static char *on_completion_entry(const char *text, int state) { if (!py_on_completion_entry) return NULL; PyObject *py_result = PyObject_CallFunction(py_on_completion_entry, cstr_argf "i", text, state); if (!py_result) return NULL; char *result = (py_result == Py_None) ? 
NULL : cstr_from_bytes(py_result); Py_DECREF(py_result); return result; } static PyObject * bup_set_completion_entry_function(PyObject *self, PyObject *args) { PyObject *completer; if (!PyArg_ParseTuple(args, "O", &completer)) return NULL; PyObject *prev = py_on_completion_entry; if (completer == Py_None) { py_on_completion_entry = NULL; rl_completion_entry_function = NULL; } else { py_on_completion_entry = completer; rl_completion_entry_function = on_completion_entry; Py_INCREF(completer); } Py_XDECREF(prev); Py_RETURN_NONE; } static PyObject * bup_readline(PyObject *self, PyObject *args) { char *prompt; if (!PyArg_ParseTuple(args, cstr_argf, &prompt)) return NULL; char *line = readline(prompt); if (!line) return PyErr_Format(PyExc_EOFError, "readline EOF"); PyObject *result = PyBytes_FromString(line); free(line); return result; } #endif // defined BUP_HAVE_READLINE #if defined(HAVE_SYS_ACL_H) && \ defined(HAVE_ACL_LIBACL_H) && \ defined(HAVE_ACL_EXTENDED_FILE) && \ defined(HAVE_ACL_GET_FILE) && \ defined(HAVE_ACL_TO_ANY_TEXT) && \ defined(HAVE_ACL_FROM_TEXT) && \ defined(HAVE_ACL_SET_FILE) #define ACL_SUPPORT 1 #include #include // Returns // 0 for success // -1 for errors, with python exception set // -2 for ignored errors (not supported) static int bup_read_acl_to_text(const char *name, acl_type_t type, char **txt, char **num) { acl_t acl; acl = acl_get_file(name, type); if (!acl) { if (errno == EOPNOTSUPP || errno == ENOSYS) return -2; PyErr_SetFromErrno(PyExc_IOError); return -1; } *num = NULL; *txt = acl_to_any_text(acl, "", ',', TEXT_ABBREVIATE); if (*txt) *num = acl_to_any_text(acl, "", ',', TEXT_ABBREVIATE | TEXT_NUMERIC_IDS); if (*txt && *num) return 0; if (errno == ENOMEM) PyErr_NoMemory(); else PyErr_SetFromErrno(PyExc_IOError); if (*txt) acl_free((acl_t)*txt); if (*num) acl_free((acl_t)*num); return -1; } static PyObject *bup_read_acl(PyObject *self, PyObject *args) { char *name; int isdir, rv; PyObject *ret = NULL; char *acl_txt = NULL, *acl_num = 
NULL; if (!PyArg_ParseTuple(args, cstr_argf "i", &name, &isdir)) return NULL; if (!acl_extended_file(name)) Py_RETURN_NONE; rv = bup_read_acl_to_text(name, ACL_TYPE_ACCESS, &acl_txt, &acl_num); if (rv) goto out; if (isdir) { char *def_txt = NULL, *def_num = NULL; rv = bup_read_acl_to_text(name, ACL_TYPE_DEFAULT, &def_txt, &def_num); if (rv) goto out; ret = Py_BuildValue("[" cstr_argf cstr_argf cstr_argf cstr_argf "]", acl_txt, acl_num, def_txt, def_num); if (def_txt) acl_free((acl_t)def_txt); if (def_num) acl_free((acl_t)def_num); } else { ret = Py_BuildValue("[" cstr_argf cstr_argf "]", acl_txt, acl_num); } out: if (acl_txt) acl_free((acl_t)acl_txt); if (acl_num) acl_free((acl_t)acl_num); if (rv == -2) Py_RETURN_NONE; return ret; } static int bup_apply_acl_string(const char *name, acl_type_t type, const char *s) { acl_t acl = acl_from_text(s); int ret = 0; if (!acl) { PyErr_SetFromErrno(PyExc_IOError); return -1; } if (acl_set_file(name, type, acl)) { PyErr_SetFromErrno(PyExc_IOError); ret = -1; } acl_free(acl); return ret; } static PyObject *bup_apply_acl(PyObject *self, PyObject *args) { char *name, *acl, *def = NULL; if (!PyArg_ParseTuple(args, cstr_argf cstr_argf "|" cstr_argf, &name, &acl, &def)) return NULL; if (bup_apply_acl_string(name, ACL_TYPE_ACCESS, acl)) return NULL; if (def && bup_apply_acl_string(name, ACL_TYPE_DEFAULT, def)) return NULL; Py_RETURN_NONE; } #endif static PyObject *bup_limited_vint_pack(PyObject *self, PyObject *args) { const char *fmt; PyObject *packargs, *result; Py_ssize_t sz, i, bufsz; char *buf, *pos, *end; if (!PyArg_ParseTuple(args, "sO", &fmt, &packargs)) return NULL; if (!PyTuple_Check(packargs)) return PyErr_Format(PyExc_Exception, "pack() arg must be tuple"); sz = PyTuple_GET_SIZE(packargs); if (sz != (Py_ssize_t)strlen(fmt)) return PyErr_Format(PyExc_Exception, "number of arguments (%ld) does not match format string (%ld)", (unsigned long)sz, (unsigned long)strlen(fmt)); if (sz > INT_MAX / 20) return 
PyErr_Format(PyExc_Exception, "format is far too long"); // estimate no more than 20 bytes for each on average, the maximum // vint/vuint we can encode is anyway 10 bytes, so this gives us // some headroom for a few strings before we need to realloc ... bufsz = sz * 20; buf = malloc(bufsz); if (!buf) return PyErr_NoMemory(); pos = buf; end = buf + bufsz; for (i = 0; i < sz; i++) { PyObject *item = PyTuple_GET_ITEM(packargs, i); const char *bytes; switch (fmt[i]) { case 'V': { long long val = PyLong_AsLongLong(item); if (val == -1 && PyErr_Occurred()) return PyErr_Format(PyExc_OverflowError, "pack arg %d invalid", (int)i); if (end - pos < 10) goto overflow; pos += vuint_encode(val, pos); break; } case 'v': { long long val = PyLong_AsLongLong(item); if (val == -1 && PyErr_Occurred()) return PyErr_Format(PyExc_OverflowError, "pack arg %d invalid", (int)i); if (end - pos < 10) goto overflow; pos += vint_encode(val, pos); break; } case 's': { bytes = PyBytes_AsString(item); if (!bytes) goto error; if (end - pos < 10) goto overflow; Py_ssize_t val = PyBytes_GET_SIZE(item); pos += vuint_encode(val, pos); if (end - pos < val) goto overflow; memcpy(pos, bytes, val); pos += val; break; } default: PyErr_Format(PyExc_Exception, "unknown xpack format string item %c", fmt[i]); goto error; } } result = PyBytes_FromStringAndSize(buf, pos - buf); free(buf); return result; overflow: PyErr_SetString(PyExc_OverflowError, "buffer (potentially) overflowed"); error: free(buf); return NULL; } static PyMethodDef helper_methods[] = { { "write_sparsely", bup_write_sparsely, METH_VARARGS, "Write buf excepting zeros at the end. Return trailing zero count." }, { "selftest", selftest, METH_VARARGS, "Check that the rolling checksum rolls correctly (for unit tests)." }, { "blobbits", blobbits, METH_VARARGS, "Return the number of bits in the rolling checksum." }, { "splitbuf", splitbuf, METH_VARARGS, "Split a list of strings based on a rolling checksum." 
}, { "bitmatch", bitmatch, METH_VARARGS, "Count the number of matching prefix bits between two strings." }, { "firstword", firstword, METH_VARARGS, "Return an int corresponding to the first 32 bits of buf." }, { "bloom_contains", bloom_contains, METH_VARARGS, "Check if a bloom filter of 2^nbits bytes contains an object" }, { "bloom_add", bloom_add, METH_VARARGS, "Add an object to a bloom filter of 2^nbits bytes" }, { "extract_bits", extract_bits, METH_VARARGS, "Take the first 'nbits' bits from 'buf' and return them as an int." }, { "merge_into", merge_into, METH_VARARGS, "Merges a bunch of idx and midx files into a single midx." }, { "write_idx", write_idx, METH_VARARGS, "Write a PackIdxV2 file from an idx list of lists of tuples" }, { "write_random", write_random, METH_VARARGS, "Write random bytes to the given file descriptor" }, { "random_sha", random_sha, METH_VARARGS, "Return a random 20-byte string" }, { "open_noatime", open_noatime, METH_VARARGS, "open() the given filename for read with O_NOATIME if possible" }, { "fadvise_done", fadvise_done, METH_VARARGS, "Inform the kernel that we're finished with earlier parts of a file" }, #ifdef BUP_HAVE_FILE_ATTRS { "get_linux_file_attr", bup_get_linux_file_attr, METH_VARARGS, "Return the Linux attributes for the given file." }, #endif #ifdef BUP_HAVE_FILE_ATTRS { "set_linux_file_attr", bup_set_linux_file_attr, METH_VARARGS, "Set the Linux attributes for the given file." }, #endif #ifndef BUP_USE_PYTHON_UTIME // just for Python 2 now #ifdef HAVE_UTIMENSAT { "bup_utimensat", bup_utimensat, METH_VARARGS, "Change path timestamps with nanosecond precision (POSIX)." }, #endif #ifdef HAVE_UTIMES { "bup_utimes", bup_utimes, METH_VARARGS, "Change path timestamps with microsecond precision." }, #endif #ifdef HAVE_LUTIMES { "bup_lutimes", bup_lutimes, METH_VARARGS, "Change path timestamps with microsecond precision;" " don't follow symlinks." 
}, #endif #endif // defined BUP_USE_PYTHON_UTIME { "stat", bup_stat, METH_VARARGS, "Extended version of stat." }, { "lstat", bup_lstat, METH_VARARGS, "Extended version of lstat." }, { "fstat", bup_fstat, METH_VARARGS, "Extended version of fstat." }, #ifdef HAVE_TM_TM_GMTOFF { "localtime", bup_localtime, METH_VARARGS, "Return struct_time elements plus the timezone offset and name." }, #endif { "bytescmp", bup_bytescmp, METH_VARARGS, "Return a negative value if x < y, zero if equal, positive otherwise."}, { "cat_bytes", bup_cat_bytes, METH_VARARGS, "For (x_bytes, x_ofs, x_n, y_bytes, y_ofs, y_n) arguments, return their concatenation."}, #ifdef BUP_MINCORE_BUF_TYPE { "mincore", bup_mincore, METH_VARARGS, "For mincore(src, src_n, src_off, dest, dest_off)" " call the system mincore(src + src_off, src_n, &dest[dest_off])." }, #endif { "getpwuid", bup_getpwuid, METH_VARARGS, "Return the password database entry for the given numeric user id," " as a tuple with all C strings as bytes(), or None if the user does" " not exist." }, { "getpwnam", bup_getpwnam, METH_VARARGS, "Return the password database entry for the given user name," " as a tuple with all C strings as bytes(), or None if the user does" " not exist." }, { "getgrgid", bup_getgrgid, METH_VARARGS, "Return the group database entry for the given numeric group id," " as a tuple with all C strings as bytes(), or None if the group does" " not exist." }, { "getgrnam", bup_getgrnam, METH_VARARGS, "Return the group database entry for the given group name," " as a tuple with all C strings as bytes(), or None if the group does" " not exist." }, { "gethostname", bup_gethostname, METH_NOARGS, "Return the current hostname (as bytes)" }, #ifdef BUP_HAVE_READLINE { "set_completion_entry_function", bup_set_completion_entry_function, METH_VARARGS, "Set rl_completion_entry_function. Called as f(text, state)." 
}, { "set_attempted_completion_function", bup_set_attempted_completion_function, METH_VARARGS, "Set rl_attempted_completion_function. Called as f(text, start, end)." }, { "parse_and_bind", bup_parse_and_bind, METH_VARARGS, "Call rl_parse_and_bind." }, { "get_line_buffer", bup_get_line_buffer, METH_NOARGS, "Return rl_line_buffer." }, { "get_completer_word_break_characters", bup_get_completer_word_break_characters, METH_NOARGS, "Return rl_completer_word_break_characters." }, { "set_completer_word_break_characters", bup_set_completer_word_break_characters, METH_VARARGS, "Set rl_completer_word_break_characters." }, { "readline", bup_readline, METH_VARARGS, "Call readline(prompt)." }, #endif // defined BUP_HAVE_READLINE #ifdef ACL_SUPPORT { "read_acl", bup_read_acl, METH_VARARGS, "read_acl(name, isdir)\n\n" "Read ACLs for the given file/dirname and return the correctly encoded" " list [txt, num, def_tx, def_num] (the def_* being empty bytestrings" " unless the second argument 'isdir' is True)." }, { "apply_acl", bup_apply_acl, METH_VARARGS, "apply_acl(name, acl, def=None)\n\n" "Given a file/dirname (bytes) and the ACLs to restore, do that." }, #endif /* HAVE_ACLS */ { "vuint_encode", bup_vuint_encode, METH_VARARGS, "encode an int to vuint" }, { "vint_encode", bup_vint_encode, METH_VARARGS, "encode an int to vint" }, { "limited_vint_pack", bup_limited_vint_pack, METH_VARARGS, "Try to pack vint/vuint/str, throwing OverflowError when unable." 
}, { NULL, NULL, 0, NULL }, // sentinel }; static void test_integral_assignment_fits(void) { assert(sizeof(signed short) == sizeof(unsigned short)); assert(sizeof(signed short) < sizeof(signed long long)); assert(sizeof(signed short) < sizeof(unsigned long long)); assert(sizeof(unsigned short) < sizeof(signed long long)); assert(sizeof(unsigned short) < sizeof(unsigned long long)); assert(sizeof(Py_ssize_t) <= sizeof(size_t)); { signed short ss, ssmin = SHRT_MIN, ssmax = SHRT_MAX; unsigned short us, usmax = USHRT_MAX; signed long long sllmin = LLONG_MIN, sllmax = LLONG_MAX; unsigned long long ullmax = ULLONG_MAX; assert(INTEGRAL_ASSIGNMENT_FITS(&ss, ssmax)); assert(INTEGRAL_ASSIGNMENT_FITS(&ss, ssmin)); assert(!INTEGRAL_ASSIGNMENT_FITS(&ss, usmax)); assert(!INTEGRAL_ASSIGNMENT_FITS(&ss, sllmin)); assert(!INTEGRAL_ASSIGNMENT_FITS(&ss, sllmax)); assert(!INTEGRAL_ASSIGNMENT_FITS(&ss, ullmax)); assert(INTEGRAL_ASSIGNMENT_FITS(&us, usmax)); assert(!INTEGRAL_ASSIGNMENT_FITS(&us, ssmin)); assert(!INTEGRAL_ASSIGNMENT_FITS(&us, sllmin)); assert(!INTEGRAL_ASSIGNMENT_FITS(&us, sllmax)); assert(!INTEGRAL_ASSIGNMENT_FITS(&us, ullmax)); } } static int setup_module(PyObject *m) { // FIXME: migrate these tests to configure, or at least don't // possibly crash the whole application. Check against the type // we're going to use when passing to python. Other stat types // are tested at runtime. assert(sizeof(ino_t) <= sizeof(unsigned PY_LONG_LONG)); assert(sizeof(off_t) <= sizeof(PY_LONG_LONG)); assert(sizeof(blksize_t) <= sizeof(PY_LONG_LONG)); assert(sizeof(blkcnt_t) <= sizeof(PY_LONG_LONG)); // Just be sure (relevant when passing timestamps back to Python above). 
assert(sizeof(PY_LONG_LONG) <= sizeof(long long)); assert(sizeof(unsigned PY_LONG_LONG) <= sizeof(unsigned long long)); // At least for INTEGER_TO_PY assert(sizeof(intmax_t) <= sizeof(long long)); assert(sizeof(uintmax_t) <= sizeof(unsigned long long)); test_integral_assignment_fits(); // Originally required by append_sparse_region() { off_t probe; if (!INTEGRAL_ASSIGNMENT_FITS(&probe, INT_MAX)) { fprintf(stderr, "off_t can't hold INT_MAX; please report.\n"); exit(1); } } char *e; { PyObject *value; value = INTEGER_TO_PY(INT_MAX); PyObject_SetAttrString(m, "INT_MAX", value); Py_DECREF(value); value = INTEGER_TO_PY(UINT_MAX); PyObject_SetAttrString(m, "UINT_MAX", value); Py_DECREF(value); } #ifndef BUP_USE_PYTHON_UTIME // just for Python 2 now #ifdef HAVE_UTIMENSAT { PyObject *value; value = INTEGER_TO_PY(AT_FDCWD); PyObject_SetAttrString(m, "AT_FDCWD", value); Py_DECREF(value); value = INTEGER_TO_PY(AT_SYMLINK_NOFOLLOW); PyObject_SetAttrString(m, "AT_SYMLINK_NOFOLLOW", value); Py_DECREF(value); value = INTEGER_TO_PY(UTIME_NOW); PyObject_SetAttrString(m, "UTIME_NOW", value); Py_DECREF(value); } #endif #endif // defined BUP_USE_PYTHON_UTIME #ifdef BUP_HAVE_MINCORE_INCORE { PyObject *value; value = INTEGER_TO_PY(MINCORE_INCORE); PyObject_SetAttrString(m, "MINCORE_INCORE", value); Py_DECREF(value); } #endif e = getenv("BUP_FORCE_TTY"); get_state(m)->istty2 = isatty(2) || (atoi(e ? 
e : "0") & 2); return 1; } static struct PyModuleDef helpers_def = { PyModuleDef_HEAD_INIT, "_helpers", NULL, sizeof(state_t), helper_methods, NULL, NULL, // helpers_traverse, NULL, // helpers_clear, NULL }; PyMODINIT_FUNC PyInit__helpers(void) { PyObject *module = PyModule_Create(&helpers_def); if (module == NULL) return NULL; if (!setup_module(module)) { Py_DECREF(module); return NULL; } return module; } bup-0.33.3/lib/bup/bloom.py000066400000000000000000000265031454333004200154100ustar00rootroot00000000000000"""Discussion of bloom constants for bup: There are four basic things to consider when building a bloom filter: The size, in bits, of the filter The capacity, in entries, of the filter The probability of a false positive that is tolerable The number of bits readily available to use for addressing filter bits There is one major tunable that is not directly related to the above: k: the number of bits set in the filter per entry Here's a wall of numbers showing the relationship between k; the ratio between the filter size in bits and the entries in the filter; and pfalse_positive: mn|k=3 |k=4 |k=5 |k=6 |k=7 |k=8 |k=9 |k=10 |k=11 8|3.05794|2.39687|2.16792|2.15771|2.29297|2.54917|2.92244|3.41909|4.05091 9|2.27780|1.65770|1.40703|1.32721|1.34892|1.44631|1.61138|1.84491|2.15259 10|1.74106|1.18133|0.94309|0.84362|0.81937|0.84555|0.91270|1.01859|1.16495 11|1.36005|0.86373|0.65018|0.55222|0.51259|0.50864|0.53098|0.57616|0.64387 12|1.08231|0.64568|0.45945|0.37108|0.32939|0.31424|0.31695|0.33387|0.36380 13|0.87517|0.49210|0.33183|0.25527|0.21689|0.19897|0.19384|0.19804|0.21013 14|0.71759|0.38147|0.24433|0.17934|0.14601|0.12887|0.12127|0.12012|0.12399 15|0.59562|0.30019|0.18303|0.12840|0.10028|0.08523|0.07749|0.07440|0.07468 16|0.49977|0.23941|0.13925|0.09351|0.07015|0.05745|0.05049|0.04700|0.04587 17|0.42340|0.19323|0.10742|0.06916|0.04990|0.03941|0.03350|0.03024|0.02870 18|0.36181|0.15765|0.08392|0.05188|0.03604|0.02748|0.02260|0.01980|0.01827 
19|0.31160|0.12989|0.06632|0.03942|0.02640|0.01945|0.01549|0.01317|0.01182 20|0.27026|0.10797|0.05296|0.03031|0.01959|0.01396|0.01077|0.00889|0.00777 21|0.23591|0.09048|0.04269|0.02356|0.01471|0.01014|0.00759|0.00609|0.00518 22|0.20714|0.07639|0.03473|0.01850|0.01117|0.00746|0.00542|0.00423|0.00350 23|0.18287|0.06493|0.02847|0.01466|0.00856|0.00555|0.00392|0.00297|0.00240 24|0.16224|0.05554|0.02352|0.01171|0.00663|0.00417|0.00286|0.00211|0.00166 25|0.14459|0.04779|0.01957|0.00944|0.00518|0.00316|0.00211|0.00152|0.00116 26|0.12942|0.04135|0.01639|0.00766|0.00408|0.00242|0.00157|0.00110|0.00082 27|0.11629|0.03595|0.01381|0.00626|0.00324|0.00187|0.00118|0.00081|0.00059 28|0.10489|0.03141|0.01170|0.00515|0.00259|0.00146|0.00090|0.00060|0.00043 29|0.09492|0.02756|0.00996|0.00426|0.00209|0.00114|0.00069|0.00045|0.00031 30|0.08618|0.02428|0.00853|0.00355|0.00169|0.00090|0.00053|0.00034|0.00023 31|0.07848|0.02147|0.00733|0.00297|0.00138|0.00072|0.00041|0.00025|0.00017 32|0.07167|0.01906|0.00633|0.00250|0.00113|0.00057|0.00032|0.00019|0.00013 Here's a table showing available repository size for a given pfalse_positive and three values of k (assuming we only use the 160 bit SHA1 for addressing the filter and 8192bytes per object): pfalse|obj k=4 |cap k=4 |obj k=5 |cap k=5 |obj k=6 |cap k=6 2.500%|139333497228|1038.11 TiB|558711157|4262.63 GiB|13815755|105.41 GiB 1.000%|104489450934| 778.50 TiB|436090254|3327.10 GiB|11077519| 84.51 GiB 0.125%| 57254889824| 426.58 TiB|261732190|1996.86 GiB| 7063017| 55.89 GiB This eliminates pretty neatly any k>6 as long as we use the raw SHA for addressing. filter size scales linearly with repository size for a given k and pfalse. 
Here's a table of filter sizes for a 1 TiB repository: pfalse| k=3 | k=4 | k=5 | k=6 2.500%| 138.78 MiB | 126.26 MiB | 123.00 MiB | 123.37 MiB 1.000%| 197.83 MiB | 168.36 MiB | 157.58 MiB | 153.87 MiB 0.125%| 421.14 MiB | 307.26 MiB | 262.56 MiB | 241.32 MiB For bup: * We want the bloom filter to fit in memory; if it doesn't, the k pagefaults per lookup will be worse than the two required for midx. * We want the pfalse_positive to be low enough that the cost of sometimes faulting on the midx doesn't overcome the benefit of the bloom filter. * We have readily available 160 bits for addressing the filter. * We want to be able to have a single bloom address entire repositories of reasonable size. Based on these parameters, a combination of k=4 and k=5 provides the behavior that bup needs. As such, I've implemented bloom addressing, adding and checking functions in C for these two values. Because k=5 requires less space and gives better overall pfalse_positive performance, it is preferred if a table with k=5 can represent the repository. None of this tells us what max_pfalse_positive to choose. Brandon Low 2011-02-04 """ from __future__ import absolute_import import os, math, struct from bup import _helpers from bup.compat import pending_raise from bup.helpers import (debug1, debug2, log, mmap_read, mmap_readwrite, mmap_readwrite_private, unlink) BLOOM_VERSION = 2 MAX_BITS_EACH = 32 # Kinda arbitrary, but 4 bytes per entry is pretty big MAX_BLOOM_BITS = {4: 37, 5: 29} # 160/k-log2(8) MAX_PFALSE_POSITIVE = 1. # Totally arbitrary, needs benchmarking _total_searches = 0 _total_steps = 0 bloom_contains = _helpers.bloom_contains bloom_add = _helpers.bloom_add # FIXME: check bloom create() and ShaBloom handling/ownership of "f". # The ownership semantics should be clarified since the caller needs # to know who is responsible for closing it. class ShaBloom: """Wrapper which contains data from multiple index files. 
""" def __init__(self, filename, f=None, readwrite=False, expected=-1): self.closed = False self.name = filename self.readwrite = readwrite self.file = None self.map = None assert(filename.endswith(b'.bloom')) if readwrite: assert(expected > 0) self.file = f = f or open(filename, 'r+b') f.seek(0) # Decide if we want to mmap() the pages as writable ('immediate' # write) or else map them privately for later writing back to # the file ('delayed' write). A bloom table's write access # pattern is such that we dirty almost all the pages after adding # very few entries. But the table is so big that dirtying # *all* the pages often exceeds Linux's default # /proc/sys/vm/dirty_ratio or /proc/sys/vm/dirty_background_ratio, # thus causing it to start flushing the table before we're # finished... even though there's more than enough space to # store the bloom table in RAM. # # To work around that behaviour, if we calculate that we'll # probably end up touching the whole table anyway (at least # one bit flipped per memory page), let's use a "private" mmap, # which defeats Linux's ability to flush it to disk. Then we'll # flush it as one big lump during close(). 
pages = os.fstat(f.fileno()).st_size // 4096 * 5 # assume k=5 self.delaywrite = expected > pages debug1('bloom: delaywrite=%r\n' % self.delaywrite) if self.delaywrite: self.map = mmap_readwrite_private(self.file, close=False) else: self.map = mmap_readwrite(self.file, close=False) else: self.file = f or open(filename, 'rb') self.map = mmap_read(self.file) got = self.map[0:4] if got != b'BLOM': log('Warning: invalid BLOM header (%r) in %r\n' % (got, filename)) self._init_failed() return ver = struct.unpack('!I', self.map[4:8])[0] if ver < BLOOM_VERSION: log('Warning: ignoring old-style (v%d) bloom %r\n' % (ver, filename)) self._init_failed() return if ver > BLOOM_VERSION: log('Warning: ignoring too-new (v%d) bloom %r\n' % (ver, filename)) self._init_failed() return self.bits, self.k, self.entries = struct.unpack('!HHI', self.map[8:16]) idxnamestr = self.map[16 + 2**self.bits:] if idxnamestr: self.idxnames = idxnamestr.split(b'\0') else: self.idxnames = [] def _init_failed(self): self.idxnames = [] self.bits = self.entries = 0 self.map, tmp_map = None, self.map self.file, tmp_file = None, self.file try: if tmp_map: tmp_map.close() finally: # This won't handle pending exceptions correctly in py2 if self.file: tmp_file.close() def valid(self): return self.map and self.bits def close(self): self.closed = True try: if self.map and self.readwrite: debug2("bloom: closing with %d entries\n" % self.entries) self.map[12:16] = struct.pack('!I', self.entries) if self.delaywrite: self.file.seek(0) self.file.write(self.map) else: self.map.flush() self.file.seek(16 + 2**self.bits) if self.idxnames: self.file.write(b'\0'.join(self.idxnames)) finally: # This won't handle pending exceptions correctly in py2 self._init_failed() def __del__(self): assert self.closed def __enter__(self): return self def __exit__(self, type, value, traceback): with pending_raise(value, rethrow=False): self.close() def pfalse_positive(self, additional=0): n = self.entries + additional m = 8*2**self.bits k 
= self.k return 100*(1-math.exp(-k*float(n)/m))**k def add(self, ids): """Add the hashes in ids (packed binary 20-bytes) to the filter.""" if not self.map: raise Exception("Cannot add to closed bloom") self.entries += bloom_add(self.map, ids, self.bits, self.k) def add_idx(self, ix): """Add the object to the filter.""" self.add(ix.shatable) self.idxnames.append(os.path.basename(ix.name)) def exists(self, sha): """Return nonempty if the object probably exists in the bloom filter. If this function returns false, the object definitely does not exist. If it returns true, there is a small probability that it exists anyway, so you'll have to check it some other way. """ global _total_searches, _total_steps _total_searches += 1 if not self.map: return None found, steps = bloom_contains(self.map, sha, self.bits, self.k) _total_steps += steps return found def __len__(self): return int(self.entries) def create(name, expected, delaywrite=None, f=None, k=None): """Create and return a bloom filter for `expected` entries.""" bits = int(math.floor(math.log(expected * MAX_BITS_EACH // 8, 2))) k = k or ((bits <= MAX_BLOOM_BITS[5]) and 5 or 4) if bits > MAX_BLOOM_BITS[k]: log('bloom: warning, max bits exceeded, non-optimal\n') bits = MAX_BLOOM_BITS[k] debug1('bloom: using 2^%d bytes and %d hash functions\n' % (bits, k)) f = f or open(name, 'w+b') f.write(b'BLOM') f.write(struct.pack('!IHHI', BLOOM_VERSION, bits, k, 0)) assert(f.tell() == 16) # NOTE: On some systems this will not extend+zerofill, but it does on # darwin, linux, bsd and solaris. f.truncate(16+2**bits) f.seek(0) if delaywrite != None and not delaywrite: # tell it to expect very few objects, forcing a direct mmap expected = 1 return ShaBloom(name, f=f, readwrite=True, expected=expected) def clear_bloom(dir): unlink(os.path.join(dir, b'bup.bloom')) bup-0.33.3/lib/bup/bupsplit.c000066400000000000000000000111721454333004200157300ustar00rootroot00000000000000/* * Copyright 2011 Avery Pennarun. All rights reserved. 
* * (This license applies to bupsplit.c and bupsplit.h only.) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY AVERY PENNARUN AND CONTRIBUTORS ``AS * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "bupsplit.h" #include #include #include #include // According to librsync/rollsum.h: // "We should make this something other than zero to improve the // checksum algorithm: tridge suggests a prime number." // apenwarr: I unscientifically tried 0 and 7919, and they both ended up // slightly worse than the librsync value of 31 for my arbitrary test data. #define ROLLSUM_CHAR_OFFSET 31 typedef struct { unsigned s1, s2; uint8_t window[BUP_WINDOWSIZE]; int wofs; } Rollsum; // These formulas are based on rollsum.h in the librsync project. 
static void rollsum_add(Rollsum *r, uint8_t drop, uint8_t add) { r->s1 += add - drop; r->s2 += r->s1 - (BUP_WINDOWSIZE * (drop + ROLLSUM_CHAR_OFFSET)); } static void rollsum_init(Rollsum *r) { r->s1 = BUP_WINDOWSIZE * ROLLSUM_CHAR_OFFSET; r->s2 = BUP_WINDOWSIZE * (BUP_WINDOWSIZE-1) * ROLLSUM_CHAR_OFFSET; r->wofs = 0; memset(r->window, 0, BUP_WINDOWSIZE); } // For some reason, gcc 4.3 (at least) optimizes badly if find_ofs() // is static and rollsum_roll is an inline function. Let's use a macro // here instead to help out the optimizer. #define rollsum_roll(r, ch) do { \ rollsum_add((r), (r)->window[(r)->wofs], (ch)); \ (r)->window[(r)->wofs] = (ch); \ (r)->wofs = ((r)->wofs + 1) % BUP_WINDOWSIZE; \ } while (0) static uint32_t rollsum_digest(Rollsum *r) { return (r->s1 << 16) | (r->s2 & 0xffff); } static uint32_t rollsum_sum(uint8_t *buf, size_t ofs, size_t len) { size_t count; Rollsum r; rollsum_init(&r); for (count = ofs; count < len; count++) rollsum_roll(&r, buf[count]); return rollsum_digest(&r); } int bupsplit_find_ofs(const unsigned char *buf, int len, int *bits) { Rollsum r; int count; rollsum_init(&r); for (count = 0; count < len; count++) { rollsum_roll(&r, buf[count]); if ((r.s2 & (BUP_BLOBSIZE-1)) == ((~0) & (BUP_BLOBSIZE-1))) { if (bits) { unsigned rsum = rollsum_digest(&r); rsum >>= BUP_BLOBBITS; for (*bits = BUP_BLOBBITS; (rsum >>= 1) & 1; (*bits)++) ; } return count+1; } } return 0; } #ifndef BUP_NO_SELFTEST #define BUP_SELFTEST_SIZE 100000 int bupsplit_selftest() { uint8_t *buf = malloc(BUP_SELFTEST_SIZE); uint32_t sum1a, sum1b, sum2a, sum2b, sum3a, sum3b; unsigned count; srandom(1); for (count = 0; count < BUP_SELFTEST_SIZE; count++) buf[count] = random(); sum1a = rollsum_sum(buf, 0, BUP_SELFTEST_SIZE); sum1b = rollsum_sum(buf, 1, BUP_SELFTEST_SIZE); sum2a = rollsum_sum(buf, BUP_SELFTEST_SIZE - BUP_WINDOWSIZE*5/2, BUP_SELFTEST_SIZE - BUP_WINDOWSIZE); sum2b = rollsum_sum(buf, 0, BUP_SELFTEST_SIZE - BUP_WINDOWSIZE); sum3a = rollsum_sum(buf, 0, 
BUP_WINDOWSIZE+3); sum3b = rollsum_sum(buf, 3, BUP_WINDOWSIZE+3); fprintf(stderr, "sum1a = 0x%08x\n", sum1a); fprintf(stderr, "sum1b = 0x%08x\n", sum1b); fprintf(stderr, "sum2a = 0x%08x\n", sum2a); fprintf(stderr, "sum2b = 0x%08x\n", sum2b); fprintf(stderr, "sum3a = 0x%08x\n", sum3a); fprintf(stderr, "sum3b = 0x%08x\n", sum3b); free(buf); return sum1a!=sum1b || sum2a!=sum2b || sum3a!=sum3b; } #endif // !BUP_NO_SELFTEST bup-0.33.3/lib/bup/bupsplit.h000066400000000000000000000034631454333004200157410ustar00rootroot00000000000000/* * Copyright 2011 Avery Pennarun. All rights reserved. * * (This license applies to bupsplit.c and bupsplit.h only.) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY AVERY PENNARUN AND CONTRIBUTORS ``AS * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef __BUPSPLIT_H #define __BUPSPLIT_H #define BUP_BLOBBITS (13) #define BUP_BLOBSIZE (1<\[)?((?(sb)[0-9a-f:]+|[^:/]+))(?(sb)\])' _port_rs = br'(?::(\d+))?' _path_rs = br'(/.*)?' _url_rx = re.compile(br'%s(?:%s%s)?%s' % (_protocol_rs, _host_rs, _port_rs, _path_rs), re.I) def parse_remote(remote): url_match = _url_rx.match(remote) if url_match: if not url_match.group(1) in (b'ssh', b'bup', b'file'): raise ClientError('unexpected protocol: %s' % url_match.group(1).decode('ascii')) return url_match.group(1,3,4,5) else: rs = remote.split(b':', 1) if len(rs) == 1 or rs[0] in (b'', b'-'): return b'file', None, None, rs[-1] else: return b'ssh', rs[0], None, rs[1] class Client: def __init__(self, remote, create=False): self.closed = False self._busy = self.conn = None self.sock = self.p = self.pout = self.pin = None try: is_reverse = environ.get(b'BUP_SERVER_REVERSE') if is_reverse: assert(not remote) remote = b'%s:' % is_reverse (self.protocol, self.host, self.port, self.dir) = parse_remote(remote) # The b'None' here matches python2's behavior of b'%s' % None == 'None', # python3 will (as of version 3.7.5) do the same for str ('%s' % None), # but crashes instead when doing b'%s' % None. 
cachehost = b'None' if self.host is None else self.host cachedir = b'None' if self.dir is None else self.dir self.cachedir = git.repo(b'index-cache/%s' % re.sub(br'[^@\w]', b'_', b'%s:%s' % (cachehost, cachedir))) if is_reverse: self.pout = os.fdopen(3, 'rb') self.pin = os.fdopen(4, 'wb') self.conn = Conn(self.pout, self.pin) else: if self.protocol in (b'ssh', b'file'): try: # FIXME: ssh and file shouldn't use the same module self.p = ssh.connect(self.host, self.port, b'server') self.pout = self.p.stdout self.pin = self.p.stdin self.conn = Conn(self.pout, self.pin) except OSError as e: raise ClientError('connect: %s' % e) from e elif self.protocol == b'bup': self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.connect((self.host, 1982 if self.port is None else int(self.port))) self.sockw = self.sock.makefile('wb') self.conn = DemuxConn(self.sock.fileno(), self.sockw) self._available_commands = self._get_available_commands() self._require_command(b'init-dir') self._require_command(b'set-dir') if self.dir: self.dir = re.sub(br'[\r\n]', ' ', self.dir) if create: self.conn.write(b'init-dir %s\n' % self.dir) else: self.conn.write(b'set-dir %s\n' % self.dir) self.check_ok() self.sync_indexes() except BaseException as ex: with pending_raise(ex): self.close() def close(self): if self.closed: return self.closed = True try: if self.conn and not self._busy: self.conn.write(b'quit\n') finally: try: if self.pin: self.pin.close() finally: try: self.pin = None if self.sock and self.sockw: self.sockw.close() self.sock.shutdown(socket.SHUT_WR) finally: try: if self.conn: self.conn.close() finally: try: self.conn = None if self.pout: self.pout.close() finally: try: self.pout = None if self.sock: self.sock.close() finally: self.sock = None if self.p: self.p.wait() rv = self.p.wait() if rv: raise ClientError('server tunnel returned exit code %d' % rv) self.p = None def __del__(self): assert self.closed def __enter__(self): return self def __exit__(self, type, value, 
traceback): with pending_raise(value, rethrow=False): self.close() def check_ok(self): if self.p: rv = self.p.poll() if rv != None: raise ClientError('server exited unexpectedly with code %r' % rv) try: return self.conn.check_ok() except Exception as e: raise ClientError(e) from e def check_busy(self): if self._busy: raise ClientError('already busy with command %r' % self._busy) def ensure_busy(self): if not self._busy: raise ClientError('expected to be busy, but not busy?!') def _not_busy(self): self._busy = None def _get_available_commands(self): self.check_busy() self._busy = b'help' conn = self.conn conn.write(b'help\n') result = set() line = self.conn.readline() if not line == b'Commands:\n': raise ClientError('unexpected help header ' + repr(line)) while True: line = self.conn.readline() if line == b'\n': break if not line.startswith(b' '): raise ClientError('unexpected help line ' + repr(line)) cmd = line.strip() if not cmd: raise ClientError('unexpected help line ' + repr(line)) result.add(cmd) # FIXME: confusing not_ok = self.check_ok() if not_ok: raise not_ok self._not_busy() return frozenset(result) def _require_command(self, name): if name not in self._available_commands: raise ClientError('server does not appear to provide %s command' % name.encode('ascii')) def sync_indexes(self): self._require_command(b'list-indexes') self.check_busy() conn = self.conn mkdirp(self.cachedir) # All cached idxs are extra until proven otherwise extra = set() for f in os.listdir(self.cachedir): debug1(path_msg(f) + '\n') if f.endswith(b'.idx'): extra.add(f) needed = set() conn.write(b'list-indexes\n') for line in linereader(conn): if not line: break assert(line.find(b'/') < 0) parts = line.split(b' ') idx = parts[0] if len(parts) == 2 and parts[1] == b'load' and idx not in extra: # If the server requests that we load an idx and we don't # already have a copy of it, it is needed needed.add(idx) # Any idx that the server has heard of is proven not extra extra.discard(idx) 
self.check_ok() debug1('client: removing extra indexes: %s\n' % extra) for idx in extra: os.unlink(os.path.join(self.cachedir, idx)) debug1('client: server requested load of: %s\n' % needed) for idx in needed: self.sync_index(idx) git.auto_midx(self.cachedir) def sync_index(self, name): self._require_command(b'send-index') #debug1('requesting %r\n' % name) self.check_busy() mkdirp(self.cachedir) fn = os.path.join(self.cachedir, name) if os.path.exists(fn): msg = ("won't request existing .idx, try `bup bloom --check %s`" % path_msg(fn)) raise ClientError(msg) self.conn.write(b'send-index %s\n' % name) n = struct.unpack('!I', self.conn.read(4))[0] assert(n) with atomically_replaced_file(fn, 'wb') as f: count = 0 progress('Receiving index from server: %d/%d\r' % (count, n)) for b in chunkyreader(self.conn, n): f.write(b) count += len(b) qprogress('Receiving index from server: %d/%d\r' % (count, n)) progress('Receiving index from server: %d/%d, done.\n' % (count, n)) self.check_ok() def _make_objcache(self): return git.PackIdxList(self.cachedir) def _suggest_packs(self): ob = self._busy if ob: assert(ob == b'receive-objects-v2') self.conn.write(b'\xff\xff\xff\xff') # suspend receive-objects-v2 suggested = [] for line in linereader(self.conn): if not line: break debug2('%r\n' % line) if line.startswith(b'index '): idx = line[6:] debug1('client: received index suggestion: %s\n' % git.shorten_hash(idx).decode('ascii')) suggested.append(idx) else: assert(line.endswith(b'.idx')) debug1('client: completed writing pack, idx: %s\n' % git.shorten_hash(line).decode('ascii')) suggested.append(line) self.check_ok() if ob: self._busy = None idx = None for idx in suggested: self.sync_index(idx) git.auto_midx(self.cachedir) if ob: self._busy = ob self.conn.write(b'%s\n' % ob) return idx def new_packwriter(self, compression_level=1, max_pack_size=None, max_pack_objects=None): self._require_command(b'receive-objects-v2') self.check_busy() def _set_busy(): self._busy = 
b'receive-objects-v2' self.conn.write(b'receive-objects-v2\n') return PackWriter_Remote(self.conn, objcache_maker = self._make_objcache, suggest_packs = self._suggest_packs, onopen = _set_busy, onclose = self._not_busy, ensure_busy = self.ensure_busy, compression_level=compression_level, max_pack_size=max_pack_size, max_pack_objects=max_pack_objects) def read_ref(self, refname): self._require_command(b'read-ref') self.check_busy() self.conn.write(b'read-ref %s\n' % refname) r = self.conn.readline().strip() self.check_ok() if r: assert(len(r) == 40) # hexified sha return unhexlify(r) else: return None # nonexistent ref def update_ref(self, refname, newval, oldval): self._require_command(b'update-ref') self.check_busy() self.conn.write(b'update-ref %s\n%s\n%s\n' % (refname, hexlify(newval), hexlify(oldval) if oldval else b'')) self.check_ok() def join(self, id): self._require_command(b'join') self.check_busy() self._busy = b'join' # Send 'cat' so we'll work fine with older versions self.conn.write(b'cat %s\n' % re.sub(br'[\n\r]', b'_', id)) while 1: sz = struct.unpack('!I', self.conn.read(4))[0] if not sz: break yield self.conn.read(sz) # FIXME: ok to assume the only NotOk is a KerError? (it is true atm) e = self.check_ok() self._not_busy() if e: raise KeyError(str(e)) def cat_batch(self, refs): self._require_command(b'cat-batch') self.check_busy() self._busy = b'cat-batch' conn = self.conn conn.write(b'cat-batch\n') # FIXME: do we want (only) binary protocol? 
for ref in refs: assert ref assert b'\n' not in ref conn.write(ref) conn.write(b'\n') conn.write(b'\n') for ref in refs: info = conn.readline() if info == b'missing\n': yield None, None, None, None continue if not (info and info.endswith(b'\n')): raise ClientError('Hit EOF while looking for object info: %r' % info) oidx, oid_t, size = info.split(b' ') size = int(size) cr = chunkyreader(conn, size) yield oidx, oid_t, size, cr detritus = next(cr, None) if detritus: raise ClientError('unexpected leftover data ' + repr(detritus)) # FIXME: confusing not_ok = self.check_ok() if not_ok: raise not_ok self._not_busy() def refs(self, patterns=None, limit_to_heads=False, limit_to_tags=False): patterns = patterns or tuple() self._require_command(b'refs') self.check_busy() self._busy = b'refs' conn = self.conn conn.write(b'refs %d %d\n' % (1 if limit_to_heads else 0, 1 if limit_to_tags else 0)) for pattern in patterns: assert b'\n' not in pattern conn.write(pattern) conn.write(b'\n') conn.write(b'\n') for line in lines_until_sentinel(conn, b'\n', ClientError): line = line[:-1] oidx, name = line.split(b' ') if len(oidx) != 40: raise ClientError('Invalid object fingerprint in %r' % line) if not name: raise ClientError('Invalid reference name in %r' % line) yield name, unhexlify(oidx) # FIXME: confusing not_ok = self.check_ok() if not_ok: raise not_ok self._not_busy() def rev_list(self, refs, parse=None, format=None): """See git.rev_list for the general semantics, but note that with the current interface, the parse function must be able to handle (consume) any blank lines produced by the format because the first one received that it doesn't consume will be interpreted as a terminator for the entire rev-list result. 
""" self._require_command(b'rev-list') if format: assert b'\n' not in format assert parse for ref in refs: assert ref assert b'\n' not in ref self.check_busy() self._busy = b'rev-list' conn = self.conn conn.write(b'rev-list\n') conn.write(b'\n') if format: conn.write(format) conn.write(b'\n') for ref in refs: conn.write(ref) conn.write(b'\n') conn.write(b'\n') if not format: for line in lines_until_sentinel(conn, b'\n', ClientError): line = line.strip() assert len(line) == 40 yield line else: for line in lines_until_sentinel(conn, b'\n', ClientError): if not line.startswith(b'commit '): raise ClientError('unexpected line ' + repr(line)) cmt_oidx = line[7:].strip() assert len(cmt_oidx) == 40 yield cmt_oidx, parse(conn) # FIXME: confusing not_ok = self.check_ok() if not_ok: raise not_ok self._not_busy() def resolve(self, path, parent=None, want_meta=True, follow=False): self._require_command(b'resolve') self.check_busy() self._busy = b'resolve' conn = self.conn conn.write(b'resolve %d\n' % ((1 if want_meta else 0) | (2 if follow else 0) | (4 if parent else 0))) if parent: vfs.write_resolution(conn, parent) write_bvec(conn, path) success = ord(conn.read(1)) assert success in (0, 1) if success: result = vfs.read_resolution(conn) else: result = vfs.read_ioerror(conn) # FIXME: confusing not_ok = self.check_ok() if not_ok: raise not_ok self._not_busy() if isinstance(result, vfs.IOError): raise result return result # FIXME: disentangle this (stop inheriting) from PackWriter class PackWriter_Remote(git.PackWriter): def __new__(cls, *args, **kwargs): result = super().__new__(cls) result.remote_closed = True # supports __del__ return result def __init__(self, conn, objcache_maker, suggest_packs, onopen, onclose, ensure_busy, compression_level=1, max_pack_size=None, max_pack_objects=None): git.PackWriter.__init__(self, objcache_maker=objcache_maker, compression_level=compression_level, max_pack_size=max_pack_size, max_pack_objects=max_pack_objects) self.remote_closed = False 
self.file = conn self.filename = b'remote socket' self.suggest_packs = suggest_packs self.onopen = onopen self.onclose = onclose self.ensure_busy = ensure_busy self._packopen = False self._bwcount = 0 self._bwtime = time.time() # __enter__ and __exit__ are inherited def _open(self): if not self._packopen: self.onopen() self._packopen = True def _end(self, run_midx=True): # Called by other PackWriter methods like breakpoint(). # Must not close the connection (self.file) assert(run_midx) # We don't support this via remote yet self.objcache, objcache = None, self.objcache with nullcontext_if_not(objcache): if not (self._packopen and self.file): return None self.file.write(b'\0\0\0\0') self._packopen = False self.onclose() # Unbusy if objcache is not None: objcache.close() return self.suggest_packs() # Returns last idx received def close(self): # Called by inherited __exit__ self.remote_closed = True id = self._end() self.file = None super().close() return id def __del__(self): assert self.remote_closed super().__del__() def abort(self): raise ClientError("don't know how to abort remote pack writing") def _raw_write(self, datalist, sha): assert(self.file) if not self._packopen: self._open() self.ensure_busy() data = b''.join(datalist) assert(data) assert(sha) crc = zlib.crc32(data) & 0xffffffff outbuf = b''.join((struct.pack('!I', len(data) + 20 + 4), sha, struct.pack('!I', crc), data)) try: (self._bwcount, self._bwtime) = _raw_write_bwlimit( self.file, outbuf, self._bwcount, self._bwtime) except IOError as e: raise ClientError(e) from e self.outbytes += len(data) self.count += 1 if self.file.has_input(): self.suggest_packs() self.objcache.refresh() return sha, crc 
bup-0.33.3/lib/bup/cmd/000077500000000000000000000000001454333004200144635ustar00rootroot00000000000000bup-0.33.3/lib/bup/cmd/__init__.py000066400000000000000000000000001454333004200165620ustar00rootroot00000000000000bup-0.33.3/lib/bup/cmd/bloom.py000066400000000000000000000144111454333004200161460ustar00rootroot00000000000000 from __future__ import absolute_import import os, glob, sys from bup import options, git, bloom from bup.compat import argv_bytes, hexstr from bup.helpers import (add_error, debug1, log, progress, qprogress, saved_errors) from bup.io import path_msg optspec = """ bup bloom [options...] -- ruin ruin the specified bloom file (clearing the bitfield) f,force ignore existing bloom file and regenerate it from scratch o,output= output bloom filename (default: auto) d,dir= input directory to look for idx files (default: auto) k,hashes= number of hash functions to use (4 or 5) (default: auto) c,check= check the given .idx file against the bloom filter """ def ruin_bloom(bloomfilename): rbloomfilename = git.repo_rel(bloomfilename) if not os.path.exists(bloomfilename): log(path_msg(bloomfilename) + '\n') add_error('bloom: %s not found to ruin\n' % path_msg(rbloomfilename)) return with bloom.ShaBloom(bloomfilename, readwrite=True, expected=1) as b: b.map[16 : 16 + 2**b.bits] = b'\0' * 2**b.bits def check_bloom(path, bloomfilename, idx): rbloomfilename = git.repo_rel(bloomfilename) ridx = git.repo_rel(idx) if not os.path.exists(bloomfilename): log('bloom: %s: does not exist.\n' % path_msg(rbloomfilename)) return with bloom.ShaBloom(bloomfilename) as b: if not b.valid(): add_error('bloom: %r is invalid.\n' % path_msg(rbloomfilename)) return base = os.path.basename(idx) if base not in b.idxnames: log('bloom: %s does not contain the idx.\n' % path_msg(rbloomfilename)) return if base == idx: idx = os.path.join(path, idx) log('bloom: bloom file: %s\n' % path_msg(rbloomfilename)) log('bloom: checking %s\n' % path_msg(ridx)) with git.open_idx(idx) as oids: for 
oid in oids: if not b.exists(oid): add_error('bloom: ERROR: object %s missing' % hexstr(oid)) _first = None def do_bloom(path, outfilename, k, force): global _first assert k in (None, 4, 5) b = None try: if os.path.exists(outfilename) and not force: b = bloom.ShaBloom(outfilename) if not b.valid(): debug1("bloom: Existing invalid bloom found, regenerating.\n") b.close() b = None add = [] rest = [] add_count = 0 rest_count = 0 for i, name in enumerate(glob.glob(b'%s/*.idx' % path)): progress('bloom: counting: %d\r' % i) with git.open_idx(name) as ix: ixbase = os.path.basename(name) if b is not None and (ixbase in b.idxnames): rest.append(name) rest_count += len(ix) else: add.append(name) add_count += len(ix) if not add: debug1("bloom: nothing to do.\n") return if b is not None: if len(b) != rest_count: debug1("bloom: size %d != idx total %d, regenerating\n" % (len(b), rest_count)) b, b_tmp = None, b b_tmp.close() elif k is not None and k != b.k: debug1("bloom: new k %d != existing k %d, regenerating\n" % (k, b.k)) b, b_tmp = None, b b_tmp.close() elif (b.bits < bloom.MAX_BLOOM_BITS[b.k] and b.pfalse_positive(add_count) > bloom.MAX_PFALSE_POSITIVE): debug1("bloom: regenerating: adding %d entries gives " "%.2f%% false positives.\n" % (add_count, b.pfalse_positive(add_count))) b, b_tmp = None, b b_tmp.close() else: b, b_tmp = None, b b_tmp.close() b = bloom.ShaBloom(outfilename, readwrite=True, expected=add_count) if b is None: # Need all idxs to build from scratch add += rest add_count += rest_count del rest del rest_count msg = b is None and 'creating from' or 'adding' if not _first: _first = path dirprefix = (_first != path) and git.repo_rel(path) + b': ' or b'' progress('bloom: %s%s %d file%s (%d object%s).\r' % (path_msg(dirprefix), msg, len(add), len(add)!=1 and 's' or '', add_count, add_count!=1 and 's' or '')) tfname = None if b is None: tfname = os.path.join(path, b'bup.tmp.bloom') b = bloom.create(tfname, expected=add_count, k=k) count = 0 icount = 0 for name 
in add: with git.open_idx(name) as ix: qprogress('bloom: writing %.2f%% (%d/%d objects)\r' % (icount*100.0/add_count, icount, add_count)) b.add_idx(ix) count += 1 icount += len(ix) finally: # This won't handle pending exceptions correctly in py2 # Currently, there's an open file object for tfname inside b. # Make sure it's closed before rename. if b is not None: b.close() if tfname: os.rename(tfname, outfilename) def main(argv): o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) if extra: o.fatal('no positional parameters expected') if not opt.check and opt.k and opt.k not in (4,5): o.fatal('only k values of 4 and 5 are supported') if opt.check: opt.check = argv_bytes(opt.check) git.check_repo_or_die() output = argv_bytes(opt.output) if opt.output else None path = opt.dir and argv_bytes(opt.dir) or git.repo(b'objects/pack') debug1('bloom: scanning %s\n' % path_msg(path)) outfilename = output or os.path.join(path, b'bup.bloom') if opt.check: check_bloom(path, outfilename, opt.check) elif opt.ruin: ruin_bloom(outfilename) else: do_bloom(path, outfilename, opt.k, opt.force) if saved_errors: log('WARNING: %d errors encountered during bloom.\n' % len(saved_errors)) sys.exit(1) elif opt.check: log('All tests passed.\n') bup-0.33.3/lib/bup/cmd/cat_file.py000066400000000000000000000043141454333004200166050ustar00rootroot00000000000000 from __future__ import absolute_import import re, stat, sys from bup import options, git, vfs from bup.compat import argv_bytes from bup.helpers import chunkyreader, log, saved_errors from bup.io import byte_stream from bup.repo import LocalRepo optspec = """ bup cat-file [--meta|--bupm] /branch/revision/[path] -- meta print the target's metadata entry (decoded then reencoded) to stdout bupm print the target directory's .bupm file directly to stdout """ def main(argv): o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) git.check_repo_or_die() if not extra: o.fatal('must specify a target') if 
len(extra) > 1: o.fatal('only one target file allowed') if opt.bupm and opt.meta: o.fatal('--meta and --bupm are incompatible') target = argv_bytes(extra[0]) if not re.match(br'/*[^/]+/[^/]+', target): o.fatal("path %r doesn't include a branch and revision" % target) with LocalRepo() as repo: resolved = vfs.resolve(repo, target, follow=False) leaf_name, leaf_item = resolved[-1] if not leaf_item: log('error: cannot access %r in %r\n' % (b'/'.join(name for name, item in resolved), target)) sys.exit(1) mode = vfs.item_mode(leaf_item) sys.stdout.flush() out = byte_stream(sys.stdout) if opt.bupm: if not stat.S_ISDIR(mode): o.fatal('%r is not a directory' % target) _, bupm_oid = vfs.tree_data_and_bupm(repo, leaf_item.oid) if bupm_oid: with vfs.tree_data_reader(repo, bupm_oid) as meta_stream: out.write(meta_stream.read()) elif opt.meta: augmented = vfs.augment_item_meta(repo, leaf_item, include_size=True) out.write(augmented.meta.encode()) else: if stat.S_ISREG(mode): with vfs.fopen(repo, leaf_item) as f: for b in chunkyreader(f): out.write(b) else: o.fatal('%r is not a plain file' % target) if saved_errors: log('warning: %d errors encountered\n' % len(saved_errors)) sys.exit(1) bup-0.33.3/lib/bup/cmd/daemon.py000066400000000000000000000043001454333004200162750ustar00rootroot00000000000000 from __future__ import absolute_import import fcntl, getopt, os, socket, subprocess, sys, select from bup import options, path from bup.helpers import log, debug1 optspec = """ bup daemon [options...] -- [bup-server options...] 
-- l,listen ip address to listen on, defaults to * p,port port to listen on, defaults to 1982 """ def main(argv): o = options.Options(optspec, optfunc=getopt.getopt) opt, flags, extra = o.parse_bytes(argv[1:]) host = opt.listen port = opt.port and int(opt.port) or 1982 socks = [] e = None for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE): af, socktype, proto, canonname, sa = res try: s = socket.socket(af, socktype, proto) except socket.error as e: continue try: if af == socket.AF_INET6: log("bup daemon: listening on [%s]:%s\n" % sa[:2]) else: log("bup daemon: listening on %s:%s\n" % sa[:2]) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind(sa) s.listen(1) fcntl.fcntl(s.fileno(), fcntl.F_SETFD, fcntl.FD_CLOEXEC) except socket.error as e: s.close() continue socks.append(s) if not socks: log('bup daemon: listen socket: %s\n' % e.args[1]) sys.exit(1) try: while True: [rl,wl,xl] = select.select(socks, [], [], 60) for l in rl: s, src = l.accept() try: log("Socket accepted connection from %s\n" % (src,)) fd1 = os.dup(s.fileno()) fd2 = os.dup(s.fileno()) s.close() sp = subprocess.Popen([path.exe(), 'mux', '--', path.exe(), 'server'] + extra, stdin=fd1, stdout=fd2) finally: os.close(fd1) os.close(fd2) finally: for l in socks: l.shutdown(socket.SHUT_RDWR) l.close() debug1("bup daemon: done") bup-0.33.3/lib/bup/cmd/damage.py000066400000000000000000000034671454333004200162650ustar00rootroot00000000000000 from __future__ import absolute_import import os, random from bup import options from bup.compat import argv_bytes, bytes_from_uint from bup.helpers import log from bup.io import path_msg def randblock(n): return b''.join(bytes_from_uint(random.randrange(0,256)) for i in range(n)) optspec = """ bup damage [-n count] [-s maxsize] [-S seed] -- WARNING: THIS COMMAND IS EXTREMELY DANGEROUS n,num= number of blocks to damage s,size= maximum size of each damaged block percent= maximum size of each damaged block (as a 
percent of entire file) equal spread damage evenly throughout the file S,seed= random number seed (for repeatable tests) """ def main(argv): o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) if not extra: o.fatal('filenames expected') if opt.seed != None: random.seed(opt.seed) for name in extra: name = argv_bytes(name) log('Damaging "%s"...\n' % path_msg(name)) with open(name, 'r+b') as f: st = os.fstat(f.fileno()) size = st.st_size if opt.percent or opt.size: ms1 = int(float(opt.percent or 0)/100.0*size) or size ms2 = opt.size or size maxsize = min(ms1, ms2) else: maxsize = 1 chunks = opt.num or 10 chunksize = (size // chunks) or 1 for r in range(chunks): sz = random.randrange(1, maxsize+1) if sz > size: sz = size if opt.equal: ofs = (r * chunksize) % size else: ofs = random.randrange(0, size - sz + 1) log(' %6d bytes at %d\n' % (sz, ofs)) f.seek(ofs) f.write(randblock(sz)) bup-0.33.3/lib/bup/cmd/drecurse.py000066400000000000000000000034671454333004200166630ustar00rootroot00000000000000 from __future__ import absolute_import, print_function from os.path import relpath import sys from bup import options, drecurse from bup.compat import argv_bytes from bup.helpers import log, parse_excludes, parse_rx_excludes, saved_errors from bup.io import byte_stream optspec = """ bup drecurse -- x,xdev,one-file-system don't cross filesystem boundaries exclude= a path to exclude from the backup (can be used more than once) exclude-from= a file that contains exclude paths (can be used more than once) exclude-rx= skip paths matching the unanchored regex (may be repeated) exclude-rx-from= skip --exclude-rx patterns in file (may be repeated) q,quiet don't actually print filenames profile run under the python profiler """ def main(argv): o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) if len(extra) != 1: o.fatal("exactly one filename expected") drecurse_top = argv_bytes(extra[0]) excluded_paths = parse_excludes(flags, o.fatal) if not 
drecurse_top.startswith(b'/'): excluded_paths = [relpath(x) for x in excluded_paths] exclude_rxs = parse_rx_excludes(flags, o.fatal) it = drecurse.recursive_dirlist([drecurse_top], opt.xdev, excluded_paths=excluded_paths, exclude_rxs=exclude_rxs) if opt.profile: import cProfile def do_it(): for i in it: pass cProfile.run('do_it()') else: if opt.quiet: for i in it: pass else: sys.stdout.flush() out = byte_stream(sys.stdout) for (name,st) in it: out.write(name + b'\n') if saved_errors: log('WARNING: %d errors encountered.\n' % len(saved_errors)) sys.exit(1) bup-0.33.3/lib/bup/cmd/features.py000066400000000000000000000020241454333004200166510ustar00rootroot00000000000000 from __future__ import absolute_import, print_function import platform, sys from bup import _helpers, metadata, options, version from bup.io import byte_stream out = None def show_support(out, bool_opt, what): out.write(b' %s: %s\n' % (what, b'yes' if bool_opt else b'no')) optspec = """ bup features """ def main(argv): o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) sys.stdout.flush() out = byte_stream(sys.stdout) out.write(b'bup %s\n' % version.version) out.write(b'Source %s %s\n' % (version.commit, version.date)) have_readline = getattr(_helpers, 'readline', None) have_libacl = getattr(_helpers, 'read_acl', None) have_xattr = metadata.xattr out.write(b' Python: %s\n' % platform.python_version().encode('ascii')) show_support(out, have_readline, b'Command line editing (e.g. 
bup ftp)') show_support(out, have_libacl, b'Saving and restoring POSIX ACLs') show_support(out, have_xattr, b'Saving and restoring extended attributes (xattrs)') bup-0.33.3/lib/bup/cmd/fsck.py000066400000000000000000000202111454333004200157570ustar00rootroot00000000000000 from __future__ import absolute_import, print_function from shutil import rmtree from subprocess import PIPE from tempfile import mkdtemp from binascii import hexlify import glob, os, subprocess, sys from bup import options, git from bup.compat import argv_bytes from bup.helpers import Sha1, chunkyreader, istty2, log, progress from bup.io import byte_stream par2_ok = 0 nullf = open(os.devnull, 'wb+') opt = None def debug(s): if opt.verbose > 1: log(s) def run(argv): # at least in python 2.5, using "stdout=2" or "stdout=sys.stderr" below # doesn't actually work, because subprocess closes fd #2 right before # execing for some reason. So we work around it by duplicating the fd # first. fd = os.dup(2) # copy stderr try: p = subprocess.Popen(argv, stdout=fd, close_fds=False) return p.wait() finally: os.close(fd) def par2_setup(): global par2_ok rv = 1 try: p = subprocess.Popen([b'par2', b'--help'], stdout=nullf, stderr=nullf, stdin=nullf) rv = p.wait() except OSError: log('fsck: warning: par2 not found; disabling recovery features.\n') else: par2_ok = 1 def is_par2_parallel(): # A true result means it definitely allows -t1; a false result is # technically inconclusive, but likely means no. 
tmpdir = mkdtemp(prefix=b'bup-fsck') try: canary = tmpdir + b'/canary' with open(canary, 'wb') as f: f.write(b'canary\n') p = subprocess.Popen((b'par2', b'create', b'-qq', b'-t1', canary), stderr=PIPE, stdin=nullf) _, err = p.communicate() parallel = p.returncode == 0 if opt.verbose: if len(err) > 0 and err != b'Invalid option specified: -t1\n': log('Unexpected par2 error output\n') log(repr(err) + '\n') if parallel: log('Assuming par2 supports parallel processing\n') else: log('Assuming par2 does not support parallel processing\n') return parallel finally: rmtree(tmpdir) _par2_parallel = None def par2(action, args, verb_floor=0): global _par2_parallel if _par2_parallel is None: _par2_parallel = is_par2_parallel() cmd = [b'par2', action] if opt.verbose >= verb_floor and not istty2: cmd.append(b'-q') else: cmd.append(b'-qq') if _par2_parallel: cmd.append(b'-t1') cmd.extend(args) return run(cmd) def par2_generate(base): return par2(b'create', [b'-n1', b'-c200', b'--', base, base + b'.pack', base + b'.idx'], verb_floor=2) def par2_verify(base): return par2(b'verify', [b'--', base], verb_floor=3) def par2_repair(base): return par2(b'repair', [b'--', base], verb_floor=2) def quick_verify(base): f = open(base + b'.pack', 'rb') f.seek(-20, 2) wantsum = f.read(20) assert(len(wantsum) == 20) f.seek(0) sum = Sha1() for b in chunkyreader(f, os.fstat(f.fileno()).st_size - 20): sum.update(b) if sum.digest() != wantsum: raise ValueError('expected %r, got %r' % (hexlify(wantsum), sum.hexdigest())) def git_verify(base): if opt.quick: try: quick_verify(base) except Exception as e: log('error: %s\n' % e) return 1 return 0 else: return run([b'git', b'verify-pack', b'--', base]) def do_pack(base, last, par2_exists, out): code = 0 if par2_ok and par2_exists and (opt.repair or not opt.generate): vresult = par2_verify(base) if vresult != 0: if opt.repair: rresult = par2_repair(base) if rresult != 0: action_result = b'failed' log('%s par2 repair: failed (%d)\n' % (last, rresult)) code = 
rresult else: action_result = b'repaired' log('%s par2 repair: succeeded (0)\n' % last) code = 100 else: action_result = b'failed' log('%s par2 verify: failed (%d)\n' % (last, vresult)) code = vresult else: action_result = b'ok' elif not opt.generate or (par2_ok and not par2_exists): gresult = git_verify(base) if gresult != 0: action_result = b'failed' log('%s git verify: failed (%d)\n' % (last, gresult)) code = gresult else: if par2_ok and opt.generate: presult = par2_generate(base) if presult != 0: action_result = b'failed' log('%s par2 create: failed (%d)\n' % (last, presult)) code = presult else: action_result = b'generated' else: action_result = b'ok' else: assert(opt.generate and (not par2_ok or par2_exists)) action_result = b'exists' if par2_exists else b'skipped' if opt.verbose: out.write(last + b' ' + action_result + b'\n') return code optspec = """ bup fsck [options...] [filenames...] -- r,repair attempt to repair errors using par2 (dangerous!) g,generate generate auto-repair information using par2 v,verbose increase verbosity (can be used more than once) quick just check pack sha1sum, don't use git verify-pack j,jobs= run 'n' jobs in parallel par2-ok immediately return 0 if par2 is ok, 1 if not disable-par2 ignore par2 even if it is available """ def main(argv): global opt, par2_ok o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) opt.verbose = opt.verbose or 0 par2_setup() if opt.par2_ok: if par2_ok: sys.exit(0) # 'true' in sh else: sys.exit(1) if opt.disable_par2: par2_ok = 0 git.check_repo_or_die() if extra: extra = [argv_bytes(x) for x in extra] else: debug('fsck: No filenames given: checking all packs.\n') extra = glob.glob(git.repo(b'objects/pack/*.pack')) sys.stdout.flush() out = byte_stream(sys.stdout) code = 0 count = 0 outstanding = {} for name in extra: if name.endswith(b'.pack'): base = name[:-5] elif name.endswith(b'.idx'): base = name[:-4] elif name.endswith(b'.par2'): base = name[:-5] elif os.path.exists(name + 
b'.pack'): base = name else: raise Exception('%r is not a pack file!' % name) (dir,last) = os.path.split(base) par2_exists = os.path.exists(base + b'.par2') if par2_exists and os.stat(base + b'.par2').st_size == 0: par2_exists = 0 sys.stdout.flush() # Not sure we still need this, but it'll flush out too debug('fsck: checking %r (%s)\n' % (last, par2_ok and par2_exists and 'par2' or 'git')) if not opt.verbose: progress('fsck (%d/%d)\r' % (count, len(extra))) if not opt.jobs: nc = do_pack(base, last, par2_exists, out) code = code or nc count += 1 else: while len(outstanding) >= opt.jobs: (pid,nc) = os.wait() nc >>= 8 if pid in outstanding: del outstanding[pid] code = code or nc count += 1 pid = os.fork() if pid: # parent outstanding[pid] = 1 else: # child try: sys.exit(do_pack(base, last, par2_exists, out)) except Exception as e: log('exception: %r\n' % e) sys.exit(99) while len(outstanding): (pid,nc) = os.wait() nc >>= 8 if pid in outstanding: del outstanding[pid] code = code or nc count += 1 if not opt.verbose: progress('fsck (%d/%d)\r' % (count, len(extra))) if istty2: debug('fsck done. \n') sys.exit(code) bup-0.33.3/lib/bup/cmd/ftp.py000066400000000000000000000214041454333004200156270ustar00rootroot00000000000000 # For now, this completely relies on the assumption that the current # encoding (LC_CTYPE, etc.) is ASCII compatible, and that it returns # the exact same bytes from a decode/encode round-trip (or the reverse # (e.g. ISO-8859-1). 
from __future__ import absolute_import, print_function import os, fnmatch, stat, sys, traceback from bup import _helpers, options, git, shquote, ls, vfs from bup.compat import argv_bytes from bup.helpers import chunkyreader, log, saved_errors from bup.io import byte_stream, path_msg from bup.repo import LocalRepo repo = None class CommandError(Exception): pass class OptionError(Exception): pass def do_ls(repo, pwd, args, out): pwd_str = b'/'.join(name for name, item in pwd) or b'/' try: opt = ls.opts_from_cmdline(args, onabort=OptionError, pwd=pwd_str) except OptionError as e: return None return ls.within_repo(repo, opt, out, pwd_str) def write_to_file(inf, outf): for blob in chunkyreader(inf): outf.write(blob) def _completer_get_subs(repo, line): (qtype, lastword) = shquote.unfinished_word(line) dir, name = os.path.split(lastword) dir_path = vfs.resolve(repo, dir or b'/') _, dir_item = dir_path[-1] if not dir_item: subs = tuple() else: subs = tuple(dir_path + (entry,) for entry in vfs.contents(repo, dir_item) if (entry[0] != b'.' 
and entry[0].startswith(name))) return qtype, lastword, subs _attempt_start = None _attempt_end = None def attempt_completion(text, start, end): global _attempt_start, _attempt_end _attempt_start = start _attempt_end = end _last_line = None _last_res = None def enter_completion(text, iteration): global repo global _attempt_end global _last_line global _last_res try: line = _helpers.get_line_buffer()[:_attempt_end] if _last_line != line: _last_res = _completer_get_subs(repo, line) _last_line = line qtype, lastword, subs = _last_res if iteration < len(subs): path = subs[iteration] leaf_name, leaf_item = path[-1] res = vfs.try_resolve(repo, leaf_name, parent=path[:-1]) leaf_name, leaf_item = res[-1] fullname = os.path.join(*(name for name, item in res)) if stat.S_ISDIR(vfs.item_mode(leaf_item)): ret = shquote.what_to_add(qtype, lastword, fullname + b'/', terminate=False) else: ret = shquote.what_to_add(qtype, lastword, fullname, terminate=True) + b' ' return text + ret except Exception as e: log('\n') _, _, tb = sys.exc_info() traceback.print_tb(tb) log('\nError in completion: %s\n' % e) return None optspec = """ bup ftp [commands...] 
""" def inputiter(f, pwd, out): if os.isatty(f.fileno()): while 1: prompt = b'bup %s> ' % (b'/'.join(name for name, item in pwd) or b'/', ) if hasattr(_helpers, 'readline'): try: yield _helpers.readline(prompt) except EOFError: print() # Clear the line for the terminal's next prompt break else: out.write(prompt) out.flush() read_line = f.readline() if not read_line: print('') break yield read_line else: for line in f: yield line def rpath_msg(res): """Return a path_msg for the resolved path res.""" return path_msg(b'/'.join(name for name, item in res)) def present_interface(stdin, out, extra, repo): pwd = vfs.resolve(repo, b'/') if extra: lines = (argv_bytes(arg) for arg in extra) else: if hasattr(_helpers, 'readline'): _helpers.set_completer_word_break_characters(b' \t\n\r/') _helpers.set_attempted_completion_function(attempt_completion) _helpers.set_completion_entry_function(enter_completion) if sys.platform.startswith('darwin'): # MacOS uses a slightly incompatible clone of libreadline _helpers.parse_and_bind(b'bind ^I rl_complete') _helpers.parse_and_bind(b'tab: complete') lines = inputiter(stdin, pwd, out) for line in lines: if not line.strip(): continue words = [word for (wordstart,word) in shquote.quotesplit(line)] cmd = words[0].lower() #log('execute: %r %r\n' % (cmd, parm)) try: if cmd == b'ls': do_ls(repo, pwd, words[1:], out) out.flush() elif cmd == b'cd': np = pwd for parm in words[1:]: res = vfs.resolve(repo, parm, parent=np) _, leaf_item = res[-1] if not leaf_item: raise CommandError('path does not exist: ' + rpath_msg(res)) if not stat.S_ISDIR(vfs.item_mode(leaf_item)): raise CommandError('path is not a directory: ' + path_msg(parm)) np = res pwd = np elif cmd == b'pwd': if len(pwd) == 1: out.write(b'/') out.write(b'/'.join(name for name, item in pwd) + b'\n') out.flush() elif cmd == b'cat': for parm in words[1:]: res = vfs.resolve(repo, parm, parent=pwd) _, leaf_item = res[-1] if not leaf_item: raise CommandError('path does not exist: ' + 
rpath_msg(res)) with vfs.fopen(repo, leaf_item) as srcfile: write_to_file(srcfile, out) out.flush() elif cmd == b'get': if len(words) not in [2,3]: raise CommandError('Usage: get [localname]') rname = words[1] (dir,base) = os.path.split(rname) lname = len(words) > 2 and words[2] or base res = vfs.resolve(repo, rname, parent=pwd) _, leaf_item = res[-1] if not leaf_item: raise CommandError('path does not exist: ' + rpath_msg(res)) with vfs.fopen(repo, leaf_item) as srcfile: with open(lname, 'wb') as destfile: log('Saving %s\n' % path_msg(lname)) write_to_file(srcfile, destfile) elif cmd == b'mget': for parm in words[1:]: dir, base = os.path.split(parm) res = vfs.resolve(repo, dir, parent=pwd) _, dir_item = res[-1] if not dir_item: raise CommandError('path does not exist: ' + path_msg(dir)) for name, item in vfs.contents(repo, dir_item): if name == b'.': continue if fnmatch.fnmatch(name, base): if stat.S_ISLNK(vfs.item_mode(item)): deref = vfs.resolve(repo, name, parent=res) deref_name, deref_item = deref[-1] if not deref_item: raise CommandError('path does not exist: ' + rpath_msg(res)) item = deref_item with vfs.fopen(repo, item) as srcfile: with open(name, 'wb') as destfile: log('Saving %s\n' % path_msg(name)) write_to_file(srcfile, destfile) elif cmd in (b'help', b'?'): out.write(b'Commands: ls cd pwd cat get mget help quit\n') out.flush() elif cmd in (b'quit', b'exit', b'bye'): break else: raise CommandError('no such command: ' + cmd.encode(errors='backslashreplace')) except CommandError as ex: out.write(b'error: %s\n' % str(ex).encode(errors='backslashreplace')) out.flush() def main(argv): global repo o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) git.check_repo_or_die() sys.stdout.flush() out = byte_stream(sys.stdout) stdin = byte_stream(sys.stdin) with LocalRepo() as repo: present_interface(stdin, out, extra, repo) if saved_errors: log('warning: %d errors encountered\n' % len(saved_errors)) sys.exit(1) 
bup-0.33.3/lib/bup/cmd/fuse.py000066400000000000000000000134421454333004200160030ustar00rootroot00000000000000 from __future__ import absolute_import, print_function import errno, os, sys try: import fuse except ImportError: print('error: cannot find the python "fuse" module; please install it', file=sys.stderr) sys.exit(2) if not hasattr(fuse, '__version__'): if hasattr(fuse, 'FUSE'): print('error: python fuse module appears to be fusepy, not python-fuse\n' ' please install https://github.com/libfuse/python-fuse', file=sys.stderr) else: print('error: fuse module may need to be upgraded (no fuse.__version__)', file=sys.stderr) sys.exit(2) fuse.fuse_python_api = (0, 2) if sys.version_info[0] > 2: try: fuse_ver = fuse.__version__.split('.') fuse_ver_maj = int(fuse_ver[0]) except: log('error: cannot determine the fuse major version; please report', file=sys.stderr) sys.exit(2) if len(fuse_ver) < 3 or fuse_ver_maj < 1: print("error: fuse module can't handle binary data; please upgrade to 1.0+\n", file=sys.stderr) sys.exit(2) from bup import options, git, vfs, xstat from bup.compat import argv_bytes, fsdecode from bup.helpers import log from bup.repo import LocalRepo # FIXME: self.meta and want_meta? # The path handling is just wrong, but the current fuse module can't # handle bytes paths. class BupFs(fuse.Fuse): def __init__(self, repo, verbose=0, fake_metadata=False): fuse.Fuse.__init__(self) self.repo = repo self.verbose = verbose self.fake_metadata = fake_metadata def getattr(self, path): path = argv_bytes(path) if self.verbose > 0: log('--getattr(%r)\n' % path) res = vfs.resolve(self.repo, path, want_meta=(not self.fake_metadata), follow=False) name, item = res[-1] if not item: return -errno.ENOENT if self.fake_metadata: item = vfs.augment_item_meta(self.repo, item, include_size=True) else: item = vfs.ensure_item_has_metadata(self.repo, item, include_size=True) meta = item.meta # FIXME: do we want/need to do anything more with nlink? 
st = fuse.Stat(st_mode=meta.mode, st_nlink=1, st_size=meta.size) st.st_mode = meta.mode st.st_uid = meta.uid or 0 st.st_gid = meta.gid or 0 st.st_atime = max(0, xstat.fstime_floor_secs(meta.atime)) st.st_mtime = max(0, xstat.fstime_floor_secs(meta.mtime)) st.st_ctime = max(0, xstat.fstime_floor_secs(meta.ctime)) return st def readdir(self, path, offset): path = argv_bytes(path) assert not offset # We don't return offsets, so offset should be unused res = vfs.resolve(self.repo, path, follow=False) dir_name, dir_item = res[-1] if not dir_item: yield -errno.ENOENT yield fuse.Direntry('..') # FIXME: make sure want_meta=False is being completely respected for ent_name, ent_item in vfs.contents(self.repo, dir_item, want_meta=False): fusename = fsdecode(ent_name.replace(b'/', b'-')) yield fuse.Direntry(fusename) def readlink(self, path): path = argv_bytes(path) if self.verbose > 0: log('--readlink(%r)\n' % path) res = vfs.resolve(self.repo, path, follow=False) name, item = res[-1] if not item: return -errno.ENOENT return fsdecode(vfs.readlink(self.repo, item)) def open(self, path, flags): path = argv_bytes(path) if self.verbose > 0: log('--open(%r)\n' % path) res = vfs.resolve(self.repo, path, follow=False) name, item = res[-1] if not item: return -errno.ENOENT accmode = os.O_RDONLY | os.O_WRONLY | os.O_RDWR if (flags & accmode) != os.O_RDONLY: return -errno.EACCES # Return None since read doesn't need the file atm... 
# If we *do* return the file, it'll show up as the last argument #return vfs.fopen(repo, item) return None def read(self, path, size, offset): path = argv_bytes(path) if self.verbose > 0: log('--read(%r)\n' % path) res = vfs.resolve(self.repo, path, follow=False) name, item = res[-1] if not item: return -errno.ENOENT with vfs.fopen(self.repo, item) as f: f.seek(offset) return f.read(size) optspec = """ bup fuse [-d] [-f] -- f,foreground run in foreground d,debug run in the foreground and display FUSE debug information o,allow-other allow other users to access the filesystem meta report original metadata for paths when available v,verbose increase log output (can be used more than once) """ def main(argv): o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) if not opt.verbose: opt.verbose = 0 # Set stderr to be line buffered, even if it's not connected to the console # so that we'll be able to see diagnostics in a timely fashion. errfd = sys.stderr.fileno() sys.stderr.flush() sys.stderr = os.fdopen(errfd, 'w', 1) if len(extra) != 1: o.fatal('only one mount point argument expected') git.check_repo_or_die() with LocalRepo() as repo: f = BupFs(repo=repo, verbose=opt.verbose, fake_metadata=(not opt.meta)) # This is likely wrong, but the fuse module doesn't currently accept bytes f.fuse_args.mountpoint = extra[0] if opt.debug: f.fuse_args.add('debug') if opt.foreground: f.fuse_args.setmod('foreground') f.multithreaded = False if opt.allow_other: f.fuse_args.add('allow_other') f.main() bup-0.33.3/lib/bup/cmd/gc.py000066400000000000000000000023511454333004200154270ustar00rootroot00000000000000 from __future__ import absolute_import from bup import git, options from bup.gc import bup_gc from bup.helpers import die_if_errors optspec = """ bup gc [options...] 
-- v,verbose increase log output (can be used more than once) threshold= only rewrite a packfile if it's over this percent garbage [10] #,compress= set compression level to # (0-9, 9 is highest) [1] unsafe use the command even though it may be DANGEROUS """ # FIXME: server mode? # FIXME: make sure client handles server-side changes reasonably def main(argv): o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) if not opt.unsafe: o.fatal('refusing to run dangerous, experimental command without --unsafe') if extra: o.fatal('no positional parameters expected') if opt.threshold: try: opt.threshold = int(opt.threshold) except ValueError: o.fatal('threshold must be an integer percentage value') if opt.threshold < 0 or opt.threshold > 100: o.fatal('threshold must be an integer percentage value') git.check_repo_or_die() bup_gc(threshold=opt.threshold, compression=opt.compress, verbosity=opt.verbose) die_if_errors() bup-0.33.3/lib/bup/cmd/get.py000066400000000000000000000634741454333004200156320ustar00rootroot00000000000000 from __future__ import absolute_import, print_function from binascii import hexlify, unhexlify from collections import namedtuple from stat import S_ISDIR import os, sys, textwrap, time from bup import compat, git, client, vfs from bup.compat import ( argv_bytes, bytes_from_byte, environ, hexstr ) from bup.git import get_cat_data, parse_commit, walk_object from bup.helpers import add_error, debug1, log, saved_errors from bup.helpers import hostname, tty_width, parse_num from bup.io import path_msg from bup.pwdgrp import userfullname, username from bup.repo import LocalRepo, RemoteRepo argspec = ( "usage: bup get [-s source] [-r remote] (<--ff|--append|...> REF [DEST])...", """Transfer data from a source repository to a destination repository according to the methods specified (--ff, --ff:, --append, etc.). Both repositories default to BUP_DIR. 
A remote destination may be specified with -r, and data may be pulled from a remote repository with the related "bup on HOST get ..." command.""", ('optional arguments:', (('-h, --help', 'show this help message and exit'), ('-v, --verbose', 'increase log output (can be specified more than once)'), ('-q, --quiet', "don't show progress meter"), ('-s SOURCE, --source SOURCE', 'path to the source repository (defaults to BUP_DIR)'), ('-r REMOTE, --remote REMOTE', 'hostname:/path/to/repo of remote destination repository'), ('-t --print-trees', 'output a tree id for each ref set'), ('-c, --print-commits', 'output a commit id for each ref set'), ('--print-tags', 'output an id for each tag'), ('--bwlimit BWLIMIT', 'maximum bytes/sec to transmit to server'), ('-0, -1, -2, -3, -4, -5, -6, -7, -8, -9, --compress LEVEL', 'set compression LEVEL (default: 1)'))), ('transfer methods:', (('--ff REF, --ff: REF DEST', 'fast-forward dest REF (or DEST) to match source REF'), ('--append REF, --append: REF DEST', 'append REF (treeish or committish) to dest REF (or DEST)'), ('--pick REF, --pick: REF DEST', 'append single source REF commit to dest REF (or DEST)'), ('--force-pick REF, --force-pick: REF DEST', '--pick, overwriting REF (or DEST)'), ('--new-tag REF, --new-tag: REF DEST', 'tag source ref REF as REF (or DEST) in dest unless it already exists'), ('--replace, --replace: REF DEST', 'overwrite REF (or DEST) in dest with source REF'), ('--unnamed REF', 'fetch REF anonymously (without destination ref)')))) def render_opts(opts, width=None): if not width: width = tty_width() result = [] for args, desc in opts: result.append(textwrap.fill(args, width=width, initial_indent=(' ' * 2), subsequent_indent=(' ' * 4))) result.append('\n') result.append(textwrap.fill(desc, width=width, initial_indent=(' ' * 6), subsequent_indent=(' ' * 6))) result.append('\n') return result def usage(argspec, width=None): if not width: width = tty_width() usage, preamble, groups = argspec[0], argspec[1], 
argspec[2:] msg = [] msg.append(textwrap.fill(usage, width=width, subsequent_indent=' ')) msg.append('\n\n') msg.append(textwrap.fill(preamble.replace('\n', ' '), width=width)) msg.append('\n') for group_name, group_args in groups: msg.extend(['\n', group_name, '\n']) msg.extend(render_opts(group_args, width=width)) return ''.join(msg) def misuse(message=None): sys.stderr.write(usage(argspec)) if message: sys.stderr.write('\nerror: ') sys.stderr.write(message) sys.stderr.write('\n') sys.exit(1) def require_n_args_or_die(n, args): if len(args) < n + 1: misuse('%s argument requires %d %s' % (n, 'values' if n == 1 else 'value')) result = args[1:1+n], args[1+n:] assert len(result[0]) == n return result Spec = namedtuple('Spec', ('method', 'src', 'dest')) def spec_msg(s): if not s.dest: return '--%s %s' % (s.method, path_msg(s.src)) return '--%s: %s %s' % (s.method, path_msg(s.src), path_msg(s.dest)) def parse_args(args): class GetOpts: pass opt = GetOpts() opt.help = False opt.verbose = 0 opt.quiet = False opt.print_commits = opt.print_trees = opt.print_tags = False opt.bwlimit = None opt.compress = 1 opt.source = opt.remote = None opt.target_specs = [] remaining = args[1:] # Skip argv[0] while remaining: arg = remaining[0] if arg in (b'-h', b'--help'): sys.stdout.write(usage(argspec)) sys.exit(0) elif arg in (b'-v', b'--verbose'): opt.verbose += 1 remaining = remaining[1:] elif arg in (b'--ff', b'--append', b'--pick', b'--force-pick', b'--new-tag', b'--replace', b'--unnamed'): (ref,), remaining = require_n_args_or_die(1, remaining) opt.target_specs.append(Spec(method=arg[2:].decode('ascii'), src=ref, dest=None)) elif arg in (b'--ff:', b'--append:', b'--pick:', b'--force-pick:', b'--new-tag:', b'--replace:'): (ref, dest), remaining = require_n_args_or_die(2, remaining) opt.target_specs.append(Spec(method=arg[2:-1].decode('ascii'), src=ref, dest=dest)) elif arg in (b'-s', b'--source'): (opt.source,), remaining = require_n_args_or_die(1, remaining) elif arg in (b'-r', 
b'--remote'): (opt.remote,), remaining = require_n_args_or_die(1, remaining) elif arg in (b'-c', b'--print-commits'): opt.print_commits, remaining = True, remaining[1:] elif arg in (b'-t', b'--print-trees'): opt.print_trees, remaining = True, remaining[1:] elif arg == b'--print-tags': opt.print_tags, remaining = True, remaining[1:] elif arg in (b'-0', b'-1', b'-2', b'-3', b'-4', b'-5', b'-6', b'-7', b'-8', b'-9'): opt.compress = int(arg[1:]) remaining = remaining[1:] elif arg == b'--compress': (opt.compress,), remaining = require_n_args_or_die(1, remaining) opt.compress = int(opt.compress) elif arg == b'--bwlimit': (opt.bwlimit,), remaining = require_n_args_or_die(1, remaining) opt.bwlimit = int(opt.bwlimit) elif arg.startswith(b'-') and len(arg) > 2 and arg[1] != b'-': # Try to interpret this as -xyz, i.e. "-xyz -> -x -y -z". # We do this last so that --foo -bar is valid if --foo # requires a value. remaining[0:1] = (b'-' + bytes_from_byte(c) for c in arg[1:]) # FIXME continue else: misuse() return opt # FIXME: client error handling (remote exceptions, etc.) # FIXME: walk_object in in git.py doesn't support opt.verbose. Do we # need to adjust for that here? def get_random_item(name, hash, repo, writer, opt): def already_seen(oid): return writer.exists(unhexlify(oid)) for item in walk_object(repo.cat, hash, stop_at=already_seen, include_data=True): # already_seen ensures that writer.exists(id) is false. # Otherwise, just_write() would fail. 
writer.just_write(item.oid, item.type, item.data) def append_commit(name, hash, parent, src_repo, writer, opt): now = time.time() items = parse_commit(get_cat_data(src_repo.cat(hash), b'commit')) tree = unhexlify(items.tree) author = b'%s <%s>' % (items.author_name, items.author_mail) author_time = (items.author_sec, items.author_offset) committer = b'%s <%s@%s>' % (userfullname(), username(), hostname()) get_random_item(name, hexlify(tree), src_repo, writer, opt) c = writer.new_commit(tree, parent, author, items.author_sec, items.author_offset, committer, now, None, items.message) return c, tree def append_commits(commits, src_name, dest_hash, src_repo, writer, opt): last_c, tree = dest_hash, None for commit in commits: last_c, tree = append_commit(src_name, commit, last_c, src_repo, writer, opt) assert(tree is not None) return last_c, tree Loc = namedtuple('Loc', ['type', 'hash', 'path']) default_loc = Loc(None, None, None) def find_vfs_item(name, repo): res = repo.resolve(name, follow=False, want_meta=False) leaf_name, leaf_item = res[-1] if not leaf_item: return None kind = type(leaf_item) if kind == vfs.Root: kind = 'root' elif kind == vfs.Tags: kind = 'tags' elif kind == vfs.RevList: kind = 'branch' elif kind == vfs.Commit: if len(res) > 1 and isinstance(res[-2][1], vfs.RevList): kind = 'save' else: kind = 'commit' elif kind == vfs.Item: if S_ISDIR(vfs.item_mode(leaf_item)): kind = 'tree' else: kind = 'blob' elif kind == vfs.Chunky: kind = 'tree' elif kind == vfs.FakeLink: # Don't have to worry about ELOOP, excepting malicious # remotes, since "latest" is the only FakeLink. 
assert leaf_name == b'latest' res = repo.resolve(leaf_item.target, parent=res[:-1], follow=False, want_meta=False) leaf_name, leaf_item = res[-1] assert leaf_item assert isinstance(leaf_item, vfs.Commit) name = b'/'.join(x[0] for x in res) kind = 'save' else: raise Exception('unexpected resolution for %s: %r' % (path_msg(name), res)) path = b'/'.join(name for name, item in res) if hasattr(leaf_item, 'coid'): result = Loc(type=kind, hash=leaf_item.coid, path=path) elif hasattr(leaf_item, 'oid'): result = Loc(type=kind, hash=leaf_item.oid, path=path) else: result = Loc(type=kind, hash=None, path=path) return result Target = namedtuple('Target', ['spec', 'src', 'dest']) def loc_desc(loc): if loc and loc.hash: loc = loc._replace(hash=hexlify(loc.hash)) return repr(loc) # FIXME: see if resolve() means we can drop the vfs path cleanup def cleanup_vfs_path(p): result = os.path.normpath(p) if result.startswith(b'/'): return result return b'/' + result def validate_vfs_path(p, spec): if p.startswith(b'/.') \ and not p.startswith(b'/.tag/'): misuse('unsupported destination path %s in %s' % (path_msg(p), spec_msg(spec))) return p def resolve_src(spec, src_repo): src = find_vfs_item(spec.src, src_repo) spec_args = spec_msg(spec) if not src: misuse('cannot find source for %s' % spec_args) if src.type == 'root': misuse('cannot fetch entire repository for %s' % spec_args) if src.type == 'tags': misuse('cannot fetch entire /.tag directory for %s' % spec_args) debug1('src: %s\n' % loc_desc(src)) return src def get_save_branch(repo, path): res = repo.resolve(path, follow=False, want_meta=False) leaf_name, leaf_item = res[-1] if not leaf_item: misuse('error: cannot access %r in %r' % (leaf_name, path)) assert len(res) == 3 res_path = b'/'.join(name for name, item in res[:-1]) return res_path def resolve_branch_dest(spec, src, src_repo, dest_repo): # Resulting dest must be treeish, or not exist. if not spec.dest: # Pick a default dest. 
if src.type == 'branch': spec = spec._replace(dest=spec.src) elif src.type == 'save': spec = spec._replace(dest=get_save_branch(src_repo, spec.src)) elif src.path.startswith(b'/.tag/'): # Dest defaults to the same. spec = spec._replace(dest=spec.src) spec_args = spec_msg(spec) if not spec.dest: misuse('no destination (implicit or explicit) for %s', spec_args) dest = find_vfs_item(spec.dest, dest_repo) if dest: if dest.type == 'commit': misuse('destination for %s is a tagged commit, not a branch' % spec_args) if dest.type != 'branch': misuse('destination for %s is a %s, not a branch' % (spec_args, dest.type)) else: dest = default_loc._replace(path=cleanup_vfs_path(spec.dest)) if dest.path.startswith(b'/.'): misuse('destination for %s must be a valid branch name' % spec_args) debug1('dest: %s\n' % loc_desc(dest)) return spec, dest def resolve_ff(spec, src_repo, dest_repo): src = resolve_src(spec, src_repo) spec_args = spec_msg(spec) if src.type == 'tree': misuse('%s is impossible; can only --append a tree to a branch' % spec_args) if src.type not in ('branch', 'save', 'commit'): misuse('source for %s must be a branch, save, or commit, not %s' % (spec_args, src.type)) spec, dest = resolve_branch_dest(spec, src, src_repo, dest_repo) return Target(spec=spec, src=src, dest=dest) def handle_ff(item, src_repo, writer, opt): assert item.spec.method == 'ff' assert item.src.type in ('branch', 'save', 'commit') src_oidx = hexlify(item.src.hash) dest_oidx = hexlify(item.dest.hash) if item.dest.hash else None if not dest_oidx or dest_oidx in src_repo.rev_list(src_oidx): # Can fast forward. 
get_random_item(item.spec.src, src_oidx, src_repo, writer, opt) commit_items = parse_commit(get_cat_data(src_repo.cat(src_oidx), b'commit')) return item.src.hash, unhexlify(commit_items.tree) misuse('destination is not an ancestor of source for %s' % spec_msg(item.spec)) # misuse() doesn't return return None def resolve_append(spec, src_repo, dest_repo): src = resolve_src(spec, src_repo) if src.type not in ('branch', 'save', 'commit', 'tree'): misuse('source for %s must be a branch, save, commit, or tree, not %s' % (spec_msg(spec), src.type)) spec, dest = resolve_branch_dest(spec, src, src_repo, dest_repo) return Target(spec=spec, src=src, dest=dest) def handle_append(item, src_repo, writer, opt): assert item.spec.method == 'append' assert item.src.type in ('branch', 'save', 'commit', 'tree') assert item.dest.type == 'branch' or not item.dest.type src_oidx = hexlify(item.src.hash) if item.src.type == 'tree': get_random_item(item.spec.src, src_oidx, src_repo, writer, opt) parent = item.dest.hash msg = b'bup save\n\nGenerated by command:\n%r\n' % compat.get_argvb() userline = b'%s <%s@%s>' % (userfullname(), username(), hostname()) now = time.time() commit = writer.new_commit(item.src.hash, parent, userline, now, None, userline, now, None, msg) return commit, item.src.hash commits = list(src_repo.rev_list(src_oidx)) commits.reverse() return append_commits(commits, item.spec.src, item.dest.hash, src_repo, writer, opt) def resolve_pick(spec, src_repo, dest_repo): src = resolve_src(spec, src_repo) spec_args = spec_msg(spec) if src.type == 'tree': misuse('%s is impossible; can only --append a tree' % spec_args) if src.type not in ('commit', 'save'): misuse('%s impossible; can only pick a commit or save, not %s' % (spec_args, src.type)) if not spec.dest: if src.path.startswith(b'/.tag/'): spec = spec._replace(dest=spec.src) elif src.type == 'save': spec = spec._replace(dest=get_save_branch(src_repo, spec.src)) if not spec.dest: misuse('no destination provided for %s', 
spec_args) dest = find_vfs_item(spec.dest, dest_repo) if not dest: cp = validate_vfs_path(cleanup_vfs_path(spec.dest), spec) dest = default_loc._replace(path=cp) else: if not dest.type == 'branch' and not dest.path.startswith(b'/.tag/'): misuse('%s destination is not a tag or branch' % spec_args) if spec.method == 'pick' \ and dest.hash and dest.path.startswith(b'/.tag/'): misuse('cannot overwrite existing tag for %s (requires --force-pick)' % spec_args) return Target(spec=spec, src=src, dest=dest) def handle_pick(item, src_repo, writer, opt): assert item.spec.method in ('pick', 'force-pick') assert item.src.type in ('save', 'commit') src_oidx = hexlify(item.src.hash) if item.dest.hash: return append_commit(item.spec.src, src_oidx, item.dest.hash, src_repo, writer, opt) return append_commit(item.spec.src, src_oidx, None, src_repo, writer, opt) def resolve_new_tag(spec, src_repo, dest_repo): src = resolve_src(spec, src_repo) spec_args = spec_msg(spec) if not spec.dest and src.path.startswith(b'/.tag/'): spec = spec._replace(dest=src.path) if not spec.dest: misuse('no destination (implicit or explicit) for %s', spec_args) dest = find_vfs_item(spec.dest, dest_repo) if not dest: dest = default_loc._replace(path=cleanup_vfs_path(spec.dest)) if not dest.path.startswith(b'/.tag/'): misuse('destination for %s must be a VFS tag' % spec_args) if dest.hash: misuse('cannot overwrite existing tag for %s (requires --replace)' % spec_args) return Target(spec=spec, src=src, dest=dest) def handle_new_tag(item, src_repo, writer, opt): assert item.spec.method == 'new-tag' assert item.dest.path.startswith(b'/.tag/') get_random_item(item.spec.src, hexlify(item.src.hash), src_repo, writer, opt) return (item.src.hash,) def resolve_replace(spec, src_repo, dest_repo): src = resolve_src(spec, src_repo) spec_args = spec_msg(spec) if not spec.dest: if src.path.startswith(b'/.tag/') or src.type == 'branch': spec = spec._replace(dest=spec.src) if not spec.dest: misuse('no destination provided 
for %s', spec_args) dest = find_vfs_item(spec.dest, dest_repo) if dest: if not dest.type == 'branch' and not dest.path.startswith(b'/.tag/'): misuse('%s impossible; can only overwrite branch or tag' % spec_args) else: cp = validate_vfs_path(cleanup_vfs_path(spec.dest), spec) dest = default_loc._replace(path=cp) if not dest.path.startswith(b'/.tag/') \ and not src.type in ('branch', 'save', 'commit'): misuse('cannot overwrite branch with %s for %s' % (src.type, spec_args)) return Target(spec=spec, src=src, dest=dest) def handle_replace(item, src_repo, writer, opt): assert(item.spec.method == 'replace') if item.dest.path.startswith(b'/.tag/'): get_random_item(item.spec.src, hexlify(item.src.hash), src_repo, writer, opt) return (item.src.hash,) assert(item.dest.type == 'branch' or not item.dest.type) src_oidx = hexlify(item.src.hash) get_random_item(item.spec.src, src_oidx, src_repo, writer, opt) commit_items = parse_commit(get_cat_data(src_repo.cat(src_oidx), b'commit')) return item.src.hash, unhexlify(commit_items.tree) def resolve_unnamed(spec, src_repo, dest_repo): if spec.dest: misuse('destination name given for %s' % spec_msg(spec)) src = resolve_src(spec, src_repo) return Target(spec=spec, src=src, dest=None) def handle_unnamed(item, src_repo, writer, opt): get_random_item(item.spec.src, hexlify(item.src.hash), src_repo, writer, opt) return (None,) def resolve_targets(specs, src_repo, dest_repo): resolved_items = [] common_args = src_repo, dest_repo for spec in specs: debug1('initial-spec: %r\n' % (spec,)) if spec.method == 'ff': resolved_items.append(resolve_ff(spec, *common_args)) elif spec.method == 'append': resolved_items.append(resolve_append(spec, *common_args)) elif spec.method in ('pick', 'force-pick'): resolved_items.append(resolve_pick(spec, *common_args)) elif spec.method == 'new-tag': resolved_items.append(resolve_new_tag(spec, *common_args)) elif spec.method == 'replace': resolved_items.append(resolve_replace(spec, *common_args)) elif spec.method 
== 'unnamed': resolved_items.append(resolve_unnamed(spec, *common_args)) else: # Should be impossible -- prevented by the option parser. assert(False) # FIXME: check for prefix overlap? i.e.: # bup get --ff foo --ff: baz foo/bar # bup get --new-tag .tag/foo --new-tag: bar .tag/foo/bar # Now that we have all the items, check for duplicate tags. tags_targeted = set() for item in resolved_items: dest_path = item.dest and item.dest.path if dest_path: assert(dest_path.startswith(b'/')) if dest_path.startswith(b'/.tag/'): if dest_path in tags_targeted: if item.spec.method not in ('replace', 'force-pick'): misuse('cannot overwrite tag %s via %s' \ % (path_msg(dest_path), spec_msg(item.spec))) else: tags_targeted.add(dest_path) return resolved_items def log_item(name, type, opt, tree=None, commit=None, tag=None): if tag and opt.print_tags: print(hexstr(tag)) if tree and opt.print_trees: print(hexstr(tree)) if commit and opt.print_commits: print(hexstr(commit)) if opt.verbose: last = '' if type in ('root', 'branch', 'save', 'commit', 'tree'): if not name.endswith(b'/'): last = '/' log('%s%s\n' % (path_msg(name), last)) def main(argv): is_reverse = environ.get(b'BUP_SERVER_REVERSE') opt = parse_args(argv) git.check_repo_or_die() if opt.source: opt.source = argv_bytes(opt.source) if opt.bwlimit: client.bwlimit = parse_num(opt.bwlimit) if is_reverse and opt.remote: misuse("don't use -r in reverse mode; it's automatic") if opt.remote or is_reverse: dest_repo = RemoteRepo(opt.remote) else: dest_repo = LocalRepo() with dest_repo as dest_repo: with LocalRepo(repo_dir=opt.source) as src_repo: with dest_repo.new_packwriter(compression_level=opt.compress) as writer: # Resolve and validate all sources and destinations, # implicit or explicit, and do it up-front, so we can # fail before we start writing (for any obviously # broken cases). 
target_items = resolve_targets(opt.target_specs, src_repo, dest_repo) updated_refs = {} # ref_name -> (original_ref, tip_commit(bin)) no_ref_info = (None, None) handlers = {'ff': handle_ff, 'append': handle_append, 'force-pick': handle_pick, 'pick': handle_pick, 'new-tag': handle_new_tag, 'replace': handle_replace, 'unnamed': handle_unnamed} for item in target_items: debug1('get-spec: %r\n' % (item.spec,)) debug1('get-src: %s\n' % loc_desc(item.src)) debug1('get-dest: %s\n' % loc_desc(item.dest)) dest_path = item.dest and item.dest.path if dest_path: if dest_path.startswith(b'/.tag/'): dest_ref = b'refs/tags/%s' % dest_path[6:] else: dest_ref = b'refs/heads/%s' % dest_path[1:] else: dest_ref = None dest_hash = item.dest and item.dest.hash orig_ref, cur_ref = updated_refs.get(dest_ref, no_ref_info) orig_ref = orig_ref or dest_hash cur_ref = cur_ref or dest_hash handler = handlers[item.spec.method] item_result = handler(item, src_repo, writer, opt) if len(item_result) > 1: new_id, tree = item_result else: new_id = item_result[0] if not dest_ref: log_item(item.spec.src, item.src.type, opt) else: updated_refs[dest_ref] = (orig_ref, new_id) if dest_ref.startswith(b'refs/tags/'): log_item(item.spec.src, item.src.type, opt, tag=new_id) else: log_item(item.spec.src, item.src.type, opt, tree=tree, commit=new_id) # Only update the refs at the very end, once the writer is # closed, so that if something goes wrong above, the old refs # will be undisturbed. 
for ref_name, info in updated_refs.items(): orig_ref, new_ref = info try: dest_repo.update_ref(ref_name, new_ref, orig_ref) if opt.verbose: new_hex = hexlify(new_ref) if orig_ref: orig_hex = hexlify(orig_ref) log('updated %r (%s -> %s)\n' % (ref_name, orig_hex, new_hex)) else: log('updated %r (%s)\n' % (ref_name, new_hex)) except (git.GitError, client.ClientError) as ex: add_error('unable to update ref %r: %s' % (ref_name, ex)) if saved_errors: log('WARNING: %d errors encountered while saving.\n' % len(saved_errors)) sys.exit(1) bup-0.33.3/lib/bup/cmd/help.py000066400000000000000000000017501454333004200157700ustar00rootroot00000000000000 from __future__ import absolute_import import os, glob, sys from bup import options, path from bup.compat import argv_bytes optspec = """ bup help """ def main(argv): o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) if len(extra) == 0: # the wrapper program provides the default usage string os.execvp(path.exe(), [path.exe()]) elif len(extra) == 1: docname = (extra[0]=='bup' and b'bup' or (b'bup-%s' % argv_bytes(extra[0]))) manpath = os.path.join(path.exedir(), b'../../Documentation/' + docname + b'.[1-9]') g = glob.glob(manpath) try: if g: os.execvp('man', ['man', '-l', g[0]]) else: os.execvp('man', ['man', docname]) except OSError as e: sys.stderr.write('Unable to run man command: %s\n' % e) sys.exit(1) else: o.fatal("exactly one command name expected") bup-0.33.3/lib/bup/cmd/import_duplicity.py000066400000000000000000000062211454333004200204360ustar00rootroot00000000000000 from __future__ import absolute_import from calendar import timegm from subprocess import check_call from time import strptime import os, sys, tempfile from bup import git, helpers, options from bup.compat import argv_bytes from bup.helpers import (log, shstr, saved_errors) import bup.path optspec = """ bup import-duplicity [-n] -- n,dry-run don't do anything; just print what would be done """ dry_run = False def logcmd(cmd): 
log(shstr(cmd).decode(errors='backslashreplace') + '\n') def exc(cmd, shell=False): logcmd(cmd) if not dry_run: check_call(cmd, shell=shell) def exo(cmd, shell=False, preexec_fn=None, close_fds=True): logcmd(cmd) if dry_run: return b'' return helpers.exo(cmd, shell=shell, preexec_fn=preexec_fn, close_fds=close_fds)[0] def redirect_dup_output(): os.dup2(1, 3) os.dup2(1, 2) def main(argv): global dry_run log('\nbup: import-duplicity is EXPERIMENTAL (proceed with caution)\n\n') o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) dry_run = opt.dry_run if len(extra) < 1 or not extra[0]: o.fatal('duplicity source URL required') if len(extra) < 2 or not extra[1]: o.fatal('bup destination save name required') if len(extra) > 2: o.fatal('too many arguments') source_url, save_name = extra source_url = argv_bytes(source_url) save_name = argv_bytes(save_name) bup_path = bup.path.exe() git.check_repo_or_die() tmpdir = tempfile.mkdtemp(prefix=b'bup-import-dup-') try: dup = [b'duplicity', b'--archive-dir', tmpdir + b'/dup-cache'] restoredir = tmpdir + b'/restore' tmpidx = tmpdir + b'/index' collection_status = \ exo(dup + [b'collection-status', b'--log-fd=3', source_url], close_fds=False, preexec_fn=redirect_dup_output) # i.e. 
3>&1 1>&2 # Duplicity output lines of interest look like this (one leading space): # full 20150222T073111Z 1 noenc # inc 20150222T073233Z 1 noenc dup_timestamps = [] for line in collection_status.splitlines(): if line.startswith(b' inc '): assert(len(line) >= len(b' inc 20150222T073233Z')) dup_timestamps.append(line[5:21]) elif line.startswith(b' full '): assert(len(line) >= len(b' full 20150222T073233Z')) dup_timestamps.append(line[6:22]) for i, dup_ts in enumerate(dup_timestamps): tm = strptime(dup_ts.decode('ascii'), '%Y%m%dT%H%M%SZ') exc([b'rm', b'-rf', restoredir]) exc(dup + [b'restore', b'-t', dup_ts, source_url, restoredir]) exc([bup_path, b'index', b'-uxf', tmpidx, restoredir]) exc([bup_path, b'save', b'--strip', b'--date', b'%d' % timegm(tm), b'-f', tmpidx, b'-n', save_name, restoredir]) sys.stderr.flush() finally: exc([b'rm', b'-rf', tmpdir]) if saved_errors: log('warning: %d errors encountered\n' % len(saved_errors)) sys.exit(1) bup-0.33.3/lib/bup/cmd/index.py000066400000000000000000000314371454333004200161540ustar00rootroot00000000000000from __future__ import absolute_import, print_function from binascii import hexlify import errno, os, stat, sys, time from bup import metadata, options, git, index, hlinkdb from bup.compat import argv_bytes from bup.drecurse import recursive_dirlist from bup.hashsplit import GIT_MODE_FILE from bup.helpers import (add_error, handle_ctrl_c, log, parse_excludes, parse_rx_excludes, progress, qprogress, saved_errors) from bup.io import byte_stream, path_msg class IterHelper: def __init__(self, l): self.i = iter(l) self.cur = None self.next() def __next__(self): self.cur = next(self.i, None) return self.cur next = __next__ def check_index(reader, verbose): try: log('check: checking forward iteration...\n') e = None d = {} for e in reader.forward_iter(): if e.children_n: if verbose: log('%08x+%-4d %r\n' % (e.children_ofs, e.children_n, path_msg(e.name))) assert(e.children_ofs) assert e.name.endswith(b'/') assert(not 
d.get(e.children_ofs)) d[e.children_ofs] = 1 if e.flags & index.IX_HASHVALID: assert(e.sha != index.EMPTY_SHA) assert(e.gitmode) assert not e or bytes(e.name) == b'/' # last entry is *always* / log('check: checking normal iteration...\n') last = None for e in reader: if last: assert(last > e.name) last = e.name except: log('index error! at %r\n' % e) raise log('check: passed.\n') def clear_index(indexfile, verbose): indexfiles = [indexfile, indexfile + b'.meta', indexfile + b'.hlink'] for indexfile in indexfiles: try: os.remove(indexfile) if verbose: log('clear: removed %s\n' % path_msg(indexfile)) except OSError as e: if e.errno != errno.ENOENT: raise def update_index(top, excluded_paths, exclude_rxs, indexfile, check=False, check_device=True, xdev=False, xdev_exceptions=frozenset(), fake_valid=False, fake_invalid=False, out=None, verbose=0): # tmax must be epoch nanoseconds. tmax = (time.time() - 1) * 10**9 with index.MetaStoreWriter(indexfile + b'.meta') as msw, \ hlinkdb.HLinkDB(indexfile + b'.hlink') as hlinks, \ index.Writer(indexfile, msw, tmax) as wi, \ index.Reader(indexfile) as ri: rig = IterHelper(ri.iter(name=top)) fake_hash = None if fake_valid: def fake_hash(name): return (GIT_MODE_FILE, index.FAKE_SHA) total = 0 bup_dir = os.path.abspath(git.repo()) index_start = time.time() for path, pst in recursive_dirlist([top], xdev=xdev, bup_dir=bup_dir, excluded_paths=excluded_paths, exclude_rxs=exclude_rxs, xdev_exceptions=xdev_exceptions): if verbose>=2 or (verbose == 1 and stat.S_ISDIR(pst.st_mode)): out.write(b'%s\n' % path) out.flush() elapsed = time.time() - index_start paths_per_sec = total / elapsed if elapsed else 0 qprogress('Indexing: %d (%d paths/s)\r' % (total, paths_per_sec)) elif not (total % 128): elapsed = time.time() - index_start paths_per_sec = total / elapsed if elapsed else 0 qprogress('Indexing: %d (%d paths/s)\r' % (total, paths_per_sec)) total += 1 while rig.cur and rig.cur.name > path: # deleted paths if rig.cur.exists(): 
rig.cur.set_deleted() rig.cur.repack() if rig.cur.nlink > 1 and not stat.S_ISDIR(rig.cur.mode): hlinks.del_path(rig.cur.name) rig.next() if rig.cur and rig.cur.name == path: # paths that already existed need_repack = False if(rig.cur.stale(pst, check_device=check_device)): try: meta = metadata.from_path(path, statinfo=pst) except (OSError, IOError) as e: add_error(e) rig.next() continue if not stat.S_ISDIR(rig.cur.mode) and rig.cur.nlink > 1: hlinks.del_path(rig.cur.name) if not stat.S_ISDIR(pst.st_mode) and pst.st_nlink > 1: hlinks.add_path(path, pst.st_dev, pst.st_ino) # Clear these so they don't bloat the store -- they're # already in the index (since they vary a lot and they're # fixed length). If you've noticed "tmax", you might # wonder why it's OK to do this, since that code may # adjust (mangle) the index mtime and ctime -- producing # fake values which must not end up in a .bupm. However, # it looks like that shouldn't be possible: (1) When # "save" validates the index entry, it always reads the # metadata from the filesytem. (2) Metadata is only # read/used from the index if hashvalid is true. (3) # "faked" entries will be stale(), and so we'll invalidate # them below. meta.ctime = meta.mtime = meta.atime = 0 meta_ofs = msw.store(meta) rig.cur.update_from_stat(pst, meta_ofs) rig.cur.invalidate() need_repack = True if not (rig.cur.flags & index.IX_HASHVALID): if fake_hash: if rig.cur.sha == index.EMPTY_SHA: rig.cur.gitmode, rig.cur.sha = fake_hash(path) rig.cur.flags |= index.IX_HASHVALID need_repack = True if fake_invalid: rig.cur.invalidate() need_repack = True if need_repack: rig.cur.repack() rig.next() else: # new paths try: meta = metadata.from_path(path, statinfo=pst) except (OSError, IOError) as e: add_error(e) continue # See same assignment to 0, above, for rationale. 
meta.atime = meta.mtime = meta.ctime = 0 meta_ofs = msw.store(meta) wi.add(path, pst, meta_ofs, hashgen=fake_hash) if not stat.S_ISDIR(pst.st_mode) and pst.st_nlink > 1: hlinks.add_path(path, pst.st_dev, pst.st_ino) elapsed = time.time() - index_start paths_per_sec = total / elapsed if elapsed else 0 progress('Indexing: %d, done (%d paths/s).\n' % (total, paths_per_sec)) hlinks.prepare_save() if not ri.exists(): wi.close() else: ri.save() wi.flush() if wi.count: with wi.new_reader() as wr: if check: log('check: before merging: oldfile\n') check_index(ri, verbose) log('check: before merging: newfile\n') check_index(wr, verbose) with index.Writer(indexfile, msw, tmax) as mi: for e in index.merge(ri, wr): # FIXME: shouldn't we remove deleted entries # eventually? When? mi.add_ixentry(e) mi.close() hlinks.commit_save() optspec = """ bup index <-p|-m|-s|-u|--clear|--check> [options...] -- Modes: p,print print the index entries for the given names (also works with -u) m,modified print only added/deleted/modified files (implies -p) s,status print each filename with a status char (A/M/D) (implies -p) u,update recursively update the index entries for the given file/dir names (default if no mode is specified) check carefully check index file integrity clear clear the default index Options: H,hash print the hash for each object next to its name l,long print more information about each file no-check-device don't invalidate an entry if the containing device changes fake-valid mark all index entries as up-to-date even if they aren't fake-invalid mark all index entries as invalid f,indexfile= the name of the index file (normally BUP_DIR/bupindex) exclude= a path to exclude from the backup (may be repeated) exclude-from= skip --exclude paths in file (may be repeated) exclude-rx= skip paths matching the unanchored regex (may be repeated) exclude-rx-from= skip --exclude-rx patterns in file (may be repeated) v,verbose increase log output (can be used more than once) 
x,xdev,one-file-system don't cross filesystem boundaries """ def main(argv): o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) if not (opt.modified or \ opt['print'] or \ opt.status or \ opt.update or \ opt.check or \ opt.clear): opt.update = 1 if (opt.fake_valid or opt.fake_invalid) and not opt.update: o.fatal('--fake-{in,}valid are meaningless without -u') if opt.fake_valid and opt.fake_invalid: o.fatal('--fake-valid is incompatible with --fake-invalid') if opt.clear and opt.indexfile: o.fatal('cannot clear an external index (via -f)') # FIXME: remove this once we account for timestamp races, i.e. index; # touch new-file; index. It's possible for this to happen quickly # enough that new-file ends up with the same timestamp as the first # index, and then bup will ignore it. tick_start = time.time() time.sleep(1 - (tick_start - int(tick_start))) git.check_repo_or_die() handle_ctrl_c() if opt.verbose is None: opt.verbose = 0 if opt.indexfile: indexfile = argv_bytes(opt.indexfile) else: indexfile = git.repo(b'bupindex') if opt.check: log('check: starting initial check.\n') with index.Reader(indexfile) as reader: check_index(reader, opt.verbose) if opt.clear: log('clear: clearing index.\n') clear_index(indexfile, opt.verbose) sys.stdout.flush() out = byte_stream(sys.stdout) if opt.update: if not extra: o.fatal('update mode (-u) requested but no paths given') extra = [argv_bytes(x) for x in extra] excluded_paths = parse_excludes(flags, o.fatal) exclude_rxs = parse_rx_excludes(flags, o.fatal) xexcept = index.unique_resolved_paths(extra) for rp, path in index.reduce_paths(extra): update_index(rp, excluded_paths, exclude_rxs, indexfile, check=opt.check, check_device=opt.check_device, xdev=opt.xdev, xdev_exceptions=xexcept, fake_valid=opt.fake_valid, fake_invalid=opt.fake_invalid, out=out, verbose=opt.verbose) if opt['print'] or opt.status or opt.modified: extra = [argv_bytes(x) for x in extra] with index.Reader(indexfile) as reader: for name, ent in 
reader.filter(extra or [b'']): if (opt.modified and (ent.is_valid() or ent.is_deleted() or not ent.mode)): continue line = b'' if opt.status: if ent.is_deleted(): line += b'D ' elif not ent.is_valid(): if ent.sha == index.EMPTY_SHA: line += b'A ' else: line += b'M ' else: line += b' ' if opt.hash: line += hexlify(ent.sha) + b' ' if opt.long: line += f'{ent.mode:07o} {ent.gitmode:07o} '.encode('ascii') out.write(line + (name or b'./') + b'\n') if opt.check and (opt['print'] or opt.status or opt.modified or opt.update): log('check: starting final check.\n') with index.Reader(indexfile) as reader: check_index(reader, opt.verbose) if saved_errors: log('WARNING: %d errors encountered.\n' % len(saved_errors)) sys.exit(1) bup-0.33.3/lib/bup/cmd/init.py000066400000000000000000000012651454333004200160040ustar00rootroot00000000000000 from __future__ import absolute_import import sys from bup import git, options, client from bup.helpers import log from bup.compat import argv_bytes optspec = """ [BUP_DIR=...] bup init [-r host:path] -- r,remote= remote repository path """ def main(argv): o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) if extra: o.fatal("no arguments expected") try: git.init_repo() # local repo except git.GitError as e: log("bup: error: could not init repository: %s" % e) sys.exit(1) if opt.remote: git.check_repo_or_die() with client.Client(argv_bytes(opt.remote), create=True): pass bup-0.33.3/lib/bup/cmd/join.py000066400000000000000000000022251454333004200157750ustar00rootroot00000000000000 from __future__ import absolute_import import sys from bup import git, options from bup.compat import argv_bytes from bup.helpers import linereader, log from bup.io import byte_stream from bup.repo import LocalRepo, RemoteRepo optspec = """ bup join [-r host:path] [refs or hashes...] 
-- r,remote= remote repository path o= output filename """ def main(argv): o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) if opt.remote: opt.remote = argv_bytes(opt.remote) git.check_repo_or_die() stdin = byte_stream(sys.stdin) if not extra: extra = linereader(stdin) ret = 0 with RemoteRepo(opt.remote) if opt.remote else LocalRepo() as repo: if opt.o: outfile = open(opt.o, 'wb') else: sys.stdout.flush() outfile = byte_stream(sys.stdout) for ref in [argv_bytes(x) for x in extra]: try: for blob in repo.join(ref): outfile.write(blob) except KeyError as e: outfile.flush() log('error: %s\n' % e) ret = 1 sys.exit(ret) bup-0.33.3/lib/bup/cmd/list_idx.py000066400000000000000000000036031454333004200166560ustar00rootroot00000000000000 from __future__ import absolute_import, print_function from binascii import hexlify, unhexlify import sys from bup import git, options from bup.compat import argv_bytes from bup.helpers import add_error, handle_ctrl_c, log, qprogress, saved_errors from bup.io import byte_stream optspec = """ bup list-idx [--find=] -- find= display only objects that start with """ def main(argv): o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) handle_ctrl_c() opt.find = argv_bytes(opt.find) if opt.find else b'' if not extra: o.fatal('you must provide at least one filename') if len(opt.find) > 40: o.fatal('--find parameter must be <= 40 chars long') else: if len(opt.find) % 2: s = opt.find + b'0' else: s = opt.find try: bin = unhexlify(s) except TypeError: o.fatal('--find parameter is not a valid hex string') sys.stdout.flush() out = byte_stream(sys.stdout) find = opt.find.lower() count = 0 idxfiles = [argv_bytes(x) for x in extra] for name in idxfiles: try: ix = git.open_idx(name) except git.GitError as e: add_error('%r: %s' % (name, e)) ix.close() continue with ix: if len(opt.find) == 40: if ix.exists(bin): out.write(b'%s %s\n' % (name, find)) else: # slow, exhaustive search for _i in ix: i = hexlify(_i) if 
i.startswith(find): out.write(b'%s %s\n' % (name, i)) qprogress('Searching: %d\r' % count) count += 1 if saved_errors: log('WARNING: %d errors encountered while saving.\n' % len(saved_errors)) sys.exit(1) bup-0.33.3/lib/bup/cmd/ls.py000066400000000000000000000005151454333004200154540ustar00rootroot00000000000000 from __future__ import absolute_import, print_function import sys from bup import git, ls from bup.io import byte_stream def main(argv): git.check_repo_or_die() sys.stdout.flush() out = byte_stream(sys.stdout) # Check out lib/bup/ls.py for the opt spec rc = ls.via_cmdline(argv[1:], out=out) sys.exit(rc) bup-0.33.3/lib/bup/cmd/margin.py000066400000000000000000000046741454333004200163250ustar00rootroot00000000000000 from __future__ import absolute_import import math, struct, sys from bup import options, git, _helpers from bup.helpers import log from bup.io import byte_stream POPULATION_OF_EARTH=6.7e9 # as of September, 2010 optspec = """ bup margin -- predict Guess object offsets and report the maximum deviation ignore-midx Don't use midx files; use only plain pack idx files. 
""" def main(argv): o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) if extra: o.fatal("no arguments expected") git.check_repo_or_die() with git.PackIdxList(git.repo(b'objects/pack'), ignore_midx=opt.ignore_midx) as mi: def do_predict(ix, out): total = len(ix) maxdiff = 0 for count,i in enumerate(ix): prefix = struct.unpack('!Q', i[:8])[0] expected = prefix * total // (1 << 64) diff = count - expected maxdiff = max(maxdiff, abs(diff)) out.write(b'%d of %d (%.3f%%) ' % (maxdiff, len(ix), maxdiff * 100.0 / len(ix))) out.flush() assert(count+1 == len(ix)) sys.stdout.flush() out = byte_stream(sys.stdout) if opt.predict: if opt.ignore_midx: for pack in mi.packs: do_predict(pack, out) else: do_predict(mi, out) else: # default mode: find longest matching prefix last = b'\0'*20 longmatch = 0 for i in mi: if i == last: continue #assert(str(i) >= last) pm = _helpers.bitmatch(last, i) longmatch = max(longmatch, pm) last = i out.write(b'%d\n' % longmatch) log('%d matching prefix bits\n' % longmatch) doublings = math.log(len(mi), 2) bpd = longmatch / doublings log('%.2f bits per doubling\n' % bpd) remain = 160 - longmatch rdoublings = remain / bpd log('%d bits (%.2f doublings) remaining\n' % (remain, rdoublings)) larger = 2**rdoublings log('%g times larger is possible\n' % larger) perperson = larger/POPULATION_OF_EARTH log('\nEveryone on earth could have %d data sets like yours, all in one\n' 'repository, and we would expect 1 object collision.\n' % int(perperson)) bup-0.33.3/lib/bup/cmd/memtest.py000066400000000000000000000101661454333004200165170ustar00rootroot00000000000000 from __future__ import absolute_import, print_function import re, resource, sys, time from bup import git, bloom, midx, options, _helpers from bup.io import byte_stream from bup.helpers import log _linux_warned = 0 def linux_memstat(): global _linux_warned #fields = ['VmSize', 'VmRSS', 'VmData', 'VmStk', 'ms'] d = {} try: f = open(b'/proc/self/status', 'rb') except IOError as e: if 
not _linux_warned: log('Warning: %s\n' % e) _linux_warned = 1 return {} for line in f: # Note that on Solaris, this file exists but is binary. If that # happens, this split() might not return two elements. We don't # really need to care about the binary format since this output # isn't used for much and report() can deal with missing entries. t = re.split(br':\s*', line.strip(), 1) if len(t) == 2: k,v = t d[k] = v return d last = last_u = last_s = start = 0 def report(count, out): global last, last_u, last_s, start headers = ['RSS', 'MajFlt', 'user', 'sys', 'ms'] ru = resource.getrusage(resource.RUSAGE_SELF) now = time.time() rss = int(ru.ru_maxrss // 1024) if not rss: rss = linux_memstat().get(b'VmRSS', b'??') fields = [rss, ru.ru_majflt, int((ru.ru_utime - last_u) * 1000), int((ru.ru_stime - last_s) * 1000), int((now - last) * 1000)] fmt = '%9s ' + ('%10s ' * len(fields)) if count >= 0: line = fmt % tuple([count] + fields) out.write(line.encode('ascii') + b'\n') else: start = now out.write((fmt % tuple([''] + headers)).encode('ascii') + b'\n') out.flush() # don't include time to run report() in usage counts ru = resource.getrusage(resource.RUSAGE_SELF) last_u = ru.ru_utime last_s = ru.ru_stime last = time.time() optspec = """ bup memtest [-n elements] [-c cycles] -- n,number= number of objects per cycle [10000] c,cycles= number of cycles to run [100] ignore-midx ignore .midx files, use only .idx files existing test with existing objects instead of fake ones """ def main(argv): o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) if extra: o.fatal('no arguments expected') git.check_repo_or_die() sys.stdout.flush() out = byte_stream(sys.stdout) report(-1, out) _helpers.random_sha() report(0, out) with git.PackIdxList(git.repo(b'objects/pack'), ignore_midx=opt.ignore_midx) as m: if opt.existing: def foreverit(mi): while 1: for e in mi: yield e objit = iter(foreverit(m)) for c in range(opt.cycles): for n in range(opt.number): if opt.existing: bin 
= next(objit) assert(m.exists(bin)) else: bin = _helpers.random_sha() # technically, a randomly generated object id might exist. # but the likelihood of that is the likelihood of finding # a collision in sha-1 by accident, which is so unlikely that # we don't care. assert(not m.exists(bin)) report((c+1)*opt.number, out) if bloom._total_searches: out.write(b'bloom: %d objects searched in %d steps: avg %.3f steps/object\n' % (bloom._total_searches, bloom._total_steps, bloom._total_steps*1.0/bloom._total_searches)) if midx._total_searches: out.write(b'midx: %d objects searched in %d steps: avg %.3f steps/object\n' % (midx._total_searches, midx._total_steps, midx._total_steps*1.0/midx._total_searches)) if git._total_searches: out.write(b'idx: %d objects searched in %d steps: avg %.3f steps/object\n' % (git._total_searches, git._total_steps, git._total_steps*1.0/git._total_searches)) out.write(b'Total time: %.3fs\n' % (time.time() - start)) bup-0.33.3/lib/bup/cmd/meta.py000066400000000000000000000136241454333004200157710ustar00rootroot00000000000000# Copyright (C) 2010 Rob Browning # # This code is covered under the terms of the GNU Library General # Public License as described in the bup LICENSE file. # TODO: Add tar-like -C option. from __future__ import absolute_import import sys from bup import metadata from bup import options from bup.compat import argv_bytes from bup.io import byte_stream from bup.helpers import log, saved_errors def open_input(name): if not name or name == b'-': return byte_stream(sys.stdin) return open(name, 'rb') def open_output(name): if not name or name == b'-': sys.stdout.flush() return byte_stream(sys.stdout) return open(name, 'wb') optspec = """ bup meta --create [OPTION ...] bup meta --list [OPTION ...] bup meta --extract [OPTION ...] bup meta --start-extract [OPTION ...] bup meta --finish-extract [OPTION ...] bup meta --edit [OPTION ...] 
-- c,create write metadata for PATHs to stdout (or --file) t,list display metadata x,extract perform --start-extract followed by --finish-extract start-extract build tree matching metadata provided on standard input (or --file) finish-extract finish applying standard input (or --file) metadata to filesystem edit alter metadata; write to stdout (or --file) f,file= specify source or destination file R,recurse recurse into subdirectories xdev,one-file-system don't cross filesystem boundaries numeric-ids apply numeric IDs (user, group, etc.) rather than names symlinks handle symbolic links (default is true) paths include paths in metadata (default is true) set-uid= set metadata uid (via --edit) set-gid= set metadata gid (via --edit) set-user= set metadata user (via --edit) unset-user remove metadata user (via --edit) set-group= set metadata group (via --edit) unset-group remove metadata group (via --edit) v,verbose increase log output (can be used more than once) q,quiet don't show progress meter """ def main(argv): o = options.Options(optspec) opt, flags, remainder = o.parse_bytes([b'--paths', b'--symlinks', b'--recurse'] + argv[1:]) opt.verbose = opt.verbose or 0 opt.quiet = opt.quiet or 0 metadata.verbose = opt.verbose - opt.quiet opt.file = argv_bytes(opt.file) if opt.file else None action_count = sum([bool(x) for x in [opt.create, opt.list, opt.extract, opt.start_extract, opt.finish_extract, opt.edit]]) if action_count > 1: o.fatal("bup: only one action permitted: --create --list --extract --edit") if action_count == 0: o.fatal("bup: no action specified") if opt.create: if len(remainder) < 1: o.fatal("no paths specified for create") output_file = open_output(opt.file) metadata.save_tree(output_file, [argv_bytes(r) for r in remainder], recurse=opt.recurse, write_paths=opt.paths, save_symlinks=opt.symlinks, xdev=opt.xdev) elif opt.list: if len(remainder) > 0: o.fatal("cannot specify paths for --list") src = open_input(opt.file) metadata.display_archive(src, 
open_output(b'-')) elif opt.start_extract: if len(remainder) > 0: o.fatal("cannot specify paths for --start-extract") src = open_input(opt.file) metadata.start_extract(src, create_symlinks=opt.symlinks) elif opt.finish_extract: if len(remainder) > 0: o.fatal("cannot specify paths for --finish-extract") src = open_input(opt.file) metadata.finish_extract(src, restore_numeric_ids=opt.numeric_ids) elif opt.extract: if len(remainder) > 0: o.fatal("cannot specify paths for --extract") src = open_input(opt.file) metadata.extract(src, restore_numeric_ids=opt.numeric_ids, create_symlinks=opt.symlinks) elif opt.edit: if len(remainder) < 1: o.fatal("no paths specified for edit") output_file = open_output(opt.file) unset_user = False # True if --unset-user was the last relevant option. unset_group = False # True if --unset-group was the last relevant option. for flag in flags: if flag[0] == '--set-user': unset_user = False elif flag[0] == '--unset-user': unset_user = True elif flag[0] == '--set-group': unset_group = False elif flag[0] == '--unset-group': unset_group = True for path in remainder: f = open(argv_bytes(path), 'rb') try: for m in metadata._ArchiveIterator(f): if opt.set_uid is not None: try: m.uid = int(opt.set_uid) except ValueError: o.fatal("uid must be an integer") if opt.set_gid is not None: try: m.gid = int(opt.set_gid) except ValueError: o.fatal("gid must be an integer") if unset_user: m.user = b'' elif opt.set_user is not None: m.user = argv_bytes(opt.set_user) if unset_group: m.group = b'' elif opt.set_group is not None: m.group = argv_bytes(opt.set_group) m.write(output_file) finally: f.close() if saved_errors: log('WARNING: %d errors encountered.\n' % len(saved_errors)) sys.exit(1) else: sys.exit(0) bup-0.33.3/lib/bup/cmd/midx.py000066400000000000000000000241011454333004200157740ustar00rootroot00000000000000 from __future__ import absolute_import, print_function from binascii import hexlify import glob, os, math, resource, struct, sys from bup import 
options, git, midx, _helpers, xstat from bup.compat import ExitStack, argv_bytes, hexstr from bup.helpers import (Sha1, add_error, atomically_replaced_file, debug1, fdatasync, log, mmap_readwrite, qprogress, saved_errors, unlink) from bup.io import byte_stream, path_msg PAGE_SIZE=4096 SHA_PER_PAGE=PAGE_SIZE/20. optspec = """ bup midx [options...] -- o,output= output midx filename (default: auto-generated) a,auto automatically use all existing .midx/.idx files as input f,force merge produce exactly one .midx containing all objects p,print print names of generated midx files check validate contents of the given midx files (with -a, all midx files) max-files= maximum number of idx files to open at once [-1] d,dir= directory containing idx/midx files """ merge_into = _helpers.merge_into def _group(l, count): for i in range(0, len(l), count): yield l[i:i+count] def max_files(): mf = min(resource.getrlimit(resource.RLIMIT_NOFILE)) if mf > 32: mf -= 20 # just a safety margin else: mf -= 6 # minimum safety margin return mf def check_midx(name): nicename = git.repo_rel(name) log('Checking %s.\n' % path_msg(nicename)) try: ix = git.open_idx(name) except git.GitError as e: add_error('%s: %s' % (path_msg(name), e)) return with ix: for count,subname in enumerate(ix.idxnames): with git.open_idx(os.path.join(os.path.dirname(name), subname)) \ as sub: for ecount,e in enumerate(sub): if not (ecount % 1234): qprogress(' %d/%d: %s %d/%d\r' % (count, len(ix.idxnames), git.shorten_hash(subname).decode('ascii'), ecount, len(sub))) if not sub.exists(e): add_error("%s: %s: %s missing from idx" % (path_msg(nicename), git.shorten_hash(subname).decode('ascii'), hexstr(e))) if not ix.exists(e): add_error("%s: %s: %s missing from midx" % (path_msg(nicename), git.shorten_hash(subname).decode('ascii'), hexstr(e))) prev = None for ecount,e in enumerate(ix): if not (ecount % 1234): qprogress(' Ordering: %d/%d\r' % (ecount, len(ix))) if e and prev and not e >= prev: add_error('%s: ordering error: 
%s < %s' % (nicename, hexstr(e), hexstr(prev))) prev = e _first = None def _do_midx(outdir, outfilename, infilenames, prefixstr, auto=False, force=False): global _first if not outfilename: assert(outdir) sum = hexlify(Sha1(b'\0'.join(infilenames)).digest()) outfilename = b'%s/midx-%s.midx' % (outdir, sum) inp = [] total = 0 allfilenames = [] with ExitStack() as contexts: for name in infilenames: ix = git.open_idx(name) contexts.enter_context(ix) inp.append(( ix.map, len(ix), ix.sha_ofs, isinstance(ix, midx.PackMidx) and ix.which_ofs or 0, len(allfilenames), )) for n in ix.idxnames: allfilenames.append(os.path.basename(n)) total += len(ix) inp.sort(reverse=True, key=lambda x: x[0][x[2] : x[2] + 20]) if not _first: _first = outdir dirprefix = (_first != outdir) and git.repo_rel(outdir) + b': ' or b'' debug1('midx: %s%screating from %d files (%d objects).\n' % (dirprefix, prefixstr, len(infilenames), total)) if (auto and (total < 1024 and len(infilenames) < 3)) \ or ((auto or force) and len(infilenames) < 2) \ or (force and not total): debug1('midx: nothing to do.\n') return None pages = int(total/SHA_PER_PAGE) or 1 bits = int(math.ceil(math.log(pages, 2))) entries = 2**bits debug1('midx: table size: %d (%d bits)\n' % (entries*4, bits)) unlink(outfilename) with atomically_replaced_file(outfilename, 'w+b') as f: f.write(b'MIDX') f.write(struct.pack('!II', midx.MIDX_VERSION, bits)) assert(f.tell() == 12) f.truncate(12 + 4*entries + 20*total + 4*total) f.flush() fdatasync(f.fileno()) with mmap_readwrite(f, close=False) as fmap: count = merge_into(fmap, bits, total, inp) f.seek(0, os.SEEK_END) f.write(b'\0'.join(allfilenames)) # This is just for testing (if you enable this, don't clear inp above) # if 0: # p = midx.PackMidx(outfilename) # assert(len(p.idxnames) == len(infilenames)) # log(repr(p.idxnames) + '\n') # assert(len(p) == total) # for pe, e in p, git.idxmerge(inp, final_progress=False): # pin = next(pi) # assert(i == pin) # assert(p.exists(i)) return total, 
outfilename def do_midx(outdir, outfilename, infilenames, prefixstr, prout, auto=False, force=False, print_names=False): rv = _do_midx(outdir, outfilename, infilenames, prefixstr, auto=auto, force=force) if rv and print_names: prout.write(rv[1] + b'\n') def do_midx_dir(path, outfilename, prout, auto=False, force=False, max_files=-1, print_names=False): already = {} sizes = {} if force and not auto: midxs = [] # don't use existing midx files else: midxs = glob.glob(b'%s/*.midx' % path) contents = {} for mname in midxs: with git.open_idx(mname) as m: contents[mname] = [(b'%s/%s' % (path,i)) for i in m.idxnames] sizes[mname] = len(m) # sort the biggest+newest midxes first, so that we can eliminate # smaller (or older) redundant ones that come later in the list midxs.sort(key=lambda ix: (-sizes[ix], -xstat.stat(ix).st_mtime)) for mname in midxs: any = 0 for iname in contents[mname]: if not already.get(iname): already[iname] = 1 any = 1 if not any: debug1('%r is redundant\n' % mname) unlink(mname) already[mname] = 1 midxs = [k for k in midxs if not already.get(k)] idxs = [k for k in glob.glob(b'%s/*.idx' % path) if not already.get(k)] for iname in idxs: with git.open_idx(iname) as i: sizes[iname] = len(i) all = [(sizes[n],n) for n in (midxs + idxs)] # FIXME: what are the optimal values? Does this make sense? DESIRED_HWM = force and 1 or 5 DESIRED_LWM = force and 1 or 2 existed = dict((name,1) for sz,name in all) debug1('midx: %d indexes; want no more than %d.\n' % (len(all), DESIRED_HWM)) if len(all) <= DESIRED_HWM: debug1('midx: nothing to do.\n') while len(all) > DESIRED_HWM: all.sort() part1 = [name for sz,name in all[:len(all)-DESIRED_LWM+1]] part2 = all[len(all)-DESIRED_LWM+1:] all = list(do_midx_group(path, outfilename, part1, auto=auto, force=force, max_files=max_files)) \ + part2 if len(all) > DESIRED_HWM: debug1('\nStill too many indexes (%d > %d). 
Merging again.\n' % (len(all), DESIRED_HWM)) if print_names: for sz,name in all: if not existed.get(name): prout.write(name + b'\n') def do_midx_group(outdir, outfilename, infiles, auto=False, force=False, max_files=-1): groups = list(_group(infiles, max_files)) gprefix = '' for n,sublist in enumerate(groups): if len(groups) != 1: gprefix = 'Group %d: ' % (n+1) rv = _do_midx(outdir, outfilename, sublist, gprefix, auto=auto, force=force) if rv: yield rv def main(argv): o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) opt.output = argv_bytes(opt.output) if opt.output else None if extra and (opt.auto or opt.force): o.fatal("you can't use -f/-a and also provide filenames") if opt.check and (not extra and not opt.auto): o.fatal("if using --check, you must provide filenames or -a") git.check_repo_or_die() if opt.max_files < 0: opt.max_files = max_files() assert(opt.max_files >= 5) path = opt.dir and argv_bytes(opt.dir) or git.repo(b'objects/pack') extra = [argv_bytes(x) for x in extra] if opt.check: # check existing midx files if extra: midxes = extra else: debug1('midx: scanning %s\n' % path) midxes = glob.glob(os.path.join(path, b'*.midx')) for name in midxes: check_midx(name) if not saved_errors: log('All tests passed.\n') else: if extra: sys.stdout.flush() do_midx(path, opt.output, extra, b'', byte_stream(sys.stdout), auto=opt.auto, force=opt.force, print_names=opt.print) elif opt.auto or opt.force: sys.stdout.flush() debug1('midx: scanning %s\n' % path_msg(path)) do_midx_dir(path, opt.output, byte_stream(sys.stdout), auto=opt.auto, force=opt.force, max_files=opt.max_files) else: o.fatal("you must use -f or -a or provide input filenames") if saved_errors: log('WARNING: %d errors encountered.\n' % len(saved_errors)) sys.exit(1) bup-0.33.3/lib/bup/cmd/mux.py000066400000000000000000000023321454333004200156460ustar00rootroot00000000000000 from __future__ import absolute_import import os, subprocess, sys from bup import options from bup.helpers 
import debug1, debug2, mux from bup.io import byte_stream optspec = """ bup mux command [arguments...] -- """ def main(argv): # Give the subcommand exclusive access to stdin. orig_stdin = os.dup(0) devnull = os.open(os.devnull, os.O_RDONLY) os.dup2(devnull, 0) os.close(devnull) o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) if len(extra) < 1: o.fatal('command is required') subcmd = extra debug2('bup mux: starting %r\n' % (extra,)) outr, outw = os.pipe() errr, errw = os.pipe() def close_fds(): os.close(outr) os.close(errr) p = subprocess.Popen(subcmd, stdin=orig_stdin, stdout=outw, stderr=errw, close_fds=False, preexec_fn=close_fds) os.close(outw) os.close(errw) sys.stdout.flush() out = byte_stream(sys.stdout) out.write(b'BUPMUX') out.flush() mux(p, out.fileno(), outr, errr) os.close(outr) os.close(errr) prv = p.wait() if prv: debug1('%s exited with code %d\n' % (extra[0], prv)) debug1('bup mux: done\n') sys.exit(prv) bup-0.33.3/lib/bup/cmd/on.py000066400000000000000000000046531454333004200154610ustar00rootroot00000000000000from __future__ import absolute_import from subprocess import PIPE import getopt, os, signal, struct, subprocess, sys from bup import options, ssh, path from bup.compat import argv_bytes from bup.helpers import DemuxConn, log from bup.io import byte_stream optspec = """ bup on index ... bup on save ... bup on split ... bup on get ... 
""" def main(argv): o = options.Options(optspec, optfunc=getopt.getopt) opt, flags, extra = o.parse_bytes(argv[1:]) if len(extra) < 2: o.fatal('arguments expected') class SigException(Exception): def __init__(self, signum): self.signum = signum Exception.__init__(self, 'signal %d received' % signum) def handler(signum, frame): raise SigException(signum) signal.signal(signal.SIGTERM, handler) signal.signal(signal.SIGINT, handler) sys.stdout.flush() out = byte_stream(sys.stdout) try: sp = None p = None ret = 99 hp = argv_bytes(extra[0]).split(b':') if len(hp) == 1: (hostname, port) = (hp[0], None) else: (hostname, port) = hp argv = [argv_bytes(x) for x in extra[1:]] p = ssh.connect(hostname, port, b'on--server', stderr=PIPE) try: argvs = b'\0'.join([b'bup'] + argv) p.stdin.write(struct.pack('!I', len(argvs)) + argvs) p.stdin.flush() sp = subprocess.Popen([path.exe(), b'server'], stdin=p.stdout, stdout=p.stdin) p.stdin.close() p.stdout.close() # Demultiplex remote client's stderr (back to stdout/stderr). with DemuxConn(p.stderr.fileno(), open(os.devnull, "wb")) as dmc: for line in iter(dmc.readline, b''): out.write(line) finally: while 1: # if we get a signal while waiting, we have to keep waiting, just # in case our child doesn't die. try: ret = p.wait() if sp: sp.wait() break except SigException as e: log('\nbup on: %s\n' % e) os.kill(p.pid, e.signum) ret = 84 except SigException as e: if ret == 0: ret = 99 log('\nbup on: %s\n' % e) sys.exit(ret) bup-0.33.3/lib/bup/cmd/on__server.py000066400000000000000000000040751454333004200172040ustar00rootroot00000000000000 from __future__ import absolute_import import os, struct, sys from bup import options, helpers, path from bup.compat import environ from bup.io import byte_stream optspec = """ bup on--server -- This command is run automatically by 'bup on' """ def main(argv): o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) if extra: o.fatal('no arguments expected') # get the subcommand's argv. 
# Normally we could just pass this on the command line, but since we'll often # be getting called on the other end of an ssh pipe, which tends to mangle # argv (by sending it via the shell), this way is much safer. stdin = byte_stream(sys.stdin) buf = stdin.read(4) sz = struct.unpack('!I', buf)[0] assert(sz > 0) assert(sz < 1000000) buf = stdin.read(sz) assert(len(buf) == sz) argv = buf.split(b'\0') argv[0] = path.exe() argv = [argv[0], b'mux', b'--'] + argv # stdin/stdout are supposedly connected to 'bup server' that the caller # started for us (often on the other end of an ssh tunnel), so we don't want # to misuse them. Move them out of the way, then replace stdout with # a pointer to stderr in case our subcommand wants to do something with it. # # It might be nice to do the same with stdin, but my experiments showed that # ssh seems to make its child's stderr a readable-but-never-reads-anything # socket. They really should have used shutdown(SHUT_WR) on the other end # of it, but probably didn't. Anyway, it's too messy, so let's just make sure # anyone reading from stdin is disappointed. # # (You can't just leave stdin/stdout "not open" by closing the file # descriptors. Then the next file that opens is automatically assigned 0 or 1, # and people *trying* to read/write stdin/stdout get screwed.) 
os.dup2(0, 3) os.dup2(1, 4) os.dup2(2, 1) fd = os.open(os.devnull, os.O_RDONLY) os.dup2(fd, 0) os.close(fd) environ[b'BUP_SERVER_REVERSE'] = helpers.hostname() os.execvp(argv[0], argv) sys.exit(99) bup-0.33.3/lib/bup/cmd/prune_older.py000066400000000000000000000145651454333004200173660ustar00rootroot00000000000000 from __future__ import absolute_import, print_function from binascii import hexlify, unhexlify from collections import defaultdict from itertools import groupby from time import localtime, strftime, time import sys from bup import git, options from bup.compat import argv_bytes from bup.gc import bup_gc from bup.helpers import die_if_errors, log, partition, period_as_secs from bup.io import byte_stream from bup.repo import LocalRepo from bup.rm import bup_rm def branches(refnames=tuple()): return ((name[11:], hexlify(sha)) for (name,sha) in git.list_refs(patterns=(b'refs/heads/' + n for n in refnames), limit_to_heads=True)) def save_name(branch, utc): return branch + b'/' \ + strftime('%Y-%m-%d-%H%M%S', localtime(utc)).encode('ascii') def classify_saves(saves, period_start): """For each (utc, id) in saves, yield (True, (utc, id)) if the save should be kept and (False, (utc, id)) if the save should be removed. The ids are binary hashes. """ def retain_newest_in_region(region): for save in region[0:1]: yield True, save for save in region[1:]: yield False, save matches, rest = partition(lambda s: s[0] >= period_start['all'], saves) for save in matches: yield True, save tm_ranges = ((period_start['dailies'], lambda s: localtime(s[0]).tm_yday), (period_start['monthlies'], lambda s: localtime(s[0]).tm_mon), (period_start['yearlies'], lambda s: localtime(s[0]).tm_year)) # Break the decreasing utc sorted saves up into the respective # period ranges (dailies, monthlies, ...). 
Within each range, # group the saves by the period scale (days, months, ...), and # then yield a "keep" action (True, utc) for the newest save in # each group, and a "drop" action (False, utc) for the rest. for pstart, time_region_id in tm_ranges: matches, rest = partition(lambda s: s[0] >= pstart, rest) for region_id, region_saves in groupby(matches, time_region_id): for action in retain_newest_in_region(list(region_saves)): yield action # Finally, drop any saves older than the specified periods for save in rest: yield False, save optspec = """ bup prune-older [options...] [BRANCH...] -- keep-all-for= retain all saves within the PERIOD keep-dailies-for= retain the newest save per day within the PERIOD keep-monthlies-for= retain the newest save per month within the PERIOD keep-yearlies-for= retain the newest save per year within the PERIOD wrt= end all periods at this number of seconds since the epoch pretend don't prune, just report intended actions to standard output gc collect garbage after removals [1] gc-threshold= only rewrite a packfile if it's over this percent garbage [10] #,compress= set compression level to # (0-9, 9 is highest) [1] v,verbose increase log output (can be used more than once) unsafe use the command even though it may be DANGEROUS """ def main(argv): o = options.Options(optspec) opt, flags, roots = o.parse_bytes(argv[1:]) roots = [argv_bytes(x) for x in roots] if not opt.unsafe: o.fatal('refusing to run dangerous, experimental command without --unsafe') now = int(time()) if opt.wrt is None else opt.wrt if not isinstance(now, int): o.fatal('--wrt value ' + str(now) + ' is not an integer') period_start = {} for period, extent in (('all', opt.keep_all_for), ('dailies', opt.keep_dailies_for), ('monthlies', opt.keep_monthlies_for), ('yearlies', opt.keep_yearlies_for)): if extent: secs = period_as_secs(extent.encode('ascii')) if not secs: o.fatal('%r is not a valid period' % extent) period_start[period] = now - secs if not period_start: 
o.fatal('at least one keep argument is required') period_start = defaultdict(lambda: float('inf'), period_start) if opt.verbose: epoch_ymd = strftime('%Y-%m-%d-%H%M%S', localtime(0)) for kind in ['all', 'dailies', 'monthlies', 'yearlies']: period_utc = period_start[kind] if period_utc != float('inf'): if not (period_utc > float('-inf')): log('keeping all ' + kind) else: try: when = strftime('%Y-%m-%d-%H%M%S', localtime(period_utc)) log('keeping ' + kind + ' since ' + when + '\n') except ValueError as ex: if period_utc < 0: log('keeping %s since %d seconds before %s\n' %(kind, abs(period_utc), epoch_ymd)) elif period_utc > 0: log('keeping %s since %d seconds after %s\n' %(kind, period_utc, epoch_ymd)) else: log('keeping %s since %s\n' % (kind, epoch_ymd)) git.check_repo_or_die() # This could be more efficient, but for now just build the whole list # in memory and let bup_rm() do some redundant work. def parse_info(f): author_secs = f.readline().strip() return int(author_secs) sys.stdout.flush() out = byte_stream(sys.stdout) removals = [] for branch, branch_id in branches(roots): die_if_errors() saves = ((utc, unhexlify(oidx)) for (oidx, utc) in git.rev_list(branch_id, format=b'%at', parse=parse_info)) for keep_save, (utc, id) in classify_saves(saves, period_start): assert(keep_save in (False, True)) # FIXME: base removals on hashes if opt.pretend: out.write((b'+ ' if keep_save else b'- ') + save_name(branch, utc) + b'\n') elif not keep_save: removals.append(save_name(branch, utc)) if not opt.pretend: die_if_errors() with LocalRepo() as repo: bup_rm(repo, removals, compression=opt.compress, verbosity=opt.verbose) if opt.gc: die_if_errors() bup_gc(threshold=opt.gc_threshold, compression=opt.compress, verbosity=opt.verbose) die_if_errors() bup-0.33.3/lib/bup/cmd/random.py000066400000000000000000000015001454333004200163110ustar00rootroot00000000000000 from __future__ import absolute_import import sys from bup import options, _helpers from bup.helpers import 
handle_ctrl_c, log, parse_num, istty1 optspec = """ bup random [-S seed] -- S,seed= optional random number seed [1] f,force print random data to stdout even if it's a tty v,verbose print byte counter to stderr """ def main(argv): o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) if len(extra) != 1: o.fatal("exactly one argument expected") total = parse_num(extra[0]) handle_ctrl_c() if opt.force or not istty1: _helpers.write_random(sys.stdout.fileno(), total, opt.seed, opt.verbose and 1 or 0) else: log('error: not writing binary data to a terminal. Use -f to force.\n') sys.exit(1) bup-0.33.3/lib/bup/cmd/restore.py000066400000000000000000000277101454333004200165270ustar00rootroot00000000000000 from __future__ import absolute_import from stat import S_ISDIR import copy, errno, os, re, stat, sys from bup import options, git, vfs from bup._helpers import write_sparsely from bup.compat import argv_bytes, fsencode from bup.helpers import (add_error, chunkyreader, die_if_errors, mkdirp, parse_rx_excludes, progress, qprogress, should_rx_exclude_path) from bup.io import byte_stream from bup.repo import LocalRepo, RemoteRepo optspec = """ bup restore [-r host:path] [-C outdir] -- r,remote= remote repository path C,outdir= change to given outdir before extracting files numeric-ids restore numeric IDs (user, group, etc.) 
rather than names exclude-rx= skip paths matching the unanchored regex (may be repeated) exclude-rx-from= skip --exclude-rx patterns in file (may be repeated) sparse create sparse files v,verbose increase log output (can be used more than once) map-user= given OLD=NEW, restore OLD user as NEW user map-group= given OLD=NEW, restore OLD group as NEW group map-uid= given OLD=NEW, restore OLD uid as NEW uid map-gid= given OLD=NEW, restore OLD gid as NEW gid q,quiet don't show progress meter """ total_restored = 0 # stdout should be flushed after each line, even when not connected to a tty stdoutfd = sys.stdout.fileno() sys.stdout.flush() sys.stdout = os.fdopen(stdoutfd, 'w', 1) out = byte_stream(sys.stdout) def valid_restore_path(path): path = os.path.normpath(path) if path.startswith(b'/'): path = path[1:] if b'/' in path: return True return False def parse_owner_mappings(type, options, fatal): """Traverse the options and parse all --map-TYPEs, or call Option.fatal().""" opt_name = '--map-' + type if type in ('uid', 'gid'): value_rx = re.compile(br'^(-?[0-9]+)=(-?[0-9]+)$') else: value_rx = re.compile(br'^([^=]+)=([^=]*)$') owner_map = {} for flag in options: (option, parameter) = flag if option != opt_name: continue parameter = argv_bytes(parameter) match = value_rx.match(parameter) if not match: raise fatal("couldn't parse %r as %s mapping" % (parameter, type)) old_id, new_id = match.groups() if type in ('uid', 'gid'): old_id = int(old_id) new_id = int(new_id) owner_map[old_id] = new_id return owner_map def apply_metadata(meta, name, restore_numeric_ids, owner_map): m = copy.deepcopy(meta) m.user = owner_map['user'].get(m.user, m.user) m.group = owner_map['group'].get(m.group, m.group) m.uid = owner_map['uid'].get(m.uid, m.uid) m.gid = owner_map['gid'].get(m.gid, m.gid) m.apply_to_path(name, restore_numeric_ids = restore_numeric_ids) def hardlink_compatible(prev_path, prev_item, new_item, top): prev_candidate = top + prev_path if not os.path.exists(prev_candidate): 
return False prev_meta, new_meta = prev_item.meta, new_item.meta if new_item.oid != prev_item.oid \ or new_meta.mtime != prev_meta.mtime \ or new_meta.ctime != prev_meta.ctime \ or new_meta.mode != prev_meta.mode: return False # FIXME: should we be checking the path on disk, or the recorded metadata? # The exists() above might seem to suggest the former. if not new_meta.same_file(prev_meta): return False return True def hardlink_if_possible(fullname, item, top, hardlinks): """Find a suitable hardlink target, link to it, and return true, otherwise return false.""" # The cwd will be dirname(fullname), and fullname will be # absolute, i.e. /foo/bar, and the caller is expected to handle # restoring the metadata if hardlinking isn't possible. # FIXME: we can probably replace the target_vfs_path with the # relevant vfs item # hardlinks tracks a list of (restore_path, vfs_path, meta) # triples for each path we've written for a given hardlink_target. # This allows us to handle the case where we restore a set of # hardlinks out of order (with respect to the original save # call(s)) -- i.e. when we don't restore the hardlink_target path # first. This data also allows us to attempt to handle other # situations like hardlink sets that change on disk during a save, # or between index and save. target = item.meta.hardlink_target assert(target) assert(fullname.startswith(b'/')) target_versions = hardlinks.get(target) if target_versions: # Check every path in the set that we've written so far for a match. 
for prev_path, prev_item in target_versions: if hardlink_compatible(prev_path, prev_item, item, top): try: os.link(top + prev_path, top + fullname) return True except OSError as e: if e.errno != errno.EXDEV: raise else: target_versions = [] hardlinks[target] = target_versions target_versions.append((fullname, item)) return False def write_file_content(repo, dest_path, vfs_file): with vfs.fopen(repo, vfs_file) as inf: with open(dest_path, 'wb') as outf: for b in chunkyreader(inf): outf.write(b) def write_file_content_sparsely(repo, dest_path, vfs_file): with vfs.fopen(repo, vfs_file) as inf: outfd = os.open(dest_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600) try: trailing_zeros = 0; for b in chunkyreader(inf): trailing_zeros = write_sparsely(outfd, b, 512, trailing_zeros) pos = os.lseek(outfd, trailing_zeros, os.SEEK_END) os.ftruncate(outfd, pos) finally: os.close(outfd) def restore(repo, parent_path, name, item, top, sparse, numeric_ids, owner_map, exclude_rxs, verbosity, hardlinks): global total_restored mode = vfs.item_mode(item) treeish = S_ISDIR(mode) fullname = parent_path + b'/' + name # Match behavior of index --exclude-rx with respect to paths. if should_rx_exclude_path(fullname + (b'/' if treeish else b''), exclude_rxs): return if not treeish: # Do this now so we'll have meta.symlink_target for verbose output item = vfs.augment_item_meta(repo, item, include_size=True) meta = item.meta assert(meta.mode == mode) if stat.S_ISDIR(mode): if verbosity >= 1: out.write(b'%s/\n' % fullname) elif stat.S_ISLNK(mode): assert(meta.symlink_target) if verbosity >= 2: out.write(b'%s@ -> %s\n' % (fullname, meta.symlink_target)) else: if verbosity >= 2: out.write(fullname + b'\n') orig_cwd = os.getcwd() try: if treeish: # Assumes contents() returns '.' 
with the full metadata first sub_items = vfs.contents(repo, item, want_meta=True) dot, item = next(sub_items, None) assert(dot == b'.') item = vfs.augment_item_meta(repo, item, include_size=True) meta = item.meta meta.create_path(name) os.chdir(name) total_restored += 1 if verbosity >= 0: qprogress('Restoring: %d\r' % total_restored) for sub_name, sub_item in sub_items: restore(repo, fullname, sub_name, sub_item, top, sparse, numeric_ids, owner_map, exclude_rxs, verbosity, hardlinks) os.chdir(b'..') apply_metadata(meta, name, numeric_ids, owner_map) else: created_hardlink = False if meta.hardlink_target: created_hardlink = hardlink_if_possible(fullname, item, top, hardlinks) if not created_hardlink: meta.create_path(name) if stat.S_ISREG(meta.mode): if sparse: write_file_content_sparsely(repo, name, item) else: write_file_content(repo, name, item) total_restored += 1 if verbosity >= 0: qprogress('Restoring: %d\r' % total_restored) if not created_hardlink: apply_metadata(meta, name, numeric_ids, owner_map) finally: os.chdir(orig_cwd) def main(argv): o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) verbosity = (opt.verbose or 0) if not opt.quiet else -1 if opt.remote: opt.remote = argv_bytes(opt.remote) if opt.outdir: opt.outdir = argv_bytes(opt.outdir) git.check_repo_or_die() if not extra: o.fatal('must specify at least one filename to restore') exclude_rxs = parse_rx_excludes(flags, o.fatal) owner_map = {} for map_type in ('user', 'group', 'uid', 'gid'): owner_map[map_type] = parse_owner_mappings(map_type, flags, o.fatal) if opt.outdir: mkdirp(opt.outdir) os.chdir(opt.outdir) with RemoteRepo(opt.remote) if opt.remote else LocalRepo() as repo: top = fsencode(os.getcwd()) hardlinks = {} for path in [argv_bytes(x) for x in extra]: if not valid_restore_path(path): add_error("path %r doesn't include a branch and revision" % path) continue try: resolved = vfs.resolve(repo, path, want_meta=True, follow=False) except vfs.IOError as e: add_error(e) 
continue if len(resolved) == 3 and resolved[2][0] == b'latest': # Follow latest symlink to the actual save try: resolved = vfs.resolve(repo, b'latest', parent=resolved[:-1], want_meta=True) except vfs.IOError as e: add_error(e) continue # Rename it back to 'latest' resolved = tuple(elt if i != 2 else (b'latest',) + elt[1:] for i, elt in enumerate(resolved)) path_parent, path_name = os.path.split(path) leaf_name, leaf_item = resolved[-1] if not leaf_item: add_error('error: cannot access %r in %r' % (b'/'.join(name for name, item in resolved), path)) continue if not path_name or path_name == b'.': # Source is /foo/what/ever/ or /foo/what/ever/. -- extract # what/ever/* to the current directory, and if name == '.' # (i.e. /foo/what/ever/.), then also restore what/ever's # metadata to the current directory. treeish = vfs.item_mode(leaf_item) if not treeish: add_error('%r cannot be restored as a directory' % path) else: items = vfs.contents(repo, leaf_item, want_meta=True) dot, leaf_item = next(items, None) assert dot == b'.' 
for sub_name, sub_item in items: restore(repo, b'', sub_name, sub_item, top, opt.sparse, opt.numeric_ids, owner_map, exclude_rxs, verbosity, hardlinks) if path_name == b'.': leaf_item = vfs.augment_item_meta(repo, leaf_item, include_size=True) apply_metadata(leaf_item.meta, b'.', opt.numeric_ids, owner_map) else: restore(repo, b'', leaf_name, leaf_item, top, opt.sparse, opt.numeric_ids, owner_map, exclude_rxs, verbosity, hardlinks) if verbosity >= 0: progress('Restoring: %d, done.\n' % total_restored) die_if_errors() bup-0.33.3/lib/bup/cmd/rm.py000066400000000000000000000016411454333004200154550ustar00rootroot00000000000000 from __future__ import absolute_import from bup.compat import argv_bytes from bup.git import check_repo_or_die from bup.options import Options from bup.helpers import die_if_errors from bup.repo import LocalRepo from bup.rm import bup_rm optspec = """ bup rm -- #,compress= set compression level to # (0-9, 9 is highest) [6] v,verbose increase verbosity (can be specified multiple times) unsafe use the command even though it may be DANGEROUS """ def main(argv): o = Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) if not opt.unsafe: o.fatal('refusing to run dangerous, experimental command without --unsafe') if len(extra) < 1: o.fatal('no paths specified') check_repo_or_die() with LocalRepo() as repo: bup_rm(repo, [argv_bytes(x) for x in extra], compression=opt.compress, verbosity=opt.verbose) die_if_errors() bup-0.33.3/lib/bup/cmd/save.py000066400000000000000000000521671454333004200160060ustar00rootroot00000000000000 from __future__ import absolute_import, print_function from binascii import hexlify from errno import ENOENT from io import BytesIO import math, os, stat, sys, time from bup import hashsplit, git, options, index, client, metadata from bup import hlinkdb from bup.compat import argv_bytes, environ, nullcontext from bup.hashsplit import GIT_MODE_TREE, GIT_MODE_FILE, GIT_MODE_SYMLINK from bup.helpers import (add_error, 
grafted_path_components, handle_ctrl_c, hostname, istty2, log, parse_date_or_fatal, parse_num, path_components, progress, qprogress, resolve_parent, saved_errors, stripped_path_components, valid_save_name) from bup.io import byte_stream, path_msg from bup.pwdgrp import userfullname, username from bup.tree import StackDir optspec = """ bup save [-tc] [-n name] -- r,remote= hostname:/path/to/repo of remote repository t,tree output a tree id c,commit output a commit id n,name= name of backup set to update (if any) d,date= date for the commit (seconds since the epoch) v,verbose increase log output (can be used more than once) q,quiet don't show progress meter smaller= only back up files smaller than n bytes bwlimit= maximum bytes/sec to transmit to server f,indexfile= the name of the index file (normally BUP_DIR/bupindex) strip strips the path to every filename given strip-path= path-prefix to be stripped when saving graft= a graft point *old_path*=*new_path* (can be used more than once) #,compress= set compression level to # (0-9, 9 is highest) [1] """ ### Test hooks after_nondir_metadata_stat = None def before_saving_regular_file(name): return def opts_from_cmdline(argv): o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) if opt.indexfile: opt.indexfile = argv_bytes(opt.indexfile) if opt.name: opt.name = argv_bytes(opt.name) if opt.remote: opt.remote = argv_bytes(opt.remote) if opt.strip_path: opt.strip_path = argv_bytes(opt.strip_path) if not (opt.tree or opt.commit or opt.name): o.fatal("use one or more of -t, -c, -n") if not extra: o.fatal("no filenames given") if opt.date: opt.date = parse_date_or_fatal(opt.date, o.fatal) else: opt.date = time.time() opt.progress = (istty2 and not opt.quiet) opt.smaller = parse_num(opt.smaller or 0) if opt.bwlimit: opt.bwlimit = parse_num(opt.bwlimit) if opt.strip and opt.strip_path: o.fatal("--strip is incompatible with --strip-path") opt.sources = [argv_bytes(x) for x in extra] grafts = [] if opt.graft: if 
opt.strip: o.fatal("--strip is incompatible with --graft") if opt.strip_path: o.fatal("--strip-path is incompatible with --graft") for (option, parameter) in flags: if option == "--graft": parameter = argv_bytes(parameter) splitted_parameter = parameter.split(b'=') if len(splitted_parameter) != 2: o.fatal("a graft point must be of the form old_path=new_path") old_path, new_path = splitted_parameter if not (old_path and new_path): o.fatal("a graft point cannot be empty") grafts.append((resolve_parent(old_path), resolve_parent(new_path))) opt.grafts = grafts opt.is_reverse = environ.get(b'BUP_SERVER_REVERSE') if opt.is_reverse and opt.remote: o.fatal("don't use -r in reverse mode; it's automatic") if opt.name and not valid_save_name(opt.name): o.fatal("'%s' is not a valid branch name" % path_msg(opt.name)) return opt def save_tree(opt, reader, hlink_db, msr, w): # Metadata is stored in a file named .bupm in each directory. The # first metadata entry will be the metadata for the current directory. # The remaining entries will be for each of the other directory # elements, in the order they're listed in the index. # # Since the git tree elements are sorted according to # git.shalist_item_sort_key, the metalist items are accumulated as # (sort_key, metadata) tuples, and then sorted when the .bupm file is # created. The sort_key should have been computed using the element's # mangled name and git mode (after hashsplitting), but the code isn't # actually doing that but rather uses the element's real name and mode. # This makes things a bit more difficult when reading it back, see # vfs.ordered_tree_entries(). # Maintain a stack of information representing the current location in # the archive being constructed. The current path is recorded in # parts, which will be something like # [StackDir(name=''), StackDir(name='home'), StackDir(name='someuser')], # and the accumulated content and metadata for files in the dirs is stored # in the .items member of the StackDir. 
stack = [] def _push(part, metadata): # Enter a new archive directory -- make it the current directory. item = StackDir(part, metadata) stack.append(item) def _pop(force_tree=None, dir_metadata=None): # Leave the current archive directory and add its tree to its parent. item = stack.pop() # FIXME: only test if collision is possible (i.e. given --strip, etc.)? if force_tree: tree = force_tree else: names_seen = set() clean_list = [] for x in item.items: name = x.name if name in names_seen: parent_path = b'/'.join(x.name for x in stack) + b'/' add_error('error: ignoring duplicate path %s in %s' % (path_msg(name), path_msg(parent_path))) else: names_seen.add(name) clean_list.append(x) # if set, overrides the original metadata pushed for this dir. if dir_metadata is None: dir_metadata = item.meta metalist = [(b'', dir_metadata)] metalist += [(git.shalist_item_sort_key((entry.mode, entry.name, None)), entry.meta) for entry in clean_list if entry.mode != GIT_MODE_TREE] metalist.sort(key = lambda x: x[0]) metadata = BytesIO(b''.join(m[1].encode() for m in metalist)) mode, id = hashsplit.split_to_blob_or_tree(w.new_blob, w.new_tree, [metadata], keep_boundaries=False) shalist = [(mode, b'.bupm', id)] shalist += [(entry.gitmode, git.mangle_name(entry.name, entry.mode, entry.gitmode), entry.oid) for entry in clean_list] tree = w.new_tree(shalist) if stack: stack[-1].append(item.name, GIT_MODE_TREE, GIT_MODE_TREE, tree, None) return tree # Hack around lack of nonlocal vars in python 2 _nonlocal = {} _nonlocal['count'] = 0 _nonlocal['subcount'] = 0 _nonlocal['lastremain'] = None def progress_report(n): _nonlocal['subcount'] += n cc = _nonlocal['count'] + _nonlocal['subcount'] pct = total and (cc*100.0/total) or 0 now = time.time() elapsed = now - tstart kps = elapsed and int(cc/1024./elapsed) kps_frac = 10 ** int(math.log(kps+1, 10) - 1) kps = int(kps/kps_frac)*kps_frac if cc: remain = elapsed*1.0/cc * (total-cc) else: remain = 0.0 if (_nonlocal['lastremain'] and (remain > 
_nonlocal['lastremain']) and ((remain - _nonlocal['lastremain'])/_nonlocal['lastremain'] < 0.05)): remain = _nonlocal['lastremain'] else: _nonlocal['lastremain'] = remain hours = int(remain/60/60) mins = int(remain/60 - hours*60) secs = int(remain - hours*60*60 - mins*60) if elapsed < 30: remainstr = '' kpsstr = '' else: kpsstr = '%dk/s' % kps if hours: remainstr = '%dh%dm' % (hours, mins) elif mins: remainstr = '%dm%d' % (mins, secs) else: remainstr = '%ds' % secs qprogress('Saving: %.2f%% (%d/%dk, %d/%d files) %s %s\r' % (pct, cc/1024, total/1024, fcount, ftotal, remainstr, kpsstr)) def already_saved(ent): return ent.is_valid() and w.exists(ent.sha) and ent.sha def wantrecurse_pre(ent): return not already_saved(ent) def wantrecurse_during(ent): return not already_saved(ent) or ent.sha_missing() def find_hardlink_target(hlink_db, ent): if hlink_db and not stat.S_ISDIR(ent.mode) and ent.nlink > 1: link_paths = hlink_db.node_paths(ent.dev, ent.ino) if link_paths: return link_paths[0] return None total = ftotal = 0 if opt.progress: for transname, ent in reader.filter(opt.sources, wantrecurse=wantrecurse_pre): if not (ftotal % 10024): qprogress('Reading index: %d\r' % ftotal) exists = ent.exists() hashvalid = already_saved(ent) ent.set_sha_missing(not hashvalid) if not opt.smaller or ent.size < opt.smaller: if exists and not hashvalid: total += ent.size ftotal += 1 progress('Reading index: %d, done.\n' % ftotal) hashsplit.progress_callback = progress_report # Root collisions occur when strip or graft options map more than one # path to the same directory (paths which originally had separate # parents). When that situation is detected, use empty metadata for # the parent. Otherwise, use the metadata for the common parent. # Collision example: "bup save ... --strip /foo /foo/bar /bar". # FIXME: Add collision tests, or handle collisions some other way. # FIXME: Detect/handle strip/graft name collisions (other than root), # i.e. if '/foo/bar' and '/bar' both map to '/'. 
first_root = None root_collision = None tstart = time.time() fcount = 0 lastskip_name = None lastdir = b'' for transname, ent in reader.filter(opt.sources, wantrecurse=wantrecurse_during): (dir, file) = os.path.split(ent.name) exists = (ent.flags & index.IX_EXISTS) hashvalid = already_saved(ent) wasmissing = ent.sha_missing() oldsize = ent.size if opt.verbose: if not exists: status = 'D' elif not hashvalid: if ent.sha == index.EMPTY_SHA: status = 'A' else: status = 'M' else: status = ' ' if opt.verbose >= 2: log('%s %-70s\n' % (status, path_msg(ent.name))) elif not stat.S_ISDIR(ent.mode) and lastdir != dir: if not lastdir.startswith(dir): log('%s %-70s\n' % (status, path_msg(os.path.join(dir, b'')))) lastdir = dir if opt.progress: progress_report(0) fcount += 1 if not exists: continue if opt.smaller and ent.size >= opt.smaller: if exists and not hashvalid: if opt.verbose: log('skipping large file "%s"\n' % path_msg(ent.name)) lastskip_name = ent.name continue assert(dir.startswith(b'/')) if opt.strip: dirp = stripped_path_components(dir, opt.sources) elif opt.strip_path: dirp = stripped_path_components(dir, [opt.strip_path]) elif opt.grafts: dirp = grafted_path_components(opt.grafts, dir) else: dirp = path_components(dir) # At this point, dirp contains a representation of the archive # path that looks like [(archive_dir_name, real_fs_path), ...]. # So given "bup save ... --strip /foo/bar /foo/bar/baz", dirp # might look like this at some point: # [('', '/foo/bar'), ('baz', '/foo/bar/baz'), ...]. # This dual representation supports stripping/grafting, where the # archive path may not have a direct correspondence with the # filesystem. The root directory is represented by an initial # component named '', and any component that doesn't have a # corresponding filesystem directory (due to grafting, for # example) will have a real_fs_path of None, i.e. [('', None), # ...]. 
if first_root == None: first_root = dirp[0] elif first_root != dirp[0]: root_collision = True # If switching to a new sub-tree, finish the current sub-tree. while [x.name for x in stack] > [x[0] for x in dirp]: _pop() # If switching to a new sub-tree, start a new sub-tree. for path_component in dirp[len(stack):]: dir_name, fs_path = path_component # Not indexed, so just grab the FS metadata or use empty metadata. try: meta = metadata.from_path(fs_path, normalized=True) \ if fs_path else metadata.Metadata() except (OSError, IOError) as e: add_error(e) lastskip_name = dir_name meta = metadata.Metadata() _push(dir_name, meta) if not file: if len(stack) == 1: continue # We're at the top level -- keep the current root dir # Since there's no filename, this is a subdir -- finish it. oldtree = already_saved(ent) # may be None newtree = _pop(force_tree = oldtree) if not oldtree: if lastskip_name and lastskip_name.startswith(ent.name): ent.invalidate() else: ent.validate(GIT_MODE_TREE, newtree) ent.repack() if exists and wasmissing: _nonlocal['count'] += oldsize continue # it's not a directory if hashvalid: meta = msr.metadata_at(ent.meta_ofs) meta.hardlink_target = find_hardlink_target(hlink_db, ent) # Restore the times that were cleared to 0 in the metastore. (meta.atime, meta.mtime, meta.ctime) = (ent.atime, ent.mtime, ent.ctime) stack[-1].append(file, ent.mode, ent.gitmode, ent.sha, meta) else: id = None hlink = find_hardlink_target(hlink_db, ent) try: meta = metadata.from_path(ent.name, hardlink_target=hlink, normalized=True, after_stat=after_nondir_metadata_stat) except (OSError, IOError) as e: add_error(e) lastskip_name = ent.name continue if stat.S_IFMT(ent.mode) != stat.S_IFMT(meta.mode): # The mode changed since we indexed the file, this is bad. # This can cause two issues: # 1) We e.g. think the file is a regular file, but now it's # something else (a device, socket, FIFO or symlink, etc.) # and _read_ from it when we shouldn't. 
# 2) We then record it as valid, but don't update the index # metadata, and on a subsequent save it has 'hashvalid' # but is recorded as the file type from the index, when # the content is something else ... # Avoid all of these consistency issues by just skipping such # things - it really ought to not happen anyway. add_error("%s: mode changed since indexing, skipping." % path_msg(ent.name)) lastskip_name = ent.name continue if stat.S_ISREG(ent.mode): try: # If the file changes while we're reading it, then our reading # may stop at some point, but the stat() above may have gotten # a different size already. Recalculate the meta size so that # the repository records the accurate size in the metadata, even # if the other stat() data might be slightly older than the file # content (which we can't fix, this is inherently racy, but we # can prevent the size mismatch.) meta.size = 0 def new_blob(data): meta.size += len(data) return w.new_blob(data) before_saving_regular_file(ent.name) with hashsplit.open_noatime(ent.name) as f: (mode, id) = hashsplit.split_to_blob_or_tree( new_blob, w.new_tree, [f], keep_boundaries=False) except (IOError, OSError) as e: add_error('%s: %s' % (ent.name, e)) lastskip_name = ent.name elif stat.S_ISDIR(ent.mode): assert(0) # handled above elif stat.S_ISLNK(ent.mode): mode, id = (GIT_MODE_SYMLINK, w.new_blob(meta.symlink_target)) else: # Everything else should be fully described by its # metadata, so just record an empty blob, so the paths # in the tree and .bupm will match up. (mode, id) = (GIT_MODE_FILE, w.new_blob(b'')) if id: ent.validate(mode, id) ent.repack() stack[-1].append(file, ent.mode, ent.gitmode, id, meta) if exists and wasmissing: _nonlocal['count'] += oldsize _nonlocal['subcount'] = 0 if opt.progress: pct = total and _nonlocal['count']*100.0/total or 100 progress('Saving: %.2f%% (%d/%dk, %d/%d files), done. 
\n' % (pct, _nonlocal['count']/1024, total/1024, fcount, ftotal)) while len(stack) > 1: # _pop() all the parts above the root _pop() # Finish the root directory. # When there's a collision, use empty metadata for the root. tree = _pop(dir_metadata = metadata.Metadata() if root_collision else None) return tree def commit_tree(tree, parent, date, argv, writer): # Strip b prefix from python 3 bytes reprs to preserve previous format msgcmd = b'[%s]' % b', '.join([repr(argv_bytes(x))[1:].encode('ascii') for x in argv]) msg = b'bup save\n\nGenerated by command:\n%s\n' % msgcmd userline = (b'%s <%s@%s>' % (userfullname(), username(), hostname())) return writer.new_commit(tree, parent, userline, date, None, userline, date, None, msg) def main(argv): handle_ctrl_c() opt = opts_from_cmdline(argv) client.bwlimit = opt.bwlimit git.check_repo_or_die() remote_dest = opt.remote or opt.is_reverse if not remote_dest: repo = git cli = nullcontext() else: try: cli = repo = client.Client(opt.remote) except client.ClientError as e: log('error: %s' % e) sys.exit(1) # cli creation must be last nontrivial command in each if clause above with cli: if not remote_dest: w = git.PackWriter(compression_level=opt.compress) else: w = cli.new_packwriter(compression_level=opt.compress) with w: sys.stdout.flush() out = byte_stream(sys.stdout) if opt.name: refname = b'refs/heads/%s' % opt.name parent = repo.read_ref(refname) else: refname = parent = None indexfile = opt.indexfile or git.repo(b'bupindex') try: msr = index.MetaStoreReader(indexfile + b'.meta') except IOError as ex: if ex.errno != ENOENT: raise log('error: cannot access %r; have you run bup index?' 
% path_msg(indexfile)) sys.exit(1) with msr, \ hlinkdb.HLinkDB(indexfile + b'.hlink') as hlink_db, \ index.Reader(indexfile) as reader: tree = save_tree(opt, reader, hlink_db, msr, w) if opt.tree: out.write(hexlify(tree)) out.write(b'\n') if opt.commit or opt.name: commit = commit_tree(tree, parent, opt.date, argv, w) if opt.commit: out.write(hexlify(commit)) out.write(b'\n') # packwriter must be closed before we can update the ref if opt.name: repo.update_ref(refname, commit, parent) if saved_errors: log('WARNING: %d errors encountered while saving.\n' % len(saved_errors)) sys.exit(1) bup-0.33.3/lib/bup/cmd/server.py000066400000000000000000000235131454333004200163470ustar00rootroot00000000000000 from __future__ import absolute_import from binascii import hexlify, unhexlify import os, struct, subprocess, sys from bup import options, git, vfs, vint from bup.compat import environ, hexstr, pending_raise from bup.helpers \ import (Conn, debug1, debug2, finalized, linereader, lines_until_sentinel, log) from bup.io import byte_stream, path_msg from bup.repo import LocalRepo suspended_w = None dumb_server_mode = False repo = None def do_help(conn, junk): conn.write(b'Commands:\n %s\n' % b'\n '.join(sorted(commands))) conn.ok() def _set_mode(): global dumb_server_mode dumb_server_mode = os.path.exists(git.repo(b'bup-dumb-server')) debug1('bup server: serving in %s mode\n' % (dumb_server_mode and 'dumb' or 'smart')) def _init_session(reinit_with_new_repopath=None): global repo if reinit_with_new_repopath is None and git.repodir: if not repo: repo = LocalRepo() return git.check_repo_or_die(reinit_with_new_repopath) if repo: repo.close() repo = LocalRepo() # OK. we now know the path is a proper repository. Record this path in the # environment so that subprocesses inherit it and know where to operate. 
environ[b'BUP_DIR'] = git.repodir debug1('bup server: bupdir is %s\n' % path_msg(git.repodir)) _set_mode() def init_dir(conn, arg): git.init_repo(arg) debug1('bup server: bupdir initialized: %s\n' % path_msg(git.repodir)) _init_session(arg) conn.ok() def set_dir(conn, arg): _init_session(arg) conn.ok() def list_indexes(conn, junk): _init_session() suffix = b'' if dumb_server_mode: suffix = b' load' for f in os.listdir(git.repo(b'objects/pack')): if f.endswith(b'.idx'): conn.write(b'%s%s\n' % (f, suffix)) conn.ok() def send_index(conn, name): _init_session() assert name.find(b'/') < 0 assert name.endswith(b'.idx') with git.open_idx(git.repo(b'objects/pack/%s' % name)) as idx: conn.write(struct.pack('!I', len(idx.map))) conn.write(idx.map) conn.ok() def receive_objects_v2(conn, junk): global suspended_w _init_session() if suspended_w: w = suspended_w suspended_w = None elif dumb_server_mode: w = git.PackWriter(objcache_maker=None) else: w = git.PackWriter() try: suggested = set() while 1: ns = conn.read(4) if not ns: w.abort() raise Exception('object read: expected length header, got EOF') n = struct.unpack('!I', ns)[0] #debug2('expecting %d bytes\n' % n) if not n: debug1('bup server: received %d object%s.\n' % (w.count, w.count!=1 and "s" or '')) fullpath = w.close(run_midx=not dumb_server_mode) w = None if fullpath: dir, name = os.path.split(fullpath) conn.write(b'%s.idx\n' % name) conn.ok() return elif n == 0xffffffff: debug2('bup server: receive-objects suspending\n') conn.ok() suspended_w = w w = None return shar = conn.read(20) crcr = struct.unpack('!I', conn.read(4))[0] n -= 20 + 4 buf = conn.read(n) # object sizes in bup are reasonably small #debug2('read %d bytes\n' % n) _check(w, n, len(buf), 'object read: expected %d bytes, got %d\n') if not dumb_server_mode: oldpack = w.exists(shar, want_source=True) if oldpack: assert(not oldpack == True) assert(oldpack.endswith(b'.idx')) (dir,name) = os.path.split(oldpack) if not (name in suggested): debug1("bup server: 
suggesting index %s\n" % git.shorten_hash(name).decode('ascii')) debug1("bup server: because of object %s\n" % hexstr(shar)) conn.write(b'index %s\n' % name) suggested.add(name) continue nw, crc = w._raw_write((buf,), sha=shar) _check(w, crcr, crc, 'object read: expected crc %d, got %d\n') # py2: this clause is unneeded with py3 except BaseException as ex: with pending_raise(ex): if w: w, w_tmp = None, w w_tmp.close() finally: if w: w.close() assert False # should be unreachable def _check(w, expected, actual, msg): if expected != actual: w.abort() raise Exception(msg % (expected, actual)) def read_ref(conn, refname): _init_session() r = git.read_ref(refname) conn.write(b'%s\n' % hexlify(r) if r else b'') conn.ok() def update_ref(conn, refname): _init_session() newval = conn.readline().strip() oldval = conn.readline().strip() git.update_ref(refname, unhexlify(newval), unhexlify(oldval)) conn.ok() def join(conn, id): _init_session() try: for blob in git.cp().join(id): conn.write(struct.pack('!I', len(blob))) conn.write(blob) except KeyError as e: log('server: error: %s\n' % e) conn.write(b'\0\0\0\0') conn.error(e) else: conn.write(b'\0\0\0\0') conn.ok() def cat_batch(conn, dummy): _init_session() cat_pipe = git.cp() # For now, avoid potential deadlock by just reading them all for ref in tuple(lines_until_sentinel(conn, b'\n', Exception)): ref = ref[:-1] it = cat_pipe.get(ref) info = next(it) if not info[0]: conn.write(b'missing\n') continue conn.write(b'%s %s %d\n' % info) for buf in it: conn.write(buf) conn.ok() def refs(conn, args): limit_to_heads, limit_to_tags = args.split() assert limit_to_heads in (b'0', b'1') assert limit_to_tags in (b'0', b'1') limit_to_heads = int(limit_to_heads) limit_to_tags = int(limit_to_tags) _init_session() patterns = tuple(x[:-1] for x in lines_until_sentinel(conn, b'\n', Exception)) for name, oid in git.list_refs(patterns=patterns, limit_to_heads=limit_to_heads, limit_to_tags=limit_to_tags): assert b'\n' not in name conn.write(b'%s 
%s\n' % (hexlify(oid), name)) conn.write(b'\n') conn.ok() def rev_list(conn, _): _init_session() count = conn.readline() if not count: raise Exception('Unexpected EOF while reading rev-list count') assert count == b'\n' count = None fmt = conn.readline() if not fmt: raise Exception('Unexpected EOF while reading rev-list format') fmt = None if fmt == b'\n' else fmt[:-1] refs = tuple(x[:-1] for x in lines_until_sentinel(conn, b'\n', Exception)) args = git.rev_list_invocation(refs, format=fmt) p = subprocess.Popen(args, env=git._gitenv(git.repodir), stdout=subprocess.PIPE) while True: out = p.stdout.read(64 * 1024) if not out: break conn.write(out) conn.write(b'\n') rv = p.wait() # not fatal if rv: msg = 'git rev-list returned error %d' % rv conn.error(msg) raise git.GitError(msg) conn.ok() def resolve(conn, args): _init_session() (flags,) = args.split() flags = int(flags) want_meta = bool(flags & 1) follow = bool(flags & 2) have_parent = bool(flags & 4) parent = vfs.read_resolution(conn) if have_parent else None path = vint.read_bvec(conn) if not len(path): raise Exception('Empty resolve path') try: res = list(vfs.resolve(repo, path, parent=parent, want_meta=want_meta, follow=follow)) except vfs.IOError as ex: res = ex if isinstance(res, vfs.IOError): conn.write(b'\x00') # error vfs.write_ioerror(conn, res) else: conn.write(b'\x01') # success vfs.write_resolution(conn, res) conn.ok() optspec = """ bup server """ commands = { b'quit': None, b'help': do_help, b'init-dir': init_dir, b'set-dir': set_dir, b'list-indexes': list_indexes, b'send-index': send_index, b'receive-objects-v2': receive_objects_v2, b'read-ref': read_ref, b'update-ref': update_ref, b'join': join, b'cat': join, # apocryphal alias b'cat-batch' : cat_batch, b'refs': refs, b'rev-list': rev_list, b'resolve': resolve } def main(argv): global repo, suspended_w o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) if extra: o.fatal('no arguments expected') debug2('bup server: reading from 
stdin.\n') # FIXME: this protocol is totally lame and not at all future-proof. # (Especially since we abort completely as soon as *anything* bad happens) sys.stdout.flush() with Conn(byte_stream(sys.stdin), byte_stream(sys.stdout)) as conn, \ finalized(None, lambda _: repo and repo.close()), \ finalized(None, lambda _: suspended_w and suspended_w.close()): lr = linereader(conn) for _line in lr: line = _line.strip() if not line: continue debug1('bup server: command: %r\n' % line) words = line.split(b' ', 1) cmd = words[0] rest = len(words)>1 and words[1] or b'' if cmd == b'quit': break else: cmd = commands.get(cmd) if cmd: cmd(conn, rest) else: raise Exception('unknown server command: %r\n' % line) debug1('bup server: done\n') bup-0.33.3/lib/bup/cmd/split.py000066400000000000000000000235341454333004200161770ustar00rootroot00000000000000 from __future__ import absolute_import, division, print_function from binascii import hexlify import sys, time from bup import compat, hashsplit, git, options, client from bup.compat import argv_bytes, environ, nullcontext from bup.helpers import (add_error, hostname, log, parse_num, qprogress, reprogress, saved_errors, valid_save_name, parse_date_or_fatal) from bup.io import byte_stream from bup.pwdgrp import userfullname, username optspec = """ bup split [-t] [-c] [-n name] OPTIONS [--git-ids | filenames...] bup split -b OPTIONS [--git-ids | filenames...] bup split --copy OPTIONS [--git-ids | filenames...] bup split --noop [-b|-t] OPTIONS [--git-ids | filenames...] -- Modes: b,blobs output a series of blob ids. Implies --fanout=0. 
class NoOpPackWriter:
    """PackWriter stand-in that computes object ids but stores nothing.

    Used by 'bup split' when nothing should be written to a repository
    (--noop/--copy); new_blob()/new_tree() still return the ids a real
    writer would have produced.  Usable as a context manager; close()
    must run before the object is dropped (enforced by __del__).
    """
    def __init__(self):
        self.closed = False  # set True once close() has run
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        self.close()
    def close(self):
        # Nothing to flush; just record that we finished cleanly.
        self.closed = True
    def __del__(self):
        assert self.closed
    def new_blob(self, content):
        # Same id git would assign to this blob; content is discarded.
        return git.calc_hash(b'blob', content)
    def new_tree(self, shalist):
        # Same id git would assign to this tree; nothing is stored.
        return git.calc_hash(b'tree', git.tree_encode(shalist))
--git-ids") if opt.verbose >= 2: git.verbose = opt.verbose - 1 opt.bench = 1 if opt.max_pack_size: opt.max_pack_size = parse_num(opt.max_pack_size) if opt.max_pack_objects: opt.max_pack_objects = parse_num(opt.max_pack_objects) if opt.fanout: opt.fanout = parse_num(opt.fanout) if opt.bwlimit: opt.bwlimit = parse_num(opt.bwlimit) if opt.date: opt.date = parse_date_or_fatal(opt.date, o.fatal) else: opt.date = time.time() opt.is_reverse = environ.get(b'BUP_SERVER_REVERSE') if opt.is_reverse and opt.remote: o.fatal("don't use -r in reverse mode; it's automatic") if opt.name and not valid_save_name(opt.name): o.fatal("'%r' is not a valid branch name." % opt.name) return opt def split(opt, files, parent, out, pack_writer): # Hack around lack of nonlocal vars in python 2 total_bytes = [0] def prog(filenum, nbytes): total_bytes[0] += nbytes if filenum > 0: qprogress('Splitting: file #%d, %d kbytes\r' % (filenum+1, total_bytes[0] // 1024)) else: qprogress('Splitting: %d kbytes\r' % (total_bytes[0] // 1024)) new_blob = pack_writer.new_blob new_tree = pack_writer.new_tree if opt.blobs: shalist = hashsplit.split_to_blobs(new_blob, files, keep_boundaries=opt.keep_boundaries, progress=prog) for sha, size, level in shalist: out.write(hexlify(sha) + b'\n') reprogress() elif opt.tree or opt.commit or opt.name: if opt.name: # insert dummy_name which may be used as a restore target mode, sha = \ hashsplit.split_to_blob_or_tree(new_blob, new_tree, files, keep_boundaries=opt.keep_boundaries, progress=prog) splitfile_name = git.mangle_name(b'data', hashsplit.GIT_MODE_FILE, mode) shalist = [(mode, splitfile_name, sha)] else: shalist = \ hashsplit.split_to_shalist(new_blob, new_tree, files, keep_boundaries=opt.keep_boundaries, progress=prog) tree = new_tree(shalist) else: last = 0 it = hashsplit.hashsplit_iter(files, keep_boundaries=opt.keep_boundaries, progress=prog) for blob, level in it: hashsplit.total_split += len(blob) if opt.copy: sys.stdout.write(str(blob)) megs = 
def main(argv):
    """Entry point for 'bup split'.

    Chooses the input sources (files, stdin, or git object ids), the
    destination (local repo, remote client, or a no-op writer for
    --noop/--copy), runs split(), and updates the target ref if -n was
    given.  Exits non-zero if any errors were recorded.
    """
    opt = opts_from_cmdline(argv)
    if opt.verbose >= 2:
        git.verbose = opt.verbose - 1
    if opt.fanout:
        hashsplit.fanout = opt.fanout
    if opt.blobs:
        hashsplit.fanout = 0
    if opt.bwlimit:
        client.bwlimit = opt.bwlimit
    start_time = time.time()
    sys.stdout.flush()
    out = byte_stream(sys.stdout)
    stdin = byte_stream(sys.stdin)
    if opt.git_ids:
        # the input is actually a series of git object ids that we should retrieve
        # and split.
        #
        # This is a bit messy, but basically it converts from a series of
        # CatPipe.get() iterators into a series of file-type objects.
        # It would be less ugly if either CatPipe.get() returned a file-like object
        # (not very efficient), or split_to_shalist() expected an iterator instead
        # of a file.
        cp = git.CatPipe()
        class IterToFile:
            # Minimal read()-only file facade over a chunk iterator.
            def __init__(self, it):
                self.it = iter(it)
            def read(self, size):
                # size is ignored; returns the next chunk (or b'' at EOF).
                v = next(self.it, None)
                return v or b''
        def read_ids():
            while 1:
                line = stdin.readline()
                if not line:
                    break
                if line:
                    line = line.strip()
                try:
                    it = cp.get(line.strip())
                    next(it, None)  # skip the file info
                except KeyError as e:
                    add_error('error: %s' % e)
                    continue
                yield IterToFile(it)
        files = read_ids()
    else:
        # the input either comes from a series of files or from stdin.
        if opt.sources:
            files = (open(argv_bytes(fn), 'rb') for fn in opt.sources)
        else:
            files = [stdin]
    writing = not (opt.noop or opt.copy)
    remote_dest = opt.remote or opt.is_reverse
    if writing:
        git.check_repo_or_die()
    if remote_dest and writing:
        cli = repo = client.Client(opt.remote)
    else:
        cli = nullcontext()
        repo = git
    # cli creation must be last nontrivial command in each if clause above
    with cli:
        if opt.name and writing:
            refname = opt.name and b'refs/heads/%s' % opt.name
            oldref = repo.read_ref(refname)
        else:
            refname = oldref = None
        if not writing:
            pack_writer = NoOpPackWriter()
        elif not remote_dest:
            pack_writer = git.PackWriter(compression_level=opt.compress,
                                         max_pack_size=opt.max_pack_size,
                                         max_pack_objects=opt.max_pack_objects)
        else:
            pack_writer = cli.new_packwriter(compression_level=opt.compress,
                                             max_pack_size=opt.max_pack_size,
                                             max_pack_objects=opt.max_pack_objects)
        # packwriter creation must be last command in each if clause above
        with pack_writer:
            commit = split(opt, files, oldref, out, pack_writer)
        # pack_writer must be closed before we can update the ref
        if refname:
            repo.update_ref(refname, commit, oldref)
    secs = time.time() - start_time
    size = hashsplit.total_split
    if opt.bench:
        log('bup: %.2f kbytes in %.2f secs = %.2f kbytes/sec\n'
            % (size / 1024, secs, size / 1024 / secs))
    if saved_errors:
        log('WARNING: %d errors encountered while saving.\n' % len(saved_errors))
        sys.exit(1)
optspec = """ bup tag bup tag [-f] bup tag [-f] -d -- d,delete= Delete a tag f,force Overwrite existing tag, or ignore missing tag when deleting """ def main(argv): o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) git.check_repo_or_die() tags = [t for sublist in git.tags().values() for t in sublist] if opt.delete: # git.delete_ref() doesn't complain if a ref doesn't exist. We # could implement this verification but we'd need to read in the # contents of the tag file and pass the hash, and we already know # about the tag's existance via "tags". tag_name = argv_bytes(opt.delete) if not opt.force and tag_name not in tags: log("error: tag '%s' doesn't exist\n" % path_msg(tag_name)) sys.exit(1) tag_file = b'refs/tags/%s' % tag_name git.delete_ref(tag_file) sys.exit(0) if not extra: for t in tags: sys.stdout.flush() out = byte_stream(sys.stdout) out.write(t) out.write(b'\n') sys.exit(0) elif len(extra) != 2: o.fatal('expected commit ref and hash') tag_name, commit = map(argv_bytes, extra[:2]) if not tag_name: o.fatal("tag name must not be empty.") debug1("args: tag name = %s; commit = %s\n" % (path_msg(tag_name), commit.decode('ascii'))) if tag_name in tags and not opt.force: log("bup: error: tag '%s' already exists\n" % path_msg(tag_name)) sys.exit(1) if tag_name.startswith(b'.'): o.fatal("'%s' is not a valid tag name." 
% path_msg(tag_name)) try: hash = git.rev_parse(commit) except git.GitError as e: log("bup: error: %s" % e) sys.exit(2) if not hash: log("bup: error: commit %s not found.\n" % commit.decode('ascii')) sys.exit(2) with git.PackIdxList(git.repo(b'objects/pack')) as pL: if not pL.exists(hash): log("bup: error: commit %s not found.\n" % commit.decode('ascii')) sys.exit(2) git.update_ref(b'refs/tags/' + tag_name, hash, None, force=True) bup-0.33.3/lib/bup/cmd/tick.py000066400000000000000000000005141454333004200157670ustar00rootroot00000000000000 from __future__ import absolute_import import time from bup import options optspec = """ bup tick """ def main(argv): o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) if extra: o.fatal("no arguments expected") t = time.time() tleft = 1 - (t - int(t)) time.sleep(tleft) bup-0.33.3/lib/bup/cmd/version.py000066400000000000000000000015131454333004200165220ustar00rootroot00000000000000 from __future__ import absolute_import, print_function import re, sys from bup import options, version from bup.io import byte_stream version_rx = re.compile(r'^[0-9]+\.[0-9]+(\.[0-9]+)?(-[0-9]+-g[0-9abcdef]+)?$') optspec = """ bup version [--date|--commit] -- date display the date this version of bup was created commit display the git commit id of this version of bup """ def main(argv): o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) total = (opt.date or 0) + (opt.commit or 0) if total > 1: o.fatal('at most one option expected') sys.stdout.flush() out = byte_stream(sys.stdout) if opt.date: out.write(version.date.split(b' ')[0] + b'\n') elif opt.commit: out.write(version.commit + b'\n') else: out.write(version.version + b'\n') bup-0.33.3/lib/bup/cmd/web.py000066400000000000000000000247511454333004200156230ustar00rootroot00000000000000 from __future__ import absolute_import, print_function from collections import namedtuple import mimetypes, os, posixpath, signal, stat, sys, time, webbrowser from binascii 
import hexlify from bup import options, git, vfs from bup.helpers import (chunkyreader, debug1, format_filesize, log, saved_errors) from bup.path import resource_path from bup.repo import LocalRepo from bup.io import path_msg try: from tornado import gen from tornado.httpserver import HTTPServer from tornado.ioloop import IOLoop from tornado.netutil import bind_unix_socket import tornado.web except ImportError: log('error: cannot find the python "tornado" module; please install it\n') sys.exit(1) # FIXME: right now the way hidden files are handled causes every # directory to be traversed twice. def http_date_from_utc_ns(utc_ns): return time.strftime('%a, %d %b %Y %H:%M:%S', time.gmtime(utc_ns / 10**9)) def _compute_breadcrumbs(path, show_hidden=False): """Returns a list of breadcrumb objects for a path.""" breadcrumbs = [] breadcrumbs.append((b'[root]', b'/')) path_parts = path.split(b'/')[1:-1] full_path = b'/' for part in path_parts: full_path += part + b"/" url_append = b"" if show_hidden: url_append = b'?hidden=1' breadcrumbs.append((part, full_path+url_append)) return breadcrumbs def _contains_hidden_files(repo, dir_item): """Return true if the directory contains items with names other than '.' and '..' that begin with '.' """ for name, item in vfs.contents(repo, dir_item, want_meta=False): if name in (b'.', b'..'): continue if name.startswith(b'.'): return True return False def _dir_contents(repo, resolution, show_hidden=False): """Yield the display information for the contents of dir_item.""" url_query = b'?hidden=1' if show_hidden else b'' def display_info(name, item, resolved_item, display_name=None, omitsize=False): global opt # link should be based on fully resolved type to avoid extra # HTTP redirect. 
link = tornado.escape.url_escape(name, plus=False) if stat.S_ISDIR(vfs.item_mode(resolved_item)): link += '/' link = link.encode('ascii') if not omitsize: size = vfs.item_size(repo, item) if opt.human_readable: display_size = format_filesize(size) else: display_size = size else: display_size = None if not display_name: mode = vfs.item_mode(item) if stat.S_ISDIR(mode): display_name = name + b'/' display_size = None elif stat.S_ISLNK(mode): display_name = name + b'@' display_size = None else: display_name = name return display_name, link + url_query, display_size dir_item = resolution[-1][1] for name, item in vfs.contents(repo, dir_item): if not show_hidden: if (name not in (b'.', b'..')) and name.startswith(b'.'): continue if name == b'.': parent_item = resolution[-2][1] if len(resolution) > 1 else dir_item yield display_info(b'..', parent_item, parent_item, b'..', omitsize=True) continue res_item = vfs.ensure_item_has_metadata(repo, item, include_size=True) yield display_info(name, item, res_item) class BupRequestHandler(tornado.web.RequestHandler): def initialize(self, repo=None): self.repo = repo def decode_argument(self, value, name=None): if name == 'path': return value return super().decode_argument(value, name) def get(self, path): return self._process_request(path) def head(self, path): return self._process_request(path) def _process_request(self, path): print('Handling request for %s' % path) sys.stdout.flush() # Set want_meta because dir metadata won't be fetched, and if # it's not a dir, then we're going to want the metadata. res = vfs.resolve(self.repo, path, want_meta=True) leaf_name, leaf_item = res[-1] if not leaf_item: self.send_error(404) return mode = vfs.item_mode(leaf_item) if stat.S_ISDIR(mode): self._list_directory(path, res) else: self._get_file(self.repo, path, res) def _list_directory(self, path, resolution): """Helper to produce a directory listing. Return value is either a file object, or None (indicating an error). 
In either case, the headers are sent. """ if not path.endswith(b'/') and len(path) > 0: print('Redirecting from %s to %s' % (path_msg(path), path_msg(path + b'/'))) return self.redirect(path + b'/', permanent=True) hidden_arg = self.request.arguments.get('hidden', [0])[-1] try: show_hidden = int(hidden_arg) except ValueError as e: show_hidden = False self.render( 'list-directory.html', path=path, breadcrumbs=_compute_breadcrumbs(path, show_hidden), files_hidden=_contains_hidden_files(self.repo, resolution[-1][1]), hidden_shown=show_hidden, dir_contents=_dir_contents(self.repo, resolution, show_hidden=show_hidden)) return None @gen.coroutine def _get_file(self, repo, path, resolved): """Process a request on a file. Return value is either a file object, or None (indicating an error). In either case, the headers are sent. """ file_item = resolved[-1][1] file_item = vfs.augment_item_meta(repo, file_item, include_size=True) meta = file_item.meta ctype = self._guess_type(path) self.set_header("Last-Modified", http_date_from_utc_ns(meta.mtime)) self.set_header("Content-Type", ctype) self.set_header("Content-Length", str(meta.size)) assert len(file_item.oid) == 20 self.set_header("Etag", hexlify(file_item.oid)) if self.request.method != 'HEAD': with vfs.fopen(self.repo, file_item) as f: it = chunkyreader(f) for blob in chunkyreader(f): self.write(blob) raise gen.Return() def _guess_type(self, path): """Guess the type of a file. Argument is a PATH (a filename). Return value is a string of the form type/subtype, usable for a MIME Content-type header. The default implementation looks the file's extension up in the table self.extensions_map, using application/octet-stream as a default; however it would be permissible (if slow) to look inside the data to make a better guess. 
""" base, ext = posixpath.splitext(path) if ext in self.extensions_map: return self.extensions_map[ext] ext = ext.lower() if ext in self.extensions_map: return self.extensions_map[ext] else: return self.extensions_map[''] if not mimetypes.inited: mimetypes.init() # try to read system mime.types extensions_map = mimetypes.types_map.copy() extensions_map.update({ '': 'text/plain', # Default '.py': 'text/plain', '.c': 'text/plain', '.h': 'text/plain', }) io_loop = None def handle_sigterm(signum, frame): global io_loop debug1('\nbup-web: signal %d received\n' % signum) log('Shutdown requested\n') if not io_loop: sys.exit(0) io_loop.stop() optspec = """ bup web [[hostname]:port] bup web unix://path -- human-readable display human readable file sizes (i.e. 3.9K, 4.7M) browser show repository in default browser (incompatible with unix://) """ opt = None def main(argv): global opt signal.signal(signal.SIGTERM, handle_sigterm) UnixAddress = namedtuple('UnixAddress', ['path']) InetAddress = namedtuple('InetAddress', ['host', 'port']) o = options.Options(optspec) opt, flags, extra = o.parse_bytes(argv[1:]) if len(extra) > 1: o.fatal("at most one argument expected") if len(extra) == 0: address = InetAddress(host='127.0.0.1', port=8080) else: bind_url = extra[0] if bind_url.startswith('unix://'): address = UnixAddress(path=bind_url[len('unix://'):]) else: addr_parts = extra[0].split(':', 1) if len(addr_parts) == 1: host = '127.0.0.1' port = addr_parts[0] else: host, port = addr_parts try: port = int(port) except (TypeError, ValueError) as ex: o.fatal('port must be an integer, not %r' % port) address = InetAddress(host=host, port=port) git.check_repo_or_die() settings = dict( debug = 1, template_path = resource_path(b'web').decode('utf-8'), static_path = resource_path(b'web/static').decode('utf-8'), ) # Disable buffering on stdout, for debug messages try: sys.stdout._line_buffering = True except AttributeError: sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) with LocalRepo() 
as repo: handlers = [ (r"(?P/.*)", BupRequestHandler, dict(repo=repo))] application = tornado.web.Application(handlers, **settings) http_server = HTTPServer(application) io_loop_pending = IOLoop.instance() if isinstance(address, InetAddress): sockets = tornado.netutil.bind_sockets(address.port, address.host) http_server.add_sockets(sockets) print('Serving HTTP on %s:%d...' % sockets[0].getsockname()[0:2]) if opt.browser: browser_addr = 'http://' + address[0] + ':' + str(address[1]) io_loop_pending.add_callback(lambda : webbrowser.open(browser_addr)) elif isinstance(address, UnixAddress): unix_socket = bind_unix_socket(address.path) http_server.add_socket(unix_socket) print('Serving HTTP on filesystem socket %r' % address.path) else: log('error: unexpected address %r', address) sys.exit(1) io_loop = io_loop_pending io_loop.start() if saved_errors: log('WARNING: %d errors encountered while saving.\n' % len(saved_errors)) sys.exit(1) bup-0.33.3/lib/bup/cmd/xstat.py000066400000000000000000000076021454333004200162050ustar00rootroot00000000000000# Copyright (C) 2010 Rob Browning # # This code is covered under the terms of the GNU Library General # Public License as described in the bup LICENSE file. from __future__ import absolute_import, print_function import errno, sys from bup import metadata, options from bup.compat import argv_bytes from bup.helpers import add_error, parse_timestamp, saved_errors, \ add_error, log from bup.io import byte_stream def parse_timestamp_arg(o, field, value): res = str(value) # Undo autoconversion. try: res = parse_timestamp(res) except ValueError as ex: if ex.args: o.fatal('unable to parse %s resolution "%s" (%s)' % (field, value, ex)) else: o.fatal('unable to parse %s resolution "%s"' % (field, value)) if res != 1 and res % 10: o.fatal('%s resolution "%s" must be a power of 10' % (field, value)) return res optspec = """ bup xstat pathinfo [OPTION ...] 
def main(argv):
    """Entry point for 'bup xstat': print metadata for each path.

    Honors --include-fields/--exclude-fields (a leading
    --include-fields replaces the default field set) and the
    per-timestamp resolution limits.  Exits 1 if any errors were
    recorded, 0 otherwise.
    """
    active_fields = metadata.all_fields
    o = options.Options(optspec)
    (opt, flags, remainder) = o.parse_bytes(argv[1:])
    atime_resolution = parse_timestamp_arg(o, 'atime', opt.atime_resolution)
    mtime_resolution = parse_timestamp_arg(o, 'mtime', opt.mtime_resolution)
    ctime_resolution = parse_timestamp_arg(o, 'ctime', opt.ctime_resolution)
    treat_include_fields_as_definitive = True
    for flag, value in flags:
        if flag == '--exclude-fields':
            exclude_fields = frozenset(value.split(','))
            for f in exclude_fields:
                if not f in metadata.all_fields:
                    o.fatal(f + ' is not a valid field name')
            active_fields = active_fields - exclude_fields
            treat_include_fields_as_definitive = False
        elif flag == '--include-fields':
            include_fields = frozenset(value.split(','))
            for f in include_fields:
                if not f in metadata.all_fields:
                    o.fatal(f + ' is not a valid field name')
            if treat_include_fields_as_definitive:
                # First --include-fields replaces the default set.
                active_fields = include_fields
                treat_include_fields_as_definitive = False
            else:
                active_fields = active_fields | include_fields
    opt.verbose = opt.verbose or 0
    opt.quiet = opt.quiet or 0
    metadata.verbose = opt.verbose - opt.quiet
    sys.stdout.flush()
    out = byte_stream(sys.stdout)
    first_path = True
    for path in remainder:
        path = argv_bytes(path)
        try:
            m = metadata.from_path(path, archive_path = path)
        except (OSError, IOError) as e:
            if e.errno == errno.ENOENT:
                add_error(e)
                continue
            else:
                raise
        if metadata.verbose >= 0:
            if not first_path:
                out.write(b'\n')
            # Truncate with integer floor division, not '/': these
            # timestamps are assumed to be integers (ns scale — TODO
            # confirm against metadata), and float division would lose
            # precision since ns-since-epoch exceeds the 53-bit float
            # mantissa.
            if atime_resolution != 1:
                m.atime = (m.atime // atime_resolution) * atime_resolution
            if mtime_resolution != 1:
                m.mtime = (m.mtime // mtime_resolution) * mtime_resolution
            if ctime_resolution != 1:
                m.ctime = (m.ctime // ctime_resolution) * ctime_resolution
            out.write(metadata.detailed_bytes(m, active_fields))
            out.write(b'\n')
            first_path = False
    if saved_errors:
        log('WARNING: %d errors encountered.\n' % len(saved_errors))
        sys.exit(1)
    else:
        sys.exit(0)
def wrap_main(main):
    """Run main() and exit the process with its return value.

    A SystemExit raised by main() propagates unchanged; a
    KeyboardInterrupt becomes exit status 130.
    """
    try:
        status = main()
    except KeyboardInterrupt:
        sys.exit(130)
    sys.exit(status)
def recursive_dirlist(paths, xdev, bup_dir=None,
                      excluded_paths=None,
                      exclude_rxs=None,
                      xdev_exceptions=frozenset()):
    """Yield (path, lstat) pairs for paths and, recursively, the
    contents of any directories among them.

    Symlinks are reported but never followed.  When xdev is true, the
    descent stays on each top-level path's filesystem (except for
    entries in xdev_exceptions).  Traversal uses fchdir() into each
    directory; the starting directory is restored afterward, and on
    any exception (best-effort) before re-raising.
    """
    with finalized_fd(b'.') as startdir:
        try:
            assert not isinstance(paths, str)
            for path in paths:
                try:
                    pst = xstat.lstat(path)
                    if stat.S_ISLNK(pst.st_mode):
                        # Report the link itself; don't descend through it.
                        yield (path, pst)
                        continue
                except OSError as e:
                    add_error('recursive_dirlist: %s' % e)
                    continue
                try:
                    opened_pfile = finalized_fd(path)
                except OSError as e:
                    add_error(e)
                    continue
                with opened_pfile as pfile:
                    pst = xstat.fstat(pfile)
                    # Record this path's device as the xdev baseline.
                    if xdev:
                        xdev = pst.st_dev
                    else:
                        xdev = None
                    if stat.S_ISDIR(pst.st_mode):
                        os.fchdir(pfile)
                        prepend = os.path.join(path, b'')
                        for i in _recursive_dirlist(prepend=prepend, xdev=xdev,
                                                    bup_dir=bup_dir,
                                                    excluded_paths=excluded_paths,
                                                    exclude_rxs=exclude_rxs,
                                                    xdev_exceptions=xdev_exceptions):
                            yield i
                        # Return to where we started before the next path.
                        os.fchdir(startdir)
                    else:
                        prepend = path
                        yield (prepend, pst)
        except:
            # Best-effort restore of the original cwd, then re-raise.
            try:
                os.fchdir(startdir)
            except:
                pass
            raise
bup.helpers import Nonlocal, log, progress, qprogress from bup.io import path_msg # This garbage collector uses a Bloom filter to track the live objects # during the mark phase. This means that the collection is # probabilistic; it may retain some (known) percentage of garbage, but # it can also work within a reasonable, fixed RAM budget for any # particular percentage and repository size. # # The collection proceeds as follows: # # - Scan all live objects by walking all of the refs, and insert # every hash encountered into a new Bloom "liveness" filter. # Compute the size of the liveness filter based on the total # number of objects in the repository. This is the "mark phase". # # - Clear the data that's dependent on the repository's object set, # i.e. the reflog, the normal Bloom filter, and the midxes. # # - Traverse all of the pack files, consulting the liveness filter # to decide which objects to keep. # # For each pack file, rewrite it iff it probably contains more # than (currently) 10% garbage (computed by an initial traversal # of the packfile in consultation with the liveness filter). To # rewrite, traverse the packfile (again) and write each hash that # tests positive against the liveness filter to a packwriter. # # During the traversal of all of the packfiles, delete redundant, # old packfiles only after the packwriter has finished the pack # that contains all of their live objects. # # The current code unconditionally tracks the set of tree hashes seen # during the mark phase, and skips any that have already been visited. # This should decrease the IO load at the cost of increased RAM use. # FIXME: add a bloom filter tuning parameter? def count_objects(dir, verbosity): # For now we'll just use open_idx(), but we could probably be much # more efficient since all we need is a single integer (the last # fanout entry) from each index. 
def report_live_item(n, total, ref_name, ref_id, item, verbosity):
    """Log progress for one object visited during the gc mark phase.

    n/total drive the scanned-percentage prefix; how much is printed
    (chunk paths, mangled names, trees vs blobs) depends on verbosity.
    """
    status = 'scanned %02.2f%%' % (n * 100.0 / total)
    hex_id = hexstr(ref_id)
    dirslash = b'/' if item.type == b'tree' else b''
    chunk_path = item.chunk_path
    if chunk_path:
        # Chunked-file interiors are only shown at the highest verbosity.
        if verbosity < 4:
            return
        ps = b'/'.join(item.path)
        chunk_ps = b'/'.join(chunk_path)
        log('%s %s:%s/%s%s\n' % (status, hex_id, path_msg(ps),
                                 path_msg(chunk_ps), path_msg(dirslash)))
        return
    # Top commit, for example has none.
    demangled = git.demangle_name(item.path[-1], item.mode)[0] if item.path \
                else None
    # Don't print mangled paths unless the verbosity is over 3.
    if demangled:
        ps = b'/'.join(item.path[:-1] + [demangled])
        if verbosity == 1:
            qprogress('%s %s:%s%s\r' % (status, hex_id, path_msg(ps),
                                        path_msg(dirslash)))
        elif (verbosity > 1 and item.type == b'tree') \
             or (verbosity > 2 and item.type == b'blob'):
            log('%s %s:%s%s\n' % (status, hex_id, path_msg(ps),
                                  path_msg(dirslash)))
    elif verbosity > 3:
        ps = b'/'.join(item.path)
        log('%s %s:%s%s\n' % (status, hex_id, path_msg(ps), path_msg(dirslash)))
*never* written to disk)
    live_objs = bloom.create(bloom_filename, expected=existing_count, k=None)
    # live_objs will hold on to the fd until close or exit
    os.unlink(bloom_filename)
    stop_at, trees_visited = None, None
    if prune_visited_trees:
        # Skip re-walking any tree we've already marked; trades RAM
        # (the visited set) for IO.
        trees_visited = set()
        stop_at = lambda x: unhexlify(x) in trees_visited
    approx_live_count = 0
    for ref_name, ref_id in git.list_refs():
        for item in walk_object(cat_pipe.get, hexlify(ref_id), stop_at=stop_at,
                                include_data=None):
            # FIXME: batch ids
            if verbosity:
                report_live_item(approx_live_count, existing_count,
                                 ref_name, ref_id, item, verbosity)
            if trees_visited is not None and item.type == b'tree':
                trees_visited.add(item.oid)
            if verbosity:
                # Only pay for the extra exists() probe (to keep the
                # approximate live count) when we're reporting progress.
                if not live_objs.exists(item.oid):
                    live_objs.add(item.oid)
                    approx_live_count += 1
            else:
                live_objs.add(item.oid)
    trees_visited = None
    if verbosity:
        log('expecting to retain about %.2f%% unnecessary objects\n'
            % live_objs.pfalse_positive())
    return live_objs


def sweep(live_objects, existing_count, cat_pipe, threshold, compression,
          verbosity):
    """Delete or rewrite packs, preserving only (probably) live objects.

    Packs with no live objects are deleted outright; packs whose live
    fraction exceeds (100 - threshold)% are kept as-is; the rest are
    rewritten so that only objects present in the live_objects bloom
    filter are copied into new packs.  Stale packs are removed only
    after the PackWriter finishes a pack (via on_pack_finish).
    """
    # Traverse all the packs, saving the (probably) live data.
    ns = Nonlocal()
    ns.stale_files = []
    def remove_stale_files(new_pack_prefix):
        # Called by the PackWriter whenever it finishes a pack, and
        # once at the end with None.
        if verbosity and new_pack_prefix:
            log('created ' + path_msg(basename(new_pack_prefix)) + '\n')
        for p in ns.stale_files:
            if new_pack_prefix and p.startswith(new_pack_prefix):
                continue  # Don't remove the new pack file
            if verbosity:
                log('removing ' + path_msg(basename(p)) + '\n')
            os.unlink(p)
        if ns.stale_files:  # So git cat-pipe will close them
            cat_pipe.restart()
        ns.stale_files = []
    writer = git.PackWriter(objcache_maker=None,
                            compression_level=compression,
                            run_midx=False,
                            on_pack_finish=remove_stale_files)
    try:
        # FIXME: sanity check .idx names vs .pack names?
        collect_count = 0
        for idx_name in glob.glob(os.path.join(git.repo(b'objects/pack'),
                                               b'*.idx')):
            if verbosity:
                qprogress('preserving live data (%d%% complete)\r'
                          % ((float(collect_count) / existing_count) * 100))
            with git.open_idx(idx_name) as idx:
                idx_live_count = 0
                for sha in idx:
                    if live_objects.exists(sha):
                        idx_live_count += 1
                collect_count += idx_live_count
                if idx_live_count == 0:
                    if verbosity:
                        log('deleting %s\n'
                            % path_msg(git.repo_rel(basename(idx_name))))
                    ns.stale_files.append(idx_name)
                    ns.stale_files.append(idx_name[:-3] + b'pack')
                    continue
                live_frac = idx_live_count / float(len(idx))
                if live_frac > ((100 - threshold) / 100.0):
                    # NOTE(review): unlike the 'deleting' message above,
                    # the next two log calls interpolate bytes without
                    # path_msg(), so they print b'...' reprs on Python 3
                    # — probably should use path_msg() here too.
                    if verbosity:
                        log('keeping %s (%d%% live)\n'
                            % (git.repo_rel(basename(idx_name)),
                               live_frac * 100))
                    continue
                if verbosity:
                    log('rewriting %s (%.2f%% live)\n'
                        % (basename(idx_name), live_frac * 100))
                for sha in idx:
                    if live_objects.exists(sha):
                        item_it = cat_pipe.get(hexlify(sha))
                        _, typ, _ = next(item_it)
                        writer.just_write(sha, typ, b''.join(item_it))
                ns.stale_files.append(idx_name)
                ns.stale_files.append(idx_name[:-3] + b'pack')
        if verbosity:
            progress('preserving live data (%d%% complete)\n'
                     % ((float(collect_count) / existing_count) * 100))
        # Nothing should have recreated midx/bloom yet.
        pack_dir = git.repo(b'objects/pack')
        assert(not os.path.exists(os.path.join(pack_dir, b'bup.bloom')))
        assert(not glob.glob(os.path.join(pack_dir, b'*.midx')))
    except BaseException as ex:
        with pending_raise(ex):
            writer.abort()
    finally:
        # This will finally run midx.
        writer.close()
    remove_stale_files(None)  # In case we didn't write to the writer.
    if verbosity:
        log('discarded %d%% of objects\n'
            % ((existing_count - count_objects(pack_dir, verbosity))
               / float(existing_count) * 100))


def bup_gc(threshold=10, compression=1, verbosity=0):
    """Garbage collect the default repository.

    Mark live objects via a bloom filter, clear the midx/bloom files
    and the reflog, then sweep the packs (see sweep()).  Exits the
    process with status 1 if a referenced object is missing.
    """
    cat_pipe = git.cp()
    existing_count = count_objects(git.repo(b'objects/pack'), verbosity)
    if verbosity:
        log('found %d objects\n' % existing_count)
    if not existing_count:
        if verbosity:
            log('nothing to collect\n')
    else:
        try:
            live_objects = find_live_objects(existing_count, cat_pipe,
                                             verbosity=verbosity)
        except MissingObject as ex:
            log('bup: missing object %r \n' % hexstr(ex.oid))
            sys.exit(1)
        with live_objects:
            # FIXME: just rename midxes and bloom, and restore them at the end if
            # we didn't change any packs?
            packdir = git.repo(b'objects/pack')
            if verbosity: log('clearing midx files\n')
            midx.clear_midxes(packdir)
            if verbosity: log('clearing bloom filter\n')
            bloom.clear_bloom(packdir)
            if verbosity: log('clearing reflog\n')
            expirelog_cmd = [b'git', b'reflog', b'expire', b'--all', b'--expire=all']
            expirelog = subprocess.Popen(expirelog_cmd, env=git._gitenv())
            git._git_wait(b' '.join(expirelog_cmd), expirelog)
            if verbosity: log('removing unreachable data\n')
            sweep(live_objects, existing_count, cat_pipe,
                  threshold, compression,
                  verbosity)
bup-0.33.3/lib/bup/git.py000066400000000000000000001476561454333004200151000ustar00rootroot00000000000000"""Git interaction library.

bup repositories are in Git format. This library allows us to interact
with the Git data structures.
""" from __future__ import absolute_import, print_function import os, sys, zlib, subprocess, struct, stat, re, glob from array import array from binascii import hexlify, unhexlify from collections import namedtuple from contextlib import ExitStack from itertools import islice from shutil import rmtree from bup import _helpers, hashsplit, path, midx, bloom, xstat from bup.compat import (buffer, byte_int, bytes_from_byte, bytes_from_uint, environ, pending_raise) from bup.io import path_msg from bup.helpers import (Sha1, add_error, chunkyreader, debug1, debug2, exo, fdatasync, finalized, log, merge_dict, merge_iter, mmap_read, mmap_readwrite, nullcontext_if_not, progress, qprogress, stat_if_exists, temp_dir, unlink, utc_offset_str) verbose = 0 repodir = None # The default repository, once initialized _typemap = {b'blob': 3, b'tree': 2, b'commit': 1, b'tag': 4} _typermap = {v: k for k, v in _typemap.items()} _total_searches = 0 _total_steps = 0 class GitError(Exception): pass def _gitenv(repo_dir=None): # This is not always used, i.e. 
sometimes we just use --git-dir if not repo_dir: repo_dir = repo() return merge_dict(environ, {b'GIT_DIR': os.path.abspath(repo_dir)}) def _git_wait(cmd, p): rv = p.wait() if rv != 0: raise GitError('%r returned %d' % (cmd, rv)) def _git_exo(cmd, **kwargs): kwargs['check'] = False result = exo(cmd, **kwargs) _, _, proc = result if proc.returncode != 0: raise GitError('%r returned %d' % (cmd, proc.returncode)) return result def git_config_get(option, repo_dir=None, opttype=None, cfg_file=None): assert not (repo_dir and cfg_file), "repo_dir and cfg_file cannot both be used" cmd = [b'git', b'--git-dir', repo_dir or repo(), b'config', b'--null'] if cfg_file: cmd.extend([b'--file', cfg_file]) if opttype == 'int': cmd.extend([b'--int']) elif opttype == 'bool': cmd.extend([b'--bool']) else: assert opttype is None cmd.extend([b'--get', option]) env=None p = subprocess.Popen(cmd, stdout=subprocess.PIPE, close_fds=True) # with --null, git writes out a trailing \0 after the value r = p.stdout.read()[:-1] rc = p.wait() if rc == 0: if opttype == 'int': return int(r) elif opttype == 'bool': # git converts to 'true' or 'false' return r == b'true' return r if rc != 1: raise GitError('%r returned %d' % (cmd, rc)) return None def parse_tz_offset(s): """UTC offset in seconds.""" tz_off = (int(s[1:3]) * 60 * 60) + (int(s[3:5]) * 60) if bytes_from_byte(s[0]) == b'-': return - tz_off return tz_off def parse_commit_gpgsig(sig): """Return the original signature bytes. i.e. with the "gpgsig " header and the leading space character on each continuation line removed. """ if not sig: return None assert sig.startswith(b'gpgsig ') sig = sig[7:] return sig.replace(b'\n ', b'\n') # FIXME: derived from http://git.rsbx.net/Documents/Git_Data_Formats.txt # Make sure that's authoritative. # See also # https://github.com/git/git/blob/master/Documentation/technical/signature-format.txt # The continuation lines have only one leading space. 
_start_end_char = br'[^ .,:;<>"\'\0\n]' _content_char = br'[^\0\n<>]' _safe_str_rx = br'(?:%s{1,2}|(?:%s%s*%s))' \ % (_start_end_char, _start_end_char, _content_char, _start_end_char) _tz_rx = br'[-+]\d\d[0-5]\d' _parent_rx = br'(?:parent [abcdefABCDEF0123456789]{40}\n)' # Assumes every following line starting with a space is part of the # mergetag. Is there a formal commit blob spec? _mergetag_rx = br'(?:\nmergetag object [abcdefABCDEF0123456789]{40}(?:\n [^\0\n]*)*)' _commit_rx = re.compile(br'''tree (?P[abcdefABCDEF0123456789]{40}) (?P%s*)author (?P%s) <(?P%s)> (?P\d+) (?P%s) committer (?P%s) <(?P%s)> (?P\d+) (?P%s)(?P%s?) (?Pgpgsig .*\n(?: .*\n)*)? (?P(?:.|\n)*)''' % (_parent_rx, _safe_str_rx, _safe_str_rx, _tz_rx, _safe_str_rx, _safe_str_rx, _tz_rx, _mergetag_rx)) _parent_hash_rx = re.compile(br'\s*parent ([abcdefABCDEF0123456789]{40})\s*') # Note that the author_sec and committer_sec values are (UTC) epoch # seconds, and for now the mergetag is not included. CommitInfo = namedtuple('CommitInfo', ['tree', 'parents', 'author_name', 'author_mail', 'author_sec', 'author_offset', 'committer_name', 'committer_mail', 'committer_sec', 'committer_offset', 'gpgsig', 'message']) def parse_commit(content): commit_match = re.match(_commit_rx, content) if not commit_match: raise Exception('cannot parse commit %r' % content) matches = commit_match.groupdict() return CommitInfo(tree=matches['tree'], parents=re.findall(_parent_hash_rx, matches['parents']), author_name=matches['author_name'], author_mail=matches['author_mail'], author_sec=int(matches['asec']), author_offset=parse_tz_offset(matches['atz']), committer_name=matches['committer_name'], committer_mail=matches['committer_mail'], committer_sec=int(matches['csec']), committer_offset=parse_tz_offset(matches['ctz']), gpgsig=parse_commit_gpgsig(matches['gpgsig']), message=matches['message']) def get_cat_data(cat_iterator, expected_type): _, kind, _ = next(cat_iterator) if kind != expected_type: raise Exception('expected 
%r, saw %r' % (expected_type, kind)) return b''.join(cat_iterator) def get_commit_items(id, cp): return parse_commit(get_cat_data(cp.get(id), b'commit')) def _local_git_date_str(epoch_sec): return b'%d %s' % (epoch_sec, utc_offset_str(epoch_sec)) def _git_date_str(epoch_sec, tz_offset_sec): offs = tz_offset_sec // 60 return b'%d %s%02d%02d' \ % (epoch_sec, b'+' if offs >= 0 else b'-', abs(offs) // 60, abs(offs) % 60) def repo(sub = b'', repo_dir=None): """Get the path to the git repository or one of its subdirectories.""" repo_dir = repo_dir or repodir if not repo_dir: raise GitError('You should call check_repo_or_die()') # If there's a .git subdirectory, then the actual repo is in there. gd = os.path.join(repo_dir, b'.git') if os.path.exists(gd): repo_dir = gd return os.path.join(repo_dir, sub) _shorten_hash_rx = \ re.compile(br'([^0-9a-z]|\b)([0-9a-z]{7})[0-9a-z]{33}([^0-9a-z]|\b)') def shorten_hash(s): return _shorten_hash_rx.sub(br'\1\2*\3', s) def repo_rel(path): full = os.path.abspath(path) fullrepo = os.path.abspath(repo(b'')) if not fullrepo.endswith(b'/'): fullrepo += b'/' if full.startswith(fullrepo): path = full[len(fullrepo):] if path.startswith(b'index-cache/'): path = path[len(b'index-cache/'):] return shorten_hash(path) def auto_midx(objdir): args = [path.exe(), b'midx', b'--auto', b'--dir', objdir] try: rv = subprocess.call(args, stdout=open(os.devnull, 'w')) except OSError as e: # make sure 'args' gets printed to help with debugging add_error('%r: exception: %s' % (args, e)) raise if rv: add_error('%r: returned %d' % (args, rv)) args = [path.exe(), b'bloom', b'--dir', objdir] try: rv = subprocess.call(args, stdout=open(os.devnull, 'w')) except OSError as e: # make sure 'args' gets printed to help with debugging add_error('%r: exception: %s' % (args, e)) raise if rv: add_error('%r: returned %d' % (args, rv)) def mangle_name(name, mode, gitmode): """Mangle a file name to present an abstract name for segmented files. 
Mangled file names will have the ".bup" extension added to them. If a file's name already ends with ".bup", a ".bupl" extension is added to disambiguate normal files from segmented ones. """ if stat.S_ISREG(mode) and not stat.S_ISREG(gitmode): assert(stat.S_ISDIR(gitmode)) return name + b'.bup' elif name.endswith(b'.bup') or name[:-1].endswith(b'.bup'): return name + b'.bupl' else: return name (BUP_NORMAL, BUP_CHUNKED) = (0,1) def demangle_name(name, mode): """Remove name mangling from a file name, if necessary. The return value is a tuple (demangled_filename,mode), where mode is one of the following: * BUP_NORMAL : files that should be read as-is from the repository * BUP_CHUNKED : files that were chunked and need to be reassembled For more information on the name mangling algorithm, see mangle_name() """ if name.endswith(b'.bupl'): return (name[:-5], BUP_NORMAL) elif name.endswith(b'.bup'): return (name[:-4], BUP_CHUNKED) elif name.endswith(b'.bupm'): return (name[:-5], BUP_CHUNKED if stat.S_ISDIR(mode) else BUP_NORMAL) return (name, BUP_NORMAL) def calc_hash(type, content): """Calculate some content's hash in the Git fashion.""" header = b'%s %d\0' % (type, len(content)) sum = Sha1(header) sum.update(content) return sum.digest() def shalist_item_sort_key(ent): (mode, name, id) = ent assert(mode+0 == mode) if stat.S_ISDIR(mode): return name + b'/' else: return name def tree_encode(shalist): """Generate a git tree object from (mode,name,hash) tuples.""" shalist = sorted(shalist, key = shalist_item_sort_key) l = [] for (mode,name,bin) in shalist: assert(mode) assert(mode+0 == mode) assert(name) assert(len(bin) == 20) s = b'%o %s\0%s' % (mode,name,bin) assert s[0] != b'0' # 0-padded octal is not acceptable in a git tree l.append(s) return b''.join(l) def tree_decode(buf): """Generate a list of (mode,name,hash) from the git tree object in buf.""" ofs = 0 while ofs < len(buf): z = buf.find(b'\0', ofs) assert(z > ofs) spl = buf[ofs:z].split(b' ', 1) assert(len(spl) == 
2) mode,name = spl sha = buf[z+1:z+1+20] ofs = z+1+20 yield (int(mode, 8), name, sha) def _encode_packobj(type, content, compression_level=1): if compression_level not in (0, 1, 2, 3, 4, 5, 6, 7, 8, 9): raise ValueError('invalid compression level %s' % compression_level) szout = b'' sz = len(content) szbits = (sz & 0x0f) | (_typemap[type]<<4) sz >>= 4 while 1: if sz: szbits |= 0x80 szout += bytes_from_uint(szbits) if not sz: break szbits = sz & 0x7f sz >>= 7 z = zlib.compressobj(compression_level) yield szout yield z.compress(content) yield z.flush() def _decode_packobj(buf): assert(buf) c = byte_int(buf[0]) type = _typermap[(c & 0x70) >> 4] sz = c & 0x0f shift = 4 i = 0 while c & 0x80: i += 1 c = byte_int(buf[i]) sz |= (c & 0x7f) << shift shift += 7 if not (c & 0x80): break return (type, zlib.decompress(buf[i+1:])) class PackIdx(object): def find_offset(self, hash): """Get the offset of an object inside the index file.""" idx = self._idx_from_hash(hash) if idx != None: return self._ofs_from_idx(idx) return None def exists(self, hash, want_source=False): """Return nonempty if the object exists in this index.""" if hash and (self._idx_from_hash(hash) != None): return want_source and os.path.basename(self.name) or True return None def _idx_from_hash(self, hash): global _total_searches, _total_steps _total_searches += 1 assert(len(hash) == 20) b1 = byte_int(hash[0]) start = self.fanout[b1-1] # range -1..254 end = self.fanout[b1] # range 0..255 want = hash _total_steps += 1 # lookup table is a step while start < end: _total_steps += 1 mid = start + (end - start) // 2 v = self._idx_to_hash(mid) if v < want: start = mid+1 elif v > want: end = mid else: # got it! 
return mid return None class PackIdxV1(PackIdx): """Object representation of a Git pack index (version 1) file.""" def __init__(self, filename, f): super().__init__() self.closed = False self.name = filename self.idxnames = [self.name] self.map = mmap_read(f) # Min size for 'L' is 4, which is sufficient for struct's '!I' self.fanout = array('L', struct.unpack('!256I', self.map)) self.fanout.append(0) # entry "-1" self.nsha = self.fanout[255] self.sha_ofs = 256 * 4 # Avoid slicing shatable for individual hashes (very high overhead) self.shatable = buffer(self.map, self.sha_ofs, self.nsha * 24) def __enter__(self): return self def __exit__(self, type, value, traceback): with pending_raise(value, rethrow=False): self.close() def __len__(self): return int(self.nsha) # int() from long for python 2 def _ofs_from_idx(self, idx): if idx >= self.nsha or idx < 0: raise IndexError('invalid pack index index %d' % idx) ofs = self.sha_ofs + idx * 24 return struct.unpack_from('!I', self.map, offset=ofs)[0] def _idx_to_hash(self, idx): if idx >= self.nsha or idx < 0: raise IndexError('invalid pack index index %d' % idx) ofs = self.sha_ofs + idx * 24 + 4 return self.map[ofs : ofs + 20] def __iter__(self): start = self.sha_ofs + 4 for ofs in range(start, start + 24 * self.nsha, 24): yield self.map[ofs : ofs + 20] def close(self): self.closed = True if self.map is not None: self.shatable = None self.map.close() self.map = None def __del__(self): assert self.closed class PackIdxV2(PackIdx): """Object representation of a Git pack index (version 2) file.""" def __init__(self, filename, f): super().__init__() self.closed = False self.name = filename self.idxnames = [self.name] self.map = mmap_read(f) assert self.map[0:8] == b'\377tOc\0\0\0\2' # Min size for 'L' is 4, which is sufficient for struct's '!I' self.fanout = array('L', struct.unpack_from('!256I', self.map, offset=8)) self.fanout.append(0) self.nsha = self.fanout[255] self.sha_ofs = 8 + 256*4 self.ofstable_ofs = self.sha_ofs + 
self.nsha * 20 + self.nsha * 4 self.ofs64table_ofs = self.ofstable_ofs + self.nsha * 4 # Avoid slicing this for individual hashes (very high overhead) self.shatable = buffer(self.map, self.sha_ofs, self.nsha*20) def __enter__(self): return self def __exit__(self, type, value, traceback): with pending_raise(value, rethrow=False): self.close() def __len__(self): return int(self.nsha) # int() from long for python 2 def _ofs_from_idx(self, idx): if idx >= self.nsha or idx < 0: raise IndexError('invalid pack index index %d' % idx) ofs_ofs = self.ofstable_ofs + idx * 4 ofs = struct.unpack_from('!I', self.map, offset=ofs_ofs)[0] if ofs & 0x80000000: idx64 = ofs & 0x7fffffff ofs64_ofs = self.ofs64table_ofs + idx64 * 8 ofs = struct.unpack_from('!Q', self.map, offset=ofs64_ofs)[0] return ofs def _idx_to_hash(self, idx): if idx >= self.nsha or idx < 0: raise IndexError('invalid pack index index %d' % idx) ofs = self.sha_ofs + idx * 20 return self.map[ofs : ofs + 20] def __iter__(self): start = self.sha_ofs for ofs in range(start, start + 20 * self.nsha, 20): yield self.map[ofs : ofs + 20] def close(self): self.closed = True if self.map is not None: self.shatable = None self.map.close() self.map = None def __del__(self): assert self.closed _mpi_count = 0 class PackIdxList: def __init__(self, dir, ignore_midx=False): global _mpi_count # Q: was this also intended to prevent opening multiple repos? 
assert(_mpi_count == 0) # these things suck tons of VM; don't waste it _mpi_count += 1 self.open = True self.dir = dir self.also = set() self.packs = [] self.do_bloom = False self.bloom = None self.ignore_midx = ignore_midx try: self.refresh() except BaseException as ex: with pending_raise(ex): self.close() def close(self): global _mpi_count if not self.open: assert _mpi_count == 0 return _mpi_count -= 1 assert _mpi_count == 0 self.also = None self.bloom, bloom = None, self.bloom self.packs, packs = None, self.packs self.open = False with ExitStack() as stack: for pack in packs: stack.enter_context(pack) if bloom: bloom.close() def __enter__(self): return self def __exit__(self, type, value, traceback): with pending_raise(value, rethrow=False): self.close() def __del__(self): assert not self.open def __iter__(self): return iter(idxmerge(self.packs)) def __len__(self): return sum(len(pack) for pack in self.packs) def exists(self, hash, want_source=False): """Return nonempty if the object exists in the index files.""" global _total_searches _total_searches += 1 if hash in self.also: return True if self.do_bloom and self.bloom: if self.bloom.exists(hash): self.do_bloom = False else: _total_searches -= 1 # was counted by bloom return None for i in range(len(self.packs)): p = self.packs[i] _total_searches -= 1 # will be incremented by sub-pack ix = p.exists(hash, want_source=want_source) if ix: # reorder so most recently used packs are searched first self.packs = [p] + self.packs[:i] + self.packs[i+1:] return ix self.do_bloom = True return None def refresh(self, skip_midx = False): """Refresh the index list. This method verifies if .midx files were superseded (e.g. all of its contents are in another, bigger .midx file) and removes the superseded files. If skip_midx is True, all work on .midx files will be skipped and .midx files will be removed from the list. The instance variable 'ignore_midx' can force this function to always act as if skip_midx was True. 
""" if self.bloom is not None: self.bloom.close() self.bloom = None # Always reopen the bloom as it may have been relaced self.do_bloom = False skip_midx = skip_midx or self.ignore_midx d = dict((p.name, p) for p in self.packs if not skip_midx or not isinstance(p, midx.PackMidx)) if os.path.exists(self.dir): if not skip_midx: midxl = [] midxes = set(glob.glob(os.path.join(self.dir, b'*.midx'))) # remove any *.midx files from our list that no longer exist for ix in list(d.values()): if not isinstance(ix, midx.PackMidx): continue if ix.name in midxes: continue # remove the midx del d[ix.name] ix.close() self.packs.remove(ix) for ix in self.packs: if isinstance(ix, midx.PackMidx): for name in ix.idxnames: d[os.path.join(self.dir, name)] = ix for full in midxes: if not d.get(full): mx = midx.PackMidx(full) (mxd, mxf) = os.path.split(mx.name) broken = False for n in mx.idxnames: if not os.path.exists(os.path.join(mxd, n)): log(('warning: index %s missing\n' ' used by %s\n') % (path_msg(n), path_msg(mxf))) broken = True if broken: mx.close() unlink(full) else: midxl.append(mx) midxl.sort(key=lambda ix: (-len(ix), -xstat.stat(ix.name).st_mtime)) for ix in midxl: any_needed = False for sub in ix.idxnames: found = d.get(os.path.join(self.dir, sub)) if not found or isinstance(found, PackIdx): # doesn't exist, or exists but not in a midx any_needed = True break if any_needed: d[ix.name] = ix for name in ix.idxnames: d[os.path.join(self.dir, name)] = ix elif not ix.force_keep: debug1('midx: removing redundant: %s\n' % path_msg(os.path.basename(ix.name))) ix.close() unlink(ix.name) for full in glob.glob(os.path.join(self.dir, b'*.idx')): if not d.get(full): try: ix = open_idx(full) except GitError as e: add_error(e) continue d[full] = ix bfull = os.path.join(self.dir, b'bup.bloom') new_packs = set(d.values()) for p in self.packs: if not p in new_packs: p.close() new_packs = list(new_packs) new_packs.sort(reverse=True, key=lambda x: len(x)) self.packs = new_packs if self.bloom 
is None and os.path.exists(bfull): self.bloom = bloom.ShaBloom(bfull) try: if self.bloom and self.bloom.valid() and len(self.bloom) >= len(self): self.do_bloom = True else: if self.bloom: self.bloom, bloom_tmp = None, self.bloom bloom_tmp.close() except BaseException as ex: with pending_raise(ex): if self.bloom: self.bloom.close() debug1('PackIdxList: using %d index%s.\n' % (len(self.packs), len(self.packs)!=1 and 'es' or '')) def add(self, hash): """Insert an additional object in the list.""" self.also.add(hash) def open_idx(filename): if filename.endswith(b'.idx'): f = open(filename, 'rb') header = f.read(8) if header[0:4] == b'\377tOc': version = struct.unpack('!I', header[4:8])[0] if version == 2: return PackIdxV2(filename, f) else: raise GitError('%s: expected idx file version 2, got %d' % (path_msg(filename), version)) elif len(header) == 8 and header[0:4] < b'\377tOc': return PackIdxV1(filename, f) else: raise GitError('%s: unrecognized idx file header' % path_msg(filename)) elif filename.endswith(b'.midx'): return midx.PackMidx(filename) else: raise GitError('idx filenames must end with .idx or .midx') def idxmerge(idxlist, final_progress=True): """Generate a list of all the objects reachable in a PackIdxList.""" def pfunc(count, total): qprogress('Reading indexes: %.2f%% (%d/%d)\r' % (count*100.0/total, count, total)) def pfinal(count, total): if final_progress: progress('Reading indexes: %.2f%% (%d/%d), done.\n' % (100, total, total)) return merge_iter(idxlist, 10024, pfunc, pfinal) def create_commit_blob(tree, parent, author, adate_sec, adate_tz, committer, cdate_sec, cdate_tz, msg): if adate_tz is not None: adate_str = _git_date_str(adate_sec, adate_tz) else: adate_str = _local_git_date_str(adate_sec) if cdate_tz is not None: cdate_str = _git_date_str(cdate_sec, cdate_tz) else: cdate_str = _local_git_date_str(cdate_sec) l = [] if tree: l.append(b'tree %s' % hexlify(tree)) if parent: l.append(b'parent %s' % hexlify(parent)) if author: l.append(b'author 
%s %s' % (author, adate_str)) if committer: l.append(b'committer %s %s' % (committer, cdate_str)) l.append(b'') l.append(msg) return b'\n'.join(l) def _make_objcache(): return PackIdxList(repo(b'objects/pack')) # bup-gc assumes that it can disable all PackWriter activities # (bloom/midx/cache) via the constructor and close() arguments. class PackWriter(object): """Writes Git objects inside a pack file.""" def __init__(self, objcache_maker=_make_objcache, compression_level=1, run_midx=True, on_pack_finish=None, max_pack_size=None, max_pack_objects=None, repo_dir=None): self.closed = False self.repo_dir = repo_dir or repo() self.file = None self.parentfd = None self.count = 0 self.outbytes = 0 self.tmpdir = None self.idx = None self.objcache_maker = objcache_maker self.objcache = None self.compression_level = compression_level self.run_midx=run_midx self.on_pack_finish = on_pack_finish if not max_pack_size: max_pack_size = git_config_get(b'pack.packSizeLimit', repo_dir=self.repo_dir, opttype='int') if not max_pack_size: # larger packs slow down pruning max_pack_size = 1000 * 1000 * 1000 self.max_pack_size = max_pack_size # cache memory usage is about 83 bytes per object self.max_pack_objects = max_pack_objects if max_pack_objects \ else max(1, self.max_pack_size // 5000) def __enter__(self): return self def __exit__(self, type, value, traceback): with pending_raise(value, rethrow=False): self.close() def _open(self): if not self.file: with ExitStack() as err_stack: objdir = dir = os.path.join(self.repo_dir, b'objects') self.tmpdir = err_stack.enter_context(temp_dir(dir=objdir, prefix=b'pack-tmp-')) self.file = err_stack.enter_context(open(self.tmpdir + b'/pack', 'w+b')) self.parentfd = err_stack.enter_context(finalized(os.open(objdir, os.O_RDONLY), lambda x: os.close(x))) self.file.write(b'PACK\0\0\0\2\0\0\0\0') self.idx = PackIdxV2Writer() err_stack.pop_all() def _raw_write(self, datalist, sha): self._open() f = self.file # in case we get interrupted (eg. 
KeyboardInterrupt), it's best if # the file never has a *partial* blob. So let's make sure it's # all-or-nothing. (The blob shouldn't be very big anyway, thanks # to our hashsplit algorithm.) f.write() does its own buffering, # but that's okay because we'll flush it in _end(). oneblob = b''.join(datalist) try: f.write(oneblob) except IOError as e: raise GitError(e) from e nw = len(oneblob) crc = zlib.crc32(oneblob) & 0xffffffff self._update_idx(sha, crc, nw) self.outbytes += nw self.count += 1 return nw, crc def _update_idx(self, sha, crc, size): assert(sha) if self.idx: self.idx.add(sha, crc, self.file.tell() - size) def _write(self, sha, type, content): if verbose: log('>') assert sha size, crc = self._raw_write(_encode_packobj(type, content, self.compression_level), sha=sha) if self.outbytes >= self.max_pack_size \ or self.count >= self.max_pack_objects: self.breakpoint() return sha def _require_objcache(self): if self.objcache is None and self.objcache_maker: self.objcache = self.objcache_maker() if self.objcache is None: raise GitError( "PackWriter not opened or can't check exists w/o objcache") def exists(self, id, want_source=False): """Return non-empty if an object is found in the object cache.""" self._require_objcache() return self.objcache.exists(id, want_source=want_source) def just_write(self, sha, type, content): """Write an object to the pack file without checking for duplication.""" self._write(sha, type, content) # If nothing else, gc doesn't have/want an objcache if self.objcache is not None: self.objcache.add(sha) def maybe_write(self, type, content): """Write an object to the pack file if not present and return its id.""" sha = calc_hash(type, content) if not self.exists(sha): self._require_objcache() self.just_write(sha, type, content) return sha def new_blob(self, blob): """Create a blob object in the pack with the supplied content.""" return self.maybe_write(b'blob', blob) def new_tree(self, shalist): """Create a tree object in the pack.""" 
content = tree_encode(shalist) return self.maybe_write(b'tree', content) def new_commit(self, tree, parent, author, adate_sec, adate_tz, committer, cdate_sec, cdate_tz, msg): """Create a commit object in the pack. The date_sec values must be epoch-seconds, and if a tz is None, the local timezone is assumed.""" content = create_commit_blob(tree, parent, author, adate_sec, adate_tz, committer, cdate_sec, cdate_tz, msg) return self.maybe_write(b'commit', content) def _end(self, run_midx=True, abort=False): # Ignores run_midx during abort self.tmpdir, tmpdir = None, self.tmpdir self.parentfd, pfd, = None, self.parentfd self.file, f = None, self.file self.idx, idx = None, self.idx try: with nullcontext_if_not(self.objcache), \ finalized(pfd, lambda x: x is not None and os.close(x)), \ nullcontext_if_not(f): if abort or not f: return None # update object count f.seek(8) cp = struct.pack('!i', self.count) assert len(cp) == 4 f.write(cp) # calculate the pack sha1sum f.seek(0) sum = Sha1() for b in chunkyreader(f): sum.update(b) packbin = sum.digest() f.write(packbin) f.flush() fdatasync(f.fileno()) f.close() idx.write(tmpdir + b'/idx', packbin) nameprefix = os.path.join(self.repo_dir, b'objects/pack/pack-' + hexlify(packbin)) os.rename(tmpdir + b'/pack', nameprefix + b'.pack') os.rename(tmpdir + b'/idx', nameprefix + b'.idx') os.fsync(pfd) if self.on_pack_finish: self.on_pack_finish(nameprefix) if run_midx: auto_midx(os.path.join(self.repo_dir, b'objects/pack')) return nameprefix finally: if tmpdir: rmtree(tmpdir) # Must be last -- some of the code above depends on it self.objcache = None def abort(self): """Remove the pack file from disk.""" self.closed = True self._end(abort=True) def breakpoint(self): """Clear byte and object counts and return the last processed id.""" id = self._end(self.run_midx) self.outbytes = self.count = 0 return id def close(self, run_midx=True): """Close the pack file and move it to its definitive path.""" self.closed = True return 
self._end(run_midx=run_midx) def __del__(self): assert self.closed class PackIdxV2Writer: def __init__(self): self.idx = list(list() for i in range(256)) self.count = 0 def add(self, sha, crc, offs): assert(sha) self.count += 1 self.idx[byte_int(sha[0])].append((sha, crc, offs)) def write(self, filename, packbin): ofs64_count = 0 for section in self.idx: for entry in section: if entry[2] >= 2**31: ofs64_count += 1 # Length: header + fan-out + shas-and-crcs + overflow-offsets index_len = 8 + (4 * 256) + (28 * self.count) + (8 * ofs64_count) idx_map = None idx_f = open(filename, 'w+b') try: idx_f.truncate(index_len) fdatasync(idx_f.fileno()) idx_map = mmap_readwrite(idx_f, close=False) try: count = _helpers.write_idx(filename, idx_map, self.idx, self.count) assert(count == self.count) idx_map.flush() finally: idx_map.close() finally: idx_f.close() idx_f = open(filename, 'a+b') try: idx_f.write(packbin) idx_f.seek(0) idx_sum = Sha1() b = idx_f.read(8 + 4*256) idx_sum.update(b) for b in chunkyreader(idx_f, 20 * self.count): idx_sum.update(b) for b in chunkyreader(idx_f): idx_sum.update(b) idx_f.write(idx_sum.digest()) fdatasync(idx_f.fileno()) finally: idx_f.close() def list_refs(patterns=None, repo_dir=None, limit_to_heads=False, limit_to_tags=False): """Yield (refname, hash) tuples for all repository refs unless patterns are specified. In that case, only include tuples for refs matching those patterns (cf. git-show-ref(1)). The limits restrict the result items to refs/heads or refs/tags. If both limits are specified, items from both sources will be included. 
""" argv = [b'git', b'show-ref'] if limit_to_heads: argv.append(b'--heads') if limit_to_tags: argv.append(b'--tags') argv.append(b'--') if patterns: argv.extend(patterns) p = subprocess.Popen(argv, env=_gitenv(repo_dir), stdout=subprocess.PIPE, close_fds=True) out = p.stdout.read().strip() rv = p.wait() # not fatal if rv: assert(not out) if out: for d in out.split(b'\n'): sha, name = d.split(b' ', 1) yield name, unhexlify(sha) def read_ref(refname, repo_dir = None): """Get the commit id of the most recent commit made on a given ref.""" refs = list_refs(patterns=[refname], repo_dir=repo_dir, limit_to_heads=True) l = tuple(islice(refs, 2)) if l: assert(len(l) == 1) return l[0][1] else: return None def rev_list_invocation(ref_or_refs, format=None): if isinstance(ref_or_refs, bytes): refs = (ref_or_refs,) else: refs = ref_or_refs argv = [b'git', b'rev-list'] if format: argv.append(b'--pretty=format:' + format) for ref in refs: assert not ref.startswith(b'-') argv.append(ref) argv.append(b'--') return argv def rev_list(ref_or_refs, parse=None, format=None, repo_dir=None): """Yield information about commits as per "git rev-list". If a format is not provided, yield one hex hash at a time. If a format is provided, pass it to rev-list and call parse(git_stdout) for each commit with the stream positioned just after the rev-list "commit HASH" header line. When a format is provided yield (oidx, parse(git_stdout)) for each commit. 
""" assert bool(parse) == bool(format) p = subprocess.Popen(rev_list_invocation(ref_or_refs, format=format), env=_gitenv(repo_dir), stdout = subprocess.PIPE, close_fds=True) if not format: for line in p.stdout: yield line.strip() else: line = p.stdout.readline() while line: s = line.strip() if not s.startswith(b'commit '): raise Exception('unexpected line ' + repr(s)) s = s[7:] assert len(s) == 40 yield s, parse(p.stdout) line = p.stdout.readline() rv = p.wait() # not fatal if rv: raise GitError('git rev-list returned error %d' % rv) def rev_parse(committish, repo_dir=None): """Resolve the full hash for 'committish', if it exists. Should be roughly equivalent to 'git rev-parse'. Returns the hex value of the hash if it is found, None if 'committish' does not correspond to anything. """ head = read_ref(committish, repo_dir=repo_dir) if head: debug2("resolved from ref: commit = %s\n" % hexlify(head)) return head if len(committish) == 40: try: hash = unhexlify(committish) except TypeError: return None with PackIdxList(repo(b'objects/pack', repo_dir=repo_dir)) as pL: if pL.exists(hash): return hash return None def update_ref(refname, newval, oldval, repo_dir=None, force=False): """Update a repository reference. 
With force=True, don't care about the previous ref (oldval); with force=False oldval must be either a sha1 or None (for an entirely new branch) """ if force: assert oldval is None oldarg = [] elif not oldval: oldarg = [b''] else: oldarg = [hexlify(oldval)] assert refname.startswith(b'refs/heads/') \ or refname.startswith(b'refs/tags/') p = subprocess.Popen([b'git', b'update-ref', refname, hexlify(newval)] + oldarg, env=_gitenv(repo_dir), close_fds=True) _git_wait(b'git update-ref', p) def delete_ref(refname, oldvalue=None): """Delete a repository reference (see git update-ref(1)).""" assert refname.startswith(b'refs/') oldvalue = [] if not oldvalue else [oldvalue] p = subprocess.Popen([b'git', b'update-ref', b'-d', refname] + oldvalue, env=_gitenv(), close_fds=True) _git_wait('git update-ref', p) def guess_repo(): """Return the global repodir or BUP_DIR when either is set, or ~/.bup. Usually, if you are interacting with a bup repository, you would not be calling this function but using check_repo_or_die(). """ if repodir: return repodir repo = environ.get(b'BUP_DIR') if not repo: repo = os.path.expanduser(b'~/.bup') return repo def init_repo(path=None): """Create the Git bare repository for bup in a given path.""" global repodir repodir = path or guess_repo() d = repo() # appends a / to the path parent = os.path.dirname(os.path.dirname(d)) if parent and not os.path.exists(parent): raise GitError('parent directory "%s" does not exist\n' % path_msg(parent)) if os.path.exists(d) and not os.path.isdir(os.path.join(d, b'.')): raise GitError('"%s" exists but is not a directory\n' % path_msg(d)) p = subprocess.Popen([b'git', b'--bare', b'init'], stdout=sys.stderr, env=_gitenv(), close_fds=True) _git_wait('git init', p) # Force the index version configuration in order to ensure bup works # regardless of the version of the installed Git binary. 
p = subprocess.Popen([b'git', b'config', b'pack.indexVersion', '2'], stdout=sys.stderr, env=_gitenv(), close_fds=True) _git_wait('git config', p) # Enable the reflog p = subprocess.Popen([b'git', b'config', b'core.logAllRefUpdates', b'true'], stdout=sys.stderr, env=_gitenv(), close_fds=True) _git_wait('git config', p) def check_repo_or_die(path=None): """Check to see if a bup repository probably exists, and abort if not.""" global repodir repodir = path or guess_repo() top = repo() pst = stat_if_exists(top + b'/objects/pack') if pst and stat.S_ISDIR(pst.st_mode): return if not pst: top_st = stat_if_exists(top) if not top_st: log('error: repository %r does not exist (see "bup help init")\n' % top) sys.exit(15) log('error: %s is not a repository\n' % path_msg(top)) sys.exit(14) def is_suitable_git(ver_str): if not ver_str.startswith(b'git version '): return 'unrecognized' ver_str = ver_str[len(b'git version '):] if ver_str.startswith(b'0.'): return 'insufficient' if ver_str.startswith(b'1.'): if re.match(br'1\.[012345]rc', ver_str): return 'insufficient' if re.match(br'1\.[01234]\.', ver_str): return 'insufficient' if re.match(br'1\.5\.[012345]($|\.)', ver_str): return 'insufficient' if re.match(br'1\.5\.6-rc', ver_str): return 'insufficient' return 'suitable' if re.match(br'[0-9]+(\.|$)?', ver_str): return 'suitable' sys.exit(13) _git_great = None def require_suitable_git(ver_str=None): """Raise GitError if the version of git isn't suitable. Rely on ver_str when provided, rather than invoking the git in the path. 
""" global _git_great if _git_great is not None: return if environ.get(b'BUP_GIT_VERSION_IS_FINE', b'').lower() \ in (b'yes', b'true', b'1'): _git_great = True return if not ver_str: ver_str, _, _ = _git_exo([b'git', b'--version']) status = is_suitable_git(ver_str) if status == 'unrecognized': raise GitError('Unexpected git --version output: %r' % ver_str) if status == 'insufficient': log('error: git version must be at least 1.5.6\n') sys.exit(1) if status == 'suitable': _git_great = True return assert False class CatPipe: """Link to 'git cat-file' that is used to retrieve blob data.""" def __init__(self, repo_dir = None): require_suitable_git() self.repo_dir = repo_dir self.p = self.inprogress = None def close(self, wait=False): self.p, p = None, self.p self.inprogress = None if p: try: p.stdout.close() finally: # This will handle pending exceptions correctly once # we drop py2 p.stdin.close() if wait: p.wait() return p.returncode return None def restart(self): self.close() self.p = subprocess.Popen([b'git', b'cat-file', b'--batch'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds = True, bufsize = 4096, env=_gitenv(self.repo_dir)) def get(self, ref): """Yield (oidx, type, size), followed by the data referred to by ref. If ref does not exist, only yield (None, None, None). 
""" if not self.p or self.p.poll() != None: self.restart() assert(self.p) poll_result = self.p.poll() assert(poll_result == None) if self.inprogress: log('get: opening %r while %r is open\n' % (ref, self.inprogress)) assert(not self.inprogress) assert ref.find(b'\n') < 0 assert ref.find(b'\r') < 0 assert not ref.startswith(b'-') self.inprogress = ref self.p.stdin.write(ref + b'\n') self.p.stdin.flush() hdr = self.p.stdout.readline() if not hdr: raise GitError('unexpected cat-file EOF (last request: %r, exit: %s)' % (ref, self.p.poll() or 'none')) if hdr.endswith(b' missing\n'): self.inprogress = None yield None, None, None return info = hdr.split(b' ') if len(info) != 3 or len(info[0]) != 40: raise GitError('expected object (id, type, size), got %r' % info) oidx, typ, size = info size = int(size) try: it = chunkyreader(self.p.stdout, size) yield oidx, typ, size for blob in chunkyreader(self.p.stdout, size): yield blob readline_result = self.p.stdout.readline() assert readline_result == b'\n' self.inprogress = None except Exception as ex: with pending_raise(ex): self.close() def _join(self, it): _, typ, _ = next(it) if typ == b'blob': for blob in it: yield blob elif typ == b'tree': treefile = b''.join(it) for (mode, name, sha) in tree_decode(treefile): for blob in self.join(hexlify(sha)): yield blob elif typ == b'commit': treeline = b''.join(it).split(b'\n')[0] assert treeline.startswith(b'tree ') for blob in self.join(treeline[5:]): yield blob else: raise GitError('invalid object type %r: expected blob/tree/commit' % typ) def join(self, id): """Generate a list of the content of all blobs that can be reached from an object. The hash given in 'id' must point to a blob, a tree or a commit. The content of all blobs that can be seen from trees or commits will be added to the list. 
""" for d in self._join(self.get(id)): yield d _cp = {} def cp(repo_dir=None): """Create a CatPipe object or reuse the already existing one.""" global _cp, repodir if not repo_dir: repo_dir = repodir or repo() repo_dir = os.path.abspath(repo_dir) cp = _cp.get(repo_dir) if not cp: cp = CatPipe(repo_dir) _cp[repo_dir] = cp return cp def close_catpipes(): # FIXME: chain exceptions while _cp: _, cp = _cp.popitem() cp.close(wait=True) def tags(repo_dir = None): """Return a dictionary of all tags in the form {hash: [tag_names, ...]}.""" tags = {} for n, c in list_refs(repo_dir = repo_dir, limit_to_tags=True): assert n.startswith(b'refs/tags/') name = n[10:] if not c in tags: tags[c] = [] tags[c].append(name) # more than one tag can point at 'c' return tags class MissingObject(KeyError): def __init__(self, oid): self.oid = oid KeyError.__init__(self, 'object %r is missing' % hexlify(oid)) WalkItem = namedtuple('WalkItem', ['oid', 'type', 'mode', 'path', 'chunk_path', 'data']) # The path is the mangled path, and if an item represents a fragment # of a chunked file, the chunk_path will be the chunked subtree path # for the chunk, i.e. ['', '2d3115e', ...]. The top-level path for a # chunked file will have a chunk_path of ['']. So some chunk subtree # of the file '/foo/bar/baz' might look like this: # # item.path = ['foo', 'bar', 'baz.bup'] # item.chunk_path = ['', '2d3115e', '016b097'] # item.type = 'tree' # ... def walk_object(get_ref, oidx, stop_at=None, include_data=None): """Yield everything reachable from oidx via get_ref (which must behave like CatPipe get) as a WalkItem, stopping whenever stop_at(oidx) returns true. Throw MissingObject if a hash encountered is missing from the repository, and don't read or return blob content in the data field unless include_data is set. 
""" # Maintain the pending stack on the heap to avoid stack overflow pending = [(oidx, [], [], None)] while len(pending): oidx, parent_path, chunk_path, mode = pending.pop() oid = unhexlify(oidx) if stop_at and stop_at(oidx): continue if (not include_data) and mode and stat.S_ISREG(mode): # If the object is a "regular file", then it's a leaf in # the graph, so we can skip reading the data if the caller # hasn't requested it. yield WalkItem(oid=oid, type=b'blob', chunk_path=chunk_path, path=parent_path, mode=mode, data=None) continue item_it = get_ref(oidx) get_oidx, typ, _ = next(item_it) if not get_oidx: raise MissingObject(unhexlify(oidx)) if typ not in (b'blob', b'commit', b'tree'): raise Exception('unexpected repository object type %r' % typ) # FIXME: set the mode based on the type when the mode is None if typ == b'blob' and not include_data: # Dump data until we can ask cat_pipe not to fetch it for ignored in item_it: pass data = None else: data = b''.join(item_it) yield WalkItem(oid=oid, type=typ, chunk_path=chunk_path, path=parent_path, mode=mode, data=(data if include_data else None)) if typ == b'commit': commit_items = parse_commit(data) for pid in commit_items.parents: pending.append((pid, parent_path, chunk_path, mode)) pending.append((commit_items.tree, parent_path, chunk_path, hashsplit.GIT_MODE_TREE)) elif typ == b'tree': for mode, name, ent_id in tree_decode(data): demangled, bup_type = demangle_name(name, mode) if chunk_path: sub_path = parent_path sub_chunk_path = chunk_path + [name] else: sub_path = parent_path + [name] if bup_type == BUP_CHUNKED: sub_chunk_path = [b''] else: sub_chunk_path = chunk_path pending.append((hexlify(ent_id), sub_path, sub_chunk_path, mode)) bup-0.33.3/lib/bup/hashsplit.py000066400000000000000000000203711454333004200162740ustar00rootroot00000000000000 from __future__ import absolute_import import io, math, os from bup import _helpers, helpers from bup._helpers import cat_bytes from bup.compat import buffer from 
bup.helpers import sc_page_size _fmincore = getattr(helpers, 'fmincore', None) BLOB_MAX = 8192*4 # 8192 is the "typical" blob size for bupsplit BLOB_READ_SIZE = 8 * 1024 * 1024 MAX_PER_TREE = 256 progress_callback = None fanout = 16 GIT_MODE_FILE = 0o100644 GIT_MODE_TREE = 0o40000 GIT_MODE_SYMLINK = 0o120000 # The purpose of this type of buffer is to avoid copying on peek(), get(), # and eat(). We do copy the buffer contents on put(), but that should # be ok if we always only put() large amounts of data at a time. class Buf: def __init__(self): self.data = b'' self.start = 0 def put(self, s): if not self.data: self.data = s self.start = 0 elif s: remaining = len(self.data) - self.start self.data = cat_bytes(self.data, self.start, remaining, s, 0, len(s)) self.start = 0 def peek(self, count): if count <= 256: return self.data[self.start : self.start + count] return buffer(self.data, self.start, count) def eat(self, count): self.start += count def get(self, count): if count <= 256: v = self.data[self.start : self.start + count] else: v = buffer(self.data, self.start, count) self.start += count return v def used(self): return len(self.data) - self.start def _fadvise_pages_done(fd, first_page, count): assert(first_page >= 0) assert(count >= 0) if count > 0: _helpers.fadvise_done(fd, first_page * sc_page_size, count * sc_page_size) def _nonresident_page_regions(status_bytes, incore_mask, max_region_len=None): """Return (start_page, count) pairs in ascending start_page order for each contiguous region of nonresident pages indicated by the mincore() status_bytes. 
Limit the number of pages in each region to max_region_len.""" assert(max_region_len is None or max_region_len > 0) start = None for i, x in enumerate(status_bytes): in_core = x & incore_mask if start is None: if not in_core: start = i else: count = i - start if in_core: yield (start, count) start = None elif max_region_len and count >= max_region_len: yield (start, count) start = i if start is not None: yield (start, len(status_bytes) - start) def _uncache_ours_upto(fd, offset, first_region, remaining_regions): """Uncache the pages of fd indicated by first_region and remaining_regions that are before offset, where each region is a (start_page, count) pair. The final region must have a start_page of None.""" rstart, rlen = first_region while rstart is not None and (rstart + rlen) * sc_page_size <= offset: _fadvise_pages_done(fd, rstart, rlen) rstart, rlen = next(remaining_regions, (None, None)) return (rstart, rlen) def readfile_iter(files, progress=None): for filenum,f in enumerate(files): ofs = 0 b = '' fd = rpr = rstart = rlen = None if _fmincore and hasattr(f, 'fileno'): try: fd = f.fileno() except io.UnsupportedOperation: pass if fd: mcore = _fmincore(fd) if mcore: max_chunk = max(1, (8 * 1024 * 1024) / sc_page_size) rpr = _nonresident_page_regions(mcore, helpers.MINCORE_INCORE, max_chunk) rstart, rlen = next(rpr, (None, None)) while 1: if progress: progress(filenum, len(b)) b = f.read(BLOB_READ_SIZE) ofs += len(b) if rpr: rstart, rlen = _uncache_ours_upto(fd, ofs, (rstart, rlen), rpr) if not b: break yield b if rpr: rstart, rlen = _uncache_ours_upto(fd, ofs, (rstart, rlen), rpr) def _splitbuf(buf, basebits, fanbits): while 1: b = buf.peek(buf.used()) (ofs, bits) = _helpers.splitbuf(b) if ofs: if ofs > BLOB_MAX: ofs = BLOB_MAX level = 0 else: level = (bits-basebits)//fanbits # integer division buf.eat(ofs) yield buffer(b, 0, ofs), level else: break while buf.used() >= BLOB_MAX: # limit max blob size yield buf.get(BLOB_MAX), 0 def _hashsplit_iter(files, 
progress): assert(BLOB_READ_SIZE > BLOB_MAX) basebits = _helpers.blobbits() fanbits = int(math.log(fanout or 128, 2)) buf = Buf() for inblock in readfile_iter(files, progress): buf.put(inblock) for buf_and_level in _splitbuf(buf, basebits, fanbits): yield buf_and_level if buf.used(): yield buf.get(buf.used()), 0 def _hashsplit_iter_keep_boundaries(files, progress): for real_filenum,f in enumerate(files): if progress: def prog(filenum, nbytes): # the inner _hashsplit_iter doesn't know the real file count, # so we'll replace it here. return progress(real_filenum, nbytes) else: prog = None for buf_and_level in _hashsplit_iter([f], progress=prog): yield buf_and_level def hashsplit_iter(files, keep_boundaries, progress): if keep_boundaries: return _hashsplit_iter_keep_boundaries(files, progress) else: return _hashsplit_iter(files, progress) total_split = 0 def split_to_blobs(makeblob, files, keep_boundaries, progress): global total_split for (blob, level) in hashsplit_iter(files, keep_boundaries, progress): sha = makeblob(blob) total_split += len(blob) if progress_callback: progress_callback(len(blob)) yield (sha, len(blob), level) def _make_shalist(l): ofs = 0 l = list(l) total = sum(size for mode,sha,size, in l) vlen = len(b'%x' % total) shalist = [] for (mode, sha, size) in l: shalist.append((mode, b'%0*x' % (vlen,ofs), sha)) ofs += size assert(ofs == total) return (shalist, total) def _squish(maketree, stacks, n): i = 0 while i < n or len(stacks[i]) >= MAX_PER_TREE: while len(stacks) <= i+1: stacks.append([]) if len(stacks[i]) == 1: stacks[i+1] += stacks[i] elif stacks[i]: (shalist, size) = _make_shalist(stacks[i]) tree = maketree(shalist) stacks[i+1].append((GIT_MODE_TREE, tree, size)) stacks[i] = [] i += 1 def split_to_shalist(makeblob, maketree, files, keep_boundaries, progress=None): sl = split_to_blobs(makeblob, files, keep_boundaries, progress) assert(fanout != 0) if not fanout: shal = [] for (sha,size,level) in sl: shal.append((GIT_MODE_FILE, sha, size)) 
return _make_shalist(shal)[0] else: stacks = [[]] for (sha,size,level) in sl: stacks[0].append((GIT_MODE_FILE, sha, size)) _squish(maketree, stacks, level) #log('stacks: %r\n' % [len(i) for i in stacks]) _squish(maketree, stacks, len(stacks)-1) #log('stacks: %r\n' % [len(i) for i in stacks]) return _make_shalist(stacks[-1])[0] def split_to_blob_or_tree(makeblob, maketree, files, keep_boundaries, progress=None): shalist = list(split_to_shalist(makeblob, maketree, files, keep_boundaries, progress)) if len(shalist) == 1: return (shalist[0][0], shalist[0][2]) elif len(shalist) == 0: return (GIT_MODE_FILE, makeblob(b'')) else: return (GIT_MODE_TREE, maketree(shalist)) def open_noatime(name): fd = _helpers.open_noatime(name) try: return os.fdopen(fd, 'rb', 1024*1024) except: try: os.close(fd) except: pass raise bup-0.33.3/lib/bup/helpers.py000066400000000000000000001145361454333004200157460ustar00rootroot00000000000000"""Helper functions and classes for bup.""" from __future__ import absolute_import, division from collections import namedtuple from contextlib import ExitStack from ctypes import sizeof, c_void_p from math import floor from os import environ from subprocess import PIPE, Popen from tempfile import mkdtemp from shutil import rmtree import sys, os, subprocess, errno, select, mmap, stat, re, struct import hashlib, heapq, math, operator, time from bup import _helpers from bup import io from bup.compat import argv_bytes, byte_int, nullcontext, pending_raise from bup.io import byte_stream, path_msg # This function should really be in helpers, not in bup.options. But we # want options.py to be standalone so people can include it in other projects. 
from bup.options import _tty_width as tty_width buglvl = int(os.environ.get('BUP_DEBUG', 0)) class Nonlocal: """Helper to deal with Python scoping issues""" pass def nullcontext_if_not(manager): return manager if manager is not None else nullcontext() class finalized: def __init__(self, enter_result=None, finalize=None): assert finalize self.finalize = finalize self.enter_result = enter_result def __enter__(self): return self.enter_result def __exit__(self, exc_type, exc_value, traceback): self.finalize(self.enter_result) def temp_dir(*args, **kwargs): # This is preferable to tempfile.TemporaryDirectory because the # latter uses @contextmanager, and so will always eventually be # deleted if it's handed to an ExitStack, whenever the stack is # gc'ed, even if you pop_all() (the new stack will also trigger # the deletion) because # https://github.com/python/cpython/issues/88458 return finalized(mkdtemp(*args, **kwargs), lambda x: rmtree(x)) sc_page_size = os.sysconf('SC_PAGE_SIZE') assert(sc_page_size > 0) sc_arg_max = os.sysconf('SC_ARG_MAX') if sc_arg_max == -1: # "no definite limit" - let's choose 2M sc_arg_max = 2 * 1024 * 1024 def last(iterable): result = None for result in iterable: pass return result try: _fdatasync = os.fdatasync except AttributeError: _fdatasync = os.fsync if sys.platform.startswith('darwin'): # Apparently os.fsync on OS X doesn't guarantee to sync all the way down import fcntl def fdatasync(fd): try: return fcntl.fcntl(fd, fcntl.F_FULLFSYNC) except IOError as e: # Fallback for file systems (SMB) that do not support F_FULLFSYNC if e.errno == errno.ENOTSUP: return _fdatasync(fd) else: raise else: fdatasync = _fdatasync def partition(predicate, stream): """Returns (leading_matches_it, rest_it), where leading_matches_it must be completely exhausted before traversing rest_it. 
""" stream = iter(stream) ns = Nonlocal() ns.first_nonmatch = None def leading_matches(): for x in stream: if predicate(x): yield x else: ns.first_nonmatch = (x,) break def rest(): if ns.first_nonmatch: yield ns.first_nonmatch[0] for x in stream: yield x return (leading_matches(), rest()) def merge_dict(*xs): result = {} for x in xs: result.update(x) return result def lines_until_sentinel(f, sentinel, ex_type): # sentinel must end with \n and must contain only one \n while True: line = f.readline() if not (line and line.endswith(b'\n')): raise ex_type('Hit EOF while reading line') if line == sentinel: return yield line def stat_if_exists(path): try: return os.stat(path) except OSError as e: if e.errno != errno.ENOENT: raise return None # Write (blockingly) to sockets that may or may not be in blocking mode. # We need this because our stderr is sometimes eaten by subprocesses # (probably ssh) that sometimes make it nonblocking, if only temporarily, # leading to race conditions. Ick. We'll do it the hard way. def _hard_write(fd, buf): while buf: (r,w,x) = select.select([], [fd], [], None) if not w: raise IOError('select(fd) returned without being writable') try: sz = os.write(fd, buf) except OSError as e: if e.errno != errno.EAGAIN: raise assert(sz >= 0) buf = buf[sz:] _last_prog = 0 def log(s): """Print a log message to stderr.""" global _last_prog sys.stdout.flush() _hard_write(sys.stderr.fileno(), s if isinstance(s, bytes) else s.encode()) _last_prog = 0 def debug1(s): if buglvl >= 1: log(s) def debug2(s): if buglvl >= 2: log(s) istty1 = os.isatty(1) or (int(os.environ.get('BUP_FORCE_TTY', 0)) & 1) istty2 = os.isatty(2) or (int(os.environ.get('BUP_FORCE_TTY', 0)) & 2) _last_progress = '' def progress(s): """Calls log() if stderr is a TTY. Does nothing otherwise.""" global _last_progress if istty2: log(s) _last_progress = s def qprogress(s): """Calls progress() only if we haven't printed progress in a while. 
This avoids overloading the stderr buffer with excess junk. """ global _last_prog now = time.time() if now - _last_prog > 0.1: progress(s) _last_prog = now def reprogress(): """Calls progress() to redisplay the most recent progress message. Useful after you've printed some other message that wipes out the progress line. """ if _last_progress and _last_progress.endswith('\r'): progress(_last_progress) def mkdirp(d, mode=None): """Recursively create directories on path 'd'. Unlike os.makedirs(), it doesn't raise an exception if the last element of the path already exists. """ try: if mode: os.makedirs(d, mode) else: os.makedirs(d) except OSError as e: if e.errno == errno.EEXIST: pass else: raise class MergeIterItem: def __init__(self, entry, read_it): self.entry = entry self.read_it = read_it def __lt__(self, x): return self.entry < x.entry def merge_iter(iters, pfreq, pfunc, pfinal, key=None): if key: samekey = lambda e, pe: getattr(e, key) == getattr(pe, key, None) else: samekey = operator.eq count = 0 total = sum(len(it) for it in iters) iters = (iter(it) for it in iters) heap = ((next(it, None),it) for it in iters) heap = [MergeIterItem(e, it) for e, it in heap if e] heapq.heapify(heap) pe = None while heap: if not count % pfreq: pfunc(count, total) e, it = heap[0].entry, heap[0].read_it if not samekey(e, pe): pe = e yield e count += 1 try: e = next(it) except StopIteration: heapq.heappop(heap) # remove current else: # shift current to new location heapq.heapreplace(heap, MergeIterItem(e, it)) pfinal(count, total) def unlink(f): """Delete a file at path 'f' if it currently exists. Unlike os.unlink(), does not throw an exception if the file didn't already exist. 
""" try: os.unlink(f) except OSError as e: if e.errno != errno.ENOENT: raise _bq_simple_id_rx = re.compile(br'^[-_./a-zA-Z0-9]+$') _sq_simple_id_rx = re.compile(r'^[-_./a-zA-Z0-9]+$') def bquote(x): if x == b'': return b"''" if _bq_simple_id_rx.match(x): return x return b"'%s'" % x.replace(b"'", b"'\"'\"'") def squote(x): if x == '': return "''" if _sq_simple_id_rx.match(x): return x return "'%s'" % x.replace("'", "'\"'\"'") def quote(x): if isinstance(x, bytes): return bquote(x) if isinstance(x, str): return squote(x) assert False # some versions of pylint get confused return None def shstr(cmd): """Return a shell quoted string for cmd if it's a sequence, else cmd. cmd must be a string, bytes, or a sequence of one or the other, and the assumption is that if cmd is a string or bytes, then it's already quoted (because it's what's actually being passed to call() and friends. e.g. log(shstr(cmd)); call(cmd) """ if isinstance(cmd, (bytes, str)): return cmd elif all(isinstance(x, bytes) for x in cmd): return b' '.join(map(bquote, cmd)) elif all(isinstance(x, str) for x in cmd): return ' '.join(map(squote, cmd)) raise TypeError('unsupported shstr argument: ' + repr(cmd)) exc = subprocess.check_call def exo(cmd, input=None, stdin=None, stderr=None, shell=False, check=True, preexec_fn=None, close_fds=True): if input: assert stdin in (None, PIPE) stdin = PIPE p = Popen(cmd, stdin=stdin, stdout=PIPE, stderr=stderr, shell=shell, preexec_fn=preexec_fn, close_fds=close_fds) out, err = p.communicate(input) if check and p.returncode != 0: raise Exception('subprocess %r failed with status %d%s' % (b' '.join(map(quote, cmd)), p.returncode, ', stderr: %r' % err if err else '')) return out, err, p def readpipe(argv, preexec_fn=None, shell=False): """Run a subprocess and return its output.""" return exo(argv, preexec_fn=preexec_fn, shell=shell)[0] def _argmax_base(command): base_size = 2048 for c in command: base_size += len(command) + 1 for k, v in environ.items(): base_size += 
len(k) + len(v) + 2 + sizeof(c_void_p) return base_size def _argmax_args_size(args): return sum(len(x) + 1 + sizeof(c_void_p) for x in args) def batchpipe(command, args, preexec_fn=None, arg_max=sc_arg_max): """If args is not empty, yield the output produced by calling the command list with args as a sequence of strings (It may be necessary to return multiple strings in order to respect ARG_MAX).""" # The optional arg_max arg is a workaround for an issue with the # current wvtest behavior. base_size = _argmax_base(command) while args: room = arg_max - base_size i = 0 while i < len(args): next_size = _argmax_args_size(args[i:i+1]) if room - next_size < 0: break room -= next_size i += 1 sub_args = args[:i] args = args[i:] assert(len(sub_args)) yield readpipe(command + sub_args, preexec_fn=preexec_fn) def resolve_parent(p): """Return the absolute path of a file without following any final symlink. Behaves like os.path.realpath, but doesn't follow a symlink for the last element. (ie. if 'p' itself is a symlink, this one won't follow it, but it will follow symlinks in p's directory) """ try: st = os.lstat(p) except OSError: st = None if st and stat.S_ISLNK(st.st_mode): (dir, name) = os.path.split(p) dir = os.path.realpath(dir) out = os.path.join(dir, name) else: out = os.path.realpath(p) #log('realpathing:%r,%r\n' % (p, out)) return out def detect_fakeroot(): "Return True if we appear to be running under fakeroot." return os.getenv("FAKEROOTKEY") != None if sys.platform.startswith('cygwin'): def is_superuser(): # https://cygwin.com/ml/cygwin/2015-02/msg00057.html groups = os.getgroups() return 544 in groups or 0 in groups else: def is_superuser(): return os.geteuid() == 0 def cache_key_value(get_value, key, cache): """Return (value, was_cached). If there is a value in the cache for key, use that, otherwise, call get_value(key) which should throw a KeyError if there is no value -- in which case the cached and returned value will be None. 
""" try: # Do we already have it (or know there wasn't one)? value = cache[key] return value, True except KeyError: pass value = None try: cache[key] = value = get_value(key) except KeyError: cache[key] = None return value, False _hostname = None def hostname(): """Get the FQDN of this machine.""" global _hostname if not _hostname: _hostname = _helpers.gethostname() return _hostname def format_filesize(size): unit = 1024.0 size = float(size) if size < unit: return "%d" % (size) exponent = int(math.log(size) // math.log(unit)) size_prefix = "KMGTPE"[exponent - 1] return "%.1f%s" % (size / math.pow(unit, exponent), size_prefix) class NotOk(Exception): pass class BaseConn: def __init__(self, outp): self._base_closed = False self.outp = outp def close(self): self._base_closed = True def __enter__(self): return self def __exit__(self, exc_type, exc_value, tb): with pending_raise(exc_value, rethrow=False): self.close() def __del__(self): assert self._base_closed def _read(self, size): raise NotImplementedError("Subclasses must implement _read") def read(self, size): """Read 'size' bytes from input stream.""" self.outp.flush() return self._read(size) def _readline(self, size): raise NotImplementedError("Subclasses must implement _readline") def readline(self): """Read from input stream until a newline is found.""" self.outp.flush() return self._readline() def write(self, data): """Write 'data' to output stream.""" #log('%d writing: %d bytes\n' % (os.getpid(), len(data))) self.outp.write(data) def has_input(self): """Return true if input stream is readable.""" raise NotImplementedError("Subclasses must implement has_input") def ok(self): """Indicate end of output from last sent command.""" self.write(b'\nok\n') def error(self, s): """Indicate server error to the client.""" s = re.sub(br'\s+', b' ', s) self.write(b'\nerror %s\n' % s) def _check_ok(self, onempty): self.outp.flush() rl = b'' for rl in linereader(self): #log('%d got line: %r\n' % (os.getpid(), rl)) if not rl: 
# empty line continue elif rl == b'ok': return None elif rl.startswith(b'error '): #log('client: error: %s\n' % rl[6:]) return NotOk(rl[6:]) else: onempty(rl) raise Exception('server exited unexpectedly; see errors above') def drain_and_check_ok(self): """Remove all data for the current command from input stream.""" def onempty(rl): pass return self._check_ok(onempty) def check_ok(self): """Verify that server action completed successfully.""" def onempty(rl): raise Exception('expected "ok", got %r' % rl) return self._check_ok(onempty) class Conn(BaseConn): def __init__(self, inp, outp): BaseConn.__init__(self, outp) self.inp = inp def _read(self, size): return self.inp.read(size) def _readline(self): return self.inp.readline() def has_input(self): [rl, wl, xl] = select.select([self.inp.fileno()], [], [], 0) if rl: assert(rl[0] == self.inp.fileno()) return True else: return None def checked_reader(fd, n): while n > 0: rl, _, _ = select.select([fd], [], []) assert(rl[0] == fd) buf = os.read(fd, n) if not buf: raise Exception("Unexpected EOF reading %d more bytes" % n) yield buf n -= len(buf) MAX_PACKET = 128 * 1024 def mux(p, outfd, outr, errr): try: fds = [outr, errr] while p.poll() is None: rl, _, _ = select.select(fds, [], []) for fd in rl: if fd == outr: buf = os.read(outr, MAX_PACKET) if not buf: break os.write(outfd, struct.pack('!IB', len(buf), 1) + buf) elif fd == errr: buf = os.read(errr, 1024) if not buf: break os.write(outfd, struct.pack('!IB', len(buf), 2) + buf) finally: os.write(outfd, struct.pack('!IB', 0, 3)) class DemuxConn(BaseConn): """A helper class for bup's client-server protocol.""" def __init__(self, infd, outp): BaseConn.__init__(self, outp) # Anything that comes through before the sync string was not # multiplexed and can be assumed to be debug/log before mux init. 
tail = b'' stderr = byte_stream(sys.stderr) while tail != b'BUPMUX': # Make sure to write all pre-BUPMUX output to stderr b = os.read(infd, (len(tail) < 6) and (6-len(tail)) or 1) if not b: ex = IOError('demux: unexpected EOF during initialization') with pending_raise(ex): stderr.write(tail) stderr.flush() tail += b stderr.write(tail[:-6]) tail = tail[-6:] stderr.flush() self.infd = infd self.reader = None self.buf = None self.closed = False def write(self, data): self._load_buf(0) BaseConn.write(self, data) def _next_packet(self, timeout): if self.closed: return False rl, wl, xl = select.select([self.infd], [], [], timeout) if not rl: return False assert(rl[0] == self.infd) ns = b''.join(checked_reader(self.infd, 5)) n, fdw = struct.unpack('!IB', ns) if n > MAX_PACKET: # assume that something went wrong and print stuff ns += os.read(self.infd, 1024) stderr = byte_stream(sys.stderr) stderr.write(ns) stderr.flush() raise Exception("Connection broken") if fdw == 1: self.reader = checked_reader(self.infd, n) elif fdw == 2: for buf in checked_reader(self.infd, n): byte_stream(sys.stderr).write(buf) elif fdw == 3: self.closed = True debug2("DemuxConn: marked closed\n") return True def _load_buf(self, timeout): if self.buf is not None: return True while not self.closed: while not self.reader: if not self._next_packet(timeout): return False try: self.buf = next(self.reader) return True except StopIteration: self.reader = None return False def _read_parts(self, ix_fn): while self._load_buf(None): assert(self.buf is not None) i = ix_fn(self.buf) if i is None or i == len(self.buf): yv = self.buf self.buf = None else: yv = self.buf[:i] self.buf = self.buf[i:] yield yv if i is not None: break def _readline(self): def find_eol(buf): try: return buf.index(b'\n')+1 except ValueError: return None return b''.join(self._read_parts(find_eol)) def _read(self, size): csize = [size] def until_size(buf): # Closes on csize if len(buf) < csize[0]: csize[0] -= len(buf) return None else: 
return csize[0] return b''.join(self._read_parts(until_size)) def has_input(self): return self._load_buf(0) def linereader(f): """Generate a list of input lines from 'f' without terminating newlines.""" while 1: line = f.readline() if not line: break yield line[:-1] def chunkyreader(f, count = None): """Generate a list of chunks of data read from 'f'. If count is None, read until EOF is reached. If count is a positive integer, read 'count' bytes from 'f'. If EOF is reached while reading, raise IOError. """ if count != None: while count > 0: b = f.read(min(count, 65536)) if not b: raise IOError('EOF with %d bytes remaining' % count) yield b count -= len(b) else: while 1: b = f.read(65536) if not b: break yield b class atomically_replaced_file: def __init__(self, path, mode='w', buffering=-1): """Return a context manager supporting the atomic replacement of a file. The context manager yields an open file object that has been created in a mkdtemp-style temporary directory in the same directory as the path. The temporary file will be renamed to the target path (atomically if the platform allows it) if there are no exceptions, and the temporary directory will always be removed. Calling cancel() will prevent the replacement. The file object will have a name attribute containing the file's path, and the mode and buffering arguments will be handled exactly as with open(). The resulting permissions will also match those produced by open(). 
E.g.:: with atomically_replaced_file('foo.txt', 'w') as f: f.write('hello jack.') """ assert 'w' in mode self.path = path self.mode = mode self.buffering = buffering self.canceled = False self.tmp_path = None self.cleanup = ExitStack() def __enter__(self): with self.cleanup: parent, name = os.path.split(self.path) tmpdir = self.cleanup.enter_context(temp_dir(dir=parent, prefix=name + b'-')) self.tmp_path = tmpdir + b'/pending' f = open(self.tmp_path, mode=self.mode, buffering=self.buffering) f = self.cleanup.enter_context(f) self.cleanup = self.cleanup.pop_all() return f def __exit__(self, exc_type, exc_value, traceback): with self.cleanup: if not (self.canceled or exc_type): os.rename(self.tmp_path, self.path) def cancel(self): self.canceled = True def slashappend(s): """Append "/" to 's' if it doesn't aleady end in "/".""" assert isinstance(s, bytes) if s and not s.endswith(b'/'): return s + b'/' else: return s def _mmap_do(f, sz, flags, prot, close): if not sz: st = os.fstat(f.fileno()) sz = st.st_size if not sz: # trying to open a zero-length map gives an error, but an empty # string has all the same behaviour of a zero-length map, ie. it has # no elements :) return '' map = io.mmap(f.fileno(), sz, flags, prot) if close: f.close() # map will persist beyond file close return map def mmap_read(f, sz = 0, close=True): """Create a read-only memory mapped region on file 'f'. If sz is 0, the region will cover the entire file. """ return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ, close) def mmap_readwrite(f, sz = 0, close=True): """Create a read-write memory mapped region on file 'f'. If sz is 0, the region will cover the entire file. """ return _mmap_do(f, sz, mmap.MAP_SHARED, mmap.PROT_READ|mmap.PROT_WRITE, close) def mmap_readwrite_private(f, sz = 0, close=True): """Create a read-write memory mapped region on file 'f'. If sz is 0, the region will cover the entire file. The map is private, which means the changes are never flushed back to the file. 
""" return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ|mmap.PROT_WRITE, close) _mincore = getattr(_helpers, 'mincore', None) if _mincore: # ./configure ensures that we're on Linux if MINCORE_INCORE isn't defined. MINCORE_INCORE = getattr(_helpers, 'MINCORE_INCORE', 1) _fmincore_chunk_size = None def _set_fmincore_chunk_size(): global _fmincore_chunk_size pref_chunk_size = 64 * 1024 * 1024 chunk_size = sc_page_size if (sc_page_size < pref_chunk_size): chunk_size = sc_page_size * (pref_chunk_size // sc_page_size) _fmincore_chunk_size = chunk_size def fmincore(fd): """Return the mincore() data for fd as a bytearray whose values can be tested via MINCORE_INCORE, or None if fd does not fully support the operation.""" st = os.fstat(fd) if (st.st_size == 0): return bytearray(0) if not _fmincore_chunk_size: _set_fmincore_chunk_size() pages_per_chunk = _fmincore_chunk_size // sc_page_size; page_count = (st.st_size + sc_page_size - 1) // sc_page_size; chunk_count = (st.st_size + _fmincore_chunk_size - 1) // _fmincore_chunk_size result = bytearray(page_count) for ci in range(chunk_count): pos = _fmincore_chunk_size * ci; msize = min(_fmincore_chunk_size, st.st_size - pos) try: m = io.mmap(fd, msize, mmap.MAP_PRIVATE, 0, 0, pos) except mmap.error as ex: if ex.errno in (errno.EINVAL, errno.ENODEV): # Perhaps the file was a pipe, i.e. "... | bup split ..." 
return None raise ex with m: try: _mincore(m, msize, 0, result, ci * pages_per_chunk) except OSError as ex: if ex.errno == errno.ENOSYS: return None raise return result def parse_timestamp(epoch_str): """Return the number of nanoseconds since the epoch that are described by epoch_str (100ms, 100ns, ...); when epoch_str cannot be parsed, throw a ValueError that may contain additional information.""" ns_per = {'s' : 1000000000, 'ms' : 1000000, 'us' : 1000, 'ns' : 1} match = re.match(r'^((?:[-+]?[0-9]+)?)(s|ms|us|ns)$', epoch_str) if not match: if re.match(r'^([-+]?[0-9]+)$', epoch_str): raise ValueError('must include units, i.e. 100ns, 100ms, ...') raise ValueError() (n, units) = match.group(1, 2) if not n: n = 1 n = int(n) return n * ns_per[units] def parse_num(s): """Parse string or bytes as a possibly unit suffixed number. For example: 199.2k means 203981 bytes 1GB means 1073741824 bytes 2.1 tb means 2199023255552 bytes """ if isinstance(s, bytes): # FIXME: should this raise a ValueError for UnicodeDecodeError # (perhaps with the latter as the context). s = s.decode('ascii') g = re.match(r'([-+\d.e]+)\s*(\w*)', str(s)) if not g: raise ValueError("can't parse %r as a number" % s) (val, unit) = g.groups() num = float(val) unit = unit.lower() if unit in ['t', 'tb']: mult = 1024*1024*1024*1024 elif unit in ['g', 'gb']: mult = 1024*1024*1024 elif unit in ['m', 'mb']: mult = 1024*1024 elif unit in ['k', 'kb']: mult = 1024 elif unit in ['', 'b']: mult = 1 else: raise ValueError("invalid unit %r in number %r" % (unit, s)) return int(num*mult) saved_errors = [] def add_error(e): """Append an error message to the list of saved errors. Once processing is able to stop and output the errors, the saved errors are accessible in the module variable helpers.saved_errors. 
""" saved_errors.append(e) log('%-70s\n' % e) def clear_errors(): global saved_errors saved_errors = [] def die_if_errors(msg=None, status=1): global saved_errors if saved_errors: if not msg: msg = 'warning: %d errors encountered\n' % len(saved_errors) log(msg) sys.exit(status) def handle_ctrl_c(): """Replace the default exception handler for KeyboardInterrupt (Ctrl-C). The new exception handler will make sure that bup will exit without an ugly stacktrace when Ctrl-C is hit. """ oldhook = sys.excepthook def newhook(exctype, value, traceback): if exctype == KeyboardInterrupt: log('\nInterrupted.\n') else: oldhook(exctype, value, traceback) sys.excepthook = newhook def columnate(l, prefix): """Format elements of 'l' in columns with 'prefix' leading each line. The number of columns is determined automatically based on the string lengths. """ binary = isinstance(prefix, bytes) nothing = b'' if binary else '' nl = b'\n' if binary else '\n' if not l: return nothing l = l[:] clen = max(len(s) for s in l) ncols = (tty_width() - len(prefix)) // (clen + 2) if ncols <= 1: ncols = 1 clen = 0 cols = [] while len(l) % ncols: l.append(nothing) rows = len(l) // ncols for s in range(0, len(l), rows): cols.append(l[s:s+rows]) out = [] fmt = b'%-*s' if binary else '%-*s' for row in zip(*cols): out.append(prefix + nothing.join((fmt % (clen+2, s)) for s in row) + nl) return nothing.join(out) def parse_date_or_fatal(str, fatal): """Parses the given date or calls Option.fatal(). 
For now we expect a string that contains a float.""" try: date = float(str) except ValueError as e: raise fatal('invalid date format (should be a float): %r' % e) else: return date def parse_excludes(options, fatal): """Traverse the options and extract all excludes, or call Option.fatal().""" excluded_paths = [] for flag in options: (option, parameter) = flag if option == '--exclude': excluded_paths.append(resolve_parent(argv_bytes(parameter))) elif option == '--exclude-from': try: f = open(resolve_parent(argv_bytes(parameter)), 'rb') except IOError as e: raise fatal("couldn't read %r" % parameter) for exclude_path in f.readlines(): # FIXME: perhaps this should be rstrip('\n') exclude_path = resolve_parent(exclude_path.strip()) if exclude_path: excluded_paths.append(exclude_path) return sorted(frozenset(excluded_paths)) def parse_rx_excludes(options, fatal): """Traverse the options and extract all rx excludes, or call Option.fatal().""" excluded_patterns = [] for flag in options: (option, parameter) = flag if option == '--exclude-rx': try: excluded_patterns.append(re.compile(argv_bytes(parameter))) except re.error as ex: fatal('invalid --exclude-rx pattern (%r): %s' % (parameter, ex)) elif option == '--exclude-rx-from': try: f = open(resolve_parent(parameter), 'rb') except IOError as e: raise fatal("couldn't read %r" % parameter) for pattern in f.readlines(): spattern = pattern.rstrip(b'\n') if not spattern: continue try: excluded_patterns.append(re.compile(spattern)) except re.error as ex: fatal('invalid --exclude-rx pattern (%r): %s' % (spattern, ex)) return excluded_patterns def should_rx_exclude_path(path, exclude_rxs): """Return True if path matches a regular expression in exclude_rxs.""" for rx in exclude_rxs: if rx.search(path): debug1('Skipping %r: excluded by rx pattern %r.\n' % (path, rx.pattern)) return True return False # FIXME: Carefully consider the use of functions (os.path.*, etc.) 
# that resolve against the current filesystem in the strip/graft # functions for example, but elsewhere as well. I suspect bup's not # always being careful about that. For some cases, the contents of # the current filesystem should be irrelevant, and consulting it might # produce the wrong result, perhaps via unintended symlink resolution, # for example. def path_components(path): """Break path into a list of pairs of the form (name, full_path_to_name). Path must start with '/'. Example: '/home/foo' -> [('', '/'), ('home', '/home'), ('foo', '/home/foo')]""" if not path.startswith(b'/'): raise Exception('path must start with "/": %s' % path_msg(path)) # Since we assume path startswith('/'), we can skip the first element. result = [(b'', b'/')] norm_path = os.path.abspath(path) if norm_path == b'/': return result full_path = b'' for p in norm_path.split(b'/')[1:]: full_path += b'/' + p result.append((p, full_path)) return result def stripped_path_components(path, strip_prefixes): """Strip any prefix in strip_prefixes from path and return a list of path components where each component is (name, none_or_full_fs_path_to_name). Assume path startswith('/'). See thelpers.py for examples.""" normalized_path = os.path.abspath(path) sorted_strip_prefixes = sorted(strip_prefixes, key=len, reverse=True) for bp in sorted_strip_prefixes: normalized_bp = os.path.abspath(bp) if normalized_bp == b'/': continue if normalized_path.startswith(normalized_bp): prefix = normalized_path[:len(normalized_bp)] result = [] for p in normalized_path[len(normalized_bp):].split(b'/'): if p: # not root prefix += b'/' prefix += p result.append((p, prefix)) return result # Nothing to strip. return path_components(path) def grafted_path_components(graft_points, path): # Create a result that consists of some number of faked graft # directories before the graft point, followed by all of the real # directories from path that are after the graft point. 
Arrange # for the directory at the graft point in the result to correspond # to the "orig" directory in --graft orig=new. See t/thelpers.py # for some examples. # Note that given --graft orig=new, orig and new have *nothing* to # do with each other, even if some of their component names # match. i.e. --graft /foo/bar/baz=/foo/bar/bax is semantically # equivalent to --graft /foo/bar/baz=/x/y/z, or even # /foo/bar/baz=/x. # FIXME: This can't be the best solution... clean_path = os.path.abspath(path) for graft_point in graft_points: old_prefix, new_prefix = graft_point # Expand prefixes iff not absolute paths. old_prefix = os.path.normpath(old_prefix) new_prefix = os.path.normpath(new_prefix) if clean_path.startswith(old_prefix): escaped_prefix = re.escape(old_prefix) grafted_path = re.sub(br'^' + escaped_prefix, new_prefix, clean_path) # Handle /foo=/ (at least) -- which produces //whatever. grafted_path = b'/' + grafted_path.lstrip(b'/') clean_path_components = path_components(clean_path) # Count the components that were stripped. strip_count = 0 if old_prefix == b'/' else old_prefix.count(b'/') new_prefix_parts = new_prefix.split(b'/') result_prefix = grafted_path.split(b'/')[:new_prefix.count(b'/')] result = [(p, None) for p in result_prefix] \ + clean_path_components[strip_count:] # Now set the graft point name to match the end of new_prefix. graft_point = len(result_prefix) result[graft_point] = \ (new_prefix_parts[-1], clean_path_components[strip_count][1]) if new_prefix == b'/': # --graft ...=/ is a special case. return result[1:] return result return path_components(clean_path) Sha1 = hashlib.sha1 _localtime = getattr(_helpers, 'localtime', None) if _localtime: bup_time = namedtuple('bup_time', ['tm_year', 'tm_mon', 'tm_mday', 'tm_hour', 'tm_min', 'tm_sec', 'tm_wday', 'tm_yday', 'tm_isdst', 'tm_gmtoff', 'tm_zone']) # Define a localtime() that returns bup_time when possible. 
Note: # this means that any helpers.localtime() results may need to be # passed through to_py_time() before being passed to python's time # module, which doesn't appear willing to ignore the extra items. if _localtime: def localtime(time): return bup_time(*_helpers.localtime(int(floor(time)))) def utc_offset_str(t): """Return the local offset from UTC as "+hhmm" or "-hhmm" for time t. If the current UTC offset does not represent an integer number of minutes, the fractional component will be truncated.""" off = localtime(t).tm_gmtoff # Note: // doesn't truncate like C for negative values, it rounds down. offmin = abs(off) // 60 m = offmin % 60 h = (offmin - m) // 60 return b'%+03d%02d' % (-h if off < 0 else h, m) def to_py_time(x): if isinstance(x, time.struct_time): return x return time.struct_time(x[:9]) else: localtime = time.localtime def utc_offset_str(t): return time.strftime(b'%z', localtime(t)) def to_py_time(x): return x _some_invalid_save_parts_rx = re.compile(br'[\[ ~^:?*\\]|\.\.|//|@{') def valid_save_name(name): # Enforce a superset of the restrictions in git-check-ref-format(1) if name == b'@' \ or name.startswith(b'/') or name.endswith(b'/') \ or name.endswith(b'.'): return False if _some_invalid_save_parts_rx.search(name): return False for c in name: if byte_int(c) < 0x20 or byte_int(c) == 0x7f: return False for part in name.split(b'/'): if part.startswith(b'.') or part.endswith(b'.lock'): return False return True _period_rx = re.compile(br'^([0-9]+)(s|min|h|d|w|m|y)$') def period_as_secs(s): if s == b'forever': return float('inf') match = _period_rx.match(s) if not match: return None mag = int(match.group(1)) scale = match.group(2) return mag * {b's': 1, b'min': 60, b'h': 60 * 60, b'd': 60 * 60 * 24, b'w': 60 * 60 * 24 * 7, b'm': 60 * 60 * 24 * 31, b'y': 60 * 60 * 24 * 366}[scale] bup-0.33.3/lib/bup/hlinkdb.py000066400000000000000000000067011454333004200157110ustar00rootroot00000000000000 from contextlib import ExitStack import os, pickle from 
bup.helpers import atomically_replaced_file, unlink def pickle_load(filename): try: f = open(filename, 'rb') except FileNotFoundError: return None with f: return pickle.load(f, encoding='bytes') class Error(Exception): pass class HLinkDB: def __init__(self, filename): self.closed = False self._cleanup = ExitStack() self._filename = filename self._pending_save = None # Map a "dev:ino" node to a list of paths associated with that node. self._node_paths = pickle_load(filename) or {} # Map a path to a "dev:ino" node (a reverse hard link index). self._path_node = {} for node, paths in self._node_paths.items(): for path in paths: self._path_node[path] = node def prepare_save(self): """ Commit all of the relevant data to disk. Do as much work as possible without actually making the changes visible.""" if self._pending_save: raise Error('save of %r already in progress' % self._filename) with self._cleanup: if self._node_paths: dir, name = os.path.split(self._filename) self._pending_save = atomically_replaced_file(self._filename, mode='wb', buffering=65536) with self._cleanup.enter_context(self._pending_save) as f: pickle.dump(self._node_paths, f, 2) else: # No data self._cleanup.callback(lambda: unlink(self._filename)) self._cleanup = self._cleanup.pop_all() def commit_save(self): self.closed = True if self._node_paths and not self._pending_save: raise Error('cannot commit save of %r; no save prepared' % self._filename) self._cleanup.close() self._pending_save = None def abort_save(self): self.closed = True with self._cleanup: if self._pending_save: self._pending_save.cancel() self._pending_save = None def __enter__(self): return self def __exit__(self, type, value, traceback): self.abort_save() def __del__(self): assert self.closed def add_path(self, path, dev, ino): # Assume path is new. 
node = b'%d:%d' % (dev, ino) self._path_node[path] = node link_paths = self._node_paths.get(node) if link_paths and path not in link_paths: link_paths.append(path) else: self._node_paths[node] = [path] def _del_node_path(self, node, path): link_paths = self._node_paths[node] link_paths.remove(path) if not link_paths: del self._node_paths[node] def change_path(self, path, new_dev, new_ino): prev_node = self._path_node.get(path) if prev_node: self._del_node_path(prev_node, path) self.add_path(new_dev, new_ino, path) def del_path(self, path): # Path may not be in db (if updating a pre-hardlink support index). node = self._path_node.get(path) if node: self._del_node_path(node, path) del self._path_node[path] def node_paths(self, dev, ino): node = b'%d:%d' % (dev, ino) return self._node_paths[node] bup-0.33.3/lib/bup/index.py000066400000000000000000000541351454333004200154110ustar00rootroot00000000000000 from contextlib import ExitStack import errno, os, stat, struct from bup import metadata, xstat from bup._helpers import UINT_MAX, bytescmp from bup.compat import pending_raise from bup.helpers import (add_error, atomically_replaced_file, log, merge_iter, mmap_readwrite, progress, qprogress, resolve_parent, slashappend) EMPTY_SHA = b'\0' * 20 FAKE_SHA = b'\x01' * 20 INDEX_HDR = b'BUPI\0\0\0\7' # Time values are handled as integer nanoseconds since the epoch in # memory, but are written as xstat/metadata timespecs. This behavior # matches the existing metadata/xstat/.bupm code. # Record times (mtime, ctime, atime) as xstat/metadata timespecs, and # store all of the times in the index so they won't interfere with the # forthcoming metadata cache. INDEX_SIG = ('!' 
'Q' # dev 'Q' # ino 'Q' # nlink 'qQ' # ctime_s, ctime_ns 'qQ' # mtime_s, mtime_ns 'qQ' # atime_s, atime_ns 'Q' # size 'I' # mode 'I' # gitmode '20s' # sha 'H' # flags 'Q' # children_ofs 'I' # children_n 'Q') # meta_ofs ENTLEN = struct.calcsize(INDEX_SIG) FOOTER_SIG = '!Q' FOOTLEN = struct.calcsize(FOOTER_SIG) IX_EXISTS = 0x8000 # file exists on filesystem IX_HASHVALID = 0x4000 # the stored sha1 matches the filesystem IX_SHAMISSING = 0x2000 # the stored sha1 object doesn't seem to exist class Error(Exception): pass class MetaStoreReader: def __init__(self, filename): self._file = None self._file = open(filename, 'rb') def close(self): f, self._file = self._file, None if f: f.close() def __del__(self): assert not self._file def __enter__(self): return self def __exit__(self, type, value, traceback): with pending_raise(value, rethrow=True): self.close() def metadata_at(self, ofs): self._file.seek(ofs) return metadata.Metadata.read(self._file) class MetaStoreWriter: # For now, we just append to the file, and try to handle any # truncation or corruption somewhat sensibly. def __init__(self, filename): self._closed = False # Map metadata hashes to bupindex.meta offsets. self._offsets = {} self._filename = filename self._file = None # FIXME: see how slow this is; does it matter? 
m_file = open(filename, 'ab+') try: m_file.seek(0) try: m_off = m_file.tell() m = metadata.Metadata.read(m_file) while m: m_encoded = m.encode() self._offsets[m_encoded] = m_off m_off = m_file.tell() m = metadata.Metadata.read(m_file) except EOFError: pass except: log('index metadata in %r appears to be corrupt\n' % filename) raise finally: m_file.close() self._file = open(filename, 'ab') def close(self): self._closed = True if self._file: self._file.close() self._file = None def __del__(self): assert self._closed def __enter__(self): return self def __exit__(self, type, value, traceback): with pending_raise(value, rethrow=False): self.close() def store(self, metadata): meta_encoded = metadata.encode(include_path=False) ofs = self._offsets.get(meta_encoded) if ofs: return ofs ofs = self._file.tell() self._file.write(meta_encoded) self._offsets[meta_encoded] = ofs return ofs class Level: def __init__(self, ename, parent): self.parent = parent self.ename = ename self.list = [] self.count = 0 def write(self, f): (ofs,n) = (f.tell(), len(self.list)) if self.list: count = len(self.list) #log('popping %r with %d entries\n' # % (''.join(self.ename), count)) for e in self.list: e.write(f) if self.parent: self.parent.count += count + self.count return (ofs,n) def _golevel(level, f, ename, newentry, metastore, tmax): # close nodes back up the tree assert(level) default_meta_ofs = metastore.store(metadata.Metadata()) while ename[:len(level.ename)] != level.ename: n = BlankNewEntry(level.ename[-1], default_meta_ofs, tmax) n.flags |= IX_EXISTS (n.children_ofs,n.children_n) = level.write(f) level.parent.list.append(n) level = level.parent # create nodes down the tree while len(level.ename) < len(ename): level = Level(ename[:len(level.ename)+1], level) # are we in precisely the right place? 
assert(ename == level.ename) n = newentry or \ BlankNewEntry(ename and level.ename[-1] or None, default_meta_ofs, tmax) (n.children_ofs,n.children_n) = level.write(f) if level.parent: level.parent.list.append(n) level = level.parent return level class Entry: def __init__(self, basename, name, meta_ofs, tmax): assert basename is None or isinstance(basename, bytes) assert name is None or isinstance(name, bytes) self.basename = basename self.name = name self.meta_ofs = meta_ofs self.tmax = tmax self.children_ofs = 0 self.children_n = 0 def __repr__(self): return ("(%r,0x%04x,%d,%d,%d,%d,%d,%d,%s/%s,0x%04x,%d,0x%08x/%d)" % (self.name, self.dev, self.ino, self.nlink, self.ctime, self.mtime, self.atime, self.size, self.mode, self.gitmode, self.flags, self.meta_ofs, self.children_ofs, self.children_n)) def packed(self): try: ctime = xstat.nsecs_to_timespec(self.ctime) mtime = xstat.nsecs_to_timespec(self.mtime) atime = xstat.nsecs_to_timespec(self.atime) return struct.pack(INDEX_SIG, self.dev, self.ino, self.nlink, ctime[0], ctime[1], mtime[0], mtime[1], atime[0], atime[1], self.size, self.mode, self.gitmode, self.sha, self.flags, self.children_ofs, self.children_n, self.meta_ofs) except (DeprecationWarning, struct.error) as e: log('pack error: %s (%r)\n' % (e, self)) raise def stale(self, st, check_device=True): if self.size != st.st_size: return True if self.mtime != st.st_mtime: return True if self.sha == EMPTY_SHA: return True if not self.gitmode: return True if self.ctime != st.st_ctime: return True if self.ino != st.st_ino: return True if self.nlink != st.st_nlink: return True if not (self.flags & IX_EXISTS): return True if check_device and (self.dev != st.st_dev): return True return False def update_from_stat(self, st, meta_ofs): # Should only be called when the entry is stale(), and # invalidate() should almost certainly be called afterward. 
self.dev = st.st_dev self.ino = st.st_ino self.nlink = st.st_nlink self.ctime = st.st_ctime self.mtime = st.st_mtime self.atime = st.st_atime self.size = st.st_size self.mode = st.st_mode self.flags |= IX_EXISTS self.meta_ofs = meta_ofs self._fixup() def _fixup(self): self.mtime = self._fixup_time(self.mtime) self.ctime = self._fixup_time(self.ctime) def _fixup_time(self, t): if self.tmax != None and t > self.tmax: return self.tmax else: return t def is_valid(self): f = IX_HASHVALID|IX_EXISTS return (self.flags & f) == f def invalidate(self): self.flags &= ~IX_HASHVALID def validate(self, gitmode, sha): assert(sha) assert(gitmode) assert(gitmode+0 == gitmode) self.gitmode = gitmode self.sha = sha self.flags |= IX_HASHVALID|IX_EXISTS def exists(self): return not self.is_deleted() def sha_missing(self): return (self.flags & IX_SHAMISSING) or not (self.flags & IX_HASHVALID) def is_deleted(self): return (self.flags & IX_EXISTS) == 0 def set_deleted(self): if self.flags & IX_EXISTS: self.flags &= ~(IX_EXISTS | IX_HASHVALID) def is_real(self): return not self.is_fake() def is_fake(self): return not self.ctime def _cmp(self, other): # Note reversed name ordering bc = bytescmp(other.name, self.name) if bc != 0: return bc vc = self.is_valid() - other.is_valid() if vc != 0: return vc fc = self.is_fake() - other.is_fake() if fc != 0: return fc return 0 def __eq__(self, other): return self._cmp(other) == 0 def __ne__(self, other): return self._cmp(other) != 0 def __lt__(self, other): return self._cmp(other) < 0 def __gt__(self, other): return self._cmp(other) > 0 def __le__(self, other): return self._cmp(other) <= 0 def __ge__(self, other): return self._cmp(other) >= 0 def write(self, f): f.write(self.basename + b'\0' + self.packed()) class NewEntry(Entry): def __init__(self, basename, name, tmax, dev, ino, nlink, ctime, mtime, atime, size, mode, gitmode, sha, flags, meta_ofs, children_ofs, children_n): Entry.__init__(self, basename, name, meta_ofs, tmax) (self.dev, self.ino, 
self.nlink, self.ctime, self.mtime, self.atime, self.size, self.mode, self.gitmode, self.sha, self.flags, self.children_ofs, self.children_n ) = (dev, ino, nlink, ctime, mtime, atime, size, mode, gitmode, sha, flags, children_ofs, children_n) self._fixup() class BlankNewEntry(NewEntry): def __init__(self, basename, meta_ofs, tmax): NewEntry.__init__(self, basename, basename, tmax, 0, 0, 0, 0, 0, 0, 0, 0, 0, EMPTY_SHA, 0, meta_ofs, 0, 0) class ExistingEntry(Entry): def __init__(self, parent, basename, name, m, ofs): Entry.__init__(self, basename, name, None, None) self.parent = parent self._m = m self._ofs = ofs (self.dev, self.ino, self.nlink, self.ctime, ctime_ns, self.mtime, mtime_ns, self.atime, atime_ns, self.size, self.mode, self.gitmode, self.sha, self.flags, self.children_ofs, self.children_n, self.meta_ofs ) = struct.unpack(INDEX_SIG, m[ofs : ofs + ENTLEN]) self.atime = xstat.timespec_to_nsecs((self.atime, atime_ns)) self.mtime = xstat.timespec_to_nsecs((self.mtime, mtime_ns)) self.ctime = xstat.timespec_to_nsecs((self.ctime, ctime_ns)) # effectively, we don't bother messing with IX_SHAMISSING if # not IX_HASHVALID, since it's redundant, and repacking is more # expensive than not repacking. # This is implemented by having sha_missing() check IX_HASHVALID too. def set_sha_missing(self, val): val = val and 1 or 0 oldval = self.sha_missing() and 1 or 0 if val != oldval: flag = val and IX_SHAMISSING or 0 newflags = (self.flags & (~IX_SHAMISSING)) | flag self.flags = newflags self.repack() def unset_sha_missing(self, flag): if self.flags & IX_SHAMISSING: self.flags &= ~IX_SHAMISSING self.repack() def repack(self): self._m[self._ofs:self._ofs+ENTLEN] = self.packed() if self.parent and not self.is_valid(): self.parent.invalidate() self.parent.repack() def iter(self, name=None, wantrecurse=None): dname = name if dname and not dname.endswith(b'/'): dname += b'/' ofs = self.children_ofs assert(ofs <= len(self._m)) assert(self.children_n <= UINT_MAX) # i.e. 
python struct 'I' for i in range(self.children_n): eon = self._m.find(b'\0', ofs) assert(eon >= 0) assert(eon >= ofs) assert(eon > ofs) basename = self._m[ofs : ofs + (eon - ofs)] child = ExistingEntry(self, basename, self.name + basename, self._m, eon+1) if (not dname or child.name.startswith(dname) or child.name.endswith(b'/') and dname.startswith(child.name)): if not wantrecurse or wantrecurse(child): for e in child.iter(name=name, wantrecurse=wantrecurse): yield e if not name or child.name == name or child.name.startswith(dname): yield child ofs = eon + 1 + ENTLEN def __iter__(self): return self.iter() class Reader: def __init__(self, filename): self.closed = False self.filename = filename self.m = b'' self.writable = False self.count = 0 f = None try: f = open(filename, 'rb+') except IOError as e: if e.errno == errno.ENOENT: pass else: raise if f: b = f.read(len(INDEX_HDR)) if b != INDEX_HDR: log('warning: %s: header: expected %r, got %r\n' % (filename, INDEX_HDR, b)) else: st = os.fstat(f.fileno()) if st.st_size: self.m = mmap_readwrite(f) self.writable = True self.count = struct.unpack(FOOTER_SIG, self.m[st.st_size - FOOTLEN : st.st_size])[0] def __enter__(self): return self def __exit__(self, type, value, traceback): with pending_raise(value, rethrow=False): self.close() def __len__(self): return int(self.count) def forward_iter(self): ofs = len(INDEX_HDR) while ofs+ENTLEN <= len(self.m)-FOOTLEN: eon = self.m.find(b'\0', ofs) assert(eon >= 0) assert(eon >= ofs) assert(eon > ofs) basename = self.m[ofs : ofs + (eon - ofs)] yield ExistingEntry(None, basename, basename, self.m, eon+1) ofs = eon + 1 + ENTLEN def iter(self, name=None, wantrecurse=None): if len(self.m) > len(INDEX_HDR)+ENTLEN: dname = name if dname and not dname.endswith(b'/'): dname += b'/' root = ExistingEntry(None, b'/', b'/', self.m, len(self.m)-FOOTLEN-ENTLEN) for sub in root.iter(name=name, wantrecurse=wantrecurse): yield sub if not dname or dname == root.name: yield root def __iter__(self): 
        return self.iter()

    def find(self, name):
        """Return the entry named exactly 'name', or None if not found."""
        return next((e for e in self.iter(name, wantrecurse=lambda x : True)
                     if e.name == name), None)

    def exists(self):
        # Truthy when an index was actually mapped from disk.
        return self.m

    def save(self):
        if self.writable and self.m:
            self.m.flush()

    def close(self):
        self.closed = True
        self.save()
        if self.writable and self.m:
            self.m.close()
            self.m = None
            self.writable = False

    def __del__(self):
        # Catch Readers that were never explicitly closed.
        assert self.closed

    def filter(self, prefixes, wantrecurse=None):
        """Yield (name, entry) for entries under each of the given prefixes,
        rewriting each entry name so the reduced prefix 'rp' is replaced by
        the corresponding original path."""
        for (rp, path) in reduce_paths(prefixes):
            any_entries = False
            for e in self.iter(rp, wantrecurse=wantrecurse):
                any_entries = True
                assert(e.name.startswith(rp))
                name = path + e.name[len(rp):]
                yield (name, e)
            if not any_entries:
                # Always return at least the top for each prefix.
                # Otherwise something like "save x/y" will produce
                # nothing if x is up to date.
                pe = self.find(rp)
                if not pe:
                    raise Exception("cannot find %r" % rp)
                name = path + pe.name[len(rp):]
                yield (name, pe)


# FIXME: this function isn't very generic, because it splits the filename
# in an odd way and depends on a terminating '/' to indicate directories.
def pathsplit(p): """Split a path into a list of elements of the file system hierarchy.""" l = p.split(b'/') l = [i + b'/' for i in l[:-1]] + l[-1:] if l[-1] == b'': l.pop() # extra blank caused by terminating '/' return l class Writer: def __init__(self, filename, metastore, tmax): self.closed = False self.rootlevel = self.level = Level([], None) self.pending_index = None self.f = None self.count = 0 self.lastfile = None self.filename = None self.filename = filename = resolve_parent(filename) self.metastore = metastore self.tmax = tmax (dir,name) = os.path.split(filename) with ExitStack() as self.cleanup: self.pending_index = atomically_replaced_file(self.filename, mode='wb', buffering=65536) self.f = self.cleanup.enter_context(self.pending_index) self.cleanup.enter_context(self.f) self.f.write(INDEX_HDR) self.cleanup = self.cleanup.pop_all() def __enter__(self): return self def __exit__(self, type, value, traceback): with pending_raise(value, rethrow=False): self.abort() def abort(self): self.close(abort=True) def flush(self): if self.level: self.level = _golevel(self.level, self.f, [], None, self.metastore, self.tmax) self.count = self.rootlevel.count if self.count: self.count += 1 self.f.write(struct.pack(FOOTER_SIG, self.count)) self.f.flush() assert(self.level == None) def close(self, abort=False): self.closed = True with self.cleanup: if abort: self.pending_index.cancel() else: self.flush() def __del__(self): assert self.closed def _add(self, ename, entry): if self.lastfile and self.lastfile <= ename: raise Error('%r must come before %r' % (''.join(ename), ''.join(self.lastfile))) self.lastfile = ename self.level = _golevel(self.level, self.f, ename, entry, self.metastore, self.tmax) def add(self, name, st, meta_ofs, hashgen = None): endswith = name.endswith(b'/') ename = pathsplit(name) basename = ename[-1] #log('add: %r %r\n' % (basename, name)) flags = IX_EXISTS sha = None if hashgen: (gitmode, sha) = hashgen(name) flags |= IX_HASHVALID else: (gitmode, 
sha) = (0, EMPTY_SHA) if st: isdir = stat.S_ISDIR(st.st_mode) assert(isdir == endswith) e = NewEntry(basename, name, self.tmax, st.st_dev, st.st_ino, st.st_nlink, st.st_ctime, st.st_mtime, st.st_atime, st.st_size, st.st_mode, gitmode, sha, flags, meta_ofs, 0, 0) else: assert(endswith) meta_ofs = self.metastore.store(metadata.Metadata()) e = BlankNewEntry(basename, meta_ofs, self.tmax) e.gitmode = gitmode e.sha = sha e.flags = flags self._add(ename, e) def add_ixentry(self, e): e.children_ofs = e.children_n = 0 self._add(pathsplit(e.name), e) def new_reader(self): self.flush() return Reader(self.f.name) def _slashappend_or_add_error(p, caller): """Return p, after ensuring it has a single trailing slash if it names a directory, unless there's an OSError, in which case, call add_error() and return None.""" try: st = os.lstat(p) except OSError as e: add_error('%s: %s' % (caller, e)) return None else: if stat.S_ISDIR(st.st_mode): return slashappend(p) return p def unique_resolved_paths(paths): "Return a collection of unique resolved paths." 
rps = (_slashappend_or_add_error(resolve_parent(p), 'unique_resolved_paths') for p in paths) return frozenset((x for x in rps if x is not None)) def reduce_paths(paths): xpaths = [] for p in paths: rp = _slashappend_or_add_error(resolve_parent(p), 'reduce_paths') if rp: xpaths.append((rp, slashappend(p) if rp.endswith(b'/') else p)) xpaths.sort() paths = [] prev = None for (rp, p) in xpaths: if prev and (prev == rp or (prev.endswith(b'/') and rp.startswith(prev))): continue # already superceded by previous path paths.append((rp, p)) prev = rp paths.sort(reverse=True) return paths def merge(*iters): def pfunc(count, total): qprogress('bup: merging indexes (%d/%d)\r' % (count, total)) def pfinal(count, total): progress('bup: merging indexes (%d/%d), done.\n' % (count, total)) return merge_iter(iters, 1024, pfunc, pfinal, key='name') bup-0.33.3/lib/bup/io.py000066400000000000000000000034401454333004200147020ustar00rootroot00000000000000 import mmap as py_mmap from bup.compat import pending_raise def byte_stream(file): return file.buffer def path_msg(x): """Return a string representation of a path.""" # FIXME: configurability (might git-config quotePath be involved?) return x.decode(errors='backslashreplace') assert not hasattr(py_mmap.mmap, '__del__') if hasattr(py_mmap.mmap, '__enter__'): assert hasattr(py_mmap.mmap, '__exit__') class mmap(py_mmap.mmap): '''mmap.mmap wrapper that detects and complains about any instances that aren't explicitly closed. ''' def __new__(cls, *args, **kwargs): result = super().__new__(cls, *args, **kwargs) result._bup_closed = True # supports __del__ return result def __init__(self, *args, **kwargs): # Silence deprecation warnings. mmap's current parent is # object, which accepts no params and as of at least 2.7 # warns about them. 
if py_mmap.mmap.__init__ is not object.__init__: super().__init__(self, *args, **kwargs) self._bup_closed = False def close(self): self._bup_closed = True super().close() if hasattr(py_mmap.mmap, '__enter__'): def __enter__(self): super().__enter__() return self def __exit__(self, type, value, traceback): # Don't call self.close() when the parent has its own __exit__; # defer to it. self._bup_closed = True result = super().__exit__(type, value, traceback) return result else: def __enter__(self): return self def __exit__(self, type, value, traceback): with pending_raise(value, rethrow=False): self.close() def __del__(self): assert self._bup_closed bup-0.33.3/lib/bup/ls.py000066400000000000000000000164361454333004200147220ustar00rootroot00000000000000"""Common code for listing files from a bup repository.""" from __future__ import absolute_import from binascii import hexlify from itertools import chain from stat import S_ISDIR import os.path import posixpath from bup import metadata, vfs, xstat from bup.compat import argv_bytes from bup.io import path_msg from bup.options import Options from bup.repo import LocalRepo, RemoteRepo from bup.helpers import columnate, istty1, log def item_hash(item, tree_for_commit): """If the item is a Commit, return its commit oid, otherwise return the item's oid, if it has one. """ if tree_for_commit and isinstance(item, vfs.Commit): return item.coid return getattr(item, 'oid', None) def item_info(item, name, show_hash = False, commit_hash=False, long_fmt = False, classification = None, numeric_ids = False, human_readable = False): """Return bytes containing the information to display for the VFS item. Classification may be "all", "type", or None. """ result = b'' if show_hash: oid = item_hash(item, commit_hash) result += b'%s ' % (hexlify(oid) if oid else b'0000000000000000000000000000000000000000') if long_fmt: meta = item.meta.copy() meta.path = name # FIXME: need some way to track fake vs real meta items? 
result += metadata.summary_bytes(meta, numeric_ids=numeric_ids, classification=classification, human_readable=human_readable) else: result += name if classification: cls = xstat.classification_str(vfs.item_mode(item), classification == 'all') result += cls.encode('ascii') return result optspec = """ bup ls [-r host:path] [-l] [-d] [-F] [-a] [-A] [-s] [-n] [path...] -- r,remote= remote repository path s,hash show hash for each file commit-hash show commit hash instead of tree for commits (implies -s) a,all show hidden files A,almost-all show hidden files except . and .. l use a detailed, long listing format d,directory show directories, not contents; don't follow symlinks F,classify append type indicator: dir/ sym@ fifo| sock= exec* file-type append type indicator: dir/ sym@ fifo| sock= human-readable print human readable file sizes (i.e. 3.9K, 4.7M) n,numeric-ids list numeric IDs (user, group, etc.) rather than names """ def opts_from_cmdline(args, onabort=None, pwd=b'/'): """Parse ls command line arguments and return a dictionary of ls options, agumented with "classification", "long_listing", "paths", and "show_hidden". 
""" if onabort: opt, flags, extra = Options(optspec, onabort=onabort).parse_bytes(args) else: opt, flags, extra = Options(optspec).parse_bytes(args) opt.paths = [argv_bytes(x) for x in extra] or (pwd,) opt.long_listing = opt.l opt.classification = None opt.show_hidden = None for flag in flags: option, parameter = flag if option in ('-F', '--classify'): opt.classification = 'all' elif option == '--file-type': opt.classification = 'type' elif option in ('-a', '--all'): opt.show_hidden = 'all' elif option in ('-A', '--almost-all'): opt.show_hidden = 'almost' return opt def within_repo(repo, opt, out, pwd=b''): if opt.commit_hash: opt.hash = True def item_line(item, name): return item_info(item, name, show_hash=opt.hash, commit_hash=opt.commit_hash, long_fmt=opt.long_listing, classification=opt.classification, numeric_ids=opt.numeric_ids, human_readable=opt.human_readable) ret = 0 want_meta = bool(opt.long_listing or opt.classification) pending = [] last_n = len(opt.paths) - 1 for n, printpath in enumerate(opt.paths): path = posixpath.join(pwd, printpath) try: if last_n > 0: out.write(b'%s:\n' % printpath) if opt.directory: resolved = vfs.resolve(repo, path, follow=False) else: resolved = vfs.try_resolve(repo, path, want_meta=want_meta) leaf_name, leaf_item = resolved[-1] if not leaf_item: log('error: cannot access %r in %r\n' % ('/'.join(path_msg(name) for name, item in resolved), path_msg(path))) ret = 1 continue if not opt.directory and S_ISDIR(vfs.item_mode(leaf_item)): items = vfs.contents(repo, leaf_item, want_meta=want_meta) if opt.show_hidden == 'all': # Match non-bup "ls -a ... /". 
parent = resolved[-2] if len(resolved) > 1 else resolved[0] items = chain(items, ((b'..', parent[1]),)) for sub_name, sub_item in sorted(items, key=lambda x: x[0]): if opt.show_hidden != 'all' and sub_name == b'.': continue if sub_name.startswith(b'.') and \ opt.show_hidden not in ('almost', 'all'): continue if opt.l: sub_item = vfs.ensure_item_has_metadata(repo, sub_item, include_size=True) elif want_meta: sub_item = vfs.augment_item_meta(repo, sub_item, include_size=True) line = item_line(sub_item, sub_name) if not opt.long_listing and istty1: pending.append(line) else: out.write(line) out.write(b'\n') else: if opt.long_listing: leaf_item = vfs.augment_item_meta(repo, leaf_item, include_size=True) line = item_line(leaf_item, os.path.normpath(path)) if not opt.long_listing and istty1: pending.append(line) else: out.write(line) out.write(b'\n') except vfs.IOError as ex: log('bup: %s\n' % ex) ret = 1 if pending: out.write(columnate(pending, b'')) pending = [] if n < last_n: out.write(b'\n') return ret def via_cmdline(args, out=None, onabort=None): """Write a listing of a file or directory in the bup repository to out. When a long listing is not requested and stdout is attached to a tty, the output is formatted in columns. When not attached to tty (for example when the output is piped to another command), one file is listed per line. 
""" assert out opt = opts_from_cmdline(args, onabort=onabort) with RemoteRepo(argv_bytes(opt.remote)) if opt.remote \ else LocalRepo() as repo: return within_repo(repo, opt, out) bup-0.33.3/lib/bup/main.py000077500000000000000000000307761454333004200152360ustar00rootroot00000000000000 from __future__ import absolute_import, print_function import bup_main, os, sys if bup_main.env_pythonpath: if sys.version_info[0] < 3: os.environ['PYTHONPATH'] = bup_main.env_pythonpath else: os.environb[b'PYTHONPATH'] = bup_main.env_pythonpath else: del os.environ['PYTHONPATH'] from importlib import import_module from pkgutil import iter_modules from subprocess import PIPE from threading import Thread import re, select, signal, subprocess from bup import compat, path, helpers from bup.compat import ( environ, fsdecode, wrap_main ) from bup.helpers import ( columnate, handle_ctrl_c, log, tty_width ) from bup.git import close_catpipes from bup.io import byte_stream, path_msg from bup.options import _tty_width import bup.cmd def maybe_import_early(argv): """Scan argv and import any modules specified by --import-py-module.""" while argv: if argv[0] != '--import-py-module': argv = argv[1:] continue if len(argv) < 2: log("bup: --import-py-module must have an argument\n") exit(2) mod = argv[1] import_module(mod) argv = argv[2:] maybe_import_early(compat.get_argv()) handle_ctrl_c() cmdpath = path.cmddir() # We manipulate the subcmds here as strings, but they must be ASCII # compatible, since we're going to be looking for exactly # b'bup-SUBCMD' to exec. 
def usage(msg=""): log('Usage: bup [-?|--help] [-d BUP_DIR] [--debug] [--profile] ' ' [options...]\n\n') common = dict( ftp = 'Browse backup sets using an ftp-like client', fsck = 'Check backup sets for damage and add redundancy information', fuse = 'Mount your backup sets as a filesystem', help = 'Print detailed help for the given command', index = 'Create or display the index of files to back up', on = 'Backup a remote machine to the local one', restore = 'Extract files from a backup set', save = 'Save files into a backup set (note: run "bup index" first)', tag = 'Tag commits for easier access', web = 'Launch a web server to examine backup sets', ) log('Common commands:\n') for cmd,synopsis in sorted(common.items()): log(' %-10s %s\n' % (cmd, synopsis)) log('\n') log('Other available commands:\n') cmds = set() for c in sorted(os.listdir(cmdpath)): if c.startswith(b'bup-') and c.find(b'.') < 0: cname = fsdecode(c[4:]) if cname not in common: cmds.add(c[4:].decode(errors='backslashreplace')) # built-in commands take precedence for _, name, _ in iter_modules(path=bup.cmd.__path__): name = name.replace('_','-') if name not in common: cmds.add(name) log(columnate(sorted(cmds), ' ')) log('\n') log("See 'bup help COMMAND' for more information on " + "a specific command.\n") if msg: log("\n%s\n" % msg) sys.exit(99) def extract_argval(args): """Assume args (all elements bytes) starts with a -x, --x, or --x=, argument that requires a value and return that value and the remaining args. Exit with an errror if the value is missing. 
""" # Assumes that first arg is a valid arg arg = args[0] if b'=' in arg: val = arg.split(b'=')[1] if not val: usage('error: no value provided for %s option' % arg) return val, args[1:] if len(args) < 2: usage('error: no value provided for %s option' % arg) return args[1], args[2:] args = compat.get_argvb() if len(args) < 2: usage() ## Parse global options help_requested = None do_profile = False bup_dir = None args = args[1:] subcmd = None while args: arg = args[0] if arg in (b'-?', b'--help'): help_requested = True args = args[1:] elif arg in (b'-V', b'--version'): subcmd = [b'version'] args = args[1:] elif arg in (b'-D', b'--debug'): helpers.buglvl += 1 environ[b'BUP_DEBUG'] = b'%d' % helpers.buglvl args = args[1:] elif arg == b'--profile': do_profile = True args = args[1:] elif arg in (b'-d', b'--bup-dir') or arg.startswith(b'--bup-dir='): bup_dir, args = extract_argval(args) elif arg == b'--import-py-module' or arg.startswith(b'--import-py-module='): # Just need to skip it here _, args = extract_argval(args) elif arg.startswith(b'-'): usage('error: unexpected option "%s"' % arg.decode('ascii', 'backslashescape')) else: break subcmd = subcmd or args # Make BUP_DIR absolute, so we aren't affected by chdir (i.e. save -C, etc.). if bup_dir: environ[b'BUP_DIR'] = os.path.abspath(bup_dir) if len(subcmd) == 0: if help_requested: subcmd = [b'help'] else: usage() if help_requested and subcmd[0] != b'help': subcmd = [b'help'] + subcmd if len(subcmd) > 1 and subcmd[1] == b'--help' and subcmd[0] != b'help': subcmd = [b'help', subcmd[0]] + subcmd[2:] subcmd_name = subcmd[0] if not subcmd_name: usage() try: cmd_module = import_module('bup.cmd.' 
+ subcmd_name.decode('ascii').replace('-', '_')) except ModuleNotFoundError as ex: cmd_module = None if not cmd_module: subcmd[0] = os.path.join(cmdpath, b'bup-' + subcmd_name) if not os.path.exists(subcmd[0]): usage('error: unknown command "%s"' % path_msg(subcmd_name)) already_fixed = int(environ.get(b'BUP_FORCE_TTY', 0)) if subcmd_name in (b'mux', b'ftp', b'help', b'fuse'): fix_stdout = False fix_stderr = False else: fix_stdout = not (already_fixed & 1) and os.isatty(1) fix_stderr = not (already_fixed & 2) and os.isatty(2) if fix_stdout or fix_stderr: _ttymask = (fix_stdout and 1 or 0) + (fix_stderr and 2 or 0) environ[b'BUP_FORCE_TTY'] = b'%d' % _ttymask environ[b'BUP_TTY_WIDTH'] = b'%d' % _tty_width() sep_rx = re.compile(br'([\r\n])') def print_clean_line(dest, content, width, sep=None): """Write some or all of content, followed by sep, to the dest fd after padding the content with enough spaces to fill the current terminal width or truncating it to the terminal width if sep is a carriage return.""" global sep_rx assert sep in (b'\r', b'\n', None) if not content: if sep: os.write(dest, sep) return for x in content: assert not sep_rx.match(x) content = b''.join(content) if sep == b'\r' and len(content) > width: content = content[:width] os.write(dest, content) if len(content) < width: os.write(dest, b' ' * (width - len(content))) if sep: os.write(dest, sep) def filter_output(srcs, dests): """Transfer data from file descriptors in srcs to the corresponding file descriptors in dests print_clean_line until all of the srcs have closed. 
""" global sep_rx assert all(isinstance(x, int) for x in srcs) assert len(srcs) == len(dests) srcs = tuple(srcs) dest_for = dict(zip(srcs, dests)) pending = {} try: while srcs: ready_fds, _, _ = select.select(srcs, [], []) width = tty_width() for fd in ready_fds: buf = os.read(fd, 4096) dest = dest_for[fd] if not buf: srcs = tuple([x for x in srcs if x is not fd]) print_clean_line(dest, pending.pop(fd, []), width) else: split = sep_rx.split(buf) while len(split) > 1: content, sep = split[:2] split = split[2:] print_clean_line(dest, pending.pop(fd, []) + [content], width, sep) assert len(split) == 1 if split[0]: pending.setdefault(fd, []).extend(split) except BaseException as ex: pending_ex = ex # Try to finish each of the streams try: for fd, pending_items in pending.items(): dest = dest_for[fd] width = tty_width() try: print_clean_line(dest, pending_items, width) except (EnvironmentError, EOFError) as ex: ex.__cause__ = pending_ex pending_ex = ex finally: raise pending_ex def import_and_run_main(module, args): if do_profile: import cProfile f = compile('module.main(args)', __file__, 'exec') cProfile.runctx(f, globals(), locals()) else: module.main(args) def run_module_cmd(module, args): if not (fix_stdout or fix_stderr): import_and_run_main(module, args) return # Interpose filter_output between all attempts to write to the # stdout/stderr and the real stdout/stderr (e.g. the fds that # connect directly to the terminal) via a thread that runs # filter_output in a pipeline. 
srcs = [] dests = [] real_out_fd = real_err_fd = stdout_pipe = stderr_pipe = None filter_thread = filter_thread_started = None try: if fix_stdout: sys.stdout.flush() stdout_pipe = os.pipe() # monitored_by_filter, stdout_everyone_uses real_out_fd = os.dup(sys.stdout.fileno()) os.dup2(stdout_pipe[1], sys.stdout.fileno()) srcs.append(stdout_pipe[0]) dests.append(real_out_fd) if fix_stderr: sys.stderr.flush() stderr_pipe = os.pipe() # monitored_by_filter, stderr_everyone_uses real_err_fd = os.dup(sys.stderr.fileno()) os.dup2(stderr_pipe[1], sys.stderr.fileno()) srcs.append(stderr_pipe[0]) dests.append(real_err_fd) filter_thread = Thread(name='output filter', target=lambda : filter_output(srcs, dests)) filter_thread.start() filter_thread_started = True import_and_run_main(module, args) finally: # Try to make sure that whatever else happens, we restore # stdout and stderr here, if that's possible, so that we don't # risk just losing some output. Nest the finally blocks so we # try each one no matter what happens, and accumulate alll # exceptions in the pending exception __context__. try: try: try: try: real_out_fd is not None and \ os.dup2(real_out_fd, sys.stdout.fileno()) finally: real_err_fd is not None and \ os.dup2(real_err_fd, sys.stderr.fileno()) finally: # Kick filter loose stdout_pipe is not None and os.close(stdout_pipe[1]) finally: stderr_pipe is not None and os.close(stderr_pipe[1]) finally: close_catpipes() # There's no point in trying to join unless we finished the finally block. 
if filter_thread_started: filter_thread.join() def run_subproc_cmd(args): c = (do_profile and [sys.executable, b'-m', b'cProfile'] or []) + args if not (fix_stdout or fix_stderr): os.execvp(c[0], c) sys.stdout.flush() sys.stderr.flush() out = byte_stream(sys.stdout) err = byte_stream(sys.stderr) p = None try: p = subprocess.Popen(c, stdout=PIPE if fix_stdout else out, stderr=PIPE if fix_stderr else err, bufsize=4096, close_fds=True) # Assume p will receive these signals and quit, which will # then cause us to quit. for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGQUIT): signal.signal(sig, signal.SIG_IGN) srcs = [] dests = [] if fix_stdout: srcs.append(p.stdout.fileno()) dests.append(out.fileno()) if fix_stderr: srcs.append(p.stderr.fileno()) dests.append(err.fileno()) filter_output(srcs, dests) return p.wait() except BaseException as ex: if p and p.poll() == None: os.kill(p.pid, signal.SIGTERM) p.wait() raise def run_subcmd(module, args): if module: run_module_cmd(module, args) else: run_subproc_cmd(args) def main(): wrap_main(lambda : run_subcmd(cmd_module, subcmd)) if __name__ == "__main__": main() bup-0.33.3/lib/bup/metadata.py000066400000000000000000001332401454333004200160550ustar00rootroot00000000000000"""Metadata read/write support for bup.""" # Copyright (C) 2010 Rob Browning # # This code is covered under the terms of the GNU Library General # Public License as described in the bup LICENSE file. 
from __future__ import absolute_import, print_function from copy import deepcopy from errno import EACCES, EINVAL, ENOTTY, ENOSYS, EOPNOTSUPP from io import BytesIO from time import gmtime, strftime import errno, os, sys, stat, time, socket, struct from bup import vint, xstat from bup.drecurse import recursive_dirlist from bup.helpers import add_error, mkdirp, log, is_superuser, format_filesize from bup.io import path_msg from bup.pwdgrp import pwd_from_uid, pwd_from_name, grp_from_gid, grp_from_name from bup.xstat import utime, lutime xattr = None if sys.platform.startswith('linux'): # prefer python-pyxattr (it's a lot faster), but fall back to python-xattr # as the two are incompatible and only one can be installed on a system try: import xattr except ImportError: log('Warning: Linux xattr support missing; install python-pyxattr.\n') if xattr and getattr(xattr, 'get_all', None) is None: try: from xattr import pyxattr_compat as xattr if not isinstance(xattr.NS_USER, bytes): xattr = None except ImportError: xattr = None if xattr is None: log('Warning: python-xattr module is too old; ' 'upgrade or install python-pyxattr instead.\n') try: from bup._helpers import read_acl, apply_acl except ImportError: read_acl = apply_acl = None try: from bup._helpers import get_linux_file_attr, set_linux_file_attr except ImportError: # No need for a warning here; the only reason they won't exist is that we're # not on Linux, in which case files don't have any linux attrs anyway, so # lacking the functions isn't a problem. get_linux_file_attr = set_linux_file_attr = None # See the bup_get_linux_file_attr() comments. 
_suppress_linux_file_attr = \ sys.byteorder == 'big' and struct.calcsize('@l') > struct.calcsize('@i') def check_linux_file_attr_api(): global get_linux_file_attr, set_linux_file_attr if not (get_linux_file_attr or set_linux_file_attr): return if _suppress_linux_file_attr: log('Warning: Linux attr support disabled (see "bup help index").\n') get_linux_file_attr = set_linux_file_attr = None # WARNING: the metadata encoding is *not* stable yet. Caveat emptor! # Q: Consider hardlink support? # Q: Is it OK to store raw linux attr (chattr) flags? # Q: Can anything other than S_ISREG(x) or S_ISDIR(x) support posix1e ACLs? # Q: Is the application of posix1e has_extended() correct? # Q: Is one global --numeric-ids argument sufficient? # Q: Do nfsv4 acls trump posix1e acls? (seems likely) # Q: Add support for crtime -- ntfs, and (only internally?) ext*? # FIXME: Fix relative/abs path detection/stripping wrt other platforms. # FIXME: Add nfsv4 acl handling - see nfs4-acl-tools. # FIXME: Consider other entries mentioned in stat(2) (S_IFDOOR, etc.). # FIXME: Consider pack('vvvvsss', ...) optimization. ## FS notes: # # osx (varies between hfs and hfs+): # type - regular dir char block fifo socket ... # perms - rwxrwxrwxsgt # times - ctime atime mtime # uid # gid # hard-link-info (hfs+ only) # link-target # device-major/minor # attributes-osx see chflags # content-type # content-creator # forks # # ntfs # type - regular dir ... # times - creation, modification, posix change, access # hard-link-info # link-target # attributes - see attrib # ACLs # forks (alternate data streams) # crtime? # # fat # type - regular dir ... # perms - rwxrwxrwx (maybe - see wikipedia) # times - creation, modification, access # attributes - see attrib verbose = 0 _have_lchmod = hasattr(os, 'lchmod') def _clean_up_path_for_archive(p): # Not the most efficient approach. result = p # Take everything after any '/../'. 
pos = result.rfind(b'/../') if pos != -1: result = result[result.rfind(b'/../') + 4:] # Take everything after any remaining '../'. if result.startswith(b"../"): result = result[3:] # Remove any '/./' sequences. pos = result.find(b'/./') while pos != -1: result = result[0:pos] + b'/' + result[pos + 3:] pos = result.find(b'/./') # Remove any leading '/'s. result = result.lstrip(b'/') # Replace '//' with '/' everywhere. pos = result.find(b'//') while pos != -1: result = result[0:pos] + b'/' + result[pos + 2:] pos = result.find(b'//') # Take everything after any remaining './'. if result.startswith(b'./'): result = result[2:] # Take everything before any remaining '/.'. if result.endswith(b'/.'): result = result[:-2] if result == b'' or result.endswith(b'/..'): result = b'.' return result def _risky_path(p): if p.startswith(b'/'): return True if p.find(b'/../') != -1: return True if p.startswith(b'../'): return True if p.endswith(b'/..'): return True return False def _clean_up_extract_path(p): result = p.lstrip(b'/') if result == b'': return b'.' elif _risky_path(result): return None else: return result # These tags are currently conceptually private to Metadata, and they # must be unique, and must *never* be changed. _rec_tag_end = 0 _rec_tag_path = 1 _rec_tag_common_v1 = 2 # times, user, group, type, perms, etc. (legacy/broken) _rec_tag_symlink_target = 3 _rec_tag_posix1e_acl_v1 = 4 # (broken \n delimited format, see v2 below) _rec_tag_nfsv4_acl = 5 # intended to supplant posix1e? (unimplemented) _rec_tag_linux_attr = 6 # lsattr(1) chattr(1) _rec_tag_linux_xattr = 7 # getfattr(1) setfattr(1) _rec_tag_hardlink_target = 8 # hard link target path _rec_tag_common_v2 = 9 # times, user, group, type, perms, etc. (current) _rec_tag_common_v3 = 10 # adds optional size to v2 _rec_tag_posix1e_acl_v2 = 11 # getfacl(1), setfacl(1), etc. _warned_about_attr_einval = None class ApplyError(Exception): # Thrown when unable to apply any given bit of metadata to a path. 
pass class Metadata: # Metadata is stored as a sequence of tagged binary records. Each # record will have some subset of add, encode, load, create, and # apply methods, i.e. _add_foo... # We do allow an "empty" object as a special case, i.e. no # records. One can be created by trying to write Metadata(), and # for such an object, read() will return None. This is used by # "bup save", for example, as a placeholder in cases where # from_path() fails. # NOTE: if any relevant fields are added or removed, be sure to # update same_file() below. ## Common records # Timestamps are (sec, ns), relative to 1970-01-01 00:00:00, ns # must be non-negative and < 10**9. def _add_common(self, path, st): assert(st.st_uid >= 0) assert(st.st_gid >= 0) self.size = st.st_size self.uid = st.st_uid self.gid = st.st_gid self.atime = st.st_atime self.mtime = st.st_mtime self.ctime = st.st_ctime self.user = self.group = b'' entry = pwd_from_uid(st.st_uid) if entry: self.user = entry.pw_name entry = grp_from_gid(st.st_gid) if entry: self.group = entry.gr_name self.mode = st.st_mode # Only collect st_rdev if we might need it for a mknod() # during restore. On some platforms (i.e. kFreeBSD), it isn't # stable for other file types. For example "cp -a" will # change it for a plain file. 
if stat.S_ISCHR(st.st_mode) or stat.S_ISBLK(st.st_mode): self.rdev = st.st_rdev else: self.rdev = 0 def _same_common(self, other): """Return true or false to indicate similarity in the hardlink sense.""" return self.uid == other.uid \ and self.gid == other.gid \ and self.rdev == other.rdev \ and self.mtime == other.mtime \ and self.ctime == other.ctime \ and self.user == other.user \ and self.group == other.group \ and self.size == other.size def _encode_common(self): if not self.mode: return None atime = xstat.nsecs_to_timespec(self.atime) mtime = xstat.nsecs_to_timespec(self.mtime) ctime = xstat.nsecs_to_timespec(self.ctime) result = vint.pack('vvsvsvvVvVvVv', self.mode, self.uid, self.user, self.gid, self.group, self.rdev, atime[0], atime[1], mtime[0], mtime[1], ctime[0], ctime[1], self.size if self.size is not None else -1) return result def _load_common_rec(self, port, version=3): if version == 3: # Added trailing size to v2, negative when None. unpack_fmt = 'vvsvsvvVvVvVv' elif version == 2: unpack_fmt = 'vvsvsvvVvVvV' elif version == 1: unpack_fmt = 'VVsVsVvVvVvV' else: raise Exception('unexpected common_rec version %d' % version) data = vint.read_bvec(port) values = vint.unpack(unpack_fmt, data) if version == 3: (self.mode, self.uid, self.user, self.gid, self.group, self.rdev, self.atime, atime_ns, self.mtime, mtime_ns, self.ctime, ctime_ns, size) = values if size >= 0: self.size = size else: (self.mode, self.uid, self.user, self.gid, self.group, self.rdev, self.atime, atime_ns, self.mtime, mtime_ns, self.ctime, ctime_ns) = values self.atime = xstat.timespec_to_nsecs((self.atime, atime_ns)) self.mtime = xstat.timespec_to_nsecs((self.mtime, mtime_ns)) self.ctime = xstat.timespec_to_nsecs((self.ctime, ctime_ns)) def _recognized_file_type(self): return stat.S_ISREG(self.mode) \ or stat.S_ISDIR(self.mode) \ or stat.S_ISCHR(self.mode) \ or stat.S_ISBLK(self.mode) \ or stat.S_ISFIFO(self.mode) \ or stat.S_ISSOCK(self.mode) \ or stat.S_ISLNK(self.mode) def 
_create_via_common_rec(self, path, create_symlinks=True): if not self.mode: raise ApplyError('no metadata - cannot create path ' + path_msg(path)) # If the path already exists and is a dir, try rmdir. # If the path already exists and is anything else, try unlink. st = None try: st = xstat.lstat(path) except OSError as e: if e.errno != errno.ENOENT: raise if st: if stat.S_ISDIR(st.st_mode): try: os.rmdir(path) except OSError as e: if e.errno in (errno.ENOTEMPTY, errno.EEXIST): raise Exception('refusing to overwrite non-empty dir ' + path_msg(path)) raise else: os.unlink(path) if stat.S_ISREG(self.mode): assert(self._recognized_file_type()) fd = os.open(path, os.O_CREAT|os.O_WRONLY|os.O_EXCL, 0o600) os.close(fd) elif stat.S_ISDIR(self.mode): assert(self._recognized_file_type()) os.mkdir(path, 0o700) elif stat.S_ISCHR(self.mode): assert(self._recognized_file_type()) os.mknod(path, 0o600 | stat.S_IFCHR, self.rdev) elif stat.S_ISBLK(self.mode): assert(self._recognized_file_type()) os.mknod(path, 0o600 | stat.S_IFBLK, self.rdev) elif stat.S_ISFIFO(self.mode): assert(self._recognized_file_type()) os.mkfifo(path, 0o600 | stat.S_IFIFO) elif stat.S_ISSOCK(self.mode): try: os.mknod(path, 0o600 | stat.S_IFSOCK) except OSError as e: if e.errno in (errno.EINVAL, errno.EPERM): s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) s.bind(path) else: raise elif stat.S_ISLNK(self.mode): assert(self._recognized_file_type()) if self.symlink_target and create_symlinks: # on MacOS, symlink() permissions depend on umask, and there's # no way to chown a symlink after creating it, so we have to # be careful here! oldumask = os.umask((self.mode & 0o777) ^ 0o777) try: os.symlink(self.symlink_target, path) finally: os.umask(oldumask) # FIXME: S_ISDOOR, S_IFMPB, S_IFCMP, S_IFNWK, ... see stat(2). 
else: assert(not self._recognized_file_type()) add_error('not creating "%s" with unrecognized mode "0x%x"\n' % (path_msg(path), self.mode)) def _apply_common_rec(self, path, restore_numeric_ids=False): if not self.mode: raise ApplyError('no metadata - cannot apply to ' + path_msg(path)) # FIXME: S_ISDOOR, S_IFMPB, S_IFCMP, S_IFNWK, ... see stat(2). # EACCES errors at this stage are fatal for the current path. if lutime and stat.S_ISLNK(self.mode): try: lutime(path, (self.atime, self.mtime)) except OSError as e: if e.errno == errno.EACCES: raise ApplyError('lutime: %s' % e) else: raise else: try: utime(path, (self.atime, self.mtime)) except OSError as e: if e.errno == errno.EACCES: raise ApplyError('utime: %s' % e) else: raise uid = gid = -1 # By default, do nothing. if is_superuser(): if self.uid is not None: uid = self.uid if self.gid is not None: gid = self.gid if not restore_numeric_ids: if self.uid != 0 and self.user: entry = pwd_from_name(self.user) if entry: uid = entry.pw_uid if self.gid != 0 and self.group: entry = grp_from_name(self.group) if entry: gid = entry.gr_gid else: # not superuser - only consider changing the group/gid user_gids = os.getgroups() if self.gid in user_gids: gid = self.gid if not restore_numeric_ids and self.gid != 0: # The grp might not exist on the local system. 
grps = filter(None, [grp_from_gid(x) for x in user_gids]) if self.group in [x.gr_name for x in grps]: g = grp_from_name(self.group) if g: gid = g.gr_gid if uid != -1 or gid != -1: try: os.lchown(path, uid, gid) except OSError as e: if e.errno == errno.EPERM: add_error('lchown: %s' % e) elif sys.platform.startswith('cygwin') \ and e.errno == errno.EINVAL: add_error('lchown: unknown uid/gid (%d/%d) for %s' % (uid, gid, path_msg(path))) else: raise if _have_lchmod: try: os.lchmod(path, stat.S_IMODE(self.mode)) except OSError as e: # - "Function not implemented" # - "Operation not supported" might be generated by glibc if e.errno in (errno.ENOSYS, errno.EOPNOTSUPP): pass else: raise elif not stat.S_ISLNK(self.mode): os.chmod(path, stat.S_IMODE(self.mode)) ## Path records def _encode_path(self): if self.path: return vint.pack('s', self.path) else: return None def _load_path_rec(self, port): self.path = vint.unpack('s', vint.read_bvec(port))[0] ## Symlink targets def _add_symlink_target(self, path, st): try: if stat.S_ISLNK(st.st_mode): self.symlink_target = os.readlink(path) # might have read a different link than the # one that was in place when we did stat() self.size = len(self.symlink_target) except OSError as e: add_error('readlink: %s' % e) def _encode_symlink_target(self): return self.symlink_target def _load_symlink_target_rec(self, port): target = vint.read_bvec(port) self.symlink_target = target if self.size is None: self.size = len(target) else: assert(self.size == len(target)) ## Hardlink targets def _add_hardlink_target(self, target): self.hardlink_target = target def _same_hardlink_target(self, other): """Return true or false to indicate similarity in the hardlink sense.""" return self.hardlink_target == other.hardlink_target def _encode_hardlink_target(self): return self.hardlink_target def _load_hardlink_target_rec(self, port): self.hardlink_target = vint.read_bvec(port) ## POSIX1e ACL records # Recorded as a list: # [txt_id_acl, num_id_acl] # or, if a 
directory: # [txt_id_acl, num_id_acl, txt_id_default_acl, num_id_default_acl] # The numeric/text distinction only matters when reading/restoring # a stored record. def _add_posix1e_acl(self, path, st): if not read_acl: return if not stat.S_ISLNK(st.st_mode): isdir = 1 if stat.S_ISDIR(st.st_mode) else 0 self.posix1e_acl = read_acl(path, isdir) def _same_posix1e_acl(self, other): """Return true or false to indicate similarity in the hardlink sense.""" return self.posix1e_acl == other.posix1e_acl def _encode_posix1e_acl(self): # Encode as two strings (w/default ACL string possibly empty). if self.posix1e_acl: acls = self.posix1e_acl if len(acls) == 2: return vint.pack('ssss', acls[0], acls[1], b'', b'') return vint.pack('ssss', acls[0], acls[1], acls[2], acls[3]) else: return None @staticmethod def _correct_posix1e_v1_delimiters(acls, path): assert acls # The v0 format had newline delimiters which are incorrect for # the ACL short text format we request, and which are rejected # with EINVAL by acl_from_text() on some platforms. For now, # this function assumes (potentially incorrectly) that no # field name (including the user and group names) contains a # newline or comma. If any field name does, then the results # may be wrong. (Debian, at least, disallows them.) 
for i in range(len(acls)): acl = acls[i] if b',' in acl: if path: msg = f'Unexpected comma in ACL entry; ignoring {acl!r}' \ f' for {path_msg(path)}\n' else: msg = f'Unexpected comma in ACL entry; ignoring {acl!r}\n' add_error(msg) return None acls[i] = acl.replace(b'\n', b',') return acls def _load_posix1e_acl_rec(self, port, *, version): assert version in (1, 2) acl_rep = vint.unpack('ssss', vint.read_bvec(port)) if acl_rep[2] == b'': acl_rep = acl_rep[:2] if version == 1: acl_rep = self._correct_posix1e_v1_delimiters(acl_rep, self.path) self.posix1e_acl = acl_rep def _apply_posix1e_acl_rec(self, path, restore_numeric_ids=False): if not self.posix1e_acl: return if not apply_acl: add_error("%s: can't restore ACLs; posix1e support missing.\n" % path_msg(path)) return try: acls = self.posix1e_acl offs = 1 if restore_numeric_ids else 0 if len(acls) > 2: apply_acl(path, acls[offs], acls[offs + 2]) else: apply_acl(path, acls[offs]) except IOError as e: if e.errno == errno.EINVAL: # libacl returns with errno set to EINVAL if a user # (or group) doesn't exist raise ApplyError("POSIX1e ACL: can't create %r for %r" % (acls, path_msg(path))) elif e.errno in (errno.EPERM, errno.EOPNOTSUPP): raise ApplyError('POSIX1e ACL applyto: %s' % e) else: raise ## Linux attributes (lsattr(1), chattr(1)) def _add_linux_attr(self, path, st): check_linux_file_attr_api() if not get_linux_file_attr: return if stat.S_ISREG(st.st_mode) or stat.S_ISDIR(st.st_mode): try: attr = get_linux_file_attr(path) if attr != 0: self.linux_attr = attr except OSError as e: if e.errno == errno.EACCES: add_error('read Linux attr: %s' % e) elif e.errno in (ENOTTY, ENOSYS, EOPNOTSUPP): # Assume filesystem doesn't support attrs. 
return elif e.errno == EINVAL: global _warned_about_attr_einval if not _warned_about_attr_einval: log("Ignoring attr EINVAL;" + " if you're not using ntfs-3g, please report: " + path_msg(path) + '\n') _warned_about_attr_einval = True return else: raise def _same_linux_attr(self, other): """Return true or false to indicate similarity in the hardlink sense.""" return self.linux_attr == other.linux_attr def _encode_linux_attr(self): if self.linux_attr: return vint.pack('V', self.linux_attr) else: return None def _load_linux_attr_rec(self, port): data = vint.read_bvec(port) self.linux_attr = vint.unpack('V', data)[0] def _apply_linux_attr_rec(self, path, restore_numeric_ids=False): if self.linux_attr: check_linux_file_attr_api() if not set_linux_file_attr: add_error("%s: can't restore linuxattrs: " "linuxattr support missing.\n" % path_msg(path)) return try: set_linux_file_attr(path, self.linux_attr) except OSError as e: if e.errno in (EACCES, ENOTTY, EOPNOTSUPP, ENOSYS): raise ApplyError('Linux chattr: %s (0x%s)' % (e, hex(self.linux_attr))) elif e.errno == EINVAL: msg = "if you're not using ntfs-3g, please report" raise ApplyError('Linux chattr: %s (0x%s) (%s)' % (e, hex(self.linux_attr), msg)) else: raise ## Linux extended attributes (getfattr(1), setfattr(1)) def _add_linux_xattr(self, path, st): if not xattr: return try: self.linux_xattr = xattr.get_all(path, nofollow=True) except EnvironmentError as e: if e.errno != errno.EOPNOTSUPP: raise def _same_linux_xattr(self, other): """Return true or false to indicate similarity in the hardlink sense.""" return self.linux_xattr == other.linux_xattr def _encode_linux_xattr(self): if self.linux_xattr: result = vint.pack('V', len(self.linux_xattr)) for name, value in self.linux_xattr: result += vint.pack('ss', name, value) return result else: return None def _load_linux_xattr_rec(self, file): data = vint.read_bvec(file) memfile = BytesIO(data) result = [] for i in range(vint.read_vuint(memfile)): key = 
vint.read_bvec(memfile) value = vint.read_bvec(memfile) result.append((key, value)) self.linux_xattr = result def _apply_linux_xattr_rec(self, path, restore_numeric_ids=False): if not xattr: if self.linux_xattr: add_error("%s: can't restore xattr; xattr support missing.\n" % path_msg(path)) return if not self.linux_xattr: return try: existing_xattrs = set(xattr.list(path, nofollow=True)) except IOError as e: if e.errno == errno.EACCES: raise ApplyError('xattr.set %r: %s' % (path_msg(path), e)) else: raise for k, v in self.linux_xattr: if k not in existing_xattrs \ or v != xattr.get(path, k, nofollow=True): try: xattr.set(path, k, v, nofollow=True) except IOError as e: if e.errno in (errno.EPERM, errno.EOPNOTSUPP): raise ApplyError('xattr.set %r: %s' % (path_msg(path), e)) else: raise existing_xattrs -= frozenset([k]) for k in existing_xattrs: try: xattr.remove(path, k, nofollow=True) except IOError as e: if e.errno in (errno.EPERM, errno.EACCES): raise ApplyError('xattr.remove %r: %s' % (path_msg(path), e)) else: raise def __init__(self): __slots__ = ('mode', 'uid', 'atime', 'mtime', 'ctime', 'path', 'size', 'symlink_target', 'hardlink_target', 'linux_attr', 'linux_xattr', 'posix1e_acl') self.mode = self.uid = self.gid = self.user = self.group = None self.rdev = None self.atime = self.mtime = self.ctime = None # optional members self.path = None self.size = None self.symlink_target = None self.hardlink_target = None self.linux_attr = None self.linux_xattr = None self.posix1e_acl = None def __eq__(self, other): if not isinstance(other, Metadata): return False if self.mode != other.mode: return False if self.mtime != other.mtime: return False if self.ctime != other.ctime: return False if self.atime != other.atime: return False if self.rdev != other.rdev: return False if self.path != other.path: return False if self.uid != other.uid: return False if self.gid != other.gid: return False if self.size != other.size: return False if self.user != other.user: return False if 
self.group != other.group: return False if self.symlink_target != other.symlink_target: return False if self.hardlink_target != other.hardlink_target: return False if self.linux_attr != other.linux_attr: return False if self.posix1e_acl != other.posix1e_acl: return False return True def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash((self.mode, self.mtime, self.ctime, self.atime, self.rdev, self.path, self.uid, self.gid, self.size, self.user, self.group, self.symlink_target, self.hardlink_target, self.linux_attr, self.posix1e_acl)) def __repr__(self): result = ['<%s instance at %s' % (self.__class__, hex(id(self)))] if self.path is not None: result += ' path:' + repr(self.path) if self.mode is not None: result += ' mode: %o (%s)' % (self.mode, xstat.mode_str(self.mode)) if self.uid is not None: result += ' uid:' + str(self.uid) if self.gid is not None: result += ' gid:' + str(self.gid) if self.user is not None: result += ' user:' + repr(self.user) if self.group is not None: result += ' group:' + repr(self.group) if self.rdev is not None: result += ' rdev:' + repr(self.group) if self.size is not None: result += ' size:' + repr(self.size) for name, val in (('atime', self.atime), ('mtime', self.mtime), ('ctime', self.ctime)): if val is not None: result += ' %s:%r (%d)' \ % (name, strftime('%Y-%m-%d %H:%M %z', gmtime(xstat.fstime_floor_secs(val))), val) result += '>' return ''.join(result) def write(self, port, include_path=True): port.write(self.encode(include_path=include_path)) def encode(self, include_path=True): ret = [] records = include_path and [(_rec_tag_path, self._encode_path())] or [] records.extend([(_rec_tag_common_v3, self._encode_common()), (_rec_tag_symlink_target, self._encode_symlink_target()), (_rec_tag_hardlink_target, self._encode_hardlink_target()), (_rec_tag_posix1e_acl_v2, self._encode_posix1e_acl()), (_rec_tag_linux_attr, self._encode_linux_attr()), (_rec_tag_linux_xattr, self._encode_linux_xattr())]) for 
tag, data in records: if data: ret.extend((vint.encode_vuint(tag), vint.encode_bvec(data))) ret.append(vint.encode_vuint(_rec_tag_end)) return b''.join(ret) def copy(self): return deepcopy(self) @staticmethod def read(port): # This method should either return a valid Metadata object, # return None if there was no information at all (just a # _rec_tag_end), throw EOFError if there was nothing at all to # read, or throw an Exception if a valid object could not be # read completely. tag = vint.read_vuint(port) if tag == _rec_tag_end: return None try: # From here on, EOF is an error. result = Metadata() while True: # only exit is error (exception) or _rec_tag_end if tag == _rec_tag_path: result._load_path_rec(port) elif tag == _rec_tag_common_v3: result._load_common_rec(port, version=3) elif tag == _rec_tag_common_v2: result._load_common_rec(port, version=2) elif tag == _rec_tag_symlink_target: result._load_symlink_target_rec(port) elif tag == _rec_tag_hardlink_target: result._load_hardlink_target_rec(port) elif tag == _rec_tag_posix1e_acl_v2: result._load_posix1e_acl_rec(port, version=2) elif tag == _rec_tag_posix1e_acl_v1: result._load_posix1e_acl_rec(port, version=1) elif tag == _rec_tag_linux_attr: result._load_linux_attr_rec(port) elif tag == _rec_tag_linux_xattr: result._load_linux_xattr_rec(port) elif tag == _rec_tag_end: return result elif tag == _rec_tag_common_v1: # Should be very rare. 
result._load_common_rec(port, version=1) else: # unknown record vint.skip_bvec(port) tag = vint.read_vuint(port) except EOFError: raise Exception("EOF while reading Metadata") def isdir(self): return stat.S_ISDIR(self.mode) def create_path(self, path, create_symlinks=True): self._create_via_common_rec(path, create_symlinks=create_symlinks) def apply_to_path(self, path=None, restore_numeric_ids=False): # apply metadata to path -- file must exist if not path: path = self.path if not path: raise Exception('Metadata.apply_to_path() called with no path') if not self._recognized_file_type(): add_error('not applying metadata to "%s"' % path_msg(path) + ' with unrecognized mode "0x%x"\n' % self.mode) return num_ids = restore_numeric_ids for apply_metadata in (self._apply_common_rec, self._apply_posix1e_acl_rec, self._apply_linux_attr_rec, self._apply_linux_xattr_rec): try: apply_metadata(path, restore_numeric_ids=num_ids) except ApplyError as e: add_error(e) def same_file(self, other): """Compare this to other for equivalency. Return true if their information implies they could represent the same file on disk, in the hardlink sense. Assume they're both regular files.""" return self._same_common(other) \ and self._same_hardlink_target(other) \ and self._same_posix1e_acl(other) \ and self._same_linux_attr(other) \ and self._same_linux_xattr(other) def from_path(path, statinfo=None, archive_path=None, save_symlinks=True, hardlink_target=None, normalized=False, after_stat=None): # This function is also a test hook; see test-save-errors """Return the metadata associated with the path. 
When normalized is true, return the metadata appropriate for a typical save, which may or may not be all of it.""" result = Metadata() result.path = archive_path st = statinfo or xstat.lstat(path) if after_stat: after_stat(path) result._add_common(path, st) if save_symlinks: result._add_symlink_target(path, st) result._add_hardlink_target(hardlink_target) result._add_posix1e_acl(path, st) result._add_linux_attr(path, st) result._add_linux_xattr(path, st) if normalized: # Only store sizes for regular files and symlinks for now. if not (stat.S_ISREG(result.mode) or stat.S_ISLNK(result.mode)): result.size = None return result def save_tree(output_file, paths, recurse=False, write_paths=True, save_symlinks=True, xdev=False): # Issue top-level rewrite warnings. for path in paths: safe_path = _clean_up_path_for_archive(path) if safe_path != path: log('archiving "%s" as "%s"\n' % (path_msg(path), path_msg(safe_path))) if not recurse: for p in paths: safe_path = _clean_up_path_for_archive(p) st = xstat.lstat(p) if stat.S_ISDIR(st.st_mode): safe_path += b'/' m = from_path(p, statinfo=st, archive_path=safe_path, save_symlinks=save_symlinks) if verbose: print(m.path, file=sys.stderr) m.write(output_file, include_path=write_paths) else: start_dir = os.getcwd() try: for (p, st) in recursive_dirlist(paths, xdev=xdev): dirlist_dir = os.getcwd() os.chdir(start_dir) safe_path = _clean_up_path_for_archive(p) m = from_path(p, statinfo=st, archive_path=safe_path, save_symlinks=save_symlinks) if verbose: print(m.path, file=sys.stderr) m.write(output_file, include_path=write_paths) os.chdir(dirlist_dir) finally: os.chdir(start_dir) def _set_up_path(meta, create_symlinks=True): # Allow directories to exist as a special case -- might have # been created by an earlier longer path. 
if meta.isdir(): mkdirp(meta.path) else: parent = os.path.dirname(meta.path) if parent: mkdirp(parent) meta.create_path(meta.path, create_symlinks=create_symlinks) all_fields = frozenset(['path', 'mode', 'link-target', 'rdev', 'size', 'uid', 'gid', 'user', 'group', 'atime', 'mtime', 'ctime', 'linux-attr', 'linux-xattr', 'posix1e-acl']) def summary_bytes(meta, numeric_ids = False, classification = None, human_readable = False): """Return bytes containing the "ls -l" style listing for meta. Classification may be "all", "type", or None.""" user_str = group_str = size_or_dev_str = b'?' symlink_target = None if meta: name = meta.path mode_str = xstat.mode_str(meta.mode).encode('ascii') symlink_target = meta.symlink_target mtime_secs = xstat.fstime_floor_secs(meta.mtime) mtime_str = strftime('%Y-%m-%d %H:%M', time.localtime(mtime_secs)).encode('ascii') if meta.user and not numeric_ids: user_str = meta.user elif meta.uid != None: user_str = str(meta.uid).encode() if meta.group and not numeric_ids: group_str = meta.group elif meta.gid != None: group_str = str(meta.gid).encode() if stat.S_ISCHR(meta.mode) or stat.S_ISBLK(meta.mode): if meta.rdev: size_or_dev_str = ('%d,%d' % (os.major(meta.rdev), os.minor(meta.rdev))).encode() elif meta.size != None: if human_readable: size_or_dev_str = format_filesize(meta.size).encode() else: size_or_dev_str = str(meta.size).encode() else: size_or_dev_str = b'-' if classification: classification_str = \ xstat.classification_str(meta.mode, classification == 'all').encode() else: mode_str = b'?' * 10 mtime_str = b'????-??-?? ??:??' classification_str = b'?' name = name or b'' if classification: name += classification_str if symlink_target: name += b' -> ' + meta.symlink_target return b'%-10s %-11s %11s %16s %s' % (mode_str, user_str + b'/' + group_str, size_or_dev_str, mtime_str, name) def detailed_bytes(meta, fields = None): # FIXME: should optional fields be omitted, or empty i.e. "rdev: # 0", "link-target:", etc. 
if not fields: fields = all_fields result = [] if 'path' in fields: path = meta.path or b'' result.append(b'path: ' + path) if 'mode' in fields: result.append(b'mode: %o (%s)' % (meta.mode, xstat.mode_str(meta.mode).encode('ascii'))) if 'link-target' in fields and stat.S_ISLNK(meta.mode): result.append(b'link-target: ' + meta.symlink_target) if 'rdev' in fields: if meta.rdev: result.append(b'rdev: %d,%d' % (os.major(meta.rdev), os.minor(meta.rdev))) else: result.append(b'rdev: 0') if 'size' in fields and meta.size is not None: result.append(b'size: %d' % meta.size) if 'uid' in fields: result.append(b'uid: %d' % meta.uid) if 'gid' in fields: result.append(b'gid: %d' % meta.gid) if 'user' in fields: result.append(b'user: ' + meta.user) if 'group' in fields: result.append(b'group: ' + meta.group) if 'atime' in fields: # If we don't have xstat.lutime, that means we have to use # utime(), and utime() has no way to set the mtime/atime of a # symlink. Thus, the mtime/atime of a symlink is meaningless, # so let's not report it. (That way scripts comparing # before/after won't trigger.) 
if xstat.lutime or not stat.S_ISLNK(meta.mode): result.append(b'atime: ' + xstat.fstime_to_sec_bytes(meta.atime)) else: result.append(b'atime: 0') if 'mtime' in fields: if xstat.lutime or not stat.S_ISLNK(meta.mode): result.append(b'mtime: ' + xstat.fstime_to_sec_bytes(meta.mtime)) else: result.append(b'mtime: 0') if 'ctime' in fields: result.append(b'ctime: ' + xstat.fstime_to_sec_bytes(meta.ctime)) if 'linux-attr' in fields and meta.linux_attr: result.append(b'linux-attr: %x' % meta.linux_attr) if 'linux-xattr' in fields and meta.linux_xattr: for name, value in meta.linux_xattr: result.append(b'linux-xattr: %s -> %s' % (name, value)) if 'posix1e-acl' in fields and meta.posix1e_acl: acl = meta.posix1e_acl[0] result.append(b'posix1e-acl: ' + acl + b'\n') if stat.S_ISDIR(meta.mode): def_acl = meta.posix1e_acl[2] result.append(b'posix1e-acl-default: ' + def_acl + b'\n') return b'\n'.join(result) class _ArchiveIterator: def __next__(self): try: return Metadata.read(self._file) except EOFError: raise StopIteration() next = __next__ def __iter__(self): return self def __init__(self, file): self._file = file def display_archive(file, out): if verbose > 1: first_item = True for meta in _ArchiveIterator(file): if not first_item: out.write(b'\n') out.write(detailed_bytes(meta)) out.write(b'\n') first_item = False elif verbose > 0: for meta in _ArchiveIterator(file): out.write(summary_bytes(meta)) out.write(b'\n') elif verbose == 0: for meta in _ArchiveIterator(file): if not meta.path: log('bup: no metadata path, but asked to only display path' ' (increase verbosity?)') sys.exit(1) out.write(meta.path) out.write(b'\n') def start_extract(file, create_symlinks=True): for meta in _ArchiveIterator(file): if not meta: # Hit end record. 
break if verbose: print(path_msg(meta.path), file=sys.stderr) xpath = _clean_up_extract_path(meta.path) if not xpath: add_error(Exception('skipping risky path "%s"' % path_msg(meta.path))) else: meta.path = xpath _set_up_path(meta, create_symlinks=create_symlinks) def finish_extract(file, restore_numeric_ids=False): all_dirs = [] for meta in _ArchiveIterator(file): if not meta: # Hit end record. break xpath = _clean_up_extract_path(meta.path) if not xpath: add_error(Exception('skipping risky path "%s"' % path_msg(meta.path))) else: if os.path.isdir(meta.path): all_dirs.append(meta) else: if verbose: print(path_msg(meta.path), file=sys.stderr) meta.apply_to_path(path=xpath, restore_numeric_ids=restore_numeric_ids) all_dirs.sort(key = lambda x : len(x.path), reverse=True) for dir in all_dirs: # Don't need to check xpath -- won't be in all_dirs if not OK. xpath = _clean_up_extract_path(dir.path) if verbose: print(path_msg(dir.path), file=sys.stderr) dir.apply_to_path(path=xpath, restore_numeric_ids=restore_numeric_ids) def extract(file, restore_numeric_ids=False, create_symlinks=True): # For now, just store all the directories and handle them last, # longest first. all_dirs = [] for meta in _ArchiveIterator(file): if not meta: # Hit end record. break xpath = _clean_up_extract_path(meta.path) if not xpath: add_error(Exception('skipping risky path "%s"' % path_msg(meta.path))) else: meta.path = xpath if verbose: print('+', path_msg(meta.path), file=sys.stderr) _set_up_path(meta, create_symlinks=create_symlinks) if os.path.isdir(meta.path): all_dirs.append(meta) else: if verbose: print('=', path_msg(meta.path), file=sys.stderr) meta.apply_to_path(restore_numeric_ids=restore_numeric_ids) all_dirs.sort(key = lambda x : len(x.path), reverse=True) for dir in all_dirs: # Don't need to check xpath -- won't be in all_dirs if not OK. 
xpath = _clean_up_extract_path(dir.path) if verbose: print('=', path_msg(xpath), file=sys.stderr) # Shouldn't have to check for risky paths here (omitted above). dir.apply_to_path(path=dir.path, restore_numeric_ids=restore_numeric_ids) bup-0.33.3/lib/bup/midx.py000066400000000000000000000116441454333004200152410ustar00rootroot00000000000000 from __future__ import absolute_import, print_function import glob, os, struct from bup import _helpers from bup.compat import pending_raise from bup.helpers import log, mmap_read from bup.io import path_msg MIDX_VERSION = 4 extract_bits = _helpers.extract_bits _total_searches = 0 _total_steps = 0 class PackMidx: """Wrapper which contains data from multiple index files. Multiple index (.midx) files constitute a wrapper around index (.idx) files and make it possible for bup to expand Git's indexing capabilities to vast amounts of files. """ def __init__(self, filename): self.closed = False self.name = filename self.force_keep = False self.map = None assert(filename.endswith(b'.midx')) self.map = mmap_read(open(filename)) if self.map[0:4] != b'MIDX': log('Warning: skipping: invalid MIDX header in %r\n' % path_msg(filename)) self.force_keep = True self._init_failed() return ver = struct.unpack('!I', self.map[4:8])[0] if ver < MIDX_VERSION: log('Warning: ignoring old-style (v%d) midx %r\n' % (ver, path_msg(filename))) self.force_keep = False # old stuff is boring self._init_failed() return if ver > MIDX_VERSION: log('Warning: ignoring too-new (v%d) midx %r\n' % (ver, path_msg(filename))) self.force_keep = True # new stuff is exciting self._init_failed() return self.bits = _helpers.firstword(self.map[8:12]) self.entries = 2**self.bits self.fanout_ofs = 12 # fanout len is self.entries * 4 self.sha_ofs = self.fanout_ofs + self.entries * 4 self.nsha = self._fanget(self.entries - 1) # sha table len is self.nsha * 20 self.which_ofs = self.sha_ofs + 20 * self.nsha # which len is self.nsha * 4 self.idxnames = self.map[self.which_ofs + 4 * 
self.nsha:].split(b'\0') def __enter__(self): return self def __exit__(self, type, value, traceback): with pending_raise(value, rethrow=False): self.close() def _init_failed(self): self.bits = 0 self.entries = 1 self.idxnames = [] def _fanget(self, i): if i >= self.entries * 4 or i < 0: raise IndexError('invalid midx index %d' % i) ofs = self.fanout_ofs + i * 4 return _helpers.firstword(self.map[ofs : ofs + 4]) def _get(self, i): if i >= self.nsha or i < 0: raise IndexError('invalid midx index %d' % i) ofs = self.sha_ofs + i * 20 return self.map[ofs : ofs + 20] def _get_idx_i(self, i): if i >= self.nsha or i < 0: raise IndexError('invalid midx index %d' % i) ofs = self.which_ofs + i * 4 return struct.unpack_from('!I', self.map, offset=ofs)[0] def _get_idxname(self, i): return self.idxnames[self._get_idx_i(i)] def close(self): self.closed = True if self.map is not None: self.fanout = self.shatable = self.whichlist = self.idxnames = None self.map.close() self.map = None def __del__(self): assert self.closed def exists(self, hash, want_source=False): """Return nonempty if the object exists in the index files.""" global _total_searches, _total_steps _total_searches += 1 want = hash el = extract_bits(want, self.bits) if el: start = self._fanget(el-1) startv = el << (32-self.bits) else: start = 0 startv = 0 end = self._fanget(el) endv = (el+1) << (32-self.bits) _total_steps += 1 # lookup table is a step hashv = _helpers.firstword(hash) #print '(%08x) %08x %08x %08x' % (extract_bits(want, 32), startv, hashv, endv) while start < end: _total_steps += 1 #print '! %08x %08x %08x %d - %d' % (startv, hashv, endv, start, end) mid = start + (hashv - startv) * (end - start - 1) // (endv - startv) #print ' %08x %08x %08x %d %d %d' % (startv, hashv, endv, start, mid, end) v = self._get(mid) #print ' %08x' % self._num(v) if v < want: start = mid+1 startv = _helpers.firstword(v) elif v > want: end = mid endv = _helpers.firstword(v) else: # got it! 
return want_source and self._get_idxname(mid) or True return None def __iter__(self): start = self.sha_ofs for ofs in range(start, start + self.nsha * 20, 20): yield self.map[ofs : ofs + 20] def __len__(self): return int(self.nsha) def clear_midxes(dir=None): for midx in glob.glob(os.path.join(dir, b'*.midx')): os.unlink(midx) bup-0.33.3/lib/bup/options.py000066400000000000000000000242761454333004200160000ustar00rootroot00000000000000# Copyright 2010-2012 Avery Pennarun and options.py contributors. # All rights reserved. # # (This license applies to this file but not necessarily the other files in # this package.) # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # THIS SOFTWARE IS PROVIDED BY AVERY PENNARUN AND CONTRIBUTORS ``AS # IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL # OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED # OF THE POSSIBILITY OF SUCH DAMAGE. # """Command-line options parser. With the help of an options spec string, easily parse command-line options. 
An options spec is made up of two parts, separated by a line with two dashes. The first part is the synopsis of the command and the second one specifies options, one per line. Each non-empty line in the synopsis gives a set of options that can be used together. Option flags must be at the begining of the line and multiple flags are separated by commas. Usually, options have a short, one character flag, and a longer one, but the short one can be omitted. Long option flags are used as the option's key for the OptDict produced when parsing options. When the flag definition is ended with an equal sign, the option takes one string as an argument, and that string will be converted to an integer when possible. Otherwise, the option does not take an argument and corresponds to a boolean flag that is true when the option is given on the command line. The option's description is found at the right of its flags definition, after one or more spaces. The description ends at the end of the line. If the description contains text enclosed in square brackets, the enclosed text will be used as the option's default value. Options can be put in different groups. Options in the same group must be on consecutive lines. Groups are formed by inserting a line that begins with a space. The text on that line will be output after an empty line. """ from __future__ import absolute_import import sys, os, textwrap, getopt, re, struct try: import fcntl except ImportError: fcntl = None try: import termios except ImportError: termios = None def _invert(v, invert): if invert: return not v return v def _remove_negative_kv(k, v): if k.startswith('no-') or k.startswith('no_'): return k[3:], not v return k,v class OptDict(object): """Dictionary that exposes keys as attributes. Keys can be set or accessed with a "no-" or "no_" prefix to negate the value. 
""" def __init__(self, aliases): self._opts = {} self._aliases = aliases def _unalias(self, k): k, reinvert = _remove_negative_kv(k, False) k, invert = self._aliases[k] return k, invert ^ reinvert def __setitem__(self, k, v): k, invert = self._unalias(k) self._opts[k] = _invert(v, invert) def __getitem__(self, k): k, invert = self._unalias(k) return _invert(self._opts[k], invert) def __getattr__(self, k): return self[k] def _default_onabort(msg): sys.exit(97) def _intify(v): try: vv = int(v or '') if str(vv) == v: return vv except ValueError: pass return v if not fcntl and termios: def _tty_width(): return 70 else: def _tty_width(): forced = os.environ.get('BUP_TTY_WIDTH', None) if forced: return int(forced) s = struct.pack("HHHH", 0, 0, 0, 0) try: s = fcntl.ioctl(sys.stderr.fileno(), termios.TIOCGWINSZ, s) except IOError: return 70 ysize, xsize, ypix, xpix = struct.unpack('HHHH', s) return xsize or 70 class Options: """Option parser. When constructed, a string called an option spec must be given. It specifies the synopsis and option flags and their description. For more information about option specs, see the docstring at the top of this file. Two optional arguments specify an alternative parsing function and an alternative behaviour on abort (after having output the usage string). By default, the parser function is getopt.gnu_getopt, and the abort behaviour is to exit the program. """ def __init__(self, optspec, optfunc=getopt.gnu_getopt, onabort=_default_onabort): self.optspec = optspec self._onabort = onabort self.optfunc = optfunc self._aliases = {} self._shortopts = 'h?' 
self._longopts = ['help', 'usage'] self._hasparms = {} self._defaults = {} self._usagestr = self._gen_usage() # this also parses the optspec def _gen_usage(self): out = [] lines = self.optspec.strip().split('\n') lines.reverse() first_syn = True while lines: l = lines.pop() if l == '--': break out.append('%s: %s\n' % (first_syn and 'usage' or ' or', l)) first_syn = False out.append('\n') last_was_option = False while lines: l = lines.pop() if l.startswith(' '): out.append('%s%s\n' % (last_was_option and '\n' or '', l.lstrip())) last_was_option = False elif l: (flags,extra) = (l + ' ').split(' ', 1) extra = extra.strip() if flags.endswith('='): flags = flags[:-1] has_parm = 1 else: has_parm = 0 g = re.search(r'\[([^\]]*)\]$', extra) if g: defval = _intify(g.group(1)) else: defval = None flagl = flags.split(',') flagl_nice = [] flag_main, invert_main = _remove_negative_kv(flagl[0], False) self._defaults[flag_main] = _invert(defval, invert_main) for _f in flagl: f,invert = _remove_negative_kv(_f, 0) self._aliases[f] = (flag_main, invert_main ^ invert) self._hasparms[f] = has_parm if f == '#': self._shortopts += '0123456789' flagl_nice.append('-#') elif len(f) == 1: self._shortopts += f + (has_parm and ':' or '') flagl_nice.append('-' + f) else: f_nice = re.sub(r'\W', '_', f) self._aliases[f_nice] = (flag_main, invert_main ^ invert) self._longopts.append(f + (has_parm and '=' or '')) self._longopts.append('no-' + f) flagl_nice.append('--' + _f) flags_nice = ', '.join(flagl_nice) if has_parm: flags_nice += ' ...' 
prefix = ' %-20s ' % flags_nice argtext = '\n'.join(textwrap.wrap(extra, width=_tty_width(), initial_indent=prefix, subsequent_indent=' '*28)) out.append(argtext + '\n') last_was_option = True else: out.append('\n') last_was_option = False return ''.join(out).rstrip() + '\n' def usage(self, msg=""): """Print usage string to stderr and abort.""" sys.stderr.write(self._usagestr) if msg: sys.stderr.write(msg) e = self._onabort and self._onabort(msg) or None if e: raise e def fatal(self, msg): """Print an error message to stderr and abort with usage string.""" msg = '\nerror: %s\n' % msg return self.usage(msg) def parse(self, args): """Parse a list of arguments and return (options, flags, extra). In the returned tuple, "options" is an OptDict with known options, "flags" is a list of option flags that were used on the command-line, and "extra" is a list of positional arguments. """ try: (flags,extra) = self.optfunc(args, self._shortopts, self._longopts) except getopt.GetoptError as e: self.fatal(e) opt = OptDict(aliases=self._aliases) for k,v in self._defaults.items(): opt[k] = v for (k,v) in flags: k = k.lstrip('-') if k in ('h', '?', 'help', 'usage'): self.usage() if (self._aliases.get('#') and k in ('0','1','2','3','4','5','6','7','8','9')): v = int(k) # guaranteed to be exactly one digit k, invert = self._aliases['#'] opt['#'] = v else: k, invert = opt._unalias(k) if not self._hasparms[k]: assert(v == '') v = (opt._opts.get(k) or 0) + 1 else: v = _intify(v) opt[k] = _invert(v, invert) return (opt,flags,extra) def parse_bytes(self, args): if sys.version_info[0] > 2: args = [x.decode(errors='surrogateescape') for x in args] return self.parse(args) bup-0.33.3/lib/bup/path.py000066400000000000000000000011401454333004200152220ustar00rootroot00000000000000 from __future__ import absolute_import import os, sys # Eventually, if we physically move the source tree cmd/ to lib/, then # we could use realpath here and save some stats... 
fsencode = os.fsencode if sys.version_info[0] >= 3 else lambda x: x _libdir = os.path.abspath(os.path.dirname(fsencode(__file__)) + b'/..') _resdir = _libdir _exedir = os.path.abspath(_libdir + b'/cmd') _exe = os.path.join(_exedir, b'bup') def exe(): return _exe def exedir(): return _exedir cmddir = exedir def libdir(): return _libdir def resource_path(subdir=b''): return os.path.join(_resdir, subdir) bup-0.33.3/lib/bup/pwdgrp.py000066400000000000000000000076231454333004200156050ustar00rootroot00000000000000 from __future__ import absolute_import, print_function import os from bup import _helpers from bup.helpers import cache_key_value # Using __slots__ makes these much smaller (even than a namedtuple) class Passwd: """Drop in replacement for pwd's structure with bytes instead of strings.""" __slots__ = ('pw_name', 'pw_passwd', 'pw_uid', 'pw_gid', 'pw_gecos', 'pw_dir', 'pw_shell') def __init__(self, name, passwd, uid, gid, gecos, dir, shell): assert isinstance(name, bytes) assert isinstance(passwd, bytes) assert isinstance(gecos, bytes) assert isinstance(dir, bytes) assert isinstance(shell, bytes) (self.pw_name, self.pw_passwd, self.pw_uid, self.pw_gid, self.pw_gecos, self.pw_dir, self.pw_shell) = \ name, passwd, uid, gid, gecos, dir, shell def getpwuid(uid): r = _helpers.getpwuid(uid) return Passwd(*r) if r else None def getpwnam(name): assert isinstance(name, bytes) r = _helpers.getpwnam(name) return Passwd(*r) if r else None class Group: """Drop in replacement for grp's structure with bytes instead of strings.""" __slots__ = 'gr_name', 'gr_passwd', 'gr_gid', 'gr_mem' def __init__(self, name, passwd, gid, mem): assert isinstance(name, bytes) # None was observed on Android assert isinstance(passwd, bytes) or passwd is None for m in mem: assert isinstance(m, bytes) self.gr_name, self.gr_passwd, self.gr_gid, self.gr_mem = \ name, passwd, gid, mem def getgrgid(uid): r = _helpers.getgrgid(uid) return Group(*r) if r else None def getgrnam(name): assert isinstance(name, 
bytes) r = _helpers.getgrnam(name) return Group(*r) if r else None _uid_to_pwd_cache = {} _name_to_pwd_cache = {} def pwd_from_uid(uid): """Return password database entry for uid (may be a cached value). Return None if no entry is found. """ global _uid_to_pwd_cache, _name_to_pwd_cache entry, cached = cache_key_value(getpwuid, uid, _uid_to_pwd_cache) if entry and not cached: _name_to_pwd_cache[entry.pw_name] = entry return entry def pwd_from_name(name): """Return password database entry for name (may be a cached value). Return None if no entry is found. """ assert isinstance(name, bytes) global _uid_to_pwd_cache, _name_to_pwd_cache entry, cached = cache_key_value(getpwnam, name, _name_to_pwd_cache) if entry and not cached: _uid_to_pwd_cache[entry.pw_uid] = entry return entry _gid_to_grp_cache = {} _name_to_grp_cache = {} def grp_from_gid(gid): """Return password database entry for gid (may be a cached value). Return None if no entry is found. """ global _gid_to_grp_cache, _name_to_grp_cache entry, cached = cache_key_value(getgrgid, gid, _gid_to_grp_cache) if entry and not cached: _name_to_grp_cache[entry.gr_name] = entry return entry def grp_from_name(name): """Return password database entry for name (may be a cached value). Return None if no entry is found. 
""" assert isinstance(name, bytes) global _gid_to_grp_cache, _name_to_grp_cache entry, cached = cache_key_value(getgrnam, name, _name_to_grp_cache) if entry and not cached: _gid_to_grp_cache[entry.gr_gid] = entry return entry _username = None def username(): """Get the user's login name.""" global _username if not _username: uid = os.getuid() _username = pwd_from_uid(uid).pw_name or b'user%d' % uid return _username _userfullname = None def userfullname(): """Get the user's full name.""" global _userfullname if not _userfullname: uid = os.getuid() entry = pwd_from_uid(uid) if entry: _userfullname = entry.pw_gecos.split(b',')[0] or entry.pw_name if not _userfullname: _userfullname = b'user%d' % uid return _userfullname bup-0.33.3/lib/bup/repo.py000066400000000000000000000115441454333004200152440ustar00rootroot00000000000000 from __future__ import absolute_import from os.path import realpath from functools import partial from bup import client, git, vfs from bup.compat import pending_raise _next_repo_id = 0 _repo_ids = {} def _repo_id(key): global _next_repo_id, _repo_ids repo_id = _repo_ids.get(key) if repo_id: return repo_id next_id = _next_repo_id = _next_repo_id + 1 _repo_ids[key] = next_id return next_id class LocalRepo: def __init__(self, repo_dir=None): self.closed = False self.repo_dir = realpath(repo_dir or git.repo()) self._cp = git.cp(self.repo_dir) self.update_ref = partial(git.update_ref, repo_dir=self.repo_dir) self.rev_list = partial(git.rev_list, repo_dir=self.repo_dir) self._id = _repo_id(self.repo_dir) def close(self): self.closed = True def __del__(self): assert self.closed def __enter__(self): return self def __exit__(self, type, value, traceback): with pending_raise(value, rethrow=False): self.close() def id(self): """Return an identifier that differs from any other repository that doesn't share the same repository-specific information (e.g. 
refs, tags, etc.).""" return self._id def is_remote(self): return False def new_packwriter(self, compression_level=1, max_pack_size=None, max_pack_objects=None): return git.PackWriter(repo_dir=self.repo_dir, compression_level=compression_level, max_pack_size=max_pack_size, max_pack_objects=max_pack_objects) def cat(self, ref): """If ref does not exist, yield (None, None, None). Otherwise yield (oidx, type, size), and then all of the data associated with ref. """ it = self._cp.get(ref) oidx, typ, size = info = next(it) yield info if oidx: for data in it: yield data assert not next(it, None) def join(self, ref): return self._cp.join(ref) def refs(self, patterns=None, limit_to_heads=False, limit_to_tags=False): for ref in git.list_refs(patterns=patterns, limit_to_heads=limit_to_heads, limit_to_tags=limit_to_tags, repo_dir=self.repo_dir): yield ref ## Of course, the vfs better not call this... def resolve(self, path, parent=None, want_meta=True, follow=True): ## FIXME: mode_only=? return vfs.resolve(self, path, parent=parent, want_meta=want_meta, follow=follow) class RemoteRepo: def __init__(self, address): self.closed = False self.address = address self.client = client.Client(address) self.new_packwriter = self.client.new_packwriter self.update_ref = self.client.update_ref self.rev_list = self.client.rev_list self._id = _repo_id(self.address) def close(self): if not self.closed: self.closed = True self.client.close() self.client = None def __del__(self): assert self.closed def __enter__(self): return self def __exit__(self, type, value, traceback): with pending_raise(value, rethrow=False): self.close() def id(self): """Return an identifier that differs from any other repository that doesn't share the same repository-specific information (e.g. refs, tags, etc.).""" return self._id def is_remote(self): return True def cat(self, ref): """If ref does not exist, yield (None, None, None). Otherwise yield (oidx, type, size), and then all of the data associated with ref. 
""" # Yield all the data here so that we don't finish the # cat_batch iterator (triggering its cleanup) until all of the # data has been read. Otherwise we'd be out of sync with the # server. items = self.client.cat_batch((ref,)) oidx, typ, size, it = info = next(items) yield info[:-1] if oidx: for data in it: yield data assert not next(items, None) def join(self, ref): return self.client.join(ref) def refs(self, patterns=None, limit_to_heads=False, limit_to_tags=False): for ref in self.client.refs(patterns=patterns, limit_to_heads=limit_to_heads, limit_to_tags=limit_to_tags): yield ref def resolve(self, path, parent=None, want_meta=True, follow=True): ## FIXME: mode_only=? return self.client.resolve(path, parent=parent, want_meta=want_meta, follow=follow) bup-0.33.3/lib/bup/rm.py000066400000000000000000000133441454333004200147150ustar00rootroot00000000000000 from __future__ import absolute_import from binascii import hexlify, unhexlify from bup import git, vfs from bup.client import ClientError from bup.compat import hexstr, pending_raise from bup.git import get_commit_items from bup.helpers import add_error, die_if_errors, log, saved_errors from bup.io import path_msg def append_commit(hash, parent, cp, writer): ci = get_commit_items(hash, cp) tree = unhexlify(ci.tree) author = b'%s <%s>' % (ci.author_name, ci.author_mail) committer = b'%s <%s>' % (ci.committer_name, ci.committer_mail) c = writer.new_commit(tree, parent, author, ci.author_sec, ci.author_offset, committer, ci.committer_sec, ci.committer_offset, ci.message) return c, tree def filter_branch(tip_commit_hex, exclude, writer): # May return None if everything is excluded. commits = [unhexlify(x) for x in git.rev_list(tip_commit_hex)] commits.reverse() last_c, tree = None, None # Rather than assert that we always find an exclusion here, we'll # just let the StopIteration signal the error. 
first_exclusion = next(i for i, c in enumerate(commits) if exclude(c)) if first_exclusion != 0: last_c = commits[first_exclusion - 1] tree = unhexlify(get_commit_items(hexlify(last_c), git.cp()).tree) commits = commits[first_exclusion:] for c in commits: if exclude(c): continue last_c, tree = append_commit(hexlify(c), last_c, git.cp(), writer) return last_c def commit_oid(item): if isinstance(item, vfs.Commit): return item.coid assert isinstance(item, vfs.RevList) return item.oid def rm_saves(saves, writer): assert(saves) first_branch_item = saves[0][1] for save, branch in saves: # Be certain they're all on the same branch assert(branch == first_branch_item) rm_commits = frozenset([commit_oid(save) for save, branch in saves]) orig_tip = commit_oid(first_branch_item) new_tip = filter_branch(hexlify(orig_tip), lambda x: x in rm_commits, writer) assert(orig_tip) assert(new_tip != orig_tip) return orig_tip, new_tip def dead_items(repo, paths): """Return an optimized set of removals, reporting errors via add_error, and if there are any errors, return None, None.""" dead_branches = {} dead_saves = {} # Scan for bad requests, and opportunities to optimize for path in paths: try: resolved = vfs.resolve(repo, path, follow=False) except vfs.IOError as e: add_error(e) continue else: leaf_name, leaf_item = resolved[-1] if not leaf_item: add_error('error: cannot access %s in %s' % (path_msg(b'/'.join(name for name, item in resolved)), path_msg(path))) continue if isinstance(leaf_item, vfs.RevList): # rm /foo branchname = leaf_name dead_branches[branchname] = leaf_item dead_saves.pop(branchname, None) # rm /foo obviates rm /foo/bar elif isinstance(leaf_item, vfs.Commit): # rm /foo/bar if leaf_name == b'latest': add_error("error: cannot delete 'latest' symlink") else: branchname, branchitem = resolved[-2] if branchname not in dead_branches: dead = leaf_item, branchitem dead_saves.setdefault(branchname, []).append(dead) else: add_error("don't know how to remove %s yet" % 
path_msg(path)) if saved_errors: return None, None return dead_branches, dead_saves def bup_rm(repo, paths, compression=6, verbosity=None): dead_branches, dead_saves = dead_items(repo, paths) die_if_errors('not proceeding with any removals\n') updated_refs = {} # ref_name -> (original_ref, tip_commit(bin)) for branchname, branchitem in dead_branches.items(): ref = b'refs/heads/' + branchname assert(not ref in updated_refs) updated_refs[ref] = (branchitem.oid, None) if dead_saves: writer = git.PackWriter(compression_level=compression) try: for branch, saves in dead_saves.items(): assert(saves) updated_refs[b'refs/heads/' + branch] = rm_saves(saves, writer) except BaseException as ex: with pending_raise(ex): writer.abort() finally: writer.close() # Only update the refs here, at the very end, so that if something # goes wrong above, the old refs will be undisturbed. Make an attempt # to update each ref. for ref_name, info in updated_refs.items(): orig_ref, new_ref = info try: if not new_ref: git.delete_ref(ref_name, hexlify(orig_ref)) else: git.update_ref(ref_name, new_ref, orig_ref) if verbosity: log('updated %s (%s%s)\n' % (path_msg(ref_name), hexstr(orig_ref) + ' -> ' if orig_ref else '', hexstr(new_ref))) except (git.GitError, ClientError) as ex: if new_ref: add_error('while trying to update %s (%s%s): %s' % (path_msg(ref_name), hexstr(orig_ref) + ' -> ' if orig_ref else '', hexstr(new_ref), ex)) else: add_error('while trying to delete %r (%s): %s' % (ref_name, hexstr(orig_ref), ex)) bup-0.33.3/lib/bup/shquote.py000066400000000000000000000116141454333004200157650ustar00rootroot00000000000000 from __future__ import absolute_import import re from bup.compat import bytes_from_byte q = b"'" qq = b'"' class QuoteError(Exception): pass def _quotesplit(line): inquote = None inescape = None wordstart = 0 word = b'' for i in range(len(line)): c = bytes_from_byte(line[i]) if inescape: if inquote == q and c != q: word += b'\\' # single-q backslashes can only quote single-q 
word += c inescape = False elif c == b'\\': inescape = True elif c == inquote: inquote = None # this is un-sh-like, but do it for sanity when autocompleting yield (wordstart, word) word = b'' wordstart = i+1 elif not inquote and not word and c in (q, qq): # the 'not word' constraint on this is un-sh-like, but do it # for sanity when autocompleting inquote = c wordstart = i elif not inquote and c in [b' ', b'\n', b'\r', b'\t']: if word: yield (wordstart, word) word = b'' wordstart = i+1 else: word += c if word: yield (wordstart, word) if inquote or inescape or word: raise QuoteError() def quotesplit(line): """Split 'line' into a list of offset,word tuples. The words are produced after removing doublequotes, singlequotes, and backslash escapes. Note that this implementation isn't entirely sh-compatible. It only dequotes words that *start* with a quote character, that is, bytes like hello"world" will not have its quotes removed, while bytes like hello "world" will be turned into [(0, 'hello'), (6, 'world')] (ie. quotes removed). """ l = [] try: for i in _quotesplit(line): l.append(i) except QuoteError: pass return l def unfinished_word(line): """Returns the quotechar,word of any unfinished word at the end of 'line'. You can use this to determine if 'line' is a completely parseable line (ie. one that quotesplit() will finish successfully) or if you need to read more bytes first. Args: line: bytes Returns: quotechar,word: the initial quote char (or None), and the partial word. """ try: for (wordstart,word) in _quotesplit(line): pass except QuoteError: firstchar = bytes_from_byte(line[wordstart]) if firstchar in [q, qq]: return (firstchar, word) else: return (None, word) else: return (None, b'') def quotify(qtype, word, terminate): """Return a bytes corresponding to given word, quoted using qtype. The resulting bytes are dequotable using quotesplit() and can be joined with other quoted bytes by adding arbitrary whitespace separators. 
Args: qtype: one of '', shquote.qq, or shquote.q word: the bytes to quote. May contain arbitrary characters. terminate: include the trailing quote character, if any. Returns: The quoted bytes. """ if qtype == qq: return qq + word.replace(qq, b'\\"') + (terminate and qq or b'') elif qtype == q: return q + word.replace(q, b"\\'") + (terminate and q or b'') else: return re.sub(br'([\"\' \t\n\r])', br'\\\1', word) def quotify_list(words): """Return minimally-quoted bytes produced by quoting each word. This calculates the qtype for each word depending on whether the word already includes singlequote characters, doublequote characters, both, or neither. Args: words: the list of words to quote. Returns: The resulting bytes, with quoted words separated by ' '. """ wordout = [] for word in words: qtype = q if word and not re.search(br'[\s\"\']', word): qtype = b'' elif q in word and qq not in word: qtype = qq wordout.append(quotify(qtype, word, True)) return b' '.join(wordout) def what_to_add(qtype, origword, newword, terminate): """Return a qtype that is needed to finish a partial word. For example, given an origword of '\"frog' and a newword of '\"frogston', returns either: terminate=False: 'ston' terminate=True: 'ston\"' This is useful when calculating tab completions for readline. Args: qtype: the type of quoting to use (ie. the first character of origword) origword: the original word that needs completion. newword: the word we want it to be after completion. Must start with origword. terminate: true if we should add the actual quote character at the end. Returns: The bytes to append to origword to produce (quoted) newword. 
""" if not newword.startswith(origword): return b'' else: qold = quotify(qtype, origword, terminate=False) return quotify(qtype, newword, terminate=terminate)[len(qold):] bup-0.33.3/lib/bup/source_info.py000066400000000000000000000001421454333004200166020ustar00rootroot00000000000000commit='85fdce08bdb752eb81f5954e1dffae185b5a43c6' date='2023-12-28 11:28:02 -0600' modified=False bup-0.33.3/lib/bup/ssh.py000066400000000000000000000030751454333004200150740ustar00rootroot00000000000000"""SSH connection. Connect to a remote host via SSH and execute a command on the host. """ from __future__ import absolute_import, print_function import sys, os, re, subprocess from bup import path from bup.compat import environ def connect(rhost, port, subcmd, stderr=None): """Connect to 'rhost' and execute the bup subcommand 'subcmd' on it.""" assert not re.search(br'[^\w-]', subcmd) if rhost is None or rhost == b'-': argv = [path.exe(), subcmd] else: buglvl = int(environ.get(b'BUP_DEBUG', 0)) force_tty = int(environ.get(b'BUP_FORCE_TTY', 0)) tty_width = environ.get(b'BUP_TTY_WIDTH', None) if tty_width is not None: tty_width = b'BUP_TTY_WIDTH=%d' % int(tty_width) else: tty_width = b'' cmd = b""" sh -c 'BUP_DEBUG=%d BUP_FORCE_TTY=%d %s bup %s' """ % (buglvl, force_tty, tty_width, subcmd) argv = [b'ssh'] if port: argv.extend((b'-p', port)) argv.extend((rhost, b'--', cmd.strip())) #helpers.log('argv is: %r\n' % argv) if sys.version_info[0] < 3: return subprocess.Popen(argv, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=stderr, preexec_fn=lambda: os.setsid()) else: return subprocess.Popen(argv, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=stderr, start_new_session=True) bup-0.33.3/lib/bup/tree.py000066400000000000000000000011401454333004200152250ustar00rootroot00000000000000 from __future__ import absolute_import, print_function class TreeItem: __slots__ = 'name', 'mode', 'gitmode', 'oid', 'meta' def __init__(self, name, mode, gitmode, oid, meta): self.name = name 
self.mode = mode self.gitmode = gitmode self.oid = oid self.meta = meta class StackDir: __slots__ = 'name', 'items', 'meta' def __init__(self, name, meta): self.name = name self.meta = meta self.items = [] def append(self, name, mode, gitmode, oid, meta): self.items.append(TreeItem(name, mode, gitmode, oid, meta)) bup-0.33.3/lib/bup/version.py000066400000000000000000000015161454333004200157620ustar00rootroot00000000000000 from __future__ import absolute_import, print_function from bup import source_info try: import bup.checkout_info as checkout_info except ModuleNotFoundError: checkout_info = None pass if checkout_info: date = checkout_info.date.encode('ascii') commit = checkout_info.commit.encode('ascii') modified = checkout_info.modified else: date = source_info.date.encode('ascii') commit = source_info.commit.encode('ascii') modified = source_info.modified assert not date.startswith(b'$Format') assert not commit.startswith(b'$Format') # The ~ in a version is a Debian-style "always less than" marker: # https://www.debian.org/doc/debian-policy/ch-controlfields.html#version base_version = b'0.33.3' version = base_version if version.endswith(b'~'): version += commit if modified: version += b'+' bup-0.33.3/lib/bup/vfs.py000066400000000000000000001226471454333004200151040ustar00rootroot00000000000000"""Virtual File System interface to bup repository content. This module provides a path-based interface to the content of a bup repository. The VFS is structured like this: /SAVE-NAME/latest/... /SAVE-NAME/SAVE-DATE/... /.tag/TAG-NAME/... Each path is represented by an item that has least an item.meta which may be either a Metadata object, or an integer mode. Functions like item_mode() and item_size() will return the mode and size in either case. Any item.meta Metadata instances must not be modified directly. Make a copy to modify via item.meta.copy() if needed, or call copy_item(). The want_meta argument is advisory for calls that accept it, and it may not be honored. 
Callers must be able to handle an item.meta value that is either an instance of Metadata or an integer mode, perhaps via item_mode() or augment_item_meta(). Setting want_meta=False is rarely desirable since it can limit the VFS to only the metadata that git itself can represent, and so for example, fifos and sockets will appear to be regular files (e.g. S_ISREG(item_mode(item)) will be true). But the option is still provided because it may be more efficient when just the path names or the more limited metadata is sufficient. Any given metadata object's size may be None, in which case the size can be computed via item_size() or augment_item_meta(..., include_size=True). When traversing a directory using functions like contents(), the meta value for any directories other than '.' will be a default directory mode, not a Metadata object. This is because the actual metadata for a directory is stored inside the directory (see fill_in_metadata_if_dir() or ensure_item_has_metadata()). Commit items represent commits (e.g. /.tag/some-commit or /foo/latest), and for most purposes, they appear as the underlying tree. S_ISDIR(item_mode(item)) will return true for both tree Items and Commits and the commit's oid is the tree hash; the commit hash is item.coid. 
""" from __future__ import absolute_import, print_function from binascii import hexlify, unhexlify from collections import namedtuple from errno import EINVAL, ELOOP, ENOTDIR from itertools import chain, groupby, tee from random import randrange from stat import S_IFDIR, S_IFLNK, S_IFREG, S_ISDIR, S_ISLNK, S_ISREG from time import localtime, strftime import re, sys from bup import git, vint from bup.compat import hexstr, pending_raise from bup.git import BUP_CHUNKED, parse_commit, tree_decode from bup.helpers import debug2, last, nullcontext_if_not from bup.io import path_msg from bup.metadata import Metadata from bup.vint import read_bvec, write_bvec from bup.vint import read_vint, write_vint from bup.vint import read_vuint, write_vuint if sys.version_info[0] < 3: from exceptions import IOError as py_IOError else: py_IOError = IOError # We currently assume that it's always appropriate to just forward IOErrors # to a remote client. class IOError(py_IOError): def __init__(self, errno, message, terminus=None): py_IOError.__init__(self, errno, message) self.terminus = terminus def write_ioerror(port, ex): assert isinstance(ex, IOError) write_vuint(port, (1 if ex.errno is not None else 0) | (2 if ex.strerror is not None else 0) | (4 if ex.terminus is not None else 0)) if ex.errno is not None: write_vint(port, ex.errno) if ex.strerror is not None: write_bvec(port, ex.strerror.encode('utf-8')) if ex.terminus is not None: write_resolution(port, ex.terminus) def read_ioerror(port): mask = read_vuint(port) no = read_vint(port) if 1 & mask else None msg = read_bvec(port).decode('utf-8') if 2 & mask else None term = read_resolution(port) if 4 & mask else None return IOError(errno=no, message=msg, terminus=term) default_file_mode = S_IFREG | 0o644 default_dir_mode = S_IFDIR | 0o755 default_symlink_mode = S_IFLNK | 0o755 def _default_mode_for_gitmode(gitmode): if S_ISREG(gitmode): return default_file_mode if S_ISDIR(gitmode): return default_dir_mode if S_ISLNK(gitmode): return 
default_symlink_mode raise Exception('unexpected git mode ' + oct(gitmode)) def _normal_or_chunked_file_size(repo, oid): """Return the size of the normal or chunked file indicated by oid.""" # FIXME: --batch-format CatPipe? it = repo.cat(hexlify(oid)) _, obj_t, size = next(it) ofs = 0 while obj_t == b'tree': mode, name, last_oid = last(tree_decode(b''.join(it))) ofs += int(name, 16) it = repo.cat(hexlify(last_oid)) _, obj_t, size = next(it) return ofs + sum(len(b) for b in it) def _skip_chunks_before_offset(tree, offset): prev_ent = next(tree, None) if not prev_ent: return tree ent = None for ent in tree: ent_ofs = int(ent[1], 16) if ent_ofs > offset: return chain([prev_ent, ent], tree) if ent_ofs == offset: return chain([ent], tree) prev_ent = ent return [prev_ent] def _tree_chunks(repo, tree, startofs): "Tree should be a sequence of (name, mode, hash) as per tree_decode()." assert(startofs >= 0) # name is the chunk's hex offset in the original file for mode, name, oid in _skip_chunks_before_offset(tree, startofs): ofs = int(name, 16) skipmore = startofs - ofs if skipmore < 0: skipmore = 0 it = repo.cat(hexlify(oid)) _, obj_t, size = next(it) data = b''.join(it) if S_ISDIR(mode): assert obj_t == b'tree' for b in _tree_chunks(repo, tree_decode(data), skipmore): yield b else: assert obj_t == b'blob' yield data[skipmore:] class _ChunkReader: def __init__(self, repo, oid, startofs): it = repo.cat(hexlify(oid)) _, obj_t, size = next(it) isdir = obj_t == b'tree' data = b''.join(it) if isdir: self.it = _tree_chunks(repo, tree_decode(data), startofs) self.blob = None else: self.it = None self.blob = data[startofs:] self.ofs = startofs def next(self, size): out = b'' while len(out) < size: if self.it and not self.blob: try: self.blob = next(self.it) except StopIteration: self.it = None if self.blob: want = size - len(out) out += self.blob[:want] self.blob = self.blob[want:] if not self.it: break debug2('next(%d) returned %d\n' % (size, len(out))) self.ofs += len(out) 
return out class _FileReader(object): def __init__(self, repo, oid, known_size=None): assert len(oid) == 20 self.closed = False self.oid = oid self.ofs = 0 self.reader = None self._repo = repo self._size = known_size def _compute_size(self): if not self._size: self._size = _normal_or_chunked_file_size(self._repo, self.oid) return self._size def seek(self, ofs): if ofs < 0 or ofs > self._compute_size(): raise IOError(EINVAL, 'Invalid seek offset: %d' % ofs) self.ofs = ofs def tell(self): return self.ofs def read(self, count=-1): size = self._compute_size() if self.ofs >= size: return b'' if count < 0: count = size - self.ofs if not self.reader or self.reader.ofs != self.ofs: self.reader = _ChunkReader(self._repo, self.oid, self.ofs) try: buf = self.reader.next(count) except: self.reader = None raise # our offsets will be all screwed up otherwise self.ofs += len(buf) return buf def close(self): self.closed = True def __del__(self): assert self.closed def __enter__(self): return self def __exit__(self, type, value, traceback): with pending_raise(value, rethrow=False): self.close() _multiple_slashes_rx = re.compile(br'//+') def _decompose_path(path): """Return a boolean indicating whether the path is absolute, and a reversed list of path elements, omitting any occurrences of "." and ignoring any leading or trailing slash. If the path is effectively '/' or '.', return an empty list. """ path = re.sub(_multiple_slashes_rx, b'/', path) if path == b'/': return True, True, [] is_absolute = must_be_dir = False if path.startswith(b'/'): is_absolute = True path = path[1:] for suffix in (b'/', b'/.'): if path.endswith(suffix): must_be_dir = True path = path[:-len(suffix)] parts = [x for x in path.split(b'/') if x != b'.'] parts.reverse() if not parts: must_be_dir = True # e.g. path was effectively '.' or '/', etc. 
return is_absolute, must_be_dir, parts Item = namedtuple('Item', ('meta', 'oid')) Chunky = namedtuple('Chunky', ('meta', 'oid')) FakeLink = namedtuple('FakeLink', ('meta', 'target')) Root = namedtuple('Root', ('meta')) Tags = namedtuple('Tags', ('meta')) RevList = namedtuple('RevList', ('meta', 'oid')) Commit = namedtuple('Commit', ('meta', 'oid', 'coid')) item_types = (Item, Chunky, Root, Tags, RevList, Commit) real_tree_types = (Item, Commit) def write_item(port, item): kind = type(item) name = bytes(kind.__name__.encode('ascii')) meta = item.meta has_meta = 1 if isinstance(meta, Metadata) else 0 if kind in (Item, Chunky, RevList): assert len(item.oid) == 20 if has_meta: vint.send(port, 'sVs', name, has_meta, item.oid) Metadata.write(meta, port, include_path=False) else: vint.send(port, 'sVsV', name, has_meta, item.oid, item.meta) elif kind in (Root, Tags): if has_meta: vint.send(port, 'sV', name, has_meta) Metadata.write(meta, port, include_path=False) else: vint.send(port, 'sVV', name, has_meta, item.meta) elif kind == Commit: assert len(item.oid) == 20 assert len(item.coid) == 20 if has_meta: vint.send(port, 'sVss', name, has_meta, item.oid, item.coid) Metadata.write(meta, port, include_path=False) else: vint.send(port, 'sVssV', name, has_meta, item.oid, item.coid, item.meta) elif kind == FakeLink: if has_meta: vint.send(port, 'sVs', name, has_meta, item.target) Metadata.write(meta, port, include_path=False) else: vint.send(port, 'sVsV', name, has_meta, item.target, item.meta) else: assert False def read_item(port): def read_m(port, has_meta): if has_meta: m = Metadata.read(port) return m return read_vuint(port) kind, has_meta = vint.recv(port, 'sV') if kind == b'Item': oid, meta = read_bvec(port), read_m(port, has_meta) return Item(oid=oid, meta=meta) if kind == b'Chunky': oid, meta = read_bvec(port), read_m(port, has_meta) return Chunky(oid=oid, meta=meta) if kind == b'RevList': oid, meta = read_bvec(port), read_m(port, has_meta) return RevList(oid=oid, 
meta=meta) if kind == b'Root': return Root(meta=read_m(port, has_meta)) if kind == b'Tags': return Tags(meta=read_m(port, has_meta)) if kind == b'Commit': oid, coid = vint.recv(port, 'ss') meta = read_m(port, has_meta) return Commit(oid=oid, coid=coid, meta=meta) if kind == b'FakeLink': target, meta = read_bvec(port), read_m(port, has_meta) return FakeLink(target=target, meta=meta) assert False def write_resolution(port, resolution): write_vuint(port, len(resolution)) for name, item in resolution: write_bvec(port, name) if item: port.write(b'\x01') write_item(port, item) else: port.write(b'\x00') def read_resolution(port): n = read_vuint(port) result = [] for i in range(n): name = read_bvec(port) have_item = ord(port.read(1)) assert have_item in (0, 1) item = read_item(port) if have_item else None result.append((name, item)) return tuple(result) _root = Root(meta=default_dir_mode) _tags = Tags(meta=default_dir_mode) ### vfs cache ### A general purpose shared cache with (currently) cheap random ### eviction. At the moment there is no weighting so a single commit ### item is just as likely to be evicted as an entire "rev-list". See ### is_valid_cache_key for a description of the expected content. _cache = {} _cache_keys = [] _cache_max_items = 30000 def clear_cache(): global _cache, _cache_keys _cache = {} _cache_keys = [] def is_valid_cache_key(x): """Return logically true if x looks like it could be a valid cache key (with respect to structure). Current valid cache entries: res:... -> resolution itm:OID -> Commit rvl:OID -> {'.', commit, '2012...', next_commit, ...} """ # Suspect we may eventually add "(container_oid, name) -> ...", and others. 
if isinstance(x, bytes): tag = x[:4] if tag in (b'itm:', b'rvl:') and len(x) == 24: return True if tag == b'res:': return True return False def cache_get(key): global _cache if not is_valid_cache_key(key): raise Exception('invalid cache key: ' + repr(key)) return _cache.get(key) def cache_notice(key, value, overwrite=False): global _cache, _cache_keys, _cache_max_items if not is_valid_cache_key(key): raise Exception('invalid cache key: ' + repr(key)) if key in _cache: if overwrite: _cache[key] = value return if len(_cache) < _cache_max_items: _cache_keys.append(key) _cache[key] = value return victim_i = randrange(0, len(_cache_keys)) victim = _cache_keys[victim_i] del _cache[victim] _cache_keys[victim_i] = key _cache[key] = value def _has_metadata_if_needed(item, need_meta): if not need_meta: return True if isinstance(item.meta, Metadata): return True return False def cache_get_commit_item(oid, need_meta=True): """Return the requested tree item if it can be found in the cache. When need_meta is true don't return a cached item that only has a mode.""" # tree might be stored independently, or as '.' with its entries. commit_key = b'itm:' + oid item = cache_get(commit_key) if item: if _has_metadata_if_needed(item, need_meta): return item entries = cache_get(b'rvl:' + oid) if entries: item = entries[b'.'] if _has_metadata_if_needed(item, need_meta): return item return None def copy_item(item): """Return a completely independent copy of item, such that modifications will not affect the original. """ meta = getattr(item, 'meta', None) if isinstance(meta, Metadata): return(item._replace(meta=meta.copy())) return item def item_mode(item): """Return the integer mode (stat st_mode) for item.""" m = item.meta if isinstance(m, Metadata): return m.mode return m def _read_dir_meta(bupm): # This is because save writes unmodified Metadata() entries for # fake parents -- test-save-strip-graft.sh demonstrates. 
m = Metadata.read(bupm) if not m: return default_dir_mode assert m.mode is not None if m.size is None: m.size = 0 return m def tree_data_and_bupm(repo, oid): """Return (tree_bytes, bupm_oid) where bupm_oid will be None if the tree has no metadata (i.e. older bup save, or non-bup tree). """ assert len(oid) == 20 it = repo.cat(hexlify(oid)) _, item_t, size = next(it) data = b''.join(it) if item_t == b'commit': commit = parse_commit(data) it = repo.cat(commit.tree) _, item_t, size = next(it) data = b''.join(it) assert item_t == b'tree' elif item_t != b'tree': raise Exception('%s is not a tree or commit' % hexstr(oid)) for _, mangled_name, sub_oid in tree_decode(data): if mangled_name == b'.bupm': return data, sub_oid if mangled_name > b'.bupm': break return data, None def _find_treeish_oid_metadata(repo, oid): """Return the metadata for the tree or commit oid, or None if the tree has no metadata (i.e. older bup save, or non-bup tree). """ tree_data, bupm_oid = tree_data_and_bupm(repo, oid) if bupm_oid: with _FileReader(repo, bupm_oid) as meta_stream: return _read_dir_meta(meta_stream) return None def _readlink(repo, oid): return b''.join(repo.join(hexlify(oid))) def readlink(repo, item): """Return the link target of item, which must be a symlink. 
Reads the target from the repository if necessary.""" assert repo assert S_ISLNK(item_mode(item)) if isinstance(item, FakeLink): return item.target if isinstance(item.meta, Metadata): target = item.meta.symlink_target if target: return target return _readlink(repo, item.oid) def _compute_item_size(repo, item): mode = item_mode(item) if S_ISREG(mode): size = _normal_or_chunked_file_size(repo, item.oid) return size if S_ISLNK(mode): if isinstance(item, FakeLink): return len(item.target) return len(_readlink(repo, item.oid)) return 0 def item_size(repo, item): """Return the size of item, computing it if necessary.""" m = item.meta if isinstance(m, Metadata) and m.size is not None: return m.size return _compute_item_size(repo, item) def tree_data_reader(repo, oid): """Return an open reader for all of the data contained within oid. If oid refers to a tree, recursively concatenate all of its contents.""" return _FileReader(repo, oid) def fopen(repo, item): """Return an open reader for the given file item.""" assert S_ISREG(item_mode(item)) return tree_data_reader(repo, item.oid) def _commit_item_from_data(oid, data): info = parse_commit(data) return Commit(meta=default_dir_mode, oid=unhexlify(info.tree), coid=oid) def _commit_item_from_oid(repo, oid, require_meta): commit = cache_get_commit_item(oid, need_meta=require_meta) if commit and ((not require_meta) or isinstance(commit.meta, Metadata)): return commit it = repo.cat(hexlify(oid)) _, typ, size = next(it) assert typ == b'commit' commit = _commit_item_from_data(oid, b''.join(it)) if require_meta: meta = _find_treeish_oid_metadata(repo, commit.oid) if meta: commit = commit._replace(meta=meta) commit_key = b'itm:' + oid cache_notice(commit_key, commit, overwrite=True) return commit def _revlist_item_from_oid(repo, oid, require_meta): commit = _commit_item_from_oid(repo, oid, require_meta) return RevList(oid=oid, meta=commit.meta) def root_items(repo, names=None, want_meta=True): """Yield (name, item) for the items in 
'/' in the VFS. Return everything if names is logically false, otherwise return only items with a name in the collection. """ # FIXME: what about non-leaf refs like 'refs/heads/foo/bar/baz? global _root, _tags if not names: yield b'.', _root yield b'.tag', _tags # FIXME: maybe eventually support repo.clone() or something # and pass in two repos, so we can drop the tuple() and stream # in parallel (i.e. meta vs refs). for name, oid in tuple(repo.refs([], limit_to_heads=True)): assert(name.startswith(b'refs/heads/')) yield name[11:], _revlist_item_from_oid(repo, oid, want_meta) return if b'.' in names: yield b'.', _root if b'.tag' in names: yield b'.tag', _tags for ref in names: if ref in (b'.', b'.tag'): continue it = repo.cat(b'refs/heads/' + ref) oidx, typ, size = next(it) if not oidx: for _ in it: pass continue assert typ == b'commit' commit = parse_commit(b''.join(it)) yield ref, _revlist_item_from_oid(repo, unhexlify(oidx), want_meta) def ordered_tree_entries(tree_data, bupm=None): """Yields (name, mangled_name, kind, gitmode, oid) for each item in tree, sorted by name. """ # Sadly, the .bupm entries currently aren't in git tree order, # but in unmangled name order. They _do_ account for the fact # that git sorts trees (including chunked trees) as if their # names ended with "/" (so "fo" sorts after "fo." iff fo is a # directory), but we apply this on the unmangled names in save # rather than on the mangled names. # This makes streaming impossible when we need the metadata. 
def result_from_tree_entry(tree_entry): gitmode, mangled_name, oid = tree_entry name, kind = git.demangle_name(mangled_name, gitmode) return name, mangled_name, kind, gitmode, oid tree_ents = (result_from_tree_entry(x) for x in tree_decode(tree_data)) if bupm: tree_ents = sorted(tree_ents, key=lambda x: x[0]) for ent in tree_ents: yield ent def tree_items(oid, tree_data, names=frozenset(), bupm=None): def tree_item(ent_oid, kind, gitmode): if kind == BUP_CHUNKED: meta = Metadata.read(bupm) if bupm else default_file_mode return Chunky(oid=ent_oid, meta=meta) if S_ISDIR(gitmode): # No metadata here (accessable via '.' inside ent_oid). return Item(meta=default_dir_mode, oid=ent_oid) meta = Metadata.read(bupm) if bupm else None # handle the case of metadata being empty/missing in bupm # (or there not being bupm at all) if meta is None: meta = _default_mode_for_gitmode(gitmode) return Item(oid=ent_oid, meta=meta) assert len(oid) == 20 if not names: dot_meta = _read_dir_meta(bupm) if bupm else default_dir_mode yield b'.', Item(oid=oid, meta=dot_meta) tree_entries = ordered_tree_entries(tree_data, bupm) for name, mangled_name, kind, gitmode, ent_oid in tree_entries: if mangled_name == b'.bupm': continue assert name != b'.' yield name, tree_item(ent_oid, kind, gitmode) return # Assumes the tree is properly formed, i.e. there are no # duplicates, and entries will be in git tree order. if not isinstance(names, (frozenset, set)): names = frozenset(names) remaining = len(names) # Account for the bupm sort order issue (cf. ordered_tree_entries above) last_name = max(names) if bupm else max(names) + b'/' if b'.' in names: dot_meta = _read_dir_meta(bupm) if bupm else default_dir_mode yield b'.', Item(oid=oid, meta=dot_meta) if remaining == 1: return remaining -= 1 tree_entries = ordered_tree_entries(tree_data, bupm) for name, mangled_name, kind, gitmode, ent_oid in tree_entries: if mangled_name == b'.bupm': continue assert name != b'.' 
if name not in names: if name > last_name: break # given bupm sort order, we're finished if (kind == BUP_CHUNKED or not S_ISDIR(gitmode)) and bupm: Metadata.read(bupm) continue yield name, tree_item(ent_oid, kind, gitmode) if remaining == 1: break remaining -= 1 def tree_items_with_meta(repo, oid, tree_data, names): # For now, the .bupm order doesn't quite match git's, and we don't # load the tree data incrementally anyway, so we just work in RAM # via tree_data. assert len(oid) == 20 bupm = None for _, mangled_name, sub_oid in tree_decode(tree_data): if mangled_name == b'.bupm': bupm = _FileReader(repo, sub_oid) break if mangled_name > b'.bupm': break with nullcontext_if_not(bupm): for item in tree_items(oid, tree_data, names, bupm): yield item _save_name_rx = re.compile(br'^\d\d\d\d-\d\d-\d\d-\d{6}(-\d+)?$') def _reverse_suffix_duplicates(strs): """Yields the elements of strs, with any runs of duplicate values suffixed with -N suffixes, where the zero padded integer N decreases to 0 by 1 (e.g. 10, 09, ..., 00). 
""" for name, duplicates in groupby(strs): ndup = len(tuple(duplicates)) if ndup == 1: yield name else: ndig = len(str(ndup - 1)) fmt = b'%s-' + b'%0' + (b'%d' % ndig) + b'd' for i in range(ndup - 1, -1, -1): yield fmt % (name, i) def parse_rev(f): items = f.readline().split(None) assert len(items) == 2 tree, auth_sec = items return unhexlify(tree), int(auth_sec) def _name_for_rev(rev): commit_oidx, (tree_oid, utc) = rev return strftime('%Y-%m-%d-%H%M%S', localtime(utc)).encode('ascii') def _item_for_rev(rev): commit_oidx, (tree_oid, utc) = rev coid = unhexlify(commit_oidx) item = cache_get_commit_item(coid, need_meta=False) if item: return item item = Commit(meta=default_dir_mode, oid=tree_oid, coid=coid) commit_key = b'itm:' + coid cache_notice(commit_key, item) return item # non-string singleton _HAS_META_ENTRY = object() def cache_commit(repo, oid, require_meta=True): """Build, cache, and return a "name -> commit_item" dict of the entire commit rev-list. """ entries = {} entries[b'.'] = _revlist_item_from_oid(repo, oid, require_meta) revs = repo.rev_list((hexlify(oid),), format=b'%T %at', parse=parse_rev) rev_items, rev_names = tee(revs) revs = None # Don't disturb the tees rev_names = _reverse_suffix_duplicates(_name_for_rev(x) for x in rev_names) rev_items = (_item_for_rev(x) for x in rev_items) tip = None for item in rev_items: name = next(rev_names) tip = tip or (name, item) entries[name] = item entries[b'latest'] = FakeLink(meta=default_symlink_mode, target=tip[0]) revlist_key = b'rvl:' + tip[1].coid entries[_HAS_META_ENTRY] = require_meta cache_notice(revlist_key, entries, overwrite=True) return entries def revlist_items(repo, oid, names, require_meta=True): assert len(oid) == 20 # Special case '.' instead of caching the whole history since it's # the only way to get the metadata for the commit. if names and all(x == b'.' 
for x in names): yield b'.', _revlist_item_from_oid(repo, oid, require_meta) return # For now, don't worry about the possibility of the contents being # "too big" for the cache. revlist_key = b'rvl:' + oid entries = cache_get(revlist_key) if entries and require_meta and not entries[_HAS_META_ENTRY]: entries = None if not entries: entries = cache_commit(repo, oid, require_meta) if not names: for name in sorted((n for n in entries.keys() if n != _HAS_META_ENTRY)): yield name, entries[name] return names = frozenset(name for name in names if _save_name_rx.match(name) or name in (b'.', b'latest')) if b'.' in names: yield b'.', entries[b'.'] for name in (n for n in names if n != b'.'): if name == _HAS_META_ENTRY: continue commit = entries.get(name) if commit: yield name, commit def tags_items(repo, names): global _tags def tag_item(oid): assert len(oid) == 20 cached = cache_get_commit_item(oid, need_meta=False) if cached: return cached oidx = hexlify(oid) it = repo.cat(oidx) _, typ, size = next(it) if typ == b'commit': return _commit_item_from_data(oid, b''.join(it)) for _ in it: pass if typ == b'blob': return Item(meta=default_file_mode, oid=oid) elif typ == b'tree': return Item(meta=default_dir_mode, oid=oid) raise Exception('unexpected tag type ' + typ.decode('ascii') + ' for tag ' + path_msg(name)) if not names: yield b'.', _tags # We have to pull these all into ram because tag_item calls cat() for name, oid in tuple(repo.refs(names, limit_to_tags=True)): assert(name.startswith(b'refs/tags/')) name = name[10:] yield name, tag_item(oid) return # Assumes no duplicate refs if not isinstance(names, (frozenset, set)): names = frozenset(names) remaining = len(names) last_name = max(names) if b'.' 
in names: yield b'.', _tags if remaining == 1: return remaining -= 1 for name, oid in repo.refs(names, limit_to_tags=True): assert(name.startswith(b'refs/tags/')) name = name[10:] if name > last_name: return if name not in names: continue yield name, tag_item(oid) if remaining == 1: return remaining -= 1 def contents(repo, item, names=None, want_meta=True): """Yields information about the items contained in item. Yields (name, item) for each name in names, if the name exists, in an unspecified order. If there are no names, then yields (name, item) for all items, including, a first item named '.' representing the container itself. The meta value for any directories other than '.' will be a default directory mode, not a Metadata object. This is because the actual metadata for a directory is stored inside the directory (see fill_in_metadata_if_dir() or ensure_item_has_metadata()). Note that want_meta is advisory. For any given item, item.meta might be a Metadata instance or a mode, and if the former, meta.size might be None. Missing sizes can be computed via via item_size() or augment_item_meta(..., include_size=True). Do not modify any item.meta Metadata instances directly. If needed, make a copy via item.meta.copy() and modify that instead. """ # Q: are we comfortable promising '.' first when no names? global _root, _tags assert repo assert S_ISDIR(item_mode(item)) if isinstance(item, real_tree_types): it = repo.cat(hexlify(item.oid)) _, obj_t, size = next(it) data = b''.join(it) if obj_t != b'tree': for _ in it: pass # Note: it shouldn't be possible to see an Item with type # 'commit' since a 'commit' should always produce a Commit. 
raise Exception('unexpected git ' + obj_t.decode('ascii')) if want_meta: item_gen = tree_items_with_meta(repo, item.oid, data, names) else: item_gen = tree_items(item.oid, data, names) elif isinstance(item, RevList): item_gen = revlist_items(repo, item.oid, names, require_meta=want_meta) elif isinstance(item, Root): item_gen = root_items(repo, names, want_meta) elif isinstance(item, Tags): item_gen = tags_items(repo, names) else: raise Exception('unexpected VFS item ' + str(item)) for x in item_gen: yield x def _resolve_path(repo, path, parent=None, want_meta=True, follow=True): cache_key = b'res:%d%d%d:%s\0%s' \ % (bool(want_meta), bool(follow), repo.id(), (b'/'.join(x[0] for x in parent) if parent else b''), path) resolution = cache_get(cache_key) if resolution: return resolution def notice_resolution(r): cache_notice(cache_key, r) return r def raise_dir_required_but_not_dir(path, parent, past): raise IOError(ENOTDIR, "path %s%s resolves to non-directory %r" % (path, ' (relative to %r)' % parent if parent else '', past), terminus=past) global _root assert repo assert len(path) if parent: for x in parent: assert len(x) == 2 assert isinstance(x[0], (bytes, str)) assert isinstance(x[1], item_types) assert parent[0][1] == _root if not S_ISDIR(item_mode(parent[-1][1])): raise IOError(ENOTDIR, 'path resolution parent %r is not a directory' % (parent,)) is_absolute, must_be_dir, future = _decompose_path(path) if must_be_dir: follow = True if not future: # path was effectively '.' 
or '/' if is_absolute: return notice_resolution(((b'', _root),)) if parent: return notice_resolution(tuple(parent)) return notice_resolution(((b'', _root),)) if is_absolute: past = [(b'', _root)] else: past = list(parent) if parent else [(b'', _root)] hops = 0 while True: if not future: if must_be_dir and not S_ISDIR(item_mode(past[-1][1])): raise_dir_required_but_not_dir(path, parent, past) return notice_resolution(tuple(past)) segment = future.pop() if segment == b'..': assert len(past) > 0 if len(past) > 1: # .. from / is / assert S_ISDIR(item_mode(past[-1][1])) past.pop() else: parent_name, parent_item = past[-1] wanted = (segment,) if not want_meta else (b'.', segment) items = tuple(contents(repo, parent_item, names=wanted, want_meta=want_meta)) if not want_meta: item = items[0][1] if items else None else: # First item will be '.' and have the metadata item = items[1][1] if len(items) == 2 else None dot, dot_item = items[0] assert dot == b'.' past[-1] = parent_name, parent_item if not item: past.append((segment, None),) return notice_resolution(tuple(past)) mode = item_mode(item) if not S_ISLNK(mode): if not S_ISDIR(mode): past.append((segment, item),) if future: raise IOError(ENOTDIR, 'path %r%s ends internally in non-directory here: %r' % (path, ' (relative to %r)' % parent if parent else '', past), terminus=past) if must_be_dir: raise_dir_required_but_not_dir(path, parent, past) return notice_resolution(tuple(past)) # It's treeish if want_meta and isinstance(item, real_tree_types): dir_meta = _find_treeish_oid_metadata(repo, item.oid) if dir_meta: item = item._replace(meta=dir_meta) past.append((segment, item)) else: # symlink if not future and not follow: past.append((segment, item),) continue if hops > 100: raise IOError(ELOOP, 'too many symlinks encountered while resolving %r%s' % (path, ' relative to %r' % parent if parent else ''), terminus=tuple(past + [(segment, item)])) target = readlink(repo, item) is_absolute, _, target_future = 
_decompose_path(target) if is_absolute: if not target_future: # path was effectively '/' return notice_resolution(((b'', _root),)) past = [(b'', _root)] future = target_future else: future.extend(target_future) hops += 1 def resolve(repo, path, parent=None, want_meta=True, follow=True): """Follow the path in the virtual filesystem and return a tuple representing the location, if any, denoted by the path. Each element in the result tuple will be (name, info), where info will be a VFS item that can be passed to functions like item_mode(). If follow is false, and if the final path element is a symbolic link, don't follow it, just return it in the result. If a path segment that does not exist is encountered during resolution, the result will represent the location of the missing item, and that item in the result will be None. Any attempt to traverse a non-directory will raise a VFS ENOTDIR IOError exception. Any symlinks along the path, including at the end, will be resolved. A VFS IOError with the errno attribute set to ELOOP will be raised if too many symlinks are traversed while following the path. That exception is effectively like a normal ELOOP IOError exception, but will include a terminus element describing the location of the failure, which will be a tuple of (name, info) elements. The parent, if specified, must be a sequence of (name, item) tuples, and will provide the starting point for the resolution of the path. If no parent is specified, resolution will start at '/'. The result may include elements of parent directly, so they must not be modified later. If this is a concern, pass in "name, copy_item(item) for name, item in parent" instead. When want_meta is true, detailed metadata will be included in each result item if it's avaiable, otherwise item.meta will be an integer mode. The metadata size may or may not be provided, but can be computed by item_size() or augment_item_meta(..., include_size=True). 
Setting want_meta=False is rarely desirable since it can limit the VFS to just the metadata git itself can represent, and so, as an example, fifos and sockets will appear to be regular files (e.g. S_ISREG(item_mode(item)) will be true) . But the option is provided because it may be more efficient when only the path names or the more limited metadata is sufficient. Do not modify any item.meta Metadata instances directly. If needed, make a copy via item.meta.copy() and modify that instead. """ if repo.is_remote(): # Redirect to the more efficient remote version return repo.resolve(path, parent=parent, want_meta=want_meta, follow=follow) result = _resolve_path(repo, path, parent=parent, want_meta=want_meta, follow=follow) _, leaf_item = result[-1] if leaf_item and follow: assert not S_ISLNK(item_mode(leaf_item)) return result def try_resolve(repo, path, parent=None, want_meta=True): """If path does not refer to a symlink, does not exist, or refers to a valid symlink, behave exactly like resolve(..., follow=True). If path refers to an invalid symlink, behave like resolve(..., follow=False). """ res = resolve(repo, path, parent=parent, want_meta=want_meta, follow=False) leaf_name, leaf_item = res[-1] if not leaf_item: return res if not S_ISLNK(item_mode(leaf_item)): return res follow = resolve(repo, leaf_name, parent=res[:-1], want_meta=want_meta) follow_name, follow_item = follow[-1] if follow_item: return follow return res def augment_item_meta(repo, item, include_size=False): """Ensure item has a Metadata instance for item.meta. If item.meta is currently a mode, replace it with a compatible "fake" Metadata instance. If include_size is true, ensure item.meta.size is correct, computing it if needed. If item.meta is a Metadata instance, this call may modify it in place or replace it. """ # If we actually had parallelism, we'd need locking... 
assert repo m = item.meta if isinstance(m, Metadata): if include_size and m.size is None: m.size = _compute_item_size(repo, item) return item._replace(meta=m) return item # m is mode meta = Metadata() meta.mode = m meta.uid = meta.gid = None meta.atime = meta.mtime = meta.ctime = 0 if S_ISLNK(m): if isinstance(item, FakeLink): target = item.target else: target = _readlink(repo, item.oid) meta.symlink_target = target meta.size = len(target) elif include_size: meta.size = _compute_item_size(repo, item) return item._replace(meta=meta) def fill_in_metadata_if_dir(repo, item): """If item is a directory and item.meta is not a Metadata instance, attempt to find the metadata for the directory. If found, return a new item augmented to include that metadata. Otherwise, return item. May be useful for the output of contents(). """ if S_ISDIR(item_mode(item)) and not isinstance(item.meta, Metadata): items = tuple(contents(repo, item, (b'.',), want_meta=True)) assert len(items) == 1 assert items[0][0] == b'.' item = items[0][1] return item def ensure_item_has_metadata(repo, item, include_size=False): """If item is a directory, attempt to find and add its metadata. If the item still doesn't have a Metadata instance for item.meta, give it one via augment_item_meta(). May be useful for the output of contents(). """ return augment_item_meta(repo, fill_in_metadata_if_dir(repo, item), include_size=include_size) bup-0.33.3/lib/bup/vint.py000066400000000000000000000112261454333004200152540ustar00rootroot00000000000000"""Binary encodings for bup.""" # Copyright (C) 2010 Rob Browning # # This code is covered under the terms of the GNU Library General # Public License as described in the bup LICENSE file. # Variable length integers are encoded as vints -- see lucene. 
from __future__ import absolute_import from io import BytesIO from bup import compat from bup import _helpers def write_vuint(port, x): port.write(encode_vuint(x)) def encode_vuint(x): try: return _helpers.vuint_encode(x) except OverflowError: ret = b'' bytes_from_uint = compat.bytes_from_uint if x < 0: raise Exception("vuints must not be negative") assert x, "the C version should have picked this up" while True: seven_bits = x & 0x7f x >>= 7 if x: ret += bytes_from_uint(0x80 | seven_bits) else: ret += bytes_from_uint(seven_bits) break return ret def read_vuint(port): c = port.read(1) if not c: raise EOFError('encountered EOF while reading vuint') assert isinstance(c, bytes) if ord(c) == 0: return 0 result = 0 offset = 0 while True: b = ord(c) if b & 0x80: result |= ((b & 0x7f) << offset) offset += 7 c = port.read(1) if not c: raise EOFError('encountered EOF while reading vuint') else: result |= (b << offset) break return result def write_vint(port, x): # Sign is handled with the second bit of the first byte. All else # matches vuint. port.write(encode_vint(x)) def encode_vint(x): try: return _helpers.vint_encode(x) except OverflowError: bytes_from_uint = compat.bytes_from_uint assert x != 0, "the C version should have picked this up" if x < 0: x = -x sign_and_six_bits = (x & 0x3f) | 0x40 else: sign_and_six_bits = x & 0x3f x >>= 6 assert x, "the C version should have picked this up" return bytes_from_uint(0x80 | sign_and_six_bits) + encode_vuint(x) def read_vint(port): c = port.read(1) if not c: raise EOFError('encountered EOF while reading vint') assert isinstance(c, bytes) negative = False result = 0 offset = 0 # Handle first byte with sign bit specially. 
b = ord(c) if b & 0x40: negative = True result |= (b & 0x3f) if b & 0x80: offset += 6 c = port.read(1) elif negative: return -result else: return result while True: b = ord(c) if b & 0x80: result |= ((b & 0x7f) << offset) offset += 7 c = port.read(1) if not c: raise EOFError('encountered EOF while reading vint') else: result |= (b << offset) break if negative: return -result else: return result def write_bvec(port, x): write_vuint(port, len(x)) port.write(x) def read_bvec(port): n = read_vuint(port) return port.read(n) def encode_bvec(x): return _helpers.vuint_encode(len(x)) + x def skip_bvec(port): port.read(read_vuint(port)) def send(port, types, *args): if len(types) != len(args): raise Exception('number of arguments does not match format string') for (type, value) in zip(types, args): if type == 'V': write_vuint(port, value) elif type == 'v': write_vint(port, value) elif type == 's': write_bvec(port, value) else: raise Exception('unknown xpack format string item "' + type + '"') def recv(port, types): result = [] for type in types: if type == 'V': result.append(read_vuint(port)) elif type == 'v': result.append(read_vint(port)) elif type == 's': result.append(read_bvec(port)) else: raise Exception('unknown xunpack format string item "' + type + '"') return result def pack(types, *args): try: return _helpers.limited_vint_pack(types, args) except OverflowError: assert len(types) == len(args) ret = [] for typ, value in zip(types, args): if typ == 'V': ret.append(encode_vuint(value)) elif typ == 'v': ret.append(encode_vint(value)) elif typ == 's': ret.append(encode_bvec(value)) else: assert False return b''.join(ret) def unpack(types, data): port = BytesIO(data) return recv(port, types) bup-0.33.3/lib/bup/xstat.py000066400000000000000000000126741454333004200154470ustar00rootroot00000000000000"""Enhanced stat operations for bup.""" from __future__ import absolute_import import os, sys import stat as pystat from bup import _helpers try: _bup_utimensat = 
_helpers.bup_utimensat except AttributeError as e: _bup_utimensat = False try: _bup_utimes = _helpers.bup_utimes except AttributeError as e: _bup_utimes = False try: _bup_lutimes = _helpers.bup_lutimes except AttributeError as e: _bup_lutimes = False assert sys.version_info[0] < 3 \ or not (_bup_utimensat or _bup_utimes or _bup_lutimes) def timespec_to_nsecs(ts): ts_s, ts_ns = ts return ts_s * 10**9 + ts_ns def nsecs_to_timespec(ns): """Return (s, ns) where ns is always non-negative and t = s + ns / 10e8""" # metadata record rep ns = int(ns) return (ns // 10**9, ns % 10**9) def nsecs_to_timeval(ns): """Return (s, us) where ns is always non-negative and t = s + us / 10e5""" ns = int(ns) return (ns // 10**9, (ns % 10**9) // 1000) def fstime_floor_secs(ns): """Return largest integer not greater than ns / 10e8.""" return int(ns) // 10**9; def fstime_to_timespec(ns): return nsecs_to_timespec(ns) def fstime_to_sec_bytes(fstime): (s, ns) = fstime_to_timespec(fstime) if(s < 0): s += 1 if ns == 0: return b'%d' % s else: return b'%d.%09d' % (s, ns) if sys.version_info[0] > 2: def utime(path, times): """Times must be provided as (atime_ns, mtime_ns).""" os.utime(path, ns=times) def lutime(path, times): """Times must be provided as (atime_ns, mtime_ns).""" os.utime(path, ns=times, follow_symlinks=False) elif _bup_utimensat: def utime(path, times): """Times must be provided as (atime_ns, mtime_ns).""" atime = nsecs_to_timespec(times[0]) mtime = nsecs_to_timespec(times[1]) _bup_utimensat(_helpers.AT_FDCWD, path, (atime, mtime), 0) def lutime(path, times): """Times must be provided as (atime_ns, mtime_ns).""" atime = nsecs_to_timespec(times[0]) mtime = nsecs_to_timespec(times[1]) _bup_utimensat(_helpers.AT_FDCWD, path, (atime, mtime), _helpers.AT_SYMLINK_NOFOLLOW) else: # Must have these if utimensat isn't available. 
def utime(path, times): """Times must be provided as (atime_ns, mtime_ns).""" atime = nsecs_to_timeval(times[0]) mtime = nsecs_to_timeval(times[1]) _bup_utimes(path, (atime, mtime)) def lutime(path, times): """Times must be provided as (atime_ns, mtime_ns).""" atime = nsecs_to_timeval(times[0]) mtime = nsecs_to_timeval(times[1]) _bup_lutimes(path, (atime, mtime)) _cygwin_sys = sys.platform.startswith('cygwin') def _fix_cygwin_id(id): if id < 0: id += 0x100000000 assert(id >= 0) return id class stat_result: __slots__ = ('st_mode', 'st_ino', 'st_dev', 'st_nlink', 'st_uid', 'st_gid', 'st_rdev', 'st_size', 'st_atime', 'st_mtime', 'st_ctime') @staticmethod def from_xstat_rep(st): global _cygwin_sys result = stat_result() (result.st_mode, result.st_ino, result.st_dev, result.st_nlink, result.st_uid, result.st_gid, result.st_rdev, result.st_size, result.st_atime, result.st_mtime, result.st_ctime) = st # Inlined timespec_to_nsecs after profiling result.st_atime = result.st_atime[0] * 10**9 + result.st_atime[1] result.st_mtime = result.st_mtime[0] * 10**9 + result.st_mtime[1] result.st_ctime = result.st_ctime[0] * 10**9 + result.st_ctime[1] if _cygwin_sys: result.st_uid = _fix_cygwin_id(result.st_uid) result.st_gid = _fix_cygwin_id(result.st_gid) return result def stat(path): return stat_result.from_xstat_rep(_helpers.stat(path)) def fstat(path): return stat_result.from_xstat_rep(_helpers.fstat(path)) def lstat(path): return stat_result.from_xstat_rep(_helpers.lstat(path)) def mode_str(mode): result = '' # FIXME: Other types? if pystat.S_ISREG(mode): result += '-' elif pystat.S_ISDIR(mode): result += 'd' elif pystat.S_ISCHR(mode): result += 'c' elif pystat.S_ISBLK(mode): result += 'b' elif pystat.S_ISFIFO(mode): result += 'p' elif pystat.S_ISLNK(mode): result += 'l' elif pystat.S_ISSOCK(mode): result += 's' else: result += '?' 
result += 'r' if (mode & pystat.S_IRUSR) else '-' result += 'w' if (mode & pystat.S_IWUSR) else '-' result += 'x' if (mode & pystat.S_IXUSR) else '-' result += 'r' if (mode & pystat.S_IRGRP) else '-' result += 'w' if (mode & pystat.S_IWGRP) else '-' result += 'x' if (mode & pystat.S_IXGRP) else '-' result += 'r' if (mode & pystat.S_IROTH) else '-' result += 'w' if (mode & pystat.S_IWOTH) else '-' result += 'x' if (mode & pystat.S_IXOTH) else '-' return result def classification_str(mode, include_exec): if pystat.S_ISREG(mode): if include_exec \ and (pystat.S_IMODE(mode) \ & (pystat.S_IXUSR | pystat.S_IXGRP | pystat.S_IXOTH)): return '*' else: return '' elif pystat.S_ISDIR(mode): return '/' elif pystat.S_ISLNK(mode): return '@' elif pystat.S_ISFIFO(mode): return '|' elif pystat.S_ISSOCK(mode): return '=' else: return '' bup-0.33.3/lib/cmd/000077500000000000000000000000001454333004200136755ustar00rootroot00000000000000bup-0.33.3/lib/cmd/bup-import-rdiff-backup000077500000000000000000000033651454333004200202630ustar00rootroot00000000000000#!/usr/bin/env bash cmd_dir="$(cd "$(dirname "$0")" && pwd)" || exit $? set -o pipefail must() { local file=${BASH_SOURCE[0]} local line=${BASH_LINENO[0]} "$@" local rc=$? if test $rc -ne 0; then echo "Failed at line $line in $file" 1>&2 exit $rc fi } usage() { echo "Usage: bup import-rdiff-backup [-n]" \ " " echo "-n,--dry-run: just print what would be done" exit 1 } control_c() { echo "bup import-rdiff-backup: signal 2 received" 1>&2 exit 128 } must trap control_c INT dry_run= while [ "$1" = "-n" -o "$1" = "--dry-run" ]; do dry_run=echo shift done bup() { $dry_run "$cmd_dir/bup" "$@" } snapshot_root="$1" branch="$2" [ -n "$snapshot_root" -a "$#" = 2 ] || usage if [ ! -e "$snapshot_root/." ]; then echo "'$snapshot_root' isn't a directory!" exit 1 fi backups=$(must rdiff-backup --list-increments --parsable-output "$snapshot_root") \ || exit $? backups_count=$(echo "$backups" | must wc -l) || exit $? 
counter=1 echo "$backups" | while read timestamp type; do tmpdir=$(must mktemp -d import-rdiff-backup-XXXXXXX) || exit $? echo "Importing backup from $(date -d @$timestamp +%c) " \ "($counter / $backups_count)" 1>&2 echo 1>&2 echo "Restoring from rdiff-backup..." 1>&2 must rdiff-backup -r $timestamp "$snapshot_root" "$tmpdir" echo 1>&2 echo "Importing into bup..." 1>&2 tmpidx=$(must mktemp -u import-rdiff-backup-idx-XXXXXXX) || exit $? must bup index -ux -f "$tmpidx" "$tmpdir" must bup save --strip --date="$timestamp" -f "$tmpidx" -n "$branch" "$tmpdir" must rm -f "$tmpidx" must rm -rf "$tmpdir" counter=$((counter+1)) echo 1>&2 echo 1>&2 done bup-0.33.3/lib/cmd/bup-import-rsnapshot000077500000000000000000000026271454333004200177470ustar00rootroot00000000000000#!/bin/sh # Does an import of a rsnapshot archive. cmd_dir="$(cd "$(dirname "$0")" && pwd)" || exit $? usage() { echo "Usage: bup import-rsnapshot [-n]" \ " []" echo "-n,--dry-run: just print what would be done" exit 1 } DRY_RUN= while [ "$1" = "-n" -o "$1" = "--dry-run" ]; do DRY_RUN=echo shift done bup() { $DRY_RUN "$cmd_dir/bup" "$@" } SNAPSHOT_ROOT=$1 TARGET=$2 [ -n "$SNAPSHOT_ROOT" -a "$#" -le 2 ] || usage if [ ! -e "$SNAPSHOT_ROOT/." ]; then echo "'$SNAPSHOT_ROOT' isn't a directory!" exit 1 fi cd "$SNAPSHOT_ROOT" || exit 2 for SNAPSHOT in *; do [ -e "$SNAPSHOT/." ] || continue echo "snapshot='$SNAPSHOT'" >&2 for BRANCH_PATH in "$SNAPSHOT/"*; do BRANCH=$(basename "$BRANCH_PATH") || exit $? [ -e "$BRANCH_PATH/." ] || continue [ -z "$TARGET" -o "$TARGET" = "$BRANCH" ] || continue echo "snapshot='$SNAPSHOT' branch='$BRANCH'" >&2 # Get the snapshot's ctime DATE=$(perl -e '@a=stat($ARGV[0]) or die "$ARGV[0]: $!"; print $a[10];' "$BRANCH_PATH") [ -n "$DATE" ] || exit 3 TMPIDX=bupindex.$BRANCH.tmp bup index -ux -f "$TMPIDX" "$BRANCH_PATH/" || exit $? bup save --strip --date="$DATE" \ -f "$TMPIDX" -n "$BRANCH" \ "$BRANCH_PATH/" || exit $? rm "$TMPIDX" || exit $? 
done done bup-0.33.3/lib/cmd/bup.c000066400000000000000000000267621454333004200146440ustar00rootroot00000000000000 #define PY_SSIZE_T_CLEAN #define _GNU_SOURCE 1 // asprintf #undef NDEBUG // According to Python, its header has to go first: // http://docs.python.org/3/c-api/intro.html#include-files #include #if PY_MAJOR_VERSION < 3 || (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION < 7) #define BUP_STR(x) #x #define BUP_XSTR(x) BUP_STR(x) #pragma message "Python versions older than 3.7 are not supported; detected X.Y " \ BUP_XSTR(PY_MAJOR_VERSION) "." BUP_XSTR(PY_MINOR_VERSION) #error "Halting" #endif #include #include #include #include #include #include #include #if defined(__FreeBSD__) || defined(__NetBSD__) # include #endif #include #include #include "bup/compat.h" #include "bup/intprops.h" #include "bup/io.h" static int prog_argc = 0; static char **prog_argv = NULL; static char *orig_env_pythonpath = NULL; static PyObject* get_argv(PyObject *self, PyObject *args) { if (!PyArg_ParseTuple(args, "")) return NULL; PyObject *result = PyList_New(prog_argc); int i; for (i = 0; i < prog_argc; i++) { PyObject *s = PyBytes_FromString(prog_argv[i]); if (!s) die(2, "cannot convert argument to bytes: %s\n", prog_argv[i]); PyList_SET_ITEM(result, i, s); } return result; } static PyMethodDef bup_main_methods[] = { {"argv", get_argv, METH_VARARGS, "Return the program's current argv array as a list of byte strings." 
}, {NULL, NULL, 0, NULL} }; static int setup_module(PyObject *mod) { if (!orig_env_pythonpath) { PyObject_SetAttrString(mod, "env_pythonpath", Py_None); } else { PyObject *py_p = PyBytes_FromString(orig_env_pythonpath); if (!py_p) die(2, "cannot convert PYTHONPATH to bytes: %s\n", orig_env_pythonpath); PyObject_SetAttrString(mod, "env_pythonpath", py_p); Py_DECREF(py_p); } return 1; } static struct PyModuleDef bup_main_module_def = { .m_base = PyModuleDef_HEAD_INIT, .m_name = "bup_main", .m_doc = "Built-in bup module providing direct access to argv.", .m_size = -1, .m_methods = bup_main_methods }; PyObject * PyInit_bup_main(void) { PyObject *mod = PyModule_Create(&bup_main_module_def); if (!setup_module(mod)) { Py_DECREF(mod); return NULL; } return mod; } static void setup_bup_main_module(void) { char *path = getenv("PYTHONPATH"); if (path) orig_env_pythonpath = strdup(path); if (PyImport_AppendInittab("bup_main", PyInit_bup_main) == -1) die(2, "unable to register bup_main module\n"); } /* * Older realpath implementations (e.g. 4.4BSD) required the second * argument to be non-NULL, and then POSIX added the option of NULL * with the semantics of malloc'ing a big-enough buffer. Define a * helper function with the NULL semantics to accomodate older * platforms. 
* * gnulib has a list of systems that are known to reject NULL as the * 2nd argument: * https://www.gnu.org/software/gnulib/manual/html_node/realpath.html */ #define BUP_HAVE_POSIX_REALPATH // FreeBSD < 7: bup's FreeBSD code does not use realpath(3) #if defined(__NetBSD__) # if !__NetBSD_Prereq__(7,0,0) # undef BUP_HAVE_POSIX_REALPATH # endif // OpenBSD: https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/sys/sys/param.h.diff?r1=1.91&r2=1.92&f=h #elif defined(__OpenBSD__) && __OpenBSD__ < 201111 # undef BUP_HAVE_POSIX_REALPATH #endif char * bup_realpath(const char *pathname) { #ifdef BUP_HAVE_POSIX_REALPATH return realpath(pathname, NULL); #else char resolvedname[PATH_MAX]; char *ret = realpath(pathname, resolvedname); if (ret != NULL) { assert(ret == resolvedname); ret = strdup(ret); } return ret; #endif } #if defined(__APPLE__) && defined(__MACH__) static char *exe_parent_dir(const char * const argv_0) { char *mpath = NULL; char spath[2048]; uint32_t size = sizeof(spath); int rc = _NSGetExecutablePath(spath, &size); if (rc == -1) { mpath = malloc(size); if (!mpath) die(2, "unable to allocate memory for executable path\n"); rc = _NSGetExecutablePath(mpath, &size); } if(rc != 0) die(2, "unable to find executable path\n"); char *path = mpath ? 
mpath : spath; char *abs_exe = bup_realpath(path); if (!abs_exe) die(2, "cannot resolve path (%s): %s\n", strerror(errno), path); char * const abs_parent = strdup(dirname(abs_exe)); assert(abs_parent); if (mpath) free(mpath); free(abs_exe); return abs_parent; } #elif defined(__FreeBSD__) static char *exe_path () { const int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1}; size_t path_len; int rc = sysctl (mib, 4, NULL, &path_len, NULL, 0); if (rc != 0) die(2, "unable to determine executable path length\n"); char *path = malloc (path_len); if (!path) die(2, "unable to allocate memory for executable path\n"); rc = sysctl (mib, 4, path, &path_len, NULL, 0); if (rc != 0) die(2, "unable to determine executable path via sysctl\n"); return path; } static char *exe_parent_dir(const char * const argv_0) { char * const exe = exe_path(); if (!exe) die(2, "unable to determine executable path\n"); char * const parent = strdup(dirname(exe)); if (!parent) die(2, "unable to determine parent directory of executable\n"); free(exe); return parent; } #else // not defined(__FreeBSD__) /// Use /proc if possible, and if all else fails, search in the PATH #if defined(__linux__) # define PROC_SELF_EXE "/proc/self/exe" #elif defined(__sun) || defined (sun) # define PROC_SELF_EXE "/proc/self/path/a.out" #endif static char *find_in_path(const char * const name, const char * const path) { char *result = NULL; char *tmp_path = strdup(path); assert(tmp_path); const char *elt; char *tok_path = tmp_path; while ((elt = strtok(tok_path, ":")) != NULL) { tok_path = NULL; char *candidate; int rc = asprintf(&candidate, "%s/%s", elt, name); assert(rc >= 0); struct stat st; rc = stat(candidate, &st); if (rc != 0) { switch (errno) { case EACCES: case ELOOP: case ENOENT: case ENAMETOOLONG: case ENOTDIR: break; default: die(2, "cannot stat %s: %s\n", candidate, strerror(errno)); break; } } else if (S_ISREG(st.st_mode)) { if (access(candidate, X_OK) == 0) { result = candidate; break; } switch (errno) { 
case EACCES: case ELOOP: case ENOENT: case ENAMETOOLONG: case ENOTDIR: break; default: die(2, "cannot determine executability of %s: %s\n", candidate, strerror(errno)); break; } } free(candidate); } free(tmp_path); return result; } static char *find_exe_parent(const char * const argv_0) { char *candidate = NULL; const char * const slash = strchr(argv_0, '/'); if (slash) { candidate = strdup(argv_0); assert(candidate); } else { const char * const env_path = getenv("PATH"); if (!env_path) die(2, "no PATH and executable isn't relative or absolute: %s\n", argv_0); char *path_exe = find_in_path(argv_0, env_path); if (path_exe) { char * abs_exe = bup_realpath(path_exe); if (!abs_exe) die(2, "cannot resolve path (%s): %s\n", strerror(errno), path_exe); free(path_exe); candidate = abs_exe; } } if (!candidate) return NULL; char * const abs_exe = bup_realpath(candidate); if (!abs_exe) die(2, "cannot resolve path (%s): %s\n", strerror(errno), candidate); free(candidate); char * const abs_parent = strdup(dirname(abs_exe)); assert(abs_parent); free(abs_exe); return abs_parent; } static char *exe_parent_dir(const char * const argv_0) { #ifdef PROC_SELF_EXE char sbuf[2048]; char *path = sbuf; size_t path_n = sizeof(sbuf); ssize_t len; while (1) { len = readlink(PROC_SELF_EXE, path, path_n); if (len == -1 || (size_t) len != path_n) break; if (!INT_MULTIPLY_OK(path_n, 2, &path_n)) die(2, "memory buffer for executable path would be too big\n"); if (path != sbuf) free(path); path = malloc(path_n); if (!path) die(2, "unable to allocate memory for executable path\n"); } if (len != -1) { path[len] = '\0'; char *result = strdup(dirname(path)); if (path != sbuf) free(path); return result; } switch (errno) { case ENOENT: case EACCES: case EINVAL: case ELOOP: case ENOTDIR: case ENAMETOOLONG: break; default: die(2, "cannot resolve %s: %s\n", path, strerror(errno)); break; } if (path != sbuf) free(path); #endif return find_exe_parent(argv_0); } #endif // use /proc if possible, and if all else 
fails, search in the PATh static void setenv_or_die(const char *name, const char *value) { int rc = setenv(name, value, 1); if (rc != 0) die(2, "setenv %s=%s failed (%s)\n", name, value, strerror(errno)); } static void prepend_lib_to_pythonpath(const char * const exec_path, const char * const relative_path) { char *parent = exe_parent_dir(exec_path); assert(parent); char *bupmodpath; int rc = asprintf(&bupmodpath, "%s/%s", parent, relative_path); assert(rc >= 0); struct stat st; rc = stat(bupmodpath, &st); if (rc != 0) die(2, "unable find lib dir (%s): %s\n", strerror(errno), bupmodpath); if (!S_ISDIR(st.st_mode)) die(2, "lib path is not dir: %s\n", bupmodpath); char *curpypath = getenv("PYTHONPATH"); if (curpypath) { char *path; int rc = asprintf(&path, "%s:%s", bupmodpath, curpypath); assert(rc >= 0); setenv_or_die("PYTHONPATH", path); free(path); } else { setenv_or_die("PYTHONPATH", bupmodpath); } free(bupmodpath); free(parent); } #if PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION < 8 # define bup_py_main bup_py_bytes_main #else # define bup_py_main Py_BytesMain #endif #if defined(BUP_DEV_BUP_PYTHON) && defined(BUP_DEV_BUP_EXEC) # error "Both BUP_DEV_BUP_PYTHON and BUP_DEV_BUP_EXEC are defined" #endif #ifdef BUP_DEV_BUP_PYTHON int main(int argc, char **argv) { assert(argc > 0); prog_argc = argc; prog_argv = argv; setup_bup_main_module(); prepend_lib_to_pythonpath(argv[0], "../lib"); return bup_py_main (argc, argv); } #elif defined(BUP_DEV_BUP_EXEC) int main(int argc, char **argv) { assert(argc > 0); prog_argc = argc - 1; prog_argv = argv + 1; setup_bup_main_module(); prepend_lib_to_pythonpath(argv[0], "../lib"); if (argc == 1) return bup_py_main (1, argv); // This can't handle a script with a name like "-c", but that's // python's problem, not ours. 
return bup_py_main (2, argv); } #else // normal bup command int main(int argc, char **argv) { assert(argc > 0); prog_argc = argc; prog_argv = argv; setup_bup_main_module(); prepend_lib_to_pythonpath(argv[0], ".."); char *bup_argv[] = { argv[0], "-m", "bup.main" }; return bup_py_main (3, bup_argv); } #endif // normal bup command bup-0.33.3/lib/web/000077500000000000000000000000001454333004200137075ustar00rootroot00000000000000bup-0.33.3/lib/web/list-directory.html000066400000000000000000000030051454333004200175500ustar00rootroot00000000000000{% comment This template expects the default xhtml autoescaping. %} Directory listing for {{ path }}
{% if files_hidden %}
{% if hidden_shown %} Hide hidden files {% else %} Show hidden files {% end %}
{% end %} {% for (display, link, size) in dir_contents %} {% end %}
Name Size
{{ display }} {% if size is not None %}{{ size }}{% else %} {% end %}
bup-0.33.3/lib/web/static/000077500000000000000000000000001454333004200151765ustar00rootroot00000000000000bup-0.33.3/lib/web/static/styles.css000066400000000000000000000003311454333004200172300ustar00rootroot00000000000000body { font-family: sans-serif } #wrapper { width: 90%; margin: auto; } #breadcrumb { margin: 10px 0; } table { width: auto; } th { text-align: left; } .dir-size { padding-left:15px; }bup-0.33.3/note/000077500000000000000000000000001454333004200133315ustar00rootroot00000000000000bup-0.33.3/note/0.27.1-from-0.27.md000066400000000000000000000016331454333004200157300ustar00rootroot00000000000000 Notable changes in 0.27.1 as compared to 0.27 ============================================= May require attention --------------------- * In previous versions, a `--sparse` restore might have produced incorrect data. Please treat any existing `--sparse` restores as suspect. The problem should be fixed in this release, and the `--sparse` tests have been substantially augmented. Thanks to (at least) ==================== Frank Gevaerts (1): restore: test --sparse with zeros at 64k boundary Greg Troxel Marcus Schopen Rob Browning (7): Use $RANDOM seed for --sparse random tests restore: add generative --sparse testing restore: fix --sparse corruption Merge restore --sparse corruption fix Add note/0.27.1-from-0.27.md and mention in README restore: fix --sparse fix (find_non_sparse_end) test_server_split_with_indexes: close packwriter Robert S. Edmonds bup-0.33.3/note/0.28-from-0.27.1.md000066400000000000000000000107731454333004200157360ustar00rootroot00000000000000 Notable changes in 0.28 as compared to 0.27.1 ============================================= May require attention --------------------- * The default install PREFIX is now "/usr/local". * BINDIR, DOCDIR, LIBDIR, and MANDIR settings no longer side-step DESTDIR. i.e. `make DESTDIR=/x MANDIR=/y` install will install the manpages to "/x/y" not just "/y". 
* The index format has changed, which will trigger a full index rebuild on the next index run, making that run more expensive than usual. * When given `--xdev`, `bup save` should no longer skip directories that are explicitly listed on the command line when the directory is both on a separate filesystem, and a subtree of another path listed on the command line. Previously `bup save --xdev / /usr` could skip "/usr" if it was on a separate filesystem from "/". * Tags along a branch are no longer shown in the branch's directory in the virtual filesystem (VFS). i.e. given `bup tag special /foo/latest`, "/foo/special" will no longer be visible via `bup ls`, `bup web`, `bup fuse`, etc., but the tag will still be available as "/.tag/special". General ------- * bup now provides experimental `rm` and `gc` subcommands, which should allow branches and saves to be deleted, and their storage space reclaimed (assuming nothing else refers to the relevant data). For the moment, these commands require an `--unsafe` argument and should be treated accordingly. Although if an attempt to `join` or `restore` the data you still care about after a `gc` succeeds, that's a fairly encouraging sign that the commands worked correctly. (The `t/compare-trees` command in the source tree can be used to help test before/after results.) Note that the current `gc` command is probabilistic, which means it may not remove *all* of the obsolete data from the repository, but also means that the command should be fairly efficient, even for large repositories. * bup may have less impact on the filesystem cache. It now attempts to leave the cache roughly the way it found it when running a `save` or `split`. * A specific Python can be specified at `./configure` time via PYTHON, i.e. `PYTHON=/some/python ./configure`, and that Python will be embedded in all of the relevant scripts as an explicit "#!/..." line during `make install`. * `bup web` will now attempt an orderly shutdown when it receives a SIGTERM. 
* `bup web` will listen on a filesystem socket when given an address like "unix://...". * bup no longer limits the number of files in a directory to 100000. The limit is now UINT_MAX. * `bup fuse` now has a `--verbose` argument, and responds to `--debug`. Bugs ---- * bup save should not fail when asked to save a subdirectory of a directory that was completely up to date in the index. Previously this could cause a "shalists" assertion failure. * The way bup writes the data to disk (the packfiles in particular), should be a bit safer now if there is a coincident power failure or system crash. * A problem has been fixed that could cause bup to ignore the current TZ setting when computing the local time. * bup should no longer generate broken commits when the timezone offset is not an integer number of hours (e.g. TZ=Australia/Adelaide). * `bup midx --output` should now work when used with `--auto` or `--force`. * `bup import-rsnapshot` should exit with a status of 1, not -1. * bup should be more likely to get the data to permanent storage safely on OS X, which appears to follow a surprising interpretation of the `fsync()` specification. * `bup web` should handle non-ASCII paths better. It will no longer treat them as (and try to convert them to) Unicode (which they're not). * `bup restore` should no longer crash when an attempt to remove an xattr returns EACCES. Build system ------------ * The tests can now be run in parallel (and possibly much more quickly) via `make -j check`. * The build system now creates and uses cmd/bup-python which refers to the `./configure` selected python. 
Thanks to (at least) ==================== Aidan Hobson Sayers, Ben Kelly, Ben Wiederhake, Brandon Smith, Brian Minton, David Kettler, Frank Gevaerts, Gabriel Filion, Greg Troxel, James Lott, Karl-Philipp Richter, Luis Sanchez Sanchez, Marcus Schopen, Mark J Hewitt, Markus, Mathieu Schroeter, Michael March, Nimen Nachname, Nix, Patrick Rouleau, Paul Kronenwetter, Rob Browning, Robert Edmonds, Simon Persson, Tadej Janež, Thomas Klausner, Tilo Schwarz, Tim Riemenschneider, Wayne Scott, pspdevel, and stevelr bup-0.33.3/note/0.28.1-from-0.28.md000066400000000000000000000007571454333004200157400ustar00rootroot00000000000000 Notable changes in 0.28.1 as compared to 0.28 ============================================= General ------- * Builds from unpacked release archives (created via "git archive TAG") should work again. Build system ------------ * test-web.sh and test-meta.sh should now work on newer versions of OS X, and with Homebrew rsync. * cmd/bup-python's permissions should now respect the umask. Thanks to (at least) ==================== Gernot Schulz, Karl Semich, Rob Browning, and ilovezfs bup-0.33.3/note/0.29-from-0.28.1.md000066400000000000000000000044531454333004200157360ustar00rootroot00000000000000 Notable changes in 0.29 as compared to 0.28.1 ============================================= May require attention --------------------- * The minimum Python version is now to 2.6. * The index format has been adjusted to handle a larger number of entries, which will trigger a full index rebuild on the next index update, making that run more expensive than usual. * The `gc` command should now clean up its temporary bloom filters, but filters created by earlier invocations may still exist in your repositories in the objects/pack/ directory as tmp-gc-*.bloom files, It should be safe to delete these files when no bup commands are running. General ------- * Some Python 2.6 compatibility problems have been fixed. 
* `index` runs may be much less expensive for parts of the filesystem that haven't changed since the last save. * An experimental `prune-older` command has been added. It removes (permanently deletes) all saves except those preserved by a set of arguments like `--keep-monthlies-for 3y`. See `bup help prune-older` for further information. * `gc` should now only require up to one packfile (about 1GB) of temporary space while running. Previously it might require much more. * `gc` should read much less data now, which may make it notably faster. * The `gc` `--threshold` argument should actually be allowed now. * `gc` should be able to handle deeper filesystem trees without crashing. Previously it was constrained by the default Python stack depth limit. * `save` and `split` should reject invalid `-n` names immediately instead of waiting until after their work is complete. * bup should no longer crash when trying to fsync on an SMB filesystem under OS X. * `save` and `restore` should work on ntfs-3g filesystems now. Previously they might crash when trying to manipulate file attrs. Build system ------------ * The web tests should be skipped if tornado is not detected. * The fuse tests should be skipped if the fuse module is not detected. * `make clean` should work better on non-Linux systems. Thanks to (at least) ==================== Andrew Skretvedt, Ben Kelly, Bruno Bigras, Greg Troxel, Jacob Edelman, Jonathan Wright, Julien Sanchez, Mark J Hewitt, Nick Alcock, Pascal Honoré, Rob Browning, Wayne Scott, axion, ilovezfs, phillipproell, and vi0oss bup-0.33.3/note/0.29.1-from-0.29.md000066400000000000000000000012431454333004200157310ustar00rootroot00000000000000 Notable changes in 0.29.1 as compared to 0.29 ============================================= May require attention --------------------- * Running gc with a --threshold of 0 no longer runs the risk of corrupting the repository. (The default threshold is 10). 
Previously, gc could delete a packfile after rewriting it when the packfile didn't change. Build system ------------ * The `bup rm` tests should be less sensitive to variations in rsync's output. * The `bup ls` tests should work on current Cygwin64. Thanks to (at least) ==================== AL, Alexander Barton, Andrea Arrighetti, Paul Kronenwetter, Rob Browning, Tim Riemenschneider and Tru Huynh bup-0.33.3/note/0.29.2-from-0.29.1.md000066400000000000000000000012441454333004200160720ustar00rootroot00000000000000 Notable changes in 0.29.2 as compared to 0.29.1 =============================================== May require attention --------------------- * Non-python commands like import-rdiff-backup and import-rsnapshot should now be installed correctly. Previously they might be truncated during the install. Build system ------------ * The `bup rm` tests should be less sensitive to variations in git's output. * The superuser detection on CygWin should work with newer systems. Thanks to (at least) ==================== Andrew Skretvedt, Basil Mohamed Gohar, Ben Kelly, Greg Troxel, Iar De, Johannes Berg, Paul Kronenwetter, Rob Browning, Ruvim Pinka, larpon, and renpj bup-0.33.3/note/0.29.3-from-0.29.2.md000066400000000000000000000013721454333004200160760ustar00rootroot00000000000000 Notable changes in 0.29.3 as compared to 0.29.2 =============================================== General ------- * Newer versions of par2 can process in parallel using multiple threads/cores, and they do so automatically. This ends up competing with `bup fsck`s own parallelism, enabled by `-j`, in such a way that the command can actually run much *slower* (and be much more expensive) than it would have been with no parallelism at all. When appropriate `bup fsck` now disables par2's competing parallelism (via its `-t1` option) to avoid the contention. Build system ------------ * test-prune-older should no longer be disturbed by git's automatic background gc. 
Thanks to (at least) ==================== Rob Browning and Robert Edmonds bup-0.33.3/note/0.30-from-0.29.3.md000066400000000000000000000121301454333004200157200ustar00rootroot00000000000000 Notable changes in 0.30 as compared to 0.29.3 ============================================= May require attention --------------------- * The minimum `git` version required is now 1.5.6. * The `prune-older` command now keeps the most recent save in each period group (day, week, month, ...) rather than the oldest. * `bup` now adds a zero-padded suffix to the names of saves with the same timestamp (e.g. 1970-01-01-214640-07) in order to avoid duplicates. The sequence number currently represents the save's reversed position in default `git rev-list` order, so that given: /foo/1970-01-01-214640-09 /foo/1970-01-01-214640-10 In the normal case, the -10 save would be the next save made after -09 (and the -09 save would be the single parent commit for -10). * `bup` is not currently compatible with Python 3 and will now refuse to run if the Python version is not 2 unless `BUP_ALLOW_UNEXPECTED_PYTHON_VERSION=true` is set in the environment (which can be useful for development and testing). * `bup ls -s` now reports the tree hash for commits unless `--commit-hash` is also specified. General ------- * `bup get` has been added. This command allows the transfer or rewriting of data within and between repositories, local or remote. Among other things, it can be used to append remote saves to a local branch, which by extension supports merging repositories. See `bup-get(1)` for further information, and please note, this is a new *EXPERIMENTAL* command that can (intentionally) modify your data in destructive ways. It is potentially much more dangerous than most `bup` commands. Treat with caution. * `bup` can now restore directly from a remote repository via `bup restore -r host:path ...`. See `bup-restore(1)` for more information. 
* `bup ls` can now report information for remote repositories via `bup ls -r host:path ...`. See `bup-ls(1)` for more information. * `bup` should respect the git pack.packSizeLimit setting when writing packfiles, though at the moment it will only affect a remote repository when the option is set there directly. * `bup save` now stores the size for all links and normal files. For directories saved using this new format retrieving file sizes for larger files should be notably less expensive. Among other things this may improve the performance of commands like `bup ls -l` or `find /some/fuse/dir -ls`. * The VFS (Virtual File System) that underlies many operations, and provides the basis for commands like `restore`, `ls`, etc. has been rewritten in a way that makes remote repository access easier, should decrease the memory footprint in some cases (e.g. for bup fuse), and should make it easier to provide more selective caching. At the moment, data is just evicted at random once a threshold is reached. * A `--noop <--blobs|--tree>` option has been added to `bup split` which prints the resulting id without storing the data in the repository. Bugs ---- * The way `bup` handles output from subprocesses (diagnostics, progress, etc.) has been adjusted in a way that should make it less likely that bup might continue running after the main process has exited, say via a C-c (SIGINT). * `bup` should now respect the specified compression level when writing to a remote repository. * `bup restore` now creates FIFOs with mkfifo, not mknod, which is more portable. The previous approach did not work correctly on (at least) some versions of NetBSD. * `bup` should no longer just crash when it encounters a commit with a "mergetag" header. For the moment, it just ignores them, and they'll be discarded whenever `bup` rewrites a commit, say via the `rm`, `prune-older`, or `get` commands. 
* The bloom command should now end progress messages with \r, not \n, which avoids leaving spurious output lines behind at exit. * A missing space has been added to the `bup split --bench` output. * Various Python version compatibility problems have been fixed, including some of the incompatibilities introduced by Python 3. * Some issues with mincore on WSL have been fixed. * Some Android build incompatibilities have been fixed. Build system ------------ * The tests no longer assume pwd is in /bin. * The tests should be less sensitive to the locale. * `test-meta` should no longer try to apply chattr +T to files. 'T' only works for directories, and newer Linux kernels actually reject the attempt (as of at least 4.12, and maybe 4.10). * `test-rm` should no longer fail when newer versions of git automatically create packed-refs. * `test-sparse-files` should be less likely to fail when run inside a container. * `test-index-check-device` and `test-xdev` now use separate files for their loopback mounts. Previously each was mounting the same image twice, which could produce the same device number. Thanks to (at least) ==================== Alexander Barton, Artem Leshchev, Ben Kelly, Fabian 'xx4h' Melters, Greg Troxel, Jamie Wyrick, Julien Goodwin, Mateusz Konieczny, Nathaniel Filardo, Patrick Rouleau, Paul Kronenwetter, Rob Browning, Robert Evans, Tim Riemenschneider, and bedhanger bup-0.33.3/note/0.30.1-from-0.30.md000066400000000000000000000066041454333004200157170ustar00rootroot00000000000000 Notable changes in 0.30.1 as compared to 0.30 ============================================= May require attention --------------------- * Previous versions of bup might have saved filesystem directories with incorrect metadata, but the file contents should be fine. 
This could have happened if bup encountered an error while trying to read the metadata for one of the files in a directory, or if bup were asked to save two different files with the same name to the same destination directory (e.g. via the strip/graft options). In cases where this has happened bup may present either generic or incorrect metadata for some of the paths in the affected directory. * The way `bup index --fake-valid` works has been made to match the documentation in the man page so that it can actually be used for the stated purpose (of avoiding 'boring' files.) General ------- * bup should more accurately recognize git versions. Previously, for example, it would reject relase candidates like "1.5.2-rc3" or (apparently) "1.5.2-rc3 (something ...)". * When `BUP_ASSUME_GIT_VERSION_IS_FINE` is set to true, yes or 1 in the environment, bup will assume the version of git that's available in the `PATH` is acceptable, and skip the version check. Bugs ---- * bup should close files more aggressively during various operations which, for example, should return space to the filesystem sooner during midx operations, whether explicit as `bup midx ...` or implicit during other operations like `bup save`. Previously, it could leave deleted files open, preventing their space from being returned until bup exited. Similar improvements should prevent bup from crashing during some operations like `gc` on filesystems that don't actually remove a deleted files's path from a directory until it the file is closed. * bup should no longer crash when trying to read the target of synthetic symlinks like `/BRANCH/latest`. For example: File "/usr/local/lib/bup/bup/vfs.py", line 524, in _compute_item_size return len(_readlink(repo, item.oid)) AttributeError: 'FakeLink' object has no attribute 'oid' * `bup bloom` should notice that it needs to regenerate the filter in some cases. For example, it was supposed to regenerate the filter if the `-k` value changed, but it wouldn't. 
* The DESIGN document is clearer about the fact that `bup save --smaller` will actually omit larger files from the saved tree entirely. * Failing to specify a port for `bup web` on the command line (only specifying a hostname) should no longer cause an unrelated syntax error. * Commit date timezone offsets of 0 will no longer be interpreted as local time instead in some cases. * bup should release some midx-related data structures (mmap, etc.) immediately on close, instead of relying on garbage collection. * A memory leak caused by some of bup's C-level stat calls has been fixed. * Some syntax issues in the manpages have been fixed. Build system ------------ * The automated tests have moved from FreeBSD 11.2 to 12.1. * t/test-fuse should be more portable. In particular, a compatibility issue with Fedora 31 has been fixed, and it should be less affected by local timezone variances. * Some internal build dependencies have been fixed. Thanks to (at least) ==================== Aidan Hobson Sayers, Greg Troxel, Johannes Berg, Luca Carlon, Reinier Maas, Rob Browning, and Wyatt Alt bup-0.33.3/note/0.31-from-0.30.1.md000066400000000000000000000056021454333004200157150ustar00rootroot00000000000000 Notable changes in 0.31 (since 0.30.1) ====================================== * Python 3 is now supported, and Python 2 support is deprecated. It's possible that we'll stop new development for Python 2 fairly soon. If so, we'll probably continue to fix bugs in the last Python 2 compatible version for a while, but please make plans to migrate. * `bup features` has been added. It reports information about bup itself, including the Python version, and the current availability of features like readline or support for POSIX ACLs. May require attention --------------------- * bup now relies on libacl directly instead of python-pylibacl, which will require installing the relevant packages (e.g. libacl1-dev) before building. 
* bup now relies on libreadline directly instead of python's built-in support, which will require installing the relevant packages (e.g. libreadline-dev) before building. * `bup version --tag` has been removed. It was actually a synonym for `bup version`, which still works fine. The fact that the version may have a corresponding git tag is no longer relevant to the command. * `git describe` style strings will no longer appear in the `bup version` for non-release builds. The version in that case will currently just be formatted as `PENDING_RELEASE~HASH`, where `~` has the [Debian semantics](https://www.debian.org/doc/debian-policy/ch-controlfields.html#version), for example, 0.31~5ac3821c0f1fbd6a1b1742e91ffd556cd1116041). This is part of the fix for the issue with varying `git archive` content mentioned below. General ------- * `bup fsck` should now avoid displaying `par2` errors when testing it for parallel processing support. * The documentation for the hashsplit algorithm in DESIGN has been updated to reflect quirks of the implementation, which didn't quite match the original specification. Bugs ---- * When running `bup on` with a remote ssh `ForceCommand`, bup should now respect that setting when running sub-commands. * It should no longer be possible for the content of archives generated by `git archive` (including releases retrieved from github) to vary based on the current set of repository refs (tags, branches, etc.). Previously archives generated from the same tag could differ slightly in content. Build and install ----------------- * `bup` itself is now located in now located in the cmd/ directory in the install tree and finds sub-commands, etc. relative to its own location. * The metadata tests should no longer fail on systems with SELinux enabled. Thanks to (at least) ==================== Aaron M. 
Ucko, Aidan Hobson Sayers, Alexander Barton, Brian Minton, Christian Cornelssen, Eric Waguespack, Gernot Schulz, Greg Troxel, Hartmut Krafft, Johannes Berg, Luca Carlon, Mark J Hewitt, Ralf Hemmecke, Reinier Maas, Rob Browning, Robert Edmonds, Wyatt Alt, Zev Eisenberg, gkonstandinos, and kd7spq bup-0.33.3/note/0.32-from-0.31.md000066400000000000000000000034601454333004200155600ustar00rootroot00000000000000 Notable changes in 0.32 since 0.31 ================================== * Python 3 is now preferred. Python 2 support is deprecated, and it's possible that we'll stop new development for Python 2 fairly soon. If so, we'll probably continue to fix bugs in the last Python 2 compatible version for a while, but please make plans to migrate. At the moment, ./configure doesn't explicitly look for any python newer than python3.8 by default (though one might be selected by the eventual python3 fallback). If desired, you can explicitly select a version like this: PYTHON=python3.9 ./configure * With Python 3.9, the command line reported in tools like `ps` and `top` will be `python...` rather than `bup...`. This was caused by an upstream change that appears to have been reverted. We're likely to avoid the issue entirely in a future relese. Bugs ---- * A number of Python 3 compatibility problems have been fixed. * `bup web` should no longer crash when attempting to listen on IPV6 interfaces. * `bup restore -vv` should no longer crash when printing paths with Python 3. * `bup --prune-older --pretend` should format the plus/minus lines correctly now. * The `TTY_WIDTH` should now be correctly propagated to subprocesses and remotes.. * Errors encountered while writing packfiles should be handled more carefully. * Some issues with the handling of integral type signs and sizes on the C side have been fixed. Build and install ----------------- * The tests are now handled by pytest. 
See the [README](../README#getting-started) for the additional dependency information and further instructions. Thanks to (at least) ==================== Christian Brabandt, Greg Troxel, Gustavo Goretkin, Jean-Paul Marmorat, Johannes Berg, Karl-Philipp Richter, Rob Browning, and danpawlik bup-0.33.3/note/0.32.1-from-0.32.md000066400000000000000000000024431454333004200157200ustar00rootroot00000000000000 Notable changes in 0.32.1 since 0.32 ==================================== Bugs ---- * POSIX1e ACLs should be restored more correctly now. Previously bup incorrectly restored default (`ACL_TYPE_DEFAULT`) ACLs as access acls (`ACL_TYPE_ACCESS`). When both existed, it restored the access ACL first and then the default ACL as an access ACL. Now, bup should restore each with the proper type. This issue only affects saves created on platforms where bup currently supports ACLs, so presumably mostly just saves created on Linux since the current ACL support depends on non-standard functions like `acl_extended(3)`. There is one remaining issue, which isn't fixed in this release, but is fixed in 0.33.1 (because fixing it here could create saves that are backward incompatible with 0.33). The problem is that in this version and older versions, bup stores ACLs in the `acl_to_any_text(3)` format with a newline delimiter, when the standard (and `acl_from_text(3)` which restore depends on) requires commas. This may cause restores that include ACLs (likely only those from Linux right now) to fail on some platforms (e.g. Cygwin). Build and install ----------------- * pytest 7 is now supported. Thanks to (at least) ==================== Johannes Berg and Rob Browning bup-0.33.3/note/0.32.2-from-0.32.1.md000066400000000000000000000011471454333004200160600ustar00rootroot00000000000000 Notable changes in 0.32.2 since 0.32.1 ====================================== Bugs ---- * `bup gc` should no longer remove stale packfiles before it creates the new midx at the end of a run. 
This could cause midx to complain about missing files, e.g.: warning: index pack-....idx missing Build system ------------ * The automated FreeBSD tests have moved from 12.1 to 12.4 and to Python 3.9, and have dropped testing of Python 2. * The automated macOS tests have moved from Catalina to Monterey and from x86 to ARM. Thanks to (at least) ==================== Johannes Berg and Rob Browning bup-0.33.3/note/0.33-from-0.32.md000066400000000000000000000073031454333004200155620ustar00rootroot00000000000000 Notable changes in 0.33 since 0.32 ================================== * Python 3 is now required, in particular 3.7 or newer. * The version of Python is no longer determined by the value of `PYTHON` during `./configure`. It is now determined by the version of `python-config` selected during configuration. The `BUP_PYTHON_CONFIG` environment variable can be set during configuration to select a particular executable (e.g. `BUP_PYTHON_CONFIG=python3.9-config ./configure`). See the `README` for further details. * The `bup` executable is now a binary rather than a Python script. Practically speaking, this means that the Python version is determined by the `libpython` that the executable is linked against (selected via `python-config`), and it means that bup will show up as "bup" in process listings rather than a python invocation. * The output of `bup ls` for multiple paths includes each path before its contents, more closely matching the system `ls`. * The `bup ftp` `ls` command should now respect the working directory (set by `cd`), and that directory has been added to the prompt. * Some minor changes have been made to `bup web`'s interface. * The `index-cache` is no longer included in the `bup midx` `--dir` default. * Performance may have improved after the relocation of a number of operations from Python to C, and a number of improvements to the VFS. * The `#bup` IRC channel has moved to https://libera.chat/. 
Bugs ---- * Bup should now respect the current umask, directory sgid bit, etc. when creating new files (e.g. new packfiles). Previously, it would create them via mkstemp, which would cause them to be readable only by the current user. * Bup should now be able to handle repositories containing signed commits. * `bup tag` has been changed to rely on `git update-ref` instead of writing the (loose) refs directly so that it can handle packed refs correctly. * `bup save` should be more careful about noting an error and skipping paths whose type (link, regular file, ...) has changed since indexing. * There should no longer be a narrow window where `save` could store conflicting symlink targets or conflicting sizes (in the metadata record and the packfile symlink blob) for a path if the filesystem changed at just the wrong time during a save. * `bup fuse` should no longer become unusable after an unfinished read of commits associated with tags. The underlying VFS issue may have affected other commands too. * Bup's packfile names should now match git's. Previously bup computed the SHA1 from just the sorted object list (cf. `git-index-pack(1)`), but git appears to include the entire pack file in the hash. * Passing idx files to `bup midx` along with `--dir` should now work correctly. * The `bup index --print --long ...` mode fields should now be formatted correctly with Python 3 (it changed the `oct()` format). * Resource usage (e.g. memory) may have improved, given a number of changes intended to make bup release resources more carefully and sooner. Build and install ----------------- * As mentioned above, `PYTHON`'s role during configuration has been replaced by `BUP_PYTHON_CONFIG`. * `./configure` now supports `--with-pylint=[yes|no|maybe]`. * Any `CC`, `CPPFLAGS`, `CFLAGS`, or `LDFLAGS` set during `./configure` should now be preserved across future make invocations. 
* The build process should now work on systems where `make` isn't GNU Make (the `Makefile` automatically redirects to `GNUmakefile`). * The `PANDOC` path can contain spaces. Thanks to (at least) ==================== Abdel Said, Arthur Ward, Bas Stottelaar, Brian Minton, Greg Troxel, Johannes Berg, Mark Hewitt, Muh Muhten, and Rob Browning bup-0.33.3/note/0.33.1-from-0.33.md000066400000000000000000000027141454333004200157230ustar00rootroot00000000000000 Notable changes in 0.33.1 since 0.33 ==================================== Bugs ---- * POSIX1e ACLs should be restored correctly now. Previously there were two problems. First, bup incorrectly restored default (`ACL_TYPE_DEFAULT`) ACLs as access acls (`ACL_TYPE_ACCESS`). When both existed, it restored the access ACL first and then the default ACL as an access ACL. Now, bup should restore each with the proper type. This issue only affects saves created on platforms where bup currently supports ACLs, so presumably mostly just saves created on Linux since the current ACL support depends on non-standard functions like `acl_extended(3)`. Second, bup stored ACLs in the `acl_to_any_text(3)` format with a newlne delimiter, when the standard (and `acl_from_text(3)` which restore depends on) requires commas. Now bup uses commas, and translates previously created saves during restore when possible. If a previously created ACL entry contains a comma, then bup will give up, report an error, and skip it. If nothing else, this could cause restores of relevant saves to fail on some platforms. Build and install ----------------- * `MAKE=path ./configure ...` now supports relative `path`s. * `./configure` will now look for `python3.11-config`, and won't look for `python3.6-config` (bup requires 3.7+). * pytest 7 is now supported. 
Thanks to (at least) ==================== Greg Troxel, Johannes Berg, Moritz Lell, and Rob Browning bup-0.33.3/note/0.33.2-from-0.33.1.md000066400000000000000000000004651454333004200160640ustar00rootroot00000000000000 Notable changes in 0.33.2 since 0.33.1 ====================================== Bugs ---- * The fix for the POSIX1e ACL issue addressed by 0.33.1 should no longer crash due to a missing path argument. Thanks to (at least) ==================== Johannes Berg, Phil Sutter, Rob Browning, and Robert Edmonds bup-0.33.3/note/0.33.3-from-0.33.2.md000066400000000000000000000033061454333004200160630ustar00rootroot00000000000000 Notable changes in 0.33.3 since 0.33.2 ====================================== General ------- * The performance of `bup ls` when columnating a large number of results should be *much* better. Bugs ---- * `bup fuse` should no longer linger after its mount point has been unmounted: https://bugs.debian.org/cgi-bin/1050040 * `bup --version` should work again. * `bup gc` should no longer remove stale packfiles before it creates the new midx at the end of a run. This could cause midx to complain about missing files, e.g.: warning: index pack-....idx missing * `bup` will no longer (since 0.33) ignore `BUP_DIR` looking for a `pack.packSizeLimit`. * A VFS optimization of the handling of requests for specific names within a given tree has been restored. * Incorrect uses of some exception handling code designed for compatibility with Python 2 have been replaced with the built-in Python 3 equivalents. * Some client calls to `super(CLASS, self)` have been replaced by `super()` to avoid errors during `__del__`. Build system ------------ * `test-help` should no longer hang when run interactively, and should no longer be affected by the current locale. Previously it might fail in a non-C locale. 
* The `bup damage` invocation in `test-fsck` has been made much more aggressive in order to avoid transient failures caused both by unpredictable `par2` behavior and the (currently) probabilstic nature of the damage. * OpenBSD builds should no longer fail on `-Werror` related issues involving `PROC_SELF_EXE`. Thanks to (at least) ==================== Alain Cochard, Austin Schuh, Johannes Berg, Julian Smith, Lucas Nussbaum, Nix, Rob Browning, Rob Leslie, and Robert Edmonds bup-0.33.3/pylint000077500000000000000000000021451454333004200136330ustar00rootroot00000000000000#!/usr/bin/env bash # Changes here might also be appropriate for ./pytest set -eu with_pylint=$(cat config/config.var/with-pylint) case "$with_pylint" in yes) ;; no) echo "./pylint: doing nothing given ./configure --with-pylint=no" 1>&2 exit 0 ;; maybe) rc=0 dev/have-pylint || rc=$? case "$rc" in 0) ;; 1) echo "./pylint: doing nothing (pylint not found)" 1>&2 exit 0 ;; *) exit "$rc" ;; esac ;; *) printf "./pylint: unexpected config/config.var/with-pylint value %q\n" \ "$with_pylint" 1>&2 exit 2 ;; esac script_home="$(cd "$(dirname "$0")" && pwd -P)" testlibdir="$script_home/test/lib" export PYTHONPATH="$testlibdir${PYTHONPATH:+:$PYTHONPATH}" if test "$#" -eq 0; then set -x dev/bup-python -m pylint lib # unused-wildcard-import: we always "import * from wvpytest" dev/bup-python -m pylint -d unused-wildcard-import test/lib test/int else set -x exec dev/bup-python -m pylint "$@" fi bup-0.33.3/pytest000077500000000000000000000004211454333004200136370ustar00rootroot00000000000000#!/bin/sh # Changes here might also be appropriate for ./pylint set -eu script_home="$(cd "$(dirname "$0")" && pwd -P)" testlibdir="$script_home/test/lib" export PYTHONPATH="$testlibdir${PYTHONPATH:+:$PYTHONPATH}" exec dev/bup-python -m pytest -v -m 'not release' "$@" bup-0.33.3/pytest.ini000066400000000000000000000001651454333004200144170ustar00rootroot00000000000000 [pytest] testpaths = test/int test/ext markers = release: tests to 
check that the tree is ready for a release bup-0.33.3/src/000077500000000000000000000000001454333004200131535ustar00rootroot00000000000000bup-0.33.3/src/bup/000077500000000000000000000000001454333004200137415ustar00rootroot00000000000000bup-0.33.3/src/bup/compat.c000066400000000000000000000023461454333004200153750ustar00rootroot00000000000000 #define PY_SSIZE_T_CLEAN #define _GNU_SOURCE 1 // asprintf #undef NDEBUG // According to Python, its header has to go first: // http://docs.python.org/3/c-api/intro.html#include-files #include #include "bup/compat.h" #include "bup/io.h" #if PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION < 8 int bup_py_bytes_main(int argc, char **argv) { assert(argc > 0); wchar_t **wargv = PyMem_RawMalloc(argc * sizeof(wchar_t *)); if (!wargv) die(2, "memory insufficient to decode command line arguments"); int i; for (i = 0; i < argc; i++) { size_t wargn; wargv[i] = Py_DecodeLocale(argv[i], &wargn); if (!wargv[i]) { switch (wargn) { case (size_t) -1: die(2, "too little memory to decode command line argument %d\n", i); break; case (size_t) -2: die(2, "unable to decode command line argument %d\n", i); break; default: die(2, "unexpected error from Py_DecodeLocale(): %zu\n", wargn); break; } exit(2); } } return Py_Main(argc, wargv); } #endif // PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION < 8 bup-0.33.3/src/bup/compat.h000066400000000000000000000000741454333004200153760ustar00rootroot00000000000000#pragma once int bup_py_bytes_main(int argc, char **argv); bup-0.33.3/src/bup/intprops.h000066400000000000000000000715731454333004200160050ustar00rootroot00000000000000/* intprops.h -- properties of integer types Copyright (C) 2001-2021 Free Software Foundation, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this program. If not, see . */ /* Written by Paul Eggert. */ /* This is the gnulib "Integer Properties" header: https://www.gnu.org/software/gnulib/manual/html_node/Integer-Properties.html Copied into bup from gnulib as of this commit: commit 257183ec1ea1517d5ae6dac2e8724a90e23e864a Author: Paul Eggert Date: Sun Mar 14 21:28:40 2021 -0700 intprops: improve commentary * lib/intprops.h: Improve comments about promotion etc. */ #ifndef _GL_INTPROPS_H #define _GL_INTPROPS_H #include /* Return a value with the common real type of E and V and the value of V. Do not evaluate E. */ #define _GL_INT_CONVERT(e, v) ((1 ? 0 : (e)) + (v)) /* Act like _GL_INT_CONVERT (E, -V) but work around a bug in IRIX 6.5 cc; see . */ #define _GL_INT_NEGATE_CONVERT(e, v) ((1 ? 0 : (e)) - (v)) /* The extra casts in the following macros work around compiler bugs, e.g., in Cray C 5.0.3.0. */ /* True if the arithmetic type T is an integer type. bool counts as an integer. */ #define TYPE_IS_INTEGER(t) ((t) 1.5 == 1) /* True if the real type T is signed. */ #define TYPE_SIGNED(t) (! ((t) 0 < (t) -1)) /* Return 1 if the real expression E, after promotion, has a signed or floating type. Do not evaluate E. */ #define EXPR_SIGNED(e) (_GL_INT_NEGATE_CONVERT (e, 1) < 0) /* Minimum and maximum values for integer types and expressions. */ /* The width in bits of the integer type or expression T. Do not evaluate T. T must not be a bit-field expression. Padding bits are not supported; this is checked at compile-time below. */ #define TYPE_WIDTH(t) (sizeof (t) * CHAR_BIT) /* The maximum and minimum values for the integer type T. 
*/ #define TYPE_MINIMUM(t) ((t) ~ TYPE_MAXIMUM (t)) #define TYPE_MAXIMUM(t) \ ((t) (! TYPE_SIGNED (t) \ ? (t) -1 \ : ((((t) 1 << (TYPE_WIDTH (t) - 2)) - 1) * 2 + 1))) /* The maximum and minimum values for the type of the expression E, after integer promotion. E is not evaluated. */ #define _GL_INT_MINIMUM(e) \ (EXPR_SIGNED (e) \ ? ~ _GL_SIGNED_INT_MAXIMUM (e) \ : _GL_INT_CONVERT (e, 0)) #define _GL_INT_MAXIMUM(e) \ (EXPR_SIGNED (e) \ ? _GL_SIGNED_INT_MAXIMUM (e) \ : _GL_INT_NEGATE_CONVERT (e, 1)) #define _GL_SIGNED_INT_MAXIMUM(e) \ (((_GL_INT_CONVERT (e, 1) << (TYPE_WIDTH (+ (e)) - 2)) - 1) * 2 + 1) /* Work around OpenVMS incompatibility with C99. */ #if !defined LLONG_MAX && defined __INT64_MAX # define LLONG_MAX __INT64_MAX # define LLONG_MIN __INT64_MIN #endif /* This include file assumes that signed types are two's complement without padding bits; the above macros have undefined behavior otherwise. If this is a problem for you, please let us know how to fix it for your host. This assumption is tested by the intprops-tests module. */ /* Does the __typeof__ keyword work? This could be done by 'configure', but for now it's easier to do it by hand. */ #if (2 <= __GNUC__ \ || (4 <= __clang_major__) \ || (1210 <= __IBMC__ && defined __IBM__TYPEOF__) \ || (0x5110 <= __SUNPRO_C && !__STDC__)) # define _GL_HAVE___TYPEOF__ 1 #else # define _GL_HAVE___TYPEOF__ 0 #endif /* Return 1 if the integer type or expression T might be signed. Return 0 if it is definitely unsigned. T must not be a bit-field expression. This macro does not evaluate its argument, and expands to an integer constant expression. */ #if _GL_HAVE___TYPEOF__ # define _GL_SIGNED_TYPE_OR_EXPR(t) TYPE_SIGNED (__typeof__ (t)) #else # define _GL_SIGNED_TYPE_OR_EXPR(t) 1 #endif /* Bound on length of the string representing an unsigned integer value representable in B bits. log10 (2.0) < 146/485. The smallest value of B where this bound is not tight is 2621. 
*/ #define INT_BITS_STRLEN_BOUND(b) (((b) * 146 + 484) / 485) /* Bound on length of the string representing an integer type or expression T. T must not be a bit-field expression. Subtract 1 for the sign bit if T is signed, and then add 1 more for a minus sign if needed. Because _GL_SIGNED_TYPE_OR_EXPR sometimes returns 1 when its argument is unsigned, this macro may overestimate the true bound by one byte when applied to unsigned types of size 2, 4, 16, ... bytes. */ #define INT_STRLEN_BOUND(t) \ (INT_BITS_STRLEN_BOUND (TYPE_WIDTH (t) - _GL_SIGNED_TYPE_OR_EXPR (t)) \ + _GL_SIGNED_TYPE_OR_EXPR (t)) /* Bound on buffer size needed to represent an integer type or expression T, including the terminating null. T must not be a bit-field expression. */ #define INT_BUFSIZE_BOUND(t) (INT_STRLEN_BOUND (t) + 1) /* Range overflow checks. The INT__RANGE_OVERFLOW macros return 1 if the corresponding C operators might not yield numerically correct answers due to arithmetic overflow. They do not rely on undefined or implementation-defined behavior. Their implementations are simple and straightforward, but they are harder to use and may be less efficient than the INT__WRAPV, INT__OK, and INT__OVERFLOW macros described below. Example usage: long int i = ...; long int j = ...; if (INT_MULTIPLY_RANGE_OVERFLOW (i, j, LONG_MIN, LONG_MAX)) printf ("multiply would overflow"); else printf ("product is %ld", i * j); Restrictions on *_RANGE_OVERFLOW macros: These macros do not check for all possible numerical problems or undefined or unspecified behavior: they do not check for division by zero, for bad shift counts, or for shifting negative numbers. These macros may evaluate their arguments zero or multiple times, so the arguments should not have side effects. The arithmetic arguments (including the MIN and MAX arguments) must be of the same integer type after the usual arithmetic conversions, and the type must have minimum value MIN and maximum MAX. 
Unsigned types should use a zero MIN of the proper type. Because all arguments are subject to integer promotions, these macros typically do not work on types narrower than 'int'. These macros are tuned for constant MIN and MAX. For commutative operations such as A + B, they are also tuned for constant B. */ /* Return 1 if A + B would overflow in [MIN,MAX] arithmetic. See above for restrictions. */ #define INT_ADD_RANGE_OVERFLOW(a, b, min, max) \ ((b) < 0 \ ? (a) < (min) - (b) \ : (max) - (b) < (a)) /* Return 1 if A - B would overflow in [MIN,MAX] arithmetic. See above for restrictions. */ #define INT_SUBTRACT_RANGE_OVERFLOW(a, b, min, max) \ ((b) < 0 \ ? (max) + (b) < (a) \ : (a) < (min) + (b)) /* Return 1 if - A would overflow in [MIN,MAX] arithmetic. See above for restrictions. */ #define INT_NEGATE_RANGE_OVERFLOW(a, min, max) \ ((min) < 0 \ ? (a) < - (max) \ : 0 < (a)) /* Return 1 if A * B would overflow in [MIN,MAX] arithmetic. See above for restrictions. Avoid && and || as they tickle bugs in Sun C 5.11 2010/08/13 and other compilers; see . */ #define INT_MULTIPLY_RANGE_OVERFLOW(a, b, min, max) \ ((b) < 0 \ ? ((a) < 0 \ ? (a) < (max) / (b) \ : (b) == -1 \ ? 0 \ : (min) / (b) < (a)) \ : (b) == 0 \ ? 0 \ : ((a) < 0 \ ? (a) < (min) / (b) \ : (max) / (b) < (a))) /* Return 1 if A / B would overflow in [MIN,MAX] arithmetic. See above for restrictions. Do not check for division by zero. */ #define INT_DIVIDE_RANGE_OVERFLOW(a, b, min, max) \ ((min) < 0 && (b) == -1 && (a) < - (max)) /* Return 1 if A % B would overflow in [MIN,MAX] arithmetic. See above for restrictions. Do not check for division by zero. Mathematically, % should never overflow, but on x86-like hosts INT_MIN % -1 traps, and the C standard permits this, so treat this as an overflow too. */ #define INT_REMAINDER_RANGE_OVERFLOW(a, b, min, max) \ INT_DIVIDE_RANGE_OVERFLOW (a, b, min, max) /* Return 1 if A << B would overflow in [MIN,MAX] arithmetic. See above for restrictions. 
Here, MIN and MAX are for A only, and B need not be of the same type as the other arguments. The C standard says that behavior is undefined for shifts unless 0 <= B < wordwidth, and that when A is negative then A << B has undefined behavior and A >> B has implementation-defined behavior, but do not check these other restrictions. */ #define INT_LEFT_SHIFT_RANGE_OVERFLOW(a, b, min, max) \ ((a) < 0 \ ? (a) < (min) >> (b) \ : (max) >> (b) < (a)) /* True if __builtin_add_overflow (A, B, P) and __builtin_sub_overflow (A, B, P) work when P is non-null. */ /* __builtin_{add,sub}_overflow exists but is not reliable in GCC 5.x and 6.x, see . */ #if 7 <= __GNUC__ && !defined __ICC # define _GL_HAS_BUILTIN_ADD_OVERFLOW 1 #elif defined __has_builtin # define _GL_HAS_BUILTIN_ADD_OVERFLOW __has_builtin (__builtin_add_overflow) #else # define _GL_HAS_BUILTIN_ADD_OVERFLOW 0 #endif /* True if __builtin_mul_overflow (A, B, P) works when P is non-null. */ #ifdef __clang__ /* Work around Clang bug . */ # define _GL_HAS_BUILTIN_MUL_OVERFLOW 0 #else # define _GL_HAS_BUILTIN_MUL_OVERFLOW _GL_HAS_BUILTIN_ADD_OVERFLOW #endif /* True if __builtin_add_overflow_p (A, B, C) works, and similarly for __builtin_sub_overflow_p and __builtin_mul_overflow_p. */ #if defined __clang__ || defined __ICC /* Clang 11 lacks __builtin_mul_overflow_p, and even if it did it would presumably run afoul of Clang bug 16404. ICC 2021.1's __builtin_add_overflow_p etc. are not treated as integral constant expressions even when all arguments are. */ # define _GL_HAS_BUILTIN_OVERFLOW_P 0 #elif defined __has_builtin # define _GL_HAS_BUILTIN_OVERFLOW_P __has_builtin (__builtin_mul_overflow_p) #else # define _GL_HAS_BUILTIN_OVERFLOW_P (7 <= __GNUC__) #endif /* The _GL*_OVERFLOW macros have the same restrictions as the *_RANGE_OVERFLOW macros, except that they do not assume that operands (e.g., A and B) have the same type as MIN and MAX. Instead, they assume that the result (e.g., A + B) has that type. 
*/ #if _GL_HAS_BUILTIN_OVERFLOW_P # define _GL_ADD_OVERFLOW(a, b, min, max) \ __builtin_add_overflow_p (a, b, (__typeof__ ((a) + (b))) 0) # define _GL_SUBTRACT_OVERFLOW(a, b, min, max) \ __builtin_sub_overflow_p (a, b, (__typeof__ ((a) - (b))) 0) # define _GL_MULTIPLY_OVERFLOW(a, b, min, max) \ __builtin_mul_overflow_p (a, b, (__typeof__ ((a) * (b))) 0) #else # define _GL_ADD_OVERFLOW(a, b, min, max) \ ((min) < 0 ? INT_ADD_RANGE_OVERFLOW (a, b, min, max) \ : (a) < 0 ? (b) <= (a) + (b) \ : (b) < 0 ? (a) <= (a) + (b) \ : (a) + (b) < (b)) # define _GL_SUBTRACT_OVERFLOW(a, b, min, max) \ ((min) < 0 ? INT_SUBTRACT_RANGE_OVERFLOW (a, b, min, max) \ : (a) < 0 ? 1 \ : (b) < 0 ? (a) - (b) <= (a) \ : (a) < (b)) # define _GL_MULTIPLY_OVERFLOW(a, b, min, max) \ (((min) == 0 && (((a) < 0 && 0 < (b)) || ((b) < 0 && 0 < (a)))) \ || INT_MULTIPLY_RANGE_OVERFLOW (a, b, min, max)) #endif #define _GL_DIVIDE_OVERFLOW(a, b, min, max) \ ((min) < 0 ? (b) == _GL_INT_NEGATE_CONVERT (min, 1) && (a) < - (max) \ : (a) < 0 ? (b) <= (a) + (b) - 1 \ : (b) < 0 && (a) + (b) <= (a)) #define _GL_REMAINDER_OVERFLOW(a, b, min, max) \ ((min) < 0 ? (b) == _GL_INT_NEGATE_CONVERT (min, 1) && (a) < - (max) \ : (a) < 0 ? (a) % (b) != ((max) - (b) + 1) % (b) \ : (b) < 0 && ! _GL_UNSIGNED_NEG_MULTIPLE (a, b, max)) /* Return a nonzero value if A is a mathematical multiple of B, where A is unsigned, B is negative, and MAX is the maximum value of A's type. A's type must be the same as (A % B)'s type. Normally (A % -B == 0) suffices, but things get tricky if -B would overflow. */ #define _GL_UNSIGNED_NEG_MULTIPLE(a, b, max) \ (((b) < -_GL_SIGNED_INT_MAXIMUM (b) \ ? (_GL_SIGNED_INT_MAXIMUM (b) == (max) \ ? (a) \ : (a) % (_GL_INT_CONVERT (a, _GL_SIGNED_INT_MAXIMUM (b)) + 1)) \ : (a) % - (b)) \ == 0) /* Check for integer overflow, and report low order bits of answer. The INT__OVERFLOW macros return 1 if the corresponding C operators might not yield numerically correct answers due to arithmetic overflow. 
The INT__WRAPV macros compute the low-order bits of the sum, difference, and product of two C integers, and return 1 if these low-order bits are not numerically correct. These macros work correctly on all known practical hosts, and do not rely on undefined behavior due to signed arithmetic overflow. Example usage, assuming A and B are long int: if (INT_MULTIPLY_OVERFLOW (a, b)) printf ("result would overflow\n"); else printf ("result is %ld (no overflow)\n", a * b); Example usage with WRAPV flavor: long int result; bool overflow = INT_MULTIPLY_WRAPV (a, b, &result); printf ("result is %ld (%s)\n", result, overflow ? "after overflow" : "no overflow"); Restrictions on these macros: These macros do not check for all possible numerical problems or undefined or unspecified behavior: they do not check for division by zero, for bad shift counts, or for shifting negative numbers. These macros may evaluate their arguments zero or multiple times, so the arguments should not have side effects. The WRAPV macros are not constant expressions. They support only +, binary -, and *. Because the WRAPV macros convert the result, they report overflow in different circumstances than the OVERFLOW macros do. For example, in the typical case with 16-bit 'short' and 32-bit 'int', if A, B and R are all of type 'short' then INT_ADD_OVERFLOW (A, B) returns false because the addition cannot overflow after A and B are converted to 'int', whereas INT_ADD_WRAPV (A, B, &R) returns true or false depending on whether the sum fits into 'short'. These macros are tuned for their last input argument being a constant. Return 1 if the integer expressions A * B, A - B, -A, A * B, A / B, A % B, and A << B would overflow, respectively. 
*/ #define INT_ADD_OVERFLOW(a, b) \ _GL_BINARY_OP_OVERFLOW (a, b, _GL_ADD_OVERFLOW) #define INT_SUBTRACT_OVERFLOW(a, b) \ _GL_BINARY_OP_OVERFLOW (a, b, _GL_SUBTRACT_OVERFLOW) #if _GL_HAS_BUILTIN_OVERFLOW_P # define INT_NEGATE_OVERFLOW(a) INT_SUBTRACT_OVERFLOW (0, a) #else # define INT_NEGATE_OVERFLOW(a) \ INT_NEGATE_RANGE_OVERFLOW (a, _GL_INT_MINIMUM (a), _GL_INT_MAXIMUM (a)) #endif #define INT_MULTIPLY_OVERFLOW(a, b) \ _GL_BINARY_OP_OVERFLOW (a, b, _GL_MULTIPLY_OVERFLOW) #define INT_DIVIDE_OVERFLOW(a, b) \ _GL_BINARY_OP_OVERFLOW (a, b, _GL_DIVIDE_OVERFLOW) #define INT_REMAINDER_OVERFLOW(a, b) \ _GL_BINARY_OP_OVERFLOW (a, b, _GL_REMAINDER_OVERFLOW) #define INT_LEFT_SHIFT_OVERFLOW(a, b) \ INT_LEFT_SHIFT_RANGE_OVERFLOW (a, b, \ _GL_INT_MINIMUM (a), _GL_INT_MAXIMUM (a)) /* Return 1 if the expression A B would overflow, where OP_RESULT_OVERFLOW (A, B, MIN, MAX) does the actual test, assuming MIN and MAX are the minimum and maximum for the result type. Arguments should be free of side effects. */ #define _GL_BINARY_OP_OVERFLOW(a, b, op_result_overflow) \ op_result_overflow (a, b, \ _GL_INT_MINIMUM (_GL_INT_CONVERT (a, b)), \ _GL_INT_MAXIMUM (_GL_INT_CONVERT (a, b))) /* Store the low-order bits of A + B, A - B, A * B, respectively, into *R. Return 1 if the result overflows. See above for restrictions. */ #if _GL_HAS_BUILTIN_ADD_OVERFLOW # define INT_ADD_WRAPV(a, b, r) __builtin_add_overflow (a, b, r) # define INT_SUBTRACT_WRAPV(a, b, r) __builtin_sub_overflow (a, b, r) #else # define INT_ADD_WRAPV(a, b, r) \ _GL_INT_OP_WRAPV (a, b, r, +, _GL_INT_ADD_RANGE_OVERFLOW) # define INT_SUBTRACT_WRAPV(a, b, r) \ _GL_INT_OP_WRAPV (a, b, r, -, _GL_INT_SUBTRACT_RANGE_OVERFLOW) #endif #if _GL_HAS_BUILTIN_MUL_OVERFLOW # if ((9 < __GNUC__ + (3 <= __GNUC_MINOR__) \ || (__GNUC__ == 8 && 4 <= __GNUC_MINOR__)) \ && !defined __ICC) # define INT_MULTIPLY_WRAPV(a, b, r) __builtin_mul_overflow (a, b, r) # else /* Work around GCC bug 91450. 
*/ # define INT_MULTIPLY_WRAPV(a, b, r) \ ((!_GL_SIGNED_TYPE_OR_EXPR (*(r)) && EXPR_SIGNED (a) && EXPR_SIGNED (b) \ && _GL_INT_MULTIPLY_RANGE_OVERFLOW (a, b, 0, (__typeof__ (*(r))) -1)) \ ? ((void) __builtin_mul_overflow (a, b, r), 1) \ : __builtin_mul_overflow (a, b, r)) # endif #else # define INT_MULTIPLY_WRAPV(a, b, r) \ _GL_INT_OP_WRAPV (a, b, r, *, _GL_INT_MULTIPLY_RANGE_OVERFLOW) #endif /* Nonzero if this compiler has GCC bug 68193 or Clang bug 25390. See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=68193 https://llvm.org/bugs/show_bug.cgi?id=25390 For now, assume all versions of GCC-like compilers generate bogus warnings for _Generic. This matters only for compilers that lack relevant builtins. */ #if __GNUC__ || defined __clang__ # define _GL__GENERIC_BOGUS 1 #else # define _GL__GENERIC_BOGUS 0 #endif /* Store the low-order bits of A B into *R, where OP specifies the operation and OVERFLOW the overflow predicate. Return 1 if the result overflows. See above for restrictions. */ #if 201112 <= __STDC_VERSION__ && !_GL__GENERIC_BOGUS # define _GL_INT_OP_WRAPV(a, b, r, op, overflow) \ (_Generic \ (*(r), \ signed char: \ _GL_INT_OP_CALC (a, b, r, op, overflow, unsigned int, \ signed char, SCHAR_MIN, SCHAR_MAX), \ unsigned char: \ _GL_INT_OP_CALC (a, b, r, op, overflow, unsigned int, \ unsigned char, 0, UCHAR_MAX), \ short int: \ _GL_INT_OP_CALC (a, b, r, op, overflow, unsigned int, \ short int, SHRT_MIN, SHRT_MAX), \ unsigned short int: \ _GL_INT_OP_CALC (a, b, r, op, overflow, unsigned int, \ unsigned short int, 0, USHRT_MAX), \ int: \ _GL_INT_OP_CALC (a, b, r, op, overflow, unsigned int, \ int, INT_MIN, INT_MAX), \ unsigned int: \ _GL_INT_OP_CALC (a, b, r, op, overflow, unsigned int, \ unsigned int, 0, UINT_MAX), \ long int: \ _GL_INT_OP_CALC (a, b, r, op, overflow, unsigned long int, \ long int, LONG_MIN, LONG_MAX), \ unsigned long int: \ _GL_INT_OP_CALC (a, b, r, op, overflow, unsigned long int, \ unsigned long int, 0, ULONG_MAX), \ long long int: \ 
_GL_INT_OP_CALC (a, b, r, op, overflow, unsigned long long int, \ long long int, LLONG_MIN, LLONG_MAX), \ unsigned long long int: \ _GL_INT_OP_CALC (a, b, r, op, overflow, unsigned long long int, \ unsigned long long int, 0, ULLONG_MAX))) #else /* Store the low-order bits of A B into *R, where OP specifies the operation and OVERFLOW the overflow predicate. If *R is signed, its type is ST with bounds SMIN..SMAX; otherwise its type is UT with bounds U..UMAX. ST and UT are narrower than int. Return 1 if the result overflows. See above for restrictions. */ # if _GL_HAVE___TYPEOF__ # define _GL_INT_OP_WRAPV_SMALLISH(a,b,r,op,overflow,st,smin,smax,ut,umax) \ (TYPE_SIGNED (__typeof__ (*(r))) \ ? _GL_INT_OP_CALC (a, b, r, op, overflow, unsigned int, st, smin, smax) \ : _GL_INT_OP_CALC (a, b, r, op, overflow, unsigned int, ut, 0, umax)) # else # define _GL_INT_OP_WRAPV_SMALLISH(a,b,r,op,overflow,st,smin,smax,ut,umax) \ (overflow (a, b, smin, smax) \ ? (overflow (a, b, 0, umax) \ ? (*(r) = _GL_INT_OP_WRAPV_VIA_UNSIGNED (a,b,op,unsigned,st), 1) \ : (*(r) = _GL_INT_OP_WRAPV_VIA_UNSIGNED (a,b,op,unsigned,st)) < 0) \ : (overflow (a, b, 0, umax) \ ? (*(r) = _GL_INT_OP_WRAPV_VIA_UNSIGNED (a,b,op,unsigned,st)) >= 0 \ : (*(r) = _GL_INT_OP_WRAPV_VIA_UNSIGNED (a,b,op,unsigned,st), 0))) # endif # define _GL_INT_OP_WRAPV(a, b, r, op, overflow) \ (sizeof *(r) == sizeof (signed char) \ ? _GL_INT_OP_WRAPV_SMALLISH (a, b, r, op, overflow, \ signed char, SCHAR_MIN, SCHAR_MAX, \ unsigned char, UCHAR_MAX) \ : sizeof *(r) == sizeof (short int) \ ? _GL_INT_OP_WRAPV_SMALLISH (a, b, r, op, overflow, \ short int, SHRT_MIN, SHRT_MAX, \ unsigned short int, USHRT_MAX) \ : sizeof *(r) == sizeof (int) \ ? (EXPR_SIGNED (*(r)) \ ? 
_GL_INT_OP_CALC (a, b, r, op, overflow, unsigned int, \ int, INT_MIN, INT_MAX) \ : _GL_INT_OP_CALC (a, b, r, op, overflow, unsigned int, \ unsigned int, 0, UINT_MAX)) \ : _GL_INT_OP_WRAPV_LONGISH(a, b, r, op, overflow)) # ifdef LLONG_MAX # define _GL_INT_OP_WRAPV_LONGISH(a, b, r, op, overflow) \ (sizeof *(r) == sizeof (long int) \ ? (EXPR_SIGNED (*(r)) \ ? _GL_INT_OP_CALC (a, b, r, op, overflow, unsigned long int, \ long int, LONG_MIN, LONG_MAX) \ : _GL_INT_OP_CALC (a, b, r, op, overflow, unsigned long int, \ unsigned long int, 0, ULONG_MAX)) \ : (EXPR_SIGNED (*(r)) \ ? _GL_INT_OP_CALC (a, b, r, op, overflow, unsigned long long int, \ long long int, LLONG_MIN, LLONG_MAX) \ : _GL_INT_OP_CALC (a, b, r, op, overflow, unsigned long long int, \ unsigned long long int, 0, ULLONG_MAX))) # else # define _GL_INT_OP_WRAPV_LONGISH(a, b, r, op, overflow) \ (EXPR_SIGNED (*(r)) \ ? _GL_INT_OP_CALC (a, b, r, op, overflow, unsigned long int, \ long int, LONG_MIN, LONG_MAX) \ : _GL_INT_OP_CALC (a, b, r, op, overflow, unsigned long int, \ unsigned long int, 0, ULONG_MAX)) # endif #endif /* Store the low-order bits of A B into *R, where the operation is given by OP. Use the unsigned type UT for calculation to avoid overflow problems. *R's type is T, with extrema TMIN and TMAX. T must be a signed integer type. Return 1 if the result overflows. */ #define _GL_INT_OP_CALC(a, b, r, op, overflow, ut, t, tmin, tmax) \ (overflow (a, b, tmin, tmax) \ ? (*(r) = _GL_INT_OP_WRAPV_VIA_UNSIGNED (a, b, op, ut, t), 1) \ : (*(r) = _GL_INT_OP_WRAPV_VIA_UNSIGNED (a, b, op, ut, t), 0)) /* Return the low-order bits of A B, where the operation is given by OP. Use the unsigned type UT for calculation to avoid undefined behavior on signed integer overflow, and convert the result to type T. UT is at least as wide as T and is no narrower than unsigned int, T is two's complement, and there is no padding or trap representations. 
Assume that converting UT to T yields the low-order bits, as is done in all known two's-complement C compilers. E.g., see: https://gcc.gnu.org/onlinedocs/gcc/Integers-implementation.html According to the C standard, converting UT to T yields an implementation-defined result or signal for values outside T's range. However, code that works around this theoretical problem runs afoul of a compiler bug in Oracle Studio 12.3 x86. See: https://lists.gnu.org/r/bug-gnulib/2017-04/msg00049.html As the compiler bug is real, don't try to work around the theoretical problem. */ #define _GL_INT_OP_WRAPV_VIA_UNSIGNED(a, b, op, ut, t) \ ((t) ((ut) (a) op (ut) (b))) /* Return true if the numeric values A + B, A - B, A * B fall outside the range TMIN..TMAX. Arguments should be integer expressions without side effects. TMIN should be signed and nonpositive. TMAX should be positive, and should be signed unless TMIN is zero. */ #define _GL_INT_ADD_RANGE_OVERFLOW(a, b, tmin, tmax) \ ((b) < 0 \ ? (((tmin) \ ? ((EXPR_SIGNED (_GL_INT_CONVERT (a, (tmin) - (b))) || (b) < (tmin)) \ && (a) < (tmin) - (b)) \ : (a) <= -1 - (b)) \ || ((EXPR_SIGNED (a) ? 0 <= (a) : (tmax) < (a)) && (tmax) < (a) + (b))) \ : (a) < 0 \ ? (((tmin) \ ? ((EXPR_SIGNED (_GL_INT_CONVERT (b, (tmin) - (a))) || (a) < (tmin)) \ && (b) < (tmin) - (a)) \ : (b) <= -1 - (a)) \ || ((EXPR_SIGNED (_GL_INT_CONVERT (a, b)) || (tmax) < (b)) \ && (tmax) < (a) + (b))) \ : (tmax) < (b) || (tmax) - (b) < (a)) #define _GL_INT_SUBTRACT_RANGE_OVERFLOW(a, b, tmin, tmax) \ (((a) < 0) == ((b) < 0) \ ? ((a) < (b) \ ? !(tmin) || -1 - (tmin) < (b) - (a) - 1 \ : (tmax) < (a) - (b)) \ : (a) < 0 \ ? ((!EXPR_SIGNED (_GL_INT_CONVERT ((a) - (tmin), b)) && (a) - (tmin) < 0) \ || (a) - (tmin) < (b)) \ : ((! (EXPR_SIGNED (_GL_INT_CONVERT (tmax, b)) \ && EXPR_SIGNED (_GL_INT_CONVERT ((tmax) + (b), a))) \ && (tmax) <= -1 - (b)) \ || (tmax) + (b) < (a))) #define _GL_INT_MULTIPLY_RANGE_OVERFLOW(a, b, tmin, tmax) \ ((b) < 0 \ ? ((a) < 0 \ ? 
(EXPR_SIGNED (_GL_INT_CONVERT (tmax, b)) \ ? (a) < (tmax) / (b) \ : ((INT_NEGATE_OVERFLOW (b) \ ? _GL_INT_CONVERT (b, tmax) >> (TYPE_WIDTH (+ (b)) - 1) \ : (tmax) / -(b)) \ <= -1 - (a))) \ : INT_NEGATE_OVERFLOW (_GL_INT_CONVERT (b, tmin)) && (b) == -1 \ ? (EXPR_SIGNED (a) \ ? 0 < (a) + (tmin) \ : 0 < (a) && -1 - (tmin) < (a) - 1) \ : (tmin) / (b) < (a)) \ : (b) == 0 \ ? 0 \ : ((a) < 0 \ ? (INT_NEGATE_OVERFLOW (_GL_INT_CONVERT (a, tmin)) && (a) == -1 \ ? (EXPR_SIGNED (b) ? 0 < (b) + (tmin) : -1 - (tmin) < (b) - 1) \ : (tmin) / (a) < (b)) \ : (tmax) / (b) < (a))) /* The following macros compute A + B, A - B, and A * B, respectively. If no overflow occurs, they set *R to the result and return 1; otherwise, they return 0 and may modify *R. Example usage: long int result; if (INT_ADD_OK (a, b, &result)) printf ("result is %ld\n", result); else printf ("overflow\n"); A, B, and *R should be integers; they need not be the same type, and they need not be all signed or all unsigned. These macros work correctly on all known practical hosts, and do not rely on undefined behavior due to signed arithmetic overflow. These macros are not constant expressions. These macros may evaluate their arguments zero or multiple times, so the arguments should not have side effects. These macros are tuned for B being a constant. */ #define INT_ADD_OK(a, b, r) ! INT_ADD_WRAPV (a, b, r) #define INT_SUBTRACT_OK(a, b, r) ! INT_SUBTRACT_WRAPV (a, b, r) #define INT_MULTIPLY_OK(a, b, r) ! INT_MULTIPLY_WRAPV (a, b, r) #endif /* _GL_INTPROPS_H */ bup-0.33.3/src/bup/io.c000066400000000000000000000011751454333004200145200ustar00rootroot00000000000000 #define _GNU_SOURCE 1 #undef NDEBUG #include #include #include "bup/io.h" __attribute__ ((format(printf, 2, 3))) void msg(FILE* f, const char * const msg, ...) 
{ if (fputs("bup: ", f) == EOF) exit(3); va_list ap; va_start(ap, msg); if (vfprintf(f, msg, ap) < 0) exit(3); va_end(ap); } __attribute__ ((format(printf, 2, 3))) void die(int exit_status, const char * const msg, ...) { if (fputs("bup: ", stderr) == EOF) exit(3); va_list ap; va_start(ap, msg); if (vfprintf(stderr, msg, ap) < 0) exit(3); va_end(ap); exit(exit_status); } bup-0.33.3/src/bup/io.h000066400000000000000000000002121454333004200145140ustar00rootroot00000000000000#pragma once #include void msg(FILE* f, const char * const msg, ...); void die(int exit_status, const char * const msg, ...); bup-0.33.3/test/000077500000000000000000000000001454333004200133435ustar00rootroot00000000000000bup-0.33.3/test/__init__.py000066400000000000000000000000001454333004200154420ustar00rootroot00000000000000bup-0.33.3/test/bin/000077500000000000000000000000001454333004200141135ustar00rootroot00000000000000bup-0.33.3/test/bin/sort-z000077700000000000000000000000001454333004200176542../../dev/sort-zustar00rootroot00000000000000bup-0.33.3/test/ext/000077500000000000000000000000001454333004200141435ustar00rootroot00000000000000bup-0.33.3/test/ext/conftest.py000066400000000000000000000063721454333004200163520ustar00rootroot00000000000000 from pathlib import Path from subprocess import CalledProcessError import pytest, subprocess, sys from bup.compat import fsdecode from bup.io import byte_stream # Handle all test-* files as wvtest protocol subprocesses # cf. 
https://docs.pytest.org/en/latest/example/nonpython.html # version_tuple was added in 7 use_node_path = hasattr(pytest, 'version_tuple') class BupSubprocFailure(Exception): def __init__(self, msg, cmd, status, failures): super().__init__(msg) self.cmd = cmd self.status = status self.failures = failures class BupSubprocTestRunner(pytest.Item): def __init__(self, name, parent): super().__init__(name, parent) def runtest(self): cmd = str(self.fspath) p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out = p.communicate()[0] sys.stdout.flush() byte_stream(sys.stdout).write(out) lines = out.splitlines() for line in lines: if line.startswith(b'!') and line.lower().endswith(b' skip ok'): pytest.skip(line.decode('ascii')) return failures = [line for line in lines if (line.startswith(b'!') and line.lower().endswith(b' failed'))] if b'AssertionError' in out: raise BupSubprocFailure('AssertionError detected') if failures or p.returncode != 0: raise BupSubprocFailure('%s failed (exit %d, %d failures)' % (cmd, p.returncode, len(failures)), cmd, p.returncode, failures) def repr_failure(self, excinfo): ex = excinfo.value if isinstance(ex, BupSubprocFailure): msg = ['Exit status: %d' % ex.status, 'Failures:'] msg.extend(fsdecode(s) for s in ex.failures) return '\n'.join(msg) def reportinfo(self): # This does not appear to be documented, but is in the # example, and sets the final report header line (at least) # for failures. test_name = str(self.fspath) linenum = None return self.fspath, linenum, test_name class BupSubprocTestFile(pytest.File): def collect(self): name = self.fspath.basename # name='' because there's only one test: running the command. # i.e there are no sub-tests. Otherwise the status messages # duplicate the test name like this: # test/ext/test-cat-file.sh::test-cat-file.sh PASSED ... 
try: yield BupSubprocTestRunner.from_parent(self, name='') except AttributeError: yield BupSubprocTestRunner('', self) def pytest_collect_file(parent, path): base = path.basename if base.startswith('test-') and not base.endswith('~'): if use_node_path: item = BupSubprocTestFile.from_parent(parent, path=Path(path)) else: try: item = BupSubprocTestFile.from_parent(parent, fspath=path) except AttributeError: item = BupSubprocTestFile(path, parent) if base == 'test-release-archive': item.add_marker(pytest.mark.release) return item bup-0.33.3/test/ext/test-cat-file000077500000000000000000000026241454333004200165360ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh || exit $? set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" export GIT_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } WVPASS bup init WVPASS cd "$tmpdir" WVSTART "cat-file" WVPASS mkdir src WVPASS date > src/foo WVPASS bup index src WVPASS bup save -n src src WVPASS bup cat-file "src/latest/$(pwd)/src/foo" > cat-foo WVPASS diff -u src/foo cat-foo WVSTART "cat-file --meta" WVPASS bup meta --create --no-paths src/foo > src-foo.meta WVPASS bup cat-file --meta "src/latest/$(pwd)/src/foo" > cat-foo.meta WVPASS bup meta -tvvf src-foo.meta | WVPASS grep -vE '^atime: ' > src-foo.list WVPASS bup meta -tvvf cat-foo.meta | WVPASS grep -vE '^atime: ' > cat-foo.list WVPASS diff -u src-foo.list cat-foo.list WVSTART "cat-file --bupm" WVPASS bup cat-file --bupm "src/latest/$(pwd)/src/" > bup-cat-bupm src_hash=$(WVPASS bup ls -s "src/latest/$(pwd)" | cut -d' ' -f 1) || exit $? bupm_hash=$(WVPASS git ls-tree "$src_hash" | grep -F .bupm | cut -d' ' -f 3) \ || exit $? bupm_hash=$(WVPASS echo "$bupm_hash" | cut -d' ' -f 1) || exit $? WVPASS "$top/dev/git-cat-tree" "$bupm_hash" > git-cat-bupm if ! 
cmp git-cat-bupm bup-cat-bupm; then cmp -l git-cat-bupm bup-cat-bupm diff -uN <(bup meta -tvvf git-cat-bupm) <(bup meta -tvvf bup-cat-bupm) WVPASS cmp git-cat-bupm bup-cat-bupm fi WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-command-without-init-fails000077500000000000000000000005261454333004200222250ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh || exit $? set -o pipefail WVSTART 'all' top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } WVPASS mkdir "$tmpdir/foo" bup index "$tmpdir/foo" &> /dev/null index_rc=$? WVPASSEQ "$index_rc" "15" WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-comparative-split-join000077500000000000000000000074201454333004200214510ustar00rootroot00000000000000#!/usr/bin/env bash . wvtest.sh . wvtest-bup.sh . dev/lib.sh set -o pipefail if test -z "$BUP_TEST_OTHER_BUP"; then WVSKIP 'Other bup not specified by BUP_TEST_OTHER_BUP; skipping test' exit 0 fi seed="${BUP_TEST_RANDOM_SEED:-$RANDOM}" WVSTART "split/join against $BUP_TEST_OTHER_BUP (random seed $seed)" top="$(WVPASS pwd)" || exit $? this_bup="${BUP_TEST_THIS_BUP:-$top/bup}" this-bup() { "$this_bup" -d "$this_bup_dir" "$@"; } other-bup() { "$BUP_TEST_OTHER_BUP" -d "$other_bup_dir" "$@"; } this_version="$(WVPASS this-bup version)" other_version="$(WVPASS other-bup version)" packname-flavor () { # In bb0e9cbf3900e65d2fddbe888e6cb21c59b308df the packfile name # hashing was changed to match git, which itself may have changed # over time. Classify bup versions into categories based on the # approach so we can know when we should expect the names to # match. local version="$1" case "$version" in # Versions are now generally 0.32 or 0.32+, but just look at # the leading integers, and assume anything after indicates # "newer". 0.?) 
echo 0 ;; 0.[0-9]) echo 0 ;; 0.[0-9][^0-9]*) echo 0 ;; 0.[12][0-9]) echo 0 ;; 0.[12][0-9][^0-9]*) echo 0 ;; 0.3[01]) echo 0 ;; 0.3[01][^0-9]*) echo 0 ;; # Fix was added during 0.33~, but unfortunately, the # base_version wasn't updated immediately after the release, # so many of those commits report 0.32*. Given that, just # treat all 0.32* as "who knows". 0.32|0.32[^0-9]*) echo 1 ;; *) echo 2 ;; esac } case "$(packname-flavor "$this_version")""$(packname-flavor "$other_version")" in 00|22) test_packnames=true ;; *) test_packnames='' ;; esac tmpdir="$(WVPASS wvmktempdir)" || exit $? WVPASS cd "$tmpdir" test-split-join() { local size="$1" orig_dir orig_dir="$(WVPASS pwd)" WVSTART "split/join of $(($size / 1024))kb" WVPASS mkdir split-join WVPASS cd split-join this_bup_dir="$(WVPASS pwd)/this-bup" other_bup_dir="$(WVPASS pwd)/other-bup" WVPASS this-bup init WVPASS other-bup init WVPASS this-bup random --seed "$RANDOM" "$size" > data WVPASS other-bup split -t data > other-split-tree WVPASS this-bup split -t data > this-split-tree WVPASSEQ "$( other-join.data WVPASS this-bup join "$( this-join.data WVPASS cmp other-join.data this-join.data if ! test "$test_packnames"; then # Make sure there's just one of each file in each repo and # compare those via cmp, then delete them. 
WVPASS test -f other-bup/objects/pack/pack-*.idx WVPASS test -f other-bup/objects/pack/pack-*.pack WVPASS test -f this-bup/objects/pack/pack-*.idx WVPASS test -f this-bup/objects/pack/pack-*.pack WVPASS cmp {other,this}-bup/objects/pack/pack-*.idx WVPASS cmp {other,this}-bup/objects/pack/pack-*.pack WVPASS rm {other,this}-bup/objects/pack/pack-*.idx WVPASS rm {other,this}-bup/objects/pack/pack-*.pack # The bloom filter includes the (differing) idx names WVPASS rm {other,this}-bup/objects/pack/bup.bloom fi WVPASS test "ref: refs/heads/main" = "$(< other-bup/HEAD)" \ -o "ref: refs/heads/master" = "$(< other-bup/HEAD)" WVPASS test "ref: refs/heads/main" = "$(< this-bup/HEAD)" \ -o "ref: refs/heads/master" = "$(< this-bup/HEAD)" WVPASS rm {other,this}-bup/HEAD WVPASS "$top/dev/compare-trees" --no-times other-bup/ this-bup/ WVPASS cd "$orig_dir" WVPASS rm -r split-join } test-split-join 0 for i in {1..5}; do test-split-join $(($RANDOM * 1024)) done cd "$top" WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-compression000077500000000000000000000024301454333004200174060ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh || exit $? . dev/lib.sh || exit $? set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" export GIT_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } fs-size() { tar cf - "$@" | wc -c; } WVSTART "compression" WVPASS cd "$tmpdir" D=compression0.tmp WVPASS force-delete "$BUP_DIR" WVPASS bup init WVPASS mkdir $D WVPASS bup index "$top/Documentation" WVPASS bup save -n compression -0 --strip "$top/Documentation" # Some platforms set -A by default when root, so just use it everywhere. expected="$(WVPASS ls -A "$top/Documentation" | WVPASS sort)" || exit $? actual="$(WVPASS bup ls -A compression/latest/ | WVPASS sort)" || exit $? WVPASSEQ "$actual" "$expected" compression_0_size=$(WVPASS fs-size "$BUP_DIR") || exit $? 
D=compression9.tmp WVPASS force-delete "$BUP_DIR" WVPASS bup init WVPASS mkdir $D WVPASS bup index "$top/Documentation" WVPASS bup save -n compression -9 --strip "$top/Documentation" expected="$(ls -A "$top/Documentation" | sort)" || exit $? actual="$(bup ls -A compression/latest/ | sort)" || exit $? WVPASSEQ "$actual" "$expected" compression_9_size=$(WVPASS fs-size "$BUP_DIR") || exit $? WVPASS [ "$compression_9_size" -lt "$compression_0_size" ] WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-drecurse000077500000000000000000000035671454333004200166750ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh || exit $? . dev/lib.sh || exit $? set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" export GIT_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } WVPASS cd "$tmpdir" # These tests aren't comprehensive, but test-save-restore-excludes.sh # exercises some of the same code more thoroughly via index, and # --xdev is handled in test-xdev.sh. 
WVSTART "drecurse" WVPASS bup init WVPASS mkdir src src/a src/b WVPASS touch src/a/1 src/a/2 src/b/1 src/b/2 src/c (cd src && WVPASS ln -s a a-link) WVPASSEQ "$(bup drecurse src)" "src/c src/b/2 src/b/1 src/b/ src/a/2 src/a/1 src/a/ src/a-link src/" WVSTART "drecurse --exclude (file)" WVPASSEQ "$(bup drecurse --exclude src/b/2 src)" "src/c src/b/1 src/b/ src/a/2 src/a/1 src/a/ src/a-link src/" WVSTART "drecurse --exclude (dir)" WVPASSEQ "$(bup drecurse --exclude src/b/ src)" "src/c src/a/2 src/a/1 src/a/ src/a-link src/" WVSTART "drecurse --exclude (symlink)" WVPASSEQ "$(bup drecurse --exclude src/a-link src)" "src/c src/b/2 src/b/1 src/b/ src/a/2 src/a/1 src/a/ src/" WVSTART "drecurse --exclude (absolute path)" WVPASSEQ "$(bup drecurse --exclude src/b/2 "$(pwd)/src")" "$(pwd)/src/c $(pwd)/src/b/1 $(pwd)/src/b/ $(pwd)/src/a/2 $(pwd)/src/a/1 $(pwd)/src/a/ $(pwd)/src/a-link $(pwd)/src/" WVSTART "drecurse --exclude-from" WVPASS echo "src/b" > exclude-list WVPASSEQ "$(bup drecurse --exclude-from exclude-list src)" "src/c src/a/2 src/a/1 src/a/ src/a-link src/" WVSTART "drecurse --exclude-rx (trivial)" WVPASSEQ "$(bup drecurse --exclude-rx '^src/b' src)" "src/c src/a/2 src/a/1 src/a/ src/a-link src/" WVSTART "drecurse --exclude-rx (trivial - absolute path)" WVPASSEQ "$(bup drecurse --exclude-rx "^$(pwd)/src/b" "$(pwd)/src")" \ "$(pwd)/src/c $(pwd)/src/a/2 $(pwd)/src/a/1 $(pwd)/src/a/ $(pwd)/src/a-link $(pwd)/src/" WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-fsck000077500000000000000000000044311454333004200157760ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh || exit $? set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? 
bup() { "$top/bup" "$@"; } WVPASS "$top/dev/sync-tree" "$top/test/sampledata/" "$tmpdir/src/" export BUP_DIR="$tmpdir/bup" export GIT_DIR="$tmpdir/bup" WVPASS bup init WVPASS cd "$tmpdir" WVSTART "fsck" WVPASS bup index src WVPASS bup save -n fsck-test src/b2 WVPASS bup save -n fsck-test src/var/cmd WVPASS bup save -n fsck-test src/var/doc WVPASS bup save -n fsck-test src/var/lib WVPASS bup save -n fsck-test src/y WVPASS bup fsck WVPASS bup fsck "$BUP_DIR"/objects/pack/pack-*.pack WVPASS bup fsck --quick if bup fsck --par2-ok; then WVSTART "fsck (par2)" else WVSTART "fsck (PAR2 IS MISSING)" fi WVPASS bup fsck -g WVPASS bup fsck -r WVPASS bup damage "$BUP_DIR"/objects/pack/*.pack -n10 -s1 -S0 WVFAIL bup fsck --quick WVFAIL bup fsck --quick --disable-par2 WVPASS chmod u+w "$BUP_DIR"/objects/pack/*.idx WVPASS bup damage "$BUP_DIR"/objects/pack/*.idx -n10 -s1 -S0 WVFAIL bup fsck --quick -j4 WVPASS bup damage "$BUP_DIR"/objects/pack/*.pack -n10 -s1024 --percent 0.4 -S0 WVFAIL bup fsck --quick WVFAIL bup fsck --quick -rvv -j99 # fails because repairs were needed if bup fsck --par2-ok; then WVPASS bup fsck -r # ok because of repairs from last time # This must do "too much" damage. Currently par2 is invoked with # -c200, which should allow up to 200 damaged "blocks", but since # we don't specify the block size, it's dynamically computed. # Even if we did specify a size, the actual size appears to be # affected by the input file sizes, and the specific behavior # doesn't appear to be documented/promised -- see par2 # comandline.cpp. Also worth noting that bup damage's output is # currently probabilistic, so it might not actually damage any # given byte. For now, just try to overdo it -- randomly change # (or not 1/256th of the time) 600 evenly spaced bytes in each # pack file. 
WVPASS bup damage "$BUP_DIR"/objects/pack/*.pack -n600 -s1 --equal -S0 WVFAIL bup fsck WVFAIL bup fsck -rvv # too many errors to be repairable WVFAIL bup fsck -r # too many errors to be repairable else WVFAIL bup fsck --quick -r # still fails because par2 was missing fi WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-fuse000077500000000000000000000060641454333004200160160ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh || exit $? . dev/lib.sh || exit $? set -o pipefail unset BLOCKSIZE BLOCK_SIZE DF_BLOCK_SIZE top="$(WVPASS pwd)" || exit $? bup() { "$top/bup" "$@"; } # Note: this relies on the import checks happening first # before the command-line is evaluated, and us doing an # exit with status 2 on failures bup fuse -h 2>/dev/null if [ $? -eq 2 ]; then WSKIP 'unable to import fuse/check version; skipping test' exit 0 fi if test -n "$(type -p modprobe)" && ! modprobe fuse; then WVSKIP 'Unable to load fuse module; skipping dependent tests.' exit 0 fi if ! fusermount -V; then WVSKIP 'skipping FUSE tests: fusermount does not appear to work' exit 0 fi if ! test -w /dev/fuse; then WVSKIP 'skipping FUSE tests; no access to /dev/fuse' exit 0 fi clean_up() { fusermount -uz mnt || true; } trap clean_up EXIT top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" export GIT_DIR="$tmpdir/bup" # Some versions of bash's printf don't support the relevant date expansion. savename() { readonly secs="$1" WVPASS bup-cfg-py -c "from time import strftime, localtime; \ print(strftime('%Y-%m-%d-%H%M%S', localtime($secs)))" } export TZ=UTC WVPASS bup init WVPASS cd "$tmpdir" savestamp1=$(WVPASS bup-cfg-py -c 'import time; print(int(time.time()))') || exit $? savestamp2=$(($savestamp1 + 1)) savename1="$(savename "$savestamp1")" || exit $? savename2="$(savename "$savestamp2")" || exit $? 
WVPASS mkdir src WVPASS echo content > src/foo WVPASS chmod 644 src/foo WVPASS touch -t 201111111111 src/foo # FUSE, python-fuse, something, can't handle negative epoch times. # Use pre-epoch to make sure bup properly "bottoms out" at 0 for now. WVPASS echo content > src/pre-epoch WVPASS chmod 644 src/pre-epoch WVPASS touch -t 196907202018 src/pre-epoch WVPASS bup index src WVPASS bup save -n src -d "$savestamp1" --strip src WVSTART "basics" WVPASS mkdir mnt bup fuse -f mnt & fuse_pid=$! while ! test -d mnt/src; do sleep 0.1 done result=$(WVPASS ls mnt) || exit $? WVPASSEQ src "$result" result=$(WVPASS ls mnt/src) || exit $? WVPASSEQ "$result" "$savename1 latest" result=$(WVPASS ls mnt/src/latest) || exit $? WVPASSEQ "$result" "foo pre-epoch" result=$(WVPASS cat mnt/src/latest/foo) || exit $? WVPASSEQ "$result" "content" # Right now we don't detect new saves. WVPASS bup save -n src -d "$savestamp2" --strip src result=$(WVPASS ls mnt/src) || exit $? WVPASSEQ "$result" "$savename1 latest" WVPASS fusermount -uz mnt WVPASS wait "$fuse_pid" fuse_pid='' WVSTART "extended metadata" bup fuse -f --meta mnt & fuse_pid=$! while ! test -d mnt/src; do sleep 0.1 done readonly user=$(WVPASS id -un) || $? readonly group=$(WVPASS id -gn) || $? result="$(stat --format='%A %U %G %x' mnt/src/latest/foo)" WVPASSEQ "$result" "-rw-r--r-- $user $group 2011-11-11 11:11:00.000000000 +0000" result="$(stat --format='%A %U %G %x' mnt/src/latest/pre-epoch)" WVPASSEQ "$result" "-rw-r--r-- $user $group 1970-01-01 00:00:00.000000000 +0000" WVPASS fusermount -uz mnt WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-gc000077500000000000000000000156101454333004200154420ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? 
export BUP_DIR="$tmpdir/bup" export GIT_DIR="$tmpdir/bup" GC_OPTS=--unsafe bup() { "$top/bup" "$@"; } compare-trees() { "$top/dev/compare-trees" "$@"; } data-size() { "$top/dev/data-size" "$@"; } WVPASS cd "$tmpdir" WVPASS bup init WVSTART "gc (unchanged repo)" WVPASS mkdir src-1 WVPASS bup random 1k > src-1/1 WVPASS bup index src-1 WVPASS bup save --strip -n src-1 src-1 WVPASS bup gc $GC_OPTS -v WVPASS bup restore -C "$tmpdir/restore" /src-1/latest WVPASS compare-trees src-1/ "$tmpdir/restore/latest/" WVSTART "gc (unchanged, new branch)" WVPASS mkdir src-2 WVPASS bup random 10M > src-2/1 WVPASS bup index src-2 WVPASS bup save --strip -n src-2 src-2 WVPASS bup gc $GC_OPTS -v WVPASS rm -r "$tmpdir/restore" WVPASS bup restore -C "$tmpdir/restore" /src-1/latest WVPASS compare-trees src-1/ "$tmpdir/restore/latest/" WVPASS rm -r "$tmpdir/restore" WVPASS bup restore -C "$tmpdir/restore" /src-2/latest WVPASS compare-trees src-2/ "$tmpdir/restore/latest/" WVSTART "gc (removed branch)" size_before=$(WVPASS data-size "$BUP_DIR") || exit $? WVPASS rm "$BUP_DIR/refs/heads/src-2" WVPASS bup gc $GC_OPTS -v size_after=$(WVPASS data-size "$BUP_DIR") || exit $? WVPASS [ "$size_before" -gt 5000000 ] WVPASS [ "$size_after" -lt 50000 ] WVPASS rm -r "$tmpdir/restore" WVPASS bup restore -C "$tmpdir/restore" /src-1/latest WVPASS compare-trees src-1/ "$tmpdir/restore/latest/" WVPASS rm -r "$tmpdir/restore" WVFAIL bup restore -C "$tmpdir/restore" /src-2/latest WVPASS mkdir src-ab-clean src-ab-clean/a src-ab-clean/b WVPASS bup random 1k > src-ab-clean/a/1 WVPASS bup random 10M > src-ab-clean/b/1 WVSTART "gc (rewriting)" WVPASS rm -rf "$BUP_DIR" WVPASS bup init WVPASS rm -rf src-ab WVPASS cp -pPR src-ab-clean src-ab WVPASS bup index src-ab WVPASS bup save --strip -n src-ab src-ab WVPASS bup index --clear WVPASS bup index src-ab WVPASS bup save -vvv --strip -n a src-ab/a size_before=$(WVPASS data-size "$BUP_DIR") || exit $? 
WVPASS rm "$BUP_DIR/refs/heads/src-ab" WVPASS bup gc $GC_OPTS -v size_after=$(WVPASS data-size "$BUP_DIR") || exit $? WVPASS [ "$size_before" -gt 5000000 ] WVPASS [ "$size_after" -lt 100000 ] WVPASS rm -r "$tmpdir/restore" WVPASS bup restore -C "$tmpdir/restore" /a/latest WVPASS compare-trees src-ab/a/ "$tmpdir/restore/latest/" WVPASS rm -r "$tmpdir/restore" WVFAIL bup restore -C "$tmpdir/restore" /src-ab/latest WVSTART "gc (save -r after repo rewriting)" WVPASS rm -rf "$BUP_DIR" WVPASS bup init WVPASS bup -d bup-remote init WVPASS rm -rf src-ab WVPASS cp -pPR src-ab-clean src-ab WVPASS bup index src-ab WVPASS bup save -r :bup-remote --strip -n src-ab src-ab WVPASS bup index --clear WVPASS bup index src-ab WVPASS bup save -r :bup-remote -vvv --strip -n a src-ab/a size_before=$(WVPASS data-size bup-remote) || exit $? WVPASS rm bup-remote/refs/heads/src-ab WVPASS bup -d bup-remote gc $GC_OPTS -v size_after=$(WVPASS data-size bup-remote) || exit $? WVPASS [ "$size_before" -gt 5000000 ] WVPASS [ "$size_after" -lt 100000 ] WVPASS rm -rf "$tmpdir/restore" WVPASS bup -d bup-remote restore -C "$tmpdir/restore" /a/latest WVPASS compare-trees src-ab/a/ "$tmpdir/restore/latest/" WVPASS rm -r "$tmpdir/restore" WVFAIL bup -d bup-remote restore -C "$tmpdir/restore" /src-ab/latest # Make sure a post-gc index/save that includes gc-ed data works WVPASS bup index src-ab WVPASS bup save -r :bup-remote --strip -n src-ab src-ab WVPASS rm -r "$tmpdir/restore" WVPASS bup -d bup-remote restore -C "$tmpdir/restore" /src-ab/latest WVPASS compare-trees src-ab/ "$tmpdir/restore/latest/" WVSTART "gc (bup on after repo rewriting)" WVPASS rm -rf "$BUP_DIR" WVPASS bup init WVPASS rm -rf src-ab WVPASS cp -pPR src-ab-clean src-ab WVPASS bup on - index src-ab WVPASS bup on - save --strip -n src-ab src-ab WVPASS bup index --clear WVPASS bup on - index src-ab WVPASS bup on - save -vvv --strip -n a src-ab/a size_before=$(WVPASS data-size "$BUP_DIR") || exit $? 
WVPASS rm "$BUP_DIR/refs/heads/src-ab" WVPASS bup gc $GC_OPTS -v size_after=$(WVPASS data-size "$BUP_DIR") || exit $? WVPASS [ "$size_before" -gt 5000000 ] WVPASS [ "$size_after" -lt 100000 ] WVPASS rm -r "$tmpdir/restore" WVPASS bup restore -C "$tmpdir/restore" /a/latest WVPASS compare-trees src-ab/a/ "$tmpdir/restore/latest/" WVPASS rm -r "$tmpdir/restore" WVFAIL bup restore -C "$tmpdir/restore" /src-ab/latest # Make sure a post-gc index/save that includes gc-ed data works WVPASS bup on - index src-ab WVPASS bup on - save --strip -n src-ab src-ab WVPASS rm -r "$tmpdir/restore" WVPASS bup restore -C "$tmpdir/restore" /src-ab/latest WVPASS compare-trees src-ab/ "$tmpdir/restore/latest/" WVSTART "gc (threshold)" WVPASS rm -rf "$BUP_DIR" WVPASS bup init WVPASS rm -rf src && mkdir src WVPASS echo 0 > src/0 WVPASS echo 1 > src/1 WVPASS bup index src WVPASS bup save -n src-1 src WVPASS rm src/0 WVPASS bup index src WVPASS bup save -n src-2 src WVPASS bup rm --unsafe src-1 packs_before="$(ls "$BUP_DIR/objects/pack/"*.pack)" || exit $? WVPASS bup gc -v $GC_OPTS --threshold 99 2>&1 | tee gc.log packs_after="$(ls "$BUP_DIR/objects/pack/"*.pack)" || exit $? WVPASSEQ 0 "$(grep -cE '^rewriting ' gc.log)" WVPASSEQ "$packs_before" "$packs_after" WVPASS bup gc -v $GC_OPTS --threshold 1 2>&1 | tee gc.log packs_after="$(ls "$BUP_DIR/objects/pack/"*.pack)" || exit $? WVPASSEQ 1 "$(grep -cE '^rewriting ' gc.log)" # Check that only one pack was rewritten # Accommodate some systems that apparently used to change the default # ls sort order which must match LC_COLLATE for comm to work. packs_before="$(sort <(echo "$packs_before"))" || die $? packs_after="$(sort <(echo "$packs_after"))" || die $? only_in_before="$(comm -2 -3 <(echo "$packs_before") <(echo "$packs_after"))" \ || die $? only_in_after="$(comm -1 -3 <(echo "$packs_before") <(echo "$packs_after"))" \ || die $? in_both="$(comm -1 -2 <(echo "$packs_before") <(echo "$packs_after"))" || die $? 
WVPASSEQ 1 $(echo "$only_in_before" | wc -l) WVPASSEQ 1 $(echo "$only_in_after" | wc -l) WVPASSEQ 1 $(echo "$in_both" | wc -l) WVSTART "gc (threshold 0)" WVPASS rm -rf "$BUP_DIR" WVPASS bup init WVPASS rm -rf src && mkdir src WVPASS echo 0 > src/0 WVPASS echo 1 > src/1 WVPASS bup index src WVPASS bup save -n src-1 src pack_contents_before="$(git show-index < "$BUP_DIR/objects/pack/"*.idx | cut -d' ' -f2- | sort)" || exit $? WVPASS bup gc -v $GC_OPTS --threshold 0 2>&1 | tee gc.log pack_contents_after="$(git show-index < "$BUP_DIR/objects/pack/"*.idx | cut -d' ' -f2- | sort)" || exit $? # Check that the pack was rewritten or a new pack written, but # with the same objects. Note that the name of the pack will # likely change as the *order* of objects is different. The # "git show-index | cut | sort" ignores the offsets but checks # the object and their crc. WVPASSEQ 1 "$(grep -cE '^rewriting ' gc.log)" WVPASSEQ "$pack_contents_before" "$pack_contents_after" WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-help000077500000000000000000000014571454333004200160050ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" export GIT_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } # FIXME: send help to stdout if requested (exit 0), stderr on error # (exit nonzero) bup -? rc=$? WVPASSEQ 99 "$rc" bup --help rc=$? WVPASSEQ 99 "$rc" if ! test -e Documentation/bup-save.1; then WVPASS rm -rf "$tmpdir" exit 0 fi mkdir -p "$tmpdir/man" (cd "$tmpdir/man" && ln -s "$top/Documentation" man1) export MANPATH="$tmpdir/man" PAGER=cat WVPASS bup help save PAGER=cat WVPASS bup save --help WVPASSEQ 1 $(bup help save | grep -cE 'bup.save . create a new bup backup set') WVPASSEQ 1 $(bup save --help | grep -cE 'bup.save . 
create a new bup backup set') WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-import-duplicity000077500000000000000000000031601454333004200203640ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh || exit $? set -o pipefail if ! [ "$(type -p duplicity)" != "" ]; then WVSKIP 'Cannot find duplicity; skipping test)' exit 0 fi top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? bup() { "$top/bup" "$@"; } dup() { duplicity --archive-dir "$tmpdir/dup-cache" "$@"; } WVSTART "import-duplicity" WVPASS "$top/dev/sync-tree" "$top/test/sampledata/" "$tmpdir/src/" export BUP_DIR="$tmpdir/bup" export GIT_DIR="$tmpdir/bup" export PASSPHRASE=bup_duplicity_passphrase WVPASS bup init WVPASS cd "$tmpdir" WVPASS mkdir duplicity WVPASS dup src file://duplicity WVPASS bup tick WVPASS touch src/new-file WVPASS dup src file://duplicity WVPASS bup import-duplicity "file://duplicity" import-duplicity WVPASSEQ $(bup ls import-duplicity/ | wc -l) 3 WVPASSEQ "$(bup ls import-duplicity/latest/ | sort)" "$(ls src | sort)" WVPASS bup restore -C restore/ import-duplicity/latest/ WVFAIL "$top/dev/compare-trees" src/ restore/ > tmp-compare-trees WVPASSEQ $(cat tmp-compare-trees | wc -l) 4 # Note: OS X rsync itemize output is currently only 9 chars, not 11. # FreeBSD may output 12 chars instead - accept 9-12 # Expect something like this (without the leading spaces): # .d..t...... ./ # .L..t...... abs-symlink -> /home/foo/bup/test/sampledata/var/abs-symlink-target # .L..t...... b -> a # .L..t...... c -> b expected_diff_rx='^\.d\.\.t\.{4,7} \./$|^\.L\.\.t\.{4,7} ' if ! grep -qE "$expected_diff_rx" tmp-compare-trees; then echo -n 'tmp-compare-trees: ' 1>&2 cat tmp-compare-trees 1>&2 fi WVPASS grep -qE "$expected_diff_rx" tmp-compare-trees WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-import-rdiff-backup000077500000000000000000000014601454333004200207140ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh || exit $? 
set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" export GIT_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } if ! [ "$(type -p rdiff-backup)" != "" ]; then WVSKIP 'Cannot find rdiff-backup; skipping test)' exit 0 fi D=rdiff-backup.tmp WVSTART "import-rdiff-backup" WVPASS bup init WVPASS cd "$tmpdir" WVPASS mkdir rdiff-backup WVPASS rdiff-backup "$top/lib/cmd" rdiff-backup WVPASS bup tick WVPASS rdiff-backup "$top/Documentation" rdiff-backup WVPASS bup import-rdiff-backup rdiff-backup import-rdiff-backup WVPASSEQ $(bup ls import-rdiff-backup/ | wc -l) 3 WVPASSEQ "$(bup ls -A import-rdiff-backup/latest/ | sort)" \ "$(ls -A "$top/Documentation" | sort)" WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-index000077500000000000000000000051121454333004200161540ustar00rootroot00000000000000#!/usr/bin/env bash . wvtest.sh . wvtest-bup.sh . dev/lib.sh set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? 
export BUP_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } WVPASS cd "$tmpdir" WVPASS bup init WVSTART "index" D=bupdata.tmp WVPASS force-delete $D WVPASS mkdir $D WVFAIL bup index --exclude-from $D/cannot-exist $D WVPASSEQ "$(bup index --check -p)" "" WVPASSEQ "$(bup index --check -p $D)" "" WVFAIL [ -e $D.fake ] WVFAIL bup index --check -u $D.fake WVPASS bup index --check -u $D WVPASSEQ "$(bup index --check -p $D)" "$D/" WVPASS touch $D/a WVPASS bup random 128k >$D/b WVPASS mkdir $D/d $D/d/e WVPASS bup random 512 >$D/f WVPASS ln -s non-existent-file $D/g WVPASSEQ "$(bup index -s $D/)" "A $D/" WVPASSEQ "$(bup index -s $D/b)" "" WVPASSEQ "$(bup index --check -us $D/b)" "A $D/b" WVPASSEQ "$(bup index --check -us $D/b $D/d)" \ "A $D/d/e/ A $D/d/ A $D/b" WVPASS touch $D/d/z WVPASS bup tick WVPASSEQ "$(bup index --check -usx $D)" \ "A $D/g A $D/f A $D/d/z A $D/d/e/ A $D/d/ A $D/b A $D/a A $D/" WVPASSEQ "$(bup index --check -us $D/a $D/b --fake-valid)" \ " $D/b $D/a" WVPASSEQ "$(bup index --check -us $D/a)" " $D/a" # stays unmodified WVPASSEQ "$(bup index --check -us $D/d --fake-valid)" \ " $D/d/z $D/d/e/ $D/d/" WVPASS touch $D/d/z WVPASS bup index -u $D/d/z # becomes modified WVPASSEQ "$(bup index -s $D/a $D $D/b)" \ "A $D/g A $D/f M $D/d/z $D/d/e/ M $D/d/ $D/b $D/a A $D/" WVPASS bup index -u $D/d/e $D/a --fake-invalid WVPASSEQ "$(cd $D && bup index -m .)" \ "./g ./f ./d/z ./d/e/ ./d/ ./a ./" WVPASSEQ "$(cd $D && bup index -m)" \ "g f d/z d/e/ d/ a ./" WVPASSEQ "$(cd $D && bup index -s .)" "$(cd $D && bup index -s .)" WVFAIL bup save -t $D/doesnt-exist-filename WVPASS mv "$BUP_DIR/bupindex" "$BUP_DIR/bi.old" WVFAIL bup save -t $D/d/e/fifotest WVPASS mkfifo $D/d/e/fifotest WVPASS bup index -u $D/d/e/fifotest WVPASS bup save -t $D/d/e/fifotest WVPASS bup save -t $D/d/e WVPASS rm -f $D/d/e/fifotest WVPASS bup index -u $D/d/e WVFAIL bup save -t $D/d/e/fifotest WVPASS mv "$BUP_DIR/bi.old" "$BUP_DIR/bupindex" WVPASS bup index -u $D/d/e WVPASS bup save -t $D/d/e WVPASSEQ "$(cd 
$D && bup index -m)" \ "g f d/z d/ a ./" WVPASS bup save -t $D/d WVPASS bup index --fake-invalid $D/d/z WVPASS bup save -t $D/d/z WVPASS bup save -t $D/d/z # test regenerating trees when no files are changed WVPASS bup save -t $D/d WVPASSEQ "$(cd $D && bup index -m)" \ "g f a ./" WVPASS bup save -r ":$BUP_DIR" -n r-test $D WVFAIL bup save -r ":$BUP_DIR/fake/path" -n r-test $D WVFAIL bup save -r ":$BUP_DIR" -n r-test $D/fake/path WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-index-check-device000077500000000000000000000044711454333004200204730ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh || exit $? . ./dev/lib.sh || exit $? set -o pipefail root_status="$(dev/root-status)" || exit $? if [ "$root_status" != root ]; then WVSKIP 'Not root: skipping --check-device tests.' exit 0 fi if test -n "$(type -p modprobe)" && ! modprobe loop; then WVSKIP 'Unable to load loopback module; skipping --check-device test.' exit 0 fi if test -z "$(type -p losetup)"; then WVSKIP 'Unable to find losetup: skipping --check-device tests.' exit 0 fi if test -z "$(type -p mke2fs)"; then WVSKIP 'Unable to find mke2fs: skipping --check-device tests.' exit 0 fi WVSTART '--check-device' top="$(pwd)" tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } srcmnt="$(WVPASS wvmkmountpt)" || exit $? tmpmnt1="$(WVPASS wvmkmountpt)" || exit $? tmpmnt2="$(WVPASS wvmkmountpt)" || exit $? WVPASS cd "$tmpdir" WVPASS dd if=/dev/zero of=testfs.img bs=1M count=32 WVPASS mke2fs -F -j -m 0 testfs.img WVPASS mount -o loop testfs.img "$tmpmnt1" # Hide, so that tests can't create risks. WVPASS chown root:root "$tmpmnt1" WVPASS chmod 0700 "$tmpmnt1" # Create trivial content. WVPASS date > "$tmpmnt1/foo" # A non-lazy umount was reported to fail on some systems WVPASS umount -l "$tmpmnt1" # Mount twice, so we'll have the same content with different devices. 
WVPASS cp -pP testfs.img testfs2.img WVPASS mount -oro,loop testfs.img "$tmpmnt1" WVPASS mount -oro,loop testfs2.img "$tmpmnt2" # Test default behavior: --check-device. WVPASS mount -oro --bind "$tmpmnt1" "$srcmnt" WVPASS bup init WVPASS bup index --fake-valid "$srcmnt" WVPASS umount "$srcmnt" WVPASS mount -oro --bind "$tmpmnt2" "$srcmnt" WVPASS bup index "$srcmnt" WVPASSEQ "$(bup index --status "$srcmnt")" \ "M $srcmnt/lost+found/ M $srcmnt/foo M $srcmnt/" WVPASS umount "$srcmnt" WVSTART '--no-check-device' WVPASS mount -oro --bind "$tmpmnt1" "$srcmnt" WVPASS bup index --clear WVPASS bup index --fake-valid "$srcmnt" WVPASS umount "$srcmnt" WVPASS mount -oro --bind "$tmpmnt2" "$srcmnt" WVPASS bup index --no-check-device "$srcmnt" WVPASS bup index --status "$srcmnt" WVPASSEQ "$(bup index --status "$srcmnt")" \ " $srcmnt/lost+found/ $srcmnt/foo $srcmnt/" WVPASS umount "$srcmnt" WVPASS umount "$tmpmnt1" WVPASS umount "$tmpmnt2" WVPASS rm -r "$tmpmnt1" "$tmpmnt2" "$tmpdir" bup-0.33.3/test/ext/test-index-clear000077500000000000000000000011171454333004200172410ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh || exit $? set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" export GIT_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } WVPASS bup init WVPASS cd "$tmpdir" WVSTART "index --clear" WVPASS mkdir src WVPASS touch src/foo src/bar WVPASS bup index -u src WVPASSEQ "$(bup index -p)" "src/foo src/bar src/ ./" WVPASS rm src/foo WVPASS bup index --clear WVPASS bup index -u src expected="$(WVPASS bup index -p)" || exit $? WVPASSEQ "$expected" "src/bar src/ ./" WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-index-save-type-change000077500000000000000000000013411454333004200213120ustar00rootroot00000000000000#!/usr/bin/env bash . wvtest.sh . wvtest-bup.sh . dev/lib.sh set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? 
export BUP_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } WVPASS cd "$tmpdir" WVPASS bup init WVPASS mkdir "$tmpdir/save" WVSTART "index vs. save file type change" # index a (dead) symlink WVPASS ln -s asdf "$tmpdir/save/test" WVPASS bup index "$tmpdir/save" WVPASS rm -f "$tmpdir/save/test" WVPASS echo now-a-regular-file > "$tmpdir/save/test" # this should now log an error WVFAIL bup save -n test "$tmpdir/save" # can list the folder but it's empty WVPASS bup ls -ls "test/latest/$tmpdir/save/" WVPASSEQ "$(bup ls -ls "test/latest/$tmpdir/save/")" "" WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-list-idx000077500000000000000000000014221454333004200166020ustar00rootroot00000000000000#!/usr/bin/env bash . wvtest-bup.sh || exit $? . dev/lib.sh || exit $? set -o pipefail TOP="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" bup() { "$TOP/bup" "$@" } WVSTART 'bup list-idx' WVPASS bup init WVPASS cd "$tmpdir" WVPASS mkdir src WVPASS bup random 1k > src/data WVPASS bup index src WVPASS bup save -n src src WVPASS bup list-idx "$BUP_DIR"/objects/pack/*.idx hash1="$(WVPASS bup list-idx "$BUP_DIR"/objects/pack/*.idx)" || exit $? hash1="${hash1##* }" WVPASS bup list-idx --find "${hash1}" "$BUP_DIR"/objects/pack/*.idx \ > list-idx.log || exit $? found="$(cat list-idx.log)" || exit $? found="${found##* }" WVPASSEQ "$found" "$hash1" WVPASSEQ "$(wc -l < list-idx.log | tr -d ' ')" 1 WVPASS rm -r "$tmpdir" bup-0.33.3/test/ext/test-ls000077500000000000000000000224421454333004200154700ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh || exit $? . dev/lib.sh || exit $? set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? 
export BUP_DIR="$tmpdir/bup" export GIT_DIR="$tmpdir/bup" if test "$BUP_TEST_REMOTE_REPO"; then ls_cmd_desc='ls -r' else ls_cmd_desc='ls' fi bup() { "$top/bup" "$@"; } bup-ls() { if test "$BUP_TEST_REMOTE_REPO"; then "$top/bup" ls -r "$BUP_DIR" "$@" else "$top/bup" ls "$@" fi } export TZ=UTC WVPASS bup init WVPASS cd "$tmpdir" WVPASS mkdir src WVPASS touch src/.dotfile src/executable WVPASS mkfifo src/fifo WVPASS "$top"/dev/mksock src/socket WVPASS bup random 1k > src/file WVPASS chmod u+x src/executable WVPASS chmod -R u=rwX,g-rwx,o-rwx . WVPASS touch -t 200910032348 src/.dotfile src/* (WVPASS cd src; WVPASS ln -s file symlink) || exit $? (WVPASS cd src; WVPASS ln -s not-there bad-symlink) || exit $? WVPASS touch -t 200910032348 src WVPASS touch -t 200910032348 . WVPASS bup index src # Include two saves to test multiple results per ref from rev_list. WVPASS bup save -n src -d 242312159 --strip src WVPASS bup save -n src -d 242312160 --strip src WVPASS bup tag some-tag src uid="$(WVPASS id -u)" || exit $? gid="$(WVPASS bup-cfg-py -c 'import os; print(os.stat("src").st_gid)')" || exit $? user="$(WVPASS id -un)" || exit $? group="$(WVPASS bup-cfg-py -c 'import grp, os; print(grp.getgrgid(os.stat("src").st_gid)[0])')" || exit $? src_commit_hash=$(git log --format=%H -n1 src) src_tree_hash=$(git log --format=%T -n1 src) WVSTART "$ls_cmd_desc (short)" (export BUP_FORCE_TTY=3; WVPASSEQ "$(WVPASS bup-ls | tr -d ' ')" src) WVPASSEQ "$(WVPASS bup-ls /)" "src" WVPASSEQ "$(WVPASS bup-ls -A /)" ".tag src" WVPASSEQ "$(WVPASS bup-ls -AF /)" ".tag/ src/" WVPASSEQ "$(WVPASS bup-ls -a /)" ". .. 
.tag src" WVPASSEQ "$(WVPASS bup-ls -aF /)" "./ ../ .tag/ src/" WVPASSEQ "$(WVPASS bup-ls /.tag)" "some-tag" WVPASSEQ "$(WVPASS bup-ls /src)" \ "1977-09-05-125559 1977-09-05-125600 latest" WVPASSEQ "$(WVPASS bup-ls src/latest)" "bad-symlink executable fifo file socket symlink" WVPASSEQ "$(WVPASS bup-ls -A src/latest)" ".dotfile bad-symlink executable fifo file socket symlink" WVPASSEQ "$(WVPASS bup-ls -a src/latest)" ". .. .dotfile bad-symlink executable fifo file socket symlink" WVPASSEQ "$(WVPASS bup-ls -F src/latest)" "bad-symlink@ executable* fifo| file socket= symlink@" WVPASSEQ "$(WVPASS bup-ls --file-type src/latest)" "bad-symlink@ executable fifo| file socket= symlink@" WVPASSEQ "$(WVPASS bup-ls -d src/latest)" "src/latest" WVSTART "$ls_cmd_desc (long)" WVPASSEQ "$(WVPASS bup-ls -l / | tr -s ' ' ' ')" \ "drwx------ $user/$group 0 2009-10-03 23:48 src" WVPASSEQ "$(WVPASS bup-ls -lA / | tr -s ' ' ' ')" \ "drwxr-xr-x ?/? 0 1970-01-01 00:00 .tag drwx------ $user/$group 0 2009-10-03 23:48 src" WVPASSEQ "$(WVPASS bup-ls -lAF / | tr -s ' ' ' ')" \ "drwxr-xr-x ?/? 0 1970-01-01 00:00 .tag/ drwx------ $user/$group 0 2009-10-03 23:48 src/" WVPASSEQ "$(WVPASS bup-ls -la / | tr -s ' ' ' ')" \ "drwxr-xr-x ?/? 0 1970-01-01 00:00 . drwxr-xr-x ?/? 0 1970-01-01 00:00 .. drwxr-xr-x ?/? 0 1970-01-01 00:00 .tag drwx------ $user/$group 0 2009-10-03 23:48 src" WVPASSEQ "$(WVPASS bup-ls -laF / | tr -s ' ' ' ')" \ "drwxr-xr-x ?/? 0 1970-01-01 00:00 ./ drwxr-xr-x ?/? 0 1970-01-01 00:00 ../ drwxr-xr-x ?/? 0 1970-01-01 00:00 .tag/ drwx------ $user/$group 0 2009-10-03 23:48 src/" socket_mode="$(WVPASS ls -l src/socket | cut -b -10)" || exit $? bad_symlink_mode="$(WVPASS ls -l src/bad-symlink | cut -b -10)" || exit $? bad_symlink_bup_info="$(WVPASS bup-ls -l src/latest | grep bad-symlink)" \ || exit $? bad_symlink_date="$(WVPASS echo "$bad_symlink_bup_info" \ | WVPASS perl -ne 'm/.*? (\d+) (\d\d\d\d-\d\d-\d\d \d\d:\d\d)/ and print $2')" \ || exit $? 
test "$bad_symlink_date" || exit 1 if test "$(uname -s)" != NetBSD; then bad_symlink_size="$(WVPASS bup-cfg-py -c "import os print(os.lstat('src/bad-symlink').st_size)")" || exit $? else # NetBSD appears to return varying sizes, so for now, just ignore it. bad_symlink_size="$(WVPASS echo "$bad_symlink_bup_info" \ | WVPASS perl -ne 'm/.*? (\d+) (\d\d\d\d-\d\d-\d\d \d\d:\d\d)/ and print $1')" \ || exit $? fi symlink_mode="$(WVPASS ls -l src/symlink | cut -b -10)" || exit $? symlink_bup_info="$(WVPASS bup-ls -l src/latest | grep -E '[^-]symlink')" \ || exit $? symlink_date="$(WVPASS echo "$symlink_bup_info" \ | WVPASS perl -ne 'm/.*? (\d+) (\d\d\d\d-\d\d-\d\d \d\d:\d\d)/ and print $2')" \ || exit $? test "$symlink_date" || exit 1 if test "$(uname -s)" != NetBSD; then symlink_size="$(WVPASS bup-cfg-py -c "import os print(os.lstat('src/symlink').st_size)")" || exit $? else # NetBSD appears to return varying sizes, so for now, just ignore it. symlink_size="$(WVPASS echo "$symlink_bup_info" \ | WVPASS perl -ne 'm/.*? (\d+) (\d\d\d\d-\d\d-\d\d \d\d:\d\d)/ and print $1')" \ || exit $? fi WVPASSEQ "$(bup-ls -l src/latest | tr -s ' ' ' ')" \ "$bad_symlink_mode $user/$group $bad_symlink_size $bad_symlink_date bad-symlink -> not-there -rwx------ $user/$group 0 2009-10-03 23:48 executable prw------- $user/$group 0 2009-10-03 23:48 fifo -rw------- $user/$group 1024 2009-10-03 23:48 file $socket_mode $user/$group 0 2009-10-03 23:48 socket $symlink_mode $user/$group $symlink_size $symlink_date symlink -> file" WVPASSEQ "$(bup-ls -la src/latest | tr -s ' ' ' ')" \ "drwx------ $user/$group 0 2009-10-03 23:48 . drwx------ $user/$group 0 2009-10-03 23:48 .. 
-rw------- $user/$group 0 2009-10-03 23:48 .dotfile $bad_symlink_mode $user/$group $bad_symlink_size $bad_symlink_date bad-symlink -> not-there -rwx------ $user/$group 0 2009-10-03 23:48 executable prw------- $user/$group 0 2009-10-03 23:48 fifo -rw------- $user/$group 1024 2009-10-03 23:48 file $socket_mode $user/$group 0 2009-10-03 23:48 socket $symlink_mode $user/$group $symlink_size $symlink_date symlink -> file" WVPASSEQ "$(bup-ls -lA src/latest | tr -s ' ' ' ')" \ "-rw------- $user/$group 0 2009-10-03 23:48 .dotfile $bad_symlink_mode $user/$group $bad_symlink_size $bad_symlink_date bad-symlink -> not-there -rwx------ $user/$group 0 2009-10-03 23:48 executable prw------- $user/$group 0 2009-10-03 23:48 fifo -rw------- $user/$group 1024 2009-10-03 23:48 file $socket_mode $user/$group 0 2009-10-03 23:48 socket $symlink_mode $user/$group $symlink_size $symlink_date symlink -> file" WVPASSEQ "$(bup-ls -lF src/latest | tr -s ' ' ' ')" \ "$bad_symlink_mode $user/$group $bad_symlink_size $bad_symlink_date bad-symlink@ -> not-there -rwx------ $user/$group 0 2009-10-03 23:48 executable* prw------- $user/$group 0 2009-10-03 23:48 fifo| -rw------- $user/$group 1024 2009-10-03 23:48 file $socket_mode $user/$group 0 2009-10-03 23:48 socket= $symlink_mode $user/$group $symlink_size $symlink_date symlink@ -> file" WVPASSEQ "$(bup-ls -l --file-type src/latest | tr -s ' ' ' ')" \ "$bad_symlink_mode $user/$group $bad_symlink_size $bad_symlink_date bad-symlink@ -> not-there -rwx------ $user/$group 0 2009-10-03 23:48 executable prw------- $user/$group 0 2009-10-03 23:48 fifo| -rw------- $user/$group 1024 2009-10-03 23:48 file $socket_mode $user/$group 0 2009-10-03 23:48 socket= $symlink_mode $user/$group $symlink_size $symlink_date symlink@ -> file" WVPASSEQ "$(bup-ls -ln src/latest | tr -s ' ' ' ')" \ "$bad_symlink_mode $uid/$gid $bad_symlink_size $bad_symlink_date bad-symlink -> not-there -rwx------ $uid/$gid 0 2009-10-03 23:48 executable prw------- $uid/$gid 0 2009-10-03 23:48 
fifo -rw------- $uid/$gid 1024 2009-10-03 23:48 file $socket_mode $uid/$gid 0 2009-10-03 23:48 socket $symlink_mode $uid/$gid $symlink_size $symlink_date symlink -> file" WVPASSEQ "$(bup-ls -ld "src/latest" | tr -s ' ' ' ')" \ "lrwxr-xr-x ?/? 17 1970-01-01 00:00 src/latest -> 1977-09-05-125600" WVSTART "$ls_cmd_desc (backup set - long)" WVPASSEQ "$(bup-ls -l --numeric-ids src | cut -d' ' -f 1-2)" \ "drwx------ $uid/$gid drwx------ $uid/$gid lrwxr-xr-x ?/?" WVPASSEQ "$(bup-ls -ds "src/1977-09-05-125600" | tr -s ' ' ' ')" \ "$src_tree_hash src/1977-09-05-125600" WVPASSEQ "$(bup-ls -ds --commit-hash "src/1977-09-05-125600" | tr -s ' ' ' ')" \ "$src_commit_hash src/1977-09-05-125600" WVSTART "$ls_cmd_desc (dates TZ != UTC)" export TZ=America/Chicago bad_symlink_date_central="$(bup-ls -l src/latest | grep bad-symlink)" bad_symlink_date_central="$(echo "$bad_symlink_date_central" \ | perl -ne 'm/.*? (\d+) (\d\d\d\d-\d\d-\d\d \d\d:\d\d)/ and print $2')" symlink_date_central="$(bup-ls -l src/latest | grep -E '[^-]symlink')" symlink_date_central="$(echo "$symlink_date_central" \ | perl -ne 'm/.*? 
(\d+) (\d\d\d\d-\d\d-\d\d \d\d:\d\d)/ and print $2')" WVPASSEQ "$(bup-ls -ln src/latest | tr -s ' ' ' ')" \ "$bad_symlink_mode $uid/$gid $bad_symlink_size $bad_symlink_date_central bad-symlink -> not-there -rwx------ $uid/$gid 0 2009-10-03 18:48 executable prw------- $uid/$gid 0 2009-10-03 18:48 fifo -rw------- $uid/$gid 1024 2009-10-03 18:48 file $socket_mode $uid/$gid 0 2009-10-03 18:48 socket $symlink_mode $uid/$gid $symlink_size $symlink_date_central symlink -> file" export TZ=UTC WVSTART "$ls_cmd_desc bad-symlink" WVPASSEQ "$(bup-ls "src/latest/bad-symlink")" "src/latest/bad-symlink" WVSTART "$ls_cmd_desc -l bad-symlink" WVPASSEQ "$(bup-ls -l src/latest/bad-symlink | tr -s ' ' ' ')" \ "$bad_symlink_mode $user/$group $bad_symlink_size $bad_symlink_date src/latest/bad-symlink -> not-there" WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-ls-remote000077500000000000000000000000751454333004200167570ustar00rootroot00000000000000#!/usr/bin/env bash BUP_TEST_REMOTE_REPO=t test/ext/test-ls bup-0.33.3/test/ext/test-main000077500000000000000000000005541454333004200157760ustar00rootroot00000000000000#!/usr/bin/env bash . wvtest-bup.sh || exit $? . dev/lib.sh || exit $? set -o pipefail TOP="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" bup() { "$TOP/bup" "$@"; } WVPASS cd "$tmpdir" WVSTART 'main' bup rc=$? WVPASSEQ "$rc" 99 # Check --x=y handling WVPASS bup --bup-dir=repo init WVPASS rm -r "$tmpdir" bup-0.33.3/test/ext/test-meta000077500000000000000000000700001454333004200157710ustar00rootroot00000000000000#!/usr/bin/env bash . wvtest-bup.sh || exit $? . dev/lib.sh || exit $? set -o pipefail root_status="$(dev/root-status)" || exit $? TOP="$(WVPASS pwd)" || exit $? export PATH="$TOP/test/bin:$PATH" tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" # Assume that mvmktempdir will always use the same dir. timestamp_resolutions="$(dev/ns-timestamp-resolutions "$tmpdir/canary")" \ || exit $? 
atime_resolution="$(echo $timestamp_resolutions | WVPASS cut -d' ' -f 1)" \ || exit $? mtime_resolution="$(echo $timestamp_resolutions | WVPASS cut -d' ' -f 2)" \ || exit $? WVPASS rm "$tmpdir/canary" bup() { "$TOP/bup" "$@" } hardlink-sets() { "$TOP/dev/hardlink-sets" "$@" } id-other-than() { "$TOP/dev/id-other-than" "$@" } # Very simple metadata tests -- create a test tree then check that bup # meta can reproduce the metadata correctly (according to bup xstat) # via create, extract, start-extract, and finish-extract. The current # tests are crude, and this does not fully test devices, varying # users/groups, acls, attrs, etc. genstat() { ( export PATH="$TOP/bin:$PATH" # pick up bup bup version # Skip atime (test elsewhere) to avoid the observer effect. WVPASS find . -print0 | WVPASS sort-z \ | WVPASS xargs -0 bup xstat \ --mtime-resolution "$mtime_resolution"ns \ --exclude-fields ctime,atime,size ) } test-src-create-extract() { # Test bup meta create/extract for ./src -> ./src-restore. # Also writes to ./src-stat and ./src-restore-stat. ( (WVPASS cd src; WVPASS genstat) > src-stat || exit $? WVPASS bup meta --create --recurse --file src.meta src # Test extract. WVPASS force-delete src-restore WVPASS mkdir src-restore WVPASS cd src-restore WVPASS bup meta --extract --file ../src.meta WVPASS test -d src (WVPASS cd src; WVPASS genstat >../../src-restore-stat) || exit $? WVPASS diff -U5 ../src-stat ../src-restore-stat # Test start/finish extract. WVPASS force-delete src WVPASS bup meta --start-extract --file ../src.meta WVPASS test -d src WVPASS bup meta --finish-extract --file ../src.meta (WVPASS cd src; WVPASS genstat >../../src-restore-stat) || exit $? WVPASS diff -U5 ../src-stat ../src-restore-stat ) } test-src-save-restore() { # Test bup save/restore metadata for ./src -> ./src-restore. Also # writes to BUP_DIR. Note that for now this just tests the # restore below src/, in order to avoid having to worry about # operations that require root (like chown /home). 
( WVPASS rm -rf "$BUP_DIR" WVPASS bup init WVPASS bup index src WVPASS bup save -t -n src src # Test extract. WVPASS force-delete src-restore WVPASS mkdir src-restore WVPASS bup restore -C src-restore "/src/latest$(pwd)/" WVPASS test -d src-restore/src WVPASS "$TOP/dev/compare-trees" -c src/ src-restore/src/ WVPASS rm -rf src.bup ) } setup-test-tree() { WVPASS "$TOP/dev/sync-tree" "$TOP/test/sampledata/" "$tmpdir/src/" # Add some hard links for the general tests. ( WVPASS cd "$tmpdir"/src WVPASS touch hardlink-target WVPASS ln hardlink-target hardlink-1 WVPASS ln hardlink-target hardlink-2 WVPASS ln hardlink-target hardlink-3 ) || exit $? # Add some trivial files for the index, modify, save tests. ( WVPASS cd "$tmpdir"/src WVPASS mkdir volatile WVPASS touch volatile/{1,2,3} ) || exit $? # Regression test for metadata sort order. Previously, these two # entries would sort in the wrong order because the metadata # entries were being sorted by mangled name, but the index isn't. WVPASS dd if=/dev/zero of="$tmpdir"/src/foo bs=1k count=33 WVPASS touch -t 201111111111 "$tmpdir"/src/foo WVPASS touch -t 201112121111 "$tmpdir"/src/foo-bar dev/mksock "$tmpdir"/src/test-socket || true } # Use the test tree to check bup meta. WVSTART 'meta --create/--extract' ( tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" WVPASS setup-test-tree WVPASS cd "$tmpdir" WVPASS test-src-create-extract # Test a top-level file (not dir). WVPASS touch src-file WVPASS bup meta -cf src-file.meta src-file WVPASS mkdir dest WVPASS cd dest WVPASS bup meta -xf ../src-file.meta WVPASS rm -r "$tmpdir" ) || exit $? # Use the test tree to check bup save/restore metadata. WVSTART 'metadata save/restore (general)' ( tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" WVPASS setup-test-tree WVPASS cd "$tmpdir" WVPASS test-src-save-restore # Test a deeper subdir/ to make sure top-level non-dir metadata is # restored correctly. 
We need at least one dir and one non-dir at # the "top-level". WVPASS test -d src/var/lib/bup WVPASS test -f src/var/lib/bup/git.py WVPASS rm -rf "$BUP_DIR" WVPASS bup init WVPASS touch -t 201111111111 src-restore # Make sure the top won't match. WVPASS bup index src WVPASS bup save -t -n src src WVPASS force-delete src-restore WVPASS bup restore -C src-restore "/src/latest$(pwd)/src/var/." WVPASS touch -t 201211111111 src-restore # Make sure the top won't match. # Check that the only difference is the top dir. WVFAIL $TOP/dev/compare-trees -c src/var/ src-restore/ > tmp-compare-trees WVPASSEQ $(cat tmp-compare-trees | wc -l) 1 # The number of rsync status characters varies, so accept any # number of trailing dots. For example OS X native rsync produces # 9, but Homebrew's produces 12, while on other platforms, 11 is # common. expected_diff_rx='^\.d\.\.t\.\.\.(\.)+ \./$' if ! grep -qE "$expected_diff_rx" tmp-compare-trees; then echo -n 'tmp-compare-trees: ' 1>&2 cat tmp-compare-trees 1>&2 fi WVPASS grep -qE "$expected_diff_rx" tmp-compare-trees WVPASS rm -r "$tmpdir" ) || exit $? # Test that we pull the index (not filesystem) metadata for any # unchanged files whenever we're saving other files in a given # directory. WVSTART 'metadata save/restore (using index metadata)' ( tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" WVPASS setup-test-tree WVPASS cd "$tmpdir" # ...for now -- might be a problem with hardlink restores that was # causing noise wrt this test. WVPASS rm -rf src/hardlink* # Pause here to keep the filesystem changes far enough away from # the first index run that bup won't cap their index timestamps # (see "bup help index" for more information). Without this # sleep, the compare-trees test below "Bup should *not* pick up # these metadata..." may fail. 
WVPASS sleep 1 WVPASS rm -rf "$BUP_DIR" WVPASS bup init WVPASS bup index src WVPASS bup save -t -n src src WVPASS force-delete src-restore-1 WVPASS mkdir src-restore-1 WVPASS bup restore -C src-restore-1 "/src/latest$(pwd)/" WVPASS test -d src-restore-1/src WVPASS "$TOP/dev/compare-trees" -c src/ src-restore-1/src/ WVPASS echo "blarg" > src/volatile/1 WVPASS cp -pP src/volatile/1 src-restore-1/src/volatile/ WVPASS bup index src # Bup should *not* pick up these metadata changes. WVPASS touch src/volatile/2 WVPASS bup save -t -n src src WVPASS force-delete src-restore-2 WVPASS mkdir src-restore-2 WVPASS bup restore -C src-restore-2 "/src/latest$(pwd)/" WVPASS test -d src-restore-2/src WVPASS "$TOP/dev/compare-trees" -c src-restore-1/src/ src-restore-2/src/ WVPASS rm -r "$tmpdir" ) || exit $? setup-hardlink-test() { WVPASS rm -rf "$tmpdir/src" "$BUP_DIR" WVPASS bup init WVPASS mkdir "$tmpdir/src" } hardlink-test-run-restore() { WVPASS force-delete src-restore WVPASS mkdir src-restore WVPASS bup restore -C src-restore "/src/latest$(pwd)/" WVPASS test -d src-restore/src } # Test hardlinks more carefully. WVSTART 'metadata save/restore (hardlinks)' ( tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" WVPASS setup-hardlink-test WVPASS cd "$tmpdir" # Test trivial case - single hardlink. ( WVPASS cd src WVPASS touch hardlink-target WVPASS ln hardlink-target hardlink-1 ) || exit $? WVPASS bup index src WVPASS bup save -t -n src src WVPASS hardlink-test-run-restore WVPASS "$TOP/dev/compare-trees" -c src/ src-restore/src/ # Test the case where the hardlink hasn't changed, but the tree # needs to be saved again. i.e. the save-cmd.py "if hashvalid:" # case. ( WVPASS cd src WVPASS echo whatever > something-new ) || exit $? WVPASS bup index src WVPASS bup save -t -n src src WVPASS hardlink-test-run-restore WVPASS "$TOP/dev/compare-trees" -c src/ src-restore/src/ # Test hardlink changes between index runs. 
# WVPASS setup-hardlink-test WVPASS cd src WVPASS touch hardlink-target-a WVPASS touch hardlink-target-b WVPASS ln hardlink-target-a hardlink-b-1 WVPASS ln hardlink-target-a hardlink-a-1 WVPASS cd .. WVPASS bup index -vv src WVPASS rm src/hardlink-b-1 WVPASS ln src/hardlink-target-b src/hardlink-b-1 WVPASS bup index -vv src WVPASS bup save -t -n src src WVPASS hardlink-test-run-restore WVPASS echo ./src/hardlink-a-1 > hardlink-sets.expected WVPASS echo ./src/hardlink-target-a >> hardlink-sets.expected WVPASS echo >> hardlink-sets.expected WVPASS echo ./src/hardlink-b-1 >> hardlink-sets.expected WVPASS echo ./src/hardlink-target-b >> hardlink-sets.expected (WVPASS cd src-restore; WVPASS hardlink-sets .) > hardlink-sets.restored \ || exit $? WVPASS diff -u hardlink-sets.expected hardlink-sets.restored # Test hardlink changes between index and save -- hardlink set [a # b c d] changes to [a b] [c d]. At least right now bup should # notice and recreate the latter. WVPASS setup-hardlink-test WVPASS cd "$tmpdir"/src WVPASS touch a WVPASS ln a b WVPASS ln a c WVPASS ln a d WVPASS cd .. WVPASS bup index -vv src WVPASS rm src/c src/d WVPASS touch src/c WVPASS ln src/c src/d WVPASS bup save -t -n src src WVPASS hardlink-test-run-restore WVPASS echo ./src/a > hardlink-sets.expected WVPASS echo ./src/b >> hardlink-sets.expected WVPASS echo >> hardlink-sets.expected WVPASS echo ./src/c >> hardlink-sets.expected WVPASS echo ./src/d >> hardlink-sets.expected (WVPASS cd src-restore; WVPASS hardlink-sets .) > hardlink-sets.restored \ || exit $? WVPASS diff -u hardlink-sets.expected hardlink-sets.restored # Test that we don't link outside restore tree. 
WVPASS setup-hardlink-test WVPASS cd "$tmpdir" WVPASS mkdir src/a src/b WVPASS touch src/a/1 WVPASS ln src/a/1 src/b/1 WVPASS bup index -vv src WVPASS bup save -t -n src src WVPASS force-delete src-restore WVPASS mkdir src-restore WVPASS bup restore -C src-restore "/src/latest$(pwd)/src/a/" WVPASS test -e src-restore/1 WVPASS echo -n > hardlink-sets.expected (WVPASS cd src-restore; WVPASS hardlink-sets .) > hardlink-sets.restored \ || exit $? WVPASS diff -u hardlink-sets.expected hardlink-sets.restored # Test that we do link within separate sub-trees. WVPASS setup-hardlink-test WVPASS cd "$tmpdir" WVPASS mkdir src/a src/b WVPASS touch src/a/1 WVPASS ln src/a/1 src/b/1 WVPASS bup index -vv src/a src/b WVPASS bup save -t -n src src/a src/b WVPASS hardlink-test-run-restore WVPASS echo ./src/a/1 > hardlink-sets.expected WVPASS echo ./src/b/1 >> hardlink-sets.expected (WVPASS cd src-restore; WVPASS hardlink-sets .) > hardlink-sets.restored \ || exit $? WVPASS diff -u hardlink-sets.expected hardlink-sets.restored WVPASS rm -r "$tmpdir" ) || exit $? WVSTART 'meta --edit' ( tmpdir="$(WVPASS wvmktempdir)" || exit $? 
WVPASS cd "$tmpdir" WVPASS mkdir src WVPASS bup meta -cf src.meta src WVPASS bup meta --edit --set-uid 0 src.meta | WVPASS bup meta -tvvf - \ | WVPASS grep -qE '^uid: 0' WVPASS bup meta --edit --set-uid 1000 src.meta | WVPASS bup meta -tvvf - \ | WVPASS grep -qE '^uid: 1000' WVPASS bup meta --edit --set-gid 0 src.meta | WVPASS bup meta -tvvf - \ | WVPASS grep -qE '^gid: 0' WVPASS bup meta --edit --set-gid 1000 src.meta | WVPASS bup meta -tvvf - \ | WVPASS grep -qE '^gid: 1000' WVPASS bup meta --edit --set-user foo src.meta | WVPASS bup meta -tvvf - \ | WVPASS grep -qE '^user: foo' WVPASS bup meta --edit --set-user bar src.meta | WVPASS bup meta -tvvf - \ | WVPASS grep -qE '^user: bar' WVPASS bup meta --edit --unset-user src.meta | WVPASS bup meta -tvvf - \ | WVPASS grep -qE '^user:' WVPASS bup meta --edit --set-user bar --unset-user src.meta \ | WVPASS bup meta -tvvf - | WVPASS grep -qE '^user:' WVPASS bup meta --edit --unset-user --set-user bar src.meta \ | WVPASS bup meta -tvvf - | WVPASS grep -qE '^user: bar' WVPASS bup meta --edit --set-group foo src.meta | WVPASS bup meta -tvvf - \ | WVPASS grep -qE '^group: foo' WVPASS bup meta --edit --set-group bar src.meta | WVPASS bup meta -tvvf - \ | WVPASS grep -qE '^group: bar' WVPASS bup meta --edit --unset-group src.meta | WVPASS bup meta -tvvf - \ | WVPASS grep -qE '^group:' WVPASS bup meta --edit --set-group bar --unset-group src.meta \ | WVPASS bup meta -tvvf - | WVPASS grep -qE '^group:' WVPASS bup meta --edit --unset-group --set-group bar src.meta \ | WVPASS bup meta -tvvf - | grep -qE '^group: bar' WVPASS rm -r "$tmpdir" ) || exit $? WVSTART 'meta --no-recurse' ( tmpdir="$(WVPASS wvmktempdir)" || exit $? 
WVPASS cd "$tmpdir" WVPASS mkdir src WVPASS mkdir src/foo WVPASS touch src/foo/{1,2,3} WVPASS bup meta -cf src.meta src WVPASSEQ "$(bup meta -tf src.meta | LC_ALL=C sort)" "src/ src/foo/ src/foo/1 src/foo/2 src/foo/3" WVPASS bup meta --no-recurse -cf src.meta src WVPASSEQ "$(bup meta -tf src.meta | LC_ALL=C sort)" "src/" WVPASS rm -r "$tmpdir" ) || exit $? # Test ownership restoration (when not root or fakeroot). ( if [ "$root_status" != none ]; then exit 0 fi tmpdir="$(WVPASS wvmktempdir)" || exit $? # FIXME: binary groups first_group="$(WVPASS bup-cfg-py -c 'import os,grp; \ print(grp.getgrgid(os.getgroups()[0])[0])')" || exit $? last_group="$(bup-cfg-py -c 'import os,grp; \ print(grp.getgrgid(os.getgroups()[-1])[0])')" || exit $? last_group_erx="$(escape-erx "$last_group")" WVSTART 'metadata (restoration of ownership)' WVPASS cd "$tmpdir" WVPASS touch src # Some systems always assign the parent dir group to new paths # (sgid). Make sure the group is one we're in. WVPASS chgrp -R "$first_group" src WVPASS bup meta -cf src.meta src WVPASS mkdir dest WVPASS cd dest # Make sure we don't change (or try to change) the user when not root. WVPASS bup meta --edit --set-user root ../src.meta | WVPASS bup meta -x WVPASS bup xstat src | WVPASS grep -qvE '^user: root' WVPASS rm -rf src WVPASS bup meta --edit --unset-user --set-uid 0 ../src.meta \ | WVPASS bup meta -x WVPASS bup xstat src | WVPASS grep -qvE '^user: root' # Make sure we can restore one of the user's groups. WVPASS rm -rf src WVPASS bup meta --edit --set-group "$last_group" ../src.meta \ | WVPASS bup meta -x WVPASS bup xstat src | WVPASS grep -qE "^group: $last_group_erx" # Make sure we can restore one of the user's gids. user_gids="$(id -G)" || exit $? last_gid="$(echo ${user_gids/* /})" || exit $? WVPASS rm -rf src WVPASS bup meta --edit --unset-group --set-gid "$last_gid" ../src.meta \ | WVPASS bup meta -x WVPASS bup xstat src | WVPASS grep -qE "^gid: $last_gid" # Test --numeric-ids (gid). 
WVPASS rm -rf src current_gidx=$(bup meta -tvvf ../src.meta | grep -ae '^gid:') || exit $? WVPASS bup meta --edit --set-group "$last_group" ../src.meta \ | WVPASS bup meta -x --numeric-ids new_gidx=$(bup xstat src | grep -ae '^gid:') || exit $? WVPASSEQ "$current_gidx" "$new_gidx" # Test that restoring an unknown user works. unknown_user=$("$TOP"/dev/unknown-owner --user) || exit $? WVPASS rm -rf src current_uidx=$(bup meta -tvvf ../src.meta | grep -ae '^uid:') || exit $? WVPASS bup meta --edit --set-user "$unknown_user" ../src.meta \ | WVPASS bup meta -x new_uidx=$(bup xstat src | grep -ae '^uid:') || exit $? WVPASSEQ "$current_uidx" "$new_uidx" # Test that restoring an unknown group works. unknown_group=$("$TOP"/dev/unknown-owner --group) || exit $? WVPASS rm -rf src current_gidx=$(bup meta -tvvf ../src.meta | grep -ae '^gid:') || exit $? WVPASS bup meta --edit --set-group "$unknown_group" ../src.meta \ | WVPASS bup meta -x new_gidx=$(bup xstat src | grep -ae '^gid:') || exit $? WVPASSEQ "$current_gidx" "$new_gidx" WVPASS rm -r "$tmpdir" ) || exit $? # Test ownership restoration (when root or fakeroot). ( if [ "$root_status" = none ]; then exit 0 fi tmpdir="$(WVPASS wvmktempdir)" || exit $? uid=$(WVPASS id -un) || exit $? gid=$(WVPASS id -gn) || exit $? WVSTART 'metadata (restoration of ownership as root)' WVPASS cd "$tmpdir" WVPASS touch src WVPASS chown "$uid:$gid" src # In case the parent dir is sgid, etc. WVPASS bup meta -cf src.meta src WVPASS mkdir dest WVPASS chmod 700 dest # so we can't accidentally do something insecure WVPASS cd dest other_uinfo="$(id-other-than --user "$uid")" || exit $? other_user="${other_uinfo%%:*}" other_uid="${other_uinfo##*:}" other_ginfo="$(id-other-than --group "$gid")" || exit $? other_group="${other_ginfo%%:*}" other_gid="${other_ginfo##*:}" # Make sure we can restore a uid (must be in /etc/passwd b/c cygwin). 
WVPASS bup meta --edit --unset-user --set-uid "$other_uid" ../src.meta \ | WVPASS bup meta -x WVPASS bup xstat src | WVPASS grep -qE "^uid: $other_uid" # Make sure we can restore a gid (must be in /etc/group b/c cygwin). WVPASS bup meta --edit --unset-group --set-gid "$other_gid" ../src.meta \ | WVPASS bup meta -x WVPASS bup xstat src | WVPASS grep -qE "^gid: $other_gid" other_uinfo2="$(id-other-than --user "$(id -un)" "$other_user")" || exit $? other_user2="${other_uinfo2%%:*}" other_user2_erx="$(escape-erx "$other_user2")" || exit $? other_uid2="${other_uinfo2##*:}" other_ginfo2="$(id-other-than --group "$(id -gn)" "$other_group")" || exit $? other_group2="${other_ginfo2%%:*}" other_group2_erx="$(escape-erx "$other_group2")" || exit $? other_gid2="${other_ginfo2##*:}" # Try to restore a user (and see that user trumps uid when uid is not 0). WVPASS bup meta --edit \ --set-uid "$other_uid" --set-user "$other_user2" ../src.meta \ | WVPASS bup meta -x WVPASS bup xstat src | WVPASS grep -qE "^user: $other_user2_erx" # Try to restore a group (and see that group trumps gid when gid is not 0). WVPASS bup meta --edit \ --set-gid "$other_gid" --set-group "$other_group2" ../src.meta \ | WVPASS bup meta -x WVPASS bup xstat src | WVPASS grep -qE "^group: $other_group2_erx" # Test --numeric-ids (uid). Note the name 'root' is not handled # specially, so we use that here as the test user name. We assume # that the root user's uid is never 42. WVPASS rm -rf src WVPASS bup meta --edit --set-user root --set-uid "$other_uid" ../src.meta \ | WVPASS bup meta -x --numeric-ids new_uidx=$(bup xstat src | grep -e '^uid:') || exit $? WVPASSEQ "$new_uidx" "uid: $other_uid" # Test --numeric-ids (gid). Note the name 'root' is not handled # specially, so we use that here as the test group name. We # assume that the root group's gid is never 42. 
WVPASS rm -rf src WVPASS bup meta --edit --set-group root --set-gid "$other_gid" ../src.meta \ | WVPASS bup meta -x --numeric-ids new_gidx=$(bup xstat src | grep -e '^gid:') || exit $? WVPASSEQ "$new_gidx" "gid: $other_gid" # Test that restoring an unknown user works. unknown_user=$("$TOP"/dev/unknown-owner --user) || exit $? WVPASS rm -rf src WVPASS bup meta --edit \ --set-uid "$other_uid" --set-user "$unknown_user" ../src.meta \ | WVPASS bup meta -x new_uidx=$(bup xstat src | grep -e '^uid:') || exit $? WVPASSEQ "$new_uidx" "uid: $other_uid" # Test that restoring an unknown group works. unknown_group=$("$TOP"/dev/unknown-owner --group) || exit $? WVPASS rm -rf src WVPASS bup meta --edit \ --set-gid "$other_gid" --set-group "$unknown_group" ../src.meta \ | WVPASS bup meta -x new_gidx=$(bup xstat src | grep -e '^gid:') || exit $? WVPASSEQ "$new_gidx" "gid: $other_gid" if ! [[ $(uname) =~ CYGWIN ]]; then # For now, skip these on Cygwin because it doesn't allow # restoring an unknown uid/gid. # Make sure a uid of 0 trumps a non-root user. WVPASS bup meta --edit --set-user "$other_user2" ../src.meta \ | WVPASS bup meta -x WVPASS bup xstat src | WVPASS grep -qvE "^user: $other_user2_erx" WVPASS bup xstat src | WVPASS grep -qE "^uid: 0" # Make sure a gid of 0 trumps a non-root group. WVPASS bup meta --edit --set-group "$other_group2" ../src.meta \ | WVPASS bup meta -x WVPASS bup xstat src | WVPASS grep -qvE "^group: $other_group2_erx" WVPASS bup xstat src | WVPASS grep -qE "^gid: 0" fi WVPASS rm -r "$tmpdir" ) || exit $? # Root-only tests that require an FS with all the trimmings: ACLs, # Linux attr, Linux xattr, etc. if [ "$root_status" = root ]; then ( # Some cleanup handled in universal-cleanup() above. # These tests are only likely to work under Linux for now # (patches welcome). [[ $(uname) =~ Linux ]] || exit 0 if ! modprobe loop; then echo 'Unable to load loopback module; skipping dependent tests.' 
1>&2 exit 0 # FIXME: allow intermixed WVSKIPs fi testfs="$(WVPASS wvmkmountpt)" || exit $? testfs_limited="$(WVPASS wvmkmountpt)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" WVSTART 'meta - general (as root)' WVPASS setup-test-tree WVPASS cd "$tmpdir" umount "$testfs" WVPASS dd if=/dev/zero of=testfs.img bs=1M count=32 # Make sure we have all the options the chattr test needs # (i.e. create a "normal" ext4 filesystem). WVPASS mke2fs -F -m 0 \ -I 256 \ -O has_journal,extent,huge_file,flex_bg,uninit_bg,dir_nlink,extra_isize \ testfs.img WVPASS mount -o loop,acl,user_xattr testfs.img "$testfs" # Hide, so that tests can't create risks. WVPASS chown root:root "$testfs" WVPASS chmod 0700 "$testfs" umount "$testfs_limited" WVPASS dd if=/dev/zero of=testfs-limited.img bs=1M count=32 WVPASS mkfs -t vfat testfs-limited.img WVPASS mount -o loop,uid=root,gid=root,umask=0077 \ testfs-limited.img "$testfs_limited" WVPASS cp -pPR src "$testfs"/src (WVPASS cd "$testfs"; WVPASS test-src-create-extract) || exit $? WVSTART 'meta - atime (as root)' WVPASS force-delete "$testfs"/src WVPASS mkdir "$testfs"/src ( WVPASS mkdir "$testfs"/src/foo WVPASS touch "$testfs"/src/bar WVPASS bup-python -c "from bup import xstat; \ x = xstat.timespec_to_nsecs((42, 0));\ xstat.utime(b'$testfs/src/foo', (x, x));\ xstat.utime(b'$testfs/src/bar', (x, x));" WVPASS cd "$testfs" WVPASS bup meta -v --create --recurse --file src.meta src WVPASS bup meta -tvf src.meta # Test extract. WVPASS force-delete src-restore WVPASS mkdir src-restore WVPASS cd src-restore WVPASS bup meta --extract --file ../src.meta WVPASSEQ "$(bup xstat --include-fields=atime src/foo)" "atime: 42" WVPASSEQ "$(bup xstat --include-fields=atime src/bar)" "atime: 42" # Test start/finish extract. 
WVPASS force-delete src WVPASS bup meta --start-extract --file ../src.meta WVPASS test -d src WVPASS bup meta --finish-extract --file ../src.meta WVPASSEQ "$(bup xstat --include-fields=atime src/foo)" "atime: 42" WVPASSEQ "$(bup xstat --include-fields=atime src/bar)" "atime: 42" ) || exit $? WVSTART 'meta - Linux attr (as root)' WVPASS force-delete "$testfs"/src WVPASS mkdir "$testfs"/src ( WVPASS touch "$testfs"/src/foo WVPASS mkdir "$testfs"/src/bar WVPASS chattr +acdeijstuADS "$testfs"/src/foo WVPASS chattr +acdeijstuADST "$testfs"/src/bar (WVPASS cd "$testfs"; WVPASS test-src-create-extract) || exit $? # Test restoration to a limited filesystem (vfat). ( WVPASS bup meta --create --recurse --file "$testfs"/src.meta \ "$testfs"/src WVPASS force-delete "$testfs_limited"/src-restore WVPASS mkdir "$testfs_limited"/src-restore WVPASS cd "$testfs_limited"/src-restore WVFAIL bup meta --extract --file "$testfs"/src.meta 2>&1 \ | WVPASS grep -e '^Linux chattr:' \ | WVPASS bup-cfg-py -c \ 'import sys; exit(not len(sys.stdin.readlines()) == 3)' ) || exit $? ) || exit $? WVSTART 'meta - Linux xattr (as root)' WVPASS force-delete "$testfs"/src WVPASS mkdir "$testfs"/src WVPASS touch "$testfs"/src/foo WVPASS mkdir "$testfs"/src/bar WVPASS attr -s foo -V bar "$testfs"/src/foo WVPASS attr -s foo -V bar "$testfs"/src/bar (WVPASS cd "$testfs"; WVPASS test-src-create-extract) || exit $? # Test restoration to a limited filesystem (vfat). ( WVPASS bup meta --create --recurse --file "$testfs"/src.meta \ "$testfs"/src WVPASS force-delete "$testfs_limited"/src-restore WVPASS mkdir "$testfs_limited"/src-restore WVPASS cd "$testfs_limited"/src-restore WVFAIL bup meta --extract --file "$testfs"/src.meta WVFAIL bup meta --extract --file "$testfs"/src.meta 2>&1 \ | WVPASS grep -e "^xattr\.set u\?'" \ | WVPASS bup-cfg-py -c \ 'import sys; exit(not len(sys.stdin.readlines()) == 2)' ) || exit $? 
WVSTART 'meta - POSIX.1e ACLs (as root)' WVPASS force-delete "$testfs"/src WVPASS mkdir "$testfs"/src WVPASS touch "$testfs"/src/foo WVPASS mkdir "$testfs"/src/bar WVPASS setfacl -m u:root:r "$testfs"/src/foo WVPASS setfacl -m u:root:r "$testfs"/src/bar (WVPASS cd "$testfs"; WVPASS test-src-create-extract) || exit $? # Test restoration to a limited filesystem (vfat). ( WVPASS bup meta --create --recurse --file "$testfs"/src.meta \ "$testfs"/src WVPASS force-delete "$testfs_limited"/src-restore WVPASS mkdir "$testfs_limited"/src-restore WVPASS cd "$testfs_limited"/src-restore WVFAIL bup meta --extract --file "$testfs"/src.meta 2>&1 \ | WVPASS grep -e '^POSIX1e ACL applyto:' \ | WVPASS bup-cfg-py -c \ 'import sys; exit(not len(sys.stdin.readlines()) == 2)' ) || exit $? WVPASS umount "$testfs" WVPASS umount "$testfs_limited" WVPASS rm -r "$testfs" "$testfs_limited" WVPASS rm -r "$tmpdir" ) || exit $? fi WVPASS rm -r "$tmpdir" bup-0.33.3/test/ext/test-meta-acls000077500000000000000000000065561454333004200167300ustar00rootroot00000000000000#!/usr/bin/env bash . wvtest.sh . wvtest-bup.sh . dev/lib.sh set -o pipefail if ! command -v getfacl > /dev/null || ! command -v setfacl > /dev/null; then WVSKIP "No getfacl and setfacl; skipping test-meta-acls" exit 0 fi top="$(WVPASS pwd)" || exit $? bup() { "$top/bup" "$@"; } compare-trees() { "$top/dev/compare-trees" "$@"; } id-other-than() { "$top/dev/id-other-than" "$@"; } if ! bup features | grep -qi 'posix acls: yes'; then WVSKIP "bup features missing POSIX ACLs; skipping test-meta-acls" exit 0 fi if ! compare-trees --features | grep -qi 'posix acls: yes'; then WVSKIP "compare-trees --features missing POSIX ACLs; skipping test-meta-acls" exit 0 fi tmpdir="$(WVPASS wvmktempdir)" || exit $? bup_dir="$tmpdir/bup" export BUP_DIR="$bup_dir" uid=$(WVPASS id -un) || exit $? other_uinfo="$(id-other-than --user "$uid")" || exit $? other_user="${other_uinfo%%:*}" other_uid="${other_uinfo##*:}" gid=$(WVPASS id -gn) || exit $? 
other_ginfo="$(id-other-than --group "$gid")" || exit $? other_group="${other_ginfo%%:*}" other_gid="${other_ginfo##*:}" WVPASS cd "$tmpdir" WVPASS mkdir src WVPASS touch src/u-r if ! setfacl -m "u:$other_user:r" src/u-r; then WVSKIP "setfacl $top/testfile failed; skipping test-meta-acls" exit 0 fi WVSTART "Basic ACL support (setup)" # file ACL_USER access acl(5) for perm in r rw rwx; do WVPASS touch src/u-"$perm" WVPASS setfacl -m "u:$other_user:$perm" src/u-"$perm" done # file ACL_GROUP access acl(5) for perm in r rw rwx; do WVPASS touch src/g-"$perm" WVPASS setfacl -m "g:$other_group:$perm" src/g-"$perm" done # directory ACL_USER access acl(5) for perm in r rw rwx; do WVPASS mkdir src/d-u-"$perm" WVPASS setfacl -m "u:$other_user:$perm" src/d-u-"$perm" done # directory ACL_GROUP access acl(5) for perm in r rw rwx; do WVPASS mkdir src/d-g-"$perm" WVPASS setfacl -m "g:$other_group:$perm" src/d-g-"$perm" done # directory ACL_USER default acl(5) for perm in r rw rwx; do WVPASS mkdir src/d-def-u-"$perm" WVPASS setfacl -d -m "u:$other_user:$perm" src/d-def-u-"$perm" done # directory ACL_GROUP default acl(5) for perm in r rw rwx; do WVPASS mkdir src/d-def-g-"$perm" WVPASS setfacl -d -m "g:$other_group:$perm" src/d-def-g-"$perm" done # directory ACL_USER access and default acl(5) for perm in r rw rwx; do WVPASS mkdir src/d-both-u-"$perm" WVPASS setfacl -m "u:$other_user:$perm" src/d-both-u-"$perm" WVPASS setfacl -d -m "u:$other_user:$perm" src/d-both-u-"$perm" done # directory ACL_GROUP access and default acl(5) for perm in r rw rwx; do WVPASS mkdir src/d-both-g-"$perm" WVPASS setfacl -m "g:$other_group:$perm" src/d-both-g-"$perm" WVPASS setfacl -d -m "g:$other_group:$perm" src/d-both-g-"$perm" done test-save-restore() { WVPASS test "$#" -eq 2 local saver="$1" restorer="$2" WVPASS rm -rf "$bup_dir" dest WVPASS "$saver" init WVPASS "$saver" index -u src WVPASS "$saver" save --strip -n acls src WVPASS "$restorer" restore -C dest acls/latest/. 
WVPASS compare-trees src/ dest/ } WVSTART "Basic ACL support (save and restore)" test-save-restore bup bup if test -z "$BUP_TEST_OTHER_BUP"; then # FIXME: need ability to "skip", without skipping this entire file WVMSG 'BUP_TEST_OTHER_BUP not set; skipping cross-version test' else test-save-restore "$BUP_TEST_OTHER_BUP" bup fi cd "$top" WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-misc000077500000000000000000000105101454333004200157760ustar00rootroot00000000000000#!/usr/bin/env bash . wvtest.sh . wvtest-bup.sh . dev/lib.sh set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } sha1sum() { "$top/dev/checksum" -t sha1 "$@"; } WVPASS cd "$tmpdir" WVSTART "init" WVPASS bup init # Be independent of git defaults or a configured defaultBranch git --git-dir "$BUP_DIR" symbolic-ref HEAD refs/heads/main D=bupdata.tmp WVPASS force-delete $D WVPASS mkdir $D WVPASS touch $D/a WVPASS bup random 128k >$D/b WVPASS mkdir $D/d $D/d/e WVPASS bup random 512 >$D/f WVPASS touch $D/d/z WVPASS touch $D/d/z WVPASS bup index $D WVPASS bup save -t $D WVSTART "bloom" WVPASS bup bloom -c $(ls -1 "$BUP_DIR"/objects/pack/*.idx|head -n1) WVPASS rm "$BUP_DIR"/objects/pack/bup.bloom WVPASS bup bloom -k 4 WVPASS bup bloom -c $(ls -1 "$BUP_DIR"/objects/pack/*.idx|head -n1) WVPASS bup bloom -d "$BUP_DIR"/objects/pack --ruin --force WVFAIL bup bloom -c $(ls -1 "$BUP_DIR"/objects/pack/*.idx|head -n1) WVPASS bup bloom --force -k 5 WVPASS bup bloom -c $(ls -1 "$BUP_DIR"/objects/pack/*.idx|head -n1) WVSTART "memtest" WVPASS bup memtest -c1 -n100 WVPASS bup memtest -c1 -n100 --existing WVSTART "save/git-fsck" ( WVPASS cd "$BUP_DIR" #git repack -Ad #git prune WVPASS bup random 4k | WVPASS bup split -b (WVPASS cd "$top/test/sampledata" && WVPASS bup save -vvn main /) || exit $? result="$(LC_ALL=C git fsck --full --strict 2>&1)" || exit $? 
n=$(echo "$result" | WVFAIL egrep -v 'dangling (commit|tree|blob)' | WVPASS tee -a /dev/stderr | WVPASS wc -l) || exit $? WVPASS [ "$n" -eq 0 ] ) || exit $? WVSTART "pack name and idx same as git" ( # reuse packs from previous test WVPASS cd "$BUP_DIR"/objects/pack/ WVPASS ls *.pack for pack in *.pack ; do bup_idx_sha=$(sha1sum $(basename $pack .pack).idx) || exit $? gitname=$(git index-pack $pack) || exit $? # make sure we named it correctly (like git) WVPASSEQ pack-$gitname.pack $pack # make sure we wrote the index correctly git_idx_sha=$(sha1sum $(basename $pack .pack).idx) || exit $? WVPASSEQ "$bup_idx_sha" "$git_idx_sha" done ) || exit $? WVSTART "ftp" WVPASS bup ftp "cat /main/latest/$tmpdir/$D/b" >$D/b.new WVPASS bup ftp "cat /main/latest/$tmpdir/$D/f" >$D/f.new WVPASS bup ftp "cat /main/latest/$tmpdir/$D/f"{,} >$D/f2.new WVPASS bup ftp "cat /main/latest/$tmpdir/$D/a" >$D/a.new WVPASSEQ "$(sha1sum <$D/b)" "$(sha1sum <$D/b.new)" WVPASSEQ "$(sha1sum <$D/f)" "$(sha1sum <$D/f.new)" WVPASSEQ "$(cat $D/f.new{,} | sha1sum)" "$(sha1sum <$D/f2.new)" WVPASSEQ "$(sha1sum <$D/a)" "$(sha1sum <$D/a.new)" WVSTART "tag" WVFAIL bup tag -d v0.n 2>/dev/null WVFAIL bup tag v0.n non-existant 2>/dev/null WVPASSEQ "$(bup tag)" "" WVPASS bup tag v0.1 main WVPASSEQ "$(bup tag)" "v0.1" WVFAIL bup tag v0.1 main WVPASS bup tag -f v0.1 main WVPASS bup tag -d v0.1 WVPASS bup tag -f -d v0.1 WVFAIL bup tag -d v0.1 WVSTART "indexfile" D=indexfile.tmp INDEXFILE=tmpindexfile.tmp WVPASS rm -f $INDEXFILE WVPASS force-delete $D WVPASS mkdir $D export BUP_DIR="$D/.bup" WVPASS bup init WVPASS touch $D/a WVPASS touch $D/b WVPASS mkdir $D/c WVPASS bup index -ux $D WVPASS bup save --strip -n bupdir $D WVPASSEQ "$(bup ls -F bupdir/latest/)" "a b c/" WVPASS bup index -f $INDEXFILE --exclude=$D/c -ux $D WVPASS bup save --strip -n indexfile -f $INDEXFILE $D WVPASSEQ "$(bup ls indexfile/latest/)" "a b" WVSTART "import-rsnapshot" D=rsnapshot.tmp export BUP_DIR="$tmpdir/$D/.bup" WVPASS force-delete $D 
WVPASS mkdir $D WVPASS bup init WVPASS mkdir -p $D/hourly.0/buptest/a WVPASS touch $D/hourly.0/buptest/a/b WVPASS mkdir -p $D/hourly.0/buptest/c/d WVPASS touch $D/hourly.0/buptest/c/d/e WVPASS true WVPASS bup import-rsnapshot $D/ WVPASSEQ "$(bup ls -F buptest/latest/)" "a/ c/" WVSTART features expect_py_ver=$(LC_CTYPE=C "$top/dev/python" \ -c 'import platform; print(platform.python_version())') \ || exit $? actual_py_ver=$(bup features | grep Python: | sed -Ee 's/ +Python: //') || exit $? WVPASSEQ "$expect_py_ver" "$actual_py_ver" WVSTART id-other-than result=$("$top/dev/id-other-than" --user 0) || exit $? WVPASS echo "$result" | WVPASS grep -qE '.*:[0-9]+$' result=$("$top/dev/id-other-than" --group 0) || exit $? WVPASS echo "$result" | WVPASS grep -qE '.*:[0-9]+$' WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-on000077500000000000000000000032511454333004200154630ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh || exit $? . ./dev/lib.sh || exit $? set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" export GIT_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } compare-trees() { "$top/dev/compare-trees" "$@"; } WVPASS bup init WVPASS cd "$tmpdir" WVSTART "index/save" WVPASS mkdir src src/foo WVPASS date > src/bar WVPASS bup random 1k > src/baz WVPASS bup on - index src WVPASS bup on - save -ctn src src > get.log WVPASSEQ $(WVPASS cat get.log | WVPASS wc -l) 2 tree_id=$(WVPASS awk 'FNR == 1' get.log) || exit $? commit_id=$(WVPASS awk 'FNR == 2' get.log) || exit $? WVPASS git ls-tree "$tree_id" WVPASS git cat-file commit "$commit_id" | head -n 1 \ | WVPASS grep "^tree $tree_id\$" WVPASS bup restore -C restore "src/latest/$(pwd)/src/." WVPASS compare-trees src/ restore/ WVPASS rm -r restore WVSTART "split" WVPASS bup on - split -ctn baz src/baz > get.log tree_id=$(WVPASS awk 'FNR == 1' get.log) || exit $? commit_id=$(WVPASS awk 'FNR == 2' get.log) || exit $? 
WVPASS git ls-tree "$tree_id" WVPASS git cat-file commit "$commit_id" | head -n 1 \ | WVPASS grep "^tree $tree_id\$" WVPASS bup join baz > restore-baz WVPASS cmp src/baz restore-baz WVSTART "index-cache" # the 'a-zA-Z0-9_' is '\w' from python, # the trailing _ is because there's no dir specified # and that should thus be empty hostname=$(uname -n) idxcache=$(echo "$hostname" | sed 's/[^@a-zA-Z0-9_]/_/g')_ # there should be an index-cache now for idx in "$tmpdir"/bup/objects/pack/*.idx ; do cachedidx="$tmpdir/bup/index-cache/$idxcache/$(basename "$idx")" WVPASS cmp "$idx" "$cachedidx" done WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-packsizelimit000077500000000000000000000014301454333004200177140ustar00rootroot00000000000000#!/usr/bin/env bash . wvtest-bup.sh || exit $? . dev/lib.sh || exit $? set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" export GIT_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } WVPASS cd "$tmpdir" WVSTART 'pack size limit' WVPASS bup init WVPASSEQ $(WVPASS find "$BUP_DIR"/objects/pack -name "*.pack" | wc -l) 0 WVPASS bup random 50k | WVPASS bup split -n foo WVPASSEQ 1 $(WVPASS find "$BUP_DIR"/objects/pack/*.pack | wc -l) rm -rf "$BUP_DIR" WVPASS bup init WVPASS git config pack.packSizeLimit 10k WVPASSEQ $(WVPASS find "$BUP_DIR"/objects/pack -name "*.pack" | wc -l) 0 WVPASS bup random 50k | WVPASS bup split -n foo WVPASS test $(WVPASS find "$BUP_DIR"/objects/pack/*.pack | wc -l) -gt 2 WVPASS rm -r "$tmpdir" bup-0.33.3/test/ext/test-redundant-saves000077500000000000000000000032071454333004200201530ustar00rootroot00000000000000#!/usr/bin/env bash # Test that running save more than once with no other changes produces # the exact same tree. # Note: we can't compare the top-level hash (i.e. the output of "save # -t" because that currently pulls the metadata for unindexed parent # directories directly from the filesystem, and the relevant atimes # may change between runs. 
So instead we extract the roots of the # indexed trees for comparison via dev/subtree-hash. . ./wvtest-bup.sh || exit $? set -o pipefail WVSTART 'all' top="$(pwd)" tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" export GIT_DIR="$BUP_DIR" bup() { "$top/bup" "$@"; } WVPASS mkdir -p "$tmpdir/src" WVPASS mkdir -p "$tmpdir/src/d" WVPASS mkdir -p "$tmpdir/src/d/e" WVPASS touch "$tmpdir/src/"{f,b,a,d} WVPASS touch "$tmpdir/src/d/z" WVPASS bup init WVPASS bup index -u "$tmpdir/src" declare -a indexed_top IFS=/ indexed_top="${tmpdir##/}" indexed_top=(${indexed_top%%/}) unset IFS tree1=$(WVPASS bup save -t "$tmpdir/src") || exit $? indexed_tree1="$(WVPASS dev/subtree-hash "$tree1" "${indexed_top[@]}" src)" \ || exit $? result="$(WVPASS cd "$tmpdir/src"; WVPASS bup index -m)" || exit $? WVPASSEQ "$result" "" tree2=$(WVPASS bup save -t "$tmpdir/src") || exit $? indexed_tree2="$(WVPASS dev/subtree-hash "$tree2" "${indexed_top[@]}" src)" \ || exit $? WVPASSEQ "$indexed_tree1" "$indexed_tree2" result="$(WVPASS bup index -s / | WVFAIL grep ^D)" || exit $? WVPASSEQ "$result" "" tree3=$(WVPASS bup save -t /) || exit $? indexed_tree3="$(WVPASS dev/subtree-hash "$tree3" "${indexed_top[@]}" src)" || exit $? WVPASSEQ "$indexed_tree1" "$indexed_tree3" WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-release-archive000077500000000000000000000017261454333004200201130ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh || exit $? . dev/lib.sh || exit $? set -o pipefail bup_make=$(< config/config.var/bup-make) WVPASS git status > /dev/null if ! git diff-index --quiet HEAD; then WVDIE "uncommitted changes; cannot continue" fi top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? 
bup() { "$top/bup" "$@"; } WVPASS cd "$tmpdir" WVPASS git clone "$top" clone for ver in 11.11 11.11.11; do WVSTART "version $ver" WVPASS cd clone WVPASS git tag "$ver" WVPASS git archive --prefix=bup-"$ver"/ -o "$tmpdir"/bup-"$ver".tgz "$ver" WVPASS cd "$tmpdir" WVPASS tar xzf bup-"$ver".tgz WVPASS cd bup-"$ver" WVPASS "$bup_make" WVPASSEQ "$ver" "$(./bup version)" WVPASS cd "$tmpdir" done WVSTART 'make check in unpacked archive' WVPASS cd bup-11.11.11 if ! "$bup_make" -j5 check > archive-tests.log 2>&1; then cat archive-tests.log 1>&2 WVPASS false fi WVPASS cd "$top" WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-restore-map-owner000077500000000000000000000064741454333004200204470ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh || exit $? . dev/lib.sh || exit $? root_status="$(dev/root-status)" || exit $? if [ "$root_status" != root ]; then WVSKIP 'Not root: skipping restore --map-* tests.' exit 0 fi top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } uid=$(WVPASS id -u) || exit $? user=$(WVPASS id -un) || exit $? gid=$(WVPASS id -g) || exit $? group=$(WVPASS id -gn) || exit $? other_uinfo=$(WVPASS dev/id-other-than --user "$user") || exit $? other_user="${other_uinfo%%:*}" other_uid="${other_uinfo##*:}" other_ginfo=$(WVPASS dev/id-other-than --group "$group" 0) || exit $? other_group="${other_ginfo%%:*}" other_gid="${other_ginfo##*:}" WVPASS bup init WVPASS cd "$tmpdir" WVSTART "restore --map-user/group/uid/gid (control)" WVPASS mkdir src WVPASS touch src/foo # Some systems assign the parent dir group to new paths. 
WVPASS chgrp -R "$group" src WVPASS bup index src WVPASS bup save -n src src WVPASS bup restore -C dest "src/latest/$(pwd)/src/" WVPASS bup xstat dest/foo > foo-xstat WVPASS grep -qE "^user: $user\$" foo-xstat WVPASS grep -qE "^uid: $uid\$" foo-xstat WVPASS grep -qE "^group: $group\$" foo-xstat WVPASS grep -qE "^gid: $gid\$" foo-xstat WVSTART "restore --map-user/group/uid/gid (user/group)" WVPASS rm -rf dest # Have to remap uid/gid too because we're root and 0 would win). WVPASS bup restore -C dest \ --map-uid "$uid=$other_uid" --map-gid "$gid=$other_gid" \ --map-user "$user=$other_user" --map-group "$group=$other_group" \ "src/latest/$(pwd)/src/" WVPASS bup xstat dest/foo > foo-xstat WVPASS grep -qE "^user: $other_user\$" foo-xstat WVPASS grep -qE "^uid: $other_uid\$" foo-xstat WVPASS grep -qE "^group: $other_group\$" foo-xstat WVPASS grep -qE "^gid: $other_gid\$" foo-xstat WVSTART "restore --map-user/group/uid/gid (user/group trumps uid/gid)" WVPASS rm -rf dest WVPASS bup restore -C dest \ --map-uid "$uid=$other_uid" --map-gid "$gid=$other_gid" \ "src/latest/$(pwd)/src/" # Should be no changes. WVPASS bup xstat dest/foo > foo-xstat WVPASS grep -qE "^user: $user\$" foo-xstat WVPASS grep -qE "^uid: $uid\$" foo-xstat WVPASS grep -qE "^group: $group\$" foo-xstat WVPASS grep -qE "^gid: $gid\$" foo-xstat WVSTART "restore --map-user/group/uid/gid (uid/gid)" WVPASS rm -rf dest WVPASS bup restore -C dest \ --map-user "$user=" --map-group "$group=" \ --map-uid "$uid=$other_uid" --map-gid "$gid=$other_gid" \ "src/latest/$(pwd)/src/" WVPASS bup xstat dest/foo > foo-xstat WVPASS grep -qE "^user: $other_user\$" foo-xstat WVPASS grep -qE "^uid: $other_uid\$" foo-xstat WVPASS grep -qE "^group: $other_group\$" foo-xstat WVPASS grep -qE "^gid: $other_gid\$" foo-xstat has_uid_gid_0=$(WVPASS bup-cfg-py -c " import grp, pwd try: pwd.getpwuid(0) grp.getgrgid(0) print('yes') except KeyError as ex: pass ") || exit $? 
if [ "$has_uid_gid_0" == yes ] then WVSTART "restore --map-user/group/uid/gid (zero uid/gid trumps all)" WVPASS rm -rf dest WVPASS bup restore -C dest \ --map-user "$user=$other_user" --map-group "$group=$other_group" \ --map-uid "$uid=0" --map-gid "$gid=0" \ "src/latest/$(pwd)/src/" WVPASS bup xstat dest/foo > foo-xstat WVPASS grep -qE "^uid: 0\$" foo-xstat WVPASS grep -qE "^gid: 0\$" foo-xstat WVPASS rm -rf "$tmpdir" fi bup-0.33.3/test/ext/test-restore-single-file000077500000000000000000000013061454333004200207250ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh || exit $? set -o pipefail WVSTART 'all' top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } WVPASS mkdir "$tmpdir/foo" WVPASS mkdir "$tmpdir/foo/bar" # Make sure a dir sorts before baz (regression test). WVPASS touch "$tmpdir/foo/baz" WVPASS WVPASS bup init WVPASS WVPASS bup index "$tmpdir/foo" WVPASS bup save -n foo "$tmpdir/foo" # Make sure the timestamps will differ if metadata isn't being restored. WVPASS bup tick WVPASS bup restore -C "$tmpdir/restore" "foo/latest/$tmpdir/foo/baz" WVPASS "$top/dev/compare-trees" "$tmpdir/foo/baz" "$tmpdir/restore/baz" WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-rm000077500000000000000000000202441454333004200154660ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh || exit $? . ./dev/lib.sh || exit $? top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" export GIT_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } compare-trees() { "$top/dev/compare-trees" "$@"; } wv_matches_rx() { local caller_file=${BASH_SOURCE[0]} local caller_line=${BASH_LINENO[0]} local src="$caller_file:$caller_line" if test $# -ne 2; then echo "! $src wv_matches_rx requires 2 arguments FAILED" 1>&2 return fi local str="$1" local rx="$2" echo "Matching:" 1>&2 || exit $? echo "$str" | sed 's/^\(.*\)/ \1/' 1>&2 || exit $? 
echo "Against:" 1>&2 || exit $? echo "$rx" | sed 's/^\(.*\)/ \1/' 1>&2 || exit $? if [[ "$str" =~ ^${rx}$ ]]; then echo "! $src regex matches ok" 1>&2 || exit $? else echo "! $src regex doesn't match FAILED" 1>&2 || exit $? fi } WVPASS bup init WVPASS cd "$tmpdir" WVSTART "rm /foo (lone branch)" WVPASS mkdir src src/foo WVPASS echo twisty-maze > src/1 WVPASS bup index src WVPASS bup save -n src src WVPASS "$top"/dev/sync-tree bup/ bup-baseline/ # FIXME: test -n WVPASS bup tick # Make sure we always get the timestamp changes below WVPASS bup rm --unsafe /src observed="$(compare-trees bup/ bup-baseline/ | LC_ALL=C sort)" || exit $? wv_matches_rx "$observed" \ '\*deleting[ ]+logs/refs/heads/src \*deleting[ ]+refs/heads/src( \.d\.\.t\.\.\.[.]*[ ]+\./)? \.d\.\.t\.\.\.[.]*[ ]+logs/refs/heads/ \.d\.\.t\.\.\.[.]*[ ]+refs/heads/( >f\+\+\+\+\+\+\+\+\+[ ]+packed-refs)?' WVSTART "rm /foo (one of many)" WVPASS rm -rf bup WVPASS mv bup-baseline bup WVPASS echo twisty-maze > src/2 WVPASS bup index src WVPASS bup save -n src-2 src WVPASS echo twisty-maze > src/3 WVPASS bup index src WVPASS bup save -n src-3 src WVPASS "$top"/dev/sync-tree bup/ bup-baseline/ WVPASS bup tick # Make sure we always get the timestamp changes below WVPASS bup rm --unsafe /src observed="$(compare-trees bup/ bup-baseline/ | LC_ALL=C sort)" || exit $? wv_matches_rx "$observed" \ "\*deleting[ ]+logs/refs/heads/src \*deleting[ ]+refs/heads/src( \.d\.\.t\.\.\.[.]*[ ]+\./)? \.d\.\.t\.\.\.[.]*[ ]+logs/refs/heads/ \.d\.\.t\.\.\.[.]*[ ]+refs/heads/( >f\+\+\+\+\+\+\+\+\+[ ]+packed-refs)?" 
WVSTART "rm /foo /bar (multiple of many)" WVPASS rm -rf bup WVPASS mv bup-baseline bup WVPASS echo twisty-maze > src/4 WVPASS bup index src WVPASS bup save -n src-4 src WVPASS echo twisty-maze > src/5 WVPASS bup index src WVPASS bup save -n src-5 src WVPASS "$top"/dev/sync-tree bup/ bup-baseline/ WVPASS bup tick # Make sure we always get the timestamp changes below WVPASS bup rm --unsafe /src-2 /src-4 observed="$(compare-trees bup/ bup-baseline/ | LC_ALL=C sort)" || exit $? wv_matches_rx "$observed" \ "\*deleting[ ]+logs/refs/heads/src-2 \*deleting[ ]+logs/refs/heads/src-4 \*deleting[ ]+refs/heads/src-2 \*deleting[ ]+refs/heads/src-4( \.d\.\.t\.\.\.[.]*[ ]+\./)? \.d\.\.t\.\.\.[.]*[ ]+logs/refs/heads/ \.d\.\.t\.\.\.[.]*[ ]+refs/heads/( >f\+\+\+\+\+\+\+\+\+[ ]+packed-refs)?" WVSTART "rm /foo /bar (all)" WVPASS rm -rf bup WVPASS mv bup-baseline bup WVPASS "$top"/dev/sync-tree bup/ bup-baseline/ WVPASS bup tick # Make sure we always get the timestamp changes below WVPASS bup rm --unsafe /src /src-2 /src-3 /src-4 /src-5 observed="$(compare-trees bup/ bup-baseline/ | LC_ALL=C sort)" || exit $? wv_matches_rx "$observed" \ "\*deleting[ ]+logs/refs/heads/src \*deleting[ ]+logs/refs/heads/src-2 \*deleting[ ]+logs/refs/heads/src-3 \*deleting[ ]+logs/refs/heads/src-4 \*deleting[ ]+logs/refs/heads/src-5 \*deleting[ ]+refs/heads/src \*deleting[ ]+refs/heads/src-2 \*deleting[ ]+refs/heads/src-3 \*deleting[ ]+refs/heads/src-4 \*deleting[ ]+refs/heads/src-5( \.d\.\.t\.\.\.[.]*[ ]+\./)? \.d\.\.t\.\.\.[.]*[ ]+logs/refs/heads/ \.d\.\.t\.\.\.[.]*[ ]+refs/heads/( >f\+\+\+\+\+\+\+\+\+[ ]+packed-refs)?" WVSTART "rm /foo/bar (lone save - equivalent to rm /foo)" WVPASS rm -rf bup bup-baseline src WVPASS bup init WVPASS mkdir src WVPASS echo twisty-maze > src/1 WVPASS bup index src WVPASS bup save -n src src WVPASS bup ls src > tmp-ls save1="$(WVPASS head -n 1 tmp-ls)" || exit $? 
WVPASS "$top"/dev/sync-tree bup/ bup-baseline/ WVPASS bup tick # Make sure we always get the timestamp changes below WVFAIL bup rm --unsafe /src/latest WVPASS bup rm --unsafe /src/"$save1" observed="$(compare-trees bup/ bup-baseline/ | LC_ALL=C sort)" || exit $? wv_matches_rx "$observed" \ "\*deleting[ ]+logs/refs/heads/src \*deleting[ ]+refs/heads/src( \.d\.\.t\.\.\.[.]*[ ]+\./)? \.d\.\.t\.\.\.[.]*[ ]+logs/refs/heads/ \.d\.\.t\.\.\.[.]*[ ]+refs/heads/( >f\+\+\+\+\+\+\+\+\+[ ]+packed-refs)?" verify-changes-caused-by-rewriting-save() { local before="$1" after="$2" tmpdir tmpdir="$(WVPASS wvmktempdir)" || exit $? (WVPASS cd "$before" && WVPASS find . | WVPASS sort) \ > "$tmpdir/before" || exit $? (WVPASS cd "$after" && WVPASS find . | WVPASS sort) \ > "$tmpdir/after" || exit $? local new_paths new_idx new_pack observed new_paths="$(WVPASS comm -13 "$tmpdir/before" "$tmpdir/after")" || exit $? new_idx="$(echo "$new_paths" | WVPASS grep -E '^\./objects/pack/pack-.*\.idx$' | cut -b 3-)" || exit $? new_pack="$(echo "$new_paths" | WVPASS grep -E '^\./objects/pack/pack-.*\.pack$' | cut -b 3-)" || exit $? wv_matches_rx "$(compare-trees "$after/" "$before/")" \ ">fcst\.\.\.[.]*[ ]+logs/refs/heads/src \.d\.\.t\.\.\.[.]*[ ]+objects/ \.d\.\.t\.\.\.[.]*[ ]+objects/pack/ >fcst\.\.\.[.]*[ ]+objects/pack/bup\.bloom >f\+\+\+\+\+\+\+[+]*[ ]+$new_idx >f\+\+\+\+\+\+\+[+]*[ ]+$new_pack \.d\.\.t\.\.\.[.]*[ ]+refs/heads/ >fc\.t\.\.\.[.]*[ ]+refs/heads/src" WVPASS rm -rf "$tmpdir" } commit-hash-n() { local n="$1" repo="$2" branch="$3" GIT_DIR="$repo" WVPASS git rev-list --reverse "$branch" \ | WVPASS awk "FNR == $n" } rm-safe-cinfo() { local n="$1" repo="$2" branch="$3" hash hash="$(commit-hash-n "$n" "$repo" "$branch")" || exit $? 
local fmt='Tree: %T%n' fmt="${fmt}Author: %an <%ae> %ai%n" fmt="${fmt}Committer: %cn <%ce> %ci%n" fmt="${fmt}%n%s%n%b" GIT_DIR="$repo" WVPASS git log -n1 --pretty=format:"$fmt" "$hash" } WVSTART 'rm /foo/BAR (setup)' WVPASS rm -rf bup bup-baseline src WVPASS bup init WVPASS mkdir src WVPASS echo twisty-maze > src/1 WVPASS bup index src WVPASS bup save -n src src WVPASS echo twisty-maze > src/2 WVPASS bup index src WVPASS bup tick WVPASS bup save -n src src WVPASS echo twisty-maze > src/3 WVPASS bup index src WVPASS bup tick WVPASS bup save -n src src WVPASS mv bup bup-baseline WVPASS bup tick # Make sure we always get the timestamp changes below WVSTART "rm /foo/BAR (first of many)" WVPASS "$top"/dev/sync-tree bup-baseline/ bup/ WVPASS bup ls src > tmp-ls victim="$(WVPASS head -n 1 tmp-ls)" || exit $? WVPASS bup rm --unsafe /src/"$victim" verify-changes-caused-by-rewriting-save bup-baseline bup observed=$(WVPASS git rev-list src | WVPASS wc -l) || exit $? WVPASSEQ 2 $observed WVPASSEQ "$(rm-safe-cinfo 1 bup src)" "$(rm-safe-cinfo 2 bup-baseline src)" WVPASSEQ "$(rm-safe-cinfo 2 bup src)" "$(rm-safe-cinfo 3 bup-baseline src)" WVSTART "rm /foo/BAR (one of many)" WVPASS "$top"/dev/sync-tree bup-baseline/ bup/ victim="$(WVPASS bup ls src | tail -n +2 | head -n 1)" || exit $? WVPASS bup rm --unsafe /src/"$victim" verify-changes-caused-by-rewriting-save bup-baseline bup observed=$(git rev-list src | wc -l) || exit $? WVPASSEQ 2 $observed WVPASSEQ "$(commit-hash-n 1 bup src)" "$(commit-hash-n 1 bup-baseline src)" WVPASSEQ "$(rm-safe-cinfo 2 bup src)" "$(rm-safe-cinfo 3 bup-baseline src)" WVSTART "rm /foo/BAR (last of many)" WVPASS "$top"/dev/sync-tree bup-baseline/ bup/ victim="$(WVPASS bup ls src | tail -n 2 | head -n 1)" || exit $? WVPASS bup rm --unsafe -vv /src/"$victim" observed="$(compare-trees bup/ bup-baseline/ | LC_ALL=C sort)" || exit $? 
wv_matches_rx "$observed" \ "\.d\.\.t\.\.\.[.]*[ ]+refs/heads/ >fc\.t\.\.\.[.]*[ ]+refs/heads/src >fcst\.\.\.[.]*[ ]+logs/refs/heads/src" observed=$(git rev-list src | wc -l) || exit $? WVPASSEQ 2 $observed WVPASSEQ "$(commit-hash-n 1 bup src)" "$(commit-hash-n 1 bup-baseline src)" WVPASSEQ "$(commit-hash-n 2 bup src)" "$(commit-hash-n 2 bup-baseline src)" # FIXME: test that committer changes when rewriting, when appropriate WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-rm-between-index-and-save000077500000000000000000000040601454333004200217140ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh || exit $? set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" D="$tmpdir/data" bup() { "$top/bup" "$@"; } WVSTART "remove file" # Fixed in commit 8585613c1f45f3e20feec00b24fc7e3a948fa23e ("Store # metadata in the index....") WVPASS mkdir "$D" WVPASS bup init WVPASS echo "content" > "$D"/foo WVPASS echo "content" > "$D"/bar WVPASS bup tick WVPASS bup index -ux "$D" WVPASS bup save -n save-fail-missing "$D" WVPASS echo "content" > "$D"/baz WVPASS bup tick WVPASS bup index -ux "$D" WVPASS rm "$D"/foo # When "bup tick" is removed above, this may fail (complete with warning), # since the ctime/mtime of "foo" might be pushed back: WVPASS bup save -n save-fail-missing "$D" # when the save-call failed, foo is missing from output, since only # then bup notices, that it was removed: WVPASSEQ "$(bup ls -A save-fail-missing/latest/$TOP/$D/)" "bar baz foo" # index/save again WVPASS bup tick WVPASS bup index -ux "$D" WVPASS bup save -n save-fail-missing "$D" # now foo is gone: WVPASSEQ "$(bup ls -A save-fail-missing/latest/$TOP/$D/)" "bar baz" # TODO: Test for racecondition between reading a file and reading its metadata? 
WVSTART "remove dir" WVPASS rm -r "$D" WVPASS mkdir "$D" WVPASS rm -r "$BUP_DIR" WVPASS bup init WVPASS mkdir "$D"/foo WVPASS mkdir "$D"/bar WVPASS bup tick WVPASS bup index -ux "$D" WVPASS bup save -n save-fail-missing "$D" WVPASS touch "$D"/bar WVPASS mkdir "$D"/baz WVPASS bup tick WVPASS bup index -ux "$D" WVPASS rmdir "$D"/foo # with directories, bup notices that foo is missing, so it fails # (complete with delayed error) WVFAIL bup save -n save-fail-missing "$D" # ...but foo is still saved since it was just fine in the index WVPASSEQ "$(bup ls -AF save-fail-missing/latest/$TOP/$D/)" "bar/ baz/ foo/" # Index again: WVPASS bup tick WVPASS bup index -ux "$D" # no non-zero-exitcode anymore: WVPASS bup save -n save-fail-missing "$D" # foo is now gone WVPASSEQ "$(bup ls -AF save-fail-missing/latest/$TOP/$D/)" "bar/ baz/" WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-save-creates-no-unrefs000077500000000000000000000007751454333004200213530ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh || exit $? WVSTART 'all' top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" export GIT_DIR="$BUP_DIR" bup() { "$top/bup" "$@"; } WVPASS mkdir -p "$tmpdir/src" WVPASS touch "$tmpdir/src/foo" WVPASS bup init WVPASS bup index "$tmpdir/src" WVPASS bup save -n src "$tmpdir/src" WVPASSEQ "$(git fsck --unreachable)" "" WVPASS bup save -n src "$tmpdir/src" WVPASSEQ "$(git fsck --unreachable)" "" WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-save-data-race000077500000000000000000000031421454333004200176230ustar00rootroot00000000000000#!/usr/bin/env bash . wvtest.sh . wvtest-bup.sh . dev/lib.sh set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? 
export BUP_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } # Inject code to coordinate test WVPASS rm -rf "$tmpdir/mod" WVPASS mkdir -p "$tmpdir/mod" cat > "$tmpdir/mod/pause_file_save.py" << EOF import os, time import bup.cmd.save def test_save_data_race_pause_save(name): if name == b'$tmpdir/save/data': with open('$tmpdir/waiting-to-save', 'w') as f: pass while os.path.exists('$tmpdir/block-save'): time.sleep(0.01) bup.cmd.save.before_saving_regular_file = test_save_data_race_pause_save EOF instrumented-bup() { PYTHONPATH="$tmpdir/mod" bup --import-py-module pause_file_save "$@" } WVPASS cd "$tmpdir" WVPASS bup init WVPASS mkdir "$tmpdir/save" WVPASS echo "some random file content" > "$tmpdir/save/data" WVPASS bup index "$tmpdir/save" WVPASS touch "$tmpdir/block-save" ( set -e while ! test -e "$tmpdir/waiting-to-save"; do "$top/dev/python" -c 'import time; time.sleep(0.01)' done echo 'truncated' > "$tmpdir/save/data" rm "$tmpdir/block-save" ) & truncator=$! trap "kill $truncator" EXIT WVPASS instrumented-bup save -n test "$tmpdir/save" meta_size=$(WVPASS bup ls -nl "test/latest/$tmpdir/save/data" | sed 's/[^ ]* [^ ]* *\([^ ]*\).*/\1/') data_size=$(git -C "$BUP_DIR" show $(WVPASS bup ls -ls "test/latest/$tmpdir/save/data" | sed 's/ .*//') | wc -c) WVPASSEQ 10 $meta_size WVPASSEQ 10 $data_size WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-save-errors000077500000000000000000000073211454333004200173210ustar00rootroot00000000000000#!/usr/bin/env bash . wvtest.sh . wvtest-bup.sh . dev/lib.sh set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? 
export BUP_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } WVPASS cd "$tmpdir" # necessary for 0 == 1970-01-01 00:00 export TZ=UTC WVSTART "init" WVPASS bup init mkdir "$tmpdir/save" for f in $(seq 9) ; do touch -t 200${f}01010000 "$tmpdir/save/$f" done mkdir "$tmpdir/save/a" touch -t 199901010000 "$tmpdir/save/a/1" WVSTART "metadata read error for a file" WVPASS bup index "$tmpdir/save" # Inject save errors while reading metadata via --import-py-module. WVPASS rm -rf "$tmpdir/mod" WVPASS mkdir -p "$tmpdir/mod" cat > "$tmpdir/mod/bup_fail_on_5.py" << EOF from bup import metadata orig_from_path = metadata.from_path def from_path(path, *args, **kw): if path.endswith(b'/5'): raise IOError('intentionally failing metadata read for .../5') return orig_from_path(path, *args, **kw) metadata.from_path = from_path EOF PYTHONPATH="$tmpdir/mod" \ bup --import-py-module bup_fail_on_5 save -n test "$tmpdir/save" # this should work anyway WVPASS bup ls -l "test/latest/$tmpdir/save" # also check the *right* data was returned lsout="$(bup ls -l "test/latest/$tmpdir/save")" for f in 1 2 3 4 6 7 8 9 ; do if ! echo "$lsout" | grep "200${f}-01-01 00:00 $f" ; then WVFAIL echo incorrect date for $f fi done # and ensure we actually failed, and the above script/hack didn't break if echo "$lsout" | grep " 5$" ; then WVFAIL echo unexpectedly stored data for file 5 fi WVSTART "metadata read error for a folder" WVPASS bup index --clear WVPASS bup index "$tmpdir/save" # Inject save errors while reading metadata via --import-py-module. 
WVPASS rm -rf "$tmpdir/mod" WVPASS mkdir -p "$tmpdir/mod" cat > "$tmpdir/mod/bup_fail_on_a.py" << EOF from bup import metadata orig_from_path = metadata.from_path def from_path(path, *args, **kw): if path.endswith(b'/a'): raise IOError('intentionally failing metadata read for .../a') return orig_from_path(path, *args, **kw) metadata.from_path = from_path EOF PYTHONPATH="$tmpdir/mod" \ bup --import-py-module bup_fail_on_a save -n test "$tmpdir/save" # this should work anyway WVPASS bup ls -l "test/latest/$tmpdir/save" if ! bup ls -l "test/latest/$tmpdir/save/a" | grep '1999-01-01 00:00 1' ; then WVFAIL unexpected date for file a/1 fi # and ensure we actually failed, and the above script/hack didn't break if ! bup ls -l "test/latest/$tmpdir/save" | grep "1970-01-01 00:00 a" ; then WVFAIL unexpected date for directory a fi WVSTART "duplicate entries" WVPASS bup index --clear WVPASS bup index "$tmpdir/save" # Inject save errors while reading metadata via --import-py-module. WVPASS rm -rf "$tmpdir/mod" WVPASS mkdir -p "$tmpdir/mod" cat > "$tmpdir/mod/bup_dup_reader_path.py" << EOF from bup import index Reader = index.Reader class DupReader(index.Reader): def filter(self, *args, **kw): for transname, ent in Reader.filter(self, *args, **kw): # duplicate a file and a folder if ent.name.endswith(b'/5') or ent.name.endswith(b'/a/'): yield transname, ent yield transname, ent index.Reader = DupReader EOF PYTHONPATH="$tmpdir/mod" \ bup --import-py-module bup_dup_reader_path save -n test "$tmpdir/save" # this should work WVPASS bup ls -l "test/latest/$tmpdir/save" # check that there are no duplicates lsout=$(bup ls -l "test/latest/$tmpdir/save") WVPASSEQ "$(echo "$lsout" | sort | uniq -d)" "" # and we should get the *right* data for each entry for f in $(seq 9) ; do if ! 
echo "$lsout" | grep "200${f}-01-01 00:00 $f" ; then WVFAIL echo incorrect metadata for $f fi done WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-save-restore000077500000000000000000000113461454333004200174720ustar00rootroot00000000000000#!/usr/bin/env bash . wvtest.sh . wvtest-bup.sh . dev/lib.sh set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } validate-local-and-remote-restore() { local src="$1" dest="$2" cmp_src="$3" cmp_dest="$4" force-delete "$dest" WVPASS bup restore -C "$dest" "$src" WVPASS "$top/dev/compare-trees" "$cmp_src" "$cmp_dest" force-delete "$dest" WVPASS bup restore -r ":$BUP_DIR" -C "$dest" "$src" WVPASS "$top/dev/compare-trees" "$cmp_src" "$cmp_dest" } WVPASS cd "$tmpdir" WVSTART "init" WVPASS bup init # Be independent of git defaults or a configured defaultBranch git --git-dir "$BUP_DIR" symbolic-ref HEAD refs/heads/main D=bupdata.tmp WVPASS force-delete $D WVPASS mkdir $D WVPASS touch $D/a WVPASS bup random 128k >$D/b WVPASS mkdir $D/d $D/d/e WVPASS bup random 512 >$D/f WVPASS touch $D/d/z WVPASS touch $D/d/z WVPASS bup index $D WVPASS bup save -t $D WVSTART "restore" WVPASS force-delete buprestore.tmp WVFAIL bup restore boink WVPASS touch "$tmpdir/$D/$D" WVPASS bup index -u "$tmpdir/$D" WVPASS bup save -n main / WVPASS bup restore -C buprestore.tmp "/main/latest/$tmpdir/$D" WVPASSEQ "$(ls buprestore.tmp)" "bupdata.tmp" WVPASS force-delete buprestore.tmp WVPASS bup restore -C buprestore.tmp "/main/latest/$tmpdir/$D/" WVPASS touch $D/non-existent-file buprestore.tmp/non-existent-file # else diff fails WVPASS diff -ur $D/ buprestore.tmp/ WVPASS force-delete buprestore.tmp WVPASS echo -n "" | WVPASS bup split -n split_empty_string.tmp WVPASS bup restore -C buprestore.tmp split_empty_string.tmp/latest/ WVPASSEQ "$(cat buprestore.tmp/data)" "" ( tmp=testrestore.tmp WVPASS force-delete $tmp WVPASS mkdir $tmp export BUP_DIR="$(pwd)/$tmp/bup" WVPASS 
WVPASS bup init WVPASS mkdir -p $tmp/src/x/y/z WVPASS bup random 8k > $tmp/src/x/y/random-1 WVPASS bup random 8k > $tmp/src/x/y/z/random-2 WVPASS bup index -u $tmp/src WVPASS bup save --strip -n foo $tmp/src WVSTART "restore /foo/latest" validate-local-and-remote-restore \ /foo/latest "$tmp/restore" \ "$tmp/src/" "$tmp/restore/latest/" WVSTART "restore /foo/latest/." WVPASS force-delete "$tmp/restore" validate-local-and-remote-restore \ /foo/latest/. "$tmp"/restore \ "$tmp"/src/ "$tmp"/restore WVSTART "restore /foo/latest/x" WVPASS force-delete "$tmp/restore" validate-local-and-remote-restore \ /foo/latest/x "$tmp"/restore \ "$tmp"/src/x/ "$tmp"/restore/x/ WVSTART "restore /foo/latest/x/" WVPASS force-delete "$tmp/restore" WVPASS bup restore -C "$tmp"/restore /foo/latest/x/ for x in "$tmp"/src/x/*; do WVPASS "$top/dev/compare-trees" "$x/" "$tmp/restore/$(basename $x)" done WVPASS force-delete "$tmp/restore" WVPASS bup restore -r ":$BUP_DIR" -C "$tmp"/restore /foo/latest/x/ for x in "$tmp"/src/x/*; do WVPASS "$top/dev/compare-trees" "$x/" "$tmp/restore/$(basename $x)" done WVSTART "restore /foo/latest/x/." WVPASS force-delete "$tmp/restore" validate-local-and-remote-restore \ /foo/latest/x/. "$tmp"/restore \ "$tmp"/src/x/ "$tmp"/restore/ ) || exit $? WVSTART "save (no index)" ( tmp=save-no-index.tmp WVPASS force-delete $tmp WVPASS mkdir $tmp export BUP_DIR="$(WVPASS pwd)/$tmp/bup" || exit $? WVPASS bup init WVFAIL bup save -n nothing / WVPASS rm -r "$tmp" ) || exit $? WVSTART "save disjoint top-level directories" ( # Resolve any symlinks involving the top top-level dirs. real_pwd="$(WVPASS resolve-parent .)" || exit $? real_tmp="$(WVPASS resolve-parent /tmp/.)" || exit $? pwd_top="$(echo $real_pwd | WVPASS awk -F "/" '{print $2}')" || exit $? tmp_top="$(echo $real_tmp | WVPASS awk -F "/" '{print $2}')" || exit $? 
if [ "$pwd_top" = "$tmp_top" ]; then echo "(running from within /$tmp_top; skipping test)" 1>&2 exit 0 # FIXME: allow intermixed WVSKIPs fi D=bupdata.tmp WVPASS force-delete $D WVPASS mkdir -p $D/x WVPASS date > $D/x/1 tmpdir2="$(WVPASS mktemp -d $real_tmp/bup-test-XXXXXXX)" || exit $? cleanup() { WVPASS rm -r "$tmpdir2"; } WVPASS trap cleanup EXIT WVPASS date > "$tmpdir2/2" export BUP_DIR="$tmpdir/bup" WVPASS test -d "$BUP_DIR" && WVPASS rm -r "$BUP_DIR" WVPASS bup init WVPASS bup index -vu $(pwd)/$D/x "$tmpdir2" WVPASS bup save -t -n src $(pwd)/$D/x "$tmpdir2" # For now, assume that "ls -a" and "sort" use the same order. actual="$(WVPASS bup ls -AF src/latest)" || exit $? expected="$(echo -e "$pwd_top/\n$tmp_top/" | WVPASS sort)" || exit $? WVPASSEQ "$actual" "$expected" ) || exit $? WVPASS cd "$top" WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-save-restore-excludes000077500000000000000000000164451454333004200213110ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh || exit $? . dev/lib.sh || exit $? set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? 
export BUP_DIR="$tmpdir/bup" export GIT_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } WVPASS cd "$tmpdir" WVSTART "index excludes bupdir" WVPASS force-delete src "$BUP_DIR" WVPASS bup init WVPASS mkdir src WVPASS touch src/a WVPASS bup random 128k >src/b WVPASS mkdir src/d src/d/e WVPASS bup random 512 >src/f WVPASS bup index -ux src WVPASS bup save -n exclude-bupdir src WVPASSEQ "$(bup ls -AF "exclude-bupdir/latest/$tmpdir/src/")" "a b d/ f" WVSTART "index --exclude" WVPASS force-delete src "$BUP_DIR" WVPASS bup init WVPASS mkdir src WVPASS touch src/a WVPASS bup random 128k >src/b WVPASS mkdir src/d src/d/e WVPASS bup random 512 >src/f WVPASS bup random 512 >src/j WVPASS bup index -ux --exclude src/d --exclude src/j src WVPASS bup save -n exclude src WVPASSEQ "$(bup ls "exclude/latest/$tmpdir/src/")" "a b f" WVPASS mkdir src/g src/h WVPASS bup index -ux --exclude src/d --exclude $tmpdir/src/g --exclude src/h \ --exclude "$tmpdir/src/j" src WVPASS bup save -n exclude src WVPASSEQ "$(bup ls "exclude/latest/$tmpdir/src/")" "a b f" WVSTART "index --exclude-from" WVPASS force-delete src "$BUP_DIR" WVPASS bup init WVPASS mkdir src WVPASS echo "src/d $tmpdir/src/g src/h src/i" > exclude-list WVPASS touch src/a WVPASS bup random 128k >src/b WVPASS mkdir src/d src/d/e WVPASS bup random 512 >src/f WVPASS mkdir src/g src/h WVPASS bup random 128k > src/i WVPASS bup index -ux --exclude-from exclude-list src WVPASS bup save -n exclude-from src WVPASSEQ "$(bup ls "exclude-from/latest/$tmpdir/src/")" "a b f" WVPASS rm exclude-list # bup index --exclude-rx ... 
# ========================== WVSTART "index --exclude-rx '^/foo' (root anchor)" WVPASS rm -rf src "$BUP_DIR" buprestore.tmp WVPASS bup init WVPASS mkdir src WVPASS touch src/a WVPASS touch src/b WVPASS mkdir src/sub1 WVPASS mkdir src/sub2 WVPASS touch src/sub1/a WVPASS touch src/sub2/b WVPASS bup index -u src --exclude-rx "^$(pwd)/src/sub1/" WVPASS bup save --strip -n bupdir src WVPASS bup restore -C buprestore.tmp /bupdir/latest/ actual="$(WVPASS cd buprestore.tmp; WVPASS find . | WVPASS sort)" || exit $? WVPASSEQ "$actual" ". ./a ./b ./sub2 ./sub2/b" WVSTART "index --exclude-rx '/foo$' (non-dir, tail anchor)" WVPASS rm -rf src "$BUP_DIR" buprestore.tmp WVPASS bup init WVPASS mkdir src WVPASS touch src/a WVPASS touch src/b WVPASS touch src/foo WVPASS mkdir src/sub WVPASS mkdir src/sub/foo WVPASS touch src/sub/foo/a WVPASS bup index -u src --exclude-rx '/foo$' WVPASS bup save --strip -n bupdir src WVPASS bup restore -C buprestore.tmp /bupdir/latest/ actual="$(WVPASS cd buprestore.tmp; WVPASS find . | WVPASS sort)" || exit $? WVPASSEQ "$actual" ". ./a ./b ./sub ./sub/foo ./sub/foo/a" WVSTART "index --exclude-rx '/foo/$' (dir, tail anchor)" WVPASS rm -rf src "$BUP_DIR" buprestore.tmp WVPASS bup init WVPASS mkdir src WVPASS touch src/a WVPASS touch src/b WVPASS touch src/foo WVPASS mkdir src/sub WVPASS mkdir src/sub/foo WVPASS touch src/sub/foo/a WVPASS bup index -u src --exclude-rx '/foo/$' WVPASS bup save --strip -n bupdir src WVPASS bup restore -C buprestore.tmp /bupdir/latest/ actual="$(WVPASS cd buprestore.tmp; WVPASS find . | WVPASS sort)" || exit $? WVPASSEQ "$actual" ". ./a ./b ./foo ./sub" WVSTART "index --exclude-rx '/foo/.' (dir content)" WVPASS rm -rf src "$BUP_DIR" buprestore.tmp WVPASS bup init WVPASS mkdir src WVPASS touch src/a WVPASS touch src/b WVPASS touch src/foo WVPASS mkdir src/sub WVPASS mkdir src/sub/foo WVPASS touch src/sub/foo/a WVPASS bup index -u src --exclude-rx '/foo/.' 
WVPASS bup save --strip -n bupdir src WVPASS bup restore -C buprestore.tmp /bupdir/latest/ actual="$(WVPASS cd buprestore.tmp; WVPASS find . | WVPASS sort)" || exit $? WVPASSEQ "$actual" ". ./a ./b ./foo ./sub ./sub/foo" # bup index --exclude-rx-from ... # =============================== WVSTART "index --exclude-rx-from" WVPASS rm -rf src "$BUP_DIR" buprestore.tmp WVPASS bup init WVPASS mkdir src WVPASS touch src/a WVPASS touch src/b WVPASS mkdir src/sub1 WVPASS mkdir src/sub2 WVPASS touch src/sub1/a WVPASS touch src/sub2/b # exclude-rx-file includes blank lines to check that we ignore them. WVPASS echo "^$(pwd)/src/sub1/ " > exclude-rx-file WVPASS bup index -u src --exclude-rx-from exclude-rx-file WVPASS bup save --strip -n bupdir src WVPASS bup restore -C buprestore.tmp /bupdir/latest/ actual="$(WVPASS cd buprestore.tmp; WVPASS find . | WVPASS sort)" || exit $? WVPASSEQ "$actual" ". ./a ./b ./sub2 ./sub2/b" # bup restore --exclude-rx ... # ============================ WVSTART "restore --exclude-rx '^/foo' (root anchor)" WVPASS rm -rf src "$BUP_DIR" buprestore.tmp WVPASS bup init WVPASS mkdir src WVPASS touch src/a WVPASS touch src/b WVPASS mkdir src/sub1 WVPASS mkdir src/sub2 WVPASS touch src/sub1/a WVPASS touch src/sub2/b WVPASS bup index -u src WVPASS bup save --strip -n bupdir src WVPASS bup restore -C buprestore.tmp --exclude-rx "^/sub1/" /bupdir/latest/ actual="$(WVPASS cd buprestore.tmp; WVPASS find . | WVPASS sort)" || exit $? WVPASSEQ "$actual" ". ./a ./b ./sub2 ./sub2/b" WVSTART "restore --exclude-rx '/foo$' (non-dir, tail anchor)" WVPASS rm -rf src "$BUP_DIR" buprestore.tmp WVPASS bup init WVPASS mkdir src WVPASS touch src/a WVPASS touch src/b WVPASS touch src/foo WVPASS mkdir src/sub WVPASS mkdir src/sub/foo WVPASS touch src/sub/foo/a WVPASS bup index -u src WVPASS bup save --strip -n bupdir src WVPASS bup restore -C buprestore.tmp --exclude-rx '/foo$' /bupdir/latest/ actual="$(WVPASS cd buprestore.tmp; WVPASS find . | WVPASS sort)" || exit $? 
WVPASSEQ "$actual" ". ./a ./b ./sub ./sub/foo ./sub/foo/a" WVSTART "restore --exclude-rx '/foo/$' (dir, tail anchor)" WVPASS rm -rf src "$BUP_DIR" buprestore.tmp WVPASS bup init WVPASS mkdir src WVPASS touch src/a WVPASS touch src/b WVPASS touch src/foo WVPASS mkdir src/sub WVPASS mkdir src/sub/foo WVPASS touch src/sub/foo/a WVPASS bup index -u src WVPASS bup save --strip -n bupdir src WVPASS bup restore -C buprestore.tmp --exclude-rx '/foo/$' /bupdir/latest/ actual="$(WVPASS cd buprestore.tmp; WVPASS find . | WVPASS sort)" || exit $? WVPASSEQ "$actual" ". ./a ./b ./foo ./sub" WVSTART "restore --exclude-rx '/foo/.' (dir content)" WVPASS rm -rf src "$BUP_DIR" buprestore.tmp WVPASS bup init WVPASS mkdir src WVPASS touch src/a WVPASS touch src/b WVPASS touch src/foo WVPASS mkdir src/sub WVPASS mkdir src/sub/foo WVPASS touch src/sub/foo/a WVPASS bup index -u src WVPASS bup save --strip -n bupdir src WVPASS bup restore -C buprestore.tmp --exclude-rx '/foo/.' /bupdir/latest/ actual="$(WVPASS cd buprestore.tmp; WVPASS find . | WVPASS sort)" || exit $? WVPASSEQ "$actual" ". ./a ./b ./foo ./sub ./sub/foo" # bup restore --exclude-rx-from ... # ================================= WVSTART "restore --exclude-rx-from" WVPASS rm -rf src "$BUP_DIR" buprestore.tmp WVPASS bup init WVPASS mkdir src WVPASS touch src/a WVPASS touch src/b WVPASS mkdir src/sub1 WVPASS mkdir src/sub2 WVPASS touch src/sub1/a WVPASS touch src/sub2/b WVPASS bup index -u src WVPASS bup save --strip -n bupdir src WVPASS echo "^/sub1/" > exclude-rx-file WVPASS bup restore -C buprestore.tmp \ --exclude-rx-from exclude-rx-file /bupdir/latest/ actual="$(WVPASS cd buprestore.tmp; WVPASS find . | WVPASS sort)" || exit $? WVPASSEQ "$actual" ". ./a ./b ./sub2 ./sub2/b" WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-save-smaller000077500000000000000000000037561454333004200174540ustar00rootroot00000000000000#!/usr/bin/env bash . wvtest.sh . wvtest-bup.sh . dev/lib.sh set -o pipefail top="$(WVPASS pwd)" || exit $? 
tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } sha1sum() { "$top/dev/checksum" -t sha1 "$@"; } WVPASS cd "$tmpdir" WVSTART "init" WVPASS bup init WVPASS mkdir "$tmpdir/save" WVPASS echo small0 > "$tmpdir/save/small" WVPASS echo bigbigbigbigbig01 > "$tmpdir/save/big1" big1sha="$(sha1sum < "$tmpdir/save/big1")" WVPASS bup index "$tmpdir/save" WVPASS bup save -vv -n test "$tmpdir/save" WVPASS mkdir "$tmpdir/restore1" WVPASS bup restore -v --outdir="$tmpdir/restore1/" "/test/latest$tmpdir/save/" WVPASS cmp "$tmpdir/restore1/small" "$tmpdir/save/small" WVPASS cmp "$tmpdir/restore1/big1" "$tmpdir/save/big1" WVSTART "save --smaller" WVPASS echo bigbigbigbigbig02 > "$tmpdir/save/big1" WVPASS echo bigbigbigbigbig03 > "$tmpdir/save/big2" WVPASS bup index "$tmpdir/save" WVPASS bup save -vv -n test --smaller=10 "$tmpdir/save" WVPASS mkdir "$tmpdir/restore2" WVPASS bup restore -v --outdir="$tmpdir/restore2/" "/test/latest$tmpdir/save/" WVPASS cmp "$tmpdir/restore2/small" "$tmpdir/save/small" # (per the original DESIGN document, we should've had the old version # of the modified large file, but really that isn't implemented) # must _not_ have this file at all WVFAIL test -f "$tmpdir/restore2/big1" # and not the new one either WVFAIL test -f "$tmpdir/restore2/big2" WVSTART "index --fake-valid / save" WVPASS echo bigbigbigbigbig02 > "$tmpdir/save/big1" WVPASS echo bigbigbigbigbig03 > "$tmpdir/save/big2" WVPASS bup index "$tmpdir/save" WVPASS bup index --fake-valid "$tmpdir/save/big1" "$tmpdir/save/big2" WVPASS bup save -vv -n test "$tmpdir/save" WVPASS mkdir "$tmpdir/restore3" WVPASS bup restore -v --outdir="$tmpdir/restore3/" "/test/latest$tmpdir/save/" WVPASS cmp "$tmpdir/restore3/small" "$tmpdir/save/small" WVPASSEQ "$(sha1sum < "$tmpdir/restore3/big1")" "$big1sha" WVPASS cmp "$tmpdir/restore3/big2" "$tmpdir/save/big2" WVPASS rm -rf "$tmpdir" 
bup-0.33.3/test/ext/test-save-strip-graft000077500000000000000000000117251454333004200202520ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh || exit $? . dev/lib.sh || exit $? set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" export GIT_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } compare-trees() { "$top/dev/compare-trees" "$@"; } WVPASS cd "$tmpdir" WVSTART "save --strip" WVPASS force-delete "$BUP_DIR" src restore WVPASS bup init WVPASS mkdir -p src/x/y/z WVPASS bup random 8k > src/x/y/random-1 WVPASS bup random 8k > src/x/y/z/random-2 WVPASS bup index -u src WVPASS bup save --strip -n foo src/x/y WVPASS bup restore -C restore /foo/latest WVPASS compare-trees src/x/y/ restore/latest/ WVSTART "save --strip-path (relative)" WVPASS force-delete "$BUP_DIR" src restore WVPASS bup init WVPASS mkdir -p src/x/y/z WVPASS bup random 8k > src/x/y/random-1 WVPASS bup random 8k > src/x/y/z/random-2 WVPASS bup index -u src WVPASS bup save --strip-path src -n foo src/x WVPASS bup restore -C restore /foo/latest WVPASS compare-trees src/ restore/latest/ WVSTART "save --strip-path (absolute)" WVPASS force-delete "$BUP_DIR" src restore WVPASS bup init WVPASS mkdir -p src/x/y/z WVPASS bup random 8k > src/x/y/random-1 WVPASS bup random 8k > src/x/y/z/random-2 WVPASS bup index -u src WVPASS bup save --strip-path "$tmpdir" -n foo src WVPASS bup restore -C restore /foo/latest WVPASS compare-trees src/ "restore/latest/src/" WVSTART "save --strip-path (no match)" if test $(WVPASS path-filesystems . | WVPASS sort -u | WVPASS wc -l) -ne 1 then # Skip the test because the attempt to restore parent dirs to the # current filesystem may fail -- i.e. running from # /foo/ext4/bar/btrfs will fail when bup tries to restore linux # attrs above btrfs to the restore tree *inside* btrfs. 
# FIXME: allow intermixed WVSKIPs echo "(running from tree with mixed filesystems; skipping test)" 1>&2 exit 0 else WVPASS force-delete "$BUP_DIR" src restore WVPASS bup init WVPASS mkdir -p src/x/y/z WVPASS bup random 8k > src/x/y/random-1 WVPASS bup random 8k > src/x/y/z/random-2 WVPASS bup index -u src WVPASS bup save --strip-path foo -n foo src/x WVPASS bup restore -C restore /foo/latest WVPASS compare-trees src/ "restore/latest/$tmpdir/src/" fi WVSTART "save --graft (empty graft points disallowed)" WVPASS force-delete "$BUP_DIR" src restore WVPASS bup init WVPASS mkdir src WVFAIL bup save --graft =/grafted -n graft-point-absolute src 2>&1 \ | WVPASS grep 'error: a graft point cannot be empty' WVFAIL bup save --graft $top/$tmp= -n graft-point-absolute src 2>&1 \ | WVPASS grep 'error: a graft point cannot be empty' WVSTART "save --graft /x/y=/a/b (relative paths)" WVPASS force-delete "$BUP_DIR" src restore WVPASS bup init WVPASS mkdir -p src/x/y/z WVPASS bup random 8k > src/x/y/random-1 WVPASS bup random 8k > src/x/y/z/random-2 WVPASS bup index -u src WVPASS bup save --graft src=x -n foo src WVPASS bup restore -C restore /foo/latest WVPASS compare-trees src/ "restore/latest/$tmpdir/x/" WVSTART "save --graft /x/y=/a/b (matching structure)" WVPASS force-delete "$BUP_DIR" src restore WVPASS bup init WVPASS mkdir -p src/x/y/z WVPASS bup random 8k > src/x/y/random-1 WVPASS bup random 8k > src/x/y/z/random-2 WVPASS bup index -u src WVPASS bup save -v --graft "$tmpdir/src/x/y=$tmpdir/src/a/b" -n foo src/x/y WVPASS bup restore -C restore /foo/latest WVPASS compare-trees src/x/y/ "restore/latest/$tmpdir/src/a/b/" WVSTART "save --graft /x/y=/a (shorter target)" WVPASS force-delete "$BUP_DIR" src restore WVPASS bup init WVPASS mkdir -p src/x/y/z WVPASS bup random 8k > src/x/y/random-1 WVPASS bup random 8k > src/x/y/z/random-2 WVPASS bup index -u src WVPASS bup save -v --graft "$tmpdir/src/x/y=/a" -n foo src/x/y WVPASS bup restore -C restore /foo/latest WVPASS compare-trees 
src/x/y/ "restore/latest/a/" WVSTART "save --graft /x=/a/b (longer target)" WVPASS force-delete "$BUP_DIR" src restore WVPASS bup init WVPASS mkdir -p src/x/y/z WVPASS bup random 8k > src/x/y/random-1 WVPASS bup random 8k > src/x/y/z/random-2 WVPASS bup index -u src WVPASS bup save -v --graft "$tmpdir/src=$tmpdir/src/a/b/c" -n foo src WVPASS bup restore -C restore /foo/latest WVPASS compare-trees src/ "restore/latest/$tmpdir/src/a/b/c/" WVSTART "save --graft /x=/ (root target)" WVPASS force-delete "$BUP_DIR" src restore WVPASS bup init WVPASS mkdir -p src/x/y/z WVPASS bup random 8k > src/x/y/random-1 WVPASS bup random 8k > src/x/y/z/random-2 WVPASS bup index -u src WVPASS bup save -v --graft "$tmpdir/src/x=/" -n foo src/x WVPASS bup restore -C restore /foo/latest WVPASS compare-trees src/x/ "restore/latest/" #WVSTART "save --graft /=/x/ (root source)" # FIXME: Not tested for now -- will require cleverness, or caution as root. WVSTART "save collision" WVPASS force-delete "$BUP_DIR" src restore WVPASS bup init WVPASS mkdir -p src/x/1 src/y/1 WVPASS bup index -u src WVFAIL bup save --strip -n foo src/x src/y 2> tmp-err.log WVPASS grep -F "error: ignoring duplicate path 1 in /" tmp-err.log WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-save-symlink-race000077500000000000000000000031741454333004200204050ustar00rootroot00000000000000#!/usr/bin/env bash . wvtest.sh . wvtest-bup.sh . dev/lib.sh set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? 
export BUP_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } # Inject code to coordinate test WVPASS rm -rf "$tmpdir/mod" WVPASS mkdir -p "$tmpdir/mod" cat > "$tmpdir/mod/pause_after_save_stat.py" << EOF import os, time import bup.cmd.save import sys def test_save_symlink_race_pause_save(name): if name == b'$tmpdir/save/link': with open('$tmpdir/waiting-after-save-stat', 'w') as f: pass while os.path.exists('$tmpdir/block-save'): time.sleep(0.01) bup.cmd.save.after_nondir_metadata_stat = test_save_symlink_race_pause_save EOF instrumented-bup() { PYTHONPATH="$tmpdir/mod" bup --import-py-module pause_after_save_stat "$@" } WVPASS cd "$tmpdir" WVPASS bup init WVPASS mkdir "$tmpdir/save" WVSTART "symlink metadata vs. content race" WVPASS ln -sf a "$tmpdir/save/link" WVPASS bup index "$tmpdir/save" WVPASS touch "$tmpdir/block-save" ( set -e while ! test -e "$tmpdir/waiting-after-save-stat"; do "$top/dev/python" -c 'import time; time.sleep(0.01)' done ln -sf abc "$tmpdir/save/link" rm "$tmpdir/block-save" ) & truncator=$! trap "kill $truncator" EXIT WVPASS instrumented-bup save -n test "$tmpdir/save" meta_tgt=$(WVPASS bup ls -ls "test/latest/$tmpdir/save/link" | sed 's/.* -> //') data_tgt=$(git -C "$BUP_DIR" show $(WVPASS bup ls -ls "test/latest/$tmpdir/save/link" | sed 's/ .*//')) WVPASSEQ abc $meta_tgt WVPASSEQ abc $data_tgt WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-save-with-valid-parent000077500000000000000000000016741454333004200213510ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh || exit $? . dev/lib.sh || exit $? set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" export GIT_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } compare-trees() { "$top/dev/compare-trees" "$@"; } WVPASS cd "$tmpdir" # Make sure that we can explicitly save a path whose parent is up to # date. 
WVSTART "save path with up to date parent" WVPASS bup init WVPASS mkdir -p src/a src/b WVPASS touch src/a/1 src/b/2 WVPASS bup index -u src WVPASS bup save -n src src WVPASS bup save -n src src/b WVPASS bup restore -C restore "src/latest/$(pwd)/" WVPASS test ! -e restore/src/a WVPASS "$top/dev/compare-trees" -c src/b/ restore/src/b/ WVPASS bup save -n src src/a/1 WVPASS rm -r restore WVPASS bup restore -C restore "src/latest/$(pwd)/" WVPASS test ! -e restore/src/b WVPASS "$top/dev/compare-trees" -c src/a/ restore/src/a/ WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-sparse-files000077500000000000000000000130001454333004200174350ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh || exit $? . dev/lib.sh || exit $? set -o pipefail mb=1048576 top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? readonly mb top tmpdir export BUP_DIR="$tmpdir/bup" export GIT_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } WVPASS cd "$tmpdir" # The 3MB guess is semi-arbitrary, but we've been informed that # Lustre, for example, uses 1MB, so guess higher than that, at least. block_size=$(bup-cfg-py -c \ "import os; print(getattr(os.stat('.'), 'st_blksize', 0)) or $mb * 3") \ || exit $? data_size=$((block_size * 20)) readonly block_size data_size WVPASS dd if=/dev/zero of=test-sparse-probe seek="$data_size" bs=1 count=1 probe_size=$(WVPASS du -k -s test-sparse-probe | WVPASS cut -f1) || exit $? if [ "$probe_size" -ge "$((data_size / 1024))" ]; then WVSKIP "no sparse support detected -- skipping tests" exit 0 fi WVSTART "sparse restore on $(current-filesystem), assuming ${block_size}B blocks" WVPASS bup init WVPASS mkdir src WVPASS dd if=/dev/zero of=src/foo seek="$data_size" bs=1 count=1 WVPASS bup index src WVPASS bup save -n src src WVSTART "sparse file restore (all sparse)" WVPASS bup restore -C restore "src/latest/$(pwd)/" restore_size=$(WVPASS du -k -s restore/src/foo | WVPASS cut -f1) || exit $? 
WVPASS [ "$restore_size" -ge "$((data_size / 1024))" ] WVPASS "$top/dev/compare-trees" -c src/ restore/src/ WVSTART "sparse file restore --no-sparse (all sparse)" WVPASS rm -r restore WVPASS bup restore --no-sparse -C restore "src/latest/$(pwd)/" restore_size=$(WVPASS du -k -s restore/src/foo | WVPASS cut -f1) || exit $? WVPASS [ "$restore_size" -ge "$((data_size / 1024))" ] WVPASS "$top/dev/compare-trees" -c src/ restore/src/ WVSTART "sparse file restore --sparse (all sparse)" WVPASS rm -r restore WVPASS bup restore --sparse -C restore "src/latest/$(pwd)/" restore_size=$(WVPASS du -k -s restore/src/foo | WVPASS cut -f1) || exit $? WVPASS [ "$restore_size" -le "$((3 * (block_size / 1024)))" ] WVPASS "$top/dev/compare-trees" -c src/ restore/src/ WVSTART "sparse file restore --sparse (sparse end)" WVPASS echo "start" > src/foo WVPASS dd if=/dev/zero of=src/foo seek="$data_size" bs=1 count=1 conv=notrunc WVPASS bup index src WVPASS bup save -n src src WVPASS rm -r restore WVPASS bup restore --sparse -C restore "src/latest/$(pwd)/" restore_size=$(WVPASS du -k -s restore/src/foo | WVPASS cut -f1) || exit $? WVPASS [ "$restore_size" -le "$((3 * (block_size / 1024)))" ] WVPASS "$top/dev/compare-trees" -c src/ restore/src/ WVSTART "sparse file restore --sparse (sparse middle)" WVPASS echo "end" >> src/foo WVPASS bup index src WVPASS bup save -n src src WVPASS rm -r restore WVPASS bup restore --sparse -C restore "src/latest/$(pwd)/" restore_size=$(WVPASS du -k -s restore/src/foo | WVPASS cut -f1) || exit $? 
WVPASS [ "$restore_size" -le "$((5 * (block_size / 1024)))" ] WVPASS "$top/dev/compare-trees" -c src/ restore/src/ WVSTART "sparse file restore --sparse (bracketed zero run in buf)" WVPASS echo 'x' > src/foo WVPASS dd if=/dev/zero bs=1 count=512 >> src/foo WVPASS echo 'y' >> src/foo WVPASS bup index src WVPASS bup save -n src src WVPASS rm -r restore WVPASS bup restore --sparse -C restore "src/latest/$(pwd)/" WVPASS "$top/dev/compare-trees" -c src/ restore/src/ WVSTART "sparse file restore --sparse (sparse start)" WVPASS dd if=/dev/zero of=src/foo seek="$data_size" bs=1 count=1 WVPASS echo "end" >> src/foo WVPASS bup index src WVPASS bup save -n src src WVPASS rm -r restore WVPASS bup restore --sparse -C restore "src/latest/$(pwd)/" restore_size=$(WVPASS du -k -s restore/src/foo | WVPASS cut -f1) || exit $? WVPASS [ "$restore_size" -le "$((5 * (block_size / 1024)))" ] WVPASS "$top/dev/compare-trees" -c src/ restore/src/ WVSTART "sparse file restore --sparse (sparse start and end)" WVPASS dd if=/dev/zero of=src/foo seek="$data_size" bs=1 count=1 WVPASS echo "middle" >> src/foo WVPASS dd if=/dev/zero of=src/foo seek=$((2 * data_size)) bs=1 count=1 conv=notrunc WVPASS bup index src WVPASS bup save -n src src WVPASS rm -r restore WVPASS bup restore --sparse -C restore "src/latest/$(pwd)/" restore_size=$(WVPASS du -k -s restore/src/foo | WVPASS cut -f1) || exit $? 
WVPASS [ "$restore_size" -le "$((5 * (block_size / 1024)))" ] WVPASS "$top/dev/compare-trees" -c src/ restore/src/ if test "$block_size" -gt $mb; then random_size="$block_size" else random_size=1M fi WVSTART "sparse file restore --sparse (random $random_size)" WVPASS bup random --seed "$RANDOM" 1M > src/foo WVPASS bup index src WVPASS bup save -n src src WVPASS rm -r restore WVPASS bup restore --sparse -C restore "src/latest/$(pwd)/" WVPASS "$top/dev/compare-trees" -c src/ restore/src/ WVSTART "sparse file restore --sparse (random sparse regions)" WVPASS rm -rf "$BUP_DIR" src WVPASS bup init WVPASS mkdir src for sparse_dataset in 0 1 2 3 4 5 6 7 8 9 do WVPASS "$top/dev/sparse-test-data" "src/foo-$sparse_dataset" done WVPASS bup index src WVPASS bup save -n src src WVPASS rm -r restore WVPASS bup restore --sparse -C restore "src/latest/$(pwd)/" WVPASS "$top/dev/compare-trees" -c src/ restore/src/ WVSTART "sparse file restore --sparse (short zero runs around boundary)" WVPASS bup-cfg-py > src/foo <noop.tmp WVPASSEQ '' "$(tags1n.tmp WVPASS bup split --noop -t <"$top/test/testfile2" >tags2tn.tmp WVPASSEQ $(find "$BUP_DIR/objects/pack" -name '*.pack' | wc -l) 0 WVSTART "split" WVPASS echo a >a.tmp WVPASS echo b >b.tmp WVPASS bup split -b a.tmp >taga.tmp WVPASS bup split -b b.tmp >tagb.tmp WVPASS cat a.tmp b.tmp | WVPASS bup split -b >tagab.tmp WVPASSEQ $(cat taga.tmp | wc -l) 1 WVPASSEQ $(cat tagb.tmp | wc -l) 1 WVPASSEQ $(cat tagab.tmp | wc -l) 1 WVPASSEQ $(cat tag[ab].tmp | wc -l) 2 WVPASSEQ "$(bup split -b a.tmp b.tmp)" "$(cat tagab.tmp)" WVPASSEQ "$(bup split -b --keep-boundaries a.tmp b.tmp)" "$(cat tag[ab].tmp)" WVPASSEQ "$(cat tag[ab].tmp | bup split -b --keep-boundaries --git-ids)" \ "$(cat tag[ab].tmp)" WVPASSEQ "$(cat tag[ab].tmp | bup split -b --git-ids)" \ "$(cat tagab.tmp)" WVPASS bup split --bench -b <"$top/test/testfile1" >tags1.tmp WVPASS bup split -vvvv -b "$top/test/testfile2" >tags2.tmp WVPASS echo -n "" | WVPASS bup split -n split_empty_string.tmp 
WVPASS bup margin WVPASS bup midx -f WVPASS bup midx --check -a WVPASS bup midx -o "$BUP_DIR/objects/pack/test1.midx" \ "$BUP_DIR"/objects/pack/*.idx WVPASS bup midx --check -a WVPASS bup midx -o "$BUP_DIR"/objects/pack/test1.midx \ "$BUP_DIR"/objects/pack/*.idx \ "$BUP_DIR"/objects/pack/*.idx WVPASS bup midx --check -a all=$(echo "$BUP_DIR"/objects/pack/*.idx "$BUP_DIR"/objects/pack/*.midx) WVPASS bup midx -o "$BUP_DIR"/objects/pack/zzz.midx $all WVPASS bup tick WVPASS bup midx -o "$BUP_DIR"/objects/pack/yyy.midx $all WVPASS bup midx -a WVPASSEQ "$(echo "$BUP_DIR"/objects/pack/*.midx)" \ ""$BUP_DIR"/objects/pack/yyy.midx" WVPASS bup margin WVPASS bup split -t "$top/test/testfile2" >tags2t.tmp WVPASS bup split -t "$top/test/testfile2" --fanout 3 >tags2tf.tmp WVPASS bup split -r "$BUP_DIR" -c "$top/test/testfile2" >tags2c.tmp WVPASS bup split -r ":$BUP_DIR" -c "$top/test/testfile2" >tags2c.tmp WVPASS ls -lR \ | WVPASS bup split -r ":$BUP_DIR" -c --fanout 3 --max-pack-objects 3 -n lslr \ || exit $? 
WVPASS bup ls WVFAIL bup ls /does-not-exist WVPASS bup ls /lslr WVPASS bup ls /lslr/latest WVPASS bup ls /lslr/latest/ #WVPASS bup ls /lslr/1971-01-01 # all dates always exist WVFAIL diff -u tags1.tmp tags2.tmp WVPASS diff -u tags1.tmp tags1n.tmp WVPASS diff -u tags2t.tmp tags2tn.tmp # fanout must be different from non-fanout WVFAIL diff tags2t.tmp tags2tf.tmp WVPASS wc -c "$top/test/testfile1" "$top/test/testfile2" WVPASS wc -l tags1.tmp tags2.tmp WVSTART "join" WVPASS bup join $(cat tags1.tmp) >out1.tmp WVPASS bup join out2.tmp WVPASS bup join out2c.tmp WVPASS bup join -r ":$BUP_DIR" out2c.tmp WVPASS diff -u "$top/test/testfile1" out1.tmp WVPASS diff -u "$top/test/testfile2" out2.tmp WVPASS diff -u "$top/test/testfile2" out2t.tmp WVPASS diff -u "$top/test/testfile2" out2c.tmp WVPASSEQ "$(bup join split_empty_string.tmp)" "" WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-tz000077500000000000000000000010771454333004200155100ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh || exit $? set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" export GIT_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } WVSTART "half hour TZ" export TZ=ACDT-10:30 WVPASS bup init WVPASS cd "$tmpdir" WVPASS mkdir src WVPASS bup index src WVPASS bup save -n src -d 1420164180 src WVPASSEQ "$(WVPASS git cat-file commit src | sed -ne 's/^author .*> //p')" \ "1420164180 +1030" WVPASSEQ "$(WVPASS bup ls /src)" \ "2015-01-02-123300 latest" WVPASS rm -rf "$tmpdir" bup-0.33.3/test/ext/test-web000077500000000000000000000036041454333004200156260ustar00rootroot00000000000000#!/usr/bin/env bash . wvtest-bup.sh || exit $? . dev/lib.sh || exit $? set -o pipefail TOP="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" bup() { "$TOP/bup" "$@" } wait-for-server-start() { curl --unix-socket ./socket http://localhost/ curl_status=$? 
while test $curl_status -eq 7; do sleep 0.2 curl --unix-socket ./socket http://localhost/ curl_status=$? done WVPASSEQ $curl_status 0 } WVPASS cd "$tmpdir" if test -z "$(type -p curl)"; then WVSKIP 'curl does not appear to be installed; skipping test' exit 0 fi WVPASS bup-cfg-py -c "import socket as s; s.socket(s.AF_UNIX).bind('socket')" curl -s --unix-socket ./socket http://localhost/foo if test $? -ne 7; then WVSKIP 'curl does not appear to support --unix-socket; skipping test' exit 0 fi if ! bup-python -c 'import tornado' 2> /dev/null; then WVSKIP 'unable to import tornado; skipping test' exit 0 fi WVSTART 'web' WVPASS bup init WVPASS mkdir src WVPASS echo '¡excitement!' > src/data WVPASS echo -e 'whee \x80\x90\xff' > "$(echo -ne 'src/whee \x80\x90\xff')" WVPASS bup index src WVPASS bup save -n '¡excitement!' --strip src "$TOP/bup" web unix://socket bup-web.log 2>&1 & web_pid=$! # output the log if something fails trap 'cat bup-web.log' EXIT wait-for-server-start WVPASS curl --unix-socket ./socket \ 'http://localhost/%C2%A1excitement%21/latest/data' > result WVPASS curl --unix-socket ./socket \ 'http://localhost/%C2%A1excitement%21/latest/whee%20%80%90%ff' > result2 WVPASSEQ "$(curl --unix-socket ./socket http://localhost/static/styles.css)" \ "$(cat "$TOP/lib/web/static/styles.css")" WVPASSEQ '¡excitement!' "$(cat result)" WVPASS cmp "$(echo -ne 'src/whee \x80\x90\xff')" result2 WVPASS kill -s TERM "$web_pid" WVPASS wait "$web_pid" trap - EXIT WVPASS rm -r "$tmpdir" bup-0.33.3/test/ext/test-xdev000077500000000000000000000072461454333004200160250ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh || exit $? set -o pipefail root_status="$(dev/root-status)" || exit $? if [ "$root_status" != root ]; then WVSKIP 'not root: skipping tests' exit 0 fi if ! modprobe loop; then WVSKIP 'unable to load loopback module; skipping tests' exit 0 fi # These tests are only likely to work under Linux for now # (patches welcome). if ! 
[[ $(uname) =~ Linux ]]; then WVSKIP 'not Linux: skipping tests' exit 0 fi top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" export GIT_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } WVPASS bup init WVPASS pushd "$tmpdir" WVSTART 'drecurse' WVPASS dd if=/dev/zero of=testfs-1.img bs=1M count=32 WVPASS dd if=/dev/zero of=testfs-2.img bs=1M count=32 WVPASS mkfs -F testfs-1.img # Don't care what type (though must have symlinks) WVPASS mkfs -F testfs-2.img # Don't care what type (though must have symlinks) WVPASS mkdir -p src/mnt-1/hidden-1 src/mnt-2/hidden-2 WVPASS mount -o loop testfs-1.img src/mnt-1 WVPASS mount -o loop testfs-2.img src/mnt-2 WVPASS touch src/1 WVPASS mkdir -p src/mnt-1/x WVPASS touch src/mnt-1/2 src/mnt-1/x/3 WVPASS touch src/mnt-2/4 (WVPASS cd src && WVPASS ln -s mnt-2 mnt-link) (WVPASS cd src && WVPASS ln -s . top) WVPASSEQ "$(bup drecurse src | grep -vF lost+found)" "src/top src/mnt-link src/mnt-2/4 src/mnt-2/ src/mnt-1/x/3 src/mnt-1/x/ src/mnt-1/2 src/mnt-1/ src/1 src/" WVPASSEQ "$(bup drecurse -x src)" "src/top src/mnt-link src/mnt-2/ src/mnt-1/ src/1 src/" WVSTART 'index/save/restore' WVPASS bup index src WVPASS bup save -n src src WVPASS mkdir src-restore WVPASS bup restore -C src-restore "/src/latest$(pwd)/" WVPASS test -d src-restore/src WVPASS "$top/dev/compare-trees" -c src/ src-restore/src/ # Test -x when none of the mount points are explicitly indexed WVPASS rm -r "$BUP_DIR" src-restore WVPASS bup init WVPASS bup index -x src WVPASS bup save -n src src WVPASS mkdir src-restore WVPASS bup restore -C src-restore "/src/latest$(pwd)/" WVPASS test -d src-restore/src WVPASSEQ "$(cd src-restore/src && find . -not -name lost+found | LC_ALL=C sort)" \ ". ./1 ./mnt-1 ./mnt-2 ./mnt-link ./top" # Test -x when a mount point is explicitly indexed. This should # include the mount. 
WVPASS rm -r "$BUP_DIR" src-restore WVPASS bup init WVPASS bup index -x src src/mnt-2 WVPASS bup save -n src src WVPASS mkdir src-restore WVPASS bup restore -C src-restore "/src/latest$(pwd)/" WVPASS test -d src-restore/src WVPASSEQ "$(cd src-restore/src && find . -not -name lost+found | LC_ALL=C sort)" \ ". ./1 ./mnt-1 ./mnt-2 ./mnt-2/4 ./mnt-link ./top" # Test -x when a direct link to a mount point is explicitly indexed. # This should *not* include the mount. WVPASS rm -r "$BUP_DIR" src-restore WVPASS bup init WVPASS bup index -x src src/mnt-link WVPASS bup save -n src src WVPASS mkdir src-restore WVPASS bup restore -C src-restore "/src/latest$(pwd)/" WVPASS test -d src-restore/src WVPASSEQ "$(cd src-restore/src && find . -not -name lost+found | LC_ALL=C sort)" \ ". ./1 ./mnt-1 ./mnt-2 ./mnt-link ./top" # Test -x when a path that resolves to a mount point is explicitly # indexed (i.e. dir symlnks that redirect the leaf to a mount point). # This should include the mount. WVPASS rm -r "$BUP_DIR" src-restore WVPASS bup init WVPASS bup index -x src src/top/top/mnt-2 WVPASS bup save -n src src WVPASS mkdir src-restore WVPASS bup restore -C src-restore "/src/latest$(pwd)/" WVPASS test -d src-restore/src WVPASSEQ "$(cd src-restore/src && find . -not -name lost+found | LC_ALL=C sort)" \ ". 
./1 ./mnt-1 ./mnt-2 ./mnt-2/4 ./mnt-link ./top" WVPASS cd "$top" WVPASS umount "$tmpdir/src/mnt-1" WVPASS umount "$tmpdir/src/mnt-2" WVPASS rm -r "$tmpdir" bup-0.33.3/test/ext/test_argv.py000066400000000000000000000007431454333004200165170ustar00rootroot00000000000000 from __future__ import absolute_import, print_function from random import randint from subprocess import CalledProcessError, check_output from sys import stderr, stdout from test.lib.wvpytest import wvpasseq def rand_bytes(n): return bytes([randint(1, 255) for x in range(n)]) def test_argv(): for trial in range(100): cmd = [b'dev/echo-argv-bytes', rand_bytes(randint(1, 32))] out = check_output(cmd) wvpasseq(b'\0\n'.join(cmd) + b'\0\n', out) bup-0.33.3/test/ext/test_ftp.py000066400000000000000000000122251454333004200163470ustar00rootroot00000000000000 from os import chdir, mkdir, symlink, unlink from subprocess import PIPE from time import localtime, strftime, tzset import re from bup.compat import environ from bup.helpers import unlink as unlink_if_exists from buptest import ex, exo from wvpytest import wvfail, wvpass, wvpasseq, wvpassne, wvstart import bup.path bup_cmd = bup.path.exe() def bup(*args, **kwargs): if 'stdout' not in kwargs: return exo((bup_cmd,) + args, **kwargs) return ex((bup_cmd,) + args, **kwargs) def jl(*lines): return b''.join(line + b'\n' for line in lines) def match_rx_grp(rx, expected, src): match = re.fullmatch(rx, src) wvpass(match, 're.fullmatch(%r, %r)' % (rx, src)) if not match: return wvpasseq(expected, match.groups()) environ[b'GIT_AUTHOR_NAME'] = b'bup test' environ[b'GIT_COMMITTER_NAME'] = b'bup test' environ[b'GIT_AUTHOR_EMAIL'] = b'bup@a425bc70a02811e49bdf73ee56450e6f' environ[b'GIT_COMMITTER_EMAIL'] = b'bup@a425bc70a02811e49bdf73ee56450e6f' def test_ftp(tmpdir): environ[b'BUP_DIR'] = tmpdir + b'/repo' environ[b'GIT_DIR'] = tmpdir + b'/repo' environ[b'TZ'] = b'UTC' tzset() chdir(tmpdir) mkdir(b'src') chdir(b'src') mkdir(b'dir') with open(b'file-1', 'wb') as f: 
f.write(b'excitement!\n') with open(b'dir/file-2', 'wb') as f: f.write(b'more excitement!\n') symlink(b'file-1', b'file-symlink') symlink(b'dir', b'dir-symlink') symlink(b'not-there', b'bad-symlink') chdir(tmpdir) bup(b'init') bup(b'index', b'src') bup(b'save', b'-n', b'src', b'--strip', b'src') save_utc = int(exo((b'git', b'show', b'-s', b'--format=%at', b'src')).out.strip()) save_name = strftime('%Y-%m-%d-%H%M%S', localtime(save_utc)).encode('ascii') wvstart('help') wvpasseq(b'Commands: ls cd pwd cat get mget help quit\n', exo((bup_cmd, b'ftp'), input=b'help\n', stderr=PIPE).out) wvstart('pwd/cd') wvpasseq(b'/\n', bup(b'ftp', input=b'pwd\n').out) wvpasseq(b'', bup(b'ftp', input=b'cd src\n').out) wvpasseq(b'/src\n', bup(b'ftp', input=jl(b'cd src', b'pwd')).out) wvpasseq(b'/src\n/\n', bup(b'ftp', input=jl(b'cd src', b'pwd', b'cd ..', b'pwd')).out) wvpasseq(b'/src\n/\n', bup(b'ftp', input=jl(b'cd src', b'pwd', b'cd ..', b'cd ..', b'pwd')).out) wvpasseq(b'/src/%s/dir\n' % save_name, bup(b'ftp', input=jl(b'cd src/latest/dir-symlink', b'pwd')).out) wvpasseq(b'/src/%s/dir\n' % save_name, bup(b'ftp', input=jl(b'cd src latest dir-symlink', b'pwd')).out) match_rx_grp(br'(error: path does not exist: /src/)[0-9-]+(/not-there\n/\n)', (b'error: path does not exist: /src/', b'/not-there\n/\n'), bup(b'ftp', input=jl(b'cd src/latest/bad-symlink', b'pwd')).out) match_rx_grp(br'(error: path does not exist: /src/)[0-9-]+(/not-there\n/\n)', (b'error: path does not exist: /src/', b'/not-there\n/\n'), bup(b'ftp', input=jl(b'cd src/latest/not-there', b'pwd')).out) wvstart('ls') # FIXME: elaborate wvpasseq(b'src\n', bup(b'ftp', input=b'ls\n').out) wvpasseq(save_name + b'\nlatest\n', bup(b'ftp', input=b'ls src\n').out) wvstart('cat') wvpasseq(b'excitement!\n', bup(b'ftp', input=b'cat src/latest/file-1\n').out) wvpasseq(b'excitement!\nmore excitement!\n', bup(b'ftp', input=b'cat src/latest/file-1 src/latest/dir/file-2\n').out) wvstart('get') bup(b'ftp', input=jl(b'get src/latest/file-1 
dest')) with open(b'dest', 'rb') as f: wvpasseq(b'excitement!\n', f.read()) unlink(b'dest') bup(b'ftp', input=jl(b'get src/latest/file-symlink dest')) with open(b'dest', 'rb') as f: wvpasseq(b'excitement!\n', f.read()) unlink(b'dest') match_rx_grp(br'(error: path does not exist: /src/)[0-9-]+(/not-there\n)', (b'error: path does not exist: /src/', b'/not-there\n'), bup(b'ftp', input=jl(b'get src/latest/bad-symlink dest')).out) match_rx_grp(br'(error: path does not exist: /src/)[0-9-]+(/not-there\n)', (b'error: path does not exist: /src/', b'/not-there\n'), bup(b'ftp', input=jl(b'get src/latest/not-there dest')).out) wvstart('mget') unlink_if_exists(b'file-1') bup(b'ftp', input=jl(b'mget src/latest/file-1')) with open(b'file-1', 'rb') as f: wvpasseq(b'excitement!\n', f.read()) unlink_if_exists(b'file-1') unlink_if_exists(b'file-2') bup(b'ftp', input=jl(b'mget src/latest/file-1 src/latest/dir/file-2')) with open(b'file-1', 'rb') as f: wvpasseq(b'excitement!\n', f.read()) with open(b'file-2', 'rb') as f: wvpasseq(b'more excitement!\n', f.read()) unlink_if_exists(b'file-symlink') bup(b'ftp', input=jl(b'mget src/latest/file-symlink')) with open(b'file-symlink', 'rb') as f: wvpasseq(b'excitement!\n', f.read()) # bup mget currently always does pattern matching bup(b'ftp', input=b'mget src/latest/not-there\n') bup-0.33.3/test/ext/test_get.py000066400000000000000000001253141454333004200163410ustar00rootroot00000000000000 from __future__ import print_function from errno import ENOENT from itertools import product from os import chdir, mkdir, rename from shutil import rmtree from subprocess import PIPE import pytest, re, sys from bup import compat, path from bup.compat import environ, getcwd from bup.helpers import bquote, merge_dict, unlink from bup.io import byte_stream from buptest import ex, exo from wvpytest import wvcheck, wvfail, wvmsg, wvpass, wvpasseq, wvpassne, wvstart import bup.path sys.stdout.flush() stdout = byte_stream(sys.stdout) # FIXME: per-test function 
environ[b'GIT_AUTHOR_NAME'] = b'bup test-get' environ[b'GIT_COMMITTER_NAME'] = b'bup test-get' environ[b'GIT_AUTHOR_EMAIL'] = b'bup@85430dcca2b611e4b2c3-8f5691723476' environ[b'GIT_COMMITTER_EMAIL'] = b'bup@85430dcca2b611e4b2c3-8f5691723476' # The clean-repo test can probably be applied more broadly. It was # initially just applied to test-pick to catch a bug. top = getcwd() bup_cmd = bup.path.exe() def rmrf(path): err = [] # because python's scoping mess... def onerror(function, path, excinfo): err.append((function, path, excinfo)) rmtree(path, onerror=onerror) if err: function, path, excinfo = err[0] ex_type, ex, traceback = excinfo if (not isinstance(ex, OSError)) or ex.errno != ENOENT: raise ex def verify_trees_match(path1, path2): global top exr = exo((top + b'/dev/compare-trees', b'-c', path1, path2), check=False) stdout.write(exr.out) sys.stdout.flush() wvcheck(exr.rc == 0, 'process exit %d == 0' % exr.rc) def verify_rcz(cmd, **kwargs): assert not kwargs.get('check') kwargs['check'] = False result = exo(cmd, **kwargs) stdout.write(result.out) rc = result.proc.returncode wvcheck(rc == 0, 'process exit %d == 0' % rc) return result # FIXME: multline, or allow opts generally? 
def verify_rx(rx, string): wvcheck(re.search(rx, string), 'rx %r matches %r' % (rx, string)) def verify_nrx(rx, string): wvcheck(not re.search(rx, string), "rx %r doesn't match %r" % (rx, string)) def validate_clean_repo(): out = verify_rcz((b'git', b'--git-dir', b'get-dest', b'fsck')).out verify_nrx(br'dangling|mismatch|missing|unreachable', out) def validate_blob(src_id, dest_id): global top rmrf(b'restore-src') rmrf(b'restore-dest') cat_tree = top + b'/dev/git-cat-tree' src_blob = verify_rcz((cat_tree, b'--git-dir', b'get-src', src_id)).out dest_blob = verify_rcz((cat_tree, b'--git-dir', b'get-src', src_id)).out wvpasseq(src_blob, dest_blob) def validate_tree(src_id, dest_id): rmrf(b'restore-src') rmrf(b'restore-dest') mkdir(b'restore-src') mkdir(b'restore-dest') commit_env = merge_dict(environ, {b'GIT_COMMITTER_DATE': b'2014-01-01 01:01'}) # Create a commit so the archive contents will have matching timestamps. src_c = exo((b'git', b'--git-dir', b'get-src', b'commit-tree', b'-m', b'foo', src_id), env=commit_env).out.strip() dest_c = exo((b'git', b'--git-dir', b'get-dest', b'commit-tree', b'-m', b'foo', dest_id), env=commit_env).out.strip() exr = verify_rcz(b'git --git-dir get-src archive %s | tar xvf - -C restore-src' % bquote(src_c), shell=True) if exr.rc != 0: return False exr = verify_rcz(b'git --git-dir get-dest archive %s | tar xvf - -C restore-dest' % bquote(dest_c), shell=True) if exr.rc != 0: return False # git archive doesn't include an entry for ./. 
unlink(b'restore-src/pax_global_header') unlink(b'restore-dest/pax_global_header') ex((b'touch', b'-r', b'restore-src', b'restore-dest')) verify_trees_match(b'restore-src/', b'restore-dest/') rmrf(b'restore-src') rmrf(b'restore-dest') def validate_commit(src_id, dest_id): exr = verify_rcz((b'git', b'--git-dir', b'get-src', b'cat-file', b'commit', src_id)) if exr.rc != 0: return False src_cat = exr.out exr = verify_rcz((b'git', b'--git-dir', b'get-dest', b'cat-file', b'commit', dest_id)) if exr.rc != 0: return False dest_cat = exr.out wvpasseq(src_cat, dest_cat) if src_cat != dest_cat: return False rmrf(b'restore-src') rmrf(b'restore-dest') mkdir(b'restore-src') mkdir(b'restore-dest') qsrc = bquote(src_id) qdest = bquote(dest_id) exr = verify_rcz((b'git --git-dir get-src archive ' + qsrc + b' | tar xf - -C restore-src'), shell=True) if exr.rc != 0: return False exr = verify_rcz((b'git --git-dir get-dest archive ' + qdest + b' | tar xf - -C restore-dest'), shell=True) if exr.rc != 0: return False # git archive doesn't include an entry for ./. ex((b'touch', b'-r', b'restore-src', b'restore-dest')) verify_trees_match(b'restore-src/', b'restore-dest/') rmrf(b'restore-src') rmrf(b'restore-dest') def _validate_save(orig_dir, save_path, commit_id, tree_id): global bup_cmd rmrf(b'restore') exr = verify_rcz((bup_cmd, b'-d', b'get-dest', b'restore', b'-C', b'restore', save_path + b'/.')) if exr.rc: return False verify_trees_match(orig_dir + b'/', b'restore/') if tree_id: # FIXME: double check that get-dest is correct exr = verify_rcz((b'git', b'--git-dir', b'get-dest', b'ls-tree', tree_id)) if exr.rc: return False cat = verify_rcz((b'git', b'--git-dir', b'get-dest', b'cat-file', b'commit', commit_id)) if cat.rc: return False wvpasseq(b'tree ' + tree_id, cat.out.splitlines()[0]) # FIXME: re-merge save and new_save? 
def validate_save(dest_name, restore_subpath, commit_id, tree_id, orig_value, get_out): out = get_out.splitlines() wvpasseq(2, len(out)) get_tree_id = out[0] get_commit_id = out[1] wvpasseq(tree_id, get_tree_id) wvpasseq(commit_id, get_commit_id) _validate_save(orig_value, dest_name + restore_subpath, commit_id, tree_id) def validate_new_save(dest_name, restore_subpath, commit_id, tree_id, orig_value, get_out): out = get_out.splitlines() wvpasseq(2, len(out)) get_tree_id = out[0] get_commit_id = out[1] wvpasseq(tree_id, get_tree_id) wvpassne(commit_id, get_commit_id) _validate_save(orig_value, dest_name + restore_subpath, get_commit_id, tree_id) def validate_tagged_save(tag_name, restore_subpath, commit_id, tree_id, orig_value, get_out): out = get_out.splitlines() wvpasseq(1, len(out)) get_tag_id = out[0] wvpasseq(commit_id, get_tag_id) # Make sure tmp doesn't already exist. exr = exo((b'git', b'--git-dir', b'get-dest', b'show-ref', b'tmp-branch-for-tag'), check=False) wvpasseq(1, exr.rc) ex((b'git', b'--git-dir', b'get-dest', b'branch', b'tmp-branch-for-tag', b'refs/tags/' + tag_name)) _validate_save(orig_value, b'tmp-branch-for-tag/latest' + restore_subpath, commit_id, tree_id) ex((b'git', b'--git-dir', b'get-dest', b'branch', b'-D', b'tmp-branch-for-tag')) def validate_new_tagged_commit(tag_name, commit_id, tree_id, get_out): out = get_out.splitlines() wvpasseq(1, len(out)) get_tag_id = out[0] wvpassne(commit_id, get_tag_id) validate_tree(tree_id, tag_name + b':') def _run_get(disposition, method, what): print('run_get:', repr((disposition, method, what)), file=sys.stderr) global bup_cmd if disposition == 'get': get_cmd = (bup_cmd, b'-d', b'get-dest', b'get', b'-vvct', b'--print-tags', b'-s', b'get-src') elif disposition == 'get-on': get_cmd = (bup_cmd, b'-d', b'get-dest', b'on', b'-', b'get', b'-vvct', b'--print-tags', b'-s', b'get-src') elif disposition == 'get-to': get_cmd = (bup_cmd, b'-d', b'get-dest', b'get', b'-vvct', b'--print-tags', b'-s', b'get-src', 
b'-r', b'-:' + getcwd() + b'/get-dest') else: raise Exception('error: unexpected get disposition ' + repr(disposition)) if isinstance(what, bytes): cmd = get_cmd + (method, what) else: assert not isinstance(what, str) # python 3 sanity check if method in (b'--ff', b'--append', b'--pick', b'--force-pick', b'--new-tag', b'--replace'): method += b':' src, dest = what cmd = get_cmd + (method, src, dest) result = exo(cmd, check=False, stderr=PIPE) fsck = ex((bup_cmd, b'-d', b'get-dest', b'fsck'), check=False) wvpasseq(0, fsck.rc) return result def run_get(disposition, method, what=None, given=None): global bup_cmd rmrf(b'get-dest') ex((bup_cmd, b'-d', b'get-dest', b'init')) if given: # FIXME: replace bup-get with independent commands as is feasible exr = _run_get(disposition, b'--replace', given) assert not exr.rc return _run_get(disposition, method, what) def _test_universal(get_disposition, src_info): methods = (b'--ff', b'--append', b'--pick', b'--force-pick', b'--new-tag', b'--replace', b'--unnamed') for method in methods: mmsg = method.decode('ascii') wvstart(get_disposition + ' ' + mmsg + ', missing source, fails') exr = run_get(get_disposition, method, b'not-there') wvpassne(0, exr.rc) verify_rx(br'cannot find source', exr.err) for method in methods: mmsg = method.decode('ascii') wvstart(get_disposition + ' ' + mmsg + ' / fails') exr = run_get(get_disposition, method, b'/') wvpassne(0, exr.rc) verify_rx(b'cannot fetch entire repository', exr.err) def verify_only_refs(**kwargs): for kind, refs in kwargs.items(): if kind == 'heads': abs_refs = [b'refs/heads/' + ref for ref in refs] karg = b'--heads' elif kind == 'tags': abs_refs = [b'refs/tags/' + ref for ref in refs] karg = b'--tags' else: raise TypeError('unexpected keyword argument %r' % kind) if abs_refs: verify_rcz([b'git', b'--git-dir', b'get-dest', b'show-ref', b'--verify', karg] + abs_refs) exr = exo((b'git', b'--git-dir', b'get-dest', b'show-ref', karg), check=False) wvpasseq(0, exr.rc) expected_refs = 
sorted(abs_refs) repo_refs = sorted([x.split()[1] for x in exr.out.splitlines()]) wvpasseq(expected_refs, repo_refs) else: # FIXME: can we just check "git show-ref --heads == ''"? exr = exo((b'git', b'--git-dir', b'get-dest', b'show-ref', karg), check=False) wvpasseq(1, exr.rc) wvpasseq(b'', exr.out.strip()) def _test_replace(get_disposition, src_info): wvstart(get_disposition + ' --replace to root fails') for item in (b'.tag/tinyfile', b'src/latest' + src_info['tinyfile-path'], b'.tag/subtree', b'src/latest' + src_info['subtree-vfs-path'], b'.tag/commit-1', b'src/latest', b'src'): exr = run_get(get_disposition, b'--replace', (item, b'/')) wvpassne(0, exr.rc) verify_rx(br'impossible; can only overwrite branch or tag', exr.err) tinyfile_id = src_info['tinyfile-id'] tinyfile_path = src_info['tinyfile-path'] subtree_vfs_path = src_info['subtree-vfs-path'] subtree_id = src_info['subtree-id'] commit_2_id = src_info['commit-2-id'] tree_2_id = src_info['tree-2-id'] # Anything to tag existing_items = {'nothing' : None, 'blob' : (b'.tag/tinyfile', b'.tag/obj'), 'tree' : (b'.tag/tree-1', b'.tag/obj'), 'commit': (b'.tag/commit-1', b'.tag/obj')} for ex_type, ex_ref in existing_items.items(): wvstart(get_disposition + ' --replace ' + ex_type + ' with blob tag') for item in (b'.tag/tinyfile', b'src/latest' + tinyfile_path): exr = run_get(get_disposition, b'--replace', (item ,b'.tag/obj'), given=ex_ref) wvpasseq(0, exr.rc) validate_blob(tinyfile_id, tinyfile_id) verify_only_refs(heads=[], tags=(b'obj',)) wvstart(get_disposition + ' --replace ' + ex_type + ' with tree tag') for item in (b'.tag/subtree', b'src/latest' + subtree_vfs_path): exr = run_get(get_disposition, b'--replace', (item, b'.tag/obj'), given=ex_ref) validate_tree(subtree_id, subtree_id) verify_only_refs(heads=[], tags=(b'obj',)) wvstart(get_disposition + ' --replace ' + ex_type + ' with commitish tag') for item in (b'.tag/commit-2', b'src/latest', b'src'): exr = run_get(get_disposition, b'--replace', (item, 
b'.tag/obj'), given=ex_ref) validate_tagged_save(b'obj', getcwd() + b'/src', commit_2_id, tree_2_id, b'src-2', exr.out) verify_only_refs(heads=[], tags=(b'obj',)) # Committish to branch. existing_items = (('nothing', None), ('branch', (b'.tag/commit-1', b'obj'))) for ex_type, ex_ref in existing_items: for item_type, item in (('commit', b'.tag/commit-2'), ('save', b'src/latest'), ('branch', b'src')): wvstart(get_disposition + ' --replace ' + ex_type + ' with ' + item_type) exr = run_get(get_disposition, b'--replace', (item, b'obj'), given=ex_ref) validate_save(b'obj/latest', getcwd() + b'/src', commit_2_id, tree_2_id, b'src-2', exr.out) verify_only_refs(heads=(b'obj',), tags=[]) # Not committish to branch existing_items = (('nothing', None), ('branch', (b'.tag/commit-1', b'obj'))) for ex_type, ex_ref in existing_items: for item_type, item in (('blob', b'.tag/tinyfile'), ('blob', b'src/latest' + tinyfile_path), ('tree', b'.tag/subtree'), ('tree', b'src/latest' + subtree_vfs_path)): wvstart(get_disposition + ' --replace branch with ' + item_type + ' given ' + ex_type + ' fails') exr = run_get(get_disposition, b'--replace', (item, b'obj'), given=ex_ref) wvpassne(0, exr.rc) verify_rx(br'cannot overwrite branch with .+ for', exr.err) wvstart(get_disposition + ' --replace, implicit destinations') exr = run_get(get_disposition, b'--replace', b'src') validate_save(b'src/latest', getcwd() + b'/src', commit_2_id, tree_2_id, b'src-2', exr.out) verify_only_refs(heads=(b'src',), tags=[]) exr = run_get(get_disposition, b'--replace', b'.tag/commit-2') validate_tagged_save(b'commit-2', getcwd() + b'/src', commit_2_id, tree_2_id, b'src-2', exr.out) verify_only_refs(heads=[], tags=(b'commit-2',)) def _test_ff(get_disposition, src_info): wvstart(get_disposition + ' --ff to root fails') tinyfile_path = src_info['tinyfile-path'] for item in (b'.tag/tinyfile', b'src/latest' + tinyfile_path): exr = run_get(get_disposition, b'--ff', (item, b'/')) wvpassne(0, exr.rc) verify_rx(br'source for 
.+ must be a branch, save, or commit', exr.err) subtree_vfs_path = src_info['subtree-vfs-path'] for item in (b'.tag/subtree', b'src/latest' + subtree_vfs_path): exr = run_get(get_disposition, b'--ff', (item, b'/')) wvpassne(0, exr.rc) verify_rx(br'is impossible; can only --append a tree to a branch', exr.err) for item in (b'.tag/commit-1', b'src/latest', b'src'): exr = run_get(get_disposition, b'--ff', (item, b'/')) wvpassne(0, exr.rc) verify_rx(br'destination for .+ is a root, not a branch', exr.err) wvstart(get_disposition + ' --ff of not-committish fails') for src in (b'.tag/tinyfile', b'src/latest' + tinyfile_path): # FIXME: use get_item elsewhere? for given, get_item in ((None, (src, b'obj')), (None, (src, b'.tag/obj')), ((b'.tag/tinyfile', b'.tag/obj'), (src, b'.tag/obj')), ((b'.tag/tree-1', b'.tag/obj'), (src, b'.tag/obj')), ((b'.tag/commit-1', b'.tag/obj'), (src, b'.tag/obj')), ((b'.tag/commit-1', b'obj'), (src, b'obj'))): exr = run_get(get_disposition, b'--ff', get_item, given=given) wvpassne(0, exr.rc) verify_rx(br'must be a branch, save, or commit', exr.err) for src in (b'.tag/subtree', b'src/latest' + subtree_vfs_path): for given, get_item in ((None, (src, b'obj')), (None, (src, b'.tag/obj')), ((b'.tag/tinyfile', b'.tag/obj'), (src, b'.tag/obj')), ((b'.tag/tree-1', b'.tag/obj'), (src, b'.tag/obj')), ((b'.tag/commit-1', b'.tag/obj'), (src, b'.tag/obj')), ((b'.tag/commit-1', b'obj'), (src, b'obj'))): exr = run_get(get_disposition, b'--ff', get_item, given=given) wvpassne(0, exr.rc) verify_rx(br'can only --append a tree to a branch', exr.err) wvstart(get_disposition + ' --ff committish, ff possible') save_2 = src_info['save-2'] for src in (b'.tag/commit-2', b'src/' + save_2, b'src'): for given, get_item, complaint in \ ((None, (src, b'.tag/obj'), br'destination .+ must be a valid branch name'), ((b'.tag/tinyfile', b'.tag/obj'), (src, b'.tag/obj'), br'destination .+ is a blob, not a branch'), ((b'.tag/tree-1', b'.tag/obj'), (src, b'.tag/obj'), 
br'destination .+ is a tree, not a branch'), ((b'.tag/commit-1', b'.tag/obj'), (src, b'.tag/obj'), br'destination .+ is a tagged commit, not a branch'), ((b'.tag/commit-2', b'.tag/obj'), (src, b'.tag/obj'), br'destination .+ is a tagged commit, not a branch')): exr = run_get(get_disposition, b'--ff', get_item, given=given) wvpassne(0, exr.rc) verify_rx(complaint, exr.err) # FIXME: use src or item and given or existing consistently in loops... commit_2_id = src_info['commit-2-id'] tree_2_id = src_info['tree-2-id'] for src in (b'.tag/commit-2', b'src/' + save_2, b'src'): for given in (None, (b'.tag/commit-1', b'obj'), (b'.tag/commit-2', b'obj')): exr = run_get(get_disposition, b'--ff', (src, b'obj'), given=given) wvpasseq(0, exr.rc) validate_save(b'obj/latest', getcwd() + b'/src', commit_2_id, tree_2_id, b'src-2', exr.out) verify_only_refs(heads=(b'obj',), tags=[]) wvstart(get_disposition + ' --ff, implicit destinations') for item in (b'src', b'src/latest'): exr = run_get(get_disposition, b'--ff', item) wvpasseq(0, exr.rc) ex((b'find', b'get-dest/refs')) ex((bup_cmd, b'-d', b'get-dest', b'ls')) validate_save(b'src/latest', getcwd() + b'/src', commit_2_id, tree_2_id, b'src-2', exr.out) #verify_only_refs(heads=('src',), tags=[]) wvstart(get_disposition + ' --ff, ff impossible') for given, get_item in (((b'unrelated-branch', b'src'), b'src'), ((b'.tag/commit-2', b'src'), (b'.tag/commit-1', b'src'))): exr = run_get(get_disposition, b'--ff', get_item, given=given) wvpassne(0, exr.rc) verify_rx(br'destination is not an ancestor of source', exr.err) def _test_append(get_disposition, src_info): tinyfile_path = src_info['tinyfile-path'] subtree_vfs_path = src_info['subtree-vfs-path'] wvstart(get_disposition + ' --append to root fails') for item in (b'.tag/tinyfile', b'src/latest' + tinyfile_path): exr = run_get(get_disposition, b'--append', (item, b'/')) wvpassne(0, exr.rc) verify_rx(br'source for .+ must be a branch, save, commit, or tree', exr.err) for item in 
(b'.tag/subtree', b'src/latest' + subtree_vfs_path, b'.tag/commit-1', b'src/latest', b'src'): exr = run_get(get_disposition, b'--append', (item, b'/')) wvpassne(0, exr.rc) verify_rx(br'destination for .+ is a root, not a branch', exr.err) wvstart(get_disposition + ' --append of not-treeish fails') for src in (b'.tag/tinyfile', b'src/latest' + tinyfile_path): for given, item in ((None, (src, b'obj')), (None, (src, b'.tag/obj')), ((b'.tag/tinyfile', b'.tag/obj'), (src, b'.tag/obj')), ((b'.tag/tree-1', b'.tag/obj'), (src, b'.tag/obj')), ((b'.tag/commit-1', b'.tag/obj'), (src, b'.tag/obj')), ((b'.tag/commit-1', b'obj'), (src, b'obj'))): exr = run_get(get_disposition, b'--append', item, given=given) wvpassne(0, exr.rc) verify_rx(br'must be a branch, save, commit, or tree', exr.err) wvstart(get_disposition + ' --append committish failure cases') save_2 = src_info['save-2'] for src in (b'.tag/subtree', b'src/latest' + subtree_vfs_path, b'.tag/commit-2', b'src/' + save_2, b'src'): for given, item, complaint in \ ((None, (src, b'.tag/obj'), br'destination .+ must be a valid branch name'), ((b'.tag/tinyfile', b'.tag/obj'), (src, b'.tag/obj'), br'destination .+ is a blob, not a branch'), ((b'.tag/tree-1', b'.tag/obj'), (src, b'.tag/obj'), br'destination .+ is a tree, not a branch'), ((b'.tag/commit-1', b'.tag/obj'), (src, b'.tag/obj'), br'destination .+ is a tagged commit, not a branch'), ((b'.tag/commit-2', b'.tag/obj'), (src, b'.tag/obj'), br'destination .+ is a tagged commit, not a branch')): exr = run_get(get_disposition, b'--append', item, given=given) wvpassne(0, exr.rc) verify_rx(complaint, exr.err) wvstart(get_disposition + ' --append committish') commit_2_id = src_info['commit-2-id'] tree_2_id = src_info['tree-2-id'] for item in (b'.tag/commit-2', b'src/' + save_2, b'src'): for existing in (None, (b'.tag/commit-1', b'obj'), (b'.tag/commit-2', b'obj'), (b'unrelated-branch', b'obj')): exr = run_get(get_disposition, b'--append', (item, b'obj'), given=existing) 
wvpasseq(0, exr.rc) validate_new_save(b'obj/latest', getcwd() + b'/src', commit_2_id, tree_2_id, b'src-2', exr.out) verify_only_refs(heads=(b'obj',), tags=[]) # Append ancestor save_1 = src_info['save-1'] commit_1_id = src_info['commit-1-id'] tree_1_id = src_info['tree-1-id'] for item in (b'.tag/commit-1', b'src/' + save_1, b'src-1'): exr = run_get(get_disposition, b'--append', (item, b'obj'), given=(b'.tag/commit-2', b'obj')) wvpasseq(0, exr.rc) validate_new_save(b'obj/latest', getcwd() + b'/src', commit_1_id, tree_1_id, b'src-1', exr.out) verify_only_refs(heads=(b'obj',), tags=[]) wvstart(get_disposition + ' --append tree') subtree_path = src_info['subtree-path'] subtree_id = src_info['subtree-id'] for item in (b'.tag/subtree', b'src/latest' + subtree_vfs_path): for existing in (None, (b'.tag/commit-1', b'obj'), (b'.tag/commit-2', b'obj')): exr = run_get(get_disposition, b'--append', (item, b'obj'), given=existing) wvpasseq(0, exr.rc) validate_new_save(b'obj/latest', b'/', None, subtree_id, subtree_path, exr.out) verify_only_refs(heads=(b'obj',), tags=[]) wvstart(get_disposition + ' --append, implicit destinations') for item in (b'src', b'src/latest'): exr = run_get(get_disposition, b'--append', item) wvpasseq(0, exr.rc) validate_new_save(b'src/latest', getcwd() + b'/src', commit_2_id, tree_2_id, b'src-2', exr.out) verify_only_refs(heads=(b'src',), tags=[]) def _test_pick_common(get_disposition, src_info, force=False): flavor = b'--force-pick' if force else b'--pick' flavormsg = flavor.decode('ascii') tinyfile_path = src_info['tinyfile-path'] subtree_vfs_path = src_info['subtree-vfs-path'] wvstart(get_disposition + ' ' + flavormsg + ' to root fails') for item in (b'.tag/tinyfile', b'src/latest' + tinyfile_path, b'src'): exr = run_get(get_disposition, flavor, (item, b'/')) wvpassne(0, exr.rc) verify_rx(br'can only pick a commit or save', exr.err) for item in (b'.tag/commit-1', b'src/latest'): exr = run_get(get_disposition, flavor, (item, b'/')) wvpassne(0, exr.rc) 
verify_rx(br'destination is not a tag or branch', exr.err) for item in (b'.tag/subtree', b'src/latest' + subtree_vfs_path): exr = run_get(get_disposition, flavor, (item, b'/')) wvpassne(0, exr.rc) verify_rx(br'is impossible; can only --append a tree', exr.err) wvstart(get_disposition + ' ' + flavormsg + ' of blob or branch fails') for item in (b'.tag/tinyfile', b'src/latest' + tinyfile_path, b'src'): for given, get_item in ((None, (item, b'obj')), (None, (item, b'.tag/obj')), ((b'.tag/tinyfile', b'.tag/obj'), (item, b'.tag/obj')), ((b'.tag/tree-1', b'.tag/obj'), (item, b'.tag/obj')), ((b'.tag/commit-1', b'.tag/obj'), (item, b'.tag/obj')), ((b'.tag/commit-1', b'obj'), (item, b'obj'))): exr = run_get(get_disposition, flavor, get_item, given=given) wvpassne(0, exr.rc) verify_rx(br'impossible; can only pick a commit or save', exr.err) wvstart(get_disposition + ' ' + flavormsg + ' of tree fails') for item in (b'.tag/subtree', b'src/latest' + subtree_vfs_path): for given, get_item in ((None, (item, b'obj')), (None, (item, b'.tag/obj')), ((b'.tag/tinyfile', b'.tag/obj'), (item, b'.tag/obj')), ((b'.tag/tree-1', b'.tag/obj'), (item, b'.tag/obj')), ((b'.tag/commit-1', b'.tag/obj'), (item, b'.tag/obj')), ((b'.tag/commit-1', b'obj'), (item, b'obj'))): exr = run_get(get_disposition, flavor, get_item, given=given) wvpassne(0, exr.rc) verify_rx(br'impossible; can only --append a tree', exr.err) save_2 = src_info['save-2'] commit_2_id = src_info['commit-2-id'] tree_2_id = src_info['tree-2-id'] # FIXME: these two wvstart texts? 
if force: wvstart(get_disposition + ' ' + flavormsg + ' commit/save to existing tag') for item in (b'.tag/commit-2', b'src/' + save_2): for given in ((b'.tag/tinyfile', b'.tag/obj'), (b'.tag/tree-1', b'.tag/obj'), (b'.tag/commit-1', b'.tag/obj')): exr = run_get(get_disposition, flavor, (item, b'.tag/obj'), given=given) wvpasseq(0, exr.rc) validate_new_tagged_commit(b'obj', commit_2_id, tree_2_id, exr.out) verify_only_refs(heads=[], tags=(b'obj',)) else: # --pick wvstart(get_disposition + ' ' + flavormsg + ' commit/save to existing tag fails') for item in (b'.tag/commit-2', b'src/' + save_2): for given in ((b'.tag/tinyfile', b'.tag/obj'), (b'.tag/tree-1', b'.tag/obj'), (b'.tag/commit-1', b'.tag/obj')): exr = run_get(get_disposition, flavor, (item, b'.tag/obj'), given=given) wvpassne(0, exr.rc) verify_rx(br'cannot overwrite existing tag', exr.err) wvstart(get_disposition + ' ' + flavormsg + ' commit/save to tag') for item in (b'.tag/commit-2', b'src/' + save_2): exr = run_get(get_disposition, flavor, (item, b'.tag/obj')) wvpasseq(0, exr.rc) validate_clean_repo() validate_new_tagged_commit(b'obj', commit_2_id, tree_2_id, exr.out) verify_only_refs(heads=[], tags=(b'obj',)) wvstart(get_disposition + ' ' + flavormsg + ' commit/save to branch') for item in (b'.tag/commit-2', b'src/' + save_2): for given in (None, (b'.tag/commit-1', b'obj'), (b'.tag/commit-2', b'obj')): exr = run_get(get_disposition, flavor, (item, b'obj'), given=given) wvpasseq(0, exr.rc) validate_clean_repo() validate_new_save(b'obj/latest', getcwd() + b'/src', commit_2_id, tree_2_id, b'src-2', exr.out) verify_only_refs(heads=(b'obj',), tags=[]) wvstart(get_disposition + ' ' + flavormsg + ' commit/save unrelated commit to branch') for item in(b'.tag/commit-2', b'src/' + save_2): exr = run_get(get_disposition, flavor, (item, b'obj'), given=(b'unrelated-branch', b'obj')) wvpasseq(0, exr.rc) validate_clean_repo() validate_new_save(b'obj/latest', getcwd() + b'/src', commit_2_id, tree_2_id, b'src-2', exr.out) 
verify_only_refs(heads=(b'obj',), tags=[]) wvstart(get_disposition + ' ' + flavormsg + ' commit/save ancestor to branch') save_1 = src_info['save-1'] commit_1_id = src_info['commit-1-id'] tree_1_id = src_info['tree-1-id'] for item in (b'.tag/commit-1', b'src/' + save_1): exr = run_get(get_disposition, flavor, (item, b'obj'), given=(b'.tag/commit-2', b'obj')) wvpasseq(0, exr.rc) validate_clean_repo() validate_new_save(b'obj/latest', getcwd() + b'/src', commit_1_id, tree_1_id, b'src-1', exr.out) verify_only_refs(heads=(b'obj',), tags=[]) wvstart(get_disposition + ' ' + flavormsg + ', implicit destinations') exr = run_get(get_disposition, flavor, b'.tag/commit-2') wvpasseq(0, exr.rc) validate_clean_repo() validate_new_tagged_commit(b'commit-2', commit_2_id, tree_2_id, exr.out) verify_only_refs(heads=[], tags=(b'commit-2',)) exr = run_get(get_disposition, flavor, b'src/latest') wvpasseq(0, exr.rc) validate_clean_repo() validate_new_save(b'src/latest', getcwd() + b'/src', commit_2_id, tree_2_id, b'src-2', exr.out) verify_only_refs(heads=(b'src',), tags=[]) def _test_pick_force(get_disposition, src_info): _test_pick_common(get_disposition, src_info, force=True) def _test_pick_noforce(get_disposition, src_info): _test_pick_common(get_disposition, src_info, force=False) def _test_new_tag(get_disposition, src_info): tinyfile_id = src_info['tinyfile-id'] tinyfile_path = src_info['tinyfile-path'] commit_2_id = src_info['commit-2-id'] tree_2_id = src_info['tree-2-id'] subtree_id = src_info['subtree-id'] subtree_vfs_path = src_info['subtree-vfs-path'] wvstart(get_disposition + ' --new-tag to root fails') for item in (b'.tag/tinyfile', b'src/latest' + tinyfile_path, b'.tag/subtree', b'src/latest' + subtree_vfs_path, b'.tag/commit-1', b'src/latest', b'src'): exr = run_get(get_disposition, b'--new-tag', (item, b'/')) wvpassne(0, exr.rc) verify_rx(br'destination for .+ must be a VFS tag', exr.err) # Anything to new tag. 
wvstart(get_disposition + ' --new-tag, blob tag') for item in (b'.tag/tinyfile', b'src/latest' + tinyfile_path): exr = run_get(get_disposition, b'--new-tag', (item, b'.tag/obj')) wvpasseq(0, exr.rc) validate_blob(tinyfile_id, tinyfile_id) verify_only_refs(heads=[], tags=(b'obj',)) wvstart(get_disposition + ' --new-tag, tree tag') for item in (b'.tag/subtree', b'src/latest' + subtree_vfs_path): exr = run_get(get_disposition, b'--new-tag', (item, b'.tag/obj')) wvpasseq(0, exr.rc) validate_tree(subtree_id, subtree_id) verify_only_refs(heads=[], tags=(b'obj',)) wvstart(get_disposition + ' --new-tag, committish tag') for item in (b'.tag/commit-2', b'src/latest', b'src'): exr = run_get(get_disposition, b'--new-tag', (item, b'.tag/obj')) wvpasseq(0, exr.rc) validate_tagged_save(b'obj', getcwd() + b'/src/', commit_2_id, tree_2_id, b'src-2', exr.out) verify_only_refs(heads=[], tags=(b'obj',)) # Anything to existing tag (fails). for ex_type, ex_tag in (('blob', (b'.tag/tinyfile', b'.tag/obj')), ('tree', (b'.tag/tree-1', b'.tag/obj')), ('commit', (b'.tag/commit-1', b'.tag/obj'))): for item_type, item in (('blob tag', b'.tag/tinyfile'), ('blob path', b'src/latest' + tinyfile_path), ('tree tag', b'.tag/subtree'), ('tree path', b'src/latest' + subtree_vfs_path), ('commit tag', b'.tag/commit-2'), ('save', b'src/latest'), ('branch', b'src')): wvstart(get_disposition + ' --new-tag of ' + item_type + ', given existing ' + ex_type + ' tag, fails') exr = run_get(get_disposition, b'--new-tag', (item, b'.tag/obj'), given=ex_tag) wvpassne(0, exr.rc) verify_rx(br'cannot overwrite existing tag .* \(requires --replace\)', exr.err) # Anything to branch (fails). 
for ex_type, ex_tag in (('nothing', None), ('blob', (b'.tag/tinyfile', b'.tag/obj')), ('tree', (b'.tag/tree-1', b'.tag/obj')), ('commit', (b'.tag/commit-1', b'.tag/obj'))): for item_type, item in (('blob tag', b'.tag/tinyfile'), ('blob path', b'src/latest' + tinyfile_path), ('tree tag', b'.tag/subtree'), ('tree path', b'src/latest' + subtree_vfs_path), ('commit tag', b'.tag/commit-2'), ('save', b'src/latest'), ('branch', b'src')): wvstart(get_disposition + ' --new-tag to branch of ' + item_type + ', given existing ' + ex_type + ' tag, fails') exr = run_get(get_disposition, b'--new-tag', (item, b'obj'), given=ex_tag) wvpassne(0, exr.rc) verify_rx(br'destination for .+ must be a VFS tag', exr.err) wvstart(get_disposition + ' --new-tag, implicit destinations') exr = run_get(get_disposition, b'--new-tag', b'.tag/commit-2') wvpasseq(0, exr.rc) validate_tagged_save(b'commit-2', getcwd() + b'/src/', commit_2_id, tree_2_id, b'src-2', exr.out) verify_only_refs(heads=[], tags=(b'commit-2',)) def _test_unnamed(get_disposition, src_info): tinyfile_id = src_info['tinyfile-id'] tinyfile_path = src_info['tinyfile-path'] subtree_vfs_path = src_info['subtree-vfs-path'] wvstart(get_disposition + ' --unnamed to root fails') for item in (b'.tag/tinyfile', b'src/latest' + tinyfile_path, b'.tag/subtree', b'src/latest' + subtree_vfs_path, b'.tag/commit-1', b'src/latest', b'src'): for ex_ref in (None, (item, b'.tag/obj')): exr = run_get(get_disposition, b'--unnamed', (item, b'/'), given=ex_ref) wvpassne(0, exr.rc) verify_rx(br'usage: bup get ', exr.err) wvstart(get_disposition + ' --unnamed file') for item in (b'.tag/tinyfile', b'src/latest' + tinyfile_path): exr = run_get(get_disposition, b'--unnamed', item) wvpasseq(0, exr.rc) validate_blob(tinyfile_id, tinyfile_id) verify_only_refs(heads=[], tags=[]) exr = run_get(get_disposition, b'--unnamed', item, given=(item, b'.tag/obj')) wvpasseq(0, exr.rc) validate_blob(tinyfile_id, tinyfile_id) verify_only_refs(heads=[], tags=(b'obj',)) 
wvstart(get_disposition + ' --unnamed tree') subtree_id = src_info['subtree-id'] for item in (b'.tag/subtree', b'src/latest' + subtree_vfs_path): exr = run_get(get_disposition, b'--unnamed', item) wvpasseq(0, exr.rc) validate_tree(subtree_id, subtree_id) verify_only_refs(heads=[], tags=[]) exr = run_get(get_disposition, b'--unnamed', item, given=(item, b'.tag/obj')) wvpasseq(0, exr.rc) validate_tree(subtree_id, subtree_id) verify_only_refs(heads=[], tags=(b'obj',)) wvstart(get_disposition + ' --unnamed committish') save_2 = src_info['save-2'] commit_2_id = src_info['commit-2-id'] for item in (b'.tag/commit-2', b'src/' + save_2, b'src'): exr = run_get(get_disposition, b'--unnamed', item) wvpasseq(0, exr.rc) validate_commit(commit_2_id, commit_2_id) verify_only_refs(heads=[], tags=[]) exr = run_get(get_disposition, b'--unnamed', item, given=(item, b'.tag/obj')) wvpasseq(0, exr.rc) validate_commit(commit_2_id, commit_2_id) verify_only_refs(heads=[], tags=(b'obj',)) def create_get_src(): global bup_cmd, src_info wvstart('preparing') ex((bup_cmd, b'-d', b'get-src', b'init')) mkdir(b'src') open(b'src/unrelated', 'a').close() ex((bup_cmd, b'-d', b'get-src', b'index', b'src')) ex((bup_cmd, b'-d', b'get-src', b'save', b'-tcn', b'unrelated-branch', b'src')) ex((bup_cmd, b'-d', b'get-src', b'index', b'--clear')) rmrf(b'src') mkdir(b'src') open(b'src/zero', 'a').close() ex((bup_cmd, b'-d', b'get-src', b'index', b'src')) exr = exo((bup_cmd, b'-d', b'get-src', b'save', b'-tcn', b'src', b'src')) out = exr.out.splitlines() tree_0_id = out[0] commit_0_id = out[-1] exr = exo((bup_cmd, b'-d', b'get-src', b'ls', b'src')) save_0 = exr.out.splitlines()[0] ex((b'git', b'--git-dir', b'get-src', b'branch', b'src-0', b'src')) ex((b'cp', b'-RPp', b'src', b'src-0')) rmrf(b'src') mkdir(b'src') mkdir(b'src/x') mkdir(b'src/x/y') ex((bup_cmd + b' -d get-src random 1k > src/1'), shell=True) ex((bup_cmd + b' -d get-src random 1k > src/x/2'), shell=True) ex((bup_cmd, b'-d', b'get-src', b'index', 
b'src')) exr = exo((bup_cmd, b'-d', b'get-src', b'save', b'-tcn', b'src', b'src')) out = exr.out.splitlines() tree_1_id = out[0] commit_1_id = out[-1] exr = exo((bup_cmd, b'-d', b'get-src', b'ls', b'src')) save_1 = exr.out.splitlines()[1] ex((b'git', b'--git-dir', b'get-src', b'branch', b'src-1', b'src')) ex((b'cp', b'-RPp', b'src', b'src-1')) # Make a copy the current state of src so we'll have an ancestor. ex((b'cp', b'-RPp', b'get-src/refs/heads/src', b'get-src/refs/heads/src-ancestor')) with open(b'src/tiny-file', 'ab') as f: f.write(b'xyzzy') ex((bup_cmd, b'-d', b'get-src', b'index', b'src')) ex((bup_cmd, b'-d', b'get-src', b'tick')) # Ensure the save names differ exr = exo((bup_cmd, b'-d', b'get-src', b'save', b'-tcn', b'src', b'src')) out = exr.out.splitlines() tree_2_id = out[0] commit_2_id = out[-1] exr = exo((bup_cmd, b'-d', b'get-src', b'ls', b'src')) save_2 = exr.out.splitlines()[2] rename(b'src', b'src-2') src_root = getcwd() + b'/src' subtree_path = b'src-2/x' subtree_vfs_path = src_root + b'/x' # No support for "ls -d", so grep... 
exr = exo((bup_cmd, b'-d', b'get-src', b'ls', b'-s', b'src/latest' + src_root)) out = exr.out.splitlines() subtree_id = None for line in out: if b'x' in line: subtree_id = line.split()[0] assert(subtree_id) # With a tiny file, we'll get a single blob, not a chunked tree tinyfile_path = src_root + b'/tiny-file' exr = exo((bup_cmd, b'-d', b'get-src', b'ls', b'-s', b'src/latest' + tinyfile_path)) tinyfile_id = exr.out.splitlines()[0].split()[0] ex((bup_cmd, b'-d', b'get-src', b'tag', b'tinyfile', tinyfile_id)) ex((bup_cmd, b'-d', b'get-src', b'tag', b'subtree', subtree_id)) ex((bup_cmd, b'-d', b'get-src', b'tag', b'tree-0', tree_0_id)) ex((bup_cmd, b'-d', b'get-src', b'tag', b'tree-1', tree_1_id)) ex((bup_cmd, b'-d', b'get-src', b'tag', b'tree-2', tree_2_id)) ex((bup_cmd, b'-d', b'get-src', b'tag', b'commit-0', commit_0_id)) ex((bup_cmd, b'-d', b'get-src', b'tag', b'commit-1', commit_1_id)) ex((bup_cmd, b'-d', b'get-src', b'tag', b'commit-2', commit_2_id)) ex((b'git', b'--git-dir', b'get-src', b'branch', b'commit-1', commit_1_id)) ex((b'git', b'--git-dir', b'get-src', b'branch', b'commit-2', commit_2_id)) return {'tinyfile-path' : tinyfile_path, 'tinyfile-id' : tinyfile_id, 'subtree-id' : subtree_id, 'tree-0-id' : tree_0_id, 'tree-1-id' : tree_1_id, 'tree-2-id' : tree_2_id, 'commit-0-id' : commit_0_id, 'commit-1-id' : commit_1_id, 'commit-2-id' : commit_2_id, 'save-1' : save_1, 'save-2' : save_2, 'subtree-path' : subtree_path, 'subtree-vfs-path' : subtree_vfs_path} # FIXME: this fails in a strange way: # WVPASS given nothing get --ff not-there dispositions_to_test = ('get',) if int(environ.get(b'BUP_TEST_LEVEL', b'0')) >= 11: dispositions_to_test += ('get-on', 'get-to') categories = ('replace', 'universal', 'ff', 'append', 'pick_force', 'pick_noforce', 'new_tag', 'unnamed') @pytest.mark.parametrize("disposition,category", product(dispositions_to_test, categories)) def test_get(tmpdir, disposition, category): chdir(tmpdir) try: src_info = create_get_src() 
globals().get('_test_' + category)(disposition, src_info) finally: chdir(top) bup-0.33.3/test/ext/test_prune_older.py000066400000000000000000000210001454333004200200630ustar00rootroot00000000000000 from __future__ import absolute_import, print_function from collections import defaultdict from itertools import chain, dropwhile, groupby, takewhile from os import chdir from random import choice, randint from shutil import copytree, rmtree from subprocess import PIPE from sys import stderr from time import localtime, strftime, time, tzset import random, sys if sys.version_info[:2] >= (3, 5): from difflib import diff_bytes, unified_diff else: from difflib import unified_diff from bup import compat from bup.compat import environ from bup.helpers import partition, period_as_secs, readpipe from bup.io import byte_stream from buptest import ex, exo from wvpytest import wvfail, wvpass, wvpasseq, wvpassne, wvstart import bup.path if sys.version_info[:2] < (3, 5): def diff_bytes(_, *args): return unified_diff(*args) def create_older_random_saves(n, start_utc, end_utc): with open(b'foo', 'wb') as f: pass ex([b'git', b'add', b'foo']) utcs = set() while len(utcs) != n: utcs.add(randint(start_utc, end_utc)) utcs = sorted(utcs) for utc in utcs: with open(b'foo', 'wb') as f: f.write(b'%d\n' % utc) ex([b'git', b'commit', b'--date', b'%d' % utc, b'-qam', b'%d' % utc]) ex([b'git', b'gc', b'--aggressive']) return utcs # There is corresponding code in bup for some of this, but the # computation method is different here, in part so that the test can # provide a more effective cross-check. 
period_kinds = [b'all', b'dailies', b'monthlies', b'yearlies'] period_scale = {b's': 1, b'min': 60, b'h': 60 * 60, b'd': 60 * 60 * 24, b'w': 60 * 60 * 24 * 7, b'm': 60 * 60 * 24 * 31, b'y': 60 * 60 * 24 * 366} period_scale_kinds = list(period_scale.keys()) def expected_retentions(utcs, utc_start, spec): if not spec: return utcs utcs = sorted(utcs, reverse=True) period_start = dict(spec) for kind, duration in period_start.items(): period_start[kind] = utc_start - period_as_secs(duration) period_start = defaultdict(lambda: float('inf'), period_start) all = list(takewhile(lambda x: x >= period_start[b'all'], utcs)) utcs = list(dropwhile(lambda x: x >= period_start[b'all'], utcs)) matches = takewhile(lambda x: x >= period_start[b'dailies'], utcs) dailies = [max(day_utcs) for yday, day_utcs in groupby(matches, lambda x: localtime(x).tm_yday)] utcs = list(dropwhile(lambda x: x >= period_start[b'dailies'], utcs)) matches = takewhile(lambda x: x >= period_start[b'monthlies'], utcs) monthlies = [max(month_utcs) for month, month_utcs in groupby(matches, lambda x: localtime(x).tm_mon)] utcs = dropwhile(lambda x: x >= period_start[b'monthlies'], utcs) matches = takewhile(lambda x: x >= period_start[b'yearlies'], utcs) yearlies = [max(year_utcs) for year, year_utcs in groupby(matches, lambda x: localtime(x).tm_year)] return chain(all, dailies, monthlies, yearlies) def period_spec(start_utc, end_utc): global period_kinds, period_scale, period_scale_kinds result = [] desired_specs = randint(1, 2 * len(period_kinds)) assert(desired_specs >= 1) # At least one --keep argument is required while len(result) < desired_specs: period = None if randint(1, 100) <= 5: period = b'forever' else: assert(end_utc > start_utc) period_secs = randint(1, end_utc - start_utc) scale = choice(period_scale_kinds) mag = int(float(period_secs) / period_scale[scale]) if mag != 0: period = (b'%d' % mag) + scale if period: result += [(choice(period_kinds), period)] return tuple(result) def 
unique_period_specs(n, start_utc, end_utc): invocations = set() while len(invocations) < n: invocations.add(period_spec(start_utc, end_utc)) return tuple(invocations) def period_spec_to_period_args(spec): return tuple(chain(*((b'--keep-' + kind + b'-for', period) for kind, period in spec))) def result_diffline(x): return (b'%d %s\n' % (x, strftime(' %Y-%m-%d-%H%M%S', localtime(x)).encode('ascii'))) def check_prune_result(expected): actual = sorted([int(x) for x in exo([b'git', b'log', b'--pretty=format:%at']).out.splitlines()]) if expected != actual: for x in expected: print('ex:', x, strftime('%Y-%m-%d-%H%M%S', localtime(x)), file=stderr) for line in diff_bytes(unified_diff, [result_diffline(x) for x in expected], [result_diffline(x) for x in actual], fromfile=b'expected', tofile=b'actual'): sys.stderr.flush() byte_stream(sys.stderr).write(line) wvpass(expected == actual) def test_prune_older(tmpdir): environ[b'GIT_AUTHOR_NAME'] = b'bup test' environ[b'GIT_COMMITTER_NAME'] = b'bup test' environ[b'GIT_AUTHOR_EMAIL'] = b'bup@a425bc70a02811e49bdf73ee56450e6f' environ[b'GIT_COMMITTER_EMAIL'] = b'bup@a425bc70a02811e49bdf73ee56450e6f' seed = int(environ.get(b'BUP_TEST_SEED', time())) random.seed(seed) print('random seed:', seed, file=stderr) save_population = int(environ.get(b'BUP_TEST_PRUNE_OLDER_SAVES', 2000)) prune_cycles = int(environ.get(b'BUP_TEST_PRUNE_OLDER_CYCLES', 20)) prune_gc_cycles = int(environ.get(b'BUP_TEST_PRUNE_OLDER_GC_CYCLES', 10)) bup_cmd = bup.path.exe() environ[b'BUP_DIR'] = tmpdir + b'/work/.git' environ[b'GIT_DIR'] = tmpdir + b'/work/.git' now = int(time()) three_years_ago = now - (60 * 60 * 24 * 366 * 3) chdir(tmpdir) ex([b'git', b'init', b'work']) ex([b'git', b'symbolic-ref', b'HEAD', b'refs/heads/main']) ex([b'git', b'config', b'gc.autoDetach', b'false']) wvstart('generating ' + str(save_population) + ' random saves') chdir(tmpdir + b'/work') save_utcs = create_older_random_saves(save_population, three_years_ago, now) chdir(tmpdir) 
test_set_hash = exo([b'git', b'show-ref', b'-s', b'main']).out.rstrip() ls_saves = exo((bup_cmd, b'ls', b'main')).out.splitlines() wvpasseq(save_population + 1, len(ls_saves)) wvstart('ensure everything kept, if no keep arguments') ex([b'git', b'reset', b'--hard', test_set_hash]) proc = ex((bup_cmd, b'prune-older', b'-v', b'--unsafe', b'--no-gc', b'--wrt', b'%d' % now) \ + (b'main',), stdout=None, stderr=PIPE, check=False) wvpassne(proc.rc, 0) wvpass(b'at least one keep argument is required' in proc.err) check_prune_result(save_utcs) wvstart('running %d generative no-gc tests on %d saves' % (prune_cycles, save_population)) for spec in unique_period_specs(prune_cycles, # Make it more likely we'll have # some outside the save range. three_years_ago - period_scale[b'm'], now): ex([b'git', b'reset', b'--hard', test_set_hash]) expected = sorted(expected_retentions(save_utcs, now, spec)) ex((bup_cmd, b'prune-older', b'-v', b'--unsafe', b'--no-gc', b'--wrt', b'%d' % now) \ + period_spec_to_period_args(spec) \ + (b'main',)) check_prune_result(expected) # More expensive because we have to recreate the repo each time wvstart('running %d generative gc tests on %d saves' % (prune_gc_cycles, save_population)) ex([b'git', b'reset', b'--hard', test_set_hash]) copytree(b'work/.git', b'clean-test-repo', symlinks=True) for spec in unique_period_specs(prune_gc_cycles, # Make it more likely we'll have # some outside the save range. 
three_years_ago - period_scale[b'm'], now): rmtree(b'work/.git') copytree(b'clean-test-repo', b'work/.git') expected = sorted(expected_retentions(save_utcs, now, spec)) ex((bup_cmd, b'prune-older', b'-v', b'--unsafe', b'--wrt', b'%d' % now) \ + period_spec_to_period_args(spec) \ + (b'main',)) check_prune_result(expected) bup-0.33.3/test/int/000077500000000000000000000000001454333004200141355ustar00rootroot00000000000000bup-0.33.3/test/int/__init__.py000066400000000000000000000000001454333004200162340ustar00rootroot00000000000000bup-0.33.3/test/int/sample.conf000066400000000000000000000003031454333004200162610ustar00rootroot00000000000000[bup] foo = bar bup = is great ;comments=are ignored #and=this kind too end = end ; comment at the end istrue1 = 1 istrue2 = 2 istrue3 = true isfalse1 = false isfalse2 = 0 isbad = ok hex = 0x777 bup-0.33.3/test/int/test_bloom.py000066400000000000000000000034001454333004200166530ustar00rootroot00000000000000 from __future__ import absolute_import, print_function import os import errno, platform, tempfile import logging from bup import bloom def test_bloom(tmpdir): hashes = [os.urandom(20) for i in range(100)] class Idx: pass ix = Idx() ix.name = b'dummy.idx' ix.shatable = b''.join(hashes) for k in (4, 5): with bloom.create(tmpdir + b'/pybuptest.bloom', expected=100, k=k) as b: b.add_idx(ix) assert b.pfalse_positive() < .1 with bloom.ShaBloom(tmpdir + b'/pybuptest.bloom') as b: all_present = True for h in hashes: all_present &= (b.exists(h) or False) assert all_present false_positives = 0 for h in [os.urandom(20) for i in range(1000)]: if b.exists(h): false_positives += 1 assert false_positives < 5 os.unlink(tmpdir + b'/pybuptest.bloom') tf = tempfile.TemporaryFile(dir=tmpdir) with bloom.create(b'bup.bloom', f=tf, expected=100) as b: assert b.file == tf assert b.k == 5 # Test large (~1GiB) filter. This may fail on s390 (31-bit # architecture), and anywhere else where the address space is # sufficiently limited. 
tf = tempfile.TemporaryFile(dir=tmpdir) skip_test = False try: with bloom.create(b'bup.bloom', f=tf, expected=2**28, delaywrite=False) as b: assert b.k == 4 except EnvironmentError as ex: (ptr_width, linkage) = platform.architecture() if ptr_width == '32bit' and ex.errno == errno.ENOMEM: logging.getLogger().info('skipping large bloom filter test (mmap probably failed) ' + str(ex)) else: raise bup-0.33.3/test/int/test_client.py000066400000000000000000000127561454333004200170370ustar00rootroot00000000000000 from __future__ import absolute_import import os, time, random, subprocess, glob import pytest from bup import client, git, path from bup.compat import bytes_from_uint, environ def randbytes(sz): s = b'' for i in range(sz): s += bytes_from_uint(random.randrange(0,256)) return s s1 = randbytes(10000) s2 = randbytes(10000) s3 = randbytes(10000) IDX_PAT = b'/*.idx' def test_server_split_with_indexes(tmpdir): environ[b'BUP_DIR'] = bupdir = tmpdir git.init_repo(bupdir) with git.PackWriter() as lw: lw.new_blob(s1) with client.Client(bupdir, create=True) as c, \ c.new_packwriter() as rw: rw.new_blob(s2) rw.breakpoint() rw.new_blob(s1) def test_multiple_suggestions(tmpdir): environ[b'BUP_DIR'] = bupdir = tmpdir git.init_repo(bupdir) with git.PackWriter() as lw: lw.new_blob(s1) with git.PackWriter() as lw: lw.new_blob(s2) assert len(glob.glob(git.repo(b'objects/pack'+IDX_PAT))) == 2 with client.Client(bupdir, create=True) as c, \ c.new_packwriter() as rw: assert len(glob.glob(c.cachedir+IDX_PAT)) == 0 s1sha = rw.new_blob(s1) assert rw.exists(s1sha) s2sha = rw.new_blob(s2) # This is a little hacky, but ensures that we test the # code under test. First, flush to ensure that we've # actually sent all the command ('receive-objects-v2') # and their data to the server. This may be needed if # the output buffer size is bigger than the data (both # command and objects) we're writing. 
To see the need # for this, change the object sizes at the beginning # of this file to be very small (e.g. 10 instead of 10k) c.conn.outp.flush() # Then, check if we've already received the idx files. # This may happen if we're preempted just after writing # the data, then the server runs and suggests, and only # then we continue in PackWriter_Remote::_raw_write() # and check the has_input(), in that case we'll receive # the idx still in the rw.new_blob() calls above. # # In most cases though, that doesn't happen, and we'll # get past the has_input() check before the server has # a chance to respond - it has to actually hash the new # object here, so it takes some time. So also break out # of the loop if the server has sent something on the # connection. # # Finally, abort this after a little while (about one # second) just in case something's actually broken. n = 0 while (len(glob.glob(c.cachedir+IDX_PAT)) < 2 and not c.conn.has_input() and n < 10): time.sleep(0.1) n += 1 assert len(glob.glob(c.cachedir+IDX_PAT)) == 2 or c.conn.has_input() rw.new_blob(s2) assert rw.objcache.exists(s1sha) assert rw.objcache.exists(s2sha) rw.new_blob(s3) assert len(glob.glob(c.cachedir+IDX_PAT)) == 2 assert len(glob.glob(c.cachedir+IDX_PAT)) == 3 def test_dumb_client_server(tmpdir): environ[b'BUP_DIR'] = bupdir = tmpdir git.init_repo(bupdir) open(git.repo(b'bup-dumb-server'), 'w').close() with git.PackWriter() as lw: lw.new_blob(s1) with client.Client(bupdir, create=True) as c, \ c.new_packwriter() as rw: assert len(glob.glob(c.cachedir+IDX_PAT)) == 1 rw.new_blob(s1) assert len(glob.glob(c.cachedir+IDX_PAT)) == 1 rw.new_blob(s2) assert len(glob.glob(c.cachedir+IDX_PAT)) == 2 def test_midx_refreshing(tmpdir): environ[b'BUP_DIR'] = bupdir = tmpdir git.init_repo(bupdir) with client.Client(bupdir, create=True) as c, \ c.new_packwriter() as rw: rw.new_blob(s1) p1base = rw.breakpoint() p1name = os.path.join(c.cachedir, p1base) s1sha = rw.new_blob(s1) # should not be written; it's already 
in p1 s2sha = rw.new_blob(s2) p2base = rw.close() p2name = os.path.join(c.cachedir, p2base) with git.PackIdxList(bupdir + b'/objects/pack') as pi: assert len(pi.packs) == 2 pi.refresh() assert len(pi.packs) == 2 assert sorted([os.path.basename(i.name) for i in pi.packs]) \ == sorted([p1base, p2base]) with git.open_idx(p1name) as p1, \ git.open_idx(p2name) as p2: assert p1.exists(s1sha) assert not p2.exists(s1sha) assert p2.exists(s2sha) subprocess.call([path.exe(), b'midx', b'-f']) pi.refresh() assert len(pi.packs) == 1 pi.refresh(skip_midx=True) assert len(pi.packs) == 2 pi.refresh(skip_midx=False) assert len(pi.packs) == 1 def test_remote_parsing(): tests = ( (b':/bup', (b'file', None, None, b'/bup')), (b'file:///bup', (b'file', None, None, b'/bup')), (b'192.168.1.1:/bup', (b'ssh', b'192.168.1.1', None, b'/bup')), (b'ssh://192.168.1.1:2222/bup', (b'ssh', b'192.168.1.1', b'2222', b'/bup')), (b'ssh://[ff:fe::1]:2222/bup', (b'ssh', b'ff:fe::1', b'2222', b'/bup')), (b'bup://foo.com:1950', (b'bup', b'foo.com', b'1950', None)), (b'bup://foo.com:1950/bup', (b'bup', b'foo.com', b'1950', b'/bup')), (b'bup://[ff:fe::1]/bup', (b'bup', b'ff:fe::1', None, b'/bup')),) for remote, values in tests: assert client.parse_remote(remote) == values with pytest.raises(client.ClientError): client.parse_remote(b'http://asdf.com/bup') bup-0.33.3/test/int/test_compat.py000066400000000000000000000014211454333004200170270ustar00rootroot00000000000000 from __future__ import absolute_import, print_function from bup.compat import pending_raise from wvpytest import wvpasseq def test_pending_raise(): outer = Exception('outer') inner = Exception('inner') try: try: raise outer except Exception as ex: with pending_raise(ex): pass except Exception as ex: wvpasseq(outer, ex) wvpasseq(None, getattr(outer, '__context__', None)) try: try: raise outer except Exception as ex: with pending_raise(ex): raise inner except Exception as ex: wvpasseq(inner, ex) wvpasseq(None, getattr(outer, '__context__', None)) 
wvpasseq(outer, getattr(inner, '__context__', None)) bup-0.33.3/test/int/test_git.py000066400000000000000000000537101454333004200163370ustar00rootroot00000000000000 from __future__ import absolute_import, print_function import sys from binascii import hexlify, unhexlify from subprocess import check_call from functools import partial import struct, os import pytest from wvpytest import * from bup import git, path from bup.compat import bytes_from_byte, environ from bup.helpers import localtime, log, mkdirp, readpipe bup_exe = path.exe() def exc(*cmd): print(repr(cmd), file=sys.stderr) check_call(cmd) def exo(*cmd): print(repr(cmd), file=sys.stderr) return readpipe(cmd) def test_git_version_detection(): # Test version types from git's tag history for expected, ver in \ (('insufficient', b'git version 0.99'), ('insufficient', b'git version 0.99.1'), ('insufficient', b'git version 0.99.7a'), ('insufficient', b'git version 1.0rc1'), ('insufficient', b'git version 1.0.1'), ('insufficient', b'git version 1.4.2.1'), ('insufficient', b'git version 1.5.5'), ('insufficient', b'git version 1.5.6-rc0'), ('suitable', b'git version 1.5.6'), ('suitable', b'git version 1.5.6.1'), ('suitable', b'git version 2.14.0-rc0'), ('suitable', b'git version 2.14.0 (something ...)'), ('suitable', b'git version 111.222.333.444-rc555'), ('unrecognized', b'huh?')): assert expected == git.is_suitable_git(ver_str=ver) try: if expected == 'insufficient': with pytest.raises(SystemExit): git.require_suitable_git(ver) elif expected == 'suitable': git.require_suitable_git(ver_str=ver) elif expected == 'unrecognized': with pytest.raises(git.GitError): git.require_suitable_git(ver) else: assert False finally: git._git_great = None try: environ[b'BUP_GIT_VERSION_IS_FINE'] = b'true' git.require_suitable_git(ver_str=ver) finally: del environ[b'BUP_GIT_VERSION_IS_FINE'] git._git_great = None def test_mangle(): afile = 0o100644 afile2 = 0o100770 alink = 0o120000 adir = 0o040000 adir2 = 0o040777 assert 
git.mangle_name(b'a', adir2, adir) == b'a' assert git.mangle_name(b'.bup', adir2, adir) == b'.bup.bupl' assert git.mangle_name(b'a.bupa', adir2, adir) == b'a.bupa.bupl' WVPASSEQ(git.mangle_name(b'b.bup', alink, alink), b'b.bup.bupl') WVPASSEQ(git.mangle_name(b'b.bu', alink, alink), b'b.bu') WVPASSEQ(git.mangle_name(b'f', afile, afile2), b'f') WVPASSEQ(git.mangle_name(b'f.bup', afile, afile2), b'f.bup.bupl') WVPASSEQ(git.mangle_name(b'f.bup', afile, adir), b'f.bup.bup') WVPASSEQ(git.mangle_name(b'f', afile, adir), b'f.bup') WVPASSEQ(git.demangle_name(b'f.bup', afile), (b'f', git.BUP_CHUNKED)) WVPASSEQ(git.demangle_name(b'f.bupl', afile), (b'f', git.BUP_NORMAL)) WVPASSEQ(git.demangle_name(b'f.bup.bupl', afile), (b'f.bup', git.BUP_NORMAL)) WVPASSEQ(git.demangle_name(b'.bupm', afile), (b'', git.BUP_NORMAL)) WVPASSEQ(git.demangle_name(b'.bupm', adir), (b'', git.BUP_CHUNKED)) # for safety, we ignore .bup? suffixes we don't recognize. Future # versions might implement a .bup[a-z] extension as something other # than BUP_NORMAL. 
WVPASSEQ(git.demangle_name(b'f.bupa', afile), (b'f.bupa', git.BUP_NORMAL)) def test_encode(): s = b'hello world' packb = b''.join(git._encode_packobj(b'blob', s)) packt = b''.join(git._encode_packobj(b'tree', s)) packc = b''.join(git._encode_packobj(b'commit', s)) packlb = b''.join(git._encode_packobj(b'blob', s * 200)) WVPASSEQ(git._decode_packobj(packb), (b'blob', s)) WVPASSEQ(git._decode_packobj(packt), (b'tree', s)) WVPASSEQ(git._decode_packobj(packc), (b'commit', s)) WVPASSEQ(git._decode_packobj(packlb), (b'blob', s * 200)) def encode_pobj(n): return b''.join(git._encode_packobj(b'blob', s, compression_level=n)) WVEXCEPT(ValueError, encode_pobj, -1) WVEXCEPT(ValueError, encode_pobj, 10) WVEXCEPT(ValueError, encode_pobj, b'x') def test_packs(tmpdir): environ[b'BUP_DIR'] = bupdir = tmpdir + b'/bup' git.init_repo(bupdir) git.verbose = 1 with git.PackWriter() as w: w.new_blob(os.urandom(100)) w.new_blob(os.urandom(100)) w.abort() with git.PackWriter() as w: hashes = [] nobj = 1000 for i in range(nobj): hashes.append(w.new_blob(b'%d' % i)) log('\n') nameprefix = w.close() print(repr(nameprefix)) WVPASS(os.path.exists(nameprefix + b'.pack')) WVPASS(os.path.exists(nameprefix + b'.idx')) with git.open_idx(nameprefix + b'.idx') as r: print(repr(r.fanout)) for i in range(nobj): WVPASS(r.find_offset(hashes[i]) > 0) WVPASS(r.exists(hashes[99])) WVFAIL(r.exists(b'\0'*20)) pi = iter(r) for h in sorted(hashes): WVPASSEQ(hexlify(next(pi)), hexlify(h)) WVFAIL(r.find_offset(b'\0'*20)) with git.PackIdxList(bupdir + b'/objects/pack') as r: WVPASS(r.exists(hashes[5])) WVPASS(r.exists(hashes[6])) WVFAIL(r.exists(b'\0'*20)) def test_pack_name_lookup(tmpdir): environ[b'BUP_DIR'] = bupdir = tmpdir + b'/bup' git.init_repo(bupdir) git.verbose = 1 packdir = git.repo(b'objects/pack') idxnames = [] hashes = [] for start in range(0,28,2): with git.PackWriter() as w: for i in range(start, start+2): hashes.append(w.new_blob(b'%d' % i)) log('\n') idxnames.append(os.path.basename(w.close() + 
b'.idx')) with git.PackIdxList(packdir) as r: WVPASSEQ(len(r.packs), 2) for e,idxname in enumerate(idxnames): for i in range(e*2, (e+1)*2): WVPASSEQ(idxname, r.exists(hashes[i], want_source=True)) def test_long_index(tmpdir): environ[b'BUP_DIR'] = bupdir = tmpdir + b'/bup' git.init_repo(bupdir) idx = git.PackIdxV2Writer() obj_bin = struct.pack('!IIIII', 0x00112233, 0x44556677, 0x88990011, 0x22334455, 0x66778899) obj2_bin = struct.pack('!IIIII', 0x11223344, 0x55667788, 0x99001122, 0x33445566, 0x77889900) obj3_bin = struct.pack('!IIIII', 0x22334455, 0x66778899, 0x00112233, 0x44556677, 0x88990011) pack_bin = struct.pack('!IIIII', 0x99887766, 0x55443322, 0x11009988, 0x77665544, 0x33221100) idx.add(obj_bin, 1, 0xfffffffff) idx.add(obj2_bin, 2, 0xffffffffff) idx.add(obj3_bin, 3, 0xff) name = tmpdir + b'/tmp.idx' r = idx.write(name, pack_bin) with git.PackIdxV2(name, open(name, 'rb')) as i: WVPASSEQ(i.find_offset(obj_bin), 0xfffffffff) WVPASSEQ(i.find_offset(obj2_bin), 0xffffffffff) WVPASSEQ(i.find_offset(obj3_bin), 0xff) def test_check_repo_or_die(tmpdir): environ[b'BUP_DIR'] = bupdir = tmpdir + b'/bup' orig_cwd = os.getcwd() try: os.chdir(tmpdir) git.init_repo(bupdir) git.check_repo_or_die() # if we reach this point the call above passed WVPASS('check_repo_or_die') os.rename(bupdir + b'/objects/pack', bupdir + b'/objects/pack.tmp') open(bupdir + b'/objects/pack', 'w').close() try: git.check_repo_or_die() except SystemExit as e: WVPASSEQ(e.code, 14) else: WVFAIL() os.unlink(bupdir + b'/objects/pack') os.rename(bupdir + b'/objects/pack.tmp', bupdir + b'/objects/pack') try: git.check_repo_or_die(b'nonexistantbup.tmp') except SystemExit as e: WVPASSEQ(e.code, 15) else: WVFAIL() finally: os.chdir(orig_cwd) def test_commit_parsing(tmpdir): def restore_env_var(name, val): if val is None: del environ[name] else: environ[name] = val def showval(commit, val): return readpipe([b'git', b'show', b'-s', b'--pretty=format:%s' % val, commit]).strip() orig_cwd = os.getcwd() workdir = 
tmpdir + b'/work' repodir = workdir + b'/.git' orig_author_name = environ.get(b'GIT_AUTHOR_NAME') orig_author_email = environ.get(b'GIT_AUTHOR_EMAIL') orig_committer_name = environ.get(b'GIT_COMMITTER_NAME') orig_committer_email = environ.get(b'GIT_COMMITTER_EMAIL') environ[b'GIT_AUTHOR_NAME'] = b'bup test' environ[b'GIT_COMMITTER_NAME'] = environ[b'GIT_AUTHOR_NAME'] environ[b'GIT_AUTHOR_EMAIL'] = b'bup@a425bc70a02811e49bdf73ee56450e6f' environ[b'GIT_COMMITTER_EMAIL'] = environ[b'GIT_AUTHOR_EMAIL'] try: environ[b'GIT_DIR'] = environ[b'BUP_DIR'] = repodir readpipe([b'git', b'init', workdir]) exc(b'git', b'symbolic-ref', b'HEAD', b'refs/heads/main') git.check_repo_or_die(repodir) os.chdir(workdir) with open('foo', 'w') as f: print('bar', file=f) readpipe([b'git', b'add', b'.']) readpipe([b'git', b'commit', b'-am', b'Do something', b'--author', b'Someone ', b'--date', b'Sat Oct 3 19:48:49 2009 -0400']) commit = readpipe([b'git', b'show-ref', b'-s', b'main']).strip() parents = showval(commit, b'%P') tree = showval(commit, b'%T') cname = showval(commit, b'%cn') cmail = showval(commit, b'%ce') cdate = showval(commit, b'%ct') coffs = showval(commit, b'%ci') coffs = coffs[-5:] coff = (int(coffs[-4:-2]) * 60 * 60) + (int(coffs[-2:]) * 60) if bytes_from_byte(coffs[-5]) == b'-': coff = - coff commit_items = git.get_commit_items(commit, git.cp()) WVPASSEQ(commit_items.parents, []) WVPASSEQ(commit_items.tree, tree) WVPASSEQ(commit_items.author_name, b'Someone') WVPASSEQ(commit_items.author_mail, b'someone@somewhere') WVPASSEQ(commit_items.author_sec, 1254613729) WVPASSEQ(commit_items.author_offset, -(4 * 60 * 60)) WVPASSEQ(commit_items.committer_name, cname) WVPASSEQ(commit_items.committer_mail, cmail) WVPASSEQ(commit_items.committer_sec, int(cdate)) WVPASSEQ(commit_items.committer_offset, coff) WVPASSEQ(commit_items.message, b'Do something\n') with open(b'bar', 'wb') as f: f.write(b'baz\n') readpipe([b'git', b'add', '.']) readpipe([b'git', b'commit', b'-am', b'Do something 
else']) child = readpipe([b'git', b'show-ref', b'-s', b'main']).strip() parents = showval(child, b'%P') commit_items = git.get_commit_items(child, git.cp()) WVPASSEQ(commit_items.parents, [commit]) finally: os.chdir(orig_cwd) restore_env_var(b'GIT_AUTHOR_NAME', orig_author_name) restore_env_var(b'GIT_AUTHOR_EMAIL', orig_author_email) restore_env_var(b'GIT_COMMITTER_NAME', orig_committer_name) restore_env_var(b'GIT_COMMITTER_EMAIL', orig_committer_email) gpgsig_example_1 = b'''tree 3fab08ade2fbbda60bef180bb8e0cc5724d6bd4d parent 36db87b46a95ca5079f43dfe9b72220acab7c731 author Rob Browning 1633397238 -0500 committer Rob Browning 1633397238 -0500 gpgsig -----BEGIN PGP SIGNATURE----- ... -----END PGP SIGNATURE----- Sample signed commit. ''' gpgsig_example_2 = b'''tree 3fab08ade2fbbda60bef180bb8e0cc5724d6bd4d parent 36db87b46a95ca5079f43dfe9b72220acab7c731 author Rob Browning 1633397238 -0500 committer Rob Browning 1633397238 -0500 gpgsig -----BEGIN PGP SIGNATURE----- ... -----END PGP SIGNATURE----- Sample signed commit. 
''' def test_commit_gpgsig_parsing(): c = git.parse_commit(gpgsig_example_1) assert c.gpgsig assert c.gpgsig.startswith(b'-----BEGIN PGP SIGNATURE-----\n') assert c.gpgsig.endswith(b'\n-----END PGP SIGNATURE-----\n') c = git.parse_commit(gpgsig_example_2) assert c.gpgsig assert c.gpgsig.startswith(b'-----BEGIN PGP SIGNATURE-----') assert c.gpgsig.endswith(b'\n-----END PGP SIGNATURE-----\n\n') def test_new_commit(tmpdir): environ[b'BUP_DIR'] = bupdir = tmpdir + b'/bup' git.init_repo(bupdir) git.verbose = 1 with git.PackWriter() as w: tree = os.urandom(20) parent = os.urandom(20) author_name = b'Author' author_mail = b'author@somewhere' adate_sec = 1439657836 cdate_sec = adate_sec + 1 committer_name = b'Committer' committer_mail = b'committer@somewhere' adate_tz_sec = cdate_tz_sec = None commit = w.new_commit(tree, parent, b'%s <%s>' % (author_name, author_mail), adate_sec, adate_tz_sec, b'%s <%s>' % (committer_name, committer_mail), cdate_sec, cdate_tz_sec, b'There is a small mailbox here') adate_tz_sec = -60 * 60 cdate_tz_sec = 120 * 60 commit_off = w.new_commit(tree, parent, b'%s <%s>' % (author_name, author_mail), adate_sec, adate_tz_sec, b'%s <%s>' % (committer_name, committer_mail), cdate_sec, cdate_tz_sec, b'There is a small mailbox here') commit_items = git.get_commit_items(hexlify(commit), git.cp()) local_author_offset = localtime(adate_sec).tm_gmtoff local_committer_offset = localtime(cdate_sec).tm_gmtoff WVPASSEQ(tree, unhexlify(commit_items.tree)) WVPASSEQ(1, len(commit_items.parents)) WVPASSEQ(parent, unhexlify(commit_items.parents[0])) WVPASSEQ(author_name, commit_items.author_name) WVPASSEQ(author_mail, commit_items.author_mail) WVPASSEQ(adate_sec, commit_items.author_sec) WVPASSEQ(local_author_offset, commit_items.author_offset) WVPASSEQ(committer_name, commit_items.committer_name) WVPASSEQ(committer_mail, commit_items.committer_mail) WVPASSEQ(cdate_sec, commit_items.committer_sec) WVPASSEQ(local_committer_offset, commit_items.committer_offset) 
commit_items = git.get_commit_items(hexlify(commit_off), git.cp()) WVPASSEQ(tree, unhexlify(commit_items.tree)) WVPASSEQ(1, len(commit_items.parents)) WVPASSEQ(parent, unhexlify(commit_items.parents[0])) WVPASSEQ(author_name, commit_items.author_name) WVPASSEQ(author_mail, commit_items.author_mail) WVPASSEQ(adate_sec, commit_items.author_sec) WVPASSEQ(adate_tz_sec, commit_items.author_offset) WVPASSEQ(committer_name, commit_items.committer_name) WVPASSEQ(committer_mail, commit_items.committer_mail) WVPASSEQ(cdate_sec, commit_items.committer_sec) WVPASSEQ(cdate_tz_sec, commit_items.committer_offset) def test_list_refs(tmpdir): environ[b'BUP_DIR'] = bupdir = tmpdir + b'/bup' src = tmpdir + b'/src' mkdirp(src) with open(src + b'/1', 'wb+') as f: f.write(b'something\n') with open(src + b'/2', 'wb+') as f: f.write(b'something else\n') git.init_repo(bupdir) emptyset = frozenset() WVPASSEQ(frozenset(git.list_refs()), emptyset) WVPASSEQ(frozenset(git.list_refs(limit_to_tags=True)), emptyset) WVPASSEQ(frozenset(git.list_refs(limit_to_heads=True)), emptyset) exc(bup_exe, b'index', src) exc(bup_exe, b'save', b'-n', b'src', b'--strip', src) src_hash = exo(b'git', b'--git-dir', bupdir, b'rev-parse', b'src').strip().split(b'\n') assert(len(src_hash) == 1) src_hash = unhexlify(src_hash[0]) tree_hash = unhexlify(exo(b'git', b'--git-dir', bupdir, b'rev-parse', b'src:').strip().split(b'\n')[0]) blob_hash = unhexlify(exo(b'git', b'--git-dir', bupdir, b'rev-parse', b'src:1').strip().split(b'\n')[0]) WVPASSEQ(frozenset(git.list_refs()), frozenset([(b'refs/heads/src', src_hash)])) WVPASSEQ(frozenset(git.list_refs(limit_to_tags=True)), emptyset) WVPASSEQ(frozenset(git.list_refs(limit_to_heads=True)), frozenset([(b'refs/heads/src', src_hash)])) exc(b'git', b'--git-dir', bupdir, b'tag', b'commit-tag', b'src') WVPASSEQ(frozenset(git.list_refs()), frozenset([(b'refs/heads/src', src_hash), (b'refs/tags/commit-tag', src_hash)])) WVPASSEQ(frozenset(git.list_refs(limit_to_tags=True)), 
frozenset([(b'refs/tags/commit-tag', src_hash)])) WVPASSEQ(frozenset(git.list_refs(limit_to_heads=True)), frozenset([(b'refs/heads/src', src_hash)])) exc(b'git', b'--git-dir', bupdir, b'tag', b'tree-tag', b'src:') exc(b'git', b'--git-dir', bupdir, b'tag', b'blob-tag', b'src:1') os.unlink(bupdir + b'/refs/heads/src') expected_tags = frozenset([(b'refs/tags/commit-tag', src_hash), (b'refs/tags/tree-tag', tree_hash), (b'refs/tags/blob-tag', blob_hash)]) WVPASSEQ(frozenset(git.list_refs()), expected_tags) WVPASSEQ(frozenset(git.list_refs(limit_to_heads=True)), frozenset([])) WVPASSEQ(frozenset(git.list_refs(limit_to_tags=True)), expected_tags) def test_git_date_str(): WVPASSEQ(b'0 +0000', git._git_date_str(0, 0)) WVPASSEQ(b'0 -0130', git._git_date_str(0, -90 * 60)) WVPASSEQ(b'0 +0130', git._git_date_str(0, 90 * 60)) def test_cat_pipe(tmpdir): environ[b'BUP_DIR'] = bupdir = tmpdir + b'/bup' src = tmpdir + b'/src' mkdirp(src) with open(src + b'/1', 'wb+') as f: f.write(b'something\n') with open(src + b'/2', 'wb+') as f: f.write(b'something else\n') git.init_repo(bupdir) exc(bup_exe, b'index', src) oidx = exo(bup_exe, b'save', b'-cn', b'src', b'--strip', src).strip() typ = exo(b'git', b'--git-dir', bupdir, b'cat-file', b'-t', b'src').strip() size = int(exo(b'git', b'--git-dir', bupdir, b'cat-file', b'-s', b'src')) it = git.cp().get(b'src') get_info = next(it) for buf in next(it): pass WVPASSEQ((oidx, typ, size), get_info) def _create_idx(d, i): idx = git.PackIdxV2Writer() # add 255 vaguely reasonable entries for s in range(255): idx.add(struct.pack('18xBB', i, s), s, 100 * s) packbin = struct.pack('B19x', i) packname = os.path.join(d, b'pack-%s.idx' % hexlify(packbin)) idx.write(packname, packbin) def test_midx_close(tmpdir): fddir = b'/proc/self/fd' try: os.listdir(fddir) except Exception: # not supported, not Linux, I guess return def openfiles(): for fd in os.listdir(fddir): try: yield os.readlink(os.path.join(fddir, fd)) except OSError: pass def force_midx(objdir): 
args = [path.exe(), b'midx', b'--auto', b'--dir', objdir] check_call(args) environ[b'BUP_DIR'] = bupdir = tmpdir + b'/bup' git.init_repo(bupdir) # create a few dummy idxes for i in range(10): _create_idx(tmpdir, i) git.auto_midx(tmpdir) with git.PackIdxList(tmpdir) as l: # this doesn't exist (yet) WVPASSEQ(None, l.exists(struct.pack('18xBB', 10, 0))) for i in range(10, 15): _create_idx(tmpdir, i) # delete the midx ... # TODO: why do we need to? git.auto_midx() below doesn't?! for fn in os.listdir(tmpdir): if fn.endswith(b'.midx'): os.unlink(os.path.join(tmpdir, fn)) # and make a new one git.auto_midx(tmpdir) # check it still doesn't exist - we haven't refreshed WVPASSEQ(None, l.exists(struct.pack('18xBB', 10, 0))) # check that we still have the midx open, this really # just checks more for the kernel API ('deleted' string) for fn in openfiles(): if not b'midx-' in fn: continue WVPASSEQ(True, b'deleted' in fn) # refresh the PackIdxList l.refresh() # and check that an object in pack 10 exists now WVPASSEQ(True, l.exists(struct.pack('18xBB', 10, 0))) for fn in openfiles(): if not b'midx-' in fn: continue # check that we don't have it open anymore WVPASSEQ(False, b'deleted' in fn) def test_config(tmpdir): cfg_file = os.path.join(os.path.dirname(__file__), 'sample.conf') no_such_file = os.path.join(os.path.dirname(__file__), 'nosuch.conf') git_config_get = partial(git.git_config_get, cfg_file=cfg_file) WVPASSEQ(git_config_get(b'bup.foo'), b'bar') WVPASSEQ(git_config_get(b'bup.bup'), b'is great') WVPASSEQ(git_config_get(b'bup.end'), b'end') WVPASSEQ(git_config_get(b'bup.comments'), None) WVPASSEQ(git_config_get(b'bup.;comments'), None) WVPASSEQ(git_config_get(b'bup.and'), None) WVPASSEQ(git_config_get(b'bup.#and'), None) WVPASSEQ(git.git_config_get(b'bup.foo', cfg_file=no_such_file), None) WVEXCEPT(git.GitError, git_config_get, b'bup.isbad', opttype='bool') WVEXCEPT(git.GitError, git_config_get, b'bup.isbad', opttype='int') WVPASSEQ(git_config_get(b'bup.isbad'), b'ok') 
WVPASSEQ(True, git_config_get(b'bup.istrue1', opttype='bool')) WVPASSEQ(True, git_config_get(b'bup.istrue2', opttype='bool')) WVPASSEQ(True, git_config_get(b'bup.istrue3', opttype='bool')) WVPASSEQ(False, git_config_get(b'bup.isfalse1', opttype='bool')) WVPASSEQ(False, git_config_get(b'bup.isfalse2', opttype='bool')) WVPASSEQ(None, git_config_get(b'bup.nosuchkey', opttype='bool')) WVPASSEQ(1, git_config_get(b'bup.istrue1', opttype='int')) WVPASSEQ(2, git_config_get(b'bup.istrue2', opttype='int')) WVPASSEQ(0, git_config_get(b'bup.isfalse2', opttype='int')) WVPASSEQ(0x777, git_config_get(b'bup.hex', opttype='int')) # Make sure get_config respects the repo() git_dir = tmpdir + b'/repo' git.init_repo(git_dir) git.check_repo_or_die(git_dir) exc(b'git', b'--git-dir', git_dir, b'config', b'bup.foo', b'yep') assert b'yep' == git.git_config_get(b'bup.foo') bup-0.33.3/test/int/test_hashsplit.py000066400000000000000000000113271454333004200175510ustar00rootroot00000000000000 from __future__ import absolute_import from io import BytesIO from wvpytest import * from bup import hashsplit, _helpers, helpers from bup.compat import byte_int, bytes_from_uint def nr_regions(x, max_count=None): return list(hashsplit._nonresident_page_regions(bytearray(x), 1, max_count)) def test_nonresident_page_regions(): WVPASSEQ(nr_regions([]), []) WVPASSEQ(nr_regions([1]), []) WVPASSEQ(nr_regions([0]), [(0, 1)]) WVPASSEQ(nr_regions([1, 0]), [(1, 1)]) WVPASSEQ(nr_regions([0, 0]), [(0, 2)]) WVPASSEQ(nr_regions([1, 0, 1]), [(1, 1)]) WVPASSEQ(nr_regions([1, 0, 0]), [(1, 2)]) WVPASSEQ(nr_regions([0, 1, 0]), [(0, 1), (2, 1)]) WVPASSEQ(nr_regions([0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0]), [(0, 2), (5, 3), (9, 2)]) WVPASSEQ(nr_regions([2, 42, 3, 101]), [(0, 2)]) # Test limit WVPASSEQ(nr_regions([0, 0, 0], None), [(0, 3)]) WVPASSEQ(nr_regions([0, 0, 0], 1), [(0, 1), (1, 1), (2, 1)]) WVPASSEQ(nr_regions([0, 0, 0], 2), [(0, 2), (2, 1)]) WVPASSEQ(nr_regions([0, 0, 0], 3), [(0, 3)]) WVPASSEQ(nr_regions([0, 0, 0], 4), 
[(0, 3)]) WVPASSEQ(nr_regions([0, 0, 1], None), [(0, 2)]) WVPASSEQ(nr_regions([0, 0, 1], 1), [(0, 1), (1, 1)]) WVPASSEQ(nr_regions([0, 0, 1], 2), [(0, 2)]) WVPASSEQ(nr_regions([0, 0, 1], 3), [(0, 2)]) WVPASSEQ(nr_regions([1, 0, 0], None), [(1, 2)]) WVPASSEQ(nr_regions([1, 0, 0], 1), [(1, 1), (2, 1)]) WVPASSEQ(nr_regions([1, 0, 0], 2), [(1, 2)]) WVPASSEQ(nr_regions([1, 0, 0], 3), [(1, 2)]) WVPASSEQ(nr_regions([1, 0, 0, 0, 1], None), [(1, 3)]) WVPASSEQ(nr_regions([1, 0, 0, 0, 1], 1), [(1, 1), (2, 1), (3, 1)]) WVPASSEQ(nr_regions([1, 0, 0, 0, 1], 2), [(1, 2), (3, 1)]) WVPASSEQ(nr_regions([1, 0, 0, 0, 1], 3), [(1, 3)]) WVPASSEQ(nr_regions([1, 0, 0, 0, 1], 4), [(1, 3)]) def test_uncache_ours_upto(): history = [] def mock_fadvise_pages_done(f, ofs, len): history.append((f, ofs, len)) uncache_upto = hashsplit._uncache_ours_upto page_size = helpers.sc_page_size orig_pages_done = hashsplit._fadvise_pages_done try: hashsplit._fadvise_pages_done = mock_fadvise_pages_done history = [] uncache_upto(42, 0, (0, 1), iter([])) WVPASSEQ([], history) uncache_upto(42, page_size, (0, 1), iter([])) WVPASSEQ([(42, 0, 1)], history) history = [] uncache_upto(42, page_size, (0, 3), iter([(5, 2)])) WVPASSEQ([], history) uncache_upto(42, 2 * page_size, (0, 3), iter([(5, 2)])) WVPASSEQ([], history) uncache_upto(42, 3 * page_size, (0, 3), iter([(5, 2)])) WVPASSEQ([(42, 0, 3)], history) history = [] uncache_upto(42, 5 * page_size, (0, 3), iter([(5, 2)])) WVPASSEQ([(42, 0, 3)], history) history = [] uncache_upto(42, 6 * page_size, (0, 3), iter([(5, 2)])) WVPASSEQ([(42, 0, 3)], history) history = [] uncache_upto(42, 7 * page_size, (0, 3), iter([(5, 2)])) WVPASSEQ([(42, 0, 3), (42, 5, 2)], history) finally: hashsplit._fadvise_pages_done = orig_pages_done def test_rolling_sums(): WVPASS(_helpers.selftest()) def test_fanout_behaviour(): # Drop in replacement for bupsplit, but splitting if the int value of a # byte >= BUP_BLOBBITS basebits = _helpers.blobbits() def splitbuf(buf): ofs = 0 for b in buf: 
b = byte_int(b) ofs += 1 if b >= basebits: return ofs, b return 0, 0 old_splitbuf = _helpers.splitbuf _helpers.splitbuf = splitbuf old_BLOB_MAX = hashsplit.BLOB_MAX hashsplit.BLOB_MAX = 4 old_BLOB_READ_SIZE = hashsplit.BLOB_READ_SIZE hashsplit.BLOB_READ_SIZE = 10 old_fanout = hashsplit.fanout hashsplit.fanout = 2 levels = lambda f: [(len(b), l) for b, l in hashsplit.hashsplit_iter([f], True, None)] # Return a string of n null bytes z = lambda n: b'\x00' * n # Return a byte which will be split with a level of n sb = lambda n: bytes_from_uint(basebits + n) split_never = BytesIO(z(16)) split_first = BytesIO(z(1) + sb(3) + z(14)) split_end = BytesIO(z(13) + sb(1) + z(2)) split_many = BytesIO(sb(1) + z(3) + sb(2) + z(4) + sb(0) + z(4) + sb(5) + z(1)) WVPASSEQ(levels(split_never), [(4, 0), (4, 0), (4, 0), (4, 0)]) WVPASSEQ(levels(split_first), [(2, 3), (4, 0), (4, 0), (4, 0), (2, 0)]) WVPASSEQ(levels(split_end), [(4, 0), (4, 0), (4, 0), (2, 1), (2, 0)]) WVPASSEQ(levels(split_many), [(1, 1), (4, 2), (4, 0), (1, 0), (4, 0), (1, 5), (1, 0)]) _helpers.splitbuf = old_splitbuf hashsplit.BLOB_MAX = old_BLOB_MAX hashsplit.BLOB_READ_SIZE = old_BLOB_READ_SIZE hashsplit.fanout = old_fanout bup-0.33.3/test/int/test_helpers.py000066400000000000000000000176051454333004200172210ustar00rootroot00000000000000 from __future__ import absolute_import from time import tzset import os, os.path, re from bup import helpers from wvpytest import * from bup.compat import bytes_from_byte, bytes_from_uint, environ from bup.helpers import (atomically_replaced_file, batchpipe, detect_fakeroot, grafted_path_components, parse_num, path_components, readpipe, stripped_path_components, shstr, utc_offset_str) def test_parse_num(): pn = parse_num WVPASSEQ(pn(b'1'), 1) WVPASSEQ(pn('1'), 1) WVPASSEQ(pn('0'), 0) WVPASSEQ(pn('1.5k'), 1536) WVPASSEQ(pn('2 gb'), 2*1024*1024*1024) WVPASSEQ(pn('1e+9 k'), 1000000000 * 1024) WVPASSEQ(pn('-3e-3mb'), int(-0.003 * 1024 * 1024)) def test_detect_fakeroot(): if 
b'FAKEROOTKEY' in environ: WVPASS(detect_fakeroot()) else: WVPASS(not detect_fakeroot()) def test_path_components(): WVPASSEQ(path_components(b'/'), [(b'', b'/')]) WVPASSEQ(path_components(b'/foo'), [(b'', b'/'), (b'foo', b'/foo')]) WVPASSEQ(path_components(b'/foo/'), [(b'', b'/'), (b'foo', b'/foo')]) WVPASSEQ(path_components(b'/foo/bar'), [(b'', b'/'), (b'foo', b'/foo'), (b'bar', b'/foo/bar')]) WVEXCEPT(Exception, path_components, b'foo') def test_stripped_path_components(): WVPASSEQ(stripped_path_components(b'/', []), [(b'', b'/')]) WVPASSEQ(stripped_path_components(b'/', [b'']), [(b'', b'/')]) WVPASSEQ(stripped_path_components(b'/', [b'/']), [(b'', b'/')]) WVPASSEQ(stripped_path_components(b'/foo', [b'/']), [(b'', b'/'), (b'foo', b'/foo')]) WVPASSEQ(stripped_path_components(b'/', [b'/foo']), [(b'', b'/')]) WVPASSEQ(stripped_path_components(b'/foo', [b'/bar']), [(b'', b'/'), (b'foo', b'/foo')]) WVPASSEQ(stripped_path_components(b'/foo', [b'/foo']), [(b'', b'/foo')]) WVPASSEQ(stripped_path_components(b'/foo/bar', [b'/foo']), [(b'', b'/foo'), (b'bar', b'/foo/bar')]) WVPASSEQ(stripped_path_components(b'/foo/bar', [b'/bar', b'/foo', b'/baz']), [(b'', b'/foo'), (b'bar', b'/foo/bar')]) WVPASSEQ(stripped_path_components(b'/foo/bar/baz', [b'/foo/bar/baz']), [(b'', b'/foo/bar/baz')]) WVEXCEPT(Exception, stripped_path_components, b'foo', []) def test_grafted_path_components(): WVPASSEQ(grafted_path_components([(b'/chroot', b'/')], b'/foo'), [(b'', b'/'), (b'foo', b'/foo')]) WVPASSEQ(grafted_path_components([(b'/foo/bar', b'/')], b'/foo/bar/baz/bax'), [(b'', b'/foo/bar'), (b'baz', b'/foo/bar/baz'), (b'bax', b'/foo/bar/baz/bax')]) WVPASSEQ(grafted_path_components([(b'/foo/bar/baz', b'/bax')], b'/foo/bar/baz/1/2'), [(b'', None), (b'bax', b'/foo/bar/baz'), (b'1', b'/foo/bar/baz/1'), (b'2', b'/foo/bar/baz/1/2')]) WVPASSEQ(grafted_path_components([(b'/foo', b'/bar/baz/bax')], b'/foo/bar'), [(b'', None), (b'bar', None), (b'baz', None), (b'bax', b'/foo'), (b'bar', b'/foo/bar')]) 
WVPASSEQ(grafted_path_components([(b'/foo/bar/baz', b'/a/b/c')], b'/foo/bar/baz'), [(b'', None), (b'a', None), (b'b', None), (b'c', b'/foo/bar/baz')]) WVPASSEQ(grafted_path_components([(b'/', b'/a/b/c/')], b'/foo/bar'), [(b'', None), (b'a', None), (b'b', None), (b'c', b'/'), (b'foo', b'/foo'), (b'bar', b'/foo/bar')]) WVEXCEPT(Exception, grafted_path_components, b'foo', []) def test_shstr(): # Do nothing for strings and bytes WVPASSEQ(shstr(b''), b'') WVPASSEQ(shstr(b'1'), b'1') WVPASSEQ(shstr(b'1 2'), b'1 2') WVPASSEQ(shstr(b"1'2"), b"1'2") WVPASSEQ(shstr(''), '') WVPASSEQ(shstr('1'), '1') WVPASSEQ(shstr('1 2'), '1 2') WVPASSEQ(shstr("1'2"), "1'2") # Escape parts of sequences WVPASSEQ(shstr((b'1 2', b'3')), b"'1 2' 3") WVPASSEQ(shstr((b"1'2", b'3')), b"'1'\"'\"'2' 3") WVPASSEQ(shstr((b"'1", b'3')), b"''\"'\"'1' 3") WVPASSEQ(shstr(('1 2', '3')), "'1 2' 3") WVPASSEQ(shstr(("1'2", '3')), "'1'\"'\"'2' 3") WVPASSEQ(shstr(("'1", '3')), "''\"'\"'1' 3") def test_readpipe(): x = readpipe([b'echo', b'42']) WVPASSEQ(x, b'42\n') try: readpipe([b'bash', b'-c', b'exit 42']) except Exception as ex: rx = '^subprocess b?"bash -c \'exit 42\'" failed with status 42$' if not re.match(rx, str(ex)): WVPASSEQ(str(ex), rx) def test_batchpipe(): for chunk in batchpipe([b'echo'], []): WVPASS(False) out = b'' for chunk in batchpipe([b'echo'], [b'42']): out += chunk WVPASSEQ(out, b'42\n') try: batchpipe([b'bash', b'-c'], [b'exit 42']) except Exception as ex: WVPASSEQ(str(ex), "subprocess 'bash -c exit 42' failed with status 42") args = [str(x) for x in range(6)] # Force batchpipe to break the args into batches of 3. This # approach assumes all args are the same length. 
arg_max = \ helpers._argmax_base([b'echo']) + helpers._argmax_args_size(args[:3]) batches = batchpipe(['echo'], args, arg_max=arg_max) WVPASSEQ(next(batches), b'0 1 2\n') WVPASSEQ(next(batches), b'3 4 5\n') WVPASSEQ(next(batches, None), None) batches = batchpipe([b'echo'], [str(x) for x in range(5)], arg_max=arg_max) WVPASSEQ(next(batches), b'0 1 2\n') WVPASSEQ(next(batches), b'3 4\n') WVPASSEQ(next(batches, None), None) def test_atomically_replaced_file(tmpdir): target_file = os.path.join(tmpdir, b'test-atomic-write') with atomically_replaced_file(target_file, mode='w') as f: f.write('asdf') WVPASSEQ(f.mode, 'w') f = open(target_file, 'r') WVPASSEQ(f.read(), 'asdf') try: with atomically_replaced_file(target_file, mode='w') as f: f.write('wxyz') raise Exception() except: pass with open(target_file) as f: WVPASSEQ(f.read(), 'asdf') with atomically_replaced_file(target_file, mode='wb') as f: f.write(os.urandom(20)) WVPASSEQ(f.mode, 'wb') def set_tz(tz): if not tz: del environ[b'TZ'] else: environ[b'TZ'] = tz tzset() def test_utc_offset_str(): tz = environ.get(b'TZ') tzset() try: set_tz(b'FOO+0:00') WVPASSEQ(utc_offset_str(0), b'+0000') set_tz(b'FOO+1:00') WVPASSEQ(utc_offset_str(0), b'-0100') set_tz(b'FOO-1:00') WVPASSEQ(utc_offset_str(0), b'+0100') set_tz(b'FOO+3:3') WVPASSEQ(utc_offset_str(0), b'-0303') set_tz(b'FOO-3:3') WVPASSEQ(utc_offset_str(0), b'+0303') # Offset is not an integer number of minutes set_tz(b'FOO+3:3:3') WVPASSEQ(utc_offset_str(1), b'-0303') set_tz(b'FOO-3:3:3') WVPASSEQ(utc_offset_str(1), b'+0303') WVPASSEQ(utc_offset_str(314159), b'+0303') finally: if tz: set_tz(tz) else: try: set_tz(None) except KeyError: pass def test_valid_save_name(): valid = helpers.valid_save_name WVPASS(valid(b'x')) WVPASS(valid(b'x@')) WVFAIL(valid(b'@')) WVFAIL(valid(b'/')) WVFAIL(valid(b'/foo')) WVFAIL(valid(b'foo/')) WVFAIL(valid(b'/foo/')) WVFAIL(valid(b'foo//bar')) WVFAIL(valid(b'.')) WVFAIL(valid(b'bar.')) WVFAIL(valid(b'foo@{')) for x in b' ~^:?*[\\': 
WVFAIL(valid(b'foo' + bytes_from_byte(x))) for i in range(20): WVFAIL(valid(b'foo' + bytes_from_uint(i))) WVFAIL(valid(b'foo' + bytes_from_uint(0x7f))) WVFAIL(valid(b'foo..bar')) WVFAIL(valid(b'bar.lock/baz')) WVFAIL(valid(b'foo/bar.lock/baz')) WVFAIL(valid(b'.bar/baz')) WVFAIL(valid(b'foo/.bar/baz')) bup-0.33.3/test/int/test_index.py000066400000000000000000000140531454333004200166600ustar00rootroot00000000000000 from __future__ import absolute_import, print_function import os, time from wvpytest import * from bup import index, metadata from bup.compat import fsencode from bup.helpers import resolve_parent import bup.xstat as xstat lib_t_dir = os.path.dirname(fsencode(__file__)) def test_index_basic(): cd = os.path.realpath(os.path.join(lib_t_dir, b'../')) WVPASS(cd) sd = os.path.realpath(cd + b'/sampledata') WVPASSEQ(resolve_parent(cd + b'/sampledata'), sd) WVPASSEQ(os.path.realpath(cd + b'/sampledata/x'), sd + b'/x') WVPASSEQ(os.path.realpath(cd + b'/sampledata/var/abs-symlink'), sd + b'/var/abs-symlink-target') WVPASSEQ(resolve_parent(cd + b'/sampledata/var/abs-symlink'), sd + b'/var/abs-symlink') def test_index_writer(tmpdir): orig_cwd = os.getcwd() try: os.chdir(tmpdir) ds = xstat.stat(b'.') fs = xstat.stat(lib_t_dir + b'/test_index.py') tmax = (time.time() - 1) * 10**9 with index.MetaStoreWriter(b'index.meta.tmp') as ms, \ index.Writer(b'index.tmp', ms, tmax) as w: w.add(b'/var/tmp/sporky', fs, 0) w.add(b'/etc/passwd', fs, 0) w.add(b'/etc/', ds, 0) w.add(b'/', ds, 0) w.close() finally: os.chdir(orig_cwd) def dump(m): for e in list(m): print('%s%s %s' % (e.is_valid() and ' ' or 'M', e.is_fake() and 'F' or ' ', e.name)) def fake_validate(*l): for i in l: for e in i: e.validate(0o100644, index.FAKE_SHA) e.repack() def eget(l, ename): for e in l: if e.name == ename: return e return None def test_index_negative_timestamps(tmpdir): # Makes 'foo' exist foopath = tmpdir + b'/foo' f = open(foopath, 'wb') f.close() # Dec 31, 1969 os.utime(foopath, (-86400, -86400)) 
ns_per_sec = 10**9 tmax = (time.time() - 1) * ns_per_sec e = index.BlankNewEntry(foopath, 0, tmax) e.update_from_stat(xstat.stat(foopath), 0) WVPASS(e.packed()) # Jun 10, 1893 os.utime(foopath, (-0x80000000, -0x80000000)) e = index.BlankNewEntry(foopath, 0, tmax) e.update_from_stat(xstat.stat(foopath), 0) WVPASS(e.packed()) def test_index_dirty(tmpdir): orig_cwd = os.getcwd() try: os.chdir(tmpdir) default_meta = metadata.Metadata() with index.MetaStoreWriter(b'index.meta.tmp') as ms1, \ index.MetaStoreWriter(b'index2.meta.tmp') as ms2, \ index.MetaStoreWriter(b'index3.meta.tmp') as ms3: meta_ofs1 = ms1.store(default_meta) meta_ofs2 = ms2.store(default_meta) meta_ofs3 = ms3.store(default_meta) ds = xstat.stat(lib_t_dir) fs = xstat.stat(lib_t_dir + b'/test_index.py') tmax = (time.time() - 1) * 10**9 with index.Writer(b'index.tmp', ms1, tmax) as w1, \ index.Writer(b'index2.tmp', ms2, tmax) as w2, \ index.Writer(b'index3.tmp', ms3, tmax) as w3: w1.add(b'/a/b/x', fs, meta_ofs1) w1.add(b'/a/b/c', fs, meta_ofs1) w1.add(b'/a/b/', ds, meta_ofs1) w1.add(b'/a/', ds, meta_ofs1) #w1.close() WVPASS() w2.add(b'/a/b/n/2', fs, meta_ofs2) #w2.close() WVPASS() w3.add(b'/a/c/n/3', fs, meta_ofs3) #w3.close() WVPASS() with w1.new_reader() as r1, \ w2.new_reader() as r2, \ w3.new_reader() as r3: WVPASS() r1all = [e.name for e in r1] WVPASSEQ(r1all, [b'/a/b/x', b'/a/b/c', b'/a/b/', b'/a/', b'/']) r2all = [e.name for e in r2] WVPASSEQ(r2all, [b'/a/b/n/2', b'/a/b/n/', b'/a/b/', b'/a/', b'/']) r3all = [e.name for e in r3] WVPASSEQ(r3all, [b'/a/c/n/3', b'/a/c/n/', b'/a/c/', b'/a/', b'/']) all = [e.name for e in index.merge(r2, r1, r3)] WVPASSEQ(all, [b'/a/c/n/3', b'/a/c/n/', b'/a/c/', b'/a/b/x', b'/a/b/n/2', b'/a/b/n/', b'/a/b/c', b'/a/b/', b'/a/', b'/']) fake_validate(r1) dump(r1) print([hex(e.flags) for e in r1]) WVPASSEQ([e.name for e in r1 if e.is_valid()], r1all) WVPASSEQ([e.name for e in r1 if not e.is_valid()], []) WVPASSEQ([e.name for e in index.merge(r2, r1, r3) if not e.is_valid()], 
[b'/a/c/n/3', b'/a/c/n/', b'/a/c/', b'/a/b/n/2', b'/a/b/n/', b'/a/b/', b'/a/', b'/']) expect_invalid = [b'/'] + r2all + r3all expect_real = (set(r1all) - set(r2all) - set(r3all)) \ | set([b'/a/b/n/2', b'/a/c/n/3']) dump(index.merge(r2, r1, r3)) for e in index.merge(r2, r1, r3): print(e.name, hex(e.flags), e.ctime) eiv = e.name in expect_invalid er = e.name in expect_real WVPASSEQ(eiv, not e.is_valid()) WVPASSEQ(er, e.is_real()) fake_validate(r2, r3) dump(index.merge(r2, r1, r3)) WVPASSEQ([e.name for e in index.merge(r2, r1, r3) if not e.is_valid()], []) e = eget(index.merge(r2, r1, r3), b'/a/b/c') e.invalidate() e.repack() dump(index.merge(r2, r1, r3)) WVPASSEQ([e.name for e in index.merge(r2, r1, r3) if not e.is_valid()], [b'/a/b/c', b'/a/b/', b'/a/', b'/']) finally: os.chdir(orig_cwd) bup-0.33.3/test/int/test_metadata.py000066400000000000000000000245541454333004200173400ustar00rootroot00000000000000 from __future__ import absolute_import, print_function import errno, glob, stat, subprocess import os, sys import pytest from wvpytest import * from bup import git, metadata from bup import vfs from bup.helpers import clear_errors, detect_fakeroot, is_superuser, resolve_parent from bup.repo import LocalRepo from bup.xstat import utime, lutime import bup.helpers as helpers from bup.compat import fsencode lib_t_dir = os.path.dirname(fsencode(__file__)) top_dir = os.path.realpath(os.path.join(lib_t_dir, b'..', b'..')) bup_path = top_dir + b'/bup' def ex(*cmd): try: cmd_str = b' '.join(cmd) print(cmd_str, file=sys.stderr) rc = subprocess.call(cmd) if rc < 0: print('terminated by signal', - rc, file=sys.stderr) sys.exit(1) elif rc > 0: print('returned exit status', rc, file=sys.stderr) sys.exit(1) except OSError as e: print('subprocess call failed:', e, file=sys.stderr) sys.exit(1) def setup_testfs(): assert(sys.platform.startswith('linux')) # Set up testfs with user_xattr, etc. 
if subprocess.call([b'modprobe', b'loop']) != 0: return False subprocess.call([b'umount', b'testfs']) ex(b'dd', b'if=/dev/zero', b'of=testfs.img', b'bs=1M', b'count=32') ex(b'mke2fs', b'-F', b'-j', b'-m', b'0', b'testfs.img') ex(b'rm', b'-rf', b'testfs') os.mkdir(b'testfs') ex(b'mount', b'-o', b'loop,acl,user_xattr', b'testfs.img', b'testfs') # Hide, so that tests can't create risks. os.chown(b'testfs', 0, 0) os.chmod(b'testfs', 0o700) return True def cleanup_testfs(): subprocess.call([b'umount', b'testfs']) helpers.unlink(b'testfs.img') def test_clean_up_archive_path(): cleanup = metadata._clean_up_path_for_archive WVPASSEQ(cleanup(b'foo'), b'foo') WVPASSEQ(cleanup(b'/foo'), b'foo') WVPASSEQ(cleanup(b'///foo'), b'foo') WVPASSEQ(cleanup(b'/foo/bar'), b'foo/bar') WVPASSEQ(cleanup(b'foo/./bar'), b'foo/bar') WVPASSEQ(cleanup(b'/foo/./bar'), b'foo/bar') WVPASSEQ(cleanup(b'/foo/./bar/././baz'), b'foo/bar/baz') WVPASSEQ(cleanup(b'/foo/./bar///././baz'), b'foo/bar/baz') WVPASSEQ(cleanup(b'//./foo/./bar///././baz/.///'), b'foo/bar/baz/') WVPASSEQ(cleanup(b'./foo/./.bar'), b'foo/.bar') WVPASSEQ(cleanup(b'./foo/.'), b'foo') WVPASSEQ(cleanup(b'./foo/..'), b'.') WVPASSEQ(cleanup(b'//./..//.../..//.'), b'.') WVPASSEQ(cleanup(b'//./..//..././/.'), b'...') WVPASSEQ(cleanup(b'/////.'), b'.') WVPASSEQ(cleanup(b'/../'), b'.') WVPASSEQ(cleanup(b''), b'.') def test_risky_path(): risky = metadata._risky_path WVPASS(risky(b'/foo')) WVPASS(risky(b'///foo')) WVPASS(risky(b'/../foo')) WVPASS(risky(b'../foo')) WVPASS(risky(b'foo/..')) WVPASS(risky(b'foo/../')) WVPASS(risky(b'foo/../bar')) WVFAIL(risky(b'foo')) WVFAIL(risky(b'foo/')) WVFAIL(risky(b'foo///')) WVFAIL(risky(b'./foo')) WVFAIL(risky(b'foo/.')) WVFAIL(risky(b'./foo/.')) WVFAIL(risky(b'foo/bar')) WVFAIL(risky(b'foo/./bar')) def test_clean_up_extract_path(): cleanup = metadata._clean_up_extract_path WVPASSEQ(cleanup(b'/foo'), b'foo') WVPASSEQ(cleanup(b'///foo'), b'foo') WVFAIL(cleanup(b'/../foo')) WVFAIL(cleanup(b'../foo')) 
WVFAIL(cleanup(b'foo/..')) WVFAIL(cleanup(b'foo/../')) WVFAIL(cleanup(b'foo/../bar')) WVPASSEQ(cleanup(b'foo'), b'foo') WVPASSEQ(cleanup(b'foo/'), b'foo/') WVPASSEQ(cleanup(b'foo///'), b'foo///') WVPASSEQ(cleanup(b'./foo'), b'./foo') WVPASSEQ(cleanup(b'foo/.'), b'foo/.') WVPASSEQ(cleanup(b'./foo/.'), b'./foo/.') WVPASSEQ(cleanup(b'foo/bar'), b'foo/bar') WVPASSEQ(cleanup(b'foo/./bar'), b'foo/./bar') WVPASSEQ(cleanup(b'/'), b'.') WVPASSEQ(cleanup(b'./'), b'./') WVPASSEQ(cleanup(b'///foo/bar'), b'foo/bar') WVPASSEQ(cleanup(b'///foo/bar'), b'foo/bar') def test_metadata_method(tmpdir): bup_dir = tmpdir + b'/bup' data_path = tmpdir + b'/foo' os.mkdir(data_path) ex(b'touch', data_path + b'/file') ex(b'ln', b'-s', b'file', data_path + b'/symlink') test_time1 = 13 * 1000000000 test_time2 = 42 * 1000000000 utime(data_path + b'/file', (0, test_time1)) lutime(data_path + b'/symlink', (0, 0)) utime(data_path, (0, test_time2)) ex(bup_path, b'-d', bup_dir, b'init') ex(bup_path, b'-d', bup_dir, b'index', b'-v', data_path) ex(bup_path, b'-d', bup_dir, b'save', b'-tvvn', b'test', data_path) git.check_repo_or_die(bup_dir) with LocalRepo() as repo: resolved = vfs.resolve(repo, b'/test/latest' + resolve_parent(data_path), follow=False) leaf_name, leaf_item = resolved[-1] m = leaf_item.meta WVPASS(m.mtime == test_time2) WVPASS(leaf_name == b'foo') contents = tuple(vfs.contents(repo, leaf_item)) WVPASS(len(contents) == 3) WVPASSEQ(frozenset(name for name, item in contents), frozenset((b'.', b'file', b'symlink'))) for name, item in contents: if name == b'file': m = item.meta WVPASS(m.mtime == test_time1) elif name == b'symlink': m = item.meta WVPASSEQ(m.symlink_target, b'file') WVPASSEQ(m.size, 4) WVPASSEQ(m.mtime, 0) def _first_err(): if helpers.saved_errors: return str(helpers.saved_errors[0]) return '' def test_from_path_error(tmpdir): if is_superuser() or detect_fakeroot(): return path = tmpdir + b'/foo' os.mkdir(path) m = metadata.from_path(path, archive_path=path, 
save_symlinks=True) WVPASSEQ(m.path, path) os.chmod(path, 0o000) metadata.from_path(path, archive_path=path, save_symlinks=True) if metadata.get_linux_file_attr: print('saved_errors:', helpers.saved_errors, file=sys.stderr) WVPASS(len(helpers.saved_errors) == 1) errmsg = _first_err() WVPASS(errmsg.startswith('read Linux attr')) clear_errors() def _linux_attr_supported(path): # Expects path to denote a regular file or a directory. if not metadata.get_linux_file_attr: return False try: metadata.get_linux_file_attr(path) except OSError as e: if e.errno in (errno.ENOTTY, errno.ENOSYS, errno.EOPNOTSUPP): return False else: raise return True def test_apply_to_path_restricted_access(tmpdir): if is_superuser() or detect_fakeroot(): return if sys.platform.startswith('cygwin'): return # chmod 000 isn't effective. try: parent = tmpdir + b'/foo' path = parent + b'/bar' os.mkdir(parent) os.mkdir(path) clear_errors() if metadata.xattr: try: metadata.xattr.set(path, b'user.buptest', b'bup') except: print("failed to set test xattr") # ignore any failures here - maybe FS cannot do it pass m = metadata.from_path(path, archive_path=path, save_symlinks=True) WVPASSEQ(m.path, path) os.chmod(parent, 0o000) m.apply_to_path(path) print(b'saved_errors:', helpers.saved_errors, file=sys.stderr) expected_errors = ['utime: '] if m.linux_attr and _linux_attr_supported(tmpdir): expected_errors.append('Linux chattr: ') if metadata.xattr and m.linux_xattr: expected_errors.append("xattr.set ") WVPASS(len(helpers.saved_errors) == len(expected_errors)) for i in range(len(expected_errors)): assert str(helpers.saved_errors[i]).startswith(expected_errors[i]) finally: clear_errors() def test_restore_over_existing_target(tmpdir): path = tmpdir + b'/foo' os.mkdir(path) dir_m = metadata.from_path(path, archive_path=path, save_symlinks=True) os.rmdir(path) open(path, 'w').close() file_m = metadata.from_path(path, archive_path=path, save_symlinks=True) # Restore dir over file. 
WVPASSEQ(dir_m.create_path(path, create_symlinks=True), None) WVPASS(stat.S_ISDIR(os.stat(path).st_mode)) # Restore dir over dir. WVPASSEQ(dir_m.create_path(path, create_symlinks=True), None) WVPASS(stat.S_ISDIR(os.stat(path).st_mode)) # Restore file over dir. WVPASSEQ(file_m.create_path(path, create_symlinks=True), None) WVPASS(stat.S_ISREG(os.stat(path).st_mode)) # Restore file over file. WVPASSEQ(file_m.create_path(path, create_symlinks=True), None) WVPASS(stat.S_ISREG(os.stat(path).st_mode)) # Restore file over non-empty dir. os.remove(path) os.mkdir(path) open(path + b'/bar', 'w').close() WVEXCEPT(Exception, file_m.create_path, path, create_symlinks=True) # Restore dir over non-empty dir. os.remove(path + b'/bar') os.mkdir(path + b'/bar') WVEXCEPT(Exception, dir_m.create_path, path, create_symlinks=True) from bup.metadata import xattr if xattr: def remove_selinux(attrs): return list(filter(lambda i: not i in (b'security.selinux', ), attrs)) def test_handling_of_incorrect_existing_linux_xattrs(): if not is_superuser() or detect_fakeroot(): pytest.skip('skipping test -- not superuser') return if not setup_testfs(): pytest.skip('unable to load loop module; skipping dependent tests') return for f in glob.glob(b'testfs/*'): ex(b'rm', b'-rf', f) path = b'testfs/foo' open(path, 'w').close() xattr.set(path, b'foo', b'bar', namespace=xattr.NS_USER) m = metadata.from_path(path, archive_path=path, save_symlinks=True) xattr.set(path, b'baz', b'bax', namespace=xattr.NS_USER) m.apply_to_path(path, restore_numeric_ids=False) WVPASSEQ(remove_selinux(xattr.list(path)), [b'user.foo']) WVPASSEQ(xattr.get(path, b'user.foo'), b'bar') xattr.set(path, b'foo', b'baz', namespace=xattr.NS_USER) m.apply_to_path(path, restore_numeric_ids=False) WVPASSEQ(remove_selinux(xattr.list(path)), [b'user.foo']) WVPASSEQ(xattr.get(path, b'user.foo'), b'bar') xattr.remove(path, b'foo', namespace=xattr.NS_USER) m.apply_to_path(path, restore_numeric_ids=False) 
WVPASSEQ(remove_selinux(xattr.list(path)), [b'user.foo']) WVPASSEQ(xattr.get(path, b'user.foo'), b'bar') cleanup_testfs() bup-0.33.3/test/int/test_options.py000066400000000000000000000060651454333004200172500ustar00rootroot00000000000000 from __future__ import absolute_import from wvpytest import * from bup import options def test_optdict(): d = options.OptDict({ 'x': ('x', False), 'y': ('y', False), 'z': ('z', False), 'other_thing': ('other_thing', False), 'no_other_thing': ('other_thing', True), 'no_z': ('z', True), 'no_smart': ('smart', True), 'smart': ('smart', False), 'stupid': ('smart', True), 'no_smart': ('smart', False), }) WVPASS('foo') d['x'] = 5 d['y'] = 4 d['z'] = 99 d['no_other_thing'] = 5 WVPASSEQ(d.x, 5) WVPASSEQ(d.y, 4) WVPASSEQ(d.z, 99) WVPASSEQ(d.no_z, False) WVPASSEQ(d.no_other_thing, True) WVEXCEPT(KeyError, lambda: d.p) invalid_optspec0 = """ """ invalid_optspec1 = """ prog """ invalid_optspec2 = """ -- x,y """ def test_invalid_optspec(): WVPASS(options.Options(invalid_optspec0).parse([])) WVPASS(options.Options(invalid_optspec1).parse([])) WVPASS(options.Options(invalid_optspec2).parse([])) optspec = """ prog [stuff...] 
prog [-t] -- t test q,quiet quiet l,longoption= long option with parameters and a really really long description that will require wrapping p= short option with parameters onlylong long option with no short neveropt never called options deftest1= a default option with default [1] deftest2= a default option with [1] default [2] deftest3= a default option with [3] no actual default deftest4= a default option with [[square]] deftest5= a default option with "correct" [[square] s,smart,no-stupid disable stupidity x,extended,no-simple extended mode [2] #,compress= set compression level [5] """ def test_options(): o = options.Options(optspec) (opt,flags,extra) = o.parse(['-tttqp', 7, '--longoption', '19', 'hanky', '--onlylong', '-7']) WVPASSEQ(flags[0], ('-t', '')) WVPASSEQ(flags[1], ('-t', '')) WVPASSEQ(flags[2], ('-t', '')) WVPASSEQ(flags[3], ('-q', '')) WVPASSEQ(flags[4], ('-p', 7)) WVPASSEQ(flags[5], ('--longoption', '19')) WVPASSEQ(extra, ['hanky']) WVPASSEQ((opt.t, opt.q, opt.p, opt.l, opt.onlylong, opt.neveropt), (3,1,7,19,1,None)) WVPASSEQ((opt.deftest1, opt.deftest2, opt.deftest3, opt.deftest4, opt.deftest5), (1,2,None,None,'[square')) WVPASSEQ((opt.stupid, opt.no_stupid), (True, None)) WVPASSEQ((opt.smart, opt.no_smart), (None, True)) WVPASSEQ((opt.x, opt.extended, opt.no_simple), (2,2,2)) WVPASSEQ((opt.no_x, opt.no_extended, opt.simple), (False,False,False)) WVPASSEQ(opt['#'], 7) WVPASSEQ(opt.compress, 7) (opt,flags,extra) = o.parse(['--onlylong', '-t', '--no-onlylong', '--smart', '--simple']) WVPASSEQ((opt.t, opt.q, opt.onlylong), (1, None, 0)) WVPASSEQ((opt.stupid, opt.no_stupid), (False, True)) WVPASSEQ((opt.smart, opt.no_smart), (True, False)) WVPASSEQ((opt.x, opt.extended, opt.no_simple), (0,0,0)) WVPASSEQ((opt.no_x, opt.no_extended, opt.simple), (True,True,True)) bup-0.33.3/test/int/test_resolve.py000066400000000000000000000300451454333004200172270ustar00rootroot00000000000000 from __future__ import absolute_import, print_function from binascii import 
unhexlify from errno import ELOOP, ENOTDIR from os import symlink from stat import S_IFDIR import os from time import localtime, strftime from wvpytest import * from bup import git, path, vfs from bup.compat import environ from bup.repo import LocalRepo, RemoteRepo from buptest import ex, exo from buptest.vfs import tree_dict bup_path = path.exe() ## The clear_cache() calls below are to make sure that the test starts ## from a known state since at the moment the cache entry for a given ## item (like a commit) can change. For example, its meta value might ## be promoted from a mode to a Metadata instance once the tree it ## refers to is traversed. def prep_and_test_repo(tmpdir, create_repo, test_repo): bup_dir = tmpdir + b'/bup' environ[b'GIT_DIR'] = bup_dir environ[b'BUP_DIR'] = bup_dir ex((bup_path, b'init')) git.repodir = bup_dir with create_repo(bup_dir) as repo: test_repo(repo, tmpdir) # Currently, we just test through the repos since LocalRepo resolve is # just a straight redirection to vfs.resolve. 
def _test_resolve(repo, tmpdir): data_path = tmpdir + b'/src' resolve = repo.resolve save_time = 100000 save_time_str = strftime('%Y-%m-%d-%H%M%S', localtime(save_time)).encode('ascii') os.mkdir(data_path) os.mkdir(data_path + b'/dir') with open(data_path + b'/file', 'wb+') as tmpfile: tmpfile.write(b'canary\n') symlink(b'file', data_path + b'/file-symlink') symlink(b'dir', data_path + b'/dir-symlink') symlink(b'not-there', data_path + b'/bad-symlink') ex((bup_path, b'index', b'-v', data_path)) ex((bup_path, b'save', b'-d', b'%d' % save_time, b'-tvvn', b'test', b'--strip', data_path)) ex((bup_path, b'tag', b'test-tag', b'test')) tip_hash = exo((b'git', b'show-ref', b'refs/heads/test'))[0] tip_oidx = tip_hash.strip().split()[0] tip_oid = unhexlify(tip_oidx) tip_tree_oidx = exo((b'git', b'log', b'--pretty=%T', b'-n1', tip_oidx))[0].strip() tip_tree_oid = unhexlify(tip_tree_oidx) tip_tree = tree_dict(repo, tip_tree_oid) test_revlist_w_meta = vfs.RevList(meta=tip_tree[b'.'].meta, oid=tip_oid) expected_latest_item = vfs.Commit(meta=S_IFDIR | 0o755, oid=tip_tree_oid, coid=tip_oid) expected_latest_item_w_meta = vfs.Commit(meta=tip_tree[b'.'].meta, oid=tip_tree_oid, coid=tip_oid) expected_latest_link = vfs.FakeLink(meta=vfs.default_symlink_mode, target=save_time_str) expected_test_tag_item = expected_latest_item vfs.clear_cache() res = resolve(b'/') wvpasseq(1, len(res)) wvpasseq(((b'', vfs._root),), res) ignore, root_item = res[0] root_content = frozenset(vfs.contents(repo, root_item)) wvpasseq(frozenset([(b'.', root_item), (b'.tag', vfs._tags), (b'test', test_revlist_w_meta)]), root_content) for path in (b'//', b'/.', b'/./', b'/..', b'/../', b'/test/latest/dir/../../..', b'/test/latest/dir/../../../', b'/test/latest/dir/../../../.', b'/test/latest/dir/../../..//', b'/test//latest/dir/../../..', b'/test/./latest/dir/../../..', b'/test/././latest/dir/../../..', b'/test/.//./latest/dir/../../..', b'/test//.//.//latest/dir/../../..' 
b'/test//./latest/dir/../../..'): vfs.clear_cache() res = resolve(path) wvpasseq(((b'', vfs._root),), res) vfs.clear_cache() res = resolve(b'/.tag') wvpasseq(2, len(res)) wvpasseq(((b'', vfs._root), (b'.tag', vfs._tags)), res) ignore, tag_item = res[1] tag_content = frozenset(vfs.contents(repo, tag_item)) wvpasseq(frozenset([(b'.', tag_item), (b'test-tag', expected_test_tag_item)]), tag_content) vfs.clear_cache() res = resolve(b'/test') wvpasseq(2, len(res)) wvpasseq(((b'', vfs._root), (b'test', test_revlist_w_meta)), res) ignore, test_item = res[1] test_content = frozenset(vfs.contents(repo, test_item)) # latest has metadata here due to caching wvpasseq(frozenset([(b'.', test_revlist_w_meta), (save_time_str, expected_latest_item_w_meta), (b'latest', expected_latest_link)]), test_content) vfs.clear_cache() res = resolve(b'/test/latest') wvpasseq(3, len(res)) expected_latest_item_w_meta = vfs.Commit(meta=tip_tree[b'.'].meta, oid=tip_tree_oid, coid=tip_oid) expected = ((b'', vfs._root), (b'test', test_revlist_w_meta), (save_time_str, expected_latest_item_w_meta)) wvpasseq(expected, res) ignore, latest_item = res[2] latest_content = frozenset(vfs.contents(repo, latest_item)) expected = frozenset((x.name, vfs.Item(oid=x.oid, meta=x.meta)) for x in (tip_tree[name] for name in (b'.', b'bad-symlink', b'dir', b'dir-symlink', b'file', b'file-symlink'))) wvpasseq(expected, latest_content) vfs.clear_cache() res = resolve(b'/test/latest/file') wvpasseq(4, len(res)) expected_file_item_w_meta = vfs.Item(meta=tip_tree[b'file'].meta, oid=tip_tree[b'file'].oid) expected = ((b'', vfs._root), (b'test', test_revlist_w_meta), (save_time_str, expected_latest_item_w_meta), (b'file', expected_file_item_w_meta)) wvpasseq(expected, res) vfs.clear_cache() res = resolve(b'/test/latest/bad-symlink') wvpasseq(4, len(res)) expected = ((b'', vfs._root), (b'test', test_revlist_w_meta), (save_time_str, expected_latest_item_w_meta), (b'not-there', None)) wvpasseq(expected, res) vfs.clear_cache() res 
= resolve(b'/test/latest/bad-symlink', follow=False) wvpasseq(4, len(res)) bad_symlink_value = tip_tree[b'bad-symlink'] expected_bad_symlink_item_w_meta = vfs.Item(meta=bad_symlink_value.meta, oid=bad_symlink_value.oid) expected = ((b'', vfs._root), (b'test', test_revlist_w_meta), (save_time_str, expected_latest_item_w_meta), (b'bad-symlink', expected_bad_symlink_item_w_meta)) wvpasseq(expected, res) vfs.clear_cache() res = resolve(b'/test/latest/file-symlink') wvpasseq(4, len(res)) expected = ((b'', vfs._root), (b'test', test_revlist_w_meta), (save_time_str, expected_latest_item_w_meta), (b'file', expected_file_item_w_meta)) wvpasseq(expected, res) vfs.clear_cache() res = resolve(b'/test/latest/file-symlink', follow=False) wvpasseq(4, len(res)) file_symlink_value = tip_tree[b'file-symlink'] expected_file_symlink_item_w_meta = vfs.Item(meta=file_symlink_value.meta, oid=file_symlink_value.oid) expected = ((b'', vfs._root), (b'test', test_revlist_w_meta), (save_time_str, expected_latest_item_w_meta), (b'file-symlink', expected_file_symlink_item_w_meta)) wvpasseq(expected, res) vfs.clear_cache() res = resolve(b'/test/latest/missing') wvpasseq(4, len(res)) name, item = res[-1] wvpasseq(b'missing', name) wvpass(item is None) for path in (b'/test/latest/file/', b'/test/latest/file/.', b'/test/latest/file/..', b'/test/latest/file/../', b'/test/latest/file/../.', b'/test/latest/file/../..', b'/test/latest/file/foo'): vfs.clear_cache() try: resolve(path) except vfs.IOError as res_ex: wvpasseq(ENOTDIR, res_ex.errno) wvpasseq([b'', b'test', save_time_str, b'file'], [name for name, item in res_ex.terminus]) for path in (b'/test/latest/file-symlink/', b'/test/latest/file-symlink/.', b'/test/latest/file-symlink/..', b'/test/latest/file-symlink/../', b'/test/latest/file-symlink/../.', b'/test/latest/file-symlink/../..'): vfs.clear_cache() try: resolve(path, follow=False) except vfs.IOError as res_ex: wvpasseq(ENOTDIR, res_ex.errno) wvpasseq([b'', b'test', save_time_str, b'file'], 
[name for name, item in res_ex.terminus]) vfs.clear_cache() file_res = resolve(b'/test/latest/file') try: resolve(b'foo', parent=file_res) except vfs.IOError as res_ex: wvpasseq(ENOTDIR, res_ex.errno) wvpasseq(None, res_ex.terminus) vfs.clear_cache() res = resolve(b'/test/latest/dir-symlink', follow=False) wvpasseq(4, len(res)) dir_symlink_value = tip_tree[b'dir-symlink'] expected_dir_symlink_item_w_meta = vfs.Item(meta=dir_symlink_value.meta, oid=dir_symlink_value.oid) expected = ((b'', vfs._root), (b'test', test_revlist_w_meta), (save_time_str, expected_latest_item_w_meta), (b'dir-symlink', expected_dir_symlink_item_w_meta)) wvpasseq(expected, res) dir_value = tip_tree[b'dir'] expected_dir_item = vfs.Item(oid=dir_value.oid, meta=tree_dict(repo, dir_value.oid)[b'.'].meta) expected = ((b'', vfs._root), (b'test', test_revlist_w_meta), (save_time_str, expected_latest_item_w_meta), (b'dir', expected_dir_item)) def lresolve(*args, **keys): return resolve(*args, **dict(keys, follow=False)) for resname, resolver in (('resolve', resolve), ('resolve nofollow', lresolve)): for path in (b'/test/latest/dir-symlink/', b'/test/latest/dir-symlink/.'): vfs.clear_cache() res = resolver(path) wvpasseq(4, len(res)) wvpasseq(expected, res) vfs.clear_cache() res = resolve(path) wvpasseq(4, len(res)) wvpasseq(expected, res) def test_local_resolve(tmpdir): prep_and_test_repo(tmpdir, lambda x: LocalRepo(repo_dir=x), _test_resolve) def test_remote_resolve(tmpdir): prep_and_test_repo(tmpdir, lambda x: RemoteRepo(x), _test_resolve) def _test_resolve_loop(repo, tmpdir): data_path = tmpdir + b'/src' os.mkdir(data_path) symlink(b'loop', data_path + b'/loop') ex((bup_path, b'init')) ex((bup_path, b'index', b'-v', data_path)) save_utc = 100000 ex((bup_path, b'save', b'-d', b'%d' % save_utc, b'-tvvn', b'test', b'--strip', data_path)) save_name = strftime('%Y-%m-%d-%H%M%S', localtime(save_utc)).encode('ascii') try: wvpasseq('this call should never return', repo.resolve(b'/test/%s/loop' % 
save_name)) except vfs.IOError as res_ex: wvpasseq(ELOOP, res_ex.errno) wvpasseq([b'', b'test', save_name, b'loop'], [name for name, item in res_ex.terminus]) def test_local_resolve_loop(tmpdir): prep_and_test_repo(tmpdir, lambda x: LocalRepo(x), _test_resolve_loop) def test_remote_resolve_loop(tmpdir): prep_and_test_repo(tmpdir, lambda x: RemoteRepo(x), _test_resolve_loop) # FIXME: add tests for the want_meta=False cases. bup-0.33.3/test/int/test_shquote.py000066400000000000000000000037121454333004200172410ustar00rootroot00000000000000 from __future__ import absolute_import from wvpytest import * from bup import shquote def qst(line): return [word for offset,word in shquote.quotesplit(line)] def test_shquote(): WVPASSEQ(qst(b""" this is basic \t\n\r text """), [b'this', b'is', b'basic', b'text']) WVPASSEQ(qst(br""" \"x\" "help" 'yelp' """), [b'"x"', b'help', b'yelp']) WVPASSEQ(qst(br""" "'\"\"'" '\"\'' """), [b"'\"\"'", b'\\"\'']) WVPASSEQ(shquote.quotesplit(b' this is "unfinished'), [(2, b'this'), (7, b'is'), (10, b'unfinished')]) WVPASSEQ(shquote.quotesplit(b'"silly"\'will'), [(0, b'silly'), (7, b'will')]) WVPASSEQ(shquote.unfinished_word(b'this is a "billy" "goat'), (b'"', b'goat')) WVPASSEQ(shquote.unfinished_word(b"'x"), (b"'", b'x')) WVPASSEQ(shquote.unfinished_word(b"abra cadabra "), (None, b'')) WVPASSEQ(shquote.unfinished_word(b"abra cadabra"), (None, b'cadabra')) qtype, word = shquote.unfinished_word(b"this is /usr/loc") WVPASSEQ(shquote.what_to_add(qtype, word, b"/usr/local", True), b"al") qtype, word = shquote.unfinished_word(b"this is '/usr/loc") WVPASSEQ(shquote.what_to_add(qtype, word, b"/usr/local", True), b"al'") qtype, word = shquote.unfinished_word(b"this is \"/usr/loc") WVPASSEQ(shquote.what_to_add(qtype, word, b"/usr/local", True), b"al\"") qtype, word = shquote.unfinished_word(b"this is \"/usr/loc") WVPASSEQ(shquote.what_to_add(qtype, word, b"/usr/local", False), b"al") qtype, word = shquote.unfinished_word(b"this is \\ hammer\\ \"") 
WVPASSEQ(word, b' hammer "') WVPASSEQ(shquote.what_to_add(qtype, word, b" hammer \"time\"", True), b"time\\\"") WVPASSEQ(shquote.quotify_list([b'a', b'', b'"word"', b"'third'", b"'", b"x y"]), b"a '' '\"word\"' \"'third'\" \"'\" 'x y'") bup-0.33.3/test/int/test_vfs.py000066400000000000000000000362471454333004200163600ustar00rootroot00000000000000 from __future__ import absolute_import, print_function from binascii import unhexlify from io import BytesIO from os import symlink from random import Random, randint from stat import S_IFDIR, S_IFLNK, S_IFREG, S_ISDIR, S_ISREG from sys import stderr import os import sys from time import localtime, strftime, tzset from wvpytest import * from bup._helpers import write_random from bup import git, metadata, vfs from bup.compat import environ, fsencode from bup.helpers import exc, shstr from bup.metadata import Metadata from bup.repo import LocalRepo from buptest import ex, exo from buptest.vfs import tree_dict lib_t_dir = os.path.dirname(fsencode(__file__)) top_dir = os.path.join(lib_t_dir, b'../..') bup_path = top_dir + b'/bup' def ex(cmd, **kwargs): print(shstr(cmd), file=stderr) return exc(cmd, **kwargs) def test_default_modes(): wvpasseq(S_IFREG | 0o644, vfs.default_file_mode) wvpasseq(S_IFDIR | 0o755, vfs.default_dir_mode) wvpasseq(S_IFLNK | 0o755, vfs.default_symlink_mode) def test_cache_behavior(): orig_max = vfs._cache_max_items try: vfs._cache_max_items = 2 vfs.clear_cache() wvpasseq({}, vfs._cache) wvpasseq([], vfs._cache_keys) wvfail(vfs._cache_keys) wvexcept(Exception, vfs.cache_notice, b'x', 1) key_0 = b'itm:' + b'\0' * 20 key_1 = b'itm:' + b'\1' * 20 key_2 = b'itm:' + b'\2' * 20 vfs.cache_notice(key_0, b'something') wvpasseq({key_0 : b'something'}, vfs._cache) wvpasseq([key_0], vfs._cache_keys) vfs.cache_notice(key_1, b'something else') wvpasseq({key_0 : b'something', key_1 : b'something else'}, vfs._cache) wvpasseq(frozenset([key_0, key_1]), frozenset(vfs._cache_keys)) vfs.cache_notice(key_2, b'and also') 
wvpasseq(2, len(vfs._cache)) wvpass(frozenset(vfs._cache.items()) < frozenset({key_0 : b'something', key_1 : b'something else', key_2 : b'and also'}.items())) wvpasseq(2, len(vfs._cache_keys)) wvpass(frozenset(vfs._cache_keys) < frozenset([key_0, key_1, key_2])) vfs.clear_cache() wvpasseq({}, vfs._cache) wvpasseq([], vfs._cache_keys) finally: vfs._cache_max_items = orig_max vfs.clear_cache() ## The clear_cache() calls below are to make sure that the test starts ## from a known state since at the moment the cache entry for a given ## item (like a commit) can change. For example, its meta value might ## be promoted from a mode to a Metadata instance once the tree it ## refers to is traversed. def run_augment_item_meta_tests(repo, file_path, file_size, link_path, link_target): _, file_item = vfs.resolve(repo, file_path)[-1] _, link_item = vfs.resolve(repo, link_path, follow=False)[-1] wvpass(isinstance(file_item.meta, Metadata)) wvpass(isinstance(link_item.meta, Metadata)) # Note: normally, modifying item.meta values is forbidden file_item.meta.size = file_item.meta.size or vfs.item_size(repo, file_item) link_item.meta.size = link_item.meta.size or vfs.item_size(repo, link_item) ## Ensure a fully populated item is left alone augmented = vfs.augment_item_meta(repo, file_item) wvpass(augmented is file_item) wvpass(augmented.meta is file_item.meta) augmented = vfs.augment_item_meta(repo, file_item, include_size=True) wvpass(augmented is file_item) wvpass(augmented.meta is file_item.meta) ## Ensure a missing size is handled poperly file_item.meta.size = None augmented = vfs.augment_item_meta(repo, file_item) wvpass(augmented is file_item) wvpass(augmented.meta is file_item.meta) augmented = vfs.augment_item_meta(repo, file_item, include_size=True) wvpass(augmented is not file_item) wvpasseq(file_size, augmented.meta.size) ## Ensure a meta mode is handled properly mode_item = file_item._replace(meta=vfs.default_file_mode) augmented = vfs.augment_item_meta(repo, mode_item) 
augmented_w_size = vfs.augment_item_meta(repo, mode_item, include_size=True) for item in (augmented, augmented_w_size): meta = item.meta wvpass(item is not file_item) wvpass(isinstance(meta, Metadata)) wvpasseq(vfs.default_file_mode, meta.mode) wvpasseq((None, None, 0, 0, 0), (meta.uid, meta.gid, meta.atime, meta.mtime, meta.ctime)) wvpass(augmented.meta.size is None) wvpasseq(file_size, augmented_w_size.meta.size) ## Ensure symlinks are handled properly mode_item = link_item._replace(meta=vfs.default_symlink_mode) augmented = vfs.augment_item_meta(repo, mode_item) wvpass(augmented is not mode_item) wvpass(isinstance(augmented.meta, Metadata)) wvpasseq(link_target, augmented.meta.symlink_target) wvpasseq(len(link_target), augmented.meta.size) augmented = vfs.augment_item_meta(repo, mode_item, include_size=True) wvpass(augmented is not mode_item) wvpass(isinstance(augmented.meta, Metadata)) wvpasseq(link_target, augmented.meta.symlink_target) wvpasseq(len(link_target), augmented.meta.size) def test_item_mode(): mode = S_IFDIR | 0o755 meta = metadata.from_path(b'.') oid = b'\0' * 20 wvpasseq(mode, vfs.item_mode(vfs.Item(oid=oid, meta=mode))) wvpasseq(meta.mode, vfs.item_mode(vfs.Item(oid=oid, meta=meta))) def test_reverse_suffix_duplicates(): suffix = lambda x: tuple(vfs._reverse_suffix_duplicates(x)) wvpasseq((b'x',), suffix((b'x',))) wvpasseq((b'x', b'y'), suffix((b'x', b'y'))) wvpasseq((b'x-1', b'x-0'), suffix((b'x',) * 2)) wvpasseq([b'x-%02d' % n for n in reversed(range(11))], list(suffix((b'x',) * 11))) wvpasseq((b'x-1', b'x-0', b'y'), suffix((b'x', b'x', b'y'))) wvpasseq((b'x', b'y-1', b'y-0'), suffix((b'x', b'y', b'y'))) wvpasseq((b'x', b'y-1', b'y-0', b'z'), suffix((b'x', b'y', b'y', b'z'))) def test_misc(tmpdir): bup_dir = tmpdir + b'/bup' environ[b'GIT_DIR'] = bup_dir environ[b'BUP_DIR'] = bup_dir git.repodir = bup_dir data_path = tmpdir + b'/src' os.mkdir(data_path) with open(data_path + b'/file', 'wb+') as tmpfile: tmpfile.write(b'canary\n') 
symlink(b'file', data_path + b'/symlink') ex((bup_path, b'init')) ex((bup_path, b'index', b'-v', data_path)) ex((bup_path, b'save', b'-d', b'100000', b'-tvvn', b'test', b'--strip', data_path)) with LocalRepo() as repo: ls_tree = exo((b'git', b'ls-tree', b'test', b'symlink')).out mode, typ, oidx, name = ls_tree.strip().split(None, 3) assert name == b'symlink' link_item = vfs.Item(oid=unhexlify(oidx), meta=int(mode, 8)) wvpasseq(b'file', vfs.readlink(repo, link_item)) ls_tree = exo((b'git', b'ls-tree', b'test', b'file')).out mode, typ, oidx, name = ls_tree.strip().split(None, 3) assert name == b'file' file_item = vfs.Item(oid=unhexlify(oidx), meta=int(mode, 8)) wvexcept(Exception, vfs.readlink, repo, file_item) wvpasseq(4, vfs.item_size(repo, link_item)) wvpasseq(7, vfs.item_size(repo, file_item)) meta = metadata.from_path(fsencode(__file__)) meta.size = 42 fake_item = file_item._replace(meta=meta) wvpasseq(42, vfs.item_size(repo, fake_item)) _, fakelink_item = vfs.resolve(repo, b'/test/latest', follow=False)[-1] wvpasseq(17, vfs.item_size(repo, fakelink_item)) run_augment_item_meta_tests(repo, b'/test/latest/file', 7, b'/test/latest/symlink', b'file') # FIXME: this caused StopIteration #_, file_item = vfs.resolve(repo, '/file')[-1] _, file_item = vfs.resolve(repo, b'/test/latest/file')[-1] file_copy = vfs.copy_item(file_item) wvpass(file_copy is not file_item) wvpass(file_copy.meta is not file_item.meta) wvpass(isinstance(file_copy, tuple)) wvpass(file_item.meta.user) wvpass(file_copy.meta.user) file_copy.meta.user = None wvpass(file_item.meta.user) def write_sized_random_content(parent_dir, size, seed): verbose = 0 with open(b'%s/%d' % (parent_dir, size), 'wb') as f: write_random(f.fileno(), size, seed, verbose) def validate_vfs_streaming_read(repo, item, expected_path, read_sizes): for read_size in read_sizes: with open(expected_path, 'rb') as expected: with vfs.fopen(repo, item) as actual: ex_buf = expected.read(read_size) act_buf = actual.read(read_size) while 
ex_buf and act_buf: wvpassge(read_size, len(ex_buf)) wvpassge(read_size, len(act_buf)) wvpasseq(len(ex_buf), len(act_buf)) wvpass(ex_buf == act_buf) ex_buf = expected.read(read_size) act_buf = actual.read(read_size) wvpasseq(b'', ex_buf) wvpasseq(b'', act_buf) def validate_vfs_seeking_read(repo, item, expected_path, read_sizes): def read_act(act_pos): with vfs.fopen(repo, item) as actual: actual.seek(act_pos) wvpasseq(act_pos, actual.tell()) act_buf = actual.read(read_size) act_pos += len(act_buf) wvpasseq(act_pos, actual.tell()) return act_pos, act_buf for read_size in read_sizes: with open(expected_path, 'rb') as expected: ex_buf = expected.read(read_size) act_buf = None act_pos = 0 while ex_buf: act_pos, act_buf = read_act(act_pos) wvpassge(read_size, len(ex_buf)) wvpassge(read_size, len(act_buf)) wvpasseq(len(ex_buf), len(act_buf)) wvpass(ex_buf == act_buf) if not act_buf: break ex_buf = expected.read(read_size) else: # hit expected eof first act_pos, act_buf = read_act(act_pos) wvpasseq(b'', ex_buf) wvpasseq(b'', act_buf) def test_read_and_seek(tmpdir): # Write a set of randomly sized files containing random data whose # names are their sizes, and then verify that what we get back # from the vfs when seeking and reading with various block sizes # matches the original content. 
resolve = vfs.resolve bup_dir = tmpdir + b'/bup' environ[b'GIT_DIR'] = bup_dir environ[b'BUP_DIR'] = bup_dir git.repodir = bup_dir with LocalRepo() as repo: data_path = tmpdir + b'/src' os.mkdir(data_path) seed = randint(-(1 << 31), (1 << 31) - 1) rand = Random() rand.seed(seed) print('test_read seed:', seed, file=sys.stderr) max_size = 2 * 1024 * 1024 sizes = set((rand.randint(1, max_size) for _ in range(5))) sizes.add(1) sizes.add(max_size) for size in sizes: write_sized_random_content(data_path, size, seed) ex((bup_path, b'init')) ex((bup_path, b'index', b'-v', data_path)) ex((bup_path, b'save', b'-d', b'100000', b'-tvvn', b'test', b'--strip', data_path)) read_sizes = set((rand.randint(1, max_size) for _ in range(10))) sizes.add(1) sizes.add(max_size) print('test_read src sizes:', sizes, file=sys.stderr) print('test_read read sizes:', read_sizes, file=sys.stderr) for size in sizes: res = resolve(repo, b'/test/latest/' + str(size).encode('ascii')) _, item = res[-1] wvpasseq(size, vfs.item_size(repo, res[-1][1])) validate_vfs_streaming_read(repo, item, b'%s/%d' % (data_path, size), read_sizes) validate_vfs_seeking_read(repo, item, b'%s/%d' % (data_path, size), read_sizes) def test_contents_with_mismatched_bupm_git_ordering(tmpdir): bup_dir = tmpdir + b'/bup' environ[b'GIT_DIR'] = bup_dir environ[b'BUP_DIR'] = bup_dir git.repodir = bup_dir data_path = tmpdir + b'/src' os.mkdir(data_path) os.mkdir(data_path + b'/foo') with open(data_path + b'/foo.', 'wb+') as tmpfile: tmpfile.write(b'canary\n') ex((bup_path, b'init')) ex((bup_path, b'index', b'-v', data_path)) save_utc = 100000 save_name = strftime('%Y-%m-%d-%H%M%S', localtime(save_utc)).encode('ascii') ex((bup_path, b'save', b'-tvvn', b'test', b'-d', b'%d' % save_utc, b'--strip', data_path)) with LocalRepo() as repo: tip_sref = exo((b'git', b'show-ref', b'refs/heads/test')).out tip_oidx = tip_sref.strip().split()[0] tip_tree_oidx = exo((b'git', b'log', b'--pretty=%T', b'-n1', tip_oidx)).out.strip() tip_tree_oid = 
unhexlify(tip_tree_oidx) tip_tree = tree_dict(repo, tip_tree_oid) name, item = vfs.resolve(repo, b'/test/latest')[2] wvpasseq(save_name, name) expected = frozenset((x.name, vfs.Item(oid=x.oid, meta=x.meta)) for x in (tip_tree[name] for name in (b'.', b'foo', b'foo.'))) contents = tuple(vfs.contents(repo, item)) wvpasseq(expected, frozenset(contents)) # Spot check, in case tree_dict shares too much code with the vfs name, item = next(((n, i) for n, i in contents if n == b'foo')) wvpass(S_ISDIR(item.meta)) name, item = next(((n, i) for n, i in contents if n == b'foo.')) wvpass(S_ISREG(item.meta.mode)) def test_duplicate_save_dates(tmpdir): bup_dir = tmpdir + b'/bup' environ[b'GIT_DIR'] = bup_dir environ[b'BUP_DIR'] = bup_dir environ[b'TZ'] = b'UTC' tzset() git.repodir = bup_dir data_path = tmpdir + b'/src' os.mkdir(data_path) with open(data_path + b'/file', 'wb+') as tmpfile: tmpfile.write(b'canary\n') ex((b'env',)) ex((bup_path, b'init')) ex((bup_path, b'index', b'-v', data_path)) for i in range(11): ex((bup_path, b'save', b'-d', b'100000', b'-n', b'test', data_path)) with LocalRepo() as repo: res = vfs.resolve(repo, b'/test') wvpasseq(2, len(res)) name, revlist = res[-1] wvpasseq(b'test', name) wvpasseq((b'.', b'1970-01-02-034640-00', b'1970-01-02-034640-01', b'1970-01-02-034640-02', b'1970-01-02-034640-03', b'1970-01-02-034640-04', b'1970-01-02-034640-05', b'1970-01-02-034640-06', b'1970-01-02-034640-07', b'1970-01-02-034640-08', b'1970-01-02-034640-09', b'1970-01-02-034640-10', b'latest'), tuple(sorted(x[0] for x in vfs.contents(repo, revlist)))) def test_item_read_write(): x = vfs.Root(meta=13) stream = BytesIO() vfs.write_item(stream, x) print('stream:', repr(stream.getvalue()), stream.tell(), file=sys.stderr) stream.seek(0) wvpasseq(x, vfs.read_item(stream)) bup-0.33.3/test/int/test_vint.py000066400000000000000000000050401454333004200165250ustar00rootroot00000000000000 from __future__ import absolute_import from io import BytesIO from itertools import 
combinations_with_replacement from wvpytest import * from bup import vint def encode_and_decode_vuint(x): f = BytesIO() vint.write_vuint(f, x) return vint.read_vuint(BytesIO(f.getvalue())) def test_vuint(): for x in (0, 1, 42, 128, 10**16, 10**100): WVPASSEQ(encode_and_decode_vuint(x), x) WVEXCEPT(Exception, vint.write_vuint, BytesIO(), -1) WVEXCEPT(EOFError, vint.read_vuint, BytesIO()) def encode_and_decode_vint(x): f = BytesIO() vint.write_vint(f, x) return vint.read_vint(BytesIO(f.getvalue())) def test_vint(): values = (0, 1, 42, 64, 10**16, 10**100) for x in values: WVPASSEQ(encode_and_decode_vint(x), x) for x in [-x for x in values]: WVPASSEQ(encode_and_decode_vint(x), x) WVEXCEPT(EOFError, vint.read_vint, BytesIO()) WVEXCEPT(EOFError, vint.read_vint, BytesIO(b"\x80\x80")) def encode_and_decode_bvec(x): f = BytesIO() vint.write_bvec(f, x) return vint.read_bvec(BytesIO(f.getvalue())) def test_bvec(): values = (b'', b'x', b'foo', b'\0', b'\0foo', b'foo\0bar\0') for x in values: WVPASSEQ(encode_and_decode_bvec(x), x) WVEXCEPT(EOFError, vint.read_bvec, BytesIO()) outf = BytesIO() for x in (b'foo', b'bar', b'baz', b'bax'): vint.write_bvec(outf, x) inf = BytesIO(outf.getvalue()) WVPASSEQ(vint.read_bvec(inf), b'foo') WVPASSEQ(vint.read_bvec(inf), b'bar') vint.skip_bvec(inf) WVPASSEQ(vint.read_bvec(inf), b'bax') def pack_and_unpack(types, *values): data = vint.pack(types, *values) return vint.unpack(types, data) def test_pack_and_unpack(): candidates = (('s', b''), ('s', b'x'), ('s', b'foo'), ('s', b'foo' * 10), ('v', -10**100), ('v', -1), ('v', 0), ('v', 1), ('v', -10**100), ('V', 0), ('V', 1), ('V', 10**100)) WVPASSEQ(pack_and_unpack(''), []) for f, v in candidates: WVPASSEQ(pack_and_unpack(f, v), [v]) for (f1, v1), (f2, v2) in combinations_with_replacement(candidates, r=2): WVPASSEQ(pack_and_unpack(f1 + f2, v1, v2), [v1, v2]) WVEXCEPT(Exception, vint.pack, 's') WVEXCEPT(Exception, vint.pack, 's', 'foo', 'bar') WVEXCEPT(Exception, vint.pack, 'x', 1) 
WVEXCEPT(Exception, vint.unpack, 's', '') WVEXCEPT(Exception, vint.unpack, 'x', '') bup-0.33.3/test/int/test_xstat.py000066400000000000000000000074511454333004200167200ustar00rootroot00000000000000 from __future__ import absolute_import from wvpytest import * import bup._helpers as _helpers from bup import xstat def test_fstime(): WVPASSEQ(xstat.timespec_to_nsecs((0, 0)), 0) WVPASSEQ(xstat.timespec_to_nsecs((1, 0)), 10**9) WVPASSEQ(xstat.timespec_to_nsecs((0, 10**9 / 2)), 500000000) WVPASSEQ(xstat.timespec_to_nsecs((1, 10**9 / 2)), 1500000000) WVPASSEQ(xstat.timespec_to_nsecs((-1, 0)), -10**9) WVPASSEQ(xstat.timespec_to_nsecs((-1, 10**9 / 2)), -500000000) WVPASSEQ(xstat.timespec_to_nsecs((-2, 10**9 / 2)), -1500000000) WVPASSEQ(xstat.timespec_to_nsecs((0, -1)), -1) WVPASSEQ(type(xstat.timespec_to_nsecs((2, 22222222))), type(0)) WVPASSEQ(type(xstat.timespec_to_nsecs((-2, 22222222))), type(0)) WVPASSEQ(xstat.nsecs_to_timespec(0), (0, 0)) WVPASSEQ(xstat.nsecs_to_timespec(10**9), (1, 0)) WVPASSEQ(xstat.nsecs_to_timespec(500000000), (0, 10**9 / 2)) WVPASSEQ(xstat.nsecs_to_timespec(1500000000), (1, 10**9 / 2)) WVPASSEQ(xstat.nsecs_to_timespec(-10**9), (-1, 0)) WVPASSEQ(xstat.nsecs_to_timespec(-500000000), (-1, 10**9 / 2)) WVPASSEQ(xstat.nsecs_to_timespec(-1500000000), (-2, 10**9 / 2)) x = xstat.nsecs_to_timespec(1977777778) WVPASSEQ(type(x[0]), type(0)) WVPASSEQ(type(x[1]), type(0)) x = xstat.nsecs_to_timespec(-1977777778) WVPASSEQ(type(x[0]), type(0)) WVPASSEQ(type(x[1]), type(0)) WVPASSEQ(xstat.nsecs_to_timeval(0), (0, 0)) WVPASSEQ(xstat.nsecs_to_timeval(10**9), (1, 0)) WVPASSEQ(xstat.nsecs_to_timeval(500000000), (0, (10**9 / 2) / 1000)) WVPASSEQ(xstat.nsecs_to_timeval(1500000000), (1, (10**9 / 2) / 1000)) WVPASSEQ(xstat.nsecs_to_timeval(-10**9), (-1, 0)) WVPASSEQ(xstat.nsecs_to_timeval(-500000000), (-1, (10**9 / 2) / 1000)) WVPASSEQ(xstat.nsecs_to_timeval(-1500000000), (-2, (10**9 / 2) / 1000)) x = xstat.nsecs_to_timeval(1977777778) WVPASSEQ(type(x[0]), type(0)) 
WVPASSEQ(type(x[1]), type(0)) x = xstat.nsecs_to_timeval(-1977777778) WVPASSEQ(type(x[0]), type(0)) WVPASSEQ(type(x[1]), type(0)) WVPASSEQ(xstat.fstime_floor_secs(0), 0) WVPASSEQ(xstat.fstime_floor_secs(10**9 / 2), 0) WVPASSEQ(xstat.fstime_floor_secs(10**9), 1) WVPASSEQ(xstat.fstime_floor_secs(-10**9 / 2), -1) WVPASSEQ(xstat.fstime_floor_secs(-10**9), -1) WVPASSEQ(type(xstat.fstime_floor_secs(10**9 / 2)), type(0)) WVPASSEQ(type(xstat.fstime_floor_secs(-10**9 / 2)), type(0)) def test_bup_utimensat(tmpdir): if not xstat._bup_utimensat: return path = tmpdir + b'/foo' open(path, 'w').close() frac_ts = (0, 10**9 // 2) xstat._bup_utimensat(_helpers.AT_FDCWD, path, (frac_ts, frac_ts), 0) st = _helpers.stat(path) atime_ts = st[8] mtime_ts = st[9] WVPASSEQ(atime_ts[0], 0) WVPASS(atime_ts[1] == 0 or atime_ts[1] == frac_ts[1]) WVPASSEQ(mtime_ts[0], 0) WVPASS(mtime_ts[1] == 0 or mtime_ts[1] == frac_ts[1]) def test_bup_utimes(tmpdir): if not xstat._bup_utimes: return path = tmpdir + b'/foo' open(path, 'w').close() frac_ts = (0, 10**6 // 2) xstat._bup_utimes(path, (frac_ts, frac_ts)) st = _helpers.stat(path) atime_ts = st[8] mtime_ts = st[9] WVPASSEQ(atime_ts[0], 0) WVPASS(atime_ts[1] == 0 or atime_ts[1] == frac_ts[1] * 1000) WVPASSEQ(mtime_ts[0], 0) WVPASS(mtime_ts[1] == 0 or mtime_ts[1] == frac_ts[1] * 1000) def test_bup_lutimes(tmpdir): if not xstat._bup_lutimes: return path = tmpdir + b'/foo' open(path, 'w').close() frac_ts = (0, 10**6 // 2) xstat._bup_lutimes(path, (frac_ts, frac_ts)) st = _helpers.stat(path) atime_ts = st[8] mtime_ts = st[9] WVPASSEQ(atime_ts[0], 0) WVPASS(atime_ts[1] == 0 or atime_ts[1] == frac_ts[1] * 1000) WVPASSEQ(mtime_ts[0], 0) WVPASS(mtime_ts[1] == 0 or mtime_ts[1] == frac_ts[1] * 1000) 
bup-0.33.3/test/lib/000077500000000000000000000000001454333004200141115ustar00rootroot00000000000000bup-0.33.3/test/lib/__init__.py000066400000000000000000000000001454333004200162100ustar00rootroot00000000000000bup-0.33.3/test/lib/buptest/000077500000000000000000000000001454333004200155775ustar00rootroot00000000000000bup-0.33.3/test/lib/buptest/__init__.py000066400000000000000000000037251454333004200177170ustar00rootroot00000000000000 from __future__ import absolute_import, print_function from collections import namedtuple from os.path import abspath, basename, dirname, realpath from shlex import quote from subprocess import PIPE, Popen from traceback import extract_stack import errno, os, subprocess, sys, tempfile from bup import helpers from bup.compat import fsencode from bup.io import byte_stream ex_res = namedtuple('SubprocResult', ['out', 'err', 'proc', 'rc']) def run(cmd, check=True, input=None, **kwargs): """Run a subprocess as per subprocess.Popen(cmd, **kwargs) followed by communicate(input=input). If check is true, then throw an exception if the subprocess exits with non-zero status. Return a SubprocResult tuple. """ if input: assert 'stdin' not in kwargs kwargs['stdin'] = PIPE p = Popen(cmd, **kwargs) out, err = p.communicate(input=input) if check and p.returncode != 0: raise Exception('subprocess %r failed with status %d%s' % (cmd, p.returncode, (', stderr: %r' % err) if err else '')) return ex_res(out=out, err=err, proc=p, rc=p.returncode) def logcmd(cmd): s = helpers.shstr(cmd) if isinstance(cmd, str): print(s, file=sys.stderr) else: # bytes - for now just escape it print(s.decode(errors='backslashreplace'), file=sys.stderr) def ex(cmd, **kwargs): """Print cmd to stderr and then run it as per ex(...). Print the subprocess stderr to stderr if stderr=PIPE and there's any data. 
""" logcmd(cmd) result = run(cmd, **kwargs) if result.err: sys.stderr.flush() byte_stream(sys.stderr).write(result.err) return result def exo(cmd, **kwargs): """Print cmd to stderr and then run it as per ex(..., stdout=PIPE). Print the subprocess stderr to stderr if stderr=PIPE and there's any data. """ assert 'stdout' not in kwargs kwargs['stdout'] = PIPE return ex(cmd, **kwargs) bup-0.33.3/test/lib/buptest/vfs.py000066400000000000000000000032721454333004200167530ustar00rootroot00000000000000 from __future__ import absolute_import, print_function from collections import namedtuple from stat import S_ISDIR from bup import vfs from bup.metadata import Metadata from bup.git import BUP_CHUNKED TreeDictValue = namedtuple('TreeDictValue', ('name', 'oid', 'meta')) def tree_items(repo, oid): """Yield (name, entry_oid, meta) for each entry in oid. meta will be a Metadata object for any non-directories and for '.', otherwise None. """ # This is a simpler approach than the one in the vfs, used to # cross-check its behavior. tree_data, bupm_oid = vfs.tree_data_and_bupm(repo, oid) bupm = vfs._FileReader(repo, bupm_oid) if bupm_oid else None try: maybe_meta = lambda : Metadata.read(bupm) if bupm else None m = maybe_meta() if m and m.size is None: m.size = 0 yield TreeDictValue(name=b'.', oid=oid, meta=m) tree_ents = vfs.ordered_tree_entries(tree_data, bupm=True) for name, mangled_name, kind, gitmode, sub_oid in tree_ents: if mangled_name == b'.bupm': continue assert name != b'.' 
if S_ISDIR(gitmode): if kind == BUP_CHUNKED: yield TreeDictValue(name=name, oid=sub_oid, meta=maybe_meta()) else: yield TreeDictValue(name=name, oid=sub_oid, meta=vfs.default_dir_mode) else: yield TreeDictValue(name=name, oid=sub_oid, meta=maybe_meta()) finally: if bupm: bupm.close() def tree_dict(repo, oid): return dict((x.name, x) for x in tree_items(repo, oid)) bup-0.33.3/test/lib/wvpytest.py000066400000000000000000000016101454333004200163660ustar00rootroot00000000000000import pytest def WVPASS(cond = True, fail_value=None): if fail_value: assert cond, fail_value else: assert cond def WVFAIL(cond = True): assert not cond def WVPASSEQ(a, b, fail_value=None): if fail_value: assert a == b, fail_value else: assert a == b def WVPASSNE(a, b): assert a != b def WVPASSLT(a, b): assert a < b def WVPASSLE(a, b): assert a <= b def WVPASSGT(a, b): assert a > b def WVPASSGE(a, b): assert a >= b def WVEXCEPT(etype, func, *args, **kwargs): with pytest.raises(etype): func(*args, **kwargs) def WVCHECK(cond, msg): assert cond, msg def WVMSG(msg): print(msg) wvpass = WVPASS wvfail = WVFAIL wvpasseq = WVPASSEQ wvpassne = WVPASSNE wvpaslt = WVPASSLT wvpassle = WVPASSLE wvpassgt = WVPASSGT wvpassge = WVPASSGE wvexcept = WVEXCEPT wvcheck = WVCHECK wvmsg = WVMSG wvstart = WVMSG bup-0.33.3/test/sampledata/000077500000000000000000000000001454333004200154565ustar00rootroot00000000000000bup-0.33.3/test/sampledata/b2/000077500000000000000000000000001454333004200157615ustar00rootroot00000000000000bup-0.33.3/test/sampledata/b2/foozy000066400000000000000000000000001454333004200170400ustar00rootroot00000000000000bup-0.33.3/test/sampledata/b2/foozy2000066400000000000000000000000001454333004200171220ustar00rootroot00000000000000bup-0.33.3/test/sampledata/x000066400000000000000000000000351454333004200156460ustar00rootroot00000000000000Sun Jan 3 01:54:26 EST 2010 bup-0.33.3/test/sampledata/y-2000000066400000000000000000000001251454333004200162260ustar00rootroot00000000000000this file should come 
*before* y/ in the sort order, because of that trailing slash. bup-0.33.3/test/sampledata/y/000077500000000000000000000000001454333004200157265ustar00rootroot00000000000000bup-0.33.3/test/sampledata/y/testfile1000066400000000000000000004657101454333004200175660ustar00rootroot00000000000000#!/hfe/ova/rai clguba sebz ohc vzcbeg bcgvbaf, qerphefr sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc qerphefr -- k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf d,dhvrg qba'g npghnyyl cevag svyranzrf cebsvyr eha haqre gur clguba cebsvyre """ b = bcgvbaf.Bcgvbaf('ohc qerphefr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar svyranzr rkcrpgrq") vg = qerphefr.erphefvir_qveyvfg(rkgen, bcg.kqri) vs bcg.cebsvyr: vzcbeg pCebsvyr qrs qb_vg(): sbe v va vg: cnff pCebsvyr.eha('qb_vg()') ryfr: vs bcg.dhvrg: sbe v va vg: cnff ryfr: sbe (anzr,fg) va vg: cevag anzr vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc fcyvg [-gpo] [-a anzr] [--orapu] [svyranzrf...] 
-- e,erzbgr= erzbgr ercbfvgbel cngu o,oybof bhgchg n frevrf bs oybo vqf g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) A,abbc qba'g npghnyyl fnir gur qngn naljurer d,dhvrg qba'g cevag cebterff zrffntrf i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) pbcl whfg pbcl vachg gb bhgchg, unfufcyvggvat nybat gur jnl orapu cevag orapuznex gvzvatf gb fgqree znk-cnpx-fvmr= znkvzhz olgrf va n fvatyr cnpx znk-cnpx-bowrpgf= znkvzhz ahzore bs bowrpgf va n fvatyr cnpx snabhg= znkvzhz ahzore bs oybof va n fvatyr gerr """ b = bcgvbaf.Bcgvbaf('ohc fcyvg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr be bcg.abbc be bcg.pbcl): b.sngny("hfr bar be zber bs -o, -g, -p, -a, -A, --pbcl") vs (bcg.abbc be bcg.pbcl) naq (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny('-A vf vapbzcngvoyr jvgu -o, -g, -p, -a') vs bcg.ireobfr >= 2: tvg.ireobfr = bcg.ireobfr - 1 bcg.orapu = 1 vs bcg.znk_cnpx_fvmr: unfufcyvg.znk_cnpx_fvmr = cnefr_ahz(bcg.znk_cnpx_fvmr) vs bcg.znk_cnpx_bowrpgf: unfufcyvg.znk_cnpx_bowrpgf = cnefr_ahz(bcg.znk_cnpx_bowrpgf) vs bcg.snabhg: unfufcyvg.snabhg = cnefr_ahz(bcg.snabhg) vs bcg.oybof: unfufcyvg.snabhg = 0 vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") fgneg_gvzr = gvzr.gvzr() ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.abbc be bcg.pbcl: pyv = j = byqers = Abar ryvs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() svyrf = rkgen naq (bcra(sa) sbe sa va rkgen) be [flf.fgqva] vs j: funyvfg = unfufcyvg.fcyvg_gb_funyvfg(j, svyrf) gerr = j.arj_gerr(funyvfg) ryfr: ynfg = 0 sbe (oybo, ovgf) va unfufcyvg.unfufcyvg_vgre(svyrf): unfufcyvg.gbgny_fcyvg += 
yra(oybo) vs bcg.pbcl: flf.fgqbhg.jevgr(fge(oybo)) zrtf = unfufcyvg.gbgny_fcyvg/1024/1024 vs abg bcg.dhvrg naq ynfg != zrtf: cebterff('%q Zolgrf ernq\e' % zrtf) ynfg = zrtf cebterff('%q Zolgrf ernq, qbar.\a' % zrtf) vs bcg.ireobfr: ybt('\a') vs bcg.oybof: sbe (zbqr,anzr,ova) va funyvfg: cevag ova.rapbqr('urk') vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fcyvg\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') vs j: j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() frpf = gvzr.gvzr() - fgneg_gvzr fvmr = unfufcyvg.gbgny_fcyvg vs bcg.orapu: ybt('\aohc: %.2sxolgrf va %.2s frpf = %.2s xolgrf/frp\a' % (fvmr/1024., frpf, fvmr/1024./frpf)) #!/hfe/ova/rai clguba vzcbeg flf, er, fgehpg, zznc sebz ohc vzcbeg tvg, bcgvbaf sebz ohc.urycref vzcbeg * qrs f_sebz_olgrf(olgrf): pyvfg = [pue(o) sbe o va olgrf] erghea ''.wbva(pyvfg) qrs ercbeg(pbhag): svryqf = ['IzFvmr', 'IzEFF', 'IzQngn', 'IzFgx'] q = {} sbe yvar va bcra('/cebp/frys/fgnghf').ernqyvarf(): y = er.fcyvg(e':\f*', yvar.fgevc(), 1) q[y[0]] = y[1] vs pbhag >= 0: r1 = pbhag svryqf = [q[x] sbe x va svryqf] ryfr: r1 = '' cevag ('%9f ' + ('%10f ' * yra(svryqf))) % ghcyr([r1] + svryqf) flf.fgqbhg.syhfu() bcgfcrp = """ ohc zrzgrfg [-a ryrzragf] [-p plpyrf] -- a,ahzore= ahzore bs bowrpgf cre plpyr p,plpyrf= ahzore bs plpyrf gb eha vtaber-zvqk vtaber .zvqk svyrf, hfr bayl .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zrzgrfg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') tvg.vtaber_zvqk = bcg.vtaber_zvqk tvg.purpx_ercb_be_qvr() z = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) plpyrf = bcg.plpyrf be 100 ahzore = bcg.ahzore be 10000 ercbeg(-1) s = bcra('/qri/henaqbz') n = zznc.zznc(-1, 20) ercbeg(0) sbe p va 
kenatr(plpyrf): sbe a va kenatr(ahzore): o = s.ernq(3) vs 0: olgrf = yvfg(fgehpg.hacnpx('!OOO', o)) + [0]*17 olgrf[2] &= 0ks0 ova = fgehpg.cnpx('!20f', f_sebz_olgrf(olgrf)) ryfr: n[0:2] = o[0:2] n[2] = pue(beq(o[2]) & 0ks0) ova = fge(n[0:20]) #cevag ova.rapbqr('urk') z.rkvfgf(ova) ercbeg((p+1)*ahzore) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * qrs cevag_abqr(grkg, a): cersvk = '' vs bcg.unfu: cersvk += "%f " % a.unfu.rapbqr('urk') vs fgng.F_VFQVE(a.zbqr): cevag '%f%f/' % (cersvk, grkg) ryvs fgng.F_VFYAX(a.zbqr): cevag '%f%f@' % (cersvk, grkg) ryfr: cevag '%f%f' % (cersvk, grkg) bcgfcrp = """ ohc yf -- f,unfu fubj unfu sbe rnpu svyr """ b = bcgvbaf.Bcgvbaf('ohc yf', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) vs abg rkgen: rkgen = ['/'] erg = 0 sbe q va rkgen: gel: a = gbc.yerfbyir(q) vs fgng.F_VFQVE(a.zbqr): sbe fho va a: cevag_abqr(fho.anzr, fho) ryfr: cevag_abqr(q, a) rkprcg isf.AbqrReebe, r: ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er, fgng, ernqyvar, sazngpu sebz ohc vzcbeg bcgvbaf, tvg, fudhbgr, isf sebz ohc.urycref vzcbeg * qrs abqr_anzr(grkg, a): vs fgng.F_VFQVE(a.zbqr): erghea '%f/' % grkg ryvs fgng.F_VFYAX(a.zbqr): erghea '%f@' % grkg ryfr: erghea '%f' % grkg qrs qb_yf(cngu, a): y = [] vs fgng.F_VFQVE(a.zbqr): sbe fho va a: y.nccraq(abqr_anzr(fho.anzr, fho)) ryfr: y.nccraq(abqr_anzr(cngu, a)) cevag pbyhzangr(y, '') qrs jevgr_gb_svyr(vas, bhgs): sbe oybo va puhaxlernqre(vas): bhgs.jevgr(oybo) qrs vachgvgre(): vs bf.vfnggl(flf.fgqva.svyrab()): juvyr 1: gel: lvryq enj_vachg('ohc> ') rkprcg RBSReebe: oernx ryfr: sbe yvar va flf.fgqva: lvryq yvar qrs _pbzcyrgre_trg_fhof(yvar): (dglcr, ynfgjbeq) = fudhbgr.hasvavfurq_jbeq(yvar) (qve,anzr) = bf.cngu.fcyvg(ynfgjbeq) #ybt('\apbzcyrgre: %e %e %e\a' % (dglcr, ynfgjbeq, grkg)) a = cjq.erfbyir(qve) fhof = yvfg(svygre(ynzoqn k: k.anzr.fgnegfjvgu(anzr), a.fhof())) 
erghea (qve, anzr, dglcr, ynfgjbeq, fhof) _ynfg_yvar = Abar _ynfg_erf = Abar qrs pbzcyrgre(grkg, fgngr): tybony _ynfg_yvar tybony _ynfg_erf gel: yvar = ernqyvar.trg_yvar_ohssre()[:ernqyvar.trg_raqvqk()] vs _ynfg_yvar != yvar: _ynfg_erf = _pbzcyrgre_trg_fhof(yvar) _ynfg_yvar = yvar (qve, anzr, dglcr, ynfgjbeq, fhof) = _ynfg_erf vs fgngr < yra(fhof): fa = fhof[fgngr] fa1 = fa.erfbyir('') # qrers flzyvaxf shyyanzr = bf.cngu.wbva(qve, fa.anzr) vs fgng.F_VFQVE(fa1.zbqr): erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr+'/', grezvangr=Snyfr) ryfr: erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr, grezvangr=Gehr) + ' ' erghea grkg + erg rkprcg Rkprcgvba, r: ybt('\areebe va pbzcyrgvba: %f\a' % r) bcgfcrp = """ ohc sgc """ b = bcgvbaf.Bcgvbaf('ohc sgc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) cjq = gbc vs rkgen: yvarf = rkgen ryfr: ernqyvar.frg_pbzcyrgre_qryvzf(' \g\a\e/') ernqyvar.frg_pbzcyrgre(pbzcyrgre) ernqyvar.cnefr_naq_ovaq("gno: pbzcyrgr") yvarf = vachgvgre() sbe yvar va yvarf: vs abg yvar.fgevc(): pbagvahr jbeqf = [jbeq sbe (jbeqfgneg,jbeq) va fudhbgr.dhbgrfcyvg(yvar)] pzq = jbeqf[0].ybjre() #ybt('rkrphgr: %e %e\a' % (pzq, cnez)) gel: vs pzq == 'yf': sbe cnez va (jbeqf[1:] be ['.']): qb_yf(cnez, cjq.erfbyir(cnez)) ryvs pzq == 'pq': sbe cnez va jbeqf[1:]: cjq = cjq.erfbyir(cnez) ryvs pzq == 'cjq': cevag cjq.shyyanzr() ryvs pzq == 'png': sbe cnez va jbeqf[1:]: jevgr_gb_svyr(cjq.erfbyir(cnez).bcra(), flf.fgqbhg) ryvs pzq == 'trg': vs yra(jbeqf) abg va [2,3]: envfr Rkprcgvba('Hfntr: trg [ybpnyanzr]') eanzr = jbeqf[1] (qve,onfr) = bf.cngu.fcyvg(eanzr) yanzr = yra(jbeqf)>2 naq jbeqf[2] be onfr vas = cjq.erfbyir(eanzr).bcra() ybt('Fnivat %e\a' % yanzr) jevgr_gb_svyr(vas, bcra(yanzr, 'jo')) ryvs pzq == 'ztrg': sbe cnez va jbeqf[1:]: (qve,onfr) = bf.cngu.fcyvg(cnez) sbe a va cjq.erfbyir(qve).fhof(): vs sazngpu.sazngpu(a.anzr, onfr): gel: ybt('Fnivat %e\a' % a.anzr) vas = a.bcra() bhgs = bcra(a.anzr, 'jo') 
jevgr_gb_svyr(vas, bhgs) bhgs.pybfr() rkprcg Rkprcgvba, r: ybt(' reebe: %f\a' % r) ryvs pzq == 'uryc' be pzq == '?': ybt('Pbzznaqf: yf pq cjq png trg ztrg uryc dhvg\a') ryvs pzq == 'dhvg' be pzq == 'rkvg' be pzq == 'olr': oernx ryfr: envfr Rkprcgvba('ab fhpu pbzznaq %e' % pzq) rkprcg Rkprcgvba, r: ybt('reebe: %f\a' % r) #envfr #!/hfe/ova/rai clguba vzcbeg flf, zznc sebz ohc vzcbeg bcgvbaf, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc enaqbz [-F frrq] -- F,frrq= bcgvbany enaqbz ahzore frrq (qrsnhyg 1) s,sbepr cevag enaqbz qngn gb fgqbhg rira vs vg'f n ggl """ b = bcgvbaf.Bcgvbaf('ohc enaqbz', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") gbgny = cnefr_ahz(rkgen[0]) vs bcg.sbepr be (abg bf.vfnggl(1) naq abg ngbv(bf.raiveba.trg('OHC_SBEPR_GGL')) & 1): _unfufcyvg.jevgr_enaqbz(flf.fgqbhg.svyrab(), gbgny, bcg.frrq be 0) ryfr: ybt('reebe: abg jevgvat ovanel qngn gb n grezvany. Hfr -s gb sbepr.\a') flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc uryc """ b = bcgvbaf.Bcgvbaf('ohc uryc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) == 0: # gur jenccre cebtenz cebivqrf gur qrsnhyg hfntr fgevat bf.rkrpic(bf.raiveba['OHC_ZNVA_RKR'], ['ohc']) ryvs yra(rkgen) == 1: qbpanzr = (rkgen[0]=='ohc' naq 'ohc' be ('ohc-%f' % rkgen[0])) rkr = flf.neti[0] (rkrcngu, rkrsvyr) = bf.cngu.fcyvg(rkr) znacngu = bf.cngu.wbva(rkrcngu, '../Qbphzragngvba/' + qbpanzr + '.[1-9]') t = tybo.tybo(znacngu) vs t: bf.rkrpic('zna', ['zna', '-y', t[0]]) ryfr: bf.rkrpic('zna', ['zna', qbpanzr]) ryfr: b.sngny("rknpgyl bar pbzznaq anzr rkcrpgrq") #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng, reeab, shfr, er, gvzr, grzcsvyr sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * pynff Fgng(shfr.Fgng): qrs __vavg__(frys): frys.fg_zbqr = 0 frys.fg_vab = 0 frys.fg_qri = 0 frys.fg_ayvax = 0 frys.fg_hvq = 0 frys.fg_tvq = 0 frys.fg_fvmr = 0 frys.fg_ngvzr = 0 frys.fg_zgvzr 
= 0 frys.fg_pgvzr = 0 frys.fg_oybpxf = 0 frys.fg_oyxfvmr = 0 frys.fg_eqri = 0 pnpur = {} qrs pnpur_trg(gbc, cngu): cnegf = cngu.fcyvg('/') pnpur[('',)] = gbc p = Abar znk = yra(cnegf) #ybt('pnpur: %e\a' % pnpur.xrlf()) sbe v va enatr(znk): cer = cnegf[:znk-v] #ybt('pnpur gelvat: %e\a' % cer) p = pnpur.trg(ghcyr(cer)) vs p: erfg = cnegf[znk-v:] sbe e va erfg: #ybt('erfbyivat %e sebz %e\a' % (e, p.shyyanzr())) p = p.yerfbyir(e) xrl = ghcyr(cer + [e]) #ybt('fnivat: %e\a' % (xrl,)) pnpur[xrl] = p oernx nffreg(p) erghea p pynff OhcSf(shfr.Shfr): qrs __vavg__(frys, gbc): shfr.Shfr.__vavg__(frys) frys.gbc = gbc qrs trgngge(frys, cngu): ybt('--trgngge(%e)\a' % cngu) gel: abqr = pnpur_trg(frys.gbc, cngu) fg = Fgng() fg.fg_zbqr = abqr.zbqr fg.fg_ayvax = abqr.ayvaxf() fg.fg_fvmr = abqr.fvmr() fg.fg_zgvzr = abqr.zgvzr fg.fg_pgvzr = abqr.pgvzr fg.fg_ngvzr = abqr.ngvzr erghea fg rkprcg isf.AbFhpuSvyr: erghea -reeab.RABRAG qrs ernqqve(frys, cngu, bssfrg): ybt('--ernqqve(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) lvryq shfr.Qveragel('.') lvryq shfr.Qveragel('..') sbe fho va abqr.fhof(): lvryq shfr.Qveragel(fho.anzr) qrs ernqyvax(frys, cngu): ybt('--ernqyvax(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) erghea abqr.ernqyvax() qrs bcra(frys, cngu, syntf): ybt('--bcra(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) nppzbqr = bf.B_EQBAYL | bf.B_JEBAYL | bf.B_EQJE vs (syntf & nppzbqr) != bf.B_EQBAYL: erghea -reeab.RNPPRF abqr.bcra() qrs eryrnfr(frys, cngu, syntf): ybt('--eryrnfr(%e)\a' % cngu) qrs ernq(frys, cngu, fvmr, bssfrg): ybt('--ernq(%e)\a' % cngu) a = pnpur_trg(frys.gbc, cngu) b = a.bcra() b.frrx(bssfrg) erghea b.ernq(fvmr) vs abg unfngge(shfr, '__irefvba__'): envfr EhagvzrReebe, "lbhe shfr zbqhyr vf gbb byq sbe shfr.__irefvba__" shfr.shfr_clguba_ncv = (0, 2) bcgfcrp = """ ohc shfr [-q] [-s] -- q,qroht vapernfr qroht yriry s,sbertebhaq eha va sbertebhaq """ b = bcgvbaf.Bcgvbaf('ohc shfr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: 
b.sngny("rknpgyl bar nethzrag rkcrpgrq") tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) s = OhcSf(gbc) s.shfr_netf.zbhagcbvag = rkgen[0] vs bcg.qroht: s.shfr_netf.nqq('qroht') vs bcg.sbertebhaq: s.shfr_netf.frgzbq('sbertebhaq') cevag s.zhygvguernqrq s.zhygvguernqrq = Snyfr s.znva() #!/hfe/ova/rai clguba sebz ohc vzcbeg tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ [OHC_QVE=...] ohc vavg [-e ubfg:cngu] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc vavg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") vs bcg.erzbgr: tvg.vavg_ercb() # ybpny ercb tvg.purpx_ercb_be_qvr() pyv = pyvrag.Pyvrag(bcg.erzbgr, perngr=Gehr) pyv.pybfr() ryfr: tvg.vavg_ercb() #!/hfe/ova/rai clguba vzcbeg flf, zngu, fgehpg, tybo sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * CNTR_FVMR=4096 FUN_CRE_CNTR=CNTR_FVMR/200. qrs zretr(vqkyvfg, ovgf, gnoyr): pbhag = 0 sbe r va tvg.vqkzretr(vqkyvfg): pbhag += 1 cersvk = tvg.rkgenpg_ovgf(r, ovgf) gnoyr[cersvk] = pbhag lvryq r qrs qb_zvqk(bhgqve, bhgsvyranzr, vasvyranzrf): vs abg bhgsvyranzr: nffreg(bhgqve) fhz = Fun1('\0'.wbva(vasvyranzrf)).urkqvtrfg() bhgsvyranzr = '%f/zvqk-%f.zvqk' % (bhgqve, fhz) vac = [] gbgny = 0 sbe anzr va vasvyranzrf: vk = tvg.CnpxVqk(anzr) vac.nccraq(vk) gbgny += yra(vk) ybt('Zretvat %q vaqrkrf (%q bowrpgf).\a' % (yra(vasvyranzrf), gbgny)) vs (abg bcg.sbepr naq (gbgny < 1024 naq yra(vasvyranzrf) < 3)) \ be (bcg.sbepr naq abg gbgny): ybt('zvqk: abguvat gb qb.\a') erghea cntrf = vag(gbgny/FUN_CRE_CNTR) be 1 ovgf = vag(zngu.prvy(zngu.ybt(cntrf, 2))) ragevrf = 2**ovgf ybt('Gnoyr fvmr: %q (%q ovgf)\a' % (ragevrf*4, ovgf)) gnoyr = [0]*ragevrf gel: bf.hayvax(bhgsvyranzr) rkprcg BFReebe: cnff s = bcra(bhgsvyranzr + '.gzc', 'j+') s.jevgr('ZVQK\0\0\0\2') s.jevgr(fgehpg.cnpx('!V', ovgf)) nffreg(s.gryy() == 12) s.jevgr('\0'*4*ragevrf) sbe r va zretr(vac, ovgf, gnoyr): s.jevgr(r) s.jevgr('\0'.wbva(bf.cngu.onfranzr(c) sbe c va vasvyranzrf)) s.frrx(12) 
s.jevgr(fgehpg.cnpx('!%qV' % ragevrf, *gnoyr)) s.pybfr() bf.eranzr(bhgsvyranzr + '.gzc', bhgsvyranzr) # guvf vf whfg sbe grfgvat vs 0: c = tvg.CnpxZvqk(bhgsvyranzr) nffreg(yra(c.vqkanzrf) == yra(vasvyranzrf)) cevag c.vqkanzrf nffreg(yra(c) == gbgny) cv = vgre(c) sbe v va zretr(vac, gbgny, ovgf, gnoyr): nffreg(v == cv.arkg()) nffreg(c.rkvfgf(v)) cevag bhgsvyranzr bcgfcrp = """ ohc zvqk [bcgvbaf...] -- b,bhgchg= bhgchg zvqk svyranzr (qrsnhyg: nhgb-trarengrq) n,nhgb nhgbzngvpnyyl perngr .zvqk sebz nal havaqrkrq .vqk svyrf s,sbepr nhgbzngvpnyyl perngr .zvqk sebz *nyy* .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zvqk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen naq (bcg.nhgb be bcg.sbepr): b.sngny("lbh pna'g hfr -s/-n naq nyfb cebivqr svyranzrf") tvg.purpx_ercb_be_qvr() vs rkgen: qb_zvqk(tvg.ercb('bowrpgf/cnpx'), bcg.bhgchg, rkgen) ryvs bcg.nhgb be bcg.sbepr: cnguf = [tvg.ercb('bowrpgf/cnpx')] cnguf += tybo.tybo(tvg.ercb('vaqrk-pnpur/*/.')) sbe cngu va cnguf: ybt('zvqk: fpnaavat %f\a' % cngu) vs bcg.sbepr: qb_zvqk(cngu, bcg.bhgchg, tybo.tybo('%f/*.vqk' % cngu)) ryvs bcg.nhgb: z = tvg.CnpxVqkYvfg(cngu) arrqrq = {} sbe cnpx va z.cnpxf: # bayl .vqk svyrf jvgubhg n .zvqk ner bcra vs cnpx.anzr.raqfjvgu('.vqk'): arrqrq[cnpx.anzr] = 1 qry z qb_zvqk(cngu, bcg.bhgchg, arrqrq.xrlf()) ybt('\a') ryfr: b.sngny("lbh zhfg hfr -s be -n be cebivqr vachg svyranzrf") #!/hfe/ova/rai clguba vzcbeg flf, bf, enaqbz sebz ohc vzcbeg bcgvbaf sebz ohc.urycref vzcbeg * qrs enaqoybpx(a): y = [] sbe v va kenatr(a): y.nccraq(pue(enaqbz.enaqenatr(0,256))) erghea ''.wbva(y) bcgfcrp = """ ohc qnzntr [-a pbhag] [-f znkfvmr] [-F frrq] -- JNEAVAT: GUVF PBZZNAQ VF RKGERZRYL QNATREBHF a,ahz= ahzore bs oybpxf gb qnzntr f,fvmr= znkvzhz fvmr bs rnpu qnzntrq oybpx creprag= znkvzhz fvmr bs rnpu qnzntrq oybpx (nf n creprag bs ragver svyr) rdhny fcernq qnzntr rirayl guebhtubhg gur svyr F,frrq= enaqbz ahzore frrq (sbe ercrngnoyr grfgf) """ b = bcgvbaf.Bcgvbaf('ohc qnzntr', bcgfcrp) (bcg, syntf, rkgen) 
= b.cnefr(flf.neti[1:]) vs abg rkgen: b.sngny('svyranzrf rkcrpgrq') vs bcg.frrq != Abar: enaqbz.frrq(bcg.frrq) sbe anzr va rkgen: ybt('Qnzntvat "%f"...\a' % anzr) s = bcra(anzr, 'e+o') fg = bf.sfgng(s.svyrab()) fvmr = fg.fg_fvmr vs bcg.creprag be bcg.fvmr: zf1 = vag(sybng(bcg.creprag be 0)/100.0*fvmr) be fvmr zf2 = bcg.fvmr be fvmr znkfvmr = zva(zf1, zf2) ryfr: znkfvmr = 1 puhaxf = bcg.ahz be 10 puhaxfvmr = fvmr/puhaxf sbe e va enatr(puhaxf): fm = enaqbz.enaqenatr(1, znkfvmr+1) vs fm > fvmr: fm = fvmr vs bcg.rdhny: bsf = e*puhaxfvmr ryfr: bsf = enaqbz.enaqenatr(0, fvmr - fm + 1) ybt(' %6q olgrf ng %q\a' % (fm, bsf)) s.frrx(bsf) s.jevgr(enaqoybpx(fm)) s.pybfr() #!/hfe/ova/rai clguba vzcbeg flf, fgehpg, zznc sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * fhfcraqrq_j = Abar qrs vavg_qve(pbaa, net): tvg.vavg_ercb(net) ybt('ohc freire: ohcqve vavgvnyvmrq: %e\a' % tvg.ercbqve) pbaa.bx() qrs frg_qve(pbaa, net): tvg.purpx_ercb_be_qvr(net) ybt('ohc freire: ohcqve vf %e\a' % tvg.ercbqve) pbaa.bx() qrs yvfg_vaqrkrf(pbaa, whax): tvg.purpx_ercb_be_qvr() sbe s va bf.yvfgqve(tvg.ercb('bowrpgf/cnpx')): vs s.raqfjvgu('.vqk'): pbaa.jevgr('%f\a' % s) pbaa.bx() qrs fraq_vaqrk(pbaa, anzr): tvg.purpx_ercb_be_qvr() nffreg(anzr.svaq('/') < 0) nffreg(anzr.raqfjvgu('.vqk')) vqk = tvg.CnpxVqk(tvg.ercb('bowrpgf/cnpx/%f' % anzr)) pbaa.jevgr(fgehpg.cnpx('!V', yra(vqk.znc))) pbaa.jevgr(vqk.znc) pbaa.bx() qrs erprvir_bowrpgf(pbaa, whax): tybony fhfcraqrq_j tvg.purpx_ercb_be_qvr() fhttrfgrq = {} vs fhfcraqrq_j: j = fhfcraqrq_j fhfcraqrq_j = Abar ryfr: j = tvg.CnpxJevgre() juvyr 1: af = pbaa.ernq(4) vs abg af: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq yratgu urnqre, tbg RBS\a') a = fgehpg.hacnpx('!V', af)[0] #ybt('rkcrpgvat %q olgrf\a' % a) vs abg a: ybt('ohc freire: erprvirq %q bowrpg%f.\a' % (j.pbhag, j.pbhag!=1 naq "f" be '')) shyycngu = j.pybfr() vs shyycngu: (qve, anzr) = bf.cngu.fcyvg(shyycngu) pbaa.jevgr('%f.vqk\a' % anzr) pbaa.bx() erghea ryvs a == 0kssssssss: ybt('ohc 
freire: erprvir-bowrpgf fhfcraqrq.\a') fhfcraqrq_j = j pbaa.bx() erghea ohs = pbaa.ernq(a) # bowrpg fvmrf va ohc ner ernfbanoyl fznyy #ybt('ernq %q olgrf\a' % a) vs yra(ohs) < a: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq %q olgrf, tbg %q\a' % (a, yra(ohs))) (glcr, pbagrag) = tvg._qrpbqr_cnpxbow(ohs) fun = tvg.pnyp_unfu(glcr, pbagrag) byqcnpx = j.rkvfgf(fun) # SVKZR: jr bayl fhttrfg n fvatyr vaqrk cre plpyr, orpnhfr gur pyvrag # vf pheeragyl qhzo gb qbjaybnq zber guna bar cre plpyr naljnl. # Npghnyyl jr fubhyq svk gur pyvrag, ohg guvf vf n zvabe bcgvzvmngvba # ba gur freire fvqr. vs abg fhttrfgrq naq \ byqcnpx naq (byqcnpx == Gehr be byqcnpx.raqfjvgu('.zvqk')): # SVKZR: jr fubhyqa'g ernyyl unir gb xabj nobhg zvqk svyrf # ng guvf ynlre. Ohg rkvfgf() ba n zvqk qbrfa'g erghea gur # cnpxanzr (fvapr vg qbrfa'g xabj)... cebonoyl jr fubhyq whfg # svk gung qrsvpvrapl bs zvqk svyrf riraghnyyl, nygubhtu vg'yy # znxr gur svyrf ovttre. Guvf zrgubq vf pregnvayl abg irel # rssvpvrag. j.bowpnpur.erserfu(fxvc_zvqk = Gehr) byqcnpx = j.bowpnpur.rkvfgf(fun) ybt('arj fhttrfgvba: %e\a' % byqcnpx) nffreg(byqcnpx) nffreg(byqcnpx != Gehr) nffreg(abg byqcnpx.raqfjvgu('.zvqk')) j.bowpnpur.erserfu(fxvc_zvqk = Snyfr) vs abg fhttrfgrq naq byqcnpx: nffreg(byqcnpx.raqfjvgu('.vqk')) (qve,anzr) = bf.cngu.fcyvg(byqcnpx) vs abg (anzr va fhttrfgrq): ybt("ohc freire: fhttrfgvat vaqrk %f\a" % anzr) pbaa.jevgr('vaqrk %f\a' % anzr) fhttrfgrq[anzr] = 1 ryfr: j._enj_jevgr([ohs]) # ABGERNPURQ qrs ernq_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() e = tvg.ernq_ers(ersanzr) pbaa.jevgr('%f\a' % (e be '').rapbqr('urk')) pbaa.bx() qrs hcqngr_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() arjiny = pbaa.ernqyvar().fgevc() byqiny = pbaa.ernqyvar().fgevc() tvg.hcqngr_ers(ersanzr, arjiny.qrpbqr('urk'), byqiny.qrpbqr('urk')) pbaa.bx() qrs png(pbaa, vq): tvg.purpx_ercb_be_qvr() gel: sbe oybo va tvg.png(vq): pbaa.jevgr(fgehpg.cnpx('!V', yra(oybo))) pbaa.jevgr(oybo) rkprcg XrlReebe, r: ybt('freire: reebe: %f\a' % r) 
pbaa.jevgr('\0\0\0\0') pbaa.reebe(r) ryfr: pbaa.jevgr('\0\0\0\0') pbaa.bx() bcgfcrp = """ ohc freire """ b = bcgvbaf.Bcgvbaf('ohc freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') ybt('ohc freire: ernqvat sebz fgqva.\a') pbzznaqf = { 'vavg-qve': vavg_qve, 'frg-qve': frg_qve, 'yvfg-vaqrkrf': yvfg_vaqrkrf, 'fraq-vaqrk': fraq_vaqrk, 'erprvir-bowrpgf': erprvir_bowrpgf, 'ernq-ers': ernq_ers, 'hcqngr-ers': hcqngr_ers, 'png': png, } # SVKZR: guvf cebgbpby vf gbgnyyl ynzr naq abg ng nyy shgher-cebbs. # (Rfcrpvnyyl fvapr jr nobeg pbzcyrgryl nf fbba nf *nalguvat* onq unccraf) pbaa = Pbaa(flf.fgqva, flf.fgqbhg) ye = yvarernqre(pbaa) sbe _yvar va ye: yvar = _yvar.fgevc() vs abg yvar: pbagvahr ybt('ohc freire: pbzznaq: %e\a' % yvar) jbeqf = yvar.fcyvg(' ', 1) pzq = jbeqf[0] erfg = yra(jbeqf)>1 naq jbeqf[1] be '' vs pzq == 'dhvg': oernx ryfr: pzq = pbzznaqf.trg(pzq) vs pzq: pzq(pbaa, erfg) ryfr: envfr Rkprcgvba('haxabja freire pbzznaq: %e\a' % yvar) ybt('ohc freire: qbar\a') #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc wbva [-e ubfg:cngu] [ersf be unfurf...] 
-- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc wbva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg rkgen: rkgen = yvarernqre(flf.fgqva) erg = 0 vs bcg.erzbgr: pyv = pyvrag.Pyvrag(bcg.erzbgr) png = pyv.png ryfr: pc = tvg.PngCvcr() png = pc.wbva sbe vq va rkgen: gel: sbe oybo va png(vq): flf.fgqbhg.jevgr(oybo) rkprcg XrlReebe, r: flf.fgqbhg.syhfu() ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, er, reeab, fgng, gvzr, zngu sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, vaqrk, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc fnir [-gp] [-a anzr] -- e,erzbgr= erzbgr ercbfvgbel cngu g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) d,dhvrg qba'g fubj cebterff zrgre fznyyre= bayl onpx hc svyrf fznyyre guna a olgrf """ b = bcgvbaf.Bcgvbaf('ohc fnir', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny("hfr bar be zber bs -g, -p, -a") vs abg rkgen: b.sngny("ab svyranzrf tvira") bcg.cebterff = (vfggl naq abg bcg.dhvrg) bcg.fznyyre = cnefr_ahz(bcg.fznyyre be 0) vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() unaqyr_pgey_p() qrs rngfynfu(qve): vs qve.raqfjvgu('/'): erghea qve[:-1] ryfr: erghea qve cnegf = [''] funyvfgf = [[]] qrs _chfu(cneg): nffreg(cneg) cnegf.nccraq(cneg) funyvfgf.nccraq([]) qrs _cbc(sbepr_gerr): nffreg(yra(cnegf) >= 1) cneg = cnegf.cbc() funyvfg = funyvfgf.cbc() gerr = sbepr_gerr be j.arj_gerr(funyvfg) vs funyvfgf: 
funyvfgf[-1].nccraq(('40000', cneg, gerr)) ryfr: # guvf jnf gur gbcyriry, fb chg vg onpx sbe fnavgl funyvfgf.nccraq(funyvfg) erghea gerr ynfgerznva = Abar qrs cebterff_ercbeg(a): tybony pbhag, fhopbhag, ynfgerznva fhopbhag += a pp = pbhag + fhopbhag cpg = gbgny naq (pp*100.0/gbgny) be 0 abj = gvzr.gvzr() ryncfrq = abj - gfgneg xcf = ryncfrq naq vag(pp/1024./ryncfrq) xcf_senp = 10 ** vag(zngu.ybt(xcf+1, 10) - 1) xcf = vag(xcf/xcf_senp)*xcf_senp vs pp: erznva = ryncfrq*1.0/pp * (gbgny-pp) ryfr: erznva = 0.0 vs (ynfgerznva naq (erznva > ynfgerznva) naq ((erznva - ynfgerznva)/ynfgerznva < 0.05)): erznva = ynfgerznva ryfr: ynfgerznva = erznva ubhef = vag(erznva/60/60) zvaf = vag(erznva/60 - ubhef*60) frpf = vag(erznva - ubhef*60*60 - zvaf*60) vs ryncfrq < 30: erznvafge = '' xcffge = '' ryfr: xcffge = '%qx/f' % xcf vs ubhef: erznvafge = '%qu%qz' % (ubhef, zvaf) ryvs zvaf: erznvafge = '%qz%q' % (zvaf, frpf) ryfr: erznvafge = '%qf' % frpf cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf) %f %f\e' % (cpg, pp/1024, gbgny/1024, spbhag, sgbgny, erznvafge, xcffge)) e = vaqrk.Ernqre(tvg.ercb('ohcvaqrk')) qrs nyernql_fnirq(rag): erghea rag.vf_inyvq() naq j.rkvfgf(rag.fun) naq rag.fun qrs jnagerphefr_cer(rag): erghea abg nyernql_fnirq(rag) qrs jnagerphefr_qhevat(rag): erghea abg nyernql_fnirq(rag) be rag.fun_zvffvat() gbgny = sgbgny = 0 vs bcg.cebterff: sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_cer): vs abg (sgbgny % 10024): cebterff('Ernqvat vaqrk: %q\e' % sgbgny) rkvfgf = rag.rkvfgf() unfuinyvq = nyernql_fnirq(rag) rag.frg_fun_zvffvat(abg unfuinyvq) vs abg bcg.fznyyre be rag.fvmr < bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: gbgny += rag.fvmr sgbgny += 1 cebterff('Ernqvat vaqrk: %q, qbar.\a' % sgbgny) unfufcyvg.cebterff_pnyyonpx = cebterff_ercbeg gfgneg = gvzr.gvzr() pbhag = fhopbhag = spbhag = 0 ynfgfxvc_anzr = Abar ynfgqve = '' sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_qhevat): (qve, svyr) = bf.cngu.fcyvg(rag.anzr) rkvfgf = (rag.syntf & 
vaqrk.VK_RKVFGF) unfuinyvq = nyernql_fnirq(rag) jnfzvffvat = rag.fun_zvffvat() byqfvmr = rag.fvmr vs bcg.ireobfr: vs abg rkvfgf: fgnghf = 'Q' ryvs abg unfuinyvq: vs rag.fun == vaqrk.RZCGL_FUN: fgnghf = 'N' ryfr: fgnghf = 'Z' ryfr: fgnghf = ' ' vs bcg.ireobfr >= 2: ybt('%f %-70f\a' % (fgnghf, rag.anzr)) ryvs abg fgng.F_VFQVE(rag.zbqr) naq ynfgqve != qve: vs abg ynfgqve.fgnegfjvgu(qve): ybt('%f %-70f\a' % (fgnghf, bf.cngu.wbva(qve, ''))) ynfgqve = qve vs bcg.cebterff: cebterff_ercbeg(0) spbhag += 1 vs abg rkvfgf: pbagvahr vs bcg.fznyyre naq rag.fvmr >= bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: nqq_reebe('fxvccvat ynetr svyr "%f"' % rag.anzr) ynfgfxvc_anzr = rag.anzr pbagvahr nffreg(qve.fgnegfjvgu('/')) qvec = qve.fcyvg('/') juvyr cnegf > qvec: _cbc(sbepr_gerr = Abar) vs qve != '/': sbe cneg va qvec[yra(cnegf):]: _chfu(cneg) vs abg svyr: # ab svyranzr cbegvba zrnaf guvf vf n fhoqve. Ohg # fho/cneragqverpgbevrf nyernql unaqyrq va gur cbc/chfu() cneg nobir. byqgerr = nyernql_fnirq(rag) # znl or Abar arjgerr = _cbc(sbepr_gerr = byqgerr) vs abg byqgerr: vs ynfgfxvc_anzr naq ynfgfxvc_anzr.fgnegfjvgu(rag.anzr): rag.vainyvqngr() ryfr: rag.inyvqngr(040000, arjgerr) rag.ercnpx() vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr pbagvahr # vg'f abg n qverpgbel vq = Abar vs unfuinyvq: zbqr = '%b' % rag.tvgzbqr vq = rag.fun funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) ryfr: vs fgng.F_VFERT(rag.zbqr): gel: s = unfufcyvg.bcra_abngvzr(rag.anzr) rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = unfufcyvg.fcyvg_gb_oybo_be_gerr(j, [s]) ryfr: vs fgng.F_VFQVE(rag.zbqr): nffreg(0) # unaqyrq nobir ryvs fgng.F_VFYAX(rag.zbqr): gel: ey = bf.ernqyvax(rag.anzr) rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = ('120000', j.arj_oybo(ey)) ryfr: nqq_reebe(Rkprcgvba('fxvccvat fcrpvny svyr "%f"' % rag.anzr)) 
ynfgfxvc_anzr = rag.anzr vs vq: rag.inyvqngr(vag(zbqr, 8), vq) rag.ercnpx() funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr fhopbhag = 0 vs bcg.cebterff: cpg = gbgny naq pbhag*100.0/gbgny be 100 cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf), qbar. \a' % (cpg, pbhag/1024, gbgny/1024, spbhag, sgbgny)) juvyr yra(cnegf) > 1: _cbc(sbepr_gerr = Abar) nffreg(yra(funyvfgf) == 1) gerr = j.arj_gerr(funyvfgf[-1]) vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fnir\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq juvyr fnivat.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc gvpx """ b = bcgvbaf.Bcgvbaf('ohc gvpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") g = gvzr.gvzr() gyrsg = 1 - (g - vag(g)) gvzr.fyrrc(gyrsg) #!/hfe/ova/rai clguba vzcbeg bf, flf, fgng, gvzr sebz ohc vzcbeg bcgvbaf, tvg, vaqrk, qerphefr sebz ohc.urycref vzcbeg * qrs zretr_vaqrkrf(bhg, e1, e2): sbe r va vaqrk.ZretrVgre([e1, e2]): # SVKZR: fubhyqa'g jr erzbir qryrgrq ragevrf riraghnyyl? Jura? 
bhg.nqq_vkragel(r) pynff VgreUrycre: qrs __vavg__(frys, y): frys.v = vgre(y) frys.phe = Abar frys.arkg() qrs arkg(frys): gel: frys.phe = frys.v.arkg() rkprcg FgbcVgrengvba: frys.phe = Abar erghea frys.phe qrs purpx_vaqrk(ernqre): gel: ybt('purpx: purpxvat sbejneq vgrengvba...\a') r = Abar q = {} sbe r va ernqre.sbejneq_vgre(): vs r.puvyqera_a: vs bcg.ireobfr: ybt('%08k+%-4q %e\a' % (r.puvyqera_bsf, r.puvyqera_a, r.anzr)) nffreg(r.puvyqera_bsf) nffreg(r.anzr.raqfjvgu('/')) nffreg(abg q.trg(r.puvyqera_bsf)) q[r.puvyqera_bsf] = 1 vs r.syntf & vaqrk.VK_UNFUINYVQ: nffreg(r.fun != vaqrk.RZCGL_FUN) nffreg(r.tvgzbqr) nffreg(abg r be r.anzr == '/') # ynfg ragel vf *nyjnlf* / ybt('purpx: purpxvat abezny vgrengvba...\a') ynfg = Abar sbe r va ernqre: vs ynfg: nffreg(ynfg > r.anzr) ynfg = r.anzr rkprcg: ybt('vaqrk reebe! ng %e\a' % r) envfr ybt('purpx: cnffrq.\a') qrs hcqngr_vaqrk(gbc): ev = vaqrk.Ernqre(vaqrksvyr) jv = vaqrk.Jevgre(vaqrksvyr) evt = VgreUrycre(ev.vgre(anzr=gbc)) gfgneg = vag(gvzr.gvzr()) unfutra = Abar vs bcg.snxr_inyvq: qrs unfutra(anzr): erghea (0100644, vaqrk.SNXR_FUN) gbgny = 0 sbe (cngu,cfg) va qerphefr.erphefvir_qveyvfg([gbc], kqri=bcg.kqri): vs bcg.ireobfr>=2 be (bcg.ireobfr==1 naq fgng.F_VFQVE(cfg.fg_zbqr)): flf.fgqbhg.jevgr('%f\a' % cngu) flf.fgqbhg.syhfu() cebterff('Vaqrkvat: %q\e' % gbgny) ryvs abg (gbgny % 128): cebterff('Vaqrkvat: %q\e' % gbgny) gbgny += 1 juvyr evt.phe naq evt.phe.anzr > cngu: # qryrgrq cnguf vs evt.phe.rkvfgf(): evt.phe.frg_qryrgrq() evt.phe.ercnpx() evt.arkg() vs evt.phe naq evt.phe.anzr == cngu: # cnguf gung nyernql rkvfgrq vs cfg: evt.phe.sebz_fgng(cfg, gfgneg) vs abg (evt.phe.syntf & vaqrk.VK_UNFUINYVQ): vs unfutra: (evt.phe.tvgzbqr, evt.phe.fun) = unfutra(cngu) evt.phe.syntf |= vaqrk.VK_UNFUINYVQ vs bcg.snxr_vainyvq: evt.phe.vainyvqngr() evt.phe.ercnpx() evt.arkg() ryfr: # arj cnguf jv.nqq(cngu, cfg, unfutra = unfutra) cebterff('Vaqrkvat: %q, qbar.\a' % gbgny) vs ev.rkvfgf(): ev.fnir() jv.syhfu() vs jv.pbhag: je = 
jv.arj_ernqre() vs bcg.purpx: ybt('purpx: orsber zretvat: byqsvyr\a') purpx_vaqrk(ev) ybt('purpx: orsber zretvat: arjsvyr\a') purpx_vaqrk(je) zv = vaqrk.Jevgre(vaqrksvyr) zretr_vaqrkrf(zv, ev, je) ev.pybfr() zv.pybfr() je.pybfr() jv.nobeg() ryfr: jv.pybfr() bcgfcrp = """ ohc vaqrk <-c|z|h> [bcgvbaf...] -- c,cevag cevag gur vaqrk ragevrf sbe gur tvira anzrf (nyfb jbexf jvgu -h) z,zbqvsvrq cevag bayl nqqrq/qryrgrq/zbqvsvrq svyrf (vzcyvrf -c) f,fgnghf cevag rnpu svyranzr jvgu n fgnghf pune (N/Z/Q) (vzcyvrf -c) U,unfu cevag gur unfu sbe rnpu bowrpg arkg gb vgf anzr (vzcyvrf -c) y,ybat cevag zber vasbezngvba nobhg rnpu svyr h,hcqngr (erphefviryl) hcqngr gur vaqrk ragevrf sbe gur tvira svyranzrf k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf snxr-inyvq znex nyy vaqrk ragevrf nf hc-gb-qngr rira vs gurl nera'g snxr-vainyvq znex nyy vaqrk ragevrf nf vainyvq purpx pnershyyl purpx vaqrk svyr vagrtevgl s,vaqrksvyr= gur anzr bs gur vaqrk svyr (qrsnhyg 'vaqrk') i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) """ b = bcgvbaf.Bcgvbaf('ohc vaqrk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg (bcg.zbqvsvrq be bcg['cevag'] be bcg.fgnghf be bcg.hcqngr be bcg.purpx): b.sngny('fhccyl bar be zber bs -c, -f, -z, -h, be --purpx') vs (bcg.snxr_inyvq be bcg.snxr_vainyvq) naq abg bcg.hcqngr: b.sngny('--snxr-{va,}inyvq ner zrnavatyrff jvgubhg -h') vs bcg.snxr_inyvq naq bcg.snxr_vainyvq: b.sngny('--snxr-inyvq vf vapbzcngvoyr jvgu --snxr-vainyvq') tvg.purpx_ercb_be_qvr() vaqrksvyr = bcg.vaqrksvyr be tvg.ercb('ohcvaqrk') unaqyr_pgey_p() vs bcg.purpx: ybt('purpx: fgnegvat vavgvny purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) cnguf = vaqrk.erqhpr_cnguf(rkgen) vs bcg.hcqngr: vs abg cnguf: b.sngny('hcqngr (-h) erdhrfgrq ohg ab cnguf tvira') sbe (ec,cngu) va cnguf: hcqngr_vaqrk(ec) vs bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq: sbe (anzr, rag) va vaqrk.Ernqre(vaqrksvyr).svygre(rkgen be ['']): vs (bcg.zbqvsvrq naq (rag.vf_inyvq() be rag.vf_qryrgrq() be abg 
rag.zbqr)): pbagvahr yvar = '' vs bcg.fgnghf: vs rag.vf_qryrgrq(): yvar += 'Q ' ryvs abg rag.vf_inyvq(): vs rag.fun == vaqrk.RZCGL_FUN: yvar += 'N ' ryfr: yvar += 'Z ' ryfr: yvar += ' ' vs bcg.unfu: yvar += rag.fun.rapbqr('urk') + ' ' vs bcg.ybat: yvar += "%7f %7f " % (bpg(rag.zbqr), bpg(rag.tvgzbqr)) cevag yvar + (anzr be './') vs bcg.purpx naq (bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq be bcg.hcqngr): ybt('purpx: fgnegvat svany purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg sebz ohc vzcbeg bcgvbaf, urycref bcgfcrp = """ ohc eonpxhc-freire -- Guvf pbzznaq vf abg vagraqrq gb or eha znahnyyl. """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc-freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') # trg gur fhopbzznaq'f neti. # Abeznyyl jr pbhyq whfg cnff guvf ba gur pbzznaq yvar, ohg fvapr jr'yy bsgra # or trggvat pnyyrq ba gur bgure raq bs na ffu cvcr, juvpu graqf gb znatyr # neti (ol fraqvat vg ivn gur furyy), guvf jnl vf zhpu fnsre. ohs = flf.fgqva.ernq(4) fm = fgehpg.hacnpx('!V', ohs)[0] nffreg(fm > 0) nffreg(fm < 1000000) ohs = flf.fgqva.ernq(fm) nffreg(yra(ohs) == fm) neti = ohs.fcyvg('\0') # fgqva/fgqbhg ner fhccbfrqyl pbaarpgrq gb 'ohc freire' gung gur pnyyre # fgnegrq sbe hf (bsgra ba gur bgure raq bs na ffu ghaary), fb jr qba'g jnag # gb zvfhfr gurz. Zbir gurz bhg bs gur jnl, gura ercynpr fgqbhg jvgu # n cbvagre gb fgqree va pnfr bhe fhopbzznaq jnagf gb qb fbzrguvat jvgu vg. # # Vg zvtug or avpr gb qb gur fnzr jvgu fgqva, ohg zl rkcrevzragf fubjrq gung # ffu frrzf gb znxr vgf puvyq'f fgqree n ernqnoyr-ohg-arire-ernqf-nalguvat # fbpxrg. Gurl ernyyl fubhyq unir hfrq fuhgqbja(FUHG_JE) ba gur bgure raq # bs vg, ohg cebonoyl qvqa'g. Naljnl, vg'f gbb zrffl, fb yrg'f whfg znxr fher # nalbar ernqvat sebz fgqva vf qvfnccbvagrq. 
# # (Lbh pna'g whfg yrnir fgqva/fgqbhg "abg bcra" ol pybfvat gur svyr # qrfpevcgbef. Gura gur arkg svyr gung bcraf vf nhgbzngvpnyyl nffvtarq 0 be 1, # naq crbcyr *gelvat* gb ernq/jevgr fgqva/fgqbhg trg fperjrq.) bf.qhc2(0, 3) bf.qhc2(1, 4) bf.qhc2(2, 1) sq = bf.bcra('/qri/ahyy', bf.B_EQBAYL) bf.qhc2(sq, 0) bf.pybfr(sq) bf.raiveba['OHC_FREIRE_ERIREFR'] = urycref.ubfganzr() bf.rkrpic(neti[0], neti) flf.rkvg(99) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo, fhocebprff, gvzr sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * cne2_bx = 0 ahyys = bcra('/qri/ahyy') qrs qroht(f): vs bcg.ireobfr: ybt(f) qrs eha(neti): # ng yrnfg va clguba 2.5, hfvat "fgqbhg=2" be "fgqbhg=flf.fgqree" orybj # qbrfa'g npghnyyl jbex, orpnhfr fhocebprff pybfrf sq #2 evtug orsber # rkrpvat sbe fbzr ernfba. Fb jr jbex nebhaq vg ol qhcyvpngvat gur sq # svefg. sq = bf.qhc(2) # pbcl fgqree gel: c = fhocebprff.Cbcra(neti, fgqbhg=sq, pybfr_sqf=Snyfr) erghea c.jnvg() svanyyl: bf.pybfr(sq) qrs cne2_frghc(): tybony cne2_bx ei = 1 gel: c = fhocebprff.Cbcra(['cne2', '--uryc'], fgqbhg=ahyys, fgqree=ahyys, fgqva=ahyys) ei = c.jnvg() rkprcg BFReebe: ybt('sfpx: jneavat: cne2 abg sbhaq; qvfnoyvat erpbirel srngherf.\a') ryfr: cne2_bx = 1 qrs cnei(yiy): vs bcg.ireobfr >= yiy: vs vfggl: erghea [] ryfr: erghea ['-d'] ryfr: erghea ['-dd'] qrs cne2_trarengr(onfr): erghea eha(['cne2', 'perngr', '-a1', '-p200'] + cnei(2) + ['--', onfr, onfr+'.cnpx', onfr+'.vqk']) qrs cne2_irevsl(onfr): erghea eha(['cne2', 'irevsl'] + cnei(3) + ['--', onfr]) qrs cne2_ercnve(onfr): erghea eha(['cne2', 'ercnve'] + cnei(2) + ['--', onfr]) qrs dhvpx_irevsl(onfr): s = bcra(onfr + '.cnpx', 'eo') s.frrx(-20, 2) jnagfhz = s.ernq(20) nffreg(yra(jnagfhz) == 20) s.frrx(0) fhz = Fun1() sbe o va puhaxlernqre(s, bf.sfgng(s.svyrab()).fg_fvmr - 20): fhz.hcqngr(o) vs fhz.qvtrfg() != jnagfhz: envfr InyhrReebe('rkcrpgrq %e, tbg %e' % (jnagfhz.rapbqr('urk'), fhz.urkqvtrfg())) qrs tvg_irevsl(onfr): vs bcg.dhvpx: gel: dhvpx_irevsl(onfr) rkprcg Rkprcgvba, r: 
qroht('reebe: %f\a' % r) erghea 1 erghea 0 ryfr: erghea eha(['tvg', 'irevsl-cnpx', '--', onfr]) qrs qb_cnpx(onfr, ynfg): pbqr = 0 vs cne2_bx naq cne2_rkvfgf naq (bcg.ercnve be abg bcg.trarengr): ierfhyg = cne2_irevsl(onfr) vs ierfhyg != 0: vs bcg.ercnve: eerfhyg = cne2_ercnve(onfr) vs eerfhyg != 0: cevag '%f cne2 ercnve: snvyrq (%q)' % (ynfg, eerfhyg) pbqr = eerfhyg ryfr: cevag '%f cne2 ercnve: fhpprrqrq (0)' % ynfg pbqr = 100 ryfr: cevag '%f cne2 irevsl: snvyrq (%q)' % (ynfg, ierfhyg) pbqr = ierfhyg ryfr: cevag '%f bx' % ynfg ryvs abg bcg.trarengr be (cne2_bx naq abg cne2_rkvfgf): terfhyg = tvg_irevsl(onfr) vs terfhyg != 0: cevag '%f tvg irevsl: snvyrq (%q)' % (ynfg, terfhyg) pbqr = terfhyg ryfr: vs cne2_bx naq bcg.trarengr: cerfhyg = cne2_trarengr(onfr) vs cerfhyg != 0: cevag '%f cne2 perngr: snvyrq (%q)' % (ynfg, cerfhyg) pbqr = cerfhyg ryfr: cevag '%f bx' % ynfg ryfr: cevag '%f bx' % ynfg ryfr: nffreg(bcg.trarengr naq (abg cne2_bx be cne2_rkvfgf)) qroht(' fxvccrq: cne2 svyr nyernql trarengrq.\a') erghea pbqr bcgfcrp = """ ohc sfpx [bcgvbaf...] [svyranzrf...] -- e,ercnve nggrzcg gb ercnve reebef hfvat cne2 (qnatrebhf!) 
t,trarengr trarengr nhgb-ercnve vasbezngvba hfvat cne2 i,ireobfr vapernfr ireobfvgl (pna or hfrq zber guna bapr) dhvpx whfg purpx cnpx fun1fhz, qba'g hfr tvg irevsl-cnpx w,wbof= eha 'a' wbof va cnenyyry cne2-bx vzzrqvngryl erghea 0 vs cne2 vf bx, 1 vs abg qvfnoyr-cne2 vtaber cne2 rira vs vg vf ninvynoyr """ b = bcgvbaf.Bcgvbaf('ohc sfpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) cne2_frghc() vs bcg.cne2_bx: vs cne2_bx: flf.rkvg(0) # 'gehr' va fu ryfr: flf.rkvg(1) vs bcg.qvfnoyr_cne2: cne2_bx = 0 tvg.purpx_ercb_be_qvr() vs abg rkgen: qroht('sfpx: Ab svyranzrf tvira: purpxvat nyy cnpxf.\a') rkgen = tybo.tybo(tvg.ercb('bowrpgf/cnpx/*.cnpx')) pbqr = 0 pbhag = 0 bhgfgnaqvat = {} sbe anzr va rkgen: vs anzr.raqfjvgu('.cnpx'): onfr = anzr[:-5] ryvs anzr.raqfjvgu('.vqk'): onfr = anzr[:-4] ryvs anzr.raqfjvgu('.cne2'): onfr = anzr[:-5] ryvs bf.cngu.rkvfgf(anzr + '.cnpx'): onfr = anzr ryfr: envfr Rkprcgvba('%f vf abg n cnpx svyr!' % anzr) (qve,ynfg) = bf.cngu.fcyvg(onfr) cne2_rkvfgf = bf.cngu.rkvfgf(onfr + '.cne2') vs cne2_rkvfgf naq bf.fgng(onfr + '.cne2').fg_fvmr == 0: cne2_rkvfgf = 0 flf.fgqbhg.syhfu() qroht('sfpx: purpxvat %f (%f)\a' % (ynfg, cne2_bx naq cne2_rkvfgf naq 'cne2' be 'tvg')) vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.wbof: ap = qb_cnpx(onfr, ynfg) pbqr = pbqr be ap pbhag += 1 ryfr: juvyr yra(bhgfgnaqvat) >= bcg.wbof: (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 cvq = bf.sbex() vs cvq: # cnerag bhgfgnaqvat[cvq] = 1 ryfr: # puvyq gel: flf.rkvg(qb_cnpx(onfr, ynfg)) rkprcg Rkprcgvba, r: ybt('rkprcgvba: %e\a' % r) flf.rkvg(99) juvyr yra(bhgfgnaqvat): (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.ireobfr naq vfggl: ybt('sfpx qbar. 
\a') flf.rkvg(pbqr) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg, trgbcg, fhocebprff, fvtany sebz ohc vzcbeg bcgvbaf, ffu sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc eonpxhc vaqrk ... ohc eonpxhc fnir ... ohc eonpxhc fcyvg ... """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc', bcgfcrp, bcgshap=trgbcg.trgbcg) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) < 2: b.sngny('nethzragf rkcrpgrq') pynff FvtRkprcgvba(Rkprcgvba): qrs __vavg__(frys, fvtahz): frys.fvtahz = fvtahz Rkprcgvba.__vavg__(frys, 'fvtany %q erprvirq' % fvtahz) qrs unaqyre(fvtahz, senzr): envfr FvtRkprcgvba(fvtahz) fvtany.fvtany(fvtany.FVTGREZ, unaqyre) fvtany.fvtany(fvtany.FVTVAG, unaqyre) fc = Abar c = Abar erg = 99 gel: ubfganzr = rkgen[0] neti = rkgen[1:] c = ffu.pbaarpg(ubfganzr, 'eonpxhc-freire') netif = '\0'.wbva(['ohc'] + neti) c.fgqva.jevgr(fgehpg.cnpx('!V', yra(netif)) + netif) c.fgqva.syhfu() znva_rkr = bf.raiveba.trg('OHC_ZNVA_RKR') be flf.neti[0] fc = fhocebprff.Cbcra([znva_rkr, 'freire'], fgqva=c.fgqbhg, fgqbhg=c.fgqva) c.fgqva.pybfr() c.fgqbhg.pybfr() svanyyl: juvyr 1: # vs jr trg n fvtany juvyr jnvgvat, jr unir gb xrrc jnvgvat, whfg # va pnfr bhe puvyq qbrfa'g qvr. 
gel: erg = c.jnvg() fc.jnvg() oernx rkprcg FvtRkprcgvba, r: ybt('\aohc eonpxhc: %f\a' % r) bf.xvyy(c.cvq, r.fvtahz) erg = 84 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc arjyvare """ b = bcgvbaf.Bcgvbaf('ohc arjyvare', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") e = er.pbzcvyr(e'([\e\a])') ynfgyra = 0 nyy = '' juvyr 1: y = e.fcyvg(nyy, 1) vs yra(y) <= 1: gel: o = bf.ernq(flf.fgqva.svyrab(), 4096) rkprcg XrlobneqVagreehcg: oernx vs abg o: oernx nyy += o ryfr: nffreg(yra(y) == 3) (yvar, fcyvgpune, nyy) = y #fcyvgpune = '\a' flf.fgqbhg.jevgr('%-*f%f' % (ynfgyra, yvar, fcyvgpune)) vs fcyvgpune == '\e': ynfgyra = yra(yvar) ryfr: ynfgyra = 0 flf.fgqbhg.syhfu() vs ynfgyra be nyy: flf.fgqbhg.jevgr('%-*f\a' % (ynfgyra, nyy)) #!/hfe/ova/rai clguba vzcbeg flf sebz ohc vzcbeg bcgvbaf, tvg, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc znetva """ b = bcgvbaf.Bcgvbaf('ohc znetva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") tvg.purpx_ercb_be_qvr() #tvg.vtaber_zvqk = 1 zv = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) ynfg = '\0'*20 ybatzngpu = 0 sbe v va zv: vs v == ynfg: pbagvahr #nffreg(fge(v) >= ynfg) cz = _unfufcyvg.ovgzngpu(ynfg, v) ybatzngpu = znk(ybatzngpu, cz) ynfg = v cevag ybatzngpu #!/hfe/ova/rai clguba sebz ohc vzcbeg bcgvbaf, qerphefr sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc qerphefr -- k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf d,dhvrg qba'g npghnyyl cevag svyranzrf cebsvyr eha haqre gur clguba cebsvyre """ b = bcgvbaf.Bcgvbaf('ohc qerphefr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar svyranzr rkcrpgrq") vg = qerphefr.erphefvir_qveyvfg(rkgen, bcg.kqri) vs bcg.cebsvyr: vzcbeg pCebsvyr qrs qb_vg(): sbe v va vg: cnff pCebsvyr.eha('qb_vg()') ryfr: vs bcg.dhvrg: sbe v va vg: cnff ryfr: sbe (anzr,fg) va vg: cevag anzr vs fnirq_reebef: 
ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc fcyvg [-gpo] [-a anzr] [--orapu] [svyranzrf...] -- e,erzbgr= erzbgr ercbfvgbel cngu o,oybof bhgchg n frevrf bs oybo vqf g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) A,abbc qba'g npghnyyl fnir gur qngn naljurer d,dhvrg qba'g cevag cebterff zrffntrf i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) pbcl whfg pbcl vachg gb bhgchg, unfufcyvggvat nybat gur jnl orapu cevag orapuznex gvzvatf gb fgqree znk-cnpx-fvmr= znkvzhz olgrf va n fvatyr cnpx znk-cnpx-bowrpgf= znkvzhz ahzore bs bowrpgf va n fvatyr cnpx snabhg= znkvzhz ahzore bs oybof va n fvatyr gerr """ b = bcgvbaf.Bcgvbaf('ohc fcyvg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr be bcg.abbc be bcg.pbcl): b.sngny("hfr bar be zber bs -o, -g, -p, -a, -A, --pbcl") vs (bcg.abbc be bcg.pbcl) naq (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny('-A vf vapbzcngvoyr jvgu -o, -g, -p, -a') vs bcg.ireobfr >= 2: tvg.ireobfr = bcg.ireobfr - 1 bcg.orapu = 1 vs bcg.znk_cnpx_fvmr: unfufcyvg.znk_cnpx_fvmr = cnefr_ahz(bcg.znk_cnpx_fvmr) vs bcg.znk_cnpx_bowrpgf: unfufcyvg.znk_cnpx_bowrpgf = cnefr_ahz(bcg.znk_cnpx_bowrpgf) vs bcg.snabhg: unfufcyvg.snabhg = cnefr_ahz(bcg.snabhg) vs bcg.oybof: unfufcyvg.snabhg = 0 vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") fgneg_gvzr = gvzr.gvzr() ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.abbc be bcg.pbcl: pyv = j = byqers = Abar ryvs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr 
naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() svyrf = rkgen naq (bcra(sa) sbe sa va rkgen) be [flf.fgqva] vs j: funyvfg = unfufcyvg.fcyvg_gb_funyvfg(j, svyrf) gerr = j.arj_gerr(funyvfg) ryfr: ynfg = 0 sbe (oybo, ovgf) va unfufcyvg.unfufcyvg_vgre(svyrf): unfufcyvg.gbgny_fcyvg += yra(oybo) vs bcg.pbcl: flf.fgqbhg.jevgr(fge(oybo)) zrtf = unfufcyvg.gbgny_fcyvg/1024/1024 vs abg bcg.dhvrg naq ynfg != zrtf: cebterff('%q Zolgrf ernq\e' % zrtf) ynfg = zrtf cebterff('%q Zolgrf ernq, qbar.\a' % zrtf) vs bcg.ireobfr: ybt('\a') vs bcg.oybof: sbe (zbqr,anzr,ova) va funyvfg: cevag ova.rapbqr('urk') vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fcyvg\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') vs j: j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() frpf = gvzr.gvzr() - fgneg_gvzr fvmr = unfufcyvg.gbgny_fcyvg vs bcg.orapu: ybt('\aohc: %.2sxolgrf va %.2s frpf = %.2s xolgrf/frp\a' % (fvmr/1024., frpf, fvmr/1024./frpf)) #!/hfe/ova/rai clguba vzcbeg flf, er, fgehpg, zznc sebz ohc vzcbeg tvg, bcgvbaf sebz ohc.urycref vzcbeg * qrs f_sebz_olgrf(olgrf): pyvfg = [pue(o) sbe o va olgrf] erghea ''.wbva(pyvfg) qrs ercbeg(pbhag): svryqf = ['IzFvmr', 'IzEFF', 'IzQngn', 'IzFgx'] q = {} sbe yvar va bcra('/cebp/frys/fgnghf').ernqyvarf(): y = er.fcyvg(e':\f*', yvar.fgevc(), 1) q[y[0]] = y[1] vs pbhag >= 0: r1 = pbhag svryqf = [q[x] sbe x va svryqf] ryfr: r1 = '' cevag ('%9f ' + ('%10f ' * yra(svryqf))) % ghcyr([r1] + svryqf) flf.fgqbhg.syhfu() bcgfcrp = """ ohc zrzgrfg [-a ryrzragf] [-p plpyrf] -- a,ahzore= ahzore bs bowrpgf cre plpyr p,plpyrf= ahzore bs plpyrf gb eha vtaber-zvqk vtaber .zvqk svyrf, hfr bayl .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zrzgrfg', bcgfcrp) (bcg, syntf, rkgen) = 
b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') tvg.vtaber_zvqk = bcg.vtaber_zvqk tvg.purpx_ercb_be_qvr() z = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) plpyrf = bcg.plpyrf be 100 ahzore = bcg.ahzore be 10000 ercbeg(-1) s = bcra('/qri/henaqbz') n = zznc.zznc(-1, 20) ercbeg(0) sbe p va kenatr(plpyrf): sbe a va kenatr(ahzore): o = s.ernq(3) vs 0: olgrf = yvfg(fgehpg.hacnpx('!OOO', o)) + [0]*17 olgrf[2] &= 0ks0 ova = fgehpg.cnpx('!20f', f_sebz_olgrf(olgrf)) ryfr: n[0:2] = o[0:2] n[2] = pue(beq(o[2]) & 0ks0) ova = fge(n[0:20]) #cevag ova.rapbqr('urk') z.rkvfgf(ova) ercbeg((p+1)*ahzore) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * qrs cevag_abqr(grkg, a): cersvk = '' vs bcg.unfu: cersvk += "%f " % a.unfu.rapbqr('urk') vs fgng.F_VFQVE(a.zbqr): cevag '%f%f/' % (cersvk, grkg) ryvs fgng.F_VFYAX(a.zbqr): cevag '%f%f@' % (cersvk, grkg) ryfr: cevag '%f%f' % (cersvk, grkg) bcgfcrp = """ ohc yf -- f,unfu fubj unfu sbe rnpu svyr """ b = bcgvbaf.Bcgvbaf('ohc yf', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) vs abg rkgen: rkgen = ['/'] erg = 0 sbe q va rkgen: gel: a = gbc.yerfbyir(q) vs fgng.F_VFQVE(a.zbqr): sbe fho va a: cevag_abqr(fho.anzr, fho) ryfr: cevag_abqr(q, a) rkprcg isf.AbqrReebe, r: ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er, fgng, ernqyvar, sazngpu sebz ohc vzcbeg bcgvbaf, tvg, fudhbgr, isf sebz ohc.urycref vzcbeg * qrs abqr_anzr(grkg, a): vs fgng.F_VFQVE(a.zbqr): erghea '%f/' % grkg ryvs fgng.F_VFYAX(a.zbqr): erghea '%f@' % grkg ryfr: erghea '%f' % grkg qrs qb_yf(cngu, a): y = [] vs fgng.F_VFQVE(a.zbqr): sbe fho va a: y.nccraq(abqr_anzr(fho.anzr, fho)) ryfr: y.nccraq(abqr_anzr(cngu, a)) cevag pbyhzangr(y, '') qrs jevgr_gb_svyr(vas, bhgs): sbe oybo va puhaxlernqre(vas): bhgs.jevgr(oybo) qrs vachgvgre(): vs bf.vfnggl(flf.fgqva.svyrab()): juvyr 1: gel: lvryq enj_vachg('ohc> ') rkprcg RBSReebe: oernx 
ryfr: sbe yvar va flf.fgqva: lvryq yvar qrs _pbzcyrgre_trg_fhof(yvar): (dglcr, ynfgjbeq) = fudhbgr.hasvavfurq_jbeq(yvar) (qve,anzr) = bf.cngu.fcyvg(ynfgjbeq) #ybt('\apbzcyrgre: %e %e %e\a' % (dglcr, ynfgjbeq, grkg)) a = cjq.erfbyir(qve) fhof = yvfg(svygre(ynzoqn k: k.anzr.fgnegfjvgu(anzr), a.fhof())) erghea (qve, anzr, dglcr, ynfgjbeq, fhof) _ynfg_yvar = Abar _ynfg_erf = Abar qrs pbzcyrgre(grkg, fgngr): tybony _ynfg_yvar tybony _ynfg_erf gel: yvar = ernqyvar.trg_yvar_ohssre()[:ernqyvar.trg_raqvqk()] vs _ynfg_yvar != yvar: _ynfg_erf = _pbzcyrgre_trg_fhof(yvar) _ynfg_yvar = yvar (qve, anzr, dglcr, ynfgjbeq, fhof) = _ynfg_erf vs fgngr < yra(fhof): fa = fhof[fgngr] fa1 = fa.erfbyir('') # qrers flzyvaxf shyyanzr = bf.cngu.wbva(qve, fa.anzr) vs fgng.F_VFQVE(fa1.zbqr): erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr+'/', grezvangr=Snyfr) ryfr: erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr, grezvangr=Gehr) + ' ' erghea grkg + erg rkprcg Rkprcgvba, r: ybt('\areebe va pbzcyrgvba: %f\a' % r) bcgfcrp = """ ohc sgc """ b = bcgvbaf.Bcgvbaf('ohc sgc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) cjq = gbc vs rkgen: yvarf = rkgen ryfr: ernqyvar.frg_pbzcyrgre_qryvzf(' \g\a\e/') ernqyvar.frg_pbzcyrgre(pbzcyrgre) ernqyvar.cnefr_naq_ovaq("gno: pbzcyrgr") yvarf = vachgvgre() sbe yvar va yvarf: vs abg yvar.fgevc(): pbagvahr jbeqf = [jbeq sbe (jbeqfgneg,jbeq) va fudhbgr.dhbgrfcyvg(yvar)] pzq = jbeqf[0].ybjre() #ybt('rkrphgr: %e %e\a' % (pzq, cnez)) gel: vs pzq == 'yf': sbe cnez va (jbeqf[1:] be ['.']): qb_yf(cnez, cjq.erfbyir(cnez)) ryvs pzq == 'pq': sbe cnez va jbeqf[1:]: cjq = cjq.erfbyir(cnez) ryvs pzq == 'cjq': cevag cjq.shyyanzr() ryvs pzq == 'png': sbe cnez va jbeqf[1:]: jevgr_gb_svyr(cjq.erfbyir(cnez).bcra(), flf.fgqbhg) ryvs pzq == 'trg': vs yra(jbeqf) abg va [2,3]: envfr Rkprcgvba('Hfntr: trg [ybpnyanzr]') eanzr = jbeqf[1] (qve,onfr) = bf.cngu.fcyvg(eanzr) yanzr = yra(jbeqf)>2 naq jbeqf[2] be onfr vas = 
cjq.erfbyir(eanzr).bcra() ybt('Fnivat %e\a' % yanzr) jevgr_gb_svyr(vas, bcra(yanzr, 'jo')) ryvs pzq == 'ztrg': sbe cnez va jbeqf[1:]: (qve,onfr) = bf.cngu.fcyvg(cnez) sbe a va cjq.erfbyir(qve).fhof(): vs sazngpu.sazngpu(a.anzr, onfr): gel: ybt('Fnivat %e\a' % a.anzr) vas = a.bcra() bhgs = bcra(a.anzr, 'jo') jevgr_gb_svyr(vas, bhgs) bhgs.pybfr() rkprcg Rkprcgvba, r: ybt(' reebe: %f\a' % r) ryvs pzq == 'uryc' be pzq == '?': ybt('Pbzznaqf: yf pq cjq png trg ztrg uryc dhvg\a') ryvs pzq == 'dhvg' be pzq == 'rkvg' be pzq == 'olr': oernx ryfr: envfr Rkprcgvba('ab fhpu pbzznaq %e' % pzq) rkprcg Rkprcgvba, r: ybt('reebe: %f\a' % r) #envfr #!/hfe/ova/rai clguba vzcbeg flf, zznc sebz ohc vzcbeg bcgvbaf, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc enaqbz [-F frrq] -- F,frrq= bcgvbany enaqbz ahzore frrq (qrsnhyg 1) s,sbepr cevag enaqbz qngn gb fgqbhg rira vs vg'f n ggl """ b = bcgvbaf.Bcgvbaf('ohc enaqbz', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") gbgny = cnefr_ahz(rkgen[0]) vs bcg.sbepr be (abg bf.vfnggl(1) naq abg ngbv(bf.raiveba.trg('OHC_SBEPR_GGL')) & 1): _unfufcyvg.jevgr_enaqbz(flf.fgqbhg.svyrab(), gbgny, bcg.frrq be 0) ryfr: ybt('reebe: abg jevgvat ovanel qngn gb n grezvany. 
Hfr -s gb sbepr.\a') flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc uryc """ b = bcgvbaf.Bcgvbaf('ohc uryc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) == 0: # gur jenccre cebtenz cebivqrf gur qrsnhyg hfntr fgevat bf.rkrpic(bf.raiveba['OHC_ZNVA_RKR'], ['ohc']) ryvs yra(rkgen) == 1: qbpanzr = (rkgen[0]=='ohc' naq 'ohc' be ('ohc-%f' % rkgen[0])) rkr = flf.neti[0] (rkrcngu, rkrsvyr) = bf.cngu.fcyvg(rkr) znacngu = bf.cngu.wbva(rkrcngu, '../Qbphzragngvba/' + qbpanzr + '.[1-9]') t = tybo.tybo(znacngu) vs t: bf.rkrpic('zna', ['zna', '-y', t[0]]) ryfr: bf.rkrpic('zna', ['zna', qbpanzr]) ryfr: b.sngny("rknpgyl bar pbzznaq anzr rkcrpgrq") #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng, reeab, shfr, er, gvzr, grzcsvyr sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * pynff Fgng(shfr.Fgng): qrs __vavg__(frys): frys.fg_zbqr = 0 frys.fg_vab = 0 frys.fg_qri = 0 frys.fg_ayvax = 0 frys.fg_hvq = 0 frys.fg_tvq = 0 frys.fg_fvmr = 0 frys.fg_ngvzr = 0 frys.fg_zgvzr = 0 frys.fg_pgvzr = 0 frys.fg_oybpxf = 0 frys.fg_oyxfvmr = 0 frys.fg_eqri = 0 pnpur = {} qrs pnpur_trg(gbc, cngu): cnegf = cngu.fcyvg('/') pnpur[('',)] = gbc p = Abar znk = yra(cnegf) #ybt('pnpur: %e\a' % pnpur.xrlf()) sbe v va enatr(znk): cer = cnegf[:znk-v] #ybt('pnpur gelvat: %e\a' % cer) p = pnpur.trg(ghcyr(cer)) vs p: erfg = cnegf[znk-v:] sbe e va erfg: #ybt('erfbyivat %e sebz %e\a' % (e, p.shyyanzr())) p = p.yerfbyir(e) xrl = ghcyr(cer + [e]) #ybt('fnivat: %e\a' % (xrl,)) pnpur[xrl] = p oernx nffreg(p) erghea p pynff OhcSf(shfr.Shfr): qrs __vavg__(frys, gbc): shfr.Shfr.__vavg__(frys) frys.gbc = gbc qrs trgngge(frys, cngu): ybt('--trgngge(%e)\a' % cngu) gel: abqr = pnpur_trg(frys.gbc, cngu) fg = Fgng() fg.fg_zbqr = abqr.zbqr fg.fg_ayvax = abqr.ayvaxf() fg.fg_fvmr = abqr.fvmr() fg.fg_zgvzr = abqr.zgvzr fg.fg_pgvzr = abqr.pgvzr fg.fg_ngvzr = abqr.ngvzr erghea fg rkprcg isf.AbFhpuSvyr: erghea -reeab.RABRAG qrs ernqqve(frys, cngu, bssfrg): 
ybt('--ernqqve(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) lvryq shfr.Qveragel('.') lvryq shfr.Qveragel('..') sbe fho va abqr.fhof(): lvryq shfr.Qveragel(fho.anzr) qrs ernqyvax(frys, cngu): ybt('--ernqyvax(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) erghea abqr.ernqyvax() qrs bcra(frys, cngu, syntf): ybt('--bcra(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) nppzbqr = bf.B_EQBAYL | bf.B_JEBAYL | bf.B_EQJE vs (syntf & nppzbqr) != bf.B_EQBAYL: erghea -reeab.RNPPRF abqr.bcra() qrs eryrnfr(frys, cngu, syntf): ybt('--eryrnfr(%e)\a' % cngu) qrs ernq(frys, cngu, fvmr, bssfrg): ybt('--ernq(%e)\a' % cngu) a = pnpur_trg(frys.gbc, cngu) b = a.bcra() b.frrx(bssfrg) erghea b.ernq(fvmr) vs abg unfngge(shfr, '__irefvba__'): envfr EhagvzrReebe, "lbhe shfr zbqhyr vf gbb byq sbe shfr.__irefvba__" shfr.shfr_clguba_ncv = (0, 2) bcgfcrp = """ ohc shfr [-q] [-s] -- q,qroht vapernfr qroht yriry s,sbertebhaq eha va sbertebhaq """ b = bcgvbaf.Bcgvbaf('ohc shfr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) s = OhcSf(gbc) s.shfr_netf.zbhagcbvag = rkgen[0] vs bcg.qroht: s.shfr_netf.nqq('qroht') vs bcg.sbertebhaq: s.shfr_netf.frgzbq('sbertebhaq') cevag s.zhygvguernqrq s.zhygvguernqrq = Snyfr s.znva() #!/hfe/ova/rai clguba sebz ohc vzcbeg tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ [OHC_QVE=...] ohc vavg [-e ubfg:cngu] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc vavg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") vs bcg.erzbgr: tvg.vavg_ercb() # ybpny ercb tvg.purpx_ercb_be_qvr() pyv = pyvrag.Pyvrag(bcg.erzbgr, perngr=Gehr) pyv.pybfr() ryfr: tvg.vavg_ercb() #!/hfe/ova/rai clguba vzcbeg flf, zngu, fgehpg, tybo sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * CNTR_FVMR=4096 FUN_CRE_CNTR=CNTR_FVMR/200. 
qrs zretr(vqkyvfg, ovgf, gnoyr): pbhag = 0 sbe r va tvg.vqkzretr(vqkyvfg): pbhag += 1 cersvk = tvg.rkgenpg_ovgf(r, ovgf) gnoyr[cersvk] = pbhag lvryq r qrs qb_zvqk(bhgqve, bhgsvyranzr, vasvyranzrf): vs abg bhgsvyranzr: nffreg(bhgqve) fhz = Fun1('\0'.wbva(vasvyranzrf)).urkqvtrfg() bhgsvyranzr = '%f/zvqk-%f.zvqk' % (bhgqve, fhz) vac = [] gbgny = 0 sbe anzr va vasvyranzrf: vk = tvg.CnpxVqk(anzr) vac.nccraq(vk) gbgny += yra(vk) ybt('Zretvat %q vaqrkrf (%q bowrpgf).\a' % (yra(vasvyranzrf), gbgny)) vs (abg bcg.sbepr naq (gbgny < 1024 naq yra(vasvyranzrf) < 3)) \ be (bcg.sbepr naq abg gbgny): ybt('zvqk: abguvat gb qb.\a') erghea cntrf = vag(gbgny/FUN_CRE_CNTR) be 1 ovgf = vag(zngu.prvy(zngu.ybt(cntrf, 2))) ragevrf = 2**ovgf ybt('Gnoyr fvmr: %q (%q ovgf)\a' % (ragevrf*4, ovgf)) gnoyr = [0]*ragevrf gel: bf.hayvax(bhgsvyranzr) rkprcg BFReebe: cnff s = bcra(bhgsvyranzr + '.gzc', 'j+') s.jevgr('ZVQK\0\0\0\2') s.jevgr(fgehpg.cnpx('!V', ovgf)) nffreg(s.gryy() == 12) s.jevgr('\0'*4*ragevrf) sbe r va zretr(vac, ovgf, gnoyr): s.jevgr(r) s.jevgr('\0'.wbva(bf.cngu.onfranzr(c) sbe c va vasvyranzrf)) s.frrx(12) s.jevgr(fgehpg.cnpx('!%qV' % ragevrf, *gnoyr)) s.pybfr() bf.eranzr(bhgsvyranzr + '.gzc', bhgsvyranzr) # guvf vf whfg sbe grfgvat vs 0: c = tvg.CnpxZvqk(bhgsvyranzr) nffreg(yra(c.vqkanzrf) == yra(vasvyranzrf)) cevag c.vqkanzrf nffreg(yra(c) == gbgny) cv = vgre(c) sbe v va zretr(vac, gbgny, ovgf, gnoyr): nffreg(v == cv.arkg()) nffreg(c.rkvfgf(v)) cevag bhgsvyranzr bcgfcrp = """ ohc zvqk [bcgvbaf...] 
-- b,bhgchg= bhgchg zvqk svyranzr (qrsnhyg: nhgb-trarengrq) n,nhgb nhgbzngvpnyyl perngr .zvqk sebz nal havaqrkrq .vqk svyrf s,sbepr nhgbzngvpnyyl perngr .zvqk sebz *nyy* .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zvqk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen naq (bcg.nhgb be bcg.sbepr): b.sngny("lbh pna'g hfr -s/-n naq nyfb cebivqr svyranzrf") tvg.purpx_ercb_be_qvr() vs rkgen: qb_zvqk(tvg.ercb('bowrpgf/cnpx'), bcg.bhgchg, rkgen) ryvs bcg.nhgb be bcg.sbepr: cnguf = [tvg.ercb('bowrpgf/cnpx')] cnguf += tybo.tybo(tvg.ercb('vaqrk-pnpur/*/.')) sbe cngu va cnguf: ybt('zvqk: fpnaavat %f\a' % cngu) vs bcg.sbepr: qb_zvqk(cngu, bcg.bhgchg, tybo.tybo('%f/*.vqk' % cngu)) ryvs bcg.nhgb: z = tvg.CnpxVqkYvfg(cngu) arrqrq = {} sbe cnpx va z.cnpxf: # bayl .vqk svyrf jvgubhg n .zvqk ner bcra vs cnpx.anzr.raqfjvgu('.vqk'): arrqrq[cnpx.anzr] = 1 qry z qb_zvqk(cngu, bcg.bhgchg, arrqrq.xrlf()) ybt('\a') ryfr: b.sngny("lbh zhfg hfr -s be -n be cebivqr vachg svyranzrf") #!/hfe/ova/rai clguba vzcbeg flf, bf, enaqbz sebz ohc vzcbeg bcgvbaf sebz ohc.urycref vzcbeg * qrs enaqoybpx(a): y = [] sbe v va kenatr(a): y.nccraq(pue(enaqbz.enaqenatr(0,256))) erghea ''.wbva(y) bcgfcrp = """ ohc qnzntr [-a pbhag] [-f znkfvmr] [-F frrq] -- JNEAVAT: GUVF PBZZNAQ VF RKGERZRYL QNATREBHF a,ahz= ahzore bs oybpxf gb qnzntr f,fvmr= znkvzhz fvmr bs rnpu qnzntrq oybpx creprag= znkvzhz fvmr bs rnpu qnzntrq oybpx (nf n creprag bs ragver svyr) rdhny fcernq qnzntr rirayl guebhtubhg gur svyr F,frrq= enaqbz ahzore frrq (sbe ercrngnoyr grfgf) """ b = bcgvbaf.Bcgvbaf('ohc qnzntr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg rkgen: b.sngny('svyranzrf rkcrpgrq') vs bcg.frrq != Abar: enaqbz.frrq(bcg.frrq) sbe anzr va rkgen: ybt('Qnzntvat "%f"...\a' % anzr) s = bcra(anzr, 'e+o') fg = bf.sfgng(s.svyrab()) fvmr = fg.fg_fvmr vs bcg.creprag be bcg.fvmr: zf1 = vag(sybng(bcg.creprag be 0)/100.0*fvmr) be fvmr zf2 = bcg.fvmr be fvmr znkfvmr = zva(zf1, zf2) ryfr: znkfvmr = 1 puhaxf = bcg.ahz be 10 
puhaxfvmr = fvmr/puhaxf sbe e va enatr(puhaxf): fm = enaqbz.enaqenatr(1, znkfvmr+1) vs fm > fvmr: fm = fvmr vs bcg.rdhny: bsf = e*puhaxfvmr ryfr: bsf = enaqbz.enaqenatr(0, fvmr - fm + 1) ybt(' %6q olgrf ng %q\a' % (fm, bsf)) s.frrx(bsf) s.jevgr(enaqoybpx(fm)) s.pybfr() #!/hfe/ova/rai clguba vzcbeg flf, fgehpg, zznc sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * fhfcraqrq_j = Abar qrs vavg_qve(pbaa, net): tvg.vavg_ercb(net) ybt('ohc freire: ohcqve vavgvnyvmrq: %e\a' % tvg.ercbqve) pbaa.bx() qrs frg_qve(pbaa, net): tvg.purpx_ercb_be_qvr(net) ybt('ohc freire: ohcqve vf %e\a' % tvg.ercbqve) pbaa.bx() qrs yvfg_vaqrkrf(pbaa, whax): tvg.purpx_ercb_be_qvr() sbe s va bf.yvfgqve(tvg.ercb('bowrpgf/cnpx')): vs s.raqfjvgu('.vqk'): pbaa.jevgr('%f\a' % s) pbaa.bx() qrs fraq_vaqrk(pbaa, anzr): tvg.purpx_ercb_be_qvr() nffreg(anzr.svaq('/') < 0) nffreg(anzr.raqfjvgu('.vqk')) vqk = tvg.CnpxVqk(tvg.ercb('bowrpgf/cnpx/%f' % anzr)) pbaa.jevgr(fgehpg.cnpx('!V', yra(vqk.znc))) pbaa.jevgr(vqk.znc) pbaa.bx() qrs erprvir_bowrpgf(pbaa, whax): tybony fhfcraqrq_j tvg.purpx_ercb_be_qvr() fhttrfgrq = {} vs fhfcraqrq_j: j = fhfcraqrq_j fhfcraqrq_j = Abar ryfr: j = tvg.CnpxJevgre() juvyr 1: af = pbaa.ernq(4) vs abg af: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq yratgu urnqre, tbg RBS\a') a = fgehpg.hacnpx('!V', af)[0] #ybt('rkcrpgvat %q olgrf\a' % a) vs abg a: ybt('ohc freire: erprvirq %q bowrpg%f.\a' % (j.pbhag, j.pbhag!=1 naq "f" be '')) shyycngu = j.pybfr() vs shyycngu: (qve, anzr) = bf.cngu.fcyvg(shyycngu) pbaa.jevgr('%f.vqk\a' % anzr) pbaa.bx() erghea ryvs a == 0kssssssss: ybt('ohc freire: erprvir-bowrpgf fhfcraqrq.\a') fhfcraqrq_j = j pbaa.bx() erghea ohs = pbaa.ernq(a) # bowrpg fvmrf va ohc ner ernfbanoyl fznyy #ybt('ernq %q olgrf\a' % a) vs yra(ohs) < a: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq %q olgrf, tbg %q\a' % (a, yra(ohs))) (glcr, pbagrag) = tvg._qrpbqr_cnpxbow(ohs) fun = tvg.pnyp_unfu(glcr, pbagrag) byqcnpx = j.rkvfgf(fun) # SVKZR: jr bayl fhttrfg n fvatyr 
vaqrk cre plpyr, orpnhfr gur pyvrag # vf pheeragyl qhzo gb qbjaybnq zber guna bar cre plpyr naljnl. # Npghnyyl jr fubhyq svk gur pyvrag, ohg guvf vf n zvabe bcgvzvmngvba # ba gur freire fvqr. vs abg fhttrfgrq naq \ byqcnpx naq (byqcnpx == Gehr be byqcnpx.raqfjvgu('.zvqk')): # SVKZR: jr fubhyqa'g ernyyl unir gb xabj nobhg zvqk svyrf # ng guvf ynlre. Ohg rkvfgf() ba n zvqk qbrfa'g erghea gur # cnpxanzr (fvapr vg qbrfa'g xabj)... cebonoyl jr fubhyq whfg # svk gung qrsvpvrapl bs zvqk svyrf riraghnyyl, nygubhtu vg'yy # znxr gur svyrf ovttre. Guvf zrgubq vf pregnvayl abg irel # rssvpvrag. j.bowpnpur.erserfu(fxvc_zvqk = Gehr) byqcnpx = j.bowpnpur.rkvfgf(fun) ybt('arj fhttrfgvba: %e\a' % byqcnpx) nffreg(byqcnpx) nffreg(byqcnpx != Gehr) nffreg(abg byqcnpx.raqfjvgu('.zvqk')) j.bowpnpur.erserfu(fxvc_zvqk = Snyfr) vs abg fhttrfgrq naq byqcnpx: nffreg(byqcnpx.raqfjvgu('.vqk')) (qve,anzr) = bf.cngu.fcyvg(byqcnpx) vs abg (anzr va fhttrfgrq): ybt("ohc freire: fhttrfgvat vaqrk %f\a" % anzr) pbaa.jevgr('vaqrk %f\a' % anzr) fhttrfgrq[anzr] = 1 ryfr: j._enj_jevgr([ohs]) # ABGERNPURQ qrs ernq_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() e = tvg.ernq_ers(ersanzr) pbaa.jevgr('%f\a' % (e be '').rapbqr('urk')) pbaa.bx() qrs hcqngr_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() arjiny = pbaa.ernqyvar().fgevc() byqiny = pbaa.ernqyvar().fgevc() tvg.hcqngr_ers(ersanzr, arjiny.qrpbqr('urk'), byqiny.qrpbqr('urk')) pbaa.bx() qrs png(pbaa, vq): tvg.purpx_ercb_be_qvr() gel: sbe oybo va tvg.png(vq): pbaa.jevgr(fgehpg.cnpx('!V', yra(oybo))) pbaa.jevgr(oybo) rkprcg XrlReebe, r: ybt('freire: reebe: %f\a' % r) pbaa.jevgr('\0\0\0\0') pbaa.reebe(r) ryfr: pbaa.jevgr('\0\0\0\0') pbaa.bx() bcgfcrp = """ ohc freire """ b = bcgvbaf.Bcgvbaf('ohc freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') ybt('ohc freire: ernqvat sebz fgqva.\a') pbzznaqf = { 'vavg-qve': vavg_qve, 'frg-qve': frg_qve, 'yvfg-vaqrkrf': yvfg_vaqrkrf, 'fraq-vaqrk': fraq_vaqrk, 
'erprvir-bowrpgf': erprvir_bowrpgf, 'ernq-ers': ernq_ers, 'hcqngr-ers': hcqngr_ers, 'png': png, } # SVKZR: guvf cebgbpby vf gbgnyyl ynzr naq abg ng nyy shgher-cebbs. # (Rfcrpvnyyl fvapr jr nobeg pbzcyrgryl nf fbba nf *nalguvat* onq unccraf) pbaa = Pbaa(flf.fgqva, flf.fgqbhg) ye = yvarernqre(pbaa) sbe _yvar va ye: yvar = _yvar.fgevc() vs abg yvar: pbagvahr ybt('ohc freire: pbzznaq: %e\a' % yvar) jbeqf = yvar.fcyvg(' ', 1) pzq = jbeqf[0] erfg = yra(jbeqf)>1 naq jbeqf[1] be '' vs pzq == 'dhvg': oernx ryfr: pzq = pbzznaqf.trg(pzq) vs pzq: pzq(pbaa, erfg) ryfr: envfr Rkprcgvba('haxabja freire pbzznaq: %e\a' % yvar) ybt('ohc freire: qbar\a') #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc wbva [-e ubfg:cngu] [ersf be unfurf...] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc wbva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg rkgen: rkgen = yvarernqre(flf.fgqva) erg = 0 vs bcg.erzbgr: pyv = pyvrag.Pyvrag(bcg.erzbgr) png = pyv.png ryfr: pc = tvg.PngCvcr() png = pc.wbva sbe vq va rkgen: gel: sbe oybo va png(vq): flf.fgqbhg.jevgr(oybo) rkprcg XrlReebe, r: flf.fgqbhg.syhfu() ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, er, reeab, fgng, gvzr, zngu sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, vaqrk, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc fnir [-gp] [-a anzr] -- e,erzbgr= erzbgr ercbfvgbel cngu g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) d,dhvrg qba'g fubj cebterff zrgre fznyyre= bayl onpx hc svyrf fznyyre guna a olgrf """ b = bcgvbaf.Bcgvbaf('ohc fnir', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny("hfr bar be zber bs -g, -p, -a") vs abg rkgen: b.sngny("ab svyranzrf 
tvira") bcg.cebterff = (vfggl naq abg bcg.dhvrg) bcg.fznyyre = cnefr_ahz(bcg.fznyyre be 0) vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() unaqyr_pgey_p() qrs rngfynfu(qve): vs qve.raqfjvgu('/'): erghea qve[:-1] ryfr: erghea qve cnegf = [''] funyvfgf = [[]] qrs _chfu(cneg): nffreg(cneg) cnegf.nccraq(cneg) funyvfgf.nccraq([]) qrs _cbc(sbepr_gerr): nffreg(yra(cnegf) >= 1) cneg = cnegf.cbc() funyvfg = funyvfgf.cbc() gerr = sbepr_gerr be j.arj_gerr(funyvfg) vs funyvfgf: funyvfgf[-1].nccraq(('40000', cneg, gerr)) ryfr: # guvf jnf gur gbcyriry, fb chg vg onpx sbe fnavgl funyvfgf.nccraq(funyvfg) erghea gerr ynfgerznva = Abar qrs cebterff_ercbeg(a): tybony pbhag, fhopbhag, ynfgerznva fhopbhag += a pp = pbhag + fhopbhag cpg = gbgny naq (pp*100.0/gbgny) be 0 abj = gvzr.gvzr() ryncfrq = abj - gfgneg xcf = ryncfrq naq vag(pp/1024./ryncfrq) xcf_senp = 10 ** vag(zngu.ybt(xcf+1, 10) - 1) xcf = vag(xcf/xcf_senp)*xcf_senp vs pp: erznva = ryncfrq*1.0/pp * (gbgny-pp) ryfr: erznva = 0.0 vs (ynfgerznva naq (erznva > ynfgerznva) naq ((erznva - ynfgerznva)/ynfgerznva < 0.05)): erznva = ynfgerznva ryfr: ynfgerznva = erznva ubhef = vag(erznva/60/60) zvaf = vag(erznva/60 - ubhef*60) frpf = vag(erznva - ubhef*60*60 - zvaf*60) vs ryncfrq < 30: erznvafge = '' xcffge = '' ryfr: xcffge = '%qx/f' % xcf vs ubhef: erznvafge = '%qu%qz' % (ubhef, zvaf) ryvs zvaf: erznvafge = '%qz%q' % (zvaf, frpf) ryfr: erznvafge = '%qf' % frpf cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf) %f %f\e' % (cpg, pp/1024, gbgny/1024, spbhag, sgbgny, erznvafge, xcffge)) e = vaqrk.Ernqre(tvg.ercb('ohcvaqrk')) qrs nyernql_fnirq(rag): erghea rag.vf_inyvq() naq 
j.rkvfgf(rag.fun) naq rag.fun qrs jnagerphefr_cer(rag): erghea abg nyernql_fnirq(rag) qrs jnagerphefr_qhevat(rag): erghea abg nyernql_fnirq(rag) be rag.fun_zvffvat() gbgny = sgbgny = 0 vs bcg.cebterff: sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_cer): vs abg (sgbgny % 10024): cebterff('Ernqvat vaqrk: %q\e' % sgbgny) rkvfgf = rag.rkvfgf() unfuinyvq = nyernql_fnirq(rag) rag.frg_fun_zvffvat(abg unfuinyvq) vs abg bcg.fznyyre be rag.fvmr < bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: gbgny += rag.fvmr sgbgny += 1 cebterff('Ernqvat vaqrk: %q, qbar.\a' % sgbgny) unfufcyvg.cebterff_pnyyonpx = cebterff_ercbeg gfgneg = gvzr.gvzr() pbhag = fhopbhag = spbhag = 0 ynfgfxvc_anzr = Abar ynfgqve = '' sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_qhevat): (qve, svyr) = bf.cngu.fcyvg(rag.anzr) rkvfgf = (rag.syntf & vaqrk.VK_RKVFGF) unfuinyvq = nyernql_fnirq(rag) jnfzvffvat = rag.fun_zvffvat() byqfvmr = rag.fvmr vs bcg.ireobfr: vs abg rkvfgf: fgnghf = 'Q' ryvs abg unfuinyvq: vs rag.fun == vaqrk.RZCGL_FUN: fgnghf = 'N' ryfr: fgnghf = 'Z' ryfr: fgnghf = ' ' vs bcg.ireobfr >= 2: ybt('%f %-70f\a' % (fgnghf, rag.anzr)) ryvs abg fgng.F_VFQVE(rag.zbqr) naq ynfgqve != qve: vs abg ynfgqve.fgnegfjvgu(qve): ybt('%f %-70f\a' % (fgnghf, bf.cngu.wbva(qve, ''))) ynfgqve = qve vs bcg.cebterff: cebterff_ercbeg(0) spbhag += 1 vs abg rkvfgf: pbagvahr vs bcg.fznyyre naq rag.fvmr >= bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: nqq_reebe('fxvccvat ynetr svyr "%f"' % rag.anzr) ynfgfxvc_anzr = rag.anzr pbagvahr nffreg(qve.fgnegfjvgu('/')) qvec = qve.fcyvg('/') juvyr cnegf > qvec: _cbc(sbepr_gerr = Abar) vs qve != '/': sbe cneg va qvec[yra(cnegf):]: _chfu(cneg) vs abg svyr: # ab svyranzr cbegvba zrnaf guvf vf n fhoqve. Ohg # fho/cneragqverpgbevrf nyernql unaqyrq va gur cbc/chfu() cneg nobir. 
byqgerr = nyernql_fnirq(rag) # znl or Abar arjgerr = _cbc(sbepr_gerr = byqgerr) vs abg byqgerr: vs ynfgfxvc_anzr naq ynfgfxvc_anzr.fgnegfjvgu(rag.anzr): rag.vainyvqngr() ryfr: rag.inyvqngr(040000, arjgerr) rag.ercnpx() vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr pbagvahr # vg'f abg n qverpgbel vq = Abar vs unfuinyvq: zbqr = '%b' % rag.tvgzbqr vq = rag.fun funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) ryfr: vs fgng.F_VFERT(rag.zbqr): gel: s = unfufcyvg.bcra_abngvzr(rag.anzr) rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = unfufcyvg.fcyvg_gb_oybo_be_gerr(j, [s]) ryfr: vs fgng.F_VFQVE(rag.zbqr): nffreg(0) # unaqyrq nobir ryvs fgng.F_VFYAX(rag.zbqr): gel: ey = bf.ernqyvax(rag.anzr) rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = ('120000', j.arj_oybo(ey)) ryfr: nqq_reebe(Rkprcgvba('fxvccvat fcrpvny svyr "%f"' % rag.anzr)) ynfgfxvc_anzr = rag.anzr vs vq: rag.inyvqngr(vag(zbqr, 8), vq) rag.ercnpx() funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr fhopbhag = 0 vs bcg.cebterff: cpg = gbgny naq pbhag*100.0/gbgny be 100 cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf), qbar. 
\a' % (cpg, pbhag/1024, gbgny/1024, spbhag, sgbgny)) juvyr yra(cnegf) > 1: _cbc(sbepr_gerr = Abar) nffreg(yra(funyvfgf) == 1) gerr = j.arj_gerr(funyvfgf[-1]) vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fnir\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq juvyr fnivat.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc gvpx """ b = bcgvbaf.Bcgvbaf('ohc gvpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") g = gvzr.gvzr() gyrsg = 1 - (g - vag(g)) gvzr.fyrrc(gyrsg) #!/hfe/ova/rai clguba vzcbeg bf, flf, fgng, gvzr sebz ohc vzcbeg bcgvbaf, tvg, vaqrk, qerphefr sebz ohc.urycref vzcbeg * qrs zretr_vaqrkrf(bhg, e1, e2): sbe r va vaqrk.ZretrVgre([e1, e2]): # SVKZR: fubhyqa'g jr erzbir qryrgrq ragevrf riraghnyyl? Jura? 
bhg.nqq_vkragel(r) pynff VgreUrycre: qrs __vavg__(frys, y): frys.v = vgre(y) frys.phe = Abar frys.arkg() qrs arkg(frys): gel: frys.phe = frys.v.arkg() rkprcg FgbcVgrengvba: frys.phe = Abar erghea frys.phe qrs purpx_vaqrk(ernqre): gel: ybt('purpx: purpxvat sbejneq vgrengvba...\a') r = Abar q = {} sbe r va ernqre.sbejneq_vgre(): vs r.puvyqera_a: vs bcg.ireobfr: ybt('%08k+%-4q %e\a' % (r.puvyqera_bsf, r.puvyqera_a, r.anzr)) nffreg(r.puvyqera_bsf) nffreg(r.anzr.raqfjvgu('/')) nffreg(abg q.trg(r.puvyqera_bsf)) q[r.puvyqera_bsf] = 1 vs r.syntf & vaqrk.VK_UNFUINYVQ: nffreg(r.fun != vaqrk.RZCGL_FUN) nffreg(r.tvgzbqr) nffreg(abg r be r.anzr == '/') # ynfg ragel vf *nyjnlf* / ybt('purpx: purpxvat abezny vgrengvba...\a') ynfg = Abar sbe r va ernqre: vs ynfg: nffreg(ynfg > r.anzr) ynfg = r.anzr rkprcg: ybt('vaqrk reebe! ng %e\a' % r) envfr ybt('purpx: cnffrq.\a') qrs hcqngr_vaqrk(gbc): ev = vaqrk.Ernqre(vaqrksvyr) jv = vaqrk.Jevgre(vaqrksvyr) evt = VgreUrycre(ev.vgre(anzr=gbc)) gfgneg = vag(gvzr.gvzr()) unfutra = Abar vs bcg.snxr_inyvq: qrs unfutra(anzr): erghea (0100644, vaqrk.SNXR_FUN) gbgny = 0 sbe (cngu,cfg) va qerphefr.erphefvir_qveyvfg([gbc], kqri=bcg.kqri): vs bcg.ireobfr>=2 be (bcg.ireobfr==1 naq fgng.F_VFQVE(cfg.fg_zbqr)): flf.fgqbhg.jevgr('%f\a' % cngu) flf.fgqbhg.syhfu() cebterff('Vaqrkvat: %q\e' % gbgny) ryvs abg (gbgny % 128): cebterff('Vaqrkvat: %q\e' % gbgny) gbgny += 1 juvyr evt.phe naq evt.phe.anzr > cngu: # qryrgrq cnguf vs evt.phe.rkvfgf(): evt.phe.frg_qryrgrq() evt.phe.ercnpx() evt.arkg() vs evt.phe naq evt.phe.anzr == cngu: # cnguf gung nyernql rkvfgrq vs cfg: evt.phe.sebz_fgng(cfg, gfgneg) vs abg (evt.phe.syntf & vaqrk.VK_UNFUINYVQ): vs unfutra: (evt.phe.tvgzbqr, evt.phe.fun) = unfutra(cngu) evt.phe.syntf |= vaqrk.VK_UNFUINYVQ vs bcg.snxr_vainyvq: evt.phe.vainyvqngr() evt.phe.ercnpx() evt.arkg() ryfr: # arj cnguf jv.nqq(cngu, cfg, unfutra = unfutra) cebterff('Vaqrkvat: %q, qbar.\a' % gbgny) vs ev.rkvfgf(): ev.fnir() jv.syhfu() vs jv.pbhag: je = 
jv.arj_ernqre() vs bcg.purpx: ybt('purpx: orsber zretvat: byqsvyr\a') purpx_vaqrk(ev) ybt('purpx: orsber zretvat: arjsvyr\a') purpx_vaqrk(je) zv = vaqrk.Jevgre(vaqrksvyr) zretr_vaqrkrf(zv, ev, je) ev.pybfr() zv.pybfr() je.pybfr() jv.nobeg() ryfr: jv.pybfr() bcgfcrp = """ ohc vaqrk <-c|z|h> [bcgvbaf...] -- c,cevag cevag gur vaqrk ragevrf sbe gur tvira anzrf (nyfb jbexf jvgu -h) z,zbqvsvrq cevag bayl nqqrq/qryrgrq/zbqvsvrq svyrf (vzcyvrf -c) f,fgnghf cevag rnpu svyranzr jvgu n fgnghf pune (N/Z/Q) (vzcyvrf -c) U,unfu cevag gur unfu sbe rnpu bowrpg arkg gb vgf anzr (vzcyvrf -c) y,ybat cevag zber vasbezngvba nobhg rnpu svyr h,hcqngr (erphefviryl) hcqngr gur vaqrk ragevrf sbe gur tvira svyranzrf k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf snxr-inyvq znex nyy vaqrk ragevrf nf hc-gb-qngr rira vs gurl nera'g snxr-vainyvq znex nyy vaqrk ragevrf nf vainyvq purpx pnershyyl purpx vaqrk svyr vagrtevgl s,vaqrksvyr= gur anzr bs gur vaqrk svyr (qrsnhyg 'vaqrk') i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) """ b = bcgvbaf.Bcgvbaf('ohc vaqrk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg (bcg.zbqvsvrq be bcg['cevag'] be bcg.fgnghf be bcg.hcqngr be bcg.purpx): b.sngny('fhccyl bar be zber bs -c, -f, -z, -h, be --purpx') vs (bcg.snxr_inyvq be bcg.snxr_vainyvq) naq abg bcg.hcqngr: b.sngny('--snxr-{va,}inyvq ner zrnavatyrff jvgubhg -h') vs bcg.snxr_inyvq naq bcg.snxr_vainyvq: b.sngny('--snxr-inyvq vf vapbzcngvoyr jvgu --snxr-vainyvq') tvg.purpx_ercb_be_qvr() vaqrksvyr = bcg.vaqrksvyr be tvg.ercb('ohcvaqrk') unaqyr_pgey_p() vs bcg.purpx: ybt('purpx: fgnegvat vavgvny purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) cnguf = vaqrk.erqhpr_cnguf(rkgen) vs bcg.hcqngr: vs abg cnguf: b.sngny('hcqngr (-h) erdhrfgrq ohg ab cnguf tvira') sbe (ec,cngu) va cnguf: hcqngr_vaqrk(ec) vs bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq: sbe (anzr, rag) va vaqrk.Ernqre(vaqrksvyr).svygre(rkgen be ['']): vs (bcg.zbqvsvrq naq (rag.vf_inyvq() be rag.vf_qryrgrq() be abg 
rag.zbqr)): pbagvahr yvar = '' vs bcg.fgnghf: vs rag.vf_qryrgrq(): yvar += 'Q ' ryvs abg rag.vf_inyvq(): vs rag.fun == vaqrk.RZCGL_FUN: yvar += 'N ' ryfr: yvar += 'Z ' ryfr: yvar += ' ' vs bcg.unfu: yvar += rag.fun.rapbqr('urk') + ' ' vs bcg.ybat: yvar += "%7f %7f " % (bpg(rag.zbqr), bpg(rag.tvgzbqr)) cevag yvar + (anzr be './') vs bcg.purpx naq (bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq be bcg.hcqngr): ybt('purpx: fgnegvat svany purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg sebz ohc vzcbeg bcgvbaf, urycref bcgfcrp = """ ohc eonpxhc-freire -- Guvf pbzznaq vf abg vagraqrq gb or eha znahnyyl. """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc-freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') # trg gur fhopbzznaq'f neti. # Abeznyyl jr pbhyq whfg cnff guvf ba gur pbzznaq yvar, ohg fvapr jr'yy bsgra # or trggvat pnyyrq ba gur bgure raq bs na ffu cvcr, juvpu graqf gb znatyr # neti (ol fraqvat vg ivn gur furyy), guvf jnl vf zhpu fnsre. ohs = flf.fgqva.ernq(4) fm = fgehpg.hacnpx('!V', ohs)[0] nffreg(fm > 0) nffreg(fm < 1000000) ohs = flf.fgqva.ernq(fm) nffreg(yra(ohs) == fm) neti = ohs.fcyvg('\0') # fgqva/fgqbhg ner fhccbfrqyl pbaarpgrq gb 'ohc freire' gung gur pnyyre # fgnegrq sbe hf (bsgra ba gur bgure raq bs na ffu ghaary), fb jr qba'g jnag # gb zvfhfr gurz. Zbir gurz bhg bs gur jnl, gura ercynpr fgqbhg jvgu # n cbvagre gb fgqree va pnfr bhe fhopbzznaq jnagf gb qb fbzrguvat jvgu vg. # # Vg zvtug or avpr gb qb gur fnzr jvgu fgqva, ohg zl rkcrevzragf fubjrq gung # ffu frrzf gb znxr vgf puvyq'f fgqree n ernqnoyr-ohg-arire-ernqf-nalguvat # fbpxrg. Gurl ernyyl fubhyq unir hfrq fuhgqbja(FUHG_JE) ba gur bgure raq # bs vg, ohg cebonoyl qvqa'g. Naljnl, vg'f gbb zrffl, fb yrg'f whfg znxr fher # nalbar ernqvat sebz fgqva vf qvfnccbvagrq. 
# # (Lbh pna'g whfg yrnir fgqva/fgqbhg "abg bcra" ol pybfvat gur svyr # qrfpevcgbef. Gura gur arkg svyr gung bcraf vf nhgbzngvpnyyl nffvtarq 0 be 1, # naq crbcyr *gelvat* gb ernq/jevgr fgqva/fgqbhg trg fperjrq.) bf.qhc2(0, 3) bf.qhc2(1, 4) bf.qhc2(2, 1) sq = bf.bcra('/qri/ahyy', bf.B_EQBAYL) bf.qhc2(sq, 0) bf.pybfr(sq) bf.raiveba['OHC_FREIRE_ERIREFR'] = urycref.ubfganzr() bf.rkrpic(neti[0], neti) flf.rkvg(99) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo, fhocebprff, gvzr sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * cne2_bx = 0 ahyys = bcra('/qri/ahyy') qrs qroht(f): vs bcg.ireobfr: ybt(f) qrs eha(neti): # ng yrnfg va clguba 2.5, hfvat "fgqbhg=2" be "fgqbhg=flf.fgqree" orybj # qbrfa'g npghnyyl jbex, orpnhfr fhocebprff pybfrf sq #2 evtug orsber # rkrpvat sbe fbzr ernfba. Fb jr jbex nebhaq vg ol qhcyvpngvat gur sq # svefg. sq = bf.qhc(2) # pbcl fgqree gel: c = fhocebprff.Cbcra(neti, fgqbhg=sq, pybfr_sqf=Snyfr) erghea c.jnvg() svanyyl: bf.pybfr(sq) qrs cne2_frghc(): tybony cne2_bx ei = 1 gel: c = fhocebprff.Cbcra(['cne2', '--uryc'], fgqbhg=ahyys, fgqree=ahyys, fgqva=ahyys) ei = c.jnvg() rkprcg BFReebe: ybt('sfpx: jneavat: cne2 abg sbhaq; qvfnoyvat erpbirel srngherf.\a') ryfr: cne2_bx = 1 qrs cnei(yiy): vs bcg.ireobfr >= yiy: vs vfggl: erghea [] ryfr: erghea ['-d'] ryfr: erghea ['-dd'] qrs cne2_trarengr(onfr): erghea eha(['cne2', 'perngr', '-a1', '-p200'] + cnei(2) + ['--', onfr, onfr+'.cnpx', onfr+'.vqk']) qrs cne2_irevsl(onfr): erghea eha(['cne2', 'irevsl'] + cnei(3) + ['--', onfr]) qrs cne2_ercnve(onfr): erghea eha(['cne2', 'ercnve'] + cnei(2) + ['--', onfr]) qrs dhvpx_irevsl(onfr): s = bcra(onfr + '.cnpx', 'eo') s.frrx(-20, 2) jnagfhz = s.ernq(20) nffreg(yra(jnagfhz) == 20) s.frrx(0) fhz = Fun1() sbe o va puhaxlernqre(s, bf.sfgng(s.svyrab()).fg_fvmr - 20): fhz.hcqngr(o) vs fhz.qvtrfg() != jnagfhz: envfr InyhrReebe('rkcrpgrq %e, tbg %e' % (jnagfhz.rapbqr('urk'), fhz.urkqvtrfg())) qrs tvg_irevsl(onfr): vs bcg.dhvpx: gel: dhvpx_irevsl(onfr) rkprcg Rkprcgvba, r: 
qroht('reebe: %f\a' % r) erghea 1 erghea 0 ryfr: erghea eha(['tvg', 'irevsl-cnpx', '--', onfr]) qrs qb_cnpx(onfr, ynfg): pbqr = 0 vs cne2_bx naq cne2_rkvfgf naq (bcg.ercnve be abg bcg.trarengr): ierfhyg = cne2_irevsl(onfr) vs ierfhyg != 0: vs bcg.ercnve: eerfhyg = cne2_ercnve(onfr) vs eerfhyg != 0: cevag '%f cne2 ercnve: snvyrq (%q)' % (ynfg, eerfhyg) pbqr = eerfhyg ryfr: cevag '%f cne2 ercnve: fhpprrqrq (0)' % ynfg pbqr = 100 ryfr: cevag '%f cne2 irevsl: snvyrq (%q)' % (ynfg, ierfhyg) pbqr = ierfhyg ryfr: cevag '%f bx' % ynfg ryvs abg bcg.trarengr be (cne2_bx naq abg cne2_rkvfgf): terfhyg = tvg_irevsl(onfr) vs terfhyg != 0: cevag '%f tvg irevsl: snvyrq (%q)' % (ynfg, terfhyg) pbqr = terfhyg ryfr: vs cne2_bx naq bcg.trarengr: cerfhyg = cne2_trarengr(onfr) vs cerfhyg != 0: cevag '%f cne2 perngr: snvyrq (%q)' % (ynfg, cerfhyg) pbqr = cerfhyg ryfr: cevag '%f bx' % ynfg ryfr: cevag '%f bx' % ynfg ryfr: nffreg(bcg.trarengr naq (abg cne2_bx be cne2_rkvfgf)) qroht(' fxvccrq: cne2 svyr nyernql trarengrq.\a') erghea pbqr bcgfcrp = """ ohc sfpx [bcgvbaf...] [svyranzrf...] -- e,ercnve nggrzcg gb ercnve reebef hfvat cne2 (qnatrebhf!) 
t,trarengr trarengr nhgb-ercnve vasbezngvba hfvat cne2 i,ireobfr vapernfr ireobfvgl (pna or hfrq zber guna bapr) dhvpx whfg purpx cnpx fun1fhz, qba'g hfr tvg irevsl-cnpx w,wbof= eha 'a' wbof va cnenyyry cne2-bx vzzrqvngryl erghea 0 vs cne2 vf bx, 1 vs abg qvfnoyr-cne2 vtaber cne2 rira vs vg vf ninvynoyr """ b = bcgvbaf.Bcgvbaf('ohc sfpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) cne2_frghc() vs bcg.cne2_bx: vs cne2_bx: flf.rkvg(0) # 'gehr' va fu ryfr: flf.rkvg(1) vs bcg.qvfnoyr_cne2: cne2_bx = 0 tvg.purpx_ercb_be_qvr() vs abg rkgen: qroht('sfpx: Ab svyranzrf tvira: purpxvat nyy cnpxf.\a') rkgen = tybo.tybo(tvg.ercb('bowrpgf/cnpx/*.cnpx')) pbqr = 0 pbhag = 0 bhgfgnaqvat = {} sbe anzr va rkgen: vs anzr.raqfjvgu('.cnpx'): onfr = anzr[:-5] ryvs anzr.raqfjvgu('.vqk'): onfr = anzr[:-4] ryvs anzr.raqfjvgu('.cne2'): onfr = anzr[:-5] ryvs bf.cngu.rkvfgf(anzr + '.cnpx'): onfr = anzr ryfr: envfr Rkprcgvba('%f vf abg n cnpx svyr!' % anzr) (qve,ynfg) = bf.cngu.fcyvg(onfr) cne2_rkvfgf = bf.cngu.rkvfgf(onfr + '.cne2') vs cne2_rkvfgf naq bf.fgng(onfr + '.cne2').fg_fvmr == 0: cne2_rkvfgf = 0 flf.fgqbhg.syhfu() qroht('sfpx: purpxvat %f (%f)\a' % (ynfg, cne2_bx naq cne2_rkvfgf naq 'cne2' be 'tvg')) vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.wbof: ap = qb_cnpx(onfr, ynfg) pbqr = pbqr be ap pbhag += 1 ryfr: juvyr yra(bhgfgnaqvat) >= bcg.wbof: (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 cvq = bf.sbex() vs cvq: # cnerag bhgfgnaqvat[cvq] = 1 ryfr: # puvyq gel: flf.rkvg(qb_cnpx(onfr, ynfg)) rkprcg Rkprcgvba, r: ybt('rkprcgvba: %e\a' % r) flf.rkvg(99) juvyr yra(bhgfgnaqvat): (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.ireobfr naq vfggl: ybt('sfpx qbar. 
\a') flf.rkvg(pbqr) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg, trgbcg, fhocebprff, fvtany sebz ohc vzcbeg bcgvbaf, ffu sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc eonpxhc vaqrk ... ohc eonpxhc fnir ... ohc eonpxhc fcyvg ... """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc', bcgfcrp, bcgshap=trgbcg.trgbcg) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) < 2: b.sngny('nethzragf rkcrpgrq') pynff FvtRkprcgvba(Rkprcgvba): qrs __vavg__(frys, fvtahz): frys.fvtahz = fvtahz Rkprcgvba.__vavg__(frys, 'fvtany %q erprvirq' % fvtahz) qrs unaqyre(fvtahz, senzr): envfr FvtRkprcgvba(fvtahz) fvtany.fvtany(fvtany.FVTGREZ, unaqyre) fvtany.fvtany(fvtany.FVTVAG, unaqyre) fc = Abar c = Abar erg = 99 gel: ubfganzr = rkgen[0] neti = rkgen[1:] c = ffu.pbaarpg(ubfganzr, 'eonpxhc-freire') netif = '\0'.wbva(['ohc'] + neti) c.fgqva.jevgr(fgehpg.cnpx('!V', yra(netif)) + netif) c.fgqva.syhfu() znva_rkr = bf.raiveba.trg('OHC_ZNVA_RKR') be flf.neti[0] fc = fhocebprff.Cbcra([znva_rkr, 'freire'], fgqva=c.fgqbhg, fgqbhg=c.fgqva) c.fgqva.pybfr() c.fgqbhg.pybfr() svanyyl: juvyr 1: # vs jr trg n fvtany juvyr jnvgvat, jr unir gb xrrc jnvgvat, whfg # va pnfr bhe puvyq qbrfa'g qvr. 
gel: erg = c.jnvg() fc.jnvg() oernx rkprcg FvtRkprcgvba, r: ybt('\aohc eonpxhc: %f\a' % r) bf.xvyy(c.cvq, r.fvtahz) erg = 84 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc arjyvare """ b = bcgvbaf.Bcgvbaf('ohc arjyvare', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") e = er.pbzcvyr(e'([\e\a])') ynfgyra = 0 nyy = '' juvyr 1: y = e.fcyvg(nyy, 1) vs yra(y) <= 1: gel: o = bf.ernq(flf.fgqva.svyrab(), 4096) rkprcg XrlobneqVagreehcg: oernx vs abg o: oernx nyy += o ryfr: nffreg(yra(y) == 3) (yvar, fcyvgpune, nyy) = y #fcyvgpune = '\a' flf.fgqbhg.jevgr('%-*f%f' % (ynfgyra, yvar, fcyvgpune)) vs fcyvgpune == '\e': ynfgyra = yra(yvar) ryfr: ynfgyra = 0 flf.fgqbhg.syhfu() vs ynfgyra be nyy: flf.fgqbhg.jevgr('%-*f\a' % (ynfgyra, nyy)) #!/hfe/ova/rai clguba vzcbeg flf sebz ohc vzcbeg bcgvbaf, tvg, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc znetva """ b = bcgvbaf.Bcgvbaf('ohc znetva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") tvg.purpx_ercb_be_qvr() #tvg.vtaber_zvqk = 1 zv = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) ynfg = '\0'*20 ybatzngpu = 0 sbe v va zv: vs v == ynfg: pbagvahr #nffreg(fge(v) >= ynfg) cz = _unfufcyvg.ovgzngpu(ynfg, v) ybatzngpu = znk(ybatzngpu, cz) ynfg = v cevag ybatzngpu #!/hfe/ova/rai clguba sebz ohc vzcbeg bcgvbaf, qerphefr sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc qerphefr -- k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf d,dhvrg qba'g npghnyyl cevag svyranzrf cebsvyr eha haqre gur clguba cebsvyre """ b = bcgvbaf.Bcgvbaf('ohc qerphefr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar svyranzr rkcrpgrq") vg = qerphefr.erphefvir_qveyvfg(rkgen, bcg.kqri) vs bcg.cebsvyr: vzcbeg pCebsvyr qrs qb_vg(): sbe v va vg: cnff pCebsvyr.eha('qb_vg()') ryfr: vs bcg.dhvrg: sbe v va vg: cnff ryfr: sbe (anzr,fg) va vg: cevag anzr vs fnirq_reebef: 
ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc fcyvg [-gpo] [-a anzr] [--orapu] [svyranzrf...] -- e,erzbgr= erzbgr ercbfvgbel cngu o,oybof bhgchg n frevrf bs oybo vqf g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) A,abbc qba'g npghnyyl fnir gur qngn naljurer d,dhvrg qba'g cevag cebterff zrffntrf i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) pbcl whfg pbcl vachg gb bhgchg, unfufcyvggvat nybat gur jnl orapu cevag orapuznex gvzvatf gb fgqree znk-cnpx-fvmr= znkvzhz olgrf va n fvatyr cnpx znk-cnpx-bowrpgf= znkvzhz ahzore bs bowrpgf va n fvatyr cnpx snabhg= znkvzhz ahzore bs oybof va n fvatyr gerr """ b = bcgvbaf.Bcgvbaf('ohc fcyvg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr be bcg.abbc be bcg.pbcl): b.sngny("hfr bar be zber bs -o, -g, -p, -a, -A, --pbcl") vs (bcg.abbc be bcg.pbcl) naq (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny('-A vf vapbzcngvoyr jvgu -o, -g, -p, -a') vs bcg.ireobfr >= 2: tvg.ireobfr = bcg.ireobfr - 1 bcg.orapu = 1 vs bcg.znk_cnpx_fvmr: unfufcyvg.znk_cnpx_fvmr = cnefr_ahz(bcg.znk_cnpx_fvmr) vs bcg.znk_cnpx_bowrpgf: unfufcyvg.znk_cnpx_bowrpgf = cnefr_ahz(bcg.znk_cnpx_bowrpgf) vs bcg.snabhg: unfufcyvg.snabhg = cnefr_ahz(bcg.snabhg) vs bcg.oybof: unfufcyvg.snabhg = 0 vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") fgneg_gvzr = gvzr.gvzr() ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.abbc be bcg.pbcl: pyv = j = byqers = Abar ryvs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr 
naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() svyrf = rkgen naq (bcra(sa) sbe sa va rkgen) be [flf.fgqva] vs j: funyvfg = unfufcyvg.fcyvg_gb_funyvfg(j, svyrf) gerr = j.arj_gerr(funyvfg) ryfr: ynfg = 0 sbe (oybo, ovgf) va unfufcyvg.unfufcyvg_vgre(svyrf): unfufcyvg.gbgny_fcyvg += yra(oybo) vs bcg.pbcl: flf.fgqbhg.jevgr(fge(oybo)) zrtf = unfufcyvg.gbgny_fcyvg/1024/1024 vs abg bcg.dhvrg naq ynfg != zrtf: cebterff('%q Zolgrf ernq\e' % zrtf) ynfg = zrtf cebterff('%q Zolgrf ernq, qbar.\a' % zrtf) vs bcg.ireobfr: ybt('\a') vs bcg.oybof: sbe (zbqr,anzr,ova) va funyvfg: cevag ova.rapbqr('urk') vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fcyvg\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') vs j: j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() frpf = gvzr.gvzr() - fgneg_gvzr fvmr = unfufcyvg.gbgny_fcyvg vs bcg.orapu: ybt('\aohc: %.2sxolgrf va %.2s frpf = %.2s xolgrf/frp\a' % (fvmr/1024., frpf, fvmr/1024./frpf)) #!/hfe/ova/rai clguba vzcbeg flf, er, fgehpg, zznc sebz ohc vzcbeg tvg, bcgvbaf sebz ohc.urycref vzcbeg * qrs f_sebz_olgrf(olgrf): pyvfg = [pue(o) sbe o va olgrf] erghea ''.wbva(pyvfg) qrs ercbeg(pbhag): svryqf = ['IzFvmr', 'IzEFF', 'IzQngn', 'IzFgx'] q = {} sbe yvar va bcra('/cebp/frys/fgnghf').ernqyvarf(): y = er.fcyvg(e':\f*', yvar.fgevc(), 1) q[y[0]] = y[1] vs pbhag >= 0: r1 = pbhag svryqf = [q[x] sbe x va svryqf] ryfr: r1 = '' cevag ('%9f ' + ('%10f ' * yra(svryqf))) % ghcyr([r1] + svryqf) flf.fgqbhg.syhfu() bcgfcrp = """ ohc zrzgrfg [-a ryrzragf] [-p plpyrf] -- a,ahzore= ahzore bs bowrpgf cre plpyr p,plpyrf= ahzore bs plpyrf gb eha vtaber-zvqk vtaber .zvqk svyrf, hfr bayl .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zrzgrfg', bcgfcrp) (bcg, syntf, rkgen) = 
b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') tvg.vtaber_zvqk = bcg.vtaber_zvqk tvg.purpx_ercb_be_qvr() z = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) plpyrf = bcg.plpyrf be 100 ahzore = bcg.ahzore be 10000 ercbeg(-1) s = bcra('/qri/henaqbz') n = zznc.zznc(-1, 20) ercbeg(0) sbe p va kenatr(plpyrf): sbe a va kenatr(ahzore): o = s.ernq(3) vs 0: olgrf = yvfg(fgehpg.hacnpx('!OOO', o)) + [0]*17 olgrf[2] &= 0ks0 ova = fgehpg.cnpx('!20f', f_sebz_olgrf(olgrf)) ryfr: n[0:2] = o[0:2] n[2] = pue(beq(o[2]) & 0ks0) ova = fge(n[0:20]) #cevag ova.rapbqr('urk') z.rkvfgf(ova) ercbeg((p+1)*ahzore) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * qrs cevag_abqr(grkg, a): cersvk = '' vs bcg.unfu: cersvk += "%f " % a.unfu.rapbqr('urk') vs fgng.F_VFQVE(a.zbqr): cevag '%f%f/' % (cersvk, grkg) ryvs fgng.F_VFYAX(a.zbqr): cevag '%f%f@' % (cersvk, grkg) ryfr: cevag '%f%f' % (cersvk, grkg) bcgfcrp = """ ohc yf -- f,unfu fubj unfu sbe rnpu svyr """ b = bcgvbaf.Bcgvbaf('ohc yf', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) vs abg rkgen: rkgen = ['/'] erg = 0 sbe q va rkgen: gel: a = gbc.yerfbyir(q) vs fgng.F_VFQVE(a.zbqr): sbe fho va a: cevag_abqr(fho.anzr, fho) ryfr: cevag_abqr(q, a) rkprcg isf.AbqrReebe, r: ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er, fgng, ernqyvar, sazngpu sebz ohc vzcbeg bcgvbaf, tvg, fudhbgr, isf sebz ohc.urycref vzcbeg * qrs abqr_anzr(grkg, a): vs fgng.F_VFQVE(a.zbqr): erghea '%f/' % grkg ryvs fgng.F_VFYAX(a.zbqr): erghea '%f@' % grkg ryfr: erghea '%f' % grkg qrs qb_yf(cngu, a): y = [] vs fgng.F_VFQVE(a.zbqr): sbe fho va a: y.nccraq(abqr_anzr(fho.anzr, fho)) ryfr: y.nccraq(abqr_anzr(cngu, a)) cevag pbyhzangr(y, '') qrs jevgr_gb_svyr(vas, bhgs): sbe oybo va puhaxlernqre(vas): bhgs.jevgr(oybo) qrs vachgvgre(): vs bf.vfnggl(flf.fgqva.svyrab()): juvyr 1: gel: lvryq enj_vachg('ohc> ') rkprcg RBSReebe: oernx 
ryfr: sbe yvar va flf.fgqva: lvryq yvar qrs _pbzcyrgre_trg_fhof(yvar): (dglcr, ynfgjbeq) = fudhbgr.hasvavfurq_jbeq(yvar) (qve,anzr) = bf.cngu.fcyvg(ynfgjbeq) #ybt('\apbzcyrgre: %e %e %e\a' % (dglcr, ynfgjbeq, grkg)) a = cjq.erfbyir(qve) fhof = yvfg(svygre(ynzoqn k: k.anzr.fgnegfjvgu(anzr), a.fhof())) erghea (qve, anzr, dglcr, ynfgjbeq, fhof) _ynfg_yvar = Abar _ynfg_erf = Abar qrs pbzcyrgre(grkg, fgngr): tybony _ynfg_yvar tybony _ynfg_erf gel: yvar = ernqyvar.trg_yvar_ohssre()[:ernqyvar.trg_raqvqk()] vs _ynfg_yvar != yvar: _ynfg_erf = _pbzcyrgre_trg_fhof(yvar) _ynfg_yvar = yvar (qve, anzr, dglcr, ynfgjbeq, fhof) = _ynfg_erf vs fgngr < yra(fhof): fa = fhof[fgngr] fa1 = fa.erfbyir('') # qrers flzyvaxf shyyanzr = bf.cngu.wbva(qve, fa.anzr) vs fgng.F_VFQVE(fa1.zbqr): erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr+'/', grezvangr=Snyfr) ryfr: erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr, grezvangr=Gehr) + ' ' erghea grkg + erg rkprcg Rkprcgvba, r: ybt('\areebe va pbzcyrgvba: %f\a' % r) bcgfcrp = """ ohc sgc """ b = bcgvbaf.Bcgvbaf('ohc sgc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) cjq = gbc vs rkgen: yvarf = rkgen ryfr: ernqyvar.frg_pbzcyrgre_qryvzf(' \g\a\e/') ernqyvar.frg_pbzcyrgre(pbzcyrgre) ernqyvar.cnefr_naq_ovaq("gno: pbzcyrgr") yvarf = vachgvgre() sbe yvar va yvarf: vs abg yvar.fgevc(): pbagvahr jbeqf = [jbeq sbe (jbeqfgneg,jbeq) va fudhbgr.dhbgrfcyvg(yvar)] pzq = jbeqf[0].ybjre() #ybt('rkrphgr: %e %e\a' % (pzq, cnez)) gel: vs pzq == 'yf': sbe cnez va (jbeqf[1:] be ['.']): qb_yf(cnez, cjq.erfbyir(cnez)) ryvs pzq == 'pq': sbe cnez va jbeqf[1:]: cjq = cjq.erfbyir(cnez) ryvs pzq == 'cjq': cevag cjq.shyyanzr() ryvs pzq == 'png': sbe cnez va jbeqf[1:]: jevgr_gb_svyr(cjq.erfbyir(cnez).bcra(), flf.fgqbhg) ryvs pzq == 'trg': vs yra(jbeqf) abg va [2,3]: envfr Rkprcgvba('Hfntr: trg [ybpnyanzr]') eanzr = jbeqf[1] (qve,onfr) = bf.cngu.fcyvg(eanzr) yanzr = yra(jbeqf)>2 naq jbeqf[2] be onfr vas = 
cjq.erfbyir(eanzr).bcra() ybt('Fnivat %e\a' % yanzr) jevgr_gb_svyr(vas, bcra(yanzr, 'jo')) ryvs pzq == 'ztrg': sbe cnez va jbeqf[1:]: (qve,onfr) = bf.cngu.fcyvg(cnez) sbe a va cjq.erfbyir(qve).fhof(): vs sazngpu.sazngpu(a.anzr, onfr): gel: ybt('Fnivat %e\a' % a.anzr) vas = a.bcra() bhgs = bcra(a.anzr, 'jo') jevgr_gb_svyr(vas, bhgs) bhgs.pybfr() rkprcg Rkprcgvba, r: ybt(' reebe: %f\a' % r) ryvs pzq == 'uryc' be pzq == '?': ybt('Pbzznaqf: yf pq cjq png trg ztrg uryc dhvg\a') ryvs pzq == 'dhvg' be pzq == 'rkvg' be pzq == 'olr': oernx ryfr: envfr Rkprcgvba('ab fhpu pbzznaq %e' % pzq) rkprcg Rkprcgvba, r: ybt('reebe: %f\a' % r) #envfr #!/hfe/ova/rai clguba vzcbeg flf, zznc sebz ohc vzcbeg bcgvbaf, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc enaqbz [-F frrq] -- F,frrq= bcgvbany enaqbz ahzore frrq (qrsnhyg 1) s,sbepr cevag enaqbz qngn gb fgqbhg rira vs vg'f n ggl """ b = bcgvbaf.Bcgvbaf('ohc enaqbz', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") gbgny = cnefr_ahz(rkgen[0]) vs bcg.sbepr be (abg bf.vfnggl(1) naq abg ngbv(bf.raiveba.trg('OHC_SBEPR_GGL')) & 1): _unfufcyvg.jevgr_enaqbz(flf.fgqbhg.svyrab(), gbgny, bcg.frrq be 0) ryfr: ybt('reebe: abg jevgvat ovanel qngn gb n grezvany. 
Hfr -s gb sbepr.\a') flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc uryc """ b = bcgvbaf.Bcgvbaf('ohc uryc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) == 0: # gur jenccre cebtenz cebivqrf gur qrsnhyg hfntr fgevat bf.rkrpic(bf.raiveba['OHC_ZNVA_RKR'], ['ohc']) ryvs yra(rkgen) == 1: qbpanzr = (rkgen[0]=='ohc' naq 'ohc' be ('ohc-%f' % rkgen[0])) rkr = flf.neti[0] (rkrcngu, rkrsvyr) = bf.cngu.fcyvg(rkr) znacngu = bf.cngu.wbva(rkrcngu, '../Qbphzragngvba/' + qbpanzr + '.[1-9]') t = tybo.tybo(znacngu) vs t: bf.rkrpic('zna', ['zna', '-y', t[0]]) ryfr: bf.rkrpic('zna', ['zna', qbpanzr]) ryfr: b.sngny("rknpgyl bar pbzznaq anzr rkcrpgrq") #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng, reeab, shfr, er, gvzr, grzcsvyr sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * pynff Fgng(shfr.Fgng): qrs __vavg__(frys): frys.fg_zbqr = 0 frys.fg_vab = 0 frys.fg_qri = 0 frys.fg_ayvax = 0 frys.fg_hvq = 0 frys.fg_tvq = 0 frys.fg_fvmr = 0 frys.fg_ngvzr = 0 frys.fg_zgvzr = 0 frys.fg_pgvzr = 0 frys.fg_oybpxf = 0 frys.fg_oyxfvmr = 0 frys.fg_eqri = 0 pnpur = {} qrs pnpur_trg(gbc, cngu): cnegf = cngu.fcyvg('/') pnpur[('',)] = gbc p = Abar znk = yra(cnegf) #ybt('pnpur: %e\a' % pnpur.xrlf()) sbe v va enatr(znk): cer = cnegf[:znk-v] #ybt('pnpur gelvat: %e\a' % cer) p = pnpur.trg(ghcyr(cer)) vs p: erfg = cnegf[znk-v:] sbe e va erfg: #ybt('erfbyivat %e sebz %e\a' % (e, p.shyyanzr())) p = p.yerfbyir(e) xrl = ghcyr(cer + [e]) #ybt('fnivat: %e\a' % (xrl,)) pnpur[xrl] = p oernx nffreg(p) erghea p pynff OhcSf(shfr.Shfr): qrs __vavg__(frys, gbc): shfr.Shfr.__vavg__(frys) frys.gbc = gbc qrs trgngge(frys, cngu): ybt('--trgngge(%e)\a' % cngu) gel: abqr = pnpur_trg(frys.gbc, cngu) fg = Fgng() fg.fg_zbqr = abqr.zbqr fg.fg_ayvax = abqr.ayvaxf() fg.fg_fvmr = abqr.fvmr() fg.fg_zgvzr = abqr.zgvzr fg.fg_pgvzr = abqr.pgvzr fg.fg_ngvzr = abqr.ngvzr erghea fg rkprcg isf.AbFhpuSvyr: erghea -reeab.RABRAG qrs ernqqve(frys, cngu, bssfrg): 
ybt('--ernqqve(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) lvryq shfr.Qveragel('.') lvryq shfr.Qveragel('..') sbe fho va abqr.fhof(): lvryq shfr.Qveragel(fho.anzr) qrs ernqyvax(frys, cngu): ybt('--ernqyvax(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) erghea abqr.ernqyvax() qrs bcra(frys, cngu, syntf): ybt('--bcra(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) nppzbqr = bf.B_EQBAYL | bf.B_JEBAYL | bf.B_EQJE vs (syntf & nppzbqr) != bf.B_EQBAYL: erghea -reeab.RNPPRF abqr.bcra() qrs eryrnfr(frys, cngu, syntf): ybt('--eryrnfr(%e)\a' % cngu) qrs ernq(frys, cngu, fvmr, bssfrg): ybt('--ernq(%e)\a' % cngu) a = pnpur_trg(frys.gbc, cngu) b = a.bcra() b.frrx(bssfrg) erghea b.ernq(fvmr) vs abg unfngge(shfr, '__irefvba__'): envfr EhagvzrReebe, "lbhe shfr zbqhyr vf gbb byq sbe shfr.__irefvba__" shfr.shfr_clguba_ncv = (0, 2) bcgfcrp = """ ohc shfr [-q] [-s] -- q,qroht vapernfr qroht yriry s,sbertebhaq eha va sbertebhaq """ b = bcgvbaf.Bcgvbaf('ohc shfr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) s = OhcSf(gbc) s.shfr_netf.zbhagcbvag = rkgen[0] vs bcg.qroht: s.shfr_netf.nqq('qroht') vs bcg.sbertebhaq: s.shfr_netf.frgzbq('sbertebhaq') cevag s.zhygvguernqrq s.zhygvguernqrq = Snyfr s.znva() #!/hfe/ova/rai clguba sebz ohc vzcbeg tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ [OHC_QVE=...] ohc vavg [-e ubfg:cngu] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc vavg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") vs bcg.erzbgr: tvg.vavg_ercb() # ybpny ercb tvg.purpx_ercb_be_qvr() pyv = pyvrag.Pyvrag(bcg.erzbgr, perngr=Gehr) pyv.pybfr() ryfr: tvg.vavg_ercb() #!/hfe/ova/rai clguba vzcbeg flf, zngu, fgehpg, tybo sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * CNTR_FVMR=4096 FUN_CRE_CNTR=CNTR_FVMR/200. 
qrs zretr(vqkyvfg, ovgf, gnoyr): pbhag = 0 sbe r va tvg.vqkzretr(vqkyvfg): pbhag += 1 cersvk = tvg.rkgenpg_ovgf(r, ovgf) gnoyr[cersvk] = pbhag lvryq r qrs qb_zvqk(bhgqve, bhgsvyranzr, vasvyranzrf): vs abg bhgsvyranzr: nffreg(bhgqve) fhz = Fun1('\0'.wbva(vasvyranzrf)).urkqvtrfg() bhgsvyranzr = '%f/zvqk-%f.zvqk' % (bhgqve, fhz) vac = [] gbgny = 0 sbe anzr va vasvyranzrf: vk = tvg.CnpxVqk(anzr) vac.nccraq(vk) gbgny += yra(vk) ybt('Zretvat %q vaqrkrf (%q bowrpgf).\a' % (yra(vasvyranzrf), gbgny)) vs (abg bcg.sbepr naq (gbgny < 1024 naq yra(vasvyranzrf) < 3)) \ be (bcg.sbepr naq abg gbgny): ybt('zvqk: abguvat gb qb.\a') erghea cntrf = vag(gbgny/FUN_CRE_CNTR) be 1 ovgf = vag(zngu.prvy(zngu.ybt(cntrf, 2))) ragevrf = 2**ovgf ybt('Gnoyr fvmr: %q (%q ovgf)\a' % (ragevrf*4, ovgf)) gnoyr = [0]*ragevrf gel: bf.hayvax(bhgsvyranzr) rkprcg BFReebe: cnff s = bcra(bhgsvyranzr + '.gzc', 'j+') s.jevgr('ZVQK\0\0\0\2') s.jevgr(fgehpg.cnpx('!V', ovgf)) nffreg(s.gryy() == 12) s.jevgr('\0'*4*ragevrf) sbe r va zretr(vac, ovgf, gnoyr): s.jevgr(r) s.jevgr('\0'.wbva(bf.cngu.onfranzr(c) sbe c va vasvyranzrf)) s.frrx(12) s.jevgr(fgehpg.cnpx('!%qV' % ragevrf, *gnoyr)) s.pybfr() bf.eranzr(bhgsvyranzr + '.gzc', bhgsvyranzr) # guvf vf whfg sbe grfgvat vs 0: c = tvg.CnpxZvqk(bhgsvyranzr) nffreg(yra(c.vqkanzrf) == yra(vasvyranzrf)) cevag c.vqkanzrf nffreg(yra(c) == gbgny) cv = vgre(c) sbe v va zretr(vac, gbgny, ovgf, gnoyr): nffreg(v == cv.arkg()) nffreg(c.rkvfgf(v)) cevag bhgsvyranzr bcgfcrp = """ ohc zvqk [bcgvbaf...] 
-- b,bhgchg= bhgchg zvqk svyranzr (qrsnhyg: nhgb-trarengrq) n,nhgb nhgbzngvpnyyl perngr .zvqk sebz nal havaqrkrq .vqk svyrf s,sbepr nhgbzngvpnyyl perngr .zvqk sebz *nyy* .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zvqk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen naq (bcg.nhgb be bcg.sbepr): b.sngny("lbh pna'g hfr -s/-n naq nyfb cebivqr svyranzrf") tvg.purpx_ercb_be_qvr() vs rkgen: qb_zvqk(tvg.ercb('bowrpgf/cnpx'), bcg.bhgchg, rkgen) ryvs bcg.nhgb be bcg.sbepr: cnguf = [tvg.ercb('bowrpgf/cnpx')] cnguf += tybo.tybo(tvg.ercb('vaqrk-pnpur/*/.')) sbe cngu va cnguf: ybt('zvqk: fpnaavat %f\a' % cngu) vs bcg.sbepr: qb_zvqk(cngu, bcg.bhgchg, tybo.tybo('%f/*.vqk' % cngu)) ryvs bcg.nhgb: z = tvg.CnpxVqkYvfg(cngu) arrqrq = {} sbe cnpx va z.cnpxf: # bayl .vqk svyrf jvgubhg n .zvqk ner bcra vs cnpx.anzr.raqfjvgu('.vqk'): arrqrq[cnpx.anzr] = 1 qry z qb_zvqk(cngu, bcg.bhgchg, arrqrq.xrlf()) ybt('\a') ryfr: b.sngny("lbh zhfg hfr -s be -n be cebivqr vachg svyranzrf") #!/hfe/ova/rai clguba vzcbeg flf, bf, enaqbz sebz ohc vzcbeg bcgvbaf sebz ohc.urycref vzcbeg * qrs enaqoybpx(a): y = [] sbe v va kenatr(a): y.nccraq(pue(enaqbz.enaqenatr(0,256))) erghea ''.wbva(y) bcgfcrp = """ ohc qnzntr [-a pbhag] [-f znkfvmr] [-F frrq] -- JNEAVAT: GUVF PBZZNAQ VF RKGERZRYL QNATREBHF a,ahz= ahzore bs oybpxf gb qnzntr f,fvmr= znkvzhz fvmr bs rnpu qnzntrq oybpx creprag= znkvzhz fvmr bs rnpu qnzntrq oybpx (nf n creprag bs ragver svyr) rdhny fcernq qnzntr rirayl guebhtubhg gur svyr F,frrq= enaqbz ahzore frrq (sbe ercrngnoyr grfgf) """ b = bcgvbaf.Bcgvbaf('ohc qnzntr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg rkgen: b.sngny('svyranzrf rkcrpgrq') vs bcg.frrq != Abar: enaqbz.frrq(bcg.frrq) sbe anzr va rkgen: ybt('Qnzntvat "%f"...\a' % anzr) s = bcra(anzr, 'e+o') fg = bf.sfgng(s.svyrab()) fvmr = fg.fg_fvmr vs bcg.creprag be bcg.fvmr: zf1 = vag(sybng(bcg.creprag be 0)/100.0*fvmr) be fvmr zf2 = bcg.fvmr be fvmr znkfvmr = zva(zf1, zf2) ryfr: znkfvmr = 1 puhaxf = bcg.ahz be 10 
puhaxfvmr = fvmr/puhaxf sbe e va enatr(puhaxf): fm = enaqbz.enaqenatr(1, znkfvmr+1) vs fm > fvmr: fm = fvmr vs bcg.rdhny: bsf = e*puhaxfvmr ryfr: bsf = enaqbz.enaqenatr(0, fvmr - fm + 1) ybt(' %6q olgrf ng %q\a' % (fm, bsf)) s.frrx(bsf) s.jevgr(enaqoybpx(fm)) s.pybfr() #!/hfe/ova/rai clguba vzcbeg flf, fgehpg, zznc sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * fhfcraqrq_j = Abar qrs vavg_qve(pbaa, net): tvg.vavg_ercb(net) ybt('ohc freire: ohcqve vavgvnyvmrq: %e\a' % tvg.ercbqve) pbaa.bx() qrs frg_qve(pbaa, net): tvg.purpx_ercb_be_qvr(net) ybt('ohc freire: ohcqve vf %e\a' % tvg.ercbqve) pbaa.bx() qrs yvfg_vaqrkrf(pbaa, whax): tvg.purpx_ercb_be_qvr() sbe s va bf.yvfgqve(tvg.ercb('bowrpgf/cnpx')): vs s.raqfjvgu('.vqk'): pbaa.jevgr('%f\a' % s) pbaa.bx() qrs fraq_vaqrk(pbaa, anzr): tvg.purpx_ercb_be_qvr() nffreg(anzr.svaq('/') < 0) nffreg(anzr.raqfjvgu('.vqk')) vqk = tvg.CnpxVqk(tvg.ercb('bowrpgf/cnpx/%f' % anzr)) pbaa.jevgr(fgehpg.cnpx('!V', yra(vqk.znc))) pbaa.jevgr(vqk.znc) pbaa.bx() qrs erprvir_bowrpgf(pbaa, whax): tybony fhfcraqrq_j tvg.purpx_ercb_be_qvr() fhttrfgrq = {} vs fhfcraqrq_j: j = fhfcraqrq_j fhfcraqrq_j = Abar ryfr: j = tvg.CnpxJevgre() juvyr 1: af = pbaa.ernq(4) vs abg af: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq yratgu urnqre, tbg RBS\a') a = fgehpg.hacnpx('!V', af)[0] #ybt('rkcrpgvat %q olgrf\a' % a) vs abg a: ybt('ohc freire: erprvirq %q bowrpg%f.\a' % (j.pbhag, j.pbhag!=1 naq "f" be '')) shyycngu = j.pybfr() vs shyycngu: (qve, anzr) = bf.cngu.fcyvg(shyycngu) pbaa.jevgr('%f.vqk\a' % anzr) pbaa.bx() erghea ryvs a == 0kssssssss: ybt('ohc freire: erprvir-bowrpgf fhfcraqrq.\a') fhfcraqrq_j = j pbaa.bx() erghea ohs = pbaa.ernq(a) # bowrpg fvmrf va ohc ner ernfbanoyl fznyy #ybt('ernq %q olgrf\a' % a) vs yra(ohs) < a: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq %q olgrf, tbg %q\a' % (a, yra(ohs))) (glcr, pbagrag) = tvg._qrpbqr_cnpxbow(ohs) fun = tvg.pnyp_unfu(glcr, pbagrag) byqcnpx = j.rkvfgf(fun) # SVKZR: jr bayl fhttrfg n fvatyr 
vaqrk cre plpyr, orpnhfr gur pyvrag # vf pheeragyl qhzo gb qbjaybnq zber guna bar cre plpyr naljnl. # Npghnyyl jr fubhyq svk gur pyvrag, ohg guvf vf n zvabe bcgvzvmngvba # ba gur freire fvqr. vs abg fhttrfgrq naq \ byqcnpx naq (byqcnpx == Gehr be byqcnpx.raqfjvgu('.zvqk')): # SVKZR: jr fubhyqa'g ernyyl unir gb xabj nobhg zvqk svyrf # ng guvf ynlre. Ohg rkvfgf() ba n zvqk qbrfa'g erghea gur # cnpxanzr (fvapr vg qbrfa'g xabj)... cebonoyl jr fubhyq whfg # svk gung qrsvpvrapl bs zvqk svyrf riraghnyyl, nygubhtu vg'yy # znxr gur svyrf ovttre. Guvf zrgubq vf pregnvayl abg irel # rssvpvrag. j.bowpnpur.erserfu(fxvc_zvqk = Gehr) byqcnpx = j.bowpnpur.rkvfgf(fun) ybt('arj fhttrfgvba: %e\a' % byqcnpx) nffreg(byqcnpx) nffreg(byqcnpx != Gehr) nffreg(abg byqcnpx.raqfjvgu('.zvqk')) j.bowpnpur.erserfu(fxvc_zvqk = Snyfr) vs abg fhttrfgrq naq byqcnpx: nffreg(byqcnpx.raqfjvgu('.vqk')) (qve,anzr) = bf.cngu.fcyvg(byqcnpx) vs abg (anzr va fhttrfgrq): ybt("ohc freire: fhttrfgvat vaqrk %f\a" % anzr) pbaa.jevgr('vaqrk %f\a' % anzr) fhttrfgrq[anzr] = 1 ryfr: j._enj_jevgr([ohs]) # ABGERNPURQ qrs ernq_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() e = tvg.ernq_ers(ersanzr) pbaa.jevgr('%f\a' % (e be '').rapbqr('urk')) pbaa.bx() qrs hcqngr_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() arjiny = pbaa.ernqyvar().fgevc() byqiny = pbaa.ernqyvar().fgevc() tvg.hcqngr_ers(ersanzr, arjiny.qrpbqr('urk'), byqiny.qrpbqr('urk')) pbaa.bx() qrs png(pbaa, vq): tvg.purpx_ercb_be_qvr() gel: sbe oybo va tvg.png(vq): pbaa.jevgr(fgehpg.cnpx('!V', yra(oybo))) pbaa.jevgr(oybo) rkprcg XrlReebe, r: ybt('freire: reebe: %f\a' % r) pbaa.jevgr('\0\0\0\0') pbaa.reebe(r) ryfr: pbaa.jevgr('\0\0\0\0') pbaa.bx() bcgfcrp = """ ohc freire """ b = bcgvbaf.Bcgvbaf('ohc freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') ybt('ohc freire: ernqvat sebz fgqva.\a') pbzznaqf = { 'vavg-qve': vavg_qve, 'frg-qve': frg_qve, 'yvfg-vaqrkrf': yvfg_vaqrkrf, 'fraq-vaqrk': fraq_vaqrk, 
'erprvir-bowrpgf': erprvir_bowrpgf, 'ernq-ers': ernq_ers, 'hcqngr-ers': hcqngr_ers, 'png': png, } # SVKZR: guvf cebgbpby vf gbgnyyl ynzr naq abg ng nyy shgher-cebbs. # (Rfcrpvnyyl fvapr jr nobeg pbzcyrgryl nf fbba nf *nalguvat* onq unccraf) pbaa = Pbaa(flf.fgqva, flf.fgqbhg) ye = yvarernqre(pbaa) sbe _yvar va ye: yvar = _yvar.fgevc() vs abg yvar: pbagvahr ybt('ohc freire: pbzznaq: %e\a' % yvar) jbeqf = yvar.fcyvg(' ', 1) pzq = jbeqf[0] erfg = yra(jbeqf)>1 naq jbeqf[1] be '' vs pzq == 'dhvg': oernx ryfr: pzq = pbzznaqf.trg(pzq) vs pzq: pzq(pbaa, erfg) ryfr: envfr Rkprcgvba('haxabja freire pbzznaq: %e\a' % yvar) ybt('ohc freire: qbar\a') #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc wbva [-e ubfg:cngu] [ersf be unfurf...] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc wbva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg rkgen: rkgen = yvarernqre(flf.fgqva) erg = 0 vs bcg.erzbgr: pyv = pyvrag.Pyvrag(bcg.erzbgr) png = pyv.png ryfr: pc = tvg.PngCvcr() png = pc.wbva sbe vq va rkgen: gel: sbe oybo va png(vq): flf.fgqbhg.jevgr(oybo) rkprcg XrlReebe, r: flf.fgqbhg.syhfu() ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, er, reeab, fgng, gvzr, zngu sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, vaqrk, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc fnir [-gp] [-a anzr] -- e,erzbgr= erzbgr ercbfvgbel cngu g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) d,dhvrg qba'g fubj cebterff zrgre fznyyre= bayl onpx hc svyrf fznyyre guna a olgrf """ b = bcgvbaf.Bcgvbaf('ohc fnir', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny("hfr bar be zber bs -g, -p, -a") vs abg rkgen: b.sngny("ab svyranzrf 
tvira") bcg.cebterff = (vfggl naq abg bcg.dhvrg) bcg.fznyyre = cnefr_ahz(bcg.fznyyre be 0) vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() unaqyr_pgey_p() qrs rngfynfu(qve): vs qve.raqfjvgu('/'): erghea qve[:-1] ryfr: erghea qve cnegf = [''] funyvfgf = [[]] qrs _chfu(cneg): nffreg(cneg) cnegf.nccraq(cneg) funyvfgf.nccraq([]) qrs _cbc(sbepr_gerr): nffreg(yra(cnegf) >= 1) cneg = cnegf.cbc() funyvfg = funyvfgf.cbc() gerr = sbepr_gerr be j.arj_gerr(funyvfg) vs funyvfgf: funyvfgf[-1].nccraq(('40000', cneg, gerr)) ryfr: # guvf jnf gur gbcyriry, fb chg vg onpx sbe fnavgl funyvfgf.nccraq(funyvfg) erghea gerr ynfgerznva = Abar qrs cebterff_ercbeg(a): tybony pbhag, fhopbhag, ynfgerznva fhopbhag += a pp = pbhag + fhopbhag cpg = gbgny naq (pp*100.0/gbgny) be 0 abj = gvzr.gvzr() ryncfrq = abj - gfgneg xcf = ryncfrq naq vag(pp/1024./ryncfrq) xcf_senp = 10 ** vag(zngu.ybt(xcf+1, 10) - 1) xcf = vag(xcf/xcf_senp)*xcf_senp vs pp: erznva = ryncfrq*1.0/pp * (gbgny-pp) ryfr: erznva = 0.0 vs (ynfgerznva naq (erznva > ynfgerznva) naq ((erznva - ynfgerznva)/ynfgerznva < 0.05)): erznva = ynfgerznva ryfr: ynfgerznva = erznva ubhef = vag(erznva/60/60) zvaf = vag(erznva/60 - ubhef*60) frpf = vag(erznva - ubhef*60*60 - zvaf*60) vs ryncfrq < 30: erznvafge = '' xcffge = '' ryfr: xcffge = '%qx/f' % xcf vs ubhef: erznvafge = '%qu%qz' % (ubhef, zvaf) ryvs zvaf: erznvafge = '%qz%q' % (zvaf, frpf) ryfr: erznvafge = '%qf' % frpf cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf) %f %f\e' % (cpg, pp/1024, gbgny/1024, spbhag, sgbgny, erznvafge, xcffge)) e = vaqrk.Ernqre(tvg.ercb('ohcvaqrk')) qrs nyernql_fnirq(rag): erghea rag.vf_inyvq() naq 
j.rkvfgf(rag.fun) naq rag.fun qrs jnagerphefr_cer(rag): erghea abg nyernql_fnirq(rag) qrs jnagerphefr_qhevat(rag): erghea abg nyernql_fnirq(rag) be rag.fun_zvffvat() gbgny = sgbgny = 0 vs bcg.cebterff: sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_cer): vs abg (sgbgny % 10024): cebterff('Ernqvat vaqrk: %q\e' % sgbgny) rkvfgf = rag.rkvfgf() unfuinyvq = nyernql_fnirq(rag) rag.frg_fun_zvffvat(abg unfuinyvq) vs abg bcg.fznyyre be rag.fvmr < bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: gbgny += rag.fvmr sgbgny += 1 cebterff('Ernqvat vaqrk: %q, qbar.\a' % sgbgny) unfufcyvg.cebterff_pnyyonpx = cebterff_ercbeg gfgneg = gvzr.gvzr() pbhag = fhopbhag = spbhag = 0 ynfgfxvc_anzr = Abar ynfgqve = '' sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_qhevat): (qve, svyr) = bf.cngu.fcyvg(rag.anzr) rkvfgf = (rag.syntf & vaqrk.VK_RKVFGF) unfuinyvq = nyernql_fnirq(rag) jnfzvffvat = rag.fun_zvffvat() byqfvmr = rag.fvmr vs bcg.ireobfr: vs abg rkvfgf: fgnghf = 'Q' ryvs abg unfuinyvq: vs rag.fun == vaqrk.RZCGL_FUN: fgnghf = 'N' ryfr: fgnghf = 'Z' ryfr: fgnghf = ' ' vs bcg.ireobfr >= 2: ybt('%f %-70f\a' % (fgnghf, rag.anzr)) ryvs abg fgng.F_VFQVE(rag.zbqr) naq ynfgqve != qve: vs abg ynfgqve.fgnegfjvgu(qve): ybt('%f %-70f\a' % (fgnghf, bf.cngu.wbva(qve, ''))) ynfgqve = qve vs bcg.cebterff: cebterff_ercbeg(0) spbhag += 1 vs abg rkvfgf: pbagvahr vs bcg.fznyyre naq rag.fvmr >= bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: nqq_reebe('fxvccvat ynetr svyr "%f"' % rag.anzr) ynfgfxvc_anzr = rag.anzr pbagvahr nffreg(qve.fgnegfjvgu('/')) qvec = qve.fcyvg('/') juvyr cnegf > qvec: _cbc(sbepr_gerr = Abar) vs qve != '/': sbe cneg va qvec[yra(cnegf):]: _chfu(cneg) vs abg svyr: # ab svyranzr cbegvba zrnaf guvf vf n fhoqve. Ohg # fho/cneragqverpgbevrf nyernql unaqyrq va gur cbc/chfu() cneg nobir. 
byqgerr = nyernql_fnirq(rag) # znl or Abar arjgerr = _cbc(sbepr_gerr = byqgerr) vs abg byqgerr: vs ynfgfxvc_anzr naq ynfgfxvc_anzr.fgnegfjvgu(rag.anzr): rag.vainyvqngr() ryfr: rag.inyvqngr(040000, arjgerr) rag.ercnpx() vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr pbagvahr # vg'f abg n qverpgbel vq = Abar vs unfuinyvq: zbqr = '%b' % rag.tvgzbqr vq = rag.fun funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) ryfr: vs fgng.F_VFERT(rag.zbqr): gel: s = unfufcyvg.bcra_abngvzr(rag.anzr) rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = unfufcyvg.fcyvg_gb_oybo_be_gerr(j, [s]) ryfr: vs fgng.F_VFQVE(rag.zbqr): nffreg(0) # unaqyrq nobir ryvs fgng.F_VFYAX(rag.zbqr): gel: ey = bf.ernqyvax(rag.anzr) rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = ('120000', j.arj_oybo(ey)) ryfr: nqq_reebe(Rkprcgvba('fxvccvat fcrpvny svyr "%f"' % rag.anzr)) ynfgfxvc_anzr = rag.anzr vs vq: rag.inyvqngr(vag(zbqr, 8), vq) rag.ercnpx() funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr fhopbhag = 0 vs bcg.cebterff: cpg = gbgny naq pbhag*100.0/gbgny be 100 cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf), qbar. 
\a' % (cpg, pbhag/1024, gbgny/1024, spbhag, sgbgny)) juvyr yra(cnegf) > 1: _cbc(sbepr_gerr = Abar) nffreg(yra(funyvfgf) == 1) gerr = j.arj_gerr(funyvfgf[-1]) vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fnir\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq juvyr fnivat.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc gvpx """ b = bcgvbaf.Bcgvbaf('ohc gvpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") g = gvzr.gvzr() gyrsg = 1 - (g - vag(g)) gvzr.fyrrc(gyrsg) #!/hfe/ova/rai clguba vzcbeg bf, flf, fgng, gvzr sebz ohc vzcbeg bcgvbaf, tvg, vaqrk, qerphefr sebz ohc.urycref vzcbeg * qrs zretr_vaqrkrf(bhg, e1, e2): sbe r va vaqrk.ZretrVgre([e1, e2]): # SVKZR: fubhyqa'g jr erzbir qryrgrq ragevrf riraghnyyl? Jura? 
bhg.nqq_vkragel(r) pynff VgreUrycre: qrs __vavg__(frys, y): frys.v = vgre(y) frys.phe = Abar frys.arkg() qrs arkg(frys): gel: frys.phe = frys.v.arkg() rkprcg FgbcVgrengvba: frys.phe = Abar erghea frys.phe qrs purpx_vaqrk(ernqre): gel: ybt('purpx: purpxvat sbejneq vgrengvba...\a') r = Abar q = {} sbe r va ernqre.sbejneq_vgre(): vs r.puvyqera_a: vs bcg.ireobfr: ybt('%08k+%-4q %e\a' % (r.puvyqera_bsf, r.puvyqera_a, r.anzr)) nffreg(r.puvyqera_bsf) nffreg(r.anzr.raqfjvgu('/')) nffreg(abg q.trg(r.puvyqera_bsf)) q[r.puvyqera_bsf] = 1 vs r.syntf & vaqrk.VK_UNFUINYVQ: nffreg(r.fun != vaqrk.RZCGL_FUN) nffreg(r.tvgzbqr) nffreg(abg r be r.anzr == '/') # ynfg ragel vf *nyjnlf* / ybt('purpx: purpxvat abezny vgrengvba...\a') ynfg = Abar sbe r va ernqre: vs ynfg: nffreg(ynfg > r.anzr) ynfg = r.anzr rkprcg: ybt('vaqrk reebe! ng %e\a' % r) envfr ybt('purpx: cnffrq.\a') qrs hcqngr_vaqrk(gbc): ev = vaqrk.Ernqre(vaqrksvyr) jv = vaqrk.Jevgre(vaqrksvyr) evt = VgreUrycre(ev.vgre(anzr=gbc)) gfgneg = vag(gvzr.gvzr()) unfutra = Abar vs bcg.snxr_inyvq: qrs unfutra(anzr): erghea (0100644, vaqrk.SNXR_FUN) gbgny = 0 sbe (cngu,cfg) va qerphefr.erphefvir_qveyvfg([gbc], kqri=bcg.kqri): vs bcg.ireobfr>=2 be (bcg.ireobfr==1 naq fgng.F_VFQVE(cfg.fg_zbqr)): flf.fgqbhg.jevgr('%f\a' % cngu) flf.fgqbhg.syhfu() cebterff('Vaqrkvat: %q\e' % gbgny) ryvs abg (gbgny % 128): cebterff('Vaqrkvat: %q\e' % gbgny) gbgny += 1 juvyr evt.phe naq evt.phe.anzr > cngu: # qryrgrq cnguf vs evt.phe.rkvfgf(): evt.phe.frg_qryrgrq() evt.phe.ercnpx() evt.arkg() vs evt.phe naq evt.phe.anzr == cngu: # cnguf gung nyernql rkvfgrq vs cfg: evt.phe.sebz_fgng(cfg, gfgneg) vs abg (evt.phe.syntf & vaqrk.VK_UNFUINYVQ): vs unfutra: (evt.phe.tvgzbqr, evt.phe.fun) = unfutra(cngu) evt.phe.syntf |= vaqrk.VK_UNFUINYVQ vs bcg.snxr_vainyvq: evt.phe.vainyvqngr() evt.phe.ercnpx() evt.arkg() ryfr: # arj cnguf jv.nqq(cngu, cfg, unfutra = unfutra) cebterff('Vaqrkvat: %q, qbar.\a' % gbgny) vs ev.rkvfgf(): ev.fnir() jv.syhfu() vs jv.pbhag: je = 
jv.arj_ernqre() vs bcg.purpx: ybt('purpx: orsber zretvat: byqsvyr\a') purpx_vaqrk(ev) ybt('purpx: orsber zretvat: arjsvyr\a') purpx_vaqrk(je) zv = vaqrk.Jevgre(vaqrksvyr) zretr_vaqrkrf(zv, ev, je) ev.pybfr() zv.pybfr() je.pybfr() jv.nobeg() ryfr: jv.pybfr() bcgfcrp = """ ohc vaqrk <-c|z|h> [bcgvbaf...] -- c,cevag cevag gur vaqrk ragevrf sbe gur tvira anzrf (nyfb jbexf jvgu -h) z,zbqvsvrq cevag bayl nqqrq/qryrgrq/zbqvsvrq svyrf (vzcyvrf -c) f,fgnghf cevag rnpu svyranzr jvgu n fgnghf pune (N/Z/Q) (vzcyvrf -c) U,unfu cevag gur unfu sbe rnpu bowrpg arkg gb vgf anzr (vzcyvrf -c) y,ybat cevag zber vasbezngvba nobhg rnpu svyr h,hcqngr (erphefviryl) hcqngr gur vaqrk ragevrf sbe gur tvira svyranzrf k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf snxr-inyvq znex nyy vaqrk ragevrf nf hc-gb-qngr rira vs gurl nera'g snxr-vainyvq znex nyy vaqrk ragevrf nf vainyvq purpx pnershyyl purpx vaqrk svyr vagrtevgl s,vaqrksvyr= gur anzr bs gur vaqrk svyr (qrsnhyg 'vaqrk') i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) """ b = bcgvbaf.Bcgvbaf('ohc vaqrk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg (bcg.zbqvsvrq be bcg['cevag'] be bcg.fgnghf be bcg.hcqngr be bcg.purpx): b.sngny('fhccyl bar be zber bs -c, -f, -z, -h, be --purpx') vs (bcg.snxr_inyvq be bcg.snxr_vainyvq) naq abg bcg.hcqngr: b.sngny('--snxr-{va,}inyvq ner zrnavatyrff jvgubhg -h') vs bcg.snxr_inyvq naq bcg.snxr_vainyvq: b.sngny('--snxr-inyvq vf vapbzcngvoyr jvgu --snxr-vainyvq') tvg.purpx_ercb_be_qvr() vaqrksvyr = bcg.vaqrksvyr be tvg.ercb('ohcvaqrk') unaqyr_pgey_p() vs bcg.purpx: ybt('purpx: fgnegvat vavgvny purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) cnguf = vaqrk.erqhpr_cnguf(rkgen) vs bcg.hcqngr: vs abg cnguf: b.sngny('hcqngr (-h) erdhrfgrq ohg ab cnguf tvira') sbe (ec,cngu) va cnguf: hcqngr_vaqrk(ec) vs bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq: sbe (anzr, rag) va vaqrk.Ernqre(vaqrksvyr).svygre(rkgen be ['']): vs (bcg.zbqvsvrq naq (rag.vf_inyvq() be rag.vf_qryrgrq() be abg 
rag.zbqr)): pbagvahr yvar = '' vs bcg.fgnghf: vs rag.vf_qryrgrq(): yvar += 'Q ' ryvs abg rag.vf_inyvq(): vs rag.fun == vaqrk.RZCGL_FUN: yvar += 'N ' ryfr: yvar += 'Z ' ryfr: yvar += ' ' vs bcg.unfu: yvar += rag.fun.rapbqr('urk') + ' ' vs bcg.ybat: yvar += "%7f %7f " % (bpg(rag.zbqr), bpg(rag.tvgzbqr)) cevag yvar + (anzr be './') vs bcg.purpx naq (bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq be bcg.hcqngr): ybt('purpx: fgnegvat svany purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg sebz ohc vzcbeg bcgvbaf, urycref bcgfcrp = """ ohc eonpxhc-freire -- Guvf pbzznaq vf abg vagraqrq gb or eha znahnyyl. """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc-freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') # trg gur fhopbzznaq'f neti. # Abeznyyl jr pbhyq whfg cnff guvf ba gur pbzznaq yvar, ohg fvapr jr'yy bsgra # or trggvat pnyyrq ba gur bgure raq bs na ffu cvcr, juvpu graqf gb znatyr # neti (ol fraqvat vg ivn gur furyy), guvf jnl vf zhpu fnsre. ohs = flf.fgqva.ernq(4) fm = fgehpg.hacnpx('!V', ohs)[0] nffreg(fm > 0) nffreg(fm < 1000000) ohs = flf.fgqva.ernq(fm) nffreg(yra(ohs) == fm) neti = ohs.fcyvg('\0') # fgqva/fgqbhg ner fhccbfrqyl pbaarpgrq gb 'ohc freire' gung gur pnyyre # fgnegrq sbe hf (bsgra ba gur bgure raq bs na ffu ghaary), fb jr qba'g jnag # gb zvfhfr gurz. Zbir gurz bhg bs gur jnl, gura ercynpr fgqbhg jvgu # n cbvagre gb fgqree va pnfr bhe fhopbzznaq jnagf gb qb fbzrguvat jvgu vg. # # Vg zvtug or avpr gb qb gur fnzr jvgu fgqva, ohg zl rkcrevzragf fubjrq gung # ffu frrzf gb znxr vgf puvyq'f fgqree n ernqnoyr-ohg-arire-ernqf-nalguvat # fbpxrg. Gurl ernyyl fubhyq unir hfrq fuhgqbja(FUHG_JE) ba gur bgure raq # bs vg, ohg cebonoyl qvqa'g. Naljnl, vg'f gbb zrffl, fb yrg'f whfg znxr fher # nalbar ernqvat sebz fgqva vf qvfnccbvagrq. 
# # (Lbh pna'g whfg yrnir fgqva/fgqbhg "abg bcra" ol pybfvat gur svyr # qrfpevcgbef. Gura gur arkg svyr gung bcraf vf nhgbzngvpnyyl nffvtarq 0 be 1, # naq crbcyr *gelvat* gb ernq/jevgr fgqva/fgqbhg trg fperjrq.) bf.qhc2(0, 3) bf.qhc2(1, 4) bf.qhc2(2, 1) sq = bf.bcra('/qri/ahyy', bf.B_EQBAYL) bf.qhc2(sq, 0) bf.pybfr(sq) bf.raiveba['OHC_FREIRE_ERIREFR'] = urycref.ubfganzr() bf.rkrpic(neti[0], neti) flf.rkvg(99) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo, fhocebprff, gvzr sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * cne2_bx = 0 ahyys = bcra('/qri/ahyy') qrs qroht(f): vs bcg.ireobfr: ybt(f) qrs eha(neti): # ng yrnfg va clguba 2.5, hfvat "fgqbhg=2" be "fgqbhg=flf.fgqree" orybj # qbrfa'g npghnyyl jbex, orpnhfr fhocebprff pybfrf sq #2 evtug orsber # rkrpvat sbe fbzr ernfba. Fb jr jbex nebhaq vg ol qhcyvpngvat gur sq # svefg. sq = bf.qhc(2) # pbcl fgqree gel: c = fhocebprff.Cbcra(neti, fgqbhg=sq, pybfr_sqf=Snyfr) erghea c.jnvg() svanyyl: bf.pybfr(sq) qrs cne2_frghc(): tybony cne2_bx ei = 1 gel: c = fhocebprff.Cbcra(['cne2', '--uryc'], fgqbhg=ahyys, fgqree=ahyys, fgqva=ahyys) ei = c.jnvg() rkprcg BFReebe: ybt('sfpx: jneavat: cne2 abg sbhaq; qvfnoyvat erpbirel srngherf.\a') ryfr: cne2_bx = 1 qrs cnei(yiy): vs bcg.ireobfr >= yiy: vs vfggl: erghea [] ryfr: erghea ['-d'] ryfr: erghea ['-dd'] qrs cne2_trarengr(onfr): erghea eha(['cne2', 'perngr', '-a1', '-p200'] + cnei(2) + ['--', onfr, onfr+'.cnpx', onfr+'.vqk']) qrs cne2_irevsl(onfr): erghea eha(['cne2', 'irevsl'] + cnei(3) + ['--', onfr]) qrs cne2_ercnve(onfr): erghea eha(['cne2', 'ercnve'] + cnei(2) + ['--', onfr]) qrs dhvpx_irevsl(onfr): s = bcra(onfr + '.cnpx', 'eo') s.frrx(-20, 2) jnagfhz = s.ernq(20) nffreg(yra(jnagfhz) == 20) s.frrx(0) fhz = Fun1() sbe o va puhaxlernqre(s, bf.sfgng(s.svyrab()).fg_fvmr - 20): fhz.hcqngr(o) vs fhz.qvtrfg() != jnagfhz: envfr InyhrReebe('rkcrpgrq %e, tbg %e' % (jnagfhz.rapbqr('urk'), fhz.urkqvtrfg())) qrs tvg_irevsl(onfr): vs bcg.dhvpx: gel: dhvpx_irevsl(onfr) rkprcg Rkprcgvba, r: 
qroht('reebe: %f\a' % r) erghea 1 erghea 0 ryfr: erghea eha(['tvg', 'irevsl-cnpx', '--', onfr]) qrs qb_cnpx(onfr, ynfg): pbqr = 0 vs cne2_bx naq cne2_rkvfgf naq (bcg.ercnve be abg bcg.trarengr): ierfhyg = cne2_irevsl(onfr) vs ierfhyg != 0: vs bcg.ercnve: eerfhyg = cne2_ercnve(onfr) vs eerfhyg != 0: cevag '%f cne2 ercnve: snvyrq (%q)' % (ynfg, eerfhyg) pbqr = eerfhyg ryfr: cevag '%f cne2 ercnve: fhpprrqrq (0)' % ynfg pbqr = 100 ryfr: cevag '%f cne2 irevsl: snvyrq (%q)' % (ynfg, ierfhyg) pbqr = ierfhyg ryfr: cevag '%f bx' % ynfg ryvs abg bcg.trarengr be (cne2_bx naq abg cne2_rkvfgf): terfhyg = tvg_irevsl(onfr) vs terfhyg != 0: cevag '%f tvg irevsl: snvyrq (%q)' % (ynfg, terfhyg) pbqr = terfhyg ryfr: vs cne2_bx naq bcg.trarengr: cerfhyg = cne2_trarengr(onfr) vs cerfhyg != 0: cevag '%f cne2 perngr: snvyrq (%q)' % (ynfg, cerfhyg) pbqr = cerfhyg ryfr: cevag '%f bx' % ynfg ryfr: cevag '%f bx' % ynfg ryfr: nffreg(bcg.trarengr naq (abg cne2_bx be cne2_rkvfgf)) qroht(' fxvccrq: cne2 svyr nyernql trarengrq.\a') erghea pbqr bcgfcrp = """ ohc sfpx [bcgvbaf...] [svyranzrf...] -- e,ercnve nggrzcg gb ercnve reebef hfvat cne2 (qnatrebhf!) 
t,trarengr trarengr nhgb-ercnve vasbezngvba hfvat cne2 i,ireobfr vapernfr ireobfvgl (pna or hfrq zber guna bapr) dhvpx whfg purpx cnpx fun1fhz, qba'g hfr tvg irevsl-cnpx w,wbof= eha 'a' wbof va cnenyyry cne2-bx vzzrqvngryl erghea 0 vs cne2 vf bx, 1 vs abg qvfnoyr-cne2 vtaber cne2 rira vs vg vf ninvynoyr """ b = bcgvbaf.Bcgvbaf('ohc sfpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) cne2_frghc() vs bcg.cne2_bx: vs cne2_bx: flf.rkvg(0) # 'gehr' va fu ryfr: flf.rkvg(1) vs bcg.qvfnoyr_cne2: cne2_bx = 0 tvg.purpx_ercb_be_qvr() vs abg rkgen: qroht('sfpx: Ab svyranzrf tvira: purpxvat nyy cnpxf.\a') rkgen = tybo.tybo(tvg.ercb('bowrpgf/cnpx/*.cnpx')) pbqr = 0 pbhag = 0 bhgfgnaqvat = {} sbe anzr va rkgen: vs anzr.raqfjvgu('.cnpx'): onfr = anzr[:-5] ryvs anzr.raqfjvgu('.vqk'): onfr = anzr[:-4] ryvs anzr.raqfjvgu('.cne2'): onfr = anzr[:-5] ryvs bf.cngu.rkvfgf(anzr + '.cnpx'): onfr = anzr ryfr: envfr Rkprcgvba('%f vf abg n cnpx svyr!' % anzr) (qve,ynfg) = bf.cngu.fcyvg(onfr) cne2_rkvfgf = bf.cngu.rkvfgf(onfr + '.cne2') vs cne2_rkvfgf naq bf.fgng(onfr + '.cne2').fg_fvmr == 0: cne2_rkvfgf = 0 flf.fgqbhg.syhfu() qroht('sfpx: purpxvat %f (%f)\a' % (ynfg, cne2_bx naq cne2_rkvfgf naq 'cne2' be 'tvg')) vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.wbof: ap = qb_cnpx(onfr, ynfg) pbqr = pbqr be ap pbhag += 1 ryfr: juvyr yra(bhgfgnaqvat) >= bcg.wbof: (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 cvq = bf.sbex() vs cvq: # cnerag bhgfgnaqvat[cvq] = 1 ryfr: # puvyq gel: flf.rkvg(qb_cnpx(onfr, ynfg)) rkprcg Rkprcgvba, r: ybt('rkprcgvba: %e\a' % r) flf.rkvg(99) juvyr yra(bhgfgnaqvat): (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.ireobfr naq vfggl: ybt('sfpx qbar. 
\a') flf.rkvg(pbqr) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg, trgbcg, fhocebprff, fvtany sebz ohc vzcbeg bcgvbaf, ffu sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc eonpxhc vaqrk ... ohc eonpxhc fnir ... ohc eonpxhc fcyvg ... """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc', bcgfcrp, bcgshap=trgbcg.trgbcg) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) < 2: b.sngny('nethzragf rkcrpgrq') pynff FvtRkprcgvba(Rkprcgvba): qrs __vavg__(frys, fvtahz): frys.fvtahz = fvtahz Rkprcgvba.__vavg__(frys, 'fvtany %q erprvirq' % fvtahz) qrs unaqyre(fvtahz, senzr): envfr FvtRkprcgvba(fvtahz) fvtany.fvtany(fvtany.FVTGREZ, unaqyre) fvtany.fvtany(fvtany.FVTVAG, unaqyre) fc = Abar c = Abar erg = 99 gel: ubfganzr = rkgen[0] neti = rkgen[1:] c = ffu.pbaarpg(ubfganzr, 'eonpxhc-freire') netif = '\0'.wbva(['ohc'] + neti) c.fgqva.jevgr(fgehpg.cnpx('!V', yra(netif)) + netif) c.fgqva.syhfu() znva_rkr = bf.raiveba.trg('OHC_ZNVA_RKR') be flf.neti[0] fc = fhocebprff.Cbcra([znva_rkr, 'freire'], fgqva=c.fgqbhg, fgqbhg=c.fgqva) c.fgqva.pybfr() c.fgqbhg.pybfr() svanyyl: juvyr 1: # vs jr trg n fvtany juvyr jnvgvat, jr unir gb xrrc jnvgvat, whfg # va pnfr bhe puvyq qbrfa'g qvr. 
gel: erg = c.jnvg() fc.jnvg() oernx rkprcg FvtRkprcgvba, r: ybt('\aohc eonpxhc: %f\a' % r) bf.xvyy(c.cvq, r.fvtahz) erg = 84 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc arjyvare """ b = bcgvbaf.Bcgvbaf('ohc arjyvare', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") e = er.pbzcvyr(e'([\e\a])') ynfgyra = 0 nyy = '' juvyr 1: y = e.fcyvg(nyy, 1) vs yra(y) <= 1: gel: o = bf.ernq(flf.fgqva.svyrab(), 4096) rkprcg XrlobneqVagreehcg: oernx vs abg o: oernx nyy += o ryfr: nffreg(yra(y) == 3) (yvar, fcyvgpune, nyy) = y #fcyvgpune = '\a' flf.fgqbhg.jevgr('%-*f%f' % (ynfgyra, yvar, fcyvgpune)) vs fcyvgpune == '\e': ynfgyra = yra(yvar) ryfr: ynfgyra = 0 flf.fgqbhg.syhfu() vs ynfgyra be nyy: flf.fgqbhg.jevgr('%-*f\a' % (ynfgyra, nyy)) #!/hfe/ova/rai clguba vzcbeg flf sebz ohc vzcbeg bcgvbaf, tvg, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc znetva """ b = bcgvbaf.Bcgvbaf('ohc znetva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") tvg.purpx_ercb_be_qvr() #tvg.vtaber_zvqk = 1 zv = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) ynfg = '\0'*20 ybatzngpu = 0 sbe v va zv: vs v == ynfg: pbagvahr #nffreg(fge(v) >= ynfg) cz = _unfufcyvg.ovgzngpu(ynfg, v) ybatzngpu = znk(ybatzngpu, cz) ynfg = v cevag ybatzngpu bup-0.33.3/test/sampledata/y/text000066400000000000000000000000471454333004200166360ustar00rootroot00000000000000this is a text file. See me be texty! 
bup-0.33.3/test/testfile1000066400000000000000000004657101454333004200152030ustar00rootroot00000000000000#!/hfe/ova/rai clguba sebz ohc vzcbeg bcgvbaf, qerphefr sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc qerphefr -- k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf d,dhvrg qba'g npghnyyl cevag svyranzrf cebsvyr eha haqre gur clguba cebsvyre """ b = bcgvbaf.Bcgvbaf('ohc qerphefr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar svyranzr rkcrpgrq") vg = qerphefr.erphefvir_qveyvfg(rkgen, bcg.kqri) vs bcg.cebsvyr: vzcbeg pCebsvyr qrs qb_vg(): sbe v va vg: cnff pCebsvyr.eha('qb_vg()') ryfr: vs bcg.dhvrg: sbe v va vg: cnff ryfr: sbe (anzr,fg) va vg: cevag anzr vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc fcyvg [-gpo] [-a anzr] [--orapu] [svyranzrf...] 
-- e,erzbgr= erzbgr ercbfvgbel cngu o,oybof bhgchg n frevrf bs oybo vqf g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) A,abbc qba'g npghnyyl fnir gur qngn naljurer d,dhvrg qba'g cevag cebterff zrffntrf i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) pbcl whfg pbcl vachg gb bhgchg, unfufcyvggvat nybat gur jnl orapu cevag orapuznex gvzvatf gb fgqree znk-cnpx-fvmr= znkvzhz olgrf va n fvatyr cnpx znk-cnpx-bowrpgf= znkvzhz ahzore bs bowrpgf va n fvatyr cnpx snabhg= znkvzhz ahzore bs oybof va n fvatyr gerr """ b = bcgvbaf.Bcgvbaf('ohc fcyvg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr be bcg.abbc be bcg.pbcl): b.sngny("hfr bar be zber bs -o, -g, -p, -a, -A, --pbcl") vs (bcg.abbc be bcg.pbcl) naq (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny('-A vf vapbzcngvoyr jvgu -o, -g, -p, -a') vs bcg.ireobfr >= 2: tvg.ireobfr = bcg.ireobfr - 1 bcg.orapu = 1 vs bcg.znk_cnpx_fvmr: unfufcyvg.znk_cnpx_fvmr = cnefr_ahz(bcg.znk_cnpx_fvmr) vs bcg.znk_cnpx_bowrpgf: unfufcyvg.znk_cnpx_bowrpgf = cnefr_ahz(bcg.znk_cnpx_bowrpgf) vs bcg.snabhg: unfufcyvg.snabhg = cnefr_ahz(bcg.snabhg) vs bcg.oybof: unfufcyvg.snabhg = 0 vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") fgneg_gvzr = gvzr.gvzr() ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.abbc be bcg.pbcl: pyv = j = byqers = Abar ryvs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() svyrf = rkgen naq (bcra(sa) sbe sa va rkgen) be [flf.fgqva] vs j: funyvfg = unfufcyvg.fcyvg_gb_funyvfg(j, svyrf) gerr = j.arj_gerr(funyvfg) ryfr: ynfg = 0 sbe (oybo, ovgf) va unfufcyvg.unfufcyvg_vgre(svyrf): unfufcyvg.gbgny_fcyvg += 
yra(oybo) vs bcg.pbcl: flf.fgqbhg.jevgr(fge(oybo)) zrtf = unfufcyvg.gbgny_fcyvg/1024/1024 vs abg bcg.dhvrg naq ynfg != zrtf: cebterff('%q Zolgrf ernq\e' % zrtf) ynfg = zrtf cebterff('%q Zolgrf ernq, qbar.\a' % zrtf) vs bcg.ireobfr: ybt('\a') vs bcg.oybof: sbe (zbqr,anzr,ova) va funyvfg: cevag ova.rapbqr('urk') vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fcyvg\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') vs j: j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() frpf = gvzr.gvzr() - fgneg_gvzr fvmr = unfufcyvg.gbgny_fcyvg vs bcg.orapu: ybt('\aohc: %.2sxolgrf va %.2s frpf = %.2s xolgrf/frp\a' % (fvmr/1024., frpf, fvmr/1024./frpf)) #!/hfe/ova/rai clguba vzcbeg flf, er, fgehpg, zznc sebz ohc vzcbeg tvg, bcgvbaf sebz ohc.urycref vzcbeg * qrs f_sebz_olgrf(olgrf): pyvfg = [pue(o) sbe o va olgrf] erghea ''.wbva(pyvfg) qrs ercbeg(pbhag): svryqf = ['IzFvmr', 'IzEFF', 'IzQngn', 'IzFgx'] q = {} sbe yvar va bcra('/cebp/frys/fgnghf').ernqyvarf(): y = er.fcyvg(e':\f*', yvar.fgevc(), 1) q[y[0]] = y[1] vs pbhag >= 0: r1 = pbhag svryqf = [q[x] sbe x va svryqf] ryfr: r1 = '' cevag ('%9f ' + ('%10f ' * yra(svryqf))) % ghcyr([r1] + svryqf) flf.fgqbhg.syhfu() bcgfcrp = """ ohc zrzgrfg [-a ryrzragf] [-p plpyrf] -- a,ahzore= ahzore bs bowrpgf cre plpyr p,plpyrf= ahzore bs plpyrf gb eha vtaber-zvqk vtaber .zvqk svyrf, hfr bayl .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zrzgrfg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') tvg.vtaber_zvqk = bcg.vtaber_zvqk tvg.purpx_ercb_be_qvr() z = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) plpyrf = bcg.plpyrf be 100 ahzore = bcg.ahzore be 10000 ercbeg(-1) s = bcra('/qri/henaqbz') n = zznc.zznc(-1, 20) ercbeg(0) sbe p va 
kenatr(plpyrf): sbe a va kenatr(ahzore): o = s.ernq(3) vs 0: olgrf = yvfg(fgehpg.hacnpx('!OOO', o)) + [0]*17 olgrf[2] &= 0ks0 ova = fgehpg.cnpx('!20f', f_sebz_olgrf(olgrf)) ryfr: n[0:2] = o[0:2] n[2] = pue(beq(o[2]) & 0ks0) ova = fge(n[0:20]) #cevag ova.rapbqr('urk') z.rkvfgf(ova) ercbeg((p+1)*ahzore) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * qrs cevag_abqr(grkg, a): cersvk = '' vs bcg.unfu: cersvk += "%f " % a.unfu.rapbqr('urk') vs fgng.F_VFQVE(a.zbqr): cevag '%f%f/' % (cersvk, grkg) ryvs fgng.F_VFYAX(a.zbqr): cevag '%f%f@' % (cersvk, grkg) ryfr: cevag '%f%f' % (cersvk, grkg) bcgfcrp = """ ohc yf -- f,unfu fubj unfu sbe rnpu svyr """ b = bcgvbaf.Bcgvbaf('ohc yf', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) vs abg rkgen: rkgen = ['/'] erg = 0 sbe q va rkgen: gel: a = gbc.yerfbyir(q) vs fgng.F_VFQVE(a.zbqr): sbe fho va a: cevag_abqr(fho.anzr, fho) ryfr: cevag_abqr(q, a) rkprcg isf.AbqrReebe, r: ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er, fgng, ernqyvar, sazngpu sebz ohc vzcbeg bcgvbaf, tvg, fudhbgr, isf sebz ohc.urycref vzcbeg * qrs abqr_anzr(grkg, a): vs fgng.F_VFQVE(a.zbqr): erghea '%f/' % grkg ryvs fgng.F_VFYAX(a.zbqr): erghea '%f@' % grkg ryfr: erghea '%f' % grkg qrs qb_yf(cngu, a): y = [] vs fgng.F_VFQVE(a.zbqr): sbe fho va a: y.nccraq(abqr_anzr(fho.anzr, fho)) ryfr: y.nccraq(abqr_anzr(cngu, a)) cevag pbyhzangr(y, '') qrs jevgr_gb_svyr(vas, bhgs): sbe oybo va puhaxlernqre(vas): bhgs.jevgr(oybo) qrs vachgvgre(): vs bf.vfnggl(flf.fgqva.svyrab()): juvyr 1: gel: lvryq enj_vachg('ohc> ') rkprcg RBSReebe: oernx ryfr: sbe yvar va flf.fgqva: lvryq yvar qrs _pbzcyrgre_trg_fhof(yvar): (dglcr, ynfgjbeq) = fudhbgr.hasvavfurq_jbeq(yvar) (qve,anzr) = bf.cngu.fcyvg(ynfgjbeq) #ybt('\apbzcyrgre: %e %e %e\a' % (dglcr, ynfgjbeq, grkg)) a = cjq.erfbyir(qve) fhof = yvfg(svygre(ynzoqn k: k.anzr.fgnegfjvgu(anzr), a.fhof())) 
erghea (qve, anzr, dglcr, ynfgjbeq, fhof) _ynfg_yvar = Abar _ynfg_erf = Abar qrs pbzcyrgre(grkg, fgngr): tybony _ynfg_yvar tybony _ynfg_erf gel: yvar = ernqyvar.trg_yvar_ohssre()[:ernqyvar.trg_raqvqk()] vs _ynfg_yvar != yvar: _ynfg_erf = _pbzcyrgre_trg_fhof(yvar) _ynfg_yvar = yvar (qve, anzr, dglcr, ynfgjbeq, fhof) = _ynfg_erf vs fgngr < yra(fhof): fa = fhof[fgngr] fa1 = fa.erfbyir('') # qrers flzyvaxf shyyanzr = bf.cngu.wbva(qve, fa.anzr) vs fgng.F_VFQVE(fa1.zbqr): erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr+'/', grezvangr=Snyfr) ryfr: erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr, grezvangr=Gehr) + ' ' erghea grkg + erg rkprcg Rkprcgvba, r: ybt('\areebe va pbzcyrgvba: %f\a' % r) bcgfcrp = """ ohc sgc """ b = bcgvbaf.Bcgvbaf('ohc sgc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) cjq = gbc vs rkgen: yvarf = rkgen ryfr: ernqyvar.frg_pbzcyrgre_qryvzf(' \g\a\e/') ernqyvar.frg_pbzcyrgre(pbzcyrgre) ernqyvar.cnefr_naq_ovaq("gno: pbzcyrgr") yvarf = vachgvgre() sbe yvar va yvarf: vs abg yvar.fgevc(): pbagvahr jbeqf = [jbeq sbe (jbeqfgneg,jbeq) va fudhbgr.dhbgrfcyvg(yvar)] pzq = jbeqf[0].ybjre() #ybt('rkrphgr: %e %e\a' % (pzq, cnez)) gel: vs pzq == 'yf': sbe cnez va (jbeqf[1:] be ['.']): qb_yf(cnez, cjq.erfbyir(cnez)) ryvs pzq == 'pq': sbe cnez va jbeqf[1:]: cjq = cjq.erfbyir(cnez) ryvs pzq == 'cjq': cevag cjq.shyyanzr() ryvs pzq == 'png': sbe cnez va jbeqf[1:]: jevgr_gb_svyr(cjq.erfbyir(cnez).bcra(), flf.fgqbhg) ryvs pzq == 'trg': vs yra(jbeqf) abg va [2,3]: envfr Rkprcgvba('Hfntr: trg [ybpnyanzr]') eanzr = jbeqf[1] (qve,onfr) = bf.cngu.fcyvg(eanzr) yanzr = yra(jbeqf)>2 naq jbeqf[2] be onfr vas = cjq.erfbyir(eanzr).bcra() ybt('Fnivat %e\a' % yanzr) jevgr_gb_svyr(vas, bcra(yanzr, 'jo')) ryvs pzq == 'ztrg': sbe cnez va jbeqf[1:]: (qve,onfr) = bf.cngu.fcyvg(cnez) sbe a va cjq.erfbyir(qve).fhof(): vs sazngpu.sazngpu(a.anzr, onfr): gel: ybt('Fnivat %e\a' % a.anzr) vas = a.bcra() bhgs = bcra(a.anzr, 'jo') 
jevgr_gb_svyr(vas, bhgs) bhgs.pybfr() rkprcg Rkprcgvba, r: ybt(' reebe: %f\a' % r) ryvs pzq == 'uryc' be pzq == '?': ybt('Pbzznaqf: yf pq cjq png trg ztrg uryc dhvg\a') ryvs pzq == 'dhvg' be pzq == 'rkvg' be pzq == 'olr': oernx ryfr: envfr Rkprcgvba('ab fhpu pbzznaq %e' % pzq) rkprcg Rkprcgvba, r: ybt('reebe: %f\a' % r) #envfr #!/hfe/ova/rai clguba vzcbeg flf, zznc sebz ohc vzcbeg bcgvbaf, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc enaqbz [-F frrq] -- F,frrq= bcgvbany enaqbz ahzore frrq (qrsnhyg 1) s,sbepr cevag enaqbz qngn gb fgqbhg rira vs vg'f n ggl """ b = bcgvbaf.Bcgvbaf('ohc enaqbz', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") gbgny = cnefr_ahz(rkgen[0]) vs bcg.sbepr be (abg bf.vfnggl(1) naq abg ngbv(bf.raiveba.trg('OHC_SBEPR_GGL')) & 1): _unfufcyvg.jevgr_enaqbz(flf.fgqbhg.svyrab(), gbgny, bcg.frrq be 0) ryfr: ybt('reebe: abg jevgvat ovanel qngn gb n grezvany. Hfr -s gb sbepr.\a') flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc uryc """ b = bcgvbaf.Bcgvbaf('ohc uryc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) == 0: # gur jenccre cebtenz cebivqrf gur qrsnhyg hfntr fgevat bf.rkrpic(bf.raiveba['OHC_ZNVA_RKR'], ['ohc']) ryvs yra(rkgen) == 1: qbpanzr = (rkgen[0]=='ohc' naq 'ohc' be ('ohc-%f' % rkgen[0])) rkr = flf.neti[0] (rkrcngu, rkrsvyr) = bf.cngu.fcyvg(rkr) znacngu = bf.cngu.wbva(rkrcngu, '../Qbphzragngvba/' + qbpanzr + '.[1-9]') t = tybo.tybo(znacngu) vs t: bf.rkrpic('zna', ['zna', '-y', t[0]]) ryfr: bf.rkrpic('zna', ['zna', qbpanzr]) ryfr: b.sngny("rknpgyl bar pbzznaq anzr rkcrpgrq") #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng, reeab, shfr, er, gvzr, grzcsvyr sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * pynff Fgng(shfr.Fgng): qrs __vavg__(frys): frys.fg_zbqr = 0 frys.fg_vab = 0 frys.fg_qri = 0 frys.fg_ayvax = 0 frys.fg_hvq = 0 frys.fg_tvq = 0 frys.fg_fvmr = 0 frys.fg_ngvzr = 0 frys.fg_zgvzr 
= 0 frys.fg_pgvzr = 0 frys.fg_oybpxf = 0 frys.fg_oyxfvmr = 0 frys.fg_eqri = 0 pnpur = {} qrs pnpur_trg(gbc, cngu): cnegf = cngu.fcyvg('/') pnpur[('',)] = gbc p = Abar znk = yra(cnegf) #ybt('pnpur: %e\a' % pnpur.xrlf()) sbe v va enatr(znk): cer = cnegf[:znk-v] #ybt('pnpur gelvat: %e\a' % cer) p = pnpur.trg(ghcyr(cer)) vs p: erfg = cnegf[znk-v:] sbe e va erfg: #ybt('erfbyivat %e sebz %e\a' % (e, p.shyyanzr())) p = p.yerfbyir(e) xrl = ghcyr(cer + [e]) #ybt('fnivat: %e\a' % (xrl,)) pnpur[xrl] = p oernx nffreg(p) erghea p pynff OhcSf(shfr.Shfr): qrs __vavg__(frys, gbc): shfr.Shfr.__vavg__(frys) frys.gbc = gbc qrs trgngge(frys, cngu): ybt('--trgngge(%e)\a' % cngu) gel: abqr = pnpur_trg(frys.gbc, cngu) fg = Fgng() fg.fg_zbqr = abqr.zbqr fg.fg_ayvax = abqr.ayvaxf() fg.fg_fvmr = abqr.fvmr() fg.fg_zgvzr = abqr.zgvzr fg.fg_pgvzr = abqr.pgvzr fg.fg_ngvzr = abqr.ngvzr erghea fg rkprcg isf.AbFhpuSvyr: erghea -reeab.RABRAG qrs ernqqve(frys, cngu, bssfrg): ybt('--ernqqve(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) lvryq shfr.Qveragel('.') lvryq shfr.Qveragel('..') sbe fho va abqr.fhof(): lvryq shfr.Qveragel(fho.anzr) qrs ernqyvax(frys, cngu): ybt('--ernqyvax(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) erghea abqr.ernqyvax() qrs bcra(frys, cngu, syntf): ybt('--bcra(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) nppzbqr = bf.B_EQBAYL | bf.B_JEBAYL | bf.B_EQJE vs (syntf & nppzbqr) != bf.B_EQBAYL: erghea -reeab.RNPPRF abqr.bcra() qrs eryrnfr(frys, cngu, syntf): ybt('--eryrnfr(%e)\a' % cngu) qrs ernq(frys, cngu, fvmr, bssfrg): ybt('--ernq(%e)\a' % cngu) a = pnpur_trg(frys.gbc, cngu) b = a.bcra() b.frrx(bssfrg) erghea b.ernq(fvmr) vs abg unfngge(shfr, '__irefvba__'): envfr EhagvzrReebe, "lbhe shfr zbqhyr vf gbb byq sbe shfr.__irefvba__" shfr.shfr_clguba_ncv = (0, 2) bcgfcrp = """ ohc shfr [-q] [-s] -- q,qroht vapernfr qroht yriry s,sbertebhaq eha va sbertebhaq """ b = bcgvbaf.Bcgvbaf('ohc shfr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: 
b.sngny("rknpgyl bar nethzrag rkcrpgrq") tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) s = OhcSf(gbc) s.shfr_netf.zbhagcbvag = rkgen[0] vs bcg.qroht: s.shfr_netf.nqq('qroht') vs bcg.sbertebhaq: s.shfr_netf.frgzbq('sbertebhaq') cevag s.zhygvguernqrq s.zhygvguernqrq = Snyfr s.znva() #!/hfe/ova/rai clguba sebz ohc vzcbeg tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ [OHC_QVE=...] ohc vavg [-e ubfg:cngu] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc vavg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") vs bcg.erzbgr: tvg.vavg_ercb() # ybpny ercb tvg.purpx_ercb_be_qvr() pyv = pyvrag.Pyvrag(bcg.erzbgr, perngr=Gehr) pyv.pybfr() ryfr: tvg.vavg_ercb() #!/hfe/ova/rai clguba vzcbeg flf, zngu, fgehpg, tybo sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * CNTR_FVMR=4096 FUN_CRE_CNTR=CNTR_FVMR/200. qrs zretr(vqkyvfg, ovgf, gnoyr): pbhag = 0 sbe r va tvg.vqkzretr(vqkyvfg): pbhag += 1 cersvk = tvg.rkgenpg_ovgf(r, ovgf) gnoyr[cersvk] = pbhag lvryq r qrs qb_zvqk(bhgqve, bhgsvyranzr, vasvyranzrf): vs abg bhgsvyranzr: nffreg(bhgqve) fhz = Fun1('\0'.wbva(vasvyranzrf)).urkqvtrfg() bhgsvyranzr = '%f/zvqk-%f.zvqk' % (bhgqve, fhz) vac = [] gbgny = 0 sbe anzr va vasvyranzrf: vk = tvg.CnpxVqk(anzr) vac.nccraq(vk) gbgny += yra(vk) ybt('Zretvat %q vaqrkrf (%q bowrpgf).\a' % (yra(vasvyranzrf), gbgny)) vs (abg bcg.sbepr naq (gbgny < 1024 naq yra(vasvyranzrf) < 3)) \ be (bcg.sbepr naq abg gbgny): ybt('zvqk: abguvat gb qb.\a') erghea cntrf = vag(gbgny/FUN_CRE_CNTR) be 1 ovgf = vag(zngu.prvy(zngu.ybt(cntrf, 2))) ragevrf = 2**ovgf ybt('Gnoyr fvmr: %q (%q ovgf)\a' % (ragevrf*4, ovgf)) gnoyr = [0]*ragevrf gel: bf.hayvax(bhgsvyranzr) rkprcg BFReebe: cnff s = bcra(bhgsvyranzr + '.gzc', 'j+') s.jevgr('ZVQK\0\0\0\2') s.jevgr(fgehpg.cnpx('!V', ovgf)) nffreg(s.gryy() == 12) s.jevgr('\0'*4*ragevrf) sbe r va zretr(vac, ovgf, gnoyr): s.jevgr(r) s.jevgr('\0'.wbva(bf.cngu.onfranzr(c) sbe c va vasvyranzrf)) s.frrx(12) 
s.jevgr(fgehpg.cnpx('!%qV' % ragevrf, *gnoyr)) s.pybfr() bf.eranzr(bhgsvyranzr + '.gzc', bhgsvyranzr) # guvf vf whfg sbe grfgvat vs 0: c = tvg.CnpxZvqk(bhgsvyranzr) nffreg(yra(c.vqkanzrf) == yra(vasvyranzrf)) cevag c.vqkanzrf nffreg(yra(c) == gbgny) cv = vgre(c) sbe v va zretr(vac, gbgny, ovgf, gnoyr): nffreg(v == cv.arkg()) nffreg(c.rkvfgf(v)) cevag bhgsvyranzr bcgfcrp = """ ohc zvqk [bcgvbaf...] -- b,bhgchg= bhgchg zvqk svyranzr (qrsnhyg: nhgb-trarengrq) n,nhgb nhgbzngvpnyyl perngr .zvqk sebz nal havaqrkrq .vqk svyrf s,sbepr nhgbzngvpnyyl perngr .zvqk sebz *nyy* .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zvqk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen naq (bcg.nhgb be bcg.sbepr): b.sngny("lbh pna'g hfr -s/-n naq nyfb cebivqr svyranzrf") tvg.purpx_ercb_be_qvr() vs rkgen: qb_zvqk(tvg.ercb('bowrpgf/cnpx'), bcg.bhgchg, rkgen) ryvs bcg.nhgb be bcg.sbepr: cnguf = [tvg.ercb('bowrpgf/cnpx')] cnguf += tybo.tybo(tvg.ercb('vaqrk-pnpur/*/.')) sbe cngu va cnguf: ybt('zvqk: fpnaavat %f\a' % cngu) vs bcg.sbepr: qb_zvqk(cngu, bcg.bhgchg, tybo.tybo('%f/*.vqk' % cngu)) ryvs bcg.nhgb: z = tvg.CnpxVqkYvfg(cngu) arrqrq = {} sbe cnpx va z.cnpxf: # bayl .vqk svyrf jvgubhg n .zvqk ner bcra vs cnpx.anzr.raqfjvgu('.vqk'): arrqrq[cnpx.anzr] = 1 qry z qb_zvqk(cngu, bcg.bhgchg, arrqrq.xrlf()) ybt('\a') ryfr: b.sngny("lbh zhfg hfr -s be -n be cebivqr vachg svyranzrf") #!/hfe/ova/rai clguba vzcbeg flf, bf, enaqbz sebz ohc vzcbeg bcgvbaf sebz ohc.urycref vzcbeg * qrs enaqoybpx(a): y = [] sbe v va kenatr(a): y.nccraq(pue(enaqbz.enaqenatr(0,256))) erghea ''.wbva(y) bcgfcrp = """ ohc qnzntr [-a pbhag] [-f znkfvmr] [-F frrq] -- JNEAVAT: GUVF PBZZNAQ VF RKGERZRYL QNATREBHF a,ahz= ahzore bs oybpxf gb qnzntr f,fvmr= znkvzhz fvmr bs rnpu qnzntrq oybpx creprag= znkvzhz fvmr bs rnpu qnzntrq oybpx (nf n creprag bs ragver svyr) rdhny fcernq qnzntr rirayl guebhtubhg gur svyr F,frrq= enaqbz ahzore frrq (sbe ercrngnoyr grfgf) """ b = bcgvbaf.Bcgvbaf('ohc qnzntr', bcgfcrp) (bcg, syntf, rkgen) 
= b.cnefr(flf.neti[1:]) vs abg rkgen: b.sngny('svyranzrf rkcrpgrq') vs bcg.frrq != Abar: enaqbz.frrq(bcg.frrq) sbe anzr va rkgen: ybt('Qnzntvat "%f"...\a' % anzr) s = bcra(anzr, 'e+o') fg = bf.sfgng(s.svyrab()) fvmr = fg.fg_fvmr vs bcg.creprag be bcg.fvmr: zf1 = vag(sybng(bcg.creprag be 0)/100.0*fvmr) be fvmr zf2 = bcg.fvmr be fvmr znkfvmr = zva(zf1, zf2) ryfr: znkfvmr = 1 puhaxf = bcg.ahz be 10 puhaxfvmr = fvmr/puhaxf sbe e va enatr(puhaxf): fm = enaqbz.enaqenatr(1, znkfvmr+1) vs fm > fvmr: fm = fvmr vs bcg.rdhny: bsf = e*puhaxfvmr ryfr: bsf = enaqbz.enaqenatr(0, fvmr - fm + 1) ybt(' %6q olgrf ng %q\a' % (fm, bsf)) s.frrx(bsf) s.jevgr(enaqoybpx(fm)) s.pybfr() #!/hfe/ova/rai clguba vzcbeg flf, fgehpg, zznc sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * fhfcraqrq_j = Abar qrs vavg_qve(pbaa, net): tvg.vavg_ercb(net) ybt('ohc freire: ohcqve vavgvnyvmrq: %e\a' % tvg.ercbqve) pbaa.bx() qrs frg_qve(pbaa, net): tvg.purpx_ercb_be_qvr(net) ybt('ohc freire: ohcqve vf %e\a' % tvg.ercbqve) pbaa.bx() qrs yvfg_vaqrkrf(pbaa, whax): tvg.purpx_ercb_be_qvr() sbe s va bf.yvfgqve(tvg.ercb('bowrpgf/cnpx')): vs s.raqfjvgu('.vqk'): pbaa.jevgr('%f\a' % s) pbaa.bx() qrs fraq_vaqrk(pbaa, anzr): tvg.purpx_ercb_be_qvr() nffreg(anzr.svaq('/') < 0) nffreg(anzr.raqfjvgu('.vqk')) vqk = tvg.CnpxVqk(tvg.ercb('bowrpgf/cnpx/%f' % anzr)) pbaa.jevgr(fgehpg.cnpx('!V', yra(vqk.znc))) pbaa.jevgr(vqk.znc) pbaa.bx() qrs erprvir_bowrpgf(pbaa, whax): tybony fhfcraqrq_j tvg.purpx_ercb_be_qvr() fhttrfgrq = {} vs fhfcraqrq_j: j = fhfcraqrq_j fhfcraqrq_j = Abar ryfr: j = tvg.CnpxJevgre() juvyr 1: af = pbaa.ernq(4) vs abg af: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq yratgu urnqre, tbg RBS\a') a = fgehpg.hacnpx('!V', af)[0] #ybt('rkcrpgvat %q olgrf\a' % a) vs abg a: ybt('ohc freire: erprvirq %q bowrpg%f.\a' % (j.pbhag, j.pbhag!=1 naq "f" be '')) shyycngu = j.pybfr() vs shyycngu: (qve, anzr) = bf.cngu.fcyvg(shyycngu) pbaa.jevgr('%f.vqk\a' % anzr) pbaa.bx() erghea ryvs a == 0kssssssss: ybt('ohc 
freire: erprvir-bowrpgf fhfcraqrq.\a') fhfcraqrq_j = j pbaa.bx() erghea ohs = pbaa.ernq(a) # bowrpg fvmrf va ohc ner ernfbanoyl fznyy #ybt('ernq %q olgrf\a' % a) vs yra(ohs) < a: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq %q olgrf, tbg %q\a' % (a, yra(ohs))) (glcr, pbagrag) = tvg._qrpbqr_cnpxbow(ohs) fun = tvg.pnyp_unfu(glcr, pbagrag) byqcnpx = j.rkvfgf(fun) # SVKZR: jr bayl fhttrfg n fvatyr vaqrk cre plpyr, orpnhfr gur pyvrag # vf pheeragyl qhzo gb qbjaybnq zber guna bar cre plpyr naljnl. # Npghnyyl jr fubhyq svk gur pyvrag, ohg guvf vf n zvabe bcgvzvmngvba # ba gur freire fvqr. vs abg fhttrfgrq naq \ byqcnpx naq (byqcnpx == Gehr be byqcnpx.raqfjvgu('.zvqk')): # SVKZR: jr fubhyqa'g ernyyl unir gb xabj nobhg zvqk svyrf # ng guvf ynlre. Ohg rkvfgf() ba n zvqk qbrfa'g erghea gur # cnpxanzr (fvapr vg qbrfa'g xabj)... cebonoyl jr fubhyq whfg # svk gung qrsvpvrapl bs zvqk svyrf riraghnyyl, nygubhtu vg'yy # znxr gur svyrf ovttre. Guvf zrgubq vf pregnvayl abg irel # rssvpvrag. j.bowpnpur.erserfu(fxvc_zvqk = Gehr) byqcnpx = j.bowpnpur.rkvfgf(fun) ybt('arj fhttrfgvba: %e\a' % byqcnpx) nffreg(byqcnpx) nffreg(byqcnpx != Gehr) nffreg(abg byqcnpx.raqfjvgu('.zvqk')) j.bowpnpur.erserfu(fxvc_zvqk = Snyfr) vs abg fhttrfgrq naq byqcnpx: nffreg(byqcnpx.raqfjvgu('.vqk')) (qve,anzr) = bf.cngu.fcyvg(byqcnpx) vs abg (anzr va fhttrfgrq): ybt("ohc freire: fhttrfgvat vaqrk %f\a" % anzr) pbaa.jevgr('vaqrk %f\a' % anzr) fhttrfgrq[anzr] = 1 ryfr: j._enj_jevgr([ohs]) # ABGERNPURQ qrs ernq_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() e = tvg.ernq_ers(ersanzr) pbaa.jevgr('%f\a' % (e be '').rapbqr('urk')) pbaa.bx() qrs hcqngr_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() arjiny = pbaa.ernqyvar().fgevc() byqiny = pbaa.ernqyvar().fgevc() tvg.hcqngr_ers(ersanzr, arjiny.qrpbqr('urk'), byqiny.qrpbqr('urk')) pbaa.bx() qrs png(pbaa, vq): tvg.purpx_ercb_be_qvr() gel: sbe oybo va tvg.png(vq): pbaa.jevgr(fgehpg.cnpx('!V', yra(oybo))) pbaa.jevgr(oybo) rkprcg XrlReebe, r: ybt('freire: reebe: %f\a' % r) 
pbaa.jevgr('\0\0\0\0') pbaa.reebe(r) ryfr: pbaa.jevgr('\0\0\0\0') pbaa.bx() bcgfcrp = """ ohc freire """ b = bcgvbaf.Bcgvbaf('ohc freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') ybt('ohc freire: ernqvat sebz fgqva.\a') pbzznaqf = { 'vavg-qve': vavg_qve, 'frg-qve': frg_qve, 'yvfg-vaqrkrf': yvfg_vaqrkrf, 'fraq-vaqrk': fraq_vaqrk, 'erprvir-bowrpgf': erprvir_bowrpgf, 'ernq-ers': ernq_ers, 'hcqngr-ers': hcqngr_ers, 'png': png, } # SVKZR: guvf cebgbpby vf gbgnyyl ynzr naq abg ng nyy shgher-cebbs. # (Rfcrpvnyyl fvapr jr nobeg pbzcyrgryl nf fbba nf *nalguvat* onq unccraf) pbaa = Pbaa(flf.fgqva, flf.fgqbhg) ye = yvarernqre(pbaa) sbe _yvar va ye: yvar = _yvar.fgevc() vs abg yvar: pbagvahr ybt('ohc freire: pbzznaq: %e\a' % yvar) jbeqf = yvar.fcyvg(' ', 1) pzq = jbeqf[0] erfg = yra(jbeqf)>1 naq jbeqf[1] be '' vs pzq == 'dhvg': oernx ryfr: pzq = pbzznaqf.trg(pzq) vs pzq: pzq(pbaa, erfg) ryfr: envfr Rkprcgvba('haxabja freire pbzznaq: %e\a' % yvar) ybt('ohc freire: qbar\a') #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc wbva [-e ubfg:cngu] [ersf be unfurf...] 
-- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc wbva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg rkgen: rkgen = yvarernqre(flf.fgqva) erg = 0 vs bcg.erzbgr: pyv = pyvrag.Pyvrag(bcg.erzbgr) png = pyv.png ryfr: pc = tvg.PngCvcr() png = pc.wbva sbe vq va rkgen: gel: sbe oybo va png(vq): flf.fgqbhg.jevgr(oybo) rkprcg XrlReebe, r: flf.fgqbhg.syhfu() ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, er, reeab, fgng, gvzr, zngu sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, vaqrk, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc fnir [-gp] [-a anzr] -- e,erzbgr= erzbgr ercbfvgbel cngu g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) d,dhvrg qba'g fubj cebterff zrgre fznyyre= bayl onpx hc svyrf fznyyre guna a olgrf """ b = bcgvbaf.Bcgvbaf('ohc fnir', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny("hfr bar be zber bs -g, -p, -a") vs abg rkgen: b.sngny("ab svyranzrf tvira") bcg.cebterff = (vfggl naq abg bcg.dhvrg) bcg.fznyyre = cnefr_ahz(bcg.fznyyre be 0) vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() unaqyr_pgey_p() qrs rngfynfu(qve): vs qve.raqfjvgu('/'): erghea qve[:-1] ryfr: erghea qve cnegf = [''] funyvfgf = [[]] qrs _chfu(cneg): nffreg(cneg) cnegf.nccraq(cneg) funyvfgf.nccraq([]) qrs _cbc(sbepr_gerr): nffreg(yra(cnegf) >= 1) cneg = cnegf.cbc() funyvfg = funyvfgf.cbc() gerr = sbepr_gerr be j.arj_gerr(funyvfg) vs funyvfgf: 
funyvfgf[-1].nccraq(('40000', cneg, gerr)) ryfr: # guvf jnf gur gbcyriry, fb chg vg onpx sbe fnavgl funyvfgf.nccraq(funyvfg) erghea gerr ynfgerznva = Abar qrs cebterff_ercbeg(a): tybony pbhag, fhopbhag, ynfgerznva fhopbhag += a pp = pbhag + fhopbhag cpg = gbgny naq (pp*100.0/gbgny) be 0 abj = gvzr.gvzr() ryncfrq = abj - gfgneg xcf = ryncfrq naq vag(pp/1024./ryncfrq) xcf_senp = 10 ** vag(zngu.ybt(xcf+1, 10) - 1) xcf = vag(xcf/xcf_senp)*xcf_senp vs pp: erznva = ryncfrq*1.0/pp * (gbgny-pp) ryfr: erznva = 0.0 vs (ynfgerznva naq (erznva > ynfgerznva) naq ((erznva - ynfgerznva)/ynfgerznva < 0.05)): erznva = ynfgerznva ryfr: ynfgerznva = erznva ubhef = vag(erznva/60/60) zvaf = vag(erznva/60 - ubhef*60) frpf = vag(erznva - ubhef*60*60 - zvaf*60) vs ryncfrq < 30: erznvafge = '' xcffge = '' ryfr: xcffge = '%qx/f' % xcf vs ubhef: erznvafge = '%qu%qz' % (ubhef, zvaf) ryvs zvaf: erznvafge = '%qz%q' % (zvaf, frpf) ryfr: erznvafge = '%qf' % frpf cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf) %f %f\e' % (cpg, pp/1024, gbgny/1024, spbhag, sgbgny, erznvafge, xcffge)) e = vaqrk.Ernqre(tvg.ercb('ohcvaqrk')) qrs nyernql_fnirq(rag): erghea rag.vf_inyvq() naq j.rkvfgf(rag.fun) naq rag.fun qrs jnagerphefr_cer(rag): erghea abg nyernql_fnirq(rag) qrs jnagerphefr_qhevat(rag): erghea abg nyernql_fnirq(rag) be rag.fun_zvffvat() gbgny = sgbgny = 0 vs bcg.cebterff: sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_cer): vs abg (sgbgny % 10024): cebterff('Ernqvat vaqrk: %q\e' % sgbgny) rkvfgf = rag.rkvfgf() unfuinyvq = nyernql_fnirq(rag) rag.frg_fun_zvffvat(abg unfuinyvq) vs abg bcg.fznyyre be rag.fvmr < bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: gbgny += rag.fvmr sgbgny += 1 cebterff('Ernqvat vaqrk: %q, qbar.\a' % sgbgny) unfufcyvg.cebterff_pnyyonpx = cebterff_ercbeg gfgneg = gvzr.gvzr() pbhag = fhopbhag = spbhag = 0 ynfgfxvc_anzr = Abar ynfgqve = '' sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_qhevat): (qve, svyr) = bf.cngu.fcyvg(rag.anzr) rkvfgf = (rag.syntf & 
vaqrk.VK_RKVFGF) unfuinyvq = nyernql_fnirq(rag) jnfzvffvat = rag.fun_zvffvat() byqfvmr = rag.fvmr vs bcg.ireobfr: vs abg rkvfgf: fgnghf = 'Q' ryvs abg unfuinyvq: vs rag.fun == vaqrk.RZCGL_FUN: fgnghf = 'N' ryfr: fgnghf = 'Z' ryfr: fgnghf = ' ' vs bcg.ireobfr >= 2: ybt('%f %-70f\a' % (fgnghf, rag.anzr)) ryvs abg fgng.F_VFQVE(rag.zbqr) naq ynfgqve != qve: vs abg ynfgqve.fgnegfjvgu(qve): ybt('%f %-70f\a' % (fgnghf, bf.cngu.wbva(qve, ''))) ynfgqve = qve vs bcg.cebterff: cebterff_ercbeg(0) spbhag += 1 vs abg rkvfgf: pbagvahr vs bcg.fznyyre naq rag.fvmr >= bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: nqq_reebe('fxvccvat ynetr svyr "%f"' % rag.anzr) ynfgfxvc_anzr = rag.anzr pbagvahr nffreg(qve.fgnegfjvgu('/')) qvec = qve.fcyvg('/') juvyr cnegf > qvec: _cbc(sbepr_gerr = Abar) vs qve != '/': sbe cneg va qvec[yra(cnegf):]: _chfu(cneg) vs abg svyr: # ab svyranzr cbegvba zrnaf guvf vf n fhoqve. Ohg # fho/cneragqverpgbevrf nyernql unaqyrq va gur cbc/chfu() cneg nobir. byqgerr = nyernql_fnirq(rag) # znl or Abar arjgerr = _cbc(sbepr_gerr = byqgerr) vs abg byqgerr: vs ynfgfxvc_anzr naq ynfgfxvc_anzr.fgnegfjvgu(rag.anzr): rag.vainyvqngr() ryfr: rag.inyvqngr(040000, arjgerr) rag.ercnpx() vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr pbagvahr # vg'f abg n qverpgbel vq = Abar vs unfuinyvq: zbqr = '%b' % rag.tvgzbqr vq = rag.fun funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) ryfr: vs fgng.F_VFERT(rag.zbqr): gel: s = unfufcyvg.bcra_abngvzr(rag.anzr) rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = unfufcyvg.fcyvg_gb_oybo_be_gerr(j, [s]) ryfr: vs fgng.F_VFQVE(rag.zbqr): nffreg(0) # unaqyrq nobir ryvs fgng.F_VFYAX(rag.zbqr): gel: ey = bf.ernqyvax(rag.anzr) rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = ('120000', j.arj_oybo(ey)) ryfr: nqq_reebe(Rkprcgvba('fxvccvat fcrpvny svyr "%f"' % rag.anzr)) 
ynfgfxvc_anzr = rag.anzr vs vq: rag.inyvqngr(vag(zbqr, 8), vq) rag.ercnpx() funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr fhopbhag = 0 vs bcg.cebterff: cpg = gbgny naq pbhag*100.0/gbgny be 100 cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf), qbar. \a' % (cpg, pbhag/1024, gbgny/1024, spbhag, sgbgny)) juvyr yra(cnegf) > 1: _cbc(sbepr_gerr = Abar) nffreg(yra(funyvfgf) == 1) gerr = j.arj_gerr(funyvfgf[-1]) vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fnir\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq juvyr fnivat.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc gvpx """ b = bcgvbaf.Bcgvbaf('ohc gvpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") g = gvzr.gvzr() gyrsg = 1 - (g - vag(g)) gvzr.fyrrc(gyrsg) #!/hfe/ova/rai clguba vzcbeg bf, flf, fgng, gvzr sebz ohc vzcbeg bcgvbaf, tvg, vaqrk, qerphefr sebz ohc.urycref vzcbeg * qrs zretr_vaqrkrf(bhg, e1, e2): sbe r va vaqrk.ZretrVgre([e1, e2]): # SVKZR: fubhyqa'g jr erzbir qryrgrq ragevrf riraghnyyl? Jura? 
bhg.nqq_vkragel(r) pynff VgreUrycre: qrs __vavg__(frys, y): frys.v = vgre(y) frys.phe = Abar frys.arkg() qrs arkg(frys): gel: frys.phe = frys.v.arkg() rkprcg FgbcVgrengvba: frys.phe = Abar erghea frys.phe qrs purpx_vaqrk(ernqre): gel: ybt('purpx: purpxvat sbejneq vgrengvba...\a') r = Abar q = {} sbe r va ernqre.sbejneq_vgre(): vs r.puvyqera_a: vs bcg.ireobfr: ybt('%08k+%-4q %e\a' % (r.puvyqera_bsf, r.puvyqera_a, r.anzr)) nffreg(r.puvyqera_bsf) nffreg(r.anzr.raqfjvgu('/')) nffreg(abg q.trg(r.puvyqera_bsf)) q[r.puvyqera_bsf] = 1 vs r.syntf & vaqrk.VK_UNFUINYVQ: nffreg(r.fun != vaqrk.RZCGL_FUN) nffreg(r.tvgzbqr) nffreg(abg r be r.anzr == '/') # ynfg ragel vf *nyjnlf* / ybt('purpx: purpxvat abezny vgrengvba...\a') ynfg = Abar sbe r va ernqre: vs ynfg: nffreg(ynfg > r.anzr) ynfg = r.anzr rkprcg: ybt('vaqrk reebe! ng %e\a' % r) envfr ybt('purpx: cnffrq.\a') qrs hcqngr_vaqrk(gbc): ev = vaqrk.Ernqre(vaqrksvyr) jv = vaqrk.Jevgre(vaqrksvyr) evt = VgreUrycre(ev.vgre(anzr=gbc)) gfgneg = vag(gvzr.gvzr()) unfutra = Abar vs bcg.snxr_inyvq: qrs unfutra(anzr): erghea (0100644, vaqrk.SNXR_FUN) gbgny = 0 sbe (cngu,cfg) va qerphefr.erphefvir_qveyvfg([gbc], kqri=bcg.kqri): vs bcg.ireobfr>=2 be (bcg.ireobfr==1 naq fgng.F_VFQVE(cfg.fg_zbqr)): flf.fgqbhg.jevgr('%f\a' % cngu) flf.fgqbhg.syhfu() cebterff('Vaqrkvat: %q\e' % gbgny) ryvs abg (gbgny % 128): cebterff('Vaqrkvat: %q\e' % gbgny) gbgny += 1 juvyr evt.phe naq evt.phe.anzr > cngu: # qryrgrq cnguf vs evt.phe.rkvfgf(): evt.phe.frg_qryrgrq() evt.phe.ercnpx() evt.arkg() vs evt.phe naq evt.phe.anzr == cngu: # cnguf gung nyernql rkvfgrq vs cfg: evt.phe.sebz_fgng(cfg, gfgneg) vs abg (evt.phe.syntf & vaqrk.VK_UNFUINYVQ): vs unfutra: (evt.phe.tvgzbqr, evt.phe.fun) = unfutra(cngu) evt.phe.syntf |= vaqrk.VK_UNFUINYVQ vs bcg.snxr_vainyvq: evt.phe.vainyvqngr() evt.phe.ercnpx() evt.arkg() ryfr: # arj cnguf jv.nqq(cngu, cfg, unfutra = unfutra) cebterff('Vaqrkvat: %q, qbar.\a' % gbgny) vs ev.rkvfgf(): ev.fnir() jv.syhfu() vs jv.pbhag: je = 
jv.arj_ernqre() vs bcg.purpx: ybt('purpx: orsber zretvat: byqsvyr\a') purpx_vaqrk(ev) ybt('purpx: orsber zretvat: arjsvyr\a') purpx_vaqrk(je) zv = vaqrk.Jevgre(vaqrksvyr) zretr_vaqrkrf(zv, ev, je) ev.pybfr() zv.pybfr() je.pybfr() jv.nobeg() ryfr: jv.pybfr() bcgfcrp = """ ohc vaqrk <-c|z|h> [bcgvbaf...] -- c,cevag cevag gur vaqrk ragevrf sbe gur tvira anzrf (nyfb jbexf jvgu -h) z,zbqvsvrq cevag bayl nqqrq/qryrgrq/zbqvsvrq svyrf (vzcyvrf -c) f,fgnghf cevag rnpu svyranzr jvgu n fgnghf pune (N/Z/Q) (vzcyvrf -c) U,unfu cevag gur unfu sbe rnpu bowrpg arkg gb vgf anzr (vzcyvrf -c) y,ybat cevag zber vasbezngvba nobhg rnpu svyr h,hcqngr (erphefviryl) hcqngr gur vaqrk ragevrf sbe gur tvira svyranzrf k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf snxr-inyvq znex nyy vaqrk ragevrf nf hc-gb-qngr rira vs gurl nera'g snxr-vainyvq znex nyy vaqrk ragevrf nf vainyvq purpx pnershyyl purpx vaqrk svyr vagrtevgl s,vaqrksvyr= gur anzr bs gur vaqrk svyr (qrsnhyg 'vaqrk') i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) """ b = bcgvbaf.Bcgvbaf('ohc vaqrk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg (bcg.zbqvsvrq be bcg['cevag'] be bcg.fgnghf be bcg.hcqngr be bcg.purpx): b.sngny('fhccyl bar be zber bs -c, -f, -z, -h, be --purpx') vs (bcg.snxr_inyvq be bcg.snxr_vainyvq) naq abg bcg.hcqngr: b.sngny('--snxr-{va,}inyvq ner zrnavatyrff jvgubhg -h') vs bcg.snxr_inyvq naq bcg.snxr_vainyvq: b.sngny('--snxr-inyvq vf vapbzcngvoyr jvgu --snxr-vainyvq') tvg.purpx_ercb_be_qvr() vaqrksvyr = bcg.vaqrksvyr be tvg.ercb('ohcvaqrk') unaqyr_pgey_p() vs bcg.purpx: ybt('purpx: fgnegvat vavgvny purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) cnguf = vaqrk.erqhpr_cnguf(rkgen) vs bcg.hcqngr: vs abg cnguf: b.sngny('hcqngr (-h) erdhrfgrq ohg ab cnguf tvira') sbe (ec,cngu) va cnguf: hcqngr_vaqrk(ec) vs bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq: sbe (anzr, rag) va vaqrk.Ernqre(vaqrksvyr).svygre(rkgen be ['']): vs (bcg.zbqvsvrq naq (rag.vf_inyvq() be rag.vf_qryrgrq() be abg 
rag.zbqr)): pbagvahr yvar = '' vs bcg.fgnghf: vs rag.vf_qryrgrq(): yvar += 'Q ' ryvs abg rag.vf_inyvq(): vs rag.fun == vaqrk.RZCGL_FUN: yvar += 'N ' ryfr: yvar += 'Z ' ryfr: yvar += ' ' vs bcg.unfu: yvar += rag.fun.rapbqr('urk') + ' ' vs bcg.ybat: yvar += "%7f %7f " % (bpg(rag.zbqr), bpg(rag.tvgzbqr)) cevag yvar + (anzr be './') vs bcg.purpx naq (bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq be bcg.hcqngr): ybt('purpx: fgnegvat svany purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg sebz ohc vzcbeg bcgvbaf, urycref bcgfcrp = """ ohc eonpxhc-freire -- Guvf pbzznaq vf abg vagraqrq gb or eha znahnyyl. """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc-freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') # trg gur fhopbzznaq'f neti. # Abeznyyl jr pbhyq whfg cnff guvf ba gur pbzznaq yvar, ohg fvapr jr'yy bsgra # or trggvat pnyyrq ba gur bgure raq bs na ffu cvcr, juvpu graqf gb znatyr # neti (ol fraqvat vg ivn gur furyy), guvf jnl vf zhpu fnsre. ohs = flf.fgqva.ernq(4) fm = fgehpg.hacnpx('!V', ohs)[0] nffreg(fm > 0) nffreg(fm < 1000000) ohs = flf.fgqva.ernq(fm) nffreg(yra(ohs) == fm) neti = ohs.fcyvg('\0') # fgqva/fgqbhg ner fhccbfrqyl pbaarpgrq gb 'ohc freire' gung gur pnyyre # fgnegrq sbe hf (bsgra ba gur bgure raq bs na ffu ghaary), fb jr qba'g jnag # gb zvfhfr gurz. Zbir gurz bhg bs gur jnl, gura ercynpr fgqbhg jvgu # n cbvagre gb fgqree va pnfr bhe fhopbzznaq jnagf gb qb fbzrguvat jvgu vg. # # Vg zvtug or avpr gb qb gur fnzr jvgu fgqva, ohg zl rkcrevzragf fubjrq gung # ffu frrzf gb znxr vgf puvyq'f fgqree n ernqnoyr-ohg-arire-ernqf-nalguvat # fbpxrg. Gurl ernyyl fubhyq unir hfrq fuhgqbja(FUHG_JE) ba gur bgure raq # bs vg, ohg cebonoyl qvqa'g. Naljnl, vg'f gbb zrffl, fb yrg'f whfg znxr fher # nalbar ernqvat sebz fgqva vf qvfnccbvagrq. 
# # (Lbh pna'g whfg yrnir fgqva/fgqbhg "abg bcra" ol pybfvat gur svyr # qrfpevcgbef. Gura gur arkg svyr gung bcraf vf nhgbzngvpnyyl nffvtarq 0 be 1, # naq crbcyr *gelvat* gb ernq/jevgr fgqva/fgqbhg trg fperjrq.) bf.qhc2(0, 3) bf.qhc2(1, 4) bf.qhc2(2, 1) sq = bf.bcra('/qri/ahyy', bf.B_EQBAYL) bf.qhc2(sq, 0) bf.pybfr(sq) bf.raiveba['OHC_FREIRE_ERIREFR'] = urycref.ubfganzr() bf.rkrpic(neti[0], neti) flf.rkvg(99) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo, fhocebprff, gvzr sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * cne2_bx = 0 ahyys = bcra('/qri/ahyy') qrs qroht(f): vs bcg.ireobfr: ybt(f) qrs eha(neti): # ng yrnfg va clguba 2.5, hfvat "fgqbhg=2" be "fgqbhg=flf.fgqree" orybj # qbrfa'g npghnyyl jbex, orpnhfr fhocebprff pybfrf sq #2 evtug orsber # rkrpvat sbe fbzr ernfba. Fb jr jbex nebhaq vg ol qhcyvpngvat gur sq # svefg. sq = bf.qhc(2) # pbcl fgqree gel: c = fhocebprff.Cbcra(neti, fgqbhg=sq, pybfr_sqf=Snyfr) erghea c.jnvg() svanyyl: bf.pybfr(sq) qrs cne2_frghc(): tybony cne2_bx ei = 1 gel: c = fhocebprff.Cbcra(['cne2', '--uryc'], fgqbhg=ahyys, fgqree=ahyys, fgqva=ahyys) ei = c.jnvg() rkprcg BFReebe: ybt('sfpx: jneavat: cne2 abg sbhaq; qvfnoyvat erpbirel srngherf.\a') ryfr: cne2_bx = 1 qrs cnei(yiy): vs bcg.ireobfr >= yiy: vs vfggl: erghea [] ryfr: erghea ['-d'] ryfr: erghea ['-dd'] qrs cne2_trarengr(onfr): erghea eha(['cne2', 'perngr', '-a1', '-p200'] + cnei(2) + ['--', onfr, onfr+'.cnpx', onfr+'.vqk']) qrs cne2_irevsl(onfr): erghea eha(['cne2', 'irevsl'] + cnei(3) + ['--', onfr]) qrs cne2_ercnve(onfr): erghea eha(['cne2', 'ercnve'] + cnei(2) + ['--', onfr]) qrs dhvpx_irevsl(onfr): s = bcra(onfr + '.cnpx', 'eo') s.frrx(-20, 2) jnagfhz = s.ernq(20) nffreg(yra(jnagfhz) == 20) s.frrx(0) fhz = Fun1() sbe o va puhaxlernqre(s, bf.sfgng(s.svyrab()).fg_fvmr - 20): fhz.hcqngr(o) vs fhz.qvtrfg() != jnagfhz: envfr InyhrReebe('rkcrpgrq %e, tbg %e' % (jnagfhz.rapbqr('urk'), fhz.urkqvtrfg())) qrs tvg_irevsl(onfr): vs bcg.dhvpx: gel: dhvpx_irevsl(onfr) rkprcg Rkprcgvba, r: 
qroht('reebe: %f\a' % r) erghea 1 erghea 0 ryfr: erghea eha(['tvg', 'irevsl-cnpx', '--', onfr]) qrs qb_cnpx(onfr, ynfg): pbqr = 0 vs cne2_bx naq cne2_rkvfgf naq (bcg.ercnve be abg bcg.trarengr): ierfhyg = cne2_irevsl(onfr) vs ierfhyg != 0: vs bcg.ercnve: eerfhyg = cne2_ercnve(onfr) vs eerfhyg != 0: cevag '%f cne2 ercnve: snvyrq (%q)' % (ynfg, eerfhyg) pbqr = eerfhyg ryfr: cevag '%f cne2 ercnve: fhpprrqrq (0)' % ynfg pbqr = 100 ryfr: cevag '%f cne2 irevsl: snvyrq (%q)' % (ynfg, ierfhyg) pbqr = ierfhyg ryfr: cevag '%f bx' % ynfg ryvs abg bcg.trarengr be (cne2_bx naq abg cne2_rkvfgf): terfhyg = tvg_irevsl(onfr) vs terfhyg != 0: cevag '%f tvg irevsl: snvyrq (%q)' % (ynfg, terfhyg) pbqr = terfhyg ryfr: vs cne2_bx naq bcg.trarengr: cerfhyg = cne2_trarengr(onfr) vs cerfhyg != 0: cevag '%f cne2 perngr: snvyrq (%q)' % (ynfg, cerfhyg) pbqr = cerfhyg ryfr: cevag '%f bx' % ynfg ryfr: cevag '%f bx' % ynfg ryfr: nffreg(bcg.trarengr naq (abg cne2_bx be cne2_rkvfgf)) qroht(' fxvccrq: cne2 svyr nyernql trarengrq.\a') erghea pbqr bcgfcrp = """ ohc sfpx [bcgvbaf...] [svyranzrf...] -- e,ercnve nggrzcg gb ercnve reebef hfvat cne2 (qnatrebhf!) 
t,trarengr trarengr nhgb-ercnve vasbezngvba hfvat cne2 i,ireobfr vapernfr ireobfvgl (pna or hfrq zber guna bapr) dhvpx whfg purpx cnpx fun1fhz, qba'g hfr tvg irevsl-cnpx w,wbof= eha 'a' wbof va cnenyyry cne2-bx vzzrqvngryl erghea 0 vs cne2 vf bx, 1 vs abg qvfnoyr-cne2 vtaber cne2 rira vs vg vf ninvynoyr """ b = bcgvbaf.Bcgvbaf('ohc sfpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) cne2_frghc() vs bcg.cne2_bx: vs cne2_bx: flf.rkvg(0) # 'gehr' va fu ryfr: flf.rkvg(1) vs bcg.qvfnoyr_cne2: cne2_bx = 0 tvg.purpx_ercb_be_qvr() vs abg rkgen: qroht('sfpx: Ab svyranzrf tvira: purpxvat nyy cnpxf.\a') rkgen = tybo.tybo(tvg.ercb('bowrpgf/cnpx/*.cnpx')) pbqr = 0 pbhag = 0 bhgfgnaqvat = {} sbe anzr va rkgen: vs anzr.raqfjvgu('.cnpx'): onfr = anzr[:-5] ryvs anzr.raqfjvgu('.vqk'): onfr = anzr[:-4] ryvs anzr.raqfjvgu('.cne2'): onfr = anzr[:-5] ryvs bf.cngu.rkvfgf(anzr + '.cnpx'): onfr = anzr ryfr: envfr Rkprcgvba('%f vf abg n cnpx svyr!' % anzr) (qve,ynfg) = bf.cngu.fcyvg(onfr) cne2_rkvfgf = bf.cngu.rkvfgf(onfr + '.cne2') vs cne2_rkvfgf naq bf.fgng(onfr + '.cne2').fg_fvmr == 0: cne2_rkvfgf = 0 flf.fgqbhg.syhfu() qroht('sfpx: purpxvat %f (%f)\a' % (ynfg, cne2_bx naq cne2_rkvfgf naq 'cne2' be 'tvg')) vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.wbof: ap = qb_cnpx(onfr, ynfg) pbqr = pbqr be ap pbhag += 1 ryfr: juvyr yra(bhgfgnaqvat) >= bcg.wbof: (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 cvq = bf.sbex() vs cvq: # cnerag bhgfgnaqvat[cvq] = 1 ryfr: # puvyq gel: flf.rkvg(qb_cnpx(onfr, ynfg)) rkprcg Rkprcgvba, r: ybt('rkprcgvba: %e\a' % r) flf.rkvg(99) juvyr yra(bhgfgnaqvat): (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.ireobfr naq vfggl: ybt('sfpx qbar. 
\a') flf.rkvg(pbqr) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg, trgbcg, fhocebprff, fvtany sebz ohc vzcbeg bcgvbaf, ffu sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc eonpxhc vaqrk ... ohc eonpxhc fnir ... ohc eonpxhc fcyvg ... """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc', bcgfcrp, bcgshap=trgbcg.trgbcg) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) < 2: b.sngny('nethzragf rkcrpgrq') pynff FvtRkprcgvba(Rkprcgvba): qrs __vavg__(frys, fvtahz): frys.fvtahz = fvtahz Rkprcgvba.__vavg__(frys, 'fvtany %q erprvirq' % fvtahz) qrs unaqyre(fvtahz, senzr): envfr FvtRkprcgvba(fvtahz) fvtany.fvtany(fvtany.FVTGREZ, unaqyre) fvtany.fvtany(fvtany.FVTVAG, unaqyre) fc = Abar c = Abar erg = 99 gel: ubfganzr = rkgen[0] neti = rkgen[1:] c = ffu.pbaarpg(ubfganzr, 'eonpxhc-freire') netif = '\0'.wbva(['ohc'] + neti) c.fgqva.jevgr(fgehpg.cnpx('!V', yra(netif)) + netif) c.fgqva.syhfu() znva_rkr = bf.raiveba.trg('OHC_ZNVA_RKR') be flf.neti[0] fc = fhocebprff.Cbcra([znva_rkr, 'freire'], fgqva=c.fgqbhg, fgqbhg=c.fgqva) c.fgqva.pybfr() c.fgqbhg.pybfr() svanyyl: juvyr 1: # vs jr trg n fvtany juvyr jnvgvat, jr unir gb xrrc jnvgvat, whfg # va pnfr bhe puvyq qbrfa'g qvr. 
gel: erg = c.jnvg() fc.jnvg() oernx rkprcg FvtRkprcgvba, r: ybt('\aohc eonpxhc: %f\a' % r) bf.xvyy(c.cvq, r.fvtahz) erg = 84 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc arjyvare """ b = bcgvbaf.Bcgvbaf('ohc arjyvare', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") e = er.pbzcvyr(e'([\e\a])') ynfgyra = 0 nyy = '' juvyr 1: y = e.fcyvg(nyy, 1) vs yra(y) <= 1: gel: o = bf.ernq(flf.fgqva.svyrab(), 4096) rkprcg XrlobneqVagreehcg: oernx vs abg o: oernx nyy += o ryfr: nffreg(yra(y) == 3) (yvar, fcyvgpune, nyy) = y #fcyvgpune = '\a' flf.fgqbhg.jevgr('%-*f%f' % (ynfgyra, yvar, fcyvgpune)) vs fcyvgpune == '\e': ynfgyra = yra(yvar) ryfr: ynfgyra = 0 flf.fgqbhg.syhfu() vs ynfgyra be nyy: flf.fgqbhg.jevgr('%-*f\a' % (ynfgyra, nyy)) #!/hfe/ova/rai clguba vzcbeg flf sebz ohc vzcbeg bcgvbaf, tvg, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc znetva """ b = bcgvbaf.Bcgvbaf('ohc znetva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") tvg.purpx_ercb_be_qvr() #tvg.vtaber_zvqk = 1 zv = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) ynfg = '\0'*20 ybatzngpu = 0 sbe v va zv: vs v == ynfg: pbagvahr #nffreg(fge(v) >= ynfg) cz = _unfufcyvg.ovgzngpu(ynfg, v) ybatzngpu = znk(ybatzngpu, cz) ynfg = v cevag ybatzngpu #!/hfe/ova/rai clguba sebz ohc vzcbeg bcgvbaf, qerphefr sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc qerphefr -- k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf d,dhvrg qba'g npghnyyl cevag svyranzrf cebsvyr eha haqre gur clguba cebsvyre """ b = bcgvbaf.Bcgvbaf('ohc qerphefr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar svyranzr rkcrpgrq") vg = qerphefr.erphefvir_qveyvfg(rkgen, bcg.kqri) vs bcg.cebsvyr: vzcbeg pCebsvyr qrs qb_vg(): sbe v va vg: cnff pCebsvyr.eha('qb_vg()') ryfr: vs bcg.dhvrg: sbe v va vg: cnff ryfr: sbe (anzr,fg) va vg: cevag anzr vs fnirq_reebef: 
ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc fcyvg [-gpo] [-a anzr] [--orapu] [svyranzrf...] -- e,erzbgr= erzbgr ercbfvgbel cngu o,oybof bhgchg n frevrf bs oybo vqf g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) A,abbc qba'g npghnyyl fnir gur qngn naljurer d,dhvrg qba'g cevag cebterff zrffntrf i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) pbcl whfg pbcl vachg gb bhgchg, unfufcyvggvat nybat gur jnl orapu cevag orapuznex gvzvatf gb fgqree znk-cnpx-fvmr= znkvzhz olgrf va n fvatyr cnpx znk-cnpx-bowrpgf= znkvzhz ahzore bs bowrpgf va n fvatyr cnpx snabhg= znkvzhz ahzore bs oybof va n fvatyr gerr """ b = bcgvbaf.Bcgvbaf('ohc fcyvg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr be bcg.abbc be bcg.pbcl): b.sngny("hfr bar be zber bs -o, -g, -p, -a, -A, --pbcl") vs (bcg.abbc be bcg.pbcl) naq (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny('-A vf vapbzcngvoyr jvgu -o, -g, -p, -a') vs bcg.ireobfr >= 2: tvg.ireobfr = bcg.ireobfr - 1 bcg.orapu = 1 vs bcg.znk_cnpx_fvmr: unfufcyvg.znk_cnpx_fvmr = cnefr_ahz(bcg.znk_cnpx_fvmr) vs bcg.znk_cnpx_bowrpgf: unfufcyvg.znk_cnpx_bowrpgf = cnefr_ahz(bcg.znk_cnpx_bowrpgf) vs bcg.snabhg: unfufcyvg.snabhg = cnefr_ahz(bcg.snabhg) vs bcg.oybof: unfufcyvg.snabhg = 0 vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") fgneg_gvzr = gvzr.gvzr() ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.abbc be bcg.pbcl: pyv = j = byqers = Abar ryvs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr 
naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() svyrf = rkgen naq (bcra(sa) sbe sa va rkgen) be [flf.fgqva] vs j: funyvfg = unfufcyvg.fcyvg_gb_funyvfg(j, svyrf) gerr = j.arj_gerr(funyvfg) ryfr: ynfg = 0 sbe (oybo, ovgf) va unfufcyvg.unfufcyvg_vgre(svyrf): unfufcyvg.gbgny_fcyvg += yra(oybo) vs bcg.pbcl: flf.fgqbhg.jevgr(fge(oybo)) zrtf = unfufcyvg.gbgny_fcyvg/1024/1024 vs abg bcg.dhvrg naq ynfg != zrtf: cebterff('%q Zolgrf ernq\e' % zrtf) ynfg = zrtf cebterff('%q Zolgrf ernq, qbar.\a' % zrtf) vs bcg.ireobfr: ybt('\a') vs bcg.oybof: sbe (zbqr,anzr,ova) va funyvfg: cevag ova.rapbqr('urk') vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fcyvg\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') vs j: j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() frpf = gvzr.gvzr() - fgneg_gvzr fvmr = unfufcyvg.gbgny_fcyvg vs bcg.orapu: ybt('\aohc: %.2sxolgrf va %.2s frpf = %.2s xolgrf/frp\a' % (fvmr/1024., frpf, fvmr/1024./frpf)) #!/hfe/ova/rai clguba vzcbeg flf, er, fgehpg, zznc sebz ohc vzcbeg tvg, bcgvbaf sebz ohc.urycref vzcbeg * qrs f_sebz_olgrf(olgrf): pyvfg = [pue(o) sbe o va olgrf] erghea ''.wbva(pyvfg) qrs ercbeg(pbhag): svryqf = ['IzFvmr', 'IzEFF', 'IzQngn', 'IzFgx'] q = {} sbe yvar va bcra('/cebp/frys/fgnghf').ernqyvarf(): y = er.fcyvg(e':\f*', yvar.fgevc(), 1) q[y[0]] = y[1] vs pbhag >= 0: r1 = pbhag svryqf = [q[x] sbe x va svryqf] ryfr: r1 = '' cevag ('%9f ' + ('%10f ' * yra(svryqf))) % ghcyr([r1] + svryqf) flf.fgqbhg.syhfu() bcgfcrp = """ ohc zrzgrfg [-a ryrzragf] [-p plpyrf] -- a,ahzore= ahzore bs bowrpgf cre plpyr p,plpyrf= ahzore bs plpyrf gb eha vtaber-zvqk vtaber .zvqk svyrf, hfr bayl .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zrzgrfg', bcgfcrp) (bcg, syntf, rkgen) = 
b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') tvg.vtaber_zvqk = bcg.vtaber_zvqk tvg.purpx_ercb_be_qvr() z = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) plpyrf = bcg.plpyrf be 100 ahzore = bcg.ahzore be 10000 ercbeg(-1) s = bcra('/qri/henaqbz') n = zznc.zznc(-1, 20) ercbeg(0) sbe p va kenatr(plpyrf): sbe a va kenatr(ahzore): o = s.ernq(3) vs 0: olgrf = yvfg(fgehpg.hacnpx('!OOO', o)) + [0]*17 olgrf[2] &= 0ks0 ova = fgehpg.cnpx('!20f', f_sebz_olgrf(olgrf)) ryfr: n[0:2] = o[0:2] n[2] = pue(beq(o[2]) & 0ks0) ova = fge(n[0:20]) #cevag ova.rapbqr('urk') z.rkvfgf(ova) ercbeg((p+1)*ahzore) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * qrs cevag_abqr(grkg, a): cersvk = '' vs bcg.unfu: cersvk += "%f " % a.unfu.rapbqr('urk') vs fgng.F_VFQVE(a.zbqr): cevag '%f%f/' % (cersvk, grkg) ryvs fgng.F_VFYAX(a.zbqr): cevag '%f%f@' % (cersvk, grkg) ryfr: cevag '%f%f' % (cersvk, grkg) bcgfcrp = """ ohc yf -- f,unfu fubj unfu sbe rnpu svyr """ b = bcgvbaf.Bcgvbaf('ohc yf', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) vs abg rkgen: rkgen = ['/'] erg = 0 sbe q va rkgen: gel: a = gbc.yerfbyir(q) vs fgng.F_VFQVE(a.zbqr): sbe fho va a: cevag_abqr(fho.anzr, fho) ryfr: cevag_abqr(q, a) rkprcg isf.AbqrReebe, r: ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er, fgng, ernqyvar, sazngpu sebz ohc vzcbeg bcgvbaf, tvg, fudhbgr, isf sebz ohc.urycref vzcbeg * qrs abqr_anzr(grkg, a): vs fgng.F_VFQVE(a.zbqr): erghea '%f/' % grkg ryvs fgng.F_VFYAX(a.zbqr): erghea '%f@' % grkg ryfr: erghea '%f' % grkg qrs qb_yf(cngu, a): y = [] vs fgng.F_VFQVE(a.zbqr): sbe fho va a: y.nccraq(abqr_anzr(fho.anzr, fho)) ryfr: y.nccraq(abqr_anzr(cngu, a)) cevag pbyhzangr(y, '') qrs jevgr_gb_svyr(vas, bhgs): sbe oybo va puhaxlernqre(vas): bhgs.jevgr(oybo) qrs vachgvgre(): vs bf.vfnggl(flf.fgqva.svyrab()): juvyr 1: gel: lvryq enj_vachg('ohc> ') rkprcg RBSReebe: oernx 
ryfr: sbe yvar va flf.fgqva: lvryq yvar qrs _pbzcyrgre_trg_fhof(yvar): (dglcr, ynfgjbeq) = fudhbgr.hasvavfurq_jbeq(yvar) (qve,anzr) = bf.cngu.fcyvg(ynfgjbeq) #ybt('\apbzcyrgre: %e %e %e\a' % (dglcr, ynfgjbeq, grkg)) a = cjq.erfbyir(qve) fhof = yvfg(svygre(ynzoqn k: k.anzr.fgnegfjvgu(anzr), a.fhof())) erghea (qve, anzr, dglcr, ynfgjbeq, fhof) _ynfg_yvar = Abar _ynfg_erf = Abar qrs pbzcyrgre(grkg, fgngr): tybony _ynfg_yvar tybony _ynfg_erf gel: yvar = ernqyvar.trg_yvar_ohssre()[:ernqyvar.trg_raqvqk()] vs _ynfg_yvar != yvar: _ynfg_erf = _pbzcyrgre_trg_fhof(yvar) _ynfg_yvar = yvar (qve, anzr, dglcr, ynfgjbeq, fhof) = _ynfg_erf vs fgngr < yra(fhof): fa = fhof[fgngr] fa1 = fa.erfbyir('') # qrers flzyvaxf shyyanzr = bf.cngu.wbva(qve, fa.anzr) vs fgng.F_VFQVE(fa1.zbqr): erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr+'/', grezvangr=Snyfr) ryfr: erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr, grezvangr=Gehr) + ' ' erghea grkg + erg rkprcg Rkprcgvba, r: ybt('\areebe va pbzcyrgvba: %f\a' % r) bcgfcrp = """ ohc sgc """ b = bcgvbaf.Bcgvbaf('ohc sgc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) cjq = gbc vs rkgen: yvarf = rkgen ryfr: ernqyvar.frg_pbzcyrgre_qryvzf(' \g\a\e/') ernqyvar.frg_pbzcyrgre(pbzcyrgre) ernqyvar.cnefr_naq_ovaq("gno: pbzcyrgr") yvarf = vachgvgre() sbe yvar va yvarf: vs abg yvar.fgevc(): pbagvahr jbeqf = [jbeq sbe (jbeqfgneg,jbeq) va fudhbgr.dhbgrfcyvg(yvar)] pzq = jbeqf[0].ybjre() #ybt('rkrphgr: %e %e\a' % (pzq, cnez)) gel: vs pzq == 'yf': sbe cnez va (jbeqf[1:] be ['.']): qb_yf(cnez, cjq.erfbyir(cnez)) ryvs pzq == 'pq': sbe cnez va jbeqf[1:]: cjq = cjq.erfbyir(cnez) ryvs pzq == 'cjq': cevag cjq.shyyanzr() ryvs pzq == 'png': sbe cnez va jbeqf[1:]: jevgr_gb_svyr(cjq.erfbyir(cnez).bcra(), flf.fgqbhg) ryvs pzq == 'trg': vs yra(jbeqf) abg va [2,3]: envfr Rkprcgvba('Hfntr: trg [ybpnyanzr]') eanzr = jbeqf[1] (qve,onfr) = bf.cngu.fcyvg(eanzr) yanzr = yra(jbeqf)>2 naq jbeqf[2] be onfr vas = 
cjq.erfbyir(eanzr).bcra() ybt('Fnivat %e\a' % yanzr) jevgr_gb_svyr(vas, bcra(yanzr, 'jo')) ryvs pzq == 'ztrg': sbe cnez va jbeqf[1:]: (qve,onfr) = bf.cngu.fcyvg(cnez) sbe a va cjq.erfbyir(qve).fhof(): vs sazngpu.sazngpu(a.anzr, onfr): gel: ybt('Fnivat %e\a' % a.anzr) vas = a.bcra() bhgs = bcra(a.anzr, 'jo') jevgr_gb_svyr(vas, bhgs) bhgs.pybfr() rkprcg Rkprcgvba, r: ybt(' reebe: %f\a' % r) ryvs pzq == 'uryc' be pzq == '?': ybt('Pbzznaqf: yf pq cjq png trg ztrg uryc dhvg\a') ryvs pzq == 'dhvg' be pzq == 'rkvg' be pzq == 'olr': oernx ryfr: envfr Rkprcgvba('ab fhpu pbzznaq %e' % pzq) rkprcg Rkprcgvba, r: ybt('reebe: %f\a' % r) #envfr #!/hfe/ova/rai clguba vzcbeg flf, zznc sebz ohc vzcbeg bcgvbaf, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc enaqbz [-F frrq] -- F,frrq= bcgvbany enaqbz ahzore frrq (qrsnhyg 1) s,sbepr cevag enaqbz qngn gb fgqbhg rira vs vg'f n ggl """ b = bcgvbaf.Bcgvbaf('ohc enaqbz', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") gbgny = cnefr_ahz(rkgen[0]) vs bcg.sbepr be (abg bf.vfnggl(1) naq abg ngbv(bf.raiveba.trg('OHC_SBEPR_GGL')) & 1): _unfufcyvg.jevgr_enaqbz(flf.fgqbhg.svyrab(), gbgny, bcg.frrq be 0) ryfr: ybt('reebe: abg jevgvat ovanel qngn gb n grezvany. 
Hfr -s gb sbepr.\a') flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc uryc """ b = bcgvbaf.Bcgvbaf('ohc uryc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) == 0: # gur jenccre cebtenz cebivqrf gur qrsnhyg hfntr fgevat bf.rkrpic(bf.raiveba['OHC_ZNVA_RKR'], ['ohc']) ryvs yra(rkgen) == 1: qbpanzr = (rkgen[0]=='ohc' naq 'ohc' be ('ohc-%f' % rkgen[0])) rkr = flf.neti[0] (rkrcngu, rkrsvyr) = bf.cngu.fcyvg(rkr) znacngu = bf.cngu.wbva(rkrcngu, '../Qbphzragngvba/' + qbpanzr + '.[1-9]') t = tybo.tybo(znacngu) vs t: bf.rkrpic('zna', ['zna', '-y', t[0]]) ryfr: bf.rkrpic('zna', ['zna', qbpanzr]) ryfr: b.sngny("rknpgyl bar pbzznaq anzr rkcrpgrq") #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng, reeab, shfr, er, gvzr, grzcsvyr sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * pynff Fgng(shfr.Fgng): qrs __vavg__(frys): frys.fg_zbqr = 0 frys.fg_vab = 0 frys.fg_qri = 0 frys.fg_ayvax = 0 frys.fg_hvq = 0 frys.fg_tvq = 0 frys.fg_fvmr = 0 frys.fg_ngvzr = 0 frys.fg_zgvzr = 0 frys.fg_pgvzr = 0 frys.fg_oybpxf = 0 frys.fg_oyxfvmr = 0 frys.fg_eqri = 0 pnpur = {} qrs pnpur_trg(gbc, cngu): cnegf = cngu.fcyvg('/') pnpur[('',)] = gbc p = Abar znk = yra(cnegf) #ybt('pnpur: %e\a' % pnpur.xrlf()) sbe v va enatr(znk): cer = cnegf[:znk-v] #ybt('pnpur gelvat: %e\a' % cer) p = pnpur.trg(ghcyr(cer)) vs p: erfg = cnegf[znk-v:] sbe e va erfg: #ybt('erfbyivat %e sebz %e\a' % (e, p.shyyanzr())) p = p.yerfbyir(e) xrl = ghcyr(cer + [e]) #ybt('fnivat: %e\a' % (xrl,)) pnpur[xrl] = p oernx nffreg(p) erghea p pynff OhcSf(shfr.Shfr): qrs __vavg__(frys, gbc): shfr.Shfr.__vavg__(frys) frys.gbc = gbc qrs trgngge(frys, cngu): ybt('--trgngge(%e)\a' % cngu) gel: abqr = pnpur_trg(frys.gbc, cngu) fg = Fgng() fg.fg_zbqr = abqr.zbqr fg.fg_ayvax = abqr.ayvaxf() fg.fg_fvmr = abqr.fvmr() fg.fg_zgvzr = abqr.zgvzr fg.fg_pgvzr = abqr.pgvzr fg.fg_ngvzr = abqr.ngvzr erghea fg rkprcg isf.AbFhpuSvyr: erghea -reeab.RABRAG qrs ernqqve(frys, cngu, bssfrg): 
ybt('--ernqqve(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) lvryq shfr.Qveragel('.') lvryq shfr.Qveragel('..') sbe fho va abqr.fhof(): lvryq shfr.Qveragel(fho.anzr) qrs ernqyvax(frys, cngu): ybt('--ernqyvax(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) erghea abqr.ernqyvax() qrs bcra(frys, cngu, syntf): ybt('--bcra(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) nppzbqr = bf.B_EQBAYL | bf.B_JEBAYL | bf.B_EQJE vs (syntf & nppzbqr) != bf.B_EQBAYL: erghea -reeab.RNPPRF abqr.bcra() qrs eryrnfr(frys, cngu, syntf): ybt('--eryrnfr(%e)\a' % cngu) qrs ernq(frys, cngu, fvmr, bssfrg): ybt('--ernq(%e)\a' % cngu) a = pnpur_trg(frys.gbc, cngu) b = a.bcra() b.frrx(bssfrg) erghea b.ernq(fvmr) vs abg unfngge(shfr, '__irefvba__'): envfr EhagvzrReebe, "lbhe shfr zbqhyr vf gbb byq sbe shfr.__irefvba__" shfr.shfr_clguba_ncv = (0, 2) bcgfcrp = """ ohc shfr [-q] [-s] -- q,qroht vapernfr qroht yriry s,sbertebhaq eha va sbertebhaq """ b = bcgvbaf.Bcgvbaf('ohc shfr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) s = OhcSf(gbc) s.shfr_netf.zbhagcbvag = rkgen[0] vs bcg.qroht: s.shfr_netf.nqq('qroht') vs bcg.sbertebhaq: s.shfr_netf.frgzbq('sbertebhaq') cevag s.zhygvguernqrq s.zhygvguernqrq = Snyfr s.znva() #!/hfe/ova/rai clguba sebz ohc vzcbeg tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ [OHC_QVE=...] ohc vavg [-e ubfg:cngu] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc vavg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") vs bcg.erzbgr: tvg.vavg_ercb() # ybpny ercb tvg.purpx_ercb_be_qvr() pyv = pyvrag.Pyvrag(bcg.erzbgr, perngr=Gehr) pyv.pybfr() ryfr: tvg.vavg_ercb() #!/hfe/ova/rai clguba vzcbeg flf, zngu, fgehpg, tybo sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * CNTR_FVMR=4096 FUN_CRE_CNTR=CNTR_FVMR/200. 
qrs zretr(vqkyvfg, ovgf, gnoyr): pbhag = 0 sbe r va tvg.vqkzretr(vqkyvfg): pbhag += 1 cersvk = tvg.rkgenpg_ovgf(r, ovgf) gnoyr[cersvk] = pbhag lvryq r qrs qb_zvqk(bhgqve, bhgsvyranzr, vasvyranzrf): vs abg bhgsvyranzr: nffreg(bhgqve) fhz = Fun1('\0'.wbva(vasvyranzrf)).urkqvtrfg() bhgsvyranzr = '%f/zvqk-%f.zvqk' % (bhgqve, fhz) vac = [] gbgny = 0 sbe anzr va vasvyranzrf: vk = tvg.CnpxVqk(anzr) vac.nccraq(vk) gbgny += yra(vk) ybt('Zretvat %q vaqrkrf (%q bowrpgf).\a' % (yra(vasvyranzrf), gbgny)) vs (abg bcg.sbepr naq (gbgny < 1024 naq yra(vasvyranzrf) < 3)) \ be (bcg.sbepr naq abg gbgny): ybt('zvqk: abguvat gb qb.\a') erghea cntrf = vag(gbgny/FUN_CRE_CNTR) be 1 ovgf = vag(zngu.prvy(zngu.ybt(cntrf, 2))) ragevrf = 2**ovgf ybt('Gnoyr fvmr: %q (%q ovgf)\a' % (ragevrf*4, ovgf)) gnoyr = [0]*ragevrf gel: bf.hayvax(bhgsvyranzr) rkprcg BFReebe: cnff s = bcra(bhgsvyranzr + '.gzc', 'j+') s.jevgr('ZVQK\0\0\0\2') s.jevgr(fgehpg.cnpx('!V', ovgf)) nffreg(s.gryy() == 12) s.jevgr('\0'*4*ragevrf) sbe r va zretr(vac, ovgf, gnoyr): s.jevgr(r) s.jevgr('\0'.wbva(bf.cngu.onfranzr(c) sbe c va vasvyranzrf)) s.frrx(12) s.jevgr(fgehpg.cnpx('!%qV' % ragevrf, *gnoyr)) s.pybfr() bf.eranzr(bhgsvyranzr + '.gzc', bhgsvyranzr) # guvf vf whfg sbe grfgvat vs 0: c = tvg.CnpxZvqk(bhgsvyranzr) nffreg(yra(c.vqkanzrf) == yra(vasvyranzrf)) cevag c.vqkanzrf nffreg(yra(c) == gbgny) cv = vgre(c) sbe v va zretr(vac, gbgny, ovgf, gnoyr): nffreg(v == cv.arkg()) nffreg(c.rkvfgf(v)) cevag bhgsvyranzr bcgfcrp = """ ohc zvqk [bcgvbaf...] 
-- b,bhgchg= bhgchg zvqk svyranzr (qrsnhyg: nhgb-trarengrq) n,nhgb nhgbzngvpnyyl perngr .zvqk sebz nal havaqrkrq .vqk svyrf s,sbepr nhgbzngvpnyyl perngr .zvqk sebz *nyy* .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zvqk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen naq (bcg.nhgb be bcg.sbepr): b.sngny("lbh pna'g hfr -s/-n naq nyfb cebivqr svyranzrf") tvg.purpx_ercb_be_qvr() vs rkgen: qb_zvqk(tvg.ercb('bowrpgf/cnpx'), bcg.bhgchg, rkgen) ryvs bcg.nhgb be bcg.sbepr: cnguf = [tvg.ercb('bowrpgf/cnpx')] cnguf += tybo.tybo(tvg.ercb('vaqrk-pnpur/*/.')) sbe cngu va cnguf: ybt('zvqk: fpnaavat %f\a' % cngu) vs bcg.sbepr: qb_zvqk(cngu, bcg.bhgchg, tybo.tybo('%f/*.vqk' % cngu)) ryvs bcg.nhgb: z = tvg.CnpxVqkYvfg(cngu) arrqrq = {} sbe cnpx va z.cnpxf: # bayl .vqk svyrf jvgubhg n .zvqk ner bcra vs cnpx.anzr.raqfjvgu('.vqk'): arrqrq[cnpx.anzr] = 1 qry z qb_zvqk(cngu, bcg.bhgchg, arrqrq.xrlf()) ybt('\a') ryfr: b.sngny("lbh zhfg hfr -s be -n be cebivqr vachg svyranzrf") #!/hfe/ova/rai clguba vzcbeg flf, bf, enaqbz sebz ohc vzcbeg bcgvbaf sebz ohc.urycref vzcbeg * qrs enaqoybpx(a): y = [] sbe v va kenatr(a): y.nccraq(pue(enaqbz.enaqenatr(0,256))) erghea ''.wbva(y) bcgfcrp = """ ohc qnzntr [-a pbhag] [-f znkfvmr] [-F frrq] -- JNEAVAT: GUVF PBZZNAQ VF RKGERZRYL QNATREBHF a,ahz= ahzore bs oybpxf gb qnzntr f,fvmr= znkvzhz fvmr bs rnpu qnzntrq oybpx creprag= znkvzhz fvmr bs rnpu qnzntrq oybpx (nf n creprag bs ragver svyr) rdhny fcernq qnzntr rirayl guebhtubhg gur svyr F,frrq= enaqbz ahzore frrq (sbe ercrngnoyr grfgf) """ b = bcgvbaf.Bcgvbaf('ohc qnzntr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg rkgen: b.sngny('svyranzrf rkcrpgrq') vs bcg.frrq != Abar: enaqbz.frrq(bcg.frrq) sbe anzr va rkgen: ybt('Qnzntvat "%f"...\a' % anzr) s = bcra(anzr, 'e+o') fg = bf.sfgng(s.svyrab()) fvmr = fg.fg_fvmr vs bcg.creprag be bcg.fvmr: zf1 = vag(sybng(bcg.creprag be 0)/100.0*fvmr) be fvmr zf2 = bcg.fvmr be fvmr znkfvmr = zva(zf1, zf2) ryfr: znkfvmr = 1 puhaxf = bcg.ahz be 10 
puhaxfvmr = fvmr/puhaxf sbe e va enatr(puhaxf): fm = enaqbz.enaqenatr(1, znkfvmr+1) vs fm > fvmr: fm = fvmr vs bcg.rdhny: bsf = e*puhaxfvmr ryfr: bsf = enaqbz.enaqenatr(0, fvmr - fm + 1) ybt(' %6q olgrf ng %q\a' % (fm, bsf)) s.frrx(bsf) s.jevgr(enaqoybpx(fm)) s.pybfr() #!/hfe/ova/rai clguba vzcbeg flf, fgehpg, zznc sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * fhfcraqrq_j = Abar qrs vavg_qve(pbaa, net): tvg.vavg_ercb(net) ybt('ohc freire: ohcqve vavgvnyvmrq: %e\a' % tvg.ercbqve) pbaa.bx() qrs frg_qve(pbaa, net): tvg.purpx_ercb_be_qvr(net) ybt('ohc freire: ohcqve vf %e\a' % tvg.ercbqve) pbaa.bx() qrs yvfg_vaqrkrf(pbaa, whax): tvg.purpx_ercb_be_qvr() sbe s va bf.yvfgqve(tvg.ercb('bowrpgf/cnpx')): vs s.raqfjvgu('.vqk'): pbaa.jevgr('%f\a' % s) pbaa.bx() qrs fraq_vaqrk(pbaa, anzr): tvg.purpx_ercb_be_qvr() nffreg(anzr.svaq('/') < 0) nffreg(anzr.raqfjvgu('.vqk')) vqk = tvg.CnpxVqk(tvg.ercb('bowrpgf/cnpx/%f' % anzr)) pbaa.jevgr(fgehpg.cnpx('!V', yra(vqk.znc))) pbaa.jevgr(vqk.znc) pbaa.bx() qrs erprvir_bowrpgf(pbaa, whax): tybony fhfcraqrq_j tvg.purpx_ercb_be_qvr() fhttrfgrq = {} vs fhfcraqrq_j: j = fhfcraqrq_j fhfcraqrq_j = Abar ryfr: j = tvg.CnpxJevgre() juvyr 1: af = pbaa.ernq(4) vs abg af: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq yratgu urnqre, tbg RBS\a') a = fgehpg.hacnpx('!V', af)[0] #ybt('rkcrpgvat %q olgrf\a' % a) vs abg a: ybt('ohc freire: erprvirq %q bowrpg%f.\a' % (j.pbhag, j.pbhag!=1 naq "f" be '')) shyycngu = j.pybfr() vs shyycngu: (qve, anzr) = bf.cngu.fcyvg(shyycngu) pbaa.jevgr('%f.vqk\a' % anzr) pbaa.bx() erghea ryvs a == 0kssssssss: ybt('ohc freire: erprvir-bowrpgf fhfcraqrq.\a') fhfcraqrq_j = j pbaa.bx() erghea ohs = pbaa.ernq(a) # bowrpg fvmrf va ohc ner ernfbanoyl fznyy #ybt('ernq %q olgrf\a' % a) vs yra(ohs) < a: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq %q olgrf, tbg %q\a' % (a, yra(ohs))) (glcr, pbagrag) = tvg._qrpbqr_cnpxbow(ohs) fun = tvg.pnyp_unfu(glcr, pbagrag) byqcnpx = j.rkvfgf(fun) # SVKZR: jr bayl fhttrfg n fvatyr 
vaqrk cre plpyr, orpnhfr gur pyvrag # vf pheeragyl qhzo gb qbjaybnq zber guna bar cre plpyr naljnl. # Npghnyyl jr fubhyq svk gur pyvrag, ohg guvf vf n zvabe bcgvzvmngvba # ba gur freire fvqr. vs abg fhttrfgrq naq \ byqcnpx naq (byqcnpx == Gehr be byqcnpx.raqfjvgu('.zvqk')): # SVKZR: jr fubhyqa'g ernyyl unir gb xabj nobhg zvqk svyrf # ng guvf ynlre. Ohg rkvfgf() ba n zvqk qbrfa'g erghea gur # cnpxanzr (fvapr vg qbrfa'g xabj)... cebonoyl jr fubhyq whfg # svk gung qrsvpvrapl bs zvqk svyrf riraghnyyl, nygubhtu vg'yy # znxr gur svyrf ovttre. Guvf zrgubq vf pregnvayl abg irel # rssvpvrag. j.bowpnpur.erserfu(fxvc_zvqk = Gehr) byqcnpx = j.bowpnpur.rkvfgf(fun) ybt('arj fhttrfgvba: %e\a' % byqcnpx) nffreg(byqcnpx) nffreg(byqcnpx != Gehr) nffreg(abg byqcnpx.raqfjvgu('.zvqk')) j.bowpnpur.erserfu(fxvc_zvqk = Snyfr) vs abg fhttrfgrq naq byqcnpx: nffreg(byqcnpx.raqfjvgu('.vqk')) (qve,anzr) = bf.cngu.fcyvg(byqcnpx) vs abg (anzr va fhttrfgrq): ybt("ohc freire: fhttrfgvat vaqrk %f\a" % anzr) pbaa.jevgr('vaqrk %f\a' % anzr) fhttrfgrq[anzr] = 1 ryfr: j._enj_jevgr([ohs]) # ABGERNPURQ qrs ernq_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() e = tvg.ernq_ers(ersanzr) pbaa.jevgr('%f\a' % (e be '').rapbqr('urk')) pbaa.bx() qrs hcqngr_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() arjiny = pbaa.ernqyvar().fgevc() byqiny = pbaa.ernqyvar().fgevc() tvg.hcqngr_ers(ersanzr, arjiny.qrpbqr('urk'), byqiny.qrpbqr('urk')) pbaa.bx() qrs png(pbaa, vq): tvg.purpx_ercb_be_qvr() gel: sbe oybo va tvg.png(vq): pbaa.jevgr(fgehpg.cnpx('!V', yra(oybo))) pbaa.jevgr(oybo) rkprcg XrlReebe, r: ybt('freire: reebe: %f\a' % r) pbaa.jevgr('\0\0\0\0') pbaa.reebe(r) ryfr: pbaa.jevgr('\0\0\0\0') pbaa.bx() bcgfcrp = """ ohc freire """ b = bcgvbaf.Bcgvbaf('ohc freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') ybt('ohc freire: ernqvat sebz fgqva.\a') pbzznaqf = { 'vavg-qve': vavg_qve, 'frg-qve': frg_qve, 'yvfg-vaqrkrf': yvfg_vaqrkrf, 'fraq-vaqrk': fraq_vaqrk, 
'erprvir-bowrpgf': erprvir_bowrpgf, 'ernq-ers': ernq_ers, 'hcqngr-ers': hcqngr_ers, 'png': png, } # SVKZR: guvf cebgbpby vf gbgnyyl ynzr naq abg ng nyy shgher-cebbs. # (Rfcrpvnyyl fvapr jr nobeg pbzcyrgryl nf fbba nf *nalguvat* onq unccraf) pbaa = Pbaa(flf.fgqva, flf.fgqbhg) ye = yvarernqre(pbaa) sbe _yvar va ye: yvar = _yvar.fgevc() vs abg yvar: pbagvahr ybt('ohc freire: pbzznaq: %e\a' % yvar) jbeqf = yvar.fcyvg(' ', 1) pzq = jbeqf[0] erfg = yra(jbeqf)>1 naq jbeqf[1] be '' vs pzq == 'dhvg': oernx ryfr: pzq = pbzznaqf.trg(pzq) vs pzq: pzq(pbaa, erfg) ryfr: envfr Rkprcgvba('haxabja freire pbzznaq: %e\a' % yvar) ybt('ohc freire: qbar\a') #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc wbva [-e ubfg:cngu] [ersf be unfurf...] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc wbva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg rkgen: rkgen = yvarernqre(flf.fgqva) erg = 0 vs bcg.erzbgr: pyv = pyvrag.Pyvrag(bcg.erzbgr) png = pyv.png ryfr: pc = tvg.PngCvcr() png = pc.wbva sbe vq va rkgen: gel: sbe oybo va png(vq): flf.fgqbhg.jevgr(oybo) rkprcg XrlReebe, r: flf.fgqbhg.syhfu() ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, er, reeab, fgng, gvzr, zngu sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, vaqrk, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc fnir [-gp] [-a anzr] -- e,erzbgr= erzbgr ercbfvgbel cngu g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) d,dhvrg qba'g fubj cebterff zrgre fznyyre= bayl onpx hc svyrf fznyyre guna a olgrf """ b = bcgvbaf.Bcgvbaf('ohc fnir', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny("hfr bar be zber bs -g, -p, -a") vs abg rkgen: b.sngny("ab svyranzrf 
tvira") bcg.cebterff = (vfggl naq abg bcg.dhvrg) bcg.fznyyre = cnefr_ahz(bcg.fznyyre be 0) vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() unaqyr_pgey_p() qrs rngfynfu(qve): vs qve.raqfjvgu('/'): erghea qve[:-1] ryfr: erghea qve cnegf = [''] funyvfgf = [[]] qrs _chfu(cneg): nffreg(cneg) cnegf.nccraq(cneg) funyvfgf.nccraq([]) qrs _cbc(sbepr_gerr): nffreg(yra(cnegf) >= 1) cneg = cnegf.cbc() funyvfg = funyvfgf.cbc() gerr = sbepr_gerr be j.arj_gerr(funyvfg) vs funyvfgf: funyvfgf[-1].nccraq(('40000', cneg, gerr)) ryfr: # guvf jnf gur gbcyriry, fb chg vg onpx sbe fnavgl funyvfgf.nccraq(funyvfg) erghea gerr ynfgerznva = Abar qrs cebterff_ercbeg(a): tybony pbhag, fhopbhag, ynfgerznva fhopbhag += a pp = pbhag + fhopbhag cpg = gbgny naq (pp*100.0/gbgny) be 0 abj = gvzr.gvzr() ryncfrq = abj - gfgneg xcf = ryncfrq naq vag(pp/1024./ryncfrq) xcf_senp = 10 ** vag(zngu.ybt(xcf+1, 10) - 1) xcf = vag(xcf/xcf_senp)*xcf_senp vs pp: erznva = ryncfrq*1.0/pp * (gbgny-pp) ryfr: erznva = 0.0 vs (ynfgerznva naq (erznva > ynfgerznva) naq ((erznva - ynfgerznva)/ynfgerznva < 0.05)): erznva = ynfgerznva ryfr: ynfgerznva = erznva ubhef = vag(erznva/60/60) zvaf = vag(erznva/60 - ubhef*60) frpf = vag(erznva - ubhef*60*60 - zvaf*60) vs ryncfrq < 30: erznvafge = '' xcffge = '' ryfr: xcffge = '%qx/f' % xcf vs ubhef: erznvafge = '%qu%qz' % (ubhef, zvaf) ryvs zvaf: erznvafge = '%qz%q' % (zvaf, frpf) ryfr: erznvafge = '%qf' % frpf cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf) %f %f\e' % (cpg, pp/1024, gbgny/1024, spbhag, sgbgny, erznvafge, xcffge)) e = vaqrk.Ernqre(tvg.ercb('ohcvaqrk')) qrs nyernql_fnirq(rag): erghea rag.vf_inyvq() naq 
j.rkvfgf(rag.fun) naq rag.fun qrs jnagerphefr_cer(rag): erghea abg nyernql_fnirq(rag) qrs jnagerphefr_qhevat(rag): erghea abg nyernql_fnirq(rag) be rag.fun_zvffvat() gbgny = sgbgny = 0 vs bcg.cebterff: sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_cer): vs abg (sgbgny % 10024): cebterff('Ernqvat vaqrk: %q\e' % sgbgny) rkvfgf = rag.rkvfgf() unfuinyvq = nyernql_fnirq(rag) rag.frg_fun_zvffvat(abg unfuinyvq) vs abg bcg.fznyyre be rag.fvmr < bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: gbgny += rag.fvmr sgbgny += 1 cebterff('Ernqvat vaqrk: %q, qbar.\a' % sgbgny) unfufcyvg.cebterff_pnyyonpx = cebterff_ercbeg gfgneg = gvzr.gvzr() pbhag = fhopbhag = spbhag = 0 ynfgfxvc_anzr = Abar ynfgqve = '' sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_qhevat): (qve, svyr) = bf.cngu.fcyvg(rag.anzr) rkvfgf = (rag.syntf & vaqrk.VK_RKVFGF) unfuinyvq = nyernql_fnirq(rag) jnfzvffvat = rag.fun_zvffvat() byqfvmr = rag.fvmr vs bcg.ireobfr: vs abg rkvfgf: fgnghf = 'Q' ryvs abg unfuinyvq: vs rag.fun == vaqrk.RZCGL_FUN: fgnghf = 'N' ryfr: fgnghf = 'Z' ryfr: fgnghf = ' ' vs bcg.ireobfr >= 2: ybt('%f %-70f\a' % (fgnghf, rag.anzr)) ryvs abg fgng.F_VFQVE(rag.zbqr) naq ynfgqve != qve: vs abg ynfgqve.fgnegfjvgu(qve): ybt('%f %-70f\a' % (fgnghf, bf.cngu.wbva(qve, ''))) ynfgqve = qve vs bcg.cebterff: cebterff_ercbeg(0) spbhag += 1 vs abg rkvfgf: pbagvahr vs bcg.fznyyre naq rag.fvmr >= bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: nqq_reebe('fxvccvat ynetr svyr "%f"' % rag.anzr) ynfgfxvc_anzr = rag.anzr pbagvahr nffreg(qve.fgnegfjvgu('/')) qvec = qve.fcyvg('/') juvyr cnegf > qvec: _cbc(sbepr_gerr = Abar) vs qve != '/': sbe cneg va qvec[yra(cnegf):]: _chfu(cneg) vs abg svyr: # ab svyranzr cbegvba zrnaf guvf vf n fhoqve. Ohg # fho/cneragqverpgbevrf nyernql unaqyrq va gur cbc/chfu() cneg nobir. 
byqgerr = nyernql_fnirq(rag) # znl or Abar arjgerr = _cbc(sbepr_gerr = byqgerr) vs abg byqgerr: vs ynfgfxvc_anzr naq ynfgfxvc_anzr.fgnegfjvgu(rag.anzr): rag.vainyvqngr() ryfr: rag.inyvqngr(040000, arjgerr) rag.ercnpx() vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr pbagvahr # vg'f abg n qverpgbel vq = Abar vs unfuinyvq: zbqr = '%b' % rag.tvgzbqr vq = rag.fun funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) ryfr: vs fgng.F_VFERT(rag.zbqr): gel: s = unfufcyvg.bcra_abngvzr(rag.anzr) rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = unfufcyvg.fcyvg_gb_oybo_be_gerr(j, [s]) ryfr: vs fgng.F_VFQVE(rag.zbqr): nffreg(0) # unaqyrq nobir ryvs fgng.F_VFYAX(rag.zbqr): gel: ey = bf.ernqyvax(rag.anzr) rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = ('120000', j.arj_oybo(ey)) ryfr: nqq_reebe(Rkprcgvba('fxvccvat fcrpvny svyr "%f"' % rag.anzr)) ynfgfxvc_anzr = rag.anzr vs vq: rag.inyvqngr(vag(zbqr, 8), vq) rag.ercnpx() funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr fhopbhag = 0 vs bcg.cebterff: cpg = gbgny naq pbhag*100.0/gbgny be 100 cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf), qbar. 
\a' % (cpg, pbhag/1024, gbgny/1024, spbhag, sgbgny)) juvyr yra(cnegf) > 1: _cbc(sbepr_gerr = Abar) nffreg(yra(funyvfgf) == 1) gerr = j.arj_gerr(funyvfgf[-1]) vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fnir\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq juvyr fnivat.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc gvpx """ b = bcgvbaf.Bcgvbaf('ohc gvpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") g = gvzr.gvzr() gyrsg = 1 - (g - vag(g)) gvzr.fyrrc(gyrsg) #!/hfe/ova/rai clguba vzcbeg bf, flf, fgng, gvzr sebz ohc vzcbeg bcgvbaf, tvg, vaqrk, qerphefr sebz ohc.urycref vzcbeg * qrs zretr_vaqrkrf(bhg, e1, e2): sbe r va vaqrk.ZretrVgre([e1, e2]): # SVKZR: fubhyqa'g jr erzbir qryrgrq ragevrf riraghnyyl? Jura? 
bhg.nqq_vkragel(r) pynff VgreUrycre: qrs __vavg__(frys, y): frys.v = vgre(y) frys.phe = Abar frys.arkg() qrs arkg(frys): gel: frys.phe = frys.v.arkg() rkprcg FgbcVgrengvba: frys.phe = Abar erghea frys.phe qrs purpx_vaqrk(ernqre): gel: ybt('purpx: purpxvat sbejneq vgrengvba...\a') r = Abar q = {} sbe r va ernqre.sbejneq_vgre(): vs r.puvyqera_a: vs bcg.ireobfr: ybt('%08k+%-4q %e\a' % (r.puvyqera_bsf, r.puvyqera_a, r.anzr)) nffreg(r.puvyqera_bsf) nffreg(r.anzr.raqfjvgu('/')) nffreg(abg q.trg(r.puvyqera_bsf)) q[r.puvyqera_bsf] = 1 vs r.syntf & vaqrk.VK_UNFUINYVQ: nffreg(r.fun != vaqrk.RZCGL_FUN) nffreg(r.tvgzbqr) nffreg(abg r be r.anzr == '/') # ynfg ragel vf *nyjnlf* / ybt('purpx: purpxvat abezny vgrengvba...\a') ynfg = Abar sbe r va ernqre: vs ynfg: nffreg(ynfg > r.anzr) ynfg = r.anzr rkprcg: ybt('vaqrk reebe! ng %e\a' % r) envfr ybt('purpx: cnffrq.\a') qrs hcqngr_vaqrk(gbc): ev = vaqrk.Ernqre(vaqrksvyr) jv = vaqrk.Jevgre(vaqrksvyr) evt = VgreUrycre(ev.vgre(anzr=gbc)) gfgneg = vag(gvzr.gvzr()) unfutra = Abar vs bcg.snxr_inyvq: qrs unfutra(anzr): erghea (0100644, vaqrk.SNXR_FUN) gbgny = 0 sbe (cngu,cfg) va qerphefr.erphefvir_qveyvfg([gbc], kqri=bcg.kqri): vs bcg.ireobfr>=2 be (bcg.ireobfr==1 naq fgng.F_VFQVE(cfg.fg_zbqr)): flf.fgqbhg.jevgr('%f\a' % cngu) flf.fgqbhg.syhfu() cebterff('Vaqrkvat: %q\e' % gbgny) ryvs abg (gbgny % 128): cebterff('Vaqrkvat: %q\e' % gbgny) gbgny += 1 juvyr evt.phe naq evt.phe.anzr > cngu: # qryrgrq cnguf vs evt.phe.rkvfgf(): evt.phe.frg_qryrgrq() evt.phe.ercnpx() evt.arkg() vs evt.phe naq evt.phe.anzr == cngu: # cnguf gung nyernql rkvfgrq vs cfg: evt.phe.sebz_fgng(cfg, gfgneg) vs abg (evt.phe.syntf & vaqrk.VK_UNFUINYVQ): vs unfutra: (evt.phe.tvgzbqr, evt.phe.fun) = unfutra(cngu) evt.phe.syntf |= vaqrk.VK_UNFUINYVQ vs bcg.snxr_vainyvq: evt.phe.vainyvqngr() evt.phe.ercnpx() evt.arkg() ryfr: # arj cnguf jv.nqq(cngu, cfg, unfutra = unfutra) cebterff('Vaqrkvat: %q, qbar.\a' % gbgny) vs ev.rkvfgf(): ev.fnir() jv.syhfu() vs jv.pbhag: je = 
jv.arj_ernqre() vs bcg.purpx: ybt('purpx: orsber zretvat: byqsvyr\a') purpx_vaqrk(ev) ybt('purpx: orsber zretvat: arjsvyr\a') purpx_vaqrk(je) zv = vaqrk.Jevgre(vaqrksvyr) zretr_vaqrkrf(zv, ev, je) ev.pybfr() zv.pybfr() je.pybfr() jv.nobeg() ryfr: jv.pybfr() bcgfcrp = """ ohc vaqrk <-c|z|h> [bcgvbaf...] -- c,cevag cevag gur vaqrk ragevrf sbe gur tvira anzrf (nyfb jbexf jvgu -h) z,zbqvsvrq cevag bayl nqqrq/qryrgrq/zbqvsvrq svyrf (vzcyvrf -c) f,fgnghf cevag rnpu svyranzr jvgu n fgnghf pune (N/Z/Q) (vzcyvrf -c) U,unfu cevag gur unfu sbe rnpu bowrpg arkg gb vgf anzr (vzcyvrf -c) y,ybat cevag zber vasbezngvba nobhg rnpu svyr h,hcqngr (erphefviryl) hcqngr gur vaqrk ragevrf sbe gur tvira svyranzrf k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf snxr-inyvq znex nyy vaqrk ragevrf nf hc-gb-qngr rira vs gurl nera'g snxr-vainyvq znex nyy vaqrk ragevrf nf vainyvq purpx pnershyyl purpx vaqrk svyr vagrtevgl s,vaqrksvyr= gur anzr bs gur vaqrk svyr (qrsnhyg 'vaqrk') i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) """ b = bcgvbaf.Bcgvbaf('ohc vaqrk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg (bcg.zbqvsvrq be bcg['cevag'] be bcg.fgnghf be bcg.hcqngr be bcg.purpx): b.sngny('fhccyl bar be zber bs -c, -f, -z, -h, be --purpx') vs (bcg.snxr_inyvq be bcg.snxr_vainyvq) naq abg bcg.hcqngr: b.sngny('--snxr-{va,}inyvq ner zrnavatyrff jvgubhg -h') vs bcg.snxr_inyvq naq bcg.snxr_vainyvq: b.sngny('--snxr-inyvq vf vapbzcngvoyr jvgu --snxr-vainyvq') tvg.purpx_ercb_be_qvr() vaqrksvyr = bcg.vaqrksvyr be tvg.ercb('ohcvaqrk') unaqyr_pgey_p() vs bcg.purpx: ybt('purpx: fgnegvat vavgvny purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) cnguf = vaqrk.erqhpr_cnguf(rkgen) vs bcg.hcqngr: vs abg cnguf: b.sngny('hcqngr (-h) erdhrfgrq ohg ab cnguf tvira') sbe (ec,cngu) va cnguf: hcqngr_vaqrk(ec) vs bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq: sbe (anzr, rag) va vaqrk.Ernqre(vaqrksvyr).svygre(rkgen be ['']): vs (bcg.zbqvsvrq naq (rag.vf_inyvq() be rag.vf_qryrgrq() be abg 
rag.zbqr)): pbagvahr yvar = '' vs bcg.fgnghf: vs rag.vf_qryrgrq(): yvar += 'Q ' ryvs abg rag.vf_inyvq(): vs rag.fun == vaqrk.RZCGL_FUN: yvar += 'N ' ryfr: yvar += 'Z ' ryfr: yvar += ' ' vs bcg.unfu: yvar += rag.fun.rapbqr('urk') + ' ' vs bcg.ybat: yvar += "%7f %7f " % (bpg(rag.zbqr), bpg(rag.tvgzbqr)) cevag yvar + (anzr be './') vs bcg.purpx naq (bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq be bcg.hcqngr): ybt('purpx: fgnegvat svany purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg sebz ohc vzcbeg bcgvbaf, urycref bcgfcrp = """ ohc eonpxhc-freire -- Guvf pbzznaq vf abg vagraqrq gb or eha znahnyyl. """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc-freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') # trg gur fhopbzznaq'f neti. # Abeznyyl jr pbhyq whfg cnff guvf ba gur pbzznaq yvar, ohg fvapr jr'yy bsgra # or trggvat pnyyrq ba gur bgure raq bs na ffu cvcr, juvpu graqf gb znatyr # neti (ol fraqvat vg ivn gur furyy), guvf jnl vf zhpu fnsre. ohs = flf.fgqva.ernq(4) fm = fgehpg.hacnpx('!V', ohs)[0] nffreg(fm > 0) nffreg(fm < 1000000) ohs = flf.fgqva.ernq(fm) nffreg(yra(ohs) == fm) neti = ohs.fcyvg('\0') # fgqva/fgqbhg ner fhccbfrqyl pbaarpgrq gb 'ohc freire' gung gur pnyyre # fgnegrq sbe hf (bsgra ba gur bgure raq bs na ffu ghaary), fb jr qba'g jnag # gb zvfhfr gurz. Zbir gurz bhg bs gur jnl, gura ercynpr fgqbhg jvgu # n cbvagre gb fgqree va pnfr bhe fhopbzznaq jnagf gb qb fbzrguvat jvgu vg. # # Vg zvtug or avpr gb qb gur fnzr jvgu fgqva, ohg zl rkcrevzragf fubjrq gung # ffu frrzf gb znxr vgf puvyq'f fgqree n ernqnoyr-ohg-arire-ernqf-nalguvat # fbpxrg. Gurl ernyyl fubhyq unir hfrq fuhgqbja(FUHG_JE) ba gur bgure raq # bs vg, ohg cebonoyl qvqa'g. Naljnl, vg'f gbb zrffl, fb yrg'f whfg znxr fher # nalbar ernqvat sebz fgqva vf qvfnccbvagrq. 
# # (Lbh pna'g whfg yrnir fgqva/fgqbhg "abg bcra" ol pybfvat gur svyr # qrfpevcgbef. Gura gur arkg svyr gung bcraf vf nhgbzngvpnyyl nffvtarq 0 be 1, # naq crbcyr *gelvat* gb ernq/jevgr fgqva/fgqbhg trg fperjrq.) bf.qhc2(0, 3) bf.qhc2(1, 4) bf.qhc2(2, 1) sq = bf.bcra('/qri/ahyy', bf.B_EQBAYL) bf.qhc2(sq, 0) bf.pybfr(sq) bf.raiveba['OHC_FREIRE_ERIREFR'] = urycref.ubfganzr() bf.rkrpic(neti[0], neti) flf.rkvg(99) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo, fhocebprff, gvzr sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * cne2_bx = 0 ahyys = bcra('/qri/ahyy') qrs qroht(f): vs bcg.ireobfr: ybt(f) qrs eha(neti): # ng yrnfg va clguba 2.5, hfvat "fgqbhg=2" be "fgqbhg=flf.fgqree" orybj # qbrfa'g npghnyyl jbex, orpnhfr fhocebprff pybfrf sq #2 evtug orsber # rkrpvat sbe fbzr ernfba. Fb jr jbex nebhaq vg ol qhcyvpngvat gur sq # svefg. sq = bf.qhc(2) # pbcl fgqree gel: c = fhocebprff.Cbcra(neti, fgqbhg=sq, pybfr_sqf=Snyfr) erghea c.jnvg() svanyyl: bf.pybfr(sq) qrs cne2_frghc(): tybony cne2_bx ei = 1 gel: c = fhocebprff.Cbcra(['cne2', '--uryc'], fgqbhg=ahyys, fgqree=ahyys, fgqva=ahyys) ei = c.jnvg() rkprcg BFReebe: ybt('sfpx: jneavat: cne2 abg sbhaq; qvfnoyvat erpbirel srngherf.\a') ryfr: cne2_bx = 1 qrs cnei(yiy): vs bcg.ireobfr >= yiy: vs vfggl: erghea [] ryfr: erghea ['-d'] ryfr: erghea ['-dd'] qrs cne2_trarengr(onfr): erghea eha(['cne2', 'perngr', '-a1', '-p200'] + cnei(2) + ['--', onfr, onfr+'.cnpx', onfr+'.vqk']) qrs cne2_irevsl(onfr): erghea eha(['cne2', 'irevsl'] + cnei(3) + ['--', onfr]) qrs cne2_ercnve(onfr): erghea eha(['cne2', 'ercnve'] + cnei(2) + ['--', onfr]) qrs dhvpx_irevsl(onfr): s = bcra(onfr + '.cnpx', 'eo') s.frrx(-20, 2) jnagfhz = s.ernq(20) nffreg(yra(jnagfhz) == 20) s.frrx(0) fhz = Fun1() sbe o va puhaxlernqre(s, bf.sfgng(s.svyrab()).fg_fvmr - 20): fhz.hcqngr(o) vs fhz.qvtrfg() != jnagfhz: envfr InyhrReebe('rkcrpgrq %e, tbg %e' % (jnagfhz.rapbqr('urk'), fhz.urkqvtrfg())) qrs tvg_irevsl(onfr): vs bcg.dhvpx: gel: dhvpx_irevsl(onfr) rkprcg Rkprcgvba, r: 
qroht('reebe: %f\a' % r) erghea 1 erghea 0 ryfr: erghea eha(['tvg', 'irevsl-cnpx', '--', onfr]) qrs qb_cnpx(onfr, ynfg): pbqr = 0 vs cne2_bx naq cne2_rkvfgf naq (bcg.ercnve be abg bcg.trarengr): ierfhyg = cne2_irevsl(onfr) vs ierfhyg != 0: vs bcg.ercnve: eerfhyg = cne2_ercnve(onfr) vs eerfhyg != 0: cevag '%f cne2 ercnve: snvyrq (%q)' % (ynfg, eerfhyg) pbqr = eerfhyg ryfr: cevag '%f cne2 ercnve: fhpprrqrq (0)' % ynfg pbqr = 100 ryfr: cevag '%f cne2 irevsl: snvyrq (%q)' % (ynfg, ierfhyg) pbqr = ierfhyg ryfr: cevag '%f bx' % ynfg ryvs abg bcg.trarengr be (cne2_bx naq abg cne2_rkvfgf): terfhyg = tvg_irevsl(onfr) vs terfhyg != 0: cevag '%f tvg irevsl: snvyrq (%q)' % (ynfg, terfhyg) pbqr = terfhyg ryfr: vs cne2_bx naq bcg.trarengr: cerfhyg = cne2_trarengr(onfr) vs cerfhyg != 0: cevag '%f cne2 perngr: snvyrq (%q)' % (ynfg, cerfhyg) pbqr = cerfhyg ryfr: cevag '%f bx' % ynfg ryfr: cevag '%f bx' % ynfg ryfr: nffreg(bcg.trarengr naq (abg cne2_bx be cne2_rkvfgf)) qroht(' fxvccrq: cne2 svyr nyernql trarengrq.\a') erghea pbqr bcgfcrp = """ ohc sfpx [bcgvbaf...] [svyranzrf...] -- e,ercnve nggrzcg gb ercnve reebef hfvat cne2 (qnatrebhf!) 
t,trarengr trarengr nhgb-ercnve vasbezngvba hfvat cne2 i,ireobfr vapernfr ireobfvgl (pna or hfrq zber guna bapr) dhvpx whfg purpx cnpx fun1fhz, qba'g hfr tvg irevsl-cnpx w,wbof= eha 'a' wbof va cnenyyry cne2-bx vzzrqvngryl erghea 0 vs cne2 vf bx, 1 vs abg qvfnoyr-cne2 vtaber cne2 rira vs vg vf ninvynoyr """ b = bcgvbaf.Bcgvbaf('ohc sfpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) cne2_frghc() vs bcg.cne2_bx: vs cne2_bx: flf.rkvg(0) # 'gehr' va fu ryfr: flf.rkvg(1) vs bcg.qvfnoyr_cne2: cne2_bx = 0 tvg.purpx_ercb_be_qvr() vs abg rkgen: qroht('sfpx: Ab svyranzrf tvira: purpxvat nyy cnpxf.\a') rkgen = tybo.tybo(tvg.ercb('bowrpgf/cnpx/*.cnpx')) pbqr = 0 pbhag = 0 bhgfgnaqvat = {} sbe anzr va rkgen: vs anzr.raqfjvgu('.cnpx'): onfr = anzr[:-5] ryvs anzr.raqfjvgu('.vqk'): onfr = anzr[:-4] ryvs anzr.raqfjvgu('.cne2'): onfr = anzr[:-5] ryvs bf.cngu.rkvfgf(anzr + '.cnpx'): onfr = anzr ryfr: envfr Rkprcgvba('%f vf abg n cnpx svyr!' % anzr) (qve,ynfg) = bf.cngu.fcyvg(onfr) cne2_rkvfgf = bf.cngu.rkvfgf(onfr + '.cne2') vs cne2_rkvfgf naq bf.fgng(onfr + '.cne2').fg_fvmr == 0: cne2_rkvfgf = 0 flf.fgqbhg.syhfu() qroht('sfpx: purpxvat %f (%f)\a' % (ynfg, cne2_bx naq cne2_rkvfgf naq 'cne2' be 'tvg')) vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.wbof: ap = qb_cnpx(onfr, ynfg) pbqr = pbqr be ap pbhag += 1 ryfr: juvyr yra(bhgfgnaqvat) >= bcg.wbof: (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 cvq = bf.sbex() vs cvq: # cnerag bhgfgnaqvat[cvq] = 1 ryfr: # puvyq gel: flf.rkvg(qb_cnpx(onfr, ynfg)) rkprcg Rkprcgvba, r: ybt('rkprcgvba: %e\a' % r) flf.rkvg(99) juvyr yra(bhgfgnaqvat): (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.ireobfr naq vfggl: ybt('sfpx qbar. 
\a') flf.rkvg(pbqr) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg, trgbcg, fhocebprff, fvtany sebz ohc vzcbeg bcgvbaf, ffu sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc eonpxhc vaqrk ... ohc eonpxhc fnir ... ohc eonpxhc fcyvg ... """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc', bcgfcrp, bcgshap=trgbcg.trgbcg) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) < 2: b.sngny('nethzragf rkcrpgrq') pynff FvtRkprcgvba(Rkprcgvba): qrs __vavg__(frys, fvtahz): frys.fvtahz = fvtahz Rkprcgvba.__vavg__(frys, 'fvtany %q erprvirq' % fvtahz) qrs unaqyre(fvtahz, senzr): envfr FvtRkprcgvba(fvtahz) fvtany.fvtany(fvtany.FVTGREZ, unaqyre) fvtany.fvtany(fvtany.FVTVAG, unaqyre) fc = Abar c = Abar erg = 99 gel: ubfganzr = rkgen[0] neti = rkgen[1:] c = ffu.pbaarpg(ubfganzr, 'eonpxhc-freire') netif = '\0'.wbva(['ohc'] + neti) c.fgqva.jevgr(fgehpg.cnpx('!V', yra(netif)) + netif) c.fgqva.syhfu() znva_rkr = bf.raiveba.trg('OHC_ZNVA_RKR') be flf.neti[0] fc = fhocebprff.Cbcra([znva_rkr, 'freire'], fgqva=c.fgqbhg, fgqbhg=c.fgqva) c.fgqva.pybfr() c.fgqbhg.pybfr() svanyyl: juvyr 1: # vs jr trg n fvtany juvyr jnvgvat, jr unir gb xrrc jnvgvat, whfg # va pnfr bhe puvyq qbrfa'g qvr. 
gel: erg = c.jnvg() fc.jnvg() oernx rkprcg FvtRkprcgvba, r: ybt('\aohc eonpxhc: %f\a' % r) bf.xvyy(c.cvq, r.fvtahz) erg = 84 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc arjyvare """ b = bcgvbaf.Bcgvbaf('ohc arjyvare', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") e = er.pbzcvyr(e'([\e\a])') ynfgyra = 0 nyy = '' juvyr 1: y = e.fcyvg(nyy, 1) vs yra(y) <= 1: gel: o = bf.ernq(flf.fgqva.svyrab(), 4096) rkprcg XrlobneqVagreehcg: oernx vs abg o: oernx nyy += o ryfr: nffreg(yra(y) == 3) (yvar, fcyvgpune, nyy) = y #fcyvgpune = '\a' flf.fgqbhg.jevgr('%-*f%f' % (ynfgyra, yvar, fcyvgpune)) vs fcyvgpune == '\e': ynfgyra = yra(yvar) ryfr: ynfgyra = 0 flf.fgqbhg.syhfu() vs ynfgyra be nyy: flf.fgqbhg.jevgr('%-*f\a' % (ynfgyra, nyy)) #!/hfe/ova/rai clguba vzcbeg flf sebz ohc vzcbeg bcgvbaf, tvg, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc znetva """ b = bcgvbaf.Bcgvbaf('ohc znetva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") tvg.purpx_ercb_be_qvr() #tvg.vtaber_zvqk = 1 zv = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) ynfg = '\0'*20 ybatzngpu = 0 sbe v va zv: vs v == ynfg: pbagvahr #nffreg(fge(v) >= ynfg) cz = _unfufcyvg.ovgzngpu(ynfg, v) ybatzngpu = znk(ybatzngpu, cz) ynfg = v cevag ybatzngpu #!/hfe/ova/rai clguba sebz ohc vzcbeg bcgvbaf, qerphefr sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc qerphefr -- k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf d,dhvrg qba'g npghnyyl cevag svyranzrf cebsvyr eha haqre gur clguba cebsvyre """ b = bcgvbaf.Bcgvbaf('ohc qerphefr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar svyranzr rkcrpgrq") vg = qerphefr.erphefvir_qveyvfg(rkgen, bcg.kqri) vs bcg.cebsvyr: vzcbeg pCebsvyr qrs qb_vg(): sbe v va vg: cnff pCebsvyr.eha('qb_vg()') ryfr: vs bcg.dhvrg: sbe v va vg: cnff ryfr: sbe (anzr,fg) va vg: cevag anzr vs fnirq_reebef: 
ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc fcyvg [-gpo] [-a anzr] [--orapu] [svyranzrf...] -- e,erzbgr= erzbgr ercbfvgbel cngu o,oybof bhgchg n frevrf bs oybo vqf g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) A,abbc qba'g npghnyyl fnir gur qngn naljurer d,dhvrg qba'g cevag cebterff zrffntrf i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) pbcl whfg pbcl vachg gb bhgchg, unfufcyvggvat nybat gur jnl orapu cevag orapuznex gvzvatf gb fgqree znk-cnpx-fvmr= znkvzhz olgrf va n fvatyr cnpx znk-cnpx-bowrpgf= znkvzhz ahzore bs bowrpgf va n fvatyr cnpx snabhg= znkvzhz ahzore bs oybof va n fvatyr gerr """ b = bcgvbaf.Bcgvbaf('ohc fcyvg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr be bcg.abbc be bcg.pbcl): b.sngny("hfr bar be zber bs -o, -g, -p, -a, -A, --pbcl") vs (bcg.abbc be bcg.pbcl) naq (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny('-A vf vapbzcngvoyr jvgu -o, -g, -p, -a') vs bcg.ireobfr >= 2: tvg.ireobfr = bcg.ireobfr - 1 bcg.orapu = 1 vs bcg.znk_cnpx_fvmr: unfufcyvg.znk_cnpx_fvmr = cnefr_ahz(bcg.znk_cnpx_fvmr) vs bcg.znk_cnpx_bowrpgf: unfufcyvg.znk_cnpx_bowrpgf = cnefr_ahz(bcg.znk_cnpx_bowrpgf) vs bcg.snabhg: unfufcyvg.snabhg = cnefr_ahz(bcg.snabhg) vs bcg.oybof: unfufcyvg.snabhg = 0 vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") fgneg_gvzr = gvzr.gvzr() ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.abbc be bcg.pbcl: pyv = j = byqers = Abar ryvs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr 
naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() svyrf = rkgen naq (bcra(sa) sbe sa va rkgen) be [flf.fgqva] vs j: funyvfg = unfufcyvg.fcyvg_gb_funyvfg(j, svyrf) gerr = j.arj_gerr(funyvfg) ryfr: ynfg = 0 sbe (oybo, ovgf) va unfufcyvg.unfufcyvg_vgre(svyrf): unfufcyvg.gbgny_fcyvg += yra(oybo) vs bcg.pbcl: flf.fgqbhg.jevgr(fge(oybo)) zrtf = unfufcyvg.gbgny_fcyvg/1024/1024 vs abg bcg.dhvrg naq ynfg != zrtf: cebterff('%q Zolgrf ernq\e' % zrtf) ynfg = zrtf cebterff('%q Zolgrf ernq, qbar.\a' % zrtf) vs bcg.ireobfr: ybt('\a') vs bcg.oybof: sbe (zbqr,anzr,ova) va funyvfg: cevag ova.rapbqr('urk') vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fcyvg\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') vs j: j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() frpf = gvzr.gvzr() - fgneg_gvzr fvmr = unfufcyvg.gbgny_fcyvg vs bcg.orapu: ybt('\aohc: %.2sxolgrf va %.2s frpf = %.2s xolgrf/frp\a' % (fvmr/1024., frpf, fvmr/1024./frpf)) #!/hfe/ova/rai clguba vzcbeg flf, er, fgehpg, zznc sebz ohc vzcbeg tvg, bcgvbaf sebz ohc.urycref vzcbeg * qrs f_sebz_olgrf(olgrf): pyvfg = [pue(o) sbe o va olgrf] erghea ''.wbva(pyvfg) qrs ercbeg(pbhag): svryqf = ['IzFvmr', 'IzEFF', 'IzQngn', 'IzFgx'] q = {} sbe yvar va bcra('/cebp/frys/fgnghf').ernqyvarf(): y = er.fcyvg(e':\f*', yvar.fgevc(), 1) q[y[0]] = y[1] vs pbhag >= 0: r1 = pbhag svryqf = [q[x] sbe x va svryqf] ryfr: r1 = '' cevag ('%9f ' + ('%10f ' * yra(svryqf))) % ghcyr([r1] + svryqf) flf.fgqbhg.syhfu() bcgfcrp = """ ohc zrzgrfg [-a ryrzragf] [-p plpyrf] -- a,ahzore= ahzore bs bowrpgf cre plpyr p,plpyrf= ahzore bs plpyrf gb eha vtaber-zvqk vtaber .zvqk svyrf, hfr bayl .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zrzgrfg', bcgfcrp) (bcg, syntf, rkgen) = 
b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') tvg.vtaber_zvqk = bcg.vtaber_zvqk tvg.purpx_ercb_be_qvr() z = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) plpyrf = bcg.plpyrf be 100 ahzore = bcg.ahzore be 10000 ercbeg(-1) s = bcra('/qri/henaqbz') n = zznc.zznc(-1, 20) ercbeg(0) sbe p va kenatr(plpyrf): sbe a va kenatr(ahzore): o = s.ernq(3) vs 0: olgrf = yvfg(fgehpg.hacnpx('!OOO', o)) + [0]*17 olgrf[2] &= 0ks0 ova = fgehpg.cnpx('!20f', f_sebz_olgrf(olgrf)) ryfr: n[0:2] = o[0:2] n[2] = pue(beq(o[2]) & 0ks0) ova = fge(n[0:20]) #cevag ova.rapbqr('urk') z.rkvfgf(ova) ercbeg((p+1)*ahzore) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * qrs cevag_abqr(grkg, a): cersvk = '' vs bcg.unfu: cersvk += "%f " % a.unfu.rapbqr('urk') vs fgng.F_VFQVE(a.zbqr): cevag '%f%f/' % (cersvk, grkg) ryvs fgng.F_VFYAX(a.zbqr): cevag '%f%f@' % (cersvk, grkg) ryfr: cevag '%f%f' % (cersvk, grkg) bcgfcrp = """ ohc yf -- f,unfu fubj unfu sbe rnpu svyr """ b = bcgvbaf.Bcgvbaf('ohc yf', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) vs abg rkgen: rkgen = ['/'] erg = 0 sbe q va rkgen: gel: a = gbc.yerfbyir(q) vs fgng.F_VFQVE(a.zbqr): sbe fho va a: cevag_abqr(fho.anzr, fho) ryfr: cevag_abqr(q, a) rkprcg isf.AbqrReebe, r: ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er, fgng, ernqyvar, sazngpu sebz ohc vzcbeg bcgvbaf, tvg, fudhbgr, isf sebz ohc.urycref vzcbeg * qrs abqr_anzr(grkg, a): vs fgng.F_VFQVE(a.zbqr): erghea '%f/' % grkg ryvs fgng.F_VFYAX(a.zbqr): erghea '%f@' % grkg ryfr: erghea '%f' % grkg qrs qb_yf(cngu, a): y = [] vs fgng.F_VFQVE(a.zbqr): sbe fho va a: y.nccraq(abqr_anzr(fho.anzr, fho)) ryfr: y.nccraq(abqr_anzr(cngu, a)) cevag pbyhzangr(y, '') qrs jevgr_gb_svyr(vas, bhgs): sbe oybo va puhaxlernqre(vas): bhgs.jevgr(oybo) qrs vachgvgre(): vs bf.vfnggl(flf.fgqva.svyrab()): juvyr 1: gel: lvryq enj_vachg('ohc> ') rkprcg RBSReebe: oernx 
ryfr: sbe yvar va flf.fgqva: lvryq yvar qrs _pbzcyrgre_trg_fhof(yvar): (dglcr, ynfgjbeq) = fudhbgr.hasvavfurq_jbeq(yvar) (qve,anzr) = bf.cngu.fcyvg(ynfgjbeq) #ybt('\apbzcyrgre: %e %e %e\a' % (dglcr, ynfgjbeq, grkg)) a = cjq.erfbyir(qve) fhof = yvfg(svygre(ynzoqn k: k.anzr.fgnegfjvgu(anzr), a.fhof())) erghea (qve, anzr, dglcr, ynfgjbeq, fhof) _ynfg_yvar = Abar _ynfg_erf = Abar qrs pbzcyrgre(grkg, fgngr): tybony _ynfg_yvar tybony _ynfg_erf gel: yvar = ernqyvar.trg_yvar_ohssre()[:ernqyvar.trg_raqvqk()] vs _ynfg_yvar != yvar: _ynfg_erf = _pbzcyrgre_trg_fhof(yvar) _ynfg_yvar = yvar (qve, anzr, dglcr, ynfgjbeq, fhof) = _ynfg_erf vs fgngr < yra(fhof): fa = fhof[fgngr] fa1 = fa.erfbyir('') # qrers flzyvaxf shyyanzr = bf.cngu.wbva(qve, fa.anzr) vs fgng.F_VFQVE(fa1.zbqr): erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr+'/', grezvangr=Snyfr) ryfr: erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr, grezvangr=Gehr) + ' ' erghea grkg + erg rkprcg Rkprcgvba, r: ybt('\areebe va pbzcyrgvba: %f\a' % r) bcgfcrp = """ ohc sgc """ b = bcgvbaf.Bcgvbaf('ohc sgc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) cjq = gbc vs rkgen: yvarf = rkgen ryfr: ernqyvar.frg_pbzcyrgre_qryvzf(' \g\a\e/') ernqyvar.frg_pbzcyrgre(pbzcyrgre) ernqyvar.cnefr_naq_ovaq("gno: pbzcyrgr") yvarf = vachgvgre() sbe yvar va yvarf: vs abg yvar.fgevc(): pbagvahr jbeqf = [jbeq sbe (jbeqfgneg,jbeq) va fudhbgr.dhbgrfcyvg(yvar)] pzq = jbeqf[0].ybjre() #ybt('rkrphgr: %e %e\a' % (pzq, cnez)) gel: vs pzq == 'yf': sbe cnez va (jbeqf[1:] be ['.']): qb_yf(cnez, cjq.erfbyir(cnez)) ryvs pzq == 'pq': sbe cnez va jbeqf[1:]: cjq = cjq.erfbyir(cnez) ryvs pzq == 'cjq': cevag cjq.shyyanzr() ryvs pzq == 'png': sbe cnez va jbeqf[1:]: jevgr_gb_svyr(cjq.erfbyir(cnez).bcra(), flf.fgqbhg) ryvs pzq == 'trg': vs yra(jbeqf) abg va [2,3]: envfr Rkprcgvba('Hfntr: trg [ybpnyanzr]') eanzr = jbeqf[1] (qve,onfr) = bf.cngu.fcyvg(eanzr) yanzr = yra(jbeqf)>2 naq jbeqf[2] be onfr vas = 
cjq.erfbyir(eanzr).bcra() ybt('Fnivat %e\a' % yanzr) jevgr_gb_svyr(vas, bcra(yanzr, 'jo')) ryvs pzq == 'ztrg': sbe cnez va jbeqf[1:]: (qve,onfr) = bf.cngu.fcyvg(cnez) sbe a va cjq.erfbyir(qve).fhof(): vs sazngpu.sazngpu(a.anzr, onfr): gel: ybt('Fnivat %e\a' % a.anzr) vas = a.bcra() bhgs = bcra(a.anzr, 'jo') jevgr_gb_svyr(vas, bhgs) bhgs.pybfr() rkprcg Rkprcgvba, r: ybt(' reebe: %f\a' % r) ryvs pzq == 'uryc' be pzq == '?': ybt('Pbzznaqf: yf pq cjq png trg ztrg uryc dhvg\a') ryvs pzq == 'dhvg' be pzq == 'rkvg' be pzq == 'olr': oernx ryfr: envfr Rkprcgvba('ab fhpu pbzznaq %e' % pzq) rkprcg Rkprcgvba, r: ybt('reebe: %f\a' % r) #envfr #!/hfe/ova/rai clguba vzcbeg flf, zznc sebz ohc vzcbeg bcgvbaf, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc enaqbz [-F frrq] -- F,frrq= bcgvbany enaqbz ahzore frrq (qrsnhyg 1) s,sbepr cevag enaqbz qngn gb fgqbhg rira vs vg'f n ggl """ b = bcgvbaf.Bcgvbaf('ohc enaqbz', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") gbgny = cnefr_ahz(rkgen[0]) vs bcg.sbepr be (abg bf.vfnggl(1) naq abg ngbv(bf.raiveba.trg('OHC_SBEPR_GGL')) & 1): _unfufcyvg.jevgr_enaqbz(flf.fgqbhg.svyrab(), gbgny, bcg.frrq be 0) ryfr: ybt('reebe: abg jevgvat ovanel qngn gb n grezvany. 
Hfr -s gb sbepr.\a') flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc uryc """ b = bcgvbaf.Bcgvbaf('ohc uryc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) == 0: # gur jenccre cebtenz cebivqrf gur qrsnhyg hfntr fgevat bf.rkrpic(bf.raiveba['OHC_ZNVA_RKR'], ['ohc']) ryvs yra(rkgen) == 1: qbpanzr = (rkgen[0]=='ohc' naq 'ohc' be ('ohc-%f' % rkgen[0])) rkr = flf.neti[0] (rkrcngu, rkrsvyr) = bf.cngu.fcyvg(rkr) znacngu = bf.cngu.wbva(rkrcngu, '../Qbphzragngvba/' + qbpanzr + '.[1-9]') t = tybo.tybo(znacngu) vs t: bf.rkrpic('zna', ['zna', '-y', t[0]]) ryfr: bf.rkrpic('zna', ['zna', qbpanzr]) ryfr: b.sngny("rknpgyl bar pbzznaq anzr rkcrpgrq") #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng, reeab, shfr, er, gvzr, grzcsvyr sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * pynff Fgng(shfr.Fgng): qrs __vavg__(frys): frys.fg_zbqr = 0 frys.fg_vab = 0 frys.fg_qri = 0 frys.fg_ayvax = 0 frys.fg_hvq = 0 frys.fg_tvq = 0 frys.fg_fvmr = 0 frys.fg_ngvzr = 0 frys.fg_zgvzr = 0 frys.fg_pgvzr = 0 frys.fg_oybpxf = 0 frys.fg_oyxfvmr = 0 frys.fg_eqri = 0 pnpur = {} qrs pnpur_trg(gbc, cngu): cnegf = cngu.fcyvg('/') pnpur[('',)] = gbc p = Abar znk = yra(cnegf) #ybt('pnpur: %e\a' % pnpur.xrlf()) sbe v va enatr(znk): cer = cnegf[:znk-v] #ybt('pnpur gelvat: %e\a' % cer) p = pnpur.trg(ghcyr(cer)) vs p: erfg = cnegf[znk-v:] sbe e va erfg: #ybt('erfbyivat %e sebz %e\a' % (e, p.shyyanzr())) p = p.yerfbyir(e) xrl = ghcyr(cer + [e]) #ybt('fnivat: %e\a' % (xrl,)) pnpur[xrl] = p oernx nffreg(p) erghea p pynff OhcSf(shfr.Shfr): qrs __vavg__(frys, gbc): shfr.Shfr.__vavg__(frys) frys.gbc = gbc qrs trgngge(frys, cngu): ybt('--trgngge(%e)\a' % cngu) gel: abqr = pnpur_trg(frys.gbc, cngu) fg = Fgng() fg.fg_zbqr = abqr.zbqr fg.fg_ayvax = abqr.ayvaxf() fg.fg_fvmr = abqr.fvmr() fg.fg_zgvzr = abqr.zgvzr fg.fg_pgvzr = abqr.pgvzr fg.fg_ngvzr = abqr.ngvzr erghea fg rkprcg isf.AbFhpuSvyr: erghea -reeab.RABRAG qrs ernqqve(frys, cngu, bssfrg): 
ybt('--ernqqve(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) lvryq shfr.Qveragel('.') lvryq shfr.Qveragel('..') sbe fho va abqr.fhof(): lvryq shfr.Qveragel(fho.anzr) qrs ernqyvax(frys, cngu): ybt('--ernqyvax(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) erghea abqr.ernqyvax() qrs bcra(frys, cngu, syntf): ybt('--bcra(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) nppzbqr = bf.B_EQBAYL | bf.B_JEBAYL | bf.B_EQJE vs (syntf & nppzbqr) != bf.B_EQBAYL: erghea -reeab.RNPPRF abqr.bcra() qrs eryrnfr(frys, cngu, syntf): ybt('--eryrnfr(%e)\a' % cngu) qrs ernq(frys, cngu, fvmr, bssfrg): ybt('--ernq(%e)\a' % cngu) a = pnpur_trg(frys.gbc, cngu) b = a.bcra() b.frrx(bssfrg) erghea b.ernq(fvmr) vs abg unfngge(shfr, '__irefvba__'): envfr EhagvzrReebe, "lbhe shfr zbqhyr vf gbb byq sbe shfr.__irefvba__" shfr.shfr_clguba_ncv = (0, 2) bcgfcrp = """ ohc shfr [-q] [-s] -- q,qroht vapernfr qroht yriry s,sbertebhaq eha va sbertebhaq """ b = bcgvbaf.Bcgvbaf('ohc shfr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) s = OhcSf(gbc) s.shfr_netf.zbhagcbvag = rkgen[0] vs bcg.qroht: s.shfr_netf.nqq('qroht') vs bcg.sbertebhaq: s.shfr_netf.frgzbq('sbertebhaq') cevag s.zhygvguernqrq s.zhygvguernqrq = Snyfr s.znva() #!/hfe/ova/rai clguba sebz ohc vzcbeg tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ [OHC_QVE=...] ohc vavg [-e ubfg:cngu] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc vavg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") vs bcg.erzbgr: tvg.vavg_ercb() # ybpny ercb tvg.purpx_ercb_be_qvr() pyv = pyvrag.Pyvrag(bcg.erzbgr, perngr=Gehr) pyv.pybfr() ryfr: tvg.vavg_ercb() #!/hfe/ova/rai clguba vzcbeg flf, zngu, fgehpg, tybo sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * CNTR_FVMR=4096 FUN_CRE_CNTR=CNTR_FVMR/200. 
qrs zretr(vqkyvfg, ovgf, gnoyr): pbhag = 0 sbe r va tvg.vqkzretr(vqkyvfg): pbhag += 1 cersvk = tvg.rkgenpg_ovgf(r, ovgf) gnoyr[cersvk] = pbhag lvryq r qrs qb_zvqk(bhgqve, bhgsvyranzr, vasvyranzrf): vs abg bhgsvyranzr: nffreg(bhgqve) fhz = Fun1('\0'.wbva(vasvyranzrf)).urkqvtrfg() bhgsvyranzr = '%f/zvqk-%f.zvqk' % (bhgqve, fhz) vac = [] gbgny = 0 sbe anzr va vasvyranzrf: vk = tvg.CnpxVqk(anzr) vac.nccraq(vk) gbgny += yra(vk) ybt('Zretvat %q vaqrkrf (%q bowrpgf).\a' % (yra(vasvyranzrf), gbgny)) vs (abg bcg.sbepr naq (gbgny < 1024 naq yra(vasvyranzrf) < 3)) \ be (bcg.sbepr naq abg gbgny): ybt('zvqk: abguvat gb qb.\a') erghea cntrf = vag(gbgny/FUN_CRE_CNTR) be 1 ovgf = vag(zngu.prvy(zngu.ybt(cntrf, 2))) ragevrf = 2**ovgf ybt('Gnoyr fvmr: %q (%q ovgf)\a' % (ragevrf*4, ovgf)) gnoyr = [0]*ragevrf gel: bf.hayvax(bhgsvyranzr) rkprcg BFReebe: cnff s = bcra(bhgsvyranzr + '.gzc', 'j+') s.jevgr('ZVQK\0\0\0\2') s.jevgr(fgehpg.cnpx('!V', ovgf)) nffreg(s.gryy() == 12) s.jevgr('\0'*4*ragevrf) sbe r va zretr(vac, ovgf, gnoyr): s.jevgr(r) s.jevgr('\0'.wbva(bf.cngu.onfranzr(c) sbe c va vasvyranzrf)) s.frrx(12) s.jevgr(fgehpg.cnpx('!%qV' % ragevrf, *gnoyr)) s.pybfr() bf.eranzr(bhgsvyranzr + '.gzc', bhgsvyranzr) # guvf vf whfg sbe grfgvat vs 0: c = tvg.CnpxZvqk(bhgsvyranzr) nffreg(yra(c.vqkanzrf) == yra(vasvyranzrf)) cevag c.vqkanzrf nffreg(yra(c) == gbgny) cv = vgre(c) sbe v va zretr(vac, gbgny, ovgf, gnoyr): nffreg(v == cv.arkg()) nffreg(c.rkvfgf(v)) cevag bhgsvyranzr bcgfcrp = """ ohc zvqk [bcgvbaf...] 
-- b,bhgchg= bhgchg zvqk svyranzr (qrsnhyg: nhgb-trarengrq) n,nhgb nhgbzngvpnyyl perngr .zvqk sebz nal havaqrkrq .vqk svyrf s,sbepr nhgbzngvpnyyl perngr .zvqk sebz *nyy* .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zvqk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen naq (bcg.nhgb be bcg.sbepr): b.sngny("lbh pna'g hfr -s/-n naq nyfb cebivqr svyranzrf") tvg.purpx_ercb_be_qvr() vs rkgen: qb_zvqk(tvg.ercb('bowrpgf/cnpx'), bcg.bhgchg, rkgen) ryvs bcg.nhgb be bcg.sbepr: cnguf = [tvg.ercb('bowrpgf/cnpx')] cnguf += tybo.tybo(tvg.ercb('vaqrk-pnpur/*/.')) sbe cngu va cnguf: ybt('zvqk: fpnaavat %f\a' % cngu) vs bcg.sbepr: qb_zvqk(cngu, bcg.bhgchg, tybo.tybo('%f/*.vqk' % cngu)) ryvs bcg.nhgb: z = tvg.CnpxVqkYvfg(cngu) arrqrq = {} sbe cnpx va z.cnpxf: # bayl .vqk svyrf jvgubhg n .zvqk ner bcra vs cnpx.anzr.raqfjvgu('.vqk'): arrqrq[cnpx.anzr] = 1 qry z qb_zvqk(cngu, bcg.bhgchg, arrqrq.xrlf()) ybt('\a') ryfr: b.sngny("lbh zhfg hfr -s be -n be cebivqr vachg svyranzrf") #!/hfe/ova/rai clguba vzcbeg flf, bf, enaqbz sebz ohc vzcbeg bcgvbaf sebz ohc.urycref vzcbeg * qrs enaqoybpx(a): y = [] sbe v va kenatr(a): y.nccraq(pue(enaqbz.enaqenatr(0,256))) erghea ''.wbva(y) bcgfcrp = """ ohc qnzntr [-a pbhag] [-f znkfvmr] [-F frrq] -- JNEAVAT: GUVF PBZZNAQ VF RKGERZRYL QNATREBHF a,ahz= ahzore bs oybpxf gb qnzntr f,fvmr= znkvzhz fvmr bs rnpu qnzntrq oybpx creprag= znkvzhz fvmr bs rnpu qnzntrq oybpx (nf n creprag bs ragver svyr) rdhny fcernq qnzntr rirayl guebhtubhg gur svyr F,frrq= enaqbz ahzore frrq (sbe ercrngnoyr grfgf) """ b = bcgvbaf.Bcgvbaf('ohc qnzntr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg rkgen: b.sngny('svyranzrf rkcrpgrq') vs bcg.frrq != Abar: enaqbz.frrq(bcg.frrq) sbe anzr va rkgen: ybt('Qnzntvat "%f"...\a' % anzr) s = bcra(anzr, 'e+o') fg = bf.sfgng(s.svyrab()) fvmr = fg.fg_fvmr vs bcg.creprag be bcg.fvmr: zf1 = vag(sybng(bcg.creprag be 0)/100.0*fvmr) be fvmr zf2 = bcg.fvmr be fvmr znkfvmr = zva(zf1, zf2) ryfr: znkfvmr = 1 puhaxf = bcg.ahz be 10 
puhaxfvmr = fvmr/puhaxf sbe e va enatr(puhaxf): fm = enaqbz.enaqenatr(1, znkfvmr+1) vs fm > fvmr: fm = fvmr vs bcg.rdhny: bsf = e*puhaxfvmr ryfr: bsf = enaqbz.enaqenatr(0, fvmr - fm + 1) ybt(' %6q olgrf ng %q\a' % (fm, bsf)) s.frrx(bsf) s.jevgr(enaqoybpx(fm)) s.pybfr() #!/hfe/ova/rai clguba vzcbeg flf, fgehpg, zznc sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * fhfcraqrq_j = Abar qrs vavg_qve(pbaa, net): tvg.vavg_ercb(net) ybt('ohc freire: ohcqve vavgvnyvmrq: %e\a' % tvg.ercbqve) pbaa.bx() qrs frg_qve(pbaa, net): tvg.purpx_ercb_be_qvr(net) ybt('ohc freire: ohcqve vf %e\a' % tvg.ercbqve) pbaa.bx() qrs yvfg_vaqrkrf(pbaa, whax): tvg.purpx_ercb_be_qvr() sbe s va bf.yvfgqve(tvg.ercb('bowrpgf/cnpx')): vs s.raqfjvgu('.vqk'): pbaa.jevgr('%f\a' % s) pbaa.bx() qrs fraq_vaqrk(pbaa, anzr): tvg.purpx_ercb_be_qvr() nffreg(anzr.svaq('/') < 0) nffreg(anzr.raqfjvgu('.vqk')) vqk = tvg.CnpxVqk(tvg.ercb('bowrpgf/cnpx/%f' % anzr)) pbaa.jevgr(fgehpg.cnpx('!V', yra(vqk.znc))) pbaa.jevgr(vqk.znc) pbaa.bx() qrs erprvir_bowrpgf(pbaa, whax): tybony fhfcraqrq_j tvg.purpx_ercb_be_qvr() fhttrfgrq = {} vs fhfcraqrq_j: j = fhfcraqrq_j fhfcraqrq_j = Abar ryfr: j = tvg.CnpxJevgre() juvyr 1: af = pbaa.ernq(4) vs abg af: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq yratgu urnqre, tbg RBS\a') a = fgehpg.hacnpx('!V', af)[0] #ybt('rkcrpgvat %q olgrf\a' % a) vs abg a: ybt('ohc freire: erprvirq %q bowrpg%f.\a' % (j.pbhag, j.pbhag!=1 naq "f" be '')) shyycngu = j.pybfr() vs shyycngu: (qve, anzr) = bf.cngu.fcyvg(shyycngu) pbaa.jevgr('%f.vqk\a' % anzr) pbaa.bx() erghea ryvs a == 0kssssssss: ybt('ohc freire: erprvir-bowrpgf fhfcraqrq.\a') fhfcraqrq_j = j pbaa.bx() erghea ohs = pbaa.ernq(a) # bowrpg fvmrf va ohc ner ernfbanoyl fznyy #ybt('ernq %q olgrf\a' % a) vs yra(ohs) < a: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq %q olgrf, tbg %q\a' % (a, yra(ohs))) (glcr, pbagrag) = tvg._qrpbqr_cnpxbow(ohs) fun = tvg.pnyp_unfu(glcr, pbagrag) byqcnpx = j.rkvfgf(fun) # SVKZR: jr bayl fhttrfg n fvatyr 
vaqrk cre plpyr, orpnhfr gur pyvrag # vf pheeragyl qhzo gb qbjaybnq zber guna bar cre plpyr naljnl. # Npghnyyl jr fubhyq svk gur pyvrag, ohg guvf vf n zvabe bcgvzvmngvba # ba gur freire fvqr. vs abg fhttrfgrq naq \ byqcnpx naq (byqcnpx == Gehr be byqcnpx.raqfjvgu('.zvqk')): # SVKZR: jr fubhyqa'g ernyyl unir gb xabj nobhg zvqk svyrf # ng guvf ynlre. Ohg rkvfgf() ba n zvqk qbrfa'g erghea gur # cnpxanzr (fvapr vg qbrfa'g xabj)... cebonoyl jr fubhyq whfg # svk gung qrsvpvrapl bs zvqk svyrf riraghnyyl, nygubhtu vg'yy # znxr gur svyrf ovttre. Guvf zrgubq vf pregnvayl abg irel # rssvpvrag. j.bowpnpur.erserfu(fxvc_zvqk = Gehr) byqcnpx = j.bowpnpur.rkvfgf(fun) ybt('arj fhttrfgvba: %e\a' % byqcnpx) nffreg(byqcnpx) nffreg(byqcnpx != Gehr) nffreg(abg byqcnpx.raqfjvgu('.zvqk')) j.bowpnpur.erserfu(fxvc_zvqk = Snyfr) vs abg fhttrfgrq naq byqcnpx: nffreg(byqcnpx.raqfjvgu('.vqk')) (qve,anzr) = bf.cngu.fcyvg(byqcnpx) vs abg (anzr va fhttrfgrq): ybt("ohc freire: fhttrfgvat vaqrk %f\a" % anzr) pbaa.jevgr('vaqrk %f\a' % anzr) fhttrfgrq[anzr] = 1 ryfr: j._enj_jevgr([ohs]) # ABGERNPURQ qrs ernq_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() e = tvg.ernq_ers(ersanzr) pbaa.jevgr('%f\a' % (e be '').rapbqr('urk')) pbaa.bx() qrs hcqngr_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() arjiny = pbaa.ernqyvar().fgevc() byqiny = pbaa.ernqyvar().fgevc() tvg.hcqngr_ers(ersanzr, arjiny.qrpbqr('urk'), byqiny.qrpbqr('urk')) pbaa.bx() qrs png(pbaa, vq): tvg.purpx_ercb_be_qvr() gel: sbe oybo va tvg.png(vq): pbaa.jevgr(fgehpg.cnpx('!V', yra(oybo))) pbaa.jevgr(oybo) rkprcg XrlReebe, r: ybt('freire: reebe: %f\a' % r) pbaa.jevgr('\0\0\0\0') pbaa.reebe(r) ryfr: pbaa.jevgr('\0\0\0\0') pbaa.bx() bcgfcrp = """ ohc freire """ b = bcgvbaf.Bcgvbaf('ohc freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') ybt('ohc freire: ernqvat sebz fgqva.\a') pbzznaqf = { 'vavg-qve': vavg_qve, 'frg-qve': frg_qve, 'yvfg-vaqrkrf': yvfg_vaqrkrf, 'fraq-vaqrk': fraq_vaqrk, 
'erprvir-bowrpgf': erprvir_bowrpgf, 'ernq-ers': ernq_ers, 'hcqngr-ers': hcqngr_ers, 'png': png, } # SVKZR: guvf cebgbpby vf gbgnyyl ynzr naq abg ng nyy shgher-cebbs. # (Rfcrpvnyyl fvapr jr nobeg pbzcyrgryl nf fbba nf *nalguvat* onq unccraf) pbaa = Pbaa(flf.fgqva, flf.fgqbhg) ye = yvarernqre(pbaa) sbe _yvar va ye: yvar = _yvar.fgevc() vs abg yvar: pbagvahr ybt('ohc freire: pbzznaq: %e\a' % yvar) jbeqf = yvar.fcyvg(' ', 1) pzq = jbeqf[0] erfg = yra(jbeqf)>1 naq jbeqf[1] be '' vs pzq == 'dhvg': oernx ryfr: pzq = pbzznaqf.trg(pzq) vs pzq: pzq(pbaa, erfg) ryfr: envfr Rkprcgvba('haxabja freire pbzznaq: %e\a' % yvar) ybt('ohc freire: qbar\a') #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc wbva [-e ubfg:cngu] [ersf be unfurf...] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc wbva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg rkgen: rkgen = yvarernqre(flf.fgqva) erg = 0 vs bcg.erzbgr: pyv = pyvrag.Pyvrag(bcg.erzbgr) png = pyv.png ryfr: pc = tvg.PngCvcr() png = pc.wbva sbe vq va rkgen: gel: sbe oybo va png(vq): flf.fgqbhg.jevgr(oybo) rkprcg XrlReebe, r: flf.fgqbhg.syhfu() ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, er, reeab, fgng, gvzr, zngu sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, vaqrk, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc fnir [-gp] [-a anzr] -- e,erzbgr= erzbgr ercbfvgbel cngu g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) d,dhvrg qba'g fubj cebterff zrgre fznyyre= bayl onpx hc svyrf fznyyre guna a olgrf """ b = bcgvbaf.Bcgvbaf('ohc fnir', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny("hfr bar be zber bs -g, -p, -a") vs abg rkgen: b.sngny("ab svyranzrf 
tvira") bcg.cebterff = (vfggl naq abg bcg.dhvrg) bcg.fznyyre = cnefr_ahz(bcg.fznyyre be 0) vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() unaqyr_pgey_p() qrs rngfynfu(qve): vs qve.raqfjvgu('/'): erghea qve[:-1] ryfr: erghea qve cnegf = [''] funyvfgf = [[]] qrs _chfu(cneg): nffreg(cneg) cnegf.nccraq(cneg) funyvfgf.nccraq([]) qrs _cbc(sbepr_gerr): nffreg(yra(cnegf) >= 1) cneg = cnegf.cbc() funyvfg = funyvfgf.cbc() gerr = sbepr_gerr be j.arj_gerr(funyvfg) vs funyvfgf: funyvfgf[-1].nccraq(('40000', cneg, gerr)) ryfr: # guvf jnf gur gbcyriry, fb chg vg onpx sbe fnavgl funyvfgf.nccraq(funyvfg) erghea gerr ynfgerznva = Abar qrs cebterff_ercbeg(a): tybony pbhag, fhopbhag, ynfgerznva fhopbhag += a pp = pbhag + fhopbhag cpg = gbgny naq (pp*100.0/gbgny) be 0 abj = gvzr.gvzr() ryncfrq = abj - gfgneg xcf = ryncfrq naq vag(pp/1024./ryncfrq) xcf_senp = 10 ** vag(zngu.ybt(xcf+1, 10) - 1) xcf = vag(xcf/xcf_senp)*xcf_senp vs pp: erznva = ryncfrq*1.0/pp * (gbgny-pp) ryfr: erznva = 0.0 vs (ynfgerznva naq (erznva > ynfgerznva) naq ((erznva - ynfgerznva)/ynfgerznva < 0.05)): erznva = ynfgerznva ryfr: ynfgerznva = erznva ubhef = vag(erznva/60/60) zvaf = vag(erznva/60 - ubhef*60) frpf = vag(erznva - ubhef*60*60 - zvaf*60) vs ryncfrq < 30: erznvafge = '' xcffge = '' ryfr: xcffge = '%qx/f' % xcf vs ubhef: erznvafge = '%qu%qz' % (ubhef, zvaf) ryvs zvaf: erznvafge = '%qz%q' % (zvaf, frpf) ryfr: erznvafge = '%qf' % frpf cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf) %f %f\e' % (cpg, pp/1024, gbgny/1024, spbhag, sgbgny, erznvafge, xcffge)) e = vaqrk.Ernqre(tvg.ercb('ohcvaqrk')) qrs nyernql_fnirq(rag): erghea rag.vf_inyvq() naq 
j.rkvfgf(rag.fun) naq rag.fun qrs jnagerphefr_cer(rag): erghea abg nyernql_fnirq(rag) qrs jnagerphefr_qhevat(rag): erghea abg nyernql_fnirq(rag) be rag.fun_zvffvat() gbgny = sgbgny = 0 vs bcg.cebterff: sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_cer): vs abg (sgbgny % 10024): cebterff('Ernqvat vaqrk: %q\e' % sgbgny) rkvfgf = rag.rkvfgf() unfuinyvq = nyernql_fnirq(rag) rag.frg_fun_zvffvat(abg unfuinyvq) vs abg bcg.fznyyre be rag.fvmr < bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: gbgny += rag.fvmr sgbgny += 1 cebterff('Ernqvat vaqrk: %q, qbar.\a' % sgbgny) unfufcyvg.cebterff_pnyyonpx = cebterff_ercbeg gfgneg = gvzr.gvzr() pbhag = fhopbhag = spbhag = 0 ynfgfxvc_anzr = Abar ynfgqve = '' sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_qhevat): (qve, svyr) = bf.cngu.fcyvg(rag.anzr) rkvfgf = (rag.syntf & vaqrk.VK_RKVFGF) unfuinyvq = nyernql_fnirq(rag) jnfzvffvat = rag.fun_zvffvat() byqfvmr = rag.fvmr vs bcg.ireobfr: vs abg rkvfgf: fgnghf = 'Q' ryvs abg unfuinyvq: vs rag.fun == vaqrk.RZCGL_FUN: fgnghf = 'N' ryfr: fgnghf = 'Z' ryfr: fgnghf = ' ' vs bcg.ireobfr >= 2: ybt('%f %-70f\a' % (fgnghf, rag.anzr)) ryvs abg fgng.F_VFQVE(rag.zbqr) naq ynfgqve != qve: vs abg ynfgqve.fgnegfjvgu(qve): ybt('%f %-70f\a' % (fgnghf, bf.cngu.wbva(qve, ''))) ynfgqve = qve vs bcg.cebterff: cebterff_ercbeg(0) spbhag += 1 vs abg rkvfgf: pbagvahr vs bcg.fznyyre naq rag.fvmr >= bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: nqq_reebe('fxvccvat ynetr svyr "%f"' % rag.anzr) ynfgfxvc_anzr = rag.anzr pbagvahr nffreg(qve.fgnegfjvgu('/')) qvec = qve.fcyvg('/') juvyr cnegf > qvec: _cbc(sbepr_gerr = Abar) vs qve != '/': sbe cneg va qvec[yra(cnegf):]: _chfu(cneg) vs abg svyr: # ab svyranzr cbegvba zrnaf guvf vf n fhoqve. Ohg # fho/cneragqverpgbevrf nyernql unaqyrq va gur cbc/chfu() cneg nobir. 
byqgerr = nyernql_fnirq(rag) # znl or Abar arjgerr = _cbc(sbepr_gerr = byqgerr) vs abg byqgerr: vs ynfgfxvc_anzr naq ynfgfxvc_anzr.fgnegfjvgu(rag.anzr): rag.vainyvqngr() ryfr: rag.inyvqngr(040000, arjgerr) rag.ercnpx() vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr pbagvahr # vg'f abg n qverpgbel vq = Abar vs unfuinyvq: zbqr = '%b' % rag.tvgzbqr vq = rag.fun funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) ryfr: vs fgng.F_VFERT(rag.zbqr): gel: s = unfufcyvg.bcra_abngvzr(rag.anzr) rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = unfufcyvg.fcyvg_gb_oybo_be_gerr(j, [s]) ryfr: vs fgng.F_VFQVE(rag.zbqr): nffreg(0) # unaqyrq nobir ryvs fgng.F_VFYAX(rag.zbqr): gel: ey = bf.ernqyvax(rag.anzr) rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = ('120000', j.arj_oybo(ey)) ryfr: nqq_reebe(Rkprcgvba('fxvccvat fcrpvny svyr "%f"' % rag.anzr)) ynfgfxvc_anzr = rag.anzr vs vq: rag.inyvqngr(vag(zbqr, 8), vq) rag.ercnpx() funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr fhopbhag = 0 vs bcg.cebterff: cpg = gbgny naq pbhag*100.0/gbgny be 100 cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf), qbar. 
\a' % (cpg, pbhag/1024, gbgny/1024, spbhag, sgbgny)) juvyr yra(cnegf) > 1: _cbc(sbepr_gerr = Abar) nffreg(yra(funyvfgf) == 1) gerr = j.arj_gerr(funyvfgf[-1]) vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fnir\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq juvyr fnivat.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc gvpx """ b = bcgvbaf.Bcgvbaf('ohc gvpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") g = gvzr.gvzr() gyrsg = 1 - (g - vag(g)) gvzr.fyrrc(gyrsg) #!/hfe/ova/rai clguba vzcbeg bf, flf, fgng, gvzr sebz ohc vzcbeg bcgvbaf, tvg, vaqrk, qerphefr sebz ohc.urycref vzcbeg * qrs zretr_vaqrkrf(bhg, e1, e2): sbe r va vaqrk.ZretrVgre([e1, e2]): # SVKZR: fubhyqa'g jr erzbir qryrgrq ragevrf riraghnyyl? Jura? 
bhg.nqq_vkragel(r) pynff VgreUrycre: qrs __vavg__(frys, y): frys.v = vgre(y) frys.phe = Abar frys.arkg() qrs arkg(frys): gel: frys.phe = frys.v.arkg() rkprcg FgbcVgrengvba: frys.phe = Abar erghea frys.phe qrs purpx_vaqrk(ernqre): gel: ybt('purpx: purpxvat sbejneq vgrengvba...\a') r = Abar q = {} sbe r va ernqre.sbejneq_vgre(): vs r.puvyqera_a: vs bcg.ireobfr: ybt('%08k+%-4q %e\a' % (r.puvyqera_bsf, r.puvyqera_a, r.anzr)) nffreg(r.puvyqera_bsf) nffreg(r.anzr.raqfjvgu('/')) nffreg(abg q.trg(r.puvyqera_bsf)) q[r.puvyqera_bsf] = 1 vs r.syntf & vaqrk.VK_UNFUINYVQ: nffreg(r.fun != vaqrk.RZCGL_FUN) nffreg(r.tvgzbqr) nffreg(abg r be r.anzr == '/') # ynfg ragel vf *nyjnlf* / ybt('purpx: purpxvat abezny vgrengvba...\a') ynfg = Abar sbe r va ernqre: vs ynfg: nffreg(ynfg > r.anzr) ynfg = r.anzr rkprcg: ybt('vaqrk reebe! ng %e\a' % r) envfr ybt('purpx: cnffrq.\a') qrs hcqngr_vaqrk(gbc): ev = vaqrk.Ernqre(vaqrksvyr) jv = vaqrk.Jevgre(vaqrksvyr) evt = VgreUrycre(ev.vgre(anzr=gbc)) gfgneg = vag(gvzr.gvzr()) unfutra = Abar vs bcg.snxr_inyvq: qrs unfutra(anzr): erghea (0100644, vaqrk.SNXR_FUN) gbgny = 0 sbe (cngu,cfg) va qerphefr.erphefvir_qveyvfg([gbc], kqri=bcg.kqri): vs bcg.ireobfr>=2 be (bcg.ireobfr==1 naq fgng.F_VFQVE(cfg.fg_zbqr)): flf.fgqbhg.jevgr('%f\a' % cngu) flf.fgqbhg.syhfu() cebterff('Vaqrkvat: %q\e' % gbgny) ryvs abg (gbgny % 128): cebterff('Vaqrkvat: %q\e' % gbgny) gbgny += 1 juvyr evt.phe naq evt.phe.anzr > cngu: # qryrgrq cnguf vs evt.phe.rkvfgf(): evt.phe.frg_qryrgrq() evt.phe.ercnpx() evt.arkg() vs evt.phe naq evt.phe.anzr == cngu: # cnguf gung nyernql rkvfgrq vs cfg: evt.phe.sebz_fgng(cfg, gfgneg) vs abg (evt.phe.syntf & vaqrk.VK_UNFUINYVQ): vs unfutra: (evt.phe.tvgzbqr, evt.phe.fun) = unfutra(cngu) evt.phe.syntf |= vaqrk.VK_UNFUINYVQ vs bcg.snxr_vainyvq: evt.phe.vainyvqngr() evt.phe.ercnpx() evt.arkg() ryfr: # arj cnguf jv.nqq(cngu, cfg, unfutra = unfutra) cebterff('Vaqrkvat: %q, qbar.\a' % gbgny) vs ev.rkvfgf(): ev.fnir() jv.syhfu() vs jv.pbhag: je = 
jv.arj_ernqre() vs bcg.purpx: ybt('purpx: orsber zretvat: byqsvyr\a') purpx_vaqrk(ev) ybt('purpx: orsber zretvat: arjsvyr\a') purpx_vaqrk(je) zv = vaqrk.Jevgre(vaqrksvyr) zretr_vaqrkrf(zv, ev, je) ev.pybfr() zv.pybfr() je.pybfr() jv.nobeg() ryfr: jv.pybfr() bcgfcrp = """ ohc vaqrk <-c|z|h> [bcgvbaf...] -- c,cevag cevag gur vaqrk ragevrf sbe gur tvira anzrf (nyfb jbexf jvgu -h) z,zbqvsvrq cevag bayl nqqrq/qryrgrq/zbqvsvrq svyrf (vzcyvrf -c) f,fgnghf cevag rnpu svyranzr jvgu n fgnghf pune (N/Z/Q) (vzcyvrf -c) U,unfu cevag gur unfu sbe rnpu bowrpg arkg gb vgf anzr (vzcyvrf -c) y,ybat cevag zber vasbezngvba nobhg rnpu svyr h,hcqngr (erphefviryl) hcqngr gur vaqrk ragevrf sbe gur tvira svyranzrf k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf snxr-inyvq znex nyy vaqrk ragevrf nf hc-gb-qngr rira vs gurl nera'g snxr-vainyvq znex nyy vaqrk ragevrf nf vainyvq purpx pnershyyl purpx vaqrk svyr vagrtevgl s,vaqrksvyr= gur anzr bs gur vaqrk svyr (qrsnhyg 'vaqrk') i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) """ b = bcgvbaf.Bcgvbaf('ohc vaqrk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg (bcg.zbqvsvrq be bcg['cevag'] be bcg.fgnghf be bcg.hcqngr be bcg.purpx): b.sngny('fhccyl bar be zber bs -c, -f, -z, -h, be --purpx') vs (bcg.snxr_inyvq be bcg.snxr_vainyvq) naq abg bcg.hcqngr: b.sngny('--snxr-{va,}inyvq ner zrnavatyrff jvgubhg -h') vs bcg.snxr_inyvq naq bcg.snxr_vainyvq: b.sngny('--snxr-inyvq vf vapbzcngvoyr jvgu --snxr-vainyvq') tvg.purpx_ercb_be_qvr() vaqrksvyr = bcg.vaqrksvyr be tvg.ercb('ohcvaqrk') unaqyr_pgey_p() vs bcg.purpx: ybt('purpx: fgnegvat vavgvny purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) cnguf = vaqrk.erqhpr_cnguf(rkgen) vs bcg.hcqngr: vs abg cnguf: b.sngny('hcqngr (-h) erdhrfgrq ohg ab cnguf tvira') sbe (ec,cngu) va cnguf: hcqngr_vaqrk(ec) vs bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq: sbe (anzr, rag) va vaqrk.Ernqre(vaqrksvyr).svygre(rkgen be ['']): vs (bcg.zbqvsvrq naq (rag.vf_inyvq() be rag.vf_qryrgrq() be abg 
rag.zbqr)): pbagvahr yvar = '' vs bcg.fgnghf: vs rag.vf_qryrgrq(): yvar += 'Q ' ryvs abg rag.vf_inyvq(): vs rag.fun == vaqrk.RZCGL_FUN: yvar += 'N ' ryfr: yvar += 'Z ' ryfr: yvar += ' ' vs bcg.unfu: yvar += rag.fun.rapbqr('urk') + ' ' vs bcg.ybat: yvar += "%7f %7f " % (bpg(rag.zbqr), bpg(rag.tvgzbqr)) cevag yvar + (anzr be './') vs bcg.purpx naq (bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq be bcg.hcqngr): ybt('purpx: fgnegvat svany purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg sebz ohc vzcbeg bcgvbaf, urycref bcgfcrp = """ ohc eonpxhc-freire -- Guvf pbzznaq vf abg vagraqrq gb or eha znahnyyl. """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc-freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') # trg gur fhopbzznaq'f neti. # Abeznyyl jr pbhyq whfg cnff guvf ba gur pbzznaq yvar, ohg fvapr jr'yy bsgra # or trggvat pnyyrq ba gur bgure raq bs na ffu cvcr, juvpu graqf gb znatyr # neti (ol fraqvat vg ivn gur furyy), guvf jnl vf zhpu fnsre. ohs = flf.fgqva.ernq(4) fm = fgehpg.hacnpx('!V', ohs)[0] nffreg(fm > 0) nffreg(fm < 1000000) ohs = flf.fgqva.ernq(fm) nffreg(yra(ohs) == fm) neti = ohs.fcyvg('\0') # fgqva/fgqbhg ner fhccbfrqyl pbaarpgrq gb 'ohc freire' gung gur pnyyre # fgnegrq sbe hf (bsgra ba gur bgure raq bs na ffu ghaary), fb jr qba'g jnag # gb zvfhfr gurz. Zbir gurz bhg bs gur jnl, gura ercynpr fgqbhg jvgu # n cbvagre gb fgqree va pnfr bhe fhopbzznaq jnagf gb qb fbzrguvat jvgu vg. # # Vg zvtug or avpr gb qb gur fnzr jvgu fgqva, ohg zl rkcrevzragf fubjrq gung # ffu frrzf gb znxr vgf puvyq'f fgqree n ernqnoyr-ohg-arire-ernqf-nalguvat # fbpxrg. Gurl ernyyl fubhyq unir hfrq fuhgqbja(FUHG_JE) ba gur bgure raq # bs vg, ohg cebonoyl qvqa'g. Naljnl, vg'f gbb zrffl, fb yrg'f whfg znxr fher # nalbar ernqvat sebz fgqva vf qvfnccbvagrq. 
# # (Lbh pna'g whfg yrnir fgqva/fgqbhg "abg bcra" ol pybfvat gur svyr # qrfpevcgbef. Gura gur arkg svyr gung bcraf vf nhgbzngvpnyyl nffvtarq 0 be 1, # naq crbcyr *gelvat* gb ernq/jevgr fgqva/fgqbhg trg fperjrq.) bf.qhc2(0, 3) bf.qhc2(1, 4) bf.qhc2(2, 1) sq = bf.bcra('/qri/ahyy', bf.B_EQBAYL) bf.qhc2(sq, 0) bf.pybfr(sq) bf.raiveba['OHC_FREIRE_ERIREFR'] = urycref.ubfganzr() bf.rkrpic(neti[0], neti) flf.rkvg(99) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo, fhocebprff, gvzr sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * cne2_bx = 0 ahyys = bcra('/qri/ahyy') qrs qroht(f): vs bcg.ireobfr: ybt(f) qrs eha(neti): # ng yrnfg va clguba 2.5, hfvat "fgqbhg=2" be "fgqbhg=flf.fgqree" orybj # qbrfa'g npghnyyl jbex, orpnhfr fhocebprff pybfrf sq #2 evtug orsber # rkrpvat sbe fbzr ernfba. Fb jr jbex nebhaq vg ol qhcyvpngvat gur sq # svefg. sq = bf.qhc(2) # pbcl fgqree gel: c = fhocebprff.Cbcra(neti, fgqbhg=sq, pybfr_sqf=Snyfr) erghea c.jnvg() svanyyl: bf.pybfr(sq) qrs cne2_frghc(): tybony cne2_bx ei = 1 gel: c = fhocebprff.Cbcra(['cne2', '--uryc'], fgqbhg=ahyys, fgqree=ahyys, fgqva=ahyys) ei = c.jnvg() rkprcg BFReebe: ybt('sfpx: jneavat: cne2 abg sbhaq; qvfnoyvat erpbirel srngherf.\a') ryfr: cne2_bx = 1 qrs cnei(yiy): vs bcg.ireobfr >= yiy: vs vfggl: erghea [] ryfr: erghea ['-d'] ryfr: erghea ['-dd'] qrs cne2_trarengr(onfr): erghea eha(['cne2', 'perngr', '-a1', '-p200'] + cnei(2) + ['--', onfr, onfr+'.cnpx', onfr+'.vqk']) qrs cne2_irevsl(onfr): erghea eha(['cne2', 'irevsl'] + cnei(3) + ['--', onfr]) qrs cne2_ercnve(onfr): erghea eha(['cne2', 'ercnve'] + cnei(2) + ['--', onfr]) qrs dhvpx_irevsl(onfr): s = bcra(onfr + '.cnpx', 'eo') s.frrx(-20, 2) jnagfhz = s.ernq(20) nffreg(yra(jnagfhz) == 20) s.frrx(0) fhz = Fun1() sbe o va puhaxlernqre(s, bf.sfgng(s.svyrab()).fg_fvmr - 20): fhz.hcqngr(o) vs fhz.qvtrfg() != jnagfhz: envfr InyhrReebe('rkcrpgrq %e, tbg %e' % (jnagfhz.rapbqr('urk'), fhz.urkqvtrfg())) qrs tvg_irevsl(onfr): vs bcg.dhvpx: gel: dhvpx_irevsl(onfr) rkprcg Rkprcgvba, r: 
qroht('reebe: %f\a' % r) erghea 1 erghea 0 ryfr: erghea eha(['tvg', 'irevsl-cnpx', '--', onfr]) qrs qb_cnpx(onfr, ynfg): pbqr = 0 vs cne2_bx naq cne2_rkvfgf naq (bcg.ercnve be abg bcg.trarengr): ierfhyg = cne2_irevsl(onfr) vs ierfhyg != 0: vs bcg.ercnve: eerfhyg = cne2_ercnve(onfr) vs eerfhyg != 0: cevag '%f cne2 ercnve: snvyrq (%q)' % (ynfg, eerfhyg) pbqr = eerfhyg ryfr: cevag '%f cne2 ercnve: fhpprrqrq (0)' % ynfg pbqr = 100 ryfr: cevag '%f cne2 irevsl: snvyrq (%q)' % (ynfg, ierfhyg) pbqr = ierfhyg ryfr: cevag '%f bx' % ynfg ryvs abg bcg.trarengr be (cne2_bx naq abg cne2_rkvfgf): terfhyg = tvg_irevsl(onfr) vs terfhyg != 0: cevag '%f tvg irevsl: snvyrq (%q)' % (ynfg, terfhyg) pbqr = terfhyg ryfr: vs cne2_bx naq bcg.trarengr: cerfhyg = cne2_trarengr(onfr) vs cerfhyg != 0: cevag '%f cne2 perngr: snvyrq (%q)' % (ynfg, cerfhyg) pbqr = cerfhyg ryfr: cevag '%f bx' % ynfg ryfr: cevag '%f bx' % ynfg ryfr: nffreg(bcg.trarengr naq (abg cne2_bx be cne2_rkvfgf)) qroht(' fxvccrq: cne2 svyr nyernql trarengrq.\a') erghea pbqr bcgfcrp = """ ohc sfpx [bcgvbaf...] [svyranzrf...] -- e,ercnve nggrzcg gb ercnve reebef hfvat cne2 (qnatrebhf!) 
t,trarengr trarengr nhgb-ercnve vasbezngvba hfvat cne2 i,ireobfr vapernfr ireobfvgl (pna or hfrq zber guna bapr) dhvpx whfg purpx cnpx fun1fhz, qba'g hfr tvg irevsl-cnpx w,wbof= eha 'a' wbof va cnenyyry cne2-bx vzzrqvngryl erghea 0 vs cne2 vf bx, 1 vs abg qvfnoyr-cne2 vtaber cne2 rira vs vg vf ninvynoyr """ b = bcgvbaf.Bcgvbaf('ohc sfpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) cne2_frghc() vs bcg.cne2_bx: vs cne2_bx: flf.rkvg(0) # 'gehr' va fu ryfr: flf.rkvg(1) vs bcg.qvfnoyr_cne2: cne2_bx = 0 tvg.purpx_ercb_be_qvr() vs abg rkgen: qroht('sfpx: Ab svyranzrf tvira: purpxvat nyy cnpxf.\a') rkgen = tybo.tybo(tvg.ercb('bowrpgf/cnpx/*.cnpx')) pbqr = 0 pbhag = 0 bhgfgnaqvat = {} sbe anzr va rkgen: vs anzr.raqfjvgu('.cnpx'): onfr = anzr[:-5] ryvs anzr.raqfjvgu('.vqk'): onfr = anzr[:-4] ryvs anzr.raqfjvgu('.cne2'): onfr = anzr[:-5] ryvs bf.cngu.rkvfgf(anzr + '.cnpx'): onfr = anzr ryfr: envfr Rkprcgvba('%f vf abg n cnpx svyr!' % anzr) (qve,ynfg) = bf.cngu.fcyvg(onfr) cne2_rkvfgf = bf.cngu.rkvfgf(onfr + '.cne2') vs cne2_rkvfgf naq bf.fgng(onfr + '.cne2').fg_fvmr == 0: cne2_rkvfgf = 0 flf.fgqbhg.syhfu() qroht('sfpx: purpxvat %f (%f)\a' % (ynfg, cne2_bx naq cne2_rkvfgf naq 'cne2' be 'tvg')) vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.wbof: ap = qb_cnpx(onfr, ynfg) pbqr = pbqr be ap pbhag += 1 ryfr: juvyr yra(bhgfgnaqvat) >= bcg.wbof: (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 cvq = bf.sbex() vs cvq: # cnerag bhgfgnaqvat[cvq] = 1 ryfr: # puvyq gel: flf.rkvg(qb_cnpx(onfr, ynfg)) rkprcg Rkprcgvba, r: ybt('rkprcgvba: %e\a' % r) flf.rkvg(99) juvyr yra(bhgfgnaqvat): (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.ireobfr naq vfggl: ybt('sfpx qbar. 
\a') flf.rkvg(pbqr) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg, trgbcg, fhocebprff, fvtany sebz ohc vzcbeg bcgvbaf, ffu sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc eonpxhc vaqrk ... ohc eonpxhc fnir ... ohc eonpxhc fcyvg ... """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc', bcgfcrp, bcgshap=trgbcg.trgbcg) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) < 2: b.sngny('nethzragf rkcrpgrq') pynff FvtRkprcgvba(Rkprcgvba): qrs __vavg__(frys, fvtahz): frys.fvtahz = fvtahz Rkprcgvba.__vavg__(frys, 'fvtany %q erprvirq' % fvtahz) qrs unaqyre(fvtahz, senzr): envfr FvtRkprcgvba(fvtahz) fvtany.fvtany(fvtany.FVTGREZ, unaqyre) fvtany.fvtany(fvtany.FVTVAG, unaqyre) fc = Abar c = Abar erg = 99 gel: ubfganzr = rkgen[0] neti = rkgen[1:] c = ffu.pbaarpg(ubfganzr, 'eonpxhc-freire') netif = '\0'.wbva(['ohc'] + neti) c.fgqva.jevgr(fgehpg.cnpx('!V', yra(netif)) + netif) c.fgqva.syhfu() znva_rkr = bf.raiveba.trg('OHC_ZNVA_RKR') be flf.neti[0] fc = fhocebprff.Cbcra([znva_rkr, 'freire'], fgqva=c.fgqbhg, fgqbhg=c.fgqva) c.fgqva.pybfr() c.fgqbhg.pybfr() svanyyl: juvyr 1: # vs jr trg n fvtany juvyr jnvgvat, jr unir gb xrrc jnvgvat, whfg # va pnfr bhe puvyq qbrfa'g qvr. 
gel: erg = c.jnvg() fc.jnvg() oernx rkprcg FvtRkprcgvba, r: ybt('\aohc eonpxhc: %f\a' % r) bf.xvyy(c.cvq, r.fvtahz) erg = 84 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc arjyvare """ b = bcgvbaf.Bcgvbaf('ohc arjyvare', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") e = er.pbzcvyr(e'([\e\a])') ynfgyra = 0 nyy = '' juvyr 1: y = e.fcyvg(nyy, 1) vs yra(y) <= 1: gel: o = bf.ernq(flf.fgqva.svyrab(), 4096) rkprcg XrlobneqVagreehcg: oernx vs abg o: oernx nyy += o ryfr: nffreg(yra(y) == 3) (yvar, fcyvgpune, nyy) = y #fcyvgpune = '\a' flf.fgqbhg.jevgr('%-*f%f' % (ynfgyra, yvar, fcyvgpune)) vs fcyvgpune == '\e': ynfgyra = yra(yvar) ryfr: ynfgyra = 0 flf.fgqbhg.syhfu() vs ynfgyra be nyy: flf.fgqbhg.jevgr('%-*f\a' % (ynfgyra, nyy)) #!/hfe/ova/rai clguba vzcbeg flf sebz ohc vzcbeg bcgvbaf, tvg, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc znetva """ b = bcgvbaf.Bcgvbaf('ohc znetva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") tvg.purpx_ercb_be_qvr() #tvg.vtaber_zvqk = 1 zv = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) ynfg = '\0'*20 ybatzngpu = 0 sbe v va zv: vs v == ynfg: pbagvahr #nffreg(fge(v) >= ynfg) cz = _unfufcyvg.ovgzngpu(ynfg, v) ybatzngpu = znk(ybatzngpu, cz) ynfg = v cevag ybatzngpu bup-0.33.3/test/testfile2000066400000000000000000004657101454333004200152040ustar00rootroot00000000000000#!/hfe/ova/rai clguba sebz ohc vzcbeg bcgvbaf, qerphefr sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc qerphefr -- k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf d,dhvrg qba'g npghnyyl cevag svyranzrf cebsvyr eha haqre gur clguba cebsvyre """ b = bcgvbaf.Bcgvbaf('ohc qerphefr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar svyranzr rkcrpgrq") vg = qerphefr.erphefvir_qveyvfg(rkgen, bcg.kqri) vs bcg.cebsvyr: vzcbeg pCebsvyr qrs qb_vg(): sbe v va vg: cnff 
pCebsvyr.eha('qb_vg()') ryfr: vs bcg.dhvrg: sbe v va vg: cnff ryfr: sbe (anzr,fg) va vg: cevag anzr vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc fcyvg [-gpo] [-a anzr] [--orapu] [svyranzrf...] -- e,erzbgr= erzbgr ercbfvgbel cngu o,oybof bhgchg n frevrf bs oybo vqf g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) A,abbc qba'g npghnyyl fnir gur qngn naljurer d,dhvrg qba'g cevag cebterff zrffntrf i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) pbcl whfg pbcl vachg gb bhgchg, unfufcyvggvat nybat gur jnl orapu cevag orapuznex gvzvatf gb fgqree znk-cnpx-fvmr= znkvzhz olgrf va n fvatyr cnpx znk-cnpx-bowrpgf= znkvzhz ahzore bs bowrpgf va n fvatyr cnpx snabhg= znkvzhz ahzore bs oybof va n fvatyr gerr """ b = bcgvbaf.Bcgvbaf('ohc fcyvg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr be bcg.abbc be bcg.pbcl): b.sngny("hfr bar be zber bs -o, -g, -p, -a, -A, --pbcl") vs (bcg.abbc be bcg.pbcl) naq (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny('-A vf vapbzcngvoyr jvgu -o, -g, -p, -a') vs bcg.ireobfr >= 2: tvg.ireobfr = bcg.ireobfr - 1 bcg.orapu = 1 vs bcg.znk_cnpx_fvmr: unfufcyvg.znk_cnpx_fvmr = cnefr_ahz(bcg.znk_cnpx_fvmr) vs bcg.znk_cnpx_bowrpgf: unfufcyvg.znk_cnpx_bowrpgf = cnefr_ahz(bcg.znk_cnpx_bowrpgf) vs bcg.snabhg: unfufcyvg.snabhg = cnefr_ahz(bcg.snabhg) vs bcg.oybof: unfufcyvg.snabhg = 0 vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") fgneg_gvzr = gvzr.gvzr() ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.abbc be bcg.pbcl: pyv = j = byqers = Abar ryvs bcg.erzbgr be vf_erirefr: pyv = 
pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() svyrf = rkgen naq (bcra(sa) sbe sa va rkgen) be [flf.fgqva] vs j: funyvfg = unfufcyvg.fcyvg_gb_funyvfg(j, svyrf) gerr = j.arj_gerr(funyvfg) ryfr: ynfg = 0 sbe (oybo, ovgf) va unfufcyvg.unfufcyvg_vgre(svyrf): unfufcyvg.gbgny_fcyvg += yra(oybo) vs bcg.pbcl: flf.fgqbhg.jevgr(fge(oybo)) zrtf = unfufcyvg.gbgny_fcyvg/1024/1024 vs abg bcg.dhvrg naq ynfg != zrtf: cebterff('%q Zolgrf ernq\e' % zrtf) ynfg = zrtf cebterff('%q Zolgrf ernq, qbar.\a' % zrtf) vs bcg.ireobfr: ybt('\a') vs bcg.oybof: sbe (zbqr,anzr,ova) va funyvfg: cevag ova.rapbqr('urk') vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fcyvg\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') vs j: j.pwba vf punatvat fbzr enaqbz olgrf urer naq gurers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() frpf = gvzr.gvzr() - fgneg_gvzr fvmr = unfufcyvg.gbgny_fcyvg vs bcg.orapu: ybt('\aohc: %.2sxolgrf va %.2s frpf = %.2s xolgrf/frp\a' % (fvmr/1024., frpf, fvmr/1024./frpf)) #!/hfe/ova/rai clguba vzcbeg flf, er, fgehpg, zznc sebz ohc vzcbeg tvg, bcgvbaf sebz ohc.urycref vzcbeg * qrs f_sebz_olgrf(olgrf): pyvfg = [pue(o) sbe o va olgrf] erghea ''.wbva(pyvfg) qrs ercbeg(pbhag): svryqf = ['IzFvmr', 'IzEFF', 'IzQngn', 'IzFgx'] q = {} sbe yvar va bcra('/cebp/frys/fgnghf').ernqyvarf(): y = er.fcyvg(e':\f*', yvar.fgevc(), 1) q[y[0]] = y[1] vs pbhag >= 0: r1 = pbhag svryqf = [q[x] sbe x va svryqf] ryfr: r1 = '' cevag ('%9f ' + ('%10f ' * yra(svryqf))) % ghcyr([r1] + svryqf) flf.fgqbhg.syhfu() bcgfcrp = """ ohc zrzgrfg [-a ryrzragf] [-p plpyrf] -- a,ahzore= ahzore bs bowrpgf cre plpyr p,plpyrf= ahzore bs plpyrf gb 
eha vtaber-zvqk vtaber .zvqk svyrf, hfr bayl .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zrzgrfg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') tvg.vtaber_zvqk = bcg.vtaber_zvqk tvg.purpx_ercb_be_qvr() z = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) plpyrf = bcg.plpyrf be 100 ahzore = bcg.ahzore be 10000 ercbeg(-1) s = bcra('/qri/henaqbz') n = zznc.zznc(-1, 20) ercbeg(0) sbe p va kenatr(plpyrf): sbe a va kenatr(ahzore): o = s.ernq(3) vs 0: olgrf = yvfg(fgehpg.hacnpx('!OOO', o)) + [0]*17 olgrf[2] &= 0ks0 ova = fgehpg.cnpx('!20f', f_sebz_olgrf(olgrf)) ryfr: n[0:2] = o[0:2] n[2] = pue(beq(o[2]) & 0ks0) ova = fge(n[0:20]) #cevag ova.rapbqr('urk') z.rkvfgf(ova) ercbeg((p+1)*ahzore) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * qrs cevag_abqr(grkg, a): cersvk = '' vs bcg.unfu: cersvk += "%f " % a.unfu.rapbqr('urk') vs fgng.F_VFQVE(a.zbqr): cevag '%f%f/' % (cersvk, grkg) ryvs fgng.F_VFYAX(a.zbqr): cevag '%f%f@' % (cersvk, grkg) ryfr: cevag '%f%f' % (cersvk, grkg) bcgfcrp = """ ohc yf -- f,unfu fubj unfu sbe rnpu svyr """ b = bcgvbaf.Bcgvbaf('ohc yf', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) vs abg rkgen: rkgen = ['/'] erg = 0 sbe q va rkgen: gel: a = gbc.yerfbyir(q) vs fgng.F_VFQVE(a.zbqr): sbe fho va a: cevag_abqr(fho.anzr, fho) ryfr: cevag_abqr(q, a) rkprcg isf.AbqrReebe, r: ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er, fgng, ernqyvar, sazngpu sebz ohc vzcbeg bcgvbaf, tvg, fudhbgr, isf sebz ohc.urycref vzcbeg * qrs abqr_anzr(grkg, a): vs fgng.F_VFQVE(a.zbqr): erghea '%f/' % grkg ryvs fgng.F_VFYAX(a.zbqr): erghea '%f@' % grkg ryfr: erghea '%f' % grkg qrs qb_yf(cngu, a): y = [] vs fgng.F_VFQVE(a.zbqr): sbe fho va a: y.nccraq(abqr_anzr(fho.anzr, fho)) ryfr: y.nccraq(abqr_anzr(cngu, a)) cevag pbyhzangr(y, '') qrs jevgr_gb_svyr(vas, bhgs): sbe oybo va puhaxlernqre(vas): 
bhgs.jevgr(oybo) qrs vachgvgre(): vs bf.vfnggl(flf.fgqva.svyrab()): juvyr 1: gel: lvryq enj_vachg('ohc> ') rkprcg RBSReebe: oernx ryfr: sbe yvar va flf.fgqva: lvryq yvar qrs _pbzcyrgre_trg_fhof(yvar): (dglcr, ynfgjbeq) = fudhbgr.hasvavfurq_jbeq(yvar) (qve,anzr) = bf.cngu.fcyvg(ynfgjbeq) #ybt('\apbzcyrgre: %e %e %e\a' % (dglcr, ynfgjbeq, grkg)) a = cjq.erfbyir(qve) fhof = yvfg(svygre(ynzoqn k: k.anzr.fgnegfjvgu(anzr), a.fhof())) erghea (qve, anzr, dglcr, ynfgjbeq, fhof) _ynfg_yvar = Abar _ynfg_erf = Abar qrs pbzcyrgre(grkg, fgngr): tybony _ynfg_yvar tybony _ynfg_erf gel: yvar = ernqyvar.trg_yvar_ohssre()[:ernqyvar.trg_raqvqk()] vs _ynfg_yvar != yvar: _ynfg_erf = _pbzcyrgre_trg_fhof(yvar) _ynfg_yvar = yvar (qve, anzr, dglcr, ynfgjbeq, fhof) = _ynfg_erf vs fgngr < yra(fhof): fa = fhof[fgngr] fa1 = fa.erfbyir('') # qrers flzyvaxf shyyanzr = bf.cngu.wbva(qve, fa.anzr) vs fgng.F_VFQVE(fa1.zbqr): erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr+'/', grezvangr=Snyfr) ryfr: erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr, grezvangr=Gehr) + ' ' erghea grkg + erg rkprcg Rkprcgvba, r: ybt('\areebe va pbzcyrgvba: %f\a' % r) bcgfcrp = """ ohc sgc """ b = bcgvbaf.Bcgvbaf('ohc sgc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) cjq = gbc vs rkgen: yvarf = rkgen ryfr: ernqyvar.frg_pbzcyrgre_qryvzf(' \g\a\e/') ernqyvar.frg_pbzcyrgre(pbzcyrgre) ernqyvar.cnefr_naq_ovaq("gno: pbzcyrgr") yvarf = vachgvgre() sbe yvar va yvarf: vs abg yvar.fgevc(): pbagvahr jbeqf = [jbeq sbe (jbeqfgneg,jbeq) va fudhbgr.dhbgrfcyvg(yvar)] pzq = jbeqf[0].ybjre() #ybt('rkrphgr: %e %e\a' % (pzq, cnez)) gel: vs pzq == 'yf': sbe cnez va (jbeqf[1:] be ['.']): qb_yf(cnez, cjq.erfbyir(cnez)) ryvs pzq == 'pq': sbe cnez va jbeqf[1:]: cjq = cjq.erfbyir(cnez) ryvs pzq == 'cjq': cevag cjq.shyyanzr() ryvs pzq == 'png': sbe cnez va jbeqf[1:]: jevgr_gb_svyr(cjq.erfbyir(cnez).bcra(), flf.fgqbhg) ryvs pzq == 'trg': vs yra(jbeqf) abg va [2,3]: envfr 
Rkprcgvba('Hfntr: trg [ybpnyanzr]') eanzr = jbeqf[1] (qve,onfr) = bf.cngu.fcyvg(eanzr) yanzr = yra(jbeqf)>2 naq jbeqf[2] be onfr vas = cjq.erfbyir(eanzr).bcra() ybt('Fnivat %e\a' % yanzr) jevgr_gb_svyr(vas, bcra(yanzr, 'jo')) ryvs pzq == 'ztrg': sbe cnez va jbeqf[1:]: (qve,onfr) = bf.cngu.fcyvg(cnez) sbe a va cjq.erfbyir(qve).fhof(): vs sazngpu.sazngpu(a.anzr, onfr): gel: ybt('Fnivat %e\a' % a.anzr) vas = a.bcra() bhgs = bcra(a.anzr, 'jo') jevgr_gb_svyr(vas, bhgs) bhgs.pybfr() rkprcg Rkprcgvba, r: ybt(' reebe: %f\a' % r) ryvs pzq == 'uryc' be pzq == '?': ybt('Pbzznaqf: yf pq cjq png trg ztrg uryc dhvg\a') ryvs pzq == 'dhvg' be pzq == 'rkvg' be pzq == 'olr': oernx ryfr: envfr Rkprcgvba('ab fhpu pbzznaq %e' % pzq) rkprcg Rkprcgvba, r: ybt('reebe: %f\a' % r) #envfr #!/hfe/ova/rai clguba vzcbeg flf, zznc sebz ohc vzcbeg bcgvbaf, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc enaqbz [-F frrq] -- F,frrq= bcgvbany enaqbz ahzore frrq (qrsnhyg 1) s,sbepr cevag enaqbz qngn gb fgqbhg rira vs vg'f n ggl """ b = bcgvbaf.Bcgvbaf('ohc enaqbz', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") gbgny = cnefr_ahz(rkgen[0]) vs bcg.sbepr be (abg bf.vfnggl(1) naq abg ngbv(bf.raiveba.trg('OHC_SBEPR_GGL')) & 1): _unfufcyvg.jevgr_enaqbz(flf.fgqbhg.svyrab(), gbgny, bcg.frrq be 0) ryfr: ybt('reebe: abg jevgvat ovanel qngn gb n grezvany. 
Hfr -s gb sbepr.\a') flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc uryc """ b = bcgvbaf.Bcgvbaf('ohc uryc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) == 0: # gur jenccre cebtenz cebivqrf gur qrsnhyg hfntr fgevat bf.rkrpic(bf.raiveba['OHC_ZNVA_RKR'], ['ohc']) ryvs yra(rkgen) == 1: qbpanzr = (rkgen[0]=='ohc' naq 'ohc' be ('ohc-%f' % rkgen[0])) rkr = flf.neti[0] (rkrcngu, rkrsvyr) = bf.cngu.fcyvg(rkr) znacngu = bf.cngu.wbva(rkrcngu, '../Qbphzragngvba/' + qbpanzr + '.[1-9]') t = tybo.tybo(znacngu) vs t: bf.rkrpic('zna', ['zna', '-y', t[0]]) ryfr: bf.rkrpic('zna', ['zna', qbpanzr]) ryfr: b.sngny("rknpgyl bar pbzznaq anzr rkcrpgrq") #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng, reeab, shfr, er, gvzr, grzcsvyr sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * pynff Fgng(shfr.Fgng): qrs __vavg__(frys): frys.fg_zbqr = 0 frys.fg_vab = 0 frys.fg_qri = 0 frys.fg_ayvax = 0 frys.fg_hvq = 0 frys.fg_tvq = 0 frys.fg_fvmr = 0 frys.fg_ngvzr = 0 frys.fg_zgvzr = 0 frys.fg_pgvzr = 0 frys.fg_oybpxf = 0 frys.fg_oyxfvmr = 0 frys.fg_eqri = 0 pnpur = {} qrs pnpur_trg(gbc, cngu): cnegf = cngu.fcyvg('/') pnpur[('',)] = gbc p = Abar znk = yra(cnegf) #ybt('pnpur: %e\a' % pnpur.xrlf()) sbe v va enatr(znk): cer = cnegf[:znk-v] #ybt('pnpur gelvat: %e\a' % cer) p = pnpur.trg(ghcyr(cer)) vs p: erfg = cnegf[znk-v:] sbe e va erfg: #ybt('erfbyivat %e sebz %e\a' % (e, p.shyyanzr())) p = p.yerfbyir(e) xrl = ghcyr(cer + [e]) #ybt('fnivat: %e\a' % (xrl,)) pnpur[xrl] = p oernx nffreg(p) erghea p pynff OhcSf(shfr.Shfr): qrs __vavg__(frys, gbc): shfr.Shfr.__vavg__(frys) frys.gbc = gbc qrs trgngge(frys, cngu): ybt('--trgngge(%e)\a' % cngu) gel: abqr = pnpur_trg(frys.gbc, cngu) fg = Fgng() fg.fg_zbqr = abqr.zbqr fg.fg_ayvax = abqr.ayvaxf() fg.fg_fvmr = abqr.fvmr() fg.fg_zgvzr = abqr.zgvzr fg.fg_pgvzr = abqr.pgvzr fg.fg_ngvzr = abqr.ngvzr erghea fg rkprcg isf.AbFhpuSvyr: erghea -reeab.RABRAG qrs ernqqve(frys, cngu, bssfrg): 
ybt('--ernqqve(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) lvryq shfr.Qveragel('.') lvryq shfr.Qveragel('..') sbe fho va abqr.fhof(): lvryq shfr.Qveragel(fho.anzr) qrs ernqyvax(frys, cngu): ybt('--ernqyvax(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) erghea abqr.ernqyvax() qrs bcra(frys, cngu, syntf): ybt('--bcra(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) nppzbqr = bf.B_EQBAYL | bf.B_JEBAYL | bf.B_EQJE vs (syntf & nppzbqr) != bf.B_EQBAYL: erghea -reeab.RNPPRF abqr.bcra() qrs eryrnfr(frys, cngu, syntf): ybt('--eryrnfr(%e)\a' % cngu) qrs ernq(frys, cngu, fvmr, bssfrg): ybt('--ernq(%e)\a' % cngu) a = pnpur_trg(frys.gbc, cngu) b = a.bcra() b.frrx(bssfrg) erghea b.ernq(fvmr) vs abg unfngge(shfr, '__irefvba__'): envfr EhagvzrReebe, "lbhe shfr zbqhyr vf gbb byq sbe shfr.__irefvba__" shfr.shfr_clguba_ncv = (0, 2) bcgfcrp = """ ohc shfr [-q] [-s] -- q,qroht vapernfr qroht yriry s,sbertebhaq eha va sbertebhaq """ b = bcgvbaf.Bcgvbaf('ohc shfr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) s = OhcSf(gbc) s.shfr_netf.zbhagcbvag = rkgen[0] vs bcg.qroht: s.shfr_netf.nqq('qroht') vs bcg.sbertebhaq: s.shfr_netf.frgzbq('sbertebhaq') cevag s.zhygvguernqrq s.zhygvguernqrq = Snyfr s.znva() #!/hfe/ova/rai clguba sebz ohc vzcbeg tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ [OHC_QVE=...] ohc vavg [-e ubfg:cngu] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc vavg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") vs bcg.erzbgr: tvg.vavg_ercb() # ybpny ercb tvg.purpx_ercb_be_qvr() pyv = pyvrag.Pyvrag(bcg.erzbgr, perngr=Gehr) pyv.pybfr() ryfr: tvg.vavg_ercb() #!/hfe/ova/rai clguba vzcbeg flf, zngu, fgehpg, tybo sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * CNTR_FVMR=4096 FUN_CRE_CNTR=CNTR_FVMR/200. 
qrs zretr(vqkyvfg, ovgf, gnoyr): pbhag = 0 sbe r va tvg.vqkzretr(vqkyvfg): pbhag += 1 cersvk = tvg.rkgenpg_ovgf(r, ovgf) gnoyr[cersvk] = pbhag lvryq r qrs qb_zvqk(bhgqve, bhgsvyranzr, vasvyranzrf): vs abg bhgsvyranzr: nffreg(bhgqve) fhz = Fun1('\0'.wbva(vasvyranzrf)).urkqvtrfg() bhgsvyranzr = '%f/zvqk-%f.zvqk' % (bhgqve, fhz) vac = [] gbgny = 0 sbe anzr va vasvyranzrf: vk = tvg.CnpxVqk(anzr) vac.nccraq(vk) gbgny += yra(vk) ybt('Zretvat %q vaqrkrf (%q bowrpgf).\a' % (yra(vasvyranzrf), gbgny)) vs (abg bcg.sbepr naq (gbgny < 1024 naq yra(vasvyranzrf) < 3)) \ be (bcg.sbepr naq abg gbgny): ybt('zvqk: abguvat gb qb.\a') erghea cntrf = vag(gbgny/FUN_CRE_CNTR) be 1 ovgf = vag(zngu.prvy(zngu.ybt(cntrf, 2))) ragevrf = 2**ovgf ybt('Gnoyr fvmr: %q (%q ovgf)\a' % (ragevrf*4, ovgf)) gnoyr = [0]*ragevrf gel: bf.hayvax(bhgsvyranzr) rkprcg BFReebe: cnff s = bcra(bhgsvyranzr + '.gzc', 'j+') s.jevgr('ZVQK\0\0\0\2') s.jevgr(fgehpg.cnpx('!V', ovgf)) nffreg(s.gryy() == 12) s.jevgr('\0'*4*ragevrf) sbe r va zretr(vac, ovgf, gnoyr): s.jevgr(r) s.jevgr('\0'.wbva(bf.cngu.onfranzr(c) sbe c va vasvyranzrf)) s.frrx(12) s.jevgr(fgehpg.cnpx('!%qV' % ragevrf, *gnoyr)) s.pybfr() bf.eranzr(bhgsvyranzr + '.gzc', bhgsvyranzr) # guvf vf whfg sbe grfgvat vs 0: c = tvg.CnpxZvqk(bhgsvyranzr) nffreg(yra(c.vqkanzrf) == yra(vasvyranzrf)) cevag c.vqkanzrf nffreg(yra(c) == gbgny) cv = vgre(c) sbe v va zretr(vac, gbgny, ovgf, gnoyr): nffreg(v == cv.arkg()) nffreg(c.rkvfgf(v)) cevag bhgsvyranzr bcgfcrp = """ ohc zvqk [bcgvbaf...] 
-- b,bhgchg= bhgchg zvqk svyranzr (qrsnhyg: nhgb-trarengrq) n,nhgb nhgbzngvpnyyl perngr .zvqk sebz nal havaqrkrq .vqk svyrf s,sbepr nhgbzngvpnyyl perngr .zvqk sebz *nyy* .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zvqk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen naq (bcg.nhgb be bcg.sbepr): b.sngny("lbh pna'g hfr -s/-n naq nyfb cebivqr svyranzrf") tvg.purpx_ercb_be_qvr() vs rkgen: qb_zvqk(tvg.ercb('bowrpgf/cnpx'), bcg.bhgchg, rkgen) ryvs bcg.nhgb be bcg.sbepr: cnguf = [tvg.ercb('bowrpgf/cnpx')] cnguf += tybo.tybo(tvg.ercb('vaqrk-pnpur/*/.')) sbe cngu va cnguf: ybt('zvqk: fpnaavat %f\a' % cngu) vs bcg.sbepr: qb_zvqk(cngu, bcg.bhgchg, tybo.tybo('%f/*.vqk' % cngu)) ryvs bcg.nhgb: z = tvg.CnpxVqkYvfg(cngu) arrqrq = {} sbe cnpx va z.cnpxf: # bayl .vqk svyrf jvgubhg n .zvqk ner bcra vs cnpx.anzr.raqfjvgu('.vqk'): arrqrq[cnpx.anzr] = 1 qry z qb_zvqk(cngu, bcg.bhgchg, arrqrq.xrlf()) ybt('\a') ryfr: b.sngny("lbh zhfg hfr -s be -n be cebivqr vachg svyranzrf") #!/hfe/ova/rai clguba vzcbeg flf, bf, enaqbz sebz ohc vzcbeg bcgvbaf sebz ohc.urycref vzcbeg * qrs enaqoybpx(a): y = [] sbe v va kenatr(a): y.nccraq(pue(enaqbz.enaqenatr(0,256))) erghea ''.wbva(y) bcgfcrp = """ ohc qnzntr [-a pbhag] [-f znkfvmr] [-F frrq] -- JNEAVAT: GUVF PBZZNAQ VF RKGERZRYL QNATREBHF a,ahz= ahzore bs oybpxf gb qnzntr f,fvmr= znkvzhz fvmr bs rnpu qnzntrq oybpx creprag= znkvzhz fvmr bs rnpu qnzntrq oybpx (nf n creprag bs ragver svyr) rdhny fcernq qnzntr rirayl guebhtubhg gur svyr F,frrq= enaqbz ahzore frrq (sbe ercrngnoyr grfgf) """ b = bcgvbaf.Bcgvbaf('ohc qnzntr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg rkgen: b.sngny('svyranzrf rkcrpgrq') vs bcg.frrq != Abar: enaqbz.frrq(bcg.frrq) sbe anzr va rkgen: ybt('Qnzntvat "%f"...\a' % anzr) s = bcra(anzr, 'e+o') fg = bf.sfgng(s.svyrab()) fvmr = fg.fg_fvmr vs bcg.creprag be bcg.fvmr: zf1 = vag(sybng(bcg.creprag be 0)/100.0*fvmr) be fvmr zf2 = bcg.fvmr be fvmr znkfvmr = zva(zf1, zf2) ryfr: znkfvmr = 1 puhaxf = bcg.ahz be 10 
puhaxfvmr = fvmr/puhaxf sbe e va enatr(puhaxf): fm = enaqbz.enaqenatr(1, znkfvmr+1) vs fm > fvmr: fm = fvmr vs bcg.rdhny: bsf = e*puhaxfvmr ryfr: bsf = enaqbz.enaqenatr(0, fvmr - fm + 1) ybt(' %6q olgrf ng %q\a' % (fm, bsf)) s.frrx(bsf) s.jevgr(enaqoybpx(fm)) s.pybfr() #!/hfe/ova/rai clguba vzcbeg flf, fgehpg, zznc sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * fhfcraqrq_j = Abar qrs vavg_qve(pbaa, net): tvg.vavg_ercb(net) ybt('ohc freire: ohcqve vavgvnyvmrq: %e\a' % tvg.ercbqve) pbaa.bx() qrs frg_qve(pbaa, net): tvg.purpx_ercb_be_qvr(net) ybt('ohc freire: ohcqve vf %e\a' % tvg.ercbqve) pbaa.bx() qrs yvfg_vaqrkrf(pbaa, whax): tvg.purpx_ercb_be_qvr() sbe s va bf.yvfgqve(tvg.ercb('bowrpgf/cnpx')): vs s.raqfjvgu('.vqk'): pbaa.jevgr('%f\a' % s) pbaa.bx() qrs fraq_vaqrk(pbaa, anzr): tvg.purpx_ercb_be_qvr() nffreg(anzr.svaq('/') < 0) nffreg(anzr.raqfjvgu('.vqk')) vqk = tvg.CnpxVqk(tvg.ercb('bowrpgf/cnpx/%f' % anzr)) pbaa.jevgr(fgehpg.cnpx('!V', yra(vqk.znc))) pbaa.jevgr(vqk.znc) pbaa.bx() qrs erprvir_bowrpgf(pbaa, whax): tybony fhfcraqrq_j tvg.purpx_ercb_be_qvr() fhttrfgrq = {} vs fhfcraqrq_j: j = fhfcraqrq_j fhfcraqrq_j = Abar ryfr: j = tvg.CnpxJevgre() juvyr 1: af = pbaa.ernq(4) vs abg af: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq yratgu urnqre, tbg RBS\a') a = fgehpg.hacnpx('!V', af)[0] #ybt('rkcrpgvat %q olgrf\a' % a) vs abg a: ybt('ohc freire: erprvirq %q bowrpg%f.\a' % (j.pbhag, j.pbhag!=1 naq "f" be '')) shyycngu = j.pybfr() vs shyycngu: (qve, anzr) = bf.cngu.fcyvg(shyycngu) pbaa.jevgr('%f.vqk\a' % anzr) pbaa.bx() erghea ryvs a == 0kssssssss: ybt('ohc freire: erprvir-bowrpgf fhfcraqrq.\a') fhfcraqrq_j = j pbaa.bx() erghea ohs = pbaa.ernq(a) # bowrpg fvmrf va ohc ner ernfbanoyl fznyy #ybt('ernq %q olgrf\a' % a) vs yra(ohs) < a: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq %q olgrf, tbg %q\a' % (a, yra(ohs))) (glcr, pbagrag) = tvg._qrpbqr_cnpxbow(ohs) fun = tvg.pnyp_unfu(glcr, pbagrag) byqcnpx = j.rkvfgf(fun) # SVKZR: jr bayl fhttrfg n fvatyr 
vaqrk cre plpyr, orpnhfr gur pyvrag # vf pheeragyl qhzo gb qbjaybnq zber guna bar cre plpyr naljnl. # Npghnyyl jr fubhyq svk gur pyvrag, ohg guvf vf n zvabe bcgvzvmngvba # ba gur freire fvqr. vs abg fhttrfgrq naq \ byqcnpx naq (byqcnpx == Gehr be byqcnpx.raqfjvgu('.zvqk')): # SVKZR: jr fubhyqa'g ernyyl unir gb xabj nobhg zvqk svyrf # ng guvf ynlre. Ohg rkvfgf() ba n zvqk qbrfa'g erghea gur # cnpxanzr (fvapr vg qbrfa'g xabj)... cebonoyl jr fubhyq whfg # svk gung qrsvpvrapl bs zvqk svyrf riraghnyyl, nygubhtu vg'yy # znxr gur svyrf ovttre. Guvf zrgubq vf pregnvayl abg irel # rssvpvrag. j.bowpnpur.erserfu(fxvc_zvqk = Gehr) byqcnpx = j.bowpnpur.rkvfgf(fun) ybt('arj fhttrfgvba: %e\a' % byqcnpx) nffreg(byqcnpx) nffreg(byqcnpx != Gehr) nffreg(abg byqcnpx.raqfjvgu('.zvqk')) j.bowpnpur.erserfu(fxvc_zvqk = Snyfr) vs abg fhttrfgrq naq byqcnpx: nffreg(byqcnpx.raqfjvgu('.vqk')) (qve,anzr) = bf.cngu.fcyvg(byqcnpx) vs abg (anzr va fhttrfgrq): ybt("ohc freire: fhttrfgvat vaqrk %f\a" % anzr) pbaa.jevgr('vaqrk %f\a' % anzr) fhttrfgrq[anzr] = 1 ryfr: j._enj_jevgr([ohs]) # ABGERNPURQ qrs ernq_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() e = tvg.ernq_ers(ersanzr) pbaa.jevgr('%f\a' % (e be '').rapbqr('urk')) pbaa.bx() qrs hcqngr_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() arjiny = pbaa.ernqyvar().fgevc() byqiny = pbaa.ernqyvar().fgevc() tvg.hcqngr_ers(ersanzr, arjiny.qrpbqr('urk'), byqiny.qrpbqr('urk')) pbaa.bx() qrs png(pbaa, vq): tvg.purpx_ercb_be_qvr() gel: sbe oybo va tvg.png(vq): pbaa.jevgr(fgehpg.cnpx('!V', yra(oybo))) pbaa.jevgr(oybo) rkprcg XrlReebe, r: ybt('freire: reebe: %f\a' % r) pbaa.jevgr('\0\0\0\0') pbaa.reebe(r) ryfr: pbaa.jevgr('\0\0\0\0') pbaa.bx() bcgfcrp = """ ohc freire """ b = bcgvbaf.Bcgvbaf('ohc freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') ybt('ohc freire: ernqvat sebz fgqva.\a') pbzznaqf = { 'vavg-qve': vavg_qve, 'frg-qve': frg_qve, 'yvfg-vaqrkrf': yvfg_vaqrkrf, 'fraq-vaqrk': fraq_vaqrk, 
'erprvir-bowrpgf': erprvir_bowrpgf, 'ernq-ers': ernq_ers, 'hcqngr-ers': hcqngr_ers, 'png': png, } # SVKZR: guvf cebgbpby vf gbgnyyl ynzr naq abg ng nyy shgher-cebbs. # (Rfcrpvnyyl fvapr jr nobeg pbzcyrgryl nf fbba nf *nalguvat* onq unccraf) pbaa = Pbaa(flf.fgqva, flf.fgqbhg) ye = yvarernqre(pbaa) sbe _yvar va ye: yvar = _yvar.fgevc() vs abg yvar: pbagvahr ybt('ohc freire: pbzznaq: %e\a' % yvar) jbeqf = yvar.fcyvg(' ', 1) pzq = jbeqf[0] erfg = yra(jbeqf)>1 naq jbeqf[1] be '' vs pzq == 'dhvg': oernx ryfr: pzq = pbzznaqf.trg(pzq) vs pzq: pzq(pbaa, erfg) ryfr: envfr Rkprcgvba('haxabja freire pbzznaq: %e\a' % yvar) ybt('ohc freire: qbar\a') #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc wbva [-e ubfg:cngu] [ersf be unfurf...] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc wbva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg rkgen: rkgen = yvarernqre(flf.fgqva) erg = 0 vs bcg.erzbgr: pyv = pyvrag.Pyvrag(bcg.erzbgr) png = pyv.png ryfr: pc = tvg.PngCvcr() png = pc.wbva sbe vq va rkgen: gel: sbe oybo va png(vq): flf.fgqbhg.jevgr(oybo) rkprcg XrlReebe, r: flf.fgqbhg.syhfu() ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, er, reeab, fgng, gvzr, zngu sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, vaqrk, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc fnir [-gp] [-a anzr] -- e,erzbgr= erzbgr ercbfvgbel cngu g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) d,dhvrg qba'g fubj cebterff zrgre fznyyre= bayl onpx hc svyrf fznyyre guna a olgrf """ b = bcgvbaf.Bcgvbaf('ohc fnir', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny("hfr bar be zber bs -g, -p, -a") vs abg rkgen: b.sngny("ab svyranzrf 
tvira") bcg.cebterff = (vfggl naq abg bcg.dhvrg) bcg.fznyyre = cnefr_ahz(bcg.fznyyre be 0) vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() unaqyr_pgey_p() qrs rngfynfu(qve): vs qve.raqfjvgu('/'): erghea qve[:-1] ryfr: erghea qve cnegf = [''] funyvfgf = [[]] qrs _chfu(cneg): nffreg(cneg) cnegf.nccraq(cneg) funyvfgf.nccraq([]) qrs _cbc(sbepr_gerr): nffreg(yra(cnegf) >= 1) cneg = cnegf.cbc() funyvfg = funyvfgf.cbc() gerr = sbepr_gerr be j.arj_gerr(funyvfg) vs funyvfgf: funyvfgf[-1].nccraq(('40000', cneg, gerr)) ryfr: # guvf jnf gur gbcyriry, fb chg vg onpx sbe fnavgl funyvfgf.nccraq(funyvfg) erghea gerr ynfgerznva = Abar qrs cebterff_ercbeg(a): tybony pbhag, fhopbhag, ynfgerznva fhopbhag += a pp = pbhag + fhopbhag cpg = gbgny naq (pp*100.0/gbgny) be 0 abj = gvzr.gvzr() ryncfrq = abj - gfgneg xcf = ryncfrq naq vag(pp/1024./ryncfrq) xcf_senp = 10 ** vag(zngu.ybt(xcf+1, 10) - 1) xcf = vag(xcf/xcf_senp)*xcf_senp vs pp: erznva = ryncfrq*1.0/pp * (gbgny-pp) ryfr: erznva = 0.0 vs (ynfgerznva naq (erznva > ynfgerznva) naq ((erznva - ynfgerznva)/ynfgerznva < 0.05)): erznva = ynfgerznva ryfr: ynfgerznva = erznva ubhef = vag(erznva/60/60) zvaf = vag(erznva/60 - ubhef*60) frpf = vag(erznva - ubhef*60*60 - zvaf*60) vs ryncfrq < 30: erznvafge = '' xcffge = '' ryfr: xcffge = '%qx/f' % xcf vs ubhef: erznvafge = '%qu%qz' % (ubhef, zvaf) ryvs zvaf: erznvafge = '%qz%q' % (zvaf, frpf) ryfr: erznvafge = '%qf' % frpf cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf) %f %f\e' % (cpg, pp/1024, gbgny/1024, spbhag, sgbgny, erznvafge, xcffge)) e = vaqrk.Ernqre(tvg.ercb('ohcvaqrk')) qrs nyernql_fnirq(rag): erghea rag.vf_inyvq() naq 
j.rkvfgf(rag.fun) naq rag.fun qrs jnagerphefr_cer(rag): erghea abg nyernql_fnirq(rag) qrs jnagerphefr_qhevat(rag): erghea abg nyernql_fnirq(rag) be rag.fun_zvffvat() gbgny = sgbgny = 0 vs bcg.cebterff: sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_cer): vs abg (sgbgny % 10024): cebterff('Ernqvat vaqrk: %q\e' % sgbgny) rkvfgf = rag.rkvfgf() unfuinyvq = nyernql_fnirq(rag) rag.frg_fun_zvffvat(abg unfuinyvq) vs abg bcg.fznyyre be rag.fvmr < bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: gbgny += rag.fvmr sgbgny += 1 cebterff('Ernqvat vaqrk: %q, qbar.\a' % sgbgny) unfufcyvg.cebterff_pnyyonpx = cebterff_ercbeg gfgneg = gvzr.gvzr() pbhag = fhopbhag = spbhag = 0 ynfgfxvc_anzr = Abar ynfgqve = '' sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_qhevat): (qve, svyr) = bf.cngu.fcyvg(rag.anzr) rkvfgf = (rag.syntf & vaqrk.VK_RKVFGF) unfuinyvq = nyernql_fnirq(rag) jnfzvffvat = rag.fun_zvffvat() byqfvmr = rag.fvmr vs bcg.ireobfr: vs abg rkvfgf: fgnghf = 'Q' ryvs abg unfuinyvq: vs rag.fun == vaqrk.RZCGL_FUN: fgnghf = 'N' ryfr: fgnghf = 'Z' ryfr: fgnghf = ' ' vs bcg.ireobfr >= 2: ybt('%f %-70f\a' % (fgnghf, rag.anzr)) ryvs abg fgng.F_VFQVE(rag.zbqr) naq ynfgqve != qve: vs abg ynfgqve.fgnegfjvgu(qve): ybt('%f %-70f\a' % (fgnghf, bf.cngu.wbva(qve, ''))) ynfgqve = qve vs bcg.cebterff: cebterff_ercbeg(0) spbhag += 1 vs abg rkvfgf: pbagvahr vs bcg.fznyyre naq rag.fvmr >= bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: nqq_reebe('fxvccvat ynetr svyr "%f"' % rag.anzr) ynfgfxvc_anzr = rag.anzr pbagvahr nffreg(qve.fgnegfjvgu('/')) qvec = qve.fcyvg('/') juvyr cnegf > qvec: _cbc(sbepr_gerr = Abar) vs qve != '/': sbe cneg va qvec[yra(cnegf):]: _chfu(cneg) vs abg svyr: # ab svyranzr cbegvba zrnaf guvf vf n fhoqve. Ohg # fho/cneragqverpgbevrf nyernql unaqyrq va gur cbc/chfu() cneg nobir. 
byqgerr = nyernql_fnirq(rag) # znl or Abar arjgerr = _cbc(sbepr_gerr = byqgerr) vs abg byqgerr: vs ynfgfxvc_anzr naq ynfgfxvc_anzr.fgnegfjvgu(rag.anzr): rag.vainyvqngr() ryfr: rag.inyvqngr(040000, arjgerr) rag.ercnpx() vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr pbagvahr # vg'f abg n qverpgbel vq = Abar vs unfuinyvq: zbqr = '%b' % rag.tvgzbqr vq = rag.fun funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) ryfr: vs fgng.F_VFERT(rag.zbqr): gel: s = unfufcyvg.bcra_abngvzr(rag.anzr) rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = unfufcyvg.fcyvg_gb_oybo_be_gerr(j, [s]) ryfr: vs fgng.F_VFQVE(rag.zbqr): nffreg(0) # unaqyrq nobir ryvs fgng.F_VFYAX(rag.zbqr): gel: ey = bf.ernqyvax(rag.anzr) rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = ('120000', j.arj_oybo(ey)) ryfr: nqq_reebe(Rkprcgvba('fxvccvat fcrpvny svyr "%f"' % rag.anzr)) ynfgfxvc_anzr = rag.anzr vs vq: rag.inyvqngr(vag(zbqr, 8), vq) rag.ercnpx() funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr fhopbhag = 0 vs bcg.cebterff: cpg = gbgny naq pbhag*100.0/gbgny be 100 cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf), qbar. 
\a' % (cpg, pbhag/1024, gbgny/1024, spbhag, sgbgny)) juvyr yra(cnegf) > 1: _cbc(sbepr_gerr = Abar) nffreg(yra(funyvfgf) == 1) gerr = j.arj_gerr(funyvfgf[-1]) vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fnir\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq juvyr fnivat.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc gvpx """ b = bcgvbaf.Bcgvbaf('ohc gvpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") g = gvzr.gvzr() gyrsg = 1 - (g - vag(g)) gvzr.fyrrc(gyrsg) #!/hfe/ova/rai clguba vzcbeg bf, flf, fgng, gvzr sebz ohc vzcbeg bcgvbaf, tvg, vaqrk, qerphefr sebz ohc.urycref vzcbeg * qrs zretr_vaqrkrf(bhg, e1, e2): sbe r va vaqrk.ZretrVgre([e1, e2]): # SVKZR: fubhyqa'g jr erzbir qryrgrq ragevrf riraghnyyl? Jura? 
bhg.nqq_vkragel(r) pynff VgreUrycre: qrs __vavg__(frys, y): frys.v = vgre(y) frys.phe = Abar frys.arkg() qrs arkg(frys): gel: frys.phe = frys.v.arkg() rkprcg FgbcVgrengvba: frys.phe = Abar erghea frys.phe qrs purpx_vaqrk(ernqre): gel: ybt('purpx: purpxvat sbejneq vgrengvba...\a') r = Abar q = {} sbe r va ernqre.sbejneq_vgre(): vs r.puvyqera_a: vs bcg.ireobfr: ybt('%08k+%-4q %e\a' % (r.puvyqera_bsf, r.puvyqera_a, r.anzr)) nffreg(r.puvyqera_bsf) nffreg(r.anzr.raqfjvgu('/')) nffreg(abg q.trg(r.puvyqera_bsf)) q[r.puvyqera_bsf] = 1 vs r.syntf & vaqrk.VK_UNFUINYVQ: nffreg(r.fun != vaqrk.RZCGL_FUN) nffreg(r.tvgzbqr) nffreg(abg r be r.anzr == '/') # ynfg ragel vf *nyjnlf* / ybt('purpx: purpxvat abezny vgrengvba...\a') ynfg = Abar sbe r va ernqre: vs ynfg: nffreg(ynfg > r.anzr) ynfg = r.anzr rkprcg: ybt('vaqrk reebe! ng %e\a' % r) envfr ybt('purpx: cnffrq.\a') qrs hcqngr_vaqrk(gbc): ev = vaqrk.Ernqre(vaqrksvyr) jv = vaqrk.Jevgre(vaqrksvyr) evt = VgreUrycre(ev.vgre(anzr=gbc)) gfgneg = vag(gvzr.gvzr()) unfutra = Abar vs bcg.snxr_inyvq: qrs unfutra(anzr): erghea (0100644, vaqrk.SNXR_FUN) gbgny = 0 sbe (cngu,cfg) va qerphefr.erphefvir_qveyvfg([gbc], kqri=bcg.kqri): vs bcg.ireobfr>=2 be (bcg.ireobfr==1 naq fgng.F_VFQVE(cfg.fg_zbqr)): flf.fgqbhg.jevgr('%f\a' % cngu) flf.fgqbhg.syhfu() cebterff('Vaqrkvat: %q\e' % gbgny) ryvs abg (gbgny % 128): cebterff('Vaqrkvat: %q\e' % gbgny) gbgny += 1 juvyr evt.phe naq evt.phe.anzr > cngu: # qryrgrq cnguf vs evt.phe.rkvfgf(): evt.phe.frg_qryrgrq() evt.phe.ercnpx() evt.arkg() vs evt.phe naq evt.phe.anzr == cngu: # cnguf gung nyernql rkvfgrq vs cfg: evt.phe.sebz_fgng(cfg, gfgneg) vs abg (evt.phe.syntf & vaqrk.VK_UNFUINYVQ): vs unfutra: (evt.phe.tvgzbqr, evt.phe.fun) = unfutra(cngu) evt.phe.syntf |= vaqrk.VK_UNFUINYVQ vs bcg.snxr_vainyvq: evt.phe.vainyvqngr() evt.phe.ercnpx() evt.arkg() ryfr: # arj cnguf jv.nqq(cngu, cfg, unfutra = unfutra) cebterff('Vaqrkvat: %q, qbar.\a' % gbgny) vs ev.rkvfgf(): ev.fnir() jv.syhfu() vs jv.pbhag: je = 
jv.arj_ernqre() vs bcg.purpx: ybt('purpx: orsber zretvat: byqsvyr\a') purpx_vaqrk(ev) ybt('purpx: orsber zretvat: arjsvyr\a') purpx_vaqrk(je) zv = vaqrk.Jevgre(vaqrksvyr) zretr_vaqrkrf(zv, ev, je) ev.pybfr() zv.pybfr() je.pybfr() jv.nobeg() ryfr: jv.pybfr() bcgfcrp = """ ohc vaqrk <-c|z|h> [bcgvbaf...] -- c,cevag cevag gur vaqrk ragevrf sbe gur tvira anzrf (nyfb jbexf jvgu -h) z,zbqvsvrq cevag bayl nqqrq/qryrgrq/zbqvsvrq svyrf (vzcyvrf -c) f,fgnghf cevag rnpu svyranzr jvgu n fgnghf pune (N/Z/Q) (vzcyvrf -c) U,unfu cevag gur unfu sbe rnpu bowrpg arkg gb vgf anzr (vzcyvrf -c) y,ybat cevag zber vasbezngvba nobhg rnpu svyr h,hcqngr (erphefviryl) hcqngr gur vaqrk ragevrf sbe gur tvira svyranzrf k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf snxr-inyvq znex nyy vaqrk ragevrf nf hc-gb-qngr rira vs gurl nera'g snxr-vainyvq znex nyy vaqrk ragevrf nf vainyvq purpx pnershyyl purpx vaqrk svyr vagrtevgl s,vaqrksvyr= gur anzr bs gur vaqrk svyr (qrsnhyg 'vaqrk') i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) """ b = bcgvbaf.Bcgvbaf('ohc vaqrk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg (bcg.zbqvsvrq be bcg['cevag'] be bcg.fgnghf be bcg.hcqngr be bcg.purpx): b.sngny('fhccyl bar be zber bs -c, -f, -z, -h, be --purpx') vs (bcg.snxr_inyvq be bcg.snxr_vainyvq) naq abg bcg.hcqngr: b.sngny('--snxr-{va,}inyvq ner zrnavatyrff jvgubhg -h') vs bcg.snxr_inyvq naq bcg.snxr_vainyvq: b.sngny('--snxr-inyvq vf vapbzcngvoyr jvgu --snxr-vainyvq') tvg.purpx_ercb_be_qvr() vaqrksvyr = bcg.vaqrksvyr be tvg.ercb('ohcvaqrk') unaqyr_pgey_p() vs bcg.purpx: ybt('purpx: fgnegvat vavgvny purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) cnguf = vaqrk.erqhpr_cnguf(rkgen) vs bcg.hcqngr: vs abg cnguf: b.sngny('hcqngr (-h) erdhrfgrq ohg ab cnguf tvira') sbe (ec,cngu) va cnguf: hcqngr_vaqrk(ec) vs bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq: sbe (anzr, rag) va vaqrk.Ernqre(vaqrksvyr).svygre(rkgen be ['']): vs (bcg.zbqvsvrq naq (rag.vf_inyvq() be rag.vf_qryrgrq() be abg 
rag.zbqr)): pbagvahr yvar = '' vs bcg.fgnghf: vs rag.vf_qryrgrq(): yvar += 'Q ' ryvs abg rag.vf_inyvq(): vs rag.fun == vaqrk.RZCGL_FUN: yvar += 'N ' ryfr: yvar += 'Z ' ryfr: yvar += ' ' vs bcg.unfu: yvar += rag.fun.rapbqr('urk') + ' ' vs bcg.ybat: yvar += "%7f %7f " % (bpg(rag.zbqr), bpg(rag.tvgzbqr)) cevag yvar + (anzr be './') vs bcg.purpx naq (bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq be bcg.hcqngr): ybt('purpx: fgnegvat svany purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg sebz ohc vzcbeg bcgvbaf, urycref bcgfcrp = """ ohc eonpxhc-freire -- Guvf pbzznaq vf abg vagraqrq gb or eha znahnyyl. """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc-freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') # trg gur fhopbzznaq'f neti. # Abeznyyl jr pbhyq whfg cnff guvf ba gur pbzznaq yvar, ohg fvapr jr'yy bsgra # or trggvat pnyyrq ba gur bgure raq bs na ffu cvcr, juvpu graqf gb znatyr # neti (ol fraqvat vg ivn gur furyy), guvf jnl vf zhpu fnsre. ohs = flf.fgqva.ernq(4) fm = fgehpg.hacnpx('!V', ohs)[0] nffreg(fm > 0) nffreg(fm < 1000000) ohs = flf.fgqva.ernq(fm) nffreg(yra(ohs) == fm) neti = ohs.fcyvg('\0') # fgqva/fgqbhg ner fhccbfrqyl pbaarpgrq gb 'ohc freire' gung gur pnyyre # fgnegrq sbe hf (bsgra ba gur bgure raq bs na ffu ghaary), fb jr qba'g jnag # gb zvfhfr gurz. Zbir gurz bhg bs gur jnl, gura ercynpr fgqbhg jvgu # n cbvagre gb fgqree va pnfr bhe fhopbzznaq jnagf gb qb fbzrguvat jvgu vg. # # Vg zvtug or avpr gb qb gur fnzr jvgu fgqva, ohg zl rkcrevzragf fubjrq gung # ffu frrzf gb znxr vgf puvyq'f fgqree n ernqnoyr-ohg-arire-ernqf-nalguvat # fbpxrg. Gurl ernyyl fubhyq unir hfrq fuhgqbja(FUHG_JE) ba gur bgure raq # bs vg, ohg cebonoyl qvqa'g. Naljnl, vg'f gbb zrffl, fb yrg'f whfg znxr fher # nalbar ernqvat sebz fgqva vf qvfnccbvagrq. 
# # (Lbh pna'g whfg yrnir fgqva/fgqbhg "abg bcra" ol pybfvat gur svyr # qrfpevcgbef. Gura gur arkg svyr gung bcraf vf nhgbzngvpnyyl nffvtarq 0 be 1, # naq crbcyr *gelvat* gb ernq/jevgr fgqva/fgqbhg trg fperjrq.) bf.qhc2(0, 3) bf.qhc2(1, 4) bf.qhc2(2, 1) va nccebkvzngryl gur fnzr cynprEQBAYL) naq qvfgevo-0) hgvba nf(sq) va gur bevtvany grfg svyrfREFR'] = urycref.ubfganzr() bf.rkrpic(neti[0], neti) flf.rkvg(99) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo, fhocebprff, gvzr sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * cne2_bx = 0 ahyys = bcra('/qri/ahyy') qrs qroht(f): vs bcg.ireobfr: ybt(f) qrs eha(neti): # ng yrnfg va clguba 2.5, hfvat "fgqbhg=2" be "fgqbhg=flf.fgqree" orybj # qbrfa'g npghnyyl jbex, orpnhfr fhocebprff pybfrf sq #2 evtug orsber # rkrpvat sbe fbzr ernfba. Fb jr jbex nebhaq vg ol qhcyvpngvat gur sq # svefg. sq = bf.qhc(2) # pbcl fgqree gel: c = fhocebprff.Cbcra(neti, fgqbhg=sq, pybfr_sqf=Snyfr) erghea c.jnvg() svanyyl: bf.pybfr(sq) qrs cne2_frghc(): tybony cne2_bx ei = 1 gel: c = fhocebprff.Cbcra(['cne2', '--uryc'], fgqbhg=ahyys, fgqree=ahyys, fgqva=ahyys) ei = c.jnvg() rkprcg BFReebe: ybt('sfpx: jneavat: cne2 abg sbhaq; qvfnoyvat erpbirel srngherf.\a') ryfr: cne2_bx = 1 qrs cnei(yiy): vs bcg.ireobfr >= yiy: vs vfggl: erghea [] ryfr: erghea ['-d'] ryfr: erghea ['-dd'] qrs cne2_trarengr(onfr): erghea eha(['cne2', 'perngr', '-a1', '-p200'] + cnei(2) + ['--', onfr, onfr+'.cnpx', onfr+'.vqk']) qrs cne2_irevsl(onfr): erghea eha(['cne2', 'irevsl'] + cnei(3) + ['--', onfr]) qrs cne2_ercnve(onfr): erghea eha(['cne2', 'ercnve'] + cnei(2) + ['--', onfr]) qrs dhvpx_irevsl(onfr): s = bcra(onfr + '.cnpx', 'eo') s.frrx(-20, 2) jnagfhz = s.ernq(20) nffreg(yra(jnagfhz) == 20) s.frrx(0) fhz = Fun1() sbe o va puhaxlernqre(s, bf.sfgng(s.svyrab()).fg_fvmr - 20): fhz.hcqngr(o) vs fhz.qvtrfg() != jnagfhz: envfr InyhrReebe('rkcrpgrq %e, tbg %e' % (jnagfhz.rapbqr('urk'), fhz.urkqvtrfg())) qrs tvg_irevsl(onfr): vs bcg.dhvpx: gel: dhvpx_irevsl(onfr) rkprcg Rkprcgvba, r: 
qroht('reebe: %f\a' % r) erghea 1 erghea 0 ryfr: erghea eha(['tvg', 'irevsl-cnpx', '--', onfr]) qrs qb_cnpx(onfr, ynfg): pbqr = 0 vs cne2_bx naq cne2_rkvfgf naq (bcg.ercnve be abg bcg.trarengr): ierfhyg = cne2_irevsl(onfr) vs ierfhyg != 0: vs bcg.ercnve: eerfhyg = cne2_ercnve(onfr) vs eerfhyg != 0: cevag '%f cne2 ercnve: snvyrq (%q)' % (ynfg, eerfhyg) pbqr = eerfhyg ryfr: cevag '%f cne2 ercnve: fhpprrqrq (0)' % ynfg pbqr = 100 ryfr: cevag '%f cne2 irevsl: snvyrq (%q)' % (ynfg, ierfhyg) pbqr = ierfhyg ryfr: cevag '%f bx' % ynfg ryvs abg bcg.trarengr be (cne2_bx naq abg cne2_rkvfgf): terfhyg = tvg_irevsl(onfr) vs terfhyg != 0: cevag '%f tvg irevsl: snvyrq (%q)' % (ynfg, terfhyg) pbqr = terfhyg ryfr: vs cne2_bx naq bcg.trarengr: cerfhyg = cne2_trarengr(onfr) vs cerfhyg != 0: cevag '%f cne2 perngr: snvyrq (%q)' % (ynfg, cerfhyg) pbqr = cerfhyg ryfr: cevag '%f bx' % ynfg ryfr: cevag '%f bx' % ynfg ryfr: nffreg(bcg.trarengr naq (abg cne2_bx be cne2_rkvfgf)) qroht(' fxvccrq: cne2 svyr nyernql trarengrq.\a') erghea pbqr bcgfcrp = """ ohc sfpx [bcgvbaf...] [svyranzrf...] -- e,ercnve nggrzcg gb ercnve reebef hfvat cne2 (qnatrebhf!) 
t,trarengr trarengr nhgb-ercnve vasbezngvba hfvat cne2 i,ireobfr vapernfr ireobfvgl (pna or hfrq zber guna bapr) dhvpx whfg purpx cnpx fun1fhz, qba'g hfr tvg irevsl-cnpx w,wbof= eha 'a' wbof va cnenyyry cne2-bx vzzrqvngryl erghea 0 vs cne2 vf bx, 1 vs abg qvfnoyr-cne2 vtaber cne2 rira vs vg vf ninvynoyr """ b = bcgvbaf.Bcgvbaf('ohc sfpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) cne2_frghc() vs bcg.cne2_bx: vs cne2_bx: flf.rkvg(0) # 'gehr' va fu ryfr: flf.rkvg(1) vs bcg.qvfnoyr_cne2: cne2_bx = 0 tvg.purpx_ercb_be_qvr() vs abg rkgen: qroht('sfpx: Ab svyranzrf tvira: purpxvat nyy cnpxf.\a') rkgen = tybo.tybo(tvg.ercb('bowrpgf/cnpx/*.cnpx')) pbqr = 0 pbhag = 0 bhgfgnaqvat = {} sbe anzr va rkgen: vs anzr.raqfjvgu('.cnpx'): onfr = anzr[:-5] ryvs anzr.raqfjvgu('.vqk'): onfr = anzr[:-4] ryvs anzr.raqfjvgu('.cne2'): onfr = anzr[:-5] ryvs bf.cngu.rkvfgf(anzr + '.cnpx'): onfr = anzr ryfr: envfr Rkprcgvba('%f vf abg n cnpx svyr!' % anzr) (qve,ynfg) = bf.cngu.fcyvg(onfr) cne2_rkvfgf = bf.cngu.rkvfgf(onfr + '.cne2') vs cne2_rkvfgf naq bf.fgng(onfr + '.cne2').fg_fvmr == 0: cne2_rkvfgf = 0 flf.fgqbhg.syhfu() qroht('sfpx: purpxvat %f (%f)\a' % (ynfg, cne2_bx naq cne2_rkvfgf naq 'cne2' be 'tvg')) vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.wbof: ap = qb_cnpx(onfr, ynfg) pbqr = pbqr be ap pbhag += 1 ryfr: juvyr yra(bhgfgnaqvat) >= bcg.wbof: (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 cvq = bf.sbex() vs cvq: # cnerag bhgfgnaqvat[cvq] = 1 ryfr: # puvyq gel: flf.rkvg(qb_cnpx(onfr, ynfg)) rkprcg Rkprcgvba, r: ybt('rkprcgvba: %e\a' % r) flf.rkvg(99) juvyr yra(bhgfgnaqvat): (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.ireobfr naq vfggl: ybt('sfpx qbar. 
\a') flf.rkvg(pbqr) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg, trgbcg, fhocebprff, fvtany sebz ohc vzcbeg bcgvbaf, ffu sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc eonpxhc vaqrk ... ohc eonpxhc fnir ... ohc eonpxhc fcyvg ... """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc', bcgfcrp, bcgshap=trgbcg.trgbcg) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) < 2: b.sngny('nethzragf rkcrpgrq') pynff FvtRkprcgvba(Rkprcgvba): qrs __vavg__(frys, fvtahz): frys.fvtahz = fvtahz Rkprcgvba.__vavg__(frys, 'fvtany %q erprvirq' % fvtahz) qrs unaqyre(fvtahz, senzr): envfr FvtRkprcgvba(fvtahz) fvtany.fvtany(fvtany.FVTGREZ, unaqyre) fvtany.fvtany(fvtany.FVTVAG, unaqyre) fc = Abar c = Abar erg = 99 gel: ubfganzr = rkgen[0] neti = rkgen[1:] c = ffu.pbaarpg(ubfganzr, 'eonpxhc-freire') netif = '\0'.wbva(['ohc'] + neti) c.fgqva.jevgr(fgehpg.cnpx('!V', yra(netif)) + netif) c.fgqva.syhfu() znva_rkr = bf.raiveba.trg('OHC_ZNVA_RKR') be flf.neti[0] fc = fhocebprff.Cbcra([znva_rkr, 'freire'], fgqva=c.fgqbhg, fgqbhg=c.fgqva) c.fgqva.pybfr() c.fgqbhg.pybfr() svanyyl: juvyr 1: # vs jr trg n fvtany juvyr jnvgvat, jr unir gb xrrc jnvgvat, whfg # va pnfr bhe puvyq qbrfa'g qvr. 
gel: erg = c.jnvg() fc.jnvg() oernx rkprcg FvtRkprcgvba, r: ybt('\aohc eonpxhc: %f\a' % r) bf.xvyy(c.cvq, r.fvtahz) erg = 84 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc arjyvare """ b = bcgvbaf.Bcgvbaf('ohc arjyvare', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") e = er.pbzcvyr(e'([\e\a])') ynfgyra = 0 nyy = '' juvyr 1: y = e.fcyvg(nyy, 1) vs yra(y) <= 1: gel: o = bf.ernq(flf.fgqva.svyrab(), 4096) rkprcg XrlobneqVagreehcg: oernx vs abg o: oernx nyy += o ryfr: nffreg(yra(y) == 3) (yvar, fcyvgpune, nyy) = y #fcyvgpune = '\a' flf.fgqbhg.jevgr('%-*f%f' % (ynfgyra, yvar, fcyvgpune)) vs fcyvgpune == '\e': ynfgyra = yra(yvar) ryfr: ynfgyra = 0 flf.fgqbhg.syhfu() vs ynfgyra be nyy: flf.fgqbhg.jevgr('%-*f\a' % (ynfgyra, nyy)) #!/hfe/ova/rai clguba vzcbeg flf sebz ohc vzcbeg bcgvbaf, tvg, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc znetva """ b = bcgvbaf.Bcgvbaf('ohc znetva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") tvg.purpx_ercb_be_qvr() #tvg.vtaber_zvqk = 1 zv = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) ynfg = '\0'*20 ybatzngpu = 0 sbe v va zv: vs v == ynfg: pbagvahr #nffreg(fge(v) >= ynfg) cz = _unfufcyvg.ovgzngpu(ynfg, v) ybatzngpu = znk(ybatzngpu, cz) ynfg = v cevag ybatzngpu #!/hfe/ova/rai clguba sebz ohc vzcbeg bcgvbaf, qerphefr sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc qerphefr -- k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf d,dhvrg qba'g npghnyyl cevag svyranzrf cebsvyr eha haqre gur clguba cebsvyre """ b = bcgvbaf.Bcgvbaf('ohc qerphefr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar svyranzr rkcrpgrq") vg = qerphefr.erphefvir_qveyvfg(rkgen, bcg.kqri) vs bcg.cebsvyr: vzcbeg pCebsvyr qrs qb_vg(): sbe v va vg: cnff pCebsvyr.eha('qb_vg()') ryfr: vs bcg.dhvrg: sbe v va vg: cnff ryfr: sbe (anzr,fg) va vg: cevag anzr vs fnirq_reebef: 
ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc fcyvg [-gpo] [-a anzr] [--orapu] [svyranzrf...] -- e,erzbgr= erzbgr ercbfvgbel cngu o,oybof bhgchg n frevrf bs oybo vqf g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) A,abbc qba'g npghnyyl fnir gur qngn naljurer d,dhvrg qba'g cevag cebterff zrffntrf i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) pbcl whfg pbcl vachg gb bhgchg, unfufcyvggvat nybat gur jnl orapu cevag orapuznex gvzvatf gb fgqree znk-cnpx-fvmr= znkvzhz olgrf va n fvatyr cnpx znk-cnpx-bowrpgf= znkvzhz ahzore bs bowrpgf va n fvatyr cnpx snabhg= znkvzhz ahzore bs oybof va n fvatyr gerr """ b = bcgvbaf.Bcgvbaf('ohc fcyvg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr be bcg.abbc be bcg.pbcl): b.sngny("hfr bar be zber bs -o, -g, -p, -a, -A, --pbcl") vs (bcg.abbc be bcg.pbcl) naq (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny('-A vf vapbzcngvoyr jvgu -o, -g, -p, -a') vs bcg.ireobfr >= 2: tvg.ireobfr = bcg.ireobfr - 1 bcg.orapu = 1 vs bcg.znk_cnpx_fvmr: unfufcyvg.znk_cnpx_fvmr = cnefr_ahz(bcg.znk_cnpx_fvmr) vs bcg.znk_cnpx_bowrpgf: unfufcyvg.znk_cnpx_bowrpgf = cnefr_ahz(bcg.znk_cnpx_bowrpgf) vs bcg.snabhg: unfufcyvg.snabhg = cnefr_ahz(bcg.snabhg) vs bcg.oybof: unfufcyvg.snabhg = 0 vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") fgneg_gvzr = gvzr.gvzr() ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.abbc be bcg.pbcl: pyv = j = byqers = Abar ryvs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr 
naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() svyrf = rkgen naq (bcra(sa) sbe sa va rkgen) be [flf.fgqva] vs j: funyvfg = unfufcyvg.fcyvg_gb_funyvfg(j, svyrf) gerr = j.arj_gerr(funyvfg) ryfr: ynfg = 0 sbe (oybo, ovgf) va unfufcyvg.unfufcyvg_vgre(svyrf): unfufcyvg.gbgny_fcyvg += yra(oybo) vs bcg.pbcl: flf.fgqbhg.jevgr(fge(oybo)) zrtf = unfufcyvg.gbgny_fcyvg/1024/1024 vs abg bcg.dhvrg naq ynfg != zrtf: cebterff('%q Zolgrf ernq\e' % zrtf) ynfg = zrtf cebterff('%q Zolgrf ernq, qbar.\a' % zrtf) vs bcg.ireobfr: ybt('\a') vs bcg.oybof: sbe (zbqr,anzr,ova) va funyvfg: cevag ova.rapbqr('urk') vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fcyvg\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') vs j: j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() frpf = gvzr.gvzr() - fgneg_gvzr fvmr = unfufcyvg.gbgny_fcyvg vs bcg.orapu: ybt('\aohc: %.2sxolgrf va %.2s frpf = %.2s xolgrf/frp\a' % (fvmr/1024., frpf, fvmr/1024./frpf)) #!/hfe/ova/rai clguba vzcbeg flf, er, fgehpg, zznc sebz ohc vzcbeg tvg, bcgvbaf sebz ohc.urycref vzcbeg * qrs f_sebz_olgrf(olgrf): pyvfg = [pue(o) sbe o va olgrf] erghea ''.wbva(pyvfg) qrs ercbeg(pbhag): svryqf = ['IzFvmr', 'IzEFF', 'IzQngn', 'IzFgx'] q = {} sbe yvar va bcra('/cebp/frys/fgnghf').ernqyvarf(): y = er.fcyvg(e':\f*', yvar.fgevc(), 1) q[y[0]] = y[1] vs pbhag >= 0: r1 = pbhag svryqf = [q[x] sbe x va svryqf] ryfr: r1 = '' cevag ('%9f ' + ('%10f ' * yra(svryqf))) % ghcyr([r1] + svryqf) flf.fgqbhg.syhfu() bcgfcrp = """ ohc zrzgrfg [-a ryrzragf] [-p plpyrf] -- a,ahzore= ahzore bs bowrpgf cre plpyr p,plpyrf= ahzore bs plpyrf gb eha vtaber-zvqk vtaber .zvqk svyrf, hfr bayl .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zrzgrfg', bcgfcrp) (bcg, syntf, rkgen) = 
b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') tvg.vtaber_zvqk = bcg.vtaber_zvqk tvg.purpx_ercb_be_qvr() z = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) plpyrf = bcg.plpyrf be 100 ahzore = bcg.ahzore be 10000 ercbeg(-1) s = bcra('/qri/henaqbz') n = zznc.zznc(-1, 20) ercbeg(0) sbe p va kenatr(plpyrf): sbe a va kenatr(ahzore): o = s.ernq(3) vs 0: olgrf = yvfg(fgehpg.hacnpx('!OOO', o)) + [0]*17 olgrf[2] &= 0ks0 ova = fgehpg.cnpx('!20f', f_sebz_olgrf(olgrf)) ryfr: n[0:2] = o[0:2] n[2] = pue(beq(o[2]) & 0ks0) ova = fge(n[0:20]) #cevag ova.rapbqr('urk') z.rkvfgf(ova) ercbeg((p+1)*ahzore) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * qrs cevag_abqr(grkg, a): cersvk = '' vs bcg.unfu: cersvk += "%f " % a.unfu.rapbqr('urk') vs fgng.F_VFQVE(a.zbqr): cevag '%f%f/' % (cersvk, grkg) ryvs fgng.F_VFYAX(a.zbqr): cevag '%f%f@' % (cersvk, grkg) ryfr: cevag '%f%f' % (cersvk, grkg) bcgfcrp = """ ohc yf -- f,unfu fubj unfu sbe rnpu svyr """ b = bcgvbaf.Bcgvbaf('ohc yf', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) vs abg rkgen: rkgen = ['/'] erg = 0 sbe q va rkgen: gel: a = gbc.yerfbyir(q) vs fgng.F_VFQVE(a.zbqr): sbe fho va a: cevag_abqr(fho.anzr, fho) ryfr: cevag_abqr(q, a) rkprcg isf.AbqrReebe, r: ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er, fgng, ernqyvar, sazngpu sebz ohc vzcbeg bcgvbaf, tvg, fudhbgr, isf sebz ohc.urycref vzcbeg * qrs abqr_anzr(grkg, a): vs fgng.F_VFQVE(a.zbqr): erghea '%f/' % grkg ryvs fgng.F_VFYAX(a.zbqr): erghea '%f@' % grkg ryfr: erghea '%f' % grkg qrs qb_yf(cngu, a): y = [] vs fgng.F_VFQVE(a.zbqr): sbe fho va a: y.nccraq(abqr_anzr(fho.anzr, fho)) ryfr: y.nccraq(abqr_anzr(cngu, a)) cevag pbyhzangr(y, '') qrs jevgr_gb_svyr(vas, bhgs): sbe oybo va puhaxlernqre(vas): bhgs.jevgr(oybo) qrs vachgvgre(): vs bf.vfnggl(flf.fgqva.svyrab()): juvyr 1: gel: lvryq enj_vachg('ohc> ') rkprcg RBSReebe: oernx 
ryfr: sbe yvar va flf.fgqva: lvryq yvar qrs _pbzcyrgre_trg_fhof(yvar): (dglcr, ynfgjbeq) = fudhbgr.hasvavfurq_jbeq(yvar) (qve,anzr) = bf.cngu.fcyvg(ynfgjbeq) #ybt('\apbzcyrgre: %e %e %e\a' % (dglcr, ynfgjbeq, grkg)) a = cjq.erfbyir(qve) fhof = yvfg(svygre(ynzoqn k: k.anzr.fgnegfjvgu(anzr), a.fhof())) erghea (qve, anzr, dglcr, ynfgjbeq, fhof) _ynfg_yvar = Abar _ynfg_erf = Abar qrs pbzcyrgre(grkg, fgngr): tybony _ynfg_yvar tybony _ynfg_erf gel: yvar = ernqyvar.trg_yvar_ohssre()[:ernqyvar.trg_raqvqk()] vs _ynfg_yvar != yvar: _ynfg_erf = _pbzcyrgre_trg_fhof(yvar) _ynfg_yvar = yvar (qve, anzr, dglcr, ynfgjbeq, fhof) = _ynfg_erf vs fgngr < yra(fhof): fa = fhof[fgngr] fa1 = fa.erfbyir('') # qrers flzyvaxf shyyanzr = bf.cngu.wbva(qve, fa.anzr) vs fgng.F_VFQVE(fa1.zbqr): erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr+'/', grezvangr=Snyfr) ryfr: erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr, grezvangr=Gehr) + ' ' erghea grkg + erg rkprcg Rkprcgvba, r: ybt('\areebe va pbzcyrgvba: %f\a' % r) bcgfcrp = """ ohc sgc """ b = bcgvbaf.Bcgvbaf('ohc sgc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) cjq = gbc vs rkgen: yvarf = rkgen ryfr: ernqyvar.frg_pbzcyrgre_qryvzf(' \g\a\e/') ernqyvar.frg_pbzcyrgre(pbzcyrgre) ernqyvar.cnefr_naq_ovaq("gno: pbzcyrgr") yvarf = vachgvgre() sbe yvar va yvarf: vs abg yvar.fgevc(): pbagvahr jbeqf = [jbeq sbe (jbeqfgneg,jbeq) va fudhbgr.dhbgrfcyvg(yvar)] pzq = jbeqf[0].ybjre() #ybt('rkrphgr: %e %e\a' % (pzq, cnez)) gel: vs pzq == 'yf': sbe cnez va (jbeqf[1:] be ['.']): qb_yf(cnez, cjq.erfbyir(cnez)) ryvs pzq == 'pq': sbe cnez va jbeqf[1:]: cjq = cjq.erfbyir(cnez) ryvs pzq == 'cjq': cevag cjq.shyyanzr() ryvs pzq == 'png': sbe cnez va jbeqf[1:]: tvir be gnxr n ovgerfbyir(cnez).bcra(), flf.fgqbhg) ryvs pzq == 'trg': vs yra(jbeqf) abg va [2,3]: envfr Rkprcgvba('Hfntr: trg [ybpnyanzr]') eanzr = jbeqf[1] (qve,onfr) = bf.cngu.fcyvg(eanzr) yanzr = yra(jbeqf)>2 naq jbeqf[2] be onfr vas = 
cjq.erfbyir(eanzr).bcra() ybt('Fnivat %e\a' % yanzr) jevgr_gb_svyr(vas, bcra(yanzr, 'jo')) ryvs pzq == 'ztrg': sbe cnez va jbeqf[1:]: (qve,onfr) = bf.cngu.fcyvg(cnez) sbe a va cjq.erfbyir(qve).fhof(): vs sazngpu.sazngpu(a.anzr, onfr): gel: ybt('Fnivat %e\a' % a.anzr) vas = a.bcra() bhgs = bcra(a.anzr, 'jo') jevgr_gb_svyr(vas, bhgs) bhgs.pybfr() rkprcg Rkprcgvba, r: ybt(' reebe: %f\a' % r) ryvs pzq == 'uryc' be pzq == '?': ybt('Pbzznaqf: yf pq cjq png trg ztrg uryc dhvg\a') ryvs pzq == 'dhvg' be pzq == 'rkvg' be pzq == 'olr': oernx ryfr: envfr Rkprcgvba('ab fhpu pbzznaq %e' % pzq) rkprcg Rkprcgvba, r: ybt('reebe: %f\a' % r) #envfr #!/hfe/ova/rai clguba vzcbeg flf, zznc sebz ohc vzcbeg bcgvbaf, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc enaqbz [-F frrq] -- F,frrq= bcgvbany enaqbz ahzore frrq (qrsnhyg 1) s,sbepr cevag enaqbz qngn gb fgqbhg rira vs vg'f n ggl """ b = bcgvbaf.Bcgvbaf('ohc enaqbz', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") gbgny = cnefr_ahz(rkgen[0]) vs bcg.sbepr be (abg bf.vfnggl(1) naq abg ngbv(bf.raiveba.trg('OHC_SBEPR_GGL')) & 1): _unfufcyvg.jevgr_enaqbz(flf.fgqbhg.svyrab(), gbgny, bcg.frrq be 0) ryfr: ybt('reebe: abg jevgvat ovanel qngn gb n grezvany. 
Hfr -s gb sbepr.\a') flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc uryc """ b = bcgvbaf.Bcgvbaf('ohc uryc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) == 0: # gur jenccre cebtenz cebivqrf gur qrsnhyg hfntr fgevat bf.rkrpic(bf.raiveba['OHC_ZNVA_RKR'], ['ohc']) ryvs yra(rkgen) == 1: qbpanzr = (rkgen[0]=='ohc' naq 'ohc' be ('ohc-%f' % rkgen[0])) rkr = flf.neti[0] (rkrcngu, rkrsvyr) = bf.cngu.fcyvg(rkr) znacngu = bf.cngu.wbva(rkrcngu, '../Qbphzragngvba/' + qbpanzr + '.[1-9]') t = tybo.tybo(znacngu) vs t: bf.rkrpic('zna', ['zna', '-y', t[0]]) ryfr: bf.rkrpic('zna', ['zna', qbpanzr]) ryfr: b.sngny("rknpgyl bar pbzznaq anzr rkcrpgrq") #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng, reeab, shfr, er, gvzr, grzcsvyr sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * pynff Fgng(shfr.Fgng): qrs __vavg__(frys): frys.fg_zbqr = 0 frys.fg_vab = 0 frys.fg_qri = 0 frys.fg_ayvax = 0 frys.fg_hvq = 0 frys.fg_tvq = 0 frys.fg_fvmr = 0 frys.fg_ngvzr = 0 frys.fg_zgvzr = 0 frys.fg_pgvzr = 0 frys.fg_oybpxf = 0 frys.fg_oyxfvmr = 0 frys.fg_eqri = 0 pnpur = {} qrs pnpur_trg(gbc, cngu): cnegf = cngu.fcyvg('/') pnpur[('',)] = gbc p = Abar znk = yra(cnegf) #ybt('pnpur: %e\a' % pnpur.xrlf()) sbe v va enatr(znk): cer = cnegf[:znk-v] #ybt('pnpur gelvat: %e\a' % cer) p = pnpur.trg(ghcyr(cer)) vs p: erfg = cnegf[znk-v:] sbe e va erfg: #ybt('erfbyivat %e sebz %e\a' % (e, p.shyyanzr())) p = p.yerfbyir(e) xrl = ghcyr(cer + [e]) #ybt('fnivat: %e\a' % (xrl,)) pnpur[xrl] = p oernx nffreg(p) erghea p pynff OhcSf(shfr.Shfr): qrs __vavg__(frys, gbc): shfr.Shfr.__vavg__(frys) frys.gbc = gbc qrs trgngge(frys, cngu): ybt('--trgngge(%e)\a' % cngu) gel: abqr = pnpur_trg(frys.gbc, cngu) fg = Fgng() fg.fg_zbqr = abqr.zbqr fg.fg_ayvax = abqr.ayvaxf() fg.fg_fvmr = abqr.fvmr() fg.fg_zgvzr = abqr.zgvzr fg.fg_pgvzr = abqr.pgvzr fg.fg_ngvzr = abqr.ngvzr erghea fg rkprcg isf.AbFhpuSvyr: erghea -reeab.RABRAG qrs ernqqve(frys, cngu, bssfrg): 
ybt('--ernqqve(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) lvryq shfr.Qveragel('.') lvryq shfr.Qveragel('..') sbe fho va abqr.fhof(): lvryq shfr.Qveragel(fho.anzr) qrs ernqyvax(frys, cngu): ybt('--ernqyvax(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) erghea abqr.ernqyvax() qrs bcra(frys, cngu, syntf): ybt('--bcra(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) nppzbqr = bf.B_EQBAYL | bf.B_JEBAYL | bf.B_EQJE vs (syntf & nppzbqr) != bf.B_EQBAYL: erghea -reeab.RNPPRF abqr.bcra() qrs eryrnfr(frys, cngu, syntf): ybt('--eryrnfr(%e)\a' % cngu) qrs ernq(frys, cngu, fvmr, bssfrg): ybt('--ernq(%e)\a' % cngu) a = pnpur_trg(frys.gbc, cngu) b = a.bcra() b.frrx(bssfrg) erghea b.ernq(fvmr) vs abg unfngge(shfr, '__irefvba__'): envfr EhagvzrReebe, "lbhe shfr zbqhyr vf gbb byq sbe shfr.__irefvba__" shfr.shfr_clguba_ncv = (0, 2) bcgfcrp = """ ohc shfr [-q] [-s] -- q,qroht vapernfr qroht yriry s,sbertebhaq eha va sbertebhaq """ b = bcgvbaf.Bcgvbaf('ohc shfr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) s = OhcSf(gbc) s.shfr_netf.zbhagcbvag = rkgen[0] vs bcg.qroht: s.shfr_netf.nqq('qroht') vs bcg.sbertebhaq: s.shfr_netf.frgzbq('sbertebhaq') cevag s.zhygvguernqrq s.zhygvguernqrq = Snyfr s.znva() #!/hfe/ova/rai clguba sebz ohc vzcbeg tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ [OHC_QVE=...] ohc vavg [-e ubfg:cngu] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc vavg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") vs bcg.erzbgr: tvg.vavg_ercb() # ybpny ercb tvg.purpx_ercb_be_qvr() pyv = pyvrag.Pyvrag(bcg.erzbgr, perngr=Gehr) pyv.pybfr() ryfr: tvg.vavg_ercb() #!/hfe/ova/rai clguba vzcbeg flf, zngu, fgehpg, tybo sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * CNTR_FVMR=4096 FUN_CRE_CNTR=CNTR_FVMR/200. 
qrs zretr(vqkyvfg, ovgf, gnoyr): pbhag = 0 sbe r va tvg.vqkzretr(vqkyvfg): pbhag += 1 cersvk = tvg.rkgenpg_ovgf(r, ovgf) gnoyr[cersvk] = pbhag lvryq r qrs qb_zvqk(bhgqve, bhgsvyranzr, vasvyranzrf): vs abg bhgsvyranzr: nffreg(bhgqve) fhz = Fun1('\0'.wbva(vasvyranzrf)).urkqvtrfg() bhgsvyranzr = '%f/zvqk-%f.zvqk' % (bhgqve, fhz) vac = [] gbgny = 0 sbe anzr va vasvyranzrf: vk = tvg.CnpxVqk(anzr) vac.nccraq(vk) gbgny += yra(vk) ybt('Zretvat %q vaqrkrf (%q bowrpgf).\a' % (yra(vasvyranzrf), gbgny)) vs (abg bcg.sbepr naq (gbgny < 1024 naq yra(vasvyranzrf) < 3)) \ be (bcg.sbepr naq abg gbgny): ybt('zvqk: abguvat gb qb.\a') erghea cntrf = vag(gbgny/FUN_CRE_CNTR) be 1 ovgf = vag(zngu.prvy(zngu.ybt(cntrf, 2))) ragevrf = 2**ovgf ybt('Gnoyr fvmr: %q (%q ovgf)\a' % (ragevrf*4, ovgf)) gnoyr = [0]*ragevrf gel: bf.hayvax(bhgsvyranzr) rkprcg BFReebe: cnff s = bcra(bhgsvyranzr + '.gzc', 'j+') s.jevgr('ZVQK\0\0\0\2') s.jevgr(fgehpg.cnpx('!V', ovgf)) nffreg(s.gryy() == 12) s.jevgr('\0'*4*ragevrf) sbe r va zretr(vac, ovgf, gnoyr): s.jevgr(r) s.jevgr('\0'.wbva(bf.cngu.onfranzr(c) sbe c va vasvyranzrf)) s.frrx(12) s.jevgr(fgehpg.cnpx('!%qV' % ragevrf, *gnoyr)) s.pybfr() bf.eranzr(bhgsvyranzr + '.gzc', bhgsvyranzr) # guvf vf whfg sbe grfgvat vs 0: c = tvg.CnpxZvqk(bhgsvyranzr) nffreg(yra(c.vqkanzrf) == yra(vasvyranzrf)) cevag c.vqkanzrf nffreg(yra(c) == gbgny) cv = vgre(c) sbe v va zretr(vac, gbgny, ovgf, gnoyr): nffreg(v == cv.arkg()) nffreg(c.rkvfgf(v)) cevag bhgsvyranzr bcgfcrp = """ ohc zvqk [bcgvbaf...] 
-- b,bhgchg= bhgchg zvqk svyranzr (qrsnhyg: nhgb-trarengrq) n,nhgb nhgbzngvpnyyl perngr .zvqk sebz nal havaqrkrq .vqk svyrf s,sbepr nhgbzngvpnyyl perngr .zvqk sebz *nyy* .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zvqk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen naq (bcg.nhgb be bcg.sbepr): b.sngny("lbh pna'g hfr -s/-n naq nyfb cebivqr svyranzrf") tvg.purpx_ercb_be_qvr() vs rkgen: qb_zvqk(tvg.ercb('bowrpgf/cnpx'), bcg.bhgchg, rkgen) ryvs bcg.nhgb be bcg.sbepr: cnguf = [tvg.ercb('bowrpgf/cnpx')] cnguf += tybo.tybo(tvg.ercb('vaqrk-pnpur/*/.')) sbe cngu va cnguf: ybt('zvqk: fpnaavat %f\a' % cngu) vs bcg.sbepr: qb_zvqk(cngu, bcg.bhgchg, tybo.tybo('%f/*.vqk' % cngu)) ryvs bcg.nhgb: z = tvg.CnpxVqkYvfg(cngu) arrqrq = {} sbe cnpx va z.cnpxf: # bayl .vqk svyrf jvgubhg n .zvqk ner bcra vs cnpx.anzr.raqfjvgu('.vqk'): arrqrq[cnpx.anzr] = 1 qry z qb_zvqk(cngu, bcg.bhgchg, arrqrq.xrlf()) ybt('\a') ryfr: b.sngny("lbh zhfg hfr -s be -n be cebivqr vachg svyranzrf") #!/hfe/ova/rai clguba vzcbeg flf, bf, enaqbz sebz ohc vzcbeg bcgvbaf sebz ohc.urycref vzcbeg * qrs enaqoybpx(a): y = [] sbe v va kenatr(a): y.nccraq(pue(enaqbz.enaqenatr(0,256))) erghea ''.wbva(y) bcgfcrp = """ ohc qnzntr [-a pbhag] [-f znkfvmr] [-F frrq] -- JNEAVAT: GUVF PBZZNAQ VF RKGERZRYL QNATREBHF a,ahz= ahzore bs oybpxf gb qnzntr f,fvmr= znkvzhz fvmr bs rnpu qnzntrq oybpx creprag= znkvzhz fvmr bs rnpu qnzntrq oybpx (nf n creprag bs ragver svyr) rdhny fcernq qnzntr rirayl guebhtubhg gur svyr F,frrq= enaqbz ahzore frrq (sbe ercrngnoyr grfgf) """ b = bcgvbaf.Bcgvbaf('ohc qnzntr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg rkgen: b.sngny('svyranzrf rkcrpgrq') vs bcg.frrq != Abar: enaqbz.frrq(bcg.frrq) sbe anzr va rkgen: ybt('Qnzntvat "%f"...\a' % anzr) s = bcra(anzr, 'e+o') fg = bf.sfgng(s.svyrab()) fvmr = fg.fg_fvmr vs bcg.creprag be bcg.fvmr: zf1 = vag(sybng(bcg.creprag be 0)/100.0*fvmr) be fvmr zf2 = bcg.fvmr be fvmr znkfvmr = zva(zf1, zf2) ryfr: znkfvmr = 1 puhaxf = bcg.ahz be 10 
puhaxfvmr = fvmr/puhaxf sbe e va enatr(puhaxf): fm = enaqbz.enaqenatr(1, znkfvmr+1) vs fm > fvmr: fm = fvmr vs bcg.rdhny: bsf = e*puhaxfvmr ryfr: bsf = enaqbz.enaqenatr(0, fvmr - fm + 1) ybt(' %6q olgrf ng %q\a' % (fm, bsf)) s.frrx(bsf) s.jevgr(enaqoybpx(fm)) s.pybfr() #!/hfe/ova/rai clguba vzcbeg flf, fgehpg, zznc sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * fhfcraqrq_j = Abar qrs vavg_qve(pbaa, net): tvg.vavg_ercb(net) ybt('ohc freire: ohcqve vavgvnyvmrq: %e\a' % tvg.ercbqve) pbaa.bx() qrs frg_qve(pbaa, net): tvg.purpx_ercb_be_qvr(net) ybt('ohc freire: ohcqve vf %e\a' % tvg.ercbqve) pbaa.bx() qrs yvfg_vaqrkrf(pbaa, whax): tvg.purpx_ercb_be_qvr() sbe s va bf.yvfgqve(tvg.ercb('bowrpgf/cnpx')): vs s.raqfjvgu('.vqk'): pbaa.jevgr('%f\a' % s) pbaa.bx() qrs fraq_vaqrk(pbaa, anzr): tvg.purpx_ercb_be_qvr() nffreg(anzr.svaq('/') < 0) nffreg(anzr.raqfjvgu('.vqk')) vqk = tvg.CnpxVqk(tvg.ercb('bowrpgf/cnpx/%f' % anzr)) pbaa.jevgr(fgehpg.cnpx('!V', yra(vqk.znc))) pbaa.jevgr(vqk.znc) pbaa.bx() qrs erprvir_bowrpgf(pbaa, whax): tybony fhfcraqrq_j tvg.purpx_ercb_be_qvr() fhttrfgrq = {} vs fhfcraqrq_j: j = fhfcraqrq_j fhfcraqrq_j = Abar ryfr: j = tvg.CnpxJevgre() juvyr 1: af = pbaa.ernq(4) vs abg af: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq yratgu urnqre, tbg RBS\a') a = fgehpg.hacnpx('!V', af)[0] #ybt('rkcrpgvat %q olgrf\a' % a) vs abg a: ybt('ohc freire: erprvirq %q bowrpg%f.\a' % (j.pbhag, j.pbhag!=1 naq "f" be '')) shyycngu = j.pybfr() vs shyycngu: (qve, anzr) = bf.cngu.fcyvg(shyycngu) pbaa.jevgr('%f.vqk\a' % anzr) pbaa.bx() erghea ryvs a == 0kssssssss: ybt('ohc freire: erprvir-bowrpgf fhfcraqrq.\a') fhfcraqrq_j = j pbaa.bx() erghea ohs = pbaa.ernq(a) # bowrpg fvmrf va ohc ner ernfbanoyl fznyy #ybt('ernq %q olgrf\a' % a) vs yra(ohs) < a: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq %q olgrf, tbg %q\a' % (a, yra(ohs))) (glcr, pbagrag) = tvg._qrpbqr_cnpxbow(ohs) fun = tvg.pnyp_unfu(glcr, pbagrag) byqcnpx = j.rkvfgf(fun) # SVKZR: jr bayl fhttrfg n fvatyr 
vaqrk cre plpyr, orpnhfr gur pyvrag # vf pheeragyl qhzo gb qbjaybnq zber guna bar cre plpyr naljnl. # Npghnyyl jr fubhyq svk gur pyvrag, ohg guvf vf n zvabe bcgvzvmngvba # ba gur freire fvqr. vs abg fhttrfgrq naq \ byqcnpx naq (byqcnpx == Gehr be byqcnpx.raqfjvgu('.zvqk')): # SVKZR: jr fubhyqa'g ernyyl unir gb xabj nobhg zvqk svyrf # ng guvf ynlre. Ohg rkvfgf() ba n zvqk qbrfa'g erghea gur # cnpxanzr (fvapr vg qbrfa'g xabj)... cebonoyl jr fubhyq whfg # svk gung qrsvpvrapl bs zvqk svyrf riraghnyyl, nygubhtu vg'yy # znxr gur svyrf ovttre. Guvf zrgubq vf pregnvayl abg irel # rssvpvrag. j.bowpnpur.erserfu(fxvc_zvqk = Gehr) byqcnpx = j.bowpnpur.rkvfgf(fun) ybt('arj fhttrfgvba: %e\a' % byqcnpx) nffreg(byqcnpx) nffreg(byqcnpx != Gehr) nffreg(abg byqcnpx.raqfjvgu('.zvqk')) j.bowpnpur.erserfu(fxvc_zvqk = Snyfr) vs abg fhttrfgrq naq byqcnpx: nffreg(byqcnpx.raqfjvgu('.vqk')) (qve,anzr) = bf.cngu.fcyvg(byqcnpx) vs abg (anzr va fhttrfgrq): ybt("ohc freire: fhttrfgvat vaqrk %f\a" % anzr) pbaa.jevgr('vaqrk %f\a' % anzr) fhttrfgrq[anzr] = 1 ryfr: j._enj_jevgr([ohs]) # ABGERNPURQ qrs ernq_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() e = tvg.ernq_ers(ersanzr) pbaa.jevgr('%f\a' % (e be '').rapbqr('urk')) pbaa.bx() qrs hcqngr_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() arjiny = pbaa.ernqyvar().fgevc() byqiny = pbaa.ernqyvar().fgevc() tvg.hcqngr_ers(ersanzr, arjiny.qrpbqr('urk'), byqiny.qrpbqr('urk')) pbaa.bx() qrs png(pbaa, vq): tvg.purpx_ercb_be_qvr() gel: sbe oybo va tvg.png(vq): pbaa.jevgr(fgehpg.cnpx('!V', yra(oybo))) pbaa.jevgr(oybo) rkprcg XrlReebe, r: ybt('freire: reebe: %f\a' % r) pbaa.jevgr('\0\0\0\0') pbaa.reebe(r) ryfr: pbaa.jevgr('\0\0\0\0') pbaa.bx() bcgfcrp = """ ohc freire """ b = bcgvbaf.Bcgvbaf('ohc freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') ybt('ohc freire: ernqvat sebz fgqva.\a') pbzznaqf = { 'vavg-qve': vavg_qve, 'frg-qve': frg_qve, 'yvfg-vaqrkrf': yvfg_vaqrkrf, 'fraq-vaqrk': fraq_vaqrk, 
'erprvir-bowrpgf': erprvir_bowrpgf, 'ernq-ers': ernq_ers, 'hcqngr-ers': hcqngr_ers, 'png': png, } # SVKZR: guvf cebgbpby vf gbgnyyl ynzr naq abg ng nyy shgher-cebbs. # (Rfcrpvnyyl fvapr jr nobeg pbzcyrgryl nf fbba nf *nalguvat* onq unccraf) pbaa = Pbaa(flf.fgqva, flf.fgqbhg) ye = yvarernqre(pbaa) sbe _yvar va ye: yvar = _yvar.fgevc() vs abg yvar: pbagvahr ybt('ohc freire: pbzznaq: %e\a' % yvar) jbeqf = yvar.fcyvg(' ', 1) pzq = jbeqf[0] erfg = yra(jbeqf)>1 naq jbeqf[1] be '' vs pzq == 'dhvg': oernx ryfr: pzq = pbzznaqf.trg(pzq) vs pzq: pzq(pbaa, erfg) ryfr: envfr Rkprcgvba('haxabja freire pbzznaq: %e\a' % yvar) ybt('ohc freire: qbar\a') #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc wbva [-e ubfg:cngu] [ersf be unfurf...] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc wbva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg rkgen: rkgen = yvarernqre(flf.fgqva) erg = 0 vs bcg.erzbgr: pyv = pyvrag.Pyvrag(bcg.erzbgr) png = pyv.png ryfr: pc = tvg.PngCvcr() png = pc.wbva sbe vq va rkgen: gel: sbe oybo va png(vq): flf.fgqbhg.jevgr(oybo) rkprcg XrlReebe, r: flf.fgqbhg.syhfu() ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, er, reeab, fgng, gvzr, zngu sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, vaqrk, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc fnir [-gp] [-a anzr] -- e,erzbgr= erzbgr ercbfvgbel cngu g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) d,dhvrg qba'g fubj cebterff zrgre fznyyre= bayl onpx hc svyrf fznyyre guna a olgrf """ b = bcgvbaf.Bcgvbaf('ohc fnir', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny("hfr bar be zber bs -g, -p, -a") vs abg rkgen: b.sngny("ab svyranzrf 
tvira") bcg.cebterff = (vfggl naq abg bcg.dhvrg) bcg.fznyyre = cnefr_ahz(bcg.fznyyre be 0) vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() unaqyr_pgey_p() qrs rngfynfu(qve): vs qve.raqfjvgu('/'): erghea qve[:-1] ryfr: erghea qve cnegf = [''] funyvfgf = [[]] qrs _chfu(cneg): nffreg(cneg) cnegf.nccraq(cneg) funyvfgf.nccraq([]) qrs _cbc(sbepr_gerr): nffreg(yra(cnegf) >= 1) cneg = cnegf.cbc() funyvfg = funyvfgf.cbc() gerr = sbepr_gerr be j.arj_gerr(funyvfg) vs funyvfgf: funyvfgf[-1].nccraq(('40000', cneg, gerr)) ryfr: # guvf jnf gur gbcyriry, fb chg vg onpx sbe fnavgl funyvfgf.nccraq(funyvfg) erghea gerr ynfgerznva = Abar qrs cebterff_ercbeg(a): tybony pbhag, fhopbhag, ynfgerznva fhopbhag += a pp = pbhag + fhopbhag cpg = gbgny naq (pp*100.0/gbgny) be 0 abj = gvzr.gvzr() ryncfrq = abj - gfgneg xcf = ryncfrq naq vag(pp/1024./ryncfrq) xcf_senp = 10 ** vag(zngu.ybt(xcf+1, 10) - 1) xcf = vag(xcf/xcf_senp)*xcf_senp vs pp: erznva = ryncfrq*1.0/pp * (gbgny-pp) ryfr: erznva = 0.0 vs (ynfgerznva naq (erznva > ynfgerznva) naq ((erznva - ynfgerznva)/ynfgerznva < 0.05)): erznva = ynfgerznva ryfr: ynfgerznva = erznva ubhef = vag(erznva/60/60) zvaf = vag(erznva/60 - ubhef*60) frpf = vag(erznva - ubhef*60*60 - zvaf*60) vs ryncfrq < 30: erznvafge = '' xcffge = '' ryfr: xcffge = '%qx/f' % xcf vs ubhef: erznvafge = '%qu%qz' % (ubhef, zvaf) ryvs zvaf: erznvafge = '%qz%q' % (zvaf, frpf) ryfr: erznvafge = '%qf' % frpf cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf) %f %f\e' % (cpg, pp/1024, gbgny/1024, spbhag, sgbgny, erznvafge, xcffge)) e = vaqrk.Ernqre(tvg.ercb('ohcvaqrk')) qrs nyernql_fnirq(rag): erghea rag.vf_inyvq() naq 
j.rkvfgf(rag.fun) naq rag.fun qrs jnagerphefr_cer(rag): erghea abg nyernql_fnirq(rag) qrs jnagerphefr_qhevat(rag): erghea abg nyernql_fnirq(rag) be rag.fun_zvffvat() gbgny = sgbgny = 0 vs bcg.cebterff: sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_cer): vs abg (sgbgny % 10024): cebterff('Ernqvat vaqrk: %q\e' % sgbgny) rkvfgf = rag.rkvfgf() unfuinyvq = nyernql_fnirq(rag) rag.frg_fun_zvffvat(abg unfuinyvq) vs abg bcg.fznyyre be rag.fvmr < bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: gbgny += rag.fvmr sgbgny += 1 cebterff('Ernqvat vaqrk: %q, qbar.\a' % sgbgny) unfufcyvg.cebterff_pnyyonpx = cebterff_ercbeg gfgneg = gvzr.gvzr() pbhag = fhopbhag = spbhag = 0 ynfgfxvc_anzr = Abar ynfgqve = '' sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_qhevat): (qve, svyr) = bf.cngu.fcyvg(rag.anzr) rkvfgf = (rag.syntf & vaqrk.VK_RKVFGF) unfuinyvq = nyernql_fnirq(rag) jnfzvffvat = rag.fun_zvffvat() byqfvmr = rag.fvmr vs bcg.ireobfr: vs abg rkvfgf: fgnghf = 'Q' ryvs abg unfuinyvq: vs rag.fun == vaqrk.RZCGL_FUN: fgnghf = 'N' ryfr: fgnghf = 'Z' ryfr: fgnghf = ' ' vs bcg.ireobfr >= 2: ybt('%f %-70f\a' % (fgnghf, rag.anzr)) ryvs abg fgng.F_VFQVE(rag.zbqr) naq ynfgqve != qve: vs abg ynfgqve.fgnegfjvgu(qve): ybt('%f %-70f\a' % (fgnghf, bf.cngu.wbva(qve, ''))) ynfgqve = qve vs bcg.cebterff: cebterff_ercbeg(0) spbhag += 1 vs abg rkvfgf: pbagvahr vs bcg.fznyyre naq rag.fvmr >= bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: nqq_reebe('fxvccvat ynetr svyr "%f"' % rag.anzr) ynfgfxvc_anzr = rag.anzr pbagvahr nffreg(qve.fgnegfjvgu('/')) qvec = qve.fcyvg('/') juvyr cnegf > qvec: _cbc(sbepr_gerr = Abar) vs qve != '/': sbe cneg va qvec[yra(cnegf):]: _chfu(cneg) vs abg svyr: # ab svyranzr cbegvba zrnaf guvf vf n fhoqve. Ohg # fho/cneragqverpgbevrf nyernql unaqyrq va gur cbc/chfu() cneg nobir. 
byqgerr = nyernql_fnirq(rag) # znl or Abar arjgerr = _cbc(sbepr_gerr = byqgerr) vs abg byqgerr: vs ynfgfxvc_anzr naq ynfgfxvc_anzr.fgnegfjvgu(rag.anzr): rag.vainyvqngr() ryfr: rag.inyvqngr(040000, arjgerr) rag.ercnpx() vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr pbagvahr # vg'f abg n qverpgbel vq = Abar vs unfuinyvq: zbqr = '%b' % rag.tvgzbqr vq = rag.fun funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) ryfr: vs fgng.F_VFERT(rag.zbqr): gel: s = unfufcyvg.bcra_abngvzr(rag.anzr) rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = unfufcyvg.fcyvg_gb_oybo_be_gerr(j, [s]) ryfr: vs fgng.F_VFQVE(rag.zbqr): nffreg(0) # unaqyrq nobir ryvs fgng.F_VFYAX(rag.zbqr): gel: ey = bf.ernqyvax(rag.anzr) rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = ('120000', j.arj_oybo(ey)) ryfr: nqq_reebe(Rkprcgvba('fxvccvat fcrpvny svyr "%f"' % rag.anzr)) ynfgfxvc_anzr = rag.anzr vs vq: rag.inyvqngr(vag(zbqr, 8), vq) rag.ercnpx() funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr fhopbhag = 0 vs bcg.cebterff: cpg = gbgny naq pbhag*100.0/gbgny be 100 cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf), qbar. 
\a' % (cpg, pbhag/1024, gbgny/1024, spbhag, sgbgny)) juvyr yra(cnegf) > 1: _cbc(sbepr_gerr = Abar) nffreg(yra(funyvfgf) == 1) gerr = j.arj_gerr(funyvfgf[-1]) vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fnir\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq juvyr fnivat.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc gvpx """ b = bcgvbaf.Bcgvbaf('ohc gvpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") g = gvzr.gvzr() gyrsg = 1 - (g - vag(g)) gvzr.fyrrc(gyrsg) #!/hfe/ova/rai clguba vzcbeg bf, flf, fgng, gvzr sebz ohc vzcbeg bcgvbaf, tvg, vaqrk, qerphefr sebz ohc.urycref vzcbeg * qrs zretr_vaqrkrf(bhg, e1, e2): sbe r va vaqrk.ZretrVgre([e1, e2]): # SVKZR: fubhyqa'g jr erzbir qryrgrq ragevrf riraghnyyl? Jura? 
bhg.nqq_vkragel(r) pynff VgreUrycre: qrs __vavg__(frys, y): frys.v = vgre(y) frys.phe = Abar frys.arkg() qrs arkg(frys): gel: frys.phe = frys.v.arkg() rkprcg FgbcVgrengvba: frys.phe = Abar erghea frys.phe qrs purpx_vaqrk(ernqre): gel: ybt('purpx: purpxvat sbejneq vgrengvba...\a') r = Abar q = {} sbe r va ernqre.sbejneq_vgre(): vs r.puvyqera_a: vs bcg.ireobfr: ybt('%08k+%-4q %e\a' % (r.puvyqera_bsf, r.puvyqera_a, r.anzr)) nffreg(r.puvyqera_bsf) nffreg(r.anzr.raqfjvgu('/')) nffreg(abg q.trg(r.puvyqera_bsf)) q[r.puvyqera_bsf] = 1 vs r.syntf & vaqrk.VK_UNFUINYVQ: nffreg(r.fun != vaqrk.RZCGL_FUN) nffreg(r.tvgzbqr) nffreg(abg r be r.anzr == '/') # ynfg ragel vf *nyjnlf* / ybt('purpx: purpxvat abezny vgrengvba...\a') ynfg = Abar sbe r va ernqre: vs ynfg: nffreg(ynfg > r.anzr) ynfg = r.anzr rkprcg: ybt('vaqrk reebe! ng %e\a' % r) envfr ybt('purpx: cnffrq.\a') qrs hcqngr_vaqrk(gbc): ev = vaqrk.Ernqre(vaqrksvyr) jv = vaqrk.Jevgre(vaqrksvyr) evt = VgreUrycre(ev.vgre(anzr=gbc)) gfgneg = vag(gvzr.gvzr()) unfutra = Abar vs bcg.snxr_inyvq: qrs unfutra(anzr): erghea (0100644, vaqrk.SNXR_FUN) gbgny = 0 sbe (cngu,cfg) va qerphefr.erphefvir_qveyvfg([gbc], kqri=bcg.kqri): vs bcg.ireobfr>=2 be (bcg.ireobfr==1 naq fgng.F_VFQVE(cfg.fg_zbqr)): flf.fgqbhg.jevgr('%f\a' % cngu) flf.fgqbhg.syhfu() cebterff('Vaqrkvat: %q\e' % gbgny) ryvs abg (gbgny % 128): cebterff('Vaqrkvat: %q\e' % gbgny) gbgny += 1 juvyr evt.phe naq evt.phe.anzr > cngu: # qryrgrq cnguf vs evt.phe.rkvfgf(): evt.phe.frg_qryrgrq() evt.phe.ercnpx() evt.arkg() vs evt.phe naq evt.phe.anzr == cngu: # cnguf gung nyernql rkvfgrq vs cfg: evt.phe.sebz_fgng(cfg, gfgneg) vs abg (evt.phe.syntf & vaqrk.VK_UNFUINYVQ): vs unfutra: (evt.phe.tvgzbqr, evt.phe.fun) = unfutra(cngu) evt.phe.syntf |= vaqrk.VK_UNFUINYVQ vs bcg.snxr_vainyvq: evt.phe.vainyvqngr() evt.phe.ercnpx() evt.arkg() ryfr: # arj cnguf jv.nqq(cngu, cfg, unfutra = unfutra) cebterff('Vaqrkvat: %q, qbar.\a' % gbgny) vs ev.rkvfgf(): ev.fnir() jv.syhfu() vs jv.pbhag: je = 
jv.arj_ernqre() vs bcg.purpx: ybt('purpx: orsber zretvat: byqsvyr\a') purpx_vaqrk(ev) ybt('purpx: orsber zretvat: arjsvyr\a') purpx_vaqrk(je) zv = vaqrk.Jevgre(vaqrksvyr) zretr_vaqrkrf(zv, ev, je) ev.pybfr() zv.pybfr() je.pybfr() jv.nobeg() ryfr: jv.pybfr() bcgfcrp = """ ohc vaqrk <-c|z|h> [bcgvbaf...] -- c,cevag cevag gur vaqrk ragevrf sbe gur tvira anzrf (nyfb jbexf jvgu -h) z,zbqvsvrq cevag bayl nqqrq/qryrgrq/zbqvsvrq svyrf (vzcyvrf -c) f,fgnghf cevag rnpu svyranzr jvgu n fgnghf pune (N/Z/Q) (vzcyvrf -c) U,unfu cevag gur unfu sbe rnpu bowrpg arkg gb vgf anzr (vzcyvrf -c) y,ybat cevag zber vasbezngvba nobhg rnpu svyr h,hcqngr (erphefviryl) hcqngr gur vaqrk ragevrf sbe gur tvira svyranzrf k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf snxr-inyvq znex nyy vaqrk ragevrf nf hc-gb-qngr rira vs gurl nera'g snxr-vainyvq znex nyy vaqrk ragevrf nf vainyvq purpx pnershyyl purpx vaqrk svyr vagrtevgl s,vaqrksvyr= gur anzr bs gur vaqrk svyr (qrsnhyg 'vaqrk') i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) """ b = bcgvbaf.Bcgvbaf('ohc vaqrk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg (bcg.zbqvsvrq be bcg['cevag'] be bcg.fgnghf be bcg.hcqngr be bcg.purpx): b.sngny('fhccyl bar be zber bs -c, -f, -z, -h, be --purpx') vs (bcg.snxr_inyvq be bcg.snxr_vainyvq) naq abg bcg.hcqngr: b.sngny('--snxr-{va,}inyvq ner zrnavatyrff jvgubhg -h') vs bcg.snxr_inyvq naq bcg.snxr_vainyvq: b.sngny('--snxr-inyvq vf vapbzcngvoyr jvgu --snxr-vainyvq') tvg.purpx_ercb_be_qvr() vaqrksvyr = bcg.vaqrksvyr be tvg.ercb('ohcvaqrk') unaqyr_pgey_p() vs bcg.purpx: ybt('purpx: fgnegvat vavgvny purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) cnguf = vaqrk.erqhpr_cnguf(rkgen) vs bcg.hcqngr: vs abg cnguf: b.sngny('hcqngr (-h) erdhrfgrq ohg ab cnguf tvira') sbe (ec,cngu) va cnguf: hcqngr_vaqrk(ec) vs bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq: sbe (anzr, rag) va vaqrk.Ernqre(vaqrksvyr).svygre(rkgen be ['']): vs (bcg.zbqvsvrq naq (rag.vf_inyvq() be rag.vf_qryrgrq() be abg 
rag.zbqr)): pbagvahr yvar = '' vs bcg.fgnghf: vs rag.vf_qryrgrq(): yvar += 'Q ' ryvs abg rag.vf_inyvq(): vs rag.fun == vaqrk.RZCGL_FUN: yvar += 'N ' ryfr: yvar += 'Z ' ryfr: yvar += ' ' vs bcg.unfu: yvar += rag.fun.rapbqr('urk') + ' ' vs bcg.ybat: yvar += "%7f %7f " % (bpg(rag.zbqr), bpg(rag.tvgzbqr)) cevag yvar + (anzr be './') vs bcg.purpx naq (bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq be bcg.hcqngr): ybt('purpx: fgnegvat svany purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg sebz ohc vzcbeg bcgvbaf, urycref bcgfcrp = """ ohc eonpxhc-freire -- Guvf pbzznaq vf abg vagraqrq gb or eha znahnyyl. """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc-freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') # trg gur fhopbzznaq'f neti. # Abeznyyl jr pbhyq whfg cnff guvf ba gur pbzznaq yvar, ohg fvapr jr'yy bsgra # or trggvat pnyyrq ba gur bgure raq bs na ffu cvcr, juvpu graqf gb znatyr # neti (ol fraqvat vg ivn gur furyy), guvf jnl vf zhpu fnsre. ohs = flf.fgqva.ernq(4) fm = fgehpg.hacnpx('!V', ohs)[0] nffreg(fm > 0) nffreg(fm < 1000000) ohs = flf.fgqva.ernq(fm) nffreg(yra(ohs) == fm) neti = ohs.fcyvg('\0') # fgqva/fgqbhg ner fhccbfrqyl pbaarpgrq gb 'ohc freire' gung gur pnyyre # fgnegrq sbe hf (bsgra ba gur bgure raq bs na ffu ghaary), fb jr qba'g jnag # gb zvfhfr gurz. Zbir gurz bhg bs gur jnl, gura ercynpr fgqbhg jvgu # n cbvagre gb fgqree va pnfr bhe fhopbzznaq jnagf gb qb fbzrguvat jvgu vg. # # Vg zvtug or avpr gb qb gur fnzr jvgu fgqva, ohg zl rkcrevzragf fubjrq gung # ffu frrzf gb znxr vgf puvyq'f fgqree n ernqnoyr-ohg-arire-ernqf-nalguvat # fbpxrg. Gurl ernyyl fubhyq unir hfrq fuhgqbja(FUHG_JE) ba gur bgure raq # bs vg, ohg cebonoyl qvqa'g. Naljnl, vg'f gbb zrffl, fb yrg'f whfg znxr fher # nalbar ernqvat sebz fgqva vf qvfnccbvagrq. 
# # (Lbh pna'g whfg yrnir fgqva/fgqbhg "abg bcra" ol pybfvat gur svyr # qrfpevcgbef. Gura gur arkg svyr gung bcraf vf nhgbzngvpnyyl nffvtarq 0 be 1, # naq crbcyr *gelvat* gb ernq/jevgr fgqva/fgqbhg trg fperjrq.) bf.qhc2(0, 3) bf.qhc2(1, 4) bf.qhc2(2, 1) sq = bf.bcra('/qri/ahyy', bf.B_EQBAYL) bf.qhc2(sq, 0) bf.pybfr(sq) bf.raiveba['OHC_FREIRE_ERIREFR'] = urycref.ubfganzr() bf.rkrpic(neti[0], neti) flf.rkvg(99) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo, fhocebprff, gvzr sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * cne2_bx = 0 ahyys = bcra('/qri/ahyy') qrs qroht(f): vs bcg.ireobfr: ybt(f) qrs eha(neti): # ng yrnfg va clguba 2.5, hfvat "fgqbhg=2" be "fgqbhg=flf.fgqree" orybj # qbrfa'g npghnyyl jbex, orpnhfr fhocebprff pybfrf sq #2 evtug orsber # rkrpvat sbe fbzr ernfba. Fb jr jbex nebhaq vg ol qhcyvpngvat gur sq # svefg. sq = bf.qhc(2) # pbcl fgqree gel: c = fhocebprff.Cbcra(neti, fgqbhg=sq, pybfr_sqf=Snyfr) erghea c.jnvg() svanyyl: bf.pybfr(sq) qrs cne2_frghc(): tybony cne2_bx ei = 1 gel: c = fhocebprff.Cbcra(['cne2', '--uryc'], fgqbhg=ahyys, fgqree=ahyys, fgqva=ahyys) ei = c.jnvg() rkprcg BFReebe: ybt('sfpx: jneavat: cne2 abg sbhaq; qvfnoyvat erpbirel srngherf.\a') ryfr: cne2_bx = 1 qrs cnei(yiy): vs bcg.ireobfr >= yiy: vs vfggl: erghea [] ryfr: erghea ['-d'] ryfr: erghea ['-dd'] qrs cne2_trarengr(onfr): erghea eha(['cne2', 'perngr', '-a1', '-p200'] + cnei(2) + ['--', onfr, onfr+'.cnpx', onfr+'.vqk']) qrs cne2_irevsl(onfr): erghea eha(['cne2', 'irevsl'] + cnei(3) + ['--', onfr]) qrs cne2_ercnve(onfr): erghea eha(['cne2', 'ercnve'] + cnei(2) + ['--', onfr]) qrs dhvpx_irevsl(onfr): s = bcra(onfr + '.cnpx', 'eo') s.frrx(-20, 2) jnagfhz = s.ernq(20) nffreg(yra(jnagfhz) == 20) s.frrx(0) fhz = Fun1() sbe o va puhaxlernqre(s, bf.sfgng(s.svyrab()).fg_fvmr - 20): fhz.hcqngr(o) vs fhz.qvtrfg() != jnagfhz: envfr InyhrReebe('rkcrpgrq %e, tbg %e' % (jnagfhz.rapbqr('urk'), fhz.urkqvtrfg())) qrs tvg_irevsl(onfr): vs bcg.dhvpx: gel: dhvpx_irevsl(onfr) rkprcg Rkprcgvba, r: 
qroht('reebe: %f\a' % r) erghea 1 erghea 0 ryfr: erghea eha(['tvg', 'irevsl-cnpx', '--', onfr]) qrs qb_cnpx(onfr, ynfg): pbqr = 0 vs cne2_bx naq cne2_rkvfgf naq (bcg.ercnve be abg bcg.trarengr): ierfhyg = cne2_irevsl(onfr) vs ierfhyg != 0: vs bcg.ercnve: eerfhyg = cne2_ercnve(onfr) vs eerfhyg != 0: cevag '%f cne2 ercnve: snvyrq (%q)' % (ynfg, eerfhyg) pbqr = eerfhyg ryfr: cevag '%f cne2 ercnve: fhpprrqrq (0)' % ynfg pbqr = 100 ryfr: cevag '%f cne2 irevsl: snvyrq (%q)' % (ynfg, ierfhyg) pbqr = ierfhyg ryfr: cevag '%f bx' % ynfg ryvs abg bcg.trarengr be (cne2_bx naq abg cne2_rkvfgf): terfhyg = tvg_irevsl(onfr) vs terfhyg != 0: cevag '%f tvg irevsl: snvyrq (%q)' % (ynfg, terfhyg) pbqr = terfhyg ryfr: vs cne2_bx naq bcg.trarengr: cerfhyg = cne2_trarengr(onfr) vs cerfhyg != 0: cevag '%f cne2 perngr: snvyrq (%q)' % (ynfg, cerfhyg) pbqr = cerfhyg ryfr: cevag '%f bx' % ynfg ryfr: cevag '%f bx' % ynfg ryfr: nffreg(bcg.trarengr naq (abg cne2_bx be cne2_rkvfgf)) qroht(' fxvccrq: cne2 svyr nyernql trarengrq.\a') erghea pbqr bcgfcrp = """ ohc sfpx [bcgvbaf...] [svyranzrf...] -- e,ercnve nggrzcg gb ercnve reebef hfvat cne2 (qnatrebhf!) 
t,trarengr trarengr nhgb-ercnve vasbezngvba hfvat cne2 i,ireobfr vapernfr ireobfvgl (pna or hfrq zber guna bapr) dhvpx whfg purpx cnpx fun1fhz, qba'g hfr tvg irevsl-cnpx w,wbof= eha 'a' wbof va cnenyyry cne2-bx vzzrqvngryl erghea 0 vs cne2 vf bx, 1 vs abg qvfnoyr-cne2 vtaber cne2 rira vs vg vf ninvynoyr """ b = bcgvbaf.Bcgvbaf('ohc sfpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) cne2_frghc() vs bcg.cne2_bx: vs cne2_bx: flf.rkvg(0) # 'gehr' va fu ryfr: flf.rkvg(1) vs bcg.qvfnoyr_cne2: cne2_bx = 0 tvg.purpx_ercb_be_qvr() vs abg rkgen: qroht('sfpx: Ab svyranzrf tvira: purpxvat nyy cnpxf.\a') rkgen = tybo.tybo(tvg.ercb('bowrpgf/cnpx/*.cnpx')) pbqr = 0 pbhag = 0 bhgfgnaqvat = {} sbe anzr va rkgen: vs anzr.raqfjvgu('.cnpx'): onfr = anzr[:-5] ryvs anzr.raqfjvgu('.vqk'): onfr = anzr[:-4] ryvs anzr.raqfjvgu('.cne2'): onfr = anzr[:-5] ryvs bf.cngu.rkvfgf(anzr + '.cnpx'): onfr = anzr ryfr: envfr Rkprcgvba('%f vf abg n cnpx svyr!' % anzr) (qve,ynfg) = bf.cngu.fcyvg(onfr) cne2_rkvfgf = bf.cngu.rkvfgf(onfr + '.cne2') vs cne2_rkvfgf naq bf.fgng(onfr + '.cne2').fg_fvmr == 0: cne2_rkvfgf = 0 flf.fgqbhg.syhfu() qroht('sfpx: purpxvat %f (%f)\a' % (ynfg, cne2_bx naq cne2_rkvfgf naq 'cne2' be 'tvg')) vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.wbof: ap = qb_cnpx(onfr, ynfg) pbqr = pbqr be ap pbhag += 1 ryfr: juvyr yra(bhgfgnaqvat) >= bcg.wbof: (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 cvq = bf.sbex() vs cvq: # cnerag bhgfgnaqvat[cvq] = 1 ryfr: # puvyq gel: flf.rkvg(qb_cnpx(onfr, ynfg)) rkprcg Rkprcgvba, r: ybt('rkprcgvba: %e\a' % r) flf.rkvg(99) juvyr yra(bhgfgnaqvat): (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.ireobfr naq vfggl: ybt('sfpx qbar. 
\a') flf.rkvg(pbqr) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg, trgbcg, fhocebprff, fvtany sebz ohc vzcbeg bcgvbaf, ffu sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc eonpxhc vaqrk ... ohc eonpxhc fnir ... ohc eonpxhc fcyvg ... """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc', bcgfcrp, bcgshap=trgbcg.trgbcg) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) < 2: b.sngny('nethzragf rkcrpgrq') pynff FvtRkprcgvba(Rkprcgvba): qrs __vavg__(frys, fvtahz): frys.fvtahz = fvtahz Rkprcgvba.__vavg__(frys, 'fvtany %q erprvirq' % fvtahz) qrs unaqyre(fvtahz, senzr): envfr FvtRkprcgvba(fvtahz) fvtany.fvtany(fvtany.FVTGREZ, unaqyre) fvtany.fvtany(fvtany.FVTVAG, unaqyre) fc = Abar c = Abar erg = 99 gel: ubfganzr = rkgen[0] neti = rkgen[1:] c = ffu.pbaarpg(ubfganzr, 'eonpxhc-freire') netif = '\0'.wbva(['ohc'] + neti) c.fgqva.jevgr(fgehpg.cnpx('!V', yra(netif)) + netif) c.fgqva.syhfu() znva_rkr = bf.raiveba.trg('OHC_ZNVA_RKR') be flf.neti[0] fc = fhocebprff.Cbcra([znva_rkr, 'freire'], fgqva=c.fgqbhg, fgqbhg=c.fgqva) c.fgqva.pybfr() c.fgqbhg.pybfr() svanyyl: juvyr 1: # vs jr trg n fvtany juvyr jnvgvat, jr unir gb xrrc jnvgvat, whfg # va pnfr bhe puvyq qbrfa'g qvr. 
gel: erg = c.jnvg() fc.jnvg() oernx rkprcg FvtRkprcgvba, r: ybt('\aohc eonpxhc: %f\a' % r) bf.xvyy(c.cvq, r.fvtahz) erg = 84 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc arjyvare """ b = bcgvbaf.Bcgvbaf('ohc arjyvare', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") e = er.pbzcvyr(e'([\e\a])') ynfgyra = 0 nyy = '' juvyr 1: y = e.fcyvg(nyy, 1) vs yra(y) <= 1: gel: o = bf.ernq(flf.fgqva.svyrab(), 4096) rkprcg XrlobneqVagreehcg: oernx vs abg o: oernx nyy += o ryfr: nffreg(yra(y) == 3) (yvar, fcyvgpune, nyy) = y #fcyvgpune = '\a' flf.fgqbhg.jevgr('%-*f%f' % (ynfgyra, yvar, fcyvgpune)) vs fcyvgpune == '\e': ynfgyra = yra(yvar) ryfr: ynfgyra = 0 flf.fgqbhg.syhfu() vs ynfgyra be nyy: flf.fgqbhg.jevgr('%-*f\a' % (ynfgyra, nyy)) #!/hfe/ova/rai clguba vzcbeg flf sebz ohc vzcbeg bcgvbaf, tvg, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc znetva """ b = bcgvbaf.Bcgvbaf('ohc znetva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") tvg.purpx_ercb_be_qvr() #tvg.vtaber_zvqk = 1 zv = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) ynfg = '\0'*20 ybatzngpu = 0 sbe v va zv: vs v == ynfg: pbagvahr #nffreg(fge(v) >= ynfg) cz = _unfufcyvg.ovgzngpu(ynfg, v) ybatzngpu = znk(ybatzngpu, cz) ynfg = v cevag ybatzngpu #!/hfe/ova/rai clguba sebz ohc vzcbeg bcgvbaf, qerphefr sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc qerphefr -- k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf d,dhvrg qba'g npghnyyl cevag svyranzrf cebsvyr eha haqre gur clguba cebsvyre """ b = bcgvbaf.Bcgvbaf('ohc qerphefr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar svyranzr rkcrpgrq") vg = qerphefr.erphefvir_qveyvfg(rkgen, bcg.kqri) vs bcg.cebsvyr: vzcbeg pCebsvyr qrs qb_vg(): sbe v va vg: cnff pCebsvyr.eha('qb_vg()') ryfr: vs bcg.dhvrg: sbe v va vg: cnff ryfr: sbe (anzr,fg) va vg: cevag anzr vs fnirq_reebef: 
ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc fcyvg [-gpo] [-a anzr] [--orapu] [svyranzrf...] -- e,erzbgr= erzbgr ercbfvgbel cngu o,oybof bhgchg n frevrf bs oybo vqf g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) A,abbc qba'g npghnyyl fnir gur qngn naljurer d,dhvrg qba'g cevag cebterff zrffntrf i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) pbcl whfg pbcl vachg gb bhgchg, unfufcyvggvat nybat gur jnl orapu cevag orapuznex gvzvatf gb fgqree znk-cnpx-fvmr= znkvzhz olgrf va n fvatyr cnpx znk-cnpx-bowrpgf= znkvzhz ahzore bs bowrpgf va n fvatyr cnpx snabhg= znkvzhz ahzore bs oybof va n fvatyr gerr """ b = bcgvbaf.Bcgvbaf('ohc fcyvg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr be bcg.abbc be bcg.pbcl): b.sngny("hfr bar be zber bs -o, -g, -p, -a, -A, --pbcl") vs (bcg.abbc be bcg.pbcl) naq (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny('-A vf vapbzcngvoyr jvgu -o, -g, -p, -a') vs bcg.ireobfr >= 2: tvg.ireobfr = bcg.ireobfr - 1 bcg.orapu = 1 vs bcg.znk_cnpx_fvmr: unfufcyvg.znk_cnpx_fvmr = cnefr_ahz(bcg.znk_cnpx_fvmr) vs bcg.znk_cnpx_bowrpgf: unfufcyvg.znk_cnpx_bowrpgf = cnefr_ahz(bcg.znk_cnpx_bowrpgf) vs bcg.snabhg: unfufcyvg.snabhg = cnefr_ahz(bcg.snabhg) vs bcg.oybof: unfufcyvg.snabhg = 0 vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") fgneg_gvzr = gvzr.gvzr() ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.abbc be bcg.pbcl: pyv = j = byqers = Abar ryvs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr 
naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() svyrf = rkgen naq (bcra(sa) sbe sa va rkgen) be [flf.fgqva] vs j: funyvfg = unfufcyvg.fcyvg_gb_funyvfg(j, svyrf) gerr = j.arj_gerr(funyvfg) ryfr: ynfg = 0 sbe (oybo, ovgf) va unfufcyvg.unfufcyvg_vgre(svyrf): unfufcyvg.gbgny_fcyvg += yra(oybo) vs bcg.pbcl: flf.fgqbhg.jevgr(fge(oybo)) zrtf = unfufcyvg.gbgny_fcyvg/1024/1024 vs abg bcg.dhvrg naq ynfg != zrtf: cebterff('%q Zolgrf ernq\e' % zrtf) ynfg = zrtf cebterff('%q Zolgrf ernq, qbar.\a' % zrtf) vs bcg.ireobfr: ybt('\a') vs bcg.oybof: sbe (zbqr,anzr,ova) va funyvfg: cevag ova.rapbqr('urk') vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fcyvg\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') vs j: j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() frpf = gvzr.gvzr() - fgneg_gvzr fvmr = unfufcyvg.gbgny_fcyvg vs bcg.orapu: ybt('\aohc: %.2sxolgrf va %.2s frpf = %.2s xolgrf/frp\a' % (fvmr/1024., frpf, fvmr/1024./frpf)) #!/hfe/ova/rai clguba vzcbeg flf, er, fgehpg, zznc sebz ohc vzcbeg tvg, bcgvbaf sebz ohc.urycref vzcbeg * qrs f_sebz_olgrf(olgrf): pyvfg = [pue(o) sbe o va olgrf] erghea ''.wbva(pyvfg) qrs ercbeg(pbhag): svryqf = ['IzFvmr', 'IzEFF', 'IzQngn', 'IzFgx'] q = {} sbe yvar va bcra('/cebp/frys/fgnghf').ernqyvarf(): y = er.fcyvg(e':\f*', yvar.fgevc(), 1) q[y[0]] = y[1] vs pbhag >= 0: r1 = pbhag svryqf = [q[x] sbe x va svryqf] ryfr: r1 = '' cevag ('%9f ' + ('%10f ' * yra(svryqf))) % ghcyr([r1] + svryqf) flf.fgqbhg.syhfu() bcgfcrp = """ ohc zrzgrfg [-a ryrzragf] [-p plpyrf] -- a,ahzore= ahzore bs bowrpgf cre plpyr p,plpyrf= ahzore bs plpyrf gb eha vtaber-zvqk vtaber .zvqk svyrf, hfr bayl .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zrzgrfg', bcgfcrp) (bcg, syntf, rkgen) = 
b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') tvg.vtaber_zvqk = bcg.vtaber_zvqk tvg.purpx_ercb_be_qvr() z = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) plpyrf = bcg.plpyrf be 100 ahzore = bcg.ahzore be 10000 ercbeg(-1) s = bcra('/qri/henaqbz') n = zznc.zznc(-1, 20) ercbeg(0) sbe p va kenatr(plpyrf): sbe a va kenatr(ahzore): o = s.ernq(3) vs 0: olgrf = yvfg(fgehpg.hacnpx('!OOO', o)) + [0]*17 olgrf[2] &= 0ks0 ova = fgehpg.cnpx('!20f', f_sebz_olgrf(olgrf)) ryfr: n[0:2] = o[0:2] n[2] = pue(beq(o[2]) & 0ks0) ova = fge(n[0:20]) #cevag ova.rapbqr('urk') z.rkvfgf(ova) ercbeg((p+1)*ahzore) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * qrs cevag_abqr(grkg, a): cersvk = '' vs bcg.unfu: cersvk += "%f " % a.unfu.rapbqr('urk') vs fgng.F_VFQVE(a.zbqr): cevag '%f%f/' % (cersvk, grkg) ryvs fgng.F_VFYAX(a.zbqr): cevag '%f%f@' % (cersvk, grkg) ryfr: cevag '%f%f' % (cersvk, grkg) bcgfcrp = """ ohc yf -- f,unfu fubj unfu sbe rnpu svyr """ b = bcgvbaf.Bcgvbaf('ohc yf', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) vs abg rkgen: rkgen = ['/'] erg = 0 sbe q va rkgen: gel: a = gbc.yerfbyir(q) vs fgng.F_VFQVE(a.zbqr): sbe fho va a: cevag_abqr(fho.anzr, fho) ryfr: cevag_abqr(q, a) rkprcg isf.AbqrReebe, r: ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er, fgng, ernqyvar, sazngpu sebz ohc vzcbeg bcgvbaf, tvg, fudhbgr, isf sebz ohc.urycref vzcbeg * qrs abqr_anzr(grkg, a): vs fgng.F_VFQVE(a.zbqr): erghea '%f/' % grkg ryvs fgng.F_VFYAX(a.zbqr): erghea '%f@' % grkg ryfr: erghea '%f' % grkg qrs qb_yf(cngu, a): y = [] vs fgng.F_VFQVE(a.zbqr): sbe fho va a: y.nccraq(abqr_anzr(fho.anzr, fho)) ryfr: y.nccraq(abqr_anzr(cngu, a)) cevag pbyhzangr(y, '') qrs jevgr_gb_svyr(vas, bhgs): sbe oybo va puhaxlernqre(vas): bhgs.jevgr(oybo) qrs vachgvgre(): vs bf.vfnggl(flf.fgqva.svyrab()): juvyr 1: gel: lvryq enj_vachg('ohc> ') rkprcg RBSReebe: oernx 
ryfr: sbe yvar va flf.fgqva: lvryq yvar qrs _pbzcyrgre_trg_fhof(yvar): (dglcr, ynfgjbeq) = fudhbgr.hasvavfurq_jbeq(yvar) (qve,anzr) = bf.cngu.fcyvg(ynfgjbeq) #ybt('\apbzcyrgre: %e %e %e\a' % (dglcr, ynfgjbeq, grkg)) a = cjq.erfbyir(qve) fhof = yvfg(svygre(ynzoqn k: k.anzr.fgnegfjvgu(anzr), a.fhof())) erghea (qve, anzr, dglcr, ynfgjbeq, fhof) _ynfg_yvar = Abar _ynfg_erf = Abar qrs pbzcyrgre(grkg, fgngr): tybony _ynfg_yvar tybony _ynfg_erf gel: yvar = ernqyvar.trg_yvar_ohssre()[:ernqyvar.trg_raqvqk()] vs _ynfg_yvar != yvar: _ynfg_erf = _pbzcyrgre_trg_fhof(yvar) _ynfg_yvar = yvar (qve, anzr, dglcr, ynfgjbeq, fhof) = _ynfg_erf vs fgngr < yra(fhof): fa = fhof[fgngr] fa1 = fa.erfbyir('') # qrers flzyvaxf shyyanzr = bf.cngu.wbva(qve, fa.anzr) vs fgng.F_VFQVE(fa1.zbqr): erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr+'/', grezvangr=Snyfr) ryfr: erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr, grezvangr=Gehr) + ' ' erghea grkg + erg rkprcg Rkprcgvba, r: ybt('\areebe va pbzcyrgvba: %f\a' % r) bcgfcrp = """ ohc sgc """ b = bcgvbaf.Bcgvbaf('ohc sgc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) cjq = gbc vs rkgen: yvarf = rkgen ryfr: ernqyvar.frg_pbzcyrgre_qryvzf(' \g\a\e/') ernqyvar.frg_pbzcyrgre(pbzcyrgre) ernqyvar.cnefr_naq_ovaq("gno: pbzcyrgr") yvarf = vachgvgre() sbe yvar va yvarf: vs abg yvar.fgevc(): pbagvahr jbeqf = [jbeq sbe (jbeqfgneg,jbeq) va fudhbgr.dhbgrfcyvg(yvar)] pzq = jbeqf[0].ybjre() #ybt('rkrphgr: %e %e\a' % (pzq, cnez)) gel: vs pzq == 'yf': sbe cnez va (jbeqf[1:] be ['.']): qb_yf(cnez, cjq.erfbyir(cnez)) ryvs pzq == 'pq': sbe cnez va jbeqf[1:]: cjq = cjq.erfbyir(cnez) ryvs pzq == 'cjq': cevag cjq.shyyanzr() ryvs pzq == 'png': sbe cnez va jbeqf[1:]: jevgr_gb_svyr(cjq.erfbyir(cnez).bcra(), flf.fgqbhg) ryvs pzq == 'trg': vs yra(jbeqf) abg va [2,3]: envfr Rkprcgvba('Hfntr: trg [ybpnyanzr]') eanzr = jbeqf[1] (qve,onfr) = bf.cngu.fcyvg(eanzr) yanzr = yra(jbeqf)>2 naq jbeqf[2] be onfr vas = 
cjq.erfbyir(eanzr).bcra() ybt('Fnivat %e\a' % yanzr) jevgr_gb_svyr(vas, bcra(yanzr, 'jo')) ryvs pzq == 'ztrg': sbe cnez va jbeqf[1:]: (qve,onfr) = bf.cngu.fcyvg(cnez) sbe a va cjq.erfbyir(qve).fhof(): vs sazngpu.sazngpu(a.anzr, onfr): gel: ybt('Fnivat %e\a' % a.anzr) vas = a.bcra() bhgs = bcra(a.anzr, 'jo') jevgr_gb_svyr(vas, bhgs) bhgs.pybfr() rkprcg Rkprcgvba, r: ybt(' reebe: %f\a' % r) ryvs pzq == 'uryc' be pzq == '?': ybt('Pbzznaqf: yf pq cjq png trg ztrg uryc dhvg\a') ryvs pzq == 'dhvg' be pzq == 'rkvg' be pzq == 'olr': oernx ryfr: envfr Rkprcgvba('ab fhpu pbzznaq %e' % pzq) rkprcg Rkprcgvba, r: ybt('reebe: %f\a' % r) #envfr #!/hfe/ova/rai clguba vzcbeg flf, zznc sebz ohc vzcbeg bcgvbaf, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc enaqbz [-F frrq] -- F,frrq= bcgvbany enaqbz ahzore frrq (qrsnhyg 1) s,sbepr cevag enaqbz qngn gb fgqbhg rira vs vg'f n ggl """ b = bcgvbaf.Bcgvbaf('ohc enaqbz', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") gbgny = cnefr_ahz(rkgen[0]) vs bcg.sbepr be (abg bf.vfnggl(1) naq abg ngbv(bf.raiveba.trg('OHC_SBEPR_GGL')) & 1): _unfufcyvg.jevgr_enaqbz(flf.fgqbhg.svyrab(), gbgny, bcg.frrq be 0) ryfr: ybt('reebe: abg jevgvat ovanel qngn gb n grezvany. 
Hfr -s gb sbepr.\a') flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc uryc """ b = bcgvbaf.Bcgvbaf('ohc uryc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) == 0: # gur jenccre cebtenz cebivqrf gur qrsnhyg hfntr fgevat bf.rkrpic(bf.raiveba['OHC_ZNVA_RKR'], ['ohc']) ryvs yra(rkgen) == 1: qbpanzr = (rkgen[0]=='ohc' naq 'ohc' be ('ohc-%f' % rkgen[0])) rkr = flf.neti[0] (rkrcngu, rkrsvyr) = bf.cngu.fcyvg(rkr) znacngu = bf.cngu.wbva(rkrcngu, '../Qbphzragngvba/' + qbpanzr + '.[1-9]') t = tybo.tybo(znacngu) vs t: bf.rkrpic('zna', ['zna', '-y', t[0]]) ryfr: bf.rkrpic('zna', ['zna', qbpanzr]) ryfr: b.sngny("rknpgyl bar pbzznaq anzr rkcrpgrq") #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng, reeab, shfr, er, gvzr, grzcsvyr sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * pynff Fgng(shfr.Fgng): qrs __vavg__(frys): frys.fg_zbqr = 0 frys.fg_vab = 0 frys.fg_qri = 0 frys.fg_ayvax = 0 frys.fg_hvq = 0 frys.fg_tvq = 0 frys.fg_fvmr = 0 frys.fg_ngvzr = 0 frys.fg_zgvzr = 0 frys.fg_pgvzr = 0 frys.fg_oybpxf = 0 frys.fg_oyxfvmr = 0 frys.fg_eqri = 0 pnpur = {} qrs pnpur_trg(gbc, cngu): cnegf = cngu.fcyvg('/') pnpur[('',)] = gbc p = Abar znk = yra(cnegf) #ybt('pnpur: %e\a' % pnpur.xrlf()) sbe v va enatr(znk): cer = cnegf[:znk-v] #ybt('pnpur gelvat: %e\a' % cer) p = pnpur.trg(ghcyr(cer)) vs p: erfg = cnegf[znk-v:] sbe e va erfg: #ybt('erfbyivat %e sebz %e\a' % (e, p.shyyanzr())) p = p.yerfbyir(e) xrl = ghcyr(cer + [e]) #ybt('fnivat: %e\a' % (xrl,)) pnpur[xrl] = p oernx nffreg(p) erghea p pynff OhcSf(shfr.Shfr): qrs __vavg__(frys, gbc): shfr.Shfr.__vavg__(frys) frys.gbc = gbc qrs trgngge(frys, cngu): ybt('--trgngge(%e)\a' % cngu) gel: abqr = pnpur_trg(frys.gbc, cngu) fg = Fgng() fg.fg_zbqr = abqr.zbqr fg.fg_ayvax = abqr.ayvaxf() fg.fg_fvmr = abqr.fvmr() fg.fg_zgvzr = abqr.zgvzr fg.fg_pgvzr = abqr.pgvzr fg.fg_ngvzr = abqr.ngvzr erghea fg rkprcg isf.AbFhpuSvyr: erghea -reeab.RABRAG qrs ernqqve(frys, cngu, bssfrg): 
ybt('--ernqqve(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) lvryq shfr.Qveragel('.') lvryq shfr.Qveragel('..') sbe fho va abqr.fhof(): lvryq shfr.Qveragel(fho.anzr) qrs ernqyvax(frys, cngu): ybt('--ernqyvax(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) erghea abqr.ernqyvax() qrs bcra(frys, cngu, syntf): ybt('--bcra(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) nppzbqr = bf.B_EQBAYL | bf.B_JEBAYL | bf.B_EQJE vs (syntf & nppzbqr) != bf.B_EQBAYL: erghea -reeab.RNPPRF abqr.bcra() qrs eryrnfr(frys, cngu, syntf): ybt('--eryrnfr(%e)\a' % cngu) qrs ernq(frys, cngu, fvmr, bssfrg): ybt('--ernq(%e)\a' % cngu) a = pnpur_trg(frys.gbc, cngu) b = a.bcra() b.frrx(bssfrg) erghea b.ernq(fvmr) vs abg unfngge(shfr, '__irefvba__'): envfr EhagvzrReebe, "lbhe shfr zbqhyr vf gbb byq sbe shfr.__irefvba__" shfr.shfr_clguba_ncv = (0, 2) bcgfcrp = """ ohc shfr [-q] [-s] -- q,qroht vapernfr qroht yriry s,sbertebhaq eha va sbertebhaq """ b = bcgvbaf.Bcgvbaf('ohc shfr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) s = OhcSf(gbc) s.shfr_netf.zbhagcbvag = rkgen[0] vs bcg.qroht: s.shfr_netf.nqq('qroht') vs bcg.sbertebhaq: s.shfr_netf.frgzbq('sbertebhaq') cevag s.zhygvguernqrq s.zhygvguernqrq = Snyfr s.znva() #!/hfe/ova/rai clguba sebz ohc vzcbeg tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ [OHC_QVE=...] ohc vavg [-e ubfg:cngu] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc vavg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") vs bcg.erzbgr: tvg.vavg_ercb() # ybpny ercb tvg.purpx_ercb_be_qvr() pyv = pyvrag.Pyvrag(bcg.erzbgr, perngr=Gehr) pyv.pybfr() ryfr: tvg.vavg_ercb() #!/hfe/ova/rai clguba vzcbeg flf, zngu, fgehpg, tybo sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * CNTR_FVMR=4096 FUN_CRE_CNTR=CNTR_FVMR/200. 
qrs zretr(vqkyvfg, ovgf, gnoyr): pbhag = 0 sbe r va tvg.vqkzretr(vqkyvfg): pbhag += 1 cersvk = tvg.rkgenpg_ovgf(r, ovgf) gnoyr[cersvk] = pbhag lvryq r qrs qb_zvqk(bhgqve, bhgsvyranzr, vasvyranzrf): vs abg bhgsvyranzr: nffreg(bhgqve) fhz = Fun1('\0'.wbva(vasvyranzrf)).urkqvtrfg() bhgsvyranzr = '%f/zvqk-%f.zvqk' % (bhgqve, fhz) vac = [] gbgny = 0 sbe anzr va vasvyranzrf: vk = tvg.CnpxVqk(anzr) vac.nccraq(vk) gbgny += yra(vk) ybt('Zretvat %q vaqrkrf (%q bowrpgf).\a' % (yra(vasvyranzrf), gbgny)) vs (abg bcg.sbepr naq (gbgny < 1024 naq yra(vasvyranzrf) < 3)) \ be (bcg.sbepr naq abg gbgny): ybt('zvqk: abguvat gb qb.\a') erghea cntrf = vag(gbgny/FUN_CRE_CNTR) be 1 ovgf = vag(zngu.prvy(zngu.ybt(cntrf, 2))) ragevrf = 2**ovgf ybt('Gnoyr fvmr: %q (%q ovgf)\a' % (ragevrf*4, ovgf)) gnoyr = [0]*ragevrf gel: bf.hayvax(bhgsvyranzr) rkprcg BFReebe: cnff s = bcra(bhgsvyranzr + '.gzc', 'j+') s.jevgr('ZVQK\0\0\0\2') s.jevgr(fgehpg.cnpx('!V', ovgf)) nffreg(s.gryy() == 12) s.jevgr('\0'*4*ragevrf) sbe r va zretr(vac, ovgf, gnoyr): s.jevgr(r) s.jevgr('\0'.wbva(bf.cngu.onfranzr(c) sbe c va vasvyranzrf)) s.frrx(12) s.jevgr(fgehpg.cnpx('!%qV' % ragevrf, *gnoyr)) s.pybfr() bf.eranzr(bhgsvyranzr + '.gzc', bhgsvyranzr) # guvf vf whfg sbe grfgvat vs 0: c = tvg.CnpxZvqk(bhgsvyranzr) nffreg(yra(c.vqkanzrf) == yra(vasvyranzrf)) cevag c.vqkanzrf nffreg(yra(c) == gbgny) cv = vgre(c) sbe v va zretr(vac, gbgny, ovgf, gnoyr): nffreg(v == cv.arkg()) nffreg(c.rkvfgf(v)) cevag bhgsvyranzr bcgfcrp = """ ohc zvqk [bcgvbaf...] 
-- b,bhgchg= bhgchg zvqk svyranzr (qrsnhyg: nhgb-trarengrq) n,nhgb nhgbzngvpnyyl perngr .zvqk sebz nal havaqrkrq .vqk svyrf s,sbepr nhgbzngvpnyyl perngr .zvqk sebz *nyy* .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zvqk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen naq (bcg.nhgb be bcg.sbepr): b.sngny("lbh pna'g hfr -s/-n naq nyfb cebivqr svyranzrf") tvg.purpx_ercb_be_qvr() vs rkgen: qb_zvqk(tvg.ercb('bowrpgf/cnpx'), bcg.bhgchg, rkgen) ryvs bcg.nhgb be bcg.sbepr: cnguf = [tvg.ercb('bowrpgf/cnpx')] cnguf += tybo.tybo(tvg.ercb('vaqrk-pnpur/*/.')) sbe cngu va cnguf: ybt('zvqk: fpnaavat %f\a' % cngu) vs bcg.sbepr: qb_zvqk(cngu, bcg.bhgchg, tybo.tybo('%f/*.vqk' % cngu)) ryvs bcg.nhgb: z = tvg.CnpxVqkYvfg(cngu) arrqrq = {} sbe cnpx va z.cnpxf: # bayl .vqk svyrf jvgubhg n .zvqk ner bcra vs cnpx.anzr.raqfjvgu('.vqk'): arrqrq[cnpx.anzr] = 1 qry z qb_zvqk(cngu, bcg.bhgchg, arrqrq.xrlf()) ybt('\a') ryfr: b.sngny("lbh zhfg hfr -s be -n be cebivqr vachg svyranzrf") #!/hfe/ova/rai clguba vzcbeg flf, bf, enaqbz sebz ohc vzcbeg bcgvbaf sebz ohc.urycref vzcbeg * qrs enaqoybpx(a): y = [] sbe v va kenatr(a): y.nccraq(pue(enaqbz.enaqenatr(0,256))) erghea ''.wbva(y) bcgfcrp = """ ohc qnzntr [-a pbhag] [-f znkfvmr] [-F frrq] -- JNEAVAT: GUVF PBZZNAQ VF RKGERZRYL QNATREBHF a,ahz= ahzore bs oybpxf gb qnzntr f,fvmr= znkvzhz fvmr bs rnpu qnzntrq oybpx creprag= znkvzhz fvmr bs rnpu qnzntrq oybpx (nf n creprag bs ragver svyr) rdhny fcernq qnzntr rirayl guebhtubhg gur svyr F,frrq= enaqbz ahzore frrq (sbe ercrngnoyr grfgf) """ b = bcgvbaf.Bcgvbaf('ohc qnzntr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg rkgen: b.sngny('svyranzrf rkcrpgrq') vs bcg.frrq != Abar: enaqbz.frrq(bcg.frrq) sbe anzr va rkgen: ybt('Qnzntvat "%f"...\a' % anzr) s = bcra(anzr, 'e+o') fg = bf.sfgng(s.svyrab()) fvmr = fg.fg_fvmr vs bcg.creprag be bcg.fvmr: zf1 = vag(sybng(bcg.creprag be 0)/100.0*fvmr) be fvmr zf2 = bcg.fvmr be fvmr znkfvmr = zva(zf1, zf2) ryfr: znkfvmr = 1 puhaxf = bcg.ahz be 10 
puhaxfvmr = fvmr/puhaxf sbe e va enatr(puhaxf): fm = enaqbz.enaqenatr(1, znkfvmr+1) vs fm > fvmr: fm = fvmr vs bcg.rdhny: bsf = e*puhaxfvmr ryfr: bsf = enaqbz.enaqenatr(0, fvmr - fm + 1) ybt(' %6q olgrf ng %q\a' % (fm, bsf)) s.frrx(bsf) s.jevgr(enaqoybpx(fm)) s.pybfr() #!/hfe/ova/rai clguba vzcbeg flf, fgehpg, zznc sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * fhfcraqrq_j = Abar qrs vavg_qve(pbaa, net): tvg.vavg_ercb(net) ybt('ohc freire: ohcqve vavgvnyvmrq: %e\a' % tvg.ercbqve) pbaa.bx() qrs frg_qve(pbaa, net): tvg.purpx_ercb_be_qvr(net) ybt('ohc freire: ohcqve vf %e\a' % tvg.ercbqve) pbaa.bx() qrs yvfg_vaqrkrf(pbaa, whax): tvg.purpx_ercb_be_qvr() sbe s va bf.yvfgqve(tvg.ercb('bowrpgf/cnpx')): vs s.raqfjvgu('.vqk'): pbaa.jevgr('%f\a' % s) pbaa.bx() qrs fraq_vaqrk(pbaa, anzr): tvg.purpx_ercb_be_qvr() nffreg(anzr.svaq('/') < 0) nffreg(anzr.raqfjvgu('.vqk')) vqk = tvg.CnpxVqk(tvg.ercb('bowrpgf/cnpx/%f' % anzr)) pbaa.jevgr(fgehpg.cnpx('!V', yra(vqk.znc))) pbaa.jevgr(vqk.znc) pbaa.bx() qrs erprvir_bowrpgf(pbaa, whax): tybony fhfcraqrq_j tvg.purpx_ercb_be_qvr() fhttrfgrq = {} vs fhfcraqrq_j: j = fhfcraqrq_j fhfcraqrq_j = Abar ryfr: j = tvg.CnpxJevgre() juvyr 1: af = pbaa.ernq(4) vs abg af: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq yratgu urnqre, tbg RBS\a') a = fgehpg.hacnpx('!V', af)[0] #ybt('rkcrpgvat %q olgrf\a' % a) vs abg a: ybt('ohc freire: erprvirq %q bowrpg%f.\a' % (j.pbhag, j.pbhag!=1 naq "f" be '')) shyycngu = j.pybfr() vs shyycngu: (qve, anzr) = bf.cngu.fcyvg(shyycngu) pbaa.jevgr('%f.vqk\a' % anzr) pbaa.bx() erghea ryvs a == 0kssssssss: ybt('ohc freire: erprvir-bowrpgf fhfcraqrq.\a') fhfcraqrq_j = j pbaa.bx() erghea ohs = pbaa.ernq(a) # bowrpg fvmrf va ohc ner ernfbanoyl fznyy #ybt('ernq %q olgrf\a' % a) vs yra(ohs) < a: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq %q olgrf, tbg %q\a' % (a, yra(ohs))) (glcr, pbagrag) = tvg._qrpbqr_cnpxbow(ohs) fun = tvg.pnyp_unfu(glcr, pbagrag) byqcnpx = j.rkvfgf(fun) # SVKZR: jr bayl fhttrfg n fvatyr 
vaqrk cre plpyr, orpnhfr gur pyvrag # vf pheeragyl qhzo gb qbjaybnq zber guna bar cre plpyr naljnl. # Npghnyyl jr fubhyq svk gur pyvrag, ohg guvf vf n zvabe bcgvzvmngvba # ba gur freire fvqr. vs abg fhttrfgrq naq \ byqcnpx naq (byqcnpx == Gehr be byqcnpx.raqfjvgu('.zvqk')): # SVKZR: jr fubhyqa'g ernyyl unir gb xabj nobhg zvqk svyrf # ng guvf ynlre. Ohg rkvfgf() ba n zvqk qbrfa'g erghea gur # cnpxanzr (fvapr vg qbrfa'g xabj)... cebonoyl jr fubhyq whfg # svk gung qrsvpvrapl bs zvqk svyrf riraghnyyl, nygubhtu vg'yy # znxr gur svyrf ovttre. Guvf zrgubq vf pregnvayl abg irel # rssvpvrag. j.bowpnpur.erserfu(fxvc_zvqk = Gehr) byqcnpx = j.bowpnpur.rkvfgf(fun) ybt('arj fhttrfgvba: %e\a' % byqcnpx) nffreg(byqcnpx) nffreg(byqcnpx != Gehr) nffreg(abg byqcnpx.raqfjvgu('.zvqk')) j.bowpnpur.erserfu(fxvc_zvqk = Snyfr) vs abg fhttrfgrq naq byqcnpx: nffreg(byqcnpx.raqfjvgu('.vqk')) (qve,anzr) = bf.cngu.fcyvg(byqcnpx) vs abg (anzr va fhttrfgrq): ybt("ohc freire: fhttrfgvat vaqrk %f\a" % anzr) pbaa.jevgr('vaqrk %f\a' % anzr) fhttrfgrq[anzr] = 1 ryfr: j._enj_jevgr([ohs]) # ABGERNPURQ qrs ernq_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() e = tvg.ernq_ers(ersanzr) pbaa.jevgr('%f\a' % (e be '').rapbqr('urk')) pbaa.bx() qrs hcqngr_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() arjiny = pbaa.ernqyvar().fgevc() byqiny = pbaa.ernqyvar().fgevc() tvg.hcqngr_ers(ersanzr, arjiny.qrpbqr('urk'), byqiny.qrpbqr('urk')) pbaa.bx() qrs png(pbaa, vq): tvg.purpx_ercb_be_qvr() gel: sbe oybo va tvg.png(vq): pbaa.jevgr(fgehpg.cnpx('!V', yra(oybo))) pbaa.jevgr(oybo) rkprcg XrlReebe, r: ybt('freire: reebe: %f\a' % r) pbaa.jevgr('\0\0\0\0') pbaa.reebe(r) ryfr: pbaa.jevgr('\0\0\0\0') pbaa.bx() bcgfcrp = """ ohc freire """ b = bcgvbaf.Bcgvbaf('ohc freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') ybt('ohc freire: ernqvat sebz fgqva.\a') pbzznaqf = { 'vavg-qve': vavg_qve, 'frg-qve': frg_qve, 'yvfg-vaqrkrf': yvfg_vaqrkrf, 'fraq-vaqrk': fraq_vaqrk, 
'erprvir-bowrpgf': erprvir_bowrpgf, 'ernq-ers': ernq_ers, 'hcqngr-ers': hcqngr_ers, 'png': png, } # SVKZR: guvf cebgbpby vf gbgnyyl ynzr naq abg ng nyy shgher-cebbs. # (Rfcrpvnyyl fvapr jr nobeg pbzcyrgryl nf fbba nf *nalguvat* onq unccraf) pbaa = Pbaa(flf.fgqva, flf.fgqbhg) ye = yvarernqre(pbaa) sbe _yvar va ye: yvar = _yvar.fgevc() vs abg yvar: pbagvahr ybt('ohc freire: pbzznaq: %e\a' % yvar) jbeqf = yvar.fcyvg(' ', 1) pzq = jbeqf[0] erfg = yra(jbeqf)>1 naq jbeqf[1] be '' vs pzq == 'dhvg': oernx ryfr: pzq = pbzznaqf.trg(pzq) vs pzq: pzq(pbaa, erfg) ryfr: envfr Rkprcgvba('haxabja freire pbzznaq: %e\a' % yvar) ybt('ohc freire: qbar\a') #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc wbva [-e ubfg:cngu] [ersf be unfurf...] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc wbva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg rkgen: rkgen = yvarernqre(flf.fgqva) erg = 0 vs bcg.erzbgr: pyv = pyvrag.Pyvrag(bcg.erzbgr) png = pyv.png ryfr: pc = tvg.PngCvcr() png = pc.wbva sbe vq va rkgen: gel: sbe oybo va png(vq): flf.fgqbhg.jevgr(oybo) rkprcg XrlReebe, r: flf.fgqbhg.syhfu() ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, er, reeab, fgng, gvzr, zngu sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, vaqrk, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc fnir [-gp] [-a anzr] -- e,erzbgr= erzbgr ercbfvgbel cngu g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) d,dhvrg qba'g fubj cebterff zrgre fznyyre= bayl onpx hc svyrf fznyyre guna a olgrf """ b = bcgvbaf.Bcgvbaf('ohc fnir', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny("hfr bar be zber bs -g, -p, -a") vs abg rkgen: b.sngny("ab svyranzrf 
tvira") bcg.cebterff = (vfggl naq abg bcg.dhvrg) bcg.fznyyre = cnefr_ahz(bcg.fznyyre be 0) vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() unaqyr_pgey_p() qrs rngfynfu(qve): vs qve.raqfjvgu('/'): erghea qve[:-1] ryfr: erghea qve cnegf = [''] funyvfgf = [[]] qrs _chfu(cneg): nffreg(cneg) cnegf.nccraq(cneg) funyvfgf.nccraq([]) qrs _cbc(sbepr_gerr): nffreg(yra(cnegf) >= 1) cneg = cnegf.cbc() funyvfg = funyvfgf.cbc() gerr = sbepr_gerr be j.arj_gerr(funyvfg) vs funyvfgf: funyvfgf[-1].nccraq(('40000', cneg, gerr)) ryfr: # guvf jnf gur gbcyriry, fb chg vg onpx sbe fnavgl funyvfgf.nccraq(funyvfg) erghea gerr ynfgerznva = Abar qrs cebterff_ercbeg(a): tybony pbhag, fhopbhag, ynfgerznva fhopbhag += a pp = pbhag + fhopbhag cpg = gbgny naq (pp*100.0/gbgny) be 0 abj = gvzr.gvzr() ryncfrq = abj - gfgneg xcf = ryncfrq naq vag(pp/1024./ryncfrq) xcf_senp = 10 ** vag(zngu.ybt(xcf+1, 10) - 1) xcf = vag(xcf/xcf_senp)*xcf_senp vs pp: erznva = ryncfrq*1.0/pp * (gbgny-pp) ryfr: erznva = 0.0 vs (ynfgerznva naq (erznva > ynfgerznva) naq ((erznva - ynfgerznva)/ynfgerznva < 0.05)): erznva = ynfgerznva ryfr: ynfgerznva = erznva ubhef = vag(erznva/60/60) zvaf = vag(erznva/60 - ubhef*60) frpf = vag(erznva - ubhef*60*60 - zvaf*60) vs ryncfrq < 30: erznvafge = '' xcffge = '' ryfr: xcffge = '%qx/f' % xcf vs ubhef: erznvafge = '%qu%qz' % (ubhef, zvaf) ryvs zvaf: erznvafge = '%qz%q' % (zvaf, frpf) ryfr: erznvafge = '%qf' % frpf cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf) %f %f\e' % (cpg, pp/1024, gbgny/1024, spbhag, sgbgny, erznvafge, xcffge)) e = vaqrk.Ernqre(tvg.ercb('ohcvaqrk')) qrs nyernql_fnirq(rag): erghea rag.vf_inyvq() naq 
j.rkvfgf(rag.fun) naq rag.fun qrs jnagerphefr_cer(rag): erghea abg nyernql_fnirq(rag) qrs jnagerphefr_qhevat(rag): erghea abg nyernql_fnirq(rag) be rag.fun_zvffvat() gbgny = sgbgny = 0 vs bcg.cebterff: sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_cer): vs abg (sgbgny % 10024): cebterff('Ernqvat vaqrk: %q\e' % sgbgny) rkvfgf = rag.rkvfgf() unfuinyvq = nyernql_fnirq(rag) rag.frg_fun_zvffvat(abg unfuinyvq) vs abg bcg.fznyyre be rag.fvmr < bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: gbgny += rag.fvmr sgbgny += 1 cebterff('Ernqvat vaqrk: %q, qbar.\a' % sgbgny) unfufcyvg.cebterff_pnyyonpx = cebterff_ercbeg gfgneg = gvzr.gvzr() pbhag = fhopbhag = spbhag = 0 ynfgfxvc_anzr = Abar ynfgqve = '' sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_qhevat): (qve, svyr) = bf.cngu.fcyvg(rag.anzr) rkvfgf = (rag.syntf & vaqrk.VK_RKVFGF) unfuinyvq = nyernql_fnirq(rag) jnfzvffvat = rag.fun_zvffvat() byqfvmr = rag.fvmr vs bcg.ireobfr: vs abg rkvfgf: fgnghf = 'Q' ryvs abg unfuinyvq: vs rag.fun == vaqrk.RZCGL_FUN: fgnghf = 'N' ryfr: fgnghf = 'Z' ryfr: fgnghf = ' ' vs bcg.ireobfr >= 2: ybt('%f %-70f\a' % (fgnghf, rag.anzr)) ryvs abg fgng.F_VFQVE(rag.zbqr) naq ynfgqve != qve: vs abg ynfgqve.fgnegfjvgu(qve): ybt('%f %-70f\a' % (fgnghf, bf.cngu.wbva(qve, ''))) ynfgqve = qve vs bcg.cebterff: cebterff_ercbeg(0) spbhag += 1 vs abg rkvfgf: pbagvahr vs bcg.fznyyre naq rag.fvmr >= bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: nqq_reebe('fxvccvat ynetr svyr "%f"' % rag.anzr) ynfgfxvc_anzr = rag.anzr pbagvahr nffreg(qve.fgnegfjvgu('/')) qvec = qve.fcyvg('/') juvyr cnegf > qvec: _cbc(sbepr_gerr = Abar) vs qve != '/': sbe cneg va qvec[yra(cnegf):]: _chfu(cneg) vs abg svyr: # ab svyranzr cbegvba zrnaf guvf vf n fhoqve. Ohg # fho/cneragqverpgbevrf nyernql unaqyrq va gur cbc/chfu() cneg nobir. 
byqgerr = nyernql_fnirq(rag) # znl or Abar arjgerr = _cbc(sbepr_gerr = byqgerr) vs abg byqgerr: vs ynfgfxvc_anzr naq ynfgfxvc_anzr.fgnegfjvgu(rag.anzr): rag.vainyvqngr() ryfr: rag.inyvqngr(040000, arjgerr) rag.ercnpx() vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr pbagvahr # vg'f abg n qverpgbel vq = Abar vs unfuinyvq: zbqr = '%b' % rag.tvgzbqr vq = rag.fun funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) ryfr: vs fgng.F_VFERT(rag.zbqr): gel: s = unfufcyvg.bcra_abngvzr(rag.anzr) rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = unfufcyvg.fcyvg_gb_oybo_be_gerr(j, [s]) ryfr: vs fgng.F_VFQVE(rag.zbqr): nffreg(0) # unaqyrq nobir ryvs fgng.F_VFYAX(rag.zbqr): gel: ey = bf.ernqyvax(rag.anzr) rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = ('120000', j.arj_oybo(ey)) ryfr: nqq_reebe(Rkprcgvba('fxvccvat fcrpvny svyr "%f"' % rag.anzr)) ynfgfxvc_anzr = rag.anzr vs vq: rag.inyvqngr(vag(zbqr, 8), vq) rag.ercnpx() funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr fhopbhag = 0 vs bcg.cebterff: cpg = gbgny naq pbhag*100.0/gbgny be 100 cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf), qbar. 
\a' % (cpg, pbhag/1024, gbgny/1024, spbhag, sgbgny)) juvyr yra(cnegf) > 1: _cbc(sbepr_gerr = Abar) nffreg(yra(funyvfgf) == 1) gerr = j.arj_gerr(funyvfgf[-1]) vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fnir\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq juvyr fnivat.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc gvpx """ b = bcgvbaf.Bcgvbaf('ohc gvpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") g = gvzr.gvzr() gyrsg = 1 - (g - vag(g)) gvzr.fyrrc(gyrsg) #!/hfe/ova/rai clguba vzcbeg bf, flf, fgng, gvzr sebz ohc vzcbeg bcgvbaf, tvg, vaqrk, qerphefr sebz ohc.urycref vzcbeg * qrs zretr_vaqrkrf(bhg, e1, e2): sbe r va vaqrk.ZretrVgre([e1, e2]): # SVKZR: fubhyqa'g jr erzbir qryrgrq ragevrf riraghnyyl? Jura? 
bhg.nqq_vkragel(r) pynff VgreUrycre: qrs __vavg__(frys, y): frys.v = vgre(y) frys.phe = Abar frys.arkg() qrs arkg(frys): gel: frys.phe = frys.v.arkg() rkprcg FgbcVgrengvba: frys.phe = Abar erghea frys.phe qrs purpx_vaqrk(ernqre): gel: ybt('purpx: purpxvat sbejneq vgrengvba...\a') r = Abar q = {} sbe r va ernqre.sbejneq_vgre(): vs r.puvyqera_a: vs bcg.ireobfr: ybt('%08k+%-4q %e\a' % (r.puvyqera_bsf, r.puvyqera_a, r.anzr)) nffreg(r.puvyqera_bsf) nffreg(r.anzr.raqfjvgu('/')) nffreg(abg q.trg(r.puvyqera_bsf)) q[r.puvyqera_bsf] = 1 vs r.syntf & vaqrk.VK_UNFUINYVQ: nffreg(r.fun != vaqrk.RZCGL_FUN) nffreg(r.tvgzbqr) nffreg(abg r be r.anzr == '/') # ynfg ragel vf *nyjnlf* / ybt('purpx: purpxvat abezny vgrengvba...\a') ynfg = Abar sbe r va ernqre: vs ynfg: nffreg(ynfg > r.anzr) ynfg = r.anzr rkprcg: ybt('vaqrk reebe! ng %e\a' % r) envfr ybt('purpx: cnffrq.\a') qrs hcqngr_vaqrk(gbc): ev = vaqrk.Ernqre(vaqrksvyr) jv = vaqrk.Jevgre(vaqrksvyr) evt = VgreUrycre(ev.vgre(anzr=gbc)) gfgneg = vag(gvzr.gvzr()) unfutra = Abar vs bcg.snxr_inyvq: qrs unfutra(anzr): erghea (0100644, vaqrk.SNXR_FUN) gbgny = 0 sbe (cngu,cfg) va qerphefr.erphefvir_qveyvfg([gbc], kqri=bcg.kqri): vs bcg.ireobfr>=2 be (bcg.ireobfr==1 naq fgng.F_VFQVE(cfg.fg_zbqr)): flf.fgqbhg.jevgr('%f\a' % cngu) flf.fgqbhg.syhfu() cebterff('Vaqrkvat: %q\e' % gbgny) ryvs abg (gbgny % 128): cebterff('Vaqrkvat: %q\e' % gbgny) gbgny += 1 juvyr evt.phe naq evt.phe.anzr > cngu: # qryrgrq cnguf vs evt.phe.rkvfgf(): evt.phe.frg_qryrgrq() evt.phe.ercnpx() evt.arkg() vs evt.phe naq evt.phe.anzr == cngu: # cnguf gung nyernql rkvfgrq vs cfg: evt.phe.sebz_fgng(cfg, gfgneg) vs abg (evt.phe.syntf & vaqrk.VK_UNFUINYVQ): vs unfutra: (evt.phe.tvgzbqr, evt.phe.fun) = unfutra(cngu) evt.phe.syntf |= vaqrk.VK_UNFUINYVQ vs bcg.snxr_vainyvq: evt.phe.vainyvqngr() evt.phe.ercnpx() evt.arkg() ryfr: # arj cnguf jv.nqq(cngu, cfg, unfutra = unfutra) cebterff('Vaqrkvat: %q, qbar.\a' % gbgny) vs ev.rkvfgf(): ev.fnir() jv.syhfu() vs jv.pbhag: je = 
jv.arj_ernqre() vs bcg.purpx: ybt('purpx: orsber zretvat: byqsvyr\a') purpx_vaqrk(ev) ybt('purpx: orsber zretvat: arjsvyr\a') purpx_vaqrk(je) zv = vaqrk.Jevgre(vaqrksvyr) zretr_vaqrkrf(zv, ev, je) ev.pybfr() zv.pybfr() je.pybfr() jv.nobeg() ryfr: jv.pybfr() bcgfcrp = """ ohc vaqrk <-c|z|h> [bcgvbaf...] -- c,cevag cevag gur vaqrk ragevrf sbe gur tvira anzrf (nyfb jbexf jvgu -h) z,zbqvsvrq cevag bayl nqqrq/qryrgrq/zbqvsvrq svyrf (vzcyvrf -c) f,fgnghf cevag rnpu svyranzr jvgu n fgnghf pune (N/Z/Q) (vzcyvrf -c) U,unfu cevag gur unfu sbe rnpu bowrpg arkg gb vgf anzr (vzcyvrf -c) y,ybat cevag zber vasbezngvba nobhg rnpu svyr h,hcqngr (erphefviryl) hcqngr gur vaqrk ragevrf sbe gur tvira svyranzrf k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf snxr-inyvq znex nyy vaqrk ragevrf nf hc-gb-qngr rira vs gurl nera'g snxr-vainyvq znex nyy vaqrk ragevrf nf vainyvq purpx pnershyyl purpx vaqrk svyr vagrtevgl s,vaqrksvyr= gur anzr bs gur vaqrk svyr (qrsnhyg 'vaqrk') i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) """ b = bcgvbaf.Bcgvbaf('ohc vaqrk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg (bcg.zbqvsvrq be bcg['cevag'] be bcg.fgnghf be bcg.hcqngr be bcg.purpx): b.sngny('fhccyl bar be zber bs -c, -f, -z, -h, be --purpx') vs (bcg.snxr_inyvq be bcg.snxr_vainyvq) naq abg bcg.hcqngr: b.sngny('--snxr-{va,}inyvq ner zrnavatyrff jvgubhg -h') vs bcg.snxr_inyvq naq bcg.snxr_vainyvq: b.sngny('--snxr-inyvq vf vapbzcngvoyr jvgu --snxr-vainyvq') tvg.purpx_ercb_be_qvr() vaqrksvyr = bcg.vaqrksvyr be tvg.ercb('ohcvaqrk') unaqyr_pgey_p() vs bcg.purpx: ybt('purpx: fgnegvat vavgvny purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) cnguf = vaqrk.erqhpr_cnguf(rkgen) vs bcg.hcqngr: vs abg cnguf: b.sngny('hcqngr (-h) erdhrfgrq ohg ab cnguf tvira') sbe (ec,cngu) va cnguf: hcqngr_vaqrk(ec) vs bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq: sbe (anzr, rag) va vaqrk.Ernqre(vaqrksvyr).svygre(rkgen be ['']): vs (bcg.zbqvsvrq naq (rag.vf_inyvq() be rag.vf_qryrgrq() be abg 
rag.zbqr)): pbagvahr yvar = '' vs bcg.fgnghf: vs rag.vf_qryrgrq(): yvar += 'Q ' ryvs abg rag.vf_inyvq(): vs rag.fun == vaqrk.RZCGL_FUN: yvar += 'N ' ryfr: yvar += 'Z ' ryfr: yvar += ' ' vs bcg.unfu: yvar += rag.fun.rapbqr('urk') + ' ' vs bcg.ybat: yvar += "%7f %7f " % (bpg(rag.zbqr), bpg(rag.tvgzbqr)) cevag yvar + (anzr be './') vs bcg.purpx naq (bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq be bcg.hcqngr): ybt('purpx: fgnegvat svany purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg sebz ohc vzcbeg bcgvbaf, urycref bcgfcrp = """ ohc eonpxhc-freire -- Guvf pbzznaq vf abg vagraqrq gb or eha znahnyyl. """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc-freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') # trg gur fhopbzznaq'f neti. # Abeznyyl jr pbhyq whfg cnff guvf ba gur pbzznaq yvar, ohg fvapr jr'yy bsgra # or trggvat pnyyrq ba gur bgure raq bs na ffu cvcr, juvpu graqf gb znatyr # neti (ol fraqvat vg ivn gur furyy), guvf jnl vf zhpu fnsre. ohs = flf.fgqva.ernq(4) fm = fgehpg.hacnpx('!V', ohs)[0] nffreg(fm > 0) nffreg(fm < 1000000) ohs = flf.fgqva.ernq(fm) nffreg(yra(ohs) == fm) neti = ohs.fcyvg('\0') # fgqva/fgqbhg ner fhccbfrqyl pbaarpgrq gb 'ohc freire' gung gur pnyyre # fgnegrq sbe hf (bsgra ba gur bgure raq bs na ffu ghaary), fb jr qba'g jnag # gb zvfhfr gurz. Zbir gurz bhg bs gur jnl, gura ercynpr fgqbhg jvgu # n cbvagre gb fgqree va pnfr bhe fhopbzznaq jnagf gb qb fbzrguvat jvgu vg. # # Vg zvtug or avpr gb qb gur fnzr jvgu fgqva, ohg zl rkcrevzragf fubjrq gung # ffu frrzf gb znxr vgf puvyq'f fgqree n ernqnoyr-ohg-arire-ernqf-nalguvat # fbpxrg. Gurl ernyyl fubhyq unir hfrq fuhgqbja(FUHG_JE) ba gur bgure raq # bs vg, ohg cebonoyl qvqa'g. Naljnl, vg'f gbb zrffl, fb yrg'f whfg znxr fher # nalbar ernqvat sebz fgqva vf qvfnccbvagrq. 
# # (Lbh pna'g whfg yrnir fgqva/fgqbhg "abg bcra" ol pybfvat gur svyr # qrfpevcgbef. Gura gur arkg svyr gung bcraf vf nhgbzngvpnyyl nffvtarq 0 be 1, # naq crbcyr *gelvat* gb ernq/jevgr fgqva/fgqbhg trg fperjrq.) bf.qhc2(0, 3) bf.qhc2(1, 4) bf.qhc2(2, 1) sq = bf.bcra('/qri/ahyy', bf.B_EQBAYL) bf.qhc2(sq, 0) bf.pybfr(sq) bf.raiveba['OHC_FREIRE_ERIREFR'] = urycref.ubfganzr() bf.rkrpic(neti[0], neti) flf.rkvg(99) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo, fhocebprff, gvzr sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * cne2_bx = 0 ahyys = bcra('/qri/ahyy') qrs qroht(f): vs bcg.ireobfr: ybt(f) qrs eha(neti): # ng yrnfg va clguba 2.5, hfvat "fgqbhg=2" be "fgqbhg=flf.fgqree" orybj # qbrfa'g npghnyyl jbex, orpnhfr fhocebprff pybfrf sq #2 evtug orsber # rkrpvat sbe fbzr ernfba. Fb jr jbex nebhaq vg ol qhcyvpngvat gur sq # svefg. sq = bf.qhc(2) # pbcl fgqree gel: c = fhocebprff.Cbcra(neti, fgqbhg=sq, pybfr_sqf=Snyfr) erghea c.jnvg() svanyyl: bf.pybfr(sq) qrs cne2_frghc(): tybony cne2_bx ei = 1 gel: c = fhocebprff.Cbcra(['cne2', '--uryc'], fgqbhg=ahyys, fgqree=ahyys, fgqva=ahyys) ei = c.jnvg() rkprcg BFReebe: ybt('sfpx: jneavat: cne2 abg sbhaq; qvfnoyvat erpbirel srngherf.\a') ryfr: cne2_bx = 1 qrs cnei(yiy): vs bcg.ireobfr >= yiy: vs vfggl: erghea [] ryfr: erghea ['-d'] ryfr: erghea ['-dd'] qrs cne2_trarengr(onfr): erghea eha(['cne2', 'perngr', '-a1', '-p200'] + cnei(2) + ['--', onfr, onfr+'.cnpx', onfr+'.vqk']) qrs cne2_irevsl(onfr): erghea eha(['cne2', 'irevsl'] + cnei(3) + ['--', onfr]) qrs cne2_ercnve(onfr): erghea eha(['cne2', 'ercnve'] + cnei(2) + ['--', onfr]) qrs dhvpx_irevsl(onfr): s = bcra(onfr + '.cnpx', 'eo') s.frrx(-20, 2) jnagfhz = s.ernq(20) nffreg(yra(jnagfhz) == 20) s.frrx(0) fhz = Fun1() sbe o va puhaxlernqre(s, bf.sfgng(s.svyrab()).fg_fvmr - 20): fhz.hcqngr(o) vs fhz.qvtrfg() != jnagfhz: envfr InyhrReebe('rkcrpgrq %e, tbg %e' % (jnagfhz.rapbqr('urk'), fhz.urkqvtrfg())) qrs tvg_irevsl(onfr): vs bcg.dhvpx: gel: dhvpx_irevsl(onfr) rkprcg Rkprcgvba, r: 
qroht('reebe: %f\a' % r) erghea 1 erghea 0 ryfr: erghea eha(['tvg', 'irevsl-cnpx', '--', onfr]) qrs qb_cnpx(onfr, ynfg): pbqr = 0 vs cne2_bx naq cne2_rkvfgf naq (bcg.ercnve be abg bcg.trarengr): ierfhyg = cne2_irevsl(onfr) vs ierfhyg != 0: vs bcg.ercnve: eerfhyg = cne2_ercnve(onfr) vs eerfhyg != 0: cevag '%f cne2 ercnve: snvyrq (%q)' % (ynfg, eerfhyg) pbqr = eerfhyg ryfr: cevag '%f cne2 ercnve: fhpprrqrq (0)' % ynfg pbqr = 100 ryfr: cevag '%f cne2 irevsl: snvyrq (%q)' % (ynfg, ierfhyg) pbqr = ierfhyg ryfr: cevag '%f bx' % ynfg ryvs abg bcg.trarengr be (cne2_bx naq abg cne2_rkvfgf): terfhyg = tvg_irevsl(onfr) vs terfhyg != 0: cevag '%f tvg irevsl: snvyrq (%q)' % (ynfg, terfhyg) pbqr = terfhyg ryfr: vs cne2_bx naq bcg.trarengr: cerfhyg = cne2_trarengr(onfr) vs cerfhyg != 0: cevag '%f cne2 perngr: snvyrq (%q)' % (ynfg, cerfhyg) pbqr = cerfhyg ryfr: cevag '%f bx' % ynfg ryfr: cevag '%f bx' % ynfg ryfr: nffreg(bcg.trarengr naq (abg cne2_bx be cne2_rkvfgf)) qroht(' fxvccrq: cne2 svyr nyernql trarengrq.\a') erghea pbqr bcgfcrp = """ ohc sfpx [bcgvbaf...] [svyranzrf...] -- e,ercnve nggrzcg gb ercnve reebef hfvat cne2 (qnatrebhf!) 
t,trarengr trarengr nhgb-ercnve vasbezngvba hfvat cne2 i,ireobfr vapernfr ireobfvgl (pna or hfrq zber guna bapr) dhvpx whfg purpx cnpx fun1fhz, qba'g hfr tvg irevsl-cnpx w,wbof= eha 'a' wbof va cnenyyry cne2-bx vzzrqvngryl erghea 0 vs cne2 vf bx, 1 vs abg qvfnoyr-cne2 vtaber cne2 rira vs vg vf ninvynoyr """ b = bcgvbaf.Bcgvbaf('ohc sfpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) cne2_frghc() vs bcg.cne2_bx: vs cne2_bx: flf.rkvg(0) # 'gehr' va fu ryfr: flf.rkvg(1) vs bcg.qvfnoyr_cne2: cne2_bx = 0 tvg.purpx_ercb_be_qvr() vs abg rkgen: qroht('sfpx: Ab svyranzrf tvira: purpxvat nyy cnpxf.\a') rkgen = tybo.tybo(tvg.ercb('bowrpgf/cnpx/*.cnpx')) pbqr = 0 pbhag = 0 bhgfgnaqvat = {} sbe anzr va rkgen: vs anzr.raqfjvgu('.cnpx'): onfr = anzr[:-5] ryvs anzr.raqfjvgu('.vqk'): onfr = anzr[:-4] ryvs anzr.raqfjvgu('.cne2'): onfr = anzr[:-5] ryvs bf.cngu.rkvfgf(anzr + '.cnpx'): onfr = anzr ryfr: envfr Rkprcgvba('%f vf abg n cnpx svyr!' % anzr) (qve,ynfg) = bf.cngu.fcyvg(onfr) cne2_rkvfgf = bf.cngu.rkvfgf(onfr + '.cne2') vs cne2_rkvfgf naq bf.fgng(onfr + '.cne2').fg_fvmr == 0: cne2_rkvfgf = 0 flf.fgqbhg.syhfu() qroht('sfpx: purpxvat %f (%f)\a' % (ynfg, cne2_bx naq cne2_rkvfgf naq 'cne2' be 'tvg')) vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.wbof: ap = qb_cnpx(onfr, ynfg) pbqr = pbqr be ap pbhag += 1 ryfr: juvyr yra(bhgfgnaqvat) >= bcg.wbof: (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 cvq = bf.sbex() vs cvq: # cnerag bhgfgnaqvat[cvq] = 1 ryfr: # puvyq gel: flf.rkvg(qb_cnpx(onfr, ynfg)) rkprcg Rkprcgvba, r: ybt('rkprcgvba: %e\a' % r) flf.rkvg(99) juvyr yra(bhgfgnaqvat): (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.ireobfr naq vfggl: ybt('sfpx qbar. 
\a') flf.rkvg(pbqr) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg, trgbcg, fhocebprff, fvtany sebz ohc vzcbeg bcgvbaf, ffu sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc eonpxhc vaqrk ... ohc eonpxhc fnir ... ohc eonpxhc fcyvg ... """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc', bcgfcrp, bcgshap=trgbcg.trgbcg) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) < 2: b.sngny('nethzragf rkcrpgrq') pynff FvtRkprcgvba(Rkprcgvba): qrs __vavg__(frys, fvtahz): frys.fvtahz = fvtahz Rkprcgvba.__vavg__(frys, 'fvtany %q erprvirq' % fvtahz) qrs unaqyre(fvtahz, senzr): envfr FvtRkprcgvba(fvtahz) fvtany.fvtany(fvtany.FVTGREZ, unaqyre) fvtany.fvtany(fvtany.FVTVAG, unaqyre) fc = Abar c = Abar erg = 99 gel: ubfganzr = rkgen[0] neti = rkgen[1:] c = ffu.pbaarpg(ubfganzr, 'eonpxhc-freire') netif = '\0'.wbva(['ohc'] + neti) c.fgqva.jevgr(fgehpg.cnpx('!V', yra(netif)) + netif) c.fgqva.syhfu() znva_rkr = bf.raiveba.trg('OHC_ZNVA_RKR') be flf.neti[0] fc = fhocebprff.Cbcra([znva_rkr, 'freire'], fgqva=c.fgqbhg, fgqbhg=c.fgqva) c.fgqva.pybfr() c.fgqbhg.pybfr() svanyyl: juvyr 1: # vs jr trg n fvtany juvyr jnvgvat, jr unir gb xrrc jnvgvat, whfg # va pnfr bhe puvyq qbrfa'g qvr. 
gel: erg = c.jnvg() fc.jnvg() oernx rkprcg FvtRkprcgvba, r: ybt('\aohc eonpxhc: %f\a' % r) bf.xvyy(c.cvq, r.fvtahz) erg = 84 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc arjyvare """ b = bcgvbaf.Bcgvbaf('ohc arjyvare', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") e = er.pbzcvyr(e'([\e\a])') ynfgyra = 0 nyy = '' juvyr 1: y = e.fcyvg(nyy, 1) vs yra(y) <= 1: gel: o = bf.ernq(flf.fgqva.svyrab(), 4096) rkprcg XrlobneqVagreehcg: oernx vs abg o: oernx nyy += o ryfr: nffreg(yra(y) == 3) (yvar, fcyvgpune, nyy) = y #fcyvgpune = '\a' flf.fgqbhg.jevgr('%-*f%f' % (ynfgyra, yvar, fcyvgpune)) vs fcyvgpune == '\e': ynfgyra = yra(yvar) ryfr: ynfgyra = 0 flf.fgqbhg.syhfu() vs ynfgyra be nyy: flf.fgqbhg.jevgr('%-*f\a' % (ynfgyra, nyy)) #!/hfe/ova/rai clguba vzcbeg flf sebz ohc vzcbeg bcgvbaf, tvg, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc znetva """ b = bcgvbaf.Bcgvbaf('ohc znetva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") tvg.purpx_ercb_be_qvr() #tvg.vtaber_zvqk = 1 zv = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) ynfg = '\0'*20 ybatzngpu = 0 sbe v va zv: vs v == ynfg: pbagvahr #nffreg(fge(v) >= ynfg) cz = _unfufcyvg.ovgzngpu(ynfg, v) ybatzngpu = znk(ybatzngpu, cz) ynfg = v cevag ybatzngpu bup-0.33.3/wvtest-bash.sh000066400000000000000000000011161454333004200151660ustar00rootroot00000000000000 declare -a _wvbtstack _wvpushcall() { _wvbtstack[${#_wvbtstack[@]}]="$*" } _wvpopcall() { unset _wvbtstack[$((${#_wvbtstack[@]} - 1))] } _wvbacktrace() { local i loc local call=$((${#_wvbtstack[@]} - 1)) for ((i=0; i <= ${#FUNCNAME[@]}; i++)); do local name="${FUNCNAME[$i]}" if test "${name:0:2}" == WV; then loc="${BASH_SOURCE[$i+1]}:${BASH_LINENO[$i]}" echo "called from $loc ${FUNCNAME[$i]} ${_wvbtstack[$call]}" 1>&2 ((call--)) fi done } _wvfind_caller() { WVCALLER_FILE=${BASH_SOURCE[2]} WVCALLER_LINE=${BASH_LINENO[1]} } 
bup-0.33.3/wvtest-bup.sh000066400000000000000000000007121454333004200150400ustar00rootroot00000000000000# Include in your test script like this: # # #!/usr/bin/env bash # . ./wvtest-bup.sh . ./wvtest.sh _wvtop="$(pwd -P)" wvmktempdir () { local script_name="$(basename $0)" mkdir -p "$_wvtop/test/tmp" || exit $? mktemp -d "$_wvtop/test/tmp/$script_name-XXXXXXX" || exit $? } wvmkmountpt () { local script_name="$(basename $0)" mkdir -p "$_wvtop/test/mnt" || exit $? mktemp -d "$_wvtop/test/mnt/$script_name-XXXXXXX" || exit $? } bup-0.33.3/wvtest.sh000066400000000000000000000046241454333004200142620ustar00rootroot00000000000000# # Include this file in your shell script by using: # #!/bin/sh # . ./wvtest.sh # # we don't quote $TEXT in case it contains newlines; newlines # aren't allowed in test output. However, we set -f so that # at least shell glob characters aren't processed. _wvtextclean() { ( set -f; echo $* ) } if [ -n "$BASH_VERSION" ]; then . ./wvtest-bash.sh # This keeps sh from choking on the syntax. else _wvbacktrace() { true; } _wvpushcall() { true; } _wvpopcall() { true; } _wvfind_caller() { WVCALLER_FILE="unknown" WVCALLER_LINE=0 } fi _wvcheck() { local CODE="$1" local TEXT=$(_wvtextclean "$2") local OK=ok if [ "$CODE" -ne 0 ]; then OK=FAILED fi echo "! $WVCALLER_FILE:$WVCALLER_LINE $TEXT $OK" >&2 if [ "$CODE" -ne 0 ]; then _wvbacktrace exit $CODE else return 0 fi } WVPASS() { local TEXT="$*" _wvpushcall "$@" _wvfind_caller if "$@"; then _wvpopcall _wvcheck 0 "$TEXT" return 0 else _wvcheck 1 "$TEXT" # NOTREACHED return 1 fi } WVFAIL() { local TEXT="$*" _wvpushcall "$@" _wvfind_caller if "$@"; then _wvcheck 1 "NOT($TEXT)" # NOTREACHED return 1 else _wvcheck 0 "NOT($TEXT)" _wvpopcall return 0 fi } _wvgetrv() { ( "$@" >&2 ) echo -n $? 
} WVPASSEQ() { _wvpushcall "$@" _wvfind_caller _wvcheck $(_wvgetrv [ "$#" -eq 2 ]) "exactly 2 arguments" echo "Comparing:" >&2 echo "$1" >&2 echo "--" >&2 echo "$2" >&2 _wvcheck $(_wvgetrv [ "$1" = "$2" ]) "'$1' = '$2'" _wvpopcall } WVPASSNE() { _wvpushcall "$@" _wvfind_caller _wvcheck $(_wvgetrv [ "$#" -eq 2 ]) "exactly 2 arguments" echo "Comparing:" >&2 echo "$1" >&2 echo "--" >&2 echo "$2" >&2 _wvcheck $(_wvgetrv [ "$1" != "$2" ]) "'$1' != '$2'" _wvpopcall } WVPASSRC() { local RC=$? _wvpushcall "$@" _wvfind_caller _wvcheck $(_wvgetrv [ $RC -eq 0 ]) "return code($RC) == 0" _wvpopcall } WVFAILRC() { local RC=$? _wvpushcall "$@" _wvfind_caller _wvcheck $(_wvgetrv [ $RC -ne 0 ]) "return code($RC) != 0" _wvpopcall } WVSTART() { echo >&2 _wvfind_caller echo "Testing \"$*\" in $WVCALLER_FILE:" >&2 } WVSKIP() { local TEXT=$(_wvtextclean "$@") _wvpushcall "$@" _wvfind_caller echo "! $WVCALLER_FILE:$WVCALLER_LINE $TEXT skip ok" 1>&2 } WVDIE() { local TEXT=$(_wvtextclean "$@") _wvpushcall "$@" _wvfind_caller echo "! $WVCALLER_FILE:$WVCALLER_LINE $TEXT FAILED" 1>&2 exit 1 } # Local Variables: # indent-tabs-mode: t # sh-basic-offset: 8 # End: