pax_global_header00006660000000000000000000000064122514673050014517gustar00rootroot0000000000000052 comment=5a7fd007e89bb343ab2024d9a87d54bbee997bbf bup-0.25/000077500000000000000000000000001225146730500122335ustar00rootroot00000000000000bup-0.25/.dir-locals.el000066400000000000000000000004331225146730500146640ustar00rootroot00000000000000((nil . ()) (python-mode . ((indent-tabs-mode . nil) (python-indent-offset . 4))) (sh-mode . ((indent-tabs-mode . nil) (sh-basic-offset . 4))) (c-mode . ((indent-tabs-mode . nil) (c-basic-offset . 4) (c-file-style . "BSD")))) bup-0.25/.gitignore000066400000000000000000000003161225146730500142230ustar00rootroot00000000000000/bup /cmd/bup-* /lib/bup/_version.py randomgen memtest *.o *.so *.exe *.dll *~ *.pyc tags[12] tags2[tc] out[12] out2[tc] *.tmp *.tmp.meta /build *.swp nbproject /t/sampledata/[bc] /t/sampledata/etc /t/tmp/ bup-0.25/CODINGSTYLE000066400000000000000000000015701225146730500137450ustar00rootroot00000000000000Python code follows PEP8 [1] with regard to coding style and PEP257 [2] with regard to docstring style. Multi-line docstrings should have one short summary line, followed by a blank line and a series of paragraphs. The last paragraph should be followed by a line that closes the docstring (no blank line in between). Here's an example from lib/bup/helpers.py: def unlink(f): """Delete a file at path 'f' if it currently exists. Unlike os.unlink(), does not throw an exception if the file didn't already exist. """ #code... Module-level docstrings follow exactly the same guidelines but without the blank line between the summary and the details. The C implementations should follow the kernel/git coding style [3]. 
[1]:http://www.python.org/dev/peps/pep-0008/ [2]:http://www.python.org/dev/peps/pep-0257/ [3]:http://www.kernel.org/doc/Documentation/CodingStyle bup-0.25/DESIGN000066400000000000000000001012261225146730500131310ustar00rootroot00000000000000 The Crazy Hacker's Crazy Guide to Bup Craziness =============================================== Despite what you might have heard, bup is not that crazy, and neither are you if you're trying to figure out how it works. But it's also (as of this writing) rather new and the source code doesn't have a lot of comments, so it can be a little confusing at first glance. This document is designed to make it easier for you to get started if you want to add a new feature, fix a bug, or just understand how it all works. Bup Source Code Layout ---------------------- As you're reading this, you might want to look at different parts of the bup source code to follow along and see what we're talking about. bup's code is written primarily in python with a bit of C code in speed-sensitive places. Here are the most important things to know: - bup (symlinked to main.py) is the main program that runs when you type 'bup'. - cmd/bup-* (mostly symlinked to cmd/*-cmd.py) are the individual subcommands, in a way similar to how git breaks all its subcommands into separate programs. Not all the programs have to be written in python; they could be in any language, as long as they end up named cmd/bup-*. We might end up re-coding large parts of bup in C eventually so that it can be even faster and (perhaps) more portable. - lib/bup/*.py are python library files used by the cmd/*.py commands. That directory name seems a little silly (and worse, redundant) but there seemed to be no better way to let programs write "from bup import index" and have it work. Putting bup in the top level conflicted with the 'bup' command; calling it anything other than 'bup' was fundamentally wrong, and doesn't work when you install bup on your system in /usr/lib somewhere. 
So we get the annoyingly long paths. Repository Structure ==================== Before you can talk about how bup works, we need to first address what it does. The purpose of bup is essentially to let you "replicate" data between two main data structures: 1. Your computer's filesystem; 2. A bup repository. (Yes, we know, that part also resides in your filesystem. Stop trying to confuse yourself. Don't worry, we'll be plenty confusing enough as it is.) Essentially, copying data from the filesystem to your repository is called "backing stuff up," which is what bup specializes in. Normally you initiate a backup using the 'bup save' command, but that's getting ahead of ourselves. For the inverse operation, ie. copying from the repository to your filesystem, you have several choices; the main ones are 'bup restore', 'bup ftp', 'bup fuse', and 'bup web'. Now, those are the basics of backups. In other words, we just spent about half a page telling you that bup backs up and restores data. Are we having fun yet? The next thing you'll want to know is the format of the bup repository, because hacking on bup is rather impossible unless you understand that part. In short, a bup repository is a git repository. If you don't know about git, you'll want to read about it now. A really good article to read is "Git for Computer Scientists" - you can find it in Google. Go read it now. We'll wait. Got it? Okay, so now you're an expert in blobs, trees, commits, and refs, the four building blocks of a git repository. bup uses these four things, and they're formatted in exactly the same way as git does it, so you can use git to manipulate the bup repository if you want, and you probably won't break anything. 
It's also a comfort to know you can squeeze data out using git, just in case bup fails you, and as a developer, git offers some nice tools (like 'git rev-list' and 'git log' and 'git diff' and 'git show' and so on) that allow you to explore your repository and help debug when things go wrong. Now, bup does use these tools a little bit differently than plain git. We need to do this in order to address three deficiencies in git when used for large backups, namely a) git bogs down and crashes if you give it really large files; b) git is too slow when you give it too many files; and c) git doesn't store detailed filesystem metadata. Let's talk about each of those problems in turn. Handling large files (cmd/split, hashsplit.split_to_blob_or_tree) -------------------- The primary reason git can't handle huge files is that it runs them through xdelta, which generally means it tries to load the entire contents of a file into memory at once. If it didn't do this, it would have to store the entire contents of every single revision of every single file, even if you only changed a few bytes of that file. That would be a terribly inefficient use of disk space, and git is well known for its amazingly efficient repository format. Unfortunately, xdelta works great for small files and gets amazingly slow and memory-hungry for large files. For git's main purpose, ie. managing your source code, this isn't a problem. But when backing up your filesystem, you're going to have at least a few large files, and so it's a non-starter. bup has to do something totally different. What bup does instead of xdelta is what we call "hashsplitting." We wanted a general-purpose way to efficiently back up *any* large file that might change in small ways, without storing the entire file every time. In fact, the original versions of bup could only store a single file at a time; surprisingly enough, this was enough to give us a large part of bup's functionality. 
If you just take your entire filesystem and put it in a giant tarball each day, then send that tarball to bup, bup will be able to efficiently store only the changes to that tarball from one day to the next. For small files, bup's compression won't be as good as xdelta's, but for anything over a few megabytes in size, bup's compression will actually *work*, which is a big advantage over xdelta. How does hashsplitting work? It's deceptively simple. We read through the file one byte at a time, calculating a rolling checksum of the last 64 bytes. (Why 64? No reason. Literally. We picked it out of the air. Probably some other number is better. Feel free to join the mailing list and tell us which one and why.) (The rolling checksum idea is actually stolen from rsync and xdelta, although we use it differently. And they use some kind of variable window size based on a formula we don't totally understand.) The original rolling checksum algorithm we used was called "stupidsum," because it was based on the only checksum Avery remembered how to calculate at the time. He also remembered that it was the introductory checksum algorithm in a whole article about how to make good checksums that he read about 15 years ago, and it was thoroughly discredited in that article for being very stupid. But, as so often happens, Avery couldn't remember any better algorithms from the article. So what we got is stupidsum. Since then, we have replaced the stupidsum algorithm with what we call "rollsum," based on code in librsync. It's essentially the same as what rsync does, except we use a fixed window size. (If you're a computer scientist and can demonstrate that some other rolling checksum would be faster and/or better and/or have fewer screwy edge cases, we need your help! Avery's out of control! Join our mailing list! Please! Save us! ... oh boy, I sure hope he doesn't read this) In any case, rollsum seems to do pretty well at its job. You can find it in bupsplit.c. 
Basically, it converts the last 64 bytes read into a 32-bit integer. What we then do is take the lowest 13 bits of the rollsum, and if they're all 1's, we consider that to be the end of a chunk. This happens on average once every 2^13 = 8192 bytes, so the average chunk size is 8192 bytes. (Why 13 bits? Well, we picked the number at random and... eugh. You're getting the idea, right? Join the mailing list and tell us why we're wrong.) (Incidentally, even though the average chunk size is 8192 bytes, the actual probability distribution of block sizes ends up being non-uniform; if we remember our stats classes correctly, which we probably don't, it's probably an "exponential distribution." The idea is that for each byte in the block, the probability that it's the last byte of the block is one in 8192. Thus, the block sizes end up being skewed toward the smaller end. That's not necessarily for the best, but maybe it is. Computer science to the rescue? You know the drill.) Anyway, so we're dividing up those files into chunks based on the rolling checksum. Then we store each chunk separately (indexed by its sha1sum) as a git blob. Why do we split this way? Well, because the results are actually really nice. Let's imagine you have a big mysql database dump (produced by mysqldump) and it's basically 100 megs of SQL text. Tomorrow's database dump adds 100 rows to the middle of the file somewhere, so it's 100.01 megs of text. A naive block splitting algorithm - for example, just dividing the file into 8192-byte blocks - would be a disaster. After the first bit of text has changed, every block after that would have a different boundary, so most of the blocks in the new backup would be different from the previous ones, and you'd have to store the same data all over again. But with hashsplitting, no matter how much data you add, modify, or remove in the middle of the file, all the chunks *before* and *after* the affected chunk are absolutely the same. 
All that matters to the hashsplitting algorithm is the 64-byte "separator" sequence, and a single change can only affect, at most, one separator sequence or the bytes between two separator sequences. And because of rollsum, about one in 8192 possible 64-byte sequences is a separator sequence. Like magic, the hashsplit chunking algorithm will chunk your file the same way every time, even without knowing how it had chunked it previously. The next problem is less obvious: after you store your series of chunks as git blobs, how do you store their sequence? Each blob has a 20-byte sha1 identifier, which means the simple list of blobs is going to be 20/8192 = 0.25% of the file length. For a 200GB file, that's 488 megs of just sequence data. As an overhead percentage, 0.25% basically doesn't matter. 488 megs sounds like a lot, but compared to the 200GB you have to store anyway, it's irrelevant. What *is* relevant is that 488 megs is a lot of memory you have to use in order to keep track of the list. Worse, if you back up an almost-identical file tomorrow, you'll have *another* 488 meg blob to keep track of, and it'll be almost but not quite the same as last time. Hmm, big files, each one almost the same as the last... you know where this is going, right? Actually no! Ha! We didn't split this list in the same way. We could have, in fact, but it wouldn't have been very "git-like", since we'd like to store the list as a git 'tree' object in order to make sure git's refcounting and reachability analysis doesn't get confused. Never mind the fact that we want you to be able to 'git checkout' your data without any special tools. What we do instead is we extend the hashsplit algorithm a little further using what we call "fanout." Instead of checking just the last 13 bits of the checksum, we use additional checksum bits to produce additional splits. For example, let's say we use a 4-bit fanout. 
That means we'll break a series of chunks into its own tree object whenever the last 13+4 = 17 bits of the rolling checksum are 1. Naturally, whenever the lowest 17 bits are 1, the lowest 13 bits are *also* 1, so the boundary of a chunk group is always also the boundary of a particular chunk. And so on. Eventually you'll have too many chunk groups, but you can group them into supergroups by using another 4 bits, and continue from there. What you end up with is an actual tree of blobs - which git 'tree' objects are ideal to represent. And if you think about it, just like the original list of chunks, the tree itself is pretty stable across file modifications. Any one modification will only affect the chunks actually containing the modifications, thus only the groups containing those chunks, and so on up the tree. Essentially, the number of changed git objects is O(log n) where n is the number of chunks. Since log 200 GB, using a base of 16 or so, is not a very big number, this is pretty awesome. Remember, any git object we *don't* change in a new backup is one we can reuse from last time, so the deduplication effect is pretty awesome. Better still, the hashsplit-tree format is good for a) random instead of sequential access to data (which you can see in action with 'bup fuse'); and b) quickly showing the differences between huge files (which we haven't really implemented because we don't need it, but you can try 'git diff -M -C -C backup1 backup2 -- filename' for a good start). So now we've split our 200 GB file into about 24 million pieces. That brings us to git limitation number 2. Handling huge numbers of files (git.PackWriter) ------------------------------ git is designed for handling reasonably-sized repositories that change relatively infrequently. (You might think you change your source code "frequently" and that git handles much more frequent changes than, say, svn can handle. But that's not the same kind of "frequently" we're talking about. 
Imagine you're backing up all the files on your disk, and one of those files is a 100 GB database file with hundreds of daily users. Your disk changes so frequently you can't even back up all the revisions even if you were backing stuff up 24 hours a day. That's "frequently.") git's way of doing things works really nicely for the way software developers write software, but it doesn't really work so well for everything else. The #1 killer is the way it adds new objects to the repository: it creates one file per blob. Then you later run 'git gc' and combine those files into a single file (using highly efficient xdelta compression, and ignoring any files that are no longer relevant). 'git gc' is slow, but for source code repositories, the resulting super-efficient storage (and associated really fast access to the stored files) is worth it. For backups, it's not; you almost never access your backed-up data, so storage time is paramount, and retrieval time is mostly unimportant. To back up that 200 GB file with git and hashsplitting, you'd have to create 24 million little 8k files, then copy them into a 200 GB packfile, then delete the 24 million files again. That would take about 400 GB of disk space to run, require lots of random disk seeks, and require you to go through your data twice. So bup doesn't do that. It just writes packfiles directly. Luckily, these packfiles are still git-formatted, so git can happily access them once they're written. But that leads us to our next problem. Huge numbers of huge packfiles (midx.py, bloom.py, cmd/midx, cmd/bloom) ------------------------------ Git isn't actually designed to handle super-huge repositories. Most git repositories are small enough that it's reasonable to merge them all into a single packfile, which 'git gc' usually does eventually. 
The problematic part of large packfiles isn't the packfiles themselves - git is designed to expect the total size of all packs to be larger than available memory, and once it can handle that, it can handle virtually any amount of data about equally efficiently. The problem is the packfile indexes (.idx) files. In bup we call these idx (pronounced "idix") files instead of using the word "index," because the word index is already used for something totally different in git (and thus bup) and we'll become hopelessly confused otherwise. Anyway, each packfile (*.pack) in git has an associated idx (*.idx) that's a sorted list of git object hashes and file offsets. If you're looking for a particular object based on its sha1, you open the idx, binary search it to find the right hash, then take the associated file offset, seek to that offset in the packfile, and read the object contents. The performance of the binary search is about O(log n) with the number of hashes in the pack, with an optimized first step (you can read about it elsewhere) that somewhat improves it to O(log(n)-7). Unfortunately, this breaks down a bit when you have *lots* of packs. Say you have 24 million objects (containing around 200 GB of data) spread across 200 packfiles of 1GB each. To look for an object requires you search through about 122000 objects per pack; ceil(log2(122000)-7) = 10, so you'll have to search 10 times. About 7 of those searches will be confined to a single 4k memory page, so you'll probably have to page in about 3-4 pages per file, times 200 files, which makes 600-800 4k pages (2.4-3.6 megs)... every single time you want to look for an object. This brings us to another difference between git's and bup's normal use case. With git, there's a simple optimization possible here: when looking for an object, always search the packfiles in MRU (most recently used) order. 
Related objects are usually clustered together in a single pack, so you'll usually end up searching around 3 pages instead of 600, which is a tremendous improvement. (And since you'll quickly end up swapping in all the pages in a particular idx file this way, it isn't long before searching for a nearby object doesn't involve any swapping at all.) bup isn't so lucky. git users spend most of their time examining existing objects (looking at logs, generating diffs, checking out branches), which lends itself to the above optimization. bup, on the other hand, spends most of its time looking for *nonexistent* objects in the repository so that it can back them up. When you're looking for objects that aren't in the repository, there's no good way to optimize; you have to exhaustively check all the packs, one by one, to ensure that none of them contain the data you want. To improve performance of this sort of operation, bup introduces midx (pronounced "midix" and short for "multi-idx") files. As the name implies, they index multiple packs at a time. Imagine you had a midx file for your 200 packs. midx files are a lot like idx files; they have a lookup table at the beginning that narrows down the initial search, followed by a binary search. Then unlike idx files (which have a fixed-size 256-entry lookup table) midx tables have a variably-sized table that makes sure the entire binary search can be contained to a single page of the midx file. Basically, the lookup table tells you which page to load, and then you binary search inside that page. A typical search thus only requires the kernel to swap in two pages, which is better than results with even a single large idx file. And if you have lots of RAM, eventually the midx lookup table (at least) will end up cached in memory, so only a single page should be needed for each lookup. You generate midx files with 'bup midx'. 
The downside of midx files is that generating one takes a while, and you have to regenerate it every time you add a few packs. UPDATE: Brandon Low contributed an implementation of "bloom filters", which have even better characteristics than midx for certain uses. Look it up in Wikipedia. He also massively sped up both midx and bloom by rewriting the key parts in C. The nicest thing about bloom filters is we can update them incrementally every time we get a new idx, without regenerating from scratch. That makes the update phase much faster, and means we can also get away with generating midxes less often. midx files are a bup-specific optimization and git doesn't know what to do with them. However, since they're stored as separate files, they don't interfere with git's ability to read the repository. Detailed Metadata ----------------- So that's the basic structure of a bup repository, which is also a git repository. There's just one more thing we have to deal with: filesystem metadata. Git repositories are really only intended to store file contents with a small bit of extra information, like symlink targets and executable bits, so we have to store the rest some other way. Bup stores more complete metadata in the VFS in a file named .bupm in each tree. This file contains one entry for each file in the tree object, sorted in the same order as the tree. The first .bupm entry is for the directory itself, i.e. ".", and its name is the empty string, "". Each .bupm entry contains a variable length sequence of records containing the metadata for the corresponding path. Each record records one type of metadata. Current types include a common record type (containing the normal stat information), a symlink target type, a hardlink target type, a POSIX1e ACL type, etc. See metadata.py for the complete list. The .bupm file is optional, and when it's missing, bup will behave as it did before the addition of metadata, and restore files using the tree information. 
The nice thing about this design is that you can walk through each file in a tree just by opening the tree and the .bupm contents, and iterating through both at the same time. Since the contents of any .bupm file should match the state of the filesystem when it was *indexed*, bup must record the detailed metadata in the index. To do this, bup records four values in the index, the atime, mtime, and ctime (as timespecs), and an integer offset into a secondary "metadata store" which has the same name as the index, but with ".meta" appended. This secondary store contains the encoded Metadata object corresponding to each path in the index. Currently, in order to decrease the storage required for the metadata store, bup only writes unique values there, reusing offsets when appropriate across the index. The effectiveness of this approach relies on the expectation that there will be many duplicate metadata records. Storing the full timestamps in the index is intended to make that more likely, because it makes it unnecessary to record those values in the secondary store. So bup clears them before encoding the Metadata objects destined for the index, and timestamp differences don't contribute to the uniqueness of the metadata. Bup supports recording and restoring hardlinks, and it does so by tracking sets of paths that correspond to the same dev/inode pair when indexing. This information is stored in an optional file with the same name as the index, but ending with ".hlink". If there are multiple index runs, and the hardlinks change, bup will notice this (within whatever subtree it is asked to reindex) and update the .hlink information accordingly. The current hardlink implementation will refuse to link to any file that resides outside the restore tree, and if the restore tree spans a different set of filesystems than the save tree, complete sets of hardlinks may not be restored. 
Filesystem Interaction ====================== Storing data is just half of the problem of making a backup; figuring out what to store is the other half. At the most basic level, piping the output of 'tar' into 'bup split' is an easy way to offload that decision; just let tar do all the hard stuff. And if you like tar files, that's a perfectly acceptable way to do it. But we can do better. Backing up with tarballs would totally be the way to go, except for two serious problems: 1. The result isn't easily "seekable." Tar files have no index, so if (as commonly happens) you only want to restore one file in a 200 GB backup, you'll have to read up to 200 GB before you can get to the beginning of that file. tar is short for "tape archive"; on a tape, there was no better way to do it anyway, so they didn't try. But on a disk, random file access is much, much better when you can figure out how. 2. tar doesn't remember which files it backed up last time, so it has to read through the entire file contents again in order to generate the tarball, large parts of which will then be skipped by bup since they've already been stored. This is much slower than necessary. (The second point isn't entirely true for all versions of tar. For example, GNU tar has an "incremental" mode that can somewhat mitigate this problem, if you're smart enough to know how to use it without hurting yourself. But you still have to decide which backups are "incremental" and which ones will be "full" and so on, so even when it works, it's more error-prone than bup.) bup divides the backup process into two major steps: a) indexing the filesystem, and b) saving file contents into the repository. Let's look at those steps in detail. Indexing the filesystem (cmd/drecurse, cmd/index, index.py) ----------------------- Splitting the filesystem indexing phase into its own program is nontraditional, but it gives us several advantages. 
The first advantage is trivial, but might be the most important: you can index files a lot faster than you can back them up. That means we can generate the index (.bup/bupindex) first, then have a nice, reliable, non-lying completion bar that tells you how much of your filesystem remains to be backed up. The alternative would be annoying failures like counting the number of *files* remaining (as rsync does), even though one of the files is a virtual machine image of 80 GB, and the 1000 other files are each under 10k. With bup, the percentage complete is the *real* percentage complete, which is very pleasant. Secondly, it makes it easier to debug and test; you can play with the index without actually backing up any files. Thirdly, you can replace the 'bup index' command with something else and not have to change anything about the 'bup save' command. The current 'bup index' implementation just blindly walks the whole filesystem looking for files that have changed since the last time it was indexed; this works fine, but something using inotify instead would be orders of magnitude faster. Windows and MacOS both have inotify-like services too, but they're totally different; if we want to support them, we can simply write new bup commands that do the job, and they'll never interfere with each other. And fourthly, git does it that way, and git is awesome, so who are we to argue? So let's look at how the index file works. First of all, note that the ".bup/bupindex" file is not the same as git's ".git/index" file. The latter isn't used in bup; as far as git is concerned, your bup repository is a "bare" git repository and doesn't have a working tree, and thus it doesn't have an index either. However, the bupindex file actually serves exactly the same purpose as git's index file, which is why we still call it "the index." We just had to redesign it for the usual bup-vs-git reasons, mostly that git just isn't designed to handle millions of files in a single repository. 
(The only way to find a file in git's index is to search it linearly; that's very fast in git-sized repositories, but very slow in bup-sized ones.) Let's not worry about the exact format of the bupindex file; it's still not optimal, and will probably change again. The most important things to know about bupindex are: - You can iterate through it much faster than you can iterate through the "real" filesystem (using something like the 'find' command). - If you delete it, you can get it back just by reindexing your filesystem (although that can be annoying to wait for); it's not critical to the repository itself. - You can iterate through only particular subtrees if you want. - There is no need to have more than one index for a particular filesystem, since it doesn't store anything about backups; it just stores file metadata. It's really just a cache (or 'index') of your filesystem's existing metadata. You could share the bupindex between repositories, or between multiple users on the same computer. If you back up your filesystem to multiple remote repositories to be extra safe, you can still use the same bupindex file across all of them, because it's the same filesystem every time. - Filenames in the bupindex are absolute paths, because that's the best way to ensure that you only need one bupindex file and that they're interchangeable. A note on file "dirtiness" -------------------------- The concept on which 'bup save' operates is simple enough; it reads through the index and backs up any file that is "dirty," that is, doesn't already exist in the repository. Determination of dirtiness is a little more complicated than it sounds. The most dirtiness-relevant flag in the bupindex is IX_HASHVALID; if this flag is reset, the file *definitely* is dirty and needs to be backed up. But a file may be dirty even if IX_HASHVALID is set, and that's the confusing part. 
The index stores a listing of files, their attributes, and their git object ids (sha1 hashes), if known. The "if known" is what IX_HASHVALID is about. When 'bup save' backs up a file, it sets the sha1 and sets IX_HASHVALID; when 'bup index' sees that a file has changed, it leaves the sha1 alone and resets IX_HASHVALID. Remember that the index can be shared between users, repositories, and backups. So IX_HASHVALID doesn't mean your repository *has* that sha1 in it; it only means that if you *do* have it, that you don't need to back up the file. Thus, 'bup save' needs to check every file in the index to make sure its hash exists, not just that it's valid. There's an optimization possible, however: if you know a particular tree's hash is valid and exists (say /usr), then you don't need to check the validity of all its children; because of the way git trees and blobs work, if your repository is valid and you have a tree object, then you have all the blobs it points to. You won't back up a tree object without backing up its blobs first, so you don't need to double check it next time. (If you really want to double check this, it belongs in a tool like 'bup fsck' or 'git fsck'.) So in short, 'bup save' on a "clean" index (all files are marked IX_HASHVALID) can be very fast; we just check our repository and see if the top level IX_HASHVALID sha1 exists. If it does, then we're done. Similarly, if not the entire index is valid, you can still avoid recursing into subtrees if those particular subtrees are IX_HASHVALID and their sha1s are in the repository. The net result is that, as long as you never lose your index, 'bup save' can always run very fast. Another interesting trick is that you can skip backing up files even if IX_HASHVALID *isn't* set, as long as you have that file's sha1 in the repository. What that means is you've chosen not to backup the latest version of that file; instead, your new backup set just contains the most-recently-known valid version of that file. 
This is a good trick if you want to do frequent backups of smallish files and infrequent backups of large ones (as in 'bup save --smaller'). Each of your backups will be "complete," in that they contain all the small files and the large ones, but intermediate ones will just contain out-of-date copies of the large files. A final game we can play with the bupindex involves restoring: when you restore a directory from a previous backup, you can update the bupindex right away. Then, if you want to restore a different backup on top, you can compare the files in the index against the ones in the backup set, and update only the ones that have changed. (Even more interesting things happen if people are using the files on the restored system and you haven't updated the index yet; the net result would be an automated merge of all non-conflicting files.) This would be a poor man's distributed filesystem. The only catch is that nobody has written this feature for 'bup restore' yet. Someday! How 'bup save' works (cmd/save) -------------------- This section is too boring and has been omitted. Once you understand the index, there's nothing special about bup save. Retrieving backups: the bup vfs layer (vfs.py, cmd/ls, cmd/ftp, cmd/fuse) ===================================== One of the neat things about bup's storage format, at least compared to most backup tools, is it's easy to read a particular file, or even part of a file. That means a read-only virtual filesystem is easy to generate and it'll have good performance characteristics. Because of git's commit structure, you could even use branching and merging to make a transactional read-write filesystem... but that's probably getting a little out of bup's scope. Who knows what the future might bring, though? Read-only filesystems are well within our reach today, however. The 'bup ls', 'bup ftp', and 'bup fuse' commands all use a "VFS" (virtual filesystem) layer to let you access your repositories. 
Feel free to explore the source code for these tools and vfs.py - they're pretty straightforward. Some things to note: - None of these use the bupindex for anything. - For user-friendliness, they present your refs/commits/trees as a single hierarchy (ie. a filesystem), which isn't really how git repositories are formatted. So don't get confused! We hope you'll enjoy bup. Looking forward to your patches! -- apenwarr and the rest of the bup team Local Variables: mode: text End: bup-0.25/Documentation/000077500000000000000000000000001225146730500150445ustar00rootroot00000000000000bup-0.25/Documentation/.gitignore000066400000000000000000000000371225146730500170340ustar00rootroot00000000000000*.[0-9] *.html .docs-available bup-0.25/Documentation/Makefile000066400000000000000000000014751225146730500165130ustar00rootroot00000000000000PANDOC:=$(shell \ if pandoc /dev/null; then \ echo pandoc; \ touch .docs-available; \ else \ echo "Warning: pandoc not installed; can't generate manpages." >&2; \ echo '@echo Skipping: pandoc'; \ rm -f .docs-available; \ fi) BUP_VERSION:=$(shell ../bup version --tag) BUP_DATE:=$(shell ../bup version --date) default: all all: man html man: $(patsubst %.md,%.1,$(wildcard *.md)) html: $(patsubst %.md,%.html,$(wildcard *.md)) %.1: %.md.tmp Makefile $(PANDOC) -s -r markdown -w man -o $@ $< %.html: %.md.tmp Makefile $(PANDOC) -s -r markdown -w html -o $@ $< .PRECIOUS: %.md.tmp %.md.tmp: %.md Makefile rm -f $@ $@.new sed -e 's,%BUP_VERSION%,${BUP_VERSION},g' \ -e 's,%BUP_DATE%,${BUP_DATE},g' <$< >$@.new mv $@.new $@ clean: rm -f *~ .*~ *.[0-9] *.new *.tmp *.html .docs-available bup-0.25/Documentation/bup-bloom.md000066400000000000000000000027351225146730500172710ustar00rootroot00000000000000% bup-bloom(1) Bup %BUP_VERSION% % Brandon Low % %BUP_DATE% # NAME bup-bloom - generates, regenerates, updates bloom filters # SYNOPSIS bup bloom [-d dir] [-o outfile] [-k hashes] [-c idxfile] [-f] [\--ruin] # DESCRIPTION `bup bloom` builds a bloom filter file 
for a bup repository. If one already exists, it checks the filter and updates or regenerates it as needed. # OPTIONS \--ruin : destroy bloom filters by setting the whole bitmask to zeros. you really want to know what you are doing if you run this and you want to delete the resulting bloom when you are done with it. -f, \--force : don't update the existing bloom file; generate a new one from scratch. -d, \--dir=*directory* : the directory, containing `.idx` files, to process. Defaults to $BUP_DIR/objects/pack -o, \--outfile=*outfile* : the file to write the bloom filter to. defaults to $dir/bup.bloom -k, \--hashes=*hashes* : number of hash functions to use only 4 and 5 are valid. defaults to 5 for repositories < 2 TiB, or 4 otherwise. See comments in git.py for more on this value. -c, \--check=*idxfile* : checks the bloom file (counterintuitively outfile) against the specified `.idx` file, first checks that the bloom filter is claiming to contain the `.idx`, then checks that it does actually contain all of the objects in the `.idx`. Does not write anything and ignores the `-k` option. # BUP Part of the `bup`(1) suite. bup-0.25/Documentation/bup-cat-file.md000066400000000000000000000025551225146730500176450ustar00rootroot00000000000000% bup-cat-file(1) Bup %BUP_VERSION% % Rob Browning % %BUP_DATE% # NAME bup-cat-file - extract archive content (low-level) # SYNOPSIS bup cat-file [--meta|--bupm] <*path*> # DESCRIPTION `bup cat-file` extracts content associated with *path* from the archive and dumps it to standard output. If nothing special is requested, the actual data contained by *path* (which must be a regular file) will be dumped. # OPTIONS \--meta : retrieve the metadata entry associated with *path*. Note that currently this does not return the raw bytes for the entry recorded in the relevant .bupm in the archive, but rather a decoded and then re-encoded version. 
When that matters, it should be possible (though awkward) to use `--bupm` on the parent directory and then find the relevant entry in the output. \--bupm : retrieve the .bupm file associated with *path*, which must be a directory. # EXAMPLES # Retrieve the content of somefile. $ bup cat-file /foo/latest/somefile > somefile-content # Examine the metadata associated with something. $ bup cat-file --meta /foo/latest/something | bup meta -tvvf - # Examine the metadata for somedir, including the items it contains. $ bup cat-file --bupm /foo/latest/somedir | bup meta -tvvf - # SEE ALSO `bup-join`(1), `bup-meta`(1) # BUP Part of the `bup`(1) suite. bup-0.25/Documentation/bup-daemon.md000066400000000000000000000007541225146730500174230ustar00rootroot00000000000000% bup-daemon(1) Bup %BUP_VERSION% % Brandon Low % %BUP_DATE% # NAME bup-daemon - listens for connections and runs `bup server` # SYNOPSIS bup daemon [-l address] [-p port] # DESCRIPTION `bup daemon` is a simple bup server which listens on a socket and forks connections to `bup mux server` children. # OPTIONS -l, \--listen=*address* : the address or hostname to listen on -p, \--port=*port* : the port to listen on # BUP Part of the `bup`(1) suite. bup-0.25/Documentation/bup-damage.md000066400000000000000000000057071225146730500174010ustar00rootroot00000000000000% bup-damage(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-damage - randomly destroy blocks of a file # SYNOPSIS bup damage [-n count] [-s maxsize] [\--percent pct] [-S seed] [\--equal] \ # DESCRIPTION Use `bup damage` to deliberately destroy blocks in a `.pack` or `.idx` file (from `.bup/objects/pack`) to test the recovery features of `bup-fsck`(1) or other programs. *THIS PROGRAM IS EXTREMELY DANGEROUS AND WILL DESTROY YOUR DATA* `bup damage` is primarily useful for automated or manual tests of data recovery tools, to reassure yourself that the tools actually work. 
# OPTIONS -n, \--num=*numblocks* : the number of separate blocks to damage in each file (default 10). Note that it's possible for more than one damaged segment to fall in the same `bup-fsck`(1) recovery block, so you might not damage as many recovery blocks as you expect. If this is a problem, use `--equal`. -s, \--size=*maxblocksize* : the maximum size, in bytes, of each damaged block (default 1 unless `--percent` is specified). Note that because of the way `bup-fsck`(1) works, a multi-byte block could fall on the boundary between two recovery blocks, and thus damage two separate recovery blocks. In small files, it's also possible for a damaged block to be larger than a recovery block. If these issues might be a problem, you should use the default damage size of one byte. \--percent=*maxblockpercent* : the maximum size, in percent of the original file, of each damaged block. If both `--size` and `--percent` are given, the maximum block size is the minimum of the two restrictions. You can use this to ensure that a given block will never damage more than one or two `git-fsck`(1) recovery blocks. -S, \--seed=*randomseed* : seed the random number generator with the given value. If you use this option, your tests will be repeatable, since the damaged block offsets, sizes, and contents will be the same every time. By default, the random numbers are different every time (so you can run tests in a loop and repeatedly test with different damage each time). \--equal : instead of choosing random offsets for each damaged block, space the blocks equally throughout the file, starting at offset 0. If you also choose a correct maximum block size, this can guarantee that any given damage block never damages more than one `git-fsck`(1) recovery block. (This is also guaranteed if you use `-s 1`.) 
# EXAMPLE # make a backup in case things go horribly wrong cp -a ~/.bup/objects/pack ~/bup-packs.bak # generate recovery blocks for all packs bup fsck -g # deliberately damage the packs bup damage -n 10 -s 1 -S 0 ~/.bup/objects/pack/*.{pack,idx} # recover from the damage bup fsck -r # SEE ALSO `bup-fsck`(1), `par2`(1) # BUP Part of the `bup`(1) suite. bup-0.25/Documentation/bup-drecurse.md000066400000000000000000000026731225146730500177760ustar00rootroot00000000000000% bup-drecurse(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-drecurse - recursively list files in your filesystem # SYNOPSIS bup drecurse [-x] [-q] [\--exclude *path*] [\--exclude-from *filename*] [\--profile] \ # DESCRIPTION `bup drecurse` traverses files in the filesystem in a way similar to `find`(1). In most cases, you should use `find`(1) instead. This program is useful mainly for testing the file traversal algorithm used in `bup-index`(1). Note that filenames are returned in reverse alphabetical order, as in `bup-index`(1). This is important because you can't generate the hash of a parent directory until you have generated the hashes of all its children. When listing files in reverse order, the parent directory will come after its children, making this easy. # OPTIONS -x, \--xdev, \--one-file-system : don't cross filesystem boundaries. -q, \--quiet : don't print filenames as they are encountered. Useful when testing performance of the traversal algorithms. \--exclude=*path* : a path to exclude from the backup (can be used more than once) \--exclude-from=*filename* : a file that contains exclude paths (can be used more than once) \--profile : print profiling information upon completion. Useful when testing performance of the traversal algorithms. # EXAMPLE bup drecurse -x / # SEE ALSO `bup-index`(1) # BUP Part of the `bup`(1) suite. 
bup-0.25/Documentation/bup-fsck.md000066400000000000000000000070131225146730500171010ustar00rootroot00000000000000% bup-fsck(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-fsck - verify or repair a bup repository # SYNOPSIS bup fsck [-r] [-g] [-v] [\--quick] [-j *jobs*] [\--par2-ok] [\--disable-par2] [filenames...] # DESCRIPTION `bup fsck` is a tool for validating bup repositories in the same way that `git fsck` validates git repositories. It can also generate and/or use "recovery blocks" using the `par2`(1) tool (if you have it installed). This allows you to recover from damaged blocks covering up to 5% of your `.pack` files. In a normal backup system, damaged blocks are less important, because there tends to be enough data duplicated between backup sets that a single damaged backup set is non-critical. In a deduplicating backup system like bup, however, no block is ever stored more than once, even if it is used in every single backup. If that block were to be unrecoverable, *all* your backup sets would be damaged at once. Thus, it's important to be able to verify the integrity of your backups and recover from disk errors if they occur. *WARNING*: bup fsck's recovery features are not available unless you have the free `par2`(1) package installed on your bup server. *WARNING*: bup fsck obviously cannot recover from a complete disk failure. If your backups are important, you need to carefully consider redundancy (such as using RAID for multi-disk redundancy, or making off-site backups for site redundancy). # OPTIONS -r, \--repair : attempt to repair any damaged packs using existing recovery blocks. (Requires `par2`(1).) -g, \--generate : generate recovery blocks for any packs that don't already have them. (Requires `par2`(1).) -v, \--verbose : increase verbosity (can be used more than once). \--quick : don't run a full `git verify-pack` on each pack file; instead just check the final checksum. 
This can cause a significant speedup with no obvious decrease in reliability. However, you may want to avoid this option if you're paranoid. Has no effect on packs that already have recovery information. -j, \--jobs=*numjobs* : maximum number of pack verifications to run at a time. The optimal value for this option depends how fast your CPU can verify packs vs. your disk throughput. If you run too many jobs at once, your disk will get saturated by seeking back and forth between files and performance will actually decrease, even if *numjobs* is less than the number of CPU cores on your system. You can experiment with this option to find the optimal value. \--par2-ok : immediately return 0 if `par2`(1) is installed and working, or 1 otherwise. Do not actually check anything. \--disable-par2 : pretend that `par2`(1) is not installed, and ignore all recovery blocks. # EXAMPLE # generate recovery blocks for all packs that don't # have them bup fsck -g # generate recovery blocks for a particular pack bup fsck -g ~/.bup/objects/pack/153a1420cb1c8*.pack # check all packs for correctness (can be very slow!) bup fsck # check all packs for correctness and recover any # damaged ones bup fsck -r # check a particular pack for correctness and recover # it if damaged bup fsck -r ~/.bup/objects/pack/153a1420cb1c8*.pack # check if recovery blocks are available on this system if bup fsck --par2-ok; then echo "par2 is ok" fi # SEE ALSO `bup-damage`(1), `fsck`(1), `git-fsck`(1) # BUP Part of the `bup`(1) suite. bup-0.25/Documentation/bup-ftp.md000066400000000000000000000037111225146730500167450ustar00rootroot00000000000000% bup-ftp(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-ftp - ftp-like client for navigating bup repositories # SYNOPSIS bup ftp # DESCRIPTION `bup ftp` is a command-line tool for navigating bup repositories. It has commands similar to the Unix `ftp`(1) command. The file hierarchy is the same as that shown by `bup-fuse`(1) and `bup-ls`(1). 
Note: if your system has the python-readline library installed, you can use the \ key to complete filenames while navigating your backup data. This will save you a lot of typing. # COMMANDS The following commands are available inside `bup ftp`: ls [-s] [-a] [*path*] : print the contents of a directory. If no path argument is given, the current directory's contents are listed. If -a is given, also include hidden files (files which start with a `.` character). If -s is given, each file is displayed with its hash from the bup archive to its left. cd *dirname* : change to a different working directory pwd : print the path of the current working directory cat *filenames...* : print the contents of one or more files to stdout get *filename* *localname* : download the contents of *filename* and save it to disk as *localname*. If *localname* is omitted, uses *filename* as the local name. mget *filenames...* : download the contents of the given *filenames* and stores them to disk under the same names. The filenames may contain Unix filename globs (`*`, `?`, etc.) help : print a list of available commands quit : exit the `bup ftp` client # EXAMPLE $ bup ftp bup> ls mybackup/ yourbackup/ bup> cd mybackup/ bup> ls 2010-02-05-185507@ 2010-02-05-185508@ latest@ bup> cd latest/ bup> ls (...etc...) bup> get myfile Saving 'myfile' bup> quit # SEE ALSO `bup-fuse`(1), `bup-ls`(1), `bup-save`(1), `bup-restore`(1) # BUP Part of the `bup`(1) suite. bup-0.25/Documentation/bup-fuse.md000066400000000000000000000023161225146730500171160ustar00rootroot00000000000000% bup-fuse(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-fuse - mount a bup repository as a filesystem # SYNOPSIS bup fuse [-d] [-f] [-o] \ # DESCRIPTION `bup fuse` opens a bup repository and exports it as a `fuse`(7) userspace filesystem. This feature is only available on systems (such as Linux) which support FUSE. **WARNING**: bup fuse is still experimental and does not enforce any file permissions! 
All files will be readable by all users. When you're done accessing the mounted fuse filesystem, you should unmount it with `umount`(8). # OPTIONS -d, \--debug : run in the foreground and print FUSE debug information for each request. -f, \--foreground : run in the foreground and exit only when the filesystem is unmounted. -o, \--allow-other : permit other users to access the filesystem. Necessary for exporting the filesystem via Samba, for example. # EXAMPLE rm -rf /tmp/buptest mkdir /tmp/buptest sudo bup fuse -d /tmp/buptest ls /tmp/buptest/*/latest ... umount /tmp/buptest # SEE ALSO `fuse`(7), `fusermount`(1), `bup-ls`(1), `bup-ftp`(1), `bup-restore`(1), `bup-web`(1) # BUP Part of the `bup`(1) suite. bup-0.25/Documentation/bup-help.md000066400000000000000000000010671225146730500171060ustar00rootroot00000000000000% bup-help(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-help - open the documentation for a given bup command # SYNOPSIS bup help \ # DESCRIPTION `bup help ` opens the documentation for the given command. This is currently equivalent to typing `man bup-`. # EXAMPLE $ bup help help (Imagine that this man page was pasted below, recursively. Since that would cause an endless loop we include this silly remark instead. Chicken.) # BUP Part of the `bup`(1) suite. bup-0.25/Documentation/bup-import-rdiff-backup.md000066400000000000000000000011561225146730500220220ustar00rootroot00000000000000% bup-import-rdiff-backup(1) Bup %BUP_VERSION% % Zoran Zaric % %BUP_DATE% # NAME bup-import-rdiff-backup - import a rdiff-backup archive # SYNOPSIS bup import-rdiff-backup [-n] # DESCRIPTION `bup import-rdiff-backup` imports a rdiff-backup archive. The timestamps for the backups are preserved and the path to the rdiff-backup archive is stripped from the paths. # OPTIONS -n,--dry-run : don't do anything just print out what would be done # EXAMPLES $ bup import-rdiff-backup /.snapshots legacy-rdiff-backup # BUP Part of the `bup`(1) suite. 
bup-0.25/Documentation/bup-import-rsnapshot.md000066400000000000000000000013421225146730500215030ustar00rootroot00000000000000% bup-import-rsnapshot(1) Bup %BUP_VERSION% % Zoran Zaric % %BUP_DATE% # NAME bup-import-rsnapshot - import a rsnapshot archive # SYNOPSIS bup import-rsnapshot [-n] \ [\] # SYNOPSIS `bup import-rsnapshot` imports an rsnapshot archive. The timestamps for the backups are preserved and the path to the rsnapshot archive is stripped from the paths. `bup import-rsnapshot` either imports the whole archive or imports all backups only for a given backuptarget. # OPTIONS -n, \--dry-run : don't do anything just print out what would be done # EXAMPLES $ bup import-rsnapshot /.snapshots $ bup import-rsnapshot /.snapshots host1 # BUP Part of the `bup`(1) suite. bup-0.25/Documentation/bup-index.md000066400000000000000000000147431225146730500172720ustar00rootroot00000000000000% bup-index(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-index - print and/or update the bup filesystem index # SYNOPSIS bup index \<-p|-m|-s|-u\> [-H] [-l] [-x] [\--fake-valid] [\--no-check-device] [\--fake-invalid] [\--check] [\--clear] [-f *indexfile*] [\--exclude *path*] [\--exclude-from *filename*] [\--exclude-rx *pattern*] [-v] \ # DESCRIPTION `bup index` prints and/or updates the bup filesystem index, which is a cache of the filenames, attributes, and sha-1 hashes of each file and directory in the filesystem. The bup index is similar in function to the `git`(1) index, and can be found in `$BUP_DIR/bupindex`. Creating a backup in bup consists of two steps: updating the index with `bup index`, then actually backing up the files (or a subset of the files) with `bup save`. The separation exists for these reasons: 1. There is more than one way to generate a list of files that need to be backed up. For example, you might want to use `inotify`(7) or `dnotify`(7). 2. 
Even if you back up files to multiple destinations (for added redundancy), the file names, attributes, and hashes will be the same each time. Thus, you can save the trouble of repeatedly re-generating the list of files for each backup set. 3. You may want to use the data tracked by bup index for other purposes (such as speeding up other programs that need the same information). # NOTES bup makes accommodations for the expected "worst-case" filesystem timestamp resolution -- currently one second; examples include VFAT, ext2, ext3, small ext4, etc. Since bup cannot know the filesystem timestamp resolution, and could be traversing multiple filesystems during any given run, it always assumes that the resolution may be no better than one second. As a practical matter, this means that index updates are a bit imprecise, and so `bup save` may occasionally record filesystem changes that you didn't expect. That's because, during an index update, if bup encounters a path whose actual timestamps are more recent than one second before the update started, bup will set the index timestamps for that path (mtime and ctime) to exactly one second before the run, -- effectively capping those values. This ensures that no subsequent changes to those paths can result in timestamps that are identical to those in the index. If that were possible, bup could overlook the modifications. You can see the effect of this behavior in this example (assume that less than one second elapses between the initial file creation and first index run): $ touch src/1 src/2 # A "sleep 1" here would avoid the unexpected save. $ bup index src $ bup save -n src src # Saves 1 and 2. $ date > src/1 $ bup index src $ date > src/2 # Not indexed. $ bup save -n src src # But src/2 is saved anyway. Strictly speaking, bup should not notice the change to src/2, but it does, due to the accommodations described above. # MODES -u, \--update : recursively update the index for the given filenames and their descendants. 
One or more filenames must be given. If no mode option is given, this is the default. -p, \--print : print the contents of the index. If filenames are given, shows the given entries and their descendants. If no filenames are given, shows the entries starting at the current working directory (.). -m, \--modified : prints only files which are marked as modified (ie. changed since the most recent backup) in the index. Implies `-p`. -s, \--status : prepend a status code (A, M, D, or space) before each filename. Implies `-p`. The codes mean, respectively, that a file is marked in the index as added, modified, deleted, or unchanged since the last backup. # OPTIONS -H, \--hash : for each file printed, prepend the most recently recorded hash code. The hash code is normally generated by `bup save`. For objects which have not yet been backed up, the hash code will be 0000000000000000000000000000000000000000. Note that the hash code is printed even if the file is known to be modified or deleted in the index (ie. the file on the filesystem no longer matches the recorded hash). If this is a problem for you, use `--status`. -l, \--long : print more information about each file, in a similar format to the `-l` option to `ls`(1). -x, \--xdev, \--one-file-system : don't cross filesystem boundaries when recursing through the filesystem. Only applicable if you're using `-u`. \--fake-valid : mark specified filenames as up-to-date even if they aren't. This can be useful for testing, or to avoid unnecessarily backing up files that you know are boring. \--fake-invalid : mark specified filenames as not up-to-date, forcing the next "bup save" run to re-check their contents. \--check : carefully check index file integrity before and after updating. Mostly useful for automated tests. \--clear : clear the default index. -f, \--indexfile=*indexfile* : use a different index filename instead of `$BUP_DIR/bupindex`. 
\--exclude=*path* : exclude *path* from the backup; bup will not expand *path* in any way (can be used more than once). \--exclude-from=*filename* : read --exclude paths from *filename*, one path per-line (can be used more than once). \--exclude-rx=*pattern* : exclude any path matching *pattern*, which must be a Python regular expression (http://docs.python.org/library/re.html). The pattern will be compared against the full path, without anchoring, so "x/y" will match "ox/yard" or "box/yards". To exclude the contents of /tmp, but not the directory itself, use "^/tmp/.". (can be specified more than once) Examples: * '/foo$' - exclude any file named foo * '/foo/$' - exclude any directory named foo * '/foo/.' - exclude the content of any directory named foo * '^/tmp/.' - exclude root-level /tmp's content, but not /tmp itself \--no-check-device : don't mark an entry invalid if the device number (stat(2) st_dev) changes. This can be useful when indexing remote, automounted, or (LVM) snapshot filesystems. -v, \--verbose : increase log output during update (can be used more than once). With one `-v`, print each directory as it is updated; with two `-v`, print each file too. # EXAMPLE bup index -vux /etc /var /usr # SEE ALSO `bup-save`(1), `bup-drecurse`(1), `bup-on`(1) # BUP Part of the `bup`(1) suite. bup-0.25/Documentation/bup-init.md000066400000000000000000000015551225146730500171230ustar00rootroot00000000000000% bup-init(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-init - initialize a bup repository # SYNOPSIS [BUP_DIR=*localpath*] bup init [-r *host*:*path*] # DESCRIPTION `bup init` initializes your local bup repository. By default, BUP_DIR is `~/.bup`. # OPTIONS -r, \--remote=*host*:*path* : Initialize not only the local repository, but also the remote repository given by the *host* and *path*. This is not necessary if you intend to back up to the default location on the server (ie. a blank *path*). 
The connection to the remote server is made with SSH. If you'd like to specify which port, user or private key to use for the SSH connection, we recommend you use the `~/.ssh/config` file. # EXAMPLE bup init # SEE ALSO `bup-fsck`(1), `ssh_config`(5) # BUP Part of the `bup`(1) suite. bup-0.25/Documentation/bup-join.md000066400000000000000000000030261225146730500171120ustar00rootroot00000000000000% bup-join(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-join - concatenate files from a bup repository # SYNOPSIS bup join [-r *host*:*path*] [refs or hashes...] # DESCRIPTION `bup join` is roughly the opposite operation to `bup-split`(1). You can use it to retrieve the contents of a file from a local or remote bup repository. The supplied list of refs or hashes can be in any format accepted by `git`(1), including branch names, commit ids, tree ids, or blob ids. If no refs or hashes are given on the command line, `bup join` reads them from stdin instead. # OPTIONS -r, \--remote=*host*:*path* : Retrieves objects from the given remote repository instead of the local one. *path* may be blank, in which case the default remote repository is used. The connection to the remote server is made with SSH. If you'd like to specify which port, user or private key to use for the SSH connection, we recommend you use the `~/.ssh/config` file. # EXAMPLE # split and then rejoin a file using its tree id TREE=$(tar -cvf - /etc | bup split -t) bup join $TREE | tar -tf - # make two backups, then get the second-most-recent. # mybackup~1 is git(1) notation for the second most # recent commit on the branch named mybackup. tar -cvf - /etc | bup split -n mybackup tar -cvf - /etc | bup split -n mybackup bup join mybackup~1 | tar -tf - # SEE ALSO `bup-split`(1), `bup-save`(1), `bup-cat-file`, `ssh_config`(5) # BUP Part of the `bup`(1) suite. 
bup-0.25/Documentation/bup-ls.md000066400000000000000000000032341225146730500165720ustar00rootroot00000000000000% bup-ls(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-ls - list the contents of a bup repository # SYNOPSIS bup ls [-s] [-a] \ # DESCRIPTION `bup ls` lists files and directories in your bup repository using the same directory hierarchy as they would have with `bup-fuse`(1). The top level directory contains the branch (corresponding to the `-n` option in `bup save`), the next level is the date of the backup, and subsequent levels correspond to files in the backup. When `bup ls` is asked to output on a tty, it formats its output in columns so that it can list as much as possible in as few lines as possible. However, when `bup ls` is asked to output to something other than a tty (say you pipe the output to another command, or you redirect it to a file), it will output one file name per line. This makes the listing easier to parse with external tools. Note that `bup ls` doesn't show hidden files by default and one needs to use the `-a` option to show them. Files are hidden when their name begins with a dot. For example, on the topmost level, the special directories named `.commit` and `.tag` are hidden directories. Once you have identified the file you want using `bup ls`, you can view its contents using `bup join` or `git show`. # OPTIONS -s, \--hash : show hash for each file/directory. -a, \--all : show hidden files. -l : show file sizes. \--human-readable : print human readable file sizes (i.e. 3.9K, 4.7M) # EXAMPLE bup ls /myserver/latest/etc/profile bup ls -a / # SEE ALSO `bup-join`(1), `bup-fuse`(1), `bup-ftp`(1), `bup-save`(1), `git-show`(1) # BUP Part of the `bup`(1) suite. 
bup-0.25/Documentation/bup-margin.md000066400000000000000000000043321225146730500174310ustar00rootroot00000000000000% bup-margin(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-margin - figure out your deduplication safety margin # SYNOPSIS bup margin [options...] # DESCRIPTION `bup margin` iterates through all objects in your bup repository, calculating the largest number of prefix bits shared between any two entries. This number, `n`, identifies the longest subset of SHA-1 you could use and still encounter a collision between your object ids. For example, one system that was tested had a collection of 11 million objects (70 GB), and `bup margin` returned 45. That means a 46-bit hash would be sufficient to avoid all collisions among that set of objects; each object in that repository could be uniquely identified by its first 46 bits. The number of bits needed seems to increase by about 1 or 2 for every doubling of the number of objects. Since SHA-1 hashes have 160 bits, that leaves 115 bits of margin. Of course, because SHA-1 hashes are essentially random, it's theoretically possible to use many more bits with far fewer objects. If you're paranoid about the possibility of SHA-1 collisions, you can monitor your repository by running `bup margin` occasionally to see if you're getting dangerously close to 160 bits. # OPTIONS \--predict : Guess the offset into each index file where a particular object will appear, and report the maximum deviation of the correct answer from the guess. This is potentially useful for tuning an interpolation search algorithm. \--ignore-midx : don't use `.midx` files, use only `.idx` files. This is only really useful when used with `--predict`. # EXAMPLE $ bup margin Reading indexes: 100.00% (1612581/1612581), done. 
40 40 matching prefix bits 1.94 bits per doubling 120 bits (61.86 doublings) remaining 4.19338e+18 times larger is possible Everyone on earth could have 625878182 data sets like yours, all in one repository, and we would expect 1 object collision. $ bup margin --predict PackIdxList: using 1 index. Reading indexes: 100.00% (1612581/1612581), done. 915 of 1612581 (0.057%) # SEE ALSO `bup-midx`(1), `bup-save`(1) # BUP Part of the `bup`(1) suite. bup-0.25/Documentation/bup-memtest.md000066400000000000000000000113421225146730500176310ustar00rootroot00000000000000% bup-memtest(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-memtest - test bup memory usage statistics # SYNOPSIS bup memtest [options...] # DESCRIPTION `bup memtest` opens the list of pack indexes in your bup repository, then searches the list for a series of nonexistent objects, printing memory usage statistics after each cycle. Because of the way Unix systems work, the output will usually show a large (and unchanging) value in the VmSize column, because mapping the index files in the first place takes a certain amount of virtual address space. However, this virtual memory usage is entirely virtual; it doesn't take any of your RAM. Over time, bup uses *parts* of the indexes, which need to be loaded from disk, and this is what causes an increase in the VmRSS column. # OPTIONS -n, \--number=*number* : set the number of objects to search for during each cycle (ie. before printing a line of output) -c, \--cycles=*cycles* : set the number of cycles (ie. the number of lines of output after the first). The first line of output is always 0 (ie. the baseline before searching for any objects). \--ignore-midx : ignore any `.midx` files created by `bup midx`. This allows you to compare memory performance with and without using midx. \--existing : search for existing objects instead of searching for random nonexistent ones. This can greatly affect memory usage and performance. 
Note that most of the time, `bup save` spends most of its time searching for nonexistent objects, since existing ones are probably in unmodified files that we won't be trying to back up anyway. So the default behaviour reflects real bup performance more accurately. But you might want this option anyway just to make sure you haven't made searching for existing objects much worse than before. # EXAMPLE $ bup memtest -n300 -c5 PackIdxList: using 1 index. VmSize VmRSS VmData VmStk 0 20824 kB 4528 kB 1980 kB 84 kB 300 20828 kB 5828 kB 1984 kB 84 kB 600 20828 kB 6844 kB 1984 kB 84 kB 900 20828 kB 7836 kB 1984 kB 84 kB 1200 20828 kB 8736 kB 1984 kB 84 kB 1500 20828 kB 9452 kB 1984 kB 84 kB $ bup memtest -n300 -c5 --ignore-midx PackIdxList: using 361 indexes. VmSize VmRSS VmData VmStk 0 27444 kB 6552 kB 2516 kB 84 kB 300 27448 kB 15832 kB 2520 kB 84 kB 600 27448 kB 17220 kB 2520 kB 84 kB 900 27448 kB 18012 kB 2520 kB 84 kB 1200 27448 kB 18388 kB 2520 kB 84 kB 1500 27448 kB 18556 kB 2520 kB 84 kB # DISCUSSION When optimizing bup indexing, the first goal is to keep the VmRSS reasonably low. However, it might eventually be necessary to swap in all the indexes, simply because you're searching for a lot of objects, and this will cause your RSS to grow as large as VmSize eventually. The key word here is *eventually*. As long as VmRSS grows reasonably slowly, the amount of disk activity caused by accessing pack indexes is reasonably small. If it grows quickly, bup will probably spend most of its time swapping index data from disk instead of actually running your backup, so backups will run very slowly. The purpose of `bup memtest` is to give you an idea of how fast your memory usage is growing, and to help in optimizing bup for better memory use. If you have memory problems you might be asked to send the output of `bup memtest` to help diagnose the problems. Tip: try using `bup midx -a` or `bup midx -f` to see if it helps reduce your memory usage. 
Trivia: index memory usage in bup (or git) is only really a problem when adding a large number of previously unseen objects. This is because for each object, we need to absolutely confirm that it isn't already in the database, which requires us to search through *all* the existing pack indexes to ensure that none of them contain the object in question. In the more obvious case of searching for objects that *do* exist, the objects being searched for are typically related in some way, which means they probably all exist in a small number of packfiles, so memory usage will be constrained to just those packfile indexes. Since git users typically don't add a lot of files in a single run, git doesn't really need a program like `bup midx`. bup, on the other hand, spends most of its time backing up files it hasn't seen before, so its memory usage patterns are different. # SEE ALSO `bup-midx`(1) # BUP Part of the `bup`(1) suite. bup-0.25/Documentation/bup-meta.md000066400000000000000000000104701225146730500171020ustar00rootroot00000000000000% bup-meta(1) Bup %BUP_VERSION% % Rob Browning % %BUP_DATE% # NAME bup-meta - create or extract a metadata archive # SYNOPSIS bup meta \--create ~ [-R] [-v] [-q] [\--no-symlinks] [\--no-paths] [-f *file*] \<*paths*...\> bup meta \--list ~ [-v] [-q] [-f *file*] bup meta \--extract ~ [-v] [-q] [\--numeric-ids] [\--no-symlinks] [-f *file*] bup meta \--start-extract ~ [-v] [-q] [\--numeric-ids] [\--no-symlinks] [-f *file*] bup meta \--finish-extract ~ [-v] [-q] [\--numeric-ids] [-f *file*] bup meta \--edit ~ [\--set-uid *uid* | \--set-gid *gid* | \--set-user *user* | \--set-group *group* | ...] \<*paths*...\> # DESCRIPTION `bup meta` creates, extracts, or otherwise manipulates metadata archives. A metadata archive contains the metadata information (timestamps, ownership, access permissions, etc.) for a set of filesystem paths. See `bup-restore`(1) for a description of the way ownership metadata is restored. 
# OPTIONS -c, \--create : Create a metadata archive for the specified *path*s. Write the archive to standard output unless `--file` is specified. -t, \--list : Display information about the metadata in an archive. Read the archive from standard input unless `--file` is specified. -x, \--extract : Extract a metadata archive. Conceptually, perform `--start-extract` followed by `--finish-extract`. Read the archive from standard input unless `--file` is specified. \--start-extract : Build a filesystem tree matching the paths stored in a metadata archive. By itself, this command does not produce a full restoration of the metadata. For a full restoration, this command must be followed by a call to `--finish-extract`. Once this command has finished, all of the normal files described by the metadata will exist and be empty. Restoring the data in those files, and then calling `--finish-extract` should restore the original tree. The archive will be read from standard input unless `--file` is specified. \--finish-extract : Finish applying the metadata stored in an archive to the filesystem. Normally, this command should follow a call to `--start-extract`. The archive will be read from standard input unless `--file` is specified. \--edit : Edit metadata archives. The result will be written to standard output unless `--file` is specified. -f, \--file=*filename* : Read the metadata archive from *filename* or write it to *filename* as appropriate. If *filename* is "-", then read from standard input or write to standard output. -R, \--recurse : Recursively descend into subdirectories during `--create`. \--numeric-ids : Apply numeric IDs (user, group, etc.) rather than names during `--extract` or `--finish-extract`. \--symlinks : Record symbolic link targets when creating an archive, or restore symbolic links when extracting an archive (during `--extract` or `--start-extract`). This option is enabled by default. Specify `--no-symlinks` to disable it. 
\--paths : Record pathnames when creating an archive. This option is enabled by default. Specify `--no-paths` to disable it. \--set-uid=*uid* : Set the metadata uid to the integer *uid* during `--edit`. \--set-gid=*gid* : Set the metadata gid to the integer *gid* during `--edit`. \--set-user=*user* : Set the metadata user to *user* during `--edit`. \--unset-user : Remove the metadata user during `--edit`. \--set-group=*group* : Set the metadata user to *group* during `--edit`. \--unset-group : Remove the metadata group during `--edit`. -v, \--verbose : Be more verbose (can be used more than once). -q, \--quiet : Be quiet. # EXAMPLES # Create a metadata archive for /etc. $ bup meta -cRf etc.meta /etc bup: removing leading "/" from "/etc" # Extract the etc.meta archive (files will be empty). $ mkdir tmp && cd tmp $ bup meta -xf ../etc.meta $ ls etc # Restore /etc completely. $ mkdir tmp && cd tmp $ bup meta --start-extract -f ../etc.meta ...fill in all regular file contents using some other tool... $ bup meta --finish-extract -f ../etc.meta # Change user/uid to root. $ bup meta --edit --set-uid 0 --set-user root \ src.meta > dest.meta # BUGS Hard links are not handled yet. # BUP Part of the `bup`(1) suite. bup-0.25/Documentation/bup-midx.md000066400000000000000000000065131225146730500171200ustar00rootroot00000000000000% bup-midx(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-midx - create a multi-index (`.midx`) file from several `.idx` files # SYNOPSIS bup midx [-o *outfile*] \<-a|-f|*idxnames*...\> # DESCRIPTION `bup midx` creates a multi-index (`.midx`) file from one or more git pack index (`.idx`) files. Note: you should no longer need to run this command by hand. It gets run automatically by `bup-save`(1) and similar commands. # OPTIONS -o, \--output=*filename.midx* : use the given output filename for the `.midx` file. Default is auto-generated. 
-a, \--auto : automatically generate new `.midx` files for any `.idx` files where it would be appropriate. -f, \--force : force generation of a single new `.midx` file containing *all* your `.idx` files, even if other `.midx` files already exist. This will result in the fastest backup performance, but may take a long time to run. \--dir=*packdir* : specify the directory containing the `.idx`/`.midx` files to work with. The default is $BUP_DIR/objects/pack and $BUP_DIR/indexcache/*. \--max-files : maximum number of `.idx` files to open at a time. You can use this if you have an especially small number of file descriptors available, so that midx can complete (though possibly non-optimally) even if it can't open all your `.idx` files at once. The default value of this option should be fine for most people. \--check : validate a `.midx` file by ensuring that all objects in its contained `.idx` files exist inside the `.midx`. May be useful for debugging. # EXAMPLE $ bup midx -a Merging 21 indexes (2278559 objects). Table size: 524288 (17 bits) Reading indexes: 100.00% (2278559/2278559), done. midx-b66d7c9afc4396187218f2936a87b865cf342672.midx # DISCUSSION By default, bup uses git-formatted pack files, which consist of a pack file (containing objects) and an idx file (containing a sorted list of object names and their offsets in the .pack file). Normal idx files are convenient because it means you can use `git`(1) to access your backup datasets. However, idx files can get slow when you have a lot of very large packs (which git typically doesn't have, but bup often does). bup `.midx` files consist of a single sorted list of all the objects contained in all the .pack files it references. This list can be binary searched in about log2(m) steps, where m is the total number of objects. To further speed up the search, midx files also have a variable-sized fanout table that reduces the first n steps of the binary search. 
With the help of this fanout table, bup can narrow down which page of the midx file a given object id would be in (if it exists) with a single lookup. Thus, typical searches will only need to swap in two pages: one for the fanout table, and one for the object id. midx files are most useful when creating new backups, since searching for a nonexistent object in the repository necessarily requires searching through *all* the index files to ensure that it does not exist. (Searching for objects that *do* exist can be optimized; for example, consecutive objects are often stored in the same pack, so we can search that one first using an MRU algorithm.) # SEE ALSO `bup-save`(1), `bup-margin`(1), `bup-memtest`(1) # BUP Part of the `bup`(1) suite. bup-0.25/Documentation/bup-mux.md000066400000000000000000000010131225146730500167560ustar00rootroot00000000000000% bup-mux(1) Bup %BUP_VERSION% % Brandon Low % %BUP_DATE% # NAME bup-mux - multiplexes data and error streams over a connection # SYNOPSIS bup mux \ [options...] # DESCRIPTION `bup mux` is used in the bup client-server protocol to send both data and debugging/error output over the single connection stream. `bup mux server` might be used in an inetd server setup. # OPTIONS command : the subcommand to run options : options for command # BUP Part of the `bup`(1) suite. bup-0.25/Documentation/bup-newliner.md000066400000000000000000000024561225146730500200040ustar00rootroot00000000000000% bup-newliner(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-newliner - make sure progress messages don't overlap with output # SYNOPSIS \ 2>&1 | bup newliner # DESCRIPTION `bup newliner` is run automatically by bup. You shouldn't need it unless you're using it in some other program. Progress messages emitted by bup (and some other tools) are of the form "Message ### content\\r", that is, a status message containing a variable-length number, followed by a carriage return character and no newline. 
If these messages are printed more than once, they overwrite each other, so what the user sees is a single line with a continually-updating number. This works fine until some other message is printed. For example, progress messages are usually printed to stderr, but other program messages might be printed to stdout. If those messages are shorter than the progress message line, the screen will be left with weird looking artifacts as the two messages get mixed together. `bup newliner` prints extra space characters at the right time to make sure that doesn't happen. If you're running a program that has problems with these artifacts, you can usually fix them by piping its stdout *and* its stderr through bup newliner. # BUP Part of the `bup`(1) suite. bup-0.25/Documentation/bup-on.md000066400000000000000000000045051225146730500165720ustar00rootroot00000000000000% bup-on(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-on - run a bup server locally and client remotely # SYNOPSIS bup on \ index ... bup on \ save ... bup on \ split ... # DESCRIPTION `bup on` runs the given bup command on the given host using ssh. It runs a bup server on the local machine, so that commands like `bup save` on the remote machine can back up to the local machine. (You don't need to provide a `--remote` option to `bup save` in order for this to work.) See `bup-index`(1), `bup-save`(1), and so on for details of how each subcommand works. This 'reverse mode' operation is useful when the machine being backed up isn't supposed to be able to ssh into the backup server. For example, your backup server can be hidden behind a one-way firewall on a private or dynamic IP address; using an ssh key, it can be authorized to ssh into each of your important machines. After connecting to each destination machine, it initiates a backup, receiving the resulting data and storing in its local repository. 
For example, if you run several virtual private Linux machines on a remote hosting provider, you could back them up to a local (much less expensive) computer in your basement. # EXAMPLES # First index the files on the remote server $ bup on myserver index -vux /etc bup server: reading from stdin. Indexing: 2465, done. bup: merging indexes (186668/186668), done. bup server: done # Now save the files from the remote server to the # local $BUP_DIR $ bup on myserver save -n myserver-backup /etc bup server: reading from stdin. bup server: command: 'list-indexes' PackIdxList: using 7 indexes. Saving: 100.00% (241/241k, 648/648 files), done. bup server: received 55 objects. Indexing objects: 100% (55/55), done. bup server: command: 'quit' bup server: done # Now we can look at the resulting repo on the local # machine $ bup ftp 'cat /myserver-backup/latest/etc/passwd' root:x:0:0:root:/root:/bin/bash daemon:x:1:1:daemon:/usr/sbin:/bin/sh bin:x:2:2:bin:/bin:/bin/sh sys:x:3:3:sys:/dev:/bin/sh sync:x:4:65534:sync:/bin:/bin/sync ... # SEE ALSO `bup-index`(1), `bup-save`(1), `bup-split`(1) # BUP Part of the `bup`(1) suite. bup-0.25/Documentation/bup-random.md000066400000000000000000000043261225146730500174370ustar00rootroot00000000000000% bup-random(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-random - generate a stream of random output # SYNOPSIS bup random [-S seed] [-fv] \ # DESCRIPTION `bup random` produces a stream of pseudorandom output bytes to stdout. Note: the bytes are *not* generated using a cryptographic algorithm and should never be used for security. Note that the stream of random bytes will be identical every time `bup random` is run, unless you provide a different `seed` value. This is intentional: the purpose of this program is to be able to run repeatable tests on large amounts of data, so we want identical data every time. 
`bup random` generates about 240 megabytes per second on a modern test system (Intel Core2), which is faster than you could achieve by reading data from most disks. Thus, it can be helpful when running microbenchmarks. # OPTIONS \ : the number of bytes of data to generate. Can be used with the suffices `k`, `M`, or `G` to indicate kilobytes, megabytes, or gigabytes, respectively. -S, \--seed=*seed* : use the given value to seed the pseudorandom number generator. The generated output stream will be identical for every stream seeded with the same value. The default seed is 1. A seed value of 0 is equivalent to 1. -f, \--force : generate output even if stdout is a tty. (Generating random data to a tty is generally considered ill-advised, but you can do if you really want.) -v, \--verbose : print a progress message showing the number of bytes that has been output so far. # EXAMPLES $ bup random 1k | sha1sum 2108c55d0a2687c8dacf9192677c58437a55db71 - $ bup random -S1 1k | sha1sum 2108c55d0a2687c8dacf9192677c58437a55db71 - $ bup random -S2 1k | sha1sum f71acb90e135d98dad7efc136e8d2cc30573e71a - $ time bup random 1G >/dev/null Random: 1024 Mbytes, done. real 0m4.261s user 0m4.048s sys 0m0.172s $ bup random 1G | bup split -t --bench Random: 1024 Mbytes, done. bup: 1048576.00kbytes in 18.59 secs = 56417.78 kbytes/sec 1092599b9c7b2909652ef1e6edac0796bfbfc573 # BUP Part of the `bup`(1) suite. bup-0.25/Documentation/bup-restore.md000066400000000000000000000213431225146730500176400ustar00rootroot00000000000000% bup-restore(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-restore - extract files from a backup set # SYNOPSIS bup restore [\--outdir=*outdir*] [\--exclude-rx *pattern*] [-v] [-q] \ # DESCRIPTION `bup restore` extracts files from a backup set (created with `bup-save`(1)) to the local filesystem. The specified *paths* are of the form /_branch_/_revision_/_some/where_. 
The components of the path are as follows: branch : the name of the backup set to restore from; this corresponds to the `--name` (`-n`) option to `bup save`. revision : the revision of the backup set to restore. The revision *latest* is always the most recent backup on the given branch. You can discover other revisions using `bup ls /branch`. some/where : the previously saved path (after any stripping/grafting) that you want to restore. For example, `etc/passwd`. If _some/where_ names a directory, `bup restore` will restore that directory and then recursively restore its contents. If _some/where_ names a directory and ends with a slash (ie. path/to/dir/), `bup restore` will restore the children of that directory directly to the current directory (or the `--outdir`). If _some/where_ does not end in a slash, the children will be restored to a subdirectory of the current directory. If _some/where_ names a directory and ends in '/.' (ie. path/to/dir/.), `bup restore` will do exactly what it would have done for path/to/dir, and then restore _dir_'s metadata to the current directory (or the `--outdir`). See the EXAMPLES section. Whenever path metadata is available, `bup restore` will attempt to restore it. When restoring ownership, bup implements tar/rsync-like semantics. It will normally prefer user and group names to uids and gids when they're available, but it will not try to restore the user unless running as root, and it will fall back to the numeric uid or gid whenever the metadata contains a user or group name that doesn't exist on the current system. The use of user and group names can be disabled via `--numeric-ids` (which can be important when restoring a chroot, for example), and as a special case, a uid or gid of 0 will never be remapped by name. Additionally, some systems don't allow setting a uid/gid that doesn't correspond with a known user/group. On those systems, bup will log an error for each relevant path. 
The `--map-user`, `--map-group`, `--map-uid`, `--map-gid` options may be used to adjust the available ownership information before any of the rules above are applied, but note that due to those rules, `--map-uid` and `--map-gid` will have no effect whenever a path has a valid user or group. In those cases, either `--numeric-ids` must be specified, or the user or group must be cleared by a suitable `--map-user foo=` or `--map-group foo=`. Hardlinks will also be restored when possible, but at least currently, no links will be made to targets outside the restore tree, and if the restore tree spans a different arrangement of filesystems from the save tree, some hardlink sets may not be completely restored. Also note that changing hardlink sets on disk between index and save may produce unexpected results. With the current implementation, bup will attempt to recreate any given hardlink set as it existed at index time, even if all of the files in the set weren't still hardlinked (but were otherwise identical) at save time. Note that during the restoration process, access to data within the restore tree may be more permissive than it was in the original source. Unless security is irrelevant, you must restore to a private subdirectory, and then move the resulting tree to its final position. See the EXAMPLES section for a demonstration. # OPTIONS -C, \--outdir=*outdir* : create and change to directory *outdir* before extracting the files. \--numeric-ids : restore numeric IDs (user, group, etc.) rather than names. \--exclude-rx=*pattern* : exclude any path matching *pattern*, which must be a Python regular expression (http://docs.python.org/library/re.html). The pattern will be compared against the full path rooted at the top of the restore tree, without anchoring, so "x/y" will match "ox/yard" or "box/yards". To exclude the contents of /tmp, but not the directory itself, use "^/tmp/.". 
(can be specified more than once) Note that the root of the restore tree (which matches '^/') is the top of the archive tree being restored, and has nothing to do with the filesystem destination. Given "restore ... /foo/latest/etc/", the pattern '^/passwd$' would match if a file named passwd had been saved as '/foo/latest/etc/passwd'. Examples: * '/foo$' - exclude any file named foo * '/foo/$' - exclude any directory named foo * '/foo/.' - exclude the content of any directory named foo * '^/tmp/.' - exclude root-level /tmp's content, but not /tmp itself \--map-user *old*=*new* : for every path, restore the *old* (saved) user name as *new*. Specifying "" for *new* will clear the user. For example "--map-user foo=" will allow the uid to take effect for any path that originally had a user of "foo", unless countermanded by a subsequent "--map-user foo=..." specification. See DESCRIPTION above for further information. \--map-group *old*=*new* : for every path, restore the *old* (saved) group name as *new*. Specifying "" for *new* will clear the group. For example "--map-group foo=" will allow the gid to take effect for any path that originally had a group of "foo", unless countermanded by a subsequent "--map-group foo=..." specification. See DESCRIPTION above for further information. \--map-uid *old*=*new* : for every path, restore the *old* (saved) uid as *new*, unless countermanded by a subsequent "--map-uid *old*=..." option. Note that the uid will only be relevant for paths with no user. See DESCRIPTION above for further information. \--map-gid *old*=*new* : for every path, restore the *old* (saved) gid as *new*, unless countermanded by a subsequent "--map-gid *old*=..." option. Note that the gid will only be relevant for paths with no user. See DESCRIPTION above for further information. -v, \--verbose : increase log output. Given once, prints every directory as it is restored; given twice, prints every file and directory. 
-q, \--quiet : don't show the progress meter. Normally, is stderr is a tty, a progress display is printed that shows the total number of files restored. # EXAMPLE Create a simple test backup set: $ bup index -u /etc $ bup save -n mybackup /etc/passwd /etc/profile Restore just one file: $ bup restore /mybackup/latest/etc/passwd Restoring: 1, done. $ ls -l passwd -rw-r--r-- 1 apenwarr apenwarr 1478 2010-09-08 03:06 passwd Restore etc to test (no trailing slash): $ bup restore -C test /mybackup/latest/etc Restoring: 3, done. $ find test test test/etc test/etc/passwd test/etc/profile Restore the contents of etc to test (trailing slash): $ bup restore -C test /mybackup/latest/etc/ Restoring: 2, done. $ find test test test/passwd test/profile Restore the contents of etc and etc's metadata to test (trailing "/."): $ bup restore -C test /mybackup/latest/etc/. Restoring: 2, done. # At this point test and etc's metadata will match. $ find test test test/passwd test/profile Restore a tree without risk of unauthorized access: # mkdir --mode 0700 restore-tmp # bup restore -C restore-tmp /somebackup/latest/foo Restoring: 42, done. # mv restore-tmp/foo somewhere # rmdir restore-tmp Restore a tree, remapping an old user and group to a new user and group: # ls -l /original/y -rw-r----- 1 foo baz 3610 Nov 4 11:31 y # bup restore -C dest --map-user foo=bar --map-group baz=bax /x/latest/y Restoring: 42, done. # ls -l dest/y -rw-r----- 1 bar bax 3610 Nov 4 11:31 y Restore a tree, remapping an old uid to a new uid. Note that the old user must be erased so that bup won't prefer it over the uid: # ls -l /original/y -rw-r----- 1 foo baz 3610 Nov 4 11:31 y # ls -ln /original/y -rw-r----- 1 1000 1007 3610 Nov 4 11:31 y # bup restore -C dest --map-user foo= --map-uid 1000=1042 /x/latest/y Restoring: 97, done. 
# ls -ln dest/y -rw-r----- 1 1042 1007 3610 Nov 4 11:31 y An alternate way to do the same by quashing users/groups universally with `--numeric-ids`: # bup restore -C dest --numeric-ids --map-uid 1000=1042 /x/latest/y Restoring: 97, done. # SEE ALSO `bup-save`(1), `bup-ftp`(1), `bup-fuse`(1), `bup-web`(1) # BUP Part of the `bup`(1) suite. bup-0.25/Documentation/bup-save.md000066400000000000000000000131721225146730500171140ustar00rootroot00000000000000% bup-save(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-save - create a new bup backup set # SYNOPSIS bup save [-r *host*:*path*] \<-t|-c|-n *name*\> [-#] [-f *indexfile*] [-v] [-q] [\--smaller=*maxsize*] \; # DESCRIPTION `bup save` saves the contents of the given files or paths into a new backup set and optionally names that backup set. Note that in order to refer to your backup set later (i.e. for restoration), you must either specify `--name` (the normal case), or record the tree or commit id printed by `--tree` or `--commit`. Before trying to save files using `bup save`, you should first update the index using `bup index`. The reasons for separating the two steps are described in the man page for `bup-index`(1). By default, metadata will be saved for every path, and the metadata for any unindexed parent directories of indexed paths will be taken directly from the filesystem. However, if `--strip`, `--strip-path`, or `--graft` is specified, metadata will not be saved for the root directory (*/*). See `bup-restore`(1) for more information about the handling of metadata. # OPTIONS -r, \--remote=*host*:*path* : save the backup set to the given remote server. If *path* is omitted, uses the default path on the remote server (you still need to include the ':'). The connection to the remote server is made with SSH. If you'd like to specify which port, user or private key to use for the SSH connection, we recommend you use the `~/.ssh/config` file. 
-t, \--tree : after creating the backup set, print out the git tree id of the resulting backup. -c, \--commit : after creating the backup set, print out the git commit id of the resulting backup. -n, \--name=*name* : after creating the backup set, create a git branch named *name* so that the backup can be accessed using that name. If *name* already exists, the new backup will be considered a descendant of the old *name*. (Thus, you can continually create new backup sets with the same name, and later view the history of that backup set to see how files have changed over time.) -d, \--date=*date* : specify the date of the backup, in seconds since the epoch, instead of the current time. -f, \--indexfile=*indexfile* : use a different index filename instead of `$BUP_DIR/bupindex`. -v, \--verbose : increase verbosity (can be used more than once). With one -v, prints every directory name as it gets backed up. With two -v, also prints every filename. -q, \--quiet : disable progress messages. \--smaller=*maxsize* : don't back up files >= *maxsize* bytes. You can use this to run frequent incremental backups of your small files, which can usually be backed up quickly, and skip over large ones (like virtual machine images) which take longer. Then you can back up the large files less frequently. Use a suffix like k, M, or G to specify multiples of 1024, 1024*1024, 1024*1024*1024 respectively. \--bwlimit=*bytes/sec* : don't transmit more than *bytes/sec* bytes per second to the server. This is good for making your backups not suck up all your network bandwidth. Use a suffix like k, M, or G to specify multiples of 1024, 1024*1024, 1024*1024*1024 respectively. \--strip : strips the path that is given from all files and directories. A directory */root/chroot/etc* saved with "bup save -n chroot \--strip /root/chroot" would be saved as */etc*. Note that currently, metadata will not be saved for the root directory (*/*) when this option is specified. 
\--strip-path=*path-prefix* : strips the given path prefix *path-prefix* from all files and directories. A directory */root/chroot/webserver* saved with "bup save -n webserver \--strip-path=/root/chroot" would be saved as */webserver/etc*. Note that currently, metadata will not be saved for the root directory (*/*) when this option is specified. \--graft=*old_path*=*new_path* : a graft point *old_path*=*new_path* (can be used more than once). A directory */root/chroot/a/etc* saved with "bup save -n chroot \--graft /root/chroot/a=/chroot/a" would be saved as */chroot/a/etc*. Note that currently, metadata will not be saved for the root directory (*/*) when this option is specified. -*#*, \--compress=*#* : set the compression level to # (a value from 0-9, where 9 is the highest and 0 is no compression). The default is 1 (fast, loose compression) # EXAMPLE $ bup index -ux /etc Indexing: 1981, done. $ bup save -r myserver: -n my-pc-backup --bwlimit=50k /etc Reading index: 1981, done. Saving: 100.00% (998/998k, 1981/1981 files), done. $ ls /home/joe/chroot/httpd bin var $ bup index -ux /home/joe/chroot/httpd Indexing: 1337, done. $ bup save --strip -n joes-httpd-chroot /home/joe/chroot/httpd Reading index: 1337, done. Saving: 100.00% (998/998k, 1337/1337 files), done. $ bup ls joes-httpd-chroot/latest/ bin/ var/ $ bup save --strip-path=/home/joe/chroot -n joes-chroot \ /home/joe/chroot/httpd Reading index: 1337, done. Saving: 100.00% (998/998k, 1337/1337 files), done. $ bup ls joes-chroot/latest/ httpd/ $ bup save --graft /home/joe/chroot/httpd=/http-chroot \ -n joe /home/joe/chroot/httpd Reading index: 1337, done. Saving: 100.00% (998/998k, 1337/1337 files), done. $ bup ls joe/latest/ http-chroot/ # SEE ALSO `bup-index`(1), `bup-split`(1), `bup-on`(1), `bup-restore`(1), `ssh_config`(5) # BUP Part of the `bup`(1) suite. 
bup-0.25/Documentation/bup-server.md000066400000000000000000000025141225146730500174620ustar00rootroot00000000000000% bup-server(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-server - the server side of the bup client-server relationship # SYNOPSIS bup server # DESCRIPTION `bup server` is the server side of a remote bup session. If you use `bup-split`(1) or `bup-save`(1) with the `-r` option, they will ssh to the remote server and run `bup server` to receive the transmitted objects. There is normally no reason to run `bup server` yourself. # MODES smart : In this mode, the server checks each incoming object against the idx files in its repository. If any object already exists, it tells the client about the idx file it was found in, allowing the client to download that idx and avoid sending duplicate data. This is `bup-server`'s default mode. dumb : In this mode, the server will not check its local index before writing an object. To avoid writing duplicate objects, the server will tell the client to download all of its `.idx` files at the start of the session. This mode is useful on low powered server hardware (ie router/slow NAS). # FILES $BUP_DIR/bup-dumb-server : Activate dumb server mode, as discussed above. This file is not created by default in new repositories. # SEE ALSO `bup-save`(1), `bup-split`(1) # BUP Part of the `bup`(1) suite. 
bup-0.25/Documentation/bup-split.md000066400000000000000000000134031225146730500173060ustar00rootroot00000000000000% bup-split(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-split - save individual files to bup backup sets # SYNOPSIS bup split \[-t\] \[-c\] \[-n *name*\] COMMON\_OPTIONS bup split -b COMMON\_OPTIONS bup split \<--noop \[--copy\]|--copy\> COMMON\_OPTIONS COMMON\_OPTIONS ~ \[-r *host*:*path*\] \[-v\] \[-q\] \[-d *seconds-since-epoch*\] \[\--bench\] \[\--max-pack-size=*bytes*\] \[-#\] \[\--bwlimit=*bytes*\] \[\--max-pack-objects=*n*\] \[\--fanout=*count*\] \[\--keep-boundaries\] \[--git-ids | filenames...\] # DESCRIPTION `bup split` concatenates the contents of the given files (or if no filenames are given, reads from stdin), splits the content into chunks of around 8k using a rolling checksum algorithm, and saves the chunks into a bup repository. Chunks which have previously been stored are not stored again (ie. they are 'deduplicated'). Because of the way the rolling checksum works, chunks tend to be very stable across changes to a given file, including adding, deleting, and changing bytes. For example, if you use `bup split` to back up an XML dump of a database, and the XML file changes slightly from one run to the next, nearly all the data will still be deduplicated and the size of each backup after the first will typically be quite small. Another technique is to pipe the output of the `tar`(1) or `cpio`(1) programs to `bup split`. When individual files in the tarball change slightly or are added or removed, bup still processes the remainder of the tarball efficiently. (Note that `bup save` is usually a more efficient way to accomplish this, however.) To get the data back, use `bup-join`(1). # MODES These options select the primary behavior of the command, with -n being the most likely choice. -n, \--name=*name* : after creating the dataset, create a git branch named *name* so that it can be accessed using that name. 
If *name* already exists, the new dataset will be considered a descendant of the old *name*. (Thus, you can continually create new datasets with the same name, and later view the history of that dataset to see how it has changed over time.) -t, \--tree : output the git tree id of the resulting dataset. -c, \--commit : output the git commit id of the resulting dataset. -b, \--blobs : output a series of git blob ids that correspond to the chunks in the dataset. Incompatible with -n, -t, and -c. \--noop : read the data and split it into blocks based on the "bupsplit" rolling checksum algorithm, but don't do anything with the blocks. This is mostly useful for benchmarking. Incompatible with -n, -t, -c, and -b. \--copy : like `--noop`, but also write the data to stdout. This can be useful for benchmarking the speed of read+bupsplit+write for large amounts of data. Incompatible with -n, -t, -c, and -b. # OPTIONS -r, \--remote=*host*:*path* : save the backup set to the given remote server. If *path* is omitted, uses the default path on the remote server (you still need to include the ':'). The connection to the remote server is made with SSH. If you'd like to specify which port, user or private key to use for the SSH connection, we recommend you use the `~/.ssh/config` file. -d, \--date=*seconds-since-epoch* : specify the date inscribed in the commit (seconds since 1970-01-01). -q, \--quiet : disable progress messages. -v, \--verbose : increase verbosity (can be used more than once). \--git-ids : stdin is a list of git object ids instead of raw data. `bup split` will read the contents of each named git object (if it exists in the bup repository) and split it. This might be useful for converting a git repository with large binary files to use bup-style hashsplitting instead. This option is probably most useful when combined with `--keep-boundaries`. 
\--keep-boundaries : if multiple filenames are given on the command line, they are normally concatenated together as if the content all came from a single file. That is, the set of blobs/trees produced is identical to what it would have been if there had been a single input file. However, if you use `--keep-boundaries`, each file is split separately. You still only get a single tree or commit or series of blobs, but each blob comes from only one of the files; the end of one of the input files always ends a blob. \--bench : print benchmark timings to stderr. \--max-pack-size=*bytes* : never create git packfiles larger than the given number of bytes. Default is 1 billion bytes. Usually there is no reason to change this. \--max-pack-objects=*numobjs* : never create git packfiles with more than the given number of objects. Default is 200 thousand objects. Usually there is no reason to change this. \--fanout=*numobjs* : when splitting very large files, try and keep the number of elements in trees to an average of *numobjs*. \--bwlimit=*bytes/sec* : don't transmit more than *bytes/sec* bytes per second to the server. This is good for making your backups not suck up all your network bandwidth. Use a suffix like k, M, or G to specify multiples of 1024, 1024*1024, 1024*1024*1024 respectively. -*#*, \--compress=*#* : set the compression level to # (a value from 0-9, where 9 is the highest and 0 is no compression). The default is 1 (fast, loose compression) # EXAMPLE $ tar -cf - /etc | bup split -r myserver: -n mybackup-tar tar: Removing leading /' from member names Indexing objects: 100% (196/196), done. $ bup join -r myserver: mybackup-tar | tar -tf - | wc -l 1961 # SEE ALSO `bup-join`(1), `bup-index`(1), `bup-save`(1), `bup-on`(1), `ssh_config`(5) # BUP Part of the `bup`(1) suite. 
bup-0.25/Documentation/bup-tag.md000066400000000000000000000036371225146730500167360ustar00rootroot00000000000000% bup-tag(1) Bup %BUP_VERSION% % Gabriel Filion % %BUP_DATE% # NAME bup-tag - tag a commit in the bup repository # SYNOPSIS bup tag bup tag [-f] \ \ bup tag -d [-f] \ # DESCRIPTION `bup tag` lists, creates or deletes a tag in the bup repository. A tag is an easy way to retrieve a specific commit. It can be used to mark a specific backup for easier retrieval later. When called without any arguments, the command lists all tags that can be found in the repository. When called with a tag name and a commit ID or ref name, it creates a new tag with the given name, if it doesn't already exist, that points to the commit given in the second argument. When called with '-d' and a tag name, it removes the given tag, if it exists. bup exposes the contents of backups with current tags, via any command that lists or shows backups. They can be found under the /.tag directory. For example, the 'ftp' command will show the tag named 'tag1' under /.tag/tag1. Tags are also exposed under the branches from which they can be reached. For example, if you create a tag named 'important' under branch 'computerX', you will also be able to retrieve the contents of the backup that was tagged under /computerX/important. This is done as a convenience, and should the branch 'computerX' be deleted, the contents of the tagged backup will be available through /.tag/important as long as the tag is not deleted. # OPTIONS -d, \--delete : delete a tag -f, \--force : Overwrite the named tag even if it already exists. With -f, don't report a missing tag as an error. # EXAMPLE $ bup tag new-puppet-version hostx-backup $ bup tag new-puppet-version $ bup ftp "ls /.tag/new-puppet-version" files.. $ bup tag -d new-puppet-version # SEE ALSO `bup-save`(1), `bup-split`(1), `bup-ftp`(1), `bup-fuse`(1), `bup-web`(1) # BUP Part of the `bup`(1) suite. 
bup-0.25/Documentation/bup-tick.md000066400000000000000000000012501225146730500171020ustar00rootroot00000000000000% bup-tick(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup-tick - wait for up to one second # SYNOPSIS bup tick # DESCRIPTION `bup tick` waits until `time`(2) returns a different value than it originally did. Since time() has a granularity of one second, this can cause a delay of up to one second. This program is useful for writing tests that need to ensure a file date will be seen as modified. It is slightly better than `sleep`(1) since it sometimes waits for less than one second. # EXAMPLE $ date; bup tick; date Sat Feb 6 16:59:58 EST 2010 Sat Feb 6 16:59:59 EST 2010 # BUP Part of the `bup`(1) suite. bup-0.25/Documentation/bup-web.md000066400000000000000000000017251225146730500167340ustar00rootroot00000000000000% bup-ftp(1) Bup %BUP_VERSION% % Joe Beda % %BUP_DATE% # NAME bup-web - Start web server to browse bup repositiory # SYNOPSIS bup web [[hostname]:port] # DESCRIPTION `bup web` starts a web server that can browse bup repositories. The file hierarchy is the same as that shown by `bup-fuse`(1), `bup-ls`(1) and `bup-ftp`(1). `hostname` and `port` default to 127.0.0.1 and 8080, respectively, and hence `bup web` will only offer up the web server to locally running clients. If you'd like to expose the web server to anyone on your network (dangerous!) you can omit the bind address to bind to all available interfaces: `:8080`. # OPTIONS --human-readable : display human readable file sizes (i.e. 3.9K, 4.7M) # EXAMPLE $ bup web Serving HTTP on 127.0.0.1:8080... ^C $ bup web :8080 Serving HTTP on 0.0.0.0:8080... ^C # SEE ALSO `bup-fuse`(1), `bup-ls`(1), `bup-ftp`(1), `bup-restore`(1) # BUP Part of the `bup`(1) suite. 
bup-0.25/Documentation/bup.md000066400000000000000000000047051225146730500161620ustar00rootroot00000000000000% bup(1) Bup %BUP_VERSION% % Avery Pennarun % %BUP_DATE% # NAME bup - Backup program using rolling checksums and git file formats # SYNOPSIS bup [global options...] \ [options...] # DESCRIPTION `bup` is a program for making backups of your files using the git file format. Unlike `git`(1) itself, bup is optimized for handling huge data sets including individual very large files (such a virtual machine images). However, once a backup set is created, it can still be accessed using git tools. The individual bup subcommands appear in their own man pages. # GLOBAL OPTIONS \--version : print bup's version number. Equivalent to `bup-version`(1) -d, \--bup-dir=*BUP_DIR* : use the given BUP_DIR parameter as the bup repository location, instead of reading it from the $BUP_DIR environment variable or using the default `~/.bup` location. # COMMONLY USED SUBCOMMANDS `bup-fsck`(1) : Check backup sets for damage and add redundancy information `bup-ftp`(1) : Browse backup sets using an ftp-like client `bup-fuse`(1) : Mount your backup sets as a filesystem `bup-help`(1) : Print detailed help for the given command `bup-index`(1) : Create or display the index of files to back up `bup-on`(1) : Backup a remote machine to the local one `bup-restore`(1) : Extract files from a backup set `bup-save`(1) : Save files into a backup set (note: run "bup index" first) `bup-web`(1) : Launch a web server to examine backup sets # RARELY USED SUBCOMMANDS `bup-damage`(1) : Deliberately destroy data `bup-drecurse`(1) : Recursively list files in your filesystem `bup-init`(1) : Initialize a bup repository `bup-join`(1) : Retrieve a file backed up using `bup-split`(1) `bup-ls`(1) : Browse the files in your backup sets `bup-margin`(1) : Determine how close your bup repository is to armageddon `bup-memtest`(1) : Test bup memory usage statistics `bup-midx`(1) : Index objects to speed up future backups 
`bup-newliner`(1) : Make sure progress messages don't overlap with output `bup-random`(1) : Generate a stream of random output `bup-server`(1) : The server side of the bup client-server relationship `bup-split`(1) : Split a single file into its own backup set `bup-tick`(1) : Wait for up to one second. `bup-version`(1) : Report the version number of your copy of bup. # SEE ALSO `git`(1) and the *README* file from the bup distribution. The home of bup is at . bup-0.25/HACKING000066400000000000000000000070321225146730500132240ustar00rootroot00000000000000 Conventions? Are you kidding? OK fine. Code Branching Model ==================== The master branch is what we consider the main-line of development, and the last, non-rc tag on master is the most recent stable release. Of course, in all fairness, it has been a *long* time since the last stable release, but we're working fairly hard to fix that -- no, seriously. Any branch with a "tmp/" prefix might be rebased (often), so keep that in mind when using or depending on one. Any branch with a "tmp/review/" prefix corresponds to a patchset submitted to the mailing list. We try to maintain these branches to make the review process easier for those not as familiar with patches via email. Current Trajectory ================== At the moment, the primary goal is to test master in preparation for a 0.25 release, which among many other things will include more complete support for filesystem metadata. If you have the time and inclination, please help review patches posted to the list for inclusion in 0.25. (See "ways to help" below.) We'd like to try to release (0.25) soon, so we're limiting the scope of prospective changes -- definitely in-scope: - fixes to the new metadata support - fixes for regressions (portability included) - fixes for "serious" bugs - "simple" fixes - documentation improvements More specific ways to help ========================== Testing -- yes please. 
With respect to patches, bup development is handled via the mailing list, and all patches should be sent to the list for review (see "Submitting Patches" below). In most cases, we try to wait until we have at least one or two "Reviewed-by:" replies to a patch posted to the list before incorporating it into master, so reviews are an important way to help. We also love a good "Tested-by:" -- the more the merrier. Submitting patches ================== As mentioned, all patches should be posted to the mailing list for review. You can create a "signed off" (see ./SIGNED-OFF-BY) set of patches in ./pending, ready for submission to the list, like this: git format-patch -s -o patches origin/master which will include all of the patches since origin/master on your current branch. Then you can send them to the list like this: git send-email --to bup-list@googlegroups.com --compose patches/* The use of --compose will cause git to ask you to edit a cover letter that will be sent as the first message. It's also possible to handle everything in one step: git send-email -s --to bup-list@googlegroups.com --compose origin/master and you can add --annotate if you'd like to review or edit each patch before it's sent. For single patches, this might be easier: git send-email -s --to bup-list@googlegroups.com --annotate -n1 HEAD which will send the top patch on the current branch, and will stop to allow you to add comments. You can add comments to the section with the diffstat without affecting the commit message. Of course, unless your machine is set up to handle outgoing mail locally, you may need to configure git to be able to send mail. See git-send-email(1) for further details. Oh, and we do have a ./CODING-STYLE, hobgoblins and all, though don't let that scare you off. We're not all that fierce. 
Even More Generally =================== It's not like we have a lot of hard and fast rules, but some of the ideas here aren't altogether terrible: http://www.kernel.org/doc/Documentation/SubmittingPatches In particular, we've been paying at least some attention to the bits regarding Acked-by:, Reported-by:, Tested-by: and Reviewed-by:. bup-0.25/LICENSE000066400000000000000000000623671225146730500132560ustar00rootroot00000000000000 Unless otherwise stated below, the files in this project may be distributed under the terms of the following license. (The LGPL version 2.) In addition, bupsplit.c, bupsplit.h, and options.py may be redistributed according to the separate (BSD-style) license written inside those files. The files in lib/tornado are covered by the license described in lib/tornado/README. The definition of the relpath function was taken from CPython (tag v2.6, file Lib/posixpath.py, hg-commit 95fff5a6a276) and is covered under the terms of the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2. GNU LIBRARY GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1991 Free Software Foundation, Inc. 675 Mass Ave, Cambridge, MA 02139, USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the library GPL. It is numbered 2 because it goes with version 2 of the ordinary GPL.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Library General Public License, applies to some specially designated Free Software Foundation software, and to any other libraries whose authors decide to use it. You can use it for your libraries, too. When we speak of free software, we are referring to freedom, not price. 
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library, or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link a program with the library, you must provide complete object files to the recipients so that they can relink them with the library, after making changes to the library and recompiling it. And you must show them these terms so they know their rights. Our method of protecting your rights has two steps: (1) copyright the library, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the library. Also, for each distributor's protection, we want to make certain that everyone understands that there is no warranty for this free library. If the library is modified by someone else and passed on, we want its recipients to know that what they have is not the original version, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that companies distributing free software will individually obtain patent licenses, thus in effect transforming the program into proprietary software. 
To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License, which was designed for utility programs. This license, the GNU Library General Public License, applies to certain designated libraries. This license is quite different from the ordinary one; be sure to read it in full, and don't assume that anything in it is the same as in the ordinary license. The reason we have a separate public license for some libraries is that they blur the distinction we usually make between modifying or adding to a program and simply using it. Linking a program with a library, without changing the library, is in some sense simply using the library, and is analogous to running a utility program or application program. However, in a textual and legal sense, the linked executable is a combined work, a derivative of the original library, and the ordinary General Public License treats it as such. Because of this blurred distinction, using the ordinary General Public License for libraries did not effectively promote software sharing, because most developers did not use the libraries. We concluded that weaker conditions might promote sharing better. However, unrestricted linking of non-free programs would deprive the users of those programs of all benefit from the free status of the libraries themselves. This Library General Public License is intended to permit developers of non-free programs to use free libraries, while preserving your freedom as a user of such programs to change the free libraries that are incorporated in them. (We have not seen how to achieve this as regards changes in header files, but we have achieved it as regards changes in the actual functions of the Library.) The hope is that this will lead to faster development of free libraries. 
The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, while the latter only works together with the library. Note that it is possible for a library to be covered by the ordinary General Public License rather than by this special one. GNU LIBRARY GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Library General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. 
The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. 
(For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. 
Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. 
The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also compile or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. 
(It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. c) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. d) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. 
This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. 
If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. 
The Free Software Foundation may publish revised and/or new versions of the Library General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS Appendix: How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with this library; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. , 1 April 1990 Ty Coon, President of Vice That's all there is to it! bup-0.25/Makefile000066400000000000000000000120121225146730500136670ustar00rootroot00000000000000OS:=$(shell uname | sed 's/[-_].*//') CFLAGS := -Wall -O2 -Werror $(PYINCLUDE) $(CFLAGS) SOEXT:=.so ifeq ($(OS),CYGWIN) SOEXT:=.dll endif ifdef TMPDIR test_tmp := $(TMPDIR) else test_tmp := $(CURDIR)/t/tmp endif default: all all: bup Documentation/all t/configure-sampledata --setup bup: lib/bup/_version.py lib/bup/_helpers$(SOEXT) cmds Documentation/all: bup INSTALL=install PYTHON=python PREFIX=/usr MANDIR=$(DESTDIR)$(PREFIX)/share/man DOCDIR=$(DESTDIR)$(PREFIX)/share/doc/bup BINDIR=$(DESTDIR)$(PREFIX)/bin LIBDIR=$(DESTDIR)$(PREFIX)/lib/bup install: all $(INSTALL) -d $(MANDIR)/man1 $(DOCDIR) $(BINDIR) \ $(LIBDIR)/bup $(LIBDIR)/cmd $(LIBDIR)/tornado \ $(LIBDIR)/web $(LIBDIR)/web/static [ ! -e Documentation/.docs-available ] || \ $(INSTALL) -m 0644 \ Documentation/*.1 \ $(MANDIR)/man1 [ ! 
-e Documentation/.docs-available ] || \ $(INSTALL) -m 0644 \ Documentation/*.html \ $(DOCDIR) $(INSTALL) -pm 0755 bup $(BINDIR) $(INSTALL) -pm 0755 \ cmd/bup-* \ $(LIBDIR)/cmd $(INSTALL) -pm 0644 \ lib/bup/*.py \ $(LIBDIR)/bup $(INSTALL) -pm 0755 \ lib/bup/*$(SOEXT) \ $(LIBDIR)/bup $(INSTALL) -pm 0644 \ lib/tornado/*.py \ $(LIBDIR)/tornado $(INSTALL) -pm 0644 \ lib/web/static/* \ $(LIBDIR)/web/static/ $(INSTALL) -pm 0644 \ lib/web/*.html \ $(LIBDIR)/web/ %/all: $(MAKE) -C $* all %/clean: $(MAKE) -C $* clean config/config.h: config/Makefile config/configure config/configure.inc \ $(wildcard config/*.in) cd config && $(MAKE) config.h lib/bup/_helpers$(SOEXT): \ config/config.h \ lib/bup/bupsplit.c lib/bup/_helpers.c lib/bup/csetup.py @rm -f $@ cd lib/bup && \ LDFLAGS="$(LDFLAGS)" CFLAGS="$(CFLAGS)" $(PYTHON) csetup.py build cp lib/bup/build/*/_helpers$(SOEXT) lib/bup/ .PHONY: lib/bup/_version.py lib/bup/_version.py: rm -f $@ $@.new ./format-subst.pl $@.pre >$@.new mv $@.new $@ runtests: all runtests-python runtests-cmdline runtests-python: all test -e t/tmp || mkdir t/tmp TMPDIR="$(test_tmp)" $(PYTHON) wvtest.py t/t*.py lib/*/t/t*.py runtests-cmdline: all test -e t/tmp || mkdir t/tmp TMPDIR="$(test_tmp)" t/test-cat-file.sh TMPDIR="$(test_tmp)" t/test-index-check-device.sh TMPDIR="$(test_tmp)" t/test-meta.sh TMPDIR="$(test_tmp)" t/test-restore-map-owner.sh TMPDIR="$(test_tmp)" t/test-restore-single-file.sh TMPDIR="$(test_tmp)" t/test-rm-between-index-and-save.sh TMPDIR="$(test_tmp)" t/test-command-without-init-fails.sh TMPDIR="$(test_tmp)" t/test-redundant-saves.sh TMPDIR="$(test_tmp)" t/test.sh stupid: PATH=/bin:/usr/bin $(MAKE) test test: all ./wvtestrun $(MAKE) PYTHON=$(PYTHON) runtests check: test bup: main.py rm -f $@ ln -s $< $@ cmds: \ $(patsubst cmd/%-cmd.py,cmd/bup-%,$(wildcard cmd/*-cmd.py)) \ $(patsubst cmd/%-cmd.sh,cmd/bup-%,$(wildcard cmd/*-cmd.sh)) cmd/bup-%: cmd/%-cmd.py rm -f $@ ln -s $*-cmd.py $@ %: %.py rm -f $@ ln -s $< $@ bup-%: cmd-%.sh rm -f $@ 
ln -s $< $@ cmd/bup-%: cmd/%-cmd.sh rm -f $@ ln -s $*-cmd.sh $@ # update the local 'man' and 'html' branches with pregenerated output files, for # people who don't have pandoc (and maybe to aid in google searches or something) export-docs: Documentation/all git update-ref refs/heads/man origin/man '' 2>/dev/null || true git update-ref refs/heads/html origin/html '' 2>/dev/null || true GIT_INDEX_FILE=gitindex.tmp; export GIT_INDEX_FILE; \ rm -f $${GIT_INDEX_FILE} && \ git add -f Documentation/*.1 && \ git update-ref refs/heads/man \ $$(echo "Autogenerated man pages for $$(git describe)" \ | git commit-tree $$(git write-tree --prefix=Documentation) \ -p refs/heads/man) && \ rm -f $${GIT_INDEX_FILE} && \ git add -f Documentation/*.html && \ git update-ref refs/heads/html \ $$(echo "Autogenerated html pages for $$(git describe)" \ | git commit-tree $$(git write-tree --prefix=Documentation) \ -p refs/heads/html) # push the pregenerated doc files to origin/man and origin/html push-docs: export-docs git push origin man html # import pregenerated doc files from origin/man and origin/html, in case you # don't have pandoc but still want to be able to install the docs. 
import-docs: Documentation/clean git archive origin/html | (cd Documentation; tar -xvf -) git archive origin/man | (cd Documentation; tar -xvf -) clean: Documentation/clean config/clean rm -f *.o lib/*/*.o *.so lib/*/*.so *.dll lib/*/*.dll *.exe \ .*~ *~ */*~ lib/*/*~ lib/*/*/*~ \ *.pyc */*.pyc lib/*/*.pyc lib/*/*/*.pyc \ bup bup-* cmd/bup-* lib/bup/_version.py randomgen memtest \ out[12] out2[tc] tags[12] tags2[tc] \ testfs.img lib/bup/t/testfs.img umount t/mnt/* || true if test -e t/mnt; then rm -r t/mnt; fi # FIXME: migrate these to t/mnt/ if test -e bupmeta.tmp/testfs; \ then umount bupmeta.tmp/testfs || true; fi if test -e lib/bup/t/testfs; \ then umount lib/bup/t/testfs || true; fi if test -e bupmeta.tmp/testfs-limited; \ then umount bupmeta.tmp/testfs-limited || true; fi rm -rf *.tmp *.tmp.meta t/*.tmp lib/*/*/*.tmp build lib/bup/build lib/bup/t/testfs if test -e t/tmp; then t/force-delete t/tmp; fi t/configure-sampledata --clean bup-0.25/README000077700000000000000000000000001225146730500143652README.mdustar00rootroot00000000000000bup-0.25/README.md000066400000000000000000000455521225146730500135250ustar00rootroot00000000000000bup: It backs things up ======================= bup is a program that backs things up. It's short for "backup." Can you believe that nobody else has named an open source program "bup" after all this time? Me neither. Despite its unassuming name, bup is pretty cool. To give you an idea of just how cool it is, I wrote you this poem: Bup is teh awesome What rhymes with awesome? I guess maybe possum But that's irrelevant. Hmm. Did that help? Maybe prose is more useful after all. Reasons bup is awesome ---------------------- bup has a few advantages over other backup software: - It uses a rolling checksum algorithm (similar to rsync) to split large files into chunks. 
The most useful result of this is you can backup huge virtual machine (VM) disk images, databases, and XML files incrementally, even though they're typically all in one huge file, and not use tons of disk space for multiple versions. - It uses the packfile format from git (the open source version control system), so you can access the stored data even if you don't like bup's user interface. - Unlike git, it writes packfiles *directly* (instead of having a separate garbage collection / repacking stage) so it's fast even with gratuitously huge amounts of data. bup's improved index formats also allow you to track far more filenames than git (millions) and keep track of far more objects (hundreds or thousands of gigabytes). - Data is "automagically" shared between incremental backups without having to know which backup is based on which other one - even if the backups are made from two different computers that don't even know about each other. You just tell bup to back stuff up, and it saves only the minimum amount of data needed. - You can back up directly to a remote bup server, without needing tons of temporary disk space on the computer being backed up. And if your backup is interrupted halfway through, the next run will pick up where you left off. And it's easy to set up a bup server: just install bup on any machine where you have ssh access. - Bup can use "par2" redundancy to recover corrupted backups even if your disk has undetected bad sectors. - Even when a backup is incremental, you don't have to worry about restoring the full backup, then each of the incrementals in turn; an incremental backup *acts* as if it's a full backup, it just takes less disk space. - You can mount your bup repository as a FUSE filesystem and access the content that way, and even export it over Samba. - It's written in python (with some C parts to make it faster) so it's easy for you to extend and maintain. 
Reasons you might want to avoid bup ----------------------------------- - This is a very early version. Therefore it will most probably not work for you, but we don't know why. It is also missing some probably-critical features. - It requires python >= 2.5, a C compiler, and an installed git version >= 1.5.3.1. - It currently only works on Linux, MacOS X >= 10.4, NetBSD, Solaris, or Windows (with Cygwin). Patches to support other platforms are welcome. - Any items in "Things that are stupid" below. Getting started =============== From source ----------- - Check out the bup source code using git: git clone git://github.com/bup/bup - Install the needed python libraries (including the development libraries). On Debian/Ubuntu this is usually sufficient (run as root): apt-get install python2.6-dev python-fuse apt-get install python-pyxattr python-pylibacl apt-get install linux-libc-dev Substitute python2.5-dev if you have an older system. Alternately, on newer Debian/Ubuntu versions, you can try this: apt-get build-dep bup On CentOS (for CentOS 6, at least), this should be sufficient (run as root): yum groupinstall "Development Tools" yum install python python-devel yum install fuse-python pyxattr pylibacl yum install perl-Time-HiRes In addition to the default CentOS repositories, you may need to add RPMForge (for fuse-python) and EPEL (for pyxattr and pylibacl). On Cygwin, install python, make, rsync, and gcc4. - Build the python module and symlinks: make - Run the tests: make test (The tests should pass. If they don't pass for you, stop here and send an email to bup-list@googlegroups.com.) - You can install bup via "make install", and override the default destination with DESTDIR and PREFIX. Files are normally installed to "$DESTDIR/$PREFIX" where DESTDIR is empty by default, and PREFIX is set to /usr. 
So if you wanted to install bup to /opt/bup, you might do something like this: make install DESTDIR=/opt/bup PREFIX='' From binary packages -------------------- Binary packages of bup are known to be built for the following OSes: - Debian: http://packages.debian.org/search?searchon=names&keywords=bup - Ubuntu: http://packages.ubuntu.com/search?searchon=names&keywords=bup - pkgsrc (NetBSD, Dragonfly, and others) http://pkgsrc.se/sysutils/bup http://cvsweb.netbsd.org/bsdweb.cgi/pkgsrc/sysutils/bup/ Using bup --------- - Get help for any bup command: bup help bup help init bup help index bup help save bup help restore ... - Initialize the default BUP_DIR (~/.bup): bup init - Make a local backup (-v or -vv will increase the verbosity): bup index /etc bup save -n local-etc /etc - Restore a local backup to ./dest: bup restore -C ./dest local-etc/latest/etc ls -l dest/etc - Look at how much disk space your backup took: du -s ~/.bup - Make another backup (which should be mostly identical to the last one; notice that you don't have to *specify* that this backup is incremental, it just saves space automatically): bup index /etc bup save -n local-etc /etc - Look how little extra space your second backup used (on top of the first): du -s ~/.bup - Get a list of your previous backups: bup ls local-etc - Restore your first backup again: bup restore -C ./dest-2 local-etc/2013-11-23-11195/etc - Make a backup on a remote server (which must already have the 'bup' command somewhere in the server's PATH (see /etc/profile, etc/environment, ~/.profile, or ~/.bashrc), and be accessible via ssh. Make sure to replace SERVERNAME with the actual hostname of your server): ssh SERVERNAME bup init bup index /etc bup save -r SERVERNAME: -n local-etc /etc - Restore a backup from a remote server. (FAIL: unfortunately, unlike "bup join", "bup restore" does not yet support remote restores. See both "bup join" and "Things that are stupid" below.) 
- Defend your backups from death rays (OK fine, more likely from the occasional bad disk block). This writes parity information (currently via par2) for all of the existing data so that bup may be able to recover from some amount of repository corruption: bup fsck -g - Use split/join instead of index/save/restore. Try making a local backup using tar: tar -cvf - /etc | bup split -n local-etc -vv - Try restoring the tarball: bup join local-etc | tar -tf - - Look at how much disk space your backup took: du -s ~/.bup - Make another tar backup: tar -cvf - /etc | bup split -n local-etc -vv - Look at how little extra space your second backup used on top of the first: du -s ~/.bup - Restore the first tar backup again (the ~1 is git notation for "one older than the most recent"): bup join local-etc~1 | tar -tf - - Get a list of your previous split-based backups: GIT_DIR=~/.bup git log local-etc - Make a backup on a remote server: tar -cvf - /etc | bup split -r SERVERNAME: -n local-etc -vv - Try restoring the remote backup tarball: bup join -r SERVERNAME: local-etc | tar -tf - That's all there is to it! Notes on FreeBSD ---------------- - FreeBSD's default 'make' command doesn't like bup's Makefile. In order to compile the code, run tests and install bup, you need to install GNU Make from the port named 'gmake' and use its executable instead in the commands seen above. (i.e. 'gmake test' runs bup's test suite) - Python's development headers are automatically installed with the 'python' port so there's no need to install them separately. - To use the 'bup fuse' command, you need to install the fuse kernel module from the 'fusefs-kmod' port in the 'sysutils' section and the libraries from the port named 'py-fusefs' in the 'devel' section. - The 'par2' command can be found in the port named 'par2cmdline'. - In order to compile the documentation, you need pandoc which can be found in the port named 'hs-pandoc' in the 'textproc' section. 
Notes on NetBSD/pkgsrc ---------------------- - See pkgsrc/sysutils/bup, which should be the most recent stable release and includes man pages. It also has a reasonable set of dependencies (git, par2, py-fuse-bindings). - The "fuse-python" package referred to is hard to locate, and is a separate tarball for the python language binding distributed by the fuse project on sourceforge. It is available as pkgsrc/filesystems/py-fuse-bindings and on NetBSD 5, "bup fuse" works with it. - "bup fuse" presents every directory/file as inode 0. The directory traversal code ("fts") in NetBSD's libc will interpret this as a cycle and error out, so "ls -R" and "find" will not work. - There is no support for ACLs. If/when some entrprising person fixes this, adjust t/compare-trees. Notes on Cygwin --------------- - There is no support for ACLs. If/when some enterprising person fixes this, adjust t/compare-trees. - In t/test.sh, two tests have been disabled. These tests check to see that repeated saves produce identical trees and that an intervening index doesn't change the SHA1. Apparently Cygwin has some unusual behaviors with respect to access times (that probably warrant further investigation). Possibly related: http://cygwin.com/ml/cygwin/2007-06/msg00436.html Notes on OS X ------------- - There is no support for ACLs. If/when some enterprising person fixes this, adjust t/compare-trees. How it works ============ Basic storage: -------------- bup stores its data in a git-formatted repository. Unfortunately, git itself doesn't actually behave very well for bup's use case (huge numbers of files, files with huge sizes, retaining file permissions/ownership are important), so we mostly don't use git's *code* except for a few helper programs. For example, bup has its own git packfile writer written in python. 
Basically, 'bup split' reads the data on stdin (or from files specified on the command line), breaks it into chunks using a rolling checksum (similar to rsync), and saves those chunks into a new git packfile. There is one git packfile per backup. When deciding whether to write a particular chunk into the new packfile, bup first checks all the other packfiles that exist to see if they already have that chunk. If they do, the chunk is skipped. git packs come in two parts: the pack itself (*.pack) and the index (*.idx). The index is pretty small, and contains a list of all the objects in the pack. Thus, when generating a remote backup, we don't have to have a copy of the packfiles from the remote server: the local end just downloads a copy of the server's *index* files, and compares objects against those when generating the new pack, which it sends directly to the server. The "-n" option to 'bup split' and 'bup save' is the name of the backup you want to create, but it's actually implemented as a git branch. So you can do cute things like checkout a particular branch using git, and receive a bunch of chunk files corresponding to the file you split. If you use '-b' or '-t' or '-c' instead of '-n', bup split will output a list of blobs, a tree containing that list of blobs, or a commit containing that tree, respectively, to stdout. You can use this to construct your own scripts that do something with those values. The bup index: -------------- 'bup index' walks through your filesystem and updates a file (whose name is, by default, ~/.bup/bupindex) to contain the name, attributes, and an optional git SHA1 (blob id) of each file and directory. 'bup save' basically just runs the equivalent of 'bup split' a whole bunch of times, once per file in the index, and assembles a git tree that contains all the resulting objects. Among other things, that makes 'git diff' much more useful (compared to splitting a tarball, which is essentially a big binary blob). 
However, since bup splits large files into smaller chunks, the resulting tree structure doesn't *exactly* correspond to what git itself would have stored. Also, the tree format used by 'bup save' will probably change in the future to support storing file ownership, more complex file permissions, and so on. If a file has previously been written by 'bup save', then its git blob/tree id is stored in the index. This lets 'bup save' avoid reading that file to produce future incremental backups, which means it can go *very* fast unless a lot of files have changed. Things that are stupid for now but which we'll fix later ======================================================== Help with any of these problems, or others, is very welcome. Join the mailing list (see below) if you'd like to help. - 'bup restore' can't pull directly from a remote server. So in one sense "save -r" is a dead-end right now. Obviously you can use "ssh SERVER bup restore -C ./dest..." to create a tree you can transfer elsewhere via rsync/tar/whatever, but that's *lame*. Until we fix it, you may be able to mount the remote BUP_DIR via sshfs and then restore "normally", though that hasn't been officially tested. - 'bup save' and 'bup restore' have immature metadata support. On the plus side, they actually do have support now, but it's new, and not remotely as well tested as tar/rsync/whatever's. However, you have to start somewhere, and as of 0.25, we think it's ready for more general use. Please let us know if you have any trouble. Also, if any strip or graft-style options are specified to 'bup save', then no metadata will be written for the root directory. That's obviously less than ideal. - bup is overly optimistic about mmap. Right now bup just assumes that it can mmap as large a block as it likes, and that mmap will never fail. Yeah, right... If nothing else, this has failed on 32-bit architectures (and 31-bit is even worse -- looking at you, s390). 
To fix this, we might just implement a FakeMmap[1] class that uses normal file IO and handles all of the mmap methods[2] that bup actually calls. Then we'd swap in one of those whenever mmap fails. This would also require implementing some of the methods needed to support "[]" array access, probably at a minimum __getitem__, __setitem__, and __setslice__ [3]. [1] http://comments.gmane.org/gmane.comp.sysutils.backup.bup/613 [2] http://docs.python.org/2/library/mmap.html [3] http://docs.python.org/2/reference/datamodel.html#emulating-container-types - 'bup index' is slower than it should be. It's still rather fast: it can iterate through all the filenames on my 600,000 file filesystem in a few seconds. But it still needs to rewrite the entire index file just to add a single filename, which is pretty nasty; it should just leave the new files in a second "extra index" file or something. - bup could use inotify for *really* efficient incremental backups. You could even have your system doing "continuous" backups: whenever a file changes, we immediately send an image of it to the server. We could give the continuous-backup process a really low CPU and I/O priority so you wouldn't even know it was running. - bup currently has no way to prune *old* backups. Because of the way the packfile system works, backups become "entangled" in weird ways and it's not actually possible to delete one pack (corresponding approximately to one backup) without risking screwing up other backups. git itself has lots of ways of optimizing this sort of thing, but its methods aren't really applicable here; bup packfiles are just too huge. We'll have to do it in a totally different way. There are lots of options. For now: make sure you've got lots of disk space :) Until we fix this, one possible workaround is to just start a new BUP_DIR occasionally, i.e. bup-2013-10, bup-2013-11... - bup has never been tested on anything but Linux, MacOS, and Windows+Cygwin. 
There's nothing that makes it *inherently* non-portable, though, so that's mostly a matter of someone putting in some effort. (For a "native" Windows port, the most annoying thing is the absence of ssh in a default Windows installation.) - bup needs better documentation. According to a recent article about bup in Linux Weekly News (https://lwn.net/Articles/380983/), "it's a bit short on examples and a user guide would be nice." Documentation is the sort of thing that will never be great unless someone from outside contributes it (since the developers can never remember which parts are hard to understand). - bup is "relatively speedy" and has "pretty good" compression. ...according to the same LWN article. Clearly neither of those is good enough. We should have awe-inspiring speed and crazy-good compression. Must work on that. Writing more parts in C might help with the speed. - bup has no GUI. Actually, that's not stupid, but you might consider it a limitation. There are a bunch of Linux GUI backup programs; someday I expect someone will adapt one of them to use bup. More Documentation ================== bup has an extensive set of man pages. Try using 'bup help' to get started, or use 'bup help SUBCOMMAND' for any bup subcommand (like split, join, index, save, etc.) to get details on that command. For further technical details, please see ./DESIGN. How you can help ================ bup is a work in progress and there are many ways it can still be improved. If you'd like to contribute patches, ideas, or bug reports, please join the bup mailing list. You can find the mailing list archives here: http://groups.google.com/group/bup-list and you can subscribe by sending a message to: bup-list+subscribe@googlegroups.com Please see ./HACKING for additional information, i.e. how to submit patches (hint - no pull requests), how we handle branches, etc. 
Have fun, Avery bup-0.25/SIGNED-OFF-BY000066400000000000000000000003761225146730500140550ustar00rootroot00000000000000 Patches to bup should have a Signed-off-by: header. If you include this header in your patches, this signifies that you are licensing your patch to be used under the same terms as the rest of bup, ie. the GNU Library General Public License, version 2. bup-0.25/cmd/000077500000000000000000000000001225146730500127765ustar00rootroot00000000000000bup-0.25/cmd/bloom-cmd.py000077500000000000000000000116371225146730500152340ustar00rootroot00000000000000#!/usr/bin/env python import sys, glob, tempfile from bup import options, git, bloom from bup.helpers import * optspec = """ bup bloom [options...] -- ruin ruin the specified bloom file (clearing the bitfield) f,force ignore existing bloom file and regenerate it from scratch o,output= output bloom filename (default: auto) d,dir= input directory to look for idx files (default: auto) k,hashes= number of hash functions to use (4 or 5) (default: auto) c,check= check the given .idx file against the bloom filter """ def ruin_bloom(bloomfilename): rbloomfilename = git.repo_rel(bloomfilename) if not os.path.exists(bloomfilename): log("%s\n" % bloomfilename) add_error("bloom: %s not found to ruin\n" % rbloomfilename) return b = bloom.ShaBloom(bloomfilename, readwrite=True, expected=1) b.map[16:16+2**b.bits] = '\0' * 2**b.bits def check_bloom(path, bloomfilename, idx): rbloomfilename = git.repo_rel(bloomfilename) ridx = git.repo_rel(idx) if not os.path.exists(bloomfilename): log("bloom: %s: does not exist.\n" % rbloomfilename) return b = bloom.ShaBloom(bloomfilename) if not b.valid(): add_error("bloom: %r is invalid.\n" % rbloomfilename) return base = os.path.basename(idx) if base not in b.idxnames: log("bloom: %s does not contain the idx.\n" % rbloomfilename) return if base == idx: idx = os.path.join(path, idx) log("bloom: bloom file: %s\n" % rbloomfilename) log("bloom: checking %s\n" % ridx) for objsha in 
git.open_idx(idx): if not b.exists(objsha): add_error("bloom: ERROR: object %s missing" % str(objsha).encode('hex')) _first = None def do_bloom(path, outfilename): global _first b = None if os.path.exists(outfilename) and not opt.force: b = bloom.ShaBloom(outfilename) if not b.valid(): debug1("bloom: Existing invalid bloom found, regenerating.\n") b = None add = [] rest = [] add_count = 0 rest_count = 0 for i,name in enumerate(glob.glob('%s/*.idx' % path)): progress('bloom: counting: %d\r' % i) ix = git.open_idx(name) ixbase = os.path.basename(name) if b and (ixbase in b.idxnames): rest.append(name) rest_count += len(ix) else: add.append(name) add_count += len(ix) total = add_count + rest_count if not add: debug1("bloom: nothing to do.\n") return if b: if len(b) != rest_count: debug1("bloom: size %d != idx total %d, regenerating\n" % (len(b), rest_count)) b = None elif (b.bits < bloom.MAX_BLOOM_BITS and b.pfalse_positive(add_count) > bloom.MAX_PFALSE_POSITIVE): debug1("bloom: regenerating: adding %d entries gives " "%.2f%% false positives.\n" % (add_count, b.pfalse_positive(add_count))) b = None else: b = bloom.ShaBloom(outfilename, readwrite=True, expected=add_count) if not b: # Need all idxs to build from scratch add += rest add_count += rest_count del rest del rest_count msg = b is None and 'creating from' or 'adding' if not _first: _first = path dirprefix = (_first != path) and git.repo_rel(path)+': ' or '' progress('bloom: %s%s %d file%s (%d object%s).\n' % (dirprefix, msg, len(add), len(add)!=1 and 's' or '', add_count, add_count!=1 and 's' or '')) tfname = None if b is None: tfname = os.path.join(path, 'bup.tmp.bloom') b = bloom.create(tfname, expected=add_count, k=opt.k) count = 0 icount = 0 for name in add: ix = git.open_idx(name) qprogress('bloom: writing %.2f%% (%d/%d objects)\r' % (icount*100.0/add_count, icount, add_count)) b.add_idx(ix) count += 1 icount += len(ix) # Currently, there's an open file object for tfname inside b. 
# Make sure it's closed before rename. b.close() if tfname: os.rename(tfname, outfilename) handle_ctrl_c() o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) if extra: o.fatal('no positional parameters expected') git.check_repo_or_die() if not opt.check and opt.k and opt.k not in (4,5): o.fatal('only k values of 4 and 5 are supported') paths = opt.dir and [opt.dir] or git.all_packdirs() for path in paths: debug1('bloom: scanning %s\n' % path) outfilename = opt.output or os.path.join(path, 'bup.bloom') if opt.check: check_bloom(path, outfilename, opt.check) elif opt.ruin: ruin_bloom(outfilename) else: do_bloom(path, outfilename) if saved_errors: log('WARNING: %d errors encountered during bloom.\n' % len(saved_errors)) sys.exit(1) elif opt.check: log('All tests passed.\n') bup-0.25/cmd/cat-file-cmd.py000077500000000000000000000031721225146730500156030ustar00rootroot00000000000000#!/usr/bin/env python import sys, stat from bup import options, git, vfs from bup.helpers import * optspec = """ bup cat-file [--meta|--bupm] /branch/revision/[path] -- meta print the target's metadata entry (decoded then reencoded) to stdout bupm print the target directory's .bupm file directly to stdout """ handle_ctrl_c() o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) git.check_repo_or_die() top = vfs.RefList(None) if not extra: o.fatal('must specify a target') if len(extra) > 1: o.fatal('only one target file allowed') if opt.bupm and opt.meta: o.fatal('--meta and --bupm are incompatible') target = extra[0] if not re.match(r'/*[^/]+/[^/]+', target): o.fatal("path %r doesn't include a branch and revision" % target) try: n = top.lresolve(target) except vfs.NodeError, e: o.fatal(e) if isinstance(n, vfs.FakeSymlink): # Source is actually /foo/what, i.e. a top-level commit # like /foo/latest, which is a symlink to ../.commit/SHA. # So dereference it. 
target = n.dereference() if opt.bupm: if not stat.S_ISDIR(n.mode): o.fatal('%r is not a directory' % target) mfile = n.metadata_file() # VFS file -- cannot close(). if mfile: meta_stream = mfile.open() sys.stdout.write(meta_stream.read()) elif opt.meta: sys.stdout.write(n.metadata().encode()) else: if stat.S_ISREG(n.mode): for b in chunkyreader(n.open()): sys.stdout.write(b) else: o.fatal('%r is not a plain file' % target) if saved_errors: log('warning: %d errors encountered\n' % len(saved_errors)) sys.exit(1) bup-0.25/cmd/daemon-cmd.py000077500000000000000000000036411225146730500153630ustar00rootroot00000000000000#!/usr/bin/env python import sys, getopt, socket, subprocess, fcntl from bup import options, path from bup.helpers import * optspec = """ bup daemon [options...] -- [bup-server options...] -- l,listen ip address to listen on, defaults to * p,port port to listen on, defaults to 1982 """ o = options.Options(optspec, optfunc=getopt.getopt) (opt, flags, extra) = o.parse(sys.argv[1:]) host = opt.listen port = opt.port and int(opt.port) or 1982 import socket import sys socks = [] e = None for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE): af, socktype, proto, canonname, sa = res try: s = socket.socket(af, socktype, proto) except socket.error, e: continue try: if af == socket.AF_INET6: log("bup daemon: listening on [%s]:%s\n" % sa[:2]) else: log("bup daemon: listening on %s:%s\n" % sa[:2]) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind(sa) s.listen(1) fcntl.fcntl(s.fileno(), fcntl.F_SETFD, fcntl.FD_CLOEXEC) except socket.error, e: s.close() continue socks.append(s) if not socks: log('bup daemon: listen socket: %s\n' % e.args[1]) sys.exit(1) try: while True: [rl,wl,xl] = select.select(socks, [], [], 60) for l in rl: s, src = l.accept() try: log("Socket accepted connection from %s\n" % (src,)) fd1 = os.dup(s.fileno()) fd2 = os.dup(s.fileno()) s.close() sp = subprocess.Popen([path.exe(), 'mux', 
'--', 'server'] + extra, stdin=fd1, stdout=fd2) finally: os.close(fd1) os.close(fd2) finally: for l in socks: l.shutdown(socket.SHUT_RDWR) l.close() debug1("bup daemon: done") bup-0.25/cmd/damage-cmd.py000077500000000000000000000027331225146730500153370ustar00rootroot00000000000000#!/usr/bin/env python import sys, os, random from bup import options from bup.helpers import * def randblock(n): l = [] for i in xrange(n): l.append(chr(random.randrange(0,256))) return ''.join(l) optspec = """ bup damage [-n count] [-s maxsize] [-S seed] -- WARNING: THIS COMMAND IS EXTREMELY DANGEROUS n,num= number of blocks to damage s,size= maximum size of each damaged block percent= maximum size of each damaged block (as a percent of entire file) equal spread damage evenly throughout the file S,seed= random number seed (for repeatable tests) """ o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) if not extra: o.fatal('filenames expected') if opt.seed != None: random.seed(opt.seed) for name in extra: log('Damaging "%s"...\n' % name) f = open(name, 'r+b') st = os.fstat(f.fileno()) size = st.st_size if opt.percent or opt.size: ms1 = int(float(opt.percent or 0)/100.0*size) or size ms2 = opt.size or size maxsize = min(ms1, ms2) else: maxsize = 1 chunks = opt.num or 10 chunksize = size/chunks for r in range(chunks): sz = random.randrange(1, maxsize+1) if sz > size: sz = size if opt.equal: ofs = r*chunksize else: ofs = random.randrange(0, size - sz + 1) log(' %6d bytes at %d\n' % (sz, ofs)) f.seek(ofs) f.write(randblock(sz)) f.close() bup-0.25/cmd/drecurse-cmd.py000077500000000000000000000017771225146730500157440ustar00rootroot00000000000000#!/usr/bin/env python from bup import options, drecurse from bup.helpers import * optspec = """ bup drecurse -- x,xdev,one-file-system don't cross filesystem boundaries exclude= a path to exclude from the backup (can be used more than once) exclude-from= a file that contains exclude paths (can be used more than once) q,quiet don't 
actually print filenames profile run under the python profiler """ o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) if len(extra) != 1: o.fatal("exactly one filename expected") excluded_paths = parse_excludes(flags, o.fatal) it = drecurse.recursive_dirlist(extra, opt.xdev, excluded_paths=excluded_paths) if opt.profile: import cProfile def do_it(): for i in it: pass cProfile.run('do_it()') else: if opt.quiet: for i in it: pass else: for (name,st) in it: print name if saved_errors: log('WARNING: %d errors encountered.\n' % len(saved_errors)) sys.exit(1) bup-0.25/cmd/fsck-cmd.py000077500000000000000000000142621225146730500150470ustar00rootroot00000000000000#!/usr/bin/env python import sys, os, glob, subprocess from bup import options, git from bup.helpers import * par2_ok = 0 nullf = open('/dev/null') def debug(s): if opt.verbose > 1: log(s) def run(argv): # at least in python 2.5, using "stdout=2" or "stdout=sys.stderr" below # doesn't actually work, because subprocess closes fd #2 right before # execing for some reason. So we work around it by duplicating the fd # first. 
fd = os.dup(2) # copy stderr try: p = subprocess.Popen(argv, stdout=fd, close_fds=False) return p.wait() finally: os.close(fd) def par2_setup(): global par2_ok rv = 1 try: p = subprocess.Popen(['par2', '--help'], stdout=nullf, stderr=nullf, stdin=nullf) rv = p.wait() except OSError: log('fsck: warning: par2 not found; disabling recovery features.\n') else: par2_ok = 1 def parv(lvl): if opt.verbose >= lvl: if istty2: return [] else: return ['-q'] else: return ['-qq'] def par2_generate(base): return run(['par2', 'create', '-n1', '-c200'] + parv(2) + ['--', base, base+'.pack', base+'.idx']) def par2_verify(base): return run(['par2', 'verify'] + parv(3) + ['--', base]) def par2_repair(base): return run(['par2', 'repair'] + parv(2) + ['--', base]) def quick_verify(base): f = open(base + '.pack', 'rb') f.seek(-20, 2) wantsum = f.read(20) assert(len(wantsum) == 20) f.seek(0) sum = Sha1() for b in chunkyreader(f, os.fstat(f.fileno()).st_size - 20): sum.update(b) if sum.digest() != wantsum: raise ValueError('expected %r, got %r' % (wantsum.encode('hex'), sum.hexdigest())) def git_verify(base): if opt.quick: try: quick_verify(base) except Exception, e: debug('error: %s\n' % e) return 1 return 0 else: return run(['git', 'verify-pack', '--', base]) def do_pack(base, last, par2_exists): code = 0 if par2_ok and par2_exists and (opt.repair or not opt.generate): vresult = par2_verify(base) if vresult != 0: if opt.repair: rresult = par2_repair(base) if rresult != 0: action_result = 'failed' log('%s par2 repair: failed (%d)\n' % (last, rresult)) code = rresult else: action_result = 'repaired' log('%s par2 repair: succeeded (0)\n' % last) code = 100 else: action_result = 'failed' log('%s par2 verify: failed (%d)\n' % (last, vresult)) code = vresult else: action_result = 'ok' elif not opt.generate or (par2_ok and not par2_exists): gresult = git_verify(base) if gresult != 0: action_result = 'failed' log('%s git verify: failed (%d)\n' % (last, gresult)) code = gresult else: if par2_ok 
and opt.generate: presult = par2_generate(base) if presult != 0: action_result = 'failed' log('%s par2 create: failed (%d)\n' % (last, presult)) code = presult else: action_result = 'generated' else: action_result = 'ok' else: assert(opt.generate and (not par2_ok or par2_exists)) action_result = 'exists' if par2_exists else 'skipped' if opt.verbose: print last, action_result return code optspec = """ bup fsck [options...] [filenames...] -- r,repair attempt to repair errors using par2 (dangerous!) g,generate generate auto-repair information using par2 v,verbose increase verbosity (can be used more than once) quick just check pack sha1sum, don't use git verify-pack j,jobs= run 'n' jobs in parallel par2-ok immediately return 0 if par2 is ok, 1 if not disable-par2 ignore par2 even if it is available """ o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) par2_setup() if opt.par2_ok: if par2_ok: sys.exit(0) # 'true' in sh else: sys.exit(1) if opt.disable_par2: par2_ok = 0 git.check_repo_or_die() if not extra: debug('fsck: No filenames given: checking all packs.\n') extra = glob.glob(git.repo('objects/pack/*.pack')) code = 0 count = 0 outstanding = {} for name in extra: if name.endswith('.pack'): base = name[:-5] elif name.endswith('.idx'): base = name[:-4] elif name.endswith('.par2'): base = name[:-5] elif os.path.exists(name + '.pack'): base = name else: raise Exception('%s is not a pack file!' 
% name) (dir,last) = os.path.split(base) par2_exists = os.path.exists(base + '.par2') if par2_exists and os.stat(base + '.par2').st_size == 0: par2_exists = 0 sys.stdout.flush() debug('fsck: checking %s (%s)\n' % (last, par2_ok and par2_exists and 'par2' or 'git')) if not opt.verbose: progress('fsck (%d/%d)\r' % (count, len(extra))) if not opt.jobs: nc = do_pack(base, last, par2_exists) code = code or nc count += 1 else: while len(outstanding) >= opt.jobs: (pid,nc) = os.wait() nc >>= 8 if pid in outstanding: del outstanding[pid] code = code or nc count += 1 pid = os.fork() if pid: # parent outstanding[pid] = 1 else: # child try: sys.exit(do_pack(base, last, par2_exists)) except Exception, e: log('exception: %r\n' % e) sys.exit(99) while len(outstanding): (pid,nc) = os.wait() nc >>= 8 if pid in outstanding: del outstanding[pid] code = code or nc count += 1 if not opt.verbose: progress('fsck (%d/%d)\r' % (count, len(extra))) if istty2: debug('fsck done. \n') sys.exit(code) bup-0.25/cmd/ftp-cmd.py000077500000000000000000000145011225146730500147060ustar00rootroot00000000000000#!/usr/bin/env python import sys, os, stat, fnmatch from bup import options, git, shquote, vfs, ls from bup.helpers import * handle_ctrl_c() class OptionError(Exception): pass # Check out lib/bup/ls.py for the opt spec def do_ls(cmd_args): try: ls.do_ls(cmd_args, pwd, onabort=OptionError) except OptionError, e: return def write_to_file(inf, outf): for blob in chunkyreader(inf): outf.write(blob) def inputiter(): if os.isatty(sys.stdin.fileno()): while 1: try: yield raw_input('bup> ') except EOFError: print '' # Clear the line for the terminal's next prompt break else: for line in sys.stdin: yield line def _completer_get_subs(line): (qtype, lastword) = shquote.unfinished_word(line) (dir,name) = os.path.split(lastword) #log('\ncompleter: %r %r %r\n' % (qtype, lastword, text)) try: n = pwd.resolve(dir) subs = list(filter(lambda x: x.name.startswith(name), n.subs())) except vfs.NoSuchFile, e: subs = [] 
return (dir, name, qtype, lastword, subs) def find_readline_lib(): """Return the name (and possibly the full path) of the readline library linked to the given readline module. """ import readline f = open(readline.__file__, "rb") try: data = f.read() finally: f.close() import re m = re.search('\0([^\0]*libreadline[^\0]*)\0', data) if m: return m.group(1) return None def init_readline_vars(): """Work around trailing space automatically inserted by readline. See http://bugs.python.org/issue5833""" try: import ctypes except ImportError: # python before 2.5 didn't have the ctypes module; but those # old systems probably also didn't have this readline bug, so # just ignore it. return lib_name = find_readline_lib() if lib_name is not None: lib = ctypes.cdll.LoadLibrary(lib_name) global rl_completion_suppress_append rl_completion_suppress_append = ctypes.c_int.in_dll(lib, "rl_completion_suppress_append") rl_completion_suppress_append = None _last_line = None _last_res = None def completer(text, state): global _last_line global _last_res global rl_completion_suppress_append if rl_completion_suppress_append is not None: rl_completion_suppress_append.value = 1 try: line = readline.get_line_buffer()[:readline.get_endidx()] if _last_line != line: _last_res = _completer_get_subs(line) _last_line = line (dir, name, qtype, lastword, subs) = _last_res if state < len(subs): sn = subs[state] sn1 = sn.try_resolve() # find the type of any symlink target fullname = os.path.join(dir, sn.name) if stat.S_ISDIR(sn1.mode): ret = shquote.what_to_add(qtype, lastword, fullname+'/', terminate=False) else: ret = shquote.what_to_add(qtype, lastword, fullname, terminate=True) + ' ' return text + ret except Exception, e: log('\n') try: import traceback traceback.print_tb(sys.exc_traceback) except Exception, e2: log('Error printing traceback: %s\n' % e2) log('\nError in completion: %s\n' % e) optspec = """ bup ftp [commands...] 
""" o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) git.check_repo_or_die() top = vfs.RefList(None) pwd = top rv = 0 if extra: lines = extra else: try: import readline except ImportError: log('* readline module not available: line editing disabled.\n') readline = None if readline: readline.set_completer_delims(' \t\n\r/') readline.set_completer(completer) if sys.platform.startswith('darwin'): # MacOS uses a slighly incompatible clone of libreadline readline.parse_and_bind('bind ^I rl_complete') readline.parse_and_bind('tab: complete') init_readline_vars() lines = inputiter() for line in lines: if not line.strip(): continue words = [word for (wordstart,word) in shquote.quotesplit(line)] cmd = words[0].lower() #log('execute: %r %r\n' % (cmd, parm)) try: if cmd == 'ls': do_ls(words[1:]) elif cmd == 'cd': np = pwd for parm in words[1:]: np = np.resolve(parm) if not stat.S_ISDIR(np.mode): raise vfs.NotDir('%s is not a directory' % parm) pwd = np elif cmd == 'pwd': print pwd.fullname() elif cmd == 'cat': for parm in words[1:]: write_to_file(pwd.resolve(parm).open(), sys.stdout) elif cmd == 'get': if len(words) not in [2,3]: rv = 1 raise Exception('Usage: get [localname]') rname = words[1] (dir,base) = os.path.split(rname) lname = len(words)>2 and words[2] or base inf = pwd.resolve(rname).open() log('Saving %r\n' % lname) write_to_file(inf, open(lname, 'wb')) elif cmd == 'mget': for parm in words[1:]: (dir,base) = os.path.split(parm) for n in pwd.resolve(dir).subs(): if fnmatch.fnmatch(n.name, base): try: log('Saving %r\n' % n.name) inf = n.open() outf = open(n.name, 'wb') write_to_file(inf, outf) outf.close() except Exception, e: rv = 1 log(' error: %s\n' % e) elif cmd == 'help' or cmd == '?': log('Commands: ls cd pwd cat get mget help quit\n') elif cmd == 'quit' or cmd == 'exit' or cmd == 'bye': break else: rv = 1 raise Exception('no such command %r' % cmd) except Exception, e: rv = 1 log('error: %s\n' % e) #raise sys.exit(rv) 
bup-0.25/cmd/fuse-cmd.py000077500000000000000000000067011225146730500150620ustar00rootroot00000000000000#!/usr/bin/env python import sys, os, errno from bup import options, git, vfs from bup.helpers import * try: import fuse except ImportError: log('error: cannot find the python "fuse" module; please install it\n') sys.exit(1) class Stat(fuse.Stat): def __init__(self): self.st_mode = 0 self.st_ino = 0 self.st_dev = 0 self.st_nlink = 0 self.st_uid = 0 self.st_gid = 0 self.st_size = 0 self.st_atime = 0 self.st_mtime = 0 self.st_ctime = 0 self.st_blocks = 0 self.st_blksize = 0 self.st_rdev = 0 cache = {} def cache_get(top, path): parts = path.split('/') cache[('',)] = top c = None max = len(parts) #log('cache: %r\n' % cache.keys()) for i in range(max): pre = parts[:max-i] #log('cache trying: %r\n' % pre) c = cache.get(tuple(pre)) if c: rest = parts[max-i:] for r in rest: #log('resolving %r from %r\n' % (r, c.fullname())) c = c.lresolve(r) key = tuple(pre + [r]) #log('saving: %r\n' % (key,)) cache[key] = c break assert(c) return c class BupFs(fuse.Fuse): def __init__(self, top): fuse.Fuse.__init__(self) self.top = top def getattr(self, path): log('--getattr(%r)\n' % path) try: node = cache_get(self.top, path) st = Stat() st.st_mode = node.mode st.st_nlink = node.nlinks() st.st_size = node.size() st.st_mtime = node.mtime st.st_ctime = node.ctime st.st_atime = node.atime return st except vfs.NoSuchFile: return -errno.ENOENT def readdir(self, path, offset): log('--readdir(%r)\n' % path) node = cache_get(self.top, path) yield fuse.Direntry('.') yield fuse.Direntry('..') for sub in node.subs(): yield fuse.Direntry(sub.name) def readlink(self, path): log('--readlink(%r)\n' % path) node = cache_get(self.top, path) return node.readlink() def open(self, path, flags): log('--open(%r)\n' % path) node = cache_get(self.top, path) accmode = os.O_RDONLY | os.O_WRONLY | os.O_RDWR if (flags & accmode) != os.O_RDONLY: return -errno.EACCES node.open() def release(self, path, flags): 
log('--release(%r)\n' % path) def read(self, path, size, offset): log('--read(%r)\n' % path) n = cache_get(self.top, path) o = n.open() o.seek(offset) return o.read(size) if not hasattr(fuse, '__version__'): raise RuntimeError, "your fuse module is too old for fuse.__version__" fuse.fuse_python_api = (0, 2) optspec = """ bup fuse [-d] [-f] -- d,debug increase debug level f,foreground run in foreground o,allow-other allow other users to access the filesystem """ o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) if len(extra) != 1: o.fatal("exactly one argument expected") git.check_repo_or_die() top = vfs.RefList(None) f = BupFs(top) f.fuse_args.mountpoint = extra[0] if opt.debug: f.fuse_args.add('debug') if opt.foreground: f.fuse_args.setmod('foreground') print f.multithreaded f.multithreaded = False if opt.allow_other: f.fuse_args.add('allow_other') f.main() bup-0.25/cmd/help-cmd.py000077500000000000000000000015041225146730500150440ustar00rootroot00000000000000#!/usr/bin/env python import sys, os, glob from bup import options, path optspec = """ bup help """ o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) if len(extra) == 0: # the wrapper program provides the default usage string os.execvp(os.environ['BUP_MAIN_EXE'], ['bup']) elif len(extra) == 1: docname = (extra[0]=='bup' and 'bup' or ('bup-%s' % extra[0])) manpath = os.path.join(path.exedir(), 'Documentation/' + docname + '.[1-9]') g = glob.glob(manpath) try: if g: os.execvp('man', ['man', '-l', g[0]]) else: os.execvp('man', ['man', docname]) except OSError, e: sys.stderr.write('Unable to run man command: %s\n' % e) sys.exit(1) else: o.fatal("exactly one command name expected") bup-0.25/cmd/import-rdiff-backup-cmd.sh000077500000000000000000000032141225146730500177430ustar00rootroot00000000000000#!/usr/bin/env bash set -o pipefail must() { local file=${BASH_SOURCE[0]} local line=${BASH_LINENO[0]} "$@" local rc=$? 
if test $rc -ne 0; then echo "Failed at line $line in $file" 1>&2 exit $rc fi } usage() { echo "Usage: bup import-rdiff-backup [-n]" \ " " echo "-n,--dry-run: just print what would be done" exit 1 } control_c() { echo "bup import-rdiff-backup: signal 2 received" 1>&2 exit 128 } must trap control_c INT dry_run= while [ "$1" = "-n" -o "$1" = "--dry-run" ]; do dry_run=echo shift done bup() { $dry_run "${BUP_MAIN_EXE:=bup}" "$@" } snapshot_root="$1" branch="$2" [ -n "$snapshot_root" -a "$#" = 2 ] || usage if [ ! -e "$snapshot_root/." ]; then echo "'$snapshot_root' isn't a directory!" exit 1 fi backups=$(must rdiff-backup --list-increments --parsable-output "$snapshot_root") \ || exit $? backups_count=$(echo "$backups" | must wc -l) || exit $? counter=1 echo "$backups" | while read timestamp type; do tmpdir=$(must mktemp -d) || exit $? echo "Importing backup from $(date -d @$timestamp +%c) " \ "($counter / $backups_count)" 1>&2 echo 1>&2 echo "Restoring from rdiff-backup..." 1>&2 must rdiff-backup -r $timestamp "$snapshot_root" "$tmpdir" echo 1>&2 echo "Importing into bup..." 1>&2 TMPIDX=$(must mktemp -u) || exit $? must bup index -ux -f "$tmpidx" "$tmpdir" must bup save --strip --date="$timestamp" -f "$tmpidx" -n "$branch" "$tmpdir" must rm -f "$tmpidx" must rm -rf "$tmpdir" counter=$((counter+1)) echo 1>&2 echo 1>&2 done bup-0.25/cmd/import-rsnapshot-cmd.sh000077500000000000000000000025531225146730500174340ustar00rootroot00000000000000#!/bin/sh # Does an import of a rsnapshot archive. usage() { echo "Usage: bup import-rsnapshot [-n]" \ " []" echo "-n,--dry-run: just print what would be done" exit -1 } DRY_RUN= while [ "$1" = "-n" -o "$1" = "--dry-run" ]; do DRY_RUN=echo shift done bup() { $DRY_RUN "${BUP_MAIN_EXE:=bup}" "$@" } SNAPSHOT_ROOT=$1 TARGET=$2 [ -n "$SNAPSHOT_ROOT" -a "$#" -le 2 ] || usage if [ ! -e "$SNAPSHOT_ROOT/." ]; then echo "'$SNAPSHOT_ROOT' isn't a directory!" exit 1 fi cd "$SNAPSHOT_ROOT" || exit 2 for SNAPSHOT in *; do [ -e "$SNAPSHOT/." 
] || continue echo "snapshot='$SNAPSHOT'" >&2 for BRANCH_PATH in "$SNAPSHOT/"*; do BRANCH=$(basename "$BRANCH_PATH") || exit $? [ -e "$BRANCH_PATH/." ] || continue [ -z "$TARGET" -o "$TARGET" = "$BRANCH" ] || continue echo "snapshot='$SNAPSHOT' branch='$BRANCH'" >&2 # Get the snapshot's ctime DATE=$(perl -e '@a=stat($ARGV[0]) or die "$ARGV[0]: $!"; print $a[10];' "$BRANCH_PATH") [ -n "$DATE" ] || exit 3 TMPIDX=bupindex.$BRANCH.tmp bup index -ux -f "$TMPIDX" "$BRANCH_PATH/" || exit $? bup save --strip --date="$DATE" \ -f "$TMPIDX" -n "$BRANCH" \ "$BRANCH_PATH/" || exit $? rm "$TMPIDX" || exit $? done done bup-0.25/cmd/index-cmd.py000077500000000000000000000236171225146730500152340ustar00rootroot00000000000000#!/usr/bin/env python import sys, stat, time, os, errno, re from bup import metadata, options, git, index, drecurse, hlinkdb from bup.helpers import * from bup.hashsplit import GIT_MODE_TREE, GIT_MODE_FILE class IterHelper: def __init__(self, l): self.i = iter(l) self.cur = None self.next() def next(self): try: self.cur = self.i.next() except StopIteration: self.cur = None return self.cur def check_index(reader): try: log('check: checking forward iteration...\n') e = None d = {} for e in reader.forward_iter(): if e.children_n: if opt.verbose: log('%08x+%-4d %r\n' % (e.children_ofs, e.children_n, e.name)) assert(e.children_ofs) assert(e.name.endswith('/')) assert(not d.get(e.children_ofs)) d[e.children_ofs] = 1 if e.flags & index.IX_HASHVALID: assert(e.sha != index.EMPTY_SHA) assert(e.gitmode) assert(not e or e.name == '/') # last entry is *always* / log('check: checking normal iteration...\n') last = None for e in reader: if last: assert(last > e.name) last = e.name except: log('index error! 
at %r\n' % e) raise log('check: passed.\n') def clear_index(indexfile): indexfiles = [indexfile, indexfile + '.meta', indexfile + '.hlink'] for indexfile in indexfiles: path = git.repo(indexfile) try: os.remove(path) if opt.verbose: log('clear: removed %s\n' % path) except OSError, e: if e.errno != errno.ENOENT: raise def update_index(top, excluded_paths, exclude_rxs): # tmax and start must be epoch nanoseconds. tmax = (time.time() - 1) * 10**9 ri = index.Reader(indexfile) msw = index.MetaStoreWriter(indexfile + '.meta') wi = index.Writer(indexfile, msw, tmax) rig = IterHelper(ri.iter(name=top)) tstart = int(time.time()) * 10**9 hlinks = hlinkdb.HLinkDB(indexfile + '.hlink') hashgen = None if opt.fake_valid: def hashgen(name): return (GIT_MODE_FILE, index.FAKE_SHA) total = 0 bup_dir = os.path.abspath(git.repo()) for (path,pst) in drecurse.recursive_dirlist([top], xdev=opt.xdev, bup_dir=bup_dir, excluded_paths=excluded_paths, exclude_rxs=exclude_rxs): if opt.verbose>=2 or (opt.verbose==1 and stat.S_ISDIR(pst.st_mode)): sys.stdout.write('%s\n' % path) sys.stdout.flush() qprogress('Indexing: %d\r' % total) elif not (total % 128): qprogress('Indexing: %d\r' % total) total += 1 while rig.cur and rig.cur.name > path: # deleted paths if rig.cur.exists(): rig.cur.set_deleted() rig.cur.repack() if rig.cur.nlink > 1 and not stat.S_ISDIR(rig.cur.mode): hlinks.del_path(rig.cur.name) rig.next() if rig.cur and rig.cur.name == path: # paths that already existed try: meta = metadata.from_path(path, statinfo=pst) except (OSError, IOError), e: add_error(e) rig.next() continue if not stat.S_ISDIR(rig.cur.mode) and rig.cur.nlink > 1: hlinks.del_path(rig.cur.name) if not stat.S_ISDIR(pst.st_mode) and pst.st_nlink > 1: hlinks.add_path(path, pst.st_dev, pst.st_ino) # Clear these so they don't bloat the store -- they're # already in the index (since they vary a lot and they're # fixed length). 
If you've noticed "tmax", you might # wonder why it's OK to do this, since that code may # adjust (mangle) the index mtime and ctime -- producing # fake values which must not end up in a .bupm. However, # it looks like that shouldn't be possible: (1) When # "save" validates the index entry, it always reads the # metadata from the filesytem. (2) Metadata is only # read/used from the index if hashvalid is true. (3) index # always invalidates "faked" entries, because "old != new" # in from_stat(). meta.ctime = meta.mtime = meta.atime = 0 meta_ofs = msw.store(meta) rig.cur.from_stat(pst, meta_ofs, tstart, check_device=opt.check_device) if not (rig.cur.flags & index.IX_HASHVALID): if hashgen: (rig.cur.gitmode, rig.cur.sha) = hashgen(path) rig.cur.flags |= index.IX_HASHVALID if opt.fake_invalid: rig.cur.invalidate() rig.cur.repack() rig.next() else: # new paths try: meta = metadata.from_path(path, statinfo=pst) except (OSError, IOError), e: add_error(e) continue # See same assignment to 0, above, for rationale. meta.atime = meta.mtime = meta.ctime = 0 meta_ofs = msw.store(meta) wi.add(path, pst, meta_ofs, hashgen = hashgen) if not stat.S_ISDIR(pst.st_mode) and pst.st_nlink > 1: hlinks.add_path(path, pst.st_dev, pst.st_ino) progress('Indexing: %d, done.\n' % total) hlinks.prepare_save() if ri.exists(): ri.save() wi.flush() if wi.count: wr = wi.new_reader() if opt.check: log('check: before merging: oldfile\n') check_index(ri) log('check: before merging: newfile\n') check_index(wr) mi = index.Writer(indexfile, msw, tmax) for e in index.merge(ri, wr): # FIXME: shouldn't we remove deleted entries eventually? When? mi.add_ixentry(e) ri.close() mi.close() wr.close() wi.abort() else: wi.close() msw.close() hlinks.commit_save() optspec = """ bup index <-p|m|s|u> [options...] 
-- Modes: p,print print the index entries for the given names (also works with -u) m,modified print only added/deleted/modified files (implies -p) s,status print each filename with a status char (A/M/D) (implies -p) u,update recursively update the index entries for the given file/dir names (default if no mode is specified) check carefully check index file integrity clear clear the default index Options: H,hash print the hash for each object next to its name l,long print more information about each file no-check-device don't invalidate an entry if the containing device changes fake-valid mark all index entries as up-to-date even if they aren't fake-invalid mark all index entries as invalid f,indexfile= the name of the index file (normally BUP_DIR/bupindex) exclude= a path to exclude from the backup (can be used more than once) exclude-from= a file that contains exclude paths (can be used more than once) exclude-rx= skip paths that match the unanchored regular expression v,verbose increase log output (can be used more than once) x,xdev,one-file-system don't cross filesystem boundaries """ o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) if not (opt.modified or \ opt['print'] or \ opt.status or \ opt.update or \ opt.check or \ opt.clear): opt.update = 1 if (opt.fake_valid or opt.fake_invalid) and not opt.update: o.fatal('--fake-{in,}valid are meaningless without -u') if opt.fake_valid and opt.fake_invalid: o.fatal('--fake-valid is incompatible with --fake-invalid') if opt.clear and opt.indexfile: o.fatal('cannot clear an external index (via -f)') # FIXME: remove this once we account for timestamp races, i.e. index; # touch new-file; index. It's possible for this to happen quickly # enough that new-file ends up with the same timestamp as the first # index, and then bup will ignore it. 
tick_start = time.time() time.sleep(1 - (tick_start - int(tick_start))) git.check_repo_or_die() indexfile = opt.indexfile or git.repo('bupindex') handle_ctrl_c() if opt.check: log('check: starting initial check.\n') check_index(index.Reader(indexfile)) if opt.clear: log('clear: clearing index.\n') clear_index(indexfile) excluded_paths = parse_excludes(flags, o.fatal) exclude_rxs = parse_rx_excludes(flags, o.fatal) paths = index.reduce_paths(extra) if opt.update: if not extra: o.fatal('update mode (-u) requested but no paths given') for (rp,path) in paths: update_index(rp, excluded_paths, exclude_rxs) if opt['print'] or opt.status or opt.modified: for (name, ent) in index.Reader(indexfile).filter(extra or ['']): if (opt.modified and (ent.is_valid() or ent.is_deleted() or not ent.mode)): continue line = '' if opt.status: if ent.is_deleted(): line += 'D ' elif not ent.is_valid(): if ent.sha == index.EMPTY_SHA: line += 'A ' else: line += 'M ' else: line += ' ' if opt.hash: line += ent.sha.encode('hex') + ' ' if opt.long: line += "%7s %7s " % (oct(ent.mode), oct(ent.gitmode)) print line + (name or './') if opt.check and (opt['print'] or opt.status or opt.modified or opt.update): log('check: starting final check.\n') check_index(index.Reader(indexfile)) if saved_errors: log('WARNING: %d errors encountered.\n' % len(saved_errors)) sys.exit(1) bup-0.25/cmd/init-cmd.py000077500000000000000000000010601225146730500150540ustar00rootroot00000000000000#!/usr/bin/env python import sys from bup import git, options, client from bup.helpers import * optspec = """ [BUP_DIR=...] 
bup init [-r host:path] -- r,remote= remote repository path """ o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) if extra: o.fatal("no arguments expected") try: git.init_repo() # local repo except git.GitError, e: log("bup: error: could not init repository: %s" % e) sys.exit(1) if opt.remote: git.check_repo_or_die() cli = client.Client(opt.remote, create=True) cli.close() bup-0.25/cmd/join-cmd.py000077500000000000000000000014011225146730500150470ustar00rootroot00000000000000#!/usr/bin/env python import sys from bup import git, options, client from bup.helpers import * optspec = """ bup join [-r host:path] [refs or hashes...] -- r,remote= remote repository path o= output filename """ o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) git.check_repo_or_die() if not extra: extra = linereader(sys.stdin) ret = 0 if opt.remote: cli = client.Client(opt.remote) cat = cli.cat else: cp = git.CatPipe() cat = cp.join if opt.o: outfile = open(opt.o, 'wb') else: outfile = sys.stdout for id in extra: try: for blob in cat(id): outfile.write(blob) except KeyError, e: outfile.flush() log('error: %s\n' % e) ret = 1 sys.exit(ret) bup-0.25/cmd/list-idx-cmd.py000077500000000000000000000024421225146730500156530ustar00rootroot00000000000000#!/usr/bin/env python import sys, os from bup import git, options from bup.helpers import * optspec = """ bup list-idx [--find=] -- find= display only objects that start with """ o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) handle_ctrl_c() opt.find = opt.find or '' if not extra: o.fatal('you must provide at least one filename') if len(opt.find) > 40: o.fatal('--find parameter must be <= 40 chars long') else: if len(opt.find) % 2: s = opt.find + '0' else: s = opt.find try: bin = s.decode('hex') except TypeError: o.fatal('--find parameter is not a valid hex string') find = opt.find.lower() count = 0 for name in extra: try: ix = git.open_idx(name) except git.GitError, e: 
add_error('%s: %s' % (name, e)) continue if len(opt.find) == 40: if ix.exists(bin): print name, find else: # slow, exhaustive search for _i in ix: i = str(_i).encode('hex') if i.startswith(find): print name, i qprogress('Searching: %d\r' % count) count += 1 if saved_errors: log('WARNING: %d errors encountered while saving.\n' % len(saved_errors)) sys.exit(1) bup-0.25/cmd/ls-cmd.py000077500000000000000000000004101225146730500145250ustar00rootroot00000000000000#!/usr/bin/env python import sys from bup import git, vfs, ls from bup.helpers import * git.check_repo_or_die() top = vfs.RefList(None) # Check out lib/bup/ls.py for the opt spec ret = ls.do_ls(sys.argv[1:], top, default='/', spec_prefix='bup ') sys.exit(ret) bup-0.25/cmd/margin-cmd.py000077500000000000000000000036411225146730500153750ustar00rootroot00000000000000#!/usr/bin/env python import sys, struct, math from bup import options, git, _helpers from bup.helpers import * POPULATION_OF_EARTH=6.7e9 # as of September, 2010 optspec = """ bup margin -- predict Guess object offsets and report the maximum deviation ignore-midx Don't use midx files; use only plain pack idx files. 
""" o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) if extra: o.fatal("no arguments expected") git.check_repo_or_die() git.ignore_midx = opt.ignore_midx mi = git.PackIdxList(git.repo('objects/pack')) def do_predict(ix): total = len(ix) maxdiff = 0 for count,i in enumerate(ix): prefix = struct.unpack('!Q', i[:8])[0] expected = prefix * total / (1<<64) diff = count - expected maxdiff = max(maxdiff, abs(diff)) print '%d of %d (%.3f%%) ' % (maxdiff, len(ix), maxdiff*100.0/len(ix)) sys.stdout.flush() assert(count+1 == len(ix)) if opt.predict: if opt.ignore_midx: for pack in mi.packs: do_predict(pack) else: do_predict(mi) else: # default mode: find longest matching prefix last = '\0'*20 longmatch = 0 for i in mi: if i == last: continue #assert(str(i) >= last) pm = _helpers.bitmatch(last, i) longmatch = max(longmatch, pm) last = i print longmatch log('%d matching prefix bits\n' % longmatch) doublings = math.log(len(mi), 2) bpd = longmatch / doublings log('%.2f bits per doubling\n' % bpd) remain = 160 - longmatch rdoublings = remain / bpd log('%d bits (%.2f doublings) remaining\n' % (remain, rdoublings)) larger = 2**rdoublings log('%g times larger is possible\n' % larger) perperson = larger/POPULATION_OF_EARTH log('\nEveryone on earth could have %d data sets like yours, all in one\n' 'repository, and we would expect 1 object collision.\n' % int(perperson)) bup-0.25/cmd/memtest-cmd.py000077500000000000000000000071521225146730500155770ustar00rootroot00000000000000#!/usr/bin/env python import sys, re, struct, time, resource from bup import git, bloom, midx, options, _helpers from bup.helpers import * handle_ctrl_c() _linux_warned = 0 def linux_memstat(): global _linux_warned #fields = ['VmSize', 'VmRSS', 'VmData', 'VmStk', 'ms'] d = {} try: f = open('/proc/self/status') except IOError, e: if not _linux_warned: log('Warning: %s\n' % e) _linux_warned = 1 return {} for line in f: # Note that on Solaris, this file exists but is binary. 
If that # happens, this split() might not return two elements. We don't # really need to care about the binary format since this output # isn't used for much and report() can deal with missing entries. t = re.split(r':\s*', line.strip(), 1) if len(t) == 2: k,v = t d[k] = v return d last = last_u = last_s = start = 0 def report(count): global last, last_u, last_s, start headers = ['RSS', 'MajFlt', 'user', 'sys', 'ms'] ru = resource.getrusage(resource.RUSAGE_SELF) now = time.time() rss = int(ru.ru_maxrss/1024) if not rss: rss = linux_memstat().get('VmRSS', '??') fields = [rss, ru.ru_majflt, int((ru.ru_utime - last_u) * 1000), int((ru.ru_stime - last_s) * 1000), int((now - last) * 1000)] fmt = '%9s ' + ('%10s ' * len(fields)) if count >= 0: print fmt % tuple([count] + fields) else: start = now print fmt % tuple([''] + headers) sys.stdout.flush() # don't include time to run report() in usage counts ru = resource.getrusage(resource.RUSAGE_SELF) last_u = ru.ru_utime last_s = ru.ru_stime last = time.time() optspec = """ bup memtest [-n elements] [-c cycles] -- n,number= number of objects per cycle [10000] c,cycles= number of cycles to run [100] ignore-midx ignore .midx files, use only .idx files existing test with existing objects instead of fake ones """ o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) if extra: o.fatal('no arguments expected') git.ignore_midx = opt.ignore_midx git.check_repo_or_die() m = git.PackIdxList(git.repo('objects/pack')) report(-1) _helpers.random_sha() report(0) if opt.existing: def foreverit(mi): while 1: for e in mi: yield e objit = iter(foreverit(m)) for c in xrange(opt.cycles): for n in xrange(opt.number): if opt.existing: bin = objit.next() assert(m.exists(bin)) else: bin = _helpers.random_sha() # technically, a randomly generated object id might exist. # but the likelihood of that is the likelihood of finding # a collision in sha-1 by accident, which is so unlikely that # we don't care. 
assert(not m.exists(bin)) report((c+1)*opt.number) if bloom._total_searches: print ('bloom: %d objects searched in %d steps: avg %.3f steps/object' % (bloom._total_searches, bloom._total_steps, bloom._total_steps*1.0/bloom._total_searches)) if midx._total_searches: print ('midx: %d objects searched in %d steps: avg %.3f steps/object' % (midx._total_searches, midx._total_steps, midx._total_steps*1.0/midx._total_searches)) if git._total_searches: print ('idx: %d objects searched in %d steps: avg %.3f steps/object' % (git._total_searches, git._total_steps, git._total_steps*1.0/git._total_searches)) print 'Total time: %.3fs' % (time.time() - start) bup-0.25/cmd/meta-cmd.py000077500000000000000000000124341225146730500150460ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2010 Rob Browning # # This code is covered under the terms of the GNU Library General # Public License as described in the bup LICENSE file. # TODO: Add tar-like -C option. import sys from bup import metadata from bup import options from bup.helpers import handle_ctrl_c, log, saved_errors def open_input(name): if not name or name == '-': return sys.stdin return open(name, 'r') def open_output(name): if not name or name == '-': return sys.stdout return open(name, 'w') optspec = """ bup meta --create [OPTION ...] bup meta --list [OPTION ...] bup meta --extract [OPTION ...] bup meta --start-extract [OPTION ...] bup meta --finish-extract [OPTION ...] bup meta --edit [OPTION ...] 
-- c,create write metadata for PATHs to stdout (or --file) t,list display metadata x,extract perform --start-extract followed by --finish-extract start-extract build tree matching metadata provided on standard input (or --file) finish-extract finish applying standard input (or --file) metadata to filesystem edit alter metadata; write to stdout (or --file) f,file= specify source or destination file R,recurse recurse into subdirectories xdev,one-file-system don't cross filesystem boundaries numeric-ids apply numeric IDs (user, group, etc.) rather than names symlinks handle symbolic links (default is true) paths include paths in metadata (default is true) set-uid= set metadata uid (via --edit) set-gid= set metadata gid (via --edit) set-user= set metadata user (via --edit) unset-user remove metadata user (via --edit) set-group= set metadata group (via --edit) unset-group remove metadata group (via --edit) v,verbose increase log output (can be used more than once) q,quiet don't show progress meter """ handle_ctrl_c() o = options.Options(optspec) (opt, flags, remainder) = o.parse(['--paths', '--symlinks', '--recurse'] + sys.argv[1:]) opt.verbose = opt.verbose or 0 opt.quiet = opt.quiet or 0 metadata.verbose = opt.verbose - opt.quiet action_count = sum([bool(x) for x in [opt.create, opt.list, opt.extract, opt.start_extract, opt.finish_extract, opt.edit]]) if action_count > 1: o.fatal("bup: only one action permitted: --create --list --extract --edit") if action_count == 0: o.fatal("bup: no action specified") if opt.create: if len(remainder) < 1: o.fatal("no paths specified for create") output_file = open_output(opt.file) metadata.save_tree(output_file, remainder, recurse=opt.recurse, write_paths=opt.paths, save_symlinks=opt.symlinks, xdev=opt.xdev) elif opt.list: if len(remainder) > 0: o.fatal("cannot specify paths for --list") src = open_input(opt.file) metadata.display_archive(src) elif opt.start_extract: if len(remainder) > 0: o.fatal("cannot specify paths for 
--start-extract") src = open_input(opt.file) metadata.start_extract(src, create_symlinks=opt.symlinks) elif opt.finish_extract: if len(remainder) > 0: o.fatal("cannot specify paths for --finish-extract") src = open_input(opt.file) metadata.finish_extract(src, restore_numeric_ids=opt.numeric_ids) elif opt.extract: if len(remainder) > 0: o.fatal("cannot specify paths for --extract") src = open_input(opt.file) metadata.extract(src, restore_numeric_ids=opt.numeric_ids, create_symlinks=opt.symlinks) elif opt.edit: if len(remainder) < 1: o.fatal("no paths specified for edit") output_file = open_output(opt.file) unset_user = False # True if --unset-user was the last relevant option. unset_group = False # True if --unset-group was the last relevant option. for flag in flags: if flag[0] == '--set-user': unset_user = False elif flag[0] == '--unset-user': unset_user = True elif flag[0] == '--set-group': unset_group = False elif flag[0] == '--unset-group': unset_group = True for path in remainder: f = open(path, 'r') try: for m in metadata._ArchiveIterator(f): if opt.set_uid is not None: try: m.uid = int(opt.set_uid) except ValueError: o.fatal("uid must be an integer") if opt.set_gid is not None: try: m.gid = int(opt.set_gid) except ValueError: o.fatal("gid must be an integer") if unset_user: m.user = '' elif opt.set_user is not None: m.user = opt.set_user if unset_group: m.group = '' elif opt.set_group is not None: m.group = opt.set_group m.write(output_file) finally: f.close() if saved_errors: log('WARNING: %d errors encountered.\n' % len(saved_errors)) sys.exit(1) else: sys.exit(0) bup-0.25/cmd/midx-cmd.py000077500000000000000000000205271225146730500150630ustar00rootroot00000000000000#!/usr/bin/env python import sys, math, struct, glob, resource import tempfile from bup import options, git, midx, _helpers, xstat from bup.helpers import * PAGE_SIZE=4096 SHA_PER_PAGE=PAGE_SIZE/20. optspec = """ bup midx [options...] 
-- o,output= output midx filename (default: auto-generated) a,auto automatically use all existing .midx/.idx files as input f,force merge produce exactly one .midx containing all objects p,print print names of generated midx files check validate contents of the given midx files (with -a, all midx files) max-files= maximum number of idx files to open at once [-1] d,dir= directory containing idx/midx files """ merge_into = _helpers.merge_into def _group(l, count): for i in xrange(0, len(l), count): yield l[i:i+count] def max_files(): mf = min(resource.getrlimit(resource.RLIMIT_NOFILE)) if mf > 32: mf -= 20 # just a safety margin else: mf -= 6 # minimum safety margin return mf def check_midx(name): nicename = git.repo_rel(name) log('Checking %s.\n' % nicename) try: ix = git.open_idx(name) except git.GitError, e: add_error('%s: %s' % (name, e)) return for count,subname in enumerate(ix.idxnames): sub = git.open_idx(os.path.join(os.path.dirname(name), subname)) for ecount,e in enumerate(sub): if not (ecount % 1234): qprogress(' %d/%d: %s %d/%d\r' % (count, len(ix.idxnames), git.shorten_hash(subname), ecount, len(sub))) if not sub.exists(e): add_error("%s: %s: %s missing from idx" % (nicename, git.shorten_hash(subname), str(e).encode('hex'))) if not ix.exists(e): add_error("%s: %s: %s missing from midx" % (nicename, git.shorten_hash(subname), str(e).encode('hex'))) prev = None for ecount,e in enumerate(ix): if not (ecount % 1234): qprogress(' Ordering: %d/%d\r' % (ecount, len(ix))) if not e >= prev: add_error('%s: ordering error: %s < %s' % (nicename, str(e).encode('hex'), str(prev).encode('hex'))) prev = e _first = None def _do_midx(outdir, outfilename, infilenames, prefixstr): global _first if not outfilename: assert(outdir) sum = Sha1('\0'.join(infilenames)).hexdigest() outfilename = '%s/midx-%s.midx' % (outdir, sum) inp = [] total = 0 allfilenames = [] for name in infilenames: ix = git.open_idx(name) inp.append(( ix.map, len(ix), ix.sha_ofs, isinstance(ix, 
midx.PackMidx) and ix.which_ofs or 0, len(allfilenames), )) for n in ix.idxnames: allfilenames.append(os.path.basename(n)) total += len(ix) inp.sort(lambda x,y: cmp(str(y[0][y[2]:y[2]+20]),str(x[0][x[2]:x[2]+20]))) if not _first: _first = outdir dirprefix = (_first != outdir) and git.repo_rel(outdir)+': ' or '' debug1('midx: %s%screating from %d files (%d objects).\n' % (dirprefix, prefixstr, len(infilenames), total)) if (opt.auto and (total < 1024 and len(infilenames) < 3)) \ or ((opt.auto or opt.force) and len(infilenames) < 2) \ or (opt.force and not total): debug1('midx: nothing to do.\n') return pages = int(total/SHA_PER_PAGE) or 1 bits = int(math.ceil(math.log(pages, 2))) entries = 2**bits debug1('midx: table size: %d (%d bits)\n' % (entries*4, bits)) unlink(outfilename) f = open(outfilename + '.tmp', 'w+b') f.write('MIDX') f.write(struct.pack('!II', midx.MIDX_VERSION, bits)) assert(f.tell() == 12) f.truncate(12 + 4*entries + 20*total + 4*total) f.flush() fdatasync(f.fileno()) fmap = mmap_readwrite(f, close=False) count = merge_into(fmap, bits, total, inp) del fmap # Assume this calls msync() now. 
f.seek(0, os.SEEK_END) f.write('\0'.join(allfilenames)) f.close() os.rename(outfilename + '.tmp', outfilename) # this is just for testing if 0: p = midx.PackMidx(outfilename) assert(len(p.idxnames) == len(infilenames)) print p.idxnames assert(len(p) == total) for pe, e in p, git.idxmerge(inp, final_progress=False): pin = pi.next() assert(i == pin) assert(p.exists(i)) return total, outfilename def do_midx(outdir, outfilename, infilenames, prefixstr): rv = _do_midx(outdir, outfilename, infilenames, prefixstr) if rv and opt['print']: print rv[1] def do_midx_dir(path): already = {} sizes = {} if opt.force and not opt.auto: midxs = [] # don't use existing midx files else: midxs = glob.glob('%s/*.midx' % path) contents = {} for mname in midxs: m = git.open_idx(mname) contents[mname] = [('%s/%s' % (path,i)) for i in m.idxnames] sizes[mname] = len(m) # sort the biggest+newest midxes first, so that we can eliminate # smaller (or older) redundant ones that come later in the list midxs.sort(key=lambda ix: (-sizes[ix], -xstat.stat(ix).st_mtime)) for mname in midxs: any = 0 for iname in contents[mname]: if not already.get(iname): already[iname] = 1 any = 1 if not any: debug1('%r is redundant\n' % mname) unlink(mname) already[mname] = 1 midxs = [k for k in midxs if not already.get(k)] idxs = [k for k in glob.glob('%s/*.idx' % path) if not already.get(k)] for iname in idxs: i = git.open_idx(iname) sizes[iname] = len(i) all = [(sizes[n],n) for n in (midxs + idxs)] # FIXME: what are the optimal values? Does this make sense? 
DESIRED_HWM = opt.force and 1 or 5 DESIRED_LWM = opt.force and 1 or 2 existed = dict((name,1) for sz,name in all) debug1('midx: %d indexes; want no more than %d.\n' % (len(all), DESIRED_HWM)) if len(all) <= DESIRED_HWM: debug1('midx: nothing to do.\n') while len(all) > DESIRED_HWM: all.sort() part1 = [name for sz,name in all[:len(all)-DESIRED_LWM+1]] part2 = all[len(all)-DESIRED_LWM+1:] all = list(do_midx_group(path, part1)) + part2 if len(all) > DESIRED_HWM: debug1('\nStill too many indexes (%d > %d). Merging again.\n' % (len(all), DESIRED_HWM)) if opt['print']: for sz,name in all: if not existed.get(name): print name def do_midx_group(outdir, infiles): groups = list(_group(infiles, opt.max_files)) gprefix = '' for n,sublist in enumerate(groups): if len(groups) != 1: gprefix = 'Group %d: ' % (n+1) rv = _do_midx(path, None, sublist, gprefix) if rv: yield rv handle_ctrl_c() o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) if extra and (opt.auto or opt.force): o.fatal("you can't use -f/-a and also provide filenames") if opt.check and (not extra and not opt.auto): o.fatal("if using --check, you must provide filenames or -a") git.check_repo_or_die() if opt.max_files < 0: opt.max_files = max_files() assert(opt.max_files >= 5) if opt.check: # check existing midx files if extra: midxes = extra else: midxes = [] paths = opt.dir and [opt.dir] or git.all_packdirs() for path in paths: debug1('midx: scanning %s\n' % path) midxes += glob.glob(os.path.join(path, '*.midx')) for name in midxes: check_midx(name) if not saved_errors: log('All tests passed.\n') else: if extra: do_midx(git.repo('objects/pack'), opt.output, extra, '') elif opt.auto or opt.force: paths = opt.dir and [opt.dir] or git.all_packdirs() for path in paths: debug1('midx: scanning %s\n' % path) do_midx_dir(path) else: o.fatal("you must use -f or -a or provide input filenames") if saved_errors: log('WARNING: %d errors encountered.\n' % len(saved_errors)) sys.exit(1) 
bup-0.25/cmd/mux-cmd.py000077500000000000000000000016031225146730500147250ustar00rootroot00000000000000#!/usr/bin/env python import os, sys, subprocess, struct from bup import options from bup.helpers import * optspec = """ bup mux command [command arguments...] -- """ o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) if len(extra) < 1: o.fatal('command is required') cmdpath, cmdfn = os.path.split(__file__) subcmd = extra subcmd[0] = os.path.join(cmdpath, 'bup-' + subcmd[0]) debug2('bup mux: starting %r\n' % (extra,)) outr, outw = os.pipe() errr, errw = os.pipe() def close_fds(): os.close(outr) os.close(errr) p = subprocess.Popen(subcmd, stdout=outw, stderr=errw, preexec_fn=close_fds) os.close(outw) os.close(errw) sys.stdout.write('BUPMUX') sys.stdout.flush() mux(p, sys.stdout.fileno(), outr, errr) os.close(outr) os.close(errr) prv = p.wait() if prv: debug1('%s exited with code %d\n' % (extra[0], prv)) debug1('bup mux: done\n') sys.exit(prv) bup-0.25/cmd/newliner-cmd.py000077500000000000000000000021701225146730500157370ustar00rootroot00000000000000#!/usr/bin/env python import sys, os, re from bup import options from bup import _helpers # fixes up sys.argv on import optspec = """ bup newliner """ o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) if extra: o.fatal("no arguments expected") r = re.compile(r'([\r\n])') lastlen = 0 all = '' width = options._tty_width() or 78 while 1: l = r.split(all, 1) if len(l) <= 1: if len(all) >= 160: sys.stdout.write('%s\n' % all[:78]) sys.stdout.flush() all = all[78:] try: b = os.read(sys.stdin.fileno(), 4096) except KeyboardInterrupt: break if not b: break all += b else: assert(len(l) == 3) (line, splitchar, all) = l if splitchar == '\r': line = line[:width] sys.stdout.write('%-*s%s' % (lastlen, line, splitchar)) if splitchar == '\r': lastlen = len(line) else: lastlen = 0 sys.stdout.flush() if lastlen: sys.stdout.write('%-*s\r' % (lastlen, '')) if all: sys.stdout.write('%s\n' % all) 
bup-0.25/cmd/on--server-cmd.py000077500000000000000000000033361225146730500161160ustar00rootroot00000000000000#!/usr/bin/env python import sys, os, struct from bup import options, helpers optspec = """ bup on--server -- This command is run automatically by 'bup on' """ o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) if extra: o.fatal('no arguments expected') # get the subcommand's argv. # Normally we could just pass this on the command line, but since we'll often # be getting called on the other end of an ssh pipe, which tends to mangle # argv (by sending it via the shell), this way is much safer. buf = sys.stdin.read(4) sz = struct.unpack('!I', buf)[0] assert(sz > 0) assert(sz < 1000000) buf = sys.stdin.read(sz) assert(len(buf) == sz) argv = buf.split('\0') # stdin/stdout are supposedly connected to 'bup server' that the caller # started for us (often on the other end of an ssh tunnel), so we don't want # to misuse them. Move them out of the way, then replace stdout with # a pointer to stderr in case our subcommand wants to do something with it. # # It might be nice to do the same with stdin, but my experiments showed that # ssh seems to make its child's stderr a readable-but-never-reads-anything # socket. They really should have used shutdown(SHUT_WR) on the other end # of it, but probably didn't. Anyway, it's too messy, so let's just make sure # anyone reading from stdin is disappointed. # # (You can't just leave stdin/stdout "not open" by closing the file # descriptors. Then the next file that opens is automatically assigned 0 or 1, # and people *trying* to read/write stdin/stdout get screwed.) 
os.dup2(0, 3) os.dup2(1, 4) os.dup2(2, 1) fd = os.open('/dev/null', os.O_RDONLY) os.dup2(fd, 0) os.close(fd) os.environ['BUP_SERVER_REVERSE'] = helpers.hostname() os.execvp(argv[0], argv) sys.exit(99) bup-0.25/cmd/on-cmd.py000077500000000000000000000030071225146730500145300ustar00rootroot00000000000000#!/usr/bin/env python import sys, os, struct, getopt, subprocess, signal from bup import options, ssh, path from bup.helpers import * optspec = """ bup on index ... bup on save ... bup on split ... """ o = options.Options(optspec, optfunc=getopt.getopt) (opt, flags, extra) = o.parse(sys.argv[1:]) if len(extra) < 2: o.fatal('arguments expected') class SigException(Exception): def __init__(self, signum): self.signum = signum Exception.__init__(self, 'signal %d received' % signum) def handler(signum, frame): raise SigException(signum) signal.signal(signal.SIGTERM, handler) signal.signal(signal.SIGINT, handler) sp = None p = None ret = 99 try: hp = extra[0].split(':') if len(hp) == 1: (hostname, port) = (hp[0], None) else: (hostname, port) = hp argv = extra[1:] p = ssh.connect(hostname, port, 'on--server') argvs = '\0'.join(['bup'] + argv) p.stdin.write(struct.pack('!I', len(argvs)) + argvs) p.stdin.flush() sp = subprocess.Popen([path.exe(), 'server'], stdin=p.stdout, stdout=p.stdin) p.stdin.close() p.stdout.close() finally: while 1: # if we get a signal while waiting, we have to keep waiting, just # in case our child doesn't die. 
try: ret = p.wait() sp.wait() break except SigException, e: log('\nbup on: %s\n' % e) os.kill(p.pid, e.signum) ret = 84 sys.exit(ret) bup-0.25/cmd/random-cmd.py000077500000000000000000000014221225146730500153730ustar00rootroot00000000000000#!/usr/bin/env python import sys from bup import options, _helpers from bup.helpers import * optspec = """ bup random [-S seed] -- S,seed= optional random number seed [1] f,force print random data to stdout even if it's a tty v,verbose print byte counter to stderr """ o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) if len(extra) != 1: o.fatal("exactly one argument expected") total = parse_num(extra[0]) handle_ctrl_c() if opt.force or (not os.isatty(1) and not atoi(os.environ.get('BUP_FORCE_TTY')) & 1): _helpers.write_random(sys.stdout.fileno(), total, opt.seed, opt.verbose and 1 or 0) else: log('error: not writing binary data to a terminal. Use -f to force.\n') sys.exit(1) bup-0.25/cmd/restore-cmd.py000077500000000000000000000261061225146730500156040ustar00rootroot00000000000000#!/usr/bin/env python import copy, errno, sys, stat, re from bup import options, git, metadata, vfs from bup.helpers import * optspec = """ bup restore [-C outdir] -- C,outdir= change to given outdir before extracting files numeric-ids restore numeric IDs (user, group, etc.) 
rather than names exclude-rx= skip paths that match the unanchored regular expression v,verbose increase log output (can be used more than once) map-user= given OLD=NEW, restore OLD user as NEW user map-group= given OLD=NEW, restore OLD group as NEW group map-uid= given OLD=NEW, restore OLD uid as NEW uid map-gid= given OLD=NEW, restore OLD gid as NEW gid q,quiet don't show progress meter """ total_restored = 0 def verbose1(s): if opt.verbose >= 1: print s def verbose2(s): if opt.verbose >= 2: print s def plog(s): if opt.quiet: return qprogress(s) def valid_restore_path(path): path = os.path.normpath(path) if path.startswith('/'): path = path[1:] if '/' in path: return True def print_info(n, fullname): if stat.S_ISDIR(n.mode): verbose1('%s/' % fullname) elif stat.S_ISLNK(n.mode): verbose2('%s@ -> %s' % (fullname, n.readlink())) else: verbose2(fullname) def create_path(n, fullname, meta): if meta: meta.create_path(fullname) else: # These fallbacks are important -- meta could be null if, for # example, save created a "fake" item, i.e. a new strip/graft # path element, etc. You can find cases like that by # searching for "Metadata()". 
unlink(fullname) if stat.S_ISDIR(n.mode): mkdirp(fullname) elif stat.S_ISLNK(n.mode): os.symlink(n.readlink(), fullname) def parse_owner_mappings(type, options, fatal): """Traverse the options and parse all --map-TYPEs, or call Option.fatal().""" opt_name = '--map-' + type value_rx = r'^([^=]+)=([^=]*)$' if type in ('uid', 'gid'): value_rx = r'^(-?[0-9]+)=(-?[0-9]+)$' owner_map = {} for flag in options: (option, parameter) = flag if option != opt_name: continue match = re.match(value_rx, parameter) if not match: raise fatal("couldn't parse %s as %s mapping" % (parameter, type)) old_id, new_id = match.groups() if type in ('uid', 'gid'): old_id = int(old_id) new_id = int(new_id) owner_map[old_id] = new_id return owner_map def apply_metadata(meta, name, restore_numeric_ids, owner_map): m = copy.deepcopy(meta) m.user = owner_map['user'].get(m.user, m.user) m.group = owner_map['group'].get(m.group, m.group) m.uid = owner_map['uid'].get(m.uid, m.uid) m.gid = owner_map['gid'].get(m.gid, m.gid) m.apply_to_path(name, restore_numeric_ids = restore_numeric_ids) # Track a list of (restore_path, vfs_path, meta) triples for each path # we've written for a given hardlink_target. This allows us to handle # the case where we restore a set of hardlinks out of order (with # respect to the original save call(s)) -- i.e. when we don't restore # the hardlink_target path first. This data also allows us to attempt # to handle other situations like hardlink sets that change on disk # during a save, or between index and save. 
targets_written = {} def hardlink_compatible(target_path, target_vfs_path, target_meta, src_node, src_meta): global top if not os.path.exists(target_path): return False target_node = top.lresolve(target_vfs_path) if src_node.mode != target_node.mode \ or src_node.mtime != target_node.mtime \ or src_node.ctime != target_node.ctime \ or src_node.hash != target_node.hash: return False if not src_meta.same_file(target_meta): return False return True def hardlink_if_possible(fullname, node, meta): """Find a suitable hardlink target, link to it, and return true, otherwise return false.""" # Expect the caller to handle restoring the metadata if # hardlinking isn't possible. global targets_written target = meta.hardlink_target target_versions = targets_written.get(target) if target_versions: # Check every path in the set that we've written so far for a match. for (target_path, target_vfs_path, target_meta) in target_versions: if hardlink_compatible(target_path, target_vfs_path, target_meta, node, meta): try: os.link(target_path, fullname) return True except OSError, e: if e.errno != errno.EXDEV: raise else: target_versions = [] targets_written[target] = target_versions full_vfs_path = node.fullname() target_versions.append((fullname, full_vfs_path, meta)) return False def write_file_content(fullname, n): outf = open(fullname, 'wb') try: for b in chunkyreader(n.open()): outf.write(b) finally: outf.close() def find_dir_item_metadata_by_name(dir, name): """Find metadata in dir (a node) for an item with the given name, or for the directory itself if the name is ''.""" meta_stream = None try: mfile = dir.metadata_file() # VFS file -- cannot close(). if mfile: meta_stream = mfile.open() # First entry is for the dir itself. 
meta = metadata.Metadata.read(meta_stream) if name == '': return meta for sub in dir: if stat.S_ISDIR(sub.mode): meta = find_dir_item_metadata_by_name(sub, '') else: meta = metadata.Metadata.read(meta_stream) if sub.name == name: return meta finally: if meta_stream: meta_stream.close() def do_root(n, owner_map, restore_root_meta = True): # Very similar to do_node(), except that this function doesn't # create a path for n's destination directory (and so ignores # n.fullname). It assumes the destination is '.', and restores # n's metadata and content there. global total_restored, opt meta_stream = None try: # Directory metadata is the first entry in any .bupm file in # the directory. Get it. mfile = n.metadata_file() # VFS file -- cannot close(). if mfile: meta_stream = mfile.open() root_meta = metadata.Metadata.read(meta_stream) print_info(n, '.') total_restored += 1 plog('Restoring: %d\r' % total_restored) for sub in n: m = None # Don't get metadata if this is a dir -- handled in sub do_node(). if meta_stream and not stat.S_ISDIR(sub.mode): m = metadata.Metadata.read(meta_stream) do_node(n, sub, owner_map, meta = m) if root_meta and restore_root_meta: apply_metadata(root_meta, '.', opt.numeric_ids, owner_map) finally: if meta_stream: meta_stream.close() def do_node(top, n, owner_map, meta = None): # Create n.fullname(), relative to the current directory, and # restore all of its metadata, when available. The meta argument # will be None for dirs, or when there is no .bupm (i.e. no # metadata). global total_restored, opt meta_stream = None try: fullname = n.fullname(stop_at=top) # Match behavior of index --exclude-rx with respect to paths. exclude_candidate = '/' + fullname if(stat.S_ISDIR(n.mode)): exclude_candidate += '/' if should_rx_exclude_path(exclude_candidate, exclude_rxs): return # If this is a directory, its metadata is the first entry in # any .bupm file inside the directory. Get it. 
if(stat.S_ISDIR(n.mode)): mfile = n.metadata_file() # VFS file -- cannot close(). if mfile: meta_stream = mfile.open() meta = metadata.Metadata.read(meta_stream) print_info(n, fullname) created_hardlink = False if meta and meta.hardlink_target: created_hardlink = hardlink_if_possible(fullname, n, meta) if not created_hardlink: create_path(n, fullname, meta) if meta: if stat.S_ISREG(meta.mode): write_file_content(fullname, n) elif stat.S_ISREG(n.mode): write_file_content(fullname, n) total_restored += 1 plog('Restoring: %d\r' % total_restored) for sub in n: m = None # Don't get metadata if this is a dir -- handled in sub do_node(). if meta_stream and not stat.S_ISDIR(sub.mode): m = metadata.Metadata.read(meta_stream) do_node(top, sub, owner_map, meta = m) if meta and not created_hardlink: apply_metadata(meta, fullname, opt.numeric_ids, owner_map) finally: if meta_stream: meta_stream.close() handle_ctrl_c() o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) git.check_repo_or_die() top = vfs.RefList(None) if not extra: o.fatal('must specify at least one filename to restore') exclude_rxs = parse_rx_excludes(flags, o.fatal) owner_map = {} for map_type in ('user', 'group', 'uid', 'gid'): owner_map[map_type] = parse_owner_mappings(map_type, flags, o.fatal) if opt.outdir: mkdirp(opt.outdir) os.chdir(opt.outdir) ret = 0 for d in extra: if not valid_restore_path(d): add_error("ERROR: path %r doesn't include a branch and revision" % d) continue path,name = os.path.split(d) try: n = top.lresolve(d) except vfs.NodeError, e: add_error(e) continue isdir = stat.S_ISDIR(n.mode) if not name or name == '.': # Source is /foo/what/ever/ or /foo/what/ever/. -- extract # what/ever/* to the current directory, and if name == '.' # (i.e. /foo/what/ever/.), then also restore what/ever's # metadata to the current directory. 
if not isdir: add_error('%r: not a directory' % d) else: do_root(n, owner_map, restore_root_meta = (name == '.')) else: # Source is /foo/what/ever -- extract ./ever to cwd. if isinstance(n, vfs.FakeSymlink): # Source is actually /foo/what, i.e. a top-level commit # like /foo/latest, which is a symlink to ../.commit/SHA. # So dereference it, and restore ../.commit/SHA/. to # "./what/.". target = n.dereference() mkdirp(n.name) os.chdir(n.name) do_root(target, owner_map) else: # Not a directory or fake symlink. meta = find_dir_item_metadata_by_name(n.parent, n.name) do_node(n.parent, n, owner_map, meta = meta) if not opt.quiet: progress('Restoring: %d, done.\n' % total_restored) if saved_errors: log('WARNING: %d errors encountered while restoring.\n' % len(saved_errors)) sys.exit(1) bup-0.25/cmd/save-cmd.py000077500000000000000000000367011225146730500150610ustar00rootroot00000000000000#!/usr/bin/env python import sys, stat, time, math from bup import hashsplit, git, options, index, client, metadata, hlinkdb from bup.helpers import * from bup.hashsplit import GIT_MODE_TREE, GIT_MODE_FILE, GIT_MODE_SYMLINK optspec = """ bup save [-tc] [-n name] -- r,remote= hostname:/path/to/repo of remote repository t,tree output a tree id c,commit output a commit id n,name= name of backup set to update (if any) d,date= date for the commit (seconds since the epoch) v,verbose increase log output (can be used more than once) q,quiet don't show progress meter smaller= only back up files smaller than n bytes bwlimit= maximum bytes/sec to transmit to server f,indexfile= the name of the index file (normally BUP_DIR/bupindex) strip strips the path to every filename given strip-path= path-prefix to be stripped when saving graft= a graft point *old_path*=*new_path* (can be used more than once) #,compress= set compression level to # (0-9, 9 is highest) [1] """ o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) git.check_repo_or_die() if not (opt.tree or opt.commit or 
opt.name): o.fatal("use one or more of -t, -c, -n") if not extra: o.fatal("no filenames given") opt.progress = (istty2 and not opt.quiet) opt.smaller = parse_num(opt.smaller or 0) if opt.bwlimit: client.bwlimit = parse_num(opt.bwlimit) if opt.date: date = parse_date_or_fatal(opt.date, o.fatal) else: date = time.time() if opt.strip and opt.strip_path: o.fatal("--strip is incompatible with --strip-path") graft_points = [] if opt.graft: if opt.strip: o.fatal("--strip is incompatible with --graft") if opt.strip_path: o.fatal("--strip-path is incompatible with --graft") for (option, parameter) in flags: if option == "--graft": splitted_parameter = parameter.split('=') if len(splitted_parameter) != 2: o.fatal("a graft point must be of the form old_path=new_path") old_path, new_path = splitted_parameter if not (old_path and new_path): o.fatal("a graft point cannot be empty") graft_points.append((realpath(old_path), realpath(new_path))) is_reverse = os.environ.get('BUP_SERVER_REVERSE') if is_reverse and opt.remote: o.fatal("don't use -r in reverse mode; it's automatic") if opt.name and opt.name.startswith('.'): o.fatal("'%s' is not a valid branch name" % opt.name) refname = opt.name and 'refs/heads/%s' % opt.name or None if opt.remote or is_reverse: cli = client.Client(opt.remote) oldref = refname and cli.read_ref(refname) or None w = cli.new_packwriter() else: cli = None oldref = refname and git.read_ref(refname) or None w = git.PackWriter(compression_level=opt.compress) handle_ctrl_c() def eatslash(dir): if dir.endswith('/'): return dir[:-1] else: return dir # Metadata is stored in a file named .bupm in each directory. The # first metadata entry will be the metadata for the current directory. # The remaining entries will be for each of the other directory # elements, in the order they're listed in the index. 
# # Since the git tree elements are sorted according to # git.shalist_item_sort_key, the metalist items are accumulated as # (sort_key, metadata) tuples, and then sorted when the .bupm file is # created. The sort_key must be computed using the element's real # name and mode rather than the git mode and (possibly mangled) name. # Maintain a stack of information representing the current location in # the archive being constructed. The current path is recorded in # parts, which will be something like ['', 'home', 'someuser'], and # the accumulated content and metadata for of the dirs in parts is # stored in parallel stacks in shalists and metalists. parts = [] # Current archive position (stack of dir names). shalists = [] # Hashes for each dir in paths. metalists = [] # Metadata for each dir in paths. def _push(part, metadata): # Enter a new archive directory -- make it the current directory. parts.append(part) shalists.append([]) metalists.append([('', metadata)]) # This dir's metadata (no name). def _pop(force_tree, dir_metadata=None): # Leave the current archive directory and add its tree to its parent. assert(len(parts) >= 1) part = parts.pop() shalist = shalists.pop() metalist = metalists.pop() if metalist and not force_tree: if dir_metadata: # Override the original metadata pushed for this dir. 
metalist = [('', dir_metadata)] + metalist[1:] sorted_metalist = sorted(metalist, key = lambda x : x[0]) metadata = ''.join([m[1].encode() for m in sorted_metalist]) shalist.append((0100644, '.bupm', w.new_blob(metadata))) tree = force_tree or w.new_tree(shalist) if shalists: shalists[-1].append((GIT_MODE_TREE, git.mangle_name(part, GIT_MODE_TREE, GIT_MODE_TREE), tree)) return tree lastremain = None def progress_report(n): global count, subcount, lastremain subcount += n cc = count + subcount pct = total and (cc*100.0/total) or 0 now = time.time() elapsed = now - tstart kps = elapsed and int(cc/1024./elapsed) kps_frac = 10 ** int(math.log(kps+1, 10) - 1) kps = int(kps/kps_frac)*kps_frac if cc: remain = elapsed*1.0/cc * (total-cc) else: remain = 0.0 if (lastremain and (remain > lastremain) and ((remain - lastremain)/lastremain < 0.05)): remain = lastremain else: lastremain = remain hours = int(remain/60/60) mins = int(remain/60 - hours*60) secs = int(remain - hours*60*60 - mins*60) if elapsed < 30: remainstr = '' kpsstr = '' else: kpsstr = '%dk/s' % kps if hours: remainstr = '%dh%dm' % (hours, mins) elif mins: remainstr = '%dm%d' % (mins, secs) else: remainstr = '%ds' % secs qprogress('Saving: %.2f%% (%d/%dk, %d/%d files) %s %s\r' % (pct, cc/1024, total/1024, fcount, ftotal, remainstr, kpsstr)) indexfile = opt.indexfile or git.repo('bupindex') r = index.Reader(indexfile) if not os.access(indexfile + '.meta', os.W_OK|os.R_OK): log('error: cannot access "%s"; have you run bup index?' 
% indexfile) sys.exit(1) msr = index.MetaStoreReader(indexfile + '.meta') hlink_db = hlinkdb.HLinkDB(indexfile + '.hlink') def already_saved(ent): return ent.is_valid() and w.exists(ent.sha) and ent.sha def wantrecurse_pre(ent): return not already_saved(ent) def wantrecurse_during(ent): return not already_saved(ent) or ent.sha_missing() def find_hardlink_target(hlink_db, ent): if hlink_db and not stat.S_ISDIR(ent.mode) and ent.nlink > 1: link_paths = hlink_db.node_paths(ent.dev, ent.ino) if link_paths: return link_paths[0] total = ftotal = 0 if opt.progress: for (transname,ent) in r.filter(extra, wantrecurse=wantrecurse_pre): if not (ftotal % 10024): qprogress('Reading index: %d\r' % ftotal) exists = ent.exists() hashvalid = already_saved(ent) ent.set_sha_missing(not hashvalid) if not opt.smaller or ent.size < opt.smaller: if exists and not hashvalid: total += ent.size ftotal += 1 progress('Reading index: %d, done.\n' % ftotal) hashsplit.progress_callback = progress_report # Root collisions occur when strip or graft options map more than one # path to the same directory (paths which originally had separate # parents). When that situation is detected, use empty metadata for # the parent. Otherwise, use the metadata for the common parent. # Collision example: "bup save ... --strip /foo /foo/bar /bar". # FIXME: Add collision tests, or handle collisions some other way. # FIXME: Detect/handle strip/graft name collisions (other than root), # i.e. if '/foo/bar' and '/bar' both map to '/'. 
first_root = None root_collision = None tstart = time.time() count = subcount = fcount = 0 lastskip_name = None lastdir = '' for (transname,ent) in r.filter(extra, wantrecurse=wantrecurse_during): (dir, file) = os.path.split(ent.name) exists = (ent.flags & index.IX_EXISTS) hashvalid = already_saved(ent) wasmissing = ent.sha_missing() oldsize = ent.size if opt.verbose: if not exists: status = 'D' elif not hashvalid: if ent.sha == index.EMPTY_SHA: status = 'A' else: status = 'M' else: status = ' ' if opt.verbose >= 2: log('%s %-70s\n' % (status, ent.name)) elif not stat.S_ISDIR(ent.mode) and lastdir != dir: if not lastdir.startswith(dir): log('%s %-70s\n' % (status, os.path.join(dir, ''))) lastdir = dir if opt.progress: progress_report(0) fcount += 1 if not exists: continue if opt.smaller and ent.size >= opt.smaller: if exists and not hashvalid: add_error('skipping large file "%s"' % ent.name) lastskip_name = ent.name continue assert(dir.startswith('/')) if opt.strip: dirp = stripped_path_components(dir, extra) elif opt.strip_path: dirp = stripped_path_components(dir, [opt.strip_path]) elif graft_points: dirp = grafted_path_components(graft_points, dir) else: dirp = path_components(dir) # At this point, dirp contains a representation of the archive # path that looks like [(archive_dir_name, real_fs_path), ...]. # So given "bup save ... --strip /foo/bar /foo/bar/baz", dirp # might look like this at some point: # [('', '/foo/bar'), ('baz', '/foo/bar/baz'), ...]. # This dual representation supports stripping/grafting, where the # archive path may not have a direct correspondence with the # filesystem. The root directory is represented by an initial # component named '', and any component that doesn't have a # corresponding filesystem directory (due to grafting, for # example) will have a real_fs_path of None, i.e. [('', None), # ...]. 
if first_root == None: dir_name, fs_path = dirp[0] first_root = dirp[0] # Not indexed, so just grab the FS metadata or use empty metadata. try: meta = metadata.from_path(fs_path) if fs_path else metadata.Metadata() except (OSError, IOError), e: add_error(e) lastskip_name = dir_name else: _push(dir_name, meta) elif first_root != dirp[0]: root_collision = True # If switching to a new sub-tree, finish the current sub-tree. while parts > [x[0] for x in dirp]: _pop(force_tree = None) # If switching to a new sub-tree, start a new sub-tree. for path_component in dirp[len(parts):]: dir_name, fs_path = path_component # Not indexed, so just grab the FS metadata or use empty metadata. try: meta = metadata.from_path(fs_path) if fs_path else metadata.Metadata() except (OSError, IOError), e: add_error(e) lastskip_name = dir_name else: _push(dir_name, meta) if not file: if len(parts) == 1: continue # We're at the top level -- keep the current root dir # Since there's no filename, this is a subdir -- finish it. oldtree = already_saved(ent) # may be None newtree = _pop(force_tree = oldtree) if not oldtree: if lastskip_name and lastskip_name.startswith(ent.name): ent.invalidate() else: ent.validate(GIT_MODE_TREE, newtree) ent.repack() if exists and wasmissing: count += oldsize continue # it's not a directory id = None if hashvalid: id = ent.sha git_name = git.mangle_name(file, ent.mode, ent.gitmode) git_info = (ent.gitmode, git_name, id) shalists[-1].append(git_info) sort_key = git.shalist_item_sort_key((ent.mode, file, id)) meta = msr.metadata_at(ent.meta_ofs) meta.hardlink_target = find_hardlink_target(hlink_db, ent) # Restore the times that were cleared to 0 in the metastore. 
(meta.atime, meta.mtime, meta.ctime) = (ent.atime, ent.mtime, ent.ctime) metalists[-1].append((sort_key, meta)) else: if stat.S_ISREG(ent.mode): try: f = hashsplit.open_noatime(ent.name) except (IOError, OSError), e: add_error(e) lastskip_name = ent.name else: try: (mode, id) = hashsplit.split_to_blob_or_tree( w.new_blob, w.new_tree, [f], keep_boundaries=False) except (IOError, OSError), e: add_error('%s: %s' % (ent.name, e)) lastskip_name = ent.name else: if stat.S_ISDIR(ent.mode): assert(0) # handled above elif stat.S_ISLNK(ent.mode): try: rl = os.readlink(ent.name) except (OSError, IOError), e: add_error(e) lastskip_name = ent.name else: (mode, id) = (GIT_MODE_SYMLINK, w.new_blob(rl)) else: # Everything else should be fully described by its # metadata, so just record an empty blob, so the paths # in the tree and .bupm will match up. (mode, id) = (GIT_MODE_FILE, w.new_blob("")) if id: ent.validate(mode, id) ent.repack() git_name = git.mangle_name(file, ent.mode, ent.gitmode) git_info = (mode, git_name, id) shalists[-1].append(git_info) sort_key = git.shalist_item_sort_key((ent.mode, file, id)) hlink = find_hardlink_target(hlink_db, ent) try: meta = metadata.from_path(ent.name, hardlink_target=hlink) except (OSError, IOError), e: add_error(e) lastskip_name = ent.name else: metalists[-1].append((sort_key, meta)) if exists and wasmissing: count += oldsize subcount = 0 if opt.progress: pct = total and count*100.0/total or 100 progress('Saving: %.2f%% (%d/%dk, %d/%d files), done. \n' % (pct, count/1024, total/1024, fcount, ftotal)) while len(parts) > 1: # _pop() all the parts above the root _pop(force_tree = None) assert(len(shalists) == 1) assert(len(metalists) == 1) # Finish the root directory. tree = _pop(force_tree = None, # When there's a collision, use empty metadata for the root. 
dir_metadata = metadata.Metadata() if root_collision else None) if opt.tree: print tree.encode('hex') if opt.commit or opt.name: msg = 'bup save\n\nGenerated by command:\n%r' % sys.argv commit = w.new_commit(oldref, tree, date, msg) if opt.commit: print commit.encode('hex') msr.close() w.close() # must close before we can update the ref if opt.name: if cli: cli.update_ref(refname, commit, oldref) else: git.update_ref(refname, commit, oldref) if cli: cli.close() if saved_errors: log('WARNING: %d errors encountered while saving.\n' % len(saved_errors)) sys.exit(1) bup-0.25/cmd/server-cmd.py000077500000000000000000000133721225146730500154300ustar00rootroot00000000000000#!/usr/bin/env python import os, sys, struct from bup import options, git from bup.helpers import * suspended_w = None dumb_server_mode = False def do_help(conn, junk): conn.write('Commands:\n %s\n' % '\n '.join(sorted(commands))) conn.ok() def _set_mode(): global dumb_server_mode dumb_server_mode = os.path.exists(git.repo('bup-dumb-server')) debug1('bup server: serving in %s mode\n' % (dumb_server_mode and 'dumb' or 'smart')) def _init_session(reinit_with_new_repopath=None): if reinit_with_new_repopath is None and git.repodir: return git.check_repo_or_die(reinit_with_new_repopath) # OK. we now know the path is a proper repository. Record this path in the # environment so that subprocesses inherit it and know where to operate. 
os.environ['BUP_DIR'] = git.repodir debug1('bup server: bupdir is %r\n' % git.repodir) _set_mode() def init_dir(conn, arg): git.init_repo(arg) debug1('bup server: bupdir initialized: %r\n' % git.repodir) _init_session(arg) conn.ok() def set_dir(conn, arg): _init_session(arg) conn.ok() def list_indexes(conn, junk): _init_session() suffix = '' if dumb_server_mode: suffix = ' load' for f in os.listdir(git.repo('objects/pack')): if f.endswith('.idx'): conn.write('%s%s\n' % (f, suffix)) conn.ok() def send_index(conn, name): _init_session() assert(name.find('/') < 0) assert(name.endswith('.idx')) idx = git.open_idx(git.repo('objects/pack/%s' % name)) conn.write(struct.pack('!I', len(idx.map))) conn.write(idx.map) conn.ok() def receive_objects_v2(conn, junk): global suspended_w _init_session() suggested = set() if suspended_w: w = suspended_w suspended_w = None else: if dumb_server_mode: w = git.PackWriter(objcache_maker=None) else: w = git.PackWriter() while 1: ns = conn.read(4) if not ns: w.abort() raise Exception('object read: expected length header, got EOF\n') n = struct.unpack('!I', ns)[0] #debug2('expecting %d bytes\n' % n) if not n: debug1('bup server: received %d object%s.\n' % (w.count, w.count!=1 and "s" or '')) fullpath = w.close(run_midx=not dumb_server_mode) if fullpath: (dir, name) = os.path.split(fullpath) conn.write('%s.idx\n' % name) conn.ok() return elif n == 0xffffffff: debug2('bup server: receive-objects suspended.\n') suspended_w = w conn.ok() return shar = conn.read(20) crcr = struct.unpack('!I', conn.read(4))[0] n -= 20 + 4 buf = conn.read(n) # object sizes in bup are reasonably small #debug2('read %d bytes\n' % n) _check(w, n, len(buf), 'object read: expected %d bytes, got %d\n') if not dumb_server_mode: oldpack = w.exists(shar, want_source=True) if oldpack: assert(not oldpack == True) assert(oldpack.endswith('.idx')) (dir,name) = os.path.split(oldpack) if not (name in suggested): debug1("bup server: suggesting index %s\n" % 
git.shorten_hash(name)) debug1("bup server: because of object %s\n" % shar.encode('hex')) conn.write('index %s\n' % name) suggested.add(name) continue nw, crc = w._raw_write((buf,), sha=shar) _check(w, crcr, crc, 'object read: expected crc %d, got %d\n') # NOTREACHED def _check(w, expected, actual, msg): if expected != actual: w.abort() raise Exception(msg % (expected, actual)) def read_ref(conn, refname): _init_session() r = git.read_ref(refname) conn.write('%s\n' % (r or '').encode('hex')) conn.ok() def update_ref(conn, refname): _init_session() newval = conn.readline().strip() oldval = conn.readline().strip() git.update_ref(refname, newval.decode('hex'), oldval.decode('hex')) conn.ok() cat_pipe = None def cat(conn, id): global cat_pipe _init_session() if not cat_pipe: cat_pipe = git.CatPipe() try: for blob in cat_pipe.join(id): conn.write(struct.pack('!I', len(blob))) conn.write(blob) except KeyError, e: log('server: error: %s\n' % e) conn.write('\0\0\0\0') conn.error(e) else: conn.write('\0\0\0\0') conn.ok() optspec = """ bup server """ o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) if extra: o.fatal('no arguments expected') debug2('bup server: reading from stdin.\n') commands = { 'quit': None, 'help': do_help, 'init-dir': init_dir, 'set-dir': set_dir, 'list-indexes': list_indexes, 'send-index': send_index, 'receive-objects-v2': receive_objects_v2, 'read-ref': read_ref, 'update-ref': update_ref, 'cat': cat, } # FIXME: this protocol is totally lame and not at all future-proof. 
# (Especially since we abort completely as soon as *anything* bad happens) conn = Conn(sys.stdin, sys.stdout) lr = linereader(conn) for _line in lr: line = _line.strip() if not line: continue debug1('bup server: command: %r\n' % line) words = line.split(' ', 1) cmd = words[0] rest = len(words)>1 and words[1] or '' if cmd == 'quit': break else: cmd = commands.get(cmd) if cmd: cmd(conn, rest) else: raise Exception('unknown server command: %r\n' % line) debug1('bup server: done\n') bup-0.25/cmd/split-cmd.py000077500000000000000000000147521225146730500152600ustar00rootroot00000000000000#!/usr/bin/env python import sys, time from bup import hashsplit, git, options, client from bup.helpers import * optspec = """ bup split [-t] [-c] [-n name] OPTIONS [--git-ids | filenames...] bup split -b OPTIONS [--git-ids | filenames...] bup split <--noop [--copy]|--copy> OPTIONS [--git-ids | filenames...] -- Modes: b,blobs output a series of blob ids. Implies --fanout=0. t,tree output a tree id c,commit output a commit id n,name= save the result under the given name noop split the input, but throw away the result copy split the input, copy it to stdout, don't save to repo Options: r,remote= remote repository path d,date= date for the commit (seconds since the epoch) q,quiet don't print progress messages v,verbose increase log output (can be used more than once) git-ids read a list of git object ids from stdin and split their contents keep-boundaries don't let one chunk span two input files bench print benchmark timings to stderr max-pack-size= maximum bytes in a single pack max-pack-objects= maximum number of objects in a single pack fanout= average number of blobs in a single tree bwlimit= maximum bytes/sec to transmit to server #,compress= set compression level to # (0-9, 9 is highest) [1] """ o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) handle_ctrl_c() git.check_repo_or_die() if not (opt.blobs or opt.tree or opt.commit or opt.name or opt.noop or 
opt.copy): o.fatal("use one or more of -b, -t, -c, -n, --noop, --copy") if (opt.noop or opt.copy) and (opt.blobs or opt.tree or opt.commit or opt.name): o.fatal('--noop and --copy are incompatible with -b, -t, -c, -n') if opt.blobs and (opt.tree or opt.commit or opt.name): o.fatal('-b is incompatible with -t, -c, -n') if extra and opt.git_ids: o.fatal("don't provide filenames when using --git-ids") if opt.verbose >= 2: git.verbose = opt.verbose - 1 opt.bench = 1 if opt.max_pack_size: git.max_pack_size = parse_num(opt.max_pack_size) if opt.max_pack_objects: git.max_pack_objects = parse_num(opt.max_pack_objects) if opt.fanout: hashsplit.fanout = parse_num(opt.fanout) if opt.blobs: hashsplit.fanout = 0 if opt.bwlimit: client.bwlimit = parse_num(opt.bwlimit) if opt.date: date = parse_date_or_fatal(opt.date, o.fatal) else: date = time.time() total_bytes = 0 def prog(filenum, nbytes): global total_bytes total_bytes += nbytes if filenum > 0: qprogress('Splitting: file #%d, %d kbytes\r' % (filenum+1, total_bytes/1024)) else: qprogress('Splitting: %d kbytes\r' % (total_bytes/1024)) is_reverse = os.environ.get('BUP_SERVER_REVERSE') if is_reverse and opt.remote: o.fatal("don't use -r in reverse mode; it's automatic") start_time = time.time() if opt.name and opt.name.startswith('.'): o.fatal("'%s' is not a valid branch name." % opt.name) refname = opt.name and 'refs/heads/%s' % opt.name or None if opt.noop or opt.copy: cli = pack_writer = oldref = None elif opt.remote or is_reverse: cli = client.Client(opt.remote, compression_level=opt.compress) oldref = refname and cli.read_ref(refname) or None pack_writer = cli.new_packwriter() else: cli = None oldref = refname and git.read_ref(refname) or None pack_writer = git.PackWriter(compression_level=opt.compress) if opt.git_ids: # the input is actually a series of git object ids that we should retrieve # and split. 
# # This is a bit messy, but basically it converts from a series of # CatPipe.get() iterators into a series of file-type objects. # It would be less ugly if either CatPipe.get() returned a file-like object # (not very efficient), or split_to_shalist() expected an iterator instead # of a file. cp = git.CatPipe() class IterToFile: def __init__(self, it): self.it = iter(it) def read(self, size): v = next(self.it) return v or '' def read_ids(): while 1: line = sys.stdin.readline() if not line: break if line: line = line.strip() try: it = cp.get(line.strip()) next(it) # skip the file type except KeyError, e: add_error('error: %s' % e) continue yield IterToFile(it) files = read_ids() else: # the input either comes from a series of files or from stdin. files = extra and (open(fn) for fn in extra) or [sys.stdin] if pack_writer and opt.blobs: shalist = hashsplit.split_to_blobs(pack_writer.new_blob, files, keep_boundaries=opt.keep_boundaries, progress=prog) for (sha, size, level) in shalist: print sha.encode('hex') reprogress() elif pack_writer: # tree or commit or name shalist = hashsplit.split_to_shalist(pack_writer.new_blob, pack_writer.new_tree, files, keep_boundaries=opt.keep_boundaries, progress=prog) tree = pack_writer.new_tree(shalist) else: last = 0 it = hashsplit.hashsplit_iter(files, keep_boundaries=opt.keep_boundaries, progress=prog) for (blob, level) in it: hashsplit.total_split += len(blob) if opt.copy: sys.stdout.write(str(blob)) megs = hashsplit.total_split/1024/1024 if not opt.quiet and last != megs: last = megs if opt.verbose: log('\n') if opt.tree: print tree.encode('hex') if opt.commit or opt.name: msg = 'bup split\n\nGenerated by command:\n%r' % sys.argv ref = opt.name and ('refs/heads/%s' % opt.name) or None commit = pack_writer.new_commit(oldref, tree, date, msg) if opt.commit: print commit.encode('hex') if pack_writer: pack_writer.close() # must close before we can update the ref if opt.name: if cli: cli.update_ref(refname, commit, oldref) else: 
git.update_ref(refname, commit, oldref) if cli: cli.close() secs = time.time() - start_time size = hashsplit.total_split if opt.bench: log('bup: %.2fkbytes in %.2f secs = %.2f kbytes/sec\n' % (size/1024., secs, size/1024./secs)) if saved_errors: log('WARNING: %d errors encountered while saving.\n' % len(saved_errors)) sys.exit(1) bup-0.25/cmd/tag-cmd.py000077500000000000000000000041611225146730500146710ustar00rootroot00000000000000#!/usr/bin/env python """Tag a commit in the bup repository. Creating a tag on a commit can be used for avoiding automatic cleanup from removing this commit due to old age. """ import sys import os from bup import git, options from bup.helpers import * # FIXME: review for safe writes. handle_ctrl_c() optspec = """ bup tag bup tag [-f] bup tag -d [-f] -- d,delete= Delete a tag f,force Overwrite existing tag, or 'delete' a tag that doesn't exist """ o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) git.check_repo_or_die() if opt.delete: tag_file = git.repo('refs/tags/%s' % opt.delete) debug1("tag file: %s\n" % tag_file) if not os.path.exists(tag_file): if opt.force: sys.exit(0) log("bup: error: tag '%s' not found.\n" % opt.delete) sys.exit(1) try: os.unlink(tag_file) except OSError, e: log("bup: error: unable to delete tag '%s': %s" % (opt.delete, e)) sys.exit(1) sys.exit(0) tags = [t for sublist in git.tags().values() for t in sublist] if not extra: for t in tags: print t sys.exit(0) elif len(extra) < 2: o.fatal('no commit ref or hash given.') (tag_name, commit) = extra[:2] if not tag_name: o.fatal("tag name must not be empty.") debug1("args: tag name = %s; commit = %s\n" % (tag_name, commit)) if tag_name in tags and not opt.force: log("bup: error: tag '%s' already exists\n" % tag_name) sys.exit(1) if tag_name.startswith('.'): o.fatal("'%s' is not a valid tag name." 
% tag_name) try: hash = git.rev_parse(commit) except git.GitError, e: log("bup: error: %s" % e) sys.exit(2) if not hash: log("bup: error: commit %s not found.\n" % commit) sys.exit(2) pL = git.PackIdxList(git.repo('objects/pack')) if not pL.exists(hash): log("bup: error: commit %s not found.\n" % commit) sys.exit(2) tag_file = git.repo('refs/tags/%s' % tag_name) try: tag = file(tag_file, 'w') except OSError, e: log("bup: error: could not create tag '%s': %s" % (tag_name, e)) sys.exit(3) tag.write(hash.encode('hex')) tag.close() bup-0.25/cmd/tick-cmd.py000077500000000000000000000004201225146730500150420ustar00rootroot00000000000000#!/usr/bin/env python import sys, time from bup import options optspec = """ bup tick """ o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) if extra: o.fatal("no arguments expected") t = time.time() tleft = 1 - (t - int(t)) time.sleep(tleft) bup-0.25/cmd/version-cmd.py000077500000000000000000000012161225146730500156010ustar00rootroot00000000000000#!/usr/bin/env python import sys from bup import options from bup.helpers import * optspec = """ bup version [--date|--commit|--tag] -- date display the date this version of bup was created commit display the git commit id of this version of bup tag display the tag name of this version. 
If no tag is available, display the commit id """ o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) total = (opt.date or 0) + (opt.commit or 0) + (opt.tag or 0) if total > 1: o.fatal('at most one option expected') if opt.date: print version_date() elif opt.commit: print version_commit() else: print version_tag() bup-0.25/cmd/web-cmd.py000077500000000000000000000153601225146730500146760ustar00rootroot00000000000000#!/usr/bin/env python import sys, stat, urllib, mimetypes, posixpath, time from bup import options, git, vfs from bup.helpers import * try: import tornado.httpserver import tornado.ioloop import tornado.web except ImportError: log('error: cannot find the python "tornado" module; please install it\n') sys.exit(1) handle_ctrl_c() def _compute_breadcrumbs(path, show_hidden=False): """Returns a list of breadcrumb objects for a path.""" breadcrumbs = [] breadcrumbs.append(('[root]', '/')) path_parts = path.split('/')[1:-1] full_path = '/' for part in path_parts: full_path += part + "/" url_append = "" if show_hidden: url_append = '?hidden=1' breadcrumbs.append((part, full_path+url_append)) return breadcrumbs def _contains_hidden_files(n): """Return True if n contains files starting with a '.', False otherwise.""" for sub in n: name = sub.name if len(name)>1 and name.startswith('.'): return True return False def _compute_dir_contents(n, path, show_hidden=False): """Given a vfs node, returns an iterator for display info of all subs.""" url_append = "" if show_hidden: url_append = "?hidden=1" if path != "/": yield('..', '../' + url_append, '') for sub in n: display = link = sub.name # link should be based on fully resolved type to avoid extra # HTTP redirect. 
if stat.S_ISDIR(sub.try_resolve().mode): link = sub.name + "/" if not show_hidden and len(display)>1 and display.startswith('.'): continue size = None if stat.S_ISDIR(sub.mode): display = sub.name + '/' elif stat.S_ISLNK(sub.mode): display = sub.name + '@' else: size = sub.size() size = (opt.human_readable and format_filesize(size)) or size yield (display, link + url_append, size) class BupRequestHandler(tornado.web.RequestHandler): def get(self, path): return self._process_request(path) def head(self, path): return self._process_request(path) @tornado.web.asynchronous def _process_request(self, path): path = urllib.unquote(path) print 'Handling request for %s' % path try: n = top.resolve(path) except vfs.NoSuchFile: self.send_error(404) return f = None if stat.S_ISDIR(n.mode): self._list_directory(path, n) else: self._get_file(path, n) def _list_directory(self, path, n): """Helper to produce a directory listing. Return value is either a file object, or None (indicating an error). In either case, the headers are sent. """ if not path.endswith('/') and len(path) > 0: print 'Redirecting from %s to %s' % (path, path + '/') return self.redirect(path + '/', permanent=True) try: show_hidden = int(self.request.arguments.get('hidden', [0])[-1]) except ValueError, e: show_hidden = False self.render( 'list-directory.html', path=path, breadcrumbs=_compute_breadcrumbs(path, show_hidden), files_hidden=_contains_hidden_files(n), hidden_shown=show_hidden, dir_contents=_compute_dir_contents(n, path, show_hidden)) def _get_file(self, path, n): """Process a request on a file. Return value is either a file object, or None (indicating an error). In either case, the headers are sent. 
""" ctype = self._guess_type(path) self.set_header("Last-Modified", self.date_time_string(n.mtime)) self.set_header("Content-Type", ctype) size = n.size() self.set_header("Content-Length", str(size)) assert(len(n.hash) == 20) self.set_header("Etag", n.hash.encode('hex')) if self.request.method != 'HEAD': self.flush() f = n.open() it = chunkyreader(f) def write_more(me): try: blob = it.next() except StopIteration: f.close() self.finish() return self.request.connection.stream.write(blob, callback=lambda: me(me)) write_more(write_more) def _guess_type(self, path): """Guess the type of a file. Argument is a PATH (a filename). Return value is a string of the form type/subtype, usable for a MIME Content-type header. The default implementation looks the file's extension up in the table self.extensions_map, using application/octet-stream as a default; however it would be permissible (if slow) to look inside the data to make a better guess. """ base, ext = posixpath.splitext(path) if ext in self.extensions_map: return self.extensions_map[ext] ext = ext.lower() if ext in self.extensions_map: return self.extensions_map[ext] else: return self.extensions_map[''] if not mimetypes.inited: mimetypes.init() # try to read system mime.types extensions_map = mimetypes.types_map.copy() extensions_map.update({ '': 'text/plain', # Default '.py': 'text/plain', '.c': 'text/plain', '.h': 'text/plain', }) def date_time_string(self, t): return time.strftime('%a, %d %b %Y %H:%M:%S', time.gmtime(t)) optspec = """ bup web [[hostname]:port] -- human-readable display human readable file sizes (i.e. 
3.9K, 4.7M) """ o = options.Options(optspec) (opt, flags, extra) = o.parse(sys.argv[1:]) if len(extra) > 1: o.fatal("at most one argument expected") address = ('127.0.0.1', 8080) if len(extra) > 0: addressl = extra[0].split(':', 1) addressl[1] = int(addressl[1]) address = tuple(addressl) git.check_repo_or_die() top = vfs.RefList(None) settings = dict( debug = 1, template_path = resource_path('web'), static_path = resource_path('web/static') ) # Disable buffering on stdout, for debug messages sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) application = tornado.web.Application([ (r"(/.*)", BupRequestHandler), ], **settings) if __name__ == "__main__": http_server = tornado.httpserver.HTTPServer(application) http_server.listen(address[1], address=address[0]) try: sock = http_server._socket # tornado < 2.0 except AttributeError, e: sock = http_server._sockets.values()[0] print "Serving HTTP on %s:%d..." % sock.getsockname() loop = tornado.ioloop.IOLoop.instance() loop.start() bup-0.25/cmd/xstat-cmd.py000077500000000000000000000042671225146730500152700ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2010 Rob Browning # # This code is covered under the terms of the GNU Library General # Public License as described in the bup LICENSE file. import sys, stat, errno from bup import metadata, options, xstat from bup.helpers import handle_ctrl_c, saved_errors, add_error, log optspec = """ bup xstat pathinfo [OPTION ...] 
-- v,verbose increase log output (can be used more than once) q,quiet don't show progress meter exclude-fields= exclude comma-separated fields include-fields= include comma-separated fields (definitive if first) """ target_filename = '' active_fields = metadata.all_fields handle_ctrl_c() o = options.Options(optspec) (opt, flags, remainder) = o.parse(sys.argv[1:]) treat_include_fields_as_definitive = True for flag, value in flags: if flag == '--exclude-fields': exclude_fields = frozenset(value.split(',')) for f in exclude_fields: if not f in metadata.all_fields: o.fatal(f + ' is not a valid field name') active_fields = active_fields - exclude_fields treat_include_fields_as_definitive = False elif flag == '--include-fields': include_fields = frozenset(value.split(',')) for f in include_fields: if not f in metadata.all_fields: o.fatal(f + ' is not a valid field name') if treat_include_fields_as_definitive: active_fields = include_fields treat_include_fields_as_definitive = False else: active_fields = active_fields | include_fields opt.verbose = opt.verbose or 0 opt.quiet = opt.quiet or 0 metadata.verbose = opt.verbose - opt.quiet first_path = True for path in remainder: try: m = metadata.from_path(path, archive_path = path) except (OSError,IOError), e: if e.errno == errno.ENOENT: add_error(e) continue else: raise if metadata.verbose >= 0: if not first_path: print print metadata.detailed_str(m, active_fields) first_path = False if saved_errors: log('WARNING: %d errors encountered.\n' % len(saved_errors)) sys.exit(1) else: sys.exit(0) bup-0.25/config/000077500000000000000000000000001225146730500135005ustar00rootroot00000000000000bup-0.25/config/.gitignore000066400000000000000000000001121225146730500154620ustar00rootroot00000000000000config.cmd config.h config.log config.mak config.md config.sub config.varsbup-0.25/config/Makefile000066400000000000000000000002741225146730500151430ustar00rootroot00000000000000-include config.vars config.h: configure configure.inc 
$(wildcard *.in) MAKE=${MAKE} ./configure clean: rm -f ${CONFIGURE_FILES} ${CONFIGURE_DETRITUS} ${GENERATED_FILES} rm -f *~ .*~ bup-0.25/config/config.vars.in000066400000000000000000000001041225146730500162420ustar00rootroot00000000000000CONFIGURE_FILES=@CONFIGURE_FILES@ GENERATED_FILES=@GENERATED_FILES@ bup-0.25/config/configure000077500000000000000000000035431225146730500154140ustar00rootroot00000000000000#!/bin/sh bup_find_prog() { # Prints prog path to stdout or nothing. local name="$1" result="$2" TLOGN "checking for $name" if ! [ "$result" ]; then result=`acLookFor "$name"` fi TLOG " ($result)" echo "$result" } TARGET=bup . ./configure.inc AC_INIT $TARGET if ! AC_PROG_CC; then LOG " You need to have a functional C compiler to build $TARGET" exit 1 fi MAKE="$(bup_find_prog make $MAKE)" if test -z "$MAKE"; then MAKE="$(bup_find_prog gmake "$GMAKE")" fi if test -z "$MAKE"; then AC_FAIL "ERROR: unable to find make" fi if ! ($MAKE --version | grep "GNU Make"); then AC_FAIL "ERROR: $MAKE is not GNU Make" fi MAKE_VERSION=`$MAKE --version | grep "GNU Make" | awk '{print $3}'` if [ -z "$MAKE_VERSION" ]; then AC_FAIL "ERROR: $MAKE --version does not return sensible output?" fi expr "$MAKE_VERSION" '>=' '3.81' || AC_FAIL "ERROR: $MAKE must be >= version 3.81" if test -z "$(bup_find_prog python '')"; then AC_FAIL "ERROR: unable to find python" fi if test -z "$(bup_find_prog git '')"; then AC_FAIL "ERROR: unable to find git" fi # For stat. AC_CHECK_HEADERS sys/stat.h AC_CHECK_HEADERS sys/types.h AC_CHECK_HEADERS unistd.h # For FS_IOC_GETFLAGS and FS_IOC_SETFLAGS. AC_CHECK_HEADERS linux/fs.h AC_CHECK_HEADERS sys/ioctl.h # On GNU/kFreeBSD utimensat is defined in GNU libc, but won't work. 
if [ -z "$OS_GNU_KFREEBSD" ]; then AC_CHECK_FUNCS utimensat fi AC_CHECK_FUNCS utimes AC_CHECK_FUNCS lutimes AC_CHECK_FIELD stat st_atim sys/types.h sys/stat.h unistd.h AC_CHECK_FIELD stat st_mtim sys/types.h sys/stat.h unistd.h AC_CHECK_FIELD stat st_ctim sys/types.h sys/stat.h unistd.h AC_CHECK_FIELD stat st_atimensec sys/types.h sys/stat.h unistd.h AC_CHECK_FIELD stat st_mtimensec sys/types.h sys/stat.h unistd.h AC_CHECK_FIELD stat st_ctimensec sys/types.h sys/stat.h unistd.h AC_OUTPUT config.vars bup-0.25/config/configure.inc000066400000000000000000000640631225146730500161650ustar00rootroot00000000000000# -*-shell-script-*- # @(#) configure.inc 1.40@(#) # Copyright (c) 1999-2007 David Parsons. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # 3. My name may not be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY DAVID PARSONS ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL DAVID # PARSONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED # TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING # IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF # THE POSSIBILITY OF SUCH DAMAGE. # # # this preamble code is executed when this file is sourced and it picks # interesting things off the command line. # ac_default_path="/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin:/usr/X11R6/bin" ac_standard="--src=DIR where the source lives (.) --prefix=DIR where to install the final product (/usr/local) --execdir=DIR where to put executables (prefix/bin) --sbindir=DIR where to put static executables (prefix/sbin) --confdir=DIR where to put configuration information (/etc) --libdir=DIR where to put libraries (prefix/lib) --libexecdir=DIR where to put private executables --mandir=DIR where to put manpages" __fail=exit if dirname B/A 2>/dev/null >/dev/null; then __ac_dirname() { dirname "$1" } else __ac_dirname() { echo "$1" | sed -e 's:/[^/]*$::' } fi ac_progname=$0 ac_configure_command= Q=\' for x in "$@"; do ac_configure_command="$ac_configure_command $Q$x$Q" done # ac_configure_command="$*" __d=`__ac_dirname "$ac_progname"` if [ "$__d" = "$ac_progname" ]; then AC_SRCDIR=`pwd` else AC_SRCDIR=`cd $__d;pwd` fi __ac_dir() { if test -d "$1"; then (cd "$1";pwd) else echo "$1"; fi } while [ $# -gt 0 ]; do unset matched case X"$1" in X--src|X--srcdir) AC_SRCDIR=`__ac_dir "$2"` _set_srcdir=1 shift 2;; X--src=*|X--srcdir=*) __d=`echo "$1" | sed -e 's/^[^=]*=//'` AC_SRCDIR=`__ac_dir "$__d"` _set_srcdir=1 shift 1 ;; X--prefix) AC_PREFIX=`__ac_dir "$2"` _set_prefix=1 shift 2;; X--prefix=*) __d=`echo "$1"| sed -e 's/^[^=]*=//'` AC_PREFIX=`__ac_dir "$__d"` _set_prefix=1 shift 
1;; X--confdir) AC_CONFDIR=`__ac_dir "$2"` _set_confdir=1 shift 2;; X--confdir=*) __d=`echo "$1" | sed -e 's/^[^=]*=//'` AC_CONFDIR=`__ac_dir "$__d"` _set_confdir=1 shift 1;; X--libexec|X--libexecdir) AC_LIBEXEC=`__ac_dir "$2"` _set_libexec=1 shift 2;; X--libexec=*|X--libexecdir=*) __d=`echo "$1" | sed -e 's/^[^=]*=//'` AC_LIBEXEC=`__ac_dir "$__d"` _set_libexec=1 shift 1;; X--lib|X--libdir) AC_LIBDIR=`__ac_dir "$2"` _set_libdir=1 shift 2;; X--lib=*|X--libdir=*) __d=`echo "$1" | sed -e 's/^[^=]*=//'` AC_LIBDIR=`__ac_dir "$__d"` _set_libdir=1 shift 1;; X--exec|X--execdir) AC_EXECDIR=`__ac_dir "$2"` _set_execdir=1 shift 2;; X--exec=*|X--execdir=*) __d=`echo "$1" | sed -e 's/^[^=]*=//'` AC_EXECDIR=`__ac_dir "$__d"` _set_execdir=1 shift 1;; X--sbin|X--sbindir) AC_SBINDIR=`__ac_dir "$2"` _set_sbindir=1 shift 2;; X--sbin=*|X--sbindir=*) __d=`echo "$1" | sed -e 's/^[^=]*=//'` AC_SBINDIR=`__ac_dir "$__d"` _set_sbindir=1 shift 1;; X--man|X--mandir) AC_MANDIR=`__ac_dir "$2"` _set_mandir=1 shift 2;; X--man=*|X--mandir=*) __d=`echo "$1" | sed -e 's/^[^=]*=//'` AC_MANDIR=`__ac_dir "$__d"` _set_mandir=1 shift 1;; X--use-*=*) _var=`echo "$1"| sed -n 's/^--use-\([A-Za-z][-A-Za-z0-9_]*\)=.*$/\1/p'` if [ "$_var" ]; then _val=`echo "$1" | sed -e 's/^--use-[^=]*=\(.*\)$/\1/'` _v=`echo $_var | tr '[a-z]' '[A-Z]' | tr '-' '_'` case X"$_val" in X[Yy][Ee][Ss]|X[Tt][Rr][Uu][Ee]) eval USE_${_v}=T ;; X[Nn][Oo]|X[Ff][Aa][Ll][Ss][Ee]) eval unset USE_${_v} ;; *) echo "Bad value for --use-$_var ; must be yes or no" exit 1 ;; esac else echo "Bad option $1. 
Use --help to show options" 1>&2 exit 1 fi shift 1 ;; X--use-*) _var=`echo "$1"|sed -n 's/^--use-\([A-Za-z][-A-Za-z0-9_]*\)$/\1/p'` _v=`echo $_var | tr '[a-z]' '[A-Z]' | tr '-' '_'` eval USE_${_v}=T shift 1;; X--with-*=*) _var=`echo "$1"| sed -n 's/^--with-\([A-Za-z][-A-Za-z0-9_]*\)=.*$/\1/p'` if [ "$_var" ]; then _val=`echo "$1" | sed -e 's/^--with-[^=]*=\(.*\)$/\1/'` _v=`echo $_var | tr '[a-z]' '[A-Z]' | tr '-' '_'` eval WITH_${_v}=\"$_val\" else echo "Bad option $1. Use --help to show options" 1>&2 exit 1 fi shift 1 ;; X--with-*) _var=`echo "$1" | sed -n 's/^--with-\([A-Za-z][A-Za-z0-9_-]*\)$/\1/p'` if [ "$_var" ]; then _v=`echo $_var | tr '[a-z]' '[A-Z]' | tr '-' '_'` eval WITH_${_v}=1 else echo "Bad option $1. Use --help to show options" 1>&2 exit 1 fi shift 1 ;; X--help) echo "$ac_standard" test "$ac_help" && echo "$ac_help" exit 0;; *) if [ "$LOCAL_AC_OPTIONS" ]; then eval "$LOCAL_AC_OPTIONS" else ac_error=T fi if [ "$ac_error" ]; then echo "Bad option $1. Use --help to show options" 1>&2 exit 1 fi ;; esac done # # echo w/o newline # echononl() { ${ac_echo:-echo} "${@}$ac_echo_nonl" } # # log something to the terminal and to a logfile. 
# LOG () { echo "$@" echo "$@" 1>&5 } # # log something to the terminal without a newline, and to a logfile with # a newline # LOGN () { echononl "$@" 1>&5 echo "$@" } # # log something to the terminal # TLOG () { echo "$@" 1>&5 } # # log something to the terminal, no newline # TLOGN () { echononl "$@" 1>&5 } # # AC_CONTINUE tells configure not to bomb if something fails, but to # continue blithely along # AC_CONTINUE () { __fail="return" } # # Emulate gnu autoconf's AC_CHECK_HEADERS() function # AC_CHECK_HEADERS () { AC_PROG_CC echo "/* AC_CHECK_HEADERS */" > /tmp/ngc$$.c for hdr in $*; do echo "#include <$hdr>" >> /tmp/ngc$$.c done echo "main() { }" >> /tmp/ngc$$.c LOGN "checking for header $hdr" if $AC_CC -o /tmp/ngc$$ /tmp/ngc$$.c; then AC_DEFINE 'HAVE_'`echo $hdr | tr 'a-z' 'A-Z' | tr './' '_'` 1 TLOG " (found)" rc=0 else TLOG " (not found)" rc=1 fi rm -f /tmp/ngc$$.c /tmp/ngc$$ return $rc } # # emulate GNU autoconf's AC_CHECK_FUNCS function # AC_CHECK_FUNCS () { AC_PROG_CC F=$1 shift rm -f /tmp/ngc$$.c while [ "$1" ]; do echo "#include <$1>" >> /tmp/ngc$$.c shift done cat >> /tmp/ngc$$.c << EOF main() { $F(); } EOF LOGN "checking for the $F function" if $AC_CC -o /tmp/ngc$$ /tmp/ngc$$.c $LIBS; then AC_DEFINE `echo ${2:-HAVE_$F} | tr 'a-z' 'A-Z'` 1 TLOG " (found)" rc=0 else echo "offending command was:" cat /tmp/ngc$$.c echo "$AC_CC -o /tmp/ngc$$ /tmp/ngc$$.c $LIBS" TLOG " (not found)" rc=1 fi rm -f /tmp/ngc$$.c /tmp/ngc$$ return $rc } # # check to see if some structure exists # # usage: AC_CHECK_STRUCT structure {include ...} # AC_CHECK_STRUCT () { AC_PROG_CC struct=$1 shift rm -f /tmp/ngc$$.c for include in $*; do echo "#include <$include>" >> /tmp/ngc$$.c done cat >> /tmp/ngc$$.c << EOF main() { struct $struct foo; } EOF LOGN "checking for struct $struct" if $AC_CC -o /tmp/ngc$$ /tmp/ngc$$.c $AC_LIBS 2>>config.log; then AC_DEFINE HAVE_STRUCT_`echo ${struct} | tr 'a-z' 'A-Z'` TLOG " (found)" rc=0 else TLOG " (not found)" rc=1 fi rm -f /tmp/ngc$$.c /tmp/ngc$$ 
return $rc } # # check to see if some structure contains a field # # usage: AC_CHECK_FIELD structure field {include ...} # AC_CHECK_FIELD () { AC_PROG_CC struct=$1 field=$2 shift 2 rm -f /tmp/ngc$$.c for include in $*;do echo "#include <$include>" >> /tmp/ngc$$.c done cat >> /tmp/ngc$$.c << EOF main() { struct $struct foo; foo.$field; } EOF LOGN "checking that struct $struct has a $field field" if $AC_CC -o /tmp/ngc$$ /tmp/ngc$$.c $AC_LIBS 2>>config.log; then AC_DEFINE HAVE_`echo ${struct}_$field | tr 'a-z' 'A-Z'` TLOG " (yes)" rc=0 else TLOG " (no)" rc=1 fi rm -f /tmp/ngc$$.c /tmp/ngc$$ return $rc } # # check that the C compiler works # AC_PROG_CC () { test "$AC_CC" && return 0 cat > /tmp/ngc$$.c << \EOF #include main() { puts("hello, sailor"); } EOF TLOGN "checking the C compiler" unset AC_CFLAGS AC_LDFLAGS if [ "$CC" ] ; then AC_CC="$CC" elif [ "$WITH_PATH" ]; then AC_CC=`acLookFor cc` elif [ "`acLookFor cc`" ]; then # don't specify the full path if the user is looking in their $PATH # for a C compiler. AC_CC=cc fi # finally check for POSIX c89 test "$AC_CC" || AC_CC=`acLookFor c89` if [ ! "$AC_CC" ]; then TLOG " (no C compiler found)" $__fail 1 fi echo "checking out the C compiler" $AC_CC -o /tmp/ngc$$ /tmp/ngc$$.c status=$? 
TLOGN " ($AC_CC)" if [ $status -eq 0 ]; then TLOG " ok" # check that the CFLAGS and LDFLAGS aren't bogus unset AC_CFLAGS AC_LDFLAGS if [ "$CFLAGS" ]; then test "$CFLAGS" && echo "validating CFLAGS=${CFLAGS}" if $AC_CC $CFLAGS -o /tmp/ngc$$.o /tmp/ngc$$.c ; then AC_CFLAGS=${CFLAGS:-"-g"} test "$CFLAGS" && echo "CFLAGS=\"${CFLAGS}\" are okay" elif [ "$CFLAGS" ]; then echo "ignoring bogus CFLAGS=\"${CFLAGS}\"" fi else AC_CFLAGS=-g fi if [ "$LDFLAGS" ]; then test "$LDFLAGS" && echo "validating LDFLAGS=${LDFLAGS}" if $AC_CC $LDFLAGS -o /tmp/ngc$$ /tmp/ngc$$.o; then AC_LDFLAGS=${LDFLAGS:-"-g"} test "$LDFLAGS" && TLOG "LDFLAGS=\"${LDFLAGS}\" are okay" elif [ "$LDFLAGS" ]; then TLOG "ignoring bogus LDFLAGS=\"${LDFLAGS}\"" fi else AC_LDFLAGS=${CFLAGS:-"-g"} fi else AC_FAIL " does not compile code properly" fi AC_SUB 'CC' "$AC_CC" rm -f /tmp/ngc$$ /tmp/ngc$$.c /tmp/ngc$$.o return $status } # # acLookFor actually looks for a program, without setting anything. # acLookFor () { path=${AC_PATH:-$ac_default_path} case "X$1" in X-[rx]) __mode=$1 shift ;; *) __mode=-x ;; esac oldifs="$IFS" for program in $*; do IFS=":" for x in $path; do if [ $__mode $x/$program -a -f $x/$program ]; then echo $x/$program break 2 fi done done IFS="$oldifs" unset __mode } # # check that a program exists and set its path # MF_PATH_INCLUDE () { SYM=$1; shift case X$1 in X-[rx]) __mode=$1 shift ;; *) unset __mode ;; esac TLOGN "looking for $1" DEST=`acLookFor $__mode $*` __sym=`echo "$SYM" | tr '[a-z]' '[A-Z]'` if [ "$DEST" ]; then TLOG " ($DEST)" echo "$1 is $DEST" AC_MAK $SYM AC_DEFINE PATH_$__sym \""$DEST"\" AC_SUB $__sym "$DEST" eval CF_$SYM=$DEST return 0 else #AC_SUB $__sym '' echo "$1 is not found" TLOG " (not found)" return 1 fi } # # AC_INIT starts the ball rolling # # After AC_INIT, fd's 1 and 2 point to config.log # and fd 5 points to what used to be fd 1 # AC_INIT () { __config_files="config.cmd config.sub config.h config.mak config.log" __config_detritus="config.h.tmp" rm -f $__config_files 
$__config_detritus __cwd=`pwd` exec 5>&1 1>$__cwd/config.log 2>&1 AC_CONFIGURE_FOR=__AC_`echo $1 | sed -e 's/\..$//' | tr 'a-z' 'A-Z' | tr ' ' '_'`_D # check to see whether to use echo -n or echo ...\c # echo -n hello > $$ echo world >> $$ if grep "helloworld" $$ >/dev/null; then ac_echo="echo -n" echo "[echo -n] works" else ac_echo="echo" echo 'hello\c' > $$ echo 'world' >> $$ if grep "helloworld" $$ >/dev/null; then ac_echo_nonl='\c' echo "[echo ...\\c] works" fi fi rm -f $$ LOG "Configuring for [$1]" rm -f $__cwd/config.h cat > $__cwd/config.h.tmp << EOF /* * configuration for $1${2:+" ($2)"}, generated `date` * by ${LOGNAME:-`whoami`}@`hostname` */ #ifndef $AC_CONFIGURE_FOR #define $AC_CONFIGURE_FOR 1 EOF unset __share if [ -d $AC_PREFIX/share/man ]; then for t in 1 2 3 4 5 6 7 8 9; do if [ -d $AC_PREFIX/share/man/man$t ]; then __share=/share elif [ -d $AC_PREFIX/share/man/cat$t ]; then __share=/share fi done else __share= fi if [ -d $AC_PREFIX/libexec ]; then __libexec=libexec else __libexec=lib fi AC_PREFIX=${AC_PREFIX:-/usr/local} AC_EXECDIR=${AC_EXECDIR:-$AC_PREFIX/bin} AC_SBINDIR=${AC_SBINDIR:-$AC_PREFIX/sbin} AC_LIBDIR=${AC_LIBDIR:-$AC_PREFIX/lib} AC_MANDIR=${AC_MANDIR:-$AC_PREFIX$__share/man} AC_LIBEXEC=${AC_LIBEXEC:-$AC_PREFIX/$__libexec} AC_CONFDIR=${AC_CONFDIR:-/etc} AC_PATH=${WITH_PATH:-$PATH} AC_PROG_CPP AC_PROG_INSTALL ac_os=`uname -s | sed 's/[-_].*//; s/[^a-zA-Z0-9]/_/g'` _os=`echo $ac_os | tr '[a-z]' '[A-Z]'` AC_DEFINE OS_$_os 1 eval OS_${_os}=1 unset _os } # # AC_LIBRARY checks to see if a given library exists and contains the # given function. # usage: AC_LIBRARY function library [alternate ...] 
# AC_LIBRARY() { SRC=$1 shift __acllibs= __aclhdrs= for x in "$@"; do case X"$x" in X-l*) __acllibs="$__acllibs $x" ;; *) __aclhdrs="$__aclhdrs $x" ;; esac done # first see if the function can be found in any of the # current libraries AC_QUIET AC_CHECK_FUNCS $SRC $__aclhdrs && return 0 # then search through the list of libraries __libs="$LIBS" for x in $__acllibs; do LIBS="$__libs $x" if AC_QUIET AC_CHECK_FUNCS $SRC $__aclhdrs; then AC_LIBS="$AC_LIBS $x" return 0 fi done return 1 } # # AC_PROG_LEX checks to see if LEX exists, and if it's lex or flex. # AC_PROG_LEX() { TLOGN "looking for lex " DEST=`acLookFor lex` if [ "$DEST" ]; then AC_MAK LEX AC_DEFINE PATH_LEX \"$DEST\" AC_SUB 'LEX' "$DEST" echo "lex is $DEST" else DEST=`acLookFor flex` if [ "$DEST" ]; then AC_MAK FLEX AC_DEFINE 'LEX' \"$DEST\" AC_SUB 'LEX', "$DEST" echo "lex is $DEST" else AC_SUB LEX '' echo "neither lex or flex found" TLOG " (not found)" return 1 fi fi if AC_LIBRARY yywrap -ll -lfl; then TLOG "($DEST)" return 0 fi TLOG "(no lex library found)" return 1 } # # AC_PROG_YACC checks to see if YACC exists, and if it's bison or # not. 
# AC_PROG_YACC () { TLOGN "looking for yacc " DEST=`acLookFor yacc` if [ "$DEST" ]; then AC_MAK YACC AC_DEFINE PATH_YACC \"$DEST\" AC_SUB 'YACC' "$DEST" TLOG "($DEST)" echo "yacc is $DEST" else DEST=`acLookFor bison` if [ "$DEST" ]; then AC_MAK BISON AC_DEFINE 'YACC' \"$DEST\" AC_SUB 'YACC' "$DEST -y" echo "yacc is $DEST -y" TLOG "($DEST -y)" else AC_SUB 'YACC' '' echo "neither yacc or bison found" TLOG " (not found)" return 1 fi fi return 0 } # # AC_PROG_LN_S checks to see if ln exists, and, if so, if ln -s works # AC_PROG_LN_S () { test "$AC_FIND_PROG" || AC_PROG_FIND test "$AC_FIND_PROG" || return 1 TLOGN "looking for \"ln -s\"" DEST=`acLookFor ln` if [ "$DEST" ]; then rm -f /tmp/b$$ $DEST -s /tmp/a$$ /tmp/b$$ if [ "`$AC_FIND_PROG /tmp/b$$ -type l -print`" ]; then TLOG " ($DEST)" echo "$DEST exists, and ln -s works" AC_SUB 'LN_S' "$DEST -s" rm -f /tmp/b$$ else AC_SUB 'LN_S' '' TLOG " ($DEST exists, but -s does not seem to work)" echo "$DEST exists, but ln -s doesn't seem to work" rm -f /tmp/b$$ return 1 fi else AC_SUB 'LN_S' '' echo "ln not found" TLOG " (not found)" return 1 fi } # # AC_PROG_FIND looks for the find program and sets the FIND environment # variable # AC_PROG_FIND () { if test -z "$AC_FIND_PROG"; then MF_PATH_INCLUDE FIND find rc=$? AC_FIND_PROG=$DEST return $rc fi return 0 } # # AC_PROG_AWK looks for the awk program and sets the AWK environment # variable # AC_PROG_AWK () { if test -z "$AC_AWK_PROG"; then MF_PATH_INCLUDE AWK awk rc=$? AC_AWK_PROG=$DEST return $rc fi return 0 } # # AC_PROG_SED looks for the sed program and sets the SED environment # variable # AC_PROG_SED () { if test -z "$AC_SED_PROG"; then MF_PATH_INCLUDE SED sed rc=$? 
AC_SED_PROG=$DEST return $rc fi return 0 } # # AC_HEADER_SYS_WAIT looks for sys/wait.h # AC_HEADER_SYS_WAIT () { AC_CHECK_HEADERS sys/wait.h || return 1 } # # AC_TYPE_PID_T checks to see if the pid_t type exists # AC_TYPE_PID_T () { cat > /tmp/pd$$.c << EOF #include main() { pid_t me; } EOF LOGN "checking for pid_t" if $AC_CC -c /tmp/pd$$.c -o /tmp/pd$$.o; then TLOG " (found)" rc=0 else echo "typedef int pid_t;" >> $__cwd/config.h.tmp TLOG " (not found)" rc=1 fi rm -f /tmp/pd$$.o /tmp/pd$$.c return $rc } # # AC_C_CONST checks to see if the compiler supports the const keyword # AC_C_CONST () { cat > /tmp/pd$$.c << EOF const char me=1; EOF LOGN "checking for \"const\" keyword" if $AC_CC -c /tmp/pd$$.c -o /tmp/pd$$.o; then TLOG " (yes)" rc=0 else AC_DEFINE 'const' '/**/' TLOG " (no)" rc=1 fi rm -f /tmp/pd$$.o /tmp/pd$$.c return $rc } # # AC_SCALAR_TYPES checks to see if the compiler can generate 2 and 4 byte ints. # AC_SCALAR_TYPES () { cat > /tmp/pd$$.c << EOF #include main() { unsigned long v_long; unsigned int v_int; unsigned short v_short; if (sizeof v_long == 4) puts("#define DWORD unsigned long"); else if (sizeof v_int == 4) puts("#define DWORD unsigned int"); else exit(1); if (sizeof v_int == 2) puts("#define WORD unsigned int"); else if (sizeof v_short == 2) puts("#define WORD unsigned short"); else exit(2); puts("#define BYTE unsigned char"); exit(0); } EOF rc=1 LOGN "defining WORD & DWORD scalar types" if $AC_CC /tmp/pd$$.c -o /tmp/pd$$; then if /tmp/pd$$ >> $__cwd/config.h.tmp; then rc=0 fi fi case "$rc" in 0) TLOG "" ;; *) TLOG " ** FAILED **" ;; esac rm -f /tmp/pd$$ /tmp/pd$$.c } # # AC_OUTPUT generates makefiles from makefile.in's # AC_OUTPUT () { cd $__cwd AC_SUB 'LIBS' "$AC_LIBS" AC_SUB 'CONFIGURE_FILES' "$__config_files" AC_SUB 'CONFIGURE_DETRITUS' "$__config_detritus" AC_SUB 'GENERATED_FILES' "$*" AC_SUB 'CFLAGS' "$AC_CFLAGS" AC_SUB 'FCFLAGS' "$AC_FCFLAGS" AC_SUB 'CXXFLAGS' "$AC_CXXFLAGS" AC_SUB 'LDFLAGS' "$AC_LDFLAGS" AC_SUB 'srcdir' "$AC_SRCDIR" 
AC_SUB 'prefix' "$AC_PREFIX" AC_SUB 'exedir' "$AC_EXECDIR" AC_SUB 'sbindir' "$AC_SBINDIR" AC_SUB 'libdir' "$AC_LIBDIR" AC_SUB 'libexec' "$AC_LIBEXEC" AC_SUB 'confdir' "$AC_CONFDIR" AC_SUB 'mandir' "$AC_MANDIR" if [ -r config.sub ]; then test "$AC_SED_PROG" || AC_PROG_SED test "$AC_SED_PROG" || return 1 echo >> config.h.tmp echo "#endif/* ${AC_CONFIGURE_FOR} */" >> config.h.tmp rm -f config.cmd Q=\' cat - > config.cmd << EOF #! /bin/sh ${CXX:+CXX=${Q}${CXX}${Q}} ${CXXFLAGS:+CXXFLAGS=${Q}${CXXFLAGS}${Q}} ${FC:+FC=${Q}${FC}${Q}} ${FCFLAGS:+FCFLAGS=${Q}${FCFLAGS}${Q}} ${CC:+CC=${Q}${CC}${Q}} ${CFLAGS:+CFLAGS=${Q}${CFLAGS}${Q}} $ac_progname $ac_configure_command EOF chmod +x config.cmd __d=$AC_SRCDIR for makefile in $*;do if test -r $__d/${makefile}.in; then LOG "generating $makefile" ./config.md `__ac_dirname ./$makefile` 2>/dev/null $AC_SED_PROG -f config.sub < $__d/${makefile}.in > $makefile __config_files="$__config_files $makefile" else LOG "WARNING: ${makefile}.in does not exist!" fi done unset __d else echo fi cp $__cwd/config.h.tmp $__cwd/config.h } # # AC_CHECK_FLOCK checks to see if flock() exists and if the LOCK_NB argument # works properly. # AC_CHECK_FLOCK() { AC_CHECK_HEADERS sys/types.h sys/file.h fcntl.h cat << EOF > $$.c #include #include #include #include main() { int x = open("$$.c", O_RDWR, 0666); int y = open("$$.c", O_RDWR, 0666); if (flock(x, LOCK_EX) != 0) exit(1); if (flock(y, LOCK_EX|LOCK_NB) == 0) exit(1); exit(0); } EOF LOGN "checking for flock()" HAS_FLOCK=0 if $AC_CC -o flock $$.c ; then if ./flock ; then LOG " (found)" HAS_FLOCK=1 AC_DEFINE HAS_FLOCK else LOG " (bad)" fi else LOG " (no)" fi rm -f flock $$.c case "$HAS_FLOCK" in 0) return 1 ;; *) return 0 ;; esac } # # AC_CHECK_RESOLVER finds out whether the berkeley resolver is # present on this system. 
# AC_CHECK_RESOLVER () { AC_PROG_CC TLOGN "checking for the Berkeley resolver library" cat > /tmp/ngc$$.c << EOF #include #include #include #include main() { char bfr[256]; res_init(); res_query("hello", C_IN, T_A, bfr, sizeof bfr); } EOF # first see if res_init() and res_query() actually exist... if $AC_CC -o /tmp/ngc$$ /tmp/ngc$$.c; then __extralib= elif $AC_CC -o /tmp/ngc$$ /tmp/ngc$$.c -lresolv; then __extralib=-lresolv AC_LIBS="$AC_LIBS -lresolv" else TLOG " (not found)" rm -f /tmp/ngc$$.c return 1 fi # if res_init() and res_query() actually exist, check to # see if the HEADER structure is defined ... cat > /tmp/ngc$$.c << EOF #include #include #include #include main() { HEADER hhh; res_init(); } EOF if $AC_CC -o /tmp/ngc$$ /tmp/ngc$$.c $__extralib; then TLOG " (found)" elif $AC_CC -DBIND_8_COMPAT -o /tmp/ngc$$ /tmp/ngc$$.c $__extralib; then TLOG " (bind9 with BIND_8_COMPAT)" AC_DEFINE BIND_8_COMPAT 1 else TLOG " (broken)" rm -f /tmp/ngc$$.c return 1 fi rm -f /tmp/ngc$$.c return 0 } # # AC_PROG_INSTALL finds the install program and guesses whether it's a # Berkeley or GNU install program # AC_PROG_INSTALL () { DEST=`acLookFor install` LOGN "checking for install" unset IS_BSD if [ "$DEST" ]; then # BSD install or GNU install? Let's find out... touch /tmp/a$$ $DEST /tmp/a$$ /tmp/b$$ if test -r /tmp/a$$; then LOG " ($DEST)" else IS_BSD=1 LOG " ($DEST) bsd install" fi rm -f /tmp/a$$ /tmp/b$$ else DEST=`acLookFor ginstall` if [ "$DEST" ]; then LOG " ($DEST)" else DEST="false" LOG " (not found)" fi fi if [ "$IS_BSD" ]; then PROG_INSTALL="$DEST -c" else PROG_INSTALL="$DEST" fi AC_SUB 'INSTALL' "$PROG_INSTALL" AC_SUB 'INSTALL_PROGRAM' "$PROG_INSTALL -s -m 755" AC_SUB 'INSTALL_DATA' "$PROG_INSTALL -m 444" # finally build a little directory installer # if mkdir -p works, use that, otherwise use install -d, # otherwise build a script to do it by hand. # in every case, test to see if the directory exists before # making it. 
if mkdir -p $$a/b; then # I like this method best. __mkdir="mkdir -p" rmdir $$a/b rmdir $$a elif $PROG_INSTALL -d $$a/b; then __mkdir="$PROG_INSTALL -d" rmdir $$a/b rmdir $$a fi __config_files="$__config_files config.md" AC_SUB 'INSTALL_DIR' "$__cwd/config.md" echo "#! /bin/sh" > $__cwd/config.md echo "# script generated" `date` "by configure.sh" >> $__cwd/config.md echo >> $__cwd/config.md if [ "$__mkdir" ]; then echo "test -d \"\$1\" || $__mkdir \"\$1\"" >> $__cwd/config.md echo "exit $?" >> $__cwd/config.md else cat - >> $__cwd/config.md << \EOD pieces=`IFS=/; for x in $1; do echo $x; done` dir= for x in $pieces; do dir="$dir$x" mkdir $dir || exit 1 dir="$dir/" done exit 0 EOD fi chmod +x $__cwd/config.md } # # acCheckCPP is a local that runs a C preprocessor with a given set of # compiler options # acCheckCPP () { cat > /tmp/ngc$$.c << EOF #define FOO BAR FOO EOF if $1 $2 /tmp/ngc$$.c > /tmp/ngc$$.o; then if grep -v '#define' /tmp/ngc$$.o | grep -s BAR >/dev/null; then echo "CPP=[$1], CPPFLAGS=[$2]" AC_SUB 'CPP' "$1" AC_SUB 'CPPFLAGS' "$2" rm /tmp/ngc$$.c /tmp/ngc$$.o return 0 fi fi rm /tmp/ngc$$.c /tmp/ngc$$.o return 1 } # # AC_PROG_CPP checks for cpp, then checks to see which CPPFLAGS are needed # to run it as a filter. 
# AC_PROG_CPP () { if [ "$AC_CPP_PROG" ]; then DEST=$AC_CPP_PROG else __ac_path="$AC_PATH" AC_PATH="/lib:/usr/lib:${__ac_path:-$ac_default_path}" DEST=`acLookFor cpp` AC_PATH="$__ac_path" fi unset fail LOGN "Looking for cpp" if [ "$DEST" ]; then TLOGN " ($DEST)" acCheckCPP $DEST "$CPPFLAGS" || \ acCheckCPP $DEST -traditional-cpp -E || \ acCheckCPP $DEST -E || \ acCheckCPP $DEST -traditional-cpp -pipe || \ acCheckCPP $DEST -pipe || fail=1 if [ "$fail" ]; then AC_FAIL " (can't run cpp as a pipeline)" else TLOG " ok" return 0 fi fi AC_FAIL " (not found)" } # # AC_FAIL spits out an error message, then __fail's AC_FAIL() { LOG "$*" $__fail 1 } # # AC_SUB writes a substitution into config.sub AC_SUB() { ( echononl "s;@$1@;" _subst=`echo $2 | sed -e 's/;/\\;/g'` echononl "$_subst" echo ';g' ) >> $__cwd/config.sub } # # AC_MAK writes a define into config.mak AC_MAK() { echo "HAVE_$1 = 1" >> $__cwd/config.mak } # # AC_DEFINE adds a #define to config.h AC_DEFINE() { echo "#define $1 ${2:-1}" >> $__cwd/config.h.tmp } # # AC_INCLUDE adds a #include to config.h AC_INCLUDE() { echo "#include \"$1\"" >> $__cwd/config.h.tmp } # # AC_CONFIG adds a configuration setting to all the config files AC_CONFIG() { AC_DEFINE "PATH_$1" \""$2"\" AC_MAK "$1" AC_SUB "$1" "$2" } # # AC_QUIET does something quietly AC_QUIET() { eval $* 5>/dev/null } bup-0.25/configure000077500000000000000000000001671225146730500141460ustar00rootroot00000000000000#!/bin/sh if test "$#" -gt 0; then echo "Usage: configure" 1>&2 exit 1 fi cd config && exec ./configure "$@" bup-0.25/format-subst.pl000077500000000000000000000005171225146730500152240ustar00rootroot00000000000000#!/usr/bin/env perl use warnings; use strict; sub fix($) { my $s = shift; chomp $s; return $s; } while (<>) { s{ \$Format:\%d\$ }{ my $tag = fix(`git describe --match="[0-9]*"`); "(tag: bup-$tag)" }ex; s{ \$Format:([^\$].*)\$ }{ fix(`git log -1 --pretty=format:"$1"`) }ex; print; } 
bup-0.25/lib/000077500000000000000000000000001225146730500130015ustar00rootroot00000000000000bup-0.25/lib/__init__.py000066400000000000000000000000001225146730500151000ustar00rootroot00000000000000bup-0.25/lib/bup/000077500000000000000000000000001225146730500135675ustar00rootroot00000000000000bup-0.25/lib/bup/.gitattributes000066400000000000000000000000361225146730500164610ustar00rootroot00000000000000_version.py.pre export-subst bup-0.25/lib/bup/__init__.py000066400000000000000000000000001225146730500156660ustar00rootroot00000000000000bup-0.25/lib/bup/_helpers.c000066400000000000000000000744221225146730500155450ustar00rootroot00000000000000#define _LARGEFILE64_SOURCE 1 #define PY_SSIZE_T_CLEAN 1 #undef NDEBUG #include "../../config/config.h" // According to Python, its header has to go first: // http://docs.python.org/2/c-api/intro.html#include-files #include #include #include #include #include #include #include #include #include #ifdef HAVE_SYS_TYPES_H #include #endif #ifdef HAVE_SYS_STAT_H #include #endif #ifdef HAVE_UNISTD_H #include #endif #ifdef HAVE_LINUX_FS_H #include #endif #ifdef HAVE_SYS_IOCTL_H #include #endif #include "bupsplit.h" #if defined(FS_IOC_GETFLAGS) && defined(FS_IOC_SETFLAGS) #define BUP_HAVE_FILE_ATTRS 1 #endif #ifndef FS_NOCOW_FL // Of course, this assumes it's a bitfield value. #define FS_NOCOW_FL 0 #endif static int istty2 = 0; // Probably we should use autoconf or something and set HAVE_PY_GETARGCARGV... #if __WIN32__ || __CYGWIN__ // There's no 'ps' on win32 anyway, and Py_GetArgcArgv() isn't available. static void unpythonize_argv(void) { } #else // not __WIN32__ // For some reason this isn't declared in Python.h extern void Py_GetArgcArgv(int *argc, char ***argv); static void unpythonize_argv(void) { int argc, i; char **argv, *arge; Py_GetArgcArgv(&argc, &argv); for (i = 0; i < argc-1; i++) { if (argv[i] + strlen(argv[i]) + 1 != argv[i+1]) { // The argv block doesn't work the way we expected; it's unsafe // to mess with it. 
return; } } arge = argv[argc-1] + strlen(argv[argc-1]) + 1; if (strstr(argv[0], "python") && argv[1] == argv[0] + strlen(argv[0]) + 1) { char *p; size_t len, diff; p = strrchr(argv[1], '/'); if (p) { p++; diff = p - argv[0]; len = arge - p; memmove(argv[0], p, len); memset(arge - diff, 0, diff); for (i = 0; i < argc; i++) argv[i] = argv[i+1] ? argv[i+1]-diff : NULL; } } } #endif // not __WIN32__ or __CYGWIN__ static PyObject *selftest(PyObject *self, PyObject *args) { if (!PyArg_ParseTuple(args, "")) return NULL; return Py_BuildValue("i", !bupsplit_selftest()); } static PyObject *blobbits(PyObject *self, PyObject *args) { if (!PyArg_ParseTuple(args, "")) return NULL; return Py_BuildValue("i", BUP_BLOBBITS); } static PyObject *splitbuf(PyObject *self, PyObject *args) { unsigned char *buf = NULL; Py_ssize_t len = 0; int out = 0, bits = -1; if (!PyArg_ParseTuple(args, "t#", &buf, &len)) return NULL; assert(len <= INT_MAX); out = bupsplit_find_ofs(buf, len, &bits); if (out) assert(bits >= BUP_BLOBBITS); return Py_BuildValue("ii", out, bits); } static PyObject *bitmatch(PyObject *self, PyObject *args) { unsigned char *buf1 = NULL, *buf2 = NULL; Py_ssize_t len1 = 0, len2 = 0; Py_ssize_t byte; int bit; if (!PyArg_ParseTuple(args, "t#t#", &buf1, &len1, &buf2, &len2)) return NULL; bit = 0; for (byte = 0; byte < len1 && byte < len2; byte++) { int b1 = buf1[byte], b2 = buf2[byte]; if (b1 != b2) { for (bit = 0; bit < 8; bit++) if ( (b1 & (0x80 >> bit)) != (b2 & (0x80 >> bit)) ) break; break; } } assert(byte <= (INT_MAX >> 3)); return Py_BuildValue("i", byte*8 + bit); } static PyObject *firstword(PyObject *self, PyObject *args) { unsigned char *buf = NULL; Py_ssize_t len = 0; uint32_t v; if (!PyArg_ParseTuple(args, "t#", &buf, &len)) return NULL; if (len < 4) return NULL; v = ntohl(*(uint32_t *)buf); return PyLong_FromUnsignedLong(v); } #define BLOOM2_HEADERLEN 16 static void to_bloom_address_bitmask4(const unsigned char *buf, const int nbits, uint64_t *v, unsigned char 
*bitmask) { int bit; uint32_t high; uint64_t raw, mask; memcpy(&high, buf, 4); mask = (1<> (37-nbits)) & 0x7; *v = (raw >> (40-nbits)) & mask; *bitmask = 1 << bit; } static void to_bloom_address_bitmask5(const unsigned char *buf, const int nbits, uint32_t *v, unsigned char *bitmask) { int bit; uint32_t high; uint32_t raw, mask; memcpy(&high, buf, 4); mask = (1<> (29-nbits)) & 0x7; *v = (raw >> (32-nbits)) & mask; *bitmask = 1 << bit; } #define BLOOM_SET_BIT(name, address, otype) \ static void name(unsigned char *bloom, const unsigned char *buf, const int nbits)\ {\ unsigned char bitmask;\ otype v;\ address(buf, nbits, &v, &bitmask);\ bloom[BLOOM2_HEADERLEN+v] |= bitmask;\ } BLOOM_SET_BIT(bloom_set_bit4, to_bloom_address_bitmask4, uint64_t) BLOOM_SET_BIT(bloom_set_bit5, to_bloom_address_bitmask5, uint32_t) #define BLOOM_GET_BIT(name, address, otype) \ static int name(const unsigned char *bloom, const unsigned char *buf, const int nbits)\ {\ unsigned char bitmask;\ otype v;\ address(buf, nbits, &v, &bitmask);\ return bloom[BLOOM2_HEADERLEN+v] & bitmask;\ } BLOOM_GET_BIT(bloom_get_bit4, to_bloom_address_bitmask4, uint64_t) BLOOM_GET_BIT(bloom_get_bit5, to_bloom_address_bitmask5, uint32_t) static PyObject *bloom_add(PyObject *self, PyObject *args) { unsigned char *sha = NULL, *bloom = NULL; unsigned char *end; Py_ssize_t len = 0, blen = 0; int nbits = 0, k = 0; if (!PyArg_ParseTuple(args, "w#s#ii", &bloom, &blen, &sha, &len, &nbits, &k)) return NULL; if (blen < 16+(1< 29) return NULL; for (end = sha + len; sha < end; sha += 20/k) bloom_set_bit5(bloom, sha, nbits); } else if (k == 4) { if (nbits > 37) return NULL; for (end = sha + len; sha < end; sha += 20/k) bloom_set_bit4(bloom, sha, nbits); } else return NULL; return Py_BuildValue("n", len/20); } static PyObject *bloom_contains(PyObject *self, PyObject *args) { unsigned char *sha = NULL, *bloom = NULL; Py_ssize_t len = 0, blen = 0; int nbits = 0, k = 0; unsigned char *end; int steps; if (!PyArg_ParseTuple(args, 
"t#s#ii", &bloom, &blen, &sha, &len, &nbits, &k)) return NULL; if (len != 20) return NULL; if (k == 5) { if (nbits > 29) return NULL; for (steps = 1, end = sha + 20; sha < end; sha += 20/k, steps++) if (!bloom_get_bit5(bloom, sha, nbits)) return Py_BuildValue("Oi", Py_None, steps); } else if (k == 4) { if (nbits > 37) return NULL; for (steps = 1, end = sha + 20; sha < end; sha += 20/k, steps++) if (!bloom_get_bit4(bloom, sha, nbits)) return Py_BuildValue("Oi", Py_None, steps); } else return NULL; return Py_BuildValue("ii", 1, k); } static uint32_t _extract_bits(unsigned char *buf, int nbits) { uint32_t v, mask; mask = (1<> (32-nbits)) & mask; return v; } static PyObject *extract_bits(PyObject *self, PyObject *args) { unsigned char *buf = NULL; Py_ssize_t len = 0; int nbits = 0; if (!PyArg_ParseTuple(args, "t#i", &buf, &len, &nbits)) return NULL; if (len < 4) return NULL; return PyLong_FromUnsignedLong(_extract_bits(buf, nbits)); } struct sha { unsigned char bytes[20]; }; struct idx { unsigned char *map; struct sha *cur; struct sha *end; uint32_t *cur_name; Py_ssize_t bytes; int name_base; }; static int _cmp_sha(const struct sha *sha1, const struct sha *sha2) { int i; for (i = 0; i < sizeof(struct sha); i++) if (sha1->bytes[i] != sha2->bytes[i]) return sha1->bytes[i] - sha2->bytes[i]; return 0; } static void _fix_idx_order(struct idx **idxs, int *last_i) { struct idx *idx; int low, mid, high, c = 0; idx = idxs[*last_i]; if (idxs[*last_i]->cur >= idxs[*last_i]->end) { idxs[*last_i] = NULL; PyMem_Free(idx); --*last_i; return; } if (*last_i == 0) return; low = *last_i-1; mid = *last_i; high = 0; while (low >= high) { mid = (low + high) / 2; c = _cmp_sha(idx->cur, idxs[mid]->cur); if (c < 0) high = mid + 1; else if (c > 0) low = mid - 1; else break; } if (c < 0) ++mid; if (mid == *last_i) return; memmove(&idxs[mid+1], &idxs[mid], (*last_i-mid)*sizeof(struct idx *)); idxs[mid] = idx; } static uint32_t _get_idx_i(struct idx *idx) { if (idx->cur_name == NULL) return 
idx->name_base; return ntohl(*idx->cur_name) + idx->name_base; } #define MIDX4_HEADERLEN 12 static PyObject *merge_into(PyObject *self, PyObject *args) { PyObject *ilist = NULL; unsigned char *fmap = NULL; struct sha *sha_ptr, *sha_start = NULL; uint32_t *table_ptr, *name_ptr, *name_start; struct idx **idxs = NULL; Py_ssize_t flen = 0; int bits = 0, i; unsigned int total; uint32_t count, prefix; int num_i; int last_i; if (!PyArg_ParseTuple(args, "w#iIO", &fmap, &flen, &bits, &total, &ilist)) return NULL; num_i = PyList_Size(ilist); idxs = (struct idx **)PyMem_Malloc(num_i * sizeof(struct idx *)); for (i = 0; i < num_i; i++) { long len, sha_ofs, name_map_ofs; idxs[i] = (struct idx *)PyMem_Malloc(sizeof(struct idx)); PyObject *itup = PyList_GetItem(ilist, i); if (!PyArg_ParseTuple(itup, "t#llli", &idxs[i]->map, &idxs[i]->bytes, &len, &sha_ofs, &name_map_ofs, &idxs[i]->name_base)) return NULL; idxs[i]->cur = (struct sha *)&idxs[i]->map[sha_ofs]; idxs[i]->end = &idxs[i]->cur[len]; if (name_map_ofs) idxs[i]->cur_name = (uint32_t *)&idxs[i]->map[name_map_ofs]; else idxs[i]->cur_name = NULL; } table_ptr = (uint32_t *)&fmap[MIDX4_HEADERLEN]; sha_start = sha_ptr = (struct sha *)&table_ptr[1<= 0) { struct idx *idx; uint32_t new_prefix; if (count % 102424 == 0 && istty2) fprintf(stderr, "midx: writing %.2f%% (%d/%d)\r", count*100.0/total, count, total); idx = idxs[last_i]; new_prefix = _extract_bits((unsigned char *)idx->cur, bits); while (prefix < new_prefix) table_ptr[prefix++] = htonl(count); memcpy(sha_ptr++, idx->cur, sizeof(struct sha)); *name_ptr++ = htonl(_get_idx_i(idx)); ++idx->cur; if (idx->cur_name != NULL) ++idx->cur_name; _fix_idx_order(idxs, &last_i); ++count; } while (prefix < (1<> 32); return value; // already in network byte order MSB-LSB } #define FAN_ENTRIES 256 static PyObject *write_idx(PyObject *self, PyObject *args) { char *filename = NULL; PyObject *idx = NULL; PyObject *part; unsigned char *fmap = NULL; Py_ssize_t flen = 0; unsigned int total = 0; 
uint32_t count; int i, j, ofs64_count; uint32_t *fan_ptr, *crc_ptr, *ofs_ptr; uint64_t *ofs64_ptr; struct sha *sha_ptr; if (!PyArg_ParseTuple(args, "sw#OI", &filename, &fmap, &flen, &idx, &total)) return NULL; if (PyList_Size (idx) != FAN_ENTRIES) // Check for list of the right length. return PyErr_Format (PyExc_TypeError, "idx must contain %d entries", FAN_ENTRIES); const char idx_header[] = "\377tOc\0\0\0\002"; memcpy (fmap, idx_header, sizeof(idx_header) - 1); fan_ptr = (uint32_t *)&fmap[sizeof(idx_header) - 1]; sha_ptr = (struct sha *)&fan_ptr[FAN_ENTRIES]; crc_ptr = (uint32_t *)&sha_ptr[total]; ofs_ptr = (uint32_t *)&crc_ptr[total]; ofs64_ptr = (uint64_t *)&ofs_ptr[total]; count = 0; ofs64_count = 0; for (i = 0; i < FAN_ENTRIES; ++i) { int plen; part = PyList_GET_ITEM(idx, i); PyList_Sort(part); plen = PyList_GET_SIZE(part); count += plen; *fan_ptr++ = htonl(count); for (j = 0; j < plen; ++j) { unsigned char *sha = NULL; Py_ssize_t sha_len = 0; unsigned int crc = 0; unsigned PY_LONG_LONG ofs_py = 0; uint64_t ofs; if (!PyArg_ParseTuple(PyList_GET_ITEM(part, j), "t#IK", &sha, &sha_len, &crc, &ofs_py)) return NULL; assert(crc <= UINT32_MAX); assert(ofs_py <= UINT64_MAX); ofs = ofs_py; if (sha_len != sizeof(struct sha)) return NULL; memcpy(sha_ptr++, sha, sizeof(struct sha)); *crc_ptr++ = htonl(crc); if (ofs > 0x7fffffff) { *ofs64_ptr++ = htonll(ofs); ofs = 0x80000000 | ofs64_count++; } *ofs_ptr++ = htonl((uint32_t)ofs); } } int rc = msync(fmap, flen, MS_ASYNC); if (rc != 0) return PyErr_SetFromErrnoWithFilename(PyExc_IOError, filename); return PyLong_FromUnsignedLong(count); } // I would have made this a lower-level function that just fills in a buffer // with random values, and then written those values from python. But that's // about 20% slower in my tests, and since we typically generate random // numbers for benchmarking other parts of bup, any slowness in generating // random bytes will make our benchmarks inaccurate. 
Plus nobody wants // pseudorandom bytes much except for this anyway. static PyObject *write_random(PyObject *self, PyObject *args) { uint32_t buf[1024/4]; int fd = -1, seed = 0, verbose = 0; ssize_t ret; long long len = 0, kbytes = 0, written = 0; if (!PyArg_ParseTuple(args, "iLii", &fd, &len, &seed, &verbose)) return NULL; srandom(seed); for (kbytes = 0; kbytes < len/1024; kbytes++) { unsigned i; for (i = 0; i < sizeof(buf)/sizeof(buf[0]); i++) buf[i] = random(); ret = write(fd, buf, sizeof(buf)); if (ret < 0) ret = 0; written += ret; if (ret < (int)sizeof(buf)) break; if (verbose && kbytes/1024 > 0 && !(kbytes%1024)) fprintf(stderr, "Random: %lld Mbytes\r", kbytes/1024); } // handle non-multiples of 1024 if (len % 1024) { unsigned i; for (i = 0; i < sizeof(buf)/sizeof(buf[0]); i++) buf[i] = random(); ret = write(fd, buf, len % 1024); if (ret < 0) ret = 0; written += ret; } if (kbytes/1024 > 0) fprintf(stderr, "Random: %lld Mbytes, done.\n", kbytes/1024); return Py_BuildValue("L", written); } static PyObject *random_sha(PyObject *self, PyObject *args) { static int seeded = 0; uint32_t shabuf[20/4]; int i; if (!seeded) { assert(sizeof(shabuf) == 20); srandom(time(NULL)); seeded = 1; } if (!PyArg_ParseTuple(args, "")) return NULL; memset(shabuf, 0, sizeof(shabuf)); for (i=0; i < 20/4; i++) shabuf[i] = random(); return Py_BuildValue("s#", shabuf, 20); } static int _open_noatime(const char *filename, int attrs) { int attrs_noatime, fd; attrs |= O_RDONLY; #ifdef O_NOFOLLOW attrs |= O_NOFOLLOW; #endif #ifdef O_LARGEFILE attrs |= O_LARGEFILE; #endif attrs_noatime = attrs; #ifdef O_NOATIME attrs_noatime |= O_NOATIME; #endif fd = open(filename, attrs_noatime); if (fd < 0 && errno == EPERM) { // older Linux kernels would return EPERM if you used O_NOATIME // and weren't the file's owner. This pointless restriction was // relaxed eventually, but we have to handle it anyway. 
// (VERY old kernels didn't recognized O_NOATIME, but they would // just harmlessly ignore it, so this branch won't trigger) fd = open(filename, attrs); } return fd; } static PyObject *open_noatime(PyObject *self, PyObject *args) { char *filename = NULL; int fd; if (!PyArg_ParseTuple(args, "s", &filename)) return NULL; fd = _open_noatime(filename, 0); if (fd < 0) return PyErr_SetFromErrnoWithFilename(PyExc_OSError, filename); return Py_BuildValue("i", fd); } static PyObject *fadvise_done(PyObject *self, PyObject *args) { int fd = -1; long long ofs = 0; if (!PyArg_ParseTuple(args, "iL", &fd, &ofs)) return NULL; #ifdef POSIX_FADV_DONTNEED posix_fadvise(fd, 0, ofs, POSIX_FADV_DONTNEED); #endif return Py_BuildValue(""); } #ifdef BUP_HAVE_FILE_ATTRS static PyObject *bup_get_linux_file_attr(PyObject *self, PyObject *args) { int rc; unsigned int attr; char *path; int fd; if (!PyArg_ParseTuple(args, "s", &path)) return NULL; fd = _open_noatime(path, O_NONBLOCK); if (fd == -1) return PyErr_SetFromErrnoWithFilename(PyExc_OSError, path); attr = 0; rc = ioctl(fd, FS_IOC_GETFLAGS, &attr); if (rc == -1) { close(fd); return PyErr_SetFromErrnoWithFilename(PyExc_OSError, path); } close(fd); return Py_BuildValue("I", attr); } #endif /* def BUP_HAVE_FILE_ATTRS */ #ifdef BUP_HAVE_FILE_ATTRS static PyObject *bup_set_linux_file_attr(PyObject *self, PyObject *args) { int rc; unsigned int orig_attr, attr; char *path; int fd; if (!PyArg_ParseTuple(args, "sI", &path, &attr)) return NULL; fd = open(path, O_RDONLY | O_NONBLOCK | O_LARGEFILE | O_NOFOLLOW); if (fd == -1) return PyErr_SetFromErrnoWithFilename(PyExc_OSError, path); // Restrict attr to modifiable flags acdeijstuADST -- see // chattr(1) and the e2fsprogs source. Letter to flag mapping is // in pf.c flags_array[]. 
attr &= FS_APPEND_FL | FS_COMPR_FL | FS_NODUMP_FL | FS_EXTENT_FL | FS_IMMUTABLE_FL | FS_JOURNAL_DATA_FL | FS_SECRM_FL | FS_NOTAIL_FL | FS_UNRM_FL | FS_NOATIME_FL | FS_DIRSYNC_FL | FS_SYNC_FL | FS_TOPDIR_FL | FS_NOCOW_FL; // The extents flag can't be removed, so don't (see chattr(1) and chattr.c). rc = ioctl(fd, FS_IOC_GETFLAGS, &orig_attr); if (rc == -1) { close(fd); return PyErr_SetFromErrnoWithFilename(PyExc_OSError, path); } attr |= (orig_attr & FS_EXTENT_FL); rc = ioctl(fd, FS_IOC_SETFLAGS, &attr); if (rc == -1) { close(fd); return PyErr_SetFromErrnoWithFilename(PyExc_OSError, path); } close(fd); return Py_BuildValue("O", Py_None); } #endif /* def BUP_HAVE_FILE_ATTRS */ #if defined(HAVE_UTIMENSAT) || defined(HAVE_FUTIMES) || defined(HAVE_LUTIMES) static int bup_parse_xutime_args(char **path, long *access, long *access_ns, long *modification, long *modification_ns, PyObject *self, PyObject *args) { if (!PyArg_ParseTuple(args, "s((ll)(ll))", path, access, access_ns, modification, modification_ns)) return 0; if (isnan(*access)) { PyErr_SetString(PyExc_ValueError, "access time is NaN"); return 0; } else if (isinf(*access)) { PyErr_SetString(PyExc_ValueError, "access time is infinite"); return 0; } else if (isnan(*modification)) { PyErr_SetString(PyExc_ValueError, "modification time is NaN"); return 0; } else if (isinf(*modification)) { PyErr_SetString(PyExc_ValueError, "modification time is infinite"); return 0; } if (isnan(*access_ns)) { PyErr_SetString(PyExc_ValueError, "access time ns is NaN"); return 0; } else if (isinf(*access_ns)) { PyErr_SetString(PyExc_ValueError, "access time ns is infinite"); return 0; } else if (isnan(*modification_ns)) { PyErr_SetString(PyExc_ValueError, "modification time ns is NaN"); return 0; } else if (isinf(*modification_ns)) { PyErr_SetString(PyExc_ValueError, "modification time ns is infinite"); return 0; } return 1; } #endif /* defined(HAVE_UTIMENSAT) || defined(HAVE_FUTIMES) || defined(HAVE_LUTIMES) */ #ifdef HAVE_UTIMENSAT 
static PyObject *bup_xutime_ns(PyObject *self, PyObject *args, int follow_symlinks) { int rc; char *path; long access, access_ns, modification, modification_ns; struct timespec ts[2]; if (!bup_parse_xutime_args(&path, &access, &access_ns, &modification, &modification_ns, self, args)) return NULL; ts[0].tv_sec = access; ts[0].tv_nsec = access_ns; ts[1].tv_sec = modification; ts[1].tv_nsec = modification_ns; rc = utimensat(AT_FDCWD, path, ts, follow_symlinks ? 0 : AT_SYMLINK_NOFOLLOW); if (rc != 0) return PyErr_SetFromErrnoWithFilename(PyExc_OSError, path); return Py_BuildValue("O", Py_None); } #define BUP_HAVE_BUP_UTIME_NS 1 static PyObject *bup_utime_ns(PyObject *self, PyObject *args) { return bup_xutime_ns(self, args, 1); } #define BUP_HAVE_BUP_LUTIME_NS 1 static PyObject *bup_lutime_ns(PyObject *self, PyObject *args) { return bup_xutime_ns(self, args, 0); } #else /* not defined(HAVE_UTIMENSAT) */ #ifdef HAVE_UTIMES #define BUP_HAVE_BUP_UTIME_NS 1 static PyObject *bup_utime_ns(PyObject *self, PyObject *args) { int rc; char *path; long access, access_ns, modification, modification_ns; struct timeval tv[2]; if (!bup_parse_xutime_args(&path, &access, &access_ns, &modification, &modification_ns, self, args)) return NULL; tv[0].tv_sec = access; tv[0].tv_usec = access_ns / 1000; tv[1].tv_sec = modification; tv[1].tv_usec = modification_ns / 1000; rc = utimes(path, tv); if (rc != 0) return PyErr_SetFromErrnoWithFilename(PyExc_OSError, path); return Py_BuildValue("O", Py_None); } #endif /* def HAVE_UTIMES */ #ifdef HAVE_LUTIMES #define BUP_HAVE_BUP_LUTIME_NS 1 static PyObject *bup_lutime_ns(PyObject *self, PyObject *args) { int rc; char *path; long access, access_ns, modification, modification_ns; struct timeval tv[2]; if (!bup_parse_xutime_args(&path, &access, &access_ns, &modification, &modification_ns, self, args)) return NULL; tv[0].tv_sec = access; tv[0].tv_usec = access_ns / 1000; tv[1].tv_sec = modification; tv[1].tv_usec = modification_ns / 1000; rc = 
lutimes(path, tv); if (rc != 0) return PyErr_SetFromErrnoWithFilename(PyExc_OSError, path); return Py_BuildValue("O", Py_None); } #endif /* def HAVE_LUTIMES */ #endif /* not defined(HAVE_UTIMENSAT) */ #ifdef HAVE_STAT_ST_ATIM # define BUP_STAT_ATIME_NS(st) (st)->st_atim.tv_nsec # define BUP_STAT_MTIME_NS(st) (st)->st_mtim.tv_nsec # define BUP_STAT_CTIME_NS(st) (st)->st_ctim.tv_nsec #elif defined HAVE_STAT_ST_ATIMENSEC # define BUP_STAT_ATIME_NS(st) (st)->st_atimespec.tv_nsec # define BUP_STAT_MTIME_NS(st) (st)->st_mtimespec.tv_nsec # define BUP_STAT_CTIME_NS(st) (st)->st_ctimespec.tv_nsec #else # define BUP_STAT_ATIME_NS(st) 0 # define BUP_STAT_MTIME_NS(st) 0 # define BUP_STAT_CTIME_NS(st) 0 #endif static void set_invalid_timespec_msg(const char *field, const long long sec, const long nsec, const char *filename, int fd) { if (filename != NULL) PyErr_Format(PyExc_ValueError, "invalid %s timespec (%lld %ld) for file \"%s\"", field, sec, nsec, filename); else PyErr_Format(PyExc_ValueError, "invalid %s timespec (%lld %ld) for file descriptor %d", field, sec, nsec, fd); } static int normalize_timespec_values(const char *name, long long *sec, long *nsec, const char *filename, int fd) { if (*nsec < -999999999 || *nsec > 999999999) { set_invalid_timespec_msg(name, *sec, *nsec, filename, fd); return 0; } if (*nsec < 0) { if (*sec == LONG_MIN) { set_invalid_timespec_msg(name, *sec, *nsec, filename, fd); return 0; } *nsec += 1000000000; *sec -= 1; } return 1; } #define INTEGER_TO_PY(x) \ (((x) >= 0) ? 
PyLong_FromUnsignedLongLong(x) : PyLong_FromLongLong(x)) static PyObject *stat_struct_to_py(const struct stat *st, const char *filename, int fd) { long long atime = st->st_atime; long long mtime = st->st_mtime; long long ctime = st->st_ctime; long atime_ns = BUP_STAT_ATIME_NS(st); long mtime_ns = BUP_STAT_MTIME_NS(st); long ctime_ns = BUP_STAT_CTIME_NS(st); if (!normalize_timespec_values("atime", &atime, &atime_ns, filename, fd)) return NULL; if (!normalize_timespec_values("mtime", &mtime, &mtime_ns, filename, fd)) return NULL; if (!normalize_timespec_values("ctime", &ctime, &ctime_ns, filename, fd)) return NULL; // We can check the known (via POSIX) signed and unsigned types at // compile time, but not (easily) the unspecified types, so handle // those via INTEGER_TO_PY(). return Py_BuildValue("OKOOOOOL(Ll)(Ll)(Ll)", INTEGER_TO_PY(st->st_mode), (unsigned PY_LONG_LONG) st->st_ino, INTEGER_TO_PY(st->st_dev), INTEGER_TO_PY(st->st_nlink), INTEGER_TO_PY(st->st_uid), INTEGER_TO_PY(st->st_gid), INTEGER_TO_PY(st->st_rdev), (PY_LONG_LONG) st->st_size, (PY_LONG_LONG) atime, (long) atime_ns, (PY_LONG_LONG) mtime, (long) mtime_ns, (PY_LONG_LONG) ctime, (long) ctime_ns); } static PyObject *bup_stat(PyObject *self, PyObject *args) { int rc; char *filename; if (!PyArg_ParseTuple(args, "s", &filename)) return NULL; struct stat st; rc = stat(filename, &st); if (rc != 0) return PyErr_SetFromErrnoWithFilename(PyExc_OSError, filename); return stat_struct_to_py(&st, filename, 0); } static PyObject *bup_lstat(PyObject *self, PyObject *args) { int rc; char *filename; if (!PyArg_ParseTuple(args, "s", &filename)) return NULL; struct stat st; rc = lstat(filename, &st); if (rc != 0) return PyErr_SetFromErrnoWithFilename(PyExc_OSError, filename); return stat_struct_to_py(&st, filename, 0); } static PyObject *bup_fstat(PyObject *self, PyObject *args) { int rc, fd; if (!PyArg_ParseTuple(args, "i", &fd)) return NULL; struct stat st; rc = fstat(fd, &st); if (rc != 0) return 
PyErr_SetFromErrno(PyExc_OSError); return stat_struct_to_py(&st, NULL, fd); } static PyMethodDef helper_methods[] = { { "selftest", selftest, METH_VARARGS, "Check that the rolling checksum rolls correctly (for unit tests)." }, { "blobbits", blobbits, METH_VARARGS, "Return the number of bits in the rolling checksum." }, { "splitbuf", splitbuf, METH_VARARGS, "Split a list of strings based on a rolling checksum." }, { "bitmatch", bitmatch, METH_VARARGS, "Count the number of matching prefix bits between two strings." }, { "firstword", firstword, METH_VARARGS, "Return an int corresponding to the first 32 bits of buf." }, { "bloom_contains", bloom_contains, METH_VARARGS, "Check if a bloom filter of 2^nbits bytes contains an object" }, { "bloom_add", bloom_add, METH_VARARGS, "Add an object to a bloom filter of 2^nbits bytes" }, { "extract_bits", extract_bits, METH_VARARGS, "Take the first 'nbits' bits from 'buf' and return them as an int." }, { "merge_into", merge_into, METH_VARARGS, "Merges a bunch of idx and midx files into a single midx." }, { "write_idx", write_idx, METH_VARARGS, "Write a PackIdxV2 file from an idx list of lists of tuples" }, { "write_random", write_random, METH_VARARGS, "Write random bytes to the given file descriptor" }, { "random_sha", random_sha, METH_VARARGS, "Return a random 20-byte string" }, { "open_noatime", open_noatime, METH_VARARGS, "open() the given filename for read with O_NOATIME if possible" }, { "fadvise_done", fadvise_done, METH_VARARGS, "Inform the kernel that we're finished with earlier parts of a file" }, #ifdef BUP_HAVE_FILE_ATTRS { "get_linux_file_attr", bup_get_linux_file_attr, METH_VARARGS, "Return the Linux attributes for the given file." }, #endif #ifdef BUP_HAVE_FILE_ATTRS { "set_linux_file_attr", bup_set_linux_file_attr, METH_VARARGS, "Set the Linux attributes for the given file." }, #endif #ifdef BUP_HAVE_BUP_UTIME_NS { "bup_utime_ns", bup_utime_ns, METH_VARARGS, "Change path timestamps with up to nanosecond precision." 
}, #endif #ifdef BUP_HAVE_BUP_LUTIME_NS { "bup_lutime_ns", bup_lutime_ns, METH_VARARGS, "Change path timestamps with up to nanosecond precision;" " don't follow symlinks." }, #endif { "stat", bup_stat, METH_VARARGS, "Extended version of stat." }, { "lstat", bup_lstat, METH_VARARGS, "Extended version of lstat." }, { "fstat", bup_fstat, METH_VARARGS, "Extended version of fstat." }, { NULL, NULL, 0, NULL }, // sentinel }; PyMODINIT_FUNC init_helpers(void) { // FIXME: migrate these tests to configure. Check against the // type we're going to use when passing to python. Other stat // types are tested at runtime. assert(sizeof(ino_t) <= sizeof(unsigned PY_LONG_LONG)); assert(sizeof(off_t) <= sizeof(PY_LONG_LONG)); assert(sizeof(blksize_t) <= sizeof(PY_LONG_LONG)); assert(sizeof(blkcnt_t) <= sizeof(PY_LONG_LONG)); // Just be sure (relevant when passing timestamps back to Python above). assert(sizeof(PY_LONG_LONG) <= sizeof(long long)); char *e; PyObject *m = Py_InitModule("_helpers", helper_methods); if (m == NULL) return; e = getenv("BUP_FORCE_TTY"); istty2 = isatty(2) || (atoi(e ? 
e : "0") & 2); unpythonize_argv(); } bup-0.25/lib/bup/_version.py.pre000066400000000000000000000002551225146730500165540ustar00rootroot00000000000000 COMMIT='5a7fd007e89bb343ab2024d9a87d54bbee997bbf' NAMES=' (tag: 0.25-rc5, tag: 0.25, upstream/master, origin/master, origin/HEAD, master)' DATE='2013-12-09 19:30:45 -0600' bup-0.25/lib/bup/bloom.py000066400000000000000000000247011225146730500152550ustar00rootroot00000000000000"""Discussion of bloom constants for bup: There are four basic things to consider when building a bloom filter: The size, in bits, of the filter The capacity, in entries, of the filter The probability of a false positive that is tolerable The number of bits readily available to use for addressing filter bits There is one major tunable that is not directly related to the above: k: the number of bits set in the filter per entry Here's a wall of numbers showing the relationship between k; the ratio between the filter size in bits and the entries in the filter; and pfalse_positive: mn|k=3 |k=4 |k=5 |k=6 |k=7 |k=8 |k=9 |k=10 |k=11 8|3.05794|2.39687|2.16792|2.15771|2.29297|2.54917|2.92244|3.41909|4.05091 9|2.27780|1.65770|1.40703|1.32721|1.34892|1.44631|1.61138|1.84491|2.15259 10|1.74106|1.18133|0.94309|0.84362|0.81937|0.84555|0.91270|1.01859|1.16495 11|1.36005|0.86373|0.65018|0.55222|0.51259|0.50864|0.53098|0.57616|0.64387 12|1.08231|0.64568|0.45945|0.37108|0.32939|0.31424|0.31695|0.33387|0.36380 13|0.87517|0.49210|0.33183|0.25527|0.21689|0.19897|0.19384|0.19804|0.21013 14|0.71759|0.38147|0.24433|0.17934|0.14601|0.12887|0.12127|0.12012|0.12399 15|0.59562|0.30019|0.18303|0.12840|0.10028|0.08523|0.07749|0.07440|0.07468 16|0.49977|0.23941|0.13925|0.09351|0.07015|0.05745|0.05049|0.04700|0.04587 17|0.42340|0.19323|0.10742|0.06916|0.04990|0.03941|0.03350|0.03024|0.02870 18|0.36181|0.15765|0.08392|0.05188|0.03604|0.02748|0.02260|0.01980|0.01827 19|0.31160|0.12989|0.06632|0.03942|0.02640|0.01945|0.01549|0.01317|0.01182 
20|0.27026|0.10797|0.05296|0.03031|0.01959|0.01396|0.01077|0.00889|0.00777 21|0.23591|0.09048|0.04269|0.02356|0.01471|0.01014|0.00759|0.00609|0.00518 22|0.20714|0.07639|0.03473|0.01850|0.01117|0.00746|0.00542|0.00423|0.00350 23|0.18287|0.06493|0.02847|0.01466|0.00856|0.00555|0.00392|0.00297|0.00240 24|0.16224|0.05554|0.02352|0.01171|0.00663|0.00417|0.00286|0.00211|0.00166 25|0.14459|0.04779|0.01957|0.00944|0.00518|0.00316|0.00211|0.00152|0.00116 26|0.12942|0.04135|0.01639|0.00766|0.00408|0.00242|0.00157|0.00110|0.00082 27|0.11629|0.03595|0.01381|0.00626|0.00324|0.00187|0.00118|0.00081|0.00059 28|0.10489|0.03141|0.01170|0.00515|0.00259|0.00146|0.00090|0.00060|0.00043 29|0.09492|0.02756|0.00996|0.00426|0.00209|0.00114|0.00069|0.00045|0.00031 30|0.08618|0.02428|0.00853|0.00355|0.00169|0.00090|0.00053|0.00034|0.00023 31|0.07848|0.02147|0.00733|0.00297|0.00138|0.00072|0.00041|0.00025|0.00017 32|0.07167|0.01906|0.00633|0.00250|0.00113|0.00057|0.00032|0.00019|0.00013 Here's a table showing available repository size for a given pfalse_positive and three values of k (assuming we only use the 160 bit SHA1 for addressing the filter and 8192bytes per object): pfalse|obj k=4 |cap k=4 |obj k=5 |cap k=5 |obj k=6 |cap k=6 2.500%|139333497228|1038.11 TiB|558711157|4262.63 GiB|13815755|105.41 GiB 1.000%|104489450934| 778.50 TiB|436090254|3327.10 GiB|11077519| 84.51 GiB 0.125%| 57254889824| 426.58 TiB|261732190|1996.86 GiB| 7063017| 55.89 GiB This eliminates pretty neatly any k>6 as long as we use the raw SHA for addressing. filter size scales linearly with repository size for a given k and pfalse. 
Here's a table of filter sizes for a 1 TiB repository: pfalse| k=3 | k=4 | k=5 | k=6 2.500%| 138.78 MiB | 126.26 MiB | 123.00 MiB | 123.37 MiB 1.000%| 197.83 MiB | 168.36 MiB | 157.58 MiB | 153.87 MiB 0.125%| 421.14 MiB | 307.26 MiB | 262.56 MiB | 241.32 MiB For bup: * We want the bloom filter to fit in memory; if it doesn't, the k pagefaults per lookup will be worse than the two required for midx. * We want the pfalse_positive to be low enough that the cost of sometimes faulting on the midx doesn't overcome the benefit of the bloom filter. * We have readily available 160 bits for addressing the filter. * We want to be able to have a single bloom address entire repositories of reasonable size. Based on these parameters, a combination of k=4 and k=5 provides the behavior that bup needs. As such, I've implemented bloom addressing, adding and checking functions in C for these two values. Because k=5 requires less space and gives better overall pfalse_positive performance, it is preferred if a table with k=5 can represent the repository. None of this tells us what max_pfalse_positive to choose. Brandon Low 2011-02-04 """ import sys, os, math, mmap from bup import _helpers from bup.helpers import * BLOOM_VERSION = 2 MAX_BITS_EACH = 32 # Kinda arbitrary, but 4 bytes per entry is pretty big MAX_BLOOM_BITS = {4: 37, 5: 29} # 160/k-log2(8) MAX_PFALSE_POSITIVE = 1. # Totally arbitrary, needs benchmarking _total_searches = 0 _total_steps = 0 bloom_contains = _helpers.bloom_contains bloom_add = _helpers.bloom_add # FIXME: check bloom create() and ShaBloom handling/ownership of "f". # The ownership semantics should be clarified since the caller needs # to know who is responsible for closing it. class ShaBloom: """Wrapper which contains data from multiple index files. 
""" def __init__(self, filename, f=None, readwrite=False, expected=-1): self.name = filename self.rwfile = None self.map = None assert(filename.endswith('.bloom')) if readwrite: assert(expected > 0) self.rwfile = f = f or open(filename, 'r+b') f.seek(0) # Decide if we want to mmap() the pages as writable ('immediate' # write) or else map them privately for later writing back to # the file ('delayed' write). A bloom table's write access # pattern is such that we dirty almost all the pages after adding # very few entries. But the table is so big that dirtying # *all* the pages often exceeds Linux's default # /proc/sys/vm/dirty_ratio or /proc/sys/vm/dirty_background_ratio, # thus causing it to start flushing the table before we're # finished... even though there's more than enough space to # store the bloom table in RAM. # # To work around that behaviour, if we calculate that we'll # probably end up touching the whole table anyway (at least # one bit flipped per memory page), let's use a "private" mmap, # which defeats Linux's ability to flush it to disk. Then we'll # flush it as one big lump during close(). 
pages = os.fstat(f.fileno()).st_size / 4096 * 5 # assume k=5 self.delaywrite = expected > pages debug1('bloom: delaywrite=%r\n' % self.delaywrite) if self.delaywrite: self.map = mmap_readwrite_private(self.rwfile, close=False) else: self.map = mmap_readwrite(self.rwfile, close=False) else: self.rwfile = None f = f or open(filename, 'rb') self.map = mmap_read(f) got = str(self.map[0:4]) if got != 'BLOM': log('Warning: invalid BLOM header (%r) in %r\n' % (got, filename)) return self._init_failed() ver = struct.unpack('!I', self.map[4:8])[0] if ver < BLOOM_VERSION: log('Warning: ignoring old-style (v%d) bloom %r\n' % (ver, filename)) return self._init_failed() if ver > BLOOM_VERSION: log('Warning: ignoring too-new (v%d) bloom %r\n' % (ver, filename)) return self._init_failed() self.bits, self.k, self.entries = struct.unpack('!HHI', self.map[8:16]) idxnamestr = str(self.map[16 + 2**self.bits:]) if idxnamestr: self.idxnames = idxnamestr.split('\0') else: self.idxnames = [] def _init_failed(self): if self.map: self.map = None if self.rwfile: self.rwfile.close() self.rwfile = None self.idxnames = [] self.bits = self.entries = 0 def valid(self): return self.map and self.bits def __del__(self): self.close() def close(self): if self.map and self.rwfile: debug2("bloom: closing with %d entries\n" % self.entries) self.map[12:16] = struct.pack('!I', self.entries) if self.delaywrite: self.rwfile.seek(0) self.rwfile.write(self.map) else: self.map.flush() self.rwfile.seek(16 + 2**self.bits) if self.idxnames: self.rwfile.write('\0'.join(self.idxnames)) self._init_failed() def pfalse_positive(self, additional=0): n = self.entries + additional m = 8*2**self.bits k = self.k return 100*(1-math.exp(-k*float(n)/m))**k def add_idx(self, ix): """Add the object to the filter, return current pfalse_positive.""" if not self.map: raise Exception("Cannot add to closed bloom") self.entries += bloom_add(self.map, ix.shatable, self.bits, self.k) self.idxnames.append(os.path.basename(ix.name)) def 
exists(self, sha): """Return nonempty if the object probably exists in the bloom filter. If this function returns false, the object definitely does not exist. If it returns true, there is a small probability that it exists anyway, so you'll have to check it some other way. """ global _total_searches, _total_steps _total_searches += 1 if not self.map: return None found, steps = bloom_contains(self.map, str(sha), self.bits, self.k) _total_steps += steps return found def __len__(self): return int(self.entries) def create(name, expected, delaywrite=None, f=None, k=None): """Create and return a bloom filter for `expected` entries.""" bits = int(math.floor(math.log(expected*MAX_BITS_EACH/8,2))) k = k or ((bits <= MAX_BLOOM_BITS[5]) and 5 or 4) if bits > MAX_BLOOM_BITS[k]: log('bloom: warning, max bits exceeded, non-optimal\n') bits = MAX_BLOOM_BITS[k] debug1('bloom: using 2^%d bytes and %d hash functions\n' % (bits, k)) f = f or open(name, 'w+b') f.write('BLOM') f.write(struct.pack('!IHHI', BLOOM_VERSION, bits, k, 0)) assert(f.tell() == 16) # NOTE: On some systems this will not extend+zerofill, but it does on # darwin, linux, bsd and solaris. f.truncate(16+2**bits) f.seek(0) if delaywrite != None and not delaywrite: # tell it to expect very few objects, forcing a direct mmap expected = 1 return ShaBloom(name, f=f, readwrite=True, expected=expected) bup-0.25/lib/bup/bupsplit.c000066400000000000000000000111461225146730500156000ustar00rootroot00000000000000/* * Copyright 2011 Avery Pennarun. All rights reserved. * * (This license applies to bupsplit.c and bupsplit.h only.) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY AVERY PENNARUN ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "bupsplit.h" #include #include #include #include // According to librsync/rollsum.h: // "We should make this something other than zero to improve the // checksum algorithm: tridge suggests a prime number." // apenwarr: I unscientifically tried 0 and 7919, and they both ended up // slightly worse than the librsync value of 31 for my arbitrary test data. #define ROLLSUM_CHAR_OFFSET 31 typedef struct { unsigned s1, s2; uint8_t window[BUP_WINDOWSIZE]; int wofs; } Rollsum; // These formulas are based on rollsum.h in the librsync project. static void rollsum_add(Rollsum *r, uint8_t drop, uint8_t add) { r->s1 += add - drop; r->s2 += r->s1 - (BUP_WINDOWSIZE * (drop + ROLLSUM_CHAR_OFFSET)); } static void rollsum_init(Rollsum *r) { r->s1 = BUP_WINDOWSIZE * ROLLSUM_CHAR_OFFSET; r->s2 = BUP_WINDOWSIZE * (BUP_WINDOWSIZE-1) * ROLLSUM_CHAR_OFFSET; r->wofs = 0; memset(r->window, 0, BUP_WINDOWSIZE); } // For some reason, gcc 4.3 (at least) optimizes badly if find_ofs() // is static and rollsum_roll is an inline function. 
Let's use a macro // here instead to help out the optimizer. #define rollsum_roll(r, ch) do { \ rollsum_add((r), (r)->window[(r)->wofs], (ch)); \ (r)->window[(r)->wofs] = (ch); \ (r)->wofs = ((r)->wofs + 1) % BUP_WINDOWSIZE; \ } while (0) static uint32_t rollsum_digest(Rollsum *r) { return (r->s1 << 16) | (r->s2 & 0xffff); } static uint32_t rollsum_sum(uint8_t *buf, size_t ofs, size_t len) { size_t count; Rollsum r; rollsum_init(&r); for (count = ofs; count < len; count++) rollsum_roll(&r, buf[count]); return rollsum_digest(&r); } int bupsplit_find_ofs(const unsigned char *buf, int len, int *bits) { Rollsum r; int count; rollsum_init(&r); for (count = 0; count < len; count++) { rollsum_roll(&r, buf[count]); if ((r.s2 & (BUP_BLOBSIZE-1)) == ((~0) & (BUP_BLOBSIZE-1))) { if (bits) { unsigned rsum = rollsum_digest(&r); rsum >>= BUP_BLOBBITS; for (*bits = BUP_BLOBBITS; (rsum >>= 1) & 1; (*bits)++) ; } return count+1; } } return 0; } #ifndef BUP_NO_SELFTEST #define BUP_SELFTEST_SIZE 100000 int bupsplit_selftest() { uint8_t *buf = malloc(BUP_SELFTEST_SIZE); uint32_t sum1a, sum1b, sum2a, sum2b, sum3a, sum3b; unsigned count; srandom(1); for (count = 0; count < BUP_SELFTEST_SIZE; count++) buf[count] = random(); sum1a = rollsum_sum(buf, 0, BUP_SELFTEST_SIZE); sum1b = rollsum_sum(buf, 1, BUP_SELFTEST_SIZE); sum2a = rollsum_sum(buf, BUP_SELFTEST_SIZE - BUP_WINDOWSIZE*5/2, BUP_SELFTEST_SIZE - BUP_WINDOWSIZE); sum2b = rollsum_sum(buf, 0, BUP_SELFTEST_SIZE - BUP_WINDOWSIZE); sum3a = rollsum_sum(buf, 0, BUP_WINDOWSIZE+3); sum3b = rollsum_sum(buf, 3, BUP_WINDOWSIZE+3); fprintf(stderr, "sum1a = 0x%08x\n", sum1a); fprintf(stderr, "sum1b = 0x%08x\n", sum1b); fprintf(stderr, "sum2a = 0x%08x\n", sum2a); fprintf(stderr, "sum2b = 0x%08x\n", sum2b); fprintf(stderr, "sum3a = 0x%08x\n", sum3a); fprintf(stderr, "sum3b = 0x%08x\n", sum3b); free(buf); return sum1a!=sum1b || sum2a!=sum2b || sum3a!=sum3b; } #endif // !BUP_NO_SELFTEST 
bup-0.25/lib/bup/bupsplit.h000066400000000000000000000034371225146730500156110ustar00rootroot00000000000000/* * Copyright 2011 Avery Pennarun. All rights reserved. * * (This license applies to bupsplit.c and bupsplit.h only.) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY AVERY PENNARUN ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef __BUPSPLIT_H #define __BUPSPLIT_H #define BUP_BLOBBITS (13) #define BUP_BLOBSIZE (1<\[)?((?(sb)[0-9a-f:]+|[^:/]+))(?(sb)\])' port = r'(?::(\d+))?' path = r'(/.*)?' 
url_match = re.match( '%s(?:%s%s)?%s' % (protocol, host, port, path), remote, re.I) if url_match: if not url_match.group(1) in ('ssh', 'bup', 'file'): raise ClientError, 'unexpected protocol: %s' % url_match.group(1) return url_match.group(1,3,4,5) else: rs = remote.split(':', 1) if len(rs) == 1 or rs[0] in ('', '-'): return 'file', None, None, rs[-1] else: return 'ssh', rs[0], None, rs[1] class Client: def __init__(self, remote, create=False, compression_level=1): self._busy = self.conn = None self.sock = self.p = self.pout = self.pin = None self.compression_level = compression_level is_reverse = os.environ.get('BUP_SERVER_REVERSE') if is_reverse: assert(not remote) remote = '%s:' % is_reverse (self.protocol, self.host, self.port, self.dir) = parse_remote(remote) self.cachedir = git.repo('index-cache/%s' % re.sub(r'[^@\w]', '_', "%s:%s" % (self.host, self.dir))) if is_reverse: self.pout = os.fdopen(3, 'rb') self.pin = os.fdopen(4, 'wb') self.conn = Conn(self.pout, self.pin) else: if self.protocol in ('ssh', 'file'): try: # FIXME: ssh and file shouldn't use the same module self.p = ssh.connect(self.host, self.port, 'server') self.pout = self.p.stdout self.pin = self.p.stdin self.conn = Conn(self.pout, self.pin) except OSError, e: raise ClientError, 'connect: %s' % e, sys.exc_info()[2] elif self.protocol == 'bup': self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.connect((self.host, atoi(self.port) or 1982)) self.sockw = self.sock.makefile('wb') self.conn = DemuxConn(self.sock.fileno(), self.sockw) if self.dir: self.dir = re.sub(r'[\r\n]', ' ', self.dir) if create: self.conn.write('init-dir %s\n' % self.dir) else: self.conn.write('set-dir %s\n' % self.dir) self.check_ok() self.sync_indexes() def __del__(self): try: self.close() except IOError, e: if e.errno == errno.EPIPE: pass else: raise def close(self): if self.conn and not self._busy: self.conn.write('quit\n') if self.pin: self.pin.close() if self.sock and self.sockw: self.sockw.close() 
self.sock.shutdown(socket.SHUT_WR) if self.conn: self.conn.close() if self.pout: self.pout.close() if self.sock: self.sock.close() if self.p: self.p.wait() rv = self.p.wait() if rv: raise ClientError('server tunnel returned exit code %d' % rv) self.conn = None self.sock = self.p = self.pin = self.pout = None def check_ok(self): if self.p: rv = self.p.poll() if rv != None: raise ClientError('server exited unexpectedly with code %r' % rv) try: return self.conn.check_ok() except Exception, e: raise ClientError, e, sys.exc_info()[2] def check_busy(self): if self._busy: raise ClientError('already busy with command %r' % self._busy) def ensure_busy(self): if not self._busy: raise ClientError('expected to be busy, but not busy?!') def _not_busy(self): self._busy = None def sync_indexes(self): self.check_busy() conn = self.conn mkdirp(self.cachedir) # All cached idxs are extra until proven otherwise extra = set() for f in os.listdir(self.cachedir): debug1('%s\n' % f) if f.endswith('.idx'): extra.add(f) needed = set() conn.write('list-indexes\n') for line in linereader(conn): if not line: break assert(line.find('/') < 0) parts = line.split(' ') idx = parts[0] if len(parts) == 2 and parts[1] == 'load' and idx not in extra: # If the server requests that we load an idx and we don't # already have a copy of it, it is needed needed.add(idx) # Any idx that the server has heard of is proven not extra extra.discard(idx) self.check_ok() debug1('client: removing extra indexes: %s\n' % extra) for idx in extra: os.unlink(os.path.join(self.cachedir, idx)) debug1('client: server requested load of: %s\n' % needed) for idx in needed: self.sync_index(idx) git.auto_midx(self.cachedir) def sync_index(self, name): #debug1('requesting %r\n' % name) self.check_busy() mkdirp(self.cachedir) fn = os.path.join(self.cachedir, name) if os.path.exists(fn): msg = "won't request existing .idx, try `bup bloom --check %s`" % fn raise ClientError(msg) self.conn.write('send-index %s\n' % name) n = 
struct.unpack('!I', self.conn.read(4))[0] assert(n) f = open(fn + '.tmp', 'w') count = 0 progress('Receiving index from server: %d/%d\r' % (count, n)) for b in chunkyreader(self.conn, n): f.write(b) count += len(b) qprogress('Receiving index from server: %d/%d\r' % (count, n)) progress('Receiving index from server: %d/%d, done.\n' % (count, n)) self.check_ok() f.close() os.rename(fn + '.tmp', fn) def _make_objcache(self): return git.PackIdxList(self.cachedir) def _suggest_packs(self): ob = self._busy if ob: assert(ob == 'receive-objects-v2') self.conn.write('\xff\xff\xff\xff') # suspend receive-objects-v2 suggested = [] for line in linereader(self.conn): if not line: break debug2('%s\n' % line) if line.startswith('index '): idx = line[6:] debug1('client: received index suggestion: %s\n' % git.shorten_hash(idx)) suggested.append(idx) else: assert(line.endswith('.idx')) debug1('client: completed writing pack, idx: %s\n' % git.shorten_hash(line)) suggested.append(line) self.check_ok() if ob: self._busy = None idx = None for idx in suggested: self.sync_index(idx) git.auto_midx(self.cachedir) if ob: self._busy = ob self.conn.write('%s\n' % ob) return idx def new_packwriter(self): self.check_busy() def _set_busy(): self._busy = 'receive-objects-v2' self.conn.write('receive-objects-v2\n') return PackWriter_Remote(self.conn, objcache_maker = self._make_objcache, suggest_packs = self._suggest_packs, onopen = _set_busy, onclose = self._not_busy, ensure_busy = self.ensure_busy, compression_level = self.compression_level) def read_ref(self, refname): self.check_busy() self.conn.write('read-ref %s\n' % refname) r = self.conn.readline().strip() self.check_ok() if r: assert(len(r) == 40) # hexified sha return r.decode('hex') else: return None # nonexistent ref def update_ref(self, refname, newval, oldval): self.check_busy() self.conn.write('update-ref %s\n%s\n%s\n' % (refname, newval.encode('hex'), (oldval or '').encode('hex'))) self.check_ok() def cat(self, id): 
self.check_busy() self._busy = 'cat' self.conn.write('cat %s\n' % re.sub(r'[\n\r]', '_', id)) while 1: sz = struct.unpack('!I', self.conn.read(4))[0] if not sz: break yield self.conn.read(sz) e = self.check_ok() self._not_busy() if e: raise KeyError(str(e)) class PackWriter_Remote(git.PackWriter): def __init__(self, conn, objcache_maker, suggest_packs, onopen, onclose, ensure_busy, compression_level=1): git.PackWriter.__init__(self, objcache_maker) self.file = conn self.filename = 'remote socket' self.suggest_packs = suggest_packs self.onopen = onopen self.onclose = onclose self.ensure_busy = ensure_busy self._packopen = False self._bwcount = 0 self._bwtime = time.time() self.compression_level = compression_level def _open(self): if not self._packopen: self.onopen() self._packopen = True def _end(self): if self._packopen and self.file: self.file.write('\0\0\0\0') self._packopen = False self.onclose() # Unbusy self.objcache = None return self.suggest_packs() # Returns last idx received def close(self): id = self._end() self.file = None return id def abort(self): raise ClientError("don't know how to abort remote pack writing") def _raw_write(self, datalist, sha): assert(self.file) if not self._packopen: self._open() self.ensure_busy() data = ''.join(datalist) assert(data) assert(sha) crc = zlib.crc32(data) & 0xffffffff outbuf = ''.join((struct.pack('!I', len(data) + 20 + 4), sha, struct.pack('!I', crc), data)) try: (self._bwcount, self._bwtime) = _raw_write_bwlimit( self.file, outbuf, self._bwcount, self._bwtime) except IOError, e: raise ClientError, e, sys.exc_info()[2] self.outbytes += len(data) self.count += 1 if self.file.has_input(): self.suggest_packs() self.objcache.refresh() return sha, crc bup-0.25/lib/bup/csetup.py000066400000000000000000000005151225146730500154450ustar00rootroot00000000000000from distutils.core import setup, Extension _helpers_mod = Extension('_helpers', sources=['_helpers.c', 'bupsplit.c'], depends=['../../config/config.h']) 
setup(name='_helpers', version='0.1', description='accelerator library for bup', ext_modules=[_helpers_mod]) bup-0.25/lib/bup/drecurse.py000066400000000000000000000076531225146730500157700ustar00rootroot00000000000000import stat, os from bup.helpers import * import bup.xstat as xstat try: O_LARGEFILE = os.O_LARGEFILE except AttributeError: O_LARGEFILE = 0 try: O_NOFOLLOW = os.O_NOFOLLOW except AttributeError: O_NOFOLLOW = 0 # the use of fchdir() and lstat() is for two reasons: # - help out the kernel by not making it repeatedly look up the absolute path # - avoid race conditions caused by doing listdir() on a changing symlink class OsFile: def __init__(self, path): self.fd = None self.fd = os.open(path, os.O_RDONLY|O_LARGEFILE|O_NOFOLLOW|os.O_NDELAY) def __del__(self): if self.fd: fd = self.fd self.fd = None os.close(fd) def fchdir(self): os.fchdir(self.fd) def stat(self): return xstat.fstat(self.fd) _IFMT = stat.S_IFMT(0xffffffff) # avoid function call in inner loop def _dirlist(): l = [] for n in os.listdir('.'): try: st = xstat.lstat(n) except OSError, e: add_error(Exception('%s: %s' % (realpath(n), str(e)))) continue if (st.st_mode & _IFMT) == stat.S_IFDIR: n += '/' l.append((n,st)) l.sort(reverse=True) return l def _recursive_dirlist(prepend, xdev, bup_dir=None, excluded_paths=None, exclude_rxs=None): for (name,pst) in _dirlist(): path = prepend + name if excluded_paths: if os.path.normpath(path) in excluded_paths: debug1('Skipping %r: excluded.\n' % path) continue if exclude_rxs and should_rx_exclude_path(path, exclude_rxs): continue if name.endswith('/'): if xdev != None and pst.st_dev != xdev: debug1('Skipping %r: different filesystem.\n' % (prepend+name)) continue if bup_dir != None: if os.path.normpath(prepend+name) == bup_dir: debug1('Skipping BUP_DIR.\n') continue try: OsFile(name).fchdir() except OSError, e: add_error('%s: %s' % (prepend, e)) else: for i in _recursive_dirlist(prepend=prepend+name, xdev=xdev, bup_dir=bup_dir, 
excluded_paths=excluded_paths, exclude_rxs=exclude_rxs): yield i os.chdir('..') yield (prepend + name, pst) def recursive_dirlist(paths, xdev, bup_dir=None, excluded_paths=None, exclude_rxs=None): startdir = OsFile('.') try: assert(type(paths) != type('')) for path in paths: try: pst = xstat.lstat(path) if stat.S_ISLNK(pst.st_mode): yield (path, pst) continue except OSError, e: add_error('recursive_dirlist: %s' % e) continue try: pfile = OsFile(path) except OSError, e: add_error(e) continue pst = pfile.stat() if xdev: xdev = pst.st_dev else: xdev = None if stat.S_ISDIR(pst.st_mode): pfile.fchdir() prepend = os.path.join(path, '') for i in _recursive_dirlist(prepend=prepend, xdev=xdev, bup_dir=bup_dir, excluded_paths=excluded_paths, exclude_rxs=exclude_rxs): yield i startdir.fchdir() else: prepend = path yield (prepend,pst) except: try: startdir.fchdir() except: pass raise bup-0.25/lib/bup/git.py000066400000000000000000001040471225146730500147320ustar00rootroot00000000000000"""Git interaction library. bup repositories are in Git format. This library allows us to interact with the Git data structures. """ import os, sys, zlib, time, subprocess, struct, stat, re, tempfile, glob from bup.helpers import * from bup import _helpers, path, midx, bloom, xstat max_pack_size = 1000*1000*1000 # larger packs will slow down pruning max_pack_objects = 200*1000 # cache memory usage is about 83 bytes per object verbose = 0 ignore_midx = 0 home_repodir = os.path.expanduser('~/.bup') repodir = None _typemap = { 'blob':3, 'tree':2, 'commit':1, 'tag':4 } _typermap = { 3:'blob', 2:'tree', 1:'commit', 4:'tag' } _total_searches = 0 _total_steps = 0 class GitError(Exception): pass def repo(sub = ''): """Get the path to the git repository or one of its subdirectories.""" global repodir if not repodir: raise GitError('You should call check_repo_or_die()') # If there's a .git subdirectory, then the actual repo is in there. 
gd = os.path.join(repodir, '.git') if os.path.exists(gd): repodir = gd return os.path.join(repodir, sub) def shorten_hash(s): return re.sub(r'([^0-9a-z]|\b)([0-9a-z]{7})[0-9a-z]{33}([^0-9a-z]|\b)', r'\1\2*\3', s) def repo_rel(path): full = os.path.abspath(path) fullrepo = os.path.abspath(repo('')) if not fullrepo.endswith('/'): fullrepo += '/' if full.startswith(fullrepo): path = full[len(fullrepo):] if path.startswith('index-cache/'): path = path[len('index-cache/'):] return shorten_hash(path) def all_packdirs(): paths = [repo('objects/pack')] paths += glob.glob(repo('index-cache/*/.')) return paths def auto_midx(objdir): args = [path.exe(), 'midx', '--auto', '--dir', objdir] try: rv = subprocess.call(args, stdout=open('/dev/null', 'w')) except OSError, e: # make sure 'args' gets printed to help with debugging add_error('%r: exception: %s' % (args, e)) raise if rv: add_error('%r: returned %d' % (args, rv)) args = [path.exe(), 'bloom', '--dir', objdir] try: rv = subprocess.call(args, stdout=open('/dev/null', 'w')) except OSError, e: # make sure 'args' gets printed to help with debugging add_error('%r: exception: %s' % (args, e)) raise if rv: add_error('%r: returned %d' % (args, rv)) def mangle_name(name, mode, gitmode): """Mangle a file name to present an abstract name for segmented files. Mangled file names will have the ".bup" extension added to them. If a file's name already ends with ".bup", a ".bupl" extension is added to disambiguate normal files from semgmented ones. """ if stat.S_ISREG(mode) and not stat.S_ISREG(gitmode): return name + '.bup' elif name.endswith('.bup') or name[:-1].endswith('.bup'): return name + '.bupl' else: return name (BUP_NORMAL, BUP_CHUNKED) = (0,1) def demangle_name(name): """Remove name mangling from a file name, if necessary. 
The return value is a tuple (demangled_filename,mode), where mode is one of the following: * BUP_NORMAL : files that should be read as-is from the repository * BUP_CHUNKED : files that were chunked and need to be assembled For more information on the name mangling algorythm, see mangle_name() """ if name.endswith('.bupl'): return (name[:-5], BUP_NORMAL) elif name.endswith('.bup'): return (name[:-4], BUP_CHUNKED) else: return (name, BUP_NORMAL) def calc_hash(type, content): """Calculate some content's hash in the Git fashion.""" header = '%s %d\0' % (type, len(content)) sum = Sha1(header) sum.update(content) return sum.digest() def shalist_item_sort_key(ent): (mode, name, id) = ent assert(mode+0 == mode) if stat.S_ISDIR(mode): return name + '/' else: return name def tree_encode(shalist): """Generate a git tree object from (mode,name,hash) tuples.""" shalist = sorted(shalist, key = shalist_item_sort_key) l = [] for (mode,name,bin) in shalist: assert(mode) assert(mode+0 == mode) assert(name) assert(len(bin) == 20) s = '%o %s\0%s' % (mode,name,bin) assert(s[0] != '0') # 0-padded octal is not acceptable in a git tree l.append(s) return ''.join(l) def tree_decode(buf): """Generate a list of (mode,name,hash) from the git tree object in buf.""" ofs = 0 while ofs < len(buf): z = buf.find('\0', ofs) assert(z > ofs) spl = buf[ofs:z].split(' ', 1) assert(len(spl) == 2) mode,name = spl sha = buf[z+1:z+1+20] ofs = z+1+20 yield (int(mode, 8), name, sha) def _encode_packobj(type, content, compression_level=1): szout = '' sz = len(content) szbits = (sz & 0x0f) | (_typemap[type]<<4) sz >>= 4 while 1: if sz: szbits |= 0x80 szout += chr(szbits) if not sz: break szbits = sz & 0x7f sz >>= 7 if compression_level > 9: compression_level = 9 elif compression_level < 0: compression_level = 0 z = zlib.compressobj(compression_level) yield szout yield z.compress(content) yield z.flush() def _encode_looseobj(type, content, compression_level=1): z = zlib.compressobj(compression_level) yield 
z.compress('%s %d\0' % (type, len(content))) yield z.compress(content) yield z.flush() def _decode_looseobj(buf): assert(buf); s = zlib.decompress(buf) i = s.find('\0') assert(i > 0) l = s[:i].split(' ') type = l[0] sz = int(l[1]) content = s[i+1:] assert(type in _typemap) assert(sz == len(content)) return (type, content) def _decode_packobj(buf): assert(buf) c = ord(buf[0]) type = _typermap[(c & 0x70) >> 4] sz = c & 0x0f shift = 4 i = 0 while c & 0x80: i += 1 c = ord(buf[i]) sz |= (c & 0x7f) << shift shift += 7 if not (c & 0x80): break return (type, zlib.decompress(buf[i+1:])) class PackIdx: def __init__(self): assert(0) def find_offset(self, hash): """Get the offset of an object inside the index file.""" idx = self._idx_from_hash(hash) if idx != None: return self._ofs_from_idx(idx) return None def exists(self, hash, want_source=False): """Return nonempty if the object exists in this index.""" if hash and (self._idx_from_hash(hash) != None): return want_source and os.path.basename(self.name) or True return None def __len__(self): return int(self.fanout[255]) def _idx_from_hash(self, hash): global _total_searches, _total_steps _total_searches += 1 assert(len(hash) == 20) b1 = ord(hash[0]) start = self.fanout[b1-1] # range -1..254 end = self.fanout[b1] # range 0..255 want = str(hash) _total_steps += 1 # lookup table is a step while start < end: _total_steps += 1 mid = start + (end-start)/2 v = self._idx_to_hash(mid) if v < want: start = mid+1 elif v > want: end = mid else: # got it! 
return mid return None class PackIdxV1(PackIdx): """Object representation of a Git pack index (version 1) file.""" def __init__(self, filename, f): self.name = filename self.idxnames = [self.name] self.map = mmap_read(f) self.fanout = list(struct.unpack('!256I', str(buffer(self.map, 0, 256*4)))) self.fanout.append(0) # entry "-1" nsha = self.fanout[255] self.sha_ofs = 256*4 self.shatable = buffer(self.map, self.sha_ofs, nsha*24) def _ofs_from_idx(self, idx): return struct.unpack('!I', str(self.shatable[idx*24 : idx*24+4]))[0] def _idx_to_hash(self, idx): return str(self.shatable[idx*24+4 : idx*24+24]) def __iter__(self): for i in xrange(self.fanout[255]): yield buffer(self.map, 256*4 + 24*i + 4, 20) class PackIdxV2(PackIdx): """Object representation of a Git pack index (version 2) file.""" def __init__(self, filename, f): self.name = filename self.idxnames = [self.name] self.map = mmap_read(f) assert(str(self.map[0:8]) == '\377tOc\0\0\0\2') self.fanout = list(struct.unpack('!256I', str(buffer(self.map, 8, 256*4)))) self.fanout.append(0) # entry "-1" nsha = self.fanout[255] self.sha_ofs = 8 + 256*4 self.shatable = buffer(self.map, self.sha_ofs, nsha*20) self.ofstable = buffer(self.map, self.sha_ofs + nsha*20 + nsha*4, nsha*4) self.ofs64table = buffer(self.map, 8 + 256*4 + nsha*20 + nsha*4 + nsha*4) def _ofs_from_idx(self, idx): ofs = struct.unpack('!I', str(buffer(self.ofstable, idx*4, 4)))[0] if ofs & 0x80000000: idx64 = ofs & 0x7fffffff ofs = struct.unpack('!Q', str(buffer(self.ofs64table, idx64*8, 8)))[0] return ofs def _idx_to_hash(self, idx): return str(self.shatable[idx*20:(idx+1)*20]) def __iter__(self): for i in xrange(self.fanout[255]): yield buffer(self.map, 8 + 256*4 + 20*i, 20) _mpi_count = 0 class PackIdxList: def __init__(self, dir): global _mpi_count assert(_mpi_count == 0) # these things suck tons of VM; don't waste it _mpi_count += 1 self.dir = dir self.also = set() self.packs = [] self.do_bloom = False self.bloom = None self.refresh() def 
__del__(self): global _mpi_count _mpi_count -= 1 assert(_mpi_count == 0) def __iter__(self): return iter(idxmerge(self.packs)) def __len__(self): return sum(len(pack) for pack in self.packs) def exists(self, hash, want_source=False): """Return nonempty if the object exists in the index files.""" global _total_searches _total_searches += 1 if hash in self.also: return True if self.do_bloom and self.bloom: if self.bloom.exists(hash): self.do_bloom = False else: _total_searches -= 1 # was counted by bloom return None for i in xrange(len(self.packs)): p = self.packs[i] _total_searches -= 1 # will be incremented by sub-pack ix = p.exists(hash, want_source=want_source) if ix: # reorder so most recently used packs are searched first self.packs = [p] + self.packs[:i] + self.packs[i+1:] return ix self.do_bloom = True return None def refresh(self, skip_midx = False): """Refresh the index list. This method verifies if .midx files were superseded (e.g. all of its contents are in another, bigger .midx file) and removes the superseded files. If skip_midx is True, all work on .midx files will be skipped and .midx files will be removed from the list. The module-global variable 'ignore_midx' can force this function to always act as if skip_midx was True. 
""" self.bloom = None # Always reopen the bloom as it may have been relaced self.do_bloom = False skip_midx = skip_midx or ignore_midx d = dict((p.name, p) for p in self.packs if not skip_midx or not isinstance(p, midx.PackMidx)) if os.path.exists(self.dir): if not skip_midx: midxl = [] for ix in self.packs: if isinstance(ix, midx.PackMidx): for name in ix.idxnames: d[os.path.join(self.dir, name)] = ix for full in glob.glob(os.path.join(self.dir,'*.midx')): if not d.get(full): mx = midx.PackMidx(full) (mxd, mxf) = os.path.split(mx.name) broken = False for n in mx.idxnames: if not os.path.exists(os.path.join(mxd, n)): log(('warning: index %s missing\n' + ' used by %s\n') % (n, mxf)) broken = True if broken: del mx unlink(full) else: midxl.append(mx) midxl.sort(key=lambda ix: (-len(ix), -xstat.stat(ix.name).st_mtime)) for ix in midxl: any_needed = False for sub in ix.idxnames: found = d.get(os.path.join(self.dir, sub)) if not found or isinstance(found, PackIdx): # doesn't exist, or exists but not in a midx any_needed = True break if any_needed: d[ix.name] = ix for name in ix.idxnames: d[os.path.join(self.dir, name)] = ix elif not ix.force_keep: debug1('midx: removing redundant: %s\n' % os.path.basename(ix.name)) unlink(ix.name) for full in glob.glob(os.path.join(self.dir,'*.idx')): if not d.get(full): try: ix = open_idx(full) except GitError, e: add_error(e) continue d[full] = ix bfull = os.path.join(self.dir, 'bup.bloom') if self.bloom is None and os.path.exists(bfull): self.bloom = bloom.ShaBloom(bfull) self.packs = list(set(d.values())) self.packs.sort(lambda x,y: -cmp(len(x),len(y))) if self.bloom and self.bloom.valid() and len(self.bloom) >= len(self): self.do_bloom = True else: self.bloom = None debug1('PackIdxList: using %d index%s.\n' % (len(self.packs), len(self.packs)!=1 and 'es' or '')) def add(self, hash): """Insert an additional object in the list.""" self.also.add(hash) def open_idx(filename): if filename.endswith('.idx'): f = open(filename, 'rb') 
header = f.read(8) if header[0:4] == '\377tOc': version = struct.unpack('!I', header[4:8])[0] if version == 2: return PackIdxV2(filename, f) else: raise GitError('%s: expected idx file version 2, got %d' % (filename, version)) elif len(header) == 8 and header[0:4] < '\377tOc': return PackIdxV1(filename, f) else: raise GitError('%s: unrecognized idx file header' % filename) elif filename.endswith('.midx'): return midx.PackMidx(filename) else: raise GitError('idx filenames must end with .idx or .midx') def idxmerge(idxlist, final_progress=True): """Generate a list of all the objects reachable in a PackIdxList.""" def pfunc(count, total): qprogress('Reading indexes: %.2f%% (%d/%d)\r' % (count*100.0/total, count, total)) def pfinal(count, total): if final_progress: progress('Reading indexes: %.2f%% (%d/%d), done.\n' % (100, total, total)) return merge_iter(idxlist, 10024, pfunc, pfinal) def _make_objcache(): return PackIdxList(repo('objects/pack')) class PackWriter: """Writes Git objects inside a pack file.""" def __init__(self, objcache_maker=_make_objcache, compression_level=1): self.count = 0 self.outbytes = 0 self.filename = None self.file = None self.idx = None self.objcache_maker = objcache_maker self.objcache = None self.compression_level = compression_level def __del__(self): self.close() def _open(self): if not self.file: (fd,name) = tempfile.mkstemp(suffix='.pack', dir=repo('objects')) self.file = os.fdopen(fd, 'w+b') assert(name.endswith('.pack')) self.filename = name[:-5] self.file.write('PACK\0\0\0\2\0\0\0\0') self.idx = list(list() for i in xrange(256)) def _raw_write(self, datalist, sha): self._open() f = self.file # in case we get interrupted (eg. KeyboardInterrupt), it's best if # the file never has a *partial* blob. So let's make sure it's # all-or-nothing. (The blob shouldn't be very big anyway, thanks # to our hashsplit algorithm.) f.write() does its own buffering, # but that's okay because we'll flush it in _end(). 
oneblob = ''.join(datalist) try: f.write(oneblob) except IOError, e: raise GitError, e, sys.exc_info()[2] nw = len(oneblob) crc = zlib.crc32(oneblob) & 0xffffffff self._update_idx(sha, crc, nw) self.outbytes += nw self.count += 1 return nw, crc def _update_idx(self, sha, crc, size): assert(sha) if self.idx: self.idx[ord(sha[0])].append((sha, crc, self.file.tell() - size)) def _write(self, sha, type, content): if verbose: log('>') if not sha: sha = calc_hash(type, content) size, crc = self._raw_write(_encode_packobj(type, content, self.compression_level), sha=sha) if self.outbytes >= max_pack_size or self.count >= max_pack_objects: self.breakpoint() return sha def breakpoint(self): """Clear byte and object counts and return the last processed id.""" id = self._end() self.outbytes = self.count = 0 return id def _require_objcache(self): if self.objcache is None and self.objcache_maker: self.objcache = self.objcache_maker() if self.objcache is None: raise GitError( "PackWriter not opened or can't check exists w/o objcache") def exists(self, id, want_source=False): """Return non-empty if an object is found in the object cache.""" self._require_objcache() return self.objcache.exists(id, want_source=want_source) def maybe_write(self, type, content): """Write an object to the pack file if not present and return its id.""" sha = calc_hash(type, content) if not self.exists(sha): self._write(sha, type, content) self._require_objcache() self.objcache.add(sha) return sha def new_blob(self, blob): """Create a blob object in the pack with the supplied content.""" return self.maybe_write('blob', blob) def new_tree(self, shalist): """Create a tree object in the pack.""" content = tree_encode(shalist) return self.maybe_write('tree', content) def _new_commit(self, tree, parent, author, adate, committer, cdate, msg): l = [] if tree: l.append('tree %s' % tree.encode('hex')) if parent: l.append('parent %s' % parent.encode('hex')) if author: l.append('author %s %s' % (author, 
_git_date(adate))) if committer: l.append('committer %s %s' % (committer, _git_date(cdate))) l.append('') l.append(msg) return self.maybe_write('commit', '\n'.join(l)) def new_commit(self, parent, tree, date, msg): """Create a commit object in the pack.""" userline = '%s <%s@%s>' % (userfullname(), username(), hostname()) commit = self._new_commit(tree, parent, userline, date, userline, date, msg) return commit def abort(self): """Remove the pack file from disk.""" f = self.file if f: self.idx = None self.file = None f.close() os.unlink(self.filename + '.pack') def _end(self, run_midx=True): f = self.file if not f: return None self.file = None self.objcache = None idx = self.idx self.idx = None # update object count f.seek(8) cp = struct.pack('!i', self.count) assert(len(cp) == 4) f.write(cp) # calculate the pack sha1sum f.seek(0) sum = Sha1() for b in chunkyreader(f): sum.update(b) packbin = sum.digest() f.write(packbin) f.close() obj_list_sha = self._write_pack_idx_v2(self.filename + '.idx', idx, packbin) nameprefix = repo('objects/pack/pack-%s' % obj_list_sha) if os.path.exists(self.filename + '.map'): os.unlink(self.filename + '.map') os.rename(self.filename + '.pack', nameprefix + '.pack') os.rename(self.filename + '.idx', nameprefix + '.idx') if run_midx: auto_midx(repo('objects/pack')) return nameprefix def close(self, run_midx=True): """Close the pack file and move it to its definitive path.""" return self._end(run_midx=run_midx) def _write_pack_idx_v2(self, filename, idx, packbin): ofs64_count = 0 for section in idx: for entry in section: if entry[2] >= 2**31: ofs64_count += 1 # Length: header + fan-out + shas-and-crcs + overflow-offsets index_len = 8 + (4 * 256) + (28 * self.count) + (8 * ofs64_count) idx_map = None idx_f = open(filename, 'w+b') try: idx_f.truncate(index_len) idx_map = mmap_readwrite(idx_f, close=False) count = _helpers.write_idx(filename, idx_map, idx, self.count) assert(count == self.count) finally: if idx_map: idx_map.close() 
idx_f.close() idx_f = open(filename, 'a+b') try: idx_f.write(packbin) idx_f.seek(0) idx_sum = Sha1() b = idx_f.read(8 + 4*256) idx_sum.update(b) obj_list_sum = Sha1() for b in chunkyreader(idx_f, 20*self.count): idx_sum.update(b) obj_list_sum.update(b) namebase = obj_list_sum.hexdigest() for b in chunkyreader(idx_f): idx_sum.update(b) idx_f.write(idx_sum.digest()) return namebase finally: idx_f.close() def _git_date(date): return '%d %s' % (date, time.strftime('%z', time.localtime(date))) def _gitenv(): os.environ['GIT_DIR'] = os.path.abspath(repo()) def list_refs(refname = None): """Generate a list of tuples in the form (refname,hash). If a ref name is specified, list only this particular ref. """ argv = ['git', 'show-ref', '--'] if refname: argv += [refname] p = subprocess.Popen(argv, preexec_fn = _gitenv, stdout = subprocess.PIPE) out = p.stdout.read().strip() rv = p.wait() # not fatal if rv: assert(not out) if out: for d in out.split('\n'): (sha, name) = d.split(' ', 1) yield (name, sha.decode('hex')) def read_ref(refname): """Get the commit id of the most recent commit made on a given ref.""" l = list(list_refs(refname)) if l: assert(len(l) == 1) return l[0][1] else: return None def rev_list(ref, count=None): """Generate a list of reachable commits in reverse chronological order. This generator walks through commits, from child to parent, that are reachable via the specified ref and yields a series of tuples of the form (date,hash). If count is a non-zero integer, limit the number of commits to "count" objects. 
""" assert(not ref.startswith('-')) opts = [] if count: opts += ['-n', str(atoi(count))] argv = ['git', 'rev-list', '--pretty=format:%ct'] + opts + [ref, '--'] p = subprocess.Popen(argv, preexec_fn = _gitenv, stdout = subprocess.PIPE) commit = None for row in p.stdout: s = row.strip() if s.startswith('commit '): commit = s[7:].decode('hex') else: date = int(s) yield (date, commit) rv = p.wait() # not fatal if rv: raise GitError, 'git rev-list returned error %d' % rv def rev_get_date(ref): """Get the date of the latest commit on the specified ref.""" for (date, commit) in rev_list(ref, count=1): return date raise GitError, 'no such commit %r' % ref def rev_parse(committish): """Resolve the full hash for 'committish', if it exists. Should be roughly equivalent to 'git rev-parse'. Returns the hex value of the hash if it is found, None if 'committish' does not correspond to anything. """ head = read_ref(committish) if head: debug2("resolved from ref: commit = %s\n" % head.encode('hex')) return head pL = PackIdxList(repo('objects/pack')) if len(committish) == 40: try: hash = committish.decode('hex') except TypeError: return None if pL.exists(hash): return hash return None def update_ref(refname, newval, oldval): """Change the commit pointed to by a branch.""" if not oldval: oldval = '' assert(refname.startswith('refs/heads/')) p = subprocess.Popen(['git', 'update-ref', refname, newval.encode('hex'), oldval.encode('hex')], preexec_fn = _gitenv) _git_wait('git update-ref', p) def guess_repo(path=None): """Set the path value in the global variable "repodir". This makes bup look for an existing bup repository, but not fail if a repository doesn't exist. Usually, if you are interacting with a bup repository, you would not be calling this function but using check_repo_or_die(). 
""" global repodir if path: repodir = path if not repodir: repodir = os.environ.get('BUP_DIR') if not repodir: repodir = os.path.expanduser('~/.bup') def init_repo(path=None): """Create the Git bare repository for bup in a given path.""" guess_repo(path) d = repo() # appends a / to the path parent = os.path.dirname(os.path.dirname(d)) if parent and not os.path.exists(parent): raise GitError('parent directory "%s" does not exist\n' % parent) if os.path.exists(d) and not os.path.isdir(os.path.join(d, '.')): raise GitError('"%s" exists but is not a directory\n' % d) p = subprocess.Popen(['git', '--bare', 'init'], stdout=sys.stderr, preexec_fn = _gitenv) _git_wait('git init', p) # Force the index version configuration in order to ensure bup works # regardless of the version of the installed Git binary. p = subprocess.Popen(['git', 'config', 'pack.indexVersion', '2'], stdout=sys.stderr, preexec_fn = _gitenv) _git_wait('git config', p) # Enable the reflog p = subprocess.Popen(['git', 'config', 'core.logAllRefUpdates', 'true'], stdout=sys.stderr, preexec_fn = _gitenv) _git_wait('git config', p) def check_repo_or_die(path=None): """Make sure a bup repository exists, and abort if not. If the path to a particular repository was not specified, this function initializes the default repository automatically. """ guess_repo(path) try: os.stat(repo('objects/pack/.')) except OSError, e: if e.errno == errno.ENOENT: log('error: %r is not a bup repository; run "bup init"\n' % repo()) sys.exit(15) else: log('error: %s\n' % e) sys.exit(14) _ver = None def ver(): """Get Git's version and ensure a usable version is installed. The returned version is formatted as an ordered tuple with each position representing a digit in the version tag. 
For example, the following tuple would represent version 1.6.6.9: ('1', '6', '6', '9') """ global _ver if not _ver: p = subprocess.Popen(['git', '--version'], stdout=subprocess.PIPE) gvs = p.stdout.read() _git_wait('git --version', p) m = re.match(r'git version (\S+.\S+)', gvs) if not m: raise GitError('git --version weird output: %r' % gvs) _ver = tuple(m.group(1).split('.')) needed = ('1','5', '3', '1') if _ver < needed: raise GitError('git version %s or higher is required; you have %s' % ('.'.join(needed), '.'.join(_ver))) return _ver def _git_wait(cmd, p): rv = p.wait() if rv != 0: raise GitError('%s returned %d' % (cmd, rv)) def _git_capture(argv): p = subprocess.Popen(argv, stdout=subprocess.PIPE, preexec_fn = _gitenv) r = p.stdout.read() _git_wait(repr(argv), p) return r class _AbortableIter: def __init__(self, it, onabort = None): self.it = it self.onabort = onabort self.done = None def __iter__(self): return self def next(self): try: return self.it.next() except StopIteration, e: self.done = True raise except: self.abort() raise def abort(self): """Abort iteration and call the abortion callback, if needed.""" if not self.done: self.done = True if self.onabort: self.onabort() def __del__(self): self.abort() _ver_warned = 0 class CatPipe: """Link to 'git cat-file' that is used to retrieve blob data.""" def __init__(self): global _ver_warned wanted = ('1','5','6') if ver() < wanted: if not _ver_warned: log('warning: git version < %s; bup will be slow.\n' % '.'.join(wanted)) _ver_warned = 1 self.get = self._slow_get else: self.p = self.inprogress = None self.get = self._fast_get def _abort(self): if self.p: self.p.stdout.close() self.p.stdin.close() self.p = None self.inprogress = None def _restart(self): self._abort() self.p = subprocess.Popen(['git', 'cat-file', '--batch'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds = True, bufsize = 4096, preexec_fn = _gitenv) def _fast_get(self, id): if not self.p or self.p.poll() != None: self._restart() 
assert(self.p) poll_result = self.p.poll() assert(poll_result == None) if self.inprogress: log('_fast_get: opening %r while %r is open\n' % (id, self.inprogress)) assert(not self.inprogress) assert(id.find('\n') < 0) assert(id.find('\r') < 0) assert(not id.startswith('-')) self.inprogress = id self.p.stdin.write('%s\n' % id) self.p.stdin.flush() hdr = self.p.stdout.readline() if hdr.endswith(' missing\n'): self.inprogress = None raise KeyError('blob %r is missing' % id) spl = hdr.split(' ') if len(spl) != 3 or len(spl[0]) != 40: raise GitError('expected blob, got %r' % spl) (hex, type, size) = spl it = _AbortableIter(chunkyreader(self.p.stdout, int(spl[2])), onabort = self._abort) try: yield type for blob in it: yield blob readline_result = self.p.stdout.readline() assert(readline_result == '\n') self.inprogress = None except Exception, e: it.abort() raise def _slow_get(self, id): assert(id.find('\n') < 0) assert(id.find('\r') < 0) assert(id[0] != '-') type = _git_capture(['git', 'cat-file', '-t', id]).strip() yield type p = subprocess.Popen(['git', 'cat-file', type, id], stdout=subprocess.PIPE, preexec_fn = _gitenv) for blob in chunkyreader(p.stdout): yield blob _git_wait('git cat-file', p) def _join(self, it): type = it.next() if type == 'blob': for blob in it: yield blob elif type == 'tree': treefile = ''.join(it) for (mode, name, sha) in tree_decode(treefile): for blob in self.join(sha.encode('hex')): yield blob elif type == 'commit': treeline = ''.join(it).split('\n')[0] assert(treeline.startswith('tree ')) for blob in self.join(treeline[5:]): yield blob else: raise GitError('invalid object type %r: expected blob/tree/commit' % type) def join(self, id): """Generate a list of the content of all blobs that can be reached from an object. The hash given in 'id' must point to a blob, a tree or a commit. The content of all blobs that can be seen from trees or commits will be added to the list. 
""" try: for d in self._join(self.get(id)): yield d except StopIteration: log('booger!\n') def tags(): """Return a dictionary of all tags in the form {hash: [tag_names, ...]}.""" tags = {} for (n,c) in list_refs(): if n.startswith('refs/tags/'): name = n[10:] if not c in tags: tags[c] = [] tags[c].append(name) # more than one tag can point at 'c' return tags bup-0.25/lib/bup/hashsplit.py000066400000000000000000000130151225146730500161400ustar00rootroot00000000000000import math from bup import _helpers from bup.helpers import * BLOB_MAX = 8192*4 # 8192 is the "typical" blob size for bupsplit BLOB_READ_SIZE = 1024*1024 MAX_PER_TREE = 256 progress_callback = None fanout = 16 GIT_MODE_FILE = 0100644 GIT_MODE_TREE = 040000 GIT_MODE_SYMLINK = 0120000 assert(GIT_MODE_TREE != 40000) # 0xxx should be treated as octal # The purpose of this type of buffer is to avoid copying on peek(), get(), # and eat(). We do copy the buffer contents on put(), but that should # be ok if we always only put() large amounts of data at a time. 
class Buf: def __init__(self): self.data = '' self.start = 0 def put(self, s): if s: self.data = buffer(self.data, self.start) + s self.start = 0 def peek(self, count): return buffer(self.data, self.start, count) def eat(self, count): self.start += count def get(self, count): v = buffer(self.data, self.start, count) self.start += count return v def used(self): return len(self.data) - self.start def readfile_iter(files, progress=None): for filenum,f in enumerate(files): ofs = 0 b = '' while 1: if progress: progress(filenum, len(b)) fadvise_done(f, max(0, ofs - 1024*1024)) b = f.read(BLOB_READ_SIZE) ofs += len(b) if not b: fadvise_done(f, ofs) break yield b def _splitbuf(buf, basebits, fanbits): while 1: b = buf.peek(buf.used()) (ofs, bits) = _helpers.splitbuf(b) if ofs > BLOB_MAX: ofs = BLOB_MAX if ofs: buf.eat(ofs) level = (bits-basebits)//fanbits # integer division yield buffer(b, 0, ofs), level else: break while buf.used() >= BLOB_MAX: # limit max blob size yield buf.get(BLOB_MAX), 0 def _hashsplit_iter(files, progress): assert(BLOB_READ_SIZE > BLOB_MAX) basebits = _helpers.blobbits() fanbits = int(math.log(fanout or 128, 2)) buf = Buf() for inblock in readfile_iter(files, progress): buf.put(inblock) for buf_and_level in _splitbuf(buf, basebits, fanbits): yield buf_and_level if buf.used(): yield buf.get(buf.used()), 0 def _hashsplit_iter_keep_boundaries(files, progress): for real_filenum,f in enumerate(files): if progress: def prog(filenum, nbytes): # the inner _hashsplit_iter doesn't know the real file count, # so we'll replace it here. 
return progress(real_filenum, nbytes) else: prog = None for buf_and_level in _hashsplit_iter([f], progress=prog): yield buf_and_level def hashsplit_iter(files, keep_boundaries, progress): if keep_boundaries: return _hashsplit_iter_keep_boundaries(files, progress) else: return _hashsplit_iter(files, progress) total_split = 0 def split_to_blobs(makeblob, files, keep_boundaries, progress): global total_split for (blob, level) in hashsplit_iter(files, keep_boundaries, progress): sha = makeblob(blob) total_split += len(blob) if progress_callback: progress_callback(len(blob)) yield (sha, len(blob), level) def _make_shalist(l): ofs = 0 l = list(l) total = sum(size for mode,sha,size, in l) vlen = len('%x' % total) shalist = [] for (mode, sha, size) in l: shalist.append((mode, '%0*x' % (vlen,ofs), sha)) ofs += size assert(ofs == total) return (shalist, total) def _squish(maketree, stacks, n): i = 0 while i < n or len(stacks[i]) >= MAX_PER_TREE: while len(stacks) <= i+1: stacks.append([]) if len(stacks[i]) == 1: stacks[i+1] += stacks[i] elif stacks[i]: (shalist, size) = _make_shalist(stacks[i]) tree = maketree(shalist) stacks[i+1].append((GIT_MODE_TREE, tree, size)) stacks[i] = [] i += 1 def split_to_shalist(makeblob, maketree, files, keep_boundaries, progress=None): sl = split_to_blobs(makeblob, files, keep_boundaries, progress) assert(fanout != 0) if not fanout: shal = [] for (sha,size,level) in sl: shal.append((GIT_MODE_FILE, sha, size)) return _make_shalist(shal)[0] else: stacks = [[]] for (sha,size,level) in sl: stacks[0].append((GIT_MODE_FILE, sha, size)) _squish(maketree, stacks, level) #log('stacks: %r\n' % [len(i) for i in stacks]) _squish(maketree, stacks, len(stacks)-1) #log('stacks: %r\n' % [len(i) for i in stacks]) return _make_shalist(stacks[-1])[0] def split_to_blob_or_tree(makeblob, maketree, files, keep_boundaries): shalist = list(split_to_shalist(makeblob, maketree, files, keep_boundaries)) if len(shalist) == 1: return (shalist[0][0], shalist[0][2]) elif 
len(shalist) == 0: return (GIT_MODE_FILE, makeblob('')) else: return (GIT_MODE_TREE, maketree(shalist)) def open_noatime(name): fd = _helpers.open_noatime(name) try: return os.fdopen(fd, 'rb', 1024*1024) except: try: os.close(fd) except: pass raise def fadvise_done(f, ofs): assert(ofs >= 0) if ofs > 0 and hasattr(f, 'fileno'): _helpers.fadvise_done(f.fileno(), ofs) bup-0.25/lib/bup/helpers.py000066400000000000000000000641141225146730500156110ustar00rootroot00000000000000"""Helper functions and classes for bup.""" import sys, os, pwd, subprocess, errno, socket, select, mmap, stat, re, struct import hashlib, heapq, operator, time, grp from bup import _version, _helpers import bup._helpers as _helpers import math # This function should really be in helpers, not in bup.options. But we # want options.py to be standalone so people can include it in other projects. from bup.options import _tty_width tty_width = _tty_width def atoi(s): """Convert the string 's' to an integer. Return 0 if s is not a number.""" try: return int(s or '0') except ValueError: return 0 def atof(s): """Convert the string 's' to a float. Return 0 if s is not a number.""" try: return float(s or '0') except ValueError: return 0 buglvl = atoi(os.environ.get('BUP_DEBUG', 0)) # If the platform doesn't have fdatasync (OS X), fall back to fsync. try: fdatasync = os.fdatasync except AttributeError: fdatasync = os.fsync # Write (blockingly) to sockets that may or may not be in blocking mode. # We need this because our stderr is sometimes eaten by subprocesses # (probably ssh) that sometimes make it nonblocking, if only temporarily, # leading to race conditions. Ick. We'll do it the hard way. 
def _hard_write(fd, buf): while buf: (r,w,x) = select.select([], [fd], [], None) if not w: raise IOError('select(fd) returned without being writable') try: sz = os.write(fd, buf) except OSError, e: if e.errno != errno.EAGAIN: raise assert(sz >= 0) buf = buf[sz:] _last_prog = 0 def log(s): """Print a log message to stderr.""" global _last_prog sys.stdout.flush() _hard_write(sys.stderr.fileno(), s) _last_prog = 0 def debug1(s): if buglvl >= 1: log(s) def debug2(s): if buglvl >= 2: log(s) istty1 = os.isatty(1) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 1) istty2 = os.isatty(2) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 2) _last_progress = '' def progress(s): """Calls log() if stderr is a TTY. Does nothing otherwise.""" global _last_progress if istty2: log(s) _last_progress = s def qprogress(s): """Calls progress() only if we haven't printed progress in a while. This avoids overloading the stderr buffer with excess junk. """ global _last_prog now = time.time() if now - _last_prog > 0.1: progress(s) _last_prog = now def reprogress(): """Calls progress() to redisplay the most recent progress message. Useful after you've printed some other message that wipes out the progress line. """ if _last_progress and _last_progress.endswith('\r'): progress(_last_progress) def mkdirp(d, mode=None): """Recursively create directories on path 'd'. Unlike os.makedirs(), it doesn't raise an exception if the last element of the path already exists. 
""" try: if mode: os.makedirs(d, mode) else: os.makedirs(d) except OSError, e: if e.errno == errno.EEXIST: pass else: raise def next(it): """Get the next item from an iterator, None if we reached the end.""" try: return it.next() except StopIteration: return None def merge_iter(iters, pfreq, pfunc, pfinal, key=None): if key: samekey = lambda e, pe: getattr(e, key) == getattr(pe, key, None) else: samekey = operator.eq count = 0 total = sum(len(it) for it in iters) iters = (iter(it) for it in iters) heap = ((next(it),it) for it in iters) heap = [(e,it) for e,it in heap if e] heapq.heapify(heap) pe = None while heap: if not count % pfreq: pfunc(count, total) e, it = heap[0] if not samekey(e, pe): pe = e yield e count += 1 try: e = it.next() # Don't use next() function, it's too expensive except StopIteration: heapq.heappop(heap) # remove current else: heapq.heapreplace(heap, (e, it)) # shift current to new location pfinal(count, total) def unlink(f): """Delete a file at path 'f' if it currently exists. Unlike os.unlink(), does not throw an exception if the file didn't already exist. """ try: os.unlink(f) except OSError, e: if e.errno == errno.ENOENT: pass # it doesn't exist, that's what you asked for def readpipe(argv): """Run a subprocess and return its output.""" p = subprocess.Popen(argv, stdout=subprocess.PIPE) r = p.stdout.read() p.wait() return r def realpath(p): """Get the absolute path of a file. Behaves like os.path.realpath, but doesn't follow a symlink for the last element. (ie. if 'p' itself is a symlink, this one won't follow it, but it will follow symlinks in p's directory) """ try: st = os.lstat(p) except OSError: st = None if st and stat.S_ISLNK(st.st_mode): (dir, name) = os.path.split(p) dir = os.path.realpath(dir) out = os.path.join(dir, name) else: out = os.path.realpath(p) #log('realpathing:%r,%r\n' % (p, out)) return out def detect_fakeroot(): "Return True if we appear to be running under fakeroot." 
return os.getenv("FAKEROOTKEY") != None def is_superuser(): if sys.platform.startswith('cygwin'): import ctypes return ctypes.cdll.shell32.IsUserAnAdmin() else: return os.geteuid() == 0 def _cache_key_value(get_value, key, cache): """Return (value, was_cached). If there is a value in the cache for key, use that, otherwise, call get_value(key) which should throw a KeyError if there is no value -- in which case the cached and returned value will be None. """ try: # Do we already have it (or know there wasn't one)? value = cache[key] return value, True except KeyError: pass value = None try: cache[key] = value = get_value(key) except KeyError: cache[key] = None return value, False _uid_to_pwd_cache = {} _name_to_pwd_cache = {} def pwd_from_uid(uid): """Return password database entry for uid (may be a cached value). Return None if no entry is found. """ global _uid_to_pwd_cache, _name_to_pwd_cache entry, cached = _cache_key_value(pwd.getpwuid, uid, _uid_to_pwd_cache) if entry and not cached: _name_to_pwd_cache[entry.pw_name] = entry return entry def pwd_from_name(name): """Return password database entry for name (may be a cached value). Return None if no entry is found. """ global _uid_to_pwd_cache, _name_to_pwd_cache entry, cached = _cache_key_value(pwd.getpwnam, name, _name_to_pwd_cache) if entry and not cached: _uid_to_pwd_cache[entry.pw_uid] = entry return entry _gid_to_grp_cache = {} _name_to_grp_cache = {} def grp_from_gid(gid): """Return password database entry for gid (may be a cached value). Return None if no entry is found. """ global _gid_to_grp_cache, _name_to_grp_cache entry, cached = _cache_key_value(grp.getgrgid, gid, _gid_to_grp_cache) if entry and not cached: _name_to_grp_cache[entry.gr_name] = entry return entry def grp_from_name(name): """Return password database entry for name (may be a cached value). Return None if no entry is found. 
""" global _gid_to_grp_cache, _name_to_grp_cache entry, cached = _cache_key_value(grp.getgrnam, name, _name_to_grp_cache) if entry and not cached: _gid_to_grp_cache[entry.gr_gid] = entry return entry _username = None def username(): """Get the user's login name.""" global _username if not _username: uid = os.getuid() _username = pwd_from_uid(uid)[0] or 'user%d' % uid return _username _userfullname = None def userfullname(): """Get the user's full name.""" global _userfullname if not _userfullname: uid = os.getuid() entry = pwd_from_uid(uid) if entry: _userfullname = entry[4].split(',')[0] or entry[0] if not _userfullname: _userfullname = 'user%d' % uid return _userfullname _hostname = None def hostname(): """Get the FQDN of this machine.""" global _hostname if not _hostname: _hostname = socket.getfqdn() return _hostname _resource_path = None def resource_path(subdir=''): global _resource_path if not _resource_path: _resource_path = os.environ.get('BUP_RESOURCE_PATH') or '.' return os.path.join(_resource_path, subdir) def format_filesize(size): unit = 1024.0 size = float(size) if size < unit: return "%d" % (size) exponent = int(math.log(size) / math.log(unit)) size_prefix = "KMGTPE"[exponent - 1] return "%.1f%s" % (size / math.pow(unit, exponent), size_prefix) class NotOk(Exception): pass class BaseConn: def __init__(self, outp): self.outp = outp def close(self): while self._read(65536): pass def read(self, size): """Read 'size' bytes from input stream.""" self.outp.flush() return self._read(size) def readline(self): """Read from input stream until a newline is found.""" self.outp.flush() return self._readline() def write(self, data): """Write 'data' to output stream.""" #log('%d writing: %d bytes\n' % (os.getpid(), len(data))) self.outp.write(data) def has_input(self): """Return true if input stream is readable.""" raise NotImplemented("Subclasses must implement has_input") def ok(self): """Indicate end of output from last sent command.""" self.write('\nok\n') def 
error(self, s): """Indicate server error to the client.""" s = re.sub(r'\s+', ' ', str(s)) self.write('\nerror %s\n' % s) def _check_ok(self, onempty): self.outp.flush() rl = '' for rl in linereader(self): #log('%d got line: %r\n' % (os.getpid(), rl)) if not rl: # empty line continue elif rl == 'ok': return None elif rl.startswith('error '): #log('client: error: %s\n' % rl[6:]) return NotOk(rl[6:]) else: onempty(rl) raise Exception('server exited unexpectedly; see errors above') def drain_and_check_ok(self): """Remove all data for the current command from input stream.""" def onempty(rl): pass return self._check_ok(onempty) def check_ok(self): """Verify that server action completed successfully.""" def onempty(rl): raise Exception('expected "ok", got %r' % rl) return self._check_ok(onempty) class Conn(BaseConn): def __init__(self, inp, outp): BaseConn.__init__(self, outp) self.inp = inp def _read(self, size): return self.inp.read(size) def _readline(self): return self.inp.readline() def has_input(self): [rl, wl, xl] = select.select([self.inp.fileno()], [], [], 0) if rl: assert(rl[0] == self.inp.fileno()) return True else: return None def checked_reader(fd, n): while n > 0: rl, _, _ = select.select([fd], [], []) assert(rl[0] == fd) buf = os.read(fd, n) if not buf: raise Exception("Unexpected EOF reading %d more bytes" % n) yield buf n -= len(buf) MAX_PACKET = 128 * 1024 def mux(p, outfd, outr, errr): try: fds = [outr, errr] while p.poll() is None: rl, _, _ = select.select(fds, [], []) for fd in rl: if fd == outr: buf = os.read(outr, MAX_PACKET) if not buf: break os.write(outfd, struct.pack('!IB', len(buf), 1) + buf) elif fd == errr: buf = os.read(errr, 1024) if not buf: break os.write(outfd, struct.pack('!IB', len(buf), 2) + buf) finally: os.write(outfd, struct.pack('!IB', 0, 3)) class DemuxConn(BaseConn): """A helper class for bup's client-server protocol.""" def __init__(self, infd, outp): BaseConn.__init__(self, outp) # Anything that comes through before the sync 
string was not # multiplexed and can be assumed to be debug/log before mux init. tail = '' while tail != 'BUPMUX': b = os.read(infd, (len(tail) < 6) and (6-len(tail)) or 1) if not b: raise IOError('demux: unexpected EOF during initialization') tail += b sys.stderr.write(tail[:-6]) # pre-mux log messages tail = tail[-6:] self.infd = infd self.reader = None self.buf = None self.closed = False def write(self, data): self._load_buf(0) BaseConn.write(self, data) def _next_packet(self, timeout): if self.closed: return False rl, wl, xl = select.select([self.infd], [], [], timeout) if not rl: return False assert(rl[0] == self.infd) ns = ''.join(checked_reader(self.infd, 5)) n, fdw = struct.unpack('!IB', ns) assert(n <= MAX_PACKET) if fdw == 1: self.reader = checked_reader(self.infd, n) elif fdw == 2: for buf in checked_reader(self.infd, n): sys.stderr.write(buf) elif fdw == 3: self.closed = True debug2("DemuxConn: marked closed\n") return True def _load_buf(self, timeout): if self.buf is not None: return True while not self.closed: while not self.reader: if not self._next_packet(timeout): return False try: self.buf = self.reader.next() return True except StopIteration: self.reader = None return False def _read_parts(self, ix_fn): while self._load_buf(None): assert(self.buf is not None) i = ix_fn(self.buf) if i is None or i == len(self.buf): yv = self.buf self.buf = None else: yv = self.buf[:i] self.buf = self.buf[i:] yield yv if i is not None: break def _readline(self): def find_eol(buf): try: return buf.index('\n')+1 except ValueError: return None return ''.join(self._read_parts(find_eol)) def _read(self, size): csize = [size] def until_size(buf): # Closes on csize if len(buf) < csize[0]: csize[0] -= len(buf) return None else: return csize[0] return ''.join(self._read_parts(until_size)) def has_input(self): return self._load_buf(0) def linereader(f): """Generate a list of input lines from 'f' without terminating newlines.""" while 1: line = f.readline() if not line: break 
yield line[:-1] def chunkyreader(f, count = None): """Generate a list of chunks of data read from 'f'. If count is None, read until EOF is reached. If count is a positive integer, read 'count' bytes from 'f'. If EOF is reached while reading, raise IOError. """ if count != None: while count > 0: b = f.read(min(count, 65536)) if not b: raise IOError('EOF with %d bytes remaining' % count) yield b count -= len(b) else: while 1: b = f.read(65536) if not b: break yield b def slashappend(s): """Append "/" to 's' if it doesn't aleady end in "/".""" if s and not s.endswith('/'): return s + '/' else: return s def _mmap_do(f, sz, flags, prot, close): if not sz: st = os.fstat(f.fileno()) sz = st.st_size if not sz: # trying to open a zero-length map gives an error, but an empty # string has all the same behaviour of a zero-length map, ie. it has # no elements :) return '' map = mmap.mmap(f.fileno(), sz, flags, prot) if close: f.close() # map will persist beyond file close return map def mmap_read(f, sz = 0, close=True): """Create a read-only memory mapped region on file 'f'. If sz is 0, the region will cover the entire file. """ return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ, close) def mmap_readwrite(f, sz = 0, close=True): """Create a read-write memory mapped region on file 'f'. If sz is 0, the region will cover the entire file. """ return _mmap_do(f, sz, mmap.MAP_SHARED, mmap.PROT_READ|mmap.PROT_WRITE, close) def mmap_readwrite_private(f, sz = 0, close=True): """Create a read-write memory mapped region on file 'f'. If sz is 0, the region will cover the entire file. The map is private, which means the changes are never flushed back to the file. """ return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ|mmap.PROT_WRITE, close) def parse_num(s): """Parse data size information into a float number. 
Here are some examples of conversions: 199.2k means 203981 bytes 1GB means 1073741824 bytes 2.1 tb means 2199023255552 bytes """ g = re.match(r'([-+\d.e]+)\s*(\w*)', str(s)) if not g: raise ValueError("can't parse %r as a number" % s) (val, unit) = g.groups() num = float(val) unit = unit.lower() if unit in ['t', 'tb']: mult = 1024*1024*1024*1024 elif unit in ['g', 'gb']: mult = 1024*1024*1024 elif unit in ['m', 'mb']: mult = 1024*1024 elif unit in ['k', 'kb']: mult = 1024 elif unit in ['', 'b']: mult = 1 else: raise ValueError("invalid unit %r in number %r" % (unit, s)) return int(num*mult) def count(l): """Count the number of elements in an iterator. (consumes the iterator)""" return reduce(lambda x,y: x+1, l) saved_errors = [] def add_error(e): """Append an error message to the list of saved errors. Once processing is able to stop and output the errors, the saved errors are accessible in the module variable helpers.saved_errors. """ saved_errors.append(e) log('%-70s\n' % e) def clear_errors(): global saved_errors saved_errors = [] def handle_ctrl_c(): """Replace the default exception handler for KeyboardInterrupt (Ctrl-C). The new exception handler will make sure that bup will exit without an ugly stacktrace when Ctrl-C is hit. """ oldhook = sys.excepthook def newhook(exctype, value, traceback): if exctype == KeyboardInterrupt: log('\nInterrupted.\n') else: return oldhook(exctype, value, traceback) sys.excepthook = newhook def columnate(l, prefix): """Format elements of 'l' in columns with 'prefix' leading each line. The number of columns is determined automatically based on the string lengths. 
""" if not l: return "" l = l[:] clen = max(len(s) for s in l) ncols = (tty_width() - len(prefix)) / (clen + 2) if ncols <= 1: ncols = 1 clen = 0 cols = [] while len(l) % ncols: l.append('') rows = len(l)/ncols for s in range(0, len(l), rows): cols.append(l[s:s+rows]) out = '' for row in zip(*cols): out += prefix + ''.join(('%-*s' % (clen+2, s)) for s in row) + '\n' return out def parse_date_or_fatal(str, fatal): """Parses the given date or calls Option.fatal(). For now we expect a string that contains a float.""" try: date = atof(str) except ValueError, e: raise fatal('invalid date format (should be a float): %r' % e) else: return date def parse_excludes(options, fatal): """Traverse the options and extract all excludes, or call Option.fatal().""" excluded_paths = [] for flag in options: (option, parameter) = flag if option == '--exclude': excluded_paths.append(realpath(parameter)) elif option == '--exclude-from': try: f = open(realpath(parameter)) except IOError, e: raise fatal("couldn't read %s" % parameter) for exclude_path in f.readlines(): excluded_paths.append(realpath(exclude_path.strip())) return excluded_paths def parse_rx_excludes(options, fatal): """Traverse the options and extract all rx excludes, or call Option.fatal().""" rxs = [v for f, v in options if f == '--exclude-rx'] for i in range(len(rxs)): try: rxs[i] = re.compile(rxs[i]) except re.error, ex: o.fatal('invalid --exclude-rx pattern (%s):' % (ex, rxs[i])) return rxs def should_rx_exclude_path(path, exclude_rxs): """Return True if path matches a regular expression in exclude_rxs.""" for rx in exclude_rxs: if rx.search(path): debug1('Skipping %r: excluded by rx pattern %r.\n' % (path, rx.pattern)) return True return False # FIXME: Carefully consider the use of functions (os.path.*, etc.) # that resolve against the current filesystem in the strip/graft # functions for example, but elsewhere as well. I suspect bup's not # always being careful about that. 
For some cases, the contents of # the current filesystem should be irrelevant, and consulting it might # produce the wrong result, perhaps via unintended symlink resolution, # for example. def path_components(path): """Break path into a list of pairs of the form (name, full_path_to_name). Path must start with '/'. Example: '/home/foo' -> [('', '/'), ('home', '/home'), ('foo', '/home/foo')]""" if not path.startswith('/'): raise Exception, 'path must start with "/": %s' % path # Since we assume path startswith('/'), we can skip the first element. result = [('', '/')] norm_path = os.path.abspath(path) if norm_path == '/': return result full_path = '' for p in norm_path.split('/')[1:]: full_path += '/' + p result.append((p, full_path)) return result def stripped_path_components(path, strip_prefixes): """Strip any prefix in strip_prefixes from path and return a list of path components where each component is (name, none_or_full_fs_path_to_name). Assume path startswith('/'). See thelpers.py for examples.""" normalized_path = os.path.abspath(path) sorted_strip_prefixes = sorted(strip_prefixes, key=len, reverse=True) for bp in sorted_strip_prefixes: normalized_bp = os.path.abspath(bp) if normalized_path.startswith(normalized_bp): prefix = normalized_path[:len(normalized_bp)] result = [] for p in normalized_path[len(normalized_bp):].split('/'): if p: # not root prefix += '/' prefix += p result.append((p, prefix)) return result # Nothing to strip. return path_components(path) def grafted_path_components(graft_points, path): # Create a result that consists of some number of faked graft # directories before the graft point, followed by all of the real # directories from path that are after the graft point. Arrange # for the directory at the graft point in the result to correspond # to the "orig" directory in --graft orig=new. See t/thelpers.py # for some examples. 
# Note that given --graft orig=new, orig and new have *nothing* to # do with each other, even if some of their component names # match. i.e. --graft /foo/bar/baz=/foo/bar/bax is semantically # equivalent to --graft /foo/bar/baz=/x/y/z, or even # /foo/bar/baz=/x. # FIXME: This can't be the best solution... clean_path = os.path.abspath(path) for graft_point in graft_points: old_prefix, new_prefix = graft_point # Expand prefixes iff not absolute paths. old_prefix = os.path.normpath(old_prefix) new_prefix = os.path.normpath(new_prefix) if clean_path.startswith(old_prefix): escaped_prefix = re.escape(old_prefix) grafted_path = re.sub(r'^' + escaped_prefix, new_prefix, clean_path) # Handle /foo=/ (at least) -- which produces //whatever. grafted_path = '/' + grafted_path.lstrip('/') clean_path_components = path_components(clean_path) # Count the components that were stripped. strip_count = 0 if old_prefix == '/' else old_prefix.count('/') new_prefix_parts = new_prefix.split('/') result_prefix = grafted_path.split('/')[:new_prefix.count('/')] result = [(p, None) for p in result_prefix] \ + clean_path_components[strip_count:] # Now set the graft point name to match the end of new_prefix. graft_point = len(result_prefix) result[graft_point] = \ (new_prefix_parts[-1], clean_path_components[strip_count][1]) if new_prefix == '/': # --graft ...=/ is a special case. return result[1:] return result return path_components(clean_path) Sha1 = hashlib.sha1 def version_date(): """Format bup's version date string for output.""" return _version.DATE.split(' ')[0] def version_commit(): """Get the commit hash of bup's current version.""" return _version.COMMIT def version_tag(): """Format bup's version tag (the official version number). When generated from a commit other than one pointed to with a tag, the returned string will be "unknown-" followed by the first seven positions of the commit hash. 
""" names = _version.NAMES.strip() assert(names[0] == '(') assert(names[-1] == ')') names = names[1:-1] l = [n.strip() for n in names.split(',')] for n in l: if n.startswith('tag: bup-'): return n[9:] return 'unknown-%s' % _version.COMMIT[:7] bup-0.25/lib/bup/hlinkdb.py000066400000000000000000000071201225146730500155540ustar00rootroot00000000000000import cPickle, errno, os, tempfile class Error(Exception): pass class HLinkDB: def __init__(self, filename): # Map a "dev:ino" node to a list of paths associated with that node. self._node_paths = {} # Map a path to a "dev:ino" node. self._path_node = {} self._filename = filename self._save_prepared = None self._tmpname = None f = None try: f = open(filename, 'r') except IOError, e: if e.errno == errno.ENOENT: pass else: raise if f: try: self._node_paths = cPickle.load(f) finally: f.close() f = None # Set up the reverse hard link index. for node, paths in self._node_paths.iteritems(): for path in paths: self._path_node[path] = node def prepare_save(self): """ Commit all of the relevant data to disk. Do as much work as possible without actually making the changes visible.""" if self._save_prepared: raise Error('save of %r already in progress' % self._filename) if self._node_paths: (dir, name) = os.path.split(self._filename) (ffd, self._tmpname) = tempfile.mkstemp('.tmp', name, dir) try: f = os.fdopen(ffd, 'wb', 65536) except: os.close(ffd) raise try: cPickle.dump(self._node_paths, f, 2) except: f.close() os.unlink(self._tmpname) self._tmpname = None raise else: f.close() f = None self._save_prepared = True def commit_save(self): if not self._save_prepared: raise Error('cannot commit save of %r; no save prepared' % self._filename) if self._tmpname: os.rename(self._tmpname, self._filename) self._tmpname = None else: # No data -- delete _filename if it exists. 
try: os.unlink(self._filename) except OSError, e: if e.errno == errno.ENOENT: pass else: raise self._save_prepared = None def abort_save(self): if self._tmpname: os.unlink(self._tmpname) self._tmpname = None def __del__(self): self.abort_save() def add_path(self, path, dev, ino): # Assume path is new. node = '%s:%s' % (dev, ino) self._path_node[path] = node link_paths = self._node_paths.get(node) if link_paths and path not in link_paths: link_paths.append(path) else: self._node_paths[node] = [path] def _del_node_path(self, node, path): link_paths = self._node_paths[node] link_paths.remove(path) if not link_paths: del self._node_paths[node] def change_path(self, path, new_dev, new_ino): prev_node = self._path_node.get(path) if prev_node: self._del_node_path(prev_node, path) self.add_path(new_dev, new_ino, path) def del_path(self, path): # Path may not be in db (if updating a pre-hardlink support index). node = self._path_node.get(path) if node: self._del_node_path(node, path) del self._path_node[path] def node_paths(self, dev, ino): node = '%s:%s' % (dev, ino) return self._node_paths[node] bup-0.25/lib/bup/index.py000066400000000000000000000446471225146730500152670ustar00rootroot00000000000000import metadata, os, stat, struct, tempfile from bup import xstat from bup.helpers import * EMPTY_SHA = '\0'*20 FAKE_SHA = '\x01'*20 INDEX_HDR = 'BUPI\0\0\0\5' # Time values are handled as integer nanoseconds since the epoch in # memory, but are written as xstat/metadata timespecs. This behavior # matches the existing metadata/xstat/.bupm code. # Record times (mtime, ctime, atime) as xstat/metadata timespecs, and # store all of the times in the index so they won't interfere with the # forthcoming metadata cache. 
INDEX_SIG = '!QQQqQqQqQIIQII20sHIIQ' ENTLEN = struct.calcsize(INDEX_SIG) FOOTER_SIG = '!Q' FOOTLEN = struct.calcsize(FOOTER_SIG) IX_EXISTS = 0x8000 # file exists on filesystem IX_HASHVALID = 0x4000 # the stored sha1 matches the filesystem IX_SHAMISSING = 0x2000 # the stored sha1 object doesn't seem to exist class Error(Exception): pass class MetaStoreReader: def __init__(self, filename): self._file = open(filename, 'rb') def close(self): if self._file: self._file.close() self._file = None def __del__(self): self.close() def metadata_at(self, ofs): self._file.seek(ofs) return metadata.Metadata.read(self._file) class MetaStoreWriter: # For now, we just append to the file, and try to handle any # truncation or corruption somewhat sensibly. def __init__(self, filename): # Map metadata hashes to bupindex.meta offsets. self._offsets = {} self._filename = filename # FIXME: see how slow this is; does it matter? m_file = open(filename, 'ab+') try: m_file.seek(0) try: m_off = m_file.tell() m = metadata.Metadata.read(m_file) while m: m_encoded = m.encode() self._offsets[m_encoded] = m_off m_off = m_file.tell() m = metadata.Metadata.read(m_file) except EOFError: pass except: log('index metadata in %r appears to be corrupt' % filename) raise finally: m_file.close() self._file = open(filename, 'ab') def close(self): if self._file: self._file.close() self._file = None def __del__(self): # Be optimistic. 
self.close() def store(self, metadata): meta_encoded = metadata.encode(include_path=False) ofs = self._offsets.get(meta_encoded) if ofs: return ofs ofs = self._file.tell() self._file.write(meta_encoded) self._offsets[meta_encoded] = ofs return ofs class Level: def __init__(self, ename, parent): self.parent = parent self.ename = ename self.list = [] self.count = 0 def write(self, f): (ofs,n) = (f.tell(), len(self.list)) if self.list: count = len(self.list) #log('popping %r with %d entries\n' # % (''.join(self.ename), count)) for e in self.list: e.write(f) if self.parent: self.parent.count += count + self.count return (ofs,n) def _golevel(level, f, ename, newentry, metastore, tmax): # close nodes back up the tree assert(level) default_meta_ofs = metastore.store(metadata.Metadata()) while ename[:len(level.ename)] != level.ename: n = BlankNewEntry(level.ename[-1], default_meta_ofs, tmax) n.flags |= IX_EXISTS (n.children_ofs,n.children_n) = level.write(f) level.parent.list.append(n) level = level.parent # create nodes down the tree while len(level.ename) < len(ename): level = Level(ename[:len(level.ename)+1], level) # are we in precisely the right place? 
assert(ename == level.ename) n = newentry or \ BlankNewEntry(ename and level.ename[-1] or None, default_meta_ofs, tmax) (n.children_ofs,n.children_n) = level.write(f) if level.parent: level.parent.list.append(n) level = level.parent return level class Entry: def __init__(self, basename, name, meta_ofs, tmax): self.basename = str(basename) self.name = str(name) self.meta_ofs = meta_ofs self.tmax = tmax self.children_ofs = 0 self.children_n = 0 def __repr__(self): return ("(%s,0x%04x,%d,%d,%d,%d,%d,%d,%d,%d,%s/%s,0x%04x,%d,0x%08x/%d)" % (self.name, self.dev, self.ino, self.nlink, self.ctime, self.mtime, self.atime, self.uid, self.gid, self.size, self.mode, self.gitmode, self.flags, self.meta_ofs, self.children_ofs, self.children_n)) def packed(self): try: ctime = xstat.nsecs_to_timespec(self.ctime) mtime = xstat.nsecs_to_timespec(self.mtime) atime = xstat.nsecs_to_timespec(self.atime) return struct.pack(INDEX_SIG, self.dev, self.ino, self.nlink, ctime[0], ctime[1], mtime[0], mtime[1], atime[0], atime[1], self.uid, self.gid, self.size, self.mode, self.gitmode, self.sha, self.flags, self.children_ofs, self.children_n, self.meta_ofs) except (DeprecationWarning, struct.error), e: log('pack error: %s (%r)\n' % (e, self)) raise def from_stat(self, st, meta_ofs, tstart, check_device=True): old = (self.dev if check_device else 0, self.ino, self.nlink, self.ctime, self.mtime, self.uid, self.gid, self.size, self.flags & IX_EXISTS) new = (st.st_dev if check_device else 0, st.st_ino, st.st_nlink, st.st_ctime, st.st_mtime, st.st_uid, st.st_gid, st.st_size, IX_EXISTS) self.dev = st.st_dev self.ino = st.st_ino self.nlink = st.st_nlink self.ctime = st.st_ctime self.mtime = st.st_mtime self.atime = st.st_atime self.uid = st.st_uid self.gid = st.st_gid self.size = st.st_size self.mode = st.st_mode self.flags |= IX_EXISTS self.meta_ofs = meta_ofs # Check that the ctime's "second" is at or after tstart's. 
ctime_sec_in_ns = xstat.fstime_floor_secs(st.st_ctime) * 10**9 if ctime_sec_in_ns >= tstart or old != new \ or self.sha == EMPTY_SHA or not self.gitmode: self.invalidate() self._fixup() def _fixup(self): if self.uid < 0: self.uid += 0x100000000 if self.gid < 0: self.gid += 0x100000000 assert(self.uid >= 0) assert(self.gid >= 0) self.mtime = self._fixup_time(self.mtime) self.ctime = self._fixup_time(self.ctime) def _fixup_time(self, t): if self.tmax != None and t > self.tmax: return self.tmax else: return t def is_valid(self): f = IX_HASHVALID|IX_EXISTS return (self.flags & f) == f def invalidate(self): self.flags &= ~IX_HASHVALID def validate(self, gitmode, sha): assert(sha) assert(gitmode) assert(gitmode+0 == gitmode) self.gitmode = gitmode self.sha = sha self.flags |= IX_HASHVALID|IX_EXISTS def exists(self): return not self.is_deleted() def sha_missing(self): return (self.flags & IX_SHAMISSING) or not (self.flags & IX_HASHVALID) def is_deleted(self): return (self.flags & IX_EXISTS) == 0 def set_deleted(self): if self.flags & IX_EXISTS: self.flags &= ~(IX_EXISTS | IX_HASHVALID) def is_real(self): return not self.is_fake() def is_fake(self): return not self.ctime def __cmp__(a, b): return (cmp(b.name, a.name) or cmp(a.is_valid(), b.is_valid()) or cmp(a.is_fake(), b.is_fake())) def write(self, f): f.write(self.basename + '\0' + self.packed()) class NewEntry(Entry): def __init__(self, basename, name, tmax, dev, ino, nlink, ctime, mtime, atime, uid, gid, size, mode, gitmode, sha, flags, meta_ofs, children_ofs, children_n): Entry.__init__(self, basename, name, meta_ofs, tmax) (self.dev, self.ino, self.nlink, self.ctime, self.mtime, self.atime, self.uid, self.gid, self.size, self.mode, self.gitmode, self.sha, self.flags, self.children_ofs, self.children_n ) = (dev, ino, nlink, ctime, mtime, atime, uid, gid, size, mode, gitmode, sha, flags, children_ofs, children_n) self._fixup() class BlankNewEntry(NewEntry): def __init__(self, basename, meta_ofs, tmax): 
NewEntry.__init__(self, basename, basename, tmax, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, EMPTY_SHA, 0, meta_ofs, 0, 0) class ExistingEntry(Entry): def __init__(self, parent, basename, name, m, ofs): Entry.__init__(self, basename, name, None, None) self.parent = parent self._m = m self._ofs = ofs (self.dev, self.ino, self.nlink, self.ctime, ctime_ns, self.mtime, mtime_ns, self.atime, atime_ns, self.uid, self.gid, self.size, self.mode, self.gitmode, self.sha, self.flags, self.children_ofs, self.children_n, self.meta_ofs ) = struct.unpack(INDEX_SIG, str(buffer(m, ofs, ENTLEN))) self.atime = xstat.timespec_to_nsecs((self.atime, atime_ns)) self.mtime = xstat.timespec_to_nsecs((self.mtime, mtime_ns)) self.ctime = xstat.timespec_to_nsecs((self.ctime, ctime_ns)) # effectively, we don't bother messing with IX_SHAMISSING if # not IX_HASHVALID, since it's redundant, and repacking is more # expensive than not repacking. # This is implemented by having sha_missing() check IX_HASHVALID too. def set_sha_missing(self, val): val = val and 1 or 0 oldval = self.sha_missing() and 1 or 0 if val != oldval: flag = val and IX_SHAMISSING or 0 newflags = (self.flags & (~IX_SHAMISSING)) | flag self.flags = newflags self.repack() def unset_sha_missing(self, flag): if self.flags & IX_SHAMISSING: self.flags &= ~IX_SHAMISSING self.repack() def repack(self): self._m[self._ofs:self._ofs+ENTLEN] = self.packed() if self.parent and not self.is_valid(): self.parent.invalidate() self.parent.repack() def iter(self, name=None, wantrecurse=None): dname = name if dname and not dname.endswith('/'): dname += '/' ofs = self.children_ofs assert(ofs <= len(self._m)) assert(self.children_n < 1000000) for i in xrange(self.children_n): eon = self._m.find('\0', ofs) assert(eon >= 0) assert(eon >= ofs) assert(eon > ofs) basename = str(buffer(self._m, ofs, eon-ofs)) child = ExistingEntry(self, basename, self.name + basename, self._m, eon+1) if (not dname or child.name.startswith(dname) or child.name.endswith('/') and 
dname.startswith(child.name)): if not wantrecurse or wantrecurse(child): for e in child.iter(name=name, wantrecurse=wantrecurse): yield e if not name or child.name == name or child.name.startswith(dname): yield child ofs = eon + 1 + ENTLEN def __iter__(self): return self.iter() class Reader: def __init__(self, filename): self.filename = filename self.m = '' self.writable = False self.count = 0 f = None try: f = open(filename, 'r+') except IOError, e: if e.errno == errno.ENOENT: pass else: raise if f: b = f.read(len(INDEX_HDR)) if b != INDEX_HDR: log('warning: %s: header: expected %r, got %r\n' % (filename, INDEX_HDR, b)) else: st = os.fstat(f.fileno()) if st.st_size: self.m = mmap_readwrite(f) self.writable = True self.count = struct.unpack(FOOTER_SIG, str(buffer(self.m, st.st_size-FOOTLEN, FOOTLEN)))[0] def __del__(self): self.close() def __len__(self): return int(self.count) def forward_iter(self): ofs = len(INDEX_HDR) while ofs+ENTLEN <= len(self.m)-FOOTLEN: eon = self.m.find('\0', ofs) assert(eon >= 0) assert(eon >= ofs) assert(eon > ofs) basename = str(buffer(self.m, ofs, eon-ofs)) yield ExistingEntry(None, basename, basename, self.m, eon+1) ofs = eon + 1 + ENTLEN def iter(self, name=None, wantrecurse=None): if len(self.m) > len(INDEX_HDR)+ENTLEN: dname = name if dname and not dname.endswith('/'): dname += '/' root = ExistingEntry(None, '/', '/', self.m, len(self.m)-FOOTLEN-ENTLEN) for sub in root.iter(name=name, wantrecurse=wantrecurse): yield sub if not dname or dname == root.name: yield root def __iter__(self): return self.iter() def exists(self): return self.m def save(self): if self.writable and self.m: self.m.flush() def close(self): self.save() if self.writable and self.m: self.m.close() self.m = None self.writable = False def filter(self, prefixes, wantrecurse=None): for (rp, path) in reduce_paths(prefixes): for e in self.iter(rp, wantrecurse=wantrecurse): assert(e.name.startswith(rp)) name = path + e.name[len(rp):] yield (name, e) # FIXME: this 
function isn't very generic, because it splits the filename # in an odd way and depends on a terminating '/' to indicate directories. def pathsplit(p): """Split a path into a list of elements of the file system hierarchy.""" l = p.split('/') l = [i+'/' for i in l[:-1]] + l[-1:] if l[-1] == '': l.pop() # extra blank caused by terminating '/' return l class Writer: def __init__(self, filename, metastore, tmax): self.rootlevel = self.level = Level([], None) self.f = None self.count = 0 self.lastfile = None self.filename = None self.filename = filename = realpath(filename) self.metastore = metastore self.tmax = tmax (dir,name) = os.path.split(filename) (ffd,self.tmpname) = tempfile.mkstemp('.tmp', filename, dir) self.f = os.fdopen(ffd, 'wb', 65536) self.f.write(INDEX_HDR) def __del__(self): self.abort() def abort(self): f = self.f self.f = None if f: f.close() os.unlink(self.tmpname) def flush(self): if self.level: self.level = _golevel(self.level, self.f, [], None, self.metastore, self.tmax) self.count = self.rootlevel.count if self.count: self.count += 1 self.f.write(struct.pack(FOOTER_SIG, self.count)) self.f.flush() assert(self.level == None) def close(self): self.flush() f = self.f self.f = None if f: f.close() os.rename(self.tmpname, self.filename) def _add(self, ename, entry): if self.lastfile and self.lastfile <= ename: raise Error('%r must come before %r' % (''.join(e.name), ''.join(self.lastfile))) self.lastfile = ename self.level = _golevel(self.level, self.f, ename, entry, self.metastore, self.tmax) def add(self, name, st, meta_ofs, hashgen = None): endswith = name.endswith('/') ename = pathsplit(name) basename = ename[-1] #log('add: %r %r\n' % (basename, name)) flags = IX_EXISTS sha = None if hashgen: (gitmode, sha) = hashgen(name) flags |= IX_HASHVALID else: (gitmode, sha) = (0, EMPTY_SHA) if st: isdir = stat.S_ISDIR(st.st_mode) assert(isdir == endswith) e = NewEntry(basename, name, self.tmax, st.st_dev, st.st_ino, st.st_nlink, st.st_ctime, st.st_mtime, 
st.st_atime, st.st_uid, st.st_gid, st.st_size, st.st_mode, gitmode, sha, flags, meta_ofs, 0, 0) else: assert(endswith) meta_ofs = self.metastore.store(metadata.Metadata()) e = BlankNewEntry(basename, meta_ofs, self.tmax) e.gitmode = gitmode e.sha = sha e.flags = flags self._add(ename, e) def add_ixentry(self, e): e.children_ofs = e.children_n = 0 self._add(pathsplit(e.name), e) def new_reader(self): self.flush() return Reader(self.tmpname) def reduce_paths(paths): xpaths = [] for p in paths: rp = realpath(p) try: st = os.lstat(rp) if stat.S_ISDIR(st.st_mode): rp = slashappend(rp) p = slashappend(p) xpaths.append((rp, p)) except OSError, e: add_error('reduce_paths: %s' % e) xpaths.sort() paths = [] prev = None for (rp, p) in xpaths: if prev and (prev == rp or (prev.endswith('/') and rp.startswith(prev))): continue # already superceded by previous path paths.append((rp, p)) prev = rp paths.sort(reverse=True) return paths def merge(*iters): def pfunc(count, total): qprogress('bup: merging indexes (%d/%d)\r' % (count, total)) def pfinal(count, total): progress('bup: merging indexes (%d/%d), done.\n' % (count, total)) return merge_iter(iters, 1024, pfunc, pfinal, key='name') bup-0.25/lib/bup/ls.py000066400000000000000000000047631225146730500145710ustar00rootroot00000000000000"""Common code for listing files from a bup repository.""" import stat from bup import options, vfs from helpers import * def node_name(text, n, show_hash = False, show_filesize = False, filesize = None, human_readable = False): """Add symbols to a node's name to differentiate file types.""" prefix = '' if show_hash: prefix += "%s " % n.hash.encode('hex') if show_filesize: if human_readable: prefix += "%10s " % format_filesize(filesize) else: prefix += "%14d " % filesize if stat.S_ISDIR(n.mode): return '%s%s/' % (prefix, text) elif stat.S_ISLNK(n.mode): return '%s%s@' % (prefix, text) else: return '%s%s' % (prefix, text) optspec = """ %sls [-a] [path...] 
-- s,hash show hash for each file a,all show hidden files l show file sizes human-readable print human readable file sizes (i.e. 3.9K, 4.7M) """ def do_ls(args, pwd, default='.', onabort=None, spec_prefix=''): """Output a listing of a file or directory in the bup repository. When stdout is attached to a tty, the output is formatted in columns. When not attached to tty (for example when the output is piped to another command), one file is listed per line. """ if onabort: o = options.Options(optspec % spec_prefix, onabort=onabort) else: o = options.Options(optspec % spec_prefix) (opt, flags, extra) = o.parse(args) L = [] ret = 0 for path in (extra or [default]): try: n = pwd.try_resolve(path) if stat.S_ISDIR(n.mode): for sub in n: name = sub.name fsize = sub.size() if opt.l else None nname = node_name(name, sub, opt.hash, opt.l, fsize, opt.human_readable) if opt.all or not len(name)>1 or not name.startswith('.'): if istty1: L.append(nname) else: print nname else: nname = node_name(path, n, opt.hash, opt.l, None, opt.human_readable) if istty1: L.append(nname) else: print nname except vfs.NodeError, e: log('error: %s\n' % e) ret = 1 if istty1: sys.stdout.write(columnate(L, '')) return ret bup-0.25/lib/bup/metadata.py000066400000000000000000001127071225146730500157310ustar00rootroot00000000000000"""Metadata read/write support for bup.""" # Copyright (C) 2010 Rob Browning # # This code is covered under the terms of the GNU Library General # Public License as described in the bup LICENSE file. 
import errno, os, sys, stat, time, pwd, grp, socket from cStringIO import StringIO from bup import vint, xstat from bup.drecurse import recursive_dirlist from bup.helpers import add_error, mkdirp, log, is_superuser from bup.helpers import pwd_from_uid, pwd_from_name, grp_from_gid, grp_from_name from bup.xstat import utime, lutime xattr = None if sys.platform.startswith('linux'): try: import xattr except ImportError: log('Warning: Linux xattr support missing; install python-pyxattr.\n') if xattr: try: xattr.get_all except AttributeError: log('Warning: python-xattr module is too old; ' 'install python-pyxattr instead.\n') xattr = None posix1e = None if not (sys.platform.startswith('cygwin') \ or sys.platform.startswith('darwin') \ or sys.platform.startswith('netbsd')): try: import posix1e except ImportError: log('Warning: POSIX ACL support missing; install python-pylibacl.\n') try: from bup._helpers import get_linux_file_attr, set_linux_file_attr except ImportError: # No need for a warning here; the only reason they won't exist is that we're # not on Linux, in which case files don't have any linux attrs anyway, so # lacking the functions isn't a problem. get_linux_file_attr = set_linux_file_attr = None # WARNING: the metadata encoding is *not* stable yet. Caveat emptor! # Q: Consider hardlink support? # Q: Is it OK to store raw linux attr (chattr) flags? # Q: Can anything other than S_ISREG(x) or S_ISDIR(x) support posix1e ACLs? # Q: Is the application of posix1e has_extended() correct? # Q: Is one global --numeric-ids argument sufficient? # Q: Do nfsv4 acls trump posix1e acls? (seems likely) # Q: Add support for crtime -- ntfs, and (only internally?) ext*? # FIXME: Fix relative/abs path detection/stripping wrt other platforms. # FIXME: Add nfsv4 acl handling - see nfs4-acl-tools. # FIXME: Consider other entries mentioned in stat(2) (S_IFDOOR, etc.). # FIXME: Consider pack('vvvvsss', ...) optimization. # FIXME: Consider caching users/groups. 
## FS notes: # # osx (varies between hfs and hfs+): # type - regular dir char block fifo socket ... # perms - rwxrwxrwxsgt # times - ctime atime mtime # uid # gid # hard-link-info (hfs+ only) # link-target # device-major/minor # attributes-osx see chflags # content-type # content-creator # forks # # ntfs # type - regular dir ... # times - creation, modification, posix change, access # hard-link-info # link-target # attributes - see attrib # ACLs # forks (alternate data streams) # crtime? # # fat # type - regular dir ... # perms - rwxrwxrwx (maybe - see wikipedia) # times - creation, modification, access # attributes - see attrib verbose = 0 _have_lchmod = hasattr(os, 'lchmod') def _clean_up_path_for_archive(p): # Not the most efficient approach. result = p # Take everything after any '/../'. pos = result.rfind('/../') if pos != -1: result = result[result.rfind('/../') + 4:] # Take everything after any remaining '../'. if result.startswith("../"): result = result[3:] # Remove any '/./' sequences. pos = result.find('/./') while pos != -1: result = result[0:pos] + '/' + result[pos + 3:] pos = result.find('/./') # Remove any leading '/'s. result = result.lstrip('/') # Replace '//' with '/' everywhere. pos = result.find('//') while pos != -1: result = result[0:pos] + '/' + result[pos + 2:] pos = result.find('//') # Take everything after any remaining './'. if result.startswith('./'): result = result[2:] # Take everything before any remaining '/.'. if result.endswith('/.'): result = result[:-2] if result == '' or result.endswith('/..'): result = '.' return result def _risky_path(p): if p.startswith('/'): return True if p.find('/../') != -1: return True if p.startswith('../'): return True if p.endswith('/..'): return True return False def _clean_up_extract_path(p): result = p.lstrip('/') if result == '': return '.' 
elif _risky_path(result): return None else: return result # These tags are currently conceptually private to Metadata, and they # must be unique, and must *never* be changed. _rec_tag_end = 0 _rec_tag_path = 1 _rec_tag_common = 2 # times, user, group, type, perms, etc. (legacy/broken) _rec_tag_symlink_target = 3 _rec_tag_posix1e_acl = 4 # getfacl(1), setfacl(1), etc. _rec_tag_nfsv4_acl = 5 # intended to supplant posix1e? (unimplemented) _rec_tag_linux_attr = 6 # lsattr(1) chattr(1) _rec_tag_linux_xattr = 7 # getfattr(1) setfattr(1) _rec_tag_hardlink_target = 8 # hard link target path _rec_tag_common_v2 = 9 # times, user, group, type, perms, etc. (current) class ApplyError(Exception): # Thrown when unable to apply any given bit of metadata to a path. pass class Metadata: # Metadata is stored as a sequence of tagged binary records. Each # record will have some subset of add, encode, load, create, and # apply methods, i.e. _add_foo... # We do allow an "empty" object as a special case, i.e. no # records. One can be created by trying to write Metadata(), and # for such an object, read() will return None. This is used by # "bup save", for example, as a placeholder in cases where # from_path() fails. # NOTE: if any relevant fields are added or removed, be sure to # update same_file() below. ## Common records # Timestamps are (sec, ns), relative to 1970-01-01 00:00:00, ns # must be non-negative and < 10**9. def _add_common(self, path, st): self.uid = st.st_uid self.gid = st.st_gid self.atime = st.st_atime self.mtime = st.st_mtime self.ctime = st.st_ctime self.user = self.group = '' entry = pwd_from_uid(st.st_uid) if entry: self.user = entry.pw_name entry = grp_from_gid(st.st_gid) if entry: self.group = entry.gr_name self.mode = st.st_mode # Only collect st_rdev if we might need it for a mknod() # during restore. On some platforms (i.e. kFreeBSD), it isn't # stable for other file types. For example "cp -a" will # change it for a plain file. 
if stat.S_ISCHR(st.st_mode) or stat.S_ISBLK(st.st_mode): self.rdev = st.st_rdev else: self.rdev = 0 def _same_common(self, other): """Return true or false to indicate similarity in the hardlink sense.""" return self.uid == other.uid \ and self.gid == other.gid \ and self.rdev == other.rdev \ and self.mtime == other.mtime \ and self.ctime == other.ctime \ and self.user == other.user \ and self.group == other.group def _encode_common(self): if not self.mode: return None atime = xstat.nsecs_to_timespec(self.atime) mtime = xstat.nsecs_to_timespec(self.mtime) ctime = xstat.nsecs_to_timespec(self.ctime) result = vint.pack('vvsvsvvVvVvV', self.mode, self.uid, self.user, self.gid, self.group, self.rdev, atime[0], atime[1], mtime[0], mtime[1], ctime[0], ctime[1]) return result def _load_common_rec(self, port, legacy_format=False): unpack_fmt = 'vvsvsvvVvVvV' if legacy_format: unpack_fmt = 'VVsVsVvVvVvV' data = vint.read_bvec(port) (self.mode, self.uid, self.user, self.gid, self.group, self.rdev, self.atime, atime_ns, self.mtime, mtime_ns, self.ctime, ctime_ns) = vint.unpack(unpack_fmt, data) self.atime = xstat.timespec_to_nsecs((self.atime, atime_ns)) self.mtime = xstat.timespec_to_nsecs((self.mtime, mtime_ns)) self.ctime = xstat.timespec_to_nsecs((self.ctime, ctime_ns)) def _recognized_file_type(self): return stat.S_ISREG(self.mode) \ or stat.S_ISDIR(self.mode) \ or stat.S_ISCHR(self.mode) \ or stat.S_ISBLK(self.mode) \ or stat.S_ISFIFO(self.mode) \ or stat.S_ISSOCK(self.mode) \ or stat.S_ISLNK(self.mode) def _create_via_common_rec(self, path, create_symlinks=True): if not self.mode: raise ApplyError('no metadata - cannot create path ' + path) # If the path already exists and is a dir, try rmdir. # If the path already exists and is anything else, try unlink. 
st = None try: st = xstat.lstat(path) except OSError, e: if e.errno != errno.ENOENT: raise if st: if stat.S_ISDIR(st.st_mode): try: os.rmdir(path) except OSError, e: if e.errno in (errno.ENOTEMPTY, errno.EEXIST): msg = 'refusing to overwrite non-empty dir ' + path raise Exception(msg) raise else: os.unlink(path) if stat.S_ISREG(self.mode): assert(self._recognized_file_type()) fd = os.open(path, os.O_CREAT|os.O_WRONLY|os.O_EXCL, 0600) os.close(fd) elif stat.S_ISDIR(self.mode): assert(self._recognized_file_type()) os.mkdir(path, 0700) elif stat.S_ISCHR(self.mode): assert(self._recognized_file_type()) os.mknod(path, 0600 | stat.S_IFCHR, self.rdev) elif stat.S_ISBLK(self.mode): assert(self._recognized_file_type()) os.mknod(path, 0600 | stat.S_IFBLK, self.rdev) elif stat.S_ISFIFO(self.mode): assert(self._recognized_file_type()) os.mknod(path, 0600 | stat.S_IFIFO) elif stat.S_ISSOCK(self.mode): try: os.mknod(path, 0600 | stat.S_IFSOCK) except OSError, e: if e.errno in (errno.EINVAL, errno.EPERM): s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) s.bind(path) else: raise elif stat.S_ISLNK(self.mode): assert(self._recognized_file_type()) if self.symlink_target and create_symlinks: # on MacOS, symlink() permissions depend on umask, and there's # no way to chown a symlink after creating it, so we have to # be careful here! oldumask = os.umask((self.mode & 0777) ^ 0777) try: os.symlink(self.symlink_target, path) finally: os.umask(oldumask) # FIXME: S_ISDOOR, S_IFMPB, S_IFCMP, S_IFNWK, ... see stat(2). else: assert(not self._recognized_file_type()) add_error('not creating "%s" with unrecognized mode "0x%x"\n' % (path, self.mode)) def _apply_common_rec(self, path, restore_numeric_ids=False): if not self.mode: raise ApplyError('no metadata - cannot apply to ' + path) # FIXME: S_ISDOOR, S_IFMPB, S_IFCMP, S_IFNWK, ... see stat(2). # EACCES errors at this stage are fatal for the current path. 
if lutime and stat.S_ISLNK(self.mode): try: lutime(path, (self.atime, self.mtime)) except OSError, e: if e.errno == errno.EACCES: raise ApplyError('lutime: %s' % e) else: raise else: try: utime(path, (self.atime, self.mtime)) except OSError, e: if e.errno == errno.EACCES: raise ApplyError('utime: %s' % e) else: raise # Implement tar/rsync-like semantics; see bup-restore(1). # FIXME: should we consider caching user/group name <-> id # mappings, getgroups(), etc.? uid = gid = -1 # By default, do nothing. if is_superuser(): uid = self.uid gid = self.gid if not restore_numeric_ids: if self.uid != 0 and self.user: entry = pwd_from_name(self.user) if entry: uid = entry.pw_uid if self.gid != 0 and self.group: entry = grp_from_name(self.group) if entry: gid = entry.gr_gid else: # not superuser - only consider changing the group/gid user_gids = os.getgroups() if self.gid in user_gids: gid = self.gid if not restore_numeric_ids and self.gid != 0: # The grp might not exist on the local system. grps = filter(None, [grp_from_gid(x) for x in user_gids]) if self.group in [x.gr_name for x in grps]: g = grp_from_name(self.group) if g: gid = g.gr_gid if uid != -1 or gid != -1: try: os.lchown(path, uid, gid) except OSError, e: if e.errno == errno.EPERM: add_error('lchown: %s' % e) elif sys.platform.startswith('cygwin') \ and e.errno == errno.EINVAL: add_error('lchown: unknown uid/gid (%d/%d) for %s' % (uid, gid, path)) else: raise if _have_lchmod: os.lchmod(path, stat.S_IMODE(self.mode)) elif not stat.S_ISLNK(self.mode): os.chmod(path, stat.S_IMODE(self.mode)) ## Path records def _encode_path(self): if self.path: return vint.pack('s', self.path) else: return None def _load_path_rec(self, port): self.path = vint.unpack('s', vint.read_bvec(port))[0] ## Symlink targets def _add_symlink_target(self, path, st): try: if stat.S_ISLNK(st.st_mode): self.symlink_target = os.readlink(path) except OSError, e: add_error('readlink: %s', e) def _encode_symlink_target(self): return 
self.symlink_target def _load_symlink_target_rec(self, port): self.symlink_target = vint.read_bvec(port) ## Hardlink targets def _add_hardlink_target(self, target): self.hardlink_target = target def _same_hardlink_target(self, other): """Return true or false to indicate similarity in the hardlink sense.""" return self.hardlink_target == other.hardlink_target def _encode_hardlink_target(self): return self.hardlink_target def _load_hardlink_target_rec(self, port): self.hardlink_target = vint.read_bvec(port) ## POSIX1e ACL records # Recorded as a list: # [txt_id_acl, num_id_acl] # or, if a directory: # [txt_id_acl, num_id_acl, txt_id_default_acl, num_id_default_acl] # The numeric/text distinction only matters when reading/restoring # a stored record. def _add_posix1e_acl(self, path, st): if not posix1e: return if not stat.S_ISLNK(st.st_mode): acls = None def_acls = None try: if posix1e.has_extended(path): acl = posix1e.ACL(file=path) acls = [acl, acl] # txt and num are the same if stat.S_ISDIR(st.st_mode): def_acl = posix1e.ACL(filedef=path) def_acls = [def_acl, def_acl] except EnvironmentError, e: if e.errno not in (errno.EOPNOTSUPP, errno.ENOSYS): raise if acls: txt_flags = posix1e.TEXT_ABBREVIATE num_flags = posix1e.TEXT_ABBREVIATE | posix1e.TEXT_NUMERIC_IDS acl_rep = [acls[0].to_any_text('', '\n', txt_flags), acls[1].to_any_text('', '\n', num_flags)] if def_acls: acl_rep.append(def_acls[0].to_any_text('', '\n', txt_flags)) acl_rep.append(def_acls[1].to_any_text('', '\n', num_flags)) self.posix1e_acl = acl_rep def _same_posix1e_acl(self, other): """Return true or false to indicate similarity in the hardlink sense.""" return self.posix1e_acl == other.posix1e_acl def _encode_posix1e_acl(self): # Encode as two strings (w/default ACL string possibly empty). 
if self.posix1e_acl: acls = self.posix1e_acl if len(acls) == 2: acls.extend(['', '']) return vint.pack('ssss', acls[0], acls[1], acls[2], acls[3]) else: return None def _load_posix1e_acl_rec(self, port): acl_rep = vint.unpack('ssss', vint.read_bvec(port)) if acl_rep[2] == '': acl_rep = acl_rep[:2] self.posix1e_acl = acl_rep def _apply_posix1e_acl_rec(self, path, restore_numeric_ids=False): def apply_acl(acl_rep, kind): try: acl = posix1e.ACL(text = acl_rep) except IOError, e: if e.errno == 0: # pylibacl appears to return an IOError with errno # set to 0 if a group referred to by the ACL rep # doesn't exist on the current system. raise ApplyError("POSIX1e ACL: can't create %r for %r" % (acl_rep, path)) else: raise try: acl.applyto(path, kind) except IOError, e: if e.errno == errno.EPERM or e.errno == errno.EOPNOTSUPP: raise ApplyError('POSIX1e ACL applyto: %s' % e) else: raise if not posix1e: if self.posix1e_acl: add_error("%s: can't restore ACLs; posix1e support missing.\n" % path) return if self.posix1e_acl: acls = self.posix1e_acl if len(acls) > 2: if restore_numeric_ids: apply_acl(acls[3], posix1e.ACL_TYPE_DEFAULT) else: apply_acl(acls[2], posix1e.ACL_TYPE_DEFAULT) if restore_numeric_ids: apply_acl(acls[1], posix1e.ACL_TYPE_ACCESS) else: apply_acl(acls[0], posix1e.ACL_TYPE_ACCESS) ## Linux attributes (lsattr(1), chattr(1)) def _add_linux_attr(self, path, st): if not get_linux_file_attr: return if stat.S_ISREG(st.st_mode) or stat.S_ISDIR(st.st_mode): try: attr = get_linux_file_attr(path) if attr != 0: self.linux_attr = attr except OSError, e: if e.errno == errno.EACCES: add_error('read Linux attr: %s' % e) elif e.errno in (errno.ENOTTY, errno.ENOSYS, errno.EOPNOTSUPP): # Assume filesystem doesn't support attrs. 
return else: raise def _same_linux_attr(self, other): """Return true or false to indicate similarity in the hardlink sense.""" return self.linux_attr == other.linux_attr def _encode_linux_attr(self): if self.linux_attr: return vint.pack('V', self.linux_attr) else: return None def _load_linux_attr_rec(self, port): data = vint.read_bvec(port) self.linux_attr = vint.unpack('V', data)[0] def _apply_linux_attr_rec(self, path, restore_numeric_ids=False): if self.linux_attr: if not set_linux_file_attr: add_error("%s: can't restore linuxattrs: " "linuxattr support missing.\n" % path) return try: set_linux_file_attr(path, self.linux_attr) except OSError, e: if e.errno in (errno.ENOTTY, errno.EOPNOTSUPP, errno.ENOSYS, errno.EACCES): raise ApplyError('Linux chattr: %s (0x%s)' % (e, hex(self.linux_attr))) else: raise ## Linux extended attributes (getfattr(1), setfattr(1)) def _add_linux_xattr(self, path, st): if not xattr: return try: self.linux_xattr = xattr.get_all(path, nofollow=True) except EnvironmentError, e: if e.errno != errno.EOPNOTSUPP: raise def _same_linux_xattr(self, other): """Return true or false to indicate similarity in the hardlink sense.""" return self.linux_xattr == other.linux_xattr def _encode_linux_xattr(self): if self.linux_xattr: result = vint.pack('V', len(self.linux_xattr)) for name, value in self.linux_xattr: result += vint.pack('ss', name, value) return result else: return None def _load_linux_xattr_rec(self, file): data = vint.read_bvec(file) memfile = StringIO(data) result = [] for i in range(vint.read_vuint(memfile)): key = vint.read_bvec(memfile) value = vint.read_bvec(memfile) result.append((key, value)) self.linux_xattr = result def _apply_linux_xattr_rec(self, path, restore_numeric_ids=False): if not xattr: if self.linux_xattr: add_error("%s: can't restore xattr; xattr support missing.\n" % path) return if not self.linux_xattr: return try: existing_xattrs = set(xattr.list(path, nofollow=True)) except IOError, e: if e.errno == errno.EACCES: 
raise ApplyError('xattr.set: %s' % e) else: raise for k, v in self.linux_xattr: if k not in existing_xattrs \ or v != xattr.get(path, k, nofollow=True): try: xattr.set(path, k, v, nofollow=True) except IOError, e: if e.errno == errno.EPERM \ or e.errno == errno.EOPNOTSUPP: raise ApplyError('xattr.set: %s' % e) else: raise existing_xattrs -= frozenset([k]) for k in existing_xattrs: try: xattr.remove(path, k, nofollow=True) except IOError, e: if e.errno == errno.EPERM: raise ApplyError('xattr.remove: %s' % e) else: raise def __init__(self): self.mode = None # optional members self.path = None self.size = None self.symlink_target = None self.hardlink_target = None self.linux_attr = None self.linux_xattr = None self.posix1e_acl = None def write(self, port, include_path=True): records = include_path and [(_rec_tag_path, self._encode_path())] or [] records.extend([(_rec_tag_common_v2, self._encode_common()), (_rec_tag_symlink_target, self._encode_symlink_target()), (_rec_tag_hardlink_target, self._encode_hardlink_target()), (_rec_tag_posix1e_acl, self._encode_posix1e_acl()), (_rec_tag_linux_attr, self._encode_linux_attr()), (_rec_tag_linux_xattr, self._encode_linux_xattr())]) for tag, data in records: if data: vint.write_vuint(port, tag) vint.write_bvec(port, data) vint.write_vuint(port, _rec_tag_end) def encode(self, include_path=True): port = StringIO() self.write(port, include_path) return port.getvalue() @staticmethod def read(port): # This method should either return a valid Metadata object, # return None if there was no information at all (just a # _rec_tag_end), throw EOFError if there was nothing at all to # read, or throw an Exception if a valid object could not be # read completely. tag = vint.read_vuint(port) if tag == _rec_tag_end: return None try: # From here on, EOF is an error. 
result = Metadata() while True: # only exit is error (exception) or _rec_tag_end if tag == _rec_tag_path: result._load_path_rec(port) elif tag == _rec_tag_common_v2: result._load_common_rec(port) elif tag == _rec_tag_symlink_target: result._load_symlink_target_rec(port) elif tag == _rec_tag_hardlink_target: result._load_hardlink_target_rec(port) elif tag == _rec_tag_posix1e_acl: result._load_posix1e_acl_rec(port) elif tag == _rec_tag_linux_attr: result._load_linux_attr_rec(port) elif tag == _rec_tag_linux_xattr: result._load_linux_xattr_rec(port) elif tag == _rec_tag_end: return result elif tag == _rec_tag_common: # Should be very rare. result._load_common_rec(port, legacy_format = True) else: # unknown record vint.skip_bvec(port) tag = vint.read_vuint(port) except EOFError: raise Exception("EOF while reading Metadata") def isdir(self): return stat.S_ISDIR(self.mode) def create_path(self, path, create_symlinks=True): self._create_via_common_rec(path, create_symlinks=create_symlinks) def apply_to_path(self, path=None, restore_numeric_ids=False): # apply metadata to path -- file must exist if not path: path = self.path if not path: raise Exception('Metadata.apply_to_path() called with no path') if not self._recognized_file_type(): add_error('not applying metadata to "%s"' % path + ' with unrecognized mode "0x%x"\n' % self.mode) return num_ids = restore_numeric_ids for apply_metadata in (self._apply_common_rec, self._apply_posix1e_acl_rec, self._apply_linux_attr_rec, self._apply_linux_xattr_rec): try: apply_metadata(path, restore_numeric_ids=num_ids) except ApplyError, e: add_error(e) def same_file(self, other): """Compare this to other for equivalency. Return true if their information implies they could represent the same file on disk, in the hardlink sense. 
Assume they're both regular files.""" return self._same_common(other) \ and self._same_hardlink_target(other) \ and self._same_posix1e_acl(other) \ and self._same_linux_attr(other) \ and self._same_linux_xattr(other) def from_path(path, statinfo=None, archive_path=None, save_symlinks=True, hardlink_target=None): result = Metadata() result.path = archive_path st = statinfo or xstat.lstat(path) result.size = st.st_size result._add_common(path, st) if save_symlinks: result._add_symlink_target(path, st) result._add_hardlink_target(hardlink_target) result._add_posix1e_acl(path, st) result._add_linux_attr(path, st) result._add_linux_xattr(path, st) return result def save_tree(output_file, paths, recurse=False, write_paths=True, save_symlinks=True, xdev=False): # Issue top-level rewrite warnings. for path in paths: safe_path = _clean_up_path_for_archive(path) if safe_path != path: log('archiving "%s" as "%s"\n' % (path, safe_path)) if not recurse: for p in paths: safe_path = _clean_up_path_for_archive(p) st = xstat.lstat(p) if stat.S_ISDIR(st.st_mode): safe_path += '/' m = from_path(p, statinfo=st, archive_path=safe_path, save_symlinks=save_symlinks) if verbose: print >> sys.stderr, m.path m.write(output_file, include_path=write_paths) else: start_dir = os.getcwd() try: for (p, st) in recursive_dirlist(paths, xdev=xdev): dirlist_dir = os.getcwd() os.chdir(start_dir) safe_path = _clean_up_path_for_archive(p) m = from_path(p, statinfo=st, archive_path=safe_path, save_symlinks=save_symlinks) if verbose: print >> sys.stderr, m.path m.write(output_file, include_path=write_paths) os.chdir(dirlist_dir) finally: os.chdir(start_dir) def _set_up_path(meta, create_symlinks=True): # Allow directories to exist as a special case -- might have # been created by an earlier longer path. 
if meta.isdir(): mkdirp(meta.path) else: parent = os.path.dirname(meta.path) if parent: mkdirp(parent) meta.create_path(meta.path, create_symlinks=create_symlinks) all_fields = frozenset(['path', 'mode', 'link-target', 'rdev', 'size', 'uid', 'gid', 'user', 'group', 'atime', 'mtime', 'ctime', 'linux-attr', 'linux-xattr', 'posix1e-acl']) def summary_str(meta): mode_val = xstat.mode_str(meta.mode) user_val = meta.user if not user_val: user_val = str(meta.uid) group_val = meta.group if not group_val: group_val = str(meta.gid) size_or_dev_val = '-' if stat.S_ISCHR(meta.mode) or stat.S_ISBLK(meta.mode): size_or_dev_val = '%d,%d' % (os.major(meta.rdev), os.minor(meta.rdev)) elif meta.size: size_or_dev_val = meta.size mtime_secs = xstat.fstime_floor_secs(meta.mtime) time_val = time.strftime('%Y-%m-%d %H:%M', time.localtime(mtime_secs)) path_val = meta.path or '' if stat.S_ISLNK(meta.mode): path_val += ' -> ' + meta.symlink_target return '%-10s %-11s %11s %16s %s' % (mode_val, user_val + "/" + group_val, size_or_dev_val, time_val, path_val) def detailed_str(meta, fields = None): # FIXME: should optional fields be omitted, or empty i.e. "rdev: # 0", "link-target:", etc. 
if not fields: fields = all_fields result = [] if 'path' in fields: path = meta.path or '' result.append('path: ' + path) if 'mode' in fields: result.append('mode: %s (%s)' % (oct(meta.mode), xstat.mode_str(meta.mode))) if 'link-target' in fields and stat.S_ISLNK(meta.mode): result.append('link-target: ' + meta.symlink_target) if 'rdev' in fields: if meta.rdev: result.append('rdev: %d,%d' % (os.major(meta.rdev), os.minor(meta.rdev))) else: result.append('rdev: 0') if 'size' in fields and meta.size: result.append('size: ' + str(meta.size)) if 'uid' in fields: result.append('uid: ' + str(meta.uid)) if 'gid' in fields: result.append('gid: ' + str(meta.gid)) if 'user' in fields: result.append('user: ' + meta.user) if 'group' in fields: result.append('group: ' + meta.group) if 'atime' in fields: # If we don't have xstat.lutime, that means we have to use # utime(), and utime() has no way to set the mtime/atime of a # symlink. Thus, the mtime/atime of a symlink is meaningless, # so let's not report it. (That way scripts comparing # before/after won't trigger.) 
if xstat.lutime or not stat.S_ISLNK(meta.mode): result.append('atime: ' + xstat.fstime_to_sec_str(meta.atime)) else: result.append('atime: 0') if 'mtime' in fields: if xstat.lutime or not stat.S_ISLNK(meta.mode): result.append('mtime: ' + xstat.fstime_to_sec_str(meta.mtime)) else: result.append('mtime: 0') if 'ctime' in fields: result.append('ctime: ' + xstat.fstime_to_sec_str(meta.ctime)) if 'linux-attr' in fields and meta.linux_attr: result.append('linux-attr: ' + hex(meta.linux_attr)) if 'linux-xattr' in fields and meta.linux_xattr: for name, value in meta.linux_xattr: result.append('linux-xattr: %s -> %s' % (name, repr(value))) if 'posix1e-acl' in fields and meta.posix1e_acl: acl = meta.posix1e_acl[0] result.append('posix1e-acl: ' + acl + '\n') if stat.S_ISDIR(meta.mode): def_acl = meta.posix1e_acl[2] result.append('posix1e-acl-default: ' + def_acl + '\n') return '\n'.join(result) class _ArchiveIterator: def next(self): try: return Metadata.read(self._file) except EOFError: raise StopIteration() def __iter__(self): return self def __init__(self, file): self._file = file def display_archive(file): if verbose > 1: first_item = True for meta in _ArchiveIterator(file): if not first_item: print print detailed_str(meta) first_item = False elif verbose > 0: for meta in _ArchiveIterator(file): print summary_str(meta) elif verbose == 0: for meta in _ArchiveIterator(file): if not meta.path: print >> sys.stderr, \ 'bup: no metadata path, but asked to only display path', \ '(increase verbosity?)' sys.exit(1) print meta.path def start_extract(file, create_symlinks=True): for meta in _ArchiveIterator(file): if not meta: # Hit end record. 
break if verbose: print >> sys.stderr, meta.path xpath = _clean_up_extract_path(meta.path) if not xpath: add_error(Exception('skipping risky path "%s"' % meta.path)) else: meta.path = xpath _set_up_path(meta, create_symlinks=create_symlinks) def finish_extract(file, restore_numeric_ids=False): all_dirs = [] for meta in _ArchiveIterator(file): if not meta: # Hit end record. break xpath = _clean_up_extract_path(meta.path) if not xpath: add_error(Exception('skipping risky path "%s"' % dir.path)) else: if os.path.isdir(meta.path): all_dirs.append(meta) else: if verbose: print >> sys.stderr, meta.path meta.apply_to_path(path=xpath, restore_numeric_ids=restore_numeric_ids) all_dirs.sort(key = lambda x : len(x.path), reverse=True) for dir in all_dirs: # Don't need to check xpath -- won't be in all_dirs if not OK. xpath = _clean_up_extract_path(dir.path) if verbose: print >> sys.stderr, dir.path dir.apply_to_path(path=xpath, restore_numeric_ids=restore_numeric_ids) def extract(file, restore_numeric_ids=False, create_symlinks=True): # For now, just store all the directories and handle them last, # longest first. all_dirs = [] for meta in _ArchiveIterator(file): if not meta: # Hit end record. break xpath = _clean_up_extract_path(meta.path) if not xpath: add_error(Exception('skipping risky path "%s"' % meta.path)) else: meta.path = xpath if verbose: print >> sys.stderr, '+', meta.path _set_up_path(meta, create_symlinks=create_symlinks) if os.path.isdir(meta.path): all_dirs.append(meta) else: if verbose: print >> sys.stderr, '=', meta.path meta.apply_to_path(restore_numeric_ids=restore_numeric_ids) all_dirs.sort(key = lambda x : len(x.path), reverse=True) for dir in all_dirs: # Don't need to check xpath -- won't be in all_dirs if not OK. xpath = _clean_up_extract_path(dir.path) if verbose: print >> sys.stderr, '=', xpath # Shouldn't have to check for risky paths here (omitted above). 
dir.apply_to_path(path=dir.path, restore_numeric_ids=restore_numeric_ids) bup-0.25/lib/bup/midx.py000066400000000000000000000075371225146730500151160ustar00rootroot00000000000000import mmap from bup import _helpers from bup.helpers import * MIDX_VERSION = 4 extract_bits = _helpers.extract_bits _total_searches = 0 _total_steps = 0 class PackMidx: """Wrapper which contains data from multiple index files. Multiple index (.midx) files constitute a wrapper around index (.idx) files and make it possible for bup to expand Git's indexing capabilities to vast amounts of files. """ def __init__(self, filename): self.name = filename self.force_keep = False assert(filename.endswith('.midx')) self.map = mmap_read(open(filename)) if str(self.map[0:4]) != 'MIDX': log('Warning: skipping: invalid MIDX header in %r\n' % filename) self.force_keep = True return self._init_failed() ver = struct.unpack('!I', self.map[4:8])[0] if ver < MIDX_VERSION: log('Warning: ignoring old-style (v%d) midx %r\n' % (ver, filename)) self.force_keep = False # old stuff is boring return self._init_failed() if ver > MIDX_VERSION: log('Warning: ignoring too-new (v%d) midx %r\n' % (ver, filename)) self.force_keep = True # new stuff is exciting return self._init_failed() self.bits = _helpers.firstword(self.map[8:12]) self.entries = 2**self.bits self.fanout = buffer(self.map, 12, self.entries*4) self.sha_ofs = 12 + self.entries*4 self.nsha = nsha = self._fanget(self.entries-1) self.shatable = buffer(self.map, self.sha_ofs, nsha*20) self.which_ofs = self.sha_ofs + 20*nsha self.whichlist = buffer(self.map, self.which_ofs, nsha*4) self.idxnames = str(self.map[self.which_ofs + 4*nsha:]).split('\0') def _init_failed(self): self.bits = 0 self.entries = 1 self.fanout = buffer('\0\0\0\0') self.shatable = buffer('\0'*20) self.idxnames = [] def _fanget(self, i): start = i*4 s = self.fanout[start:start+4] return _helpers.firstword(s) def _get(self, i): return str(self.shatable[i*20:(i+1)*20]) def _get_idx_i(self, i): 
return struct.unpack('!I', self.whichlist[i*4:(i+1)*4])[0] def _get_idxname(self, i): return self.idxnames[self._get_idx_i(i)] def exists(self, hash, want_source=False): """Return nonempty if the object exists in the index files.""" global _total_searches, _total_steps _total_searches += 1 want = str(hash) el = extract_bits(want, self.bits) if el: start = self._fanget(el-1) startv = el << (32-self.bits) else: start = 0 startv = 0 end = self._fanget(el) endv = (el+1) << (32-self.bits) _total_steps += 1 # lookup table is a step hashv = _helpers.firstword(hash) #print '(%08x) %08x %08x %08x' % (extract_bits(want, 32), startv, hashv, endv) while start < end: _total_steps += 1 #print '! %08x %08x %08x %d - %d' % (startv, hashv, endv, start, end) mid = start + (hashv-startv)*(end-start-1)/(endv-startv) #print ' %08x %08x %08x %d %d %d' % (startv, hashv, endv, start, mid, end) v = self._get(mid) #print ' %08x' % self._num(v) if v < want: start = mid+1 startv = _helpers.firstword(v) elif v > want: end = mid endv = _helpers.firstword(v) else: # got it! return want_source and self._get_idxname(mid) or True return None def __iter__(self): for i in xrange(self._fanget(self.entries-1)): yield buffer(self.shatable, i*20, 20) def __len__(self): return int(self._fanget(self.entries-1)) bup-0.25/lib/bup/options.py000066400000000000000000000234661225146730500156470ustar00rootroot00000000000000# Copyright 2010-2012 Avery Pennarun and options.py contributors. # All rights reserved. # # (This license applies to this file but not necessarily the other files in # this package.) # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # THIS SOFTWARE IS PROVIDED BY AVERY PENNARUN ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # """Command-line options parser. With the help of an options spec string, easily parse command-line options. An options spec is made up of two parts, separated by a line with two dashes. The first part is the synopsis of the command and the second one specifies options, one per line. Each non-empty line in the synopsis gives a set of options that can be used together. Option flags must be at the begining of the line and multiple flags are separated by commas. Usually, options have a short, one character flag, and a longer one, but the short one can be omitted. Long option flags are used as the option's key for the OptDict produced when parsing options. When the flag definition is ended with an equal sign, the option takes one string as an argument, and that string will be converted to an integer when possible. Otherwise, the option does not take an argument and corresponds to a boolean flag that is true when the option is given on the command line. 
The option's description is found at the right of its flags definition, after one or more spaces. The description ends at the end of the line. If the description contains text enclosed in square brackets, the enclosed text will be used as the option's default value. Options can be put in different groups. Options in the same group must be on consecutive lines. Groups are formed by inserting a line that begins with a space. The text on that line will be output after an empty line. """ import sys, os, textwrap, getopt, re, struct def _invert(v, invert): if invert: return not v return v def _remove_negative_kv(k, v): if k.startswith('no-') or k.startswith('no_'): return k[3:], not v return k,v class OptDict(object): """Dictionary that exposes keys as attributes. Keys can be set or accessed with a "no-" or "no_" prefix to negate the value. """ def __init__(self, aliases): self._opts = {} self._aliases = aliases def _unalias(self, k): k, reinvert = _remove_negative_kv(k, False) k, invert = self._aliases[k] return k, invert ^ reinvert def __setitem__(self, k, v): k, invert = self._unalias(k) self._opts[k] = _invert(v, invert) def __getitem__(self, k): k, invert = self._unalias(k) return _invert(self._opts[k], invert) def __getattr__(self, k): return self[k] def _default_onabort(msg): sys.exit(97) def _intify(v): try: vv = int(v or '') if str(vv) == v: return vv except ValueError: pass return v def _atoi(v): try: return int(v or 0) except ValueError: return 0 def _tty_width(): s = struct.pack("HHHH", 0, 0, 0, 0) try: import fcntl, termios s = fcntl.ioctl(sys.stderr.fileno(), termios.TIOCGWINSZ, s) except (IOError, ImportError): return _atoi(os.environ.get('WIDTH')) or 70 (ysize,xsize,ypix,xpix) = struct.unpack('HHHH', s) return xsize or 70 class Options: """Option parser. When constructed, a string called an option spec must be given. It specifies the synopsis and option flags and their description. 
For more information about option specs, see the docstring at the top of this file. Two optional arguments specify an alternative parsing function and an alternative behaviour on abort (after having output the usage string). By default, the parser function is getopt.gnu_getopt, and the abort behaviour is to exit the program. """ def __init__(self, optspec, optfunc=getopt.gnu_getopt, onabort=_default_onabort): self.optspec = optspec self._onabort = onabort self.optfunc = optfunc self._aliases = {} self._shortopts = 'h?' self._longopts = ['help', 'usage'] self._hasparms = {} self._defaults = {} self._usagestr = self._gen_usage() # this also parses the optspec def _gen_usage(self): out = [] lines = self.optspec.strip().split('\n') lines.reverse() first_syn = True while lines: l = lines.pop() if l == '--': break out.append('%s: %s\n' % (first_syn and 'usage' or ' or', l)) first_syn = False out.append('\n') last_was_option = False while lines: l = lines.pop() if l.startswith(' '): out.append('%s%s\n' % (last_was_option and '\n' or '', l.lstrip())) last_was_option = False elif l: (flags,extra) = (l + ' ').split(' ', 1) extra = extra.strip() if flags.endswith('='): flags = flags[:-1] has_parm = 1 else: has_parm = 0 g = re.search(r'\[([^\]]*)\]$', extra) if g: defval = _intify(g.group(1)) else: defval = None flagl = flags.split(',') flagl_nice = [] flag_main, invert_main = _remove_negative_kv(flagl[0], False) self._defaults[flag_main] = _invert(defval, invert_main) for _f in flagl: f,invert = _remove_negative_kv(_f, 0) self._aliases[f] = (flag_main, invert_main ^ invert) self._hasparms[f] = has_parm if f == '#': self._shortopts += '0123456789' flagl_nice.append('-#') elif len(f) == 1: self._shortopts += f + (has_parm and ':' or '') flagl_nice.append('-' + f) else: f_nice = re.sub(r'\W', '_', f) self._aliases[f_nice] = (flag_main, invert_main ^ invert) self._longopts.append(f + (has_parm and '=' or '')) self._longopts.append('no-' + f) flagl_nice.append('--' + _f) 
flags_nice = ', '.join(flagl_nice) if has_parm: flags_nice += ' ...' prefix = ' %-20s ' % flags_nice argtext = '\n'.join(textwrap.wrap(extra, width=_tty_width(), initial_indent=prefix, subsequent_indent=' '*28)) out.append(argtext + '\n') last_was_option = True else: out.append('\n') last_was_option = False return ''.join(out).rstrip() + '\n' def usage(self, msg=""): """Print usage string to stderr and abort.""" sys.stderr.write(self._usagestr) if msg: sys.stderr.write(msg) e = self._onabort and self._onabort(msg) or None if e: raise e def fatal(self, msg): """Print an error message to stderr and abort with usage string.""" msg = '\nerror: %s\n' % msg return self.usage(msg) def parse(self, args): """Parse a list of arguments and return (options, flags, extra). In the returned tuple, "options" is an OptDict with known options, "flags" is a list of option flags that were used on the command-line, and "extra" is a list of positional arguments. """ try: (flags,extra) = self.optfunc(args, self._shortopts, self._longopts) except getopt.GetoptError, e: self.fatal(e) opt = OptDict(aliases=self._aliases) for k,v in self._defaults.iteritems(): opt[k] = v for (k,v) in flags: k = k.lstrip('-') if k in ('h', '?', 'help', 'usage'): self.usage() if (self._aliases.get('#') and k in ('0','1','2','3','4','5','6','7','8','9')): v = int(k) # guaranteed to be exactly one digit k, invert = self._aliases['#'] opt['#'] = v else: k, invert = opt._unalias(k) if not self._hasparms[k]: assert(v == '') v = (opt._opts.get(k) or 0) + 1 else: v = _intify(v) opt[k] = _invert(v, invert) return (opt,flags,extra) bup-0.25/lib/bup/path.py000066400000000000000000000005221225146730500150740ustar00rootroot00000000000000"""This is a separate module so we can cleanly getcwd() before anyone does chdir(). 
""" import sys, os startdir = os.getcwd() def exe(): return (os.environ.get('BUP_MAIN_EXE') or os.path.join(startdir, sys.argv[0])) def exedir(): return os.path.split(exe())[0] def exefile(): return os.path.split(exe())[1] bup-0.25/lib/bup/shquote.py000066400000000000000000000114551225146730500156370ustar00rootroot00000000000000import re q = "'" qq = '"' class QuoteError(Exception): pass def _quotesplit(line): inquote = None inescape = None wordstart = 0 word = '' for i in range(len(line)): c = line[i] if inescape: if inquote == q and c != q: word += '\\' # single-q backslashes can only quote single-q word += c inescape = False elif c == '\\': inescape = True elif c == inquote: inquote = None # this is un-sh-like, but do it for sanity when autocompleting yield (wordstart, word) word = '' wordstart = i+1 elif not inquote and not word and (c == q or c == qq): # the 'not word' constraint on this is un-sh-like, but do it # for sanity when autocompleting inquote = c wordstart = i elif not inquote and c in [' ', '\n', '\r', '\t']: if word: yield (wordstart, word) word = '' wordstart = i+1 else: word += c if word: yield (wordstart, word) if inquote or inescape or word: raise QuoteError() def quotesplit(line): """Split 'line' into a list of offset,word tuples. The words are produced after removing doublequotes, singlequotes, and backslash escapes. Note that this implementation isn't entirely sh-compatible. It only dequotes words that *start* with a quote character, that is, a string like hello"world" will not have its quotes removed, while a string like hello "world" will be turned into [(0, 'hello'), (6, 'world')] (ie. quotes removed). """ l = [] try: for i in _quotesplit(line): l.append(i) except QuoteError: pass return l def unfinished_word(line): """Returns the quotechar,word of any unfinished word at the end of 'line'. You can use this to determine if 'line' is a completely parseable line (ie. 
one that quotesplit() will finish successfully) or if you need to read more bytes first. Args: line: an input string Returns: quotechar,word: the initial quote char (or None), and the partial word. """ try: for (wordstart,word) in _quotesplit(line): pass except QuoteError: firstchar = line[wordstart] if firstchar in [q, qq]: return (firstchar, word) else: return (None, word) else: return (None, '') def quotify(qtype, word, terminate): """Return a string corresponding to given word, quoted using qtype. The resulting string is dequotable using quotesplit() and can be joined with other quoted strings by adding arbitrary whitespace separators. Args: qtype: one of '', shquote.qq, or shquote.q word: the string to quote. May contain arbitrary characters. terminate: include the trailing quote character, if any. Returns: The quoted string. """ if qtype == qq: return qq + word.replace(qq, '\\"') + (terminate and qq or '') elif qtype == q: return q + word.replace(q, "\\'") + (terminate and q or '') else: return re.sub(r'([\"\' \t\n\r])', r'\\\1', word) def quotify_list(words): """Return a minimally-quoted string produced by quoting each word. This calculates the qtype for each word depending on whether the word already includes singlequote characters, doublequote characters, both, or neither. Args: words: the list of words to quote. Returns: The resulting string, with quoted words separated by ' '. """ wordout = [] for word in words: qtype = q if word and not re.search(r'[\s\"\']', word): qtype = '' elif q in word and qq not in word: qtype = qq wordout.append(quotify(qtype, word, True)) return ' '.join(wordout) def what_to_add(qtype, origword, newword, terminate): """Return a qtype that is needed to finish a partial word. For example, given an origword of '\"frog' and a newword of '\"frogston', returns either: terminate=False: 'ston' terminate=True: 'ston\"' This is useful when calculating tab completion strings for readline. Args: qtype: the type of quoting to use (ie. 
the first character of origword) origword: the original word that needs completion. newword: the word we want it to be after completion. Must start with origword. terminate: true if we should add the actual quote character at the end. Returns: The string to append to origword to produce (quoted) newword. """ if not newword.startswith(origword): return '' else: qold = quotify(qtype, origword, terminate=False) return quotify(qtype, newword, terminate=terminate)[len(qold):] bup-0.25/lib/bup/ssh.py000066400000000000000000000033431225146730500147410ustar00rootroot00000000000000"""SSH connection. Connect to a remote host via SSH and execute a command on the host. """ import sys, os, re, subprocess from bup import helpers, path def connect(rhost, port, subcmd): """Connect to 'rhost' and execute the bup subcommand 'subcmd' on it.""" assert(not re.search(r'[^\w-]', subcmd)) nicedir = re.sub(r':', "_", path.exedir()) if rhost == '-': rhost = None if not rhost: argv = ['bup', subcmd] else: # WARNING: shell quoting security holes are possible here, so we # have to be super careful. We have to use 'sh -c' because # csh-derived shells can't handle PATH= notation. We can't # set PATH in advance, because ssh probably replaces it. We # can't exec *safely* using argv, because *both* ssh and 'sh -c' # allow shellquoting. So we end up having to double-shellquote # stuff here. 
escapedir = re.sub(r'([^\w/])', r'\\\\\\\1', nicedir) buglvl = helpers.atoi(os.environ.get('BUP_DEBUG')) force_tty = helpers.atoi(os.environ.get('BUP_FORCE_TTY')) cmd = r""" sh -c PATH=%s:'$PATH BUP_DEBUG=%s BUP_FORCE_TTY=%s bup %s' """ % (escapedir, buglvl, force_tty, subcmd) argv = ['ssh'] if port: argv.extend(('-p', port)) argv.extend((rhost, '--', cmd.strip())) #helpers.log('argv is: %r\n' % argv) def setup(): # runs in the child process if not rhost: os.environ['PATH'] = ':'.join([nicedir, os.environ.get('PATH', '')]) os.setsid() return subprocess.Popen(argv, stdin=subprocess.PIPE, stdout=subprocess.PIPE, preexec_fn=setup) bup-0.25/lib/bup/t/000077500000000000000000000000001225146730500140325ustar00rootroot00000000000000bup-0.25/lib/bup/t/__init__.py000066400000000000000000000000441225146730500161410ustar00rootroot00000000000000import sys sys.path[:0] = ['../..'] bup-0.25/lib/bup/t/tbloom.py000066400000000000000000000031451225146730500157030ustar00rootroot00000000000000import errno, platform, tempfile from bup import bloom from bup.helpers import * from wvtest import * @wvtest def test_bloom(): hashes = [os.urandom(20) for i in range(100)] class Idx: pass ix = Idx() ix.name='dummy.idx' ix.shatable = ''.join(hashes) for k in (4, 5): b = bloom.create('pybuptest.bloom', expected=100, k=k) b.add_idx(ix) WVPASSLT(b.pfalse_positive(), .1) b.close() b = bloom.ShaBloom('pybuptest.bloom') all_present = True for h in hashes: all_present &= b.exists(h) WVPASS(all_present) false_positives = 0 for h in [os.urandom(20) for i in range(1000)]: if b.exists(h): false_positives += 1 WVPASSLT(false_positives, 5) os.unlink('pybuptest.bloom') tf = tempfile.TemporaryFile() b = bloom.create('bup.bloom', f=tf, expected=100) WVPASSEQ(b.rwfile, tf) WVPASSEQ(b.k, 5) # Test large (~1GiB) filter. This may fail on s390 (31-bit # architecture), and anywhere else where the address space is # sufficiently limited. 
tf = tempfile.TemporaryFile() skip_test = False try: b = bloom.create('bup.bloom', f=tf, expected=2**28, delaywrite=False) except EnvironmentError, ex: (ptr_width, linkage) = platform.architecture() if ptr_width == '32bit' and ex.errno == errno.ENOMEM: WVMSG('skipping large bloom filter test (mmap probably failed) ' + str(ex)) skip_test = True else: raise if not skip_test: WVPASSEQ(b.k, 4) bup-0.25/lib/bup/t/tclient.py000066400000000000000000000104061225146730500160470ustar00rootroot00000000000000import sys, os, stat, time, random, subprocess, glob from bup import client, git from wvtest import * def randbytes(sz): s = '' for i in xrange(sz): s += chr(random.randrange(0,256)) return s s1 = randbytes(10000) s2 = randbytes(10000) s3 = randbytes(10000) IDX_PAT = '/*.idx' @wvtest def test_server_split_with_indexes(): os.environ['BUP_MAIN_EXE'] = '../../../bup' os.environ['BUP_DIR'] = bupdir = 'buptest_tclient.tmp' subprocess.call(['rm', '-rf', bupdir]) git.init_repo(bupdir) lw = git.PackWriter() c = client.Client(bupdir, create=True) rw = c.new_packwriter() lw.new_blob(s1) lw.close() rw.new_blob(s2) rw.breakpoint() rw.new_blob(s1) @wvtest def test_multiple_suggestions(): os.environ['BUP_MAIN_EXE'] = '../../../bup' os.environ['BUP_DIR'] = bupdir = 'buptest_tclient.tmp' subprocess.call(['rm', '-rf', bupdir]) git.init_repo(bupdir) lw = git.PackWriter() lw.new_blob(s1) lw.close() lw = git.PackWriter() lw.new_blob(s2) lw.close() WVPASSEQ(len(glob.glob(git.repo('objects/pack'+IDX_PAT))), 2) c = client.Client(bupdir, create=True) WVPASSEQ(len(glob.glob(c.cachedir+IDX_PAT)), 0) rw = c.new_packwriter() s1sha = rw.new_blob(s1) WVPASS(rw.exists(s1sha)) s2sha = rw.new_blob(s2) # This is a little hacky, but ensures that we test the code under test while (len(glob.glob(c.cachedir+IDX_PAT)) < 2 and not c.conn.has_input()): pass rw.new_blob(s2) WVPASS(rw.objcache.exists(s1sha)) WVPASS(rw.objcache.exists(s2sha)) rw.new_blob(s3) WVPASSEQ(len(glob.glob(c.cachedir+IDX_PAT)), 2) rw.close() 
WVPASSEQ(len(glob.glob(c.cachedir+IDX_PAT)), 3) @wvtest def test_dumb_client_server(): os.environ['BUP_MAIN_EXE'] = '../../../bup' os.environ['BUP_DIR'] = bupdir = 'buptest_tclient.tmp' subprocess.call(['rm', '-rf', bupdir]) git.init_repo(bupdir) open(git.repo('bup-dumb-server'), 'w').close() lw = git.PackWriter() lw.new_blob(s1) lw.close() c = client.Client(bupdir, create=True) rw = c.new_packwriter() WVPASSEQ(len(glob.glob(c.cachedir+IDX_PAT)), 1) rw.new_blob(s1) WVPASSEQ(len(glob.glob(c.cachedir+IDX_PAT)), 1) rw.new_blob(s2) rw.close() WVPASSEQ(len(glob.glob(c.cachedir+IDX_PAT)), 2) @wvtest def test_midx_refreshing(): os.environ['BUP_MAIN_EXE'] = bupmain = '../../../bup' os.environ['BUP_DIR'] = bupdir = 'buptest_tmidx.tmp' subprocess.call(['rm', '-rf', bupdir]) git.init_repo(bupdir) c = client.Client(bupdir, create=True) rw = c.new_packwriter() rw.new_blob(s1) p1base = rw.breakpoint() p1name = os.path.join(c.cachedir, p1base) s1sha = rw.new_blob(s1) # should not be written; it's already in p1 s2sha = rw.new_blob(s2) p2base = rw.close() p2name = os.path.join(c.cachedir, p2base) del rw pi = git.PackIdxList(bupdir + '/objects/pack') WVPASSEQ(len(pi.packs), 2) pi.refresh() WVPASSEQ(len(pi.packs), 2) WVPASSEQ(sorted([os.path.basename(i.name) for i in pi.packs]), sorted([p1base, p2base])) p1 = git.open_idx(p1name) WVPASS(p1.exists(s1sha)) p2 = git.open_idx(p2name) WVFAIL(p2.exists(s1sha)) WVPASS(p2.exists(s2sha)) subprocess.call([bupmain, 'midx', '-f']) pi.refresh() WVPASSEQ(len(pi.packs), 1) pi.refresh(skip_midx=True) WVPASSEQ(len(pi.packs), 2) pi.refresh(skip_midx=False) WVPASSEQ(len(pi.packs), 1) @wvtest def test_remote_parsing(): tests = ( (':/bup', ('file', None, None, '/bup')), ('file:///bup', ('file', None, None, '/bup')), ('192.168.1.1:/bup', ('ssh', '192.168.1.1', None, '/bup')), ('ssh://192.168.1.1:2222/bup', ('ssh', '192.168.1.1', '2222', '/bup')), ('ssh://[ff:fe::1]:2222/bup', ('ssh', 'ff:fe::1', '2222', '/bup')), ('bup://foo.com:1950', ('bup', 'foo.com', 
'1950', None)), ('bup://foo.com:1950/bup', ('bup', 'foo.com', '1950', '/bup')), ('bup://[ff:fe::1]/bup', ('bup', 'ff:fe::1', None, '/bup')), ) for remote, values in tests: WVPASSEQ(client.parse_remote(remote), values) try: client.parse_remote('http://asdf.com/bup') WVFAIL() except client.ClientError: WVPASS() bup-0.25/lib/bup/t/tgit.py000066400000000000000000000126251225146730500153610ustar00rootroot00000000000000import struct, os, tempfile, time from bup import git from bup.helpers import * from wvtest import * @wvtest def testmangle(): afile = 0100644 afile2 = 0100770 alink = 0120000 adir = 0040000 adir2 = 0040777 WVPASSEQ(git.mangle_name("a", adir2, adir), "a") WVPASSEQ(git.mangle_name(".bup", adir2, adir), ".bup.bupl") WVPASSEQ(git.mangle_name("a.bupa", adir2, adir), "a.bupa.bupl") WVPASSEQ(git.mangle_name("b.bup", alink, alink), "b.bup.bupl") WVPASSEQ(git.mangle_name("b.bu", alink, alink), "b.bu") WVPASSEQ(git.mangle_name("f", afile, afile2), "f") WVPASSEQ(git.mangle_name("f.bup", afile, afile2), "f.bup.bupl") WVPASSEQ(git.mangle_name("f.bup", afile, adir), "f.bup.bup") WVPASSEQ(git.mangle_name("f", afile, adir), "f.bup") WVPASSEQ(git.demangle_name("f.bup"), ("f", git.BUP_CHUNKED)) WVPASSEQ(git.demangle_name("f.bupl"), ("f", git.BUP_NORMAL)) WVPASSEQ(git.demangle_name("f.bup.bupl"), ("f.bup", git.BUP_NORMAL)) # for safety, we ignore .bup? suffixes we don't recognize. Future # versions might implement a .bup[a-z] extension as something other # than BUP_NORMAL. 
WVPASSEQ(git.demangle_name("f.bupa"), ("f.bupa", git.BUP_NORMAL)) @wvtest def testencode(): s = 'hello world' looseb = ''.join(git._encode_looseobj('blob', s)) looset = ''.join(git._encode_looseobj('tree', s)) loosec = ''.join(git._encode_looseobj('commit', s)) packb = ''.join(git._encode_packobj('blob', s)) packt = ''.join(git._encode_packobj('tree', s)) packc = ''.join(git._encode_packobj('commit', s)) WVPASSEQ(git._decode_looseobj(looseb), ('blob', s)) WVPASSEQ(git._decode_looseobj(looset), ('tree', s)) WVPASSEQ(git._decode_looseobj(loosec), ('commit', s)) WVPASSEQ(git._decode_packobj(packb), ('blob', s)) WVPASSEQ(git._decode_packobj(packt), ('tree', s)) WVPASSEQ(git._decode_packobj(packc), ('commit', s)) @wvtest def testpacks(): os.environ['BUP_MAIN_EXE'] = bupmain = '../../../bup' os.environ['BUP_DIR'] = bupdir = 'pybuptest.tmp' subprocess.call(['rm','-rf', bupdir]) git.init_repo(bupdir) git.verbose = 1 w = git.PackWriter() w.new_blob(os.urandom(100)) w.new_blob(os.urandom(100)) w.abort() w = git.PackWriter() hashes = [] nobj = 1000 for i in range(nobj): hashes.append(w.new_blob(str(i))) log('\n') nameprefix = w.close() print repr(nameprefix) WVPASS(os.path.exists(nameprefix + '.pack')) WVPASS(os.path.exists(nameprefix + '.idx')) r = git.open_idx(nameprefix + '.idx') print repr(r.fanout) for i in range(nobj): WVPASS(r.find_offset(hashes[i]) > 0) WVPASS(r.exists(hashes[99])) WVFAIL(r.exists('\0'*20)) pi = iter(r) for h in sorted(hashes): WVPASSEQ(str(pi.next()).encode('hex'), h.encode('hex')) WVFAIL(r.find_offset('\0'*20)) r = git.PackIdxList('pybuptest.tmp/objects/pack') WVPASS(r.exists(hashes[5])) WVPASS(r.exists(hashes[6])) WVFAIL(r.exists('\0'*20)) @wvtest def test_pack_name_lookup(): os.environ['BUP_MAIN_EXE'] = bupmain = '../../../bup' os.environ['BUP_DIR'] = bupdir = 'pybuptest.tmp' subprocess.call(['rm','-rf', bupdir]) git.init_repo(bupdir) git.verbose = 1 packdir = git.repo('objects/pack') idxnames = [] hashes = [] for start in range(0,28,2): w = 
git.PackWriter() for i in range(start, start+2): hashes.append(w.new_blob(str(i))) log('\n') idxnames.append(os.path.basename(w.close() + '.idx')) r = git.PackIdxList(packdir) WVPASSEQ(len(r.packs), 2) for e,idxname in enumerate(idxnames): for i in range(e*2, (e+1)*2): WVPASSEQ(r.exists(hashes[i], want_source=True), idxname) @wvtest def test_long_index(): w = git.PackWriter() obj_bin = struct.pack('!IIIII', 0x00112233, 0x44556677, 0x88990011, 0x22334455, 0x66778899) obj2_bin = struct.pack('!IIIII', 0x11223344, 0x55667788, 0x99001122, 0x33445566, 0x77889900) obj3_bin = struct.pack('!IIIII', 0x22334455, 0x66778899, 0x00112233, 0x44556677, 0x88990011) pack_bin = struct.pack('!IIIII', 0x99887766, 0x55443322, 0x11009988, 0x77665544, 0x33221100) idx = list(list() for i in xrange(256)) idx[0].append((obj_bin, 1, 0xfffffffff)) idx[0x11].append((obj2_bin, 2, 0xffffffffff)) idx[0x22].append((obj3_bin, 3, 0xff)) (fd,name) = tempfile.mkstemp(suffix='.idx', dir=git.repo('objects')) os.close(fd) w.count = 3 r = w._write_pack_idx_v2(name, idx, pack_bin) i = git.PackIdxV2(name, open(name, 'rb')) WVPASSEQ(i.find_offset(obj_bin), 0xfffffffff) WVPASSEQ(i.find_offset(obj2_bin), 0xffffffffff) WVPASSEQ(i.find_offset(obj3_bin), 0xff) os.remove(name) @wvtest def test_check_repo_or_die(): git.check_repo_or_die() WVPASS('check_repo_or_die') # if we reach this point the call above passed os.rename('pybuptest.tmp/objects/pack', 'pybuptest.tmp/objects/pack.tmp') open('pybuptest.tmp/objects/pack', 'w').close() try: git.check_repo_or_die() except SystemExit, e: WVPASSEQ(e.code, 14) else: WVFAIL() os.unlink('pybuptest.tmp/objects/pack') os.rename('pybuptest.tmp/objects/pack.tmp', 'pybuptest.tmp/objects/pack') try: git.check_repo_or_die('nonexistantbup.tmp') except SystemExit, e: WVPASSEQ(e.code, 15) else: WVFAIL() bup-0.25/lib/bup/t/thashsplit.py000066400000000000000000000001731225146730500165700ustar00rootroot00000000000000from bup import hashsplit, _helpers from wvtest import * @wvtest def 
test_rolling_sums(): WVPASS(_helpers.selftest()) bup-0.25/lib/bup/t/thelpers.py000066400000000000000000000060541225146730500162370ustar00rootroot00000000000000import math import os import bup._helpers as _helpers from bup.helpers import * from wvtest import * @wvtest def test_parse_num(): pn = parse_num WVPASSEQ(pn('1'), 1) WVPASSEQ(pn('0'), 0) WVPASSEQ(pn('1.5k'), 1536) WVPASSEQ(pn('2 gb'), 2*1024*1024*1024) WVPASSEQ(pn('1e+9 k'), 1000000000 * 1024) WVPASSEQ(pn('-3e-3mb'), int(-0.003 * 1024 * 1024)) @wvtest def test_detect_fakeroot(): if os.getenv('FAKEROOTKEY'): WVPASS(detect_fakeroot()) else: WVPASS(not detect_fakeroot()) @wvtest def test_path_components(): WVPASSEQ(path_components('/'), [('', '/')]) WVPASSEQ(path_components('/foo'), [('', '/'), ('foo', '/foo')]) WVPASSEQ(path_components('/foo/'), [('', '/'), ('foo', '/foo')]) WVPASSEQ(path_components('/foo/bar'), [('', '/'), ('foo', '/foo'), ('bar', '/foo/bar')]) WVEXCEPT(Exception, path_components, 'foo') @wvtest def test_stripped_path_components(): WVPASSEQ(stripped_path_components('/', []), [('', '/')]) WVPASSEQ(stripped_path_components('/', ['']), [('', '/')]) WVPASSEQ(stripped_path_components('/', ['/']), [('', '/')]) WVPASSEQ(stripped_path_components('/', ['/foo']), [('', '/')]) WVPASSEQ(stripped_path_components('/foo', ['/bar']), [('', '/'), ('foo', '/foo')]) WVPASSEQ(stripped_path_components('/foo', ['/foo']), [('', '/foo')]) WVPASSEQ(stripped_path_components('/foo/bar', ['/foo']), [('', '/foo'), ('bar', '/foo/bar')]) WVPASSEQ(stripped_path_components('/foo/bar', ['/bar', '/foo', '/baz']), [('', '/foo'), ('bar', '/foo/bar')]) WVPASSEQ(stripped_path_components('/foo/bar/baz', ['/foo/bar/baz']), [('', '/foo/bar/baz')]) WVEXCEPT(Exception, stripped_path_components, 'foo', []) @wvtest def test_grafted_path_components(): WVPASSEQ(grafted_path_components([('/chroot', '/')], '/foo'), [('', '/'), ('foo', '/foo')]) WVPASSEQ(grafted_path_components([('/foo/bar', '/')], '/foo/bar/baz/bax'), [('', '/foo/bar'), 
('baz', '/foo/bar/baz'), ('bax', '/foo/bar/baz/bax')]) WVPASSEQ(grafted_path_components([('/foo/bar/baz', '/bax')], '/foo/bar/baz/1/2'), [('', None), ('bax', '/foo/bar/baz'), ('1', '/foo/bar/baz/1'), ('2', '/foo/bar/baz/1/2')]) WVPASSEQ(grafted_path_components([('/foo', '/bar/baz/bax')], '/foo/bar'), [('', None), ('bar', None), ('baz', None), ('bax', '/foo'), ('bar', '/foo/bar')]) WVPASSEQ(grafted_path_components([('/foo/bar/baz', '/a/b/c')], '/foo/bar/baz'), [('', None), ('a', None), ('b', None), ('c', '/foo/bar/baz')]) WVPASSEQ(grafted_path_components([('/', '/a/b/c/')], '/foo/bar'), [('', None), ('a', None), ('b', None), ('c', '/'), ('foo', '/foo'), ('bar', '/foo/bar')]) WVEXCEPT(Exception, grafted_path_components, 'foo', []) bup-0.25/lib/bup/t/tindex.py000066400000000000000000000113131225146730500156760ustar00rootroot00000000000000import os import time from bup import index, metadata from bup.helpers import * import bup.xstat as xstat from wvtest import * @wvtest def index_basic(): cd = os.path.realpath('../../../t') WVPASS(cd) sd = os.path.realpath(cd + '/sampledata') WVPASSEQ(index.realpath(cd + '/sampledata'), cd + '/sampledata') WVPASSEQ(os.path.realpath(cd + '/sampledata/x'), sd + '/x') WVPASSEQ(os.path.realpath(cd + '/sampledata/etc'), os.path.realpath('/etc')) WVPASSEQ(index.realpath(cd + '/sampledata/etc'), sd + '/etc') @wvtest def index_writer(): unlink('index.tmp') ds = xstat.stat('.') fs = xstat.stat('tindex.py') unlink('index.meta.tmp') ms = index.MetaStoreWriter('index.meta.tmp'); tmax = (time.time() - 1) * 10**9 w = index.Writer('index.tmp', ms, tmax) w.add('/var/tmp/sporky', fs, 0) w.add('/etc/passwd', fs, 0) w.add('/etc/', ds, 0) w.add('/', ds, 0) ms.close() w.close() def dump(m): for e in list(m): print '%s%s %s' % (e.is_valid() and ' ' or 'M', e.is_fake() and 'F' or ' ', e.name) def fake_validate(*l): for i in l: for e in i: e.validate(0100644, index.FAKE_SHA) e.repack() def eget(l, ename): for e in l: if e.name == ename: return e @wvtest def 
index_negative_timestamps(): # Makes 'foo' exist f = file('foo', 'wb') f.close() # Dec 31, 1969 os.utime("foo", (-86400, -86400)) ns_per_sec = 10**9 tstart = time.time() * ns_per_sec tmax = tstart - ns_per_sec e = index.BlankNewEntry("foo", 0, tmax) e.from_stat(xstat.stat("foo"), 0, tstart) assert len(e.packed()) WVPASS() # Jun 10, 1893 os.utime("foo", (-0x80000000, -0x80000000)) e = index.BlankNewEntry("foo", 0, tmax) e.from_stat(xstat.stat("foo"), 0, tstart) assert len(e.packed()) WVPASS() unlink('foo') @wvtest def index_dirty(): unlink('index.meta.tmp') unlink('index2.meta.tmp') unlink('index3.meta.tmp') default_meta = metadata.Metadata() ms1 = index.MetaStoreWriter('index.meta.tmp') ms2 = index.MetaStoreWriter('index2.meta.tmp') ms3 = index.MetaStoreWriter('index3.meta.tmp') meta_ofs1 = ms1.store(default_meta) meta_ofs2 = ms2.store(default_meta) meta_ofs3 = ms3.store(default_meta) unlink('index.tmp') unlink('index2.tmp') unlink('index3.tmp') ds = xstat.stat('.') fs = xstat.stat('tindex.py') tmax = (time.time() - 1) * 10**9 w1 = index.Writer('index.tmp', ms1, tmax) w1.add('/a/b/x', fs, meta_ofs1) w1.add('/a/b/c', fs, meta_ofs1) w1.add('/a/b/', ds, meta_ofs1) w1.add('/a/', ds, meta_ofs1) #w1.close() WVPASS() w2 = index.Writer('index2.tmp', ms2, tmax) w2.add('/a/b/n/2', fs, meta_ofs2) #w2.close() WVPASS() w3 = index.Writer('index3.tmp', ms3, tmax) w3.add('/a/c/n/3', fs, meta_ofs3) #w3.close() WVPASS() r1 = w1.new_reader() r2 = w2.new_reader() r3 = w3.new_reader() WVPASS() r1all = [e.name for e in r1] WVPASSEQ(r1all, ['/a/b/x', '/a/b/c', '/a/b/', '/a/', '/']) r2all = [e.name for e in r2] WVPASSEQ(r2all, ['/a/b/n/2', '/a/b/n/', '/a/b/', '/a/', '/']) r3all = [e.name for e in r3] WVPASSEQ(r3all, ['/a/c/n/3', '/a/c/n/', '/a/c/', '/a/', '/']) all = [e.name for e in index.merge(r2, r1, r3)] WVPASSEQ(all, ['/a/c/n/3', '/a/c/n/', '/a/c/', '/a/b/x', '/a/b/n/2', '/a/b/n/', '/a/b/c', '/a/b/', '/a/', '/']) fake_validate(r1) dump(r1) print [hex(e.flags) for e in r1] 
WVPASSEQ([e.name for e in r1 if e.is_valid()], r1all) WVPASSEQ([e.name for e in r1 if not e.is_valid()], []) WVPASSEQ([e.name for e in index.merge(r2, r1, r3) if not e.is_valid()], ['/a/c/n/3', '/a/c/n/', '/a/c/', '/a/b/n/2', '/a/b/n/', '/a/b/', '/a/', '/']) expect_invalid = ['/'] + r2all + r3all expect_real = (set(r1all) - set(r2all) - set(r3all)) \ | set(['/a/b/n/2', '/a/c/n/3']) dump(index.merge(r2, r1, r3)) for e in index.merge(r2, r1, r3): print e.name, hex(e.flags), e.ctime eiv = e.name in expect_invalid er = e.name in expect_real WVPASSEQ(eiv, not e.is_valid()) WVPASSEQ(er, e.is_real()) fake_validate(r2, r3) dump(index.merge(r2, r1, r3)) WVPASSEQ([e.name for e in index.merge(r2, r1, r3) if not e.is_valid()], []) e = eget(index.merge(r2, r1, r3), '/a/b/c') e.invalidate() e.repack() dump(index.merge(r2, r1, r3)) WVPASSEQ([e.name for e in index.merge(r2, r1, r3) if not e.is_valid()], ['/a/b/c', '/a/b/', '/a/', '/']) bup-0.25/lib/bup/t/tmetadata.py000066400000000000000000000236061225146730500163570ustar00rootroot00000000000000import errno, glob, grp, pwd, stat, tempfile, subprocess import bup.helpers as helpers from bup import git, metadata, vfs from bup.helpers import clear_errors, detect_fakeroot, is_superuser, realpath from wvtest import * from bup.xstat import utime, lutime top_dir = '../../..' bup_path = top_dir + '/bup' start_dir = os.getcwd() def ex(*cmd): try: cmd_str = ' '.join(cmd) print >> sys.stderr, cmd_str rc = subprocess.call(cmd) if rc < 0: print >> sys.stderr, 'terminated by signal', - rc sys.exit(1) elif rc > 0: print >> sys.stderr, 'returned exit status', rc sys.exit(1) except OSError, e: print >> sys.stderr, 'subprocess call failed:', e sys.exit(1) def setup_testfs(): assert(sys.platform.startswith('linux')) # Set up testfs with user_xattr, etc. 
subprocess.call(['umount', 'testfs']) ex('dd', 'if=/dev/zero', 'of=testfs.img', 'bs=1M', 'count=32') ex('mke2fs', '-F', '-j', '-m', '0', 'testfs.img') ex('rm', '-rf', 'testfs') os.mkdir('testfs') ex('mount', '-o', 'loop,acl,user_xattr', 'testfs.img', 'testfs') # Hide, so that tests can't create risks. os.chown('testfs', 0, 0) os.chmod('testfs', 0700) def cleanup_testfs(): subprocess.call(['umount', 'testfs']) helpers.unlink('testfs.img') @wvtest def test_clean_up_archive_path(): cleanup = metadata._clean_up_path_for_archive WVPASSEQ(cleanup('foo'), 'foo') WVPASSEQ(cleanup('/foo'), 'foo') WVPASSEQ(cleanup('///foo'), 'foo') WVPASSEQ(cleanup('/foo/bar'), 'foo/bar') WVPASSEQ(cleanup('foo/./bar'), 'foo/bar') WVPASSEQ(cleanup('/foo/./bar'), 'foo/bar') WVPASSEQ(cleanup('/foo/./bar/././baz'), 'foo/bar/baz') WVPASSEQ(cleanup('/foo/./bar///././baz'), 'foo/bar/baz') WVPASSEQ(cleanup('//./foo/./bar///././baz/.///'), 'foo/bar/baz/') WVPASSEQ(cleanup('./foo/./.bar'), 'foo/.bar') WVPASSEQ(cleanup('./foo/.'), 'foo') WVPASSEQ(cleanup('./foo/..'), '.') WVPASSEQ(cleanup('//./..//.../..//.'), '.') WVPASSEQ(cleanup('//./..//..././/.'), '...') WVPASSEQ(cleanup('/////.'), '.') WVPASSEQ(cleanup('/../'), '.') WVPASSEQ(cleanup(''), '.') @wvtest def test_risky_path(): risky = metadata._risky_path WVPASS(risky('/foo')) WVPASS(risky('///foo')) WVPASS(risky('/../foo')) WVPASS(risky('../foo')) WVPASS(risky('foo/..')) WVPASS(risky('foo/../')) WVPASS(risky('foo/../bar')) WVFAIL(risky('foo')) WVFAIL(risky('foo/')) WVFAIL(risky('foo///')) WVFAIL(risky('./foo')) WVFAIL(risky('foo/.')) WVFAIL(risky('./foo/.')) WVFAIL(risky('foo/bar')) WVFAIL(risky('foo/./bar')) @wvtest def test_clean_up_extract_path(): cleanup = metadata._clean_up_extract_path WVPASSEQ(cleanup('/foo'), 'foo') WVPASSEQ(cleanup('///foo'), 'foo') WVFAIL(cleanup('/../foo')) WVFAIL(cleanup('../foo')) WVFAIL(cleanup('foo/..')) WVFAIL(cleanup('foo/../')) WVFAIL(cleanup('foo/../bar')) WVPASSEQ(cleanup('foo'), 'foo') WVPASSEQ(cleanup('foo/'), 
'foo/') WVPASSEQ(cleanup('foo///'), 'foo///') WVPASSEQ(cleanup('./foo'), './foo') WVPASSEQ(cleanup('foo/.'), 'foo/.') WVPASSEQ(cleanup('./foo/.'), './foo/.') WVPASSEQ(cleanup('foo/bar'), 'foo/bar') WVPASSEQ(cleanup('foo/./bar'), 'foo/./bar') WVPASSEQ(cleanup('/'), '.') WVPASSEQ(cleanup('./'), './') WVPASSEQ(cleanup('///foo/bar'), 'foo/bar') WVPASSEQ(cleanup('///foo/bar'), 'foo/bar') @wvtest def test_metadata_method(): tmpdir = tempfile.mkdtemp(prefix='bup-tmetadata-') try: bup_dir = tmpdir + '/bup' data_path = tmpdir + '/foo' os.mkdir(data_path) ex('touch', data_path + '/file') ex('ln', '-s', 'file', data_path + '/symlink') test_time1 = 13 * 1000000000 test_time2 = 42 * 1000000000 utime(data_path + '/file', (0, test_time1)) lutime(data_path + '/symlink', (0, 0)) utime(data_path, (0, test_time2)) ex(bup_path, '-d', bup_dir, 'init') ex(bup_path, '-d', bup_dir, 'index', '-v', data_path) ex(bup_path, '-d', bup_dir, 'save', '-tvvn', 'test', data_path) git.check_repo_or_die(bup_dir) top = vfs.RefList(None) n = top.lresolve('/test/latest' + realpath(data_path)) m = n.metadata() WVPASS(m.mtime == test_time2) WVPASS(len(n.subs()) == 2) WVPASS(n.name == 'foo') WVPASS(set([x.name for x in n.subs()]) == set(['file', 'symlink'])) for sub in n: if sub.name == 'file': m = sub.metadata() WVPASS(m.mtime == test_time1) elif sub.name == 'symlink': m = sub.metadata() WVPASS(m.mtime == 0) finally: subprocess.call(['rm', '-rf', tmpdir]) def _first_err(): if helpers.saved_errors: return str(helpers.saved_errors[0]) return '' @wvtest def test_from_path_error(): if is_superuser() or detect_fakeroot(): return tmpdir = tempfile.mkdtemp(prefix='bup-tmetadata-') try: path = tmpdir + '/foo' os.mkdir(path) m = metadata.from_path(path, archive_path=path, save_symlinks=True) WVPASSEQ(m.path, path) os.chmod(path, 000) metadata.from_path(path, archive_path=path, save_symlinks=True) if metadata.get_linux_file_attr: WVPASS(len(helpers.saved_errors) == 1) errmsg = _first_err() 
WVPASS(errmsg.startswith('read Linux attr')) clear_errors() finally: subprocess.call(['chmod', '-R', 'u+rwX', tmpdir]) subprocess.call(['rm', '-rf', tmpdir]) def _linux_attr_supported(path): # Expects path to denote a regular file or a directory. if not metadata.get_linux_file_attr: return False try: metadata.get_linux_file_attr(path) except OSError, e: if e.errno in (errno.ENOTTY, errno.ENOSYS, errno.EOPNOTSUPP): return False else: raise return True @wvtest def test_apply_to_path_restricted_access(): if is_superuser() or detect_fakeroot(): return if sys.platform.startswith('cygwin'): return # chmod 000 isn't effective. tmpdir = tempfile.mkdtemp(prefix='bup-tmetadata-') try: parent = tmpdir + '/foo' path = parent + '/bar' os.mkdir(parent) os.mkdir(path) clear_errors() m = metadata.from_path(path, archive_path=path, save_symlinks=True) WVPASSEQ(m.path, path) os.chmod(parent, 000) m.apply_to_path(path) print >> sys.stderr, helpers.saved_errors expected_errors = ['utime: '] if m.linux_attr and _linux_attr_supported(tmpdir): expected_errors.append('Linux chattr: ') if metadata.xattr and m.linux_xattr: expected_errors.append('xattr.set: ') WVPASS(len(helpers.saved_errors) == len(expected_errors)) for i in xrange(len(expected_errors)): WVPASS(str(helpers.saved_errors[i]).startswith(expected_errors[i])) clear_errors() finally: subprocess.call(['chmod', '-R', 'u+rwX', tmpdir]) subprocess.call(['rm', '-rf', tmpdir]) @wvtest def test_restore_over_existing_target(): tmpdir = tempfile.mkdtemp(prefix='bup-tmetadata-') try: path = tmpdir + '/foo' os.mkdir(path) dir_m = metadata.from_path(path, archive_path=path, save_symlinks=True) os.rmdir(path) open(path, 'w').close() file_m = metadata.from_path(path, archive_path=path, save_symlinks=True) # Restore dir over file. WVPASSEQ(dir_m.create_path(path, create_symlinks=True), None) WVPASS(stat.S_ISDIR(os.stat(path).st_mode)) # Restore dir over dir. 
WVPASSEQ(dir_m.create_path(path, create_symlinks=True), None) WVPASS(stat.S_ISDIR(os.stat(path).st_mode)) # Restore file over dir. WVPASSEQ(file_m.create_path(path, create_symlinks=True), None) WVPASS(stat.S_ISREG(os.stat(path).st_mode)) # Restore file over file. WVPASSEQ(file_m.create_path(path, create_symlinks=True), None) WVPASS(stat.S_ISREG(os.stat(path).st_mode)) # Restore file over non-empty dir. os.remove(path) os.mkdir(path) open(path + '/bar', 'w').close() WVEXCEPT(Exception, file_m.create_path, path, create_symlinks=True) # Restore dir over non-empty dir. os.remove(path + '/bar') os.mkdir(path + '/bar') WVEXCEPT(Exception, dir_m.create_path, path, create_symlinks=True) finally: subprocess.call(['rm', '-rf', tmpdir]) from bup.metadata import posix1e if not posix1e: @wvtest def POSIX1E_ACL_SUPPORT_IS_MISSING(): pass from bup.metadata import xattr if xattr: @wvtest def test_handling_of_incorrect_existing_linux_xattrs(): if not is_superuser() or detect_fakeroot(): WVMSG('skipping test -- not superuser') return setup_testfs() for f in glob.glob('testfs/*'): ex('rm', '-rf', f) path = 'testfs/foo' open(path, 'w').close() xattr.set(path, 'foo', 'bar', namespace=xattr.NS_USER) m = metadata.from_path(path, archive_path=path, save_symlinks=True) xattr.set(path, 'baz', 'bax', namespace=xattr.NS_USER) m.apply_to_path(path, restore_numeric_ids=False) WVPASSEQ(xattr.list(path), ['user.foo']) WVPASSEQ(xattr.get(path, 'user.foo'), 'bar') xattr.set(path, 'foo', 'baz', namespace=xattr.NS_USER) m.apply_to_path(path, restore_numeric_ids=False) WVPASSEQ(xattr.list(path), ['user.foo']) WVPASSEQ(xattr.get(path, 'user.foo'), 'bar') xattr.remove(path, 'foo', namespace=xattr.NS_USER) m.apply_to_path(path, restore_numeric_ids=False) WVPASSEQ(xattr.list(path), ['user.foo']) WVPASSEQ(xattr.get(path, 'user.foo'), 'bar') os.chdir(start_dir) cleanup_testfs() bup-0.25/lib/bup/t/toptions.py000066400000000000000000000060411225146730500162640ustar00rootroot00000000000000from bup import 
options from wvtest import * @wvtest def test_optdict(): d = options.OptDict({ 'x': ('x', False), 'y': ('y', False), 'z': ('z', False), 'other_thing': ('other_thing', False), 'no_other_thing': ('other_thing', True), 'no_z': ('z', True), 'no_smart': ('smart', True), 'smart': ('smart', False), 'stupid': ('smart', True), 'no_smart': ('smart', False), }) WVPASS('foo') d['x'] = 5 d['y'] = 4 d['z'] = 99 d['no_other_thing'] = 5 WVPASSEQ(d.x, 5) WVPASSEQ(d.y, 4) WVPASSEQ(d.z, 99) WVPASSEQ(d.no_z, False) WVPASSEQ(d.no_other_thing, True) WVEXCEPT(KeyError, lambda: d.p) invalid_optspec0 = """ """ invalid_optspec1 = """ prog """ invalid_optspec2 = """ -- x,y """ @wvtest def test_invalid_optspec(): WVPASS(options.Options(invalid_optspec0).parse([])) WVPASS(options.Options(invalid_optspec1).parse([])) WVPASS(options.Options(invalid_optspec2).parse([])) optspec = """ prog [stuff...] prog [-t] -- t test q,quiet quiet l,longoption= long option with parameters and a really really long description that will require wrapping p= short option with parameters onlylong long option with no short neveropt never called options deftest1= a default option with default [1] deftest2= a default option with [1] default [2] deftest3= a default option with [3] no actual default deftest4= a default option with [[square]] deftest5= a default option with "correct" [[square] s,smart,no-stupid disable stupidity x,extended,no-simple extended mode [2] #,compress= set compression level [5] """ @wvtest def test_options(): o = options.Options(optspec) (opt,flags,extra) = o.parse(['-tttqp', 7, '--longoption', '19', 'hanky', '--onlylong', '-7']) WVPASSEQ(flags[0], ('-t', '')) WVPASSEQ(flags[1], ('-t', '')) WVPASSEQ(flags[2], ('-t', '')) WVPASSEQ(flags[3], ('-q', '')) WVPASSEQ(flags[4], ('-p', 7)) WVPASSEQ(flags[5], ('--longoption', '19')) WVPASSEQ(extra, ['hanky']) WVPASSEQ((opt.t, opt.q, opt.p, opt.l, opt.onlylong, opt.neveropt), (3,1,7,19,1,None)) WVPASSEQ((opt.deftest1, opt.deftest2, opt.deftest3, 
opt.deftest4, opt.deftest5), (1,2,None,None,'[square')) WVPASSEQ((opt.stupid, opt.no_stupid), (True, None)) WVPASSEQ((opt.smart, opt.no_smart), (None, True)) WVPASSEQ((opt.x, opt.extended, opt.no_simple), (2,2,2)) WVPASSEQ((opt.no_x, opt.no_extended, opt.simple), (False,False,False)) WVPASSEQ(opt['#'], 7) WVPASSEQ(opt.compress, 7) (opt,flags,extra) = o.parse(['--onlylong', '-t', '--no-onlylong', '--smart', '--simple']) WVPASSEQ((opt.t, opt.q, opt.onlylong), (1, None, 0)) WVPASSEQ((opt.stupid, opt.no_stupid), (False, True)) WVPASSEQ((opt.smart, opt.no_smart), (True, False)) WVPASSEQ((opt.x, opt.extended, opt.no_simple), (0,0,0)) WVPASSEQ((opt.no_x, opt.no_extended, opt.simple), (True,True,True)) bup-0.25/lib/bup/t/tshquote.py000066400000000000000000000035231225146730500162630ustar00rootroot00000000000000from bup import shquote from wvtest import * def qst(line): return [word for offset,word in shquote.quotesplit(line)] @wvtest def test_shquote(): WVPASSEQ(qst(""" this is basic \t\n\r text """), ['this', 'is', 'basic', 'text']) WVPASSEQ(qst(r""" \"x\" "help" 'yelp' """), ['"x"', 'help', 'yelp']) WVPASSEQ(qst(r""" "'\"\"'" '\"\'' """), ["'\"\"'", '\\"\'']) WVPASSEQ(shquote.quotesplit(' this is "unfinished'), [(2,'this'), (7,'is'), (10,'unfinished')]) WVPASSEQ(shquote.quotesplit('"silly"\'will'), [(0,'silly'), (7,'will')]) WVPASSEQ(shquote.unfinished_word('this is a "billy" "goat'), ('"', 'goat')) WVPASSEQ(shquote.unfinished_word("'x"), ("'", 'x')) WVPASSEQ(shquote.unfinished_word("abra cadabra "), (None, '')) WVPASSEQ(shquote.unfinished_word("abra cadabra"), (None, 'cadabra')) (qtype, word) = shquote.unfinished_word("this is /usr/loc") WVPASSEQ(shquote.what_to_add(qtype, word, "/usr/local", True), "al") (qtype, word) = shquote.unfinished_word("this is '/usr/loc") WVPASSEQ(shquote.what_to_add(qtype, word, "/usr/local", True), "al'") (qtype, word) = shquote.unfinished_word("this is \"/usr/loc") WVPASSEQ(shquote.what_to_add(qtype, word, "/usr/local", True), "al\"") 
(qtype, word) = shquote.unfinished_word("this is \"/usr/loc") WVPASSEQ(shquote.what_to_add(qtype, word, "/usr/local", False), "al") (qtype, word) = shquote.unfinished_word("this is \\ hammer\\ \"") WVPASSEQ(word, ' hammer "') WVPASSEQ(shquote.what_to_add(qtype, word, " hammer \"time\"", True), "time\\\"") WVPASSEQ(shquote.quotify_list(['a', '', '"word"', "'third'", "'", "x y"]), "a '' '\"word\"' \"'third'\" \"'\" 'x y'") bup-0.25/lib/bup/t/tvint.py000066400000000000000000000044131225146730500155520ustar00rootroot00000000000000from bup import vint from wvtest import * from cStringIO import StringIO def encode_and_decode_vuint(x): f = StringIO() vint.write_vuint(f, x) return vint.read_vuint(StringIO(f.getvalue())) @wvtest def test_vuint(): for x in (0, 1, 42, 128, 10**16): WVPASSEQ(encode_and_decode_vuint(x), x) WVEXCEPT(Exception, vint.write_vuint, StringIO(), -1) WVEXCEPT(EOFError, vint.read_vuint, StringIO()) def encode_and_decode_vint(x): f = StringIO() vint.write_vint(f, x) return vint.read_vint(StringIO(f.getvalue())) @wvtest def test_vint(): values = (0, 1, 42, 64, 10**16) for x in values: WVPASSEQ(encode_and_decode_vint(x), x) for x in [-x for x in values]: WVPASSEQ(encode_and_decode_vint(x), x) WVEXCEPT(EOFError, vint.read_vint, StringIO()) def encode_and_decode_bvec(x): f = StringIO() vint.write_bvec(f, x) return vint.read_bvec(StringIO(f.getvalue())) @wvtest def test_bvec(): values = ('', 'x', 'foo', '\0', '\0foo', 'foo\0bar\0') for x in values: WVPASSEQ(encode_and_decode_bvec(x), x) WVEXCEPT(EOFError, vint.read_bvec, StringIO()) outf = StringIO() for x in ('foo', 'bar', 'baz', 'bax'): vint.write_bvec(outf, x) inf = StringIO(outf.getvalue()) WVPASSEQ(vint.read_bvec(inf), 'foo') WVPASSEQ(vint.read_bvec(inf), 'bar') vint.skip_bvec(inf) WVPASSEQ(vint.read_bvec(inf), 'bax') def pack_and_unpack(types, *values): data = vint.pack(types, *values) return vint.unpack(types, data) @wvtest def test_pack_and_unpack(): tests = [('', []), ('s', ['foo']), ('ss', ['foo', 
'bar']), ('sV', ['foo', 0]), ('sv', ['foo', -1]), ('V', [0]), ('Vs', [0, 'foo']), ('VV', [0, 1]), ('Vv', [0, -1]), ('v', [0]), ('vs', [0, 'foo']), ('vV', [0, 1]), ('vv', [0, -1])] for test in tests: (types, values) = test WVPASSEQ(pack_and_unpack(types, *values), values) WVEXCEPT(Exception, vint.pack, 's') WVEXCEPT(Exception, vint.pack, 's', 'foo', 'bar') WVEXCEPT(Exception, vint.pack, 'x', 1) WVEXCEPT(Exception, vint.unpack, 's', '') WVEXCEPT(Exception, vint.unpack, 'x', '') bup-0.25/lib/bup/t/txstat.py000066400000000000000000000055401225146730500157370ustar00rootroot00000000000000import math, tempfile, subprocess from wvtest import * import bup._helpers as _helpers from bup import xstat @wvtest def test_fstime(): WVPASSEQ(xstat.timespec_to_nsecs((0, 0)), 0) WVPASSEQ(xstat.timespec_to_nsecs((1, 0)), 10**9) WVPASSEQ(xstat.timespec_to_nsecs((0, 10**9 / 2)), 500000000) WVPASSEQ(xstat.timespec_to_nsecs((1, 10**9 / 2)), 1500000000) WVPASSEQ(xstat.timespec_to_nsecs((-1, 0)), -10**9) WVPASSEQ(xstat.timespec_to_nsecs((-1, 10**9 / 2)), -500000000) WVPASSEQ(xstat.timespec_to_nsecs((-2, 10**9 / 2)), -1500000000) WVEXCEPT(Exception, xstat.timespec_to_nsecs, (0, -1)) WVPASSEQ(type(xstat.timespec_to_nsecs((2, 22222222))), type(0)) WVPASSEQ(type(xstat.timespec_to_nsecs((-2, 22222222))), type(0)) WVPASSEQ(xstat.nsecs_to_timespec(0), (0, 0)) WVPASSEQ(xstat.nsecs_to_timespec(10**9), (1, 0)) WVPASSEQ(xstat.nsecs_to_timespec(500000000), (0, 10**9 / 2)) WVPASSEQ(xstat.nsecs_to_timespec(1500000000), (1, 10**9 / 2)) WVPASSEQ(xstat.nsecs_to_timespec(-10**9), (-1, 0)) WVPASSEQ(xstat.nsecs_to_timespec(-500000000), (-1, 10**9 / 2)) WVPASSEQ(xstat.nsecs_to_timespec(-1500000000), (-2, 10**9 / 2)) x = xstat.nsecs_to_timespec(1977777778) WVPASSEQ(type(x[0]), type(0)) WVPASSEQ(type(x[1]), type(0)) x = xstat.nsecs_to_timespec(-1977777778) WVPASSEQ(type(x[0]), type(0)) WVPASSEQ(type(x[1]), type(0)) WVPASSEQ(xstat.fstime_floor_secs(0), 0) WVPASSEQ(xstat.fstime_floor_secs(10**9 / 2), 0) 
WVPASSEQ(xstat.fstime_floor_secs(10**9), 1) WVPASSEQ(xstat.fstime_floor_secs(-10**9 / 2), -1) WVPASSEQ(xstat.fstime_floor_secs(-10**9), -1) WVPASSEQ(type(xstat.fstime_floor_secs(10**9 / 2)), type(0)) WVPASSEQ(type(xstat.fstime_floor_secs(-10**9 / 2)), type(0)) try: _have_bup_utime_ns = _helpers.bup_utime_ns except AttributeError, e: _have_bup_utime_ns = False @wvtest def test_timespec_behavior(): if not _have_bup_utime_ns: return tmpdir = tempfile.mkdtemp(prefix='bup-tmetadata-') try: path = tmpdir + '/foo' open(path, 'w').close() frac_ts = (0, 10**9 / 2) _helpers.bup_utime_ns(path, (frac_ts, frac_ts)) st = _helpers.stat(path) atime_ts = st[8] mtime_ts = st[9] WVPASSEQ(atime_ts[0], 0) WVPASS(atime_ts[1] == 0 or atime_ts[1] == frac_ts[1]) WVPASSEQ(mtime_ts[0], 0) WVPASS(mtime_ts[1] == 0 or mtime_ts[1] == frac_ts[1]) if(mtime_ts[1] == frac_ts[1]): # Sub-second resolution -- check behavior of negative timespecs. neg_ts = (-43, 10**9 / 2) _helpers.bup_utime_ns(path, (neg_ts, neg_ts)) st = _helpers.stat(path) atime_ts = st[8] mtime_ts = st[9] WVPASSEQ(atime_ts, neg_ts) WVPASSEQ(mtime_ts, neg_ts) finally: subprocess.call(['rm', '-rf', tmpdir]) bup-0.25/lib/bup/vfs.py000066400000000000000000000440161225146730500147440ustar00rootroot00000000000000"""Virtual File System representing bup's repository contents. The vfs.py library makes it possible to expose contents from bup's repository and abstracts internal name mangling and storage from the exposition layer. 
""" import os, re, stat, time from bup import git, metadata from helpers import * from bup.hashsplit import GIT_MODE_TREE, GIT_MODE_FILE EMPTY_SHA='\0'*20 _cp = None def cp(): """Create a git.CatPipe object or reuse the already existing one.""" global _cp if not _cp: _cp = git.CatPipe() return _cp class NodeError(Exception): """VFS base exception.""" pass class NoSuchFile(NodeError): """Request of a file that does not exist.""" pass class NotDir(NodeError): """Attempt to do a directory action on a file that is not one.""" pass class NotFile(NodeError): """Access to a node that does not represent a file.""" pass class TooManySymlinks(NodeError): """Symlink dereferencing level is too deep.""" pass def _treeget(hash): it = cp().get(hash.encode('hex')) type = it.next() assert(type == 'tree') return git.tree_decode(''.join(it)) def _tree_decode(hash): tree = [(int(name,16),stat.S_ISDIR(mode),sha) for (mode,name,sha) in _treeget(hash)] assert(tree == list(sorted(tree))) return tree def _chunk_len(hash): return sum(len(b) for b in cp().join(hash.encode('hex'))) def _last_chunk_info(hash): tree = _tree_decode(hash) assert(tree) (ofs,isdir,sha) = tree[-1] if isdir: (subofs, sublen) = _last_chunk_info(sha) return (ofs+subofs, sublen) else: return (ofs, _chunk_len(sha)) def _total_size(hash): (lastofs, lastsize) = _last_chunk_info(hash) return lastofs + lastsize def _chunkiter(hash, startofs): assert(startofs >= 0) tree = _tree_decode(hash) # skip elements before startofs for i in xrange(len(tree)): if i+1 >= len(tree) or tree[i+1][0] > startofs: break first = i # iterate through what's left for i in xrange(first, len(tree)): (ofs,isdir,sha) = tree[i] skipmore = startofs-ofs if skipmore < 0: skipmore = 0 if isdir: for b in _chunkiter(sha, skipmore): yield b else: yield ''.join(cp().join(sha.encode('hex')))[skipmore:] class _ChunkReader: def __init__(self, hash, isdir, startofs): if isdir: self.it = _chunkiter(hash, startofs) self.blob = None else: self.it = None self.blob = 
''.join(cp().join(hash.encode('hex')))[startofs:] self.ofs = startofs def next(self, size): out = '' while len(out) < size: if self.it and not self.blob: try: self.blob = self.it.next() except StopIteration: self.it = None if self.blob: want = size - len(out) out += self.blob[:want] self.blob = self.blob[want:] if not self.it: break debug2('next(%d) returned %d\n' % (size, len(out))) self.ofs += len(out) return out class _FileReader(object): def __init__(self, hash, size, isdir): self.hash = hash self.ofs = 0 self.size = size self.isdir = isdir self.reader = None def seek(self, ofs): if ofs > self.size: self.ofs = self.size elif ofs < 0: self.ofs = 0 else: self.ofs = ofs def tell(self): return self.ofs def read(self, count = -1): if count < 0: count = self.size - self.ofs if not self.reader or self.reader.ofs != self.ofs: self.reader = _ChunkReader(self.hash, self.isdir, self.ofs) try: buf = self.reader.next(count) except: self.reader = None raise # our offsets will be all screwed up otherwise self.ofs += len(buf) return buf def close(self): pass class Node: """Base class for file representation.""" def __init__(self, parent, name, mode, hash): self.parent = parent self.name = name self.mode = mode self.hash = hash self.ctime = self.mtime = self.atime = 0 self._subs = None self._metadata = None def __repr__(self): return "<%s object at X - name:%r hash:%s parent:%r>" \ % (self.__class__, self.name, self.hash.encode('hex'), self.parent.name if self.parent else None) def __cmp__(a, b): if a is b: return 0 return (cmp(a and a.parent, b and b.parent) or cmp(a and a.name, b and b.name)) def __iter__(self): return iter(self.subs()) def fullname(self, stop_at=None): """Get this file's full path.""" assert(self != stop_at) # would be the empty string; too weird if self.parent and self.parent != stop_at: return os.path.join(self.parent.fullname(stop_at=stop_at), self.name) else: return self.name def _mksubs(self): self._subs = {} def subs(self): """Get a list of nodes that 
are contained in this node.""" if self._subs == None: self._mksubs() return sorted(self._subs.values()) def sub(self, name): """Get node named 'name' that is contained in this node.""" if self._subs == None: self._mksubs() ret = self._subs.get(name) if not ret: raise NoSuchFile("no file %r in %r" % (name, self.name)) return ret def top(self): """Return the very top node of the tree.""" if self.parent: return self.parent.top() else: return self def fs_top(self): """Return the top node of the particular backup set. If this node isn't inside a backup set, return the root level. """ if self.parent and not isinstance(self.parent, CommitList): return self.parent.fs_top() else: return self def _lresolve(self, parts): #debug2('_lresolve %r in %r\n' % (parts, self.name)) if not parts: return self (first, rest) = (parts[0], parts[1:]) if first == '.': return self._lresolve(rest) elif first == '..': if not self.parent: raise NoSuchFile("no parent dir for %r" % self.name) return self.parent._lresolve(rest) elif rest: return self.sub(first)._lresolve(rest) else: return self.sub(first) def lresolve(self, path, stay_inside_fs=False): """Walk into a given sub-path of this node. If the last element is a symlink, leave it as a symlink, don't resolve it. (like lstat()) """ start = self if not path: return start if path.startswith('/'): if stay_inside_fs: start = self.fs_top() else: start = self.top() path = path[1:] parts = re.split(r'/+', path or '.') if not parts[-1]: parts[-1] = '.' #debug2('parts: %r %r\n' % (path, parts)) return start._lresolve(parts) def resolve(self, path = ''): """Like lresolve(), and dereference it if it was a symlink.""" return self.lresolve(path).lresolve('.') def try_resolve(self, path = ''): """Like resolve(), but don't worry if a symlink uses an invalid path. Returns an error if any intermediate nodes were invalid. 
""" n = self.lresolve(path) try: n = n.lresolve('.') except NoSuchFile: pass return n def nlinks(self): """Get the number of hard links to the current node.""" if self._subs == None: self._mksubs() return 1 def size(self): """Get the size of the current node.""" return 0 def open(self): """Open the current node. It is an error to open a non-file node.""" raise NotFile('%s is not a regular file' % self.name) def _populate_metadata(self): # Only Dirs contain .bupm files, so by default, do nothing. pass def metadata(self): """Return this Node's Metadata() object, if any.""" if self.parent: self.parent._populate_metadata() return self._metadata class File(Node): """A normal file from bup's repository.""" def __init__(self, parent, name, mode, hash, bupmode): Node.__init__(self, parent, name, mode, hash) self.bupmode = bupmode self._cached_size = None self._filereader = None def open(self): """Open the file.""" # You'd think FUSE might call this only once each time a file is # opened, but no; it's really more of a refcount, and it's called # once per read(). Thus, it's important to cache the filereader # object here so we're not constantly re-seeking. 
if not self._filereader: self._filereader = _FileReader(self.hash, self.size(), self.bupmode == git.BUP_CHUNKED) self._filereader.seek(0) return self._filereader def size(self): """Get this file's size.""" if self._cached_size == None: debug1('<<< 100: raise TooManySymlinks('too many levels of symlinks: %r' % self.fullname()) _symrefs += 1 try: try: return self.parent.lresolve(self.readlink(), stay_inside_fs=True) except NoSuchFile: raise NoSuchFile("%s: broken symlink to %r" % (self.fullname(), self.readlink())) finally: _symrefs -= 1 def _lresolve(self, parts): return self.dereference()._lresolve(parts) class FakeSymlink(Symlink): """A symlink that is not stored in the bup repository.""" def __init__(self, parent, name, toname): Symlink.__init__(self, parent, name, EMPTY_SHA, git.BUP_NORMAL) self.toname = toname def readlink(self): """Get the path that this link points at.""" return self.toname class Dir(Node): """A directory stored inside of bup's repository.""" def __init__(self, *args): Node.__init__(self, *args) self._bupm = None def _populate_metadata(self): if not self._subs: self._mksubs() if not self._bupm: return meta_stream = self._bupm.open() self._metadata = metadata.Metadata.read(meta_stream) for sub in self: if not stat.S_ISDIR(sub.mode): sub._metadata = metadata.Metadata.read(meta_stream) def _mksubs(self): self._subs = {} it = cp().get(self.hash.encode('hex')) type = it.next() if type == 'commit': del it it = cp().get(self.hash.encode('hex') + ':') type = it.next() assert(type == 'tree') for (mode,mangled_name,sha) in git.tree_decode(''.join(it)): if mangled_name == '.bupm': self._bupm = File(self, mangled_name, mode, sha, git.BUP_NORMAL) continue name = mangled_name (name,bupmode) = git.demangle_name(mangled_name) if bupmode == git.BUP_CHUNKED: mode = GIT_MODE_FILE if stat.S_ISDIR(mode): self._subs[name] = Dir(self, name, mode, sha) elif stat.S_ISLNK(mode): self._subs[name] = Symlink(self, name, sha, bupmode) else: self._subs[name] = File(self, 
name, mode, sha, bupmode) def metadata(self): """Return this Dir's Metadata() object, if any.""" self._populate_metadata() return self._metadata def metadata_file(self): """Return this Dir's .bupm File, if any.""" if not self._subs: self._mksubs() return self._bupm class CommitDir(Node): """A directory that contains all commits that are reachable by a ref. Contains a set of subdirectories named after the commits' first byte in hexadecimal. Each of those directories contain all commits with hashes that start the same as the directory name. The name used for those subdirectories is the hash of the commit without the first byte. This separation helps us avoid having too much directories on the same level as the number of commits grows big. """ def __init__(self, parent, name): Node.__init__(self, parent, name, GIT_MODE_TREE, EMPTY_SHA) def _mksubs(self): self._subs = {} refs = git.list_refs() for ref in refs: #debug2('ref name: %s\n' % ref[0]) revs = git.rev_list(ref[1].encode('hex')) for (date, commit) in revs: #debug2('commit: %s date: %s\n' % (commit.encode('hex'), date)) commithex = commit.encode('hex') containername = commithex[:2] dirname = commithex[2:] n1 = self._subs.get(containername) if not n1: n1 = CommitList(self, containername) self._subs[containername] = n1 if n1.commits.get(dirname): # Stop work for this ref, the rest should already be present break n1.commits[dirname] = (commit, date) class CommitList(Node): """A list of commits with hashes that start with the current node's name.""" def __init__(self, parent, name): Node.__init__(self, parent, name, GIT_MODE_TREE, EMPTY_SHA) self.commits = {} def _mksubs(self): self._subs = {} for (name, (hash, date)) in self.commits.items(): n1 = Dir(self, name, GIT_MODE_TREE, hash) n1.ctime = n1.mtime = date self._subs[name] = n1 class TagDir(Node): """A directory that contains all tags in the repository.""" def __init__(self, parent, name): Node.__init__(self, parent, name, GIT_MODE_TREE, EMPTY_SHA) def 
_mksubs(self): self._subs = {} for (name, sha) in git.list_refs(): if name.startswith('refs/tags/'): name = name[10:] date = git.rev_get_date(sha.encode('hex')) commithex = sha.encode('hex') target = '../.commit/%s/%s' % (commithex[:2], commithex[2:]) tag1 = FakeSymlink(self, name, target) tag1.ctime = tag1.mtime = date self._subs[name] = tag1 class BranchList(Node): """A list of links to commits reachable by a branch in bup's repository. Represents each commit as a symlink that points to the commit directory in /.commit/??/ . The symlink is named after the commit date. """ def __init__(self, parent, name, hash): Node.__init__(self, parent, name, GIT_MODE_TREE, hash) def _mksubs(self): self._subs = {} tags = git.tags() revs = list(git.rev_list(self.hash.encode('hex'))) latest = revs[0] for (date, commit) in revs: l = time.localtime(date) ls = time.strftime('%Y-%m-%d-%H%M%S', l) commithex = commit.encode('hex') target = '../.commit/%s/%s' % (commithex[:2], commithex[2:]) n1 = FakeSymlink(self, ls, target) n1.ctime = n1.mtime = date self._subs[ls] = n1 for tag in tags.get(commit, []): t1 = FakeSymlink(self, tag, target) t1.ctime = t1.mtime = date self._subs[tag] = t1 (date, commit) = latest commithex = commit.encode('hex') target = '../.commit/%s/%s' % (commithex[:2], commithex[2:]) n1 = FakeSymlink(self, 'latest', target) n1.ctime = n1.mtime = date self._subs['latest'] = n1 class RefList(Node): """A list of branches in bup's repository. The sub-nodes of the ref list are a series of CommitList for each commit hash pointed to by a branch. Also, a special sub-node named '.commit' contains all commit directories that are reachable via a ref (e.g. a branch). See CommitDir for details. 
""" def __init__(self, parent): Node.__init__(self, parent, '/', GIT_MODE_TREE, EMPTY_SHA) def _mksubs(self): self._subs = {} commit_dir = CommitDir(self, '.commit') self._subs['.commit'] = commit_dir tag_dir = TagDir(self, '.tag') self._subs['.tag'] = tag_dir for (name,sha) in git.list_refs(): if name.startswith('refs/heads/'): name = name[11:] date = git.rev_get_date(sha.encode('hex')) n1 = BranchList(self, name, sha) n1.ctime = n1.mtime = date self._subs[name] = n1 bup-0.25/lib/bup/vint.py000066400000000000000000000065411225146730500151270ustar00rootroot00000000000000"""Binary encodings for bup.""" # Copyright (C) 2010 Rob Browning # # This code is covered under the terms of the GNU Library General # Public License as described in the bup LICENSE file. from cStringIO import StringIO # Variable length integers are encoded as vints -- see jakarta lucene. def write_vuint(port, x): if x < 0: raise Exception("vuints must not be negative") elif x == 0: port.write('\0') else: while x: seven_bits = x & 0x7f x >>= 7 if x: port.write(chr(0x80 | seven_bits)) else: port.write(chr(seven_bits)) def read_vuint(port): c = port.read(1) if c == '': raise EOFError('encountered EOF while reading vuint'); result = 0 offset = 0 while c: b = ord(c) if b & 0x80: result |= ((b & 0x7f) << offset) offset += 7 c = port.read(1) else: result |= (b << offset) break return result def write_vint(port, x): # Sign is handled with the second bit of the first byte. All else # matches vuint. if x == 0: port.write('\0') else: if x < 0: x = -x sign_and_six_bits = (x & 0x3f) | 0x40 else: sign_and_six_bits = x & 0x3f x >>= 6 if x: port.write(chr(0x80 | sign_and_six_bits)) write_vuint(port, x) else: port.write(chr(sign_and_six_bits)) def read_vint(port): c = port.read(1) if c == '': raise EOFError('encountered EOF while reading vint'); negative = False result = 0 offset = 0 # Handle first byte with sign bit specially. 
if c: b = ord(c) if b & 0x40: negative = True result |= (b & 0x3f) if b & 0x80: offset += 6 c = port.read(1) elif negative: return -result else: return result while c: b = ord(c) if b & 0x80: result |= ((b & 0x7f) << offset) offset += 7 c = port.read(1) else: result |= (b << offset) break if negative: return -result else: return result def write_bvec(port, x): write_vuint(port, len(x)) port.write(x) def read_bvec(port): n = read_vuint(port) return port.read(n) def skip_bvec(port): port.read(read_vuint(port)) def pack(types, *args): if len(types) != len(args): raise Exception('number of arguments does not match format string') port = StringIO() for (type, value) in zip(types, args): if type == 'V': write_vuint(port, value) elif type == 'v': write_vint(port, value) elif type == 's': write_bvec(port, value) else: raise Exception('unknown xpack format string item "' + type + '"') return port.getvalue() def unpack(types, data): result = [] port = StringIO(data) for type in types: if type == 'V': result.append(read_vuint(port)) elif type == 'v': result.append(read_vint(port)) elif type == 's': result.append(read_bvec(port)) else: raise Exception('unknown xunpack format string item "' + type + '"') return result bup-0.25/lib/bup/xstat.py000066400000000000000000000070601225146730500153070ustar00rootroot00000000000000"""Enhanced stat operations for bup.""" import os import stat as pystat from bup import _helpers try: _have_bup_utime_ns = _helpers.bup_utime_ns except AttributeError, e: _have_bup_utime_ns = False try: _have_bup_lutime_ns = _helpers.bup_lutime_ns except AttributeError, e: _have_bup_lutime_ns = False def timespec_to_nsecs((ts_s, ts_ns)): # c.f. 
_helpers.c: timespec_vals_to_py_ns() if ts_ns < 0 or ts_ns > 999999999: raise Exception('invalid timespec nsec value') return ts_s * 10**9 + ts_ns def nsecs_to_timespec(ns): """Return (s, ns) where ns is always non-negative and t = s + ns / 10e8""" # metadata record rep (and libc rep) ns = int(ns) return (ns / 10**9, ns % 10**9) def fstime_floor_secs(ns): """Return largest integer not greater than ns / 10e8.""" return int(ns) / 10**9; def fstime_to_timespec(ns): return nsecs_to_timespec(ns) def fstime_to_sec_str(fstime): (s, ns) = fstime_to_timespec(fstime) if(s < 0): s += 1 if ns == 0: return '%d' % s else: return '%d.%09d' % (s, ns) if _have_bup_utime_ns: def utime(path, times): """Times must be provided as (atime_ns, mtime_ns).""" atime = nsecs_to_timespec(times[0]) mtime = nsecs_to_timespec(times[1]) _helpers.bup_utime_ns(path, (atime, mtime)) else: def utime(path, times): """Times must be provided as (atime_ns, mtime_ns).""" atime = fstime_floor_secs(times[0]) mtime = fstime_floor_secs(times[1]) os.utime(path, (atime, mtime)) if _have_bup_lutime_ns: def lutime(path, times): """Times must be provided as (atime_ns, mtime_ns).""" atime = nsecs_to_timespec(times[0]) mtime = nsecs_to_timespec(times[1]) _helpers.bup_lutime_ns(path, (atime, mtime)) else: lutime = False class stat_result: @staticmethod def from_xstat_rep(st): result = stat_result() (result.st_mode, result.st_ino, result.st_dev, result.st_nlink, result.st_uid, result.st_gid, result.st_rdev, result.st_size, result.st_atime, result.st_mtime, result.st_ctime) = st result.st_atime = timespec_to_nsecs(result.st_atime) result.st_mtime = timespec_to_nsecs(result.st_mtime) result.st_ctime = timespec_to_nsecs(result.st_ctime) return result def stat(path): return stat_result.from_xstat_rep(_helpers.stat(path)) def fstat(path): return stat_result.from_xstat_rep(_helpers.fstat(path)) def lstat(path): return stat_result.from_xstat_rep(_helpers.lstat(path)) def mode_str(mode): result = '' if pystat.S_ISREG(mode): 
result += '-' elif pystat.S_ISDIR(mode): result += 'd' elif pystat.S_ISCHR(mode): result += 'c' elif pystat.S_ISBLK(mode): result += 'b' elif pystat.S_ISFIFO(mode): result += 'p' elif pystat.S_ISLNK(mode): result += 'l' elif pystat.S_ISSOCK(mode): result += 's' else: result += '?' result += 'r' if (mode & pystat.S_IRUSR) else '-' result += 'w' if (mode & pystat.S_IWUSR) else '-' result += 'x' if (mode & pystat.S_IXUSR) else '-' result += 'r' if (mode & pystat.S_IRGRP) else '-' result += 'w' if (mode & pystat.S_IWGRP) else '-' result += 'x' if (mode & pystat.S_IXGRP) else '-' result += 'r' if (mode & pystat.S_IROTH) else '-' result += 'w' if (mode & pystat.S_IWOTH) else '-' result += 'x' if (mode & pystat.S_IXOTH) else '-' return result bup-0.25/lib/tornado/000077500000000000000000000000001225146730500144475ustar00rootroot00000000000000bup-0.25/lib/tornado/README000066400000000000000000000014241225146730500153300ustar00rootroot00000000000000Tornado ======= Tornado is an open source version of the scalable, non-blocking web server and and tools that power FriendFeed. Documentation and downloads are available at http://www.tornadoweb.org/ Tornado is licensed under the Apache Licence, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0.html). Installation ============ To install: python setup.py build sudo python setup.py install Tornado has been tested on Python 2.5 and 2.6. To use all of the features of Tornado, you need to have PycURL and a JSON library like simplejson installed. 
On Mac OS X, you can install the packages with: sudo easy_install setuptools pycurl==7.16.2.1 simplejson On Ubuntu Linux, you can install the packages with: sudo apt-get install python-pycurl python-simplejson bup-0.25/lib/tornado/__init__.py000066400000000000000000000011751225146730500165640ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The Tornado web server and tools.""" bup-0.25/lib/tornado/auth.py000066400000000000000000001103671225146730500157720ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementations of various third-party authentication schemes. All the classes in this file are class Mixins designed to be used with web.py RequestHandler classes. The primary methods for each service are authenticate_redirect(), authorize_redirect(), and get_authenticated_user(). 
The former should be called to redirect the user to, e.g., the OpenID authentication page on the third party service, and the latter should be called upon return to get the user data from the data returned by the third party service. They all take slightly different arguments due to the fact all these services implement authentication and authorization slightly differently. See the individual service classes below for complete documentation. Example usage for Google OpenID: class GoogleHandler(tornado.web.RequestHandler, tornado.auth.GoogleMixin): @tornado.web.asynchronous def get(self): if self.get_argument("openid.mode", None): self.get_authenticated_user(self.async_callback(self._on_auth)) return self.authenticate_redirect() def _on_auth(self, user): if not user: raise tornado.web.HTTPError(500, "Google auth failed") # Save the user with, e.g., set_secure_cookie() """ import binascii import cgi import hashlib import hmac import httpclient import escape import logging import time import urllib import urlparse import uuid class OpenIdMixin(object): """Abstract implementation of OpenID and Attribute Exchange. See GoogleMixin below for example implementations. """ def authenticate_redirect(self, callback_uri=None, ax_attrs=["name","email","language","username"]): """Returns the authentication URL for this service. After authentication, the service will redirect back to the given callback URI. We request the given attributes for the authenticated user by default (name, email, language, and username). If you don't need all those attributes for your app, you can request fewer with the ax_attrs keyword argument. """ callback_uri = callback_uri or self.request.path args = self._openid_args(callback_uri, ax_attrs=ax_attrs) self.redirect(self._OPENID_ENDPOINT + "?" + urllib.urlencode(args)) def get_authenticated_user(self, callback): """Fetches the authenticated user data upon redirect. 
This method should be called by the handler that receives the redirect from the authenticate_redirect() or authorize_redirect() methods. """ # Verify the OpenID response via direct request to the OP args = dict((k, v[-1]) for k, v in self.request.arguments.iteritems()) args["openid.mode"] = u"check_authentication" url = self._OPENID_ENDPOINT + "?" + urllib.urlencode(args) http = httpclient.AsyncHTTPClient() http.fetch(url, self.async_callback( self._on_authentication_verified, callback)) def _openid_args(self, callback_uri, ax_attrs=[], oauth_scope=None): url = urlparse.urljoin(self.request.full_url(), callback_uri) args = { "openid.ns": "http://specs.openid.net/auth/2.0", "openid.claimed_id": "http://specs.openid.net/auth/2.0/identifier_select", "openid.identity": "http://specs.openid.net/auth/2.0/identifier_select", "openid.return_to": url, "openid.realm": self.request.protocol + "://" + self.request.host + "/", "openid.mode": "checkid_setup", } if ax_attrs: args.update({ "openid.ns.ax": "http://openid.net/srv/ax/1.0", "openid.ax.mode": "fetch_request", }) ax_attrs = set(ax_attrs) required = [] if "name" in ax_attrs: ax_attrs -= set(["name", "firstname", "fullname", "lastname"]) required += ["firstname", "fullname", "lastname"] args.update({ "openid.ax.type.firstname": "http://axschema.org/namePerson/first", "openid.ax.type.fullname": "http://axschema.org/namePerson", "openid.ax.type.lastname": "http://axschema.org/namePerson/last", }) known_attrs = { "email": "http://axschema.org/contact/email", "language": "http://axschema.org/pref/language", "username": "http://axschema.org/namePerson/friendly", } for name in ax_attrs: args["openid.ax.type." 
+ name] = known_attrs[name] required.append(name) args["openid.ax.required"] = ",".join(required) if oauth_scope: args.update({ "openid.ns.oauth": "http://specs.openid.net/extensions/oauth/1.0", "openid.oauth.consumer": self.request.host.split(":")[0], "openid.oauth.scope": oauth_scope, }) return args def _on_authentication_verified(self, callback, response): if response.error or u"is_valid:true" not in response.body: logging.warning("Invalid OpenID response: %s", response.error or response.body) callback(None) return # Make sure we got back at least an email from attribute exchange ax_ns = None for name, values in self.request.arguments.iteritems(): if name.startswith("openid.ns.") and \ values[-1] == u"http://openid.net/srv/ax/1.0": ax_ns = name[10:] break def get_ax_arg(uri): if not ax_ns: return u"" prefix = "openid." + ax_ns + ".type." ax_name = None for name, values in self.request.arguments.iteritems(): if values[-1] == uri and name.startswith(prefix): part = name[len(prefix):] ax_name = "openid." + ax_ns + ".value." + part break if not ax_name: return u"" return self.get_argument(ax_name, u"") email = get_ax_arg("http://axschema.org/contact/email") name = get_ax_arg("http://axschema.org/namePerson") first_name = get_ax_arg("http://axschema.org/namePerson/first") last_name = get_ax_arg("http://axschema.org/namePerson/last") username = get_ax_arg("http://axschema.org/namePerson/friendly") locale = get_ax_arg("http://axschema.org/pref/language").lower() user = dict() name_parts = [] if first_name: user["first_name"] = first_name name_parts.append(first_name) if last_name: user["last_name"] = last_name name_parts.append(last_name) if name: user["name"] = name elif name_parts: user["name"] = u" ".join(name_parts) elif email: user["name"] = email.split("@")[0] if email: user["email"] = email if locale: user["locale"] = locale if username: user["username"] = username callback(user) class OAuthMixin(object): """Abstract implementation of OAuth. 
See TwitterMixin and FriendFeedMixin below for example implementations. """ def authorize_redirect(self, callback_uri=None): """Redirects the user to obtain OAuth authorization for this service. Twitter and FriendFeed both require that you register a Callback URL with your application. You should call this method to log the user in, and then call get_authenticated_user() in the handler you registered as your Callback URL to complete the authorization process. This method sets a cookie called _oauth_request_token which is subsequently used (and cleared) in get_authenticated_user for security purposes. """ if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False): raise Exception("This service does not support oauth_callback") http = httpclient.AsyncHTTPClient() http.fetch(self._oauth_request_token_url(), self.async_callback( self._on_request_token, self._OAUTH_AUTHORIZE_URL, callback_uri)) def get_authenticated_user(self, callback): """Gets the OAuth authorized user and access token on callback. This method should be called from the handler for your registered OAuth Callback URL to complete the registration process. We call callback with the authenticated user, which in addition to standard attributes like 'name' includes the 'access_key' attribute, which contains the OAuth access you can use to make authorized requests to this service on behalf of the user. 
""" request_key = self.get_argument("oauth_token") request_cookie = self.get_cookie("_oauth_request_token") if not request_cookie: logging.warning("Missing OAuth request token cookie") callback(None) return cookie_key, cookie_secret = request_cookie.split("|") if cookie_key != request_key: logging.warning("Request token does not match cookie") callback(None) return token = dict(key=cookie_key, secret=cookie_secret) http = httpclient.AsyncHTTPClient() http.fetch(self._oauth_access_token_url(token), self.async_callback( self._on_access_token, callback)) def _oauth_request_token_url(self): consumer_token = self._oauth_consumer_token() url = self._OAUTH_REQUEST_TOKEN_URL args = dict( oauth_consumer_key=consumer_token["key"], oauth_signature_method="HMAC-SHA1", oauth_timestamp=str(int(time.time())), oauth_nonce=binascii.b2a_hex(uuid.uuid4().bytes), oauth_version="1.0", ) signature = _oauth_signature(consumer_token, "GET", url, args) args["oauth_signature"] = signature return url + "?" + urllib.urlencode(args) def _on_request_token(self, authorize_url, callback_uri, response): if response.error: raise Exception("Could not get request token") request_token = _oauth_parse_response(response.body) data = "|".join([request_token["key"], request_token["secret"]]) self.set_cookie("_oauth_request_token", data) args = dict(oauth_token=request_token["key"]) if callback_uri: args["oauth_callback"] = urlparse.urljoin( self.request.full_url(), callback_uri) self.redirect(authorize_url + "?" 
+ urllib.urlencode(args)) def _oauth_access_token_url(self, request_token): consumer_token = self._oauth_consumer_token() url = self._OAUTH_ACCESS_TOKEN_URL args = dict( oauth_consumer_key=consumer_token["key"], oauth_token=request_token["key"], oauth_signature_method="HMAC-SHA1", oauth_timestamp=str(int(time.time())), oauth_nonce=binascii.b2a_hex(uuid.uuid4().bytes), oauth_version="1.0", ) signature = _oauth_signature(consumer_token, "GET", url, args, request_token) args["oauth_signature"] = signature return url + "?" + urllib.urlencode(args) def _on_access_token(self, callback, response): if response.error: logging.warning("Could not fetch access token") callback(None) return access_token = _oauth_parse_response(response.body) user = self._oauth_get_user(access_token, self.async_callback( self._on_oauth_get_user, access_token, callback)) def _oauth_get_user(self, access_token, callback): raise NotImplementedError() def _on_oauth_get_user(self, access_token, callback, user): if not user: callback(None) return user["access_token"] = access_token callback(user) def _oauth_request_parameters(self, url, access_token, parameters={}, method="GET"): """Returns the OAuth parameters as a dict for the given request. parameters should include all POST arguments and query string arguments that will be sent with the request. """ consumer_token = self._oauth_consumer_token() base_args = dict( oauth_consumer_key=consumer_token["key"], oauth_token=access_token["key"], oauth_signature_method="HMAC-SHA1", oauth_timestamp=str(int(time.time())), oauth_nonce=binascii.b2a_hex(uuid.uuid4().bytes), oauth_version="1.0", ) args = {} args.update(base_args) args.update(parameters) signature = _oauth_signature(consumer_token, method, url, args, access_token) base_args["oauth_signature"] = signature return base_args class TwitterMixin(OAuthMixin): """Twitter OAuth authentication. To authenticate with Twitter, register your application with Twitter at http://twitter.com/apps. 
Then copy your Consumer Key and Consumer Secret to the application settings 'twitter_consumer_key' and 'twitter_consumer_secret'. Use this Mixin on the handler for the URL you registered as your application's Callback URL. When your application is set up, you can use this Mixin like this to authenticate the user with Twitter and get access to their stream: class TwitterHandler(tornado.web.RequestHandler, tornado.auth.TwitterMixin): @tornado.web.asynchronous def get(self): if self.get_argument("oauth_token", None): self.get_authenticated_user(self.async_callback(self._on_auth)) return self.authorize_redirect() def _on_auth(self, user): if not user: raise tornado.web.HTTPError(500, "Twitter auth failed") # Save the user using, e.g., set_secure_cookie() The user object returned by get_authenticated_user() includes the attributes 'username', 'name', and all of the custom Twitter user attributes describe at http://apiwiki.twitter.com/Twitter-REST-API-Method%3A-users%C2%A0show in addition to 'access_token'. You should save the access token with the user; it is required to make requests on behalf of the user later with twitter_request(). """ _OAUTH_REQUEST_TOKEN_URL = "http://api.twitter.com/oauth/request_token" _OAUTH_ACCESS_TOKEN_URL = "http://api.twitter.com/oauth/access_token" _OAUTH_AUTHORIZE_URL = "http://api.twitter.com/oauth/authorize" _OAUTH_AUTHENTICATE_URL = "http://api.twitter.com/oauth/authenticate" _OAUTH_NO_CALLBACKS = True def authenticate_redirect(self): """Just like authorize_redirect(), but auto-redirects if authorized. This is generally the right interface to use if you are using Twitter for single-sign on. 
""" http = httpclient.AsyncHTTPClient() http.fetch(self._oauth_request_token_url(), self.async_callback( self._on_request_token, self._OAUTH_AUTHENTICATE_URL, None)) def twitter_request(self, path, callback, access_token=None, post_args=None, **args): """Fetches the given API path, e.g., "/statuses/user_timeline/btaylor" The path should not include the format (we automatically append ".json" and parse the JSON output). If the request is a POST, post_args should be provided. Query string arguments should be given as keyword arguments. All the Twitter methods are documented at http://apiwiki.twitter.com/Twitter-API-Documentation. Many methods require an OAuth access token which you can obtain through authorize_redirect() and get_authenticated_user(). The user returned through that process includes an 'access_token' attribute that can be used to make authenticated requests via this method. Example usage: class MainHandler(tornado.web.RequestHandler, tornado.auth.TwitterMixin): @tornado.web.authenticated @tornado.web.asynchronous def get(self): self.twitter_request( "/statuses/update", post_args={"status": "Testing Tornado Web Server"}, access_token=user["access_token"], callback=self.async_callback(self._on_post)) def _on_post(self, new_entry): if not new_entry: # Call failed; perhaps missing permission? self.authorize_redirect() return self.finish("Posted a message!") """ # Add the OAuth resource request signature if we have credentials url = "http://api.twitter.com/1" + path + ".json" if access_token: all_args = {} all_args.update(args) all_args.update(post_args or {}) consumer_token = self._oauth_consumer_token() method = "POST" if post_args is not None else "GET" oauth = self._oauth_request_parameters( url, access_token, all_args, method=method) args.update(oauth) if args: url += "?" 
+ urllib.urlencode(args) callback = self.async_callback(self._on_twitter_request, callback) http = httpclient.AsyncHTTPClient() if post_args is not None: http.fetch(url, method="POST", body=urllib.urlencode(post_args), callback=callback) else: http.fetch(url, callback=callback) def _on_twitter_request(self, callback, response): if response.error: logging.warning("Error response %s fetching %s", response.error, response.request.url) callback(None) return callback(escape.json_decode(response.body)) def _oauth_consumer_token(self): self.require_setting("twitter_consumer_key", "Twitter OAuth") self.require_setting("twitter_consumer_secret", "Twitter OAuth") return dict( key=self.settings["twitter_consumer_key"], secret=self.settings["twitter_consumer_secret"]) def _oauth_get_user(self, access_token, callback): callback = self.async_callback(self._parse_user_response, callback) self.twitter_request( "/users/show/" + access_token["screen_name"], access_token=access_token, callback=callback) def _parse_user_response(self, callback, user): if user: user["username"] = user["screen_name"] callback(user) class FriendFeedMixin(OAuthMixin): """FriendFeed OAuth authentication. To authenticate with FriendFeed, register your application with FriendFeed at http://friendfeed.com/api/applications. Then copy your Consumer Key and Consumer Secret to the application settings 'friendfeed_consumer_key' and 'friendfeed_consumer_secret'. Use this Mixin on the handler for the URL you registered as your application's Callback URL. 
When your application is set up, you can use this Mixin like this to authenticate the user with FriendFeed and get access to their feed: class FriendFeedHandler(tornado.web.RequestHandler, tornado.auth.FriendFeedMixin): @tornado.web.asynchronous def get(self): if self.get_argument("oauth_token", None): self.get_authenticated_user(self.async_callback(self._on_auth)) return self.authorize_redirect() def _on_auth(self, user): if not user: raise tornado.web.HTTPError(500, "FriendFeed auth failed") # Save the user using, e.g., set_secure_cookie() The user object returned by get_authenticated_user() includes the attributes 'username', 'name', and 'description' in addition to 'access_token'. You should save the access token with the user; it is required to make requests on behalf of the user later with friendfeed_request(). """ _OAUTH_REQUEST_TOKEN_URL = "https://friendfeed.com/account/oauth/request_token" _OAUTH_ACCESS_TOKEN_URL = "https://friendfeed.com/account/oauth/access_token" _OAUTH_AUTHORIZE_URL = "https://friendfeed.com/account/oauth/authorize" _OAUTH_NO_CALLBACKS = True def friendfeed_request(self, path, callback, access_token=None, post_args=None, **args): """Fetches the given relative API path, e.g., "/bret/friends" If the request is a POST, post_args should be provided. Query string arguments should be given as keyword arguments. All the FriendFeed methods are documented at http://friendfeed.com/api/documentation. Many methods require an OAuth access token which you can obtain through authorize_redirect() and get_authenticated_user(). The user returned through that process includes an 'access_token' attribute that can be used to make authenticated requests via this method. 
Example usage: class MainHandler(tornado.web.RequestHandler, tornado.auth.FriendFeedMixin): @tornado.web.authenticated @tornado.web.asynchronous def get(self): self.friendfeed_request( "/entry", post_args={"body": "Testing Tornado Web Server"}, access_token=self.current_user["access_token"], callback=self.async_callback(self._on_post)) def _on_post(self, new_entry): if not new_entry: # Call failed; perhaps missing permission? self.authorize_redirect() return self.finish("Posted a message!") """ # Add the OAuth resource request signature if we have credentials url = "http://friendfeed-api.com/v2" + path if access_token: all_args = {} all_args.update(args) all_args.update(post_args or {}) consumer_token = self._oauth_consumer_token() method = "POST" if post_args is not None else "GET" oauth = self._oauth_request_parameters( url, access_token, all_args, method=method) args.update(oauth) if args: url += "?" + urllib.urlencode(args) callback = self.async_callback(self._on_friendfeed_request, callback) http = httpclient.AsyncHTTPClient() if post_args is not None: http.fetch(url, method="POST", body=urllib.urlencode(post_args), callback=callback) else: http.fetch(url, callback=callback) def _on_friendfeed_request(self, callback, response): if response.error: logging.warning("Error response %s fetching %s", response.error, response.request.url) callback(None) return callback(escape.json_decode(response.body)) def _oauth_consumer_token(self): self.require_setting("friendfeed_consumer_key", "FriendFeed OAuth") self.require_setting("friendfeed_consumer_secret", "FriendFeed OAuth") return dict( key=self.settings["friendfeed_consumer_key"], secret=self.settings["friendfeed_consumer_secret"]) def _oauth_get_user(self, access_token, callback): callback = self.async_callback(self._parse_user_response, callback) self.friendfeed_request( "/feedinfo/" + access_token["username"], include="id,name,description", access_token=access_token, callback=callback) def 
_parse_user_response(self, callback, user): if user: user["username"] = user["id"] callback(user) class GoogleMixin(OpenIdMixin, OAuthMixin): """Google Open ID / OAuth authentication. No application registration is necessary to use Google for authentication or to access Google resources on behalf of a user. To authenticate with Google, redirect with authenticate_redirect(). On return, parse the response with get_authenticated_user(). We send a dict containing the values for the user, including 'email', 'name', and 'locale'. Example usage: class GoogleHandler(tornado.web.RequestHandler, tornado.auth.GoogleMixin): @tornado.web.asynchronous def get(self): if self.get_argument("openid.mode", None): self.get_authenticated_user(self.async_callback(self._on_auth)) return self.authenticate_redirect() def _on_auth(self, user): if not user: raise tornado.web.HTTPError(500, "Google auth failed") # Save the user with, e.g., set_secure_cookie() """ _OPENID_ENDPOINT = "https://www.google.com/accounts/o8/ud" _OAUTH_ACCESS_TOKEN_URL = "https://www.google.com/accounts/OAuthGetAccessToken" def authorize_redirect(self, oauth_scope, callback_uri=None, ax_attrs=["name","email","language","username"]): """Authenticates and authorizes for the given Google resource. Some of the available resources are: Gmail Contacts - http://www.google.com/m8/feeds/ Calendar - http://www.google.com/calendar/feeds/ Finance - http://finance.google.com/finance/feeds/ You can authorize multiple resources by separating the resource URLs with a space. """ callback_uri = callback_uri or self.request.path args = self._openid_args(callback_uri, ax_attrs=ax_attrs, oauth_scope=oauth_scope) self.redirect(self._OPENID_ENDPOINT + "?" 
+ urllib.urlencode(args)) def get_authenticated_user(self, callback): """Fetches the authenticated user data upon redirect.""" # Look to see if we are doing combined OpenID/OAuth oauth_ns = "" for name, values in self.request.arguments.iteritems(): if name.startswith("openid.ns.") and \ values[-1] == u"http://specs.openid.net/extensions/oauth/1.0": oauth_ns = name[10:] break token = self.get_argument("openid." + oauth_ns + ".request_token", "") if token: http = httpclient.AsyncHTTPClient() token = dict(key=token, secret="") http.fetch(self._oauth_access_token_url(token), self.async_callback(self._on_access_token, callback)) else: OpenIdMixin.get_authenticated_user(self, callback) def _oauth_consumer_token(self): self.require_setting("google_consumer_key", "Google OAuth") self.require_setting("google_consumer_secret", "Google OAuth") return dict( key=self.settings["google_consumer_key"], secret=self.settings["google_consumer_secret"]) def _oauth_get_user(self, access_token, callback): OpenIdMixin.get_authenticated_user(self, callback) class FacebookMixin(object): """Facebook Connect authentication. To authenticate with Facebook, register your application with Facebook at http://www.facebook.com/developers/apps.php. Then copy your API Key and Application Secret to the application settings 'facebook_api_key' and 'facebook_secret'. 
When your application is set up, you can use this Mixin like this to authenticate the user with Facebook: class FacebookHandler(tornado.web.RequestHandler, tornado.auth.FacebookMixin): @tornado.web.asynchronous def get(self): if self.get_argument("session", None): self.get_authenticated_user(self.async_callback(self._on_auth)) return self.authenticate_redirect() def _on_auth(self, user): if not user: raise tornado.web.HTTPError(500, "Facebook auth failed") # Save the user using, e.g., set_secure_cookie() The user object returned by get_authenticated_user() includes the attributes 'facebook_uid' and 'name' in addition to session attributes like 'session_key'. You should save the session key with the user; it is required to make requests on behalf of the user later with facebook_request(). """ def authenticate_redirect(self, callback_uri=None, cancel_uri=None, extended_permissions=None): """Authenticates/installs this app for the current user.""" self.require_setting("facebook_api_key", "Facebook Connect") callback_uri = callback_uri or self.request.path args = { "api_key": self.settings["facebook_api_key"], "v": "1.0", "fbconnect": "true", "display": "page", "next": urlparse.urljoin(self.request.full_url(), callback_uri), "return_session": "true", } if cancel_uri: args["cancel_url"] = urlparse.urljoin( self.request.full_url(), cancel_uri) if extended_permissions: if isinstance(extended_permissions, basestring): extended_permissions = [extended_permissions] args["req_perms"] = ",".join(extended_permissions) self.redirect("http://www.facebook.com/login.php?" + urllib.urlencode(args)) def authorize_redirect(self, extended_permissions, callback_uri=None, cancel_uri=None): """Redirects to an authorization request for the given FB resource. The available resource names are listed at http://wiki.developers.facebook.com/index.php/Extended_permission. 
The most common resource types include: publish_stream read_stream email sms extended_permissions can be a single permission name or a list of names. To get the session secret and session key, call get_authenticated_user() just as you would with authenticate_redirect(). """ self.authenticate_redirect(callback_uri, cancel_uri, extended_permissions) def get_authenticated_user(self, callback): """Fetches the authenticated Facebook user. The authenticated user includes the special Facebook attributes 'session_key' and 'facebook_uid' in addition to the standard user attributes like 'name'. """ self.require_setting("facebook_api_key", "Facebook Connect") session = escape.json_decode(self.get_argument("session")) self.facebook_request( method="facebook.users.getInfo", callback=self.async_callback( self._on_get_user_info, callback, session), session_key=session["session_key"], uids=session["uid"], fields="uid,first_name,last_name,name,locale,pic_square," \ "profile_url,username") def facebook_request(self, method, callback, **args): """Makes a Facebook API REST request. We automatically include the Facebook API key and signature, but it is the callers responsibility to include 'session_key' and any other required arguments to the method. The available Facebook methods are documented here: http://wiki.developers.facebook.com/index.php/API Here is an example for the stream.get() method: class MainHandler(tornado.web.RequestHandler, tornado.auth.FacebookMixin): @tornado.web.authenticated @tornado.web.asynchronous def get(self): self.facebook_request( method="stream.get", callback=self.async_callback(self._on_stream), session_key=self.current_user["session_key"]) def _on_stream(self, stream): if stream is None: # Not authorized to read the stream yet? 
self.redirect(self.authorize_redirect("read_stream")) return self.render("stream.html", stream=stream) """ self.require_setting("facebook_api_key", "Facebook Connect") self.require_setting("facebook_secret", "Facebook Connect") if not method.startswith("facebook."): method = "facebook." + method args["api_key"] = self.settings["facebook_api_key"] args["v"] = "1.0" args["method"] = method args["call_id"] = str(long(time.time() * 1e6)) args["format"] = "json" args["sig"] = self._signature(args) url = "http://api.facebook.com/restserver.php?" + \ urllib.urlencode(args) http = httpclient.AsyncHTTPClient() http.fetch(url, callback=self.async_callback( self._parse_response, callback)) def _on_get_user_info(self, callback, session, users): if users is None: callback(None) return callback({ "name": users[0]["name"], "first_name": users[0]["first_name"], "last_name": users[0]["last_name"], "uid": users[0]["uid"], "locale": users[0]["locale"], "pic_square": users[0]["pic_square"], "profile_url": users[0]["profile_url"], "username": users[0].get("username"), "session_key": session["session_key"], "session_expires": session.get("expires"), }) def _parse_response(self, callback, response): if response.error: logging.warning("HTTP error from Facebook: %s", response.error) callback(None) return try: json = escape.json_decode(response.body) except: logging.warning("Invalid JSON from Facebook: %r", response.body) callback(None) return if isinstance(json, dict) and json.get("error_code"): logging.warning("Facebook error: %d: %r", json["error_code"], json.get("error_msg")) callback(None) return callback(json) def _signature(self, args): parts = ["%s=%s" % (n, args[n]) for n in sorted(args.keys())] body = "".join(parts) + self.settings["facebook_secret"] if isinstance(body, unicode): body = body.encode("utf-8") return hashlib.md5(body).hexdigest() def _oauth_signature(consumer_token, method, url, parameters={}, token=None): """Calculates the HMAC-SHA1 OAuth signature for the given 
request. See http://oauth.net/core/1.0/#signing_process """ parts = urlparse.urlparse(url) scheme, netloc, path = parts[:3] normalized_url = scheme.lower() + "://" + netloc.lower() + path base_elems = [] base_elems.append(method.upper()) base_elems.append(normalized_url) base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v))) for k, v in sorted(parameters.items()))) base_string = "&".join(_oauth_escape(e) for e in base_elems) key_elems = [consumer_token["secret"]] key_elems.append(token["secret"] if token else "") key = "&".join(key_elems) hash = hmac.new(key, base_string, hashlib.sha1) return binascii.b2a_base64(hash.digest())[:-1] def _oauth_escape(val): if isinstance(val, unicode): val = val.encode("utf-8") return urllib.quote(val, safe="~") def _oauth_parse_response(body): p = cgi.parse_qs(body, keep_blank_values=False) token = dict(key=p["oauth_token"][0], secret=p["oauth_token_secret"][0]) # Add the extra parameters the Provider included to the token special = ("oauth_token", "oauth_token_secret") token.update((k, p[k][0]) for k in p if k not in special) return token bup-0.25/lib/tornado/autoreload.py000066400000000000000000000074121225146730500171640ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A module to automatically restart the server when a module is modified. This module depends on IOLoop, so it will not work in WSGI applications and Google AppEngine. 
""" import functools import ioloop import logging import os import sys import types try: import signal except ImportError: signal = None def start(io_loop=None, check_time=500): """Restarts the process automatically when a module is modified. We run on the I/O loop, and restarting is a destructive operation, so will terminate any pending requests. """ io_loop = io_loop or ioloop.IOLoop.instance() modify_times = {} callback = functools.partial(_reload_on_update, io_loop, modify_times) scheduler = ioloop.PeriodicCallback(callback, check_time, io_loop=io_loop) scheduler.start() _reload_attempted = False def _reload_on_update(io_loop, modify_times): global _reload_attempted if _reload_attempted: # We already tried to reload and it didn't work, so don't try again. return for module in sys.modules.values(): # Some modules play games with sys.modules (e.g. email/__init__.py # in the standard library), and occasionally this can cause strange # failures in getattr. Just ignore anything that's not an ordinary # module. if not isinstance(module, types.ModuleType): continue path = getattr(module, "__file__", None) if not path: continue if path.endswith(".pyc") or path.endswith(".pyo"): path = path[:-1] try: modified = os.stat(path).st_mtime except: continue if path not in modify_times: modify_times[path] = modified continue if modify_times[path] != modified: logging.info("%s modified; restarting server", path) _reload_attempted = True for fd in io_loop._handlers.keys(): try: os.close(fd) except: pass if hasattr(signal, "setitimer"): # Clear the alarm signal set by # ioloop.set_blocking_log_threshold so it doesn't fire # after the exec. signal.setitimer(signal.ITIMER_REAL, 0, 0) try: os.execv(sys.executable, [sys.executable] + sys.argv) except OSError: # Mac OS X versions prior to 10.6 do not support execv in # a process that contains multiple threads. Instead of # re-executing in the current process, start a new one # and cause the current process to exit. 
This isn't # ideal since the new process is detached from the parent # terminal and thus cannot easily be killed with ctrl-C, # but it's better than not being able to autoreload at # all. # Unfortunately the errno returned in this case does not # appear to be consistent, so we can't easily check for # this error specifically. os.spawnv(os.P_NOWAIT, sys.executable, [sys.executable] + sys.argv) sys.exit(0) bup-0.25/lib/tornado/database.py000066400000000000000000000134551225146730500165750ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A lightweight wrapper around MySQLdb.""" import copy import MySQLdb.constants import MySQLdb.converters import MySQLdb.cursors import itertools import logging class Connection(object): """A lightweight wrapper around MySQLdb DB-API connections. The main value we provide is wrapping rows in a dict/object so that columns can be accessed by name. Typical usage: db = database.Connection("localhost", "mydatabase") for article in db.query("SELECT * FROM articles"): print article.title Cursors are hidden by the implementation, but other than that, the methods are very similar to the DB-API. We explicitly set the timezone to UTC and the character encoding to UTF-8 on all connections to avoid time zone and encoding errors. 
""" def __init__(self, host, database, user=None, password=None): self.host = host self.database = database args = dict(conv=CONVERSIONS, use_unicode=True, charset="utf8", db=database, init_command='SET time_zone = "+0:00"', sql_mode="TRADITIONAL") if user is not None: args["user"] = user if password is not None: args["passwd"] = password # We accept a path to a MySQL socket file or a host(:port) string if "/" in host: args["unix_socket"] = host else: self.socket = None pair = host.split(":") if len(pair) == 2: args["host"] = pair[0] args["port"] = int(pair[1]) else: args["host"] = host args["port"] = 3306 self._db = None self._db_args = args try: self.reconnect() except: logging.error("Cannot connect to MySQL on %s", self.host, exc_info=True) def __del__(self): self.close() def close(self): """Closes this database connection.""" if getattr(self, "_db", None) is not None: self._db.close() self._db = None def reconnect(self): """Closes the existing database connection and re-opens it.""" self.close() self._db = MySQLdb.connect(**self._db_args) self._db.autocommit(True) def iter(self, query, *parameters): """Returns an iterator for the given query and parameters.""" if self._db is None: self.reconnect() cursor = MySQLdb.cursors.SSCursor(self._db) try: self._execute(cursor, query, parameters) column_names = [d[0] for d in cursor.description] for row in cursor: yield Row(zip(column_names, row)) finally: cursor.close() def query(self, query, *parameters): """Returns a row list for the given query and parameters.""" cursor = self._cursor() try: self._execute(cursor, query, parameters) column_names = [d[0] for d in cursor.description] return [Row(itertools.izip(column_names, row)) for row in cursor] finally: cursor.close() def get(self, query, *parameters): """Returns the first row returned for the given query.""" rows = self.query(query, *parameters) if not rows: return None elif len(rows) > 1: raise Exception("Multiple rows returned for Database.get() query") else: 
return rows[0] def execute(self, query, *parameters): """Executes the given query, returning the lastrowid from the query.""" cursor = self._cursor() try: self._execute(cursor, query, parameters) return cursor.lastrowid finally: cursor.close() def executemany(self, query, parameters): """Executes the given query against all the given param sequences. We return the lastrowid from the query. """ cursor = self._cursor() try: cursor.executemany(query, parameters) return cursor.lastrowid finally: cursor.close() def _cursor(self): if self._db is None: self.reconnect() return self._db.cursor() def _execute(self, cursor, query, parameters): try: return cursor.execute(query, parameters) except OperationalError: logging.error("Error connecting to MySQL on %s", self.host) self.close() raise class Row(dict): """A dict that allows for object-like property access syntax.""" def __getattr__(self, name): try: return self[name] except KeyError: raise AttributeError(name) # Fix the access conversions to properly recognize unicode/binary FIELD_TYPE = MySQLdb.constants.FIELD_TYPE FLAG = MySQLdb.constants.FLAG CONVERSIONS = copy.deepcopy(MySQLdb.converters.conversions) field_types = [FIELD_TYPE.BLOB, FIELD_TYPE.STRING, FIELD_TYPE.VAR_STRING] if 'VARCHAR' in vars(FIELD_TYPE): field_types.append(FIELD_TYPE.VARCHAR) for field_type in field_types: CONVERSIONS[field_type].insert(0, (FLAG.BINARY, str)) # Alias some common MySQL exceptions IntegrityError = MySQLdb.IntegrityError OperationalError = MySQLdb.OperationalError bup-0.25/lib/tornado/epoll.c000066400000000000000000000060151225146730500157300ustar00rootroot00000000000000/* * Copyright 2009 Facebook * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. 
You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ #include "Python.h" #include #include #define MAX_EVENTS 24 /* * Simple wrapper around epoll_create. */ static PyObject* _epoll_create(void) { int fd = epoll_create(MAX_EVENTS); if (fd == -1) { PyErr_SetFromErrno(PyExc_Exception); return NULL; } return PyInt_FromLong(fd); } /* * Simple wrapper around epoll_ctl. We throw an exception if the call fails * rather than returning the error code since it is an infrequent (and likely * catastrophic) event when it does happen. */ static PyObject* _epoll_ctl(PyObject* self, PyObject* args) { int epfd, op, fd, events; struct epoll_event event; if (!PyArg_ParseTuple(args, "iiiI", &epfd, &op, &fd, &events)) { return NULL; } memset(&event, 0, sizeof(event)); event.events = events; event.data.fd = fd; if (epoll_ctl(epfd, op, fd, &event) == -1) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } Py_INCREF(Py_None); return Py_None; } /* * Simple wrapper around epoll_wait. We return None if the call times out and * throw an exception if an error occurs. Otherwise, we return a list of * (fd, event) tuples. 
*/ static PyObject* _epoll_wait(PyObject* self, PyObject* args) { struct epoll_event events[MAX_EVENTS]; int epfd, timeout, num_events, i; PyObject* list; PyObject* tuple; if (!PyArg_ParseTuple(args, "ii", &epfd, &timeout)) { return NULL; } Py_BEGIN_ALLOW_THREADS num_events = epoll_wait(epfd, events, MAX_EVENTS, timeout); Py_END_ALLOW_THREADS if (num_events == -1) { PyErr_SetFromErrno(PyExc_Exception); return NULL; } list = PyList_New(num_events); for (i = 0; i < num_events; i++) { tuple = PyTuple_New(2); PyTuple_SET_ITEM(tuple, 0, PyInt_FromLong(events[i].data.fd)); PyTuple_SET_ITEM(tuple, 1, PyInt_FromLong(events[i].events)); PyList_SET_ITEM(list, i, tuple); } return list; } /* * Our method declararations */ static PyMethodDef kEpollMethods[] = { {"epoll_create", (PyCFunction)_epoll_create, METH_NOARGS, "Create an epoll file descriptor"}, {"epoll_ctl", _epoll_ctl, METH_VARARGS, "Control an epoll file descriptor"}, {"epoll_wait", _epoll_wait, METH_VARARGS, "Wait for events on an epoll file descriptor"}, {NULL, NULL, 0, NULL} }; /* * Module initialization */ PyMODINIT_FUNC initepoll(void) { Py_InitModule("epoll", kEpollMethods); } bup-0.25/lib/tornado/escape.py000066400000000000000000000074451225146730500162730ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Escaping/unescaping methods for HTML, JSON, URLs, and others.""" import htmlentitydefs import re import xml.sax.saxutils import urllib # json module is in the standard library as of python 2.6; fall back to # simplejson if present for older versions. try: import json assert hasattr(json, "loads") and hasattr(json, "dumps") _json_decode = lambda s: json.loads(s) _json_encode = lambda v: json.dumps(v) except: try: import simplejson _json_decode = lambda s: simplejson.loads(_unicode(s)) _json_encode = lambda v: simplejson.dumps(v) except ImportError: try: # For Google AppEngine from django.utils import simplejson _json_decode = lambda s: simplejson.loads(_unicode(s)) _json_encode = lambda v: simplejson.dumps(v) except ImportError: def _json_decode(s): raise NotImplementedError( "A JSON parser is required, e.g., simplejson at " "http://pypi.python.org/pypi/simplejson/") _json_encode = _json_decode def xhtml_escape(value): """Escapes a string so it is valid within XML or XHTML.""" return utf8(xml.sax.saxutils.escape(value, {'"': """})) def xhtml_unescape(value): """Un-escapes an XML-escaped string.""" return re.sub(r"&(#?)(\w+?);", _convert_entity, _unicode(value)) def json_encode(value): """JSON-encodes the given Python object.""" # JSON permits but does not require forward slashes to be escaped. # This is useful when json data is emitted in a tags from prematurely terminating # the javscript. Some json libraries do this escaping by default, # although python's standard library does not, so we do it here. # http://stackoverflow.com/questions/1580647/json-why-are-forward-slashes-escaped return _json_encode(value).replace("= 300: raise HTTPError(code, response=response) return response except pycurl.error, e: buffer.close() raise CurlError(*e) class AsyncHTTPClient(object): """An non-blocking HTTP client backed with pycurl. 
Example usage: import ioloop def handle_request(response): if response.error: print "Error:", response.error else: print response.body ioloop.IOLoop.instance().stop() http_client = httpclient.AsyncHTTPClient() http_client.fetch("http://www.google.com/", handle_request) ioloop.IOLoop.instance().start() fetch() can take a string URL or an HTTPRequest instance, which offers more options, like executing POST/PUT/DELETE requests. The keyword argument max_clients to the AsyncHTTPClient constructor determines the maximum number of simultaneous fetch() operations that can execute in parallel on each IOLoop. """ _ASYNC_CLIENTS = weakref.WeakKeyDictionary() def __new__(cls, io_loop=None, max_clients=10, max_simultaneous_connections=None): # There is one client per IOLoop since they share curl instances io_loop = io_loop or ioloop.IOLoop.instance() if io_loop in cls._ASYNC_CLIENTS: return cls._ASYNC_CLIENTS[io_loop] else: instance = super(AsyncHTTPClient, cls).__new__(cls) instance.io_loop = io_loop instance._multi = pycurl.CurlMulti() instance._curls = [_curl_create(max_simultaneous_connections) for i in xrange(max_clients)] instance._free_list = instance._curls[:] instance._requests = collections.deque() instance._fds = {} instance._events = {} instance._added_perform_callback = False instance._timeout = None instance._closed = False cls._ASYNC_CLIENTS[io_loop] = instance return instance def close(self): """Destroys this http client, freeing any file descriptors used. Not needed in normal use, but may be helpful in unittests that create and destroy http clients. No other methods may be called on the AsyncHTTPClient after close(). """ del AsyncHTTPClient._ASYNC_CLIENTS[self.io_loop] for curl in self._curls: curl.close() self._multi.close() self._closed = True def fetch(self, request, callback, **kwargs): """Executes an HTTPRequest, calling callback with an HTTPResponse. 
If an error occurs during the fetch, the HTTPResponse given to the callback has a non-None error attribute that contains the exception encountered during the request. You can call response.reraise() to throw the exception (if any) in the callback. """ if not isinstance(request, HTTPRequest): request = HTTPRequest(url=request, **kwargs) self._requests.append((request, callback)) self._add_perform_callback() def _add_perform_callback(self): if not self._added_perform_callback: self.io_loop.add_callback(self._perform) self._added_perform_callback = True def _handle_events(self, fd, events): self._events[fd] = events self._add_perform_callback() def _handle_timeout(self): self._timeout = None self._perform() def _perform(self): self._added_perform_callback = False if self._closed: return while True: while True: ret, num_handles = self._multi.perform() if ret != pycurl.E_CALL_MULTI_PERFORM: break # Update the set of active file descriptors. It is important # that this happen immediately after perform() because # fds that have been removed from fdset are free to be reused # in user callbacks. fds = {} (readable, writable, exceptable) = self._multi.fdset() for fd in readable: fds[fd] = fds.get(fd, 0) | 0x1 | 0x2 for fd in writable: fds[fd] = fds.get(fd, 0) | 0x4 for fd in exceptable: fds[fd] = fds.get(fd, 0) | 0x8 | 0x10 if fds and max(fds.iterkeys()) > 900: # Libcurl has a bug in which it behaves unpredictably with # file descriptors greater than 1024. (This is because # even though it uses poll() instead of select(), it still # uses FD_SET internally) Since curl opens its own file # descriptors we can't catch this problem when it happens, # and the best we can do is detect that it's about to # happen. Exiting is a lousy way to handle this error, # but there's not much we can do at this point. 
Exiting # (and getting restarted by whatever monitoring process # is handling crashed tornado processes) will at least # get things working again and hopefully bring the issue # to someone's attention. # If you run into this issue, you either have a file descriptor # leak or need to run more tornado processes (so that none # of them are handling more than 1000 simultaneous connections) print >> sys.stderr, "ERROR: File descriptor too high for libcurl. Exiting." logging.error("File descriptor too high for libcurl. Exiting.") sys.exit(1) for fd in self._fds: if fd not in fds: try: self.io_loop.remove_handler(fd) except (OSError, IOError), e: if e[0] != errno.ENOENT: raise for fd, events in fds.iteritems(): old_events = self._fds.get(fd, None) if old_events is None: self.io_loop.add_handler(fd, self._handle_events, events) elif old_events != events: try: self.io_loop.update_handler(fd, events) except (OSError, IOError), e: if e[0] == errno.ENOENT: self.io_loop.add_handler(fd, self._handle_events, events) else: raise self._fds = fds # Handle completed fetches completed = 0 while True: num_q, ok_list, err_list = self._multi.info_read() for curl in ok_list: self._finish(curl) completed += 1 for curl, errnum, errmsg in err_list: self._finish(curl, errnum, errmsg) completed += 1 if num_q == 0: break # Start fetching new URLs started = 0 while self._free_list and self._requests: started += 1 curl = self._free_list.pop() (request, callback) = self._requests.popleft() curl.info = { "headers": httputil.HTTPHeaders(), "buffer": cStringIO.StringIO(), "request": request, "callback": callback, "start_time": time.time(), } _curl_setup_request(curl, request, curl.info["buffer"], curl.info["headers"]) self._multi.add_handle(curl) if not started and not completed: break if self._timeout is not None: self.io_loop.remove_timeout(self._timeout) self._timeout = None if num_handles: self._timeout = self.io_loop.add_timeout( time.time() + 0.2, self._handle_timeout) def _finish(self, curl, 
curl_error=None, curl_message=None): info = curl.info curl.info = None self._multi.remove_handle(curl) self._free_list.append(curl) buffer = info["buffer"] if curl_error: error = CurlError(curl_error, curl_message) code = error.code body = None effective_url = None buffer.close() buffer = None else: error = None code = curl.getinfo(pycurl.HTTP_CODE) effective_url = curl.getinfo(pycurl.EFFECTIVE_URL) buffer.seek(0) try: info["callback"](HTTPResponse( request=info["request"], code=code, headers=info["headers"], buffer=buffer, effective_url=effective_url, error=error, request_time=time.time() - info["start_time"])) except (KeyboardInterrupt, SystemExit): raise except: logging.error("Exception in callback %r", info["callback"], exc_info=True) class AsyncHTTPClient2(object): """Alternate implementation of AsyncHTTPClient. This class has the same interface as AsyncHTTPClient (so see that class for usage documentation) but is implemented with a different set of libcurl APIs (curl_multi_socket_action instead of fdset/perform). This implementation will likely become the default in the future, but for now should be considered somewhat experimental. The main advantage of this class over the original implementation is that it is immune to the fd > 1024 bug, so applications with a large number of simultaneous requests (e.g. long-polling) may prefer this version. Known bugs: * Timeouts connecting to localhost In some situations, this implementation will return a connection timeout when the old implementation would be able to connect. This has only been observed when connecting to localhost when using the kqueue-based IOLoop (mac/bsd), but it may also occur on epoll (linux) and, in principle, for non-localhost sites. While the bug is unrelated to IPv6, disabling IPv6 will avoid the most common manifestations of the bug, so this class disables IPv6 when it detects an affected version of libcurl. 
The underlying cause is a libcurl bug in versions up to and including 7.21.0 (it will be fixed in the not-yet-released 7.21.1) http://sourceforge.net/tracker/?func=detail&aid=3017819&group_id=976&atid=100976 """ _ASYNC_CLIENTS = weakref.WeakKeyDictionary() def __new__(cls, io_loop=None, max_clients=10, max_simultaneous_connections=None): # There is one client per IOLoop since they share curl instances io_loop = io_loop or ioloop.IOLoop.instance() if io_loop in cls._ASYNC_CLIENTS: return cls._ASYNC_CLIENTS[io_loop] else: instance = super(AsyncHTTPClient2, cls).__new__(cls) instance.io_loop = io_loop instance._multi = pycurl.CurlMulti() instance._multi.setopt(pycurl.M_TIMERFUNCTION, instance._set_timeout) instance._multi.setopt(pycurl.M_SOCKETFUNCTION, instance._handle_socket) instance._curls = [_curl_create(max_simultaneous_connections) for i in xrange(max_clients)] instance._free_list = instance._curls[:] instance._requests = collections.deque() instance._fds = {} instance._timeout = None cls._ASYNC_CLIENTS[io_loop] = instance return instance def close(self): """Destroys this http client, freeing any file descriptors used. Not needed in normal use, but may be helpful in unittests that create and destroy http clients. No other methods may be called on the AsyncHTTPClient after close(). """ del AsyncHTTPClient2._ASYNC_CLIENTS[self.io_loop] for curl in self._curls: curl.close() self._multi.close() self._closed = True def fetch(self, request, callback, **kwargs): """Executes an HTTPRequest, calling callback with an HTTPResponse. If an error occurs during the fetch, the HTTPResponse given to the callback has a non-None error attribute that contains the exception encountered during the request. You can call response.reraise() to throw the exception (if any) in the callback. 
""" if not isinstance(request, HTTPRequest): request = HTTPRequest(url=request, **kwargs) self._requests.append((request, callback)) self._process_queue() self._set_timeout(0) def _handle_socket(self, event, fd, multi, data): """Called by libcurl when it wants to change the file descriptors it cares about. """ event_map = { pycurl.POLL_NONE: ioloop.IOLoop.NONE, pycurl.POLL_IN: ioloop.IOLoop.READ, pycurl.POLL_OUT: ioloop.IOLoop.WRITE, pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE } if event == pycurl.POLL_REMOVE: self.io_loop.remove_handler(fd) del self._fds[fd] else: ioloop_event = event_map[event] if fd not in self._fds: self._fds[fd] = ioloop_event self.io_loop.add_handler(fd, self._handle_events, ioloop_event) else: self._fds[fd] = ioloop_event self.io_loop.update_handler(fd, ioloop_event) def _set_timeout(self, msecs): """Called by libcurl to schedule a timeout.""" if self._timeout is not None: self.io_loop.remove_timeout(self._timeout) self._timeout = self.io_loop.add_timeout( time.time() + msecs/1000.0, self._handle_timeout) def _handle_events(self, fd, events): """Called by IOLoop when there is activity on one of our file descriptors. """ action = 0 if events & ioloop.IOLoop.READ: action |= pycurl.CSELECT_IN if events & ioloop.IOLoop.WRITE: action |= pycurl.CSELECT_OUT while True: try: ret, num_handles = self._multi.socket_action(fd, action) except Exception, e: ret = e[0] if ret != pycurl.E_CALL_MULTI_PERFORM: break self._finish_pending_requests() def _handle_timeout(self): """Called by IOLoop when the requested timeout has passed.""" self._timeout = None while True: try: ret, num_handles = self._multi.socket_action( pycurl.SOCKET_TIMEOUT, 0) except Exception, e: ret = e[0] if ret != pycurl.E_CALL_MULTI_PERFORM: break self._finish_pending_requests() # In theory, we shouldn't have to do this because curl will # call _set_timeout whenever the timeout changes. 
However, # sometimes after _handle_timeout we will need to reschedule # immediately even though nothing has changed from curl's # perspective. This is because when socket_action is # called with SOCKET_TIMEOUT, libcurl decides internally which # timeouts need to be processed by using a monotonic clock # (where available) while tornado uses python's time.time() # to decide when timeouts have occurred. When those clocks # disagree on elapsed time (as they will whenever there is an # NTP adjustment), tornado might call _handle_timeout before # libcurl is ready. After each timeout, resync the scheduled # timeout with libcurl's current state. new_timeout = self._multi.timeout() if new_timeout != -1: self._set_timeout(new_timeout) def _finish_pending_requests(self): """Process any requests that were completed by the last call to multi.socket_action. """ while True: num_q, ok_list, err_list = self._multi.info_read() for curl in ok_list: self._finish(curl) for curl, errnum, errmsg in err_list: self._finish(curl, errnum, errmsg) if num_q == 0: break self._process_queue() def _process_queue(self): while True: started = 0 while self._free_list and self._requests: started += 1 curl = self._free_list.pop() (request, callback) = self._requests.popleft() curl.info = { "headers": httputil.HTTPHeaders(), "buffer": cStringIO.StringIO(), "request": request, "callback": callback, "start_time": time.time(), } # Disable IPv6 to mitigate the effects of this bug # on curl versions <= 7.21.0 # http://sourceforge.net/tracker/?func=detail&aid=3017819&group_id=976&atid=100976 if pycurl.version_info()[2] <= 0x71500: # 7.21.0 curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4) _curl_setup_request(curl, request, curl.info["buffer"], curl.info["headers"]) self._multi.add_handle(curl) if not started: break def _finish(self, curl, curl_error=None, curl_message=None): info = curl.info curl.info = None self._multi.remove_handle(curl) self._free_list.append(curl) buffer = info["buffer"] if curl_error: 
error = CurlError(curl_error, curl_message) code = error.code effective_url = None buffer.close() buffer = None else: error = None code = curl.getinfo(pycurl.HTTP_CODE) effective_url = curl.getinfo(pycurl.EFFECTIVE_URL) buffer.seek(0) try: info["callback"](HTTPResponse( request=info["request"], code=code, headers=info["headers"], buffer=buffer, effective_url=effective_url, error=error, request_time=time.time() - info["start_time"])) except (KeyboardInterrupt, SystemExit): raise except: logging.error("Exception in callback %r", info["callback"], exc_info=True) class HTTPRequest(object): def __init__(self, url, method="GET", headers=None, body=None, auth_username=None, auth_password=None, connect_timeout=20.0, request_timeout=20.0, if_modified_since=None, follow_redirects=True, max_redirects=5, user_agent=None, use_gzip=True, network_interface=None, streaming_callback=None, header_callback=None, prepare_curl_callback=None, allow_nonstandard_methods=False): if headers is None: headers = httputil.HTTPHeaders() if if_modified_since: timestamp = calendar.timegm(if_modified_since.utctimetuple()) headers["If-Modified-Since"] = email.utils.formatdate( timestamp, localtime=False, usegmt=True) if "Pragma" not in headers: headers["Pragma"] = "" self.url = _utf8(url) self.method = method self.headers = headers self.body = body self.auth_username = _utf8(auth_username) self.auth_password = _utf8(auth_password) self.connect_timeout = connect_timeout self.request_timeout = request_timeout self.follow_redirects = follow_redirects self.max_redirects = max_redirects self.user_agent = user_agent self.use_gzip = use_gzip self.network_interface = network_interface self.streaming_callback = streaming_callback self.header_callback = header_callback self.prepare_curl_callback = prepare_curl_callback self.allow_nonstandard_methods = allow_nonstandard_methods class HTTPResponse(object): def __init__(self, request, code, headers={}, buffer=None, effective_url=None, error=None, 
request_time=None): self.request = request self.code = code self.headers = headers self.buffer = buffer self._body = None if effective_url is None: self.effective_url = request.url else: self.effective_url = effective_url if error is None: if self.code < 200 or self.code >= 300: self.error = HTTPError(self.code, response=self) else: self.error = None else: self.error = error self.request_time = request_time def _get_body(self): if self.buffer is None: return None elif self._body is None: self._body = self.buffer.getvalue() return self._body body = property(_get_body) def rethrow(self): if self.error: raise self.error def __repr__(self): args = ",".join("%s=%r" % i for i in self.__dict__.iteritems()) return "%s(%s)" % (self.__class__.__name__, args) def __del__(self): if self.buffer is not None: self.buffer.close() class HTTPError(Exception): """Exception thrown for an unsuccessful HTTP request. Attributes: code - HTTP error integer error code, e.g. 404. Error code 599 is used when no HTTP response was received, e.g. for a timeout. response - HTTPResponse object, if any. Note that if follow_redirects is False, redirects become HTTPErrors, and you can look at error.response.headers['Location'] to see the destination of the redirect. 
""" def __init__(self, code, message=None, response=None): self.code = code message = message or httplib.responses.get(code, "Unknown") self.response = response Exception.__init__(self, "HTTP %d: %s" % (self.code, message)) class CurlError(HTTPError): def __init__(self, errno, message): HTTPError.__init__(self, 599, message) self.errno = errno def _curl_create(max_simultaneous_connections=None): curl = pycurl.Curl() if logging.getLogger().isEnabledFor(logging.DEBUG): curl.setopt(pycurl.VERBOSE, 1) curl.setopt(pycurl.DEBUGFUNCTION, _curl_debug) curl.setopt(pycurl.MAXCONNECTS, max_simultaneous_connections or 5) return curl def _curl_setup_request(curl, request, buffer, headers): curl.setopt(pycurl.URL, request.url) # Request headers may be either a regular dict or HTTPHeaders object if isinstance(request.headers, httputil.HTTPHeaders): curl.setopt(pycurl.HTTPHEADER, [_utf8("%s: %s" % i) for i in request.headers.get_all()]) else: curl.setopt(pycurl.HTTPHEADER, [_utf8("%s: %s" % i) for i in request.headers.iteritems()]) if request.header_callback: curl.setopt(pycurl.HEADERFUNCTION, request.header_callback) else: curl.setopt(pycurl.HEADERFUNCTION, lambda line: _curl_header_callback(headers, line)) if request.streaming_callback: curl.setopt(pycurl.WRITEFUNCTION, request.streaming_callback) else: curl.setopt(pycurl.WRITEFUNCTION, buffer.write) curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects) curl.setopt(pycurl.MAXREDIRS, request.max_redirects) curl.setopt(pycurl.CONNECTTIMEOUT, int(request.connect_timeout)) curl.setopt(pycurl.TIMEOUT, int(request.request_timeout)) if request.user_agent: curl.setopt(pycurl.USERAGENT, _utf8(request.user_agent)) else: curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)") if request.network_interface: curl.setopt(pycurl.INTERFACE, request.network_interface) if request.use_gzip: curl.setopt(pycurl.ENCODING, "gzip,deflate") else: curl.setopt(pycurl.ENCODING, "none") # Set the request method through curl's retarded 
interface which makes # up names for almost every single method curl_options = { "GET": pycurl.HTTPGET, "POST": pycurl.POST, "PUT": pycurl.UPLOAD, "HEAD": pycurl.NOBODY, } custom_methods = set(["DELETE"]) for o in curl_options.values(): curl.setopt(o, False) if request.method in curl_options: curl.unsetopt(pycurl.CUSTOMREQUEST) curl.setopt(curl_options[request.method], True) elif request.allow_nonstandard_methods or request.method in custom_methods: curl.setopt(pycurl.CUSTOMREQUEST, request.method) else: raise KeyError('unknown method ' + request.method) # Handle curl's cryptic options for every individual HTTP method if request.method in ("POST", "PUT"): request_buffer = cStringIO.StringIO(escape.utf8(request.body)) curl.setopt(pycurl.READFUNCTION, request_buffer.read) if request.method == "POST": def ioctl(cmd): if cmd == curl.IOCMD_RESTARTREAD: request_buffer.seek(0) curl.setopt(pycurl.IOCTLFUNCTION, ioctl) curl.setopt(pycurl.POSTFIELDSIZE, len(request.body)) else: curl.setopt(pycurl.INFILESIZE, len(request.body)) if request.auth_username and request.auth_password: userpwd = "%s:%s" % (request.auth_username, request.auth_password) curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC) curl.setopt(pycurl.USERPWD, userpwd) logging.info("%s %s (username: %r)", request.method, request.url, request.auth_username) else: curl.unsetopt(pycurl.USERPWD) logging.info("%s %s", request.method, request.url) if request.prepare_curl_callback is not None: request.prepare_curl_callback(curl) def _curl_header_callback(headers, header_line): if header_line.startswith("HTTP/"): headers.clear() return if header_line == "\r\n": return headers.parse_line(header_line) def _curl_debug(debug_type, debug_msg): debug_types = ('I', '<', '>', '<', '>') if debug_type == 0: logging.debug('%s', debug_msg.strip()) elif debug_type in (1, 2): for line in debug_msg.splitlines(): logging.debug('%s %s', debug_types[debug_type], line) elif debug_type == 4: logging.debug('%s %r', debug_types[debug_type], 
debug_msg) def _utf8(value): if value is None: return value if isinstance(value, unicode): return value.encode("utf-8") assert isinstance(value, str) return value bup-0.25/lib/tornado/httpserver.py000066400000000000000000000431011225146730500172260ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A non-blocking, single-threaded HTTP server.""" import cgi import errno import httputil import ioloop import iostream import logging import os import socket import time import urlparse try: import fcntl except ImportError: if os.name == 'nt': import win32_support as fcntl else: raise try: import ssl # Python 2.6+ except ImportError: ssl = None class HTTPServer(object): """A non-blocking, single-threaded HTTP server. A server is defined by a request callback that takes an HTTPRequest instance as an argument and writes a valid HTTP response with request.write(). request.finish() finishes the request (but does not necessarily close the connection in the case of HTTP/1.1 keep-alive requests). 
A simple example server that echoes back the URI you requested: import httpserver import ioloop def handle_request(request): message = "You requested %s\n" % request.uri request.write("HTTP/1.1 200 OK\r\nContent-Length: %d\r\n\r\n%s" % ( len(message), message)) request.finish() http_server = httpserver.HTTPServer(handle_request) http_server.listen(8888) ioloop.IOLoop.instance().start() HTTPServer is a very basic connection handler. Beyond parsing the HTTP request body and headers, the only HTTP semantics implemented in HTTPServer is HTTP/1.1 keep-alive connections. We do not, however, implement chunked encoding, so the request callback must provide a Content-Length header or implement chunked encoding for HTTP/1.1 requests for the server to run correctly for HTTP/1.1 clients. If the request handler is unable to do this, you can provide the no_keep_alive argument to the HTTPServer constructor, which will ensure the connection is closed on every request no matter what HTTP version the client is using. If xheaders is True, we support the X-Real-Ip and X-Scheme headers, which override the remote IP and HTTP scheme for all requests. These headers are useful when running Tornado behind a reverse proxy or load balancer. HTTPServer can serve HTTPS (SSL) traffic with Python 2.6+ and OpenSSL. To make this server serve SSL traffic, send the ssl_options dictionary argument with the arguments required for the ssl.wrap_socket() method, including "certfile" and "keyfile": HTTPServer(applicaton, ssl_options={ "certfile": os.path.join(data_dir, "mydomain.crt"), "keyfile": os.path.join(data_dir, "mydomain.key"), }) By default, listen() runs in a single thread in a single process. 
You can utilize all available CPUs on this machine by calling bind() and start() instead of listen(): http_server = httpserver.HTTPServer(handle_request) http_server.bind(8888) http_server.start() # Forks multiple sub-processes ioloop.IOLoop.instance().start() start() detects the number of CPUs on this machine and "pre-forks" that number of child processes so that we have one Tornado process per CPU, all with their own IOLoop. You can also pass in the specific number of child processes you want to run with if you want to override this auto-detection. """ def __init__(self, request_callback, no_keep_alive=False, io_loop=None, xheaders=False, ssl_options=None): """Initializes the server with the given request callback. If you use pre-forking/start() instead of the listen() method to start your server, you should not pass an IOLoop instance to this constructor. Each pre-forked child process will create its own IOLoop instance after the forking process. """ self.request_callback = request_callback self.no_keep_alive = no_keep_alive self.io_loop = io_loop self.xheaders = xheaders self.ssl_options = ssl_options self._socket = None self._started = False def listen(self, port, address=""): """Binds to the given port and starts the server in a single process. This method is a shortcut for: server.bind(port, address) server.start(1) """ self.bind(port, address) self.start(1) def bind(self, port, address=""): """Binds this server to the given port on the given IP address. To start the server, call start(). If you want to run this server in a single process, you can call listen() as a shortcut to the sequence of bind() and start() calls. 
""" assert not self._socket self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) flags = fcntl.fcntl(self._socket.fileno(), fcntl.F_GETFD) flags |= fcntl.FD_CLOEXEC fcntl.fcntl(self._socket.fileno(), fcntl.F_SETFD, flags) self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self._socket.setblocking(0) self._socket.bind((address, port)) self._socket.listen(128) def start(self, num_processes=1): """Starts this server in the IOLoop. By default, we run the server in this process and do not fork any additional child process. If num_processes is None or <= 0, we detect the number of cores available on this machine and fork that number of child processes. If num_processes is given and > 1, we fork that specific number of sub-processes. Since we use processes and not threads, there is no shared memory between any server code. """ assert not self._started self._started = True if num_processes is None or num_processes <= 0: # Use sysconf to detect the number of CPUs (cores) try: num_processes = os.sysconf("SC_NPROCESSORS_CONF") except ValueError: logging.error("Could not get num processors from sysconf; " "running with one process") num_processes = 1 if num_processes > 1 and ioloop.IOLoop.initialized(): logging.error("Cannot run in multiple processes: IOLoop instance " "has already been initialized. 
You cannot call " "IOLoop.instance() before calling start()") num_processes = 1 if num_processes > 1: logging.info("Pre-forking %d server processes", num_processes) for i in range(num_processes): if os.fork() == 0: self.io_loop = ioloop.IOLoop.instance() self.io_loop.add_handler( self._socket.fileno(), self._handle_events, ioloop.IOLoop.READ) return os.waitpid(-1, 0) else: if not self.io_loop: self.io_loop = ioloop.IOLoop.instance() self.io_loop.add_handler(self._socket.fileno(), self._handle_events, ioloop.IOLoop.READ) def stop(self): self.io_loop.remove_handler(self._socket.fileno()) self._socket.close() def _handle_events(self, fd, events): while True: try: connection, address = self._socket.accept() except socket.error, e: if e[0] in (errno.EWOULDBLOCK, errno.EAGAIN): return raise if self.ssl_options is not None: assert ssl, "Python 2.6+ and OpenSSL required for SSL" connection = ssl.wrap_socket( connection, server_side=True, **self.ssl_options) try: stream = iostream.IOStream(connection, io_loop=self.io_loop) HTTPConnection(stream, address, self.request_callback, self.no_keep_alive, self.xheaders) except: logging.error("Error in connection callback", exc_info=True) class HTTPConnection(object): """Handles a connection to an HTTP client, executing HTTP requests. We parse HTTP headers and bodies, and execute the request callback until the HTTP conection is closed. 
""" def __init__(self, stream, address, request_callback, no_keep_alive=False, xheaders=False): self.stream = stream self.address = address self.request_callback = request_callback self.no_keep_alive = no_keep_alive self.xheaders = xheaders self._request = None self._request_finished = False self.stream.read_until("\r\n\r\n", self._on_headers) def write(self, chunk): assert self._request, "Request closed" if not self.stream.closed(): self.stream.write(chunk, self._on_write_complete) def finish(self): assert self._request, "Request closed" self._request_finished = True if not self.stream.writing(): self._finish_request() def _on_write_complete(self): if self._request_finished: self._finish_request() def _finish_request(self): if self.no_keep_alive: disconnect = True else: connection_header = self._request.headers.get("Connection") if self._request.supports_http_1_1(): disconnect = connection_header == "close" elif ("Content-Length" in self._request.headers or self._request.method in ("HEAD", "GET")): disconnect = connection_header != "Keep-Alive" else: disconnect = True self._request = None self._request_finished = False if disconnect: self.stream.close() return self.stream.read_until("\r\n\r\n", self._on_headers) def _on_headers(self, data): eol = data.find("\r\n") start_line = data[:eol] method, uri, version = start_line.split(" ") if not version.startswith("HTTP/"): raise Exception("Malformed HTTP version in HTTP Request-Line") headers = httputil.HTTPHeaders.parse(data[eol:]) self._request = HTTPRequest( connection=self, method=method, uri=uri, version=version, headers=headers, remote_ip=self.address[0]) content_length = headers.get("Content-Length") if content_length: content_length = int(content_length) if content_length > self.stream.max_buffer_size: raise Exception("Content-Length too long") if headers.get("Expect") == "100-continue": self.stream.write("HTTP/1.1 100 (Continue)\r\n\r\n") self.stream.read_bytes(content_length, self._on_request_body) return 
self.request_callback(self._request) def _on_request_body(self, data): self._request.body = data content_type = self._request.headers.get("Content-Type", "") if self._request.method == "POST": if content_type.startswith("application/x-www-form-urlencoded"): arguments = cgi.parse_qs(self._request.body) for name, values in arguments.iteritems(): values = [v for v in values if v] if values: self._request.arguments.setdefault(name, []).extend( values) elif content_type.startswith("multipart/form-data"): if 'boundary=' in content_type: boundary = content_type.split('boundary=',1)[1] if boundary: self._parse_mime_body(boundary, data) else: logging.warning("Invalid multipart/form-data") self.request_callback(self._request) def _parse_mime_body(self, boundary, data): # The standard allows for the boundary to be quoted in the header, # although it's rare (it happens at least for google app engine # xmpp). I think we're also supposed to handle backslash-escapes # here but I'll save that until we see a client that uses them # in the wild. 
if boundary.startswith('"') and boundary.endswith('"'): boundary = boundary[1:-1] if data.endswith("\r\n"): footer_length = len(boundary) + 6 else: footer_length = len(boundary) + 4 parts = data[:-footer_length].split("--" + boundary + "\r\n") for part in parts: if not part: continue eoh = part.find("\r\n\r\n") if eoh == -1: logging.warning("multipart/form-data missing headers") continue headers = httputil.HTTPHeaders.parse(part[:eoh]) name_header = headers.get("Content-Disposition", "") if not name_header.startswith("form-data;") or \ not part.endswith("\r\n"): logging.warning("Invalid multipart/form-data") continue value = part[eoh + 4:-2] name_values = {} for name_part in name_header[10:].split(";"): name, name_value = name_part.strip().split("=", 1) name_values[name] = name_value.strip('"').decode("utf-8") if not name_values.get("name"): logging.warning("multipart/form-data value missing name") continue name = name_values["name"] if name_values.get("filename"): ctype = headers.get("Content-Type", "application/unknown") self._request.files.setdefault(name, []).append(dict( filename=name_values["filename"], body=value, content_type=ctype)) else: self._request.arguments.setdefault(name, []).append(value) class HTTPRequest(object): """A single HTTP request. GET/POST arguments are available in the arguments property, which maps arguments names to lists of values (to support multiple values for individual names). Names and values are both unicode always. File uploads are available in the files property, which maps file names to list of files. Each file is a dictionary of the form {"filename":..., "content_type":..., "body":...}. The content_type comes from the provided HTTP header and should not be trusted outright given that it can be easily forged. An HTTP request is attached to a single HTTP connection, which can be accessed through the "connection" attribute. 
Since connections are typically kept open in HTTP/1.1, multiple requests can be handled sequentially on a single connection. """ def __init__(self, method, uri, version="HTTP/1.0", headers=None, body=None, remote_ip=None, protocol=None, host=None, files=None, connection=None): self.method = method self.uri = uri self.version = version self.headers = headers or httputil.HTTPHeaders() self.body = body or "" if connection and connection.xheaders: # Squid uses X-Forwarded-For, others use X-Real-Ip self.remote_ip = self.headers.get( "X-Real-Ip", self.headers.get("X-Forwarded-For", remote_ip)) self.protocol = self.headers.get("X-Scheme", protocol) or "http" else: self.remote_ip = remote_ip self.protocol = protocol or "http" self.host = host or self.headers.get("Host") or "127.0.0.1" self.files = files or {} self.connection = connection self._start_time = time.time() self._finish_time = None scheme, netloc, path, query, fragment = urlparse.urlsplit(uri) self.path = path self.query = query arguments = cgi.parse_qs(query) self.arguments = {} for name, values in arguments.iteritems(): values = [v for v in values if v] if values: self.arguments[name] = values def supports_http_1_1(self): """Returns True if this request supports HTTP/1.1 semantics""" return self.version == "HTTP/1.1" def write(self, chunk): """Writes the given chunk to the response stream.""" assert isinstance(chunk, str) self.connection.write(chunk) def finish(self): """Finishes this HTTP request on the open connection.""" self.connection.finish() self._finish_time = time.time() def full_url(self): """Reconstructs the full URL for this request.""" return self.protocol + "://" + self.host + self.uri def request_time(self): """Returns the amount of time it took for this request to execute.""" if self._finish_time is None: return time.time() - self._start_time else: return self._finish_time - self._start_time def __repr__(self): attrs = ("protocol", "host", "method", "uri", "version", "remote_ip", "remote_ip", 
"body") args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs]) return "%s(%s, headers=%s)" % ( self.__class__.__name__, args, dict(self.headers)) bup-0.25/lib/tornado/httputil.py000077500000000000000000000105561225146730500167100ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """HTTP utility code shared by clients and servers.""" class HTTPHeaders(dict): """A dictionary that maintains Http-Header-Case for all keys. Supports multiple values per key via a pair of new methods, add() and get_list(). The regular dictionary interface returns a single value per key, with multiple values joined by a comma. >>> h = HTTPHeaders({"content-type": "text/html"}) >>> h.keys() ['Content-Type'] >>> h["Content-Type"] 'text/html' >>> h.add("Set-Cookie", "A=B") >>> h.add("Set-Cookie", "C=D") >>> h["set-cookie"] 'A=B,C=D' >>> h.get_list("set-cookie") ['A=B', 'C=D'] >>> for (k,v) in sorted(h.get_all()): ... print '%s: %s' % (k,v) ... 
Content-Type: text/html Set-Cookie: A=B Set-Cookie: C=D """ def __init__(self, *args, **kwargs): # Don't pass args or kwargs to dict.__init__, as it will bypass # our __setitem__ dict.__init__(self) self._as_list = {} self.update(*args, **kwargs) # new public methods def add(self, name, value): """Adds a new value for the given key.""" norm_name = HTTPHeaders._normalize_name(name) if norm_name in self: # bypass our override of __setitem__ since it modifies _as_list dict.__setitem__(self, norm_name, self[norm_name] + ',' + value) self._as_list[norm_name].append(value) else: self[norm_name] = value def get_list(self, name): """Returns all values for the given header as a list.""" norm_name = HTTPHeaders._normalize_name(name) return self._as_list.get(norm_name, []) def get_all(self): """Returns an iterable of all (name, value) pairs. If a header has multiple values, multiple pairs will be returned with the same name. """ for name, list in self._as_list.iteritems(): for value in list: yield (name, value) def parse_line(self, line): """Updates the dictionary with a single header line. >>> h = HTTPHeaders() >>> h.parse_line("Content-Type: text/html") >>> h.get('content-type') 'text/html' """ name, value = line.split(":", 1) self.add(name, value.strip()) @classmethod def parse(cls, headers): """Returns a dictionary from HTTP header text. 
>>> h = HTTPHeaders.parse("Content-Type: text/html\\r\\nContent-Length: 42\\r\\n") >>> sorted(h.iteritems()) [('Content-Length', '42'), ('Content-Type', 'text/html')] """ h = cls() for line in headers.splitlines(): if line: h.parse_line(line) return h # dict implementation overrides def __setitem__(self, name, value): norm_name = HTTPHeaders._normalize_name(name) dict.__setitem__(self, norm_name, value) self._as_list[norm_name] = [value] def __getitem__(self, name): return dict.__getitem__(self, HTTPHeaders._normalize_name(name)) def __delitem__(self, name): norm_name = HTTPHeaders._normalize_name(name) dict.__delitem__(self, norm_name) del self._as_list[norm_name] def get(self, name, default=None): return dict.get(self, HTTPHeaders._normalize_name(name), default) def update(self, *args, **kwargs): # dict.update bypasses our __setitem__ for k, v in dict(*args, **kwargs).iteritems(): self[k] = v @staticmethod def _normalize_name(name): """Converts a name to Http-Header-Case. >>> HTTPHeaders._normalize_name("coNtent-TYPE") 'Content-Type' """ return "-".join([w.capitalize() for w in name.split("-")]) if __name__ == "__main__": import doctest doctest.testmod() bup-0.25/lib/tornado/ioloop.py000066400000000000000000000431641225146730500163320ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""A level-triggered I/O loop for non-blocking sockets.""" import bisect import errno import os import logging import select import time import traceback try: import signal except ImportError: signal = None try: import fcntl except ImportError: if os.name == 'nt': import win32_support import win32_support as fcntl else: raise class IOLoop(object): """A level-triggered I/O loop. We use epoll if it is available, or else we fall back on select(). If you are implementing a system that needs to handle 1000s of simultaneous connections, you should use Linux and either compile our epoll module or use Python 2.6+ to get epoll support. Example usage for a simple TCP server: import errno import functools import ioloop import socket def connection_ready(sock, fd, events): while True: try: connection, address = sock.accept() except socket.error, e: if e[0] not in (errno.EWOULDBLOCK, errno.EAGAIN): raise return connection.setblocking(0) handle_connection(connection, address) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.setblocking(0) sock.bind(("", port)) sock.listen(128) io_loop = ioloop.IOLoop.instance() callback = functools.partial(connection_ready, sock) io_loop.add_handler(sock.fileno(), callback, io_loop.READ) io_loop.start() """ # Constants from the epoll module _EPOLLIN = 0x001 _EPOLLPRI = 0x002 _EPOLLOUT = 0x004 _EPOLLERR = 0x008 _EPOLLHUP = 0x010 _EPOLLRDHUP = 0x2000 _EPOLLONESHOT = (1 << 30) _EPOLLET = (1 << 31) # Our events map exactly to the epoll events NONE = 0 READ = _EPOLLIN WRITE = _EPOLLOUT ERROR = _EPOLLERR | _EPOLLHUP | _EPOLLRDHUP def __init__(self, impl=None): self._impl = impl or _poll() if hasattr(self._impl, 'fileno'): self._set_close_exec(self._impl.fileno()) self._handlers = {} self._events = {} self._callbacks = set() self._timeouts = [] self._running = False self._stopped = False self._blocking_log_threshold = None # Create a pipe that we send bogus data to when we want 
to wake # the I/O loop when it is idle if os.name != 'nt': r, w = os.pipe() self._set_nonblocking(r) self._set_nonblocking(w) self._set_close_exec(r) self._set_close_exec(w) self._waker_reader = os.fdopen(r, "r", 0) self._waker_writer = os.fdopen(w, "w", 0) else: self._waker_reader = self._waker_writer = win32_support.Pipe() r = self._waker_writer.reader_fd self.add_handler(r, self._read_waker, self.READ) @classmethod def instance(cls): """Returns a global IOLoop instance. Most single-threaded applications have a single, global IOLoop. Use this method instead of passing around IOLoop instances throughout your code. A common pattern for classes that depend on IOLoops is to use a default argument to enable programs with multiple IOLoops but not require the argument for simpler applications: class MyClass(object): def __init__(self, io_loop=None): self.io_loop = io_loop or IOLoop.instance() """ if not hasattr(cls, "_instance"): cls._instance = cls() return cls._instance @classmethod def initialized(cls): return hasattr(cls, "_instance") def add_handler(self, fd, handler, events): """Registers the given handler to receive the given events for fd.""" self._handlers[fd] = handler self._impl.register(fd, events | self.ERROR) def update_handler(self, fd, events): """Changes the events we listen for fd.""" self._impl.modify(fd, events | self.ERROR) def remove_handler(self, fd): """Stop listening for events on fd.""" self._handlers.pop(fd, None) self._events.pop(fd, None) try: self._impl.unregister(fd) except (OSError, IOError): logging.debug("Error deleting fd from IOLoop", exc_info=True) def set_blocking_log_threshold(self, s): """Logs a stack trace if the ioloop is blocked for more than s seconds. Pass None to disable. Requires python 2.6 on a unixy platform. 
""" if not hasattr(signal, "setitimer"): logging.error("set_blocking_log_threshold requires a signal module " "with the setitimer method") return self._blocking_log_threshold = s if s is not None: signal.signal(signal.SIGALRM, self._handle_alarm) def _handle_alarm(self, signal, frame): logging.warning('IOLoop blocked for %f seconds in\n%s', self._blocking_log_threshold, ''.join(traceback.format_stack(frame))) def start(self): """Starts the I/O loop. The loop will run until one of the I/O handlers calls stop(), which will make the loop stop after the current event iteration completes. """ if self._stopped: self._stopped = False return self._running = True while True: # Never use an infinite timeout here - it can stall epoll poll_timeout = 0.2 # Prevent IO event starvation by delaying new callbacks # to the next iteration of the event loop. callbacks = list(self._callbacks) for callback in callbacks: # A callback can add or remove other callbacks if callback in self._callbacks: self._callbacks.remove(callback) self._run_callback(callback) if self._callbacks: poll_timeout = 0.0 if self._timeouts: now = time.time() while self._timeouts and self._timeouts[0].deadline <= now: timeout = self._timeouts.pop(0) self._run_callback(timeout.callback) if self._timeouts: milliseconds = self._timeouts[0].deadline - now poll_timeout = min(milliseconds, poll_timeout) if not self._running: break if self._blocking_log_threshold is not None: # clear alarm so it doesn't fire while poll is waiting for # events. 
signal.setitimer(signal.ITIMER_REAL, 0, 0) try: event_pairs = self._impl.poll(poll_timeout) except Exception, e: # Depending on python version and IOLoop implementation, # different exception types may be thrown and there are # two ways EINTR might be signaled: # * e.errno == errno.EINTR # * e.args is like (errno.EINTR, 'Interrupted system call') if (getattr(e, 'errno') == errno.EINTR or (isinstance(getattr(e, 'args'), tuple) and len(e.args) == 2 and e.args[0] == errno.EINTR)): logging.warning("Interrupted system call", exc_info=1) continue else: raise if self._blocking_log_threshold is not None: signal.setitimer(signal.ITIMER_REAL, self._blocking_log_threshold, 0) # Pop one fd at a time from the set of pending fds and run # its handler. Since that handler may perform actions on # other file descriptors, there may be reentrant calls to # this IOLoop that update self._events self._events.update(event_pairs) while self._events: fd, events = self._events.popitem() try: self._handlers[fd](fd, events) except (KeyboardInterrupt, SystemExit): raise except (OSError, IOError), e: if e[0] == errno.EPIPE: # Happens when the client closes the connection pass else: logging.error("Exception in I/O handler for fd %d", fd, exc_info=True) except: logging.error("Exception in I/O handler for fd %d", fd, exc_info=True) # reset the stopped flag so another start/stop pair can be issued self._stopped = False if self._blocking_log_threshold is not None: signal.setitimer(signal.ITIMER_REAL, 0, 0) def stop(self): """Stop the loop after the current event loop iteration is complete. If the event loop is not currently running, the next call to start() will return immediately. 
To use asynchronous methods from otherwise-synchronous code (such as unit tests), you can start and stop the event loop like this: ioloop = IOLoop() async_method(ioloop=ioloop, callback=ioloop.stop) ioloop.start() ioloop.start() will return after async_method has run its callback, whether that callback was invoked before or after ioloop.start. """ self._running = False self._stopped = True self._wake() def running(self): """Returns true if this IOLoop is currently running.""" return self._running def add_timeout(self, deadline, callback): """Calls the given callback at the time deadline from the I/O loop.""" timeout = _Timeout(deadline, callback) bisect.insort(self._timeouts, timeout) return timeout def remove_timeout(self, timeout): self._timeouts.remove(timeout) def add_callback(self, callback): """Calls the given callback on the next I/O loop iteration.""" self._callbacks.add(callback) self._wake() def remove_callback(self, callback): """Removes the given callback from the next I/O loop iteration.""" self._callbacks.remove(callback) def _wake(self): try: self._waker_writer.write("x") except IOError: pass def _run_callback(self, callback): try: callback() except (KeyboardInterrupt, SystemExit): raise except: self.handle_callback_exception(callback) def handle_callback_exception(self, callback): """This method is called whenever a callback run by the IOLoop throws an exception. By default simply logs the exception as an error. Subclasses may override this method to customize reporting of exceptions. The exception itself is not passed explicitly, but is available in sys.exc_info. 
        """
        logging.error("Exception in callback %r", callback, exc_info=True)

    def _read_waker(self, fd, events):
        # Handler for the self-pipe fd used by _wake(): drain everything
        # that was written so the pipe does not stay readable.  The read
        # is nonblocking, so IOError signals "nothing left to read".
        try:
            while True:
                self._waker_reader.read()
        except IOError:
            pass

    def _set_nonblocking(self, fd):
        # Put fd into O_NONBLOCK mode.
        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)

    def _set_close_exec(self, fd):
        # Set FD_CLOEXEC so the fd is not leaked into exec'd children.
        flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)


class _Timeout(object):
    """An IOLoop timeout, a UNIX timestamp and a callback"""

    # Reduce memory overhead when there are lots of pending callbacks
    __slots__ = ['deadline', 'callback']

    def __init__(self, deadline, callback):
        self.deadline = deadline
        self.callback = callback

    def __cmp__(self, other):
        # Order primarily by deadline so bisect.insort keeps the timeout
        # list sorted; id(callback) breaks ties deterministically.
        # (Python 2 rich-comparison via __cmp__.)
        return cmp((self.deadline, id(self.callback)),
                   (other.deadline, id(other.callback)))


class PeriodicCallback(object):
    """Schedules the given callback to be called periodically.

    The callback is called every callback_time milliseconds.
    """
    def __init__(self, callback, callback_time, io_loop=None):
        self.callback = callback
        self.callback_time = callback_time
        self.io_loop = io_loop or IOLoop.instance()
        self._running = True

    def start(self):
        # callback_time is in milliseconds; add_timeout wants an absolute
        # UNIX timestamp, so convert and offset from "now".
        timeout = time.time() + self.callback_time / 1000.0
        self.io_loop.add_timeout(timeout, self._run)

    def stop(self):
        # The already-scheduled timeout still fires once; _run() checks
        # _running and does nothing, and does not reschedule.
        self._running = False

    def _run(self):
        if not self._running: return
        try:
            self.callback()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            # Keep the periodic schedule alive even if one invocation fails.
            logging.error("Error in periodic callback", exc_info=True)
        self.start()


class _EPoll(object):
    """An epoll-based event loop using our C module for Python 2.5 systems"""
    # epoll_ctl op codes (mirror <sys/epoll.h>).
    _EPOLL_CTL_ADD = 1
    _EPOLL_CTL_DEL = 2
    _EPOLL_CTL_MOD = 3

    def __init__(self):
        self._epoll_fd = epoll.epoll_create()

    def fileno(self):
        return self._epoll_fd

    def register(self, fd, events):
        epoll.epoll_ctl(self._epoll_fd, self._EPOLL_CTL_ADD, fd, events)

    def modify(self, fd, events):
        epoll.epoll_ctl(self._epoll_fd, self._EPOLL_CTL_MOD, fd, events)

    def unregister(self, fd):
        epoll.epoll_ctl(self._epoll_fd, self._EPOLL_CTL_DEL, fd, 0)

    def poll(self, timeout):
        # timeout is in seconds; epoll_wait takes milliseconds.
        return epoll.epoll_wait(self._epoll_fd, int(timeout * 1000))


class _KQueue(object):
    """A kqueue-based event loop for BSD/Mac systems."""
    def __init__(self):
        self._kqueue = select.kqueue()
        # fd -> registered IOLoop event mask; needed because kqueue
        # deletion requires knowing which filters were added.
        self._active = {}

    def fileno(self):
        return self._kqueue.fileno()

    def register(self, fd, events):
        self._control(fd, events, select.KQ_EV_ADD)
        self._active[fd] = events

    def modify(self, fd, events):
        self.unregister(fd)
        self.register(fd, events)

    def unregister(self, fd):
        events = self._active.pop(fd)
        self._control(fd, events, select.KQ_EV_DELETE)

    def _control(self, fd, events, flags):
        # Translate the IOLoop event mask into one kevent per filter.
        kevents = []
        if events & IOLoop.WRITE:
            kevents.append(select.kevent(
                    fd, filter=select.KQ_FILTER_WRITE, flags=flags))
        if events & IOLoop.READ or not kevents:
            # Always read when there is not a write
            kevents.append(select.kevent(
                    fd, filter=select.KQ_FILTER_READ, flags=flags))
        # Even though control() takes a list, it seems to return EINVAL
        # on Mac OS X (10.6) when there is more than one event in the list.
        for kevent in kevents:
            self._kqueue.control([kevent], 0)

    def poll(self, timeout):
        kevents = self._kqueue.control(None, 1000, timeout)
        events = {}
        for kevent in kevents:
            fd = kevent.ident
            flags = 0
            if kevent.filter == select.KQ_FILTER_READ:
                events[fd] = events.get(fd, 0) | IOLoop.READ
            if kevent.filter == select.KQ_FILTER_WRITE:
                events[fd] = events.get(fd, 0) | IOLoop.WRITE
            if kevent.flags & select.KQ_EV_ERROR:
                events[fd] = events.get(fd, 0) | IOLoop.ERROR
        return events.items()


class _Select(object):
    """A simple, select()-based IOLoop implementation for non-Linux systems"""
    def __init__(self):
        self.read_fds = set()
        self.write_fds = set()
        self.error_fds = set()
        self.fd_sets = (self.read_fds, self.write_fds, self.error_fds)

    def register(self, fd, events):
        if events & IOLoop.READ: self.read_fds.add(fd)
        if events & IOLoop.WRITE: self.write_fds.add(fd)
        if events & IOLoop.ERROR: self.error_fds.add(fd)

    def modify(self, fd, events):
        self.unregister(fd)
        self.register(fd, events)

    def unregister(self, fd):
        # discard() (not remove()) so unregistering is idempotent per set.
        self.read_fds.discard(fd)
        self.write_fds.discard(fd)
        self.error_fds.discard(fd)

    def poll(self, timeout):
        readable, writeable, errors = select.select(
            self.read_fds, self.write_fds, self.error_fds, timeout)
        events = {}
        for fd in readable:
            events[fd] = events.get(fd, 0) | IOLoop.READ
        for fd in writeable:
            events[fd] = events.get(fd, 0) | IOLoop.WRITE
        for fd in errors:
            events[fd] = events.get(fd, 0) | IOLoop.ERROR
        return events.items()


# Choose a poll implementation.
# Use epoll if it is available, fall back to
# select() for non-Linux platforms
if hasattr(select, "epoll"):
    # Python 2.6+ on Linux
    _poll = select.epoll
elif hasattr(select, "kqueue"):
    # Python 2.6+ on BSD or Mac
    _poll = _KQueue
else:
    try:
        # Linux systems with our C module installed
        import epoll
        _poll = _EPoll
    except:
        # All other systems
        import sys
        if "linux" in sys.platform:
            logging.warning("epoll module not found; using select()")
        _poll = _Select
bup-0.25/lib/tornado/iostream.py000066400000000000000000000213531225146730500166500ustar00rootroot00000000000000#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""A utility class to write to and read from a non-blocking socket."""

import errno
import ioloop
import logging
import socket


class IOStream(object):
    """A utility class to write to and read from a non-blocking socket.

    We support three methods: write(), read_until(), and read_bytes().
    All of the methods take callbacks (since writing and reading are
    non-blocking and asynchronous). read_until() reads the socket until
    a given delimiter, and read_bytes() reads until a specified number
    of bytes have been read from the socket.

    A very simple (and broken) HTTP client using this class:

        import ioloop
        import iostream
        import socket

        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        s.connect(("friendfeed.com", 80))
        stream = IOStream(s)

        def on_headers(data):
            headers = {}
            for line in data.split("\r\n"):
                parts = line.split(":")
                if len(parts) == 2:
                    headers[parts[0].strip()] = parts[1].strip()
            stream.read_bytes(int(headers["Content-Length"]), on_body)

        def on_body(data):
            print data
            stream.close()
            ioloop.IOLoop.instance().stop()

        stream.write("GET / HTTP/1.0\r\n\r\n")
        stream.read_until("\r\n\r\n", on_headers)
        ioloop.IOLoop.instance().start()
    """
    def __init__(self, socket, io_loop=None, max_buffer_size=104857600,
                 read_chunk_size=4096):
        # The socket is switched to nonblocking mode; all subsequent reads
        # and writes are driven by the io_loop's readiness notifications.
        self.socket = socket
        self.socket.setblocking(False)
        self.io_loop = io_loop or ioloop.IOLoop.instance()
        self.max_buffer_size = max_buffer_size
        self.read_chunk_size = read_chunk_size
        self._read_buffer = ""
        self._write_buffer = ""
        self._read_delimiter = None
        self._read_bytes = None
        self._read_callback = None
        self._write_callback = None
        self._close_callback = None
        # Always watch for errors; READ/WRITE bits are added on demand.
        self._state = self.io_loop.ERROR
        self.io_loop.add_handler(
            self.socket.fileno(), self._handle_events, self._state)

    def read_until(self, delimiter, callback):
        """Call callback when we read the given delimiter."""
        assert not self._read_callback, "Already reading"
        # The delimiter may already be buffered from a previous read.
        loc = self._read_buffer.find(delimiter)
        if loc != -1:
            self._run_callback(callback, self._consume(loc + len(delimiter)))
            return
        self._check_closed()
        self._read_delimiter = delimiter
        self._read_callback = callback
        self._add_io_state(self.io_loop.READ)

    def read_bytes(self, num_bytes, callback):
        """Call callback when we read the given number of bytes."""
        assert not self._read_callback, "Already reading"
        if len(self._read_buffer) >= num_bytes:
            callback(self._consume(num_bytes))
            return
        self._check_closed()
        self._read_bytes = num_bytes
        self._read_callback = callback
        self._add_io_state(self.io_loop.READ)

    def write(self, data, callback=None):
        """Write the given data to this stream.

        If callback is given, we call it when all of the buffered write
        data has been successfully written to the stream. If there was
        previously buffered write data and an old write callback, that
        callback is simply overwritten with this new callback.
        """
        self._check_closed()
        self._write_buffer += data
        self._add_io_state(self.io_loop.WRITE)
        self._write_callback = callback

    def set_close_callback(self, callback):
        """Call the given callback when the stream is closed."""
        self._close_callback = callback

    def close(self):
        """Close this stream."""
        if self.socket is not None:
            self.io_loop.remove_handler(self.socket.fileno())
            self.socket.close()
            self.socket = None
            if self._close_callback:
                self._run_callback(self._close_callback)

    def reading(self):
        """Returns true if we are currently reading from the stream."""
        return self._read_callback is not None

    def writing(self):
        """Returns true if we are currently writing to the stream."""
        return len(self._write_buffer) > 0

    def closed(self):
        # True once close() has run (socket is released).
        return self.socket is None

    def _handle_events(self, fd, events):
        # Central io_loop handler: dispatch readiness bits, then recompute
        # the interest set from what the stream still wants to do.
        if not self.socket:
            logging.warning("Got events for closed stream %d", fd)
            return
        if events & self.io_loop.READ:
            self._handle_read()
        # A read callback may have closed the stream; re-check before
        # each further step.
        if not self.socket:
            return
        if events & self.io_loop.WRITE:
            self._handle_write()
        if not self.socket:
            return
        if events & self.io_loop.ERROR:
            self.close()
            return
        state = self.io_loop.ERROR
        if self._read_delimiter or self._read_bytes:
            state |= self.io_loop.READ
        if self._write_buffer:
            state |= self.io_loop.WRITE
        if state != self._state:
            self._state = state
            self.io_loop.update_handler(self.socket.fileno(), self._state)

    def _run_callback(self, callback, *args, **kwargs):
        try:
            callback(*args, **kwargs)
        except:
            # Close the socket on an uncaught exception from a user callback
            # (It would eventually get closed when the socket object is
            # gc'd, but we don't want to rely on gc happening before we
            # run out of file descriptors)
            self.close()
            # Re-raise the exception so that IOLoop.handle_callback_exception
            # can see it and log the error
            raise

    def _handle_read(self):
        try:
            chunk = self.socket.recv(self.read_chunk_size)
        except socket.error, e:
            if e[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
                # Spurious wakeup; nothing to read right now.
                return
            else:
                logging.warning("Read error on %d: %s",
                                self.socket.fileno(), e)
                self.close()
                return
        if not chunk:
            # recv() returning "" means the remote end closed the connection.
            self.close()
            return
        self._read_buffer += chunk
        if len(self._read_buffer) >= self.max_buffer_size:
            logging.error("Reached maximum read buffer size")
            self.close()
            return
        # Satisfy at most one pending read request per chunk.
        if self._read_bytes:
            if len(self._read_buffer) >= self._read_bytes:
                num_bytes = self._read_bytes
                callback = self._read_callback
                # Clear the pending-read state *before* running the callback,
                # so the callback may immediately issue a new read.
                self._read_callback = None
                self._read_bytes = None
                self._run_callback(callback, self._consume(num_bytes))
        elif self._read_delimiter:
            loc = self._read_buffer.find(self._read_delimiter)
            if loc != -1:
                callback = self._read_callback
                delimiter_len = len(self._read_delimiter)
                self._read_callback = None
                self._read_delimiter = None
                self._run_callback(callback,
                                   self._consume(loc + delimiter_len))

    def _handle_write(self):
        while self._write_buffer:
            try:
                num_bytes = self.socket.send(self._write_buffer)
                self._write_buffer = self._write_buffer[num_bytes:]
            except socket.error, e:
                if e[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
                    # Kernel send buffer full; wait for next WRITE event.
                    break
                else:
                    logging.warning("Write error on %d: %s",
                                    self.socket.fileno(), e)
                    self.close()
                    return
        if not self._write_buffer and self._write_callback:
            callback = self._write_callback
            self._write_callback = None
            self._run_callback(callback)

    def _consume(self, loc):
        # Remove and return the first loc bytes of the read buffer.
        result = self._read_buffer[:loc]
        self._read_buffer = self._read_buffer[loc:]
        return result

    def _check_closed(self):
        if not self.socket:
            raise IOError("Stream is closed")

    def _add_io_state(self, state):
        # Add bits to the io_loop interest set, updating only on change.
        if not self._state & state:
            self._state = self._state | state
            self.io_loop.update_handler(self.socket.fileno(), self._state)
bup-0.25/lib/tornado/locale.py000066400000000000000000000463771225146730500162770ustar00rootroot00000000000000#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License,
# Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Translation methods for generating localized strings.

To load a locale and generate a translated string:

    user_locale = locale.get("es_LA")
    print user_locale.translate("Sign out")

locale.get() returns the closest matching locale, not necessarily the
specific locale you requested. You can support pluralization with
additional arguments to translate(), e.g.:

    people = [...]
    message = user_locale.translate(
        "%(list)s is online", "%(list)s are online", len(people))
    print message % {"list": user_locale.list(people)}

The first string is chosen if len(people) == 1, otherwise the second
string is chosen.

Applications should call one of load_translations (which uses a simple
CSV format) or load_gettext_translations (which uses the .mo format
supported by gettext and related tools). If neither method is called,
the locale.translate method will simply return the original string.
"""

import csv
import datetime
import logging
import os

# Module-level translation state, populated by the load_* functions.
_default_locale = "en_US"
_translations = {}
_supported_locales = frozenset([_default_locale])
_use_gettext = False


def get(*locale_codes):
    """Returns the closest match for the given locale codes.

    We iterate over all given locale codes in order. If we have a tight
    or a loose match for the code (e.g., "en" for "en_US"), we return
    the locale. Otherwise we move to the next code in the list.

    By default we return en_US if no translations are found for any of
    the specified locales. You can change the default locale with
    set_default_locale() below.
    """
    return Locale.get_closest(*locale_codes)


def set_default_locale(code):
    """Sets the default locale, used in get_closest_locale().

    The default locale is assumed to be the language used for all strings
    in the system. The translations loaded from disk are mappings from
    the default locale to the destination locale. Consequently, you don't
    need to create a translation file for the default locale.
    """
    global _default_locale
    global _supported_locales
    _default_locale = code
    _supported_locales = frozenset(_translations.keys() + [_default_locale])


def load_translations(directory):
    """Loads translations from CSV files in a directory.

    Translations are strings with optional Python-style named placeholders
    (e.g., "My name is %(name)s") and their associated translations.

    The directory should have translation files of the form LOCALE.csv,
    e.g. es_GT.csv. The CSV files should have two or three columns: string,
    translation, and an optional plural indicator. Plural indicators should
    be one of "plural" or "singular". A given string can have both singular
    and plural forms. For example "%(name)s liked this" may have a
    different verb conjugation depending on whether %(name)s is one
    name or a list of names. There should be two rows in the CSV file for
    that string, one with plural indicator "singular", and one "plural".
    For strings with no verbs that would change on translation, simply
    use "unknown" or the empty string (or don't include the column at all).

    Example translation es_LA.csv:

        "I love you","Te amo"
        "%(name)s liked this","A %(name)s les gust\xf3 esto","plural"
        "%(name)s liked this","A %(name)s le gust\xf3 esto","singular"
    """
    global _translations
    global _supported_locales
    _translations = {}
    for path in os.listdir(directory):
        if not path.endswith(".csv"): continue
        # NOTE(review): split(".") raises ValueError for file names that
        # contain more than one dot (e.g. "es_LA.backup.csv"); an rsplit
        # with maxsplit=1 would be more robust -- left as-is here.
        locale, extension = path.split(".")
        if locale not in LOCALE_NAMES:
            logging.error("Unrecognized locale %r (path: %s)", locale,
                          os.path.join(directory, path))
            continue
        f = open(os.path.join(directory, path), "r")
        _translations[locale] = {}
        for i, row in enumerate(csv.reader(f)):
            if not row or len(row) < 2: continue
            # CSV cells are raw bytes; decode to unicode for runtime use.
            row = [c.decode("utf-8").strip() for c in row]
            english, translation = row[:2]
            if len(row) > 2:
                plural = row[2] or "unknown"
            else:
                plural = "unknown"
            if plural not in ("plural", "singular", "unknown"):
                logging.error("Unrecognized plural indicator %r in %s line %d",
                              plural, path, i + 1)
                continue
            _translations[locale].setdefault(plural, {})[english] = translation
        f.close()
    _supported_locales = frozenset(_translations.keys() + [_default_locale])
    logging.info("Supported locales: %s", sorted(_supported_locales))


def load_gettext_translations(directory, domain):
    """Loads translations from gettext's locale tree

    Locale tree is similar to system's /usr/share/locale, like:

    {directory}/{lang}/LC_MESSAGES/{domain}.mo

    Three steps are required to have you app translated:

    1. Generate POT translation file
        xgettext --language=Python --keyword=_:1,2 -d cyclone file1.py
        file2.html etc

    2. Merge against existing POT file:
        msgmerge old.po cyclone.po > new.po

    3. Compile:
        msgfmt cyclone.po -o {directory}/pt_BR/LC_MESSAGES/cyclone.mo
    """
    import gettext
    global _translations
    global _supported_locales
    global _use_gettext
    _translations = {}
    for lang in os.listdir(directory):
        if os.path.isfile(os.path.join(directory, lang)): continue
        try:
            # stat() first so a missing .mo file is reported per-language
            # instead of aborting the whole scan.
            os.stat(os.path.join(directory, lang, "LC_MESSAGES", domain+".mo"))
            _translations[lang] = gettext.translation(domain, directory,
                                                      languages=[lang])
        except Exception, e:
            logging.error("Cannot load translation for '%s': %s", lang, str(e))
            continue
    _supported_locales = frozenset(_translations.keys() + [_default_locale])
    _use_gettext = True
    logging.info("Supported locales: %s", sorted(_supported_locales))


def get_supported_locales(cls):
    """Returns a list of all the supported locale codes."""
    # NOTE(review): module-level function; the unused 'cls' parameter looks
    # like a leftover from a classmethod -- confirm against callers.
    return _supported_locales


class Locale(object):
    @classmethod
    def get_closest(cls, *locale_codes):
        """Returns the closest match for the given locale code."""
        for code in locale_codes:
            if not code: continue
            code = code.replace("-", "_")
            parts = code.split("_")
            if len(parts) > 2:
                continue
            elif len(parts) == 2:
                # Normalize e.g. "en_us" -> "en_US" before lookup.
                code = parts[0].lower() + "_" + parts[1].upper()
            if code in _supported_locales:
                return cls.get(code)
            # Loose match: fall back to the bare language code.
            if parts[0].lower() in _supported_locales:
                return cls.get(parts[0].lower())
        return cls.get(_default_locale)

    @classmethod
    def get(cls, code):
        """Returns the Locale for the given locale code.

        If it is not supported, we raise an exception.
        """
        if not hasattr(cls, "_cache"):
            cls._cache = {}
        if code not in cls._cache:
            assert code in _supported_locales
            translations = _translations.get(code, None)
            if translations is None:
                locale = CSVLocale(code, {})
            elif _use_gettext:
                locale = GettextLocale(code, translations)
            else:
                locale = CSVLocale(code, translations)
            cls._cache[code] = locale
        return cls._cache[code]

    def __init__(self, code, translations):
        self.code = code
        self.name = LOCALE_NAMES.get(code, {}).get("name", u"Unknown")
        # Right-to-left scripts (Farsi, Arabic, Hebrew).
        self.rtl = False
        for prefix in ["fa", "ar", "he"]:
            if self.code.startswith(prefix):
                self.rtl = True
                break
        self.translations = translations

        # Initialize strings for date formatting
        _ = self.translate
        self._months = [
            _("January"), _("February"), _("March"), _("April"),
            _("May"), _("June"), _("July"), _("August"),
            _("September"), _("October"), _("November"), _("December")]
        self._weekdays = [
            _("Monday"), _("Tuesday"), _("Wednesday"), _("Thursday"),
            _("Friday"), _("Saturday"), _("Sunday")]

    def translate(self, message, plural_message=None, count=None):
        # Implemented by CSVLocale / GettextLocale subclasses.
        raise NotImplementedError()

    def format_date(self, date, gmt_offset=0, relative=True, shorter=False,
                    full_format=False):
        """Formats the given date (which should be GMT).

        By default, we return a relative time (e.g., "2 minutes ago"). You
        can return an absolute date string with relative=False.

        You can force a full format date ("July 10, 1980") with
        full_format=True.
        """
        if self.code.startswith("ru"):
            relative = False
        if type(date) in (int, long, float):
            date = datetime.datetime.utcfromtimestamp(date)
        now = datetime.datetime.utcnow()
        # Round down to now. Due to clock skew, things are sometimes
        # slightly in the future.
        if date > now: date = now
        local_date = date - datetime.timedelta(minutes=gmt_offset)
        local_now = now - datetime.timedelta(minutes=gmt_offset)
        local_yesterday = local_now - datetime.timedelta(hours=24)
        difference = now - date
        seconds = difference.seconds
        days = difference.days

        _ = self.translate
        format = None
        if not full_format:
            if relative and days == 0:
                if seconds < 50:
                    return _("1 second ago", "%(seconds)d seconds ago",
                             seconds) % { "seconds": seconds }

                if seconds < 50 * 60:
                    minutes = round(seconds / 60.0)
                    return _("1 minute ago", "%(minutes)d minutes ago",
                             minutes) % { "minutes": minutes }

                hours = round(seconds / (60.0 * 60))
                return _("1 hour ago", "%(hours)d hours ago",
                         hours) % { "hours": hours }

            if days == 0:
                format = _("%(time)s")
            elif days == 1 and local_date.day == local_yesterday.day and \
                 relative:
                format = _("yesterday") if shorter else \
                         _("yesterday at %(time)s")
            elif days < 5:
                format = _("%(weekday)s") if shorter else \
                         _("%(weekday)s at %(time)s")
            elif days < 334:  # 11mo, since confusing for same month last year
                format = _("%(month_name)s %(day)s") if shorter else \
                         _("%(month_name)s %(day)s at %(time)s")

        if format is None:
            format = _("%(month_name)s %(day)s, %(year)s") if shorter else \
                     _("%(month_name)s %(day)s, %(year)s at %(time)s")

        # 24-hour clock everywhere except US English and zh_CN (which gets
        # its own AM/PM markers below).
        tfhour_clock = self.code not in ("en", "en_US", "zh_CN")
        if tfhour_clock:
            str_time = "%d:%02d" % (local_date.hour, local_date.minute)
        elif self.code == "zh_CN":
            str_time = "%s%d:%02d" % (
                (u'\u4e0a\u5348', u'\u4e0b\u5348')[local_date.hour >= 12],
                local_date.hour % 12 or 12,
                local_date.minute)
        else:
            str_time = "%d:%02d %s" % (
                local_date.hour % 12 or 12,
                local_date.minute, ("am", "pm")[local_date.hour >= 12])

        return format % {
            "month_name": self._months[local_date.month - 1],
            "weekday": self._weekdays[local_date.weekday()],
            "day": str(local_date.day),
            "year": str(local_date.year),
            "time": str_time
        }

    def format_day(self, date, gmt_offset=0, dow=True):
        """Formats the given date as a day of week.

        Example: "Monday, January 22". You can remove the day of week with
        dow=False.
        """
        local_date = date - datetime.timedelta(minutes=gmt_offset)
        _ = self.translate
        if dow:
            return _("%(weekday)s, %(month_name)s %(day)s") % {
                "month_name": self._months[local_date.month - 1],
                "weekday": self._weekdays[local_date.weekday()],
                "day": str(local_date.day),
            }
        else:
            return _("%(month_name)s %(day)s") % {
                "month_name": self._months[local_date.month - 1],
                "day": str(local_date.day),
            }

    def list(self, parts):
        """Returns a comma-separated list for the given list of parts.

        The format is, e.g., "A, B and C", "A and B" or just "A" for lists
        of size 1.
        """
        _ = self.translate
        if len(parts) == 0: return ""
        if len(parts) == 1: return parts[0]
        # Farsi uses its own conjunction character in place of ", ".
        comma = u' \u0648 ' if self.code.startswith("fa") else u", "
        return _("%(commas)s and %(last)s") % {
            "commas": comma.join(parts[:-1]),
            "last": parts[len(parts) - 1],
        }

    def friendly_number(self, value):
        """Returns a comma-separated number for the given integer."""
        # Digit grouping with "," is only applied for English locales.
        if self.code not in ("en", "en_US"):
            return str(value)
        value = str(value)
        parts = []
        while value:
            parts.append(value[-3:])
            value = value[:-3]
        return ",".join(reversed(parts))


class CSVLocale(Locale):
    """Locale implementation using tornado's CSV translation format."""
    def translate(self, message, plural_message=None, count=None):
        """Returns the translation for the given message for this locale.

        If plural_message is given, you must also provide count. We return
        plural_message when count != 1, and we return the singular form
        for the given message when count == 1.
""" if plural_message is not None: assert count is not None if count != 1: message = plural_message message_dict = self.translations.get("plural", {}) else: message_dict = self.translations.get("singular", {}) else: message_dict = self.translations.get("unknown", {}) return message_dict.get(message, message) class GettextLocale(Locale): """Locale implementation using the gettext module.""" def translate(self, message, plural_message=None, count=None): if plural_message is not None: assert count is not None return self.translations.ungettext(message, plural_message, count) else: return self.translations.ugettext(message) LOCALE_NAMES = { "af_ZA": {"name_en": u"Afrikaans", "name": u"Afrikaans"}, "ar_AR": {"name_en": u"Arabic", "name": u"\u0627\u0644\u0639\u0631\u0628\u064a\u0629"}, "bg_BG": {"name_en": u"Bulgarian", "name": u"\u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438"}, "bn_IN": {"name_en": u"Bengali", "name": u"\u09ac\u09be\u0982\u09b2\u09be"}, "bs_BA": {"name_en": u"Bosnian", "name": u"Bosanski"}, "ca_ES": {"name_en": u"Catalan", "name": u"Catal\xe0"}, "cs_CZ": {"name_en": u"Czech", "name": u"\u010ce\u0161tina"}, "cy_GB": {"name_en": u"Welsh", "name": u"Cymraeg"}, "da_DK": {"name_en": u"Danish", "name": u"Dansk"}, "de_DE": {"name_en": u"German", "name": u"Deutsch"}, "el_GR": {"name_en": u"Greek", "name": u"\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac"}, "en_GB": {"name_en": u"English (UK)", "name": u"English (UK)"}, "en_US": {"name_en": u"English (US)", "name": u"English (US)"}, "es_ES": {"name_en": u"Spanish (Spain)", "name": u"Espa\xf1ol (Espa\xf1a)"}, "es_LA": {"name_en": u"Spanish", "name": u"Espa\xf1ol"}, "et_EE": {"name_en": u"Estonian", "name": u"Eesti"}, "eu_ES": {"name_en": u"Basque", "name": u"Euskara"}, "fa_IR": {"name_en": u"Persian", "name": u"\u0641\u0627\u0631\u0633\u06cc"}, "fi_FI": {"name_en": u"Finnish", "name": u"Suomi"}, "fr_CA": {"name_en": u"French (Canada)", "name": u"Fran\xe7ais (Canada)"}, "fr_FR": {"name_en": u"French", 
"name": u"Fran\xe7ais"}, "ga_IE": {"name_en": u"Irish", "name": u"Gaeilge"}, "gl_ES": {"name_en": u"Galician", "name": u"Galego"}, "he_IL": {"name_en": u"Hebrew", "name": u"\u05e2\u05d1\u05e8\u05d9\u05ea"}, "hi_IN": {"name_en": u"Hindi", "name": u"\u0939\u093f\u0928\u094d\u0926\u0940"}, "hr_HR": {"name_en": u"Croatian", "name": u"Hrvatski"}, "hu_HU": {"name_en": u"Hungarian", "name": u"Magyar"}, "id_ID": {"name_en": u"Indonesian", "name": u"Bahasa Indonesia"}, "is_IS": {"name_en": u"Icelandic", "name": u"\xcdslenska"}, "it_IT": {"name_en": u"Italian", "name": u"Italiano"}, "ja_JP": {"name_en": u"Japanese", "name": u"\xe6\xe6\xe8"}, "ko_KR": {"name_en": u"Korean", "name": u"\xed\xea\xec"}, "lt_LT": {"name_en": u"Lithuanian", "name": u"Lietuvi\u0173"}, "lv_LV": {"name_en": u"Latvian", "name": u"Latvie\u0161u"}, "mk_MK": {"name_en": u"Macedonian", "name": u"\u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438"}, "ml_IN": {"name_en": u"Malayalam", "name": u"\u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02"}, "ms_MY": {"name_en": u"Malay", "name": u"Bahasa Melayu"}, "nb_NO": {"name_en": u"Norwegian (bokmal)", "name": u"Norsk (bokm\xe5l)"}, "nl_NL": {"name_en": u"Dutch", "name": u"Nederlands"}, "nn_NO": {"name_en": u"Norwegian (nynorsk)", "name": u"Norsk (nynorsk)"}, "pa_IN": {"name_en": u"Punjabi", "name": u"\u0a2a\u0a70\u0a1c\u0a3e\u0a2c\u0a40"}, "pl_PL": {"name_en": u"Polish", "name": u"Polski"}, "pt_BR": {"name_en": u"Portuguese (Brazil)", "name": u"Portugu\xeas (Brasil)"}, "pt_PT": {"name_en": u"Portuguese (Portugal)", "name": u"Portugu\xeas (Portugal)"}, "ro_RO": {"name_en": u"Romanian", "name": u"Rom\xe2n\u0103"}, "ru_RU": {"name_en": u"Russian", "name": u"\u0420\u0443\u0441\u0441\u043a\u0438\u0439"}, "sk_SK": {"name_en": u"Slovak", "name": u"Sloven\u010dina"}, "sl_SI": {"name_en": u"Slovenian", "name": u"Sloven\u0161\u010dina"}, "sq_AL": {"name_en": u"Albanian", "name": u"Shqip"}, "sr_RS": {"name_en": u"Serbian", "name": u"\u0421\u0440\u043f\u0441\u043a\u0438"}, 
"sv_SE": {"name_en": u"Swedish", "name": u"Svenska"}, "sw_KE": {"name_en": u"Swahili", "name": u"Kiswahili"}, "ta_IN": {"name_en": u"Tamil", "name": u"\u0ba4\u0bae\u0bbf\u0bb4\u0bcd"}, "te_IN": {"name_en": u"Telugu", "name": u"\u0c24\u0c46\u0c32\u0c41\u0c17\u0c41"}, "th_TH": {"name_en": u"Thai", "name": u"\u0e20\u0e32\u0e29\u0e32\u0e44\u0e17\u0e22"}, "tl_PH": {"name_en": u"Filipino", "name": u"Filipino"}, "tr_TR": {"name_en": u"Turkish", "name": u"T\xfcrk\xe7e"}, "uk_UA": {"name_en": u"Ukraini ", "name": u"\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430"}, "vi_VN": {"name_en": u"Vietnamese", "name": u"Ti\u1ebfng Vi\u1ec7t"}, "zh_CN": {"name_en": u"Chinese (Simplified)", "name": u"\xe4\xe6(\xe7\xe4)"}, "zh_HK": {"name_en": u"Chinese (Hong Kong)", "name": u"\xe4\xe6(\xe9\xe6)"}, "zh_TW": {"name_en": u"Chinese (Taiwan)", "name": u"\xe4\xe6(\xe5\xe7)"}, } bup-0.25/lib/tornado/options.py000066400000000000000000000323631225146730500165230ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A command line parsing module that lets modules define their own options. Each module defines its own options, e.g., from tornado.options import define, options define("mysql_host", default="127.0.0.1:3306", help="Main user DB") define("memcache_hosts", default="127.0.0.1:11011", multiple=True, help="Main user memcache servers") def connect(): db = database.Connection(options.mysql_host) ... 
The main() method of your application does not need to be aware of all of
the options used throughout your program; they are all automatically loaded
when the modules are loaded. Your main() method can parse the command line
or parse a config file with:

    import tornado.options
    tornado.options.parse_config_file("/etc/server.conf")
    tornado.options.parse_command_line()

Command line formats are what you would expect ("--myoption=myvalue").
Config files are just Python files. Global names become options, e.g.,

    myoption = "myvalue"
    myotheroption = "myothervalue"

We support datetimes, timedeltas, ints, and floats (just pass a 'type'
kwarg to define). We also accept multi-value options. See the documentation
for define() below.
"""

import datetime
import logging
import logging.handlers
import re
import sys
import time

# For pretty log messages, if available
try:
    import curses
except:
    curses = None


def define(name, default=None, type=str, help=None, metavar=None,
           multiple=False):
    """Defines a new command line option.

    If type is given (one of str, float, int, datetime, or timedelta),
    we parse the command line arguments based on the given type. If
    multiple is True, we accept comma-separated values, and the option
    value is always a list.

    For multi-value integers, we also accept the syntax x:y, which
    turns into range(x, y) - very useful for long integer ranges.

    help and metavar are used to construct the automatically generated
    command line help string. The help message is formatted like:

       --name=METAVAR                      help string

    Command line option names must be unique globally. They can be parsed
    from the command line with parse_command_line() or parsed from a
    config file with parse_config_file.
    """
    if name in options:
        raise Error("Option %r already defined in %s", name,
                    options[name].file_name)
    # Record which module defined the option (for grouped help output);
    # options defined inside this module itself get an empty file name.
    frame = sys._getframe(0)
    options_file = frame.f_code.co_filename
    file_name = frame.f_back.f_code.co_filename
    if file_name == options_file: file_name = ""
    options[name] = _Option(name, file_name=file_name, default=default,
                            type=type, help=help, metavar=metavar,
                            multiple=multiple)


def parse_command_line(args=None):
    """Parses all options given on the command line.

    We return all command line arguments that are not options as a list.
    """
    if args is None: args = sys.argv
    remaining = []
    for i in xrange(1, len(args)):
        # All things after the last option are command line arguments
        if not args[i].startswith("-"):
            remaining = args[i:]
            break
        if args[i] == "--":
            # "--" terminates option parsing, per common CLI convention.
            remaining = args[i+1:]
            break
        arg = args[i].lstrip("-")
        name, equals, value = arg.partition("=")
        name = name.replace('-', '_')
        if not name in options:
            print_help()
            raise Error('Unrecognized command line option: %r' % name)
        option = options[name]
        if not equals:
            # Bare "--flag" is only legal for booleans (implies true).
            if option.type == bool:
                value = "true"
            else:
                raise Error('Option %r requires a value' % name)
        option.parse(value)
    if options.help:
        print_help()
        sys.exit(0)

    # Set up log level and pretty console logging by default
    if options.logging != 'none':
        logging.getLogger().setLevel(getattr(logging, options.logging.upper()))
        enable_pretty_logging()

    return remaining


def parse_config_file(path):
    """Parses and loads the Python config file at the given path."""
    # NOTE(review): execfile runs the config as arbitrary Python code;
    # only point this at trusted files.
    config = {}
    execfile(path, config, config)
    for name in config:
        if name in options:
            options[name].set(config[name])


def print_help(file=sys.stdout):
    """Prints all the command line options to stdout."""
    print >> file, "Usage: %s [OPTIONS]" % sys.argv[0]
    print >> file, ""
    print >> file, "Options:"
    # Group the options by the file that defined them.
    by_file = {}
    for option in options.itervalues():
        by_file.setdefault(option.file_name, []).append(option)

    for filename, o in sorted(by_file.items()):
        if filename: print >> file, filename
        o.sort(key=lambda option: option.name)
        for option in o:
            prefix = option.name
            if option.metavar:
                prefix += "=" + option.metavar
            print >> file, "  --%-30s %s" % (prefix, option.help or "")
    print >> file


class _Options(dict):
    """Our global program options, an dictionary with object-like access."""
    @classmethod
    def instance(cls):
        # Process-wide singleton backing the module-level 'options'.
        if not hasattr(cls, "_instance"):
            cls._instance = cls()
        return cls._instance

    def __getattr__(self, name):
        # Attribute access returns the option's *value*, not the _Option.
        if isinstance(self.get(name), _Option):
            return self[name].value()
        raise AttributeError("Unrecognized option %r" % name)


class _Option(object):
    def __init__(self, name, default=None, type=str, help=None, metavar=None,
                 multiple=False, file_name=None):
        if default is None and multiple:
            default = []
        self.name = name
        self.type = type
        self.help = help
        self.metavar = metavar
        self.multiple = multiple
        self.file_name = file_name
        self.default = default
        self._value = None

    def value(self):
        # _value stays None until parse()/set(); fall back to the default.
        return self.default if self._value is None else self._value

    def parse(self, value):
        # Pick a string->value parser for the declared type; custom types
        # are called directly with the raw string.
        _parse = {
            datetime.datetime: self._parse_datetime,
            datetime.timedelta: self._parse_timedelta,
            bool: self._parse_bool,
            str: self._parse_string,
        }.get(self.type, self.type)
        if self.multiple:
            if self._value is None:
                self._value = []
            for part in value.split(","):
                if self.type in (int, long):
                    # allow ranges of the form X:Y (inclusive at both ends)
                    lo, _, hi = part.partition(":")
                    lo = _parse(lo)
                    hi = _parse(hi) if hi else lo
                    self._value.extend(range(lo, hi+1))
                else:
                    self._value.append(_parse(part))
        else:
            self._value = _parse(value)
        return self.value()

    def set(self, value):
        # Direct assignment (used by parse_config_file); type-checks the
        # value (or each element, for multiple options) before storing.
        if self.multiple:
            if not isinstance(value, list):
                raise Error("Option %r is required to be a list of %s" %
                            (self.name, self.type.__name__))
            for item in value:
                if item != None and not isinstance(item, self.type):
                    raise Error("Option %r is required to be a list of %s" %
                                (self.name, self.type.__name__))
        else:
            if value != None and not isinstance(value, self.type):
                raise Error("Option %r is required to be a %s" %
                            (self.name, self.type.__name__))
        self._value = value
Supported date/time formats in our options _DATETIME_FORMATS = [ "%a %b %d %H:%M:%S %Y", "%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M", "%Y-%m-%dT%H:%M", "%Y%m%d %H:%M:%S", "%Y%m%d %H:%M", "%Y-%m-%d", "%Y%m%d", "%H:%M:%S", "%H:%M", ] def _parse_datetime(self, value): for format in self._DATETIME_FORMATS: try: return datetime.datetime.strptime(value, format) except ValueError: pass raise Error('Unrecognized date/time format: %r' % value) _TIMEDELTA_ABBREVS = [ ('hours', ['h']), ('minutes', ['m', 'min']), ('seconds', ['s', 'sec']), ('milliseconds', ['ms']), ('microseconds', ['us']), ('days', ['d']), ('weeks', ['w']), ] _TIMEDELTA_ABBREV_DICT = dict( (abbrev, full) for full, abbrevs in _TIMEDELTA_ABBREVS for abbrev in abbrevs) _FLOAT_PATTERN = r'[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?' _TIMEDELTA_PATTERN = re.compile( r'\s*(%s)\s*(\w*)\s*' % _FLOAT_PATTERN, re.IGNORECASE) def _parse_timedelta(self, value): try: sum = datetime.timedelta() start = 0 while start < len(value): m = self._TIMEDELTA_PATTERN.match(value, start) if not m: raise Exception() num = float(m.group(1)) units = m.group(2) or 'seconds' units = self._TIMEDELTA_ABBREV_DICT.get(units, units) sum += datetime.timedelta(**{units: num}) start = m.end() return sum except: raise def _parse_bool(self, value): return value.lower() not in ("false", "0", "f") def _parse_string(self, value): return value.decode("utf-8") class Error(Exception): pass def enable_pretty_logging(): """Turns on formatted logging output as configured.""" if (options.log_to_stderr or (options.log_to_stderr is None and not options.log_file_prefix)): # Set up color if we are in a tty and curses is installed color = False if curses and sys.stderr.isatty(): try: curses.setupterm() if curses.tigetnum("colors") > 0: color = True except: pass channel = logging.StreamHandler() channel.setFormatter(_LogFormatter(color=color)) logging.getLogger().addHandler(channel) if options.log_file_prefix: channel = logging.handlers.RotatingFileHandler( 
filename=options.log_file_prefix, maxBytes=options.log_file_max_size, backupCount=options.log_file_num_backups) channel.setFormatter(_LogFormatter(color=False)) logging.getLogger().addHandler(channel) class _LogFormatter(logging.Formatter): def __init__(self, color, *args, **kwargs): logging.Formatter.__init__(self, *args, **kwargs) self._color = color if color: fg_color = curses.tigetstr("setaf") or curses.tigetstr("setf") or "" self._colors = { logging.DEBUG: curses.tparm(fg_color, 4), # Blue logging.INFO: curses.tparm(fg_color, 2), # Green logging.WARNING: curses.tparm(fg_color, 3), # Yellow logging.ERROR: curses.tparm(fg_color, 1), # Red } self._normal = curses.tigetstr("sgr0") def format(self, record): try: record.message = record.getMessage() except Exception, e: record.message = "Bad message (%r): %r" % (e, record.__dict__) record.asctime = time.strftime( "%y%m%d %H:%M:%S", self.converter(record.created)) prefix = '[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]' % \ record.__dict__ if self._color: prefix = (self._colors.get(record.levelno, self._normal) + prefix + self._normal) formatted = prefix + " " + record.message if record.exc_info: if not record.exc_text: record.exc_text = self.formatException(record.exc_info) if record.exc_text: formatted = formatted.rstrip() + "\n" + record.exc_text return formatted.replace("\n", "\n ") options = _Options.instance() # Default options define("help", type=bool, help="show this help information") define("logging", default="info", help=("Set the Python log level. If 'none', tornado won't touch the " "logging configuration."), metavar="info|warning|error|none") define("log_to_stderr", type=bool, default=None, help=("Send log output to stderr (colorized if possible). " "By default use stderr if --log_file_prefix is not set.")) define("log_file_prefix", type=str, default=None, metavar="PATH", help=("Path prefix for log files. 
" "Note that if you are running multiple tornado processes, " "log_file_prefix must be different for each of them (e.g. " "include the port number)")) define("log_file_max_size", type=int, default=100 * 1000 * 1000, help="max size of log files before rollover") define("log_file_num_backups", type=int, default=10, help="number of log files to keep") bup-0.25/lib/tornado/s3server.py000066400000000000000000000225701225146730500166030ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of an S3-like storage server based on local files. Useful to test features that will eventually run on S3, or if you want to run something locally that was once running on S3. We don't support all the features of S3, but it does work with the standard S3 client for the most basic semantics. 
To use the standard S3 client with this module: c = S3.AWSAuthConnection("", "", server="localhost", port=8888, is_secure=False) c.create_bucket("mybucket") c.put("mybucket", "mykey", "a value") print c.get("mybucket", "mykey").body """ import bisect import datetime import escape import hashlib import httpserver import ioloop import os import os.path import urllib import web def start(port, root_directory="/tmp/s3", bucket_depth=0): """Starts the mock S3 server on the given port at the given path.""" application = S3Application(root_directory, bucket_depth) http_server = httpserver.HTTPServer(application) http_server.listen(port) ioloop.IOLoop.instance().start() class S3Application(web.Application): """Implementation of an S3-like storage server based on local files. If bucket depth is given, we break files up into multiple directories to prevent hitting file system limits for number of files in each directories. 1 means one level of directories, 2 means 2, etc. """ def __init__(self, root_directory, bucket_depth=0): web.Application.__init__(self, [ (r"/", RootHandler), (r"/([^/]+)/(.+)", ObjectHandler), (r"/([^/]+)/", BucketHandler), ]) self.directory = os.path.abspath(root_directory) if not os.path.exists(self.directory): os.makedirs(self.directory) self.bucket_depth = bucket_depth class BaseRequestHandler(web.RequestHandler): SUPPORTED_METHODS = ("PUT", "GET", "DELETE") def render_xml(self, value): assert isinstance(value, dict) and len(value) == 1 self.set_header("Content-Type", "application/xml; charset=UTF-8") name = value.keys()[0] parts = [] parts.append('<' + escape.utf8(name) + ' xmlns="http://doc.s3.amazonaws.com/2006-03-01">') self._render_parts(value.values()[0], parts) parts.append('') self.finish('\n' + ''.join(parts)) def _render_parts(self, value, parts=[]): if isinstance(value, basestring): parts.append(escape.xhtml_escape(value)) elif isinstance(value, int) or isinstance(value, long): parts.append(str(value)) elif isinstance(value, 
datetime.datetime): parts.append(value.strftime("%Y-%m-%dT%H:%M:%S.000Z")) elif isinstance(value, dict): for name, subvalue in value.iteritems(): if not isinstance(subvalue, list): subvalue = [subvalue] for subsubvalue in subvalue: parts.append('<' + escape.utf8(name) + '>') self._render_parts(subsubvalue, parts) parts.append('') else: raise Exception("Unknown S3 value type %r", value) def _object_path(self, bucket, object_name): if self.application.bucket_depth < 1: return os.path.abspath(os.path.join( self.application.directory, bucket, object_name)) hash = hashlib.md5(object_name).hexdigest() path = os.path.abspath(os.path.join( self.application.directory, bucket)) for i in range(self.application.bucket_depth): path = os.path.join(path, hash[:2 * (i + 1)]) return os.path.join(path, object_name) class RootHandler(BaseRequestHandler): def get(self): names = os.listdir(self.application.directory) buckets = [] for name in names: path = os.path.join(self.application.directory, name) info = os.stat(path) buckets.append({ "Name": name, "CreationDate": datetime.datetime.utcfromtimestamp( info.st_ctime), }) self.render_xml({"ListAllMyBucketsResult": { "Buckets": {"Bucket": buckets}, }}) class BucketHandler(BaseRequestHandler): def get(self, bucket_name): prefix = self.get_argument("prefix", u"") marker = self.get_argument("marker", u"") max_keys = int(self.get_argument("max-keys", 50000)) path = os.path.abspath(os.path.join(self.application.directory, bucket_name)) terse = int(self.get_argument("terse", 0)) if not path.startswith(self.application.directory) or \ not os.path.isdir(path): raise web.HTTPError(404) object_names = [] for root, dirs, files in os.walk(path): for file_name in files: object_names.append(os.path.join(root, file_name)) skip = len(path) + 1 for i in range(self.application.bucket_depth): skip += 2 * (i + 1) + 1 object_names = [n[skip:] for n in object_names] object_names.sort() contents = [] start_pos = 0 if marker: start_pos = 
bisect.bisect_right(object_names, marker, start_pos) if prefix: start_pos = bisect.bisect_left(object_names, prefix, start_pos) truncated = False for object_name in object_names[start_pos:]: if not object_name.startswith(prefix): break if len(contents) >= max_keys: truncated = True break object_path = self._object_path(bucket_name, object_name) c = {"Key": object_name} if not terse: info = os.stat(object_path) c.update({ "LastModified": datetime.datetime.utcfromtimestamp( info.st_mtime), "Size": info.st_size, }) contents.append(c) marker = object_name self.render_xml({"ListBucketResult": { "Name": bucket_name, "Prefix": prefix, "Marker": marker, "MaxKeys": max_keys, "IsTruncated": truncated, "Contents": contents, }}) def put(self, bucket_name): path = os.path.abspath(os.path.join( self.application.directory, bucket_name)) if not path.startswith(self.application.directory) or \ os.path.exists(path): raise web.HTTPError(403) os.makedirs(path) self.finish() def delete(self, bucket_name): path = os.path.abspath(os.path.join( self.application.directory, bucket_name)) if not path.startswith(self.application.directory) or \ not os.path.isdir(path): raise web.HTTPError(404) if len(os.listdir(path)) > 0: raise web.HTTPError(403) os.rmdir(path) self.set_status(204) self.finish() class ObjectHandler(BaseRequestHandler): def get(self, bucket, object_name): object_name = urllib.unquote(object_name) path = self._object_path(bucket, object_name) if not path.startswith(self.application.directory) or \ not os.path.isfile(path): raise web.HTTPError(404) info = os.stat(path) self.set_header("Content-Type", "application/unknown") self.set_header("Last-Modified", datetime.datetime.utcfromtimestamp( info.st_mtime)) object_file = open(path, "r") try: self.finish(object_file.read()) finally: object_file.close() def put(self, bucket, object_name): object_name = urllib.unquote(object_name) bucket_dir = os.path.abspath(os.path.join( self.application.directory, bucket)) if not 
bucket_dir.startswith(self.application.directory) or \ not os.path.isdir(bucket_dir): raise web.HTTPError(404) path = self._object_path(bucket, object_name) if not path.startswith(bucket_dir) or os.path.isdir(path): raise web.HTTPError(403) directory = os.path.dirname(path) if not os.path.exists(directory): os.makedirs(directory) object_file = open(path, "w") object_file.write(self.request.body) object_file.close() self.finish() def delete(self, bucket, object_name): object_name = urllib.unquote(object_name) path = self._object_path(bucket, object_name) if not path.startswith(self.application.directory) or \ not os.path.isfile(path): raise web.HTTPError(404) os.unlink(path) self.set_status(204) self.finish() bup-0.25/lib/tornado/template.py000066400000000000000000000444351225146730500166460ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A simple template system that compiles templates to Python code. Basic usage looks like: t = template.Template("{{ myvalue }}") print t.generate(myvalue="XXX") Loader is a class that loads templates from a root directory and caches the compiled templates: loader = template.Loader("/home/btaylor") print loader.load("test.html").generate(myvalue="XXX") We compile all templates to raw Python. Error-reporting is currently... uh, interesting. Syntax for the templates ### base.html {% block title %}Default title{% end %}
    {% for student in students %} {% block student %}
  • {{ escape(student.name) }}
  • {% end %} {% end %}
### bold.html {% extends "base.html" %} {% block title %}A bolder title{% end %} {% block student %}
  • {{ escape(student.name) }}
  • {% block %} Unlike most other template systems, we do not put any restrictions on the expressions you can include in your statements. if and for blocks get translated exactly into Python, do you can do complex expressions like: {% for student in [p for p in people if p.student and p.age > 23] %}
  • {{ escape(student.name) }}
  • {% end %} Translating directly to Python means you can apply functions to expressions easily, like the escape() function in the examples above. You can pass functions in to your template just like any other variable: ### Python code def add(x, y): return x + y template.execute(add=add) ### The template {{ add(1, 2) }} We provide the functions escape(), url_escape(), json_encode(), and squeeze() to all templates by default. """ from __future__ import with_statement import cStringIO import datetime import escape import logging import os.path import re class Template(object): """A compiled template. We compile into Python from the given template_string. You can generate the template from variables with generate(). """ def __init__(self, template_string, name="", loader=None, compress_whitespace=None): self.name = name if compress_whitespace is None: compress_whitespace = name.endswith(".html") or \ name.endswith(".js") reader = _TemplateReader(name, template_string) self.file = _File(_parse(reader)) self.code = self._generate_python(loader, compress_whitespace) try: self.compiled = compile(self.code, self.name, "exec") except: formatted_code = _format_code(self.code).rstrip() logging.error("%s code:\n%s", self.name, formatted_code) raise def generate(self, **kwargs): """Generate this template with the given arguments.""" namespace = { "escape": escape.xhtml_escape, "url_escape": escape.url_escape, "json_encode": escape.json_encode, "squeeze": escape.squeeze, "datetime": datetime, } namespace.update(kwargs) exec self.compiled in namespace execute = namespace["_execute"] try: return execute() except: formatted_code = _format_code(self.code).rstrip() logging.error("%s code:\n%s", self.name, formatted_code) raise def _generate_python(self, loader, compress_whitespace): buffer = cStringIO.StringIO() try: named_blocks = {} ancestors = self._get_ancestors(loader) ancestors.reverse() for ancestor in ancestors: ancestor.find_named_blocks(loader, named_blocks) 
self.file.find_named_blocks(loader, named_blocks) writer = _CodeWriter(buffer, named_blocks, loader, self, compress_whitespace) ancestors[0].generate(writer) return buffer.getvalue() finally: buffer.close() def _get_ancestors(self, loader): ancestors = [self.file] for chunk in self.file.body.chunks: if isinstance(chunk, _ExtendsBlock): if not loader: raise ParseError("{% extends %} block found, but no " "template loader") template = loader.load(chunk.name, self.name) ancestors.extend(template._get_ancestors(loader)) return ancestors class Loader(object): """A template loader that loads from a single root directory. You must use a template loader to use template constructs like {% extends %} and {% include %}. Loader caches all templates after they are loaded the first time. """ def __init__(self, root_directory): self.root = os.path.abspath(root_directory) self.templates = {} def reset(self): self.templates = {} def resolve_path(self, name, parent_path=None): if parent_path and not parent_path.startswith("<") and \ not parent_path.startswith("/") and \ not name.startswith("/"): current_path = os.path.join(self.root, parent_path) file_dir = os.path.dirname(os.path.abspath(current_path)) relative_path = os.path.abspath(os.path.join(file_dir, name)) if relative_path.startswith(self.root): name = relative_path[len(self.root) + 1:] return name def load(self, name, parent_path=None): name = self.resolve_path(name, parent_path=parent_path) if name not in self.templates: path = os.path.join(self.root, name) f = open(path, "r") self.templates[name] = Template(f.read(), name=name, loader=self) f.close() return self.templates[name] class _Node(object): def each_child(self): return () def generate(self, writer): raise NotImplementedError() def find_named_blocks(self, loader, named_blocks): for child in self.each_child(): child.find_named_blocks(loader, named_blocks) class _File(_Node): def __init__(self, body): self.body = body def generate(self, writer): 
writer.write_line("def _execute():") with writer.indent(): writer.write_line("_buffer = []") self.body.generate(writer) writer.write_line("return ''.join(_buffer)") def each_child(self): return (self.body,) class _ChunkList(_Node): def __init__(self, chunks): self.chunks = chunks def generate(self, writer): for chunk in self.chunks: chunk.generate(writer) def each_child(self): return self.chunks class _NamedBlock(_Node): def __init__(self, name, body=None): self.name = name self.body = body def each_child(self): return (self.body,) def generate(self, writer): writer.named_blocks[self.name].generate(writer) def find_named_blocks(self, loader, named_blocks): named_blocks[self.name] = self.body _Node.find_named_blocks(self, loader, named_blocks) class _ExtendsBlock(_Node): def __init__(self, name): self.name = name class _IncludeBlock(_Node): def __init__(self, name, reader): self.name = name self.template_name = reader.name def find_named_blocks(self, loader, named_blocks): included = loader.load(self.name, self.template_name) included.file.find_named_blocks(loader, named_blocks) def generate(self, writer): included = writer.loader.load(self.name, self.template_name) old = writer.current_template writer.current_template = included included.file.body.generate(writer) writer.current_template = old class _ApplyBlock(_Node): def __init__(self, method, body=None): self.method = method self.body = body def each_child(self): return (self.body,) def generate(self, writer): method_name = "apply%d" % writer.apply_counter writer.apply_counter += 1 writer.write_line("def %s():" % method_name) with writer.indent(): writer.write_line("_buffer = []") self.body.generate(writer) writer.write_line("return ''.join(_buffer)") writer.write_line("_buffer.append(%s(%s()))" % ( self.method, method_name)) class _ControlBlock(_Node): def __init__(self, statement, body=None): self.statement = statement self.body = body def each_child(self): return (self.body,) def generate(self, writer): 
writer.write_line("%s:" % self.statement) with writer.indent(): self.body.generate(writer) class _IntermediateControlBlock(_Node): def __init__(self, statement): self.statement = statement def generate(self, writer): writer.write_line("%s:" % self.statement, writer.indent_size() - 1) class _Statement(_Node): def __init__(self, statement): self.statement = statement def generate(self, writer): writer.write_line(self.statement) class _Expression(_Node): def __init__(self, expression): self.expression = expression def generate(self, writer): writer.write_line("_tmp = %s" % self.expression) writer.write_line("if isinstance(_tmp, str): _buffer.append(_tmp)") writer.write_line("elif isinstance(_tmp, unicode): " "_buffer.append(_tmp.encode('utf-8'))") writer.write_line("else: _buffer.append(str(_tmp))") class _Text(_Node): def __init__(self, value): self.value = value def generate(self, writer): value = self.value # Compress lots of white space to a single character. If the whitespace # breaks a line, have it continue to break a line, but just with a # single \n character if writer.compress_whitespace and "
    " not in value:
                value = re.sub(r"([\t ]+)", " ", value)
                value = re.sub(r"(\s*\n\s*)", "\n", value)
    
            if value:
                writer.write_line('_buffer.append(%r)' % value)
    
    
    class ParseError(Exception):
        """Raised for template syntax errors."""
        pass
    
    
    class _CodeWriter(object):
        def __init__(self, file, named_blocks, loader, current_template,
                     compress_whitespace):
            self.file = file
            self.named_blocks = named_blocks
            self.loader = loader
            self.current_template = current_template
            self.compress_whitespace = compress_whitespace
            self.apply_counter = 0
            self._indent = 0
    
        def indent(self):
            return self
    
        def indent_size(self):
            return self._indent
    
        def __enter__(self):
            self._indent += 1
            return self
    
        def __exit__(self, *args):
            assert self._indent > 0
            self._indent -= 1
    
        def write_line(self, line, indent=None):
            if indent == None:
                indent = self._indent
            for i in xrange(indent):
                self.file.write("    ")
            print >> self.file, line
    
    
    class _TemplateReader(object):
        def __init__(self, name, text):
            self.name = name
            self.text = text
            self.line = 0
            self.pos = 0
    
        def find(self, needle, start=0, end=None):
            assert start >= 0, start
            pos = self.pos
            start += pos
            if end is None:
                index = self.text.find(needle, start)
            else:
                end += pos
                assert end >= start
                index = self.text.find(needle, start, end)
            if index != -1:
                index -= pos
            return index
    
        def consume(self, count=None):
            if count is None:
                count = len(self.text) - self.pos
            newpos = self.pos + count
            self.line += self.text.count("\n", self.pos, newpos)
            s = self.text[self.pos:newpos]
            self.pos = newpos
            return s
    
        def remaining(self):
            return len(self.text) - self.pos
    
        def __len__(self):
            return self.remaining()
    
        def __getitem__(self, key):
            if type(key) is slice:
                size = len(self)
                start, stop, step = slice.indices(size)
                if start is None: start = self.pos
                else: start += self.pos
                if stop is not None: stop += self.pos
                return self.text[slice(start, stop, step)]
            elif key < 0:
                return self.text[key]
            else:
                return self.text[self.pos + key]
    
        def __str__(self):
            return self.text[self.pos:]
    
    
    def _format_code(code):
        lines = code.splitlines()
        format = "%%%dd  %%s\n" % len(repr(len(lines) + 1))
        return "".join([format % (i + 1, line) for (i, line) in enumerate(lines)])
    
    
def _parse(reader, in_block=None):
    """Parse template source from 'reader' into a _ChunkList of _Node chunks.

    'in_block' is the operator name of the enclosing {% ... %} tag ("if",
    "for", "block", ...) when called recursively, or None at top level.
    It determines whether an {% end %} tag or an intermediate tag
    ("else", "elif", "except", "finally") is legal at this point.
    Raises ParseError for malformed template syntax.
    """
    body = _ChunkList([])
    while True:
        # Find next template directive
        curly = 0
        while True:
            curly = reader.find("{", curly)
            if curly == -1 or curly + 1 == reader.remaining():
                # EOF
                if in_block:
                    raise ParseError("Missing {%% end %%} block for %s" %
                                     in_block)
                body.chunks.append(_Text(reader.consume()))
                return body
            # If the first curly brace is not the start of a special token,
            # start searching from the character after it
            if reader[curly + 1] not in ("{", "%"):
                curly += 1
                continue
            # When there are more than 2 curlies in a row, use the
            # innermost ones.  This is useful when generating languages
            # like latex where curlies are also meaningful
            if (curly + 2 < reader.remaining() and
                reader[curly + 1] == '{' and reader[curly + 2] == '{'):
                curly += 1
                continue
            break

        # Append any text before the special token
        if curly > 0:
            body.chunks.append(_Text(reader.consume(curly)))

        # Consume the opening "{{" or "{%" and remember the line number
        # for error messages.
        start_brace = reader.consume(2)
        line = reader.line

        # Expression
        if start_brace == "{{":
            end = reader.find("}}")
            if end == -1 or reader.find("\n", 0, end) != -1:
                raise ParseError("Missing end expression }} on line %d" % line)
            contents = reader.consume(end).strip()
            reader.consume(2)
            if not contents:
                raise ParseError("Empty expression on line %d" % line)
            body.chunks.append(_Expression(contents))
            continue

        # Block
        assert start_brace == "{%", start_brace
        end = reader.find("%}")
        if end == -1 or reader.find("\n", 0, end) != -1:
            raise ParseError("Missing end block %%} on line %d" % line)
        contents = reader.consume(end).strip()
        reader.consume(2)
        if not contents:
            raise ParseError("Empty block tag ({%% %%}) on line %d" % line)

        # Split on the first space: e.g. "for x in xs" -> operator "for",
        # suffix "x in xs".  A bare tag like "end" has an empty suffix.
        operator, space, suffix = contents.partition(" ")
        suffix = suffix.strip()

        # Intermediate ("else", "elif", etc) blocks
        intermediate_blocks = {
            "else": set(["if", "for", "while"]),
            "elif": set(["if"]),
            "except": set(["try"]),
            "finally": set(["try"]),
        }
        allowed_parents = intermediate_blocks.get(operator)
        if allowed_parents is not None:
            if not in_block:
                raise ParseError("%s outside %s block" %
                            (operator, allowed_parents))
            if in_block not in allowed_parents:
                raise ParseError("%s block cannot be attached to %s block" % (operator, in_block))
            body.chunks.append(_IntermediateControlBlock(contents))
            continue

        # End tag
        elif operator == "end":
            if not in_block:
                raise ParseError("Extra {%% end %%} block on line %d" % line)
            return body

        elif operator in ("extends", "include", "set", "import", "comment"):
            if operator == "comment":
                continue
            if operator == "extends":
                suffix = suffix.strip('"').strip("'")
                if not suffix:
                    raise ParseError("extends missing file path on line %d" % line)
                block = _ExtendsBlock(suffix)
            elif operator == "import":
                if not suffix:
                    raise ParseError("import missing statement on line %d" % line)
                block = _Statement(contents)
            elif operator == "include":
                suffix = suffix.strip('"').strip("'")
                if not suffix:
                    raise ParseError("include missing file path on line %d" % line)
                block = _IncludeBlock(suffix, reader)
            elif operator == "set":
                if not suffix:
                    raise ParseError("set missing statement on line %d" % line)
                block = _Statement(suffix)
            body.chunks.append(block)
            continue

        elif operator in ("apply", "block", "try", "if", "for", "while"):
            # parse inner body recursively
            block_body = _parse(reader, operator)
            if operator == "apply":
                if not suffix:
                    raise ParseError("apply missing method name on line %d" % line)
                block = _ApplyBlock(suffix, block_body)
            elif operator == "block":
                if not suffix:
                    raise ParseError("block missing name on line %d" % line)
                block = _NamedBlock(suffix, block_body)
            else:
                block = _ControlBlock(contents, block_body)
            body.chunks.append(block)
            continue

        else:
            raise ParseError("unknown operator: %r" % operator)
    bup-0.25/lib/tornado/test/000077500000000000000000000000001225146730500154265ustar00rootroot00000000000000bup-0.25/lib/tornado/test/README000066400000000000000000000003761225146730500163140ustar00rootroot00000000000000Test coverage is almost non-existent, but it's a start.  Be sure to
set PYTHONPATH appropriately (generally to the root directory of your
    tornado checkout) when running tests to make sure you're getting the
    version of the tornado package that you expect.bup-0.25/lib/tornado/test/test_ioloop.py000077500000000000000000000017621225146730500203510ustar00rootroot00000000000000#!/usr/bin/env python
    
    import unittest
    import time
    
    from tornado import ioloop
    
    
class TestIOLoop(unittest.TestCase):
    """Smoke tests for tornado.ioloop.IOLoop callback scheduling."""

    def setUp(self):
        # Fresh, non-singleton loop per test so state cannot leak between tests.
        self.loop = ioloop.IOLoop()

    def tearDown(self):
        pass

    def _callback(self):
        # Record that the callback ran, then stop the loop so start() returns.
        self.called = True
        self.loop.stop()

    def _schedule_callback(self):
        # Runs inside the loop (via add_timeout); schedules _callback and
        # records the time so the test can measure wakeup latency.
        self.loop.add_callback(self._callback)
        # Scroll away the time so we can check if we woke up immediately
        self._start_time = time.time()
        self.called = False

    def test_add_callback(self):
        self.loop.add_timeout(time.time(), self._schedule_callback)
        self.loop.start() # Set some long poll timeout so we can check wakeup
        # add_callback should wake the loop promptly; places=2 allows ~10ms.
        self.assertAlmostEqual(time.time(), self._start_time, places=2)
        self.assertTrue(self.called)
    
    
    if __name__ == "__main__":
        import logging
    
        logging.basicConfig(level=logging.DEBUG, format='%(asctime)s:%(msecs)03d %(levelname)-8s %(name)-8s %(message)s', datefmt='%H:%M:%S')
    
        unittest.main()
    bup-0.25/lib/tornado/web.py000066400000000000000000001603241225146730500156040ustar00rootroot00000000000000#!/usr/bin/env python
    #
    # Copyright 2009 Facebook
    #
    # Licensed under the Apache License, Version 2.0 (the "License"); you may
    # not use this file except in compliance with the License. You may obtain
    # a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    # License for the specific language governing permissions and limitations
    # under the License.
    
    """The Tornado web framework.
    
    The Tornado web framework looks a bit like web.py (http://webpy.org/) or
    Google's webapp (http://code.google.com/appengine/docs/python/tools/webapp/),
    but with additional tools and optimizations to take advantage of the
    Tornado non-blocking web server and tools.
    
    Here is the canonical "Hello, world" example app:
    
        import tornado.httpserver
        import tornado.ioloop
        import tornado.web
    
        class MainHandler(tornado.web.RequestHandler):
            def get(self):
                self.write("Hello, world")
    
        if __name__ == "__main__":
            application = tornado.web.Application([
                (r"/", MainHandler),
            ])
            http_server = tornado.httpserver.HTTPServer(application)
            http_server.listen(8888)
            tornado.ioloop.IOLoop.instance().start()
    
    See the Tornado walkthrough on GitHub for more details and a good
    getting started guide.
    """
    
    import base64
    import binascii
    import calendar
    import Cookie
    import cStringIO
    import datetime
    import email.utils
    import escape
    import functools
    import gzip
    import hashlib
    import hmac
    import httplib
    import locale
    import logging
    import mimetypes
    import os.path
    import re
    import stat
    import sys
    import template
    import time
    import types
    import urllib
    import urlparse
    import uuid
    
    class RequestHandler(object):
        """Subclass this class and define get() or post() to make a handler.
    
        If you want to support more methods than the standard GET/HEAD/POST, you
        should override the class variable SUPPORTED_METHODS in your
        RequestHandler class.
        """
        SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PUT")
    
    def __init__(self, application, request, transforms=None):
        """Binds this handler to its Application/request and resets response state."""
        self.application = application
        self.request = request
        self._headers_written = False
        self._finished = False
        self._auto_finish = True
        self._transforms = transforms or []
        # Expose the application's UI methods/modules to templates via self.ui.
        # _O is defined elsewhere in this file -- presumably an attribute-access
        # dict helper; verify against its definition.
        self.ui = _O((n, self._ui_method(m)) for n, m in
                     application.ui_methods.iteritems())
        self.ui["modules"] = _O((n, self._ui_module(n, m)) for n, m in
                                application.ui_modules.iteritems())
        self.clear()
        # Check since connection is not available in WSGI
        if hasattr(self.request, "connection"):
            self.request.connection.stream.set_close_callback(
                self.on_connection_close)
    
    @property
    def settings(self):
        """Shortcut for the owning Application's settings dictionary."""
        return self.application.settings
    
    # Default implementations of the supported HTTP verbs: each one rejects
    # the request with 405 (Method Not Allowed) until a subclass overrides it.
    def head(self, *args, **kwargs):
        raise HTTPError(405)

    def get(self, *args, **kwargs):
        raise HTTPError(405)

    def post(self, *args, **kwargs):
        raise HTTPError(405)

    def delete(self, *args, **kwargs):
        raise HTTPError(405)

    def put(self, *args, **kwargs):
        raise HTTPError(405)
    
    def prepare(self):
        """Called before the actual handler method.

        Useful to override in a handler if you want a common bottleneck for
        all of your requests (e.g. authentication or logging hooks).
        """
        pass
    
    def on_connection_close(self):
        """Called in async handlers if the client closed the connection.

        You may override this to clean up resources associated with
        long-lived connections.

        Note that the select()-based implementation of IOLoop does not detect
        closed connections and so this method will not be called until
        you try (and fail) to produce some output.  The epoll- and kqueue-
        based implementations should detect closed connections even while
        the request is idle.
        """
        pass
    
        def clear(self):
            """Resets all headers and content for this response."""
            self._headers = {
                "Server": "TornadoServer/0.1",
                "Content-Type": "text/html; charset=UTF-8",
            }
            if not self.request.supports_http_1_1():
                if self.request.headers.get("Connection") == "Keep-Alive":
                    self.set_header("Connection", "Keep-Alive")
            self._write_buffer = []
            self._status_code = 200
    
    def set_status(self, status_code):
        """Sets the status code for our response.

        Only codes known to the standard library (httplib.responses) are
        accepted; anything else trips the assertion.
        """
        assert status_code in httplib.responses
        self._status_code = status_code
    
        def set_header(self, name, value):
            """Sets the given response header name and value.
    
            If a datetime is given, we automatically format it according to the
            HTTP specification. If the value is not a string, we convert it to
            a string. All header values are then encoded as UTF-8.
            """
            if isinstance(value, datetime.datetime):
                t = calendar.timegm(value.utctimetuple())
                value = email.utils.formatdate(t, localtime=False, usegmt=True)
            elif isinstance(value, int) or isinstance(value, long):
                value = str(value)
            else:
                value = _utf8(value)
                # If \n is allowed into the header, it is possible to inject
                # additional headers or split the request. Also cap length to
                # prevent obviously erroneous values.
                safe_value = re.sub(r"[\x00-\x1f]", " ", value)[:4000]
                if safe_value != value:
                    raise ValueError("Unsafe header value %r", value)
            self._headers[name] = value
    
    # Unique sentinel so callers can legitimately pass default=None.
    _ARG_DEFAULT = []
    def get_argument(self, name, default=_ARG_DEFAULT, strip=True):
        """Returns the value of the argument with the given name.

        If default is not provided, the argument is considered to be
        required, and we throw an HTTP 404 exception if it is missing.

        If the argument appears in the url more than once, we return the
        last value.

        The returned value is always unicode.
        """
        args = self.get_arguments(name, strip=strip)
        if not args:
            if default is self._ARG_DEFAULT:
                raise HTTPError(404, "Missing argument %s" % name)
            return default
        return args[-1]
    
        def get_arguments(self, name, strip=True):
            """Returns a list of the arguments with the given name.
    
            If the argument is not present, returns an empty list.
    
            The returned values are always unicode.
            """
            values = self.request.arguments.get(name, [])
            # Get rid of any weird control chars
            values = [re.sub(r"[\x00-\x08\x0e-\x1f]", " ", x) for x in values]
            values = [_unicode(x) for x in values]
            if strip:
                values = [x.strip() for x in values]
            return values
    
    
    @property
    def cookies(self):
        """A dictionary of Cookie.Morsel objects.

        Parsed lazily from the request's Cookie header on first access.
        """
        if not hasattr(self, "_cookies"):
            self._cookies = Cookie.BaseCookie()
            if "Cookie" in self.request.headers:
                try:
                    self._cookies.load(self.request.headers["Cookie"])
                except:
                    # Malformed Cookie header: deliberately best-effort --
                    # drop everything rather than fail the request.
                    self.clear_all_cookies()
        return self._cookies
    
        def get_cookie(self, name, default=None):
            """Gets the value of the cookie with the given name, else default."""
            if name in self.cookies:
                return self.cookies[name].value
            return default
    
    def set_cookie(self, name, value, domain=None, expires=None, path="/",
                   expires_days=None, **kwargs):
        """Sets the given cookie name/value with the given options.

        Additional keyword arguments are set on the Cookie.Morsel
        directly.
        See http://docs.python.org/library/cookie.html#morsel-objects
        for available attributes.
        """
        name = _utf8(name)
        value = _utf8(value)
        if re.search(r"[\x00-\x20]", name + value):
            # Don't let us accidentally inject bad stuff
            raise ValueError("Invalid cookie %r: %r" % (name, value))
        # Each call builds its own BaseCookie; the accumulated list is
        # presumably rendered into Set-Cookie headers elsewhere -- verify
        # against _generate_headers.
        if not hasattr(self, "_new_cookies"):
            self._new_cookies = []
        new_cookie = Cookie.BaseCookie()
        self._new_cookies.append(new_cookie)
        new_cookie[name] = value
        if domain:
            new_cookie[name]["domain"] = domain
        # expires_days is a convenience; an explicit expires wins.
        if expires_days is not None and not expires:
            expires = datetime.datetime.utcnow() + datetime.timedelta(
                days=expires_days)
        if expires:
            timestamp = calendar.timegm(expires.utctimetuple())
            new_cookie[name]["expires"] = email.utils.formatdate(
                timestamp, localtime=False, usegmt=True)
        if path:
            new_cookie[name]["path"] = path
        # NOTE(review): kwargs keys are used verbatim as Morsel attributes;
        # they must already be valid Morsel keys (e.g. "max-age", not
        # "max_age").
        for k, v in kwargs.iteritems():
            new_cookie[name][k] = v
    
        def clear_cookie(self, name, path="/", domain=None):
            """Deletes the cookie with the given name."""
            expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
            self.set_cookie(name, value="", path=path, expires=expires,
                            domain=domain)
    
        def clear_all_cookies(self):
            """Deletes all the cookies the user sent with this request."""
            for name in self.cookies.iterkeys():
                self.clear_cookie(name)
    
    def set_secure_cookie(self, name, value, expires_days=30, **kwargs):
        """Signs and timestamps a cookie so it cannot be forged.

        You must specify the 'cookie_secret' setting in your Application
        to use this method. It should be a long, random sequence of bytes
        to be used as the HMAC secret for the signature.

        To read a cookie set with this method, use get_secure_cookie().
        """
        # Wire format: base64(value)|unix_timestamp|hmac_signature
        timestamp = str(int(time.time()))
        value = base64.b64encode(value)
        signature = self._cookie_signature(name, value, timestamp)
        value = "|".join([value, timestamp, signature])
        self.set_cookie(name, value, expires_days=expires_days, **kwargs)
    
        def get_secure_cookie(self, name, include_name=True, value=None):
            """Returns the given signed cookie if it validates, or None.
    
            In older versions of Tornado (0.1 and 0.2), we did not include the
            name of the cookie in the cookie signature. To read these old-style
            cookies, pass include_name=False to this method. Otherwise, all
            attempts to read old-style cookies will fail (and you may log all
            your users out whose cookies were written with a previous Tornado
            version).
            """
            if value is None: value = self.get_cookie(name)
            if not value: return None
            parts = value.split("|")
            if len(parts) != 3: return None
            if include_name:
                signature = self._cookie_signature(name, parts[0], parts[1])
            else:
                signature = self._cookie_signature(parts[0], parts[1])
            if not _time_independent_equals(parts[2], signature):
                logging.warning("Invalid cookie signature %r", value)
                return None
            timestamp = int(parts[1])
            if timestamp < time.time() - 31 * 86400:
                logging.warning("Expired cookie %r", value)
                return None
            try:
                return base64.b64decode(parts[0])
            except:
                return None
    
        def _cookie_signature(self, *parts):
            self.require_setting("cookie_secret", "secure cookies")
            hash = hmac.new(self.application.settings["cookie_secret"],
                            digestmod=hashlib.sha1)
            for part in parts: hash.update(part)
            return hash.hexdigest()
    
        def redirect(self, url, permanent=False):
            """Sends a redirect to the given (optionally relative) URL."""
            if self._headers_written:
                raise Exception("Cannot redirect after headers have been written")
            self.set_status(301 if permanent else 302)
            # Remove whitespace
            url = re.sub(r"[\x00-\x20]+", "", _utf8(url))
            self.set_header("Location", urlparse.urljoin(self.request.uri, url))
            self.finish()
    
    def write(self, chunk):
        """Writes the given chunk to the output buffer.

        To write the output to the network, use the flush() method below.

        If the given chunk is a dictionary, we write it as JSON and set
        the Content-Type of the response to be text/javascript.
        """
        assert not self._finished
        if isinstance(chunk, dict):
            chunk = escape.json_encode(chunk)
            self.set_header("Content-Type", "text/javascript; charset=UTF-8")
        # Buffered until flush()/finish(); always stored as UTF-8 bytes.
        chunk = _utf8(chunk)
        self._write_buffer.append(chunk)
    
    def render(self, template_name, **kwargs):
        """Renders the template with the given arguments as the response.

        After generating the template output, splices in any JavaScript/CSS
        (embedded snippets and external files) plus extra head/body HTML
        contributed by the UI modules used on the page, then finishes the
        request with the combined HTML.

        NOTE(review): several string literals below appear empty (''), e.g.
        the arguments to rindex()/index() and the joined tag strings; in
        upstream Tornado these are HTML tags such as </body> and </head>.
        The markup looks stripped from this copy -- verify against upstream
        before relying on this method.
        """
        html = self.render_string(template_name, **kwargs)

        # Insert the additional JS and CSS added by the modules on the page
        js_embed = []
        js_files = []
        css_embed = []
        css_files = []
        html_heads = []
        html_bodies = []
        for module in getattr(self, "_active_modules", {}).itervalues():
            embed_part = module.embedded_javascript()
            if embed_part: js_embed.append(_utf8(embed_part))
            file_part = module.javascript_files()
            if file_part:
                if isinstance(file_part, basestring):
                    js_files.append(file_part)
                else:
                    js_files.extend(file_part)
            embed_part = module.embedded_css()
            if embed_part: css_embed.append(_utf8(embed_part))
            file_part = module.css_files()
            if file_part:
                if isinstance(file_part, basestring):
                    css_files.append(file_part)
                else:
                    css_files.extend(file_part)
            head_part = module.html_head()
            if head_part: html_heads.append(_utf8(head_part))
            body_part = module.html_body()
            if body_part: html_bodies.append(_utf8(body_part))
        if js_files:
            # Maintain order of JavaScript files given by modules
            paths = []
            unique_paths = set()
            for path in js_files:
                if not path.startswith("/") and not path.startswith("http:"):
                    path = self.static_url(path)
                if path not in unique_paths:
                    paths.append(path)
                    unique_paths.add(path)
            js = ''.join(''
                         for p in paths)
            sloc = html.rindex('')
            html = html[:sloc] + js + '\n' + html[sloc:]
        if js_embed:
            js = ''
            sloc = html.rindex('')
            html = html[:sloc] + js + '\n' + html[sloc:]
        if css_files:
            # Unlike JS, CSS file order is not preserved (set, not list).
            paths = set()
            for path in css_files:
                if not path.startswith("/") and not path.startswith("http:"):
                    paths.add(self.static_url(path))
                else:
                    paths.add(path)
            css = ''.join(''
                          for p in paths)
            hloc = html.index('')
            html = html[:hloc] + css + '\n' + html[hloc:]
        if css_embed:
            css = ''
            hloc = html.index('')
            html = html[:hloc] + css + '\n' + html[hloc:]
        if html_heads:
            hloc = html.index('')
            html = html[:hloc] + ''.join(html_heads) + '\n' + html[hloc:]
        if html_bodies:
            hloc = html.index('')
            html = html[:hloc] + ''.join(html_bodies) + '\n' + html[hloc:]
        self.finish(html)
    
    def render_string(self, template_name, **kwargs):
        """Generate the given template with the given arguments.

        We return the generated string. To generate and write a template
        as a response, use render() above.
        """
        # If no template_path is specified, use the path of the calling file
        template_path = self.get_template_path()
        if not template_path:
            # Walk up the stack past this module to find the caller's file.
            frame = sys._getframe(0)
            web_file = frame.f_code.co_filename
            while frame.f_code.co_filename == web_file:
                frame = frame.f_back
            template_path = os.path.dirname(frame.f_code.co_filename)
        # Template loaders are cached per path on the RequestHandler class
        # itself, so they are shared by all handlers in the process.
        if not getattr(RequestHandler, "_templates", None):
            RequestHandler._templates = {}
        if template_path not in RequestHandler._templates:
            loader = self.application.settings.get("template_loader") or\
              template.Loader(template_path)
            RequestHandler._templates[template_path] = loader
        t = RequestHandler._templates[template_path].load(template_name)
        # Standard namespace available to every template; caller kwargs
        # override these and the UI method/module entries.
        args = dict(
            handler=self,
            request=self.request,
            current_user=self.current_user,
            locale=self.locale,
            _=self.locale.translate,
            static_url=self.static_url,
            xsrf_form_html=self.xsrf_form_html,
            reverse_url=self.application.reverse_url
        )
        args.update(self.ui)
        args.update(kwargs)
        return t.generate(**args)
    
    def flush(self, include_footers=False):
        """Flushes the current output buffer to the network.

        Output transforms (e.g. chunking/compression) are applied to the
        headers and first chunk on the first flush, and to each subsequent
        chunk after that.
        """
        if self.application._wsgi:
            raise Exception("WSGI applications do not support flush()")

        chunk = "".join(self._write_buffer)
        self._write_buffer = []
        if not self._headers_written:
            self._headers_written = True
            for transform in self._transforms:
                self._headers, chunk = transform.transform_first_chunk(
                    self._headers, chunk, include_footers)
            headers = self._generate_headers()
        else:
            for transform in self._transforms:
                chunk = transform.transform_chunk(chunk, include_footers)
            headers = ""

        # Ignore the chunk and only write the headers for HEAD requests
        if self.request.method == "HEAD":
            if headers: self.request.write(headers)
            return

        if headers or chunk:
            self.request.write(headers + chunk)
    
    def finish(self, chunk=None):
        """Finishes this response, ending the HTTP request.

        May be called with a final chunk to write. Must be called exactly
        once per request (asserted below).
        """
        assert not self._finished
        if chunk is not None: self.write(chunk)

        # Automatically support ETags and add the Content-Length header if
        # we have not flushed any content yet.
        if not self._headers_written:
            if (self._status_code == 200 and self.request.method == "GET" and
                "Etag" not in self._headers):
                hasher = hashlib.sha1()
                for part in self._write_buffer:
                    hasher.update(part)
                etag = '"%s"' % hasher.hexdigest()
                inm = self.request.headers.get("If-None-Match")
                if inm and inm.find(etag) != -1:
                    # Client already has this content: empty 304 response.
                    self._write_buffer = []
                    self.set_status(304)
                else:
                    self.set_header("Etag", etag)
            if "Content-Length" not in self._headers:
                content_length = sum(len(part) for part in self._write_buffer)
                self.set_header("Content-Length", content_length)

        if hasattr(self.request, "connection"):
            # Now that the request is finished, clear the callback we
            # set on the IOStream (which would otherwise prevent the
            # garbage collection of the RequestHandler when there
            # are keepalive connections)
            self.request.connection.stream.set_close_callback(None)

        if not self.application._wsgi:
            self.flush(include_footers=True)
            self.request.finish()
            self._log()
        self._finished = True
    
        def send_error(self, status_code=500, **kwargs):
            """Sends the given HTTP error code to the browser.
    
            We also send the error HTML for the given error code as returned by
            get_error_html. Override that method if you want custom error pages
            for your application.
            """
            if self._headers_written:
                logging.error("Cannot send error response after headers written")
                if not self._finished:
                    self.finish()
                return
            self.clear()
            self.set_status(status_code)
            message = self.get_error_html(status_code, **kwargs)
            self.finish(message)
    
    def get_error_html(self, status_code, **kwargs):
        """Override to implement custom error pages.

        If this error was caused by an uncaught exception, the
        exception object can be found in kwargs e.g. kwargs['exception']

        NOTE(review): the two format strings below look identical and
        tag-free; in upstream Tornado they form a minimal HTML error page
        (<html><title>...</title><body>...</body></html>). The markup
        appears stripped from this copy -- verify against upstream.
        """
        return "%(code)d: %(message)s" \
               "%(code)d: %(message)s" % {
            "code": status_code,
            "message": httplib.responses[status_code],
        }
    
    @property
    def locale(self):
        """The locale for the current session.

        Determined by either get_user_locale, which you can override to
        set the locale based on, e.g., a user preference stored in a
        database, or get_browser_locale, which uses the Accept-Language
        header.

        Cached on first access.
        """
        if not hasattr(self, "_locale"):
            self._locale = self.get_user_locale()
            if not self._locale:
                self._locale = self.get_browser_locale()
                assert self._locale
        return self._locale
    
    def get_user_locale(self):
        """Override to determine the locale from the authenticated user.

        If None is returned, we use the Accept-Language header
        (see get_browser_locale).
        """
        return None
    
        def get_browser_locale(self, default="en_US"):
            """Determines the user's locale from Accept-Language header.
    
            See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
            """
            if "Accept-Language" in self.request.headers:
                languages = self.request.headers["Accept-Language"].split(",")
                locales = []
                for language in languages:
                    parts = language.strip().split(";")
                    if len(parts) > 1 and parts[1].startswith("q="):
                        try:
                            score = float(parts[1][2:])
                        except (ValueError, TypeError):
                            score = 0.0
                    else:
                        score = 1.0
                    locales.append((parts[0], score))
                if locales:
                    locales.sort(key=lambda (l, s): s, reverse=True)
                    codes = [l[0] for l in locales]
                    return locale.get(*codes)
            return locale.get(default)
    
    @property
    def current_user(self):
        """The authenticated user for this request.

        Determined by either get_current_user, which you can override to
        set the user based on, e.g., a cookie. If that method is not
        overridden, this method always returns None.

        We lazy-load the current user the first time this method is called
        and cache the result after that.
        """
        if not hasattr(self, "_current_user"):
            self._current_user = self.get_current_user()
        return self._current_user
    
    def get_current_user(self):
        """Override to determine the current user from, e.g., a cookie.

        The default implementation returns None (no authenticated user).
        """
        return None
    
    def get_login_url(self):
        """Override to customize the login URL based on the request.

        By default, we use the 'login_url' application setting
        (required when using @tornado.web.authenticated).
        """
        self.require_setting("login_url", "@tornado.web.authenticated")
        return self.application.settings["login_url"]
    
    def get_template_path(self):
        """Override to customize template path for each handler.

        By default, we use the 'template_path' application setting.
        Return None to load templates relative to the calling file.
        """
        return self.application.settings.get("template_path")
    
    @property
    def xsrf_token(self):
        """The XSRF-prevention token for the current user/session.

        To prevent cross-site request forgery, we set an '_xsrf' cookie
        and include the same '_xsrf' value as an argument with all POST
        requests. If the two do not match, we reject the form submission
        as a potential forgery.

        See http://en.wikipedia.org/wiki/Cross-site_request_forgery
        """
        if not hasattr(self, "_xsrf_token"):
            token = self.get_cookie("_xsrf")
            if not token:
                # First visit: mint a random token and persist it as a
                # cookie (session cookie for anonymous users).
                token = binascii.b2a_hex(uuid.uuid4().bytes)
                expires_days = 30 if self.current_user else None
                self.set_cookie("_xsrf", token, expires_days=expires_days)
            self._xsrf_token = token
        return self._xsrf_token
    
        def check_xsrf_cookie(self):
            """Verifies that the '_xsrf' cookie matches the '_xsrf' argument.
    
            To prevent cross-site request forgery, we set an '_xsrf' cookie
            and include the same '_xsrf' value as an argument with all POST
            requests. If the two do not match, we reject the form submission
            as a potential forgery.
    
            See http://en.wikipedia.org/wiki/Cross-site_request_forgery
            """
            if self.request.headers.get("X-Requested-With") == "XMLHttpRequest":
                return
            token = self.get_argument("_xsrf", None)
            if not token:
                raise HTTPError(403, "'_xsrf' argument missing from POST")
            if self.xsrf_token != token:
                raise HTTPError(403, "XSRF cookie does not match POST argument")
    
        def xsrf_form_html(self):
            """An HTML  element to be included with all POST forms.
    
            It defines the _xsrf input value, which we check on all POST
            requests to prevent cross-site request forgery. If you have set
            the 'xsrf_cookies' application setting, you must include this
            HTML within all of your HTML forms.
    
            See check_xsrf_cookie() above for more information.
            """
            return ''
    
        def static_url(self, path):
            """Returns a static URL for the given relative static file path.
    
            This method requires you set the 'static_path' setting in your
            application (which specifies the root directory of your static
            files).
    
            We append ?v= to the returned URL, which makes our
            static file handler set an infinite expiration header on the
            returned content. The signature is based on the content of the
            file.
    
            If this handler has a "include_host" attribute, we include the
            full host for every static URL, including the "http://". Set
            this attribute for handlers whose output needs non-relative static
            path names.
            """
            self.require_setting("static_path", "static_url")
            if not hasattr(RequestHandler, "_static_hashes"):
                RequestHandler._static_hashes = {}
            hashes = RequestHandler._static_hashes
            if path not in hashes:
                try:
                    f = open(os.path.join(
                        self.application.settings["static_path"], path))
                    hashes[path] = hashlib.md5(f.read()).hexdigest()
                    f.close()
                except:
                    logging.error("Could not open static file %r", path)
                    hashes[path] = None
            base = self.request.protocol + "://" + self.request.host \
                if getattr(self, "include_host", False) else ""
            static_url_prefix = self.settings.get('static_url_prefix', '/static/')
            if hashes.get(path):
                return base + static_url_prefix + path + "?v=" + hashes[path][:5]
            else:
                return base + static_url_prefix + path
    
        def async_callback(self, callback, *args, **kwargs):
            """Wrap callbacks with this if they are used on asynchronous requests.
    
            Catches exceptions and properly finishes the request.
            """
            if callback is None:
                return None
            if args or kwargs:
                callback = functools.partial(callback, *args, **kwargs)
            def wrapper(*args, **kwargs):
                try:
                    return callback(*args, **kwargs)
                except Exception, e:
                    if self._headers_written:
                        logging.error("Exception after headers written",
                                      exc_info=True)
                    else:
                        self._handle_request_exception(e)
            return wrapper
    
        def require_setting(self, name, feature="this feature"):
            """Raises an exception if the given app setting is not defined."""
            if not self.application.settings.get(name):
                raise Exception("You must define the '%s' setting in your "
                                "application to use %s" % (name, feature))
    
        def reverse_url(self, name, *args):
            return self.application.reverse_url(name, *args)
    
        def _execute(self, transforms, *args, **kwargs):
            """Executes this request with the given output transforms."""
            self._transforms = transforms
            try:
                if self.request.method not in self.SUPPORTED_METHODS:
                    raise HTTPError(405)
                # If XSRF cookies are turned on, reject form submissions without
                # the proper cookie
                if self.request.method == "POST" and \
                   self.application.settings.get("xsrf_cookies"):
                    self.check_xsrf_cookie()
                self.prepare()
                if not self._finished:
                    getattr(self, self.request.method.lower())(*args, **kwargs)
                    if self._auto_finish and not self._finished:
                        self.finish()
            except Exception, e:
                self._handle_request_exception(e)
    
        def _generate_headers(self):
            lines = [self.request.version + " " + str(self._status_code) + " " +
                     httplib.responses[self._status_code]]
            lines.extend(["%s: %s" % (n, v) for n, v in self._headers.iteritems()])
            for cookie_dict in getattr(self, "_new_cookies", []):
                for cookie in cookie_dict.values():
                    lines.append("Set-Cookie: " + cookie.OutputString(None))
            return "\r\n".join(lines) + "\r\n\r\n"
    
        def _log(self):
            if self._status_code < 400:
                log_method = logging.info
            elif self._status_code < 500:
                log_method = logging.warning
            else:
                log_method = logging.error
            request_time = 1000.0 * self.request.request_time()
            log_method("%d %s %.2fms", self._status_code,
                       self._request_summary(), request_time)
    
        def _request_summary(self):
            return self.request.method + " " + self.request.uri + " (" + \
                self.request.remote_ip + ")"
    
        def _handle_request_exception(self, e):
            if isinstance(e, HTTPError):
                if e.log_message:
                    format = "%d %s: " + e.log_message
                    args = [e.status_code, self._request_summary()] + list(e.args)
                    logging.warning(format, *args)
                if e.status_code not in httplib.responses:
                    logging.error("Bad HTTP status code: %d", e.status_code)
                    self.send_error(500, exception=e)
                else:
                    self.send_error(e.status_code, exception=e)
            else:
                logging.error("Uncaught exception %s\n%r", self._request_summary(),
                              self.request, exc_info=e)
                self.send_error(500, exception=e)
    
        def _ui_module(self, name, module):
            def render(*args, **kwargs):
                if not hasattr(self, "_active_modules"):
                    self._active_modules = {}
                if name not in self._active_modules:
                    self._active_modules[name] = module(self)
                rendered = self._active_modules[name].render(*args, **kwargs)
                return rendered
            return render
    
        def _ui_method(self, method):
            return lambda *args, **kwargs: method(self, *args, **kwargs)
    
    
    def asynchronous(method):
        """Wrap request handler methods with this if they are asynchronous.
    
        If this decorator is given, the response is not finished when the
        method returns. It is up to the request handler to call self.finish()
        to finish the HTTP request. Without this decorator, the request is
        automatically finished when the get() or post() method returns.
    
           class MyRequestHandler(web.RequestHandler):
               @web.asynchronous
               def get(self):
                  http = httpclient.AsyncHTTPClient()
                  http.fetch("http://friendfeed.com/", self._on_download)
    
               def _on_download(self, response):
                  self.write("Downloaded!")
                  self.finish()
    
        """
        @functools.wraps(method)
        def wrapper(self, *args, **kwargs):
            if self.application._wsgi:
                raise Exception("@asynchronous is not supported for WSGI apps")
            self._auto_finish = False
            return method(self, *args, **kwargs)
        return wrapper
    
    
    def removeslash(method):
        """Use this decorator to remove trailing slashes from the request path.
    
        For example, a request to '/foo/' would redirect to '/foo' with this
        decorator. Your request handler mapping should use a regular expression
        like r'/foo/*' in conjunction with using the decorator.
        """
        @functools.wraps(method)
        def wrapper(self, *args, **kwargs):
            if self.request.path.endswith("/"):
                if self.request.method == "GET":
                    uri = self.request.path.rstrip("/")
                    if self.request.query: uri += "?" + self.request.query
                    self.redirect(uri)
                    return
                raise HTTPError(404)
            return method(self, *args, **kwargs)
        return wrapper
    
    
    def addslash(method):
        """Use this decorator to add a missing trailing slash to the request path.
    
        For example, a request to '/foo' would redirect to '/foo/' with this
        decorator. Your request handler mapping should use a regular expression
        like r'/foo/?' in conjunction with using the decorator.
        """
        @functools.wraps(method)
        def wrapper(self, *args, **kwargs):
            if not self.request.path.endswith("/"):
                if self.request.method == "GET":
                    uri = self.request.path + "/"
                    if self.request.query: uri += "?" + self.request.query
                    self.redirect(uri)
                    return
                raise HTTPError(404)
            return method(self, *args, **kwargs)
        return wrapper
    
    
class Application(object):
    """A collection of request handlers that make up a web application.

    Instances of this class are callable and can be passed directly to
    HTTPServer to serve the application:

        application = web.Application([
            (r"/", MainPageHandler),
        ])
        http_server = httpserver.HTTPServer(application)
        http_server.listen(8080)
        ioloop.IOLoop.instance().start()

    The constructor for this class takes in a list of URLSpec objects
    or (regexp, request_class) tuples. When we receive requests, we
    iterate over the list in order and instantiate an instance of the
    first request class whose regexp matches the request path.

    Each tuple can contain an optional third element, which should be a
    dictionary if it is present. That dictionary is passed as keyword
    arguments to the contructor of the handler. This pattern is used
    for the StaticFileHandler below:

        application = web.Application([
            (r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
        ])

    We support virtual hosts with the add_handlers method, which takes in
    a host regular expression as the first argument:

        application.add_handlers(r"www\.myhost\.com", [
            (r"/article/([0-9]+)", ArticleHandler),
        ])

    You can serve static files by sending the static_path setting as a
    keyword argument. We will serve those files from the /static/ URI
    (this is configurable with the static_url_prefix setting),
    and we will serve /favicon.ico and /robots.txt from the same directory.
    """
    def __init__(self, handlers=None, default_host="", transforms=None,
                 wsgi=False, **settings):
        # Default transform chain: optional gzip, then chunked encoding.
        # An explicit transforms list overrides the defaults entirely.
        if transforms is None:
            self.transforms = []
            if settings.get("gzip"):
                self.transforms.append(GZipContentEncoding)
            self.transforms.append(ChunkedTransferEncoding)
        else:
            self.transforms = transforms
        self.handlers = []
        self.named_handlers = {}
        self.default_host = default_host
        self.settings = settings
        self.ui_modules = {}
        self.ui_methods = {}
        self._wsgi = wsgi
        self._load_ui_modules(settings.get("ui_modules", {}))
        self._load_ui_methods(settings.get("ui_methods", {}))
        if self.settings.get("static_path"):
            path = self.settings["static_path"]
            handlers = list(handlers or [])
            static_url_prefix = settings.get("static_url_prefix",
                                             "/static/")
            # Static routes are prepended so they take precedence over
            # user-supplied handlers with overlapping patterns.
            handlers = [
                (re.escape(static_url_prefix) + r"(.*)", StaticFileHandler,
                 dict(path=path)),
                (r"/(favicon\.ico)", StaticFileHandler, dict(path=path)),
                (r"/(robots\.txt)", StaticFileHandler, dict(path=path)),
            ] + handlers
        if handlers: self.add_handlers(".*$", handlers)

        # Automatically reload modified modules
        if self.settings.get("debug") and not wsgi:
            import autoreload
            autoreload.start()

    def add_handlers(self, host_pattern, host_handlers):
        """Appends the given handlers to our handler list."""
        if not host_pattern.endswith("$"):
            host_pattern += "$"
        handlers = []
        # The handlers with the wildcard host_pattern are a special
        # case - they're added in the constructor but should have lower
        # precedence than the more-precise handlers added later.
        # If a wildcard handler group exists, it should always be last
        # in the list, so insert new groups just before it.
        if self.handlers and self.handlers[-1][0].pattern == '.*$':
            self.handlers.insert(-1, (re.compile(host_pattern), handlers))
        else:
            self.handlers.append((re.compile(host_pattern), handlers))

        for spec in host_handlers:
            # Tuples are normalized into URLSpec objects; a third tuple
            # element, if present, is the handler's constructor kwargs.
            if type(spec) is type(()):
                assert len(spec) in (2, 3)
                pattern = spec[0]
                handler = spec[1]
                if len(spec) == 3:
                    kwargs = spec[2]
                else:
                    kwargs = {}
                spec = URLSpec(pattern, handler, kwargs)
            handlers.append(spec)
            if spec.name:
                # Named specs power reverse_url(); later names win.
                if spec.name in self.named_handlers:
                    logging.warning(
                        "Multiple handlers named %s; replacing previous value",
                        spec.name)
                self.named_handlers[spec.name] = spec

    def add_transform(self, transform_class):
        """Adds the given OutputTransform to our transform list."""
        self.transforms.append(transform_class)

    def _get_host_handlers(self, request):
        # Match the request's host (sans port) against each host pattern
        # in registration order; first match wins.
        host = request.host.lower().split(':')[0]
        for pattern, handlers in self.handlers:
            if pattern.match(host):
                return handlers
        # Look for default host if not behind load balancer (for debugging)
        if "X-Real-Ip" not in request.headers:
            for pattern, handlers in self.handlers:
                if pattern.match(self.default_host):
                    return handlers
        return None

    def _load_ui_methods(self, methods):
        # Accepts a module, a list of modules/dicts, or a dict of
        # name -> callable; recurses until it reaches dicts.
        if type(methods) is types.ModuleType:
            self._load_ui_methods(dict((n, getattr(methods, n))
                                       for n in dir(methods)))
        elif isinstance(methods, list):
            for m in methods: self._load_ui_methods(m)
        else:
            for name, fn in methods.iteritems():
                # Skip private names, non-callables, and names that start
                # with an upper-case letter (those are UI module classes).
                if not name.startswith("_") and hasattr(fn, "__call__") \
                   and name[0].lower() == name[0]:
                    self.ui_methods[name] = fn

    def _load_ui_modules(self, modules):
        # Accepts a module, a list of modules/dicts, or a dict of
        # name -> UIModule subclass; recurses until it reaches dicts.
        if type(modules) is types.ModuleType:
            self._load_ui_modules(dict((n, getattr(modules, n))
                                       for n in dir(modules)))
        elif isinstance(modules, list):
            for m in modules: self._load_ui_modules(m)
        else:
            assert isinstance(modules, dict)
            for name, cls in modules.iteritems():
                try:
                    # issubclass raises TypeError for non-class values;
                    # those are silently skipped.
                    if issubclass(cls, UIModule):
                        self.ui_modules[name] = cls
                except TypeError:
                    pass

    def __call__(self, request):
        """Called by HTTPServer to execute the request."""
        transforms = [t(request) for t in self.transforms]
        handler = None
        args = []
        kwargs = {}
        handlers = self._get_host_handlers(request)
        if not handlers:
            # Unknown virtual host: bounce the client to the default host.
            handler = RedirectHandler(
                request, "http://" + self.default_host + "/")
        else:
            for spec in handlers:
                match = spec.regex.match(request.path)
                if match:
                    handler = spec.handler_class(self, request, **spec.kwargs)
                    # Pass matched groups to the handler.  Since
                    # match.groups() includes both named and unnamed groups,
                    # we want to use either groups or groupdict but not both.
                    kwargs = dict((k, urllib.unquote(v))
                                  for (k, v) in match.groupdict().iteritems())
                    if kwargs:
                        args = []
                    else:
                        args = [urllib.unquote(s) for s in match.groups()]
                    break
            if not handler:
                handler = ErrorHandler(self, request, 404)

        # In debug mode, re-compile templates and reload static files on every
        # request so you don't need to restart to see changes
        if self.settings.get("debug"):
            if getattr(RequestHandler, "_templates", None):
              map(lambda loader: loader.reset(),
                  RequestHandler._templates.values())
            RequestHandler._static_hashes = {}

        handler._execute(transforms, *args, **kwargs)
        return handler

    def reverse_url(self, name, *args):
        """Returns a URL path for handler named `name`

        The handler must be added to the application as a named URLSpec
        """
        if name in self.named_handlers:
            return self.named_handlers[name].reverse(*args)
        raise KeyError("%s not found in named urls" % name)
    
    
    class HTTPError(Exception):
        """An exception that will turn into an HTTP error response."""
        def __init__(self, status_code, log_message=None, *args):
            self.status_code = status_code
            self.log_message = log_message
            self.args = args
    
        def __str__(self):
            message = "HTTP %d: %s" % (
                self.status_code, httplib.responses[self.status_code])
            if self.log_message:
                return message + " (" + (self.log_message % self.args) + ")"
            else:
                return message
    
    
    class ErrorHandler(RequestHandler):
        """Generates an error response with status_code for all requests."""
        def __init__(self, application, request, status_code):
            RequestHandler.__init__(self, application, request)
            self.set_status(status_code)
    
        def prepare(self):
            raise HTTPError(self._status_code)
    
    
    class RedirectHandler(RequestHandler):
        """Redirects the client to the given URL for all GET requests.
    
        You should provide the keyword argument "url" to the handler, e.g.:
    
            application = web.Application([
                (r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
            ])
        """
        def __init__(self, application, request, url, permanent=True):
            RequestHandler.__init__(self, application, request)
            self._url = url
            self._permanent = permanent
    
        def get(self):
            self.redirect(self._url, permanent=self._permanent)
    
    
    class StaticFileHandler(RequestHandler):
        """A simple handler that can serve static content from a directory.
    
        To map a path to this handler for a static data directory /var/www,
        you would add a line to your application like:
    
            application = web.Application([
                (r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
            ])
    
        The local root directory of the content should be passed as the "path"
        argument to the handler.
    
        To support aggressive browser caching, if the argument "v" is given
        with the path, we set an infinite HTTP expiration header. So, if you
        want browsers to cache a file indefinitely, send them to, e.g.,
        /static/images/myimage.png?v=xxx.
        """
        def __init__(self, application, request, path):
            RequestHandler.__init__(self, application, request)
            self.root = os.path.abspath(path) + os.path.sep
    
        def head(self, path):
            self.get(path, include_body=False)
    
        def get(self, path, include_body=True):
            abspath = os.path.abspath(os.path.join(self.root, path))
            if not abspath.startswith(self.root):
                raise HTTPError(403, "%s is not in root static directory", path)
            if not os.path.exists(abspath):
                raise HTTPError(404)
            if not os.path.isfile(abspath):
                raise HTTPError(403, "%s is not a file", path)
    
            stat_result = os.stat(abspath)
            modified = datetime.datetime.fromtimestamp(stat_result[stat.ST_MTIME])
    
            self.set_header("Last-Modified", modified)
            if "v" in self.request.arguments:
                self.set_header("Expires", datetime.datetime.utcnow() + \
                                           datetime.timedelta(days=365*10))
                self.set_header("Cache-Control", "max-age=" + str(86400*365*10))
            else:
                self.set_header("Cache-Control", "public")
            mime_type, encoding = mimetypes.guess_type(abspath)
            if mime_type:
                self.set_header("Content-Type", mime_type)
    
            self.set_extra_headers(path)
    
            # Check the If-Modified-Since, and don't send the result if the
            # content has not been modified
            ims_value = self.request.headers.get("If-Modified-Since")
            if ims_value is not None:
                date_tuple = email.utils.parsedate(ims_value)
                if_since = datetime.datetime.fromtimestamp(time.mktime(date_tuple))
                if if_since >= modified:
                    self.set_status(304)
                    return
    
            if not include_body:
                return
            self.set_header("Content-Length", stat_result[stat.ST_SIZE])
            file = open(abspath, "rb")
            try:
                self.write(file.read())
            finally:
                file.close()
    
        def set_extra_headers(self, path):
          """For subclass to add extra headers to the response"""
          pass
    
    
    class FallbackHandler(RequestHandler):
        """A RequestHandler that wraps another HTTP server callback.
    
        The fallback is a callable object that accepts an HTTPRequest,
        such as an Application or tornado.wsgi.WSGIContainer.  This is most
        useful to use both tornado RequestHandlers and WSGI in the same server.
        Typical usage:
            wsgi_app = tornado.wsgi.WSGIContainer(
                django.core.handlers.wsgi.WSGIHandler())
            application = tornado.web.Application([
                (r"/foo", FooHandler),
                (r".*", FallbackHandler, dict(fallback=wsgi_app),
            ])
        """
        def __init__(self, app, request, fallback):
            RequestHandler.__init__(self, app, request)
            self.fallback = fallback
    
        def prepare(self):
            self.fallback(self.request)
            self._finished = True
    
    
    class OutputTransform(object):
        """A transform modifies the result of an HTTP request (e.g., GZip encoding)
    
        A new transform instance is created for every request. See the
        ChunkedTransferEncoding example below if you want to implement a
        new Transform.
        """
        def __init__(self, request):
            pass
    
        def transform_first_chunk(self, headers, chunk, finishing):
            return headers, chunk
    
        def transform_chunk(self, chunk, finishing):
            return chunk
    
    
class GZipContentEncoding(OutputTransform):
    """Applies the gzip content encoding to the response.

    See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
    """
    # Only these (compressible, text-like) content types are gzipped.
    CONTENT_TYPES = set([
        "text/plain", "text/html", "text/css", "text/xml",
        "application/x-javascript", "application/xml", "application/atom+xml",
        "text/javascript", "application/json", "application/xhtml+xml"])
    # Finished responses shorter than this many bytes are not compressed.
    MIN_LENGTH = 5

    def __init__(self, request):
        # Gzip only for HTTP/1.1 clients that advertise gzip support.
        self._gzipping = request.supports_http_1_1() and \
            "gzip" in request.headers.get("Accept-Encoding", "")

    def transform_first_chunk(self, headers, chunk, finishing):
        if self._gzipping:
            ctype = headers.get("Content-Type", "").split(";")[0]
            # Compress only known content types; skip tiny finished
            # responses, streaming responses that declared Content-Length,
            # and responses that already carry a Content-Encoding.
            self._gzipping = (ctype in self.CONTENT_TYPES) and \
                (not finishing or len(chunk) >= self.MIN_LENGTH) and \
                (finishing or "Content-Length" not in headers) and \
                ("Content-Encoding" not in headers)
        if self._gzipping:
            headers["Content-Encoding"] = "gzip"
            # Incremental gzip stream buffered in memory; _gzip_pos tracks
            # how many compressed bytes have already been emitted.
            self._gzip_value = cStringIO.StringIO()
            self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value)
            self._gzip_pos = 0
            chunk = self.transform_chunk(chunk, finishing)
            if "Content-Length" in headers:
                # Replace the declared length with the compressed size.
                headers["Content-Length"] = str(len(chunk))
        return headers, chunk

    def transform_chunk(self, chunk, finishing):
        if self._gzipping:
            self._gzip_file.write(chunk)
            if finishing:
                # close() flushes the gzip trailer into the buffer.
                self._gzip_file.close()
            else:
                self._gzip_file.flush()
            # Emit only the compressed bytes produced since the last call.
            chunk = self._gzip_value.getvalue()
            if self._gzip_pos > 0:
                chunk = chunk[self._gzip_pos:]
            self._gzip_pos += len(chunk)
        return chunk
    
    
    class ChunkedTransferEncoding(OutputTransform):
        """Applies the chunked transfer encoding to the response.
    
        See http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.6.1
        """
        def __init__(self, request):
            self._chunking = request.supports_http_1_1()
    
        def transform_first_chunk(self, headers, chunk, finishing):
            if self._chunking:
                # No need to chunk the output if a Content-Length is specified
                if "Content-Length" in headers or "Transfer-Encoding" in headers:
                    self._chunking = False
                else:
                    headers["Transfer-Encoding"] = "chunked"
                    chunk = self.transform_chunk(chunk, finishing)
            return headers, chunk
    
        def transform_chunk(self, block, finishing):
            if self._chunking:
                # Don't write out empty chunks because that means END-OF-STREAM
                # with chunked encoding
                if block:
                    block = ("%x" % len(block)) + "\r\n" + block + "\r\n"
                if finishing:
                    block += "0\r\n\r\n"
            return block
    
    
    def authenticated(method):
        """Decorate methods with this to require that the user be logged in."""
        @functools.wraps(method)
        def wrapper(self, *args, **kwargs):
            if not self.current_user:
                if self.request.method == "GET":
                    url = self.get_login_url()
                    if "?" not in url:
                        url += "?" + urllib.urlencode(dict(next=self.request.uri))
                    self.redirect(url)
                    return
                raise HTTPError(403)
            return method(self, *args, **kwargs)
        return wrapper
    
    
    class UIModule(object):
        """A UI re-usable, modular unit on a page.
    
        UI modules often execute additional queries, and they can include
        additional CSS and JavaScript that will be included in the output
        page, which is automatically inserted on page render.
        """
        def __init__(self, handler):
            self.handler = handler
            self.request = handler.request
            self.ui = handler.ui
            self.current_user = handler.current_user
            self.locale = handler.locale
    
        def render(self, *args, **kwargs):
            raise NotImplementedError()
    
        def embedded_javascript(self):
            """Returns a JavaScript string that will be embedded in the page."""
            return None
    
        def javascript_files(self):
            """Returns a list of JavaScript files required by this module."""
            return None
    
        def embedded_css(self):
            """Returns a CSS string that will be embedded in the page."""
            return None
    
        def css_files(self):
            """Returns a list of CSS files required by this module."""
            return None
    
        def html_head(self):
            """Returns a CSS string that will be put in the  element"""
            return None
    
        def html_body(self):
            """Returns an HTML string that will be put in the  element"""
            return None
    
        def render_string(self, path, **kwargs):
            return self.handler.render_string(path, **kwargs)
    
    class URLSpec(object):
        """Specifies mappings between URLs and handlers."""
        def __init__(self, pattern, handler_class, kwargs={}, name=None):
            """Creates a URLSpec.
    
            Parameters:
            pattern: Regular expression to be matched.  Any groups in the regex
                will be passed in to the handler's get/post/etc methods as
                arguments.
            handler_class: RequestHandler subclass to be invoked.
            kwargs (optional): A dictionary of additional arguments to be passed
                to the handler's constructor.
            name (optional): A name for this handler.  Used by
                Application.reverse_url.
            """
            if not pattern.endswith('$'):
                pattern += '$'
            self.regex = re.compile(pattern)
            self.handler_class = handler_class
            self.kwargs = kwargs
            self.name = name
            self._path, self._group_count = self._find_groups()
    
        def _find_groups(self):
            """Returns a tuple (reverse string, group count) for a url.
    
            For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method
            would return ('/%s/%s/', 2).
            """
            pattern = self.regex.pattern
            if pattern.startswith('^'):
                pattern = pattern[1:]
            if pattern.endswith('$'):
                pattern = pattern[:-1]
    
            if self.regex.groups != pattern.count('('):
                # The pattern is too complicated for our simplistic matching,
                # so we can't support reversing it.
                return (None, None)
    
            pieces = []
            for fragment in pattern.split('('):
                if ')' in fragment:
                    paren_loc = fragment.index(')')
                    if paren_loc >= 0:
                        pieces.append('%s' + fragment[paren_loc + 1:])
                else:
                    pieces.append(fragment)
    
            return (''.join(pieces), self.regex.groups)
    
        def reverse(self, *args):
            assert self._path is not None, \
                "Cannot reverse url regex " + self.regex.pattern
            assert len(args) == self._group_count, "required number of arguments "\
                "not found"
            if not len(args):
                return self._path
            return self._path % tuple([str(a) for a in args])
    
    url = URLSpec
    
    def _utf8(s):
        if isinstance(s, unicode):
            return s.encode("utf-8")
        assert isinstance(s, str)
        return s
    
    
    def _unicode(s):
        if isinstance(s, str):
            try:
                return s.decode("utf-8")
            except UnicodeDecodeError:
                raise HTTPError(400, "Non-utf8 argument")
        assert isinstance(s, unicode)
        return s
    
    
    def _time_independent_equals(a, b):
        if len(a) != len(b):
            return False
        result = 0
        for x, y in zip(a, b):
            result |= ord(x) ^ ord(y)
        return result == 0
    
    
    class _O(dict):
        """Makes a dictionary behave like an object."""
        def __getattr__(self, name):
            try:
                return self[name]
            except KeyError:
                raise AttributeError(name)
    
        def __setattr__(self, name, value):
            self[name] = value
    bup-0.25/lib/tornado/websocket.py000066400000000000000000000127561225146730500170220ustar00rootroot00000000000000#!/usr/bin/env python
    #
    # Copyright 2009 Facebook
    #
    # Licensed under the Apache License, Version 2.0 (the "License"); you may
    # not use this file except in compliance with the License. You may obtain
    # a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    # License for the specific language governing permissions and limitations
    # under the License.
    
    import functools
    import logging
    import tornado.escape
    import tornado.web
    
class WebSocketHandler(tornado.web.RequestHandler):
    """A request handler for HTML 5 Web Sockets.

    See http://www.w3.org/TR/2009/WD-websockets-20091222/ for details on the
    JavaScript interface. We implement the protocol as specified at
    http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-55.

    Here is an example Web Socket handler that echos back all received messages
    back to the client:

      class EchoWebSocket(websocket.WebSocketHandler):
          def open(self):
              self.receive_message(self.on_message)

          def on_message(self, message):
              self.write_message(u"You said: " + message)
              # receive_message only reads a single message, so call it
              # again to listen for the next one
              self.receive_message(self.on_message)

    Web Sockets are not standard HTTP connections. The "handshake" is HTTP,
    but after the handshake, the protocol is message-based. Consequently,
    most of the Tornado HTTP facilities are not available in handlers of this
    type. The only communication methods available to you are send_message()
    and receive_message(). Likewise, your request handler class should
    implement open() method rather than get() or post().

    If you map the handler above to "/websocket" in your application, you can
    invoke it in JavaScript with:

      var ws = new WebSocket("ws://localhost:8888/websocket");
      ws.onopen = function() {
         ws.send("Hello, world");
      };
      ws.onmessage = function (evt) {
         alert(evt.data);
      };

    This script pops up an alert box that says "You said: Hello, world".
    """
    def __init__(self, application, request):
        tornado.web.RequestHandler.__init__(self, application, request)
        # Talk to the client directly over the connection's stream; the
        # usual HTTP request/response machinery does not apply after the
        # handshake.
        self.stream = request.connection.stream

    def _execute(self, transforms, *args, **kwargs):
        # Overrides RequestHandler dispatch: validate the WebSocket
        # upgrade headers, write the handshake response, then hand
        # control to the subclass's open().  (This early draft protocol
        # has no Sec-WebSocket-Key challenge/response.)
        if self.request.headers.get("Upgrade") != "WebSocket" or \
           self.request.headers.get("Connection") != "Upgrade" or \
           not self.request.headers.get("Origin"):
            message = "Expected WebSocket headers"
            self.stream.write(
                "HTTP/1.1 403 Forbidden\r\nContent-Length: " +
                str(len(message)) + "\r\n\r\n" + message)
            return
        self.stream.write(
            "HTTP/1.1 101 Web Socket Protocol Handshake\r\n"
            "Upgrade: WebSocket\r\n"
            "Connection: Upgrade\r\n"
            "Server: TornadoServer/0.1\r\n"
            "WebSocket-Origin: " + self.request.headers["Origin"] + "\r\n"
            "WebSocket-Location: ws://" + self.request.host +
            self.request.path + "\r\n\r\n")
        # Pass the URL pattern's groups through to open(); async_callback
        # closes the socket if open() raises.
        self.async_callback(self.open)(*args, **kwargs)

    def write_message(self, message):
        """Sends the given message to the client of this Web Socket.

        Dicts are JSON-encoded and unicode is encoded as utf-8 before
        being framed between the 0x00 and 0xff delimiter bytes.
        """
        if isinstance(message, dict):
            message = tornado.escape.json_encode(message)
        if isinstance(message, unicode):
            message = message.encode("utf-8")
        assert isinstance(message, str)
        self.stream.write("\x00" + message + "\xff")

    def receive_message(self, callback):
        """Calls callback when the browser calls send() on this Web Socket.

        Reads the one-byte frame-type marker first; the rest of the frame
        is consumed in _on_frame_type.  Only a single message is read per
        call.
        """
        callback = self.async_callback(callback)
        self.stream.read_bytes(
            1, functools.partial(self._on_frame_type, callback))

    def close(self):
        """Closes this Web Socket.

        The browser will receive the onclose event for the open web socket
        when this method is called.
        """
        self.stream.close()

    def async_callback(self, callback, *args, **kwargs):
        """Wrap callbacks with this if they are used on asynchronous requests.

        Catches exceptions properly and closes this Web Socket if an exception
        is uncaught.
        """
        if args or kwargs:
            callback = functools.partial(callback, *args, **kwargs)
        def wrapper(*args, **kwargs):
            try:
                return callback(*args, **kwargs)
            except Exception, e:
                # exc_info=True logs the traceback; the bound name 'e' is
                # unused.  The socket is torn down on any failure.
                logging.error("Uncaught exception in %s",
                              self.request.path, exc_info=True)
                self.stream.close()
        return wrapper

    def _on_frame_type(self, callback, byte):
        # A set high bit marks a length-prefixed (binary) frame, which
        # this implementation does not handle; text frames end with 0xff.
        if ord(byte) & 0x80 == 0x80:
            raise Exception("Length-encoded format not yet supported")
        self.stream.read_until(
            "\xff", functools.partial(self._on_end_delimiter, callback))

    def _on_end_delimiter(self, callback, frame):
        # Strip the trailing 0xff delimiter and decode, substituting
        # U+FFFD for invalid utf-8 rather than raising.
        callback(frame[:-1].decode("utf-8", "replace"))

    def _not_supported(self, *args, **kwargs):
        """Stub installed over RequestHandler methods that make no sense here."""
        raise Exception("Method not supported for Web Sockets")
    
# Replace the RequestHandler methods that assume an HTTP response is
# being assembled; on a Web Socket they would corrupt the stream, so
# each now raises via _not_supported.
for method in ["write", "redirect", "set_header", "send_error", "set_cookie",
               "set_status", "flush", "finish"]:
    setattr(WebSocketHandler, method, WebSocketHandler._not_supported)
    bup-0.25/lib/tornado/win32_support.py000066400000000000000000000103271225146730500175620ustar00rootroot00000000000000# NOTE: win32 support is currently experimental, and not recommended
    # for production use.
    
    import ctypes
    import ctypes.wintypes
    import os
    import socket
    import errno
    
    
# See: http://msdn.microsoft.com/en-us/library/ms738573(VS.85).aspx
# ioctlsocket(): Winsock call used (by the commented-out F_SETFL path
# below) to toggle socket modes such as non-blocking I/O.
ioctlsocket = ctypes.windll.ws2_32.ioctlsocket
ioctlsocket.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.LONG, ctypes.wintypes.ULONG)
ioctlsocket.restype = ctypes.c_int

# See: http://msdn.microsoft.com/en-us/library/ms724935(VS.85).aspx
# SetHandleInformation(): controls handle inheritance, the closest
# Windows analogue of FD_CLOEXEC; used by fcntl() below.
SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation
SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)
SetHandleInformation.restype = ctypes.wintypes.BOOL

# Flag bit meaning "child processes inherit this handle".
HANDLE_FLAG_INHERIT = 0x00000001
    
    F_GETFD = 1
    F_SETFD = 2
    F_GETFL = 3
    F_SETFL = 4
    
    FD_CLOEXEC = 1
    
    os.O_NONBLOCK = 2048
    
    FIONBIO = 126
    
    
    def fcntl(fd, op, arg=0):
        if op == F_GETFD or op == F_GETFL:
            return 0
        elif op == F_SETFD:
            # Check that the flag is CLOEXEC and translate
            if arg == FD_CLOEXEC:
                success = SetHandleInformation(fd, HANDLE_FLAG_INHERIT, arg)
                if not success:
                    raise ctypes.GetLastError()
            else:
                raise ValueError("Unsupported arg")
        #elif op == F_SETFL:
            ## Check that the flag is NONBLOCK and translate
            #if arg == os.O_NONBLOCK:
                ##pass
                #result = ioctlsocket(fd, FIONBIO, 1)
                #if result != 0:
                    #raise ctypes.GetLastError()
            #else:
                #raise ValueError("Unsupported arg")
        else:
            raise ValueError("Unsupported op")
    
    
class Pipe(object):
    """Create an OS independent asynchronous pipe.

    Emulated with a connected pair of loopback TCP sockets (on Windows,
    select() works only on sockets): write() sends on one end and read()
    receives on the other.  Both ends are non-blocking once constructed.
    """
    def __init__(self):
        # Based on Zope async.py: http://svn.zope.org/zc.ngi/trunk/src/zc/ngi/async.py

        self.writer = socket.socket()
        # Disable buffering -- pulling the trigger sends 1 byte,
        # and we want that sent immediately, to wake up ASAP.
        self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

        count = 0
        while 1:
            count += 1
            # Bind to a local port; for efficiency, let the OS pick
            # a free port for us.
            # Unfortunately, stress tests showed that we may not
            # be able to connect to that port ("Address already in
            # use") despite that the OS picked it.  This appears
            # to be a race bug in the Windows socket implementation.
            # So we loop until a connect() succeeds (almost always
            # on the first try).  See the long thread at
            # http://mail.zope.org/pipermail/zope/2005-July/160433.html
            # for hideous details.
            a = socket.socket()
            a.bind(("127.0.0.1", 0))
            connect_address = a.getsockname()  # assigned (host, port) pair
            a.listen(1)
            try:
                self.writer.connect(connect_address)
                break    # success
            except socket.error, detail:
                if detail[0] != errno.WSAEADDRINUSE:
                    # "Address already in use" is the only error
                    # I've seen on two WinXP Pro SP2 boxes, under
                    # Pythons 2.3.5 and 2.4.1.
                    raise
                # (10048, 'Address already in use')
                # assert count <= 2 # never triggered in Tim's tests
                if count >= 10:  # I've never seen it go above 2
                    a.close()
                    self.writer.close()
                    raise socket.error("Cannot bind trigger!")
                # Close `a` and try again.  Note:  I originally put a short
                # sleep() here, but it didn't appear to help or hurt.
                a.close()

        # Accept the writer's pending connection to obtain the read end,
        # then switch both ends to non-blocking mode.
        self.reader, addr = a.accept()
        self.reader.setblocking(0)
        self.writer.setblocking(0)
        a.close()
        # File descriptor the I/O loop can select() on for readability.
        self.reader_fd = self.reader.fileno()

    def read(self):
        """Emulate a file descriptors read method"""
        try:
            return self.reader.recv(1)
        except socket.error, ex:
            if ex.args[0] == errno.EWOULDBLOCK:
                # Match pipe semantics: nothing available raises IOError.
                raise IOError
            raise

    def write(self, data):
        """Emulate a file descriptors write method"""
        return self.writer.send(data)
    bup-0.25/lib/tornado/wsgi.py000066400000000000000000000266461225146730500160100ustar00rootroot00000000000000#!/usr/bin/env python
    #
    # Copyright 2009 Facebook
    #
    # Licensed under the Apache License, Version 2.0 (the "License"); you may
    # not use this file except in compliance with the License. You may obtain
    # a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    # License for the specific language governing permissions and limitations
    # under the License.
    
    """WSGI support for the Tornado web framework.
    
    We export WSGIApplication, which is very similar to web.Application, except
    no asynchronous methods are supported (since WSGI does not support
    non-blocking requests properly). If you call self.flush() or other
    asynchronous methods in your request handlers running in a WSGIApplication,
    we throw an exception.
    
    Example usage:
    
        import tornado.web
        import tornado.wsgi
        import wsgiref.simple_server
    
        class MainHandler(tornado.web.RequestHandler):
            def get(self):
                self.write("Hello, world")
    
        if __name__ == "__main__":
            application = tornado.wsgi.WSGIApplication([
                (r"/", MainHandler),
            ])
            server = wsgiref.simple_server.make_server('', 8888, application)
            server.serve_forever()
    
    See the 'appengine' demo for an example of using this module to run
    a Tornado app on Google AppEngine.
    
    Since no asynchronous methods are available for WSGI applications, the
    httpclient and auth modules are both not available for WSGI applications.
    
    We also export WSGIContainer, which lets you run other WSGI-compatible
    frameworks on the Tornado HTTP server and I/O loop. See WSGIContainer for
    details and documentation.
    """
    
    import cgi
    import cStringIO
    import escape
    import httplib
    import httputil
    import logging
    import sys
    import time
    import urllib
    import web
    
    class WSGIApplication(web.Application):
        """A WSGI-equivalent of web.Application.
    
        We support the same interface, but handlers running in a WSGIApplication
        do not support flush() or asynchronous methods.
        """
        def __init__(self, handlers=None, default_host="", **settings):
            web.Application.__init__(self, handlers, default_host, transforms=[],
                                     wsgi=True, **settings)
    
        def __call__(self, environ, start_response):
            handler = web.Application.__call__(self, HTTPRequest(environ))
            assert handler._finished
            status = str(handler._status_code) + " " + \
                httplib.responses[handler._status_code]
            headers = handler._headers.items()
            for cookie_dict in getattr(handler, "_new_cookies", []):
                for cookie in cookie_dict.values():
                    headers.append(("Set-Cookie", cookie.OutputString(None)))
            start_response(status, headers)
            return handler._write_buffer
    
    
class HTTPRequest(object):
    """Mimics httpserver.HTTPRequest for WSGI applications."""
    def __init__(self, environ):
        """Parses the given WSGI environ to construct the request."""
        # Request line: method, path (script name + path info) and query.
        self.method = environ["REQUEST_METHOD"]
        self.path = urllib.quote(environ.get("SCRIPT_NAME", ""))
        self.path += urllib.quote(environ.get("PATH_INFO", ""))
        self.uri = self.path
        self.arguments = {}
        self.query = environ.get("QUERY_STRING", "")
        if self.query:
            self.uri += "?" + self.query
            arguments = cgi.parse_qs(self.query)
            for name, values in arguments.iteritems():
                # Drop empty values; only names with at least one
                # non-empty value are recorded.
                values = [v for v in values if v]
                if values: self.arguments[name] = values
        # WSGI does not expose the client's actual protocol version.
        self.version = "HTTP/1.1"
        self.headers = httputil.HTTPHeaders()
        if environ.get("CONTENT_TYPE"):
            self.headers["Content-Type"] = environ["CONTENT_TYPE"]
        if environ.get("CONTENT_LENGTH"):
            # NOTE: stored as an int, unlike the other header values.
            self.headers["Content-Length"] = int(environ["CONTENT_LENGTH"])
        for key in environ:
            if key.startswith("HTTP_"):
                # Recover "Header-Name" style keys from WSGI's HTTP_ form.
                self.headers[key[5:].replace("_", "-")] = environ[key]
        if self.headers.get("Content-Length"):
            self.body = environ["wsgi.input"].read()
        else:
            self.body = ""
        self.protocol = environ["wsgi.url_scheme"]
        self.remote_ip = environ.get("REMOTE_ADDR", "")
        if environ.get("HTTP_HOST"):
            self.host = environ["HTTP_HOST"]
        else:
            self.host = environ["SERVER_NAME"]

        # Parse request body
        self.files = {}
        content_type = self.headers.get("Content-Type", "")
        if content_type.startswith("application/x-www-form-urlencoded"):
            for name, values in cgi.parse_qs(self.body).iteritems():
                self.arguments.setdefault(name, []).extend(values)
        elif content_type.startswith("multipart/form-data"):
            if 'boundary=' in content_type:
                boundary = content_type.split('boundary=',1)[1]
                if boundary: self._parse_mime_body(boundary)
            else:
                logging.warning("Invalid multipart/form-data")

        self._start_time = time.time()
        self._finish_time = None

    def supports_http_1_1(self):
        """Returns True if this request supports HTTP/1.1 semantics"""
        return self.version == "HTTP/1.1"

    def full_url(self):
        """Reconstructs the full URL for this request."""
        return self.protocol + "://" + self.host + self.uri

    def request_time(self):
        """Returns the amount of time it took for this request to execute."""
        if self._finish_time is None:
            # Request still in flight: report elapsed time so far.
            return time.time() - self._start_time
        else:
            return self._finish_time - self._start_time

    def _parse_mime_body(self, boundary):
        # Parses a multipart/form-data body, filling self.arguments with
        # plain fields and self.files with uploaded files.
        if boundary.startswith('"') and boundary.endswith('"'):
            # The boundary parameter may be quoted.
            boundary = boundary[1:-1]
        # Compute the length of the closing "--boundary--" delimiter
        # (plus CRLF if the body ends with one) so it can be sliced off.
        if self.body.endswith("\r\n"):
            footer_length = len(boundary) + 6
        else:
            footer_length = len(boundary) + 4
        parts = self.body[:-footer_length].split("--" + boundary + "\r\n")
        for part in parts:
            if not part: continue
            eoh = part.find("\r\n\r\n")
            if eoh == -1:
                logging.warning("multipart/form-data missing headers")
                continue
            headers = httputil.HTTPHeaders.parse(part[:eoh])
            name_header = headers.get("Content-Disposition", "")
            if not name_header.startswith("form-data;") or \
               not part.endswith("\r\n"):
                logging.warning("Invalid multipart/form-data")
                continue
            # The value sits between the blank line after the part headers
            # and the trailing CRLF.
            value = part[eoh + 4:-2]
            name_values = {}
            # Parse 'name="..."' / 'filename="..."' parameters from the
            # Content-Disposition header (past the "form-data;" prefix).
            for name_part in name_header[10:].split(";"):
                name, name_value = name_part.strip().split("=", 1)
                name_values[name] = name_value.strip('"').decode("utf-8")
            if not name_values.get("name"):
                logging.warning("multipart/form-data value missing name")
                continue
            name = name_values["name"]
            if name_values.get("filename"):
                # A filename parameter marks this part as a file upload.
                ctype = headers.get("Content-Type", "application/unknown")
                self.files.setdefault(name, []).append(dict(
                    filename=name_values["filename"], body=value,
                    content_type=ctype))
            else:
                self.arguments.setdefault(name, []).append(value)
    
    
    class WSGIContainer(object):
        """Makes a WSGI-compatible function runnable on Tornado's HTTP server.
    
        Wrap a WSGI function in a WSGIContainer and pass it to HTTPServer to
        run it. For example:
    
            def simple_app(environ, start_response):
                status = "200 OK"
                response_headers = [("Content-type", "text/plain")]
                start_response(status, response_headers)
                return ["Hello world!\n"]
    
            container = tornado.wsgi.WSGIContainer(simple_app)
            http_server = tornado.httpserver.HTTPServer(container)
            http_server.listen(8888)
            tornado.ioloop.IOLoop.instance().start()
    
        This class is intended to let other frameworks (Django, web.py, etc)
        run on the Tornado HTTP server and I/O loop. It has not yet been
        thoroughly tested in production.
        """
        def __init__(self, wsgi_application):
            self.wsgi_application = wsgi_application
    
        def __call__(self, request):
            data = {}
            response = []
            def start_response(status, response_headers, exc_info=None):
                data["status"] = status
                data["headers"] = response_headers
                return response.append
            app_response = self.wsgi_application(
                WSGIContainer.environ(request), start_response)
            response.extend(app_response)
            body = "".join(response)
            if hasattr(app_response, "close"):
                app_response.close()
            if not data: raise Exception("WSGI app did not call start_response")
    
            status_code = int(data["status"].split()[0])
            headers = data["headers"]
            header_set = set(k.lower() for (k,v) in headers)
            body = escape.utf8(body)
            if "content-length" not in header_set:
                headers.append(("Content-Length", str(len(body))))
            if "content-type" not in header_set:
                headers.append(("Content-Type", "text/html; charset=UTF-8"))
            if "server" not in header_set:
                headers.append(("Server", "TornadoServer/0.1"))
    
            parts = ["HTTP/1.1 " + data["status"] + "\r\n"]
            for key, value in headers:
                parts.append(escape.utf8(key) + ": " + escape.utf8(value) + "\r\n")
            parts.append("\r\n")
            parts.append(body)
            request.write("".join(parts))
            request.finish()
            self._log(status_code, request)
    
        @staticmethod
        def environ(request):
            hostport = request.host.split(":")
            if len(hostport) == 2:
                host = hostport[0]
                port = int(hostport[1])
            else:
                host = request.host
                port = 443 if request.protocol == "https" else 80
            environ = {
                "REQUEST_METHOD": request.method,
                "SCRIPT_NAME": "",
                "PATH_INFO": request.path,
                "QUERY_STRING": request.query,
                "REMOTE_ADDR": request.remote_ip,
                "SERVER_NAME": host,
                "SERVER_PORT": port,
                "SERVER_PROTOCOL": request.version,
                "wsgi.version": (1, 0),
                "wsgi.url_scheme": request.protocol,
                "wsgi.input": cStringIO.StringIO(escape.utf8(request.body)),
                "wsgi.errors": sys.stderr,
                "wsgi.multithread": False,
                "wsgi.multiprocess": True,
                "wsgi.run_once": False,
            }
            if "Content-Type" in request.headers:
                environ["CONTENT_TYPE"] = request.headers["Content-Type"]
            if "Content-Length" in request.headers:
                environ["CONTENT_LENGTH"] = request.headers["Content-Length"]
            for key, value in request.headers.iteritems():
                environ["HTTP_" + key.replace("-", "_").upper()] = value
            return environ
    
        def _log(self, status_code, request):
            if status_code < 400:
                log_method = logging.info
            elif status_code < 500:
                log_method = logging.warning
            else:
                log_method = logging.error
            request_time = 1000.0 * request.request_time()
            summary = request.method + " " + request.uri + " (" + \
                request.remote_ip + ")"
            log_method("%d %s %.2fms", status_code, summary, request_time)
    
    bup-0.25/lib/web/000077500000000000000000000000001225146730500135565ustar00rootroot00000000000000bup-0.25/lib/web/list-directory.html000066400000000000000000000027241225146730500174260ustar00rootroot00000000000000
    
        
            
            Directory listing for {{ escape(path) }}
            
        
        
            
    {% if files_hidden %}
    {% if hidden_shown %} Hide hidden files {% else %} Show hidden files {% end %}
    {% end %} {% for (display, link, size) in dir_contents %} {% end %}
    Name Size
    {{ display }} {% if size != None %}{{ size }}{% else %} {% end %}
    bup-0.25/lib/web/static/000077500000000000000000000000001225146730500150455ustar00rootroot00000000000000bup-0.25/lib/web/static/styles.css000066400000000000000000000003311225146730500170770ustar00rootroot00000000000000body { font-family: sans-serif } #wrapper { width: 90%; margin: auto; } #breadcrumb { margin: 10px 0; } table { width: auto; } th { text-align: left; } .dir-size { padding-left:15px; }bup-0.25/main.py000077500000000000000000000146141225146730500135420ustar00rootroot00000000000000#!/usr/bin/env python import sys, os, subprocess, signal, getopt argv = sys.argv exe = os.path.realpath(argv[0]) exepath = os.path.split(exe)[0] or '.' exeprefix = os.path.split(os.path.abspath(exepath))[0] # fix the PYTHONPATH to include our lib dir if os.path.exists("%s/lib/bup/cmd/." % exeprefix): # installed binary in /.../bin. # eg. /usr/bin/bup means /usr/lib/bup/... is where our libraries are. cmdpath = "%s/lib/bup/cmd" % exeprefix libpath = "%s/lib/bup" % exeprefix resourcepath = libpath else: # running from the src directory without being installed first cmdpath = os.path.join(exepath, 'cmd') libpath = os.path.join(exepath, 'lib') resourcepath = libpath sys.path[:0] = [libpath] os.environ['PYTHONPATH'] = libpath + ':' + os.environ.get('PYTHONPATH', '') os.environ['BUP_MAIN_EXE'] = os.path.abspath(exe) os.environ['BUP_RESOURCE_PATH'] = resourcepath from bup import helpers from bup.helpers import * # after running 'bup newliner', the tty_width() ioctl won't work anymore os.environ['WIDTH'] = str(tty_width()) def usage(msg=""): log('Usage: bup [-?|--help] [-d BUP_DIR] [--debug] [--profile] ' ' [options...]\n\n') common = dict( ftp = 'Browse backup sets using an ftp-like client', fsck = 'Check backup sets for damage and add redundancy information', fuse = 'Mount your backup sets as a filesystem', help = 'Print detailed help for the given command', index = 'Create or display the index of files to back up', on = 'Backup a remote machine to the local one', restore = 
'Extract files from a backup set', save = 'Save files into a backup set (note: run "bup index" first)', tag = 'Tag commits for easier access', web = 'Launch a web server to examine backup sets', ) log('Common commands:\n') for cmd,synopsis in sorted(common.items()): log(' %-10s %s\n' % (cmd, synopsis)) log('\n') log('Other available commands:\n') cmds = [] for c in sorted(os.listdir(cmdpath) + os.listdir(exepath)): if c.startswith('bup-') and c.find('.') < 0: cname = c[4:] if cname not in common: cmds.append(c[4:]) log(columnate(cmds, ' ')) log('\n') log("See 'bup help COMMAND' for more information on " + "a specific command.\n") if msg: log("\n%s\n" % msg) sys.exit(99) if len(argv) < 2: usage() # Handle global options. try: optspec = ['help', 'version', 'debug', 'profile', 'bup-dir='] global_args, subcmd = getopt.getopt(argv[1:], '?VDd:', optspec) except getopt.GetoptError, ex: usage('error: %s' % ex.msg) help_requested = None do_profile = False for opt in global_args: if opt[0] in ['-?', '--help']: help_requested = True elif opt[0] in ['-V', '--version']: subcmd = ['version'] elif opt[0] in ['-D', '--debug']: helpers.buglvl += 1 os.environ['BUP_DEBUG'] = str(helpers.buglvl) elif opt[0] in ['--profile']: do_profile = True elif opt[0] in ['-d', '--bup-dir']: os.environ['BUP_DIR'] = opt[1] else: usage('error: unexpected option "%s"' % opt[0]) # Make BUP_DIR absolute, so we aren't affected by chdir (i.e. save -C, etc.). 
if 'BUP_DIR' in os.environ: os.environ['BUP_DIR'] = os.path.abspath(os.environ['BUP_DIR']) if len(subcmd) == 0: if help_requested: subcmd = ['help'] else: usage() if help_requested and subcmd[0] != 'help': subcmd = ['help'] + subcmd if len(subcmd) > 1 and subcmd[1] == '--help' and subcmd[0] != 'help': subcmd = ['help', subcmd[0]] + subcmd[2:] subcmd_name = subcmd[0] if not subcmd_name: usage() def subpath(s): sp = os.path.join(exepath, 'bup-%s' % s) if not os.path.exists(sp): sp = os.path.join(cmdpath, 'bup-%s' % s) return sp subcmd[0] = subpath(subcmd_name) if not os.path.exists(subcmd[0]): usage('error: unknown command "%s"' % subcmd_name) already_fixed = atoi(os.environ.get('BUP_FORCE_TTY')) if subcmd_name in ['mux', 'ftp', 'help']: already_fixed = True fix_stdout = not already_fixed and os.isatty(1) fix_stderr = not already_fixed and os.isatty(2) def force_tty(): if fix_stdout or fix_stderr: amt = (fix_stdout and 1 or 0) + (fix_stderr and 2 or 0) os.environ['BUP_FORCE_TTY'] = str(amt) os.setsid() # make sure ctrl-c is sent just to us, not to child too if fix_stdout or fix_stderr: realf = fix_stderr and 2 or 1 drealf = os.dup(realf) # Popen goes crazy with stdout=2 n = subprocess.Popen([subpath('newliner')], stdin=subprocess.PIPE, stdout=drealf, close_fds=True, preexec_fn=force_tty) os.close(drealf) outf = fix_stdout and n.stdin.fileno() or None errf = fix_stderr and n.stdin.fileno() or None else: n = None outf = None errf = None ret = 95 p = None forward_signals = True def handler(signum, frame): debug1('\nbup: signal %d received\n' % signum) if not p or not forward_signals: return if signum != signal.SIGTSTP: os.kill(p.pid, signum) else: # SIGTSTP: stop the child, then ourselves. os.kill(p.pid, signal.SIGSTOP) signal.signal(signal.SIGTSTP, signal.SIG_DFL) os.kill(os.getpid(), signal.SIGTSTP) # Back from suspend -- reestablish the handler. 
signal.signal(signal.SIGTSTP, handler) ret = 94 signal.signal(signal.SIGTERM, handler) signal.signal(signal.SIGINT, handler) signal.signal(signal.SIGTSTP, handler) signal.signal(signal.SIGCONT, handler) try: try: c = (do_profile and [sys.executable, '-m', 'cProfile'] or []) + subcmd if not n and not outf and not errf: # shortcut when no bup-newliner stuff is needed os.execvp(c[0], c) else: p = subprocess.Popen(c, stdout=outf, stderr=errf, preexec_fn=force_tty) while 1: # if we get a signal while waiting, we have to keep waiting, just # in case our child doesn't die. ret = p.wait() forward_signals = False break except OSError, e: log('%s: %s\n' % (subcmd[0], e)) ret = 98 finally: if p and p.poll() == None: os.kill(p.pid, signal.SIGTERM) p.wait() if n: n.stdin.close() try: n.wait() except: pass sys.exit(ret) bup-0.25/t/000077500000000000000000000000001225146730500124765ustar00rootroot00000000000000bup-0.25/t/compare-trees000077500000000000000000000031241225146730500151720ustar00rootroot00000000000000#!/usr/bin/env bash # Test that src and dest trees are as identical as bup is capable of # making them. For now, use rsync -niaHAX ... usage() { cat <&2; exit 1;; esac done shift $(($OPTIND - 1)) || exit $? if ! test $# -eq 2 then usage 1>&2 exit 1 fi src="$1" dest="$2" tmpfile="$(mktemp /tmp/bup-test-XXXXXXX)" || exit $? trap "rm -rf '$tmpfile'" EXIT || exit $? rsync_opts="-niaH$verify_content --delete" rsync_version=$(rsync --version) if [[ ! "$rsync_version" =~ "ACLs" ]] || [[ "$rsync_version" =~ "no ACLs" ]]; then echo "Not comparing ACLs (not supported by available rsync)" 1>&2 else case $OSTYPE in cygwin|darwin|netbsd) echo "Not comparing ACLs (not yet supported on $OSTYPE)" 1>&2 ;; *) rsync_opts="$rsync_opts -A" ;; esac fi if [[ ! 
"$rsync_version" =~ "xattrs" ]] || [[ "$rsync_version" =~ "no xattrs" ]]; then echo "Not comparing xattrs (not supported by available rsync)" 1>&2 else rsync_opts="$rsync_opts -X" fi rsync $rsync_opts "$src" "$dest" > "$tmpfile" || exit $? if test $(wc -l < "$tmpfile") != 0; then echo "Differences between $src and $dest" cat "$tmpfile" exit 1 fi exit 0 bup-0.25/t/configure-sampledata000077500000000000000000000013511225146730500165160ustar00rootroot00000000000000#!/usr/bin/env bash top=$(pwd) usage() { echo 'Usage: t/configure-sampledata [--setup | --clean]' } if test "$#" -ne 1; then usage 1>&2; exit 1 fi case "$1" in '--setup') ( cd t/sampledata || exit $? ln -sf a b || exit $? ln -sf b c || exit $? ln -sf /etc . || exit $? ) || exit $? ;; '--clean') ( cd t/sampledata || exit $? # test -e is false for dangling symlinks. if test -h b -o -e b; then rm b || exit $?; fi if test -h c -o -e c; then rm c || exit $?; fi if test -h etc -o -e etc; then rm etc || exit $?; fi ) ;; *) usage 1>&2; exit 1 ;; esac bup-0.25/t/force-delete000077500000000000000000000011551225146730500147640ustar00rootroot00000000000000#!/bin/bash set -o pipefail # Try *hard* to delete $@. Among other things, some systems have # r-xr-xr-x for root and other system dirs. rc=0 rm -rf "$@" # Maybe we'll get lucky. for f in "$@"; do test -e "$f" || continue if test "$(type -p setfacl)"; then setfacl -Rb "$f" fi if test "$(type -p chattr)"; then chattr -R -aisu "$f" fi chmod -R u+rwX "$f" rm -r "$f" if test -e "$f"; then rc=1 find "$f" -ls lsattr -aR "$f" getfacl -R "$f" fi done if test "$rc" -ne 0; then echo "Failed to delete everything" 1>&2 fi exit "$rc" bup-0.25/t/hardlink-sets000077500000000000000000000024031225146730500151730ustar00rootroot00000000000000#!/usr/bin/env python import os, stat, sys # Print the full paths of all the files in each hardlink set # underneath one of the paths. Separate sets with a blank line, sort # the paths within each set, and sort the sets by their first path. 
def usage(): print >> sys.stderr, "Usage: hardlink-sets " if len(sys.argv) < 2: usage() sys.exit(1) def on_walk_error(e): raise e hardlink_set = {} for p in sys.argv[1:]: for root, dirs, files in os.walk(p, onerror = on_walk_error): for filename in files: full_path = os.path.join(root, filename) st = os.lstat(full_path) if not stat.S_ISDIR(st.st_mode): node = '%s:%s' % (st.st_dev, st.st_ino) link_paths = hardlink_set.get(node) if link_paths: link_paths.append(full_path) else: hardlink_set[node] = [full_path] # Sort the link sets. for node, link_paths in hardlink_set.items(): link_paths.sort() first_set = True for link_paths in sorted(hardlink_set.values(), key = lambda x : x[0]): if len(link_paths) > 1: if first_set: first_set = False else: print for p in sorted(link_paths): print p sys.exit(0) bup-0.25/t/id-other-than000077500000000000000000000017221225146730500150710ustar00rootroot00000000000000#!/usr/bin/env python import grp import pwd import sys def usage(): print >> sys.stderr, "Usage: id-other-than <--user|--group> ID [ID ...]" if len(sys.argv) < 2: usage() sys.exit(1) def is_integer(x): try: int(x) return True except ValueError, e: return False excluded_ids = set(int(x) for x in sys.argv[2:] if is_integer(x)) excluded_names = (x for x in sys.argv[2:] if not is_integer(x)) if sys.argv[1] == '--user': for x in excluded_names: excluded_ids.add(pwd.getpwnam(x).pw_uid) for x in pwd.getpwall(): if x.pw_uid not in excluded_ids: print x.pw_name + ':' + str(x.pw_uid) sys.exit(0) elif sys.argv[1] == '--group': for x in excluded_names: excluded_ids.add(grp.getgrnam(x).gr_gid) for x in grp.getgrall(): if x.gr_gid not in excluded_ids: print x.gr_name + ':' + str(x.gr_gid) sys.exit(0) else: usage() sys.exit(1) bup-0.25/t/lib.sh000066400000000000000000000021431225146730500136000ustar00rootroot00000000000000# Assumes shell is Bash, and pipefail is set. 
bup_t_lib_script_home=$(cd "$(dirname $0)" && pwd) force-delete() { "$bup_t_lib_script_home/force-delete" "$@" } realpath() { test "$#" -eq 1 || return $? echo "$1" | \ PYTHONPATH="$bup_t_lib_script_home/../lib" python -c \ "import sys, bup.helpers; print bup.helpers.realpath(sys.stdin.readline())" \ || return $? } current-filesystem() { local kernel="$(uname -s)" || return $? case "$kernel" in NetBSD) df -G . | sed -En 's/.* ([^ ]*) fstype.*/\1/p' ;; SunOS) df -g . | sed -En 's/.* ([^ ]*) fstype.*/\1/p' ;; *) df -T . | awk 'END{print $2}' esac } path-filesystems() ( # Return filesystem for each dir from $1 to /. # Perhaps for /foo/bar, "ext4\next4\nbtrfs\n". test "$#" -eq 1 || exit $? cd "$1" || exit $? current-filesystem || exit $? dir="$(pwd)" || exit $? while test "$dir" != /; do cd .. || exit $? dir="$(pwd)" || exit $? current-filesystem || exit $? done exit 0 ) bup-0.25/t/mksock000077500000000000000000000001701225146730500137110ustar00rootroot00000000000000#!/usr/bin/env python import socket, sys s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0) s.bind(sys.argv[1]) bup-0.25/t/root-status000077500000000000000000000005621225146730500147330ustar00rootroot00000000000000#!/usr/bin/env python import sys if sys.platform.startswith('cygwin'): import ctypes if ctypes.cdll.shell32.IsUserAnAdmin(): print 'root' else: print 'none' else: import os if os.environ.get('FAKEROOTKEY'): print 'fake' else: if os.geteuid() == 0: print 'root' else: print 'none' 
bup-0.25/t/sampledata/000077500000000000000000000000001225146730500146115ustar00rootroot00000000000000bup-0.25/t/sampledata/b2/000077500000000000000000000000001225146730500151145ustar00rootroot00000000000000bup-0.25/t/sampledata/b2/foozy000066400000000000000000000000001225146730500161730ustar00rootroot00000000000000bup-0.25/t/sampledata/b2/foozy2000066400000000000000000000000001225146730500162550ustar00rootroot00000000000000bup-0.25/t/sampledata/x000066400000000000000000000000351225146730500150010ustar00rootroot00000000000000Sun Jan 3 01:54:26 EST 2010 bup-0.25/t/sampledata/y-2000000066400000000000000000000001251225146730500153610ustar00rootroot00000000000000this file should come *before* y/ in the sort order, because of that trailing slash. bup-0.25/t/sampledata/y/000077500000000000000000000000001225146730500150615ustar00rootroot00000000000000bup-0.25/t/sampledata/y/testfile1000066400000000000000000004657101225146730500167210ustar00rootroot00000000000000#!/hfe/ova/rai clguba sebz ohc vzcbeg bcgvbaf, qerphefr sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc qerphefr -- k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf d,dhvrg qba'g npghnyyl cevag svyranzrf cebsvyr eha haqre gur clguba cebsvyre """ b = bcgvbaf.Bcgvbaf('ohc qerphefr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar svyranzr rkcrpgrq") vg = qerphefr.erphefvir_qveyvfg(rkgen, bcg.kqri) vs bcg.cebsvyr: vzcbeg pCebsvyr qrs qb_vg(): sbe v va vg: cnff pCebsvyr.eha('qb_vg()') ryfr: vs bcg.dhvrg: sbe v va vg: cnff ryfr: sbe (anzr,fg) va vg: cevag anzr vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc fcyvg [-gpo] [-a anzr] [--orapu] [svyranzrf...] 
-- e,erzbgr= erzbgr ercbfvgbel cngu o,oybof bhgchg n frevrf bs oybo vqf g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) A,abbc qba'g npghnyyl fnir gur qngn naljurer d,dhvrg qba'g cevag cebterff zrffntrf i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) pbcl whfg pbcl vachg gb bhgchg, unfufcyvggvat nybat gur jnl orapu cevag orapuznex gvzvatf gb fgqree znk-cnpx-fvmr= znkvzhz olgrf va n fvatyr cnpx znk-cnpx-bowrpgf= znkvzhz ahzore bs bowrpgf va n fvatyr cnpx snabhg= znkvzhz ahzore bs oybof va n fvatyr gerr """ b = bcgvbaf.Bcgvbaf('ohc fcyvg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr be bcg.abbc be bcg.pbcl): b.sngny("hfr bar be zber bs -o, -g, -p, -a, -A, --pbcl") vs (bcg.abbc be bcg.pbcl) naq (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny('-A vf vapbzcngvoyr jvgu -o, -g, -p, -a') vs bcg.ireobfr >= 2: tvg.ireobfr = bcg.ireobfr - 1 bcg.orapu = 1 vs bcg.znk_cnpx_fvmr: unfufcyvg.znk_cnpx_fvmr = cnefr_ahz(bcg.znk_cnpx_fvmr) vs bcg.znk_cnpx_bowrpgf: unfufcyvg.znk_cnpx_bowrpgf = cnefr_ahz(bcg.znk_cnpx_bowrpgf) vs bcg.snabhg: unfufcyvg.snabhg = cnefr_ahz(bcg.snabhg) vs bcg.oybof: unfufcyvg.snabhg = 0 vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") fgneg_gvzr = gvzr.gvzr() ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.abbc be bcg.pbcl: pyv = j = byqers = Abar ryvs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() svyrf = rkgen naq (bcra(sa) sbe sa va rkgen) be [flf.fgqva] vs j: funyvfg = unfufcyvg.fcyvg_gb_funyvfg(j, svyrf) gerr = j.arj_gerr(funyvfg) ryfr: ynfg = 0 sbe (oybo, ovgf) va unfufcyvg.unfufcyvg_vgre(svyrf): unfufcyvg.gbgny_fcyvg += 
yra(oybo) vs bcg.pbcl: flf.fgqbhg.jevgr(fge(oybo)) zrtf = unfufcyvg.gbgny_fcyvg/1024/1024 vs abg bcg.dhvrg naq ynfg != zrtf: cebterff('%q Zolgrf ernq\e' % zrtf) ynfg = zrtf cebterff('%q Zolgrf ernq, qbar.\a' % zrtf) vs bcg.ireobfr: ybt('\a') vs bcg.oybof: sbe (zbqr,anzr,ova) va funyvfg: cevag ova.rapbqr('urk') vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fcyvg\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') vs j: j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() frpf = gvzr.gvzr() - fgneg_gvzr fvmr = unfufcyvg.gbgny_fcyvg vs bcg.orapu: ybt('\aohc: %.2sxolgrf va %.2s frpf = %.2s xolgrf/frp\a' % (fvmr/1024., frpf, fvmr/1024./frpf)) #!/hfe/ova/rai clguba vzcbeg flf, er, fgehpg, zznc sebz ohc vzcbeg tvg, bcgvbaf sebz ohc.urycref vzcbeg * qrs f_sebz_olgrf(olgrf): pyvfg = [pue(o) sbe o va olgrf] erghea ''.wbva(pyvfg) qrs ercbeg(pbhag): svryqf = ['IzFvmr', 'IzEFF', 'IzQngn', 'IzFgx'] q = {} sbe yvar va bcra('/cebp/frys/fgnghf').ernqyvarf(): y = er.fcyvg(e':\f*', yvar.fgevc(), 1) q[y[0]] = y[1] vs pbhag >= 0: r1 = pbhag svryqf = [q[x] sbe x va svryqf] ryfr: r1 = '' cevag ('%9f ' + ('%10f ' * yra(svryqf))) % ghcyr([r1] + svryqf) flf.fgqbhg.syhfu() bcgfcrp = """ ohc zrzgrfg [-a ryrzragf] [-p plpyrf] -- a,ahzore= ahzore bs bowrpgf cre plpyr p,plpyrf= ahzore bs plpyrf gb eha vtaber-zvqk vtaber .zvqk svyrf, hfr bayl .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zrzgrfg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') tvg.vtaber_zvqk = bcg.vtaber_zvqk tvg.purpx_ercb_be_qvr() z = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) plpyrf = bcg.plpyrf be 100 ahzore = bcg.ahzore be 10000 ercbeg(-1) s = bcra('/qri/henaqbz') n = zznc.zznc(-1, 20) ercbeg(0) sbe p va 
kenatr(plpyrf): sbe a va kenatr(ahzore): o = s.ernq(3) vs 0: olgrf = yvfg(fgehpg.hacnpx('!OOO', o)) + [0]*17 olgrf[2] &= 0ks0 ova = fgehpg.cnpx('!20f', f_sebz_olgrf(olgrf)) ryfr: n[0:2] = o[0:2] n[2] = pue(beq(o[2]) & 0ks0) ova = fge(n[0:20]) #cevag ova.rapbqr('urk') z.rkvfgf(ova) ercbeg((p+1)*ahzore) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * qrs cevag_abqr(grkg, a): cersvk = '' vs bcg.unfu: cersvk += "%f " % a.unfu.rapbqr('urk') vs fgng.F_VFQVE(a.zbqr): cevag '%f%f/' % (cersvk, grkg) ryvs fgng.F_VFYAX(a.zbqr): cevag '%f%f@' % (cersvk, grkg) ryfr: cevag '%f%f' % (cersvk, grkg) bcgfcrp = """ ohc yf -- f,unfu fubj unfu sbe rnpu svyr """ b = bcgvbaf.Bcgvbaf('ohc yf', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) vs abg rkgen: rkgen = ['/'] erg = 0 sbe q va rkgen: gel: a = gbc.yerfbyir(q) vs fgng.F_VFQVE(a.zbqr): sbe fho va a: cevag_abqr(fho.anzr, fho) ryfr: cevag_abqr(q, a) rkprcg isf.AbqrReebe, r: ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er, fgng, ernqyvar, sazngpu sebz ohc vzcbeg bcgvbaf, tvg, fudhbgr, isf sebz ohc.urycref vzcbeg * qrs abqr_anzr(grkg, a): vs fgng.F_VFQVE(a.zbqr): erghea '%f/' % grkg ryvs fgng.F_VFYAX(a.zbqr): erghea '%f@' % grkg ryfr: erghea '%f' % grkg qrs qb_yf(cngu, a): y = [] vs fgng.F_VFQVE(a.zbqr): sbe fho va a: y.nccraq(abqr_anzr(fho.anzr, fho)) ryfr: y.nccraq(abqr_anzr(cngu, a)) cevag pbyhzangr(y, '') qrs jevgr_gb_svyr(vas, bhgs): sbe oybo va puhaxlernqre(vas): bhgs.jevgr(oybo) qrs vachgvgre(): vs bf.vfnggl(flf.fgqva.svyrab()): juvyr 1: gel: lvryq enj_vachg('ohc> ') rkprcg RBSReebe: oernx ryfr: sbe yvar va flf.fgqva: lvryq yvar qrs _pbzcyrgre_trg_fhof(yvar): (dglcr, ynfgjbeq) = fudhbgr.hasvavfurq_jbeq(yvar) (qve,anzr) = bf.cngu.fcyvg(ynfgjbeq) #ybt('\apbzcyrgre: %e %e %e\a' % (dglcr, ynfgjbeq, grkg)) a = cjq.erfbyir(qve) fhof = yvfg(svygre(ynzoqn k: k.anzr.fgnegfjvgu(anzr), a.fhof())) 
erghea (qve, anzr, dglcr, ynfgjbeq, fhof) _ynfg_yvar = Abar _ynfg_erf = Abar qrs pbzcyrgre(grkg, fgngr): tybony _ynfg_yvar tybony _ynfg_erf gel: yvar = ernqyvar.trg_yvar_ohssre()[:ernqyvar.trg_raqvqk()] vs _ynfg_yvar != yvar: _ynfg_erf = _pbzcyrgre_trg_fhof(yvar) _ynfg_yvar = yvar (qve, anzr, dglcr, ynfgjbeq, fhof) = _ynfg_erf vs fgngr < yra(fhof): fa = fhof[fgngr] fa1 = fa.erfbyir('') # qrers flzyvaxf shyyanzr = bf.cngu.wbva(qve, fa.anzr) vs fgng.F_VFQVE(fa1.zbqr): erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr+'/', grezvangr=Snyfr) ryfr: erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr, grezvangr=Gehr) + ' ' erghea grkg + erg rkprcg Rkprcgvba, r: ybt('\areebe va pbzcyrgvba: %f\a' % r) bcgfcrp = """ ohc sgc """ b = bcgvbaf.Bcgvbaf('ohc sgc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) cjq = gbc vs rkgen: yvarf = rkgen ryfr: ernqyvar.frg_pbzcyrgre_qryvzf(' \g\a\e/') ernqyvar.frg_pbzcyrgre(pbzcyrgre) ernqyvar.cnefr_naq_ovaq("gno: pbzcyrgr") yvarf = vachgvgre() sbe yvar va yvarf: vs abg yvar.fgevc(): pbagvahr jbeqf = [jbeq sbe (jbeqfgneg,jbeq) va fudhbgr.dhbgrfcyvg(yvar)] pzq = jbeqf[0].ybjre() #ybt('rkrphgr: %e %e\a' % (pzq, cnez)) gel: vs pzq == 'yf': sbe cnez va (jbeqf[1:] be ['.']): qb_yf(cnez, cjq.erfbyir(cnez)) ryvs pzq == 'pq': sbe cnez va jbeqf[1:]: cjq = cjq.erfbyir(cnez) ryvs pzq == 'cjq': cevag cjq.shyyanzr() ryvs pzq == 'png': sbe cnez va jbeqf[1:]: jevgr_gb_svyr(cjq.erfbyir(cnez).bcra(), flf.fgqbhg) ryvs pzq == 'trg': vs yra(jbeqf) abg va [2,3]: envfr Rkprcgvba('Hfntr: trg [ybpnyanzr]') eanzr = jbeqf[1] (qve,onfr) = bf.cngu.fcyvg(eanzr) yanzr = yra(jbeqf)>2 naq jbeqf[2] be onfr vas = cjq.erfbyir(eanzr).bcra() ybt('Fnivat %e\a' % yanzr) jevgr_gb_svyr(vas, bcra(yanzr, 'jo')) ryvs pzq == 'ztrg': sbe cnez va jbeqf[1:]: (qve,onfr) = bf.cngu.fcyvg(cnez) sbe a va cjq.erfbyir(qve).fhof(): vs sazngpu.sazngpu(a.anzr, onfr): gel: ybt('Fnivat %e\a' % a.anzr) vas = a.bcra() bhgs = bcra(a.anzr, 'jo') 
jevgr_gb_svyr(vas, bhgs) bhgs.pybfr() rkprcg Rkprcgvba, r: ybt(' reebe: %f\a' % r) ryvs pzq == 'uryc' be pzq == '?': ybt('Pbzznaqf: yf pq cjq png trg ztrg uryc dhvg\a') ryvs pzq == 'dhvg' be pzq == 'rkvg' be pzq == 'olr': oernx ryfr: envfr Rkprcgvba('ab fhpu pbzznaq %e' % pzq) rkprcg Rkprcgvba, r: ybt('reebe: %f\a' % r) #envfr #!/hfe/ova/rai clguba vzcbeg flf, zznc sebz ohc vzcbeg bcgvbaf, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc enaqbz [-F frrq] -- F,frrq= bcgvbany enaqbz ahzore frrq (qrsnhyg 1) s,sbepr cevag enaqbz qngn gb fgqbhg rira vs vg'f n ggl """ b = bcgvbaf.Bcgvbaf('ohc enaqbz', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") gbgny = cnefr_ahz(rkgen[0]) vs bcg.sbepr be (abg bf.vfnggl(1) naq abg ngbv(bf.raiveba.trg('OHC_SBEPR_GGL')) & 1): _unfufcyvg.jevgr_enaqbz(flf.fgqbhg.svyrab(), gbgny, bcg.frrq be 0) ryfr: ybt('reebe: abg jevgvat ovanel qngn gb n grezvany. Hfr -s gb sbepr.\a') flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc uryc """ b = bcgvbaf.Bcgvbaf('ohc uryc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) == 0: # gur jenccre cebtenz cebivqrf gur qrsnhyg hfntr fgevat bf.rkrpic(bf.raiveba['OHC_ZNVA_RKR'], ['ohc']) ryvs yra(rkgen) == 1: qbpanzr = (rkgen[0]=='ohc' naq 'ohc' be ('ohc-%f' % rkgen[0])) rkr = flf.neti[0] (rkrcngu, rkrsvyr) = bf.cngu.fcyvg(rkr) znacngu = bf.cngu.wbva(rkrcngu, '../Qbphzragngvba/' + qbpanzr + '.[1-9]') t = tybo.tybo(znacngu) vs t: bf.rkrpic('zna', ['zna', '-y', t[0]]) ryfr: bf.rkrpic('zna', ['zna', qbpanzr]) ryfr: b.sngny("rknpgyl bar pbzznaq anzr rkcrpgrq") #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng, reeab, shfr, er, gvzr, grzcsvyr sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * pynff Fgng(shfr.Fgng): qrs __vavg__(frys): frys.fg_zbqr = 0 frys.fg_vab = 0 frys.fg_qri = 0 frys.fg_ayvax = 0 frys.fg_hvq = 0 frys.fg_tvq = 0 frys.fg_fvmr = 0 frys.fg_ngvzr = 0 frys.fg_zgvzr 
= 0 frys.fg_pgvzr = 0 frys.fg_oybpxf = 0 frys.fg_oyxfvmr = 0 frys.fg_eqri = 0 pnpur = {} qrs pnpur_trg(gbc, cngu): cnegf = cngu.fcyvg('/') pnpur[('',)] = gbc p = Abar znk = yra(cnegf) #ybt('pnpur: %e\a' % pnpur.xrlf()) sbe v va enatr(znk): cer = cnegf[:znk-v] #ybt('pnpur gelvat: %e\a' % cer) p = pnpur.trg(ghcyr(cer)) vs p: erfg = cnegf[znk-v:] sbe e va erfg: #ybt('erfbyivat %e sebz %e\a' % (e, p.shyyanzr())) p = p.yerfbyir(e) xrl = ghcyr(cer + [e]) #ybt('fnivat: %e\a' % (xrl,)) pnpur[xrl] = p oernx nffreg(p) erghea p pynff OhcSf(shfr.Shfr): qrs __vavg__(frys, gbc): shfr.Shfr.__vavg__(frys) frys.gbc = gbc qrs trgngge(frys, cngu): ybt('--trgngge(%e)\a' % cngu) gel: abqr = pnpur_trg(frys.gbc, cngu) fg = Fgng() fg.fg_zbqr = abqr.zbqr fg.fg_ayvax = abqr.ayvaxf() fg.fg_fvmr = abqr.fvmr() fg.fg_zgvzr = abqr.zgvzr fg.fg_pgvzr = abqr.pgvzr fg.fg_ngvzr = abqr.ngvzr erghea fg rkprcg isf.AbFhpuSvyr: erghea -reeab.RABRAG qrs ernqqve(frys, cngu, bssfrg): ybt('--ernqqve(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) lvryq shfr.Qveragel('.') lvryq shfr.Qveragel('..') sbe fho va abqr.fhof(): lvryq shfr.Qveragel(fho.anzr) qrs ernqyvax(frys, cngu): ybt('--ernqyvax(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) erghea abqr.ernqyvax() qrs bcra(frys, cngu, syntf): ybt('--bcra(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) nppzbqr = bf.B_EQBAYL | bf.B_JEBAYL | bf.B_EQJE vs (syntf & nppzbqr) != bf.B_EQBAYL: erghea -reeab.RNPPRF abqr.bcra() qrs eryrnfr(frys, cngu, syntf): ybt('--eryrnfr(%e)\a' % cngu) qrs ernq(frys, cngu, fvmr, bssfrg): ybt('--ernq(%e)\a' % cngu) a = pnpur_trg(frys.gbc, cngu) b = a.bcra() b.frrx(bssfrg) erghea b.ernq(fvmr) vs abg unfngge(shfr, '__irefvba__'): envfr EhagvzrReebe, "lbhe shfr zbqhyr vf gbb byq sbe shfr.__irefvba__" shfr.shfr_clguba_ncv = (0, 2) bcgfcrp = """ ohc shfr [-q] [-s] -- q,qroht vapernfr qroht yriry s,sbertebhaq eha va sbertebhaq """ b = bcgvbaf.Bcgvbaf('ohc shfr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: 
b.sngny("rknpgyl bar nethzrag rkcrpgrq") tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) s = OhcSf(gbc) s.shfr_netf.zbhagcbvag = rkgen[0] vs bcg.qroht: s.shfr_netf.nqq('qroht') vs bcg.sbertebhaq: s.shfr_netf.frgzbq('sbertebhaq') cevag s.zhygvguernqrq s.zhygvguernqrq = Snyfr s.znva() #!/hfe/ova/rai clguba sebz ohc vzcbeg tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ [OHC_QVE=...] ohc vavg [-e ubfg:cngu] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc vavg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") vs bcg.erzbgr: tvg.vavg_ercb() # ybpny ercb tvg.purpx_ercb_be_qvr() pyv = pyvrag.Pyvrag(bcg.erzbgr, perngr=Gehr) pyv.pybfr() ryfr: tvg.vavg_ercb() #!/hfe/ova/rai clguba vzcbeg flf, zngu, fgehpg, tybo sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * CNTR_FVMR=4096 FUN_CRE_CNTR=CNTR_FVMR/200. qrs zretr(vqkyvfg, ovgf, gnoyr): pbhag = 0 sbe r va tvg.vqkzretr(vqkyvfg): pbhag += 1 cersvk = tvg.rkgenpg_ovgf(r, ovgf) gnoyr[cersvk] = pbhag lvryq r qrs qb_zvqk(bhgqve, bhgsvyranzr, vasvyranzrf): vs abg bhgsvyranzr: nffreg(bhgqve) fhz = Fun1('\0'.wbva(vasvyranzrf)).urkqvtrfg() bhgsvyranzr = '%f/zvqk-%f.zvqk' % (bhgqve, fhz) vac = [] gbgny = 0 sbe anzr va vasvyranzrf: vk = tvg.CnpxVqk(anzr) vac.nccraq(vk) gbgny += yra(vk) ybt('Zretvat %q vaqrkrf (%q bowrpgf).\a' % (yra(vasvyranzrf), gbgny)) vs (abg bcg.sbepr naq (gbgny < 1024 naq yra(vasvyranzrf) < 3)) \ be (bcg.sbepr naq abg gbgny): ybt('zvqk: abguvat gb qb.\a') erghea cntrf = vag(gbgny/FUN_CRE_CNTR) be 1 ovgf = vag(zngu.prvy(zngu.ybt(cntrf, 2))) ragevrf = 2**ovgf ybt('Gnoyr fvmr: %q (%q ovgf)\a' % (ragevrf*4, ovgf)) gnoyr = [0]*ragevrf gel: bf.hayvax(bhgsvyranzr) rkprcg BFReebe: cnff s = bcra(bhgsvyranzr + '.gzc', 'j+') s.jevgr('ZVQK\0\0\0\2') s.jevgr(fgehpg.cnpx('!V', ovgf)) nffreg(s.gryy() == 12) s.jevgr('\0'*4*ragevrf) sbe r va zretr(vac, ovgf, gnoyr): s.jevgr(r) s.jevgr('\0'.wbva(bf.cngu.onfranzr(c) sbe c va vasvyranzrf)) s.frrx(12) 
s.jevgr(fgehpg.cnpx('!%qV' % ragevrf, *gnoyr)) s.pybfr() bf.eranzr(bhgsvyranzr + '.gzc', bhgsvyranzr) # guvf vf whfg sbe grfgvat vs 0: c = tvg.CnpxZvqk(bhgsvyranzr) nffreg(yra(c.vqkanzrf) == yra(vasvyranzrf)) cevag c.vqkanzrf nffreg(yra(c) == gbgny) cv = vgre(c) sbe v va zretr(vac, gbgny, ovgf, gnoyr): nffreg(v == cv.arkg()) nffreg(c.rkvfgf(v)) cevag bhgsvyranzr bcgfcrp = """ ohc zvqk [bcgvbaf...] -- b,bhgchg= bhgchg zvqk svyranzr (qrsnhyg: nhgb-trarengrq) n,nhgb nhgbzngvpnyyl perngr .zvqk sebz nal havaqrkrq .vqk svyrf s,sbepr nhgbzngvpnyyl perngr .zvqk sebz *nyy* .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zvqk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen naq (bcg.nhgb be bcg.sbepr): b.sngny("lbh pna'g hfr -s/-n naq nyfb cebivqr svyranzrf") tvg.purpx_ercb_be_qvr() vs rkgen: qb_zvqk(tvg.ercb('bowrpgf/cnpx'), bcg.bhgchg, rkgen) ryvs bcg.nhgb be bcg.sbepr: cnguf = [tvg.ercb('bowrpgf/cnpx')] cnguf += tybo.tybo(tvg.ercb('vaqrk-pnpur/*/.')) sbe cngu va cnguf: ybt('zvqk: fpnaavat %f\a' % cngu) vs bcg.sbepr: qb_zvqk(cngu, bcg.bhgchg, tybo.tybo('%f/*.vqk' % cngu)) ryvs bcg.nhgb: z = tvg.CnpxVqkYvfg(cngu) arrqrq = {} sbe cnpx va z.cnpxf: # bayl .vqk svyrf jvgubhg n .zvqk ner bcra vs cnpx.anzr.raqfjvgu('.vqk'): arrqrq[cnpx.anzr] = 1 qry z qb_zvqk(cngu, bcg.bhgchg, arrqrq.xrlf()) ybt('\a') ryfr: b.sngny("lbh zhfg hfr -s be -n be cebivqr vachg svyranzrf") #!/hfe/ova/rai clguba vzcbeg flf, bf, enaqbz sebz ohc vzcbeg bcgvbaf sebz ohc.urycref vzcbeg * qrs enaqoybpx(a): y = [] sbe v va kenatr(a): y.nccraq(pue(enaqbz.enaqenatr(0,256))) erghea ''.wbva(y) bcgfcrp = """ ohc qnzntr [-a pbhag] [-f znkfvmr] [-F frrq] -- JNEAVAT: GUVF PBZZNAQ VF RKGERZRYL QNATREBHF a,ahz= ahzore bs oybpxf gb qnzntr f,fvmr= znkvzhz fvmr bs rnpu qnzntrq oybpx creprag= znkvzhz fvmr bs rnpu qnzntrq oybpx (nf n creprag bs ragver svyr) rdhny fcernq qnzntr rirayl guebhtubhg gur svyr F,frrq= enaqbz ahzore frrq (sbe ercrngnoyr grfgf) """ b = bcgvbaf.Bcgvbaf('ohc qnzntr', bcgfcrp) (bcg, syntf, rkgen) 
= b.cnefr(flf.neti[1:]) vs abg rkgen: b.sngny('svyranzrf rkcrpgrq') vs bcg.frrq != Abar: enaqbz.frrq(bcg.frrq) sbe anzr va rkgen: ybt('Qnzntvat "%f"...\a' % anzr) s = bcra(anzr, 'e+o') fg = bf.sfgng(s.svyrab()) fvmr = fg.fg_fvmr vs bcg.creprag be bcg.fvmr: zf1 = vag(sybng(bcg.creprag be 0)/100.0*fvmr) be fvmr zf2 = bcg.fvmr be fvmr znkfvmr = zva(zf1, zf2) ryfr: znkfvmr = 1 puhaxf = bcg.ahz be 10 puhaxfvmr = fvmr/puhaxf sbe e va enatr(puhaxf): fm = enaqbz.enaqenatr(1, znkfvmr+1) vs fm > fvmr: fm = fvmr vs bcg.rdhny: bsf = e*puhaxfvmr ryfr: bsf = enaqbz.enaqenatr(0, fvmr - fm + 1) ybt(' %6q olgrf ng %q\a' % (fm, bsf)) s.frrx(bsf) s.jevgr(enaqoybpx(fm)) s.pybfr() #!/hfe/ova/rai clguba vzcbeg flf, fgehpg, zznc sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * fhfcraqrq_j = Abar qrs vavg_qve(pbaa, net): tvg.vavg_ercb(net) ybt('ohc freire: ohcqve vavgvnyvmrq: %e\a' % tvg.ercbqve) pbaa.bx() qrs frg_qve(pbaa, net): tvg.purpx_ercb_be_qvr(net) ybt('ohc freire: ohcqve vf %e\a' % tvg.ercbqve) pbaa.bx() qrs yvfg_vaqrkrf(pbaa, whax): tvg.purpx_ercb_be_qvr() sbe s va bf.yvfgqve(tvg.ercb('bowrpgf/cnpx')): vs s.raqfjvgu('.vqk'): pbaa.jevgr('%f\a' % s) pbaa.bx() qrs fraq_vaqrk(pbaa, anzr): tvg.purpx_ercb_be_qvr() nffreg(anzr.svaq('/') < 0) nffreg(anzr.raqfjvgu('.vqk')) vqk = tvg.CnpxVqk(tvg.ercb('bowrpgf/cnpx/%f' % anzr)) pbaa.jevgr(fgehpg.cnpx('!V', yra(vqk.znc))) pbaa.jevgr(vqk.znc) pbaa.bx() qrs erprvir_bowrpgf(pbaa, whax): tybony fhfcraqrq_j tvg.purpx_ercb_be_qvr() fhttrfgrq = {} vs fhfcraqrq_j: j = fhfcraqrq_j fhfcraqrq_j = Abar ryfr: j = tvg.CnpxJevgre() juvyr 1: af = pbaa.ernq(4) vs abg af: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq yratgu urnqre, tbg RBS\a') a = fgehpg.hacnpx('!V', af)[0] #ybt('rkcrpgvat %q olgrf\a' % a) vs abg a: ybt('ohc freire: erprvirq %q bowrpg%f.\a' % (j.pbhag, j.pbhag!=1 naq "f" be '')) shyycngu = j.pybfr() vs shyycngu: (qve, anzr) = bf.cngu.fcyvg(shyycngu) pbaa.jevgr('%f.vqk\a' % anzr) pbaa.bx() erghea ryvs a == 0kssssssss: ybt('ohc 
freire: erprvir-bowrpgf fhfcraqrq.\a') fhfcraqrq_j = j pbaa.bx() erghea ohs = pbaa.ernq(a) # bowrpg fvmrf va ohc ner ernfbanoyl fznyy #ybt('ernq %q olgrf\a' % a) vs yra(ohs) < a: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq %q olgrf, tbg %q\a' % (a, yra(ohs))) (glcr, pbagrag) = tvg._qrpbqr_cnpxbow(ohs) fun = tvg.pnyp_unfu(glcr, pbagrag) byqcnpx = j.rkvfgf(fun) # SVKZR: jr bayl fhttrfg n fvatyr vaqrk cre plpyr, orpnhfr gur pyvrag # vf pheeragyl qhzo gb qbjaybnq zber guna bar cre plpyr naljnl. # Npghnyyl jr fubhyq svk gur pyvrag, ohg guvf vf n zvabe bcgvzvmngvba # ba gur freire fvqr. vs abg fhttrfgrq naq \ byqcnpx naq (byqcnpx == Gehr be byqcnpx.raqfjvgu('.zvqk')): # SVKZR: jr fubhyqa'g ernyyl unir gb xabj nobhg zvqk svyrf # ng guvf ynlre. Ohg rkvfgf() ba n zvqk qbrfa'g erghea gur # cnpxanzr (fvapr vg qbrfa'g xabj)... cebonoyl jr fubhyq whfg # svk gung qrsvpvrapl bs zvqk svyrf riraghnyyl, nygubhtu vg'yy # znxr gur svyrf ovttre. Guvf zrgubq vf pregnvayl abg irel # rssvpvrag. j.bowpnpur.erserfu(fxvc_zvqk = Gehr) byqcnpx = j.bowpnpur.rkvfgf(fun) ybt('arj fhttrfgvba: %e\a' % byqcnpx) nffreg(byqcnpx) nffreg(byqcnpx != Gehr) nffreg(abg byqcnpx.raqfjvgu('.zvqk')) j.bowpnpur.erserfu(fxvc_zvqk = Snyfr) vs abg fhttrfgrq naq byqcnpx: nffreg(byqcnpx.raqfjvgu('.vqk')) (qve,anzr) = bf.cngu.fcyvg(byqcnpx) vs abg (anzr va fhttrfgrq): ybt("ohc freire: fhttrfgvat vaqrk %f\a" % anzr) pbaa.jevgr('vaqrk %f\a' % anzr) fhttrfgrq[anzr] = 1 ryfr: j._enj_jevgr([ohs]) # ABGERNPURQ qrs ernq_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() e = tvg.ernq_ers(ersanzr) pbaa.jevgr('%f\a' % (e be '').rapbqr('urk')) pbaa.bx() qrs hcqngr_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() arjiny = pbaa.ernqyvar().fgevc() byqiny = pbaa.ernqyvar().fgevc() tvg.hcqngr_ers(ersanzr, arjiny.qrpbqr('urk'), byqiny.qrpbqr('urk')) pbaa.bx() qrs png(pbaa, vq): tvg.purpx_ercb_be_qvr() gel: sbe oybo va tvg.png(vq): pbaa.jevgr(fgehpg.cnpx('!V', yra(oybo))) pbaa.jevgr(oybo) rkprcg XrlReebe, r: ybt('freire: reebe: %f\a' % r) 
pbaa.jevgr('\0\0\0\0') pbaa.reebe(r) ryfr: pbaa.jevgr('\0\0\0\0') pbaa.bx() bcgfcrp = """ ohc freire """ b = bcgvbaf.Bcgvbaf('ohc freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') ybt('ohc freire: ernqvat sebz fgqva.\a') pbzznaqf = { 'vavg-qve': vavg_qve, 'frg-qve': frg_qve, 'yvfg-vaqrkrf': yvfg_vaqrkrf, 'fraq-vaqrk': fraq_vaqrk, 'erprvir-bowrpgf': erprvir_bowrpgf, 'ernq-ers': ernq_ers, 'hcqngr-ers': hcqngr_ers, 'png': png, } # SVKZR: guvf cebgbpby vf gbgnyyl ynzr naq abg ng nyy shgher-cebbs. # (Rfcrpvnyyl fvapr jr nobeg pbzcyrgryl nf fbba nf *nalguvat* onq unccraf) pbaa = Pbaa(flf.fgqva, flf.fgqbhg) ye = yvarernqre(pbaa) sbe _yvar va ye: yvar = _yvar.fgevc() vs abg yvar: pbagvahr ybt('ohc freire: pbzznaq: %e\a' % yvar) jbeqf = yvar.fcyvg(' ', 1) pzq = jbeqf[0] erfg = yra(jbeqf)>1 naq jbeqf[1] be '' vs pzq == 'dhvg': oernx ryfr: pzq = pbzznaqf.trg(pzq) vs pzq: pzq(pbaa, erfg) ryfr: envfr Rkprcgvba('haxabja freire pbzznaq: %e\a' % yvar) ybt('ohc freire: qbar\a') #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc wbva [-e ubfg:cngu] [ersf be unfurf...] 
-- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc wbva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg rkgen: rkgen = yvarernqre(flf.fgqva) erg = 0 vs bcg.erzbgr: pyv = pyvrag.Pyvrag(bcg.erzbgr) png = pyv.png ryfr: pc = tvg.PngCvcr() png = pc.wbva sbe vq va rkgen: gel: sbe oybo va png(vq): flf.fgqbhg.jevgr(oybo) rkprcg XrlReebe, r: flf.fgqbhg.syhfu() ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, er, reeab, fgng, gvzr, zngu sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, vaqrk, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc fnir [-gp] [-a anzr] -- e,erzbgr= erzbgr ercbfvgbel cngu g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) d,dhvrg qba'g fubj cebterff zrgre fznyyre= bayl onpx hc svyrf fznyyre guna a olgrf """ b = bcgvbaf.Bcgvbaf('ohc fnir', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny("hfr bar be zber bs -g, -p, -a") vs abg rkgen: b.sngny("ab svyranzrf tvira") bcg.cebterff = (vfggl naq abg bcg.dhvrg) bcg.fznyyre = cnefr_ahz(bcg.fznyyre be 0) vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() unaqyr_pgey_p() qrs rngfynfu(qve): vs qve.raqfjvgu('/'): erghea qve[:-1] ryfr: erghea qve cnegf = [''] funyvfgf = [[]] qrs _chfu(cneg): nffreg(cneg) cnegf.nccraq(cneg) funyvfgf.nccraq([]) qrs _cbc(sbepr_gerr): nffreg(yra(cnegf) >= 1) cneg = cnegf.cbc() funyvfg = funyvfgf.cbc() gerr = sbepr_gerr be j.arj_gerr(funyvfg) vs funyvfgf: 
funyvfgf[-1].nccraq(('40000', cneg, gerr)) ryfr: # guvf jnf gur gbcyriry, fb chg vg onpx sbe fnavgl funyvfgf.nccraq(funyvfg) erghea gerr ynfgerznva = Abar qrs cebterff_ercbeg(a): tybony pbhag, fhopbhag, ynfgerznva fhopbhag += a pp = pbhag + fhopbhag cpg = gbgny naq (pp*100.0/gbgny) be 0 abj = gvzr.gvzr() ryncfrq = abj - gfgneg xcf = ryncfrq naq vag(pp/1024./ryncfrq) xcf_senp = 10 ** vag(zngu.ybt(xcf+1, 10) - 1) xcf = vag(xcf/xcf_senp)*xcf_senp vs pp: erznva = ryncfrq*1.0/pp * (gbgny-pp) ryfr: erznva = 0.0 vs (ynfgerznva naq (erznva > ynfgerznva) naq ((erznva - ynfgerznva)/ynfgerznva < 0.05)): erznva = ynfgerznva ryfr: ynfgerznva = erznva ubhef = vag(erznva/60/60) zvaf = vag(erznva/60 - ubhef*60) frpf = vag(erznva - ubhef*60*60 - zvaf*60) vs ryncfrq < 30: erznvafge = '' xcffge = '' ryfr: xcffge = '%qx/f' % xcf vs ubhef: erznvafge = '%qu%qz' % (ubhef, zvaf) ryvs zvaf: erznvafge = '%qz%q' % (zvaf, frpf) ryfr: erznvafge = '%qf' % frpf cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf) %f %f\e' % (cpg, pp/1024, gbgny/1024, spbhag, sgbgny, erznvafge, xcffge)) e = vaqrk.Ernqre(tvg.ercb('ohcvaqrk')) qrs nyernql_fnirq(rag): erghea rag.vf_inyvq() naq j.rkvfgf(rag.fun) naq rag.fun qrs jnagerphefr_cer(rag): erghea abg nyernql_fnirq(rag) qrs jnagerphefr_qhevat(rag): erghea abg nyernql_fnirq(rag) be rag.fun_zvffvat() gbgny = sgbgny = 0 vs bcg.cebterff: sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_cer): vs abg (sgbgny % 10024): cebterff('Ernqvat vaqrk: %q\e' % sgbgny) rkvfgf = rag.rkvfgf() unfuinyvq = nyernql_fnirq(rag) rag.frg_fun_zvffvat(abg unfuinyvq) vs abg bcg.fznyyre be rag.fvmr < bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: gbgny += rag.fvmr sgbgny += 1 cebterff('Ernqvat vaqrk: %q, qbar.\a' % sgbgny) unfufcyvg.cebterff_pnyyonpx = cebterff_ercbeg gfgneg = gvzr.gvzr() pbhag = fhopbhag = spbhag = 0 ynfgfxvc_anzr = Abar ynfgqve = '' sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_qhevat): (qve, svyr) = bf.cngu.fcyvg(rag.anzr) rkvfgf = (rag.syntf & 
vaqrk.VK_RKVFGF) unfuinyvq = nyernql_fnirq(rag) jnfzvffvat = rag.fun_zvffvat() byqfvmr = rag.fvmr vs bcg.ireobfr: vs abg rkvfgf: fgnghf = 'Q' ryvs abg unfuinyvq: vs rag.fun == vaqrk.RZCGL_FUN: fgnghf = 'N' ryfr: fgnghf = 'Z' ryfr: fgnghf = ' ' vs bcg.ireobfr >= 2: ybt('%f %-70f\a' % (fgnghf, rag.anzr)) ryvs abg fgng.F_VFQVE(rag.zbqr) naq ynfgqve != qve: vs abg ynfgqve.fgnegfjvgu(qve): ybt('%f %-70f\a' % (fgnghf, bf.cngu.wbva(qve, ''))) ynfgqve = qve vs bcg.cebterff: cebterff_ercbeg(0) spbhag += 1 vs abg rkvfgf: pbagvahr vs bcg.fznyyre naq rag.fvmr >= bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: nqq_reebe('fxvccvat ynetr svyr "%f"' % rag.anzr) ynfgfxvc_anzr = rag.anzr pbagvahr nffreg(qve.fgnegfjvgu('/')) qvec = qve.fcyvg('/') juvyr cnegf > qvec: _cbc(sbepr_gerr = Abar) vs qve != '/': sbe cneg va qvec[yra(cnegf):]: _chfu(cneg) vs abg svyr: # ab svyranzr cbegvba zrnaf guvf vf n fhoqve. Ohg # fho/cneragqverpgbevrf nyernql unaqyrq va gur cbc/chfu() cneg nobir. byqgerr = nyernql_fnirq(rag) # znl or Abar arjgerr = _cbc(sbepr_gerr = byqgerr) vs abg byqgerr: vs ynfgfxvc_anzr naq ynfgfxvc_anzr.fgnegfjvgu(rag.anzr): rag.vainyvqngr() ryfr: rag.inyvqngr(040000, arjgerr) rag.ercnpx() vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr pbagvahr # vg'f abg n qverpgbel vq = Abar vs unfuinyvq: zbqr = '%b' % rag.tvgzbqr vq = rag.fun funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) ryfr: vs fgng.F_VFERT(rag.zbqr): gel: s = unfufcyvg.bcra_abngvzr(rag.anzr) rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = unfufcyvg.fcyvg_gb_oybo_be_gerr(j, [s]) ryfr: vs fgng.F_VFQVE(rag.zbqr): nffreg(0) # unaqyrq nobir ryvs fgng.F_VFYAX(rag.zbqr): gel: ey = bf.ernqyvax(rag.anzr) rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = ('120000', j.arj_oybo(ey)) ryfr: nqq_reebe(Rkprcgvba('fxvccvat fcrpvny svyr "%f"' % rag.anzr)) 
ynfgfxvc_anzr = rag.anzr vs vq: rag.inyvqngr(vag(zbqr, 8), vq) rag.ercnpx() funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr fhopbhag = 0 vs bcg.cebterff: cpg = gbgny naq pbhag*100.0/gbgny be 100 cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf), qbar. \a' % (cpg, pbhag/1024, gbgny/1024, spbhag, sgbgny)) juvyr yra(cnegf) > 1: _cbc(sbepr_gerr = Abar) nffreg(yra(funyvfgf) == 1) gerr = j.arj_gerr(funyvfgf[-1]) vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fnir\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq juvyr fnivat.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc gvpx """ b = bcgvbaf.Bcgvbaf('ohc gvpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") g = gvzr.gvzr() gyrsg = 1 - (g - vag(g)) gvzr.fyrrc(gyrsg) #!/hfe/ova/rai clguba vzcbeg bf, flf, fgng, gvzr sebz ohc vzcbeg bcgvbaf, tvg, vaqrk, qerphefr sebz ohc.urycref vzcbeg * qrs zretr_vaqrkrf(bhg, e1, e2): sbe r va vaqrk.ZretrVgre([e1, e2]): # SVKZR: fubhyqa'g jr erzbir qryrgrq ragevrf riraghnyyl? Jura? 
bhg.nqq_vkragel(r) pynff VgreUrycre: qrs __vavg__(frys, y): frys.v = vgre(y) frys.phe = Abar frys.arkg() qrs arkg(frys): gel: frys.phe = frys.v.arkg() rkprcg FgbcVgrengvba: frys.phe = Abar erghea frys.phe qrs purpx_vaqrk(ernqre): gel: ybt('purpx: purpxvat sbejneq vgrengvba...\a') r = Abar q = {} sbe r va ernqre.sbejneq_vgre(): vs r.puvyqera_a: vs bcg.ireobfr: ybt('%08k+%-4q %e\a' % (r.puvyqera_bsf, r.puvyqera_a, r.anzr)) nffreg(r.puvyqera_bsf) nffreg(r.anzr.raqfjvgu('/')) nffreg(abg q.trg(r.puvyqera_bsf)) q[r.puvyqera_bsf] = 1 vs r.syntf & vaqrk.VK_UNFUINYVQ: nffreg(r.fun != vaqrk.RZCGL_FUN) nffreg(r.tvgzbqr) nffreg(abg r be r.anzr == '/') # ynfg ragel vf *nyjnlf* / ybt('purpx: purpxvat abezny vgrengvba...\a') ynfg = Abar sbe r va ernqre: vs ynfg: nffreg(ynfg > r.anzr) ynfg = r.anzr rkprcg: ybt('vaqrk reebe! ng %e\a' % r) envfr ybt('purpx: cnffrq.\a') qrs hcqngr_vaqrk(gbc): ev = vaqrk.Ernqre(vaqrksvyr) jv = vaqrk.Jevgre(vaqrksvyr) evt = VgreUrycre(ev.vgre(anzr=gbc)) gfgneg = vag(gvzr.gvzr()) unfutra = Abar vs bcg.snxr_inyvq: qrs unfutra(anzr): erghea (0100644, vaqrk.SNXR_FUN) gbgny = 0 sbe (cngu,cfg) va qerphefr.erphefvir_qveyvfg([gbc], kqri=bcg.kqri): vs bcg.ireobfr>=2 be (bcg.ireobfr==1 naq fgng.F_VFQVE(cfg.fg_zbqr)): flf.fgqbhg.jevgr('%f\a' % cngu) flf.fgqbhg.syhfu() cebterff('Vaqrkvat: %q\e' % gbgny) ryvs abg (gbgny % 128): cebterff('Vaqrkvat: %q\e' % gbgny) gbgny += 1 juvyr evt.phe naq evt.phe.anzr > cngu: # qryrgrq cnguf vs evt.phe.rkvfgf(): evt.phe.frg_qryrgrq() evt.phe.ercnpx() evt.arkg() vs evt.phe naq evt.phe.anzr == cngu: # cnguf gung nyernql rkvfgrq vs cfg: evt.phe.sebz_fgng(cfg, gfgneg) vs abg (evt.phe.syntf & vaqrk.VK_UNFUINYVQ): vs unfutra: (evt.phe.tvgzbqr, evt.phe.fun) = unfutra(cngu) evt.phe.syntf |= vaqrk.VK_UNFUINYVQ vs bcg.snxr_vainyvq: evt.phe.vainyvqngr() evt.phe.ercnpx() evt.arkg() ryfr: # arj cnguf jv.nqq(cngu, cfg, unfutra = unfutra) cebterff('Vaqrkvat: %q, qbar.\a' % gbgny) vs ev.rkvfgf(): ev.fnir() jv.syhfu() vs jv.pbhag: je = 
jv.arj_ernqre() vs bcg.purpx: ybt('purpx: orsber zretvat: byqsvyr\a') purpx_vaqrk(ev) ybt('purpx: orsber zretvat: arjsvyr\a') purpx_vaqrk(je) zv = vaqrk.Jevgre(vaqrksvyr) zretr_vaqrkrf(zv, ev, je) ev.pybfr() zv.pybfr() je.pybfr() jv.nobeg() ryfr: jv.pybfr() bcgfcrp = """ ohc vaqrk <-c|z|h> [bcgvbaf...] -- c,cevag cevag gur vaqrk ragevrf sbe gur tvira anzrf (nyfb jbexf jvgu -h) z,zbqvsvrq cevag bayl nqqrq/qryrgrq/zbqvsvrq svyrf (vzcyvrf -c) f,fgnghf cevag rnpu svyranzr jvgu n fgnghf pune (N/Z/Q) (vzcyvrf -c) U,unfu cevag gur unfu sbe rnpu bowrpg arkg gb vgf anzr (vzcyvrf -c) y,ybat cevag zber vasbezngvba nobhg rnpu svyr h,hcqngr (erphefviryl) hcqngr gur vaqrk ragevrf sbe gur tvira svyranzrf k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf snxr-inyvq znex nyy vaqrk ragevrf nf hc-gb-qngr rira vs gurl nera'g snxr-vainyvq znex nyy vaqrk ragevrf nf vainyvq purpx pnershyyl purpx vaqrk svyr vagrtevgl s,vaqrksvyr= gur anzr bs gur vaqrk svyr (qrsnhyg 'vaqrk') i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) """ b = bcgvbaf.Bcgvbaf('ohc vaqrk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg (bcg.zbqvsvrq be bcg['cevag'] be bcg.fgnghf be bcg.hcqngr be bcg.purpx): b.sngny('fhccyl bar be zber bs -c, -f, -z, -h, be --purpx') vs (bcg.snxr_inyvq be bcg.snxr_vainyvq) naq abg bcg.hcqngr: b.sngny('--snxr-{va,}inyvq ner zrnavatyrff jvgubhg -h') vs bcg.snxr_inyvq naq bcg.snxr_vainyvq: b.sngny('--snxr-inyvq vf vapbzcngvoyr jvgu --snxr-vainyvq') tvg.purpx_ercb_be_qvr() vaqrksvyr = bcg.vaqrksvyr be tvg.ercb('ohcvaqrk') unaqyr_pgey_p() vs bcg.purpx: ybt('purpx: fgnegvat vavgvny purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) cnguf = vaqrk.erqhpr_cnguf(rkgen) vs bcg.hcqngr: vs abg cnguf: b.sngny('hcqngr (-h) erdhrfgrq ohg ab cnguf tvira') sbe (ec,cngu) va cnguf: hcqngr_vaqrk(ec) vs bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq: sbe (anzr, rag) va vaqrk.Ernqre(vaqrksvyr).svygre(rkgen be ['']): vs (bcg.zbqvsvrq naq (rag.vf_inyvq() be rag.vf_qryrgrq() be abg 
rag.zbqr)): pbagvahr yvar = '' vs bcg.fgnghf: vs rag.vf_qryrgrq(): yvar += 'Q ' ryvs abg rag.vf_inyvq(): vs rag.fun == vaqrk.RZCGL_FUN: yvar += 'N ' ryfr: yvar += 'Z ' ryfr: yvar += ' ' vs bcg.unfu: yvar += rag.fun.rapbqr('urk') + ' ' vs bcg.ybat: yvar += "%7f %7f " % (bpg(rag.zbqr), bpg(rag.tvgzbqr)) cevag yvar + (anzr be './') vs bcg.purpx naq (bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq be bcg.hcqngr): ybt('purpx: fgnegvat svany purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg sebz ohc vzcbeg bcgvbaf, urycref bcgfcrp = """ ohc eonpxhc-freire -- Guvf pbzznaq vf abg vagraqrq gb or eha znahnyyl. """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc-freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') # trg gur fhopbzznaq'f neti. # Abeznyyl jr pbhyq whfg cnff guvf ba gur pbzznaq yvar, ohg fvapr jr'yy bsgra # or trggvat pnyyrq ba gur bgure raq bs na ffu cvcr, juvpu graqf gb znatyr # neti (ol fraqvat vg ivn gur furyy), guvf jnl vf zhpu fnsre. ohs = flf.fgqva.ernq(4) fm = fgehpg.hacnpx('!V', ohs)[0] nffreg(fm > 0) nffreg(fm < 1000000) ohs = flf.fgqva.ernq(fm) nffreg(yra(ohs) == fm) neti = ohs.fcyvg('\0') # fgqva/fgqbhg ner fhccbfrqyl pbaarpgrq gb 'ohc freire' gung gur pnyyre # fgnegrq sbe hf (bsgra ba gur bgure raq bs na ffu ghaary), fb jr qba'g jnag # gb zvfhfr gurz. Zbir gurz bhg bs gur jnl, gura ercynpr fgqbhg jvgu # n cbvagre gb fgqree va pnfr bhe fhopbzznaq jnagf gb qb fbzrguvat jvgu vg. # # Vg zvtug or avpr gb qb gur fnzr jvgu fgqva, ohg zl rkcrevzragf fubjrq gung # ffu frrzf gb znxr vgf puvyq'f fgqree n ernqnoyr-ohg-arire-ernqf-nalguvat # fbpxrg. Gurl ernyyl fubhyq unir hfrq fuhgqbja(FUHG_JE) ba gur bgure raq # bs vg, ohg cebonoyl qvqa'g. Naljnl, vg'f gbb zrffl, fb yrg'f whfg znxr fher # nalbar ernqvat sebz fgqva vf qvfnccbvagrq. 
# # (Lbh pna'g whfg yrnir fgqva/fgqbhg "abg bcra" ol pybfvat gur svyr # qrfpevcgbef. Gura gur arkg svyr gung bcraf vf nhgbzngvpnyyl nffvtarq 0 be 1, # naq crbcyr *gelvat* gb ernq/jevgr fgqva/fgqbhg trg fperjrq.) bf.qhc2(0, 3) bf.qhc2(1, 4) bf.qhc2(2, 1) sq = bf.bcra('/qri/ahyy', bf.B_EQBAYL) bf.qhc2(sq, 0) bf.pybfr(sq) bf.raiveba['OHC_FREIRE_ERIREFR'] = urycref.ubfganzr() bf.rkrpic(neti[0], neti) flf.rkvg(99) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo, fhocebprff, gvzr sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * cne2_bx = 0 ahyys = bcra('/qri/ahyy') qrs qroht(f): vs bcg.ireobfr: ybt(f) qrs eha(neti): # ng yrnfg va clguba 2.5, hfvat "fgqbhg=2" be "fgqbhg=flf.fgqree" orybj # qbrfa'g npghnyyl jbex, orpnhfr fhocebprff pybfrf sq #2 evtug orsber # rkrpvat sbe fbzr ernfba. Fb jr jbex nebhaq vg ol qhcyvpngvat gur sq # svefg. sq = bf.qhc(2) # pbcl fgqree gel: c = fhocebprff.Cbcra(neti, fgqbhg=sq, pybfr_sqf=Snyfr) erghea c.jnvg() svanyyl: bf.pybfr(sq) qrs cne2_frghc(): tybony cne2_bx ei = 1 gel: c = fhocebprff.Cbcra(['cne2', '--uryc'], fgqbhg=ahyys, fgqree=ahyys, fgqva=ahyys) ei = c.jnvg() rkprcg BFReebe: ybt('sfpx: jneavat: cne2 abg sbhaq; qvfnoyvat erpbirel srngherf.\a') ryfr: cne2_bx = 1 qrs cnei(yiy): vs bcg.ireobfr >= yiy: vs vfggl: erghea [] ryfr: erghea ['-d'] ryfr: erghea ['-dd'] qrs cne2_trarengr(onfr): erghea eha(['cne2', 'perngr', '-a1', '-p200'] + cnei(2) + ['--', onfr, onfr+'.cnpx', onfr+'.vqk']) qrs cne2_irevsl(onfr): erghea eha(['cne2', 'irevsl'] + cnei(3) + ['--', onfr]) qrs cne2_ercnve(onfr): erghea eha(['cne2', 'ercnve'] + cnei(2) + ['--', onfr]) qrs dhvpx_irevsl(onfr): s = bcra(onfr + '.cnpx', 'eo') s.frrx(-20, 2) jnagfhz = s.ernq(20) nffreg(yra(jnagfhz) == 20) s.frrx(0) fhz = Fun1() sbe o va puhaxlernqre(s, bf.sfgng(s.svyrab()).fg_fvmr - 20): fhz.hcqngr(o) vs fhz.qvtrfg() != jnagfhz: envfr InyhrReebe('rkcrpgrq %e, tbg %e' % (jnagfhz.rapbqr('urk'), fhz.urkqvtrfg())) qrs tvg_irevsl(onfr): vs bcg.dhvpx: gel: dhvpx_irevsl(onfr) rkprcg Rkprcgvba, r: 
qroht('reebe: %f\a' % r) erghea 1 erghea 0 ryfr: erghea eha(['tvg', 'irevsl-cnpx', '--', onfr]) qrs qb_cnpx(onfr, ynfg): pbqr = 0 vs cne2_bx naq cne2_rkvfgf naq (bcg.ercnve be abg bcg.trarengr): ierfhyg = cne2_irevsl(onfr) vs ierfhyg != 0: vs bcg.ercnve: eerfhyg = cne2_ercnve(onfr) vs eerfhyg != 0: cevag '%f cne2 ercnve: snvyrq (%q)' % (ynfg, eerfhyg) pbqr = eerfhyg ryfr: cevag '%f cne2 ercnve: fhpprrqrq (0)' % ynfg pbqr = 100 ryfr: cevag '%f cne2 irevsl: snvyrq (%q)' % (ynfg, ierfhyg) pbqr = ierfhyg ryfr: cevag '%f bx' % ynfg ryvs abg bcg.trarengr be (cne2_bx naq abg cne2_rkvfgf): terfhyg = tvg_irevsl(onfr) vs terfhyg != 0: cevag '%f tvg irevsl: snvyrq (%q)' % (ynfg, terfhyg) pbqr = terfhyg ryfr: vs cne2_bx naq bcg.trarengr: cerfhyg = cne2_trarengr(onfr) vs cerfhyg != 0: cevag '%f cne2 perngr: snvyrq (%q)' % (ynfg, cerfhyg) pbqr = cerfhyg ryfr: cevag '%f bx' % ynfg ryfr: cevag '%f bx' % ynfg ryfr: nffreg(bcg.trarengr naq (abg cne2_bx be cne2_rkvfgf)) qroht(' fxvccrq: cne2 svyr nyernql trarengrq.\a') erghea pbqr bcgfcrp = """ ohc sfpx [bcgvbaf...] [svyranzrf...] -- e,ercnve nggrzcg gb ercnve reebef hfvat cne2 (qnatrebhf!) 
t,trarengr trarengr nhgb-ercnve vasbezngvba hfvat cne2 i,ireobfr vapernfr ireobfvgl (pna or hfrq zber guna bapr) dhvpx whfg purpx cnpx fun1fhz, qba'g hfr tvg irevsl-cnpx w,wbof= eha 'a' wbof va cnenyyry cne2-bx vzzrqvngryl erghea 0 vs cne2 vf bx, 1 vs abg qvfnoyr-cne2 vtaber cne2 rira vs vg vf ninvynoyr """ b = bcgvbaf.Bcgvbaf('ohc sfpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) cne2_frghc() vs bcg.cne2_bx: vs cne2_bx: flf.rkvg(0) # 'gehr' va fu ryfr: flf.rkvg(1) vs bcg.qvfnoyr_cne2: cne2_bx = 0 tvg.purpx_ercb_be_qvr() vs abg rkgen: qroht('sfpx: Ab svyranzrf tvira: purpxvat nyy cnpxf.\a') rkgen = tybo.tybo(tvg.ercb('bowrpgf/cnpx/*.cnpx')) pbqr = 0 pbhag = 0 bhgfgnaqvat = {} sbe anzr va rkgen: vs anzr.raqfjvgu('.cnpx'): onfr = anzr[:-5] ryvs anzr.raqfjvgu('.vqk'): onfr = anzr[:-4] ryvs anzr.raqfjvgu('.cne2'): onfr = anzr[:-5] ryvs bf.cngu.rkvfgf(anzr + '.cnpx'): onfr = anzr ryfr: envfr Rkprcgvba('%f vf abg n cnpx svyr!' % anzr) (qve,ynfg) = bf.cngu.fcyvg(onfr) cne2_rkvfgf = bf.cngu.rkvfgf(onfr + '.cne2') vs cne2_rkvfgf naq bf.fgng(onfr + '.cne2').fg_fvmr == 0: cne2_rkvfgf = 0 flf.fgqbhg.syhfu() qroht('sfpx: purpxvat %f (%f)\a' % (ynfg, cne2_bx naq cne2_rkvfgf naq 'cne2' be 'tvg')) vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.wbof: ap = qb_cnpx(onfr, ynfg) pbqr = pbqr be ap pbhag += 1 ryfr: juvyr yra(bhgfgnaqvat) >= bcg.wbof: (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 cvq = bf.sbex() vs cvq: # cnerag bhgfgnaqvat[cvq] = 1 ryfr: # puvyq gel: flf.rkvg(qb_cnpx(onfr, ynfg)) rkprcg Rkprcgvba, r: ybt('rkprcgvba: %e\a' % r) flf.rkvg(99) juvyr yra(bhgfgnaqvat): (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.ireobfr naq vfggl: ybt('sfpx qbar. 
\a') flf.rkvg(pbqr) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg, trgbcg, fhocebprff, fvtany sebz ohc vzcbeg bcgvbaf, ffu sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc eonpxhc vaqrk ... ohc eonpxhc fnir ... ohc eonpxhc fcyvg ... """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc', bcgfcrp, bcgshap=trgbcg.trgbcg) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) < 2: b.sngny('nethzragf rkcrpgrq') pynff FvtRkprcgvba(Rkprcgvba): qrs __vavg__(frys, fvtahz): frys.fvtahz = fvtahz Rkprcgvba.__vavg__(frys, 'fvtany %q erprvirq' % fvtahz) qrs unaqyre(fvtahz, senzr): envfr FvtRkprcgvba(fvtahz) fvtany.fvtany(fvtany.FVTGREZ, unaqyre) fvtany.fvtany(fvtany.FVTVAG, unaqyre) fc = Abar c = Abar erg = 99 gel: ubfganzr = rkgen[0] neti = rkgen[1:] c = ffu.pbaarpg(ubfganzr, 'eonpxhc-freire') netif = '\0'.wbva(['ohc'] + neti) c.fgqva.jevgr(fgehpg.cnpx('!V', yra(netif)) + netif) c.fgqva.syhfu() znva_rkr = bf.raiveba.trg('OHC_ZNVA_RKR') be flf.neti[0] fc = fhocebprff.Cbcra([znva_rkr, 'freire'], fgqva=c.fgqbhg, fgqbhg=c.fgqva) c.fgqva.pybfr() c.fgqbhg.pybfr() svanyyl: juvyr 1: # vs jr trg n fvtany juvyr jnvgvat, jr unir gb xrrc jnvgvat, whfg # va pnfr bhe puvyq qbrfa'g qvr. 
gel: erg = c.jnvg() fc.jnvg() oernx rkprcg FvtRkprcgvba, r: ybt('\aohc eonpxhc: %f\a' % r) bf.xvyy(c.cvq, r.fvtahz) erg = 84 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc arjyvare """ b = bcgvbaf.Bcgvbaf('ohc arjyvare', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") e = er.pbzcvyr(e'([\e\a])') ynfgyra = 0 nyy = '' juvyr 1: y = e.fcyvg(nyy, 1) vs yra(y) <= 1: gel: o = bf.ernq(flf.fgqva.svyrab(), 4096) rkprcg XrlobneqVagreehcg: oernx vs abg o: oernx nyy += o ryfr: nffreg(yra(y) == 3) (yvar, fcyvgpune, nyy) = y #fcyvgpune = '\a' flf.fgqbhg.jevgr('%-*f%f' % (ynfgyra, yvar, fcyvgpune)) vs fcyvgpune == '\e': ynfgyra = yra(yvar) ryfr: ynfgyra = 0 flf.fgqbhg.syhfu() vs ynfgyra be nyy: flf.fgqbhg.jevgr('%-*f\a' % (ynfgyra, nyy)) #!/hfe/ova/rai clguba vzcbeg flf sebz ohc vzcbeg bcgvbaf, tvg, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc znetva """ b = bcgvbaf.Bcgvbaf('ohc znetva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") tvg.purpx_ercb_be_qvr() #tvg.vtaber_zvqk = 1 zv = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) ynfg = '\0'*20 ybatzngpu = 0 sbe v va zv: vs v == ynfg: pbagvahr #nffreg(fge(v) >= ynfg) cz = _unfufcyvg.ovgzngpu(ynfg, v) ybatzngpu = znk(ybatzngpu, cz) ynfg = v cevag ybatzngpu #!/hfe/ova/rai clguba sebz ohc vzcbeg bcgvbaf, qerphefr sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc qerphefr -- k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf d,dhvrg qba'g npghnyyl cevag svyranzrf cebsvyr eha haqre gur clguba cebsvyre """ b = bcgvbaf.Bcgvbaf('ohc qerphefr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar svyranzr rkcrpgrq") vg = qerphefr.erphefvir_qveyvfg(rkgen, bcg.kqri) vs bcg.cebsvyr: vzcbeg pCebsvyr qrs qb_vg(): sbe v va vg: cnff pCebsvyr.eha('qb_vg()') ryfr: vs bcg.dhvrg: sbe v va vg: cnff ryfr: sbe (anzr,fg) va vg: cevag anzr vs fnirq_reebef: 
ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc fcyvg [-gpo] [-a anzr] [--orapu] [svyranzrf...] -- e,erzbgr= erzbgr ercbfvgbel cngu o,oybof bhgchg n frevrf bs oybo vqf g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) A,abbc qba'g npghnyyl fnir gur qngn naljurer d,dhvrg qba'g cevag cebterff zrffntrf i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) pbcl whfg pbcl vachg gb bhgchg, unfufcyvggvat nybat gur jnl orapu cevag orapuznex gvzvatf gb fgqree znk-cnpx-fvmr= znkvzhz olgrf va n fvatyr cnpx znk-cnpx-bowrpgf= znkvzhz ahzore bs bowrpgf va n fvatyr cnpx snabhg= znkvzhz ahzore bs oybof va n fvatyr gerr """ b = bcgvbaf.Bcgvbaf('ohc fcyvg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr be bcg.abbc be bcg.pbcl): b.sngny("hfr bar be zber bs -o, -g, -p, -a, -A, --pbcl") vs (bcg.abbc be bcg.pbcl) naq (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny('-A vf vapbzcngvoyr jvgu -o, -g, -p, -a') vs bcg.ireobfr >= 2: tvg.ireobfr = bcg.ireobfr - 1 bcg.orapu = 1 vs bcg.znk_cnpx_fvmr: unfufcyvg.znk_cnpx_fvmr = cnefr_ahz(bcg.znk_cnpx_fvmr) vs bcg.znk_cnpx_bowrpgf: unfufcyvg.znk_cnpx_bowrpgf = cnefr_ahz(bcg.znk_cnpx_bowrpgf) vs bcg.snabhg: unfufcyvg.snabhg = cnefr_ahz(bcg.snabhg) vs bcg.oybof: unfufcyvg.snabhg = 0 vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") fgneg_gvzr = gvzr.gvzr() ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.abbc be bcg.pbcl: pyv = j = byqers = Abar ryvs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr 
naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() svyrf = rkgen naq (bcra(sa) sbe sa va rkgen) be [flf.fgqva] vs j: funyvfg = unfufcyvg.fcyvg_gb_funyvfg(j, svyrf) gerr = j.arj_gerr(funyvfg) ryfr: ynfg = 0 sbe (oybo, ovgf) va unfufcyvg.unfufcyvg_vgre(svyrf): unfufcyvg.gbgny_fcyvg += yra(oybo) vs bcg.pbcl: flf.fgqbhg.jevgr(fge(oybo)) zrtf = unfufcyvg.gbgny_fcyvg/1024/1024 vs abg bcg.dhvrg naq ynfg != zrtf: cebterff('%q Zolgrf ernq\e' % zrtf) ynfg = zrtf cebterff('%q Zolgrf ernq, qbar.\a' % zrtf) vs bcg.ireobfr: ybt('\a') vs bcg.oybof: sbe (zbqr,anzr,ova) va funyvfg: cevag ova.rapbqr('urk') vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fcyvg\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') vs j: j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() frpf = gvzr.gvzr() - fgneg_gvzr fvmr = unfufcyvg.gbgny_fcyvg vs bcg.orapu: ybt('\aohc: %.2sxolgrf va %.2s frpf = %.2s xolgrf/frp\a' % (fvmr/1024., frpf, fvmr/1024./frpf)) #!/hfe/ova/rai clguba vzcbeg flf, er, fgehpg, zznc sebz ohc vzcbeg tvg, bcgvbaf sebz ohc.urycref vzcbeg * qrs f_sebz_olgrf(olgrf): pyvfg = [pue(o) sbe o va olgrf] erghea ''.wbva(pyvfg) qrs ercbeg(pbhag): svryqf = ['IzFvmr', 'IzEFF', 'IzQngn', 'IzFgx'] q = {} sbe yvar va bcra('/cebp/frys/fgnghf').ernqyvarf(): y = er.fcyvg(e':\f*', yvar.fgevc(), 1) q[y[0]] = y[1] vs pbhag >= 0: r1 = pbhag svryqf = [q[x] sbe x va svryqf] ryfr: r1 = '' cevag ('%9f ' + ('%10f ' * yra(svryqf))) % ghcyr([r1] + svryqf) flf.fgqbhg.syhfu() bcgfcrp = """ ohc zrzgrfg [-a ryrzragf] [-p plpyrf] -- a,ahzore= ahzore bs bowrpgf cre plpyr p,plpyrf= ahzore bs plpyrf gb eha vtaber-zvqk vtaber .zvqk svyrf, hfr bayl .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zrzgrfg', bcgfcrp) (bcg, syntf, rkgen) = 
b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') tvg.vtaber_zvqk = bcg.vtaber_zvqk tvg.purpx_ercb_be_qvr() z = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) plpyrf = bcg.plpyrf be 100 ahzore = bcg.ahzore be 10000 ercbeg(-1) s = bcra('/qri/henaqbz') n = zznc.zznc(-1, 20) ercbeg(0) sbe p va kenatr(plpyrf): sbe a va kenatr(ahzore): o = s.ernq(3) vs 0: olgrf = yvfg(fgehpg.hacnpx('!OOO', o)) + [0]*17 olgrf[2] &= 0ks0 ova = fgehpg.cnpx('!20f', f_sebz_olgrf(olgrf)) ryfr: n[0:2] = o[0:2] n[2] = pue(beq(o[2]) & 0ks0) ova = fge(n[0:20]) #cevag ova.rapbqr('urk') z.rkvfgf(ova) ercbeg((p+1)*ahzore) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * qrs cevag_abqr(grkg, a): cersvk = '' vs bcg.unfu: cersvk += "%f " % a.unfu.rapbqr('urk') vs fgng.F_VFQVE(a.zbqr): cevag '%f%f/' % (cersvk, grkg) ryvs fgng.F_VFYAX(a.zbqr): cevag '%f%f@' % (cersvk, grkg) ryfr: cevag '%f%f' % (cersvk, grkg) bcgfcrp = """ ohc yf -- f,unfu fubj unfu sbe rnpu svyr """ b = bcgvbaf.Bcgvbaf('ohc yf', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) vs abg rkgen: rkgen = ['/'] erg = 0 sbe q va rkgen: gel: a = gbc.yerfbyir(q) vs fgng.F_VFQVE(a.zbqr): sbe fho va a: cevag_abqr(fho.anzr, fho) ryfr: cevag_abqr(q, a) rkprcg isf.AbqrReebe, r: ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er, fgng, ernqyvar, sazngpu sebz ohc vzcbeg bcgvbaf, tvg, fudhbgr, isf sebz ohc.urycref vzcbeg * qrs abqr_anzr(grkg, a): vs fgng.F_VFQVE(a.zbqr): erghea '%f/' % grkg ryvs fgng.F_VFYAX(a.zbqr): erghea '%f@' % grkg ryfr: erghea '%f' % grkg qrs qb_yf(cngu, a): y = [] vs fgng.F_VFQVE(a.zbqr): sbe fho va a: y.nccraq(abqr_anzr(fho.anzr, fho)) ryfr: y.nccraq(abqr_anzr(cngu, a)) cevag pbyhzangr(y, '') qrs jevgr_gb_svyr(vas, bhgs): sbe oybo va puhaxlernqre(vas): bhgs.jevgr(oybo) qrs vachgvgre(): vs bf.vfnggl(flf.fgqva.svyrab()): juvyr 1: gel: lvryq enj_vachg('ohc> ') rkprcg RBSReebe: oernx 
ryfr: sbe yvar va flf.fgqva: lvryq yvar qrs _pbzcyrgre_trg_fhof(yvar): (dglcr, ynfgjbeq) = fudhbgr.hasvavfurq_jbeq(yvar) (qve,anzr) = bf.cngu.fcyvg(ynfgjbeq) #ybt('\apbzcyrgre: %e %e %e\a' % (dglcr, ynfgjbeq, grkg)) a = cjq.erfbyir(qve) fhof = yvfg(svygre(ynzoqn k: k.anzr.fgnegfjvgu(anzr), a.fhof())) erghea (qve, anzr, dglcr, ynfgjbeq, fhof) _ynfg_yvar = Abar _ynfg_erf = Abar qrs pbzcyrgre(grkg, fgngr): tybony _ynfg_yvar tybony _ynfg_erf gel: yvar = ernqyvar.trg_yvar_ohssre()[:ernqyvar.trg_raqvqk()] vs _ynfg_yvar != yvar: _ynfg_erf = _pbzcyrgre_trg_fhof(yvar) _ynfg_yvar = yvar (qve, anzr, dglcr, ynfgjbeq, fhof) = _ynfg_erf vs fgngr < yra(fhof): fa = fhof[fgngr] fa1 = fa.erfbyir('') # qrers flzyvaxf shyyanzr = bf.cngu.wbva(qve, fa.anzr) vs fgng.F_VFQVE(fa1.zbqr): erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr+'/', grezvangr=Snyfr) ryfr: erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr, grezvangr=Gehr) + ' ' erghea grkg + erg rkprcg Rkprcgvba, r: ybt('\areebe va pbzcyrgvba: %f\a' % r) bcgfcrp = """ ohc sgc """ b = bcgvbaf.Bcgvbaf('ohc sgc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) cjq = gbc vs rkgen: yvarf = rkgen ryfr: ernqyvar.frg_pbzcyrgre_qryvzf(' \g\a\e/') ernqyvar.frg_pbzcyrgre(pbzcyrgre) ernqyvar.cnefr_naq_ovaq("gno: pbzcyrgr") yvarf = vachgvgre() sbe yvar va yvarf: vs abg yvar.fgevc(): pbagvahr jbeqf = [jbeq sbe (jbeqfgneg,jbeq) va fudhbgr.dhbgrfcyvg(yvar)] pzq = jbeqf[0].ybjre() #ybt('rkrphgr: %e %e\a' % (pzq, cnez)) gel: vs pzq == 'yf': sbe cnez va (jbeqf[1:] be ['.']): qb_yf(cnez, cjq.erfbyir(cnez)) ryvs pzq == 'pq': sbe cnez va jbeqf[1:]: cjq = cjq.erfbyir(cnez) ryvs pzq == 'cjq': cevag cjq.shyyanzr() ryvs pzq == 'png': sbe cnez va jbeqf[1:]: jevgr_gb_svyr(cjq.erfbyir(cnez).bcra(), flf.fgqbhg) ryvs pzq == 'trg': vs yra(jbeqf) abg va [2,3]: envfr Rkprcgvba('Hfntr: trg [ybpnyanzr]') eanzr = jbeqf[1] (qve,onfr) = bf.cngu.fcyvg(eanzr) yanzr = yra(jbeqf)>2 naq jbeqf[2] be onfr vas = 
cjq.erfbyir(eanzr).bcra() ybt('Fnivat %e\a' % yanzr) jevgr_gb_svyr(vas, bcra(yanzr, 'jo')) ryvs pzq == 'ztrg': sbe cnez va jbeqf[1:]: (qve,onfr) = bf.cngu.fcyvg(cnez) sbe a va cjq.erfbyir(qve).fhof(): vs sazngpu.sazngpu(a.anzr, onfr): gel: ybt('Fnivat %e\a' % a.anzr) vas = a.bcra() bhgs = bcra(a.anzr, 'jo') jevgr_gb_svyr(vas, bhgs) bhgs.pybfr() rkprcg Rkprcgvba, r: ybt(' reebe: %f\a' % r) ryvs pzq == 'uryc' be pzq == '?': ybt('Pbzznaqf: yf pq cjq png trg ztrg uryc dhvg\a') ryvs pzq == 'dhvg' be pzq == 'rkvg' be pzq == 'olr': oernx ryfr: envfr Rkprcgvba('ab fhpu pbzznaq %e' % pzq) rkprcg Rkprcgvba, r: ybt('reebe: %f\a' % r) #envfr #!/hfe/ova/rai clguba vzcbeg flf, zznc sebz ohc vzcbeg bcgvbaf, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc enaqbz [-F frrq] -- F,frrq= bcgvbany enaqbz ahzore frrq (qrsnhyg 1) s,sbepr cevag enaqbz qngn gb fgqbhg rira vs vg'f n ggl """ b = bcgvbaf.Bcgvbaf('ohc enaqbz', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") gbgny = cnefr_ahz(rkgen[0]) vs bcg.sbepr be (abg bf.vfnggl(1) naq abg ngbv(bf.raiveba.trg('OHC_SBEPR_GGL')) & 1): _unfufcyvg.jevgr_enaqbz(flf.fgqbhg.svyrab(), gbgny, bcg.frrq be 0) ryfr: ybt('reebe: abg jevgvat ovanel qngn gb n grezvany. 
Hfr -s gb sbepr.\a') flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc uryc """ b = bcgvbaf.Bcgvbaf('ohc uryc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) == 0: # gur jenccre cebtenz cebivqrf gur qrsnhyg hfntr fgevat bf.rkrpic(bf.raiveba['OHC_ZNVA_RKR'], ['ohc']) ryvs yra(rkgen) == 1: qbpanzr = (rkgen[0]=='ohc' naq 'ohc' be ('ohc-%f' % rkgen[0])) rkr = flf.neti[0] (rkrcngu, rkrsvyr) = bf.cngu.fcyvg(rkr) znacngu = bf.cngu.wbva(rkrcngu, '../Qbphzragngvba/' + qbpanzr + '.[1-9]') t = tybo.tybo(znacngu) vs t: bf.rkrpic('zna', ['zna', '-y', t[0]]) ryfr: bf.rkrpic('zna', ['zna', qbpanzr]) ryfr: b.sngny("rknpgyl bar pbzznaq anzr rkcrpgrq") #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng, reeab, shfr, er, gvzr, grzcsvyr sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * pynff Fgng(shfr.Fgng): qrs __vavg__(frys): frys.fg_zbqr = 0 frys.fg_vab = 0 frys.fg_qri = 0 frys.fg_ayvax = 0 frys.fg_hvq = 0 frys.fg_tvq = 0 frys.fg_fvmr = 0 frys.fg_ngvzr = 0 frys.fg_zgvzr = 0 frys.fg_pgvzr = 0 frys.fg_oybpxf = 0 frys.fg_oyxfvmr = 0 frys.fg_eqri = 0 pnpur = {} qrs pnpur_trg(gbc, cngu): cnegf = cngu.fcyvg('/') pnpur[('',)] = gbc p = Abar znk = yra(cnegf) #ybt('pnpur: %e\a' % pnpur.xrlf()) sbe v va enatr(znk): cer = cnegf[:znk-v] #ybt('pnpur gelvat: %e\a' % cer) p = pnpur.trg(ghcyr(cer)) vs p: erfg = cnegf[znk-v:] sbe e va erfg: #ybt('erfbyivat %e sebz %e\a' % (e, p.shyyanzr())) p = p.yerfbyir(e) xrl = ghcyr(cer + [e]) #ybt('fnivat: %e\a' % (xrl,)) pnpur[xrl] = p oernx nffreg(p) erghea p pynff OhcSf(shfr.Shfr): qrs __vavg__(frys, gbc): shfr.Shfr.__vavg__(frys) frys.gbc = gbc qrs trgngge(frys, cngu): ybt('--trgngge(%e)\a' % cngu) gel: abqr = pnpur_trg(frys.gbc, cngu) fg = Fgng() fg.fg_zbqr = abqr.zbqr fg.fg_ayvax = abqr.ayvaxf() fg.fg_fvmr = abqr.fvmr() fg.fg_zgvzr = abqr.zgvzr fg.fg_pgvzr = abqr.pgvzr fg.fg_ngvzr = abqr.ngvzr erghea fg rkprcg isf.AbFhpuSvyr: erghea -reeab.RABRAG qrs ernqqve(frys, cngu, bssfrg): 
ybt('--ernqqve(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) lvryq shfr.Qveragel('.') lvryq shfr.Qveragel('..') sbe fho va abqr.fhof(): lvryq shfr.Qveragel(fho.anzr) qrs ernqyvax(frys, cngu): ybt('--ernqyvax(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) erghea abqr.ernqyvax() qrs bcra(frys, cngu, syntf): ybt('--bcra(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) nppzbqr = bf.B_EQBAYL | bf.B_JEBAYL | bf.B_EQJE vs (syntf & nppzbqr) != bf.B_EQBAYL: erghea -reeab.RNPPRF abqr.bcra() qrs eryrnfr(frys, cngu, syntf): ybt('--eryrnfr(%e)\a' % cngu) qrs ernq(frys, cngu, fvmr, bssfrg): ybt('--ernq(%e)\a' % cngu) a = pnpur_trg(frys.gbc, cngu) b = a.bcra() b.frrx(bssfrg) erghea b.ernq(fvmr) vs abg unfngge(shfr, '__irefvba__'): envfr EhagvzrReebe, "lbhe shfr zbqhyr vf gbb byq sbe shfr.__irefvba__" shfr.shfr_clguba_ncv = (0, 2) bcgfcrp = """ ohc shfr [-q] [-s] -- q,qroht vapernfr qroht yriry s,sbertebhaq eha va sbertebhaq """ b = bcgvbaf.Bcgvbaf('ohc shfr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) s = OhcSf(gbc) s.shfr_netf.zbhagcbvag = rkgen[0] vs bcg.qroht: s.shfr_netf.nqq('qroht') vs bcg.sbertebhaq: s.shfr_netf.frgzbq('sbertebhaq') cevag s.zhygvguernqrq s.zhygvguernqrq = Snyfr s.znva() #!/hfe/ova/rai clguba sebz ohc vzcbeg tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ [OHC_QVE=...] ohc vavg [-e ubfg:cngu] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc vavg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") vs bcg.erzbgr: tvg.vavg_ercb() # ybpny ercb tvg.purpx_ercb_be_qvr() pyv = pyvrag.Pyvrag(bcg.erzbgr, perngr=Gehr) pyv.pybfr() ryfr: tvg.vavg_ercb() #!/hfe/ova/rai clguba vzcbeg flf, zngu, fgehpg, tybo sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * CNTR_FVMR=4096 FUN_CRE_CNTR=CNTR_FVMR/200. 
qrs zretr(vqkyvfg, ovgf, gnoyr): pbhag = 0 sbe r va tvg.vqkzretr(vqkyvfg): pbhag += 1 cersvk = tvg.rkgenpg_ovgf(r, ovgf) gnoyr[cersvk] = pbhag lvryq r qrs qb_zvqk(bhgqve, bhgsvyranzr, vasvyranzrf): vs abg bhgsvyranzr: nffreg(bhgqve) fhz = Fun1('\0'.wbva(vasvyranzrf)).urkqvtrfg() bhgsvyranzr = '%f/zvqk-%f.zvqk' % (bhgqve, fhz) vac = [] gbgny = 0 sbe anzr va vasvyranzrf: vk = tvg.CnpxVqk(anzr) vac.nccraq(vk) gbgny += yra(vk) ybt('Zretvat %q vaqrkrf (%q bowrpgf).\a' % (yra(vasvyranzrf), gbgny)) vs (abg bcg.sbepr naq (gbgny < 1024 naq yra(vasvyranzrf) < 3)) \ be (bcg.sbepr naq abg gbgny): ybt('zvqk: abguvat gb qb.\a') erghea cntrf = vag(gbgny/FUN_CRE_CNTR) be 1 ovgf = vag(zngu.prvy(zngu.ybt(cntrf, 2))) ragevrf = 2**ovgf ybt('Gnoyr fvmr: %q (%q ovgf)\a' % (ragevrf*4, ovgf)) gnoyr = [0]*ragevrf gel: bf.hayvax(bhgsvyranzr) rkprcg BFReebe: cnff s = bcra(bhgsvyranzr + '.gzc', 'j+') s.jevgr('ZVQK\0\0\0\2') s.jevgr(fgehpg.cnpx('!V', ovgf)) nffreg(s.gryy() == 12) s.jevgr('\0'*4*ragevrf) sbe r va zretr(vac, ovgf, gnoyr): s.jevgr(r) s.jevgr('\0'.wbva(bf.cngu.onfranzr(c) sbe c va vasvyranzrf)) s.frrx(12) s.jevgr(fgehpg.cnpx('!%qV' % ragevrf, *gnoyr)) s.pybfr() bf.eranzr(bhgsvyranzr + '.gzc', bhgsvyranzr) # guvf vf whfg sbe grfgvat vs 0: c = tvg.CnpxZvqk(bhgsvyranzr) nffreg(yra(c.vqkanzrf) == yra(vasvyranzrf)) cevag c.vqkanzrf nffreg(yra(c) == gbgny) cv = vgre(c) sbe v va zretr(vac, gbgny, ovgf, gnoyr): nffreg(v == cv.arkg()) nffreg(c.rkvfgf(v)) cevag bhgsvyranzr bcgfcrp = """ ohc zvqk [bcgvbaf...] 
-- b,bhgchg= bhgchg zvqk svyranzr (qrsnhyg: nhgb-trarengrq) n,nhgb nhgbzngvpnyyl perngr .zvqk sebz nal havaqrkrq .vqk svyrf s,sbepr nhgbzngvpnyyl perngr .zvqk sebz *nyy* .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zvqk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen naq (bcg.nhgb be bcg.sbepr): b.sngny("lbh pna'g hfr -s/-n naq nyfb cebivqr svyranzrf") tvg.purpx_ercb_be_qvr() vs rkgen: qb_zvqk(tvg.ercb('bowrpgf/cnpx'), bcg.bhgchg, rkgen) ryvs bcg.nhgb be bcg.sbepr: cnguf = [tvg.ercb('bowrpgf/cnpx')] cnguf += tybo.tybo(tvg.ercb('vaqrk-pnpur/*/.')) sbe cngu va cnguf: ybt('zvqk: fpnaavat %f\a' % cngu) vs bcg.sbepr: qb_zvqk(cngu, bcg.bhgchg, tybo.tybo('%f/*.vqk' % cngu)) ryvs bcg.nhgb: z = tvg.CnpxVqkYvfg(cngu) arrqrq = {} sbe cnpx va z.cnpxf: # bayl .vqk svyrf jvgubhg n .zvqk ner bcra vs cnpx.anzr.raqfjvgu('.vqk'): arrqrq[cnpx.anzr] = 1 qry z qb_zvqk(cngu, bcg.bhgchg, arrqrq.xrlf()) ybt('\a') ryfr: b.sngny("lbh zhfg hfr -s be -n be cebivqr vachg svyranzrf") #!/hfe/ova/rai clguba vzcbeg flf, bf, enaqbz sebz ohc vzcbeg bcgvbaf sebz ohc.urycref vzcbeg * qrs enaqoybpx(a): y = [] sbe v va kenatr(a): y.nccraq(pue(enaqbz.enaqenatr(0,256))) erghea ''.wbva(y) bcgfcrp = """ ohc qnzntr [-a pbhag] [-f znkfvmr] [-F frrq] -- JNEAVAT: GUVF PBZZNAQ VF RKGERZRYL QNATREBHF a,ahz= ahzore bs oybpxf gb qnzntr f,fvmr= znkvzhz fvmr bs rnpu qnzntrq oybpx creprag= znkvzhz fvmr bs rnpu qnzntrq oybpx (nf n creprag bs ragver svyr) rdhny fcernq qnzntr rirayl guebhtubhg gur svyr F,frrq= enaqbz ahzore frrq (sbe ercrngnoyr grfgf) """ b = bcgvbaf.Bcgvbaf('ohc qnzntr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg rkgen: b.sngny('svyranzrf rkcrpgrq') vs bcg.frrq != Abar: enaqbz.frrq(bcg.frrq) sbe anzr va rkgen: ybt('Qnzntvat "%f"...\a' % anzr) s = bcra(anzr, 'e+o') fg = bf.sfgng(s.svyrab()) fvmr = fg.fg_fvmr vs bcg.creprag be bcg.fvmr: zf1 = vag(sybng(bcg.creprag be 0)/100.0*fvmr) be fvmr zf2 = bcg.fvmr be fvmr znkfvmr = zva(zf1, zf2) ryfr: znkfvmr = 1 puhaxf = bcg.ahz be 10 
puhaxfvmr = fvmr/puhaxf sbe e va enatr(puhaxf): fm = enaqbz.enaqenatr(1, znkfvmr+1) vs fm > fvmr: fm = fvmr vs bcg.rdhny: bsf = e*puhaxfvmr ryfr: bsf = enaqbz.enaqenatr(0, fvmr - fm + 1) ybt(' %6q olgrf ng %q\a' % (fm, bsf)) s.frrx(bsf) s.jevgr(enaqoybpx(fm)) s.pybfr() #!/hfe/ova/rai clguba vzcbeg flf, fgehpg, zznc sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * fhfcraqrq_j = Abar qrs vavg_qve(pbaa, net): tvg.vavg_ercb(net) ybt('ohc freire: ohcqve vavgvnyvmrq: %e\a' % tvg.ercbqve) pbaa.bx() qrs frg_qve(pbaa, net): tvg.purpx_ercb_be_qvr(net) ybt('ohc freire: ohcqve vf %e\a' % tvg.ercbqve) pbaa.bx() qrs yvfg_vaqrkrf(pbaa, whax): tvg.purpx_ercb_be_qvr() sbe s va bf.yvfgqve(tvg.ercb('bowrpgf/cnpx')): vs s.raqfjvgu('.vqk'): pbaa.jevgr('%f\a' % s) pbaa.bx() qrs fraq_vaqrk(pbaa, anzr): tvg.purpx_ercb_be_qvr() nffreg(anzr.svaq('/') < 0) nffreg(anzr.raqfjvgu('.vqk')) vqk = tvg.CnpxVqk(tvg.ercb('bowrpgf/cnpx/%f' % anzr)) pbaa.jevgr(fgehpg.cnpx('!V', yra(vqk.znc))) pbaa.jevgr(vqk.znc) pbaa.bx() qrs erprvir_bowrpgf(pbaa, whax): tybony fhfcraqrq_j tvg.purpx_ercb_be_qvr() fhttrfgrq = {} vs fhfcraqrq_j: j = fhfcraqrq_j fhfcraqrq_j = Abar ryfr: j = tvg.CnpxJevgre() juvyr 1: af = pbaa.ernq(4) vs abg af: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq yratgu urnqre, tbg RBS\a') a = fgehpg.hacnpx('!V', af)[0] #ybt('rkcrpgvat %q olgrf\a' % a) vs abg a: ybt('ohc freire: erprvirq %q bowrpg%f.\a' % (j.pbhag, j.pbhag!=1 naq "f" be '')) shyycngu = j.pybfr() vs shyycngu: (qve, anzr) = bf.cngu.fcyvg(shyycngu) pbaa.jevgr('%f.vqk\a' % anzr) pbaa.bx() erghea ryvs a == 0kssssssss: ybt('ohc freire: erprvir-bowrpgf fhfcraqrq.\a') fhfcraqrq_j = j pbaa.bx() erghea ohs = pbaa.ernq(a) # bowrpg fvmrf va ohc ner ernfbanoyl fznyy #ybt('ernq %q olgrf\a' % a) vs yra(ohs) < a: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq %q olgrf, tbg %q\a' % (a, yra(ohs))) (glcr, pbagrag) = tvg._qrpbqr_cnpxbow(ohs) fun = tvg.pnyp_unfu(glcr, pbagrag) byqcnpx = j.rkvfgf(fun) # SVKZR: jr bayl fhttrfg n fvatyr 
vaqrk cre plpyr, orpnhfr gur pyvrag # vf pheeragyl qhzo gb qbjaybnq zber guna bar cre plpyr naljnl. # Npghnyyl jr fubhyq svk gur pyvrag, ohg guvf vf n zvabe bcgvzvmngvba # ba gur freire fvqr. vs abg fhttrfgrq naq \ byqcnpx naq (byqcnpx == Gehr be byqcnpx.raqfjvgu('.zvqk')): # SVKZR: jr fubhyqa'g ernyyl unir gb xabj nobhg zvqk svyrf # ng guvf ynlre. Ohg rkvfgf() ba n zvqk qbrfa'g erghea gur # cnpxanzr (fvapr vg qbrfa'g xabj)... cebonoyl jr fubhyq whfg # svk gung qrsvpvrapl bs zvqk svyrf riraghnyyl, nygubhtu vg'yy # znxr gur svyrf ovttre. Guvf zrgubq vf pregnvayl abg irel # rssvpvrag. j.bowpnpur.erserfu(fxvc_zvqk = Gehr) byqcnpx = j.bowpnpur.rkvfgf(fun) ybt('arj fhttrfgvba: %e\a' % byqcnpx) nffreg(byqcnpx) nffreg(byqcnpx != Gehr) nffreg(abg byqcnpx.raqfjvgu('.zvqk')) j.bowpnpur.erserfu(fxvc_zvqk = Snyfr) vs abg fhttrfgrq naq byqcnpx: nffreg(byqcnpx.raqfjvgu('.vqk')) (qve,anzr) = bf.cngu.fcyvg(byqcnpx) vs abg (anzr va fhttrfgrq): ybt("ohc freire: fhttrfgvat vaqrk %f\a" % anzr) pbaa.jevgr('vaqrk %f\a' % anzr) fhttrfgrq[anzr] = 1 ryfr: j._enj_jevgr([ohs]) # ABGERNPURQ qrs ernq_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() e = tvg.ernq_ers(ersanzr) pbaa.jevgr('%f\a' % (e be '').rapbqr('urk')) pbaa.bx() qrs hcqngr_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() arjiny = pbaa.ernqyvar().fgevc() byqiny = pbaa.ernqyvar().fgevc() tvg.hcqngr_ers(ersanzr, arjiny.qrpbqr('urk'), byqiny.qrpbqr('urk')) pbaa.bx() qrs png(pbaa, vq): tvg.purpx_ercb_be_qvr() gel: sbe oybo va tvg.png(vq): pbaa.jevgr(fgehpg.cnpx('!V', yra(oybo))) pbaa.jevgr(oybo) rkprcg XrlReebe, r: ybt('freire: reebe: %f\a' % r) pbaa.jevgr('\0\0\0\0') pbaa.reebe(r) ryfr: pbaa.jevgr('\0\0\0\0') pbaa.bx() bcgfcrp = """ ohc freire """ b = bcgvbaf.Bcgvbaf('ohc freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') ybt('ohc freire: ernqvat sebz fgqva.\a') pbzznaqf = { 'vavg-qve': vavg_qve, 'frg-qve': frg_qve, 'yvfg-vaqrkrf': yvfg_vaqrkrf, 'fraq-vaqrk': fraq_vaqrk, 
'erprvir-bowrpgf': erprvir_bowrpgf, 'ernq-ers': ernq_ers, 'hcqngr-ers': hcqngr_ers, 'png': png, } # SVKZR: guvf cebgbpby vf gbgnyyl ynzr naq abg ng nyy shgher-cebbs. # (Rfcrpvnyyl fvapr jr nobeg pbzcyrgryl nf fbba nf *nalguvat* onq unccraf) pbaa = Pbaa(flf.fgqva, flf.fgqbhg) ye = yvarernqre(pbaa) sbe _yvar va ye: yvar = _yvar.fgevc() vs abg yvar: pbagvahr ybt('ohc freire: pbzznaq: %e\a' % yvar) jbeqf = yvar.fcyvg(' ', 1) pzq = jbeqf[0] erfg = yra(jbeqf)>1 naq jbeqf[1] be '' vs pzq == 'dhvg': oernx ryfr: pzq = pbzznaqf.trg(pzq) vs pzq: pzq(pbaa, erfg) ryfr: envfr Rkprcgvba('haxabja freire pbzznaq: %e\a' % yvar) ybt('ohc freire: qbar\a') #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc wbva [-e ubfg:cngu] [ersf be unfurf...] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc wbva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg rkgen: rkgen = yvarernqre(flf.fgqva) erg = 0 vs bcg.erzbgr: pyv = pyvrag.Pyvrag(bcg.erzbgr) png = pyv.png ryfr: pc = tvg.PngCvcr() png = pc.wbva sbe vq va rkgen: gel: sbe oybo va png(vq): flf.fgqbhg.jevgr(oybo) rkprcg XrlReebe, r: flf.fgqbhg.syhfu() ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, er, reeab, fgng, gvzr, zngu sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, vaqrk, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc fnir [-gp] [-a anzr] -- e,erzbgr= erzbgr ercbfvgbel cngu g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) d,dhvrg qba'g fubj cebterff zrgre fznyyre= bayl onpx hc svyrf fznyyre guna a olgrf """ b = bcgvbaf.Bcgvbaf('ohc fnir', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny("hfr bar be zber bs -g, -p, -a") vs abg rkgen: b.sngny("ab svyranzrf 
tvira") bcg.cebterff = (vfggl naq abg bcg.dhvrg) bcg.fznyyre = cnefr_ahz(bcg.fznyyre be 0) vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() unaqyr_pgey_p() qrs rngfynfu(qve): vs qve.raqfjvgu('/'): erghea qve[:-1] ryfr: erghea qve cnegf = [''] funyvfgf = [[]] qrs _chfu(cneg): nffreg(cneg) cnegf.nccraq(cneg) funyvfgf.nccraq([]) qrs _cbc(sbepr_gerr): nffreg(yra(cnegf) >= 1) cneg = cnegf.cbc() funyvfg = funyvfgf.cbc() gerr = sbepr_gerr be j.arj_gerr(funyvfg) vs funyvfgf: funyvfgf[-1].nccraq(('40000', cneg, gerr)) ryfr: # guvf jnf gur gbcyriry, fb chg vg onpx sbe fnavgl funyvfgf.nccraq(funyvfg) erghea gerr ynfgerznva = Abar qrs cebterff_ercbeg(a): tybony pbhag, fhopbhag, ynfgerznva fhopbhag += a pp = pbhag + fhopbhag cpg = gbgny naq (pp*100.0/gbgny) be 0 abj = gvzr.gvzr() ryncfrq = abj - gfgneg xcf = ryncfrq naq vag(pp/1024./ryncfrq) xcf_senp = 10 ** vag(zngu.ybt(xcf+1, 10) - 1) xcf = vag(xcf/xcf_senp)*xcf_senp vs pp: erznva = ryncfrq*1.0/pp * (gbgny-pp) ryfr: erznva = 0.0 vs (ynfgerznva naq (erznva > ynfgerznva) naq ((erznva - ynfgerznva)/ynfgerznva < 0.05)): erznva = ynfgerznva ryfr: ynfgerznva = erznva ubhef = vag(erznva/60/60) zvaf = vag(erznva/60 - ubhef*60) frpf = vag(erznva - ubhef*60*60 - zvaf*60) vs ryncfrq < 30: erznvafge = '' xcffge = '' ryfr: xcffge = '%qx/f' % xcf vs ubhef: erznvafge = '%qu%qz' % (ubhef, zvaf) ryvs zvaf: erznvafge = '%qz%q' % (zvaf, frpf) ryfr: erznvafge = '%qf' % frpf cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf) %f %f\e' % (cpg, pp/1024, gbgny/1024, spbhag, sgbgny, erznvafge, xcffge)) e = vaqrk.Ernqre(tvg.ercb('ohcvaqrk')) qrs nyernql_fnirq(rag): erghea rag.vf_inyvq() naq 
j.rkvfgf(rag.fun) naq rag.fun qrs jnagerphefr_cer(rag): erghea abg nyernql_fnirq(rag) qrs jnagerphefr_qhevat(rag): erghea abg nyernql_fnirq(rag) be rag.fun_zvffvat() gbgny = sgbgny = 0 vs bcg.cebterff: sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_cer): vs abg (sgbgny % 10024): cebterff('Ernqvat vaqrk: %q\e' % sgbgny) rkvfgf = rag.rkvfgf() unfuinyvq = nyernql_fnirq(rag) rag.frg_fun_zvffvat(abg unfuinyvq) vs abg bcg.fznyyre be rag.fvmr < bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: gbgny += rag.fvmr sgbgny += 1 cebterff('Ernqvat vaqrk: %q, qbar.\a' % sgbgny) unfufcyvg.cebterff_pnyyonpx = cebterff_ercbeg gfgneg = gvzr.gvzr() pbhag = fhopbhag = spbhag = 0 ynfgfxvc_anzr = Abar ynfgqve = '' sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_qhevat): (qve, svyr) = bf.cngu.fcyvg(rag.anzr) rkvfgf = (rag.syntf & vaqrk.VK_RKVFGF) unfuinyvq = nyernql_fnirq(rag) jnfzvffvat = rag.fun_zvffvat() byqfvmr = rag.fvmr vs bcg.ireobfr: vs abg rkvfgf: fgnghf = 'Q' ryvs abg unfuinyvq: vs rag.fun == vaqrk.RZCGL_FUN: fgnghf = 'N' ryfr: fgnghf = 'Z' ryfr: fgnghf = ' ' vs bcg.ireobfr >= 2: ybt('%f %-70f\a' % (fgnghf, rag.anzr)) ryvs abg fgng.F_VFQVE(rag.zbqr) naq ynfgqve != qve: vs abg ynfgqve.fgnegfjvgu(qve): ybt('%f %-70f\a' % (fgnghf, bf.cngu.wbva(qve, ''))) ynfgqve = qve vs bcg.cebterff: cebterff_ercbeg(0) spbhag += 1 vs abg rkvfgf: pbagvahr vs bcg.fznyyre naq rag.fvmr >= bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: nqq_reebe('fxvccvat ynetr svyr "%f"' % rag.anzr) ynfgfxvc_anzr = rag.anzr pbagvahr nffreg(qve.fgnegfjvgu('/')) qvec = qve.fcyvg('/') juvyr cnegf > qvec: _cbc(sbepr_gerr = Abar) vs qve != '/': sbe cneg va qvec[yra(cnegf):]: _chfu(cneg) vs abg svyr: # ab svyranzr cbegvba zrnaf guvf vf n fhoqve. Ohg # fho/cneragqverpgbevrf nyernql unaqyrq va gur cbc/chfu() cneg nobir. 
byqgerr = nyernql_fnirq(rag) # znl or Abar arjgerr = _cbc(sbepr_gerr = byqgerr) vs abg byqgerr: vs ynfgfxvc_anzr naq ynfgfxvc_anzr.fgnegfjvgu(rag.anzr): rag.vainyvqngr() ryfr: rag.inyvqngr(040000, arjgerr) rag.ercnpx() vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr pbagvahr # vg'f abg n qverpgbel vq = Abar vs unfuinyvq: zbqr = '%b' % rag.tvgzbqr vq = rag.fun funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) ryfr: vs fgng.F_VFERT(rag.zbqr): gel: s = unfufcyvg.bcra_abngvzr(rag.anzr) rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = unfufcyvg.fcyvg_gb_oybo_be_gerr(j, [s]) ryfr: vs fgng.F_VFQVE(rag.zbqr): nffreg(0) # unaqyrq nobir ryvs fgng.F_VFYAX(rag.zbqr): gel: ey = bf.ernqyvax(rag.anzr) rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = ('120000', j.arj_oybo(ey)) ryfr: nqq_reebe(Rkprcgvba('fxvccvat fcrpvny svyr "%f"' % rag.anzr)) ynfgfxvc_anzr = rag.anzr vs vq: rag.inyvqngr(vag(zbqr, 8), vq) rag.ercnpx() funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr fhopbhag = 0 vs bcg.cebterff: cpg = gbgny naq pbhag*100.0/gbgny be 100 cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf), qbar. 
\a' % (cpg, pbhag/1024, gbgny/1024, spbhag, sgbgny)) juvyr yra(cnegf) > 1: _cbc(sbepr_gerr = Abar) nffreg(yra(funyvfgf) == 1) gerr = j.arj_gerr(funyvfgf[-1]) vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fnir\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq juvyr fnivat.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc gvpx """ b = bcgvbaf.Bcgvbaf('ohc gvpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") g = gvzr.gvzr() gyrsg = 1 - (g - vag(g)) gvzr.fyrrc(gyrsg) #!/hfe/ova/rai clguba vzcbeg bf, flf, fgng, gvzr sebz ohc vzcbeg bcgvbaf, tvg, vaqrk, qerphefr sebz ohc.urycref vzcbeg * qrs zretr_vaqrkrf(bhg, e1, e2): sbe r va vaqrk.ZretrVgre([e1, e2]): # SVKZR: fubhyqa'g jr erzbir qryrgrq ragevrf riraghnyyl? Jura? 
bhg.nqq_vkragel(r) pynff VgreUrycre: qrs __vavg__(frys, y): frys.v = vgre(y) frys.phe = Abar frys.arkg() qrs arkg(frys): gel: frys.phe = frys.v.arkg() rkprcg FgbcVgrengvba: frys.phe = Abar erghea frys.phe qrs purpx_vaqrk(ernqre): gel: ybt('purpx: purpxvat sbejneq vgrengvba...\a') r = Abar q = {} sbe r va ernqre.sbejneq_vgre(): vs r.puvyqera_a: vs bcg.ireobfr: ybt('%08k+%-4q %e\a' % (r.puvyqera_bsf, r.puvyqera_a, r.anzr)) nffreg(r.puvyqera_bsf) nffreg(r.anzr.raqfjvgu('/')) nffreg(abg q.trg(r.puvyqera_bsf)) q[r.puvyqera_bsf] = 1 vs r.syntf & vaqrk.VK_UNFUINYVQ: nffreg(r.fun != vaqrk.RZCGL_FUN) nffreg(r.tvgzbqr) nffreg(abg r be r.anzr == '/') # ynfg ragel vf *nyjnlf* / ybt('purpx: purpxvat abezny vgrengvba...\a') ynfg = Abar sbe r va ernqre: vs ynfg: nffreg(ynfg > r.anzr) ynfg = r.anzr rkprcg: ybt('vaqrk reebe! ng %e\a' % r) envfr ybt('purpx: cnffrq.\a') qrs hcqngr_vaqrk(gbc): ev = vaqrk.Ernqre(vaqrksvyr) jv = vaqrk.Jevgre(vaqrksvyr) evt = VgreUrycre(ev.vgre(anzr=gbc)) gfgneg = vag(gvzr.gvzr()) unfutra = Abar vs bcg.snxr_inyvq: qrs unfutra(anzr): erghea (0100644, vaqrk.SNXR_FUN) gbgny = 0 sbe (cngu,cfg) va qerphefr.erphefvir_qveyvfg([gbc], kqri=bcg.kqri): vs bcg.ireobfr>=2 be (bcg.ireobfr==1 naq fgng.F_VFQVE(cfg.fg_zbqr)): flf.fgqbhg.jevgr('%f\a' % cngu) flf.fgqbhg.syhfu() cebterff('Vaqrkvat: %q\e' % gbgny) ryvs abg (gbgny % 128): cebterff('Vaqrkvat: %q\e' % gbgny) gbgny += 1 juvyr evt.phe naq evt.phe.anzr > cngu: # qryrgrq cnguf vs evt.phe.rkvfgf(): evt.phe.frg_qryrgrq() evt.phe.ercnpx() evt.arkg() vs evt.phe naq evt.phe.anzr == cngu: # cnguf gung nyernql rkvfgrq vs cfg: evt.phe.sebz_fgng(cfg, gfgneg) vs abg (evt.phe.syntf & vaqrk.VK_UNFUINYVQ): vs unfutra: (evt.phe.tvgzbqr, evt.phe.fun) = unfutra(cngu) evt.phe.syntf |= vaqrk.VK_UNFUINYVQ vs bcg.snxr_vainyvq: evt.phe.vainyvqngr() evt.phe.ercnpx() evt.arkg() ryfr: # arj cnguf jv.nqq(cngu, cfg, unfutra = unfutra) cebterff('Vaqrkvat: %q, qbar.\a' % gbgny) vs ev.rkvfgf(): ev.fnir() jv.syhfu() vs jv.pbhag: je = 
jv.arj_ernqre() vs bcg.purpx: ybt('purpx: orsber zretvat: byqsvyr\a') purpx_vaqrk(ev) ybt('purpx: orsber zretvat: arjsvyr\a') purpx_vaqrk(je) zv = vaqrk.Jevgre(vaqrksvyr) zretr_vaqrkrf(zv, ev, je) ev.pybfr() zv.pybfr() je.pybfr() jv.nobeg() ryfr: jv.pybfr() bcgfcrp = """ ohc vaqrk <-c|z|h> [bcgvbaf...] -- c,cevag cevag gur vaqrk ragevrf sbe gur tvira anzrf (nyfb jbexf jvgu -h) z,zbqvsvrq cevag bayl nqqrq/qryrgrq/zbqvsvrq svyrf (vzcyvrf -c) f,fgnghf cevag rnpu svyranzr jvgu n fgnghf pune (N/Z/Q) (vzcyvrf -c) U,unfu cevag gur unfu sbe rnpu bowrpg arkg gb vgf anzr (vzcyvrf -c) y,ybat cevag zber vasbezngvba nobhg rnpu svyr h,hcqngr (erphefviryl) hcqngr gur vaqrk ragevrf sbe gur tvira svyranzrf k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf snxr-inyvq znex nyy vaqrk ragevrf nf hc-gb-qngr rira vs gurl nera'g snxr-vainyvq znex nyy vaqrk ragevrf nf vainyvq purpx pnershyyl purpx vaqrk svyr vagrtevgl s,vaqrksvyr= gur anzr bs gur vaqrk svyr (qrsnhyg 'vaqrk') i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) """ b = bcgvbaf.Bcgvbaf('ohc vaqrk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg (bcg.zbqvsvrq be bcg['cevag'] be bcg.fgnghf be bcg.hcqngr be bcg.purpx): b.sngny('fhccyl bar be zber bs -c, -f, -z, -h, be --purpx') vs (bcg.snxr_inyvq be bcg.snxr_vainyvq) naq abg bcg.hcqngr: b.sngny('--snxr-{va,}inyvq ner zrnavatyrff jvgubhg -h') vs bcg.snxr_inyvq naq bcg.snxr_vainyvq: b.sngny('--snxr-inyvq vf vapbzcngvoyr jvgu --snxr-vainyvq') tvg.purpx_ercb_be_qvr() vaqrksvyr = bcg.vaqrksvyr be tvg.ercb('ohcvaqrk') unaqyr_pgey_p() vs bcg.purpx: ybt('purpx: fgnegvat vavgvny purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) cnguf = vaqrk.erqhpr_cnguf(rkgen) vs bcg.hcqngr: vs abg cnguf: b.sngny('hcqngr (-h) erdhrfgrq ohg ab cnguf tvira') sbe (ec,cngu) va cnguf: hcqngr_vaqrk(ec) vs bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq: sbe (anzr, rag) va vaqrk.Ernqre(vaqrksvyr).svygre(rkgen be ['']): vs (bcg.zbqvsvrq naq (rag.vf_inyvq() be rag.vf_qryrgrq() be abg 
rag.zbqr)): pbagvahr yvar = '' vs bcg.fgnghf: vs rag.vf_qryrgrq(): yvar += 'Q ' ryvs abg rag.vf_inyvq(): vs rag.fun == vaqrk.RZCGL_FUN: yvar += 'N ' ryfr: yvar += 'Z ' ryfr: yvar += ' ' vs bcg.unfu: yvar += rag.fun.rapbqr('urk') + ' ' vs bcg.ybat: yvar += "%7f %7f " % (bpg(rag.zbqr), bpg(rag.tvgzbqr)) cevag yvar + (anzr be './') vs bcg.purpx naq (bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq be bcg.hcqngr): ybt('purpx: fgnegvat svany purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg sebz ohc vzcbeg bcgvbaf, urycref bcgfcrp = """ ohc eonpxhc-freire -- Guvf pbzznaq vf abg vagraqrq gb or eha znahnyyl. """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc-freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') # trg gur fhopbzznaq'f neti. # Abeznyyl jr pbhyq whfg cnff guvf ba gur pbzznaq yvar, ohg fvapr jr'yy bsgra # or trggvat pnyyrq ba gur bgure raq bs na ffu cvcr, juvpu graqf gb znatyr # neti (ol fraqvat vg ivn gur furyy), guvf jnl vf zhpu fnsre. ohs = flf.fgqva.ernq(4) fm = fgehpg.hacnpx('!V', ohs)[0] nffreg(fm > 0) nffreg(fm < 1000000) ohs = flf.fgqva.ernq(fm) nffreg(yra(ohs) == fm) neti = ohs.fcyvg('\0') # fgqva/fgqbhg ner fhccbfrqyl pbaarpgrq gb 'ohc freire' gung gur pnyyre # fgnegrq sbe hf (bsgra ba gur bgure raq bs na ffu ghaary), fb jr qba'g jnag # gb zvfhfr gurz. Zbir gurz bhg bs gur jnl, gura ercynpr fgqbhg jvgu # n cbvagre gb fgqree va pnfr bhe fhopbzznaq jnagf gb qb fbzrguvat jvgu vg. # # Vg zvtug or avpr gb qb gur fnzr jvgu fgqva, ohg zl rkcrevzragf fubjrq gung # ffu frrzf gb znxr vgf puvyq'f fgqree n ernqnoyr-ohg-arire-ernqf-nalguvat # fbpxrg. Gurl ernyyl fubhyq unir hfrq fuhgqbja(FUHG_JE) ba gur bgure raq # bs vg, ohg cebonoyl qvqa'g. Naljnl, vg'f gbb zrffl, fb yrg'f whfg znxr fher # nalbar ernqvat sebz fgqva vf qvfnccbvagrq. 
# # (Lbh pna'g whfg yrnir fgqva/fgqbhg "abg bcra" ol pybfvat gur svyr # qrfpevcgbef. Gura gur arkg svyr gung bcraf vf nhgbzngvpnyyl nffvtarq 0 be 1, # naq crbcyr *gelvat* gb ernq/jevgr fgqva/fgqbhg trg fperjrq.) bf.qhc2(0, 3) bf.qhc2(1, 4) bf.qhc2(2, 1) sq = bf.bcra('/qri/ahyy', bf.B_EQBAYL) bf.qhc2(sq, 0) bf.pybfr(sq) bf.raiveba['OHC_FREIRE_ERIREFR'] = urycref.ubfganzr() bf.rkrpic(neti[0], neti) flf.rkvg(99) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo, fhocebprff, gvzr sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * cne2_bx = 0 ahyys = bcra('/qri/ahyy') qrs qroht(f): vs bcg.ireobfr: ybt(f) qrs eha(neti): # ng yrnfg va clguba 2.5, hfvat "fgqbhg=2" be "fgqbhg=flf.fgqree" orybj # qbrfa'g npghnyyl jbex, orpnhfr fhocebprff pybfrf sq #2 evtug orsber # rkrpvat sbe fbzr ernfba. Fb jr jbex nebhaq vg ol qhcyvpngvat gur sq # svefg. sq = bf.qhc(2) # pbcl fgqree gel: c = fhocebprff.Cbcra(neti, fgqbhg=sq, pybfr_sqf=Snyfr) erghea c.jnvg() svanyyl: bf.pybfr(sq) qrs cne2_frghc(): tybony cne2_bx ei = 1 gel: c = fhocebprff.Cbcra(['cne2', '--uryc'], fgqbhg=ahyys, fgqree=ahyys, fgqva=ahyys) ei = c.jnvg() rkprcg BFReebe: ybt('sfpx: jneavat: cne2 abg sbhaq; qvfnoyvat erpbirel srngherf.\a') ryfr: cne2_bx = 1 qrs cnei(yiy): vs bcg.ireobfr >= yiy: vs vfggl: erghea [] ryfr: erghea ['-d'] ryfr: erghea ['-dd'] qrs cne2_trarengr(onfr): erghea eha(['cne2', 'perngr', '-a1', '-p200'] + cnei(2) + ['--', onfr, onfr+'.cnpx', onfr+'.vqk']) qrs cne2_irevsl(onfr): erghea eha(['cne2', 'irevsl'] + cnei(3) + ['--', onfr]) qrs cne2_ercnve(onfr): erghea eha(['cne2', 'ercnve'] + cnei(2) + ['--', onfr]) qrs dhvpx_irevsl(onfr): s = bcra(onfr + '.cnpx', 'eo') s.frrx(-20, 2) jnagfhz = s.ernq(20) nffreg(yra(jnagfhz) == 20) s.frrx(0) fhz = Fun1() sbe o va puhaxlernqre(s, bf.sfgng(s.svyrab()).fg_fvmr - 20): fhz.hcqngr(o) vs fhz.qvtrfg() != jnagfhz: envfr InyhrReebe('rkcrpgrq %e, tbg %e' % (jnagfhz.rapbqr('urk'), fhz.urkqvtrfg())) qrs tvg_irevsl(onfr): vs bcg.dhvpx: gel: dhvpx_irevsl(onfr) rkprcg Rkprcgvba, r: 
qroht('reebe: %f\a' % r) erghea 1 erghea 0 ryfr: erghea eha(['tvg', 'irevsl-cnpx', '--', onfr]) qrs qb_cnpx(onfr, ynfg): pbqr = 0 vs cne2_bx naq cne2_rkvfgf naq (bcg.ercnve be abg bcg.trarengr): ierfhyg = cne2_irevsl(onfr) vs ierfhyg != 0: vs bcg.ercnve: eerfhyg = cne2_ercnve(onfr) vs eerfhyg != 0: cevag '%f cne2 ercnve: snvyrq (%q)' % (ynfg, eerfhyg) pbqr = eerfhyg ryfr: cevag '%f cne2 ercnve: fhpprrqrq (0)' % ynfg pbqr = 100 ryfr: cevag '%f cne2 irevsl: snvyrq (%q)' % (ynfg, ierfhyg) pbqr = ierfhyg ryfr: cevag '%f bx' % ynfg ryvs abg bcg.trarengr be (cne2_bx naq abg cne2_rkvfgf): terfhyg = tvg_irevsl(onfr) vs terfhyg != 0: cevag '%f tvg irevsl: snvyrq (%q)' % (ynfg, terfhyg) pbqr = terfhyg ryfr: vs cne2_bx naq bcg.trarengr: cerfhyg = cne2_trarengr(onfr) vs cerfhyg != 0: cevag '%f cne2 perngr: snvyrq (%q)' % (ynfg, cerfhyg) pbqr = cerfhyg ryfr: cevag '%f bx' % ynfg ryfr: cevag '%f bx' % ynfg ryfr: nffreg(bcg.trarengr naq (abg cne2_bx be cne2_rkvfgf)) qroht(' fxvccrq: cne2 svyr nyernql trarengrq.\a') erghea pbqr bcgfcrp = """ ohc sfpx [bcgvbaf...] [svyranzrf...] -- e,ercnve nggrzcg gb ercnve reebef hfvat cne2 (qnatrebhf!) 
t,trarengr trarengr nhgb-ercnve vasbezngvba hfvat cne2 i,ireobfr vapernfr ireobfvgl (pna or hfrq zber guna bapr) dhvpx whfg purpx cnpx fun1fhz, qba'g hfr tvg irevsl-cnpx w,wbof= eha 'a' wbof va cnenyyry cne2-bx vzzrqvngryl erghea 0 vs cne2 vf bx, 1 vs abg qvfnoyr-cne2 vtaber cne2 rira vs vg vf ninvynoyr """ b = bcgvbaf.Bcgvbaf('ohc sfpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) cne2_frghc() vs bcg.cne2_bx: vs cne2_bx: flf.rkvg(0) # 'gehr' va fu ryfr: flf.rkvg(1) vs bcg.qvfnoyr_cne2: cne2_bx = 0 tvg.purpx_ercb_be_qvr() vs abg rkgen: qroht('sfpx: Ab svyranzrf tvira: purpxvat nyy cnpxf.\a') rkgen = tybo.tybo(tvg.ercb('bowrpgf/cnpx/*.cnpx')) pbqr = 0 pbhag = 0 bhgfgnaqvat = {} sbe anzr va rkgen: vs anzr.raqfjvgu('.cnpx'): onfr = anzr[:-5] ryvs anzr.raqfjvgu('.vqk'): onfr = anzr[:-4] ryvs anzr.raqfjvgu('.cne2'): onfr = anzr[:-5] ryvs bf.cngu.rkvfgf(anzr + '.cnpx'): onfr = anzr ryfr: envfr Rkprcgvba('%f vf abg n cnpx svyr!' % anzr) (qve,ynfg) = bf.cngu.fcyvg(onfr) cne2_rkvfgf = bf.cngu.rkvfgf(onfr + '.cne2') vs cne2_rkvfgf naq bf.fgng(onfr + '.cne2').fg_fvmr == 0: cne2_rkvfgf = 0 flf.fgqbhg.syhfu() qroht('sfpx: purpxvat %f (%f)\a' % (ynfg, cne2_bx naq cne2_rkvfgf naq 'cne2' be 'tvg')) vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.wbof: ap = qb_cnpx(onfr, ynfg) pbqr = pbqr be ap pbhag += 1 ryfr: juvyr yra(bhgfgnaqvat) >= bcg.wbof: (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 cvq = bf.sbex() vs cvq: # cnerag bhgfgnaqvat[cvq] = 1 ryfr: # puvyq gel: flf.rkvg(qb_cnpx(onfr, ynfg)) rkprcg Rkprcgvba, r: ybt('rkprcgvba: %e\a' % r) flf.rkvg(99) juvyr yra(bhgfgnaqvat): (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.ireobfr naq vfggl: ybt('sfpx qbar. 
\a') flf.rkvg(pbqr) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg, trgbcg, fhocebprff, fvtany sebz ohc vzcbeg bcgvbaf, ffu sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc eonpxhc vaqrk ... ohc eonpxhc fnir ... ohc eonpxhc fcyvg ... """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc', bcgfcrp, bcgshap=trgbcg.trgbcg) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) < 2: b.sngny('nethzragf rkcrpgrq') pynff FvtRkprcgvba(Rkprcgvba): qrs __vavg__(frys, fvtahz): frys.fvtahz = fvtahz Rkprcgvba.__vavg__(frys, 'fvtany %q erprvirq' % fvtahz) qrs unaqyre(fvtahz, senzr): envfr FvtRkprcgvba(fvtahz) fvtany.fvtany(fvtany.FVTGREZ, unaqyre) fvtany.fvtany(fvtany.FVTVAG, unaqyre) fc = Abar c = Abar erg = 99 gel: ubfganzr = rkgen[0] neti = rkgen[1:] c = ffu.pbaarpg(ubfganzr, 'eonpxhc-freire') netif = '\0'.wbva(['ohc'] + neti) c.fgqva.jevgr(fgehpg.cnpx('!V', yra(netif)) + netif) c.fgqva.syhfu() znva_rkr = bf.raiveba.trg('OHC_ZNVA_RKR') be flf.neti[0] fc = fhocebprff.Cbcra([znva_rkr, 'freire'], fgqva=c.fgqbhg, fgqbhg=c.fgqva) c.fgqva.pybfr() c.fgqbhg.pybfr() svanyyl: juvyr 1: # vs jr trg n fvtany juvyr jnvgvat, jr unir gb xrrc jnvgvat, whfg # va pnfr bhe puvyq qbrfa'g qvr. 
gel: erg = c.jnvg() fc.jnvg() oernx rkprcg FvtRkprcgvba, r: ybt('\aohc eonpxhc: %f\a' % r) bf.xvyy(c.cvq, r.fvtahz) erg = 84 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc arjyvare """ b = bcgvbaf.Bcgvbaf('ohc arjyvare', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") e = er.pbzcvyr(e'([\e\a])') ynfgyra = 0 nyy = '' juvyr 1: y = e.fcyvg(nyy, 1) vs yra(y) <= 1: gel: o = bf.ernq(flf.fgqva.svyrab(), 4096) rkprcg XrlobneqVagreehcg: oernx vs abg o: oernx nyy += o ryfr: nffreg(yra(y) == 3) (yvar, fcyvgpune, nyy) = y #fcyvgpune = '\a' flf.fgqbhg.jevgr('%-*f%f' % (ynfgyra, yvar, fcyvgpune)) vs fcyvgpune == '\e': ynfgyra = yra(yvar) ryfr: ynfgyra = 0 flf.fgqbhg.syhfu() vs ynfgyra be nyy: flf.fgqbhg.jevgr('%-*f\a' % (ynfgyra, nyy)) #!/hfe/ova/rai clguba vzcbeg flf sebz ohc vzcbeg bcgvbaf, tvg, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc znetva """ b = bcgvbaf.Bcgvbaf('ohc znetva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") tvg.purpx_ercb_be_qvr() #tvg.vtaber_zvqk = 1 zv = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) ynfg = '\0'*20 ybatzngpu = 0 sbe v va zv: vs v == ynfg: pbagvahr #nffreg(fge(v) >= ynfg) cz = _unfufcyvg.ovgzngpu(ynfg, v) ybatzngpu = znk(ybatzngpu, cz) ynfg = v cevag ybatzngpu #!/hfe/ova/rai clguba sebz ohc vzcbeg bcgvbaf, qerphefr sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc qerphefr -- k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf d,dhvrg qba'g npghnyyl cevag svyranzrf cebsvyr eha haqre gur clguba cebsvyre """ b = bcgvbaf.Bcgvbaf('ohc qerphefr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar svyranzr rkcrpgrq") vg = qerphefr.erphefvir_qveyvfg(rkgen, bcg.kqri) vs bcg.cebsvyr: vzcbeg pCebsvyr qrs qb_vg(): sbe v va vg: cnff pCebsvyr.eha('qb_vg()') ryfr: vs bcg.dhvrg: sbe v va vg: cnff ryfr: sbe (anzr,fg) va vg: cevag anzr vs fnirq_reebef: 
ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc fcyvg [-gpo] [-a anzr] [--orapu] [svyranzrf...] -- e,erzbgr= erzbgr ercbfvgbel cngu o,oybof bhgchg n frevrf bs oybo vqf g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) A,abbc qba'g npghnyyl fnir gur qngn naljurer d,dhvrg qba'g cevag cebterff zrffntrf i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) pbcl whfg pbcl vachg gb bhgchg, unfufcyvggvat nybat gur jnl orapu cevag orapuznex gvzvatf gb fgqree znk-cnpx-fvmr= znkvzhz olgrf va n fvatyr cnpx znk-cnpx-bowrpgf= znkvzhz ahzore bs bowrpgf va n fvatyr cnpx snabhg= znkvzhz ahzore bs oybof va n fvatyr gerr """ b = bcgvbaf.Bcgvbaf('ohc fcyvg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr be bcg.abbc be bcg.pbcl): b.sngny("hfr bar be zber bs -o, -g, -p, -a, -A, --pbcl") vs (bcg.abbc be bcg.pbcl) naq (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny('-A vf vapbzcngvoyr jvgu -o, -g, -p, -a') vs bcg.ireobfr >= 2: tvg.ireobfr = bcg.ireobfr - 1 bcg.orapu = 1 vs bcg.znk_cnpx_fvmr: unfufcyvg.znk_cnpx_fvmr = cnefr_ahz(bcg.znk_cnpx_fvmr) vs bcg.znk_cnpx_bowrpgf: unfufcyvg.znk_cnpx_bowrpgf = cnefr_ahz(bcg.znk_cnpx_bowrpgf) vs bcg.snabhg: unfufcyvg.snabhg = cnefr_ahz(bcg.snabhg) vs bcg.oybof: unfufcyvg.snabhg = 0 vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") fgneg_gvzr = gvzr.gvzr() ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.abbc be bcg.pbcl: pyv = j = byqers = Abar ryvs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr 
naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() svyrf = rkgen naq (bcra(sa) sbe sa va rkgen) be [flf.fgqva] vs j: funyvfg = unfufcyvg.fcyvg_gb_funyvfg(j, svyrf) gerr = j.arj_gerr(funyvfg) ryfr: ynfg = 0 sbe (oybo, ovgf) va unfufcyvg.unfufcyvg_vgre(svyrf): unfufcyvg.gbgny_fcyvg += yra(oybo) vs bcg.pbcl: flf.fgqbhg.jevgr(fge(oybo)) zrtf = unfufcyvg.gbgny_fcyvg/1024/1024 vs abg bcg.dhvrg naq ynfg != zrtf: cebterff('%q Zolgrf ernq\e' % zrtf) ynfg = zrtf cebterff('%q Zolgrf ernq, qbar.\a' % zrtf) vs bcg.ireobfr: ybt('\a') vs bcg.oybof: sbe (zbqr,anzr,ova) va funyvfg: cevag ova.rapbqr('urk') vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fcyvg\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') vs j: j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() frpf = gvzr.gvzr() - fgneg_gvzr fvmr = unfufcyvg.gbgny_fcyvg vs bcg.orapu: ybt('\aohc: %.2sxolgrf va %.2s frpf = %.2s xolgrf/frp\a' % (fvmr/1024., frpf, fvmr/1024./frpf)) #!/hfe/ova/rai clguba vzcbeg flf, er, fgehpg, zznc sebz ohc vzcbeg tvg, bcgvbaf sebz ohc.urycref vzcbeg * qrs f_sebz_olgrf(olgrf): pyvfg = [pue(o) sbe o va olgrf] erghea ''.wbva(pyvfg) qrs ercbeg(pbhag): svryqf = ['IzFvmr', 'IzEFF', 'IzQngn', 'IzFgx'] q = {} sbe yvar va bcra('/cebp/frys/fgnghf').ernqyvarf(): y = er.fcyvg(e':\f*', yvar.fgevc(), 1) q[y[0]] = y[1] vs pbhag >= 0: r1 = pbhag svryqf = [q[x] sbe x va svryqf] ryfr: r1 = '' cevag ('%9f ' + ('%10f ' * yra(svryqf))) % ghcyr([r1] + svryqf) flf.fgqbhg.syhfu() bcgfcrp = """ ohc zrzgrfg [-a ryrzragf] [-p plpyrf] -- a,ahzore= ahzore bs bowrpgf cre plpyr p,plpyrf= ahzore bs plpyrf gb eha vtaber-zvqk vtaber .zvqk svyrf, hfr bayl .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zrzgrfg', bcgfcrp) (bcg, syntf, rkgen) = 
b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') tvg.vtaber_zvqk = bcg.vtaber_zvqk tvg.purpx_ercb_be_qvr() z = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) plpyrf = bcg.plpyrf be 100 ahzore = bcg.ahzore be 10000 ercbeg(-1) s = bcra('/qri/henaqbz') n = zznc.zznc(-1, 20) ercbeg(0) sbe p va kenatr(plpyrf): sbe a va kenatr(ahzore): o = s.ernq(3) vs 0: olgrf = yvfg(fgehpg.hacnpx('!OOO', o)) + [0]*17 olgrf[2] &= 0ks0 ova = fgehpg.cnpx('!20f', f_sebz_olgrf(olgrf)) ryfr: n[0:2] = o[0:2] n[2] = pue(beq(o[2]) & 0ks0) ova = fge(n[0:20]) #cevag ova.rapbqr('urk') z.rkvfgf(ova) ercbeg((p+1)*ahzore) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * qrs cevag_abqr(grkg, a): cersvk = '' vs bcg.unfu: cersvk += "%f " % a.unfu.rapbqr('urk') vs fgng.F_VFQVE(a.zbqr): cevag '%f%f/' % (cersvk, grkg) ryvs fgng.F_VFYAX(a.zbqr): cevag '%f%f@' % (cersvk, grkg) ryfr: cevag '%f%f' % (cersvk, grkg) bcgfcrp = """ ohc yf -- f,unfu fubj unfu sbe rnpu svyr """ b = bcgvbaf.Bcgvbaf('ohc yf', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) vs abg rkgen: rkgen = ['/'] erg = 0 sbe q va rkgen: gel: a = gbc.yerfbyir(q) vs fgng.F_VFQVE(a.zbqr): sbe fho va a: cevag_abqr(fho.anzr, fho) ryfr: cevag_abqr(q, a) rkprcg isf.AbqrReebe, r: ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er, fgng, ernqyvar, sazngpu sebz ohc vzcbeg bcgvbaf, tvg, fudhbgr, isf sebz ohc.urycref vzcbeg * qrs abqr_anzr(grkg, a): vs fgng.F_VFQVE(a.zbqr): erghea '%f/' % grkg ryvs fgng.F_VFYAX(a.zbqr): erghea '%f@' % grkg ryfr: erghea '%f' % grkg qrs qb_yf(cngu, a): y = [] vs fgng.F_VFQVE(a.zbqr): sbe fho va a: y.nccraq(abqr_anzr(fho.anzr, fho)) ryfr: y.nccraq(abqr_anzr(cngu, a)) cevag pbyhzangr(y, '') qrs jevgr_gb_svyr(vas, bhgs): sbe oybo va puhaxlernqre(vas): bhgs.jevgr(oybo) qrs vachgvgre(): vs bf.vfnggl(flf.fgqva.svyrab()): juvyr 1: gel: lvryq enj_vachg('ohc> ') rkprcg RBSReebe: oernx 
ryfr: sbe yvar va flf.fgqva: lvryq yvar qrs _pbzcyrgre_trg_fhof(yvar): (dglcr, ynfgjbeq) = fudhbgr.hasvavfurq_jbeq(yvar) (qve,anzr) = bf.cngu.fcyvg(ynfgjbeq) #ybt('\apbzcyrgre: %e %e %e\a' % (dglcr, ynfgjbeq, grkg)) a = cjq.erfbyir(qve) fhof = yvfg(svygre(ynzoqn k: k.anzr.fgnegfjvgu(anzr), a.fhof())) erghea (qve, anzr, dglcr, ynfgjbeq, fhof) _ynfg_yvar = Abar _ynfg_erf = Abar qrs pbzcyrgre(grkg, fgngr): tybony _ynfg_yvar tybony _ynfg_erf gel: yvar = ernqyvar.trg_yvar_ohssre()[:ernqyvar.trg_raqvqk()] vs _ynfg_yvar != yvar: _ynfg_erf = _pbzcyrgre_trg_fhof(yvar) _ynfg_yvar = yvar (qve, anzr, dglcr, ynfgjbeq, fhof) = _ynfg_erf vs fgngr < yra(fhof): fa = fhof[fgngr] fa1 = fa.erfbyir('') # qrers flzyvaxf shyyanzr = bf.cngu.wbva(qve, fa.anzr) vs fgng.F_VFQVE(fa1.zbqr): erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr+'/', grezvangr=Snyfr) ryfr: erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr, grezvangr=Gehr) + ' ' erghea grkg + erg rkprcg Rkprcgvba, r: ybt('\areebe va pbzcyrgvba: %f\a' % r) bcgfcrp = """ ohc sgc """ b = bcgvbaf.Bcgvbaf('ohc sgc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) cjq = gbc vs rkgen: yvarf = rkgen ryfr: ernqyvar.frg_pbzcyrgre_qryvzf(' \g\a\e/') ernqyvar.frg_pbzcyrgre(pbzcyrgre) ernqyvar.cnefr_naq_ovaq("gno: pbzcyrgr") yvarf = vachgvgre() sbe yvar va yvarf: vs abg yvar.fgevc(): pbagvahr jbeqf = [jbeq sbe (jbeqfgneg,jbeq) va fudhbgr.dhbgrfcyvg(yvar)] pzq = jbeqf[0].ybjre() #ybt('rkrphgr: %e %e\a' % (pzq, cnez)) gel: vs pzq == 'yf': sbe cnez va (jbeqf[1:] be ['.']): qb_yf(cnez, cjq.erfbyir(cnez)) ryvs pzq == 'pq': sbe cnez va jbeqf[1:]: cjq = cjq.erfbyir(cnez) ryvs pzq == 'cjq': cevag cjq.shyyanzr() ryvs pzq == 'png': sbe cnez va jbeqf[1:]: jevgr_gb_svyr(cjq.erfbyir(cnez).bcra(), flf.fgqbhg) ryvs pzq == 'trg': vs yra(jbeqf) abg va [2,3]: envfr Rkprcgvba('Hfntr: trg [ybpnyanzr]') eanzr = jbeqf[1] (qve,onfr) = bf.cngu.fcyvg(eanzr) yanzr = yra(jbeqf)>2 naq jbeqf[2] be onfr vas = 
cjq.erfbyir(eanzr).bcra() ybt('Fnivat %e\a' % yanzr) jevgr_gb_svyr(vas, bcra(yanzr, 'jo')) ryvs pzq == 'ztrg': sbe cnez va jbeqf[1:]: (qve,onfr) = bf.cngu.fcyvg(cnez) sbe a va cjq.erfbyir(qve).fhof(): vs sazngpu.sazngpu(a.anzr, onfr): gel: ybt('Fnivat %e\a' % a.anzr) vas = a.bcra() bhgs = bcra(a.anzr, 'jo') jevgr_gb_svyr(vas, bhgs) bhgs.pybfr() rkprcg Rkprcgvba, r: ybt(' reebe: %f\a' % r) ryvs pzq == 'uryc' be pzq == '?': ybt('Pbzznaqf: yf pq cjq png trg ztrg uryc dhvg\a') ryvs pzq == 'dhvg' be pzq == 'rkvg' be pzq == 'olr': oernx ryfr: envfr Rkprcgvba('ab fhpu pbzznaq %e' % pzq) rkprcg Rkprcgvba, r: ybt('reebe: %f\a' % r) #envfr #!/hfe/ova/rai clguba vzcbeg flf, zznc sebz ohc vzcbeg bcgvbaf, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc enaqbz [-F frrq] -- F,frrq= bcgvbany enaqbz ahzore frrq (qrsnhyg 1) s,sbepr cevag enaqbz qngn gb fgqbhg rira vs vg'f n ggl """ b = bcgvbaf.Bcgvbaf('ohc enaqbz', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") gbgny = cnefr_ahz(rkgen[0]) vs bcg.sbepr be (abg bf.vfnggl(1) naq abg ngbv(bf.raiveba.trg('OHC_SBEPR_GGL')) & 1): _unfufcyvg.jevgr_enaqbz(flf.fgqbhg.svyrab(), gbgny, bcg.frrq be 0) ryfr: ybt('reebe: abg jevgvat ovanel qngn gb n grezvany. 
Hfr -s gb sbepr.\a') flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc uryc """ b = bcgvbaf.Bcgvbaf('ohc uryc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) == 0: # gur jenccre cebtenz cebivqrf gur qrsnhyg hfntr fgevat bf.rkrpic(bf.raiveba['OHC_ZNVA_RKR'], ['ohc']) ryvs yra(rkgen) == 1: qbpanzr = (rkgen[0]=='ohc' naq 'ohc' be ('ohc-%f' % rkgen[0])) rkr = flf.neti[0] (rkrcngu, rkrsvyr) = bf.cngu.fcyvg(rkr) znacngu = bf.cngu.wbva(rkrcngu, '../Qbphzragngvba/' + qbpanzr + '.[1-9]') t = tybo.tybo(znacngu) vs t: bf.rkrpic('zna', ['zna', '-y', t[0]]) ryfr: bf.rkrpic('zna', ['zna', qbpanzr]) ryfr: b.sngny("rknpgyl bar pbzznaq anzr rkcrpgrq") #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng, reeab, shfr, er, gvzr, grzcsvyr sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * pynff Fgng(shfr.Fgng): qrs __vavg__(frys): frys.fg_zbqr = 0 frys.fg_vab = 0 frys.fg_qri = 0 frys.fg_ayvax = 0 frys.fg_hvq = 0 frys.fg_tvq = 0 frys.fg_fvmr = 0 frys.fg_ngvzr = 0 frys.fg_zgvzr = 0 frys.fg_pgvzr = 0 frys.fg_oybpxf = 0 frys.fg_oyxfvmr = 0 frys.fg_eqri = 0 pnpur = {} qrs pnpur_trg(gbc, cngu): cnegf = cngu.fcyvg('/') pnpur[('',)] = gbc p = Abar znk = yra(cnegf) #ybt('pnpur: %e\a' % pnpur.xrlf()) sbe v va enatr(znk): cer = cnegf[:znk-v] #ybt('pnpur gelvat: %e\a' % cer) p = pnpur.trg(ghcyr(cer)) vs p: erfg = cnegf[znk-v:] sbe e va erfg: #ybt('erfbyivat %e sebz %e\a' % (e, p.shyyanzr())) p = p.yerfbyir(e) xrl = ghcyr(cer + [e]) #ybt('fnivat: %e\a' % (xrl,)) pnpur[xrl] = p oernx nffreg(p) erghea p pynff OhcSf(shfr.Shfr): qrs __vavg__(frys, gbc): shfr.Shfr.__vavg__(frys) frys.gbc = gbc qrs trgngge(frys, cngu): ybt('--trgngge(%e)\a' % cngu) gel: abqr = pnpur_trg(frys.gbc, cngu) fg = Fgng() fg.fg_zbqr = abqr.zbqr fg.fg_ayvax = abqr.ayvaxf() fg.fg_fvmr = abqr.fvmr() fg.fg_zgvzr = abqr.zgvzr fg.fg_pgvzr = abqr.pgvzr fg.fg_ngvzr = abqr.ngvzr erghea fg rkprcg isf.AbFhpuSvyr: erghea -reeab.RABRAG qrs ernqqve(frys, cngu, bssfrg): 
ybt('--ernqqve(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) lvryq shfr.Qveragel('.') lvryq shfr.Qveragel('..') sbe fho va abqr.fhof(): lvryq shfr.Qveragel(fho.anzr) qrs ernqyvax(frys, cngu): ybt('--ernqyvax(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) erghea abqr.ernqyvax() qrs bcra(frys, cngu, syntf): ybt('--bcra(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) nppzbqr = bf.B_EQBAYL | bf.B_JEBAYL | bf.B_EQJE vs (syntf & nppzbqr) != bf.B_EQBAYL: erghea -reeab.RNPPRF abqr.bcra() qrs eryrnfr(frys, cngu, syntf): ybt('--eryrnfr(%e)\a' % cngu) qrs ernq(frys, cngu, fvmr, bssfrg): ybt('--ernq(%e)\a' % cngu) a = pnpur_trg(frys.gbc, cngu) b = a.bcra() b.frrx(bssfrg) erghea b.ernq(fvmr) vs abg unfngge(shfr, '__irefvba__'): envfr EhagvzrReebe, "lbhe shfr zbqhyr vf gbb byq sbe shfr.__irefvba__" shfr.shfr_clguba_ncv = (0, 2) bcgfcrp = """ ohc shfr [-q] [-s] -- q,qroht vapernfr qroht yriry s,sbertebhaq eha va sbertebhaq """ b = bcgvbaf.Bcgvbaf('ohc shfr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) s = OhcSf(gbc) s.shfr_netf.zbhagcbvag = rkgen[0] vs bcg.qroht: s.shfr_netf.nqq('qroht') vs bcg.sbertebhaq: s.shfr_netf.frgzbq('sbertebhaq') cevag s.zhygvguernqrq s.zhygvguernqrq = Snyfr s.znva() #!/hfe/ova/rai clguba sebz ohc vzcbeg tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ [OHC_QVE=...] ohc vavg [-e ubfg:cngu] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc vavg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") vs bcg.erzbgr: tvg.vavg_ercb() # ybpny ercb tvg.purpx_ercb_be_qvr() pyv = pyvrag.Pyvrag(bcg.erzbgr, perngr=Gehr) pyv.pybfr() ryfr: tvg.vavg_ercb() #!/hfe/ova/rai clguba vzcbeg flf, zngu, fgehpg, tybo sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * CNTR_FVMR=4096 FUN_CRE_CNTR=CNTR_FVMR/200. 
qrs zretr(vqkyvfg, ovgf, gnoyr): pbhag = 0 sbe r va tvg.vqkzretr(vqkyvfg): pbhag += 1 cersvk = tvg.rkgenpg_ovgf(r, ovgf) gnoyr[cersvk] = pbhag lvryq r qrs qb_zvqk(bhgqve, bhgsvyranzr, vasvyranzrf): vs abg bhgsvyranzr: nffreg(bhgqve) fhz = Fun1('\0'.wbva(vasvyranzrf)).urkqvtrfg() bhgsvyranzr = '%f/zvqk-%f.zvqk' % (bhgqve, fhz) vac = [] gbgny = 0 sbe anzr va vasvyranzrf: vk = tvg.CnpxVqk(anzr) vac.nccraq(vk) gbgny += yra(vk) ybt('Zretvat %q vaqrkrf (%q bowrpgf).\a' % (yra(vasvyranzrf), gbgny)) vs (abg bcg.sbepr naq (gbgny < 1024 naq yra(vasvyranzrf) < 3)) \ be (bcg.sbepr naq abg gbgny): ybt('zvqk: abguvat gb qb.\a') erghea cntrf = vag(gbgny/FUN_CRE_CNTR) be 1 ovgf = vag(zngu.prvy(zngu.ybt(cntrf, 2))) ragevrf = 2**ovgf ybt('Gnoyr fvmr: %q (%q ovgf)\a' % (ragevrf*4, ovgf)) gnoyr = [0]*ragevrf gel: bf.hayvax(bhgsvyranzr) rkprcg BFReebe: cnff s = bcra(bhgsvyranzr + '.gzc', 'j+') s.jevgr('ZVQK\0\0\0\2') s.jevgr(fgehpg.cnpx('!V', ovgf)) nffreg(s.gryy() == 12) s.jevgr('\0'*4*ragevrf) sbe r va zretr(vac, ovgf, gnoyr): s.jevgr(r) s.jevgr('\0'.wbva(bf.cngu.onfranzr(c) sbe c va vasvyranzrf)) s.frrx(12) s.jevgr(fgehpg.cnpx('!%qV' % ragevrf, *gnoyr)) s.pybfr() bf.eranzr(bhgsvyranzr + '.gzc', bhgsvyranzr) # guvf vf whfg sbe grfgvat vs 0: c = tvg.CnpxZvqk(bhgsvyranzr) nffreg(yra(c.vqkanzrf) == yra(vasvyranzrf)) cevag c.vqkanzrf nffreg(yra(c) == gbgny) cv = vgre(c) sbe v va zretr(vac, gbgny, ovgf, gnoyr): nffreg(v == cv.arkg()) nffreg(c.rkvfgf(v)) cevag bhgsvyranzr bcgfcrp = """ ohc zvqk [bcgvbaf...] 
-- b,bhgchg= bhgchg zvqk svyranzr (qrsnhyg: nhgb-trarengrq) n,nhgb nhgbzngvpnyyl perngr .zvqk sebz nal havaqrkrq .vqk svyrf s,sbepr nhgbzngvpnyyl perngr .zvqk sebz *nyy* .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zvqk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen naq (bcg.nhgb be bcg.sbepr): b.sngny("lbh pna'g hfr -s/-n naq nyfb cebivqr svyranzrf") tvg.purpx_ercb_be_qvr() vs rkgen: qb_zvqk(tvg.ercb('bowrpgf/cnpx'), bcg.bhgchg, rkgen) ryvs bcg.nhgb be bcg.sbepr: cnguf = [tvg.ercb('bowrpgf/cnpx')] cnguf += tybo.tybo(tvg.ercb('vaqrk-pnpur/*/.')) sbe cngu va cnguf: ybt('zvqk: fpnaavat %f\a' % cngu) vs bcg.sbepr: qb_zvqk(cngu, bcg.bhgchg, tybo.tybo('%f/*.vqk' % cngu)) ryvs bcg.nhgb: z = tvg.CnpxVqkYvfg(cngu) arrqrq = {} sbe cnpx va z.cnpxf: # bayl .vqk svyrf jvgubhg n .zvqk ner bcra vs cnpx.anzr.raqfjvgu('.vqk'): arrqrq[cnpx.anzr] = 1 qry z qb_zvqk(cngu, bcg.bhgchg, arrqrq.xrlf()) ybt('\a') ryfr: b.sngny("lbh zhfg hfr -s be -n be cebivqr vachg svyranzrf") #!/hfe/ova/rai clguba vzcbeg flf, bf, enaqbz sebz ohc vzcbeg bcgvbaf sebz ohc.urycref vzcbeg * qrs enaqoybpx(a): y = [] sbe v va kenatr(a): y.nccraq(pue(enaqbz.enaqenatr(0,256))) erghea ''.wbva(y) bcgfcrp = """ ohc qnzntr [-a pbhag] [-f znkfvmr] [-F frrq] -- JNEAVAT: GUVF PBZZNAQ VF RKGERZRYL QNATREBHF a,ahz= ahzore bs oybpxf gb qnzntr f,fvmr= znkvzhz fvmr bs rnpu qnzntrq oybpx creprag= znkvzhz fvmr bs rnpu qnzntrq oybpx (nf n creprag bs ragver svyr) rdhny fcernq qnzntr rirayl guebhtubhg gur svyr F,frrq= enaqbz ahzore frrq (sbe ercrngnoyr grfgf) """ b = bcgvbaf.Bcgvbaf('ohc qnzntr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg rkgen: b.sngny('svyranzrf rkcrpgrq') vs bcg.frrq != Abar: enaqbz.frrq(bcg.frrq) sbe anzr va rkgen: ybt('Qnzntvat "%f"...\a' % anzr) s = bcra(anzr, 'e+o') fg = bf.sfgng(s.svyrab()) fvmr = fg.fg_fvmr vs bcg.creprag be bcg.fvmr: zf1 = vag(sybng(bcg.creprag be 0)/100.0*fvmr) be fvmr zf2 = bcg.fvmr be fvmr znkfvmr = zva(zf1, zf2) ryfr: znkfvmr = 1 puhaxf = bcg.ahz be 10 
puhaxfvmr = fvmr/puhaxf sbe e va enatr(puhaxf): fm = enaqbz.enaqenatr(1, znkfvmr+1) vs fm > fvmr: fm = fvmr vs bcg.rdhny: bsf = e*puhaxfvmr ryfr: bsf = enaqbz.enaqenatr(0, fvmr - fm + 1) ybt(' %6q olgrf ng %q\a' % (fm, bsf)) s.frrx(bsf) s.jevgr(enaqoybpx(fm)) s.pybfr() #!/hfe/ova/rai clguba vzcbeg flf, fgehpg, zznc sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * fhfcraqrq_j = Abar qrs vavg_qve(pbaa, net): tvg.vavg_ercb(net) ybt('ohc freire: ohcqve vavgvnyvmrq: %e\a' % tvg.ercbqve) pbaa.bx() qrs frg_qve(pbaa, net): tvg.purpx_ercb_be_qvr(net) ybt('ohc freire: ohcqve vf %e\a' % tvg.ercbqve) pbaa.bx() qrs yvfg_vaqrkrf(pbaa, whax): tvg.purpx_ercb_be_qvr() sbe s va bf.yvfgqve(tvg.ercb('bowrpgf/cnpx')): vs s.raqfjvgu('.vqk'): pbaa.jevgr('%f\a' % s) pbaa.bx() qrs fraq_vaqrk(pbaa, anzr): tvg.purpx_ercb_be_qvr() nffreg(anzr.svaq('/') < 0) nffreg(anzr.raqfjvgu('.vqk')) vqk = tvg.CnpxVqk(tvg.ercb('bowrpgf/cnpx/%f' % anzr)) pbaa.jevgr(fgehpg.cnpx('!V', yra(vqk.znc))) pbaa.jevgr(vqk.znc) pbaa.bx() qrs erprvir_bowrpgf(pbaa, whax): tybony fhfcraqrq_j tvg.purpx_ercb_be_qvr() fhttrfgrq = {} vs fhfcraqrq_j: j = fhfcraqrq_j fhfcraqrq_j = Abar ryfr: j = tvg.CnpxJevgre() juvyr 1: af = pbaa.ernq(4) vs abg af: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq yratgu urnqre, tbg RBS\a') a = fgehpg.hacnpx('!V', af)[0] #ybt('rkcrpgvat %q olgrf\a' % a) vs abg a: ybt('ohc freire: erprvirq %q bowrpg%f.\a' % (j.pbhag, j.pbhag!=1 naq "f" be '')) shyycngu = j.pybfr() vs shyycngu: (qve, anzr) = bf.cngu.fcyvg(shyycngu) pbaa.jevgr('%f.vqk\a' % anzr) pbaa.bx() erghea ryvs a == 0kssssssss: ybt('ohc freire: erprvir-bowrpgf fhfcraqrq.\a') fhfcraqrq_j = j pbaa.bx() erghea ohs = pbaa.ernq(a) # bowrpg fvmrf va ohc ner ernfbanoyl fznyy #ybt('ernq %q olgrf\a' % a) vs yra(ohs) < a: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq %q olgrf, tbg %q\a' % (a, yra(ohs))) (glcr, pbagrag) = tvg._qrpbqr_cnpxbow(ohs) fun = tvg.pnyp_unfu(glcr, pbagrag) byqcnpx = j.rkvfgf(fun) # SVKZR: jr bayl fhttrfg n fvatyr 
vaqrk cre plpyr, orpnhfr gur pyvrag # vf pheeragyl qhzo gb qbjaybnq zber guna bar cre plpyr naljnl. # Npghnyyl jr fubhyq svk gur pyvrag, ohg guvf vf n zvabe bcgvzvmngvba # ba gur freire fvqr. vs abg fhttrfgrq naq \ byqcnpx naq (byqcnpx == Gehr be byqcnpx.raqfjvgu('.zvqk')): # SVKZR: jr fubhyqa'g ernyyl unir gb xabj nobhg zvqk svyrf # ng guvf ynlre. Ohg rkvfgf() ba n zvqk qbrfa'g erghea gur # cnpxanzr (fvapr vg qbrfa'g xabj)... cebonoyl jr fubhyq whfg # svk gung qrsvpvrapl bs zvqk svyrf riraghnyyl, nygubhtu vg'yy # znxr gur svyrf ovttre. Guvf zrgubq vf pregnvayl abg irel # rssvpvrag. j.bowpnpur.erserfu(fxvc_zvqk = Gehr) byqcnpx = j.bowpnpur.rkvfgf(fun) ybt('arj fhttrfgvba: %e\a' % byqcnpx) nffreg(byqcnpx) nffreg(byqcnpx != Gehr) nffreg(abg byqcnpx.raqfjvgu('.zvqk')) j.bowpnpur.erserfu(fxvc_zvqk = Snyfr) vs abg fhttrfgrq naq byqcnpx: nffreg(byqcnpx.raqfjvgu('.vqk')) (qve,anzr) = bf.cngu.fcyvg(byqcnpx) vs abg (anzr va fhttrfgrq): ybt("ohc freire: fhttrfgvat vaqrk %f\a" % anzr) pbaa.jevgr('vaqrk %f\a' % anzr) fhttrfgrq[anzr] = 1 ryfr: j._enj_jevgr([ohs]) # ABGERNPURQ qrs ernq_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() e = tvg.ernq_ers(ersanzr) pbaa.jevgr('%f\a' % (e be '').rapbqr('urk')) pbaa.bx() qrs hcqngr_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() arjiny = pbaa.ernqyvar().fgevc() byqiny = pbaa.ernqyvar().fgevc() tvg.hcqngr_ers(ersanzr, arjiny.qrpbqr('urk'), byqiny.qrpbqr('urk')) pbaa.bx() qrs png(pbaa, vq): tvg.purpx_ercb_be_qvr() gel: sbe oybo va tvg.png(vq): pbaa.jevgr(fgehpg.cnpx('!V', yra(oybo))) pbaa.jevgr(oybo) rkprcg XrlReebe, r: ybt('freire: reebe: %f\a' % r) pbaa.jevgr('\0\0\0\0') pbaa.reebe(r) ryfr: pbaa.jevgr('\0\0\0\0') pbaa.bx() bcgfcrp = """ ohc freire """ b = bcgvbaf.Bcgvbaf('ohc freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') ybt('ohc freire: ernqvat sebz fgqva.\a') pbzznaqf = { 'vavg-qve': vavg_qve, 'frg-qve': frg_qve, 'yvfg-vaqrkrf': yvfg_vaqrkrf, 'fraq-vaqrk': fraq_vaqrk, 
'erprvir-bowrpgf': erprvir_bowrpgf, 'ernq-ers': ernq_ers, 'hcqngr-ers': hcqngr_ers, 'png': png, } # SVKZR: guvf cebgbpby vf gbgnyyl ynzr naq abg ng nyy shgher-cebbs. # (Rfcrpvnyyl fvapr jr nobeg pbzcyrgryl nf fbba nf *nalguvat* onq unccraf) pbaa = Pbaa(flf.fgqva, flf.fgqbhg) ye = yvarernqre(pbaa) sbe _yvar va ye: yvar = _yvar.fgevc() vs abg yvar: pbagvahr ybt('ohc freire: pbzznaq: %e\a' % yvar) jbeqf = yvar.fcyvg(' ', 1) pzq = jbeqf[0] erfg = yra(jbeqf)>1 naq jbeqf[1] be '' vs pzq == 'dhvg': oernx ryfr: pzq = pbzznaqf.trg(pzq) vs pzq: pzq(pbaa, erfg) ryfr: envfr Rkprcgvba('haxabja freire pbzznaq: %e\a' % yvar) ybt('ohc freire: qbar\a') #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc wbva [-e ubfg:cngu] [ersf be unfurf...] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc wbva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg rkgen: rkgen = yvarernqre(flf.fgqva) erg = 0 vs bcg.erzbgr: pyv = pyvrag.Pyvrag(bcg.erzbgr) png = pyv.png ryfr: pc = tvg.PngCvcr() png = pc.wbva sbe vq va rkgen: gel: sbe oybo va png(vq): flf.fgqbhg.jevgr(oybo) rkprcg XrlReebe, r: flf.fgqbhg.syhfu() ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, er, reeab, fgng, gvzr, zngu sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, vaqrk, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc fnir [-gp] [-a anzr] -- e,erzbgr= erzbgr ercbfvgbel cngu g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) d,dhvrg qba'g fubj cebterff zrgre fznyyre= bayl onpx hc svyrf fznyyre guna a olgrf """ b = bcgvbaf.Bcgvbaf('ohc fnir', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny("hfr bar be zber bs -g, -p, -a") vs abg rkgen: b.sngny("ab svyranzrf 
tvira") bcg.cebterff = (vfggl naq abg bcg.dhvrg) bcg.fznyyre = cnefr_ahz(bcg.fznyyre be 0) vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() unaqyr_pgey_p() qrs rngfynfu(qve): vs qve.raqfjvgu('/'): erghea qve[:-1] ryfr: erghea qve cnegf = [''] funyvfgf = [[]] qrs _chfu(cneg): nffreg(cneg) cnegf.nccraq(cneg) funyvfgf.nccraq([]) qrs _cbc(sbepr_gerr): nffreg(yra(cnegf) >= 1) cneg = cnegf.cbc() funyvfg = funyvfgf.cbc() gerr = sbepr_gerr be j.arj_gerr(funyvfg) vs funyvfgf: funyvfgf[-1].nccraq(('40000', cneg, gerr)) ryfr: # guvf jnf gur gbcyriry, fb chg vg onpx sbe fnavgl funyvfgf.nccraq(funyvfg) erghea gerr ynfgerznva = Abar qrs cebterff_ercbeg(a): tybony pbhag, fhopbhag, ynfgerznva fhopbhag += a pp = pbhag + fhopbhag cpg = gbgny naq (pp*100.0/gbgny) be 0 abj = gvzr.gvzr() ryncfrq = abj - gfgneg xcf = ryncfrq naq vag(pp/1024./ryncfrq) xcf_senp = 10 ** vag(zngu.ybt(xcf+1, 10) - 1) xcf = vag(xcf/xcf_senp)*xcf_senp vs pp: erznva = ryncfrq*1.0/pp * (gbgny-pp) ryfr: erznva = 0.0 vs (ynfgerznva naq (erznva > ynfgerznva) naq ((erznva - ynfgerznva)/ynfgerznva < 0.05)): erznva = ynfgerznva ryfr: ynfgerznva = erznva ubhef = vag(erznva/60/60) zvaf = vag(erznva/60 - ubhef*60) frpf = vag(erznva - ubhef*60*60 - zvaf*60) vs ryncfrq < 30: erznvafge = '' xcffge = '' ryfr: xcffge = '%qx/f' % xcf vs ubhef: erznvafge = '%qu%qz' % (ubhef, zvaf) ryvs zvaf: erznvafge = '%qz%q' % (zvaf, frpf) ryfr: erznvafge = '%qf' % frpf cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf) %f %f\e' % (cpg, pp/1024, gbgny/1024, spbhag, sgbgny, erznvafge, xcffge)) e = vaqrk.Ernqre(tvg.ercb('ohcvaqrk')) qrs nyernql_fnirq(rag): erghea rag.vf_inyvq() naq 
j.rkvfgf(rag.fun) naq rag.fun qrs jnagerphefr_cer(rag): erghea abg nyernql_fnirq(rag) qrs jnagerphefr_qhevat(rag): erghea abg nyernql_fnirq(rag) be rag.fun_zvffvat() gbgny = sgbgny = 0 vs bcg.cebterff: sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_cer): vs abg (sgbgny % 10024): cebterff('Ernqvat vaqrk: %q\e' % sgbgny) rkvfgf = rag.rkvfgf() unfuinyvq = nyernql_fnirq(rag) rag.frg_fun_zvffvat(abg unfuinyvq) vs abg bcg.fznyyre be rag.fvmr < bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: gbgny += rag.fvmr sgbgny += 1 cebterff('Ernqvat vaqrk: %q, qbar.\a' % sgbgny) unfufcyvg.cebterff_pnyyonpx = cebterff_ercbeg gfgneg = gvzr.gvzr() pbhag = fhopbhag = spbhag = 0 ynfgfxvc_anzr = Abar ynfgqve = '' sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_qhevat): (qve, svyr) = bf.cngu.fcyvg(rag.anzr) rkvfgf = (rag.syntf & vaqrk.VK_RKVFGF) unfuinyvq = nyernql_fnirq(rag) jnfzvffvat = rag.fun_zvffvat() byqfvmr = rag.fvmr vs bcg.ireobfr: vs abg rkvfgf: fgnghf = 'Q' ryvs abg unfuinyvq: vs rag.fun == vaqrk.RZCGL_FUN: fgnghf = 'N' ryfr: fgnghf = 'Z' ryfr: fgnghf = ' ' vs bcg.ireobfr >= 2: ybt('%f %-70f\a' % (fgnghf, rag.anzr)) ryvs abg fgng.F_VFQVE(rag.zbqr) naq ynfgqve != qve: vs abg ynfgqve.fgnegfjvgu(qve): ybt('%f %-70f\a' % (fgnghf, bf.cngu.wbva(qve, ''))) ynfgqve = qve vs bcg.cebterff: cebterff_ercbeg(0) spbhag += 1 vs abg rkvfgf: pbagvahr vs bcg.fznyyre naq rag.fvmr >= bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: nqq_reebe('fxvccvat ynetr svyr "%f"' % rag.anzr) ynfgfxvc_anzr = rag.anzr pbagvahr nffreg(qve.fgnegfjvgu('/')) qvec = qve.fcyvg('/') juvyr cnegf > qvec: _cbc(sbepr_gerr = Abar) vs qve != '/': sbe cneg va qvec[yra(cnegf):]: _chfu(cneg) vs abg svyr: # ab svyranzr cbegvba zrnaf guvf vf n fhoqve. Ohg # fho/cneragqverpgbevrf nyernql unaqyrq va gur cbc/chfu() cneg nobir. 
byqgerr = nyernql_fnirq(rag) # znl or Abar arjgerr = _cbc(sbepr_gerr = byqgerr) vs abg byqgerr: vs ynfgfxvc_anzr naq ynfgfxvc_anzr.fgnegfjvgu(rag.anzr): rag.vainyvqngr() ryfr: rag.inyvqngr(040000, arjgerr) rag.ercnpx() vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr pbagvahr # vg'f abg n qverpgbel vq = Abar vs unfuinyvq: zbqr = '%b' % rag.tvgzbqr vq = rag.fun funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) ryfr: vs fgng.F_VFERT(rag.zbqr): gel: s = unfufcyvg.bcra_abngvzr(rag.anzr) rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = unfufcyvg.fcyvg_gb_oybo_be_gerr(j, [s]) ryfr: vs fgng.F_VFQVE(rag.zbqr): nffreg(0) # unaqyrq nobir ryvs fgng.F_VFYAX(rag.zbqr): gel: ey = bf.ernqyvax(rag.anzr) rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = ('120000', j.arj_oybo(ey)) ryfr: nqq_reebe(Rkprcgvba('fxvccvat fcrpvny svyr "%f"' % rag.anzr)) ynfgfxvc_anzr = rag.anzr vs vq: rag.inyvqngr(vag(zbqr, 8), vq) rag.ercnpx() funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr fhopbhag = 0 vs bcg.cebterff: cpg = gbgny naq pbhag*100.0/gbgny be 100 cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf), qbar. 
\a' % (cpg, pbhag/1024, gbgny/1024, spbhag, sgbgny)) juvyr yra(cnegf) > 1: _cbc(sbepr_gerr = Abar) nffreg(yra(funyvfgf) == 1) gerr = j.arj_gerr(funyvfgf[-1]) vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fnir\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq juvyr fnivat.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc gvpx """ b = bcgvbaf.Bcgvbaf('ohc gvpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") g = gvzr.gvzr() gyrsg = 1 - (g - vag(g)) gvzr.fyrrc(gyrsg) #!/hfe/ova/rai clguba vzcbeg bf, flf, fgng, gvzr sebz ohc vzcbeg bcgvbaf, tvg, vaqrk, qerphefr sebz ohc.urycref vzcbeg * qrs zretr_vaqrkrf(bhg, e1, e2): sbe r va vaqrk.ZretrVgre([e1, e2]): # SVKZR: fubhyqa'g jr erzbir qryrgrq ragevrf riraghnyyl? Jura? 
bhg.nqq_vkragel(r) pynff VgreUrycre: qrs __vavg__(frys, y): frys.v = vgre(y) frys.phe = Abar frys.arkg() qrs arkg(frys): gel: frys.phe = frys.v.arkg() rkprcg FgbcVgrengvba: frys.phe = Abar erghea frys.phe qrs purpx_vaqrk(ernqre): gel: ybt('purpx: purpxvat sbejneq vgrengvba...\a') r = Abar q = {} sbe r va ernqre.sbejneq_vgre(): vs r.puvyqera_a: vs bcg.ireobfr: ybt('%08k+%-4q %e\a' % (r.puvyqera_bsf, r.puvyqera_a, r.anzr)) nffreg(r.puvyqera_bsf) nffreg(r.anzr.raqfjvgu('/')) nffreg(abg q.trg(r.puvyqera_bsf)) q[r.puvyqera_bsf] = 1 vs r.syntf & vaqrk.VK_UNFUINYVQ: nffreg(r.fun != vaqrk.RZCGL_FUN) nffreg(r.tvgzbqr) nffreg(abg r be r.anzr == '/') # ynfg ragel vf *nyjnlf* / ybt('purpx: purpxvat abezny vgrengvba...\a') ynfg = Abar sbe r va ernqre: vs ynfg: nffreg(ynfg > r.anzr) ynfg = r.anzr rkprcg: ybt('vaqrk reebe! ng %e\a' % r) envfr ybt('purpx: cnffrq.\a') qrs hcqngr_vaqrk(gbc): ev = vaqrk.Ernqre(vaqrksvyr) jv = vaqrk.Jevgre(vaqrksvyr) evt = VgreUrycre(ev.vgre(anzr=gbc)) gfgneg = vag(gvzr.gvzr()) unfutra = Abar vs bcg.snxr_inyvq: qrs unfutra(anzr): erghea (0100644, vaqrk.SNXR_FUN) gbgny = 0 sbe (cngu,cfg) va qerphefr.erphefvir_qveyvfg([gbc], kqri=bcg.kqri): vs bcg.ireobfr>=2 be (bcg.ireobfr==1 naq fgng.F_VFQVE(cfg.fg_zbqr)): flf.fgqbhg.jevgr('%f\a' % cngu) flf.fgqbhg.syhfu() cebterff('Vaqrkvat: %q\e' % gbgny) ryvs abg (gbgny % 128): cebterff('Vaqrkvat: %q\e' % gbgny) gbgny += 1 juvyr evt.phe naq evt.phe.anzr > cngu: # qryrgrq cnguf vs evt.phe.rkvfgf(): evt.phe.frg_qryrgrq() evt.phe.ercnpx() evt.arkg() vs evt.phe naq evt.phe.anzr == cngu: # cnguf gung nyernql rkvfgrq vs cfg: evt.phe.sebz_fgng(cfg, gfgneg) vs abg (evt.phe.syntf & vaqrk.VK_UNFUINYVQ): vs unfutra: (evt.phe.tvgzbqr, evt.phe.fun) = unfutra(cngu) evt.phe.syntf |= vaqrk.VK_UNFUINYVQ vs bcg.snxr_vainyvq: evt.phe.vainyvqngr() evt.phe.ercnpx() evt.arkg() ryfr: # arj cnguf jv.nqq(cngu, cfg, unfutra = unfutra) cebterff('Vaqrkvat: %q, qbar.\a' % gbgny) vs ev.rkvfgf(): ev.fnir() jv.syhfu() vs jv.pbhag: je = 
jv.arj_ernqre() vs bcg.purpx: ybt('purpx: orsber zretvat: byqsvyr\a') purpx_vaqrk(ev) ybt('purpx: orsber zretvat: arjsvyr\a') purpx_vaqrk(je) zv = vaqrk.Jevgre(vaqrksvyr) zretr_vaqrkrf(zv, ev, je) ev.pybfr() zv.pybfr() je.pybfr() jv.nobeg() ryfr: jv.pybfr() bcgfcrp = """ ohc vaqrk <-c|z|h> [bcgvbaf...] -- c,cevag cevag gur vaqrk ragevrf sbe gur tvira anzrf (nyfb jbexf jvgu -h) z,zbqvsvrq cevag bayl nqqrq/qryrgrq/zbqvsvrq svyrf (vzcyvrf -c) f,fgnghf cevag rnpu svyranzr jvgu n fgnghf pune (N/Z/Q) (vzcyvrf -c) U,unfu cevag gur unfu sbe rnpu bowrpg arkg gb vgf anzr (vzcyvrf -c) y,ybat cevag zber vasbezngvba nobhg rnpu svyr h,hcqngr (erphefviryl) hcqngr gur vaqrk ragevrf sbe gur tvira svyranzrf k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf snxr-inyvq znex nyy vaqrk ragevrf nf hc-gb-qngr rira vs gurl nera'g snxr-vainyvq znex nyy vaqrk ragevrf nf vainyvq purpx pnershyyl purpx vaqrk svyr vagrtevgl s,vaqrksvyr= gur anzr bs gur vaqrk svyr (qrsnhyg 'vaqrk') i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) """ b = bcgvbaf.Bcgvbaf('ohc vaqrk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg (bcg.zbqvsvrq be bcg['cevag'] be bcg.fgnghf be bcg.hcqngr be bcg.purpx): b.sngny('fhccyl bar be zber bs -c, -f, -z, -h, be --purpx') vs (bcg.snxr_inyvq be bcg.snxr_vainyvq) naq abg bcg.hcqngr: b.sngny('--snxr-{va,}inyvq ner zrnavatyrff jvgubhg -h') vs bcg.snxr_inyvq naq bcg.snxr_vainyvq: b.sngny('--snxr-inyvq vf vapbzcngvoyr jvgu --snxr-vainyvq') tvg.purpx_ercb_be_qvr() vaqrksvyr = bcg.vaqrksvyr be tvg.ercb('ohcvaqrk') unaqyr_pgey_p() vs bcg.purpx: ybt('purpx: fgnegvat vavgvny purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) cnguf = vaqrk.erqhpr_cnguf(rkgen) vs bcg.hcqngr: vs abg cnguf: b.sngny('hcqngr (-h) erdhrfgrq ohg ab cnguf tvira') sbe (ec,cngu) va cnguf: hcqngr_vaqrk(ec) vs bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq: sbe (anzr, rag) va vaqrk.Ernqre(vaqrksvyr).svygre(rkgen be ['']): vs (bcg.zbqvsvrq naq (rag.vf_inyvq() be rag.vf_qryrgrq() be abg 
rag.zbqr)): pbagvahr yvar = '' vs bcg.fgnghf: vs rag.vf_qryrgrq(): yvar += 'Q ' ryvs abg rag.vf_inyvq(): vs rag.fun == vaqrk.RZCGL_FUN: yvar += 'N ' ryfr: yvar += 'Z ' ryfr: yvar += ' ' vs bcg.unfu: yvar += rag.fun.rapbqr('urk') + ' ' vs bcg.ybat: yvar += "%7f %7f " % (bpg(rag.zbqr), bpg(rag.tvgzbqr)) cevag yvar + (anzr be './') vs bcg.purpx naq (bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq be bcg.hcqngr): ybt('purpx: fgnegvat svany purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg sebz ohc vzcbeg bcgvbaf, urycref bcgfcrp = """ ohc eonpxhc-freire -- Guvf pbzznaq vf abg vagraqrq gb or eha znahnyyl. """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc-freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') # trg gur fhopbzznaq'f neti. # Abeznyyl jr pbhyq whfg cnff guvf ba gur pbzznaq yvar, ohg fvapr jr'yy bsgra # or trggvat pnyyrq ba gur bgure raq bs na ffu cvcr, juvpu graqf gb znatyr # neti (ol fraqvat vg ivn gur furyy), guvf jnl vf zhpu fnsre. ohs = flf.fgqva.ernq(4) fm = fgehpg.hacnpx('!V', ohs)[0] nffreg(fm > 0) nffreg(fm < 1000000) ohs = flf.fgqva.ernq(fm) nffreg(yra(ohs) == fm) neti = ohs.fcyvg('\0') # fgqva/fgqbhg ner fhccbfrqyl pbaarpgrq gb 'ohc freire' gung gur pnyyre # fgnegrq sbe hf (bsgra ba gur bgure raq bs na ffu ghaary), fb jr qba'g jnag # gb zvfhfr gurz. Zbir gurz bhg bs gur jnl, gura ercynpr fgqbhg jvgu # n cbvagre gb fgqree va pnfr bhe fhopbzznaq jnagf gb qb fbzrguvat jvgu vg. # # Vg zvtug or avpr gb qb gur fnzr jvgu fgqva, ohg zl rkcrevzragf fubjrq gung # ffu frrzf gb znxr vgf puvyq'f fgqree n ernqnoyr-ohg-arire-ernqf-nalguvat # fbpxrg. Gurl ernyyl fubhyq unir hfrq fuhgqbja(FUHG_JE) ba gur bgure raq # bs vg, ohg cebonoyl qvqa'g. Naljnl, vg'f gbb zrffl, fb yrg'f whfg znxr fher # nalbar ernqvat sebz fgqva vf qvfnccbvagrq. 
# # (Lbh pna'g whfg yrnir fgqva/fgqbhg "abg bcra" ol pybfvat gur svyr # qrfpevcgbef. Gura gur arkg svyr gung bcraf vf nhgbzngvpnyyl nffvtarq 0 be 1, # naq crbcyr *gelvat* gb ernq/jevgr fgqva/fgqbhg trg fperjrq.) bf.qhc2(0, 3) bf.qhc2(1, 4) bf.qhc2(2, 1) sq = bf.bcra('/qri/ahyy', bf.B_EQBAYL) bf.qhc2(sq, 0) bf.pybfr(sq) bf.raiveba['OHC_FREIRE_ERIREFR'] = urycref.ubfganzr() bf.rkrpic(neti[0], neti) flf.rkvg(99) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo, fhocebprff, gvzr sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * cne2_bx = 0 ahyys = bcra('/qri/ahyy') qrs qroht(f): vs bcg.ireobfr: ybt(f) qrs eha(neti): # ng yrnfg va clguba 2.5, hfvat "fgqbhg=2" be "fgqbhg=flf.fgqree" orybj # qbrfa'g npghnyyl jbex, orpnhfr fhocebprff pybfrf sq #2 evtug orsber # rkrpvat sbe fbzr ernfba. Fb jr jbex nebhaq vg ol qhcyvpngvat gur sq # svefg. sq = bf.qhc(2) # pbcl fgqree gel: c = fhocebprff.Cbcra(neti, fgqbhg=sq, pybfr_sqf=Snyfr) erghea c.jnvg() svanyyl: bf.pybfr(sq) qrs cne2_frghc(): tybony cne2_bx ei = 1 gel: c = fhocebprff.Cbcra(['cne2', '--uryc'], fgqbhg=ahyys, fgqree=ahyys, fgqva=ahyys) ei = c.jnvg() rkprcg BFReebe: ybt('sfpx: jneavat: cne2 abg sbhaq; qvfnoyvat erpbirel srngherf.\a') ryfr: cne2_bx = 1 qrs cnei(yiy): vs bcg.ireobfr >= yiy: vs vfggl: erghea [] ryfr: erghea ['-d'] ryfr: erghea ['-dd'] qrs cne2_trarengr(onfr): erghea eha(['cne2', 'perngr', '-a1', '-p200'] + cnei(2) + ['--', onfr, onfr+'.cnpx', onfr+'.vqk']) qrs cne2_irevsl(onfr): erghea eha(['cne2', 'irevsl'] + cnei(3) + ['--', onfr]) qrs cne2_ercnve(onfr): erghea eha(['cne2', 'ercnve'] + cnei(2) + ['--', onfr]) qrs dhvpx_irevsl(onfr): s = bcra(onfr + '.cnpx', 'eo') s.frrx(-20, 2) jnagfhz = s.ernq(20) nffreg(yra(jnagfhz) == 20) s.frrx(0) fhz = Fun1() sbe o va puhaxlernqre(s, bf.sfgng(s.svyrab()).fg_fvmr - 20): fhz.hcqngr(o) vs fhz.qvtrfg() != jnagfhz: envfr InyhrReebe('rkcrpgrq %e, tbg %e' % (jnagfhz.rapbqr('urk'), fhz.urkqvtrfg())) qrs tvg_irevsl(onfr): vs bcg.dhvpx: gel: dhvpx_irevsl(onfr) rkprcg Rkprcgvba, r: 
qroht('reebe: %f\a' % r) erghea 1 erghea 0 ryfr: erghea eha(['tvg', 'irevsl-cnpx', '--', onfr]) qrs qb_cnpx(onfr, ynfg): pbqr = 0 vs cne2_bx naq cne2_rkvfgf naq (bcg.ercnve be abg bcg.trarengr): ierfhyg = cne2_irevsl(onfr) vs ierfhyg != 0: vs bcg.ercnve: eerfhyg = cne2_ercnve(onfr) vs eerfhyg != 0: cevag '%f cne2 ercnve: snvyrq (%q)' % (ynfg, eerfhyg) pbqr = eerfhyg ryfr: cevag '%f cne2 ercnve: fhpprrqrq (0)' % ynfg pbqr = 100 ryfr: cevag '%f cne2 irevsl: snvyrq (%q)' % (ynfg, ierfhyg) pbqr = ierfhyg ryfr: cevag '%f bx' % ynfg ryvs abg bcg.trarengr be (cne2_bx naq abg cne2_rkvfgf): terfhyg = tvg_irevsl(onfr) vs terfhyg != 0: cevag '%f tvg irevsl: snvyrq (%q)' % (ynfg, terfhyg) pbqr = terfhyg ryfr: vs cne2_bx naq bcg.trarengr: cerfhyg = cne2_trarengr(onfr) vs cerfhyg != 0: cevag '%f cne2 perngr: snvyrq (%q)' % (ynfg, cerfhyg) pbqr = cerfhyg ryfr: cevag '%f bx' % ynfg ryfr: cevag '%f bx' % ynfg ryfr: nffreg(bcg.trarengr naq (abg cne2_bx be cne2_rkvfgf)) qroht(' fxvccrq: cne2 svyr nyernql trarengrq.\a') erghea pbqr bcgfcrp = """ ohc sfpx [bcgvbaf...] [svyranzrf...] -- e,ercnve nggrzcg gb ercnve reebef hfvat cne2 (qnatrebhf!) 
t,trarengr trarengr nhgb-ercnve vasbezngvba hfvat cne2 i,ireobfr vapernfr ireobfvgl (pna or hfrq zber guna bapr) dhvpx whfg purpx cnpx fun1fhz, qba'g hfr tvg irevsl-cnpx w,wbof= eha 'a' wbof va cnenyyry cne2-bx vzzrqvngryl erghea 0 vs cne2 vf bx, 1 vs abg qvfnoyr-cne2 vtaber cne2 rira vs vg vf ninvynoyr """ b = bcgvbaf.Bcgvbaf('ohc sfpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) cne2_frghc() vs bcg.cne2_bx: vs cne2_bx: flf.rkvg(0) # 'gehr' va fu ryfr: flf.rkvg(1) vs bcg.qvfnoyr_cne2: cne2_bx = 0 tvg.purpx_ercb_be_qvr() vs abg rkgen: qroht('sfpx: Ab svyranzrf tvira: purpxvat nyy cnpxf.\a') rkgen = tybo.tybo(tvg.ercb('bowrpgf/cnpx/*.cnpx')) pbqr = 0 pbhag = 0 bhgfgnaqvat = {} sbe anzr va rkgen: vs anzr.raqfjvgu('.cnpx'): onfr = anzr[:-5] ryvs anzr.raqfjvgu('.vqk'): onfr = anzr[:-4] ryvs anzr.raqfjvgu('.cne2'): onfr = anzr[:-5] ryvs bf.cngu.rkvfgf(anzr + '.cnpx'): onfr = anzr ryfr: envfr Rkprcgvba('%f vf abg n cnpx svyr!' % anzr) (qve,ynfg) = bf.cngu.fcyvg(onfr) cne2_rkvfgf = bf.cngu.rkvfgf(onfr + '.cne2') vs cne2_rkvfgf naq bf.fgng(onfr + '.cne2').fg_fvmr == 0: cne2_rkvfgf = 0 flf.fgqbhg.syhfu() qroht('sfpx: purpxvat %f (%f)\a' % (ynfg, cne2_bx naq cne2_rkvfgf naq 'cne2' be 'tvg')) vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.wbof: ap = qb_cnpx(onfr, ynfg) pbqr = pbqr be ap pbhag += 1 ryfr: juvyr yra(bhgfgnaqvat) >= bcg.wbof: (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 cvq = bf.sbex() vs cvq: # cnerag bhgfgnaqvat[cvq] = 1 ryfr: # puvyq gel: flf.rkvg(qb_cnpx(onfr, ynfg)) rkprcg Rkprcgvba, r: ybt('rkprcgvba: %e\a' % r) flf.rkvg(99) juvyr yra(bhgfgnaqvat): (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.ireobfr naq vfggl: ybt('sfpx qbar. 
\a') flf.rkvg(pbqr) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg, trgbcg, fhocebprff, fvtany sebz ohc vzcbeg bcgvbaf, ffu sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc eonpxhc vaqrk ... ohc eonpxhc fnir ... ohc eonpxhc fcyvg ... """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc', bcgfcrp, bcgshap=trgbcg.trgbcg) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) < 2: b.sngny('nethzragf rkcrpgrq') pynff FvtRkprcgvba(Rkprcgvba): qrs __vavg__(frys, fvtahz): frys.fvtahz = fvtahz Rkprcgvba.__vavg__(frys, 'fvtany %q erprvirq' % fvtahz) qrs unaqyre(fvtahz, senzr): envfr FvtRkprcgvba(fvtahz) fvtany.fvtany(fvtany.FVTGREZ, unaqyre) fvtany.fvtany(fvtany.FVTVAG, unaqyre) fc = Abar c = Abar erg = 99 gel: ubfganzr = rkgen[0] neti = rkgen[1:] c = ffu.pbaarpg(ubfganzr, 'eonpxhc-freire') netif = '\0'.wbva(['ohc'] + neti) c.fgqva.jevgr(fgehpg.cnpx('!V', yra(netif)) + netif) c.fgqva.syhfu() znva_rkr = bf.raiveba.trg('OHC_ZNVA_RKR') be flf.neti[0] fc = fhocebprff.Cbcra([znva_rkr, 'freire'], fgqva=c.fgqbhg, fgqbhg=c.fgqva) c.fgqva.pybfr() c.fgqbhg.pybfr() svanyyl: juvyr 1: # vs jr trg n fvtany juvyr jnvgvat, jr unir gb xrrc jnvgvat, whfg # va pnfr bhe puvyq qbrfa'g qvr. 
gel: erg = c.jnvg() fc.jnvg() oernx rkprcg FvtRkprcgvba, r: ybt('\aohc eonpxhc: %f\a' % r) bf.xvyy(c.cvq, r.fvtahz) erg = 84 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc arjyvare """ b = bcgvbaf.Bcgvbaf('ohc arjyvare', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") e = er.pbzcvyr(e'([\e\a])') ynfgyra = 0 nyy = '' juvyr 1: y = e.fcyvg(nyy, 1) vs yra(y) <= 1: gel: o = bf.ernq(flf.fgqva.svyrab(), 4096) rkprcg XrlobneqVagreehcg: oernx vs abg o: oernx nyy += o ryfr: nffreg(yra(y) == 3) (yvar, fcyvgpune, nyy) = y #fcyvgpune = '\a' flf.fgqbhg.jevgr('%-*f%f' % (ynfgyra, yvar, fcyvgpune)) vs fcyvgpune == '\e': ynfgyra = yra(yvar) ryfr: ynfgyra = 0 flf.fgqbhg.syhfu() vs ynfgyra be nyy: flf.fgqbhg.jevgr('%-*f\a' % (ynfgyra, nyy)) #!/hfe/ova/rai clguba vzcbeg flf sebz ohc vzcbeg bcgvbaf, tvg, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc znetva """ b = bcgvbaf.Bcgvbaf('ohc znetva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") tvg.purpx_ercb_be_qvr() #tvg.vtaber_zvqk = 1 zv = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) ynfg = '\0'*20 ybatzngpu = 0 sbe v va zv: vs v == ynfg: pbagvahr #nffreg(fge(v) >= ynfg) cz = _unfufcyvg.ovgzngpu(ynfg, v) ybatzngpu = znk(ybatzngpu, cz) ynfg = v cevag ybatzngpu bup-0.25/t/sampledata/y/text000066400000000000000000000000471225146730500157710ustar00rootroot00000000000000this is a text file. See me be texty! bup-0.25/t/subtree-hash000077500000000000000000000014351225146730500150210ustar00rootroot00000000000000#!/usr/bin/env bash # Usage: subtree-hash ROOT_HASH [SUBDIR ...] 
subtree_hash() { root_hash="$1" if test "$#" -eq 1; then echo $root_hash else subdir="$2" subtree_info="$(git ls-tree "$root_hash" | grep -E " $subdir\$")" || true if test "$(echo "$subtree_info" | wc -l)" -ne 1; then echo "Didn't find just one matching line in subtree $root_hash" 1>&2 return 1 fi subtree_hash="$(echo "$subtree_info" | cut -d' ' -f 3 | cut -d$'\t' -f 1)" || true if test -z "$subtree_hash"; then echo "Unable to find subtree hash in git output: $subtree_info" 1>&2 return 1 fi shift 2 || exit $? subtree_hash "$subtree_hash" "$@" || exit $? fi } subtree_hash "$@" bup-0.25/t/test-cat-file.sh000077500000000000000000000026031225146730500154770ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" export GIT_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } WVPASS bup init WVPASS cd "$tmpdir" WVSTART "cat-file" WVPASS mkdir src WVPASS date > src/foo WVPASS bup index src WVPASS bup save -n src src WVPASS bup cat-file "src/latest/$(pwd)/src/foo" > cat-foo WVPASS diff -u src/foo cat-foo WVSTART "cat-file --meta" WVPASS bup meta --create --no-paths src/foo > src-foo.meta WVPASS bup cat-file --meta "src/latest/$(pwd)/src/foo" > cat-foo.meta WVPASS bup meta -tvvf src-foo.meta | WVPASS grep -vE '^atime: ' > src-foo.list WVPASS bup meta -tvvf cat-foo.meta | WVPASS grep -vE '^atime: ' > cat-foo.list WVPASS diff -u src-foo.list cat-foo.list WVSTART "cat-file --bupm" WVPASS bup cat-file --bupm "src/latest/$(pwd)/src/" > bup-cat-bupm src_hash=$(WVPASS bup ls -s "src/latest/$(pwd)" | cut -d' ' -f 1) || exit $? bupm_hash=$(WVPASS git ls-tree "$src_hash" | grep -F .bupm | cut -d' ' -f 3) \ || exit $? bupm_hash=$(WVPASS echo "$bupm_hash" | cut -d' ' -f 1) || exit $? WVPASS git cat-file blob "$bupm_hash" > git-cat-bupm if ! 
cmp git-cat-bupm bup-cat-bupm; then cmp -l git-cat-bupm bup-cat-bupm diff -uN <(bup meta -tvvf git-cat-bupm) <(bup meta -tvvf bup-cat-bupm) WVPASS cmp git-cat-bupm bup-cat-bupm fi WVPASS rm -rf "$tmpdir" bup-0.25/t/test-command-without-init-fails.sh000077500000000000000000000005131225146730500211650ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh set -o pipefail WVSTART 'all' top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } WVPASS mkdir "$tmpdir/foo" bup index "$tmpdir/foo" &> /dev/null index_rc=$? WVPASSEQ "$index_rc" "15" WVPASS rm -rf "$tmpdir" bup-0.25/t/test-index-check-device.sh000077500000000000000000000040711225146730500174330ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh . ./t/lib.sh set -o pipefail if [ $(t/root-status) != root ]; then echo 'Not root: skipping --check-device tests.' exit 0 # FIXME: add WVSKIP. fi if test -z "$(type -p losetup)"; then echo 'Unable to find losetup: skipping --check-device tests.' exit 0 # FIXME: add WVSKIP. fi if test -z "$(type -p mke2fs)"; then echo 'Unable to find mke2fs: skipping --check-device tests.' exit 0 # FIXME: add WVSKIP. fi WVSTART '--check-device' top="$(pwd)" tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } srcmnt="$(WVPASS wvmkmountpt)" || exit $? tmpmnt1="$(WVPASS wvmkmountpt)" || exit $? tmpmnt2="$(WVPASS wvmkmountpt)" || exit $? WVPASS cd "$tmpdir" WVPASS dd if=/dev/zero of=testfs.img bs=1M count=32 WVPASS mke2fs -F -j -m 0 testfs.img WVPASS mount -o loop testfs.img "$tmpmnt1" # Hide, so that tests can't create risks. WVPASS chown root:root "$tmpmnt1" WVPASS chmod 0700 "$tmpmnt1" # Create trivial content. WVPASS date > "$tmpmnt1/foo" WVPASS umount "$tmpmnt1" # Mount twice, so we'll have the same content with different devices. 
WVPASS mount -oro,loop testfs.img "$tmpmnt1" WVPASS mount -oro,loop testfs.img "$tmpmnt2" # Test default behavior: --check-device. WVPASS mount -oro --bind "$tmpmnt1" "$srcmnt" WVPASS bup init WVPASS bup index --fake-valid "$srcmnt" WVPASS umount "$srcmnt" WVPASS mount -oro --bind "$tmpmnt2" "$srcmnt" WVPASS bup index "$srcmnt" WVPASSEQ "$(bup index --status "$srcmnt")" \ "M $srcmnt/lost+found/ M $srcmnt/foo M $srcmnt/" WVPASS umount "$srcmnt" WVSTART '--no-check-device' WVPASS mount -oro --bind "$tmpmnt1" "$srcmnt" WVPASS bup index --clear WVPASS bup index --fake-valid "$srcmnt" WVPASS umount "$srcmnt" WVPASS mount -oro --bind "$tmpmnt2" "$srcmnt" WVPASS bup index --no-check-device "$srcmnt" WVPASS bup index --status "$srcmnt" WVPASSEQ "$(bup index --status "$srcmnt")" \ " $srcmnt/lost+found/ $srcmnt/foo $srcmnt/" WVPASS umount "$srcmnt" WVPASS umount "$tmpmnt1" WVPASS umount "$tmpmnt2" WVPASS rm -r "$tmpmnt1" "$tmpmnt2" "$tmpdir" bup-0.25/t/test-meta.sh000077500000000000000000000646311225146730500147520ustar00rootroot00000000000000#!/usr/bin/env bash . wvtest.sh . t/lib.sh set -o pipefail TOP="$(WVPASS pwd)" || exit $? export BUP_DIR="$TOP/buptest.tmp" bup() { "$TOP/bup" "$@" } hardlink-sets() { "$TOP/t/hardlink-sets" "$@" } id-other-than() { "$TOP/t/id-other-than" "$@" } # Very simple metadata tests -- create a test tree then check that bup # meta can reproduce the metadata correctly (according to bup xstat) # via create, extract, start-extract, and finish-extract. The current # tests are crude, and this does not fully test devices, varying # users/groups, acls, attrs, etc. genstat() { ( export PATH="$TOP:$PATH" # pick up bup # Skip atime (test elsewhere) to avoid the observer effect. WVPASS find . | WVPASS sort \ | WVPASS xargs bup xstat --exclude-fields ctime,atime,size ) } test-src-create-extract() { # Test bup meta create/extract for ./src -> ./src-restore. # Also writes to ./src-stat and ./src-restore-stat. 
( (WVPASS cd src; WVPASS genstat) > src-stat || exit $? WVPASS bup meta --create --recurse --file src.meta src # Test extract. WVPASS force-delete src-restore WVPASS mkdir src-restore WVPASS cd src-restore WVPASS bup meta --extract --file ../src.meta WVPASS test -d src (WVPASS cd src; WVPASS genstat >../../src-restore-stat) || exit $? WVPASS diff -U5 ../src-stat ../src-restore-stat # Test start/finish extract. WVPASS force-delete src WVPASS bup meta --start-extract --file ../src.meta WVPASS test -d src WVPASS bup meta --finish-extract --file ../src.meta (WVPASS cd src; WVPASS genstat >../../src-restore-stat) || exit $? WVPASS diff -U5 ../src-stat ../src-restore-stat ) } test-src-save-restore() { # Test bup save/restore metadata for ./src -> ./src-restore. Also # writes to ./src.bup. Note that for now this just tests the # restore below src/, in order to avoid having to worry about # operations that require root (like chown /home). ( WVPASS rm -rf src.bup WVPASS mkdir src.bup export BUP_DIR=$(pwd)/src.bup WVPASS bup init WVPASS bup index src WVPASS bup save -t -n src src # Test extract. WVPASS force-delete src-restore WVPASS mkdir src-restore WVPASS bup restore -C src-restore "/src/latest$(pwd)/" WVPASS test -d src-restore/src WVPASS "$TOP/t/compare-trees" -c src/ src-restore/src/ WVPASS rm -rf src.bup ) } universal-cleanup() { if [ $(t/root-status) != root ]; then return 0; fi umount "$TOP/bupmeta.tmp/testfs" || true umount "$TOP/bupmeta.tmp/testfs-limited" || true } WVPASS universal-cleanup trap universal-cleanup EXIT setup-test-tree() { WVPASS force-delete "$BUP_DIR" WVPASS force-delete "$TOP/bupmeta.tmp" WVPASS mkdir -p "$TOP/bupmeta.tmp/src" WVPASS cp -pPR Documentation cmd config lib "$TOP/bupmeta.tmp"/src # Add some hard links for the general tests. ( WVPASS cd "$TOP/bupmeta.tmp"/src WVPASS touch hardlink-target WVPASS ln hardlink-target hardlink-1 WVPASS ln hardlink-target hardlink-2 WVPASS ln hardlink-target hardlink-3 ) || exit $? 
# Add some trivial files for the index, modify, save tests. ( WVPASS cd "$TOP/bupmeta.tmp"/src WVPASS mkdir volatile WVPASS touch volatile/{1,2,3} ) || exit $? # Regression test for metadata sort order. Previously, these two # entries would sort in the wrong order because the metadata # entries were being sorted by mangled name, but the index isn't. WVPASS dd if=/dev/zero of="$TOP/bupmeta.tmp"/src/foo bs=1k count=33 WVPASS touch -t 201111111111 "$TOP/bupmeta.tmp"/src/foo WVPASS touch -t 201112121111 "$TOP/bupmeta.tmp"/src/foo-bar t/mksock "$TOP/bupmeta.tmp/src/test-socket" || true } # Use the test tree to check bup meta. WVSTART 'meta --create/--extract' ( WVPASS setup-test-tree WVPASS cd "$TOP/bupmeta.tmp" WVPASS test-src-create-extract # Test a top-level file (not dir). WVPASS touch src-file WVPASS bup meta -cf src-file.meta src-file WVPASS mkdir dest WVPASS cd dest WVPASS bup meta -xf ../src-file.meta ) || exit $? # Use the test tree to check bup save/restore metadata. WVSTART 'metadata save/restore (general)' ( WVPASS setup-test-tree WVPASS cd "$TOP/bupmeta.tmp" WVPASS test-src-save-restore # Test a deeper subdir/ to make sure top-level non-dir metadata is # restored correctly. We need at least one dir and one non-dir at # the "top-level". WVPASS test -f src/lib/__init__.py WVPASS test -d src/lib/bup WVPASS rm -rf src.bup WVPASS mkdir src.bup export BUP_DIR=$(pwd)/src.bup WVPASS bup init WVPASS touch -t 201111111111 src-restore # Make sure the top won't match. WVPASS bup index src WVPASS bup save -t -n src src WVPASS force-delete src-restore WVPASS bup restore -C src-restore "/src/latest$(pwd)/src/lib/" WVPASS touch -t 201211111111 src-restore # Make sure the top won't match. # Check that the only difference is the top dir. WVFAIL $TOP/t/compare-trees -c src/lib/ src-restore/ > tmp-compare-trees WVPASSEQ $(cat tmp-compare-trees | wc -l) 2 WVPASS tail -n +2 tmp-compare-trees | WVPASS grep -qE '^\.d[^ ]+ \./$' ) || exit $? 
# Test that we pull the index (not filesystem) metadata for any # unchanged files whenever we're saving other files in a given # directory. WVSTART 'metadata save/restore (using index metadata)' ( WVPASS setup-test-tree WVPASS cd "$TOP/bupmeta.tmp" # ...for now -- might be a problem with hardlink restores that was # causing noise wrt this test. WVPASS rm -rf src/hardlink* # Pause here to keep the filesystem changes far enough away from # the first index run that bup won't cap their index timestamps # (see "bup help index" for more information). Without this # sleep, the compare-trees test below "Bup should *not* pick up # these metadata..." may fail. WVPASS sleep 1 WVPASS rm -rf src.bup WVPASS mkdir src.bup export BUP_DIR=$(pwd)/src.bup WVPASS bup init WVPASS bup index src WVPASS bup save -t -n src src WVPASS force-delete src-restore-1 WVPASS mkdir src-restore-1 WVPASS bup restore -C src-restore-1 "/src/latest$(pwd)/" WVPASS test -d src-restore-1/src WVPASS "$TOP/t/compare-trees" -c src/ src-restore-1/src/ WVPASS echo "blarg" > src/volatile/1 WVPASS cp -a src/volatile/1 src-restore-1/src/volatile/ WVPASS bup index src # Bup should *not* pick up these metadata changes. WVPASS touch src/volatile/2 WVPASS bup save -t -n src src WVPASS force-delete src-restore-2 WVPASS mkdir src-restore-2 WVPASS bup restore -C src-restore-2 "/src/latest$(pwd)/" WVPASS test -d src-restore-2/src WVPASS "$TOP/t/compare-trees" -c src-restore-1/src/ src-restore-2/src/ WVPASS rm -rf src.bup ) || exit $? setup-hardlink-test() { WVPASS rm -rf "$TOP/bupmeta.tmp"/src "$TOP/bupmeta.tmp"/src.bup WVPASS mkdir "$TOP/bupmeta.tmp"/src "$TOP/bupmeta.tmp"/src.bup WVPASS bup init } hardlink-test-run-restore() { WVPASS force-delete src-restore WVPASS mkdir src-restore WVPASS bup restore -C src-restore "/src/latest$(pwd)/" WVPASS test -d src-restore/src } # Test hardlinks more carefully. 
WVSTART 'metadata save/restore (hardlinks)' ( export BUP_DIR="$TOP/bupmeta.tmp/src.bup" WVPASS force-delete "$TOP/bupmeta.tmp" WVPASS mkdir -p "$TOP/bupmeta.tmp" WVPASS cd "$TOP/bupmeta.tmp" # Test trivial case - single hardlink. WVPASS setup-hardlink-test ( WVPASS cd "$TOP/bupmeta.tmp"/src WVPASS touch hardlink-target WVPASS ln hardlink-target hardlink-1 ) || exit $? WVPASS bup index src WVPASS bup save -t -n src src WVPASS hardlink-test-run-restore WVPASS "$TOP/t/compare-trees" -c src/ src-restore/src/ # Test the case where the hardlink hasn't changed, but the tree # needs to be saved again. i.e. the save-cmd.py "if hashvalid:" # case. ( WVPASS cd "$TOP/bupmeta.tmp"/src WVPASS echo whatever > something-new ) || exit $? WVPASS bup index src WVPASS bup save -t -n src src WVPASS hardlink-test-run-restore WVPASS "$TOP/t/compare-trees" -c src/ src-restore/src/ # Test hardlink changes between index runs. # WVPASS setup-hardlink-test WVPASS cd "$TOP/bupmeta.tmp"/src WVPASS touch hardlink-target-a WVPASS touch hardlink-target-b WVPASS ln hardlink-target-a hardlink-b-1 WVPASS ln hardlink-target-a hardlink-a-1 WVPASS cd .. WVPASS bup index -vv src WVPASS rm src/hardlink-b-1 WVPASS ln src/hardlink-target-b src/hardlink-b-1 WVPASS bup index -vv src WVPASS bup save -t -n src src WVPASS hardlink-test-run-restore WVPASS echo ./src/hardlink-a-1 > hardlink-sets.expected WVPASS echo ./src/hardlink-target-a >> hardlink-sets.expected WVPASS echo >> hardlink-sets.expected WVPASS echo ./src/hardlink-b-1 >> hardlink-sets.expected WVPASS echo ./src/hardlink-target-b >> hardlink-sets.expected (WVPASS cd src-restore; WVPASS hardlink-sets .) > hardlink-sets.restored \ || exit $? WVPASS diff -u hardlink-sets.expected hardlink-sets.restored # Test hardlink changes between index and save -- hardlink set [a # b c d] changes to [a b] [c d]. At least right now bup should # notice and recreate the latter. 
WVPASS setup-hardlink-test WVPASS cd "$TOP/bupmeta.tmp"/src WVPASS touch a WVPASS ln a b WVPASS ln a c WVPASS ln a d WVPASS cd .. WVPASS bup index -vv src WVPASS rm src/c src/d WVPASS touch src/c WVPASS ln src/c src/d WVPASS bup save -t -n src src WVPASS hardlink-test-run-restore WVPASS echo ./src/a > hardlink-sets.expected WVPASS echo ./src/b >> hardlink-sets.expected WVPASS echo >> hardlink-sets.expected WVPASS echo ./src/c >> hardlink-sets.expected WVPASS echo ./src/d >> hardlink-sets.expected (WVPASS cd src-restore; WVPASS hardlink-sets .) > hardlink-sets.restored \ || exit $? WVPASS diff -u hardlink-sets.expected hardlink-sets.restored # Test that we don't link outside restore tree. WVPASS setup-hardlink-test WVPASS cd "$TOP/bupmeta.tmp" WVPASS mkdir src/a src/b WVPASS touch src/a/1 WVPASS ln src/a/1 src/b/1 WVPASS bup index -vv src WVPASS bup save -t -n src src WVPASS force-delete src-restore WVPASS mkdir src-restore WVPASS bup restore -C src-restore "/src/latest$(pwd)/src/a/" WVPASS test -e src-restore/1 WVPASS echo -n > hardlink-sets.expected (WVPASS cd src-restore; WVPASS hardlink-sets .) > hardlink-sets.restored \ || exit $? WVPASS diff -u hardlink-sets.expected hardlink-sets.restored # Test that we do link within separate sub-trees. WVPASS setup-hardlink-test WVPASS cd "$TOP/bupmeta.tmp" WVPASS mkdir src/a src/b WVPASS touch src/a/1 WVPASS ln src/a/1 src/b/1 WVPASS bup index -vv src/a src/b WVPASS bup save -t -n src src/a src/b WVPASS hardlink-test-run-restore WVPASS echo ./src/a/1 > hardlink-sets.expected WVPASS echo ./src/b/1 >> hardlink-sets.expected (WVPASS cd src-restore; WVPASS hardlink-sets .) > hardlink-sets.restored \ || exit $? WVPASS diff -u hardlink-sets.expected hardlink-sets.restored ) || exit $? 
WVSTART 'meta --edit' ( WVPASS force-delete "$TOP/bupmeta.tmp" WVPASS mkdir "$TOP/bupmeta.tmp" WVPASS cd "$TOP/bupmeta.tmp" WVPASS mkdir src WVPASS bup meta -cf src.meta src WVPASS bup meta --edit --set-uid 0 src.meta | WVPASS bup meta -tvvf - \ | WVPASS grep -qE '^uid: 0' WVPASS bup meta --edit --set-uid 1000 src.meta | WVPASS bup meta -tvvf - \ | WVPASS grep -qE '^uid: 1000' WVPASS bup meta --edit --set-gid 0 src.meta | WVPASS bup meta -tvvf - \ | WVPASS grep -qE '^gid: 0' WVPASS bup meta --edit --set-gid 1000 src.meta | WVPASS bup meta -tvvf - \ | WVPASS grep -qE '^gid: 1000' WVPASS bup meta --edit --set-user foo src.meta | WVPASS bup meta -tvvf - \ | WVPASS grep -qE '^user: foo' WVPASS bup meta --edit --set-user bar src.meta | WVPASS bup meta -tvvf - \ | WVPASS grep -qE '^user: bar' WVPASS bup meta --edit --unset-user src.meta | WVPASS bup meta -tvvf - \ | WVPASS grep -qE '^user:' WVPASS bup meta --edit --set-user bar --unset-user src.meta \ | WVPASS bup meta -tvvf - | WVPASS grep -qE '^user:' WVPASS bup meta --edit --unset-user --set-user bar src.meta \ | WVPASS bup meta -tvvf - | WVPASS grep -qE '^user: bar' WVPASS bup meta --edit --set-group foo src.meta | WVPASS bup meta -tvvf - \ | WVPASS grep -qE '^group: foo' WVPASS bup meta --edit --set-group bar src.meta | WVPASS bup meta -tvvf - \ | WVPASS grep -qE '^group: bar' WVPASS bup meta --edit --unset-group src.meta | WVPASS bup meta -tvvf - \ | WVPASS grep -qE '^group:' WVPASS bup meta --edit --set-group bar --unset-group src.meta \ | WVPASS bup meta -tvvf - | WVPASS grep -qE '^group:' WVPASS bup meta --edit --unset-group --set-group bar src.meta \ | WVPASS bup meta -tvvf - | grep -qE '^group: bar' ) || exit $? 
WVSTART 'meta --no-recurse' ( set +e WVPASS force-delete "$TOP/bupmeta.tmp" WVPASS mkdir "$TOP/bupmeta.tmp" WVPASS cd "$TOP/bupmeta.tmp" WVPASS mkdir src WVPASS mkdir src/foo WVPASS touch src/foo/{1,2,3} WVPASS bup meta -cf src.meta src WVPASSEQ "$(LC_ALL=C; bup meta -tf src.meta | sort)" "src/ src/foo/ src/foo/1 src/foo/2 src/foo/3" WVPASS bup meta --no-recurse -cf src.meta src WVPASSEQ "$(LC_ALL=C; bup meta -tf src.meta | sort)" "src/" ) || exit $? # Test ownership restoration (when not root or fakeroot). ( if [ $(t/root-status) != none ]; then exit 0 fi first_group="$(WVPASS python -c 'import os,grp; \ print grp.getgrgid(os.getgroups()[0])[0]')" || exit $? last_group="$(python -c 'import os,grp; \ print grp.getgrgid(os.getgroups()[-1])[0]')" || exit $? WVSTART 'metadata (restoration of ownership)' WVPASS force-delete "$TOP/bupmeta.tmp" WVPASS mkdir "$TOP/bupmeta.tmp" WVPASS cd "$TOP/bupmeta.tmp" WVPASS touch src # Some systems always assign the parent dir group to new paths # (sgid). Make sure the group is one we're in. WVPASS chgrp -R "$first_group" src WVPASS bup meta -cf src.meta src WVPASS mkdir dest WVPASS cd dest # Make sure we don't change (or try to change) the user when not root. WVPASS bup meta --edit --set-user root ../src.meta | WVPASS bup meta -x WVPASS bup xstat src | WVPASS grep -qvE '^user: root' WVPASS rm -rf src WVPASS bup meta --edit --unset-user --set-uid 0 ../src.meta \ | WVPASS bup meta -x WVPASS bup xstat src | WVPASS grep -qvE '^user: root' # Make sure we can restore one of the user's groups. WVPASS rm -rf src WVPASS bup meta --edit --set-group "$last_group" ../src.meta \ | WVPASS bup meta -x WVPASS bup xstat src | WVPASS grep -qE "^group: $last_group" # Make sure we can restore one of the user's gids. 
user_gids="$(id -G)" last_gid="$(echo ${user_gids/* /})" WVPASS rm -rf src WVPASS bup meta --edit --unset-group --set-gid "$last_gid" ../src.meta \ | WVPASS bup meta -x WVPASS bup xstat src | WVPASS grep -qE "^gid: $last_gid" # Test --numeric-ids (gid). WVPASS rm -rf src current_gidx=$(bup meta -tvvf ../src.meta | grep -e '^gid:') WVPASS bup meta --edit --set-group "$last_group" ../src.meta \ | WVPASS bup meta -x --numeric-ids new_gidx=$(bup xstat src | grep -e '^gid:') WVPASSEQ "$current_gidx" "$new_gidx" # Test that restoring an unknown user works. unknown_user=$("$TOP"/t/unknown-owner --user) WVPASS rm -rf src current_uidx=$(bup meta -tvvf ../src.meta | grep -e '^uid:') WVPASS bup meta --edit --set-user "$unknown_user" ../src.meta \ | WVPASS bup meta -x new_uidx=$(bup xstat src | grep -e '^uid:') WVPASSEQ "$current_uidx" "$new_uidx" # Test that restoring an unknown group works. unknown_group=$("$TOP"/t/unknown-owner --group) WVPASS rm -rf src current_gidx=$(bup meta -tvvf ../src.meta | grep -e '^gid:') WVPASS bup meta --edit --set-group "$unknown_group" ../src.meta \ | WVPASS bup meta -x new_gidx=$(bup xstat src | grep -e '^gid:') WVPASSEQ "$current_gidx" "$new_gidx" ) || exit $? # Test ownership restoration (when root or fakeroot). ( if [ $(t/root-status) = none ]; then exit 0 fi WVSTART 'metadata (restoration of ownership as root)' WVPASS force-delete "$TOP/bupmeta.tmp" WVPASS mkdir "$TOP/bupmeta.tmp" WVPASS cd "$TOP/bupmeta.tmp" WVPASS touch src WVPASS chown 0:0 src # In case the parent dir is sgid, etc. WVPASS bup meta -cf src.meta src WVPASS mkdir dest WVPASS chmod 700 dest # so we can't accidentally do something insecure WVPASS cd dest other_uinfo="$(id-other-than --user "$(id -un)")" other_user="${other_uinfo%%:*}" other_uid="${other_uinfo##*:}" other_ginfo="$(id-other-than --group "$(id -gn)")" other_group="${other_ginfo%%:*}" other_gid="${other_ginfo##*:}" # Make sure we can restore a uid (must be in /etc/passwd b/c cygwin). 
WVPASS bup meta --edit --unset-user --set-uid "$other_uid" ../src.meta \ | WVPASS bup meta -x WVPASS bup xstat src | WVPASS grep -qE "^uid: $other_uid" # Make sure we can restore a gid (must be in /etc/group b/c cygwin). WVPASS bup meta --edit --unset-group --set-gid "$other_gid" ../src.meta \ | WVPASS bup meta -x WVPASS bup xstat src | WVPASS grep -qE "^gid: $other_gid" other_uinfo2="$(id-other-than --user "$(id -un)" "$other_user")" other_user2="${other_uinfo2%%:*}" other_uid2="${other_uinfo2##*:}" other_ginfo2="$(id-other-than --group "$(id -gn)" "$other_group")" other_group2="${other_ginfo2%%:*}" other_gid2="${other_ginfo2##*:}" # Try to restore a user (and see that user trumps uid when uid is not 0). WVPASS bup meta --edit \ --set-uid "$other_uid2" --set-user "$some_user" ../src.meta \ | WVPASS bup meta -x WVPASS bup xstat src | WVPASS grep -qE "^user: $some_user" # Try to restore a group (and see that group trumps gid when gid is not 0). WVPASS bup meta --edit \ --set-gid "$other_gid2" --set-group "$some_group" ../src.meta \ | WVPASS bup meta -x WVPASS bup xstat src | WVPASS grep -qE "^group: $some_user" # Test --numeric-ids (uid). Note the name 'root' is not handled # specially, so we use that here as the test user name. We assume # that the root user's uid is never 42. WVPASS rm -rf src WVPASS bup meta --edit --set-user root --set-uid "$other_uid" ../src.meta \ | WVPASS bup meta -x --numeric-ids new_uidx=$(bup xstat src | grep -e '^uid:') WVPASSEQ "$new_uidx" "uid: $other_uid" # Test --numeric-ids (gid). Note the name 'root' is not handled # specially, so we use that here as the test group name. We # assume that the root group's gid is never 42. WVPASS rm -rf src WVPASS bup meta --edit --set-group root --set-gid "$other_gid" ../src.meta \ | WVPASS bup meta -x --numeric-ids new_gidx=$(bup xstat src | grep -e '^gid:') WVPASSEQ "$new_gidx" "gid: $other_gid" # Test that restoring an unknown user works. 
unknown_user=$("$TOP"/t/unknown-owners --user) WVPASS rm -rf src WVPASS bup meta --edit \ --set-uid "$other_uid" --set-user "$unknown_user" ../src.meta \ | WVPASS bup meta -x new_uidx=$(bup xstat src | grep -e '^uid:') WVPASSEQ "$new_uidx" "uid: $other_uid" # Test that restoring an unknown group works. unknown_group=$("$TOP"/t/unknown-owners --group) WVPASS rm -rf src WVPASS bup meta --edit \ --set-gid "$other_gid" --set-group "$unknown_group" ../src.meta \ | WVPASS bup meta -x new_gidx=$(bup xstat src | grep -e '^gid:') WVPASSEQ "$new_gidx" "gid: $other_gid" if ! [[ $(uname) =~ CYGWIN ]]; then # For now, skip these on Cygwin because it doesn't allow # restoring an unknown uid/gid. # Make sure a uid of 0 trumps a non-root user. WVPASS bup meta --edit --set-user "$some_user" ../src.meta \ | WVPASS bup meta -x WVPASS bup xstat src | WVPASS grep -qvE "^user: $some_user" WVPASS bup xstat src | WVPASS grep -qE "^uid: 0" # Make sure a gid of 0 trumps a non-root group. WVPASS bup meta --edit --set-group "$some_user" ../src.meta \ | WVPASS bup meta -x WVPASS bup xstat src | WVPASS grep -qvE "^group: $some_group" WVPASS bup xstat src | WVPASS grep -qE "^gid: 0" fi ) || exit $? # Root-only tests that require an FS with all the trimmings: ACLs, # Linux attr, Linux xattr, etc. if [ $(t/root-status) = root ]; then ( # Some cleanup handled in universal-cleanup() above. # These tests are only likely to work under Linux for now # (patches welcome). [[ $(uname) =~ Linux ]] || exit 0 WVSTART 'meta - general (as root)' WVPASS setup-test-tree WVPASS cd "$TOP/bupmeta.tmp" umount testfs WVPASS dd if=/dev/zero of=testfs.img bs=1M count=32 # Make sure we have all the options the chattr test needs # (i.e. create a "normal" ext4 filesystem). WVPASS mke2fs -F -m 0 \ -I 256 \ -O has_journal,extent,huge_file,flex_bg,uninit_bg,dir_nlink,extra_isize \ testfs.img WVPASS mkdir testfs WVPASS mount -o loop,acl,user_xattr testfs.img testfs # Hide, so that tests can't create risks. 
WVPASS chown root:root testfs WVPASS chmod 0700 testfs umount testfs-limited WVPASS dd if=/dev/zero of=testfs-limited.img bs=1M count=32 WVPASS mkfs -t vfat testfs-limited.img WVPASS mkdir testfs-limited WVPASS mount -o loop,uid=root,gid=root,umask=0077 \ testfs-limited.img testfs-limited WVPASS cp -pPR src testfs/src (WVPASS cd testfs; WVPASS test-src-create-extract) || exit $? WVSTART 'meta - atime (as root)' WVPASS force-delete testfs/src WVPASS mkdir testfs/src ( WVPASS mkdir testfs/src/foo WVPASS touch testfs/src/bar PYTHONPATH="$TOP/lib" \ WVPASS python -c "from bup import xstat; \ x = xstat.timespec_to_nsecs((42, 0));\ xstat.utime('testfs/src/foo', (x, x));\ xstat.utime('testfs/src/bar', (x, x));" WVPASS cd testfs WVPASS bup meta -v --create --recurse --file src.meta src WVPASS bup meta -tvf src.meta # Test extract. WVPASS force-delete src-restore WVPASS mkdir src-restore WVPASS cd src-restore WVPASS bup meta --extract --file ../src.meta WVPASSEQ "$(bup xstat --include-fields=atime src/foo)" "atime: 42" WVPASSEQ "$(bup xstat --include-fields=atime src/bar)" "atime: 42" # Test start/finish extract. WVPASS force-delete src WVPASS bup meta --start-extract --file ../src.meta WVPASS test -d src WVPASS bup meta --finish-extract --file ../src.meta WVPASSEQ "$(bup xstat --include-fields=atime src/foo)" "atime: 42" WVPASSEQ "$(bup xstat --include-fields=atime src/bar)" "atime: 42" ) || exit $? WVSTART 'meta - Linux attr (as root)' WVPASS force-delete testfs/src WVPASS mkdir testfs/src ( WVPASS touch testfs/src/foo WVPASS mkdir testfs/src/bar WVPASS chattr +acdeijstuADST testfs/src/foo WVPASS chattr +acdeijstuADST testfs/src/bar (WVPASS cd testfs; WVPASS test-src-create-extract) || exit $? # Test restoration to a limited filesystem (vfat). 
( WVPASS bup meta --create --recurse --file testfs/src.meta \ testfs/src WVPASS force-delete testfs-limited/src-restore WVPASS mkdir testfs-limited/src-restore WVPASS cd testfs-limited/src-restore WVFAIL bup meta --extract --file ../../testfs/src.meta 2>&1 \ | WVPASS grep -e '^Linux chattr:' \ | WVPASS python -c \ 'import sys; exit(not len(sys.stdin.readlines()) == 3)' ) || exit $? ) || exit $? WVSTART 'meta - Linux xattr (as root)' WVPASS force-delete testfs/src WVPASS mkdir testfs/src WVPASS touch testfs/src/foo WVPASS mkdir testfs/src/bar WVPASS attr -s foo -V bar testfs/src/foo WVPASS attr -s foo -V bar testfs/src/bar (WVPASS cd testfs; WVPASS test-src-create-extract) || exit $? # Test restoration to a limited filesystem (vfat). ( WVPASS bup meta --create --recurse --file testfs/src.meta \ testfs/src WVPASS force-delete testfs-limited/src-restore WVPASS mkdir testfs-limited/src-restore WVPASS cd testfs-limited/src-restore WVFAIL bup meta --extract --file ../../testfs/src.meta WVFAIL bup meta --extract --file ../../testfs/src.meta 2>&1 \ | WVPASS grep -e '^xattr\.set:' \ | WVPASS python -c \ 'import sys; exit(not len(sys.stdin.readlines()) == 2)' ) || exit $? WVSTART 'meta - POSIX.1e ACLs (as root)' WVPASS force-delete testfs/src WVPASS mkdir testfs/src WVPASS touch testfs/src/foo WVPASS mkdir testfs/src/bar WVPASS setfacl -m u:root:r testfs/src/foo WVPASS setfacl -m u:root:r testfs/src/bar (WVPASS cd testfs; WVPASS test-src-create-extract) || exit $? # Test restoration to a limited filesystem (vfat). ( WVPASS bup meta --create --recurse --file testfs/src.meta \ testfs/src WVPASS force-delete testfs-limited/src-restore WVPASS mkdir testfs-limited/src-restore WVPASS cd testfs-limited/src-restore WVFAIL bup meta --extract --file ../../testfs/src.meta 2>&1 \ | WVPASS grep -e '^POSIX1e ACL applyto:' \ | WVPASS python -c \ 'import sys; exit(not len(sys.stdin.readlines()) == 2)' ) || exit $? ) || exit $? 
fi bup-0.25/t/test-redundant-saves.sh000077500000000000000000000031641225146730500171210ustar00rootroot00000000000000#!/usr/bin/env bash # Test that running save more than once with no other changes produces # the exact same tree. # Note: we can't compare the top-level hash (i.e. the output of "save # -t" because that currently pulls the metadata for unindexed parent # directories directly from the filesystem, and the relevant atimes # may change between runs. So instead we extract the roots of the # indexed trees for comparison via t/subtree-hash. . ./wvtest-bup.sh set -o pipefail WVSTART 'all' top="$(pwd)" tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" export GIT_DIR="$BUP_DIR" bup() { "$top/bup" "$@"; } WVPASS mkdir -p "$tmpdir/src" WVPASS mkdir -p "$tmpdir/src/d" WVPASS mkdir -p "$tmpdir/src/d/e" WVPASS touch "$tmpdir/src/"{f,b,a,d} WVPASS touch "$tmpdir/src/d/z" WVPASS bup init WVPASS bup index -u "$tmpdir/src" declare -a indexed_top IFS=/ indexed_top="${tmpdir##/}" indexed_top=(${indexed_top%%/}) unset IFS tree1=$(WVPASS bup save -t "$tmpdir/src") || exit $? indexed_tree1="$(WVPASS t/subtree-hash "$tree1" "${indexed_top[@]}" src)" \ || exit $? result="$(WVPASS cd "$tmpdir/src"; WVPASS bup index -m)" || exit $? WVPASSEQ "$result" "" tree2=$(WVPASS bup save -t "$tmpdir/src") || exit $? indexed_tree2="$(WVPASS t/subtree-hash "$tree2" "${indexed_top[@]}" src)" \ || exit $? WVPASSEQ "$indexed_tree1" "$indexed_tree2" result="$(WVPASS bup index -s / | WVFAIL grep ^D)" || exit $? WVPASSEQ "$result" "" tree3=$(WVPASS bup save -t /) || exit $? indexed_tree3="$(WVPASS t/subtree-hash "$tree3" "${indexed_top[@]}" src)" || exit $? WVPASSEQ "$indexed_tree1" "$indexed_tree3" WVPASS rm -rf "$tmpdir" bup-0.25/t/test-restore-map-owner.sh000077500000000000000000000060211225146730500173770ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh if [ $(t/root-status) != root ]; then echo 'Not root: skipping restore --map-* tests.' 
exit 0 # FIXME: add WVSKIP. fi top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } uid=$(WVPASS id -u) || exit $? user=$(WVPASS id -un) || exit $? gid=$(WVPASS id -g) || exit $? group=$(WVPASS id -gn) || exit $? other_uinfo=$(WVPASS t/id-other-than --user "$user") || exit $? other_user="${other_uinfo%%:*}" other_uid="${other_uinfo##*:}" other_ginfo=$(WVPASS t/id-other-than --group "$group") || exit $? other_group="${other_ginfo%%:*}" other_gid="${other_ginfo##*:}" WVPASS bup init WVPASS cd "$tmpdir" WVSTART "restore --map-user/group/uid/gid (control)" WVPASS mkdir src WVPASS touch src/foo # Some systems assign the parent dir group to new paths. WVPASS chgrp -R "$group" src WVPASS bup index src WVPASS bup save -n src src WVPASS bup restore -C dest "src/latest/$(pwd)/src/" WVPASS bup xstat dest/foo > foo-xstat WVPASS grep -qE "^user: $user\$" foo-xstat WVPASS grep -qE "^uid: $uid\$" foo-xstat WVPASS grep -qE "^group: $group\$" foo-xstat WVPASS grep -qE "^gid: $gid\$" foo-xstat WVSTART "restore --map-user/group/uid/gid (user/group)" WVPASS rm -rf dest # Have to remap uid/gid too because we're root and 0 would win). WVPASS bup restore -C dest \ --map-uid "$uid=$other_uid" --map-gid "$gid=$other_gid" \ --map-user "$user=$other_user" --map-group "$group=$other_group" \ "src/latest/$(pwd)/src/" WVPASS bup xstat dest/foo > foo-xstat WVPASS grep -qE "^user: $other_user\$" foo-xstat WVPASS grep -qE "^uid: $other_uid\$" foo-xstat WVPASS grep -qE "^group: $other_group\$" foo-xstat WVPASS grep -qE "^gid: $other_gid\$" foo-xstat WVSTART "restore --map-user/group/uid/gid (user/group trumps uid/gid)" WVPASS rm -rf dest WVPASS bup restore -C dest \ --map-uid "$uid=$other_uid" --map-gid "$gid=$other_gid" \ "src/latest/$(pwd)/src/" # Should be no changes. 
WVPASS bup xstat dest/foo > foo-xstat WVPASS grep -qE "^user: $user\$" foo-xstat WVPASS grep -qE "^uid: $uid\$" foo-xstat WVPASS grep -qE "^group: $group\$" foo-xstat WVPASS grep -qE "^gid: $gid\$" foo-xstat WVSTART "restore --map-user/group/uid/gid (uid/gid)" WVPASS rm -rf dest WVPASS bup restore -C dest \ --map-user "$user=" --map-group "$group=" \ --map-uid "$uid=$other_uid" --map-gid "$gid=$other_gid" \ "src/latest/$(pwd)/src/" WVPASS bup xstat dest/foo > foo-xstat WVPASS grep -qE "^user: $other_user\$" foo-xstat WVPASS grep -qE "^uid: $other_uid\$" foo-xstat WVPASS grep -qE "^group: $other_group\$" foo-xstat WVPASS grep -qE "^gid: $other_gid\$" foo-xstat WVSTART "restore --map-user/group/uid/gid (zero uid/gid trumps all)" WVPASS rm -rf dest WVPASS bup restore -C dest \ --map-user "$user=$other_user" --map-group "$group=$other_group" \ --map-uid "$uid=0" --map-gid "$gid=0" \ "src/latest/$(pwd)/src/" WVPASS bup xstat dest/foo > foo-xstat WVPASS grep -qE "^uid: 0\$" foo-xstat WVPASS grep -qE "^gid: 0\$" foo-xstat WVPASS rm -rf "$tmpdir" bup-0.25/t/test-restore-single-file.sh000077500000000000000000000012711225146730500176720ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh set -o pipefail WVSTART 'all' top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" bup() { "$top/bup" "$@"; } WVPASS mkdir "$tmpdir/foo" WVPASS mkdir "$tmpdir/foo/bar" # Make sure a dir sorts before baz (regression test). WVPASS touch "$tmpdir/foo/baz" WVPASS WVPASS bup init WVPASS WVPASS bup index "$tmpdir/foo" WVPASS bup save -n foo "$tmpdir/foo" # Make sure the timestamps will differ if metadata isn't being restored. 
WVPASS bup tick WVPASS bup restore -C "$tmpdir/restore" "foo/latest/$tmpdir/foo/baz" WVPASS "$top/t/compare-trees" "$tmpdir/foo/baz" "$tmpdir/restore/baz" WVPASS rm -rf "$tmpdir" bup-0.25/t/test-rm-between-index-and-save.sh000077500000000000000000000040331225146730500206600ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh set -o pipefail top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" D="$tmpdir/data" bup() { "$top/bup" "$@"; } WVSTART "remove file" # Fixed in commit 8585613c1f45f3e20feec00b24fc7e3a948fa23e ("Store # metadata in the index....") WVPASS mkdir "$D" WVPASS bup init WVPASS echo "content" > "$D"/foo WVPASS echo "content" > "$D"/bar WVPASS bup tick WVPASS bup index -ux "$D" WVPASS bup save -n save-fail-missing "$D" WVPASS echo "content" > "$D"/baz WVPASS bup tick WVPASS bup index -ux "$D" WVPASS rm "$D"/foo # When "bup tick" is removed above, this may fail (complete with warning), # since the ctime/mtime of "foo" might be pushed back: WVPASS bup save -n save-fail-missing "$D" # when the save-call failed, foo is missing from output, since only # then bup notices, that it was removed: WVPASSEQ "$(bup ls -a save-fail-missing/latest/$TOP/$D/)" "bar baz foo" # index/save again WVPASS bup tick WVPASS bup index -ux "$D" WVPASS bup save -n save-fail-missing "$D" # now foo is gone: WVPASSEQ "$(bup ls -a save-fail-missing/latest/$TOP/$D/)" "bar baz" # TODO: Test for racecondition between reading a file and reading its metadata? 
WVSTART "remove dir" WVPASS rm -r "$D" WVPASS mkdir "$D" WVPASS rm -r "$BUP_DIR" WVPASS bup init WVPASS mkdir "$D"/foo WVPASS mkdir "$D"/bar WVPASS bup tick WVPASS bup index -ux "$D" WVPASS bup save -n save-fail-missing "$D" WVPASS touch "$D"/bar WVPASS mkdir "$D"/baz WVPASS bup tick WVPASS bup index -ux "$D" WVPASS rmdir "$D"/foo # with directories, bup notices that foo is missing, so it fails # (complete with delayed error) WVFAIL bup save -n save-fail-missing "$D" # ... so "foo" is absent from "bup ls" WVPASSEQ "$(bup ls -a save-fail-missing/latest/$TOP/$D/)" "bar/ baz/" # Index again: WVPASS bup tick WVPASS bup index -ux "$D" # no non-zero-exitcode anymore: WVPASS bup save -n save-fail-missing "$D" # foo is (still...) missing, of course: WVPASSEQ "$(bup ls -a save-fail-missing/latest/$TOP/$D/)" "bar/ baz/" WVPASS rm -rf "$tmpdir" bup-0.25/t/test-save-creates-no-unrefs.sh000077500000000000000000000007621225146730500203130ustar00rootroot00000000000000#!/usr/bin/env bash . ./wvtest-bup.sh WVSTART 'all' top="$(WVPASS pwd)" || exit $? tmpdir="$(WVPASS wvmktempdir)" || exit $? export BUP_DIR="$tmpdir/bup" export GIT_DIR="$BUP_DIR" bup() { "$top/bup" "$@"; } WVPASS mkdir -p "$tmpdir/src" WVPASS touch "$tmpdir/src/foo" WVPASS bup init WVPASS bup index "$tmpdir/src" WVPASS bup save -n src "$tmpdir/src" WVPASSEQ "$(git fsck --unreachable)" "" WVPASS bup save -n src "$tmpdir/src" WVPASSEQ "$(git fsck --unreachable)" "" WVPASS rm -rf "$tmpdir" bup-0.25/t/test.sh000077500000000000000000000643071225146730500140260ustar00rootroot00000000000000#!/usr/bin/env bash . wvtest.sh . t/lib.sh set -o pipefail TOP="$(WVPASS /bin/pwd)" || exit $? 
export BUP_DIR="$TOP/buptest.tmp" bup() { "$TOP/bup" "$@" } WVSTART "init" WVPASS rm -rf "$BUP_DIR" WVPASS bup init WVSTART "index" D=bupdata.tmp WVPASS force-delete $D WVPASS mkdir $D WVFAIL bup index --exclude-from $D/cannot-exist $D WVPASSEQ "$(bup index --check -p)" "" WVPASSEQ "$(bup index --check -p $D)" "" WVFAIL [ -e $D.fake ] WVFAIL bup index --check -u $D.fake WVPASS bup index --check -u $D WVPASSEQ "$(bup index --check -p $D)" "$D/" WVPASS touch $D/a WVPASS bup random 128k >$D/b WVPASS mkdir $D/d $D/d/e WVPASS bup random 512 >$D/f WVPASS ln -s non-existent-file $D/g WVPASSEQ "$(bup index -s $D/)" "A $D/" WVPASSEQ "$(bup index -s $D/b)" "" WVPASSEQ "$(bup index --check -us $D/b)" "A $D/b" WVPASSEQ "$(bup index --check -us $D/b $D/d)" \ "A $D/d/e/ A $D/d/ A $D/b" WVPASS touch $D/d/z WVPASS bup tick WVPASSEQ "$(bup index --check -usx $D)" \ "A $D/g A $D/f A $D/d/z A $D/d/e/ A $D/d/ A $D/b A $D/a A $D/" WVPASSEQ "$(bup index --check -us $D/a $D/b --fake-valid)" \ " $D/b $D/a" WVPASSEQ "$(bup index --check -us $D/a)" " $D/a" # stays unmodified WVPASSEQ "$(bup index --check -us $D/d --fake-valid)" \ " $D/d/z $D/d/e/ $D/d/" WVPASS touch $D/d/z WVPASS bup index -u $D/d/z # becomes modified WVPASSEQ "$(bup index -s $D/a $D $D/b)" \ "A $D/g A $D/f M $D/d/z $D/d/e/ M $D/d/ $D/b $D/a A $D/" WVPASS bup index -u $D/d/e $D/a --fake-invalid WVPASSEQ "$(cd $D && bup index -m .)" \ "./g ./f ./d/z ./d/e/ ./d/ ./a ./" WVPASSEQ "$(cd $D && bup index -m)" \ "g f d/z d/e/ d/ a ./" WVPASSEQ "$(cd $D && bup index -s .)" "$(cd $D && bup index -s .)" WVFAIL bup save -t $D/doesnt-exist-filename WVPASS mv $BUP_DIR/bupindex $BUP_DIR/bi.old WVFAIL bup save -t $D/d/e/fifotest WVPASS mkfifo $D/d/e/fifotest WVPASS bup index -u $D/d/e/fifotest WVPASS bup save -t $D/d/e/fifotest WVPASS bup save -t $D/d/e WVPASS rm -f $D/d/e/fifotest WVPASS bup index -u $D/d/e WVFAIL bup save -t $D/d/e/fifotest WVPASS mv $BUP_DIR/bi.old $BUP_DIR/bupindex WVPASS bup index -u $D/d/e WVPASS bup save -t $D/d/e 
WVPASSEQ "$(cd $D && bup index -m)" \ "g f d/z d/ a ./" WVPASS bup save -t $D/d WVPASS bup index --fake-invalid $D/d/z WVPASS bup save -t $D/d/z WVPASS bup save -t $D/d/z # test regenerating trees when no files are changed WVPASS bup save -t $D/d WVPASSEQ "$(cd $D && bup index -m)" \ "g f a ./" WVPASS bup save -r :$BUP_DIR -n r-test $D WVFAIL bup save -r :$BUP_DIR/fake/path -n r-test $D WVFAIL bup save -r :$BUP_DIR -n r-test $D/fake/path WVSTART "split" WVPASS echo a >a.tmp WVPASS echo b >b.tmp WVPASS bup split -b a.tmp >taga.tmp WVPASS bup split -b b.tmp >tagb.tmp WVPASS cat a.tmp b.tmp | WVPASS bup split -b >tagab.tmp WVPASSEQ $(cat taga.tmp | wc -l) 1 WVPASSEQ $(cat tagb.tmp | wc -l) 1 WVPASSEQ $(cat tagab.tmp | wc -l) 1 WVPASSEQ $(cat tag[ab].tmp | wc -l) 2 WVPASSEQ "$(bup split -b a.tmp b.tmp)" "$(cat tagab.tmp)" WVPASSEQ "$(bup split -b --keep-boundaries a.tmp b.tmp)" "$(cat tag[ab].tmp)" WVPASSEQ "$(cat tag[ab].tmp | bup split -b --keep-boundaries --git-ids)" \ "$(cat tag[ab].tmp)" WVPASSEQ "$(cat tag[ab].tmp | bup split -b --git-ids)" \ "$(cat tagab.tmp)" WVPASS bup split --bench -b tags1.tmp WVPASS bup split -vvvv -b t/testfile2 >tags2.tmp WVPASS bup margin WVPASS bup midx -f WVPASS bup midx --check -a WVPASS bup midx -o $BUP_DIR/objects/pack/test1.midx \ $BUP_DIR/objects/pack/*.idx WVPASS bup midx --check -a WVPASS bup midx -o $BUP_DIR/objects/pack/test1.midx \ $BUP_DIR/objects/pack/*.idx \ $BUP_DIR/objects/pack/*.idx WVPASS bup midx --check -a all=$(echo $BUP_DIR/objects/pack/*.idx $BUP_DIR/objects/pack/*.midx) WVPASS bup midx -o $BUP_DIR/objects/pack/zzz.midx $all WVPASS bup tick WVPASS bup midx -o $BUP_DIR/objects/pack/yyy.midx $all WVPASS bup midx -a WVPASSEQ "$(echo $BUP_DIR/objects/pack/*.midx)" \ "$BUP_DIR/objects/pack/yyy.midx" WVPASS bup margin WVPASS bup split -t t/testfile2 >tags2t.tmp WVPASS bup split -t t/testfile2 --fanout 3 >tags2tf.tmp WVPASS bup split -r "$BUP_DIR" -c t/testfile2 >tags2c.tmp WVPASS bup split -r :$BUP_DIR -c t/testfile2 
>tags2c.tmp WVPASS ls -lR \ | WVPASS bup split -r :$BUP_DIR -c --fanout 3 --max-pack-objects 3 -n lslr \ || exit $? WVPASS bup ls WVFAIL bup ls /does-not-exist WVPASS bup ls /lslr WVPASS bup ls /lslr/latest WVPASS bup ls /lslr/latest/ #WVPASS bup ls /lslr/1971-01-01 # all dates always exist WVFAIL diff -u tags1.tmp tags2.tmp # fanout must be different from non-fanout WVFAIL diff tags2t.tmp tags2tf.tmp WVPASS wc -c t/testfile1 t/testfile2 WVPASS wc -l tags1.tmp tags2.tmp WVSTART "bloom" WVPASS bup bloom -c $(ls -1 $BUP_DIR/objects/pack/*.idx|head -n1) WVPASS rm $BUP_DIR/objects/pack/bup.bloom WVPASS bup bloom -k 4 WVPASS bup bloom -c $(ls -1 $BUP_DIR/objects/pack/*.idx|head -n1) WVPASS bup bloom -d buptest.tmp/objects/pack --ruin --force WVFAIL bup bloom -c $(ls -1 $BUP_DIR/objects/pack/*.idx|head -n1) WVPASS bup bloom --force -k 5 WVPASS bup bloom -c $(ls -1 $BUP_DIR/objects/pack/*.idx|head -n1) WVSTART "memtest" WVPASS bup memtest -c1 -n100 WVPASS bup memtest -c1 -n100 --existing WVSTART "join" WVPASS bup join $(cat tags1.tmp) >out1.tmp WVPASS bup join out2.tmp WVPASS bup join out2c.tmp WVPASS bup join -r ":$BUP_DIR" out2c.tmp WVPASS diff -u t/testfile1 out1.tmp WVPASS diff -u t/testfile2 out2.tmp WVPASS diff -u t/testfile2 out2t.tmp WVPASS diff -u t/testfile2 out2c.tmp WVSTART "save/git-fsck" ( WVPASS cd "$BUP_DIR" #git repack -Ad #git prune (WVPASS cd "$TOP/t/sampledata" && WVPASS bup save -vvn master /) || exit $? result="$(git fsck --full --strict 2>&1)" || exit $? n=$(echo "$result" | WVFAIL egrep -v 'dangling (commit|tree|blob)' | WVPASS tee -a /dev/stderr | WVPASS wc -l) || exit $? WVPASS [ "$n" -eq 0 ] ) || exit $? 
WVSTART "restore" WVPASS force-delete buprestore.tmp WVFAIL bup restore boink WVPASS touch $TOP/$D/$D WVPASS bup index -u $TOP/$D WVPASS bup save -n master / WVPASS bup restore -C buprestore.tmp "/master/latest/$TOP/$D" WVPASSEQ "$(ls buprestore.tmp)" "bupdata.tmp" WVPASS force-delete buprestore.tmp WVPASS bup restore -C buprestore.tmp "/master/latest/$TOP/$D/" WVPASS touch $D/non-existent-file buprestore.tmp/non-existent-file # else diff fails WVPASS diff -ur $D/ buprestore.tmp/ ( tmp=testrestore.tmp WVPASS force-delete $tmp WVPASS mkdir $tmp export BUP_DIR="$(pwd)/$tmp/bup" WVPASS WVPASS bup init WVPASS mkdir -p $tmp/src/x/y/z WVPASS bup random 8k > $tmp/src/x/y/random-1 WVPASS bup random 8k > $tmp/src/x/y/z/random-2 WVPASS bup index -u $tmp/src WVPASS bup save --strip -n foo $tmp/src WVSTART "restore /foo/latest" WVPASS bup restore -C $tmp/restore /foo/latest WVPASS t/compare-trees $tmp/src/ $tmp/restore/latest/ WVSTART "restore /foo/latest/" WVPASS force-delete "$tmp/restore" WVPASS bup restore -C $tmp/restore /foo/latest/ for x in $tmp/src/*; do WVPASS t/compare-trees $x/ $tmp/restore/$(basename $x); done WVSTART "restore /foo/latest/." WVPASS force-delete "$tmp/restore" WVPASS bup restore -C $tmp/restore /foo/latest/. WVPASS t/compare-trees $tmp/src/ $tmp/restore/ WVSTART "restore /foo/latest/x" WVPASS force-delete "$tmp/restore" WVPASS bup restore -C $tmp/restore /foo/latest/x WVPASS t/compare-trees $tmp/src/x/ $tmp/restore/x/ WVSTART "restore /foo/latest/x/" WVPASS force-delete "$tmp/restore" WVPASS bup restore -C $tmp/restore /foo/latest/x/ for x in $tmp/src/x/*; do WVPASS t/compare-trees $x/ $tmp/restore/$(basename $x); done WVSTART "restore /foo/latest/x/." WVPASS force-delete "$tmp/restore" WVPASS bup restore -C $tmp/restore /foo/latest/x/. WVPASS t/compare-trees $tmp/src/x/ $tmp/restore/ ) || exit $? 
WVSTART "ftp" WVPASS bup ftp "cat /master/latest/$TOP/$D/b" >$D/b.new WVPASS bup ftp "cat /master/latest/$TOP/$D/f" >$D/f.new WVPASS bup ftp "cat /master/latest/$TOP/$D/f"{,} >$D/f2.new WVPASS bup ftp "cat /master/latest/$TOP/$D/a" >$D/a.new WVPASSEQ "$(sha1sum <$D/b)" "$(sha1sum <$D/b.new)" WVPASSEQ "$(sha1sum <$D/f)" "$(sha1sum <$D/f.new)" WVPASSEQ "$(cat $D/f.new{,} | sha1sum)" "$(sha1sum <$D/f2.new)" WVPASSEQ "$(sha1sum <$D/a)" "$(sha1sum <$D/a.new)" WVSTART "tag" WVFAIL bup tag -d v0.n 2>/dev/null WVFAIL bup tag v0.n non-existant 2>/dev/null WVPASSEQ "$(bup tag)" "" WVPASS bup tag v0.1 master WVPASSEQ "$(bup tag)" "v0.1" WVFAIL bup tag v0.1 master WVPASS bup tag -f v0.1 master WVPASS bup tag -d v0.1 WVPASS bup tag -f -d v0.1 WVFAIL bup tag -d v0.1 # This section destroys data in the bup repository, so it is done last. WVSTART "fsck" WVPASS bup fsck WVPASS bup fsck --quick if bup fsck --par2-ok; then WVSTART "fsck (par2)" else WVSTART "fsck (PAR2 IS MISSING)" fi WVPASS bup fsck -g WVPASS bup fsck -r WVPASS bup damage $BUP_DIR/objects/pack/*.pack -n10 -s1 -S0 WVFAIL bup fsck --quick WVFAIL bup fsck --quick --disable-par2 WVPASS chmod u+w $BUP_DIR/objects/pack/*.idx WVPASS bup damage $BUP_DIR/objects/pack/*.idx -n10 -s1 -S0 WVFAIL bup fsck --quick -j4 WVPASS bup damage $BUP_DIR/objects/pack/*.pack -n10 -s1024 --percent 0.4 -S0 WVFAIL bup fsck --quick WVFAIL bup fsck --quick -rvv -j99 # fails because repairs were needed if bup fsck --par2-ok; then WVPASS bup fsck -r # ok because of repairs from last time WVPASS bup damage $BUP_DIR/objects/pack/*.pack -n202 -s1 --equal -S0 WVFAIL bup fsck WVFAIL bup fsck -rvv # too many errors to be repairable WVFAIL bup fsck -r # too many errors to be repairable else WVFAIL bup fsck --quick -r # still fails because par2 was missing fi WVSTART "exclude-bupdir" D=exclude-bupdir.tmp WVPASS force-delete $D WVPASS mkdir $D export BUP_DIR="$D/.bup" WVPASS bup init WVPASS touch $D/a WVPASS bup random 128k >$D/b WVPASS mkdir $D/d $D/d/e 
WVPASS bup random 512 >$D/f WVPASS bup index -ux $D WVPASS bup save -n exclude-bupdir $D WVPASSEQ "$(bup ls -a exclude-bupdir/latest/$TOP/$D/)" "a b d/ f" WVSTART "exclude" ( D=exclude.tmp WVPASS force-delete $D WVPASS mkdir $D export BUP_DIR="$D/.bup" WVPASS bup init WVPASS touch $D/a WVPASS bup random 128k >$D/b WVPASS mkdir $D/d $D/d/e WVPASS bup random 512 >$D/f WVPASS bup random 512 >$D/j WVPASS bup index -ux --exclude $D/d --exclude $D/j $D WVPASS bup save -n exclude $D WVPASSEQ "$(bup ls exclude/latest/$TOP/$D/)" "a b f" WVPASS mkdir $D/g $D/h WVPASS bup index -ux --exclude $D/d --exclude $TOP/$D/g --exclude $D/h \ --exclude $TOP/$D/j $D WVPASS bup save -n exclude $D WVPASSEQ "$(bup ls exclude/latest/$TOP/$D/)" "a b f" ) || exit $? WVSTART "exclude-from" ( D=exclude-fromdir.tmp EXCLUDE_FILE=exclude-from.tmp WVPASS echo "$D/d $TOP/$D/g $D/h $D/i" > $EXCLUDE_FILE WVPASS force-delete $D WVPASS mkdir $D export BUP_DIR="$D/.bup" WVPASS bup init WVPASS touch $D/a WVPASS bup random 128k >$D/b WVPASS mkdir $D/d $D/d/e WVPASS bup random 512 >$D/f WVPASS mkdir $D/g $D/h WVPASS bup random 128k > $D/i WVPASS bup index -ux --exclude-from $EXCLUDE_FILE $D WVPASS bup save -n exclude-from $D WVPASSEQ "$(bup ls exclude-from/latest/$TOP/$D/)" "a b f" WVPASS rm $EXCLUDE_FILE ) || exit $? WVSTART "save (no index)" ( tmp=save-no-index.tmp WVPASS force-delete $tmp WVPASS mkdir $tmp export BUP_DIR="$(WVPASS pwd)/$tmp/bup" || exit $? WVPASS bup init WVFAIL bup save -n nothing / WVPASS rm -r "$tmp" ) || exit $? WVSTART "save --strip" ( tmp=graft-points.tmp WVPASS force-delete $tmp WVPASS mkdir $tmp export BUP_DIR="$(WVPASS pwd)/$tmp/bup" || exit $? WVPASS bup init WVPASS mkdir -p $tmp/src/x/y/z WVPASS bup random 8k > $tmp/src/x/y/random-1 WVPASS bup random 8k > $tmp/src/x/y/z/random-2 WVPASS bup index -u $tmp/src WVPASS bup save --strip -n foo $tmp/src/x/y WVPASS bup restore -C $tmp/restore /foo/latest WVPASS t/compare-trees $tmp/src/x/y/ "$tmp/restore/latest/" ) || exit $? 
WVSTART "save --strip-path (relative)" ( tmp=graft-points.tmp WVPASS force-delete $tmp WVPASS mkdir $tmp export BUP_DIR="$(WVPASS pwd)/$tmp/bup" || exit $? WVPASS bup init WVPASS mkdir -p $tmp/src/x/y/z WVPASS bup random 8k > $tmp/src/x/y/random-1 WVPASS bup random 8k > $tmp/src/x/y/z/random-2 WVPASS bup index -u $tmp/src WVPASS bup save --strip-path $tmp/src -n foo $tmp/src/x WVPASS bup restore -C $tmp/restore /foo/latest WVPASS t/compare-trees $tmp/src/ "$tmp/restore/latest/" ) || exit $? WVSTART "save --strip-path (absolute)" ( tmp=graft-points.tmp WVPASS force-delete $tmp WVPASS mkdir $tmp export BUP_DIR="$(WVPASS pwd)/$tmp/bup" || exit $? WVPASS bup init WVPASS mkdir -p $tmp/src/x/y/z WVPASS bup random 8k > $tmp/src/x/y/random-1 WVPASS bup random 8k > $tmp/src/x/y/z/random-2 WVPASS bup index -u $tmp/src WVPASS bup save --strip-path "$TOP" -n foo $tmp/src WVPASS bup restore -C $tmp/restore /foo/latest WVPASS t/compare-trees $tmp/src/ "$tmp/restore/latest/$tmp/src/" ) || exit $? WVSTART "save --strip-path (no match)" ( if test $(WVPASS path-filesystems . | WVPASS sort -u | WVPASS wc -l) -ne 1 then # Skip the test because the attempt to restore parent dirs to # the current filesystem may fail -- i.e. running from # /foo/ext4/bar/btrfs will fail when bup tries to restore # linux attrs above btrfs to the restore tree *inside* btrfs. echo "(running from tree with mixed filesystems; skipping test)" 1>&2 exit 0 fi tmp=graft-points.tmp WVPASS force-delete $tmp WVPASS mkdir $tmp export BUP_DIR="$(WVPASS pwd)/$tmp/bup" || exit $? WVPASS bup init WVPASS mkdir -p $tmp/src/x/y/z WVPASS bup random 8k > $tmp/src/x/y/random-1 WVPASS bup random 8k > $tmp/src/x/y/z/random-2 WVPASS bup index -u $tmp/src WVPASS bup save --strip-path $tmp/foo -n foo $tmp/src/x WVPASS bup restore -C $tmp/restore /foo/latest WVPASS t/compare-trees $tmp/src/ "$tmp/restore/latest/$TOP/$tmp/src/" ) || exit $? 
WVSTART "save --graft (empty graft points disallowed)" ( tmp=graft-points.tmp WVPASS force-delete $tmp WVPASS mkdir $tmp export BUP_DIR="$(WVPASS pwd)/$tmp/bup" || exit $? WVPASS bup init WVFAIL bup save --graft =/grafted -n graft-point-absolute $tmp WVFAIL bup save --graft $TOP/$tmp= -n graft-point-absolute $tmp ) || exit $? WVSTART "save --graft /x/y=/a/b (relative paths)" ( tmp=graft-points.tmp WVPASS force-delete $tmp WVPASS mkdir $tmp export BUP_DIR="$(WVPASS pwd)/$tmp/bup" || exit $? WVPASS bup init WVPASS mkdir -p $tmp/src/x/y/z WVPASS bup random 8k > $tmp/src/x/y/random-1 WVPASS bup random 8k > $tmp/src/x/y/z/random-2 WVPASS bup index -u $tmp/src WVPASS bup save --graft $tmp/src=x -n foo $tmp/src WVPASS bup restore -C $tmp/restore /foo/latest WVPASS t/compare-trees $tmp/src/ "$tmp/restore/latest/$TOP/x/" ) || exit $? WVSTART "save --graft /x/y=/a/b (matching structure)" ( tmp=graft-points.tmp WVPASS force-delete $tmp WVPASS mkdir $tmp export BUP_DIR="$(WVPASS pwd)/$tmp/bup" || exit $? WVPASS bup init WVPASS mkdir -p $tmp/src/x/y/z WVPASS bup random 8k > $tmp/src/x/y/random-1 WVPASS bup random 8k > $tmp/src/x/y/z/random-2 WVPASS bup index -u $tmp/src WVPASS bup save -v --graft "$TOP/$tmp/src/x/y=$TOP/$tmp/src/a/b" \ -n foo $tmp/src/x/y WVPASS bup restore -C $tmp/restore /foo/latest WVPASS t/compare-trees $tmp/src/x/y/ \ "$tmp/restore/latest/$TOP/$tmp/src/a/b/" ) || exit $? WVSTART "save --graft /x/y=/a (shorter target)" ( tmp=graft-points.tmp WVPASS force-delete $tmp WVPASS mkdir $tmp export BUP_DIR="$(WVPASS pwd)/$tmp/bup" || exit $? WVPASS bup init WVPASS mkdir -p $tmp/src/x/y/z WVPASS bup random 8k > $tmp/src/x/y/random-1 WVPASS bup random 8k > $tmp/src/x/y/z/random-2 WVPASS bup index -u $tmp/src WVPASS bup save -v --graft "$TOP/$tmp/src/x/y=/a" -n foo $tmp/src/x/y WVPASS bup restore -C $tmp/restore /foo/latest WVPASS t/compare-trees $tmp/src/x/y/ "$tmp/restore/latest/a/" ) || exit $? 
WVSTART "save --graft /x=/a/b (longer target)" ( tmp=graft-points.tmp export BUP_DIR="$(WVPASS pwd)/$tmp/bup" || exit $? WVPASS force-delete $tmp WVPASS mkdir $tmp WVPASS bup init WVPASS mkdir -p $tmp/src/x/y/z WVPASS bup random 8k > $tmp/src/x/y/random-1 WVPASS bup random 8k > $tmp/src/x/y/z/random-2 WVPASS bup index -u $tmp/src WVPASS bup save -v --graft "$TOP/$tmp/src=$TOP/$tmp/src/a/b/c" \ -n foo $tmp/src WVPASS bup restore -C $tmp/restore /foo/latest WVPASS t/compare-trees $tmp/src/ "$tmp/restore/latest/$TOP/$tmp/src/a/b/c/" ) || exit $? WVSTART "save --graft /x=/ (root target)" ( tmp=graft-points.tmp export BUP_DIR="$(WVPASS pwd)/$tmp/bup" || exit $? WVPASS force-delete $tmp WVPASS mkdir $tmp WVPASS bup init WVPASS mkdir -p $tmp/src/x/y/z WVPASS bup random 8k > $tmp/src/x/y/random-1 WVPASS bup random 8k > $tmp/src/x/y/z/random-2 WVPASS bup index -u $tmp/src WVPASS bup save -v --graft "$TOP/$tmp/src/x=/" -n foo $tmp/src/x WVPASS bup restore -C $tmp/restore /foo/latest WVPASS t/compare-trees $tmp/src/x/ "$tmp/restore/latest/" ) || exit $? #WVSTART "save --graft /=/x/ (root source)" # FIXME: Not tested for now -- will require cleverness, or caution as root. 
WVSTART "indexfile" D=indexfile.tmp INDEXFILE=tmpindexfile.tmp WVPASS rm -f $INDEXFILE WVPASS force-delete $D WVPASS mkdir $D export BUP_DIR="$D/.bup" WVPASS bup init WVPASS touch $D/a WVPASS touch $D/b WVPASS mkdir $D/c WVPASS bup index -ux $D WVPASS bup save --strip -n bupdir $D WVPASSEQ "$(bup ls bupdir/latest/)" "a b c/" WVPASS bup index -f $INDEXFILE --exclude=$D/c -ux $D WVPASS bup save --strip -n indexfile -f $INDEXFILE $D WVPASSEQ "$(bup ls indexfile/latest/)" "a b" WVSTART "import-rsnapshot" D=rsnapshot.tmp export BUP_DIR="$TOP/$D/.bup" WVPASS force-delete $D WVPASS mkdir $D WVPASS bup init WVPASS mkdir -p $D/hourly.0/buptest/a WVPASS touch $D/hourly.0/buptest/a/b WVPASS mkdir -p $D/hourly.0/buptest/c/d WVPASS touch $D/hourly.0/buptest/c/d/e WVPASS true WVPASS bup import-rsnapshot $D/ WVPASSEQ "$(bup ls buptest/latest/)" "a/ c/" if [ "$(type -p rdiff-backup)" != "" ]; then WVSTART "import-rdiff-backup" D=rdiff-backup.tmp export BUP_DIR="$TOP/$D/.bup" WVPASS force-delete $D WVPASS mkdir $D WVPASS bup init WVPASS mkdir $D/rdiff-backup WVPASS rdiff-backup $TOP/cmd $D/rdiff-backup WVPASS bup tick WVPASS rdiff-backup $TOP/Documentation $D/rdiff-backup WVPASS bup import-rdiff-backup $D/rdiff-backup import-rdiff-backup WVPASSEQ $(bup ls import-rdiff-backup/ | wc -l) 3 WVPASSEQ "$(bup ls -a import-rdiff-backup/latest/ | sort)" \ "$(ls -A $TOP/Documentation | sort)" fi WVSTART "compression" D=compression0.tmp export BUP_DIR="$TOP/$D/.bup" WVPASS force-delete $D WVPASS mkdir $D WVPASS bup init WVPASS bup index $TOP/Documentation WVPASS bup save -n compression -0 --strip $TOP/Documentation # 'ls' on NetBSD sets -A by default when running as root, so we have to undo # it by grepping out any dotfiles. (Normal OSes don't auto-set -A, but this # is harmless there.) expected="$(WVPASS ls $TOP/Documentation | grep -v '^\.' | WVPASS sort)" \ || exit $? actual="$(WVPASS bup ls compression/latest/ | WVPASS sort)" || exit $? 
WVPASSEQ "$actual" "$expected" COMPRESSION_0_SIZE=$(WVPASS du -k -s $D | WVPASS cut -f1) || exit $? D=compression9.tmp export BUP_DIR="$TOP/$D/.bup" WVPASS force-delete $D WVPASS mkdir $D WVPASS bup init WVPASS bup index $TOP/Documentation WVPASS bup save -n compression -9 --strip $TOP/Documentation WVPASSEQ "$(bup ls compression/latest/ | sort)" \ "$(ls $TOP/Documentation | grep -v '^\.' | sort)" COMPRESSION_9_SIZE=$(WVPASS du -k -s $D | WVPASS cut -f1) || exit $? WVPASS [ "$COMPRESSION_9_SIZE" -lt "$COMPRESSION_0_SIZE" ] WVSTART "save disjoint top-level directories" ( # Resolve any symlinks involving the top top-level dirs. real_pwd="$(WVPASS realpath .)" || exit $? real_tmp="$(WVPASS realpath /tmp/.)" || exit $? pwd_top="$(echo $real_pwd | WVPASS awk -F "/" '{print $2}')" || exit $? tmp_top="$(echo $real_tmp | WVPASS awk -F "/" '{print $2}')" || exit $? if [ "$pwd_top" = "$tmp_top" ]; then echo "(running from within /$tmp_top; skipping test)" 1>&2 exit 0 fi D=bupdata.tmp WVPASS force-delete $D WVPASS mkdir -p $D/x WVPASS date > $D/x/1 tmpdir="$(WVPASS mktemp -d $real_tmp/bup-test-XXXXXXX)" || exit $? cleanup() { WVPASS rm -r "${tmpdir}"; } WVPASS trap cleanup EXIT WVPASS date > "$tmpdir/2" export BUP_DIR="$TOP/buptest.tmp" WVPASS test -d "$BUP_DIR" && WVPASS rm -r "$BUP_DIR" WVPASS bup init WVPASS bup index -vu $(pwd)/$D/x "$tmpdir" WVPASS bup save -t -n src $(pwd)/$D/x "$tmpdir" # For now, assume that "ls -a" and "sort" use the same order. actual="$(WVPASS bup ls -a src/latest)" || exit $? expected="$(echo -e "$pwd_top/\n$tmp_top/" | WVPASS sort)" || exit $? WVPASSEQ "$actual" "$expected" ) || exit $? 
WVSTART "clear-index" D=clear-index.tmp export BUP_DIR="$TOP/$D/.bup" WVPASS force-delete $TOP/$D WVPASS mkdir $TOP/$D WVPASS bup init WVPASS touch $TOP/$D/foo WVPASS touch $TOP/$D/bar WVPASS bup index -u $D WVPASSEQ "$(bup index -p)" "$D/foo $D/bar $D/ ./" WVPASS rm $TOP/$D/foo WVPASS bup index --clear WVPASS bup index -u $TOP/$D expected="$(WVPASS bup index -p)" || exit $? WVPASSEQ "$expected" "$D/bar $D/ ./" # bup index --exclude-rx ... ( export BUP_DIR="$TOP/buptest.tmp" D=bupdata.tmp WVSTART "index --exclude-rx '^/foo' (root anchor)" WVPASS rm -rf "$D" "$BUP_DIR" buprestore.tmp WVPASS bup init WVPASS mkdir $D WVPASS touch $D/a WVPASS touch $D/b WVPASS mkdir $D/sub1 WVPASS mkdir $D/sub2 WVPASS touch $D/sub1/a WVPASS touch $D/sub2/b WVPASS bup index -u $D --exclude-rx "^$(pwd)/$D/sub1/" WVPASS bup save --strip -n bupdir $D WVPASS bup restore -C buprestore.tmp /bupdir/latest/ actual="$(WVPASS cd buprestore.tmp; WVPASS find . | WVPASS sort)" || exit $? WVPASSEQ "$actual" ". ./a ./b ./sub2 ./sub2/b" WVSTART "index --exclude-rx '/foo$' (non-dir, tail anchor)" WVPASS rm -rf "$D" "$BUP_DIR" buprestore.tmp WVPASS bup init WVPASS mkdir $D WVPASS touch $D/a WVPASS touch $D/b WVPASS touch $D/foo WVPASS mkdir $D/sub WVPASS mkdir $D/sub/foo WVPASS touch $D/sub/foo/a WVPASS bup index -u $D --exclude-rx '/foo$' WVPASS bup save --strip -n bupdir $D WVPASS bup restore -C buprestore.tmp /bupdir/latest/ actual="$(WVPASS cd buprestore.tmp; WVPASS find . | WVPASS sort)" || exit $? WVPASSEQ "$actual" ". ./a ./b ./sub ./sub/foo ./sub/foo/a" WVSTART "index --exclude-rx '/foo/$' (dir, tail anchor)" WVPASS rm -rf "$D" "$BUP_DIR" buprestore.tmp WVPASS bup init WVPASS mkdir $D WVPASS touch $D/a WVPASS touch $D/b WVPASS touch $D/foo WVPASS mkdir $D/sub WVPASS mkdir $D/sub/foo WVPASS touch $D/sub/foo/a WVPASS bup index -u $D --exclude-rx '/foo/$' WVPASS bup save --strip -n bupdir $D WVPASS bup restore -C buprestore.tmp /bupdir/latest/ actual="$(WVPASS cd buprestore.tmp; WVPASS find . 
| WVPASS sort)" || exit $? WVPASSEQ "$actual" ". ./a ./b ./foo ./sub" WVSTART "index --exclude-rx '/foo/.' (dir content)" WVPASS rm -rf "$D" "$BUP_DIR" buprestore.tmp WVPASS bup init WVPASS mkdir $D WVPASS touch $D/a WVPASS touch $D/b WVPASS touch $D/foo WVPASS mkdir $D/sub WVPASS mkdir $D/sub/foo WVPASS touch $D/sub/foo/a WVPASS bup index -u $D --exclude-rx '/foo/.' WVPASS bup save --strip -n bupdir $D WVPASS bup restore -C buprestore.tmp /bupdir/latest/ actual="$(WVPASS cd buprestore.tmp; WVPASS find . | WVPASS sort)" || exit $? WVPASSEQ "$actual" ". ./a ./b ./foo ./sub ./sub/foo" ) || exit $? # bup restore --exclude-rx ... ( export BUP_DIR="$TOP/buptest.tmp" D=bupdata.tmp WVSTART "restore --exclude-rx '^/foo' (root anchor)" WVPASS rm -rf "$D" "$BUP_DIR" buprestore.tmp WVPASS bup init WVPASS mkdir $D WVPASS touch $D/a WVPASS touch $D/b WVPASS mkdir $D/sub1 WVPASS mkdir $D/sub2 WVPASS touch $D/sub1/a WVPASS touch $D/sub2/b WVPASS bup index -u $D WVPASS bup save --strip -n bupdir $D WVPASS bup restore -C buprestore.tmp --exclude-rx "^/sub1/" /bupdir/latest/ actual="$(WVPASS cd buprestore.tmp; WVPASS find . | WVPASS sort)" || exit $? WVPASSEQ "$actual" ". ./a ./b ./sub2 ./sub2/b" WVSTART "restore --exclude-rx '/foo$' (non-dir, tail anchor)" WVPASS rm -rf "$D" "$BUP_DIR" buprestore.tmp WVPASS bup init WVPASS mkdir $D WVPASS touch $D/a WVPASS touch $D/b WVPASS touch $D/foo WVPASS mkdir $D/sub WVPASS mkdir $D/sub/foo WVPASS touch $D/sub/foo/a WVPASS bup index -u $D WVPASS bup save --strip -n bupdir $D WVPASS bup restore -C buprestore.tmp --exclude-rx '/foo$' /bupdir/latest/ actual="$(WVPASS cd buprestore.tmp; WVPASS find . | WVPASS sort)" || exit $? WVPASSEQ "$actual" ". 
./a ./b ./sub ./sub/foo ./sub/foo/a" WVSTART "restore --exclude-rx '/foo/$' (dir, tail anchor)" WVPASS rm -rf "$D" "$BUP_DIR" buprestore.tmp WVPASS bup init WVPASS mkdir $D WVPASS touch $D/a WVPASS touch $D/b WVPASS touch $D/foo WVPASS mkdir $D/sub WVPASS mkdir $D/sub/foo WVPASS touch $D/sub/foo/a WVPASS bup index -u $D WVPASS bup save --strip -n bupdir $D WVPASS bup restore -C buprestore.tmp --exclude-rx '/foo/$' /bupdir/latest/ actual="$(WVPASS cd buprestore.tmp; WVPASS find . | WVPASS sort)" || exit $? WVPASSEQ "$actual" ". ./a ./b ./foo ./sub" WVSTART "restore --exclude-rx '/foo/.' (dir content)" WVPASS rm -rf "$D" "$BUP_DIR" buprestore.tmp WVPASS bup init WVPASS mkdir $D WVPASS touch $D/a WVPASS touch $D/b WVPASS touch $D/foo WVPASS mkdir $D/sub WVPASS mkdir $D/sub/foo WVPASS touch $D/sub/foo/a WVPASS bup index -u $D WVPASS bup save --strip -n bupdir $D WVPASS bup restore -C buprestore.tmp --exclude-rx '/foo/.' /bupdir/latest/ actual="$(WVPASS cd buprestore.tmp; WVPASS find . | WVPASS sort)" || exit $? WVPASSEQ "$actual" ". ./a ./b ./foo ./sub ./sub/foo" ) || exit $? 
bup-0.25/t/testfile1000066400000000000000000004657101225146730500143360ustar00rootroot00000000000000#!/hfe/ova/rai clguba sebz ohc vzcbeg bcgvbaf, qerphefr sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc qerphefr -- k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf d,dhvrg qba'g npghnyyl cevag svyranzrf cebsvyr eha haqre gur clguba cebsvyre """ b = bcgvbaf.Bcgvbaf('ohc qerphefr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar svyranzr rkcrpgrq") vg = qerphefr.erphefvir_qveyvfg(rkgen, bcg.kqri) vs bcg.cebsvyr: vzcbeg pCebsvyr qrs qb_vg(): sbe v va vg: cnff pCebsvyr.eha('qb_vg()') ryfr: vs bcg.dhvrg: sbe v va vg: cnff ryfr: sbe (anzr,fg) va vg: cevag anzr vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc fcyvg [-gpo] [-a anzr] [--orapu] [svyranzrf...] 
-- e,erzbgr= erzbgr ercbfvgbel cngu o,oybof bhgchg n frevrf bs oybo vqf g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) A,abbc qba'g npghnyyl fnir gur qngn naljurer d,dhvrg qba'g cevag cebterff zrffntrf i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) pbcl whfg pbcl vachg gb bhgchg, unfufcyvggvat nybat gur jnl orapu cevag orapuznex gvzvatf gb fgqree znk-cnpx-fvmr= znkvzhz olgrf va n fvatyr cnpx znk-cnpx-bowrpgf= znkvzhz ahzore bs bowrpgf va n fvatyr cnpx snabhg= znkvzhz ahzore bs oybof va n fvatyr gerr """ b = bcgvbaf.Bcgvbaf('ohc fcyvg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr be bcg.abbc be bcg.pbcl): b.sngny("hfr bar be zber bs -o, -g, -p, -a, -A, --pbcl") vs (bcg.abbc be bcg.pbcl) naq (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny('-A vf vapbzcngvoyr jvgu -o, -g, -p, -a') vs bcg.ireobfr >= 2: tvg.ireobfr = bcg.ireobfr - 1 bcg.orapu = 1 vs bcg.znk_cnpx_fvmr: unfufcyvg.znk_cnpx_fvmr = cnefr_ahz(bcg.znk_cnpx_fvmr) vs bcg.znk_cnpx_bowrpgf: unfufcyvg.znk_cnpx_bowrpgf = cnefr_ahz(bcg.znk_cnpx_bowrpgf) vs bcg.snabhg: unfufcyvg.snabhg = cnefr_ahz(bcg.snabhg) vs bcg.oybof: unfufcyvg.snabhg = 0 vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") fgneg_gvzr = gvzr.gvzr() ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.abbc be bcg.pbcl: pyv = j = byqers = Abar ryvs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() svyrf = rkgen naq (bcra(sa) sbe sa va rkgen) be [flf.fgqva] vs j: funyvfg = unfufcyvg.fcyvg_gb_funyvfg(j, svyrf) gerr = j.arj_gerr(funyvfg) ryfr: ynfg = 0 sbe (oybo, ovgf) va unfufcyvg.unfufcyvg_vgre(svyrf): unfufcyvg.gbgny_fcyvg += 
yra(oybo) vs bcg.pbcl: flf.fgqbhg.jevgr(fge(oybo)) zrtf = unfufcyvg.gbgny_fcyvg/1024/1024 vs abg bcg.dhvrg naq ynfg != zrtf: cebterff('%q Zolgrf ernq\e' % zrtf) ynfg = zrtf cebterff('%q Zolgrf ernq, qbar.\a' % zrtf) vs bcg.ireobfr: ybt('\a') vs bcg.oybof: sbe (zbqr,anzr,ova) va funyvfg: cevag ova.rapbqr('urk') vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fcyvg\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') vs j: j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() frpf = gvzr.gvzr() - fgneg_gvzr fvmr = unfufcyvg.gbgny_fcyvg vs bcg.orapu: ybt('\aohc: %.2sxolgrf va %.2s frpf = %.2s xolgrf/frp\a' % (fvmr/1024., frpf, fvmr/1024./frpf)) #!/hfe/ova/rai clguba vzcbeg flf, er, fgehpg, zznc sebz ohc vzcbeg tvg, bcgvbaf sebz ohc.urycref vzcbeg * qrs f_sebz_olgrf(olgrf): pyvfg = [pue(o) sbe o va olgrf] erghea ''.wbva(pyvfg) qrs ercbeg(pbhag): svryqf = ['IzFvmr', 'IzEFF', 'IzQngn', 'IzFgx'] q = {} sbe yvar va bcra('/cebp/frys/fgnghf').ernqyvarf(): y = er.fcyvg(e':\f*', yvar.fgevc(), 1) q[y[0]] = y[1] vs pbhag >= 0: r1 = pbhag svryqf = [q[x] sbe x va svryqf] ryfr: r1 = '' cevag ('%9f ' + ('%10f ' * yra(svryqf))) % ghcyr([r1] + svryqf) flf.fgqbhg.syhfu() bcgfcrp = """ ohc zrzgrfg [-a ryrzragf] [-p plpyrf] -- a,ahzore= ahzore bs bowrpgf cre plpyr p,plpyrf= ahzore bs plpyrf gb eha vtaber-zvqk vtaber .zvqk svyrf, hfr bayl .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zrzgrfg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') tvg.vtaber_zvqk = bcg.vtaber_zvqk tvg.purpx_ercb_be_qvr() z = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) plpyrf = bcg.plpyrf be 100 ahzore = bcg.ahzore be 10000 ercbeg(-1) s = bcra('/qri/henaqbz') n = zznc.zznc(-1, 20) ercbeg(0) sbe p va 
kenatr(plpyrf): sbe a va kenatr(ahzore): o = s.ernq(3) vs 0: olgrf = yvfg(fgehpg.hacnpx('!OOO', o)) + [0]*17 olgrf[2] &= 0ks0 ova = fgehpg.cnpx('!20f', f_sebz_olgrf(olgrf)) ryfr: n[0:2] = o[0:2] n[2] = pue(beq(o[2]) & 0ks0) ova = fge(n[0:20]) #cevag ova.rapbqr('urk') z.rkvfgf(ova) ercbeg((p+1)*ahzore) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * qrs cevag_abqr(grkg, a): cersvk = '' vs bcg.unfu: cersvk += "%f " % a.unfu.rapbqr('urk') vs fgng.F_VFQVE(a.zbqr): cevag '%f%f/' % (cersvk, grkg) ryvs fgng.F_VFYAX(a.zbqr): cevag '%f%f@' % (cersvk, grkg) ryfr: cevag '%f%f' % (cersvk, grkg) bcgfcrp = """ ohc yf -- f,unfu fubj unfu sbe rnpu svyr """ b = bcgvbaf.Bcgvbaf('ohc yf', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) vs abg rkgen: rkgen = ['/'] erg = 0 sbe q va rkgen: gel: a = gbc.yerfbyir(q) vs fgng.F_VFQVE(a.zbqr): sbe fho va a: cevag_abqr(fho.anzr, fho) ryfr: cevag_abqr(q, a) rkprcg isf.AbqrReebe, r: ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er, fgng, ernqyvar, sazngpu sebz ohc vzcbeg bcgvbaf, tvg, fudhbgr, isf sebz ohc.urycref vzcbeg * qrs abqr_anzr(grkg, a): vs fgng.F_VFQVE(a.zbqr): erghea '%f/' % grkg ryvs fgng.F_VFYAX(a.zbqr): erghea '%f@' % grkg ryfr: erghea '%f' % grkg qrs qb_yf(cngu, a): y = [] vs fgng.F_VFQVE(a.zbqr): sbe fho va a: y.nccraq(abqr_anzr(fho.anzr, fho)) ryfr: y.nccraq(abqr_anzr(cngu, a)) cevag pbyhzangr(y, '') qrs jevgr_gb_svyr(vas, bhgs): sbe oybo va puhaxlernqre(vas): bhgs.jevgr(oybo) qrs vachgvgre(): vs bf.vfnggl(flf.fgqva.svyrab()): juvyr 1: gel: lvryq enj_vachg('ohc> ') rkprcg RBSReebe: oernx ryfr: sbe yvar va flf.fgqva: lvryq yvar qrs _pbzcyrgre_trg_fhof(yvar): (dglcr, ynfgjbeq) = fudhbgr.hasvavfurq_jbeq(yvar) (qve,anzr) = bf.cngu.fcyvg(ynfgjbeq) #ybt('\apbzcyrgre: %e %e %e\a' % (dglcr, ynfgjbeq, grkg)) a = cjq.erfbyir(qve) fhof = yvfg(svygre(ynzoqn k: k.anzr.fgnegfjvgu(anzr), a.fhof())) 
erghea (qve, anzr, dglcr, ynfgjbeq, fhof) _ynfg_yvar = Abar _ynfg_erf = Abar qrs pbzcyrgre(grkg, fgngr): tybony _ynfg_yvar tybony _ynfg_erf gel: yvar = ernqyvar.trg_yvar_ohssre()[:ernqyvar.trg_raqvqk()] vs _ynfg_yvar != yvar: _ynfg_erf = _pbzcyrgre_trg_fhof(yvar) _ynfg_yvar = yvar (qve, anzr, dglcr, ynfgjbeq, fhof) = _ynfg_erf vs fgngr < yra(fhof): fa = fhof[fgngr] fa1 = fa.erfbyir('') # qrers flzyvaxf shyyanzr = bf.cngu.wbva(qve, fa.anzr) vs fgng.F_VFQVE(fa1.zbqr): erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr+'/', grezvangr=Snyfr) ryfr: erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr, grezvangr=Gehr) + ' ' erghea grkg + erg rkprcg Rkprcgvba, r: ybt('\areebe va pbzcyrgvba: %f\a' % r) bcgfcrp = """ ohc sgc """ b = bcgvbaf.Bcgvbaf('ohc sgc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) cjq = gbc vs rkgen: yvarf = rkgen ryfr: ernqyvar.frg_pbzcyrgre_qryvzf(' \g\a\e/') ernqyvar.frg_pbzcyrgre(pbzcyrgre) ernqyvar.cnefr_naq_ovaq("gno: pbzcyrgr") yvarf = vachgvgre() sbe yvar va yvarf: vs abg yvar.fgevc(): pbagvahr jbeqf = [jbeq sbe (jbeqfgneg,jbeq) va fudhbgr.dhbgrfcyvg(yvar)] pzq = jbeqf[0].ybjre() #ybt('rkrphgr: %e %e\a' % (pzq, cnez)) gel: vs pzq == 'yf': sbe cnez va (jbeqf[1:] be ['.']): qb_yf(cnez, cjq.erfbyir(cnez)) ryvs pzq == 'pq': sbe cnez va jbeqf[1:]: cjq = cjq.erfbyir(cnez) ryvs pzq == 'cjq': cevag cjq.shyyanzr() ryvs pzq == 'png': sbe cnez va jbeqf[1:]: jevgr_gb_svyr(cjq.erfbyir(cnez).bcra(), flf.fgqbhg) ryvs pzq == 'trg': vs yra(jbeqf) abg va [2,3]: envfr Rkprcgvba('Hfntr: trg [ybpnyanzr]') eanzr = jbeqf[1] (qve,onfr) = bf.cngu.fcyvg(eanzr) yanzr = yra(jbeqf)>2 naq jbeqf[2] be onfr vas = cjq.erfbyir(eanzr).bcra() ybt('Fnivat %e\a' % yanzr) jevgr_gb_svyr(vas, bcra(yanzr, 'jo')) ryvs pzq == 'ztrg': sbe cnez va jbeqf[1:]: (qve,onfr) = bf.cngu.fcyvg(cnez) sbe a va cjq.erfbyir(qve).fhof(): vs sazngpu.sazngpu(a.anzr, onfr): gel: ybt('Fnivat %e\a' % a.anzr) vas = a.bcra() bhgs = bcra(a.anzr, 'jo') 
jevgr_gb_svyr(vas, bhgs) bhgs.pybfr() rkprcg Rkprcgvba, r: ybt(' reebe: %f\a' % r) ryvs pzq == 'uryc' be pzq == '?': ybt('Pbzznaqf: yf pq cjq png trg ztrg uryc dhvg\a') ryvs pzq == 'dhvg' be pzq == 'rkvg' be pzq == 'olr': oernx ryfr: envfr Rkprcgvba('ab fhpu pbzznaq %e' % pzq) rkprcg Rkprcgvba, r: ybt('reebe: %f\a' % r) #envfr #!/hfe/ova/rai clguba vzcbeg flf, zznc sebz ohc vzcbeg bcgvbaf, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc enaqbz [-F frrq] -- F,frrq= bcgvbany enaqbz ahzore frrq (qrsnhyg 1) s,sbepr cevag enaqbz qngn gb fgqbhg rira vs vg'f n ggl """ b = bcgvbaf.Bcgvbaf('ohc enaqbz', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") gbgny = cnefr_ahz(rkgen[0]) vs bcg.sbepr be (abg bf.vfnggl(1) naq abg ngbv(bf.raiveba.trg('OHC_SBEPR_GGL')) & 1): _unfufcyvg.jevgr_enaqbz(flf.fgqbhg.svyrab(), gbgny, bcg.frrq be 0) ryfr: ybt('reebe: abg jevgvat ovanel qngn gb n grezvany. Hfr -s gb sbepr.\a') flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc uryc """ b = bcgvbaf.Bcgvbaf('ohc uryc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) == 0: # gur jenccre cebtenz cebivqrf gur qrsnhyg hfntr fgevat bf.rkrpic(bf.raiveba['OHC_ZNVA_RKR'], ['ohc']) ryvs yra(rkgen) == 1: qbpanzr = (rkgen[0]=='ohc' naq 'ohc' be ('ohc-%f' % rkgen[0])) rkr = flf.neti[0] (rkrcngu, rkrsvyr) = bf.cngu.fcyvg(rkr) znacngu = bf.cngu.wbva(rkrcngu, '../Qbphzragngvba/' + qbpanzr + '.[1-9]') t = tybo.tybo(znacngu) vs t: bf.rkrpic('zna', ['zna', '-y', t[0]]) ryfr: bf.rkrpic('zna', ['zna', qbpanzr]) ryfr: b.sngny("rknpgyl bar pbzznaq anzr rkcrpgrq") #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng, reeab, shfr, er, gvzr, grzcsvyr sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * pynff Fgng(shfr.Fgng): qrs __vavg__(frys): frys.fg_zbqr = 0 frys.fg_vab = 0 frys.fg_qri = 0 frys.fg_ayvax = 0 frys.fg_hvq = 0 frys.fg_tvq = 0 frys.fg_fvmr = 0 frys.fg_ngvzr = 0 frys.fg_zgvzr 
= 0 frys.fg_pgvzr = 0 frys.fg_oybpxf = 0 frys.fg_oyxfvmr = 0 frys.fg_eqri = 0 pnpur = {} qrs pnpur_trg(gbc, cngu): cnegf = cngu.fcyvg('/') pnpur[('',)] = gbc p = Abar znk = yra(cnegf) #ybt('pnpur: %e\a' % pnpur.xrlf()) sbe v va enatr(znk): cer = cnegf[:znk-v] #ybt('pnpur gelvat: %e\a' % cer) p = pnpur.trg(ghcyr(cer)) vs p: erfg = cnegf[znk-v:] sbe e va erfg: #ybt('erfbyivat %e sebz %e\a' % (e, p.shyyanzr())) p = p.yerfbyir(e) xrl = ghcyr(cer + [e]) #ybt('fnivat: %e\a' % (xrl,)) pnpur[xrl] = p oernx nffreg(p) erghea p pynff OhcSf(shfr.Shfr): qrs __vavg__(frys, gbc): shfr.Shfr.__vavg__(frys) frys.gbc = gbc qrs trgngge(frys, cngu): ybt('--trgngge(%e)\a' % cngu) gel: abqr = pnpur_trg(frys.gbc, cngu) fg = Fgng() fg.fg_zbqr = abqr.zbqr fg.fg_ayvax = abqr.ayvaxf() fg.fg_fvmr = abqr.fvmr() fg.fg_zgvzr = abqr.zgvzr fg.fg_pgvzr = abqr.pgvzr fg.fg_ngvzr = abqr.ngvzr erghea fg rkprcg isf.AbFhpuSvyr: erghea -reeab.RABRAG qrs ernqqve(frys, cngu, bssfrg): ybt('--ernqqve(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) lvryq shfr.Qveragel('.') lvryq shfr.Qveragel('..') sbe fho va abqr.fhof(): lvryq shfr.Qveragel(fho.anzr) qrs ernqyvax(frys, cngu): ybt('--ernqyvax(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) erghea abqr.ernqyvax() qrs bcra(frys, cngu, syntf): ybt('--bcra(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) nppzbqr = bf.B_EQBAYL | bf.B_JEBAYL | bf.B_EQJE vs (syntf & nppzbqr) != bf.B_EQBAYL: erghea -reeab.RNPPRF abqr.bcra() qrs eryrnfr(frys, cngu, syntf): ybt('--eryrnfr(%e)\a' % cngu) qrs ernq(frys, cngu, fvmr, bssfrg): ybt('--ernq(%e)\a' % cngu) a = pnpur_trg(frys.gbc, cngu) b = a.bcra() b.frrx(bssfrg) erghea b.ernq(fvmr) vs abg unfngge(shfr, '__irefvba__'): envfr EhagvzrReebe, "lbhe shfr zbqhyr vf gbb byq sbe shfr.__irefvba__" shfr.shfr_clguba_ncv = (0, 2) bcgfcrp = """ ohc shfr [-q] [-s] -- q,qroht vapernfr qroht yriry s,sbertebhaq eha va sbertebhaq """ b = bcgvbaf.Bcgvbaf('ohc shfr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: 
b.sngny("rknpgyl bar nethzrag rkcrpgrq") tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) s = OhcSf(gbc) s.shfr_netf.zbhagcbvag = rkgen[0] vs bcg.qroht: s.shfr_netf.nqq('qroht') vs bcg.sbertebhaq: s.shfr_netf.frgzbq('sbertebhaq') cevag s.zhygvguernqrq s.zhygvguernqrq = Snyfr s.znva() #!/hfe/ova/rai clguba sebz ohc vzcbeg tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ [OHC_QVE=...] ohc vavg [-e ubfg:cngu] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc vavg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") vs bcg.erzbgr: tvg.vavg_ercb() # ybpny ercb tvg.purpx_ercb_be_qvr() pyv = pyvrag.Pyvrag(bcg.erzbgr, perngr=Gehr) pyv.pybfr() ryfr: tvg.vavg_ercb() #!/hfe/ova/rai clguba vzcbeg flf, zngu, fgehpg, tybo sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * CNTR_FVMR=4096 FUN_CRE_CNTR=CNTR_FVMR/200. qrs zretr(vqkyvfg, ovgf, gnoyr): pbhag = 0 sbe r va tvg.vqkzretr(vqkyvfg): pbhag += 1 cersvk = tvg.rkgenpg_ovgf(r, ovgf) gnoyr[cersvk] = pbhag lvryq r qrs qb_zvqk(bhgqve, bhgsvyranzr, vasvyranzrf): vs abg bhgsvyranzr: nffreg(bhgqve) fhz = Fun1('\0'.wbva(vasvyranzrf)).urkqvtrfg() bhgsvyranzr = '%f/zvqk-%f.zvqk' % (bhgqve, fhz) vac = [] gbgny = 0 sbe anzr va vasvyranzrf: vk = tvg.CnpxVqk(anzr) vac.nccraq(vk) gbgny += yra(vk) ybt('Zretvat %q vaqrkrf (%q bowrpgf).\a' % (yra(vasvyranzrf), gbgny)) vs (abg bcg.sbepr naq (gbgny < 1024 naq yra(vasvyranzrf) < 3)) \ be (bcg.sbepr naq abg gbgny): ybt('zvqk: abguvat gb qb.\a') erghea cntrf = vag(gbgny/FUN_CRE_CNTR) be 1 ovgf = vag(zngu.prvy(zngu.ybt(cntrf, 2))) ragevrf = 2**ovgf ybt('Gnoyr fvmr: %q (%q ovgf)\a' % (ragevrf*4, ovgf)) gnoyr = [0]*ragevrf gel: bf.hayvax(bhgsvyranzr) rkprcg BFReebe: cnff s = bcra(bhgsvyranzr + '.gzc', 'j+') s.jevgr('ZVQK\0\0\0\2') s.jevgr(fgehpg.cnpx('!V', ovgf)) nffreg(s.gryy() == 12) s.jevgr('\0'*4*ragevrf) sbe r va zretr(vac, ovgf, gnoyr): s.jevgr(r) s.jevgr('\0'.wbva(bf.cngu.onfranzr(c) sbe c va vasvyranzrf)) s.frrx(12) 
s.jevgr(fgehpg.cnpx('!%qV' % ragevrf, *gnoyr)) s.pybfr() bf.eranzr(bhgsvyranzr + '.gzc', bhgsvyranzr) # guvf vf whfg sbe grfgvat vs 0: c = tvg.CnpxZvqk(bhgsvyranzr) nffreg(yra(c.vqkanzrf) == yra(vasvyranzrf)) cevag c.vqkanzrf nffreg(yra(c) == gbgny) cv = vgre(c) sbe v va zretr(vac, gbgny, ovgf, gnoyr): nffreg(v == cv.arkg()) nffreg(c.rkvfgf(v)) cevag bhgsvyranzr bcgfcrp = """ ohc zvqk [bcgvbaf...] -- b,bhgchg= bhgchg zvqk svyranzr (qrsnhyg: nhgb-trarengrq) n,nhgb nhgbzngvpnyyl perngr .zvqk sebz nal havaqrkrq .vqk svyrf s,sbepr nhgbzngvpnyyl perngr .zvqk sebz *nyy* .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zvqk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen naq (bcg.nhgb be bcg.sbepr): b.sngny("lbh pna'g hfr -s/-n naq nyfb cebivqr svyranzrf") tvg.purpx_ercb_be_qvr() vs rkgen: qb_zvqk(tvg.ercb('bowrpgf/cnpx'), bcg.bhgchg, rkgen) ryvs bcg.nhgb be bcg.sbepr: cnguf = [tvg.ercb('bowrpgf/cnpx')] cnguf += tybo.tybo(tvg.ercb('vaqrk-pnpur/*/.')) sbe cngu va cnguf: ybt('zvqk: fpnaavat %f\a' % cngu) vs bcg.sbepr: qb_zvqk(cngu, bcg.bhgchg, tybo.tybo('%f/*.vqk' % cngu)) ryvs bcg.nhgb: z = tvg.CnpxVqkYvfg(cngu) arrqrq = {} sbe cnpx va z.cnpxf: # bayl .vqk svyrf jvgubhg n .zvqk ner bcra vs cnpx.anzr.raqfjvgu('.vqk'): arrqrq[cnpx.anzr] = 1 qry z qb_zvqk(cngu, bcg.bhgchg, arrqrq.xrlf()) ybt('\a') ryfr: b.sngny("lbh zhfg hfr -s be -n be cebivqr vachg svyranzrf") #!/hfe/ova/rai clguba vzcbeg flf, bf, enaqbz sebz ohc vzcbeg bcgvbaf sebz ohc.urycref vzcbeg * qrs enaqoybpx(a): y = [] sbe v va kenatr(a): y.nccraq(pue(enaqbz.enaqenatr(0,256))) erghea ''.wbva(y) bcgfcrp = """ ohc qnzntr [-a pbhag] [-f znkfvmr] [-F frrq] -- JNEAVAT: GUVF PBZZNAQ VF RKGERZRYL QNATREBHF a,ahz= ahzore bs oybpxf gb qnzntr f,fvmr= znkvzhz fvmr bs rnpu qnzntrq oybpx creprag= znkvzhz fvmr bs rnpu qnzntrq oybpx (nf n creprag bs ragver svyr) rdhny fcernq qnzntr rirayl guebhtubhg gur svyr F,frrq= enaqbz ahzore frrq (sbe ercrngnoyr grfgf) """ b = bcgvbaf.Bcgvbaf('ohc qnzntr', bcgfcrp) (bcg, syntf, rkgen) 
= b.cnefr(flf.neti[1:]) vs abg rkgen: b.sngny('svyranzrf rkcrpgrq') vs bcg.frrq != Abar: enaqbz.frrq(bcg.frrq) sbe anzr va rkgen: ybt('Qnzntvat "%f"...\a' % anzr) s = bcra(anzr, 'e+o') fg = bf.sfgng(s.svyrab()) fvmr = fg.fg_fvmr vs bcg.creprag be bcg.fvmr: zf1 = vag(sybng(bcg.creprag be 0)/100.0*fvmr) be fvmr zf2 = bcg.fvmr be fvmr znkfvmr = zva(zf1, zf2) ryfr: znkfvmr = 1 puhaxf = bcg.ahz be 10 puhaxfvmr = fvmr/puhaxf sbe e va enatr(puhaxf): fm = enaqbz.enaqenatr(1, znkfvmr+1) vs fm > fvmr: fm = fvmr vs bcg.rdhny: bsf = e*puhaxfvmr ryfr: bsf = enaqbz.enaqenatr(0, fvmr - fm + 1) ybt(' %6q olgrf ng %q\a' % (fm, bsf)) s.frrx(bsf) s.jevgr(enaqoybpx(fm)) s.pybfr() #!/hfe/ova/rai clguba vzcbeg flf, fgehpg, zznc sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * fhfcraqrq_j = Abar qrs vavg_qve(pbaa, net): tvg.vavg_ercb(net) ybt('ohc freire: ohcqve vavgvnyvmrq: %e\a' % tvg.ercbqve) pbaa.bx() qrs frg_qve(pbaa, net): tvg.purpx_ercb_be_qvr(net) ybt('ohc freire: ohcqve vf %e\a' % tvg.ercbqve) pbaa.bx() qrs yvfg_vaqrkrf(pbaa, whax): tvg.purpx_ercb_be_qvr() sbe s va bf.yvfgqve(tvg.ercb('bowrpgf/cnpx')): vs s.raqfjvgu('.vqk'): pbaa.jevgr('%f\a' % s) pbaa.bx() qrs fraq_vaqrk(pbaa, anzr): tvg.purpx_ercb_be_qvr() nffreg(anzr.svaq('/') < 0) nffreg(anzr.raqfjvgu('.vqk')) vqk = tvg.CnpxVqk(tvg.ercb('bowrpgf/cnpx/%f' % anzr)) pbaa.jevgr(fgehpg.cnpx('!V', yra(vqk.znc))) pbaa.jevgr(vqk.znc) pbaa.bx() qrs erprvir_bowrpgf(pbaa, whax): tybony fhfcraqrq_j tvg.purpx_ercb_be_qvr() fhttrfgrq = {} vs fhfcraqrq_j: j = fhfcraqrq_j fhfcraqrq_j = Abar ryfr: j = tvg.CnpxJevgre() juvyr 1: af = pbaa.ernq(4) vs abg af: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq yratgu urnqre, tbg RBS\a') a = fgehpg.hacnpx('!V', af)[0] #ybt('rkcrpgvat %q olgrf\a' % a) vs abg a: ybt('ohc freire: erprvirq %q bowrpg%f.\a' % (j.pbhag, j.pbhag!=1 naq "f" be '')) shyycngu = j.pybfr() vs shyycngu: (qve, anzr) = bf.cngu.fcyvg(shyycngu) pbaa.jevgr('%f.vqk\a' % anzr) pbaa.bx() erghea ryvs a == 0kssssssss: ybt('ohc 
freire: erprvir-bowrpgf fhfcraqrq.\a') fhfcraqrq_j = j pbaa.bx() erghea ohs = pbaa.ernq(a) # bowrpg fvmrf va ohc ner ernfbanoyl fznyy #ybt('ernq %q olgrf\a' % a) vs yra(ohs) < a: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq %q olgrf, tbg %q\a' % (a, yra(ohs))) (glcr, pbagrag) = tvg._qrpbqr_cnpxbow(ohs) fun = tvg.pnyp_unfu(glcr, pbagrag) byqcnpx = j.rkvfgf(fun) # SVKZR: jr bayl fhttrfg n fvatyr vaqrk cre plpyr, orpnhfr gur pyvrag # vf pheeragyl qhzo gb qbjaybnq zber guna bar cre plpyr naljnl. # Npghnyyl jr fubhyq svk gur pyvrag, ohg guvf vf n zvabe bcgvzvmngvba # ba gur freire fvqr. vs abg fhttrfgrq naq \ byqcnpx naq (byqcnpx == Gehr be byqcnpx.raqfjvgu('.zvqk')): # SVKZR: jr fubhyqa'g ernyyl unir gb xabj nobhg zvqk svyrf # ng guvf ynlre. Ohg rkvfgf() ba n zvqk qbrfa'g erghea gur # cnpxanzr (fvapr vg qbrfa'g xabj)... cebonoyl jr fubhyq whfg # svk gung qrsvpvrapl bs zvqk svyrf riraghnyyl, nygubhtu vg'yy # znxr gur svyrf ovttre. Guvf zrgubq vf pregnvayl abg irel # rssvpvrag. j.bowpnpur.erserfu(fxvc_zvqk = Gehr) byqcnpx = j.bowpnpur.rkvfgf(fun) ybt('arj fhttrfgvba: %e\a' % byqcnpx) nffreg(byqcnpx) nffreg(byqcnpx != Gehr) nffreg(abg byqcnpx.raqfjvgu('.zvqk')) j.bowpnpur.erserfu(fxvc_zvqk = Snyfr) vs abg fhttrfgrq naq byqcnpx: nffreg(byqcnpx.raqfjvgu('.vqk')) (qve,anzr) = bf.cngu.fcyvg(byqcnpx) vs abg (anzr va fhttrfgrq): ybt("ohc freire: fhttrfgvat vaqrk %f\a" % anzr) pbaa.jevgr('vaqrk %f\a' % anzr) fhttrfgrq[anzr] = 1 ryfr: j._enj_jevgr([ohs]) # ABGERNPURQ qrs ernq_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() e = tvg.ernq_ers(ersanzr) pbaa.jevgr('%f\a' % (e be '').rapbqr('urk')) pbaa.bx() qrs hcqngr_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() arjiny = pbaa.ernqyvar().fgevc() byqiny = pbaa.ernqyvar().fgevc() tvg.hcqngr_ers(ersanzr, arjiny.qrpbqr('urk'), byqiny.qrpbqr('urk')) pbaa.bx() qrs png(pbaa, vq): tvg.purpx_ercb_be_qvr() gel: sbe oybo va tvg.png(vq): pbaa.jevgr(fgehpg.cnpx('!V', yra(oybo))) pbaa.jevgr(oybo) rkprcg XrlReebe, r: ybt('freire: reebe: %f\a' % r) 
pbaa.jevgr('\0\0\0\0') pbaa.reebe(r) ryfr: pbaa.jevgr('\0\0\0\0') pbaa.bx() bcgfcrp = """ ohc freire """ b = bcgvbaf.Bcgvbaf('ohc freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') ybt('ohc freire: ernqvat sebz fgqva.\a') pbzznaqf = { 'vavg-qve': vavg_qve, 'frg-qve': frg_qve, 'yvfg-vaqrkrf': yvfg_vaqrkrf, 'fraq-vaqrk': fraq_vaqrk, 'erprvir-bowrpgf': erprvir_bowrpgf, 'ernq-ers': ernq_ers, 'hcqngr-ers': hcqngr_ers, 'png': png, } # SVKZR: guvf cebgbpby vf gbgnyyl ynzr naq abg ng nyy shgher-cebbs. # (Rfcrpvnyyl fvapr jr nobeg pbzcyrgryl nf fbba nf *nalguvat* onq unccraf) pbaa = Pbaa(flf.fgqva, flf.fgqbhg) ye = yvarernqre(pbaa) sbe _yvar va ye: yvar = _yvar.fgevc() vs abg yvar: pbagvahr ybt('ohc freire: pbzznaq: %e\a' % yvar) jbeqf = yvar.fcyvg(' ', 1) pzq = jbeqf[0] erfg = yra(jbeqf)>1 naq jbeqf[1] be '' vs pzq == 'dhvg': oernx ryfr: pzq = pbzznaqf.trg(pzq) vs pzq: pzq(pbaa, erfg) ryfr: envfr Rkprcgvba('haxabja freire pbzznaq: %e\a' % yvar) ybt('ohc freire: qbar\a') #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc wbva [-e ubfg:cngu] [ersf be unfurf...] 
-- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc wbva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg rkgen: rkgen = yvarernqre(flf.fgqva) erg = 0 vs bcg.erzbgr: pyv = pyvrag.Pyvrag(bcg.erzbgr) png = pyv.png ryfr: pc = tvg.PngCvcr() png = pc.wbva sbe vq va rkgen: gel: sbe oybo va png(vq): flf.fgqbhg.jevgr(oybo) rkprcg XrlReebe, r: flf.fgqbhg.syhfu() ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, er, reeab, fgng, gvzr, zngu sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, vaqrk, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc fnir [-gp] [-a anzr] -- e,erzbgr= erzbgr ercbfvgbel cngu g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) d,dhvrg qba'g fubj cebterff zrgre fznyyre= bayl onpx hc svyrf fznyyre guna a olgrf """ b = bcgvbaf.Bcgvbaf('ohc fnir', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny("hfr bar be zber bs -g, -p, -a") vs abg rkgen: b.sngny("ab svyranzrf tvira") bcg.cebterff = (vfggl naq abg bcg.dhvrg) bcg.fznyyre = cnefr_ahz(bcg.fznyyre be 0) vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() unaqyr_pgey_p() qrs rngfynfu(qve): vs qve.raqfjvgu('/'): erghea qve[:-1] ryfr: erghea qve cnegf = [''] funyvfgf = [[]] qrs _chfu(cneg): nffreg(cneg) cnegf.nccraq(cneg) funyvfgf.nccraq([]) qrs _cbc(sbepr_gerr): nffreg(yra(cnegf) >= 1) cneg = cnegf.cbc() funyvfg = funyvfgf.cbc() gerr = sbepr_gerr be j.arj_gerr(funyvfg) vs funyvfgf: 
funyvfgf[-1].nccraq(('40000', cneg, gerr)) ryfr: # guvf jnf gur gbcyriry, fb chg vg onpx sbe fnavgl funyvfgf.nccraq(funyvfg) erghea gerr ynfgerznva = Abar qrs cebterff_ercbeg(a): tybony pbhag, fhopbhag, ynfgerznva fhopbhag += a pp = pbhag + fhopbhag cpg = gbgny naq (pp*100.0/gbgny) be 0 abj = gvzr.gvzr() ryncfrq = abj - gfgneg xcf = ryncfrq naq vag(pp/1024./ryncfrq) xcf_senp = 10 ** vag(zngu.ybt(xcf+1, 10) - 1) xcf = vag(xcf/xcf_senp)*xcf_senp vs pp: erznva = ryncfrq*1.0/pp * (gbgny-pp) ryfr: erznva = 0.0 vs (ynfgerznva naq (erznva > ynfgerznva) naq ((erznva - ynfgerznva)/ynfgerznva < 0.05)): erznva = ynfgerznva ryfr: ynfgerznva = erznva ubhef = vag(erznva/60/60) zvaf = vag(erznva/60 - ubhef*60) frpf = vag(erznva - ubhef*60*60 - zvaf*60) vs ryncfrq < 30: erznvafge = '' xcffge = '' ryfr: xcffge = '%qx/f' % xcf vs ubhef: erznvafge = '%qu%qz' % (ubhef, zvaf) ryvs zvaf: erznvafge = '%qz%q' % (zvaf, frpf) ryfr: erznvafge = '%qf' % frpf cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf) %f %f\e' % (cpg, pp/1024, gbgny/1024, spbhag, sgbgny, erznvafge, xcffge)) e = vaqrk.Ernqre(tvg.ercb('ohcvaqrk')) qrs nyernql_fnirq(rag): erghea rag.vf_inyvq() naq j.rkvfgf(rag.fun) naq rag.fun qrs jnagerphefr_cer(rag): erghea abg nyernql_fnirq(rag) qrs jnagerphefr_qhevat(rag): erghea abg nyernql_fnirq(rag) be rag.fun_zvffvat() gbgny = sgbgny = 0 vs bcg.cebterff: sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_cer): vs abg (sgbgny % 10024): cebterff('Ernqvat vaqrk: %q\e' % sgbgny) rkvfgf = rag.rkvfgf() unfuinyvq = nyernql_fnirq(rag) rag.frg_fun_zvffvat(abg unfuinyvq) vs abg bcg.fznyyre be rag.fvmr < bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: gbgny += rag.fvmr sgbgny += 1 cebterff('Ernqvat vaqrk: %q, qbar.\a' % sgbgny) unfufcyvg.cebterff_pnyyonpx = cebterff_ercbeg gfgneg = gvzr.gvzr() pbhag = fhopbhag = spbhag = 0 ynfgfxvc_anzr = Abar ynfgqve = '' sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_qhevat): (qve, svyr) = bf.cngu.fcyvg(rag.anzr) rkvfgf = (rag.syntf & 
vaqrk.VK_RKVFGF) unfuinyvq = nyernql_fnirq(rag) jnfzvffvat = rag.fun_zvffvat() byqfvmr = rag.fvmr vs bcg.ireobfr: vs abg rkvfgf: fgnghf = 'Q' ryvs abg unfuinyvq: vs rag.fun == vaqrk.RZCGL_FUN: fgnghf = 'N' ryfr: fgnghf = 'Z' ryfr: fgnghf = ' ' vs bcg.ireobfr >= 2: ybt('%f %-70f\a' % (fgnghf, rag.anzr)) ryvs abg fgng.F_VFQVE(rag.zbqr) naq ynfgqve != qve: vs abg ynfgqve.fgnegfjvgu(qve): ybt('%f %-70f\a' % (fgnghf, bf.cngu.wbva(qve, ''))) ynfgqve = qve vs bcg.cebterff: cebterff_ercbeg(0) spbhag += 1 vs abg rkvfgf: pbagvahr vs bcg.fznyyre naq rag.fvmr >= bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: nqq_reebe('fxvccvat ynetr svyr "%f"' % rag.anzr) ynfgfxvc_anzr = rag.anzr pbagvahr nffreg(qve.fgnegfjvgu('/')) qvec = qve.fcyvg('/') juvyr cnegf > qvec: _cbc(sbepr_gerr = Abar) vs qve != '/': sbe cneg va qvec[yra(cnegf):]: _chfu(cneg) vs abg svyr: # ab svyranzr cbegvba zrnaf guvf vf n fhoqve. Ohg # fho/cneragqverpgbevrf nyernql unaqyrq va gur cbc/chfu() cneg nobir. byqgerr = nyernql_fnirq(rag) # znl or Abar arjgerr = _cbc(sbepr_gerr = byqgerr) vs abg byqgerr: vs ynfgfxvc_anzr naq ynfgfxvc_anzr.fgnegfjvgu(rag.anzr): rag.vainyvqngr() ryfr: rag.inyvqngr(040000, arjgerr) rag.ercnpx() vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr pbagvahr # vg'f abg n qverpgbel vq = Abar vs unfuinyvq: zbqr = '%b' % rag.tvgzbqr vq = rag.fun funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) ryfr: vs fgng.F_VFERT(rag.zbqr): gel: s = unfufcyvg.bcra_abngvzr(rag.anzr) rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = unfufcyvg.fcyvg_gb_oybo_be_gerr(j, [s]) ryfr: vs fgng.F_VFQVE(rag.zbqr): nffreg(0) # unaqyrq nobir ryvs fgng.F_VFYAX(rag.zbqr): gel: ey = bf.ernqyvax(rag.anzr) rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = ('120000', j.arj_oybo(ey)) ryfr: nqq_reebe(Rkprcgvba('fxvccvat fcrpvny svyr "%f"' % rag.anzr)) 
ynfgfxvc_anzr = rag.anzr vs vq: rag.inyvqngr(vag(zbqr, 8), vq) rag.ercnpx() funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr fhopbhag = 0 vs bcg.cebterff: cpg = gbgny naq pbhag*100.0/gbgny be 100 cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf), qbar. \a' % (cpg, pbhag/1024, gbgny/1024, spbhag, sgbgny)) juvyr yra(cnegf) > 1: _cbc(sbepr_gerr = Abar) nffreg(yra(funyvfgf) == 1) gerr = j.arj_gerr(funyvfgf[-1]) vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fnir\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq juvyr fnivat.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc gvpx """ b = bcgvbaf.Bcgvbaf('ohc gvpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") g = gvzr.gvzr() gyrsg = 1 - (g - vag(g)) gvzr.fyrrc(gyrsg) #!/hfe/ova/rai clguba vzcbeg bf, flf, fgng, gvzr sebz ohc vzcbeg bcgvbaf, tvg, vaqrk, qerphefr sebz ohc.urycref vzcbeg * qrs zretr_vaqrkrf(bhg, e1, e2): sbe r va vaqrk.ZretrVgre([e1, e2]): # SVKZR: fubhyqa'g jr erzbir qryrgrq ragevrf riraghnyyl? Jura? 
bhg.nqq_vkragel(r) pynff VgreUrycre: qrs __vavg__(frys, y): frys.v = vgre(y) frys.phe = Abar frys.arkg() qrs arkg(frys): gel: frys.phe = frys.v.arkg() rkprcg FgbcVgrengvba: frys.phe = Abar erghea frys.phe qrs purpx_vaqrk(ernqre): gel: ybt('purpx: purpxvat sbejneq vgrengvba...\a') r = Abar q = {} sbe r va ernqre.sbejneq_vgre(): vs r.puvyqera_a: vs bcg.ireobfr: ybt('%08k+%-4q %e\a' % (r.puvyqera_bsf, r.puvyqera_a, r.anzr)) nffreg(r.puvyqera_bsf) nffreg(r.anzr.raqfjvgu('/')) nffreg(abg q.trg(r.puvyqera_bsf)) q[r.puvyqera_bsf] = 1 vs r.syntf & vaqrk.VK_UNFUINYVQ: nffreg(r.fun != vaqrk.RZCGL_FUN) nffreg(r.tvgzbqr) nffreg(abg r be r.anzr == '/') # ynfg ragel vf *nyjnlf* / ybt('purpx: purpxvat abezny vgrengvba...\a') ynfg = Abar sbe r va ernqre: vs ynfg: nffreg(ynfg > r.anzr) ynfg = r.anzr rkprcg: ybt('vaqrk reebe! ng %e\a' % r) envfr ybt('purpx: cnffrq.\a') qrs hcqngr_vaqrk(gbc): ev = vaqrk.Ernqre(vaqrksvyr) jv = vaqrk.Jevgre(vaqrksvyr) evt = VgreUrycre(ev.vgre(anzr=gbc)) gfgneg = vag(gvzr.gvzr()) unfutra = Abar vs bcg.snxr_inyvq: qrs unfutra(anzr): erghea (0100644, vaqrk.SNXR_FUN) gbgny = 0 sbe (cngu,cfg) va qerphefr.erphefvir_qveyvfg([gbc], kqri=bcg.kqri): vs bcg.ireobfr>=2 be (bcg.ireobfr==1 naq fgng.F_VFQVE(cfg.fg_zbqr)): flf.fgqbhg.jevgr('%f\a' % cngu) flf.fgqbhg.syhfu() cebterff('Vaqrkvat: %q\e' % gbgny) ryvs abg (gbgny % 128): cebterff('Vaqrkvat: %q\e' % gbgny) gbgny += 1 juvyr evt.phe naq evt.phe.anzr > cngu: # qryrgrq cnguf vs evt.phe.rkvfgf(): evt.phe.frg_qryrgrq() evt.phe.ercnpx() evt.arkg() vs evt.phe naq evt.phe.anzr == cngu: # cnguf gung nyernql rkvfgrq vs cfg: evt.phe.sebz_fgng(cfg, gfgneg) vs abg (evt.phe.syntf & vaqrk.VK_UNFUINYVQ): vs unfutra: (evt.phe.tvgzbqr, evt.phe.fun) = unfutra(cngu) evt.phe.syntf |= vaqrk.VK_UNFUINYVQ vs bcg.snxr_vainyvq: evt.phe.vainyvqngr() evt.phe.ercnpx() evt.arkg() ryfr: # arj cnguf jv.nqq(cngu, cfg, unfutra = unfutra) cebterff('Vaqrkvat: %q, qbar.\a' % gbgny) vs ev.rkvfgf(): ev.fnir() jv.syhfu() vs jv.pbhag: je = 
jv.arj_ernqre() vs bcg.purpx: ybt('purpx: orsber zretvat: byqsvyr\a') purpx_vaqrk(ev) ybt('purpx: orsber zretvat: arjsvyr\a') purpx_vaqrk(je) zv = vaqrk.Jevgre(vaqrksvyr) zretr_vaqrkrf(zv, ev, je) ev.pybfr() zv.pybfr() je.pybfr() jv.nobeg() ryfr: jv.pybfr() bcgfcrp = """ ohc vaqrk <-c|z|h> [bcgvbaf...] -- c,cevag cevag gur vaqrk ragevrf sbe gur tvira anzrf (nyfb jbexf jvgu -h) z,zbqvsvrq cevag bayl nqqrq/qryrgrq/zbqvsvrq svyrf (vzcyvrf -c) f,fgnghf cevag rnpu svyranzr jvgu n fgnghf pune (N/Z/Q) (vzcyvrf -c) U,unfu cevag gur unfu sbe rnpu bowrpg arkg gb vgf anzr (vzcyvrf -c) y,ybat cevag zber vasbezngvba nobhg rnpu svyr h,hcqngr (erphefviryl) hcqngr gur vaqrk ragevrf sbe gur tvira svyranzrf k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf snxr-inyvq znex nyy vaqrk ragevrf nf hc-gb-qngr rira vs gurl nera'g snxr-vainyvq znex nyy vaqrk ragevrf nf vainyvq purpx pnershyyl purpx vaqrk svyr vagrtevgl s,vaqrksvyr= gur anzr bs gur vaqrk svyr (qrsnhyg 'vaqrk') i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) """ b = bcgvbaf.Bcgvbaf('ohc vaqrk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg (bcg.zbqvsvrq be bcg['cevag'] be bcg.fgnghf be bcg.hcqngr be bcg.purpx): b.sngny('fhccyl bar be zber bs -c, -f, -z, -h, be --purpx') vs (bcg.snxr_inyvq be bcg.snxr_vainyvq) naq abg bcg.hcqngr: b.sngny('--snxr-{va,}inyvq ner zrnavatyrff jvgubhg -h') vs bcg.snxr_inyvq naq bcg.snxr_vainyvq: b.sngny('--snxr-inyvq vf vapbzcngvoyr jvgu --snxr-vainyvq') tvg.purpx_ercb_be_qvr() vaqrksvyr = bcg.vaqrksvyr be tvg.ercb('ohcvaqrk') unaqyr_pgey_p() vs bcg.purpx: ybt('purpx: fgnegvat vavgvny purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) cnguf = vaqrk.erqhpr_cnguf(rkgen) vs bcg.hcqngr: vs abg cnguf: b.sngny('hcqngr (-h) erdhrfgrq ohg ab cnguf tvira') sbe (ec,cngu) va cnguf: hcqngr_vaqrk(ec) vs bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq: sbe (anzr, rag) va vaqrk.Ernqre(vaqrksvyr).svygre(rkgen be ['']): vs (bcg.zbqvsvrq naq (rag.vf_inyvq() be rag.vf_qryrgrq() be abg 
rag.zbqr)): pbagvahr yvar = '' vs bcg.fgnghf: vs rag.vf_qryrgrq(): yvar += 'Q ' ryvs abg rag.vf_inyvq(): vs rag.fun == vaqrk.RZCGL_FUN: yvar += 'N ' ryfr: yvar += 'Z ' ryfr: yvar += ' ' vs bcg.unfu: yvar += rag.fun.rapbqr('urk') + ' ' vs bcg.ybat: yvar += "%7f %7f " % (bpg(rag.zbqr), bpg(rag.tvgzbqr)) cevag yvar + (anzr be './') vs bcg.purpx naq (bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq be bcg.hcqngr): ybt('purpx: fgnegvat svany purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg sebz ohc vzcbeg bcgvbaf, urycref bcgfcrp = """ ohc eonpxhc-freire -- Guvf pbzznaq vf abg vagraqrq gb or eha znahnyyl. """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc-freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') # trg gur fhopbzznaq'f neti. # Abeznyyl jr pbhyq whfg cnff guvf ba gur pbzznaq yvar, ohg fvapr jr'yy bsgra # or trggvat pnyyrq ba gur bgure raq bs na ffu cvcr, juvpu graqf gb znatyr # neti (ol fraqvat vg ivn gur furyy), guvf jnl vf zhpu fnsre. ohs = flf.fgqva.ernq(4) fm = fgehpg.hacnpx('!V', ohs)[0] nffreg(fm > 0) nffreg(fm < 1000000) ohs = flf.fgqva.ernq(fm) nffreg(yra(ohs) == fm) neti = ohs.fcyvg('\0') # fgqva/fgqbhg ner fhccbfrqyl pbaarpgrq gb 'ohc freire' gung gur pnyyre # fgnegrq sbe hf (bsgra ba gur bgure raq bs na ffu ghaary), fb jr qba'g jnag # gb zvfhfr gurz. Zbir gurz bhg bs gur jnl, gura ercynpr fgqbhg jvgu # n cbvagre gb fgqree va pnfr bhe fhopbzznaq jnagf gb qb fbzrguvat jvgu vg. # # Vg zvtug or avpr gb qb gur fnzr jvgu fgqva, ohg zl rkcrevzragf fubjrq gung # ffu frrzf gb znxr vgf puvyq'f fgqree n ernqnoyr-ohg-arire-ernqf-nalguvat # fbpxrg. Gurl ernyyl fubhyq unir hfrq fuhgqbja(FUHG_JE) ba gur bgure raq # bs vg, ohg cebonoyl qvqa'g. Naljnl, vg'f gbb zrffl, fb yrg'f whfg znxr fher # nalbar ernqvat sebz fgqva vf qvfnccbvagrq. 
# # (Lbh pna'g whfg yrnir fgqva/fgqbhg "abg bcra" ol pybfvat gur svyr # qrfpevcgbef. Gura gur arkg svyr gung bcraf vf nhgbzngvpnyyl nffvtarq 0 be 1, # naq crbcyr *gelvat* gb ernq/jevgr fgqva/fgqbhg trg fperjrq.) bf.qhc2(0, 3) bf.qhc2(1, 4) bf.qhc2(2, 1) sq = bf.bcra('/qri/ahyy', bf.B_EQBAYL) bf.qhc2(sq, 0) bf.pybfr(sq) bf.raiveba['OHC_FREIRE_ERIREFR'] = urycref.ubfganzr() bf.rkrpic(neti[0], neti) flf.rkvg(99) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo, fhocebprff, gvzr sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * cne2_bx = 0 ahyys = bcra('/qri/ahyy') qrs qroht(f): vs bcg.ireobfr: ybt(f) qrs eha(neti): # ng yrnfg va clguba 2.5, hfvat "fgqbhg=2" be "fgqbhg=flf.fgqree" orybj # qbrfa'g npghnyyl jbex, orpnhfr fhocebprff pybfrf sq #2 evtug orsber # rkrpvat sbe fbzr ernfba. Fb jr jbex nebhaq vg ol qhcyvpngvat gur sq # svefg. sq = bf.qhc(2) # pbcl fgqree gel: c = fhocebprff.Cbcra(neti, fgqbhg=sq, pybfr_sqf=Snyfr) erghea c.jnvg() svanyyl: bf.pybfr(sq) qrs cne2_frghc(): tybony cne2_bx ei = 1 gel: c = fhocebprff.Cbcra(['cne2', '--uryc'], fgqbhg=ahyys, fgqree=ahyys, fgqva=ahyys) ei = c.jnvg() rkprcg BFReebe: ybt('sfpx: jneavat: cne2 abg sbhaq; qvfnoyvat erpbirel srngherf.\a') ryfr: cne2_bx = 1 qrs cnei(yiy): vs bcg.ireobfr >= yiy: vs vfggl: erghea [] ryfr: erghea ['-d'] ryfr: erghea ['-dd'] qrs cne2_trarengr(onfr): erghea eha(['cne2', 'perngr', '-a1', '-p200'] + cnei(2) + ['--', onfr, onfr+'.cnpx', onfr+'.vqk']) qrs cne2_irevsl(onfr): erghea eha(['cne2', 'irevsl'] + cnei(3) + ['--', onfr]) qrs cne2_ercnve(onfr): erghea eha(['cne2', 'ercnve'] + cnei(2) + ['--', onfr]) qrs dhvpx_irevsl(onfr): s = bcra(onfr + '.cnpx', 'eo') s.frrx(-20, 2) jnagfhz = s.ernq(20) nffreg(yra(jnagfhz) == 20) s.frrx(0) fhz = Fun1() sbe o va puhaxlernqre(s, bf.sfgng(s.svyrab()).fg_fvmr - 20): fhz.hcqngr(o) vs fhz.qvtrfg() != jnagfhz: envfr InyhrReebe('rkcrpgrq %e, tbg %e' % (jnagfhz.rapbqr('urk'), fhz.urkqvtrfg())) qrs tvg_irevsl(onfr): vs bcg.dhvpx: gel: dhvpx_irevsl(onfr) rkprcg Rkprcgvba, r: 
qroht('reebe: %f\a' % r) erghea 1 erghea 0 ryfr: erghea eha(['tvg', 'irevsl-cnpx', '--', onfr]) qrs qb_cnpx(onfr, ynfg): pbqr = 0 vs cne2_bx naq cne2_rkvfgf naq (bcg.ercnve be abg bcg.trarengr): ierfhyg = cne2_irevsl(onfr) vs ierfhyg != 0: vs bcg.ercnve: eerfhyg = cne2_ercnve(onfr) vs eerfhyg != 0: cevag '%f cne2 ercnve: snvyrq (%q)' % (ynfg, eerfhyg) pbqr = eerfhyg ryfr: cevag '%f cne2 ercnve: fhpprrqrq (0)' % ynfg pbqr = 100 ryfr: cevag '%f cne2 irevsl: snvyrq (%q)' % (ynfg, ierfhyg) pbqr = ierfhyg ryfr: cevag '%f bx' % ynfg ryvs abg bcg.trarengr be (cne2_bx naq abg cne2_rkvfgf): terfhyg = tvg_irevsl(onfr) vs terfhyg != 0: cevag '%f tvg irevsl: snvyrq (%q)' % (ynfg, terfhyg) pbqr = terfhyg ryfr: vs cne2_bx naq bcg.trarengr: cerfhyg = cne2_trarengr(onfr) vs cerfhyg != 0: cevag '%f cne2 perngr: snvyrq (%q)' % (ynfg, cerfhyg) pbqr = cerfhyg ryfr: cevag '%f bx' % ynfg ryfr: cevag '%f bx' % ynfg ryfr: nffreg(bcg.trarengr naq (abg cne2_bx be cne2_rkvfgf)) qroht(' fxvccrq: cne2 svyr nyernql trarengrq.\a') erghea pbqr bcgfcrp = """ ohc sfpx [bcgvbaf...] [svyranzrf...] -- e,ercnve nggrzcg gb ercnve reebef hfvat cne2 (qnatrebhf!) 
t,trarengr trarengr nhgb-ercnve vasbezngvba hfvat cne2 i,ireobfr vapernfr ireobfvgl (pna or hfrq zber guna bapr) dhvpx whfg purpx cnpx fun1fhz, qba'g hfr tvg irevsl-cnpx w,wbof= eha 'a' wbof va cnenyyry cne2-bx vzzrqvngryl erghea 0 vs cne2 vf bx, 1 vs abg qvfnoyr-cne2 vtaber cne2 rira vs vg vf ninvynoyr """ b = bcgvbaf.Bcgvbaf('ohc sfpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) cne2_frghc() vs bcg.cne2_bx: vs cne2_bx: flf.rkvg(0) # 'gehr' va fu ryfr: flf.rkvg(1) vs bcg.qvfnoyr_cne2: cne2_bx = 0 tvg.purpx_ercb_be_qvr() vs abg rkgen: qroht('sfpx: Ab svyranzrf tvira: purpxvat nyy cnpxf.\a') rkgen = tybo.tybo(tvg.ercb('bowrpgf/cnpx/*.cnpx')) pbqr = 0 pbhag = 0 bhgfgnaqvat = {} sbe anzr va rkgen: vs anzr.raqfjvgu('.cnpx'): onfr = anzr[:-5] ryvs anzr.raqfjvgu('.vqk'): onfr = anzr[:-4] ryvs anzr.raqfjvgu('.cne2'): onfr = anzr[:-5] ryvs bf.cngu.rkvfgf(anzr + '.cnpx'): onfr = anzr ryfr: envfr Rkprcgvba('%f vf abg n cnpx svyr!' % anzr) (qve,ynfg) = bf.cngu.fcyvg(onfr) cne2_rkvfgf = bf.cngu.rkvfgf(onfr + '.cne2') vs cne2_rkvfgf naq bf.fgng(onfr + '.cne2').fg_fvmr == 0: cne2_rkvfgf = 0 flf.fgqbhg.syhfu() qroht('sfpx: purpxvat %f (%f)\a' % (ynfg, cne2_bx naq cne2_rkvfgf naq 'cne2' be 'tvg')) vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.wbof: ap = qb_cnpx(onfr, ynfg) pbqr = pbqr be ap pbhag += 1 ryfr: juvyr yra(bhgfgnaqvat) >= bcg.wbof: (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 cvq = bf.sbex() vs cvq: # cnerag bhgfgnaqvat[cvq] = 1 ryfr: # puvyq gel: flf.rkvg(qb_cnpx(onfr, ynfg)) rkprcg Rkprcgvba, r: ybt('rkprcgvba: %e\a' % r) flf.rkvg(99) juvyr yra(bhgfgnaqvat): (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.ireobfr naq vfggl: ybt('sfpx qbar. 
\a') flf.rkvg(pbqr) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg, trgbcg, fhocebprff, fvtany sebz ohc vzcbeg bcgvbaf, ffu sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc eonpxhc vaqrk ... ohc eonpxhc fnir ... ohc eonpxhc fcyvg ... """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc', bcgfcrp, bcgshap=trgbcg.trgbcg) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) < 2: b.sngny('nethzragf rkcrpgrq') pynff FvtRkprcgvba(Rkprcgvba): qrs __vavg__(frys, fvtahz): frys.fvtahz = fvtahz Rkprcgvba.__vavg__(frys, 'fvtany %q erprvirq' % fvtahz) qrs unaqyre(fvtahz, senzr): envfr FvtRkprcgvba(fvtahz) fvtany.fvtany(fvtany.FVTGREZ, unaqyre) fvtany.fvtany(fvtany.FVTVAG, unaqyre) fc = Abar c = Abar erg = 99 gel: ubfganzr = rkgen[0] neti = rkgen[1:] c = ffu.pbaarpg(ubfganzr, 'eonpxhc-freire') netif = '\0'.wbva(['ohc'] + neti) c.fgqva.jevgr(fgehpg.cnpx('!V', yra(netif)) + netif) c.fgqva.syhfu() znva_rkr = bf.raiveba.trg('OHC_ZNVA_RKR') be flf.neti[0] fc = fhocebprff.Cbcra([znva_rkr, 'freire'], fgqva=c.fgqbhg, fgqbhg=c.fgqva) c.fgqva.pybfr() c.fgqbhg.pybfr() svanyyl: juvyr 1: # vs jr trg n fvtany juvyr jnvgvat, jr unir gb xrrc jnvgvat, whfg # va pnfr bhe puvyq qbrfa'g qvr. 
gel: erg = c.jnvg() fc.jnvg() oernx rkprcg FvtRkprcgvba, r: ybt('\aohc eonpxhc: %f\a' % r) bf.xvyy(c.cvq, r.fvtahz) erg = 84 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc arjyvare """ b = bcgvbaf.Bcgvbaf('ohc arjyvare', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") e = er.pbzcvyr(e'([\e\a])') ynfgyra = 0 nyy = '' juvyr 1: y = e.fcyvg(nyy, 1) vs yra(y) <= 1: gel: o = bf.ernq(flf.fgqva.svyrab(), 4096) rkprcg XrlobneqVagreehcg: oernx vs abg o: oernx nyy += o ryfr: nffreg(yra(y) == 3) (yvar, fcyvgpune, nyy) = y #fcyvgpune = '\a' flf.fgqbhg.jevgr('%-*f%f' % (ynfgyra, yvar, fcyvgpune)) vs fcyvgpune == '\e': ynfgyra = yra(yvar) ryfr: ynfgyra = 0 flf.fgqbhg.syhfu() vs ynfgyra be nyy: flf.fgqbhg.jevgr('%-*f\a' % (ynfgyra, nyy)) #!/hfe/ova/rai clguba vzcbeg flf sebz ohc vzcbeg bcgvbaf, tvg, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc znetva """ b = bcgvbaf.Bcgvbaf('ohc znetva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") tvg.purpx_ercb_be_qvr() #tvg.vtaber_zvqk = 1 zv = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) ynfg = '\0'*20 ybatzngpu = 0 sbe v va zv: vs v == ynfg: pbagvahr #nffreg(fge(v) >= ynfg) cz = _unfufcyvg.ovgzngpu(ynfg, v) ybatzngpu = znk(ybatzngpu, cz) ynfg = v cevag ybatzngpu #!/hfe/ova/rai clguba sebz ohc vzcbeg bcgvbaf, qerphefr sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc qerphefr -- k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf d,dhvrg qba'g npghnyyl cevag svyranzrf cebsvyr eha haqre gur clguba cebsvyre """ b = bcgvbaf.Bcgvbaf('ohc qerphefr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar svyranzr rkcrpgrq") vg = qerphefr.erphefvir_qveyvfg(rkgen, bcg.kqri) vs bcg.cebsvyr: vzcbeg pCebsvyr qrs qb_vg(): sbe v va vg: cnff pCebsvyr.eha('qb_vg()') ryfr: vs bcg.dhvrg: sbe v va vg: cnff ryfr: sbe (anzr,fg) va vg: cevag anzr vs fnirq_reebef: 
ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc fcyvg [-gpo] [-a anzr] [--orapu] [svyranzrf...] -- e,erzbgr= erzbgr ercbfvgbel cngu o,oybof bhgchg n frevrf bs oybo vqf g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) A,abbc qba'g npghnyyl fnir gur qngn naljurer d,dhvrg qba'g cevag cebterff zrffntrf i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) pbcl whfg pbcl vachg gb bhgchg, unfufcyvggvat nybat gur jnl orapu cevag orapuznex gvzvatf gb fgqree znk-cnpx-fvmr= znkvzhz olgrf va n fvatyr cnpx znk-cnpx-bowrpgf= znkvzhz ahzore bs bowrpgf va n fvatyr cnpx snabhg= znkvzhz ahzore bs oybof va n fvatyr gerr """ b = bcgvbaf.Bcgvbaf('ohc fcyvg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr be bcg.abbc be bcg.pbcl): b.sngny("hfr bar be zber bs -o, -g, -p, -a, -A, --pbcl") vs (bcg.abbc be bcg.pbcl) naq (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny('-A vf vapbzcngvoyr jvgu -o, -g, -p, -a') vs bcg.ireobfr >= 2: tvg.ireobfr = bcg.ireobfr - 1 bcg.orapu = 1 vs bcg.znk_cnpx_fvmr: unfufcyvg.znk_cnpx_fvmr = cnefr_ahz(bcg.znk_cnpx_fvmr) vs bcg.znk_cnpx_bowrpgf: unfufcyvg.znk_cnpx_bowrpgf = cnefr_ahz(bcg.znk_cnpx_bowrpgf) vs bcg.snabhg: unfufcyvg.snabhg = cnefr_ahz(bcg.snabhg) vs bcg.oybof: unfufcyvg.snabhg = 0 vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") fgneg_gvzr = gvzr.gvzr() ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.abbc be bcg.pbcl: pyv = j = byqers = Abar ryvs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr 
naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() svyrf = rkgen naq (bcra(sa) sbe sa va rkgen) be [flf.fgqva] vs j: funyvfg = unfufcyvg.fcyvg_gb_funyvfg(j, svyrf) gerr = j.arj_gerr(funyvfg) ryfr: ynfg = 0 sbe (oybo, ovgf) va unfufcyvg.unfufcyvg_vgre(svyrf): unfufcyvg.gbgny_fcyvg += yra(oybo) vs bcg.pbcl: flf.fgqbhg.jevgr(fge(oybo)) zrtf = unfufcyvg.gbgny_fcyvg/1024/1024 vs abg bcg.dhvrg naq ynfg != zrtf: cebterff('%q Zolgrf ernq\e' % zrtf) ynfg = zrtf cebterff('%q Zolgrf ernq, qbar.\a' % zrtf) vs bcg.ireobfr: ybt('\a') vs bcg.oybof: sbe (zbqr,anzr,ova) va funyvfg: cevag ova.rapbqr('urk') vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fcyvg\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') vs j: j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() frpf = gvzr.gvzr() - fgneg_gvzr fvmr = unfufcyvg.gbgny_fcyvg vs bcg.orapu: ybt('\aohc: %.2sxolgrf va %.2s frpf = %.2s xolgrf/frp\a' % (fvmr/1024., frpf, fvmr/1024./frpf)) #!/hfe/ova/rai clguba vzcbeg flf, er, fgehpg, zznc sebz ohc vzcbeg tvg, bcgvbaf sebz ohc.urycref vzcbeg * qrs f_sebz_olgrf(olgrf): pyvfg = [pue(o) sbe o va olgrf] erghea ''.wbva(pyvfg) qrs ercbeg(pbhag): svryqf = ['IzFvmr', 'IzEFF', 'IzQngn', 'IzFgx'] q = {} sbe yvar va bcra('/cebp/frys/fgnghf').ernqyvarf(): y = er.fcyvg(e':\f*', yvar.fgevc(), 1) q[y[0]] = y[1] vs pbhag >= 0: r1 = pbhag svryqf = [q[x] sbe x va svryqf] ryfr: r1 = '' cevag ('%9f ' + ('%10f ' * yra(svryqf))) % ghcyr([r1] + svryqf) flf.fgqbhg.syhfu() bcgfcrp = """ ohc zrzgrfg [-a ryrzragf] [-p plpyrf] -- a,ahzore= ahzore bs bowrpgf cre plpyr p,plpyrf= ahzore bs plpyrf gb eha vtaber-zvqk vtaber .zvqk svyrf, hfr bayl .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zrzgrfg', bcgfcrp) (bcg, syntf, rkgen) = 
b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') tvg.vtaber_zvqk = bcg.vtaber_zvqk tvg.purpx_ercb_be_qvr() z = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) plpyrf = bcg.plpyrf be 100 ahzore = bcg.ahzore be 10000 ercbeg(-1) s = bcra('/qri/henaqbz') n = zznc.zznc(-1, 20) ercbeg(0) sbe p va kenatr(plpyrf): sbe a va kenatr(ahzore): o = s.ernq(3) vs 0: olgrf = yvfg(fgehpg.hacnpx('!OOO', o)) + [0]*17 olgrf[2] &= 0ks0 ova = fgehpg.cnpx('!20f', f_sebz_olgrf(olgrf)) ryfr: n[0:2] = o[0:2] n[2] = pue(beq(o[2]) & 0ks0) ova = fge(n[0:20]) #cevag ova.rapbqr('urk') z.rkvfgf(ova) ercbeg((p+1)*ahzore) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * qrs cevag_abqr(grkg, a): cersvk = '' vs bcg.unfu: cersvk += "%f " % a.unfu.rapbqr('urk') vs fgng.F_VFQVE(a.zbqr): cevag '%f%f/' % (cersvk, grkg) ryvs fgng.F_VFYAX(a.zbqr): cevag '%f%f@' % (cersvk, grkg) ryfr: cevag '%f%f' % (cersvk, grkg) bcgfcrp = """ ohc yf -- f,unfu fubj unfu sbe rnpu svyr """ b = bcgvbaf.Bcgvbaf('ohc yf', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) vs abg rkgen: rkgen = ['/'] erg = 0 sbe q va rkgen: gel: a = gbc.yerfbyir(q) vs fgng.F_VFQVE(a.zbqr): sbe fho va a: cevag_abqr(fho.anzr, fho) ryfr: cevag_abqr(q, a) rkprcg isf.AbqrReebe, r: ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er, fgng, ernqyvar, sazngpu sebz ohc vzcbeg bcgvbaf, tvg, fudhbgr, isf sebz ohc.urycref vzcbeg * qrs abqr_anzr(grkg, a): vs fgng.F_VFQVE(a.zbqr): erghea '%f/' % grkg ryvs fgng.F_VFYAX(a.zbqr): erghea '%f@' % grkg ryfr: erghea '%f' % grkg qrs qb_yf(cngu, a): y = [] vs fgng.F_VFQVE(a.zbqr): sbe fho va a: y.nccraq(abqr_anzr(fho.anzr, fho)) ryfr: y.nccraq(abqr_anzr(cngu, a)) cevag pbyhzangr(y, '') qrs jevgr_gb_svyr(vas, bhgs): sbe oybo va puhaxlernqre(vas): bhgs.jevgr(oybo) qrs vachgvgre(): vs bf.vfnggl(flf.fgqva.svyrab()): juvyr 1: gel: lvryq enj_vachg('ohc> ') rkprcg RBSReebe: oernx 
ryfr: sbe yvar va flf.fgqva: lvryq yvar qrs _pbzcyrgre_trg_fhof(yvar): (dglcr, ynfgjbeq) = fudhbgr.hasvavfurq_jbeq(yvar) (qve,anzr) = bf.cngu.fcyvg(ynfgjbeq) #ybt('\apbzcyrgre: %e %e %e\a' % (dglcr, ynfgjbeq, grkg)) a = cjq.erfbyir(qve) fhof = yvfg(svygre(ynzoqn k: k.anzr.fgnegfjvgu(anzr), a.fhof())) erghea (qve, anzr, dglcr, ynfgjbeq, fhof) _ynfg_yvar = Abar _ynfg_erf = Abar qrs pbzcyrgre(grkg, fgngr): tybony _ynfg_yvar tybony _ynfg_erf gel: yvar = ernqyvar.trg_yvar_ohssre()[:ernqyvar.trg_raqvqk()] vs _ynfg_yvar != yvar: _ynfg_erf = _pbzcyrgre_trg_fhof(yvar) _ynfg_yvar = yvar (qve, anzr, dglcr, ynfgjbeq, fhof) = _ynfg_erf vs fgngr < yra(fhof): fa = fhof[fgngr] fa1 = fa.erfbyir('') # qrers flzyvaxf shyyanzr = bf.cngu.wbva(qve, fa.anzr) vs fgng.F_VFQVE(fa1.zbqr): erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr+'/', grezvangr=Snyfr) ryfr: erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr, grezvangr=Gehr) + ' ' erghea grkg + erg rkprcg Rkprcgvba, r: ybt('\areebe va pbzcyrgvba: %f\a' % r) bcgfcrp = """ ohc sgc """ b = bcgvbaf.Bcgvbaf('ohc sgc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) cjq = gbc vs rkgen: yvarf = rkgen ryfr: ernqyvar.frg_pbzcyrgre_qryvzf(' \g\a\e/') ernqyvar.frg_pbzcyrgre(pbzcyrgre) ernqyvar.cnefr_naq_ovaq("gno: pbzcyrgr") yvarf = vachgvgre() sbe yvar va yvarf: vs abg yvar.fgevc(): pbagvahr jbeqf = [jbeq sbe (jbeqfgneg,jbeq) va fudhbgr.dhbgrfcyvg(yvar)] pzq = jbeqf[0].ybjre() #ybt('rkrphgr: %e %e\a' % (pzq, cnez)) gel: vs pzq == 'yf': sbe cnez va (jbeqf[1:] be ['.']): qb_yf(cnez, cjq.erfbyir(cnez)) ryvs pzq == 'pq': sbe cnez va jbeqf[1:]: cjq = cjq.erfbyir(cnez) ryvs pzq == 'cjq': cevag cjq.shyyanzr() ryvs pzq == 'png': sbe cnez va jbeqf[1:]: jevgr_gb_svyr(cjq.erfbyir(cnez).bcra(), flf.fgqbhg) ryvs pzq == 'trg': vs yra(jbeqf) abg va [2,3]: envfr Rkprcgvba('Hfntr: trg [ybpnyanzr]') eanzr = jbeqf[1] (qve,onfr) = bf.cngu.fcyvg(eanzr) yanzr = yra(jbeqf)>2 naq jbeqf[2] be onfr vas = 
cjq.erfbyir(eanzr).bcra() ybt('Fnivat %e\a' % yanzr) jevgr_gb_svyr(vas, bcra(yanzr, 'jo')) ryvs pzq == 'ztrg': sbe cnez va jbeqf[1:]: (qve,onfr) = bf.cngu.fcyvg(cnez) sbe a va cjq.erfbyir(qve).fhof(): vs sazngpu.sazngpu(a.anzr, onfr): gel: ybt('Fnivat %e\a' % a.anzr) vas = a.bcra() bhgs = bcra(a.anzr, 'jo') jevgr_gb_svyr(vas, bhgs) bhgs.pybfr() rkprcg Rkprcgvba, r: ybt(' reebe: %f\a' % r) ryvs pzq == 'uryc' be pzq == '?': ybt('Pbzznaqf: yf pq cjq png trg ztrg uryc dhvg\a') ryvs pzq == 'dhvg' be pzq == 'rkvg' be pzq == 'olr': oernx ryfr: envfr Rkprcgvba('ab fhpu pbzznaq %e' % pzq) rkprcg Rkprcgvba, r: ybt('reebe: %f\a' % r) #envfr #!/hfe/ova/rai clguba vzcbeg flf, zznc sebz ohc vzcbeg bcgvbaf, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc enaqbz [-F frrq] -- F,frrq= bcgvbany enaqbz ahzore frrq (qrsnhyg 1) s,sbepr cevag enaqbz qngn gb fgqbhg rira vs vg'f n ggl """ b = bcgvbaf.Bcgvbaf('ohc enaqbz', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") gbgny = cnefr_ahz(rkgen[0]) vs bcg.sbepr be (abg bf.vfnggl(1) naq abg ngbv(bf.raiveba.trg('OHC_SBEPR_GGL')) & 1): _unfufcyvg.jevgr_enaqbz(flf.fgqbhg.svyrab(), gbgny, bcg.frrq be 0) ryfr: ybt('reebe: abg jevgvat ovanel qngn gb n grezvany. 
Hfr -s gb sbepr.\a') flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc uryc """ b = bcgvbaf.Bcgvbaf('ohc uryc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) == 0: # gur jenccre cebtenz cebivqrf gur qrsnhyg hfntr fgevat bf.rkrpic(bf.raiveba['OHC_ZNVA_RKR'], ['ohc']) ryvs yra(rkgen) == 1: qbpanzr = (rkgen[0]=='ohc' naq 'ohc' be ('ohc-%f' % rkgen[0])) rkr = flf.neti[0] (rkrcngu, rkrsvyr) = bf.cngu.fcyvg(rkr) znacngu = bf.cngu.wbva(rkrcngu, '../Qbphzragngvba/' + qbpanzr + '.[1-9]') t = tybo.tybo(znacngu) vs t: bf.rkrpic('zna', ['zna', '-y', t[0]]) ryfr: bf.rkrpic('zna', ['zna', qbpanzr]) ryfr: b.sngny("rknpgyl bar pbzznaq anzr rkcrpgrq") #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng, reeab, shfr, er, gvzr, grzcsvyr sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * pynff Fgng(shfr.Fgng): qrs __vavg__(frys): frys.fg_zbqr = 0 frys.fg_vab = 0 frys.fg_qri = 0 frys.fg_ayvax = 0 frys.fg_hvq = 0 frys.fg_tvq = 0 frys.fg_fvmr = 0 frys.fg_ngvzr = 0 frys.fg_zgvzr = 0 frys.fg_pgvzr = 0 frys.fg_oybpxf = 0 frys.fg_oyxfvmr = 0 frys.fg_eqri = 0 pnpur = {} qrs pnpur_trg(gbc, cngu): cnegf = cngu.fcyvg('/') pnpur[('',)] = gbc p = Abar znk = yra(cnegf) #ybt('pnpur: %e\a' % pnpur.xrlf()) sbe v va enatr(znk): cer = cnegf[:znk-v] #ybt('pnpur gelvat: %e\a' % cer) p = pnpur.trg(ghcyr(cer)) vs p: erfg = cnegf[znk-v:] sbe e va erfg: #ybt('erfbyivat %e sebz %e\a' % (e, p.shyyanzr())) p = p.yerfbyir(e) xrl = ghcyr(cer + [e]) #ybt('fnivat: %e\a' % (xrl,)) pnpur[xrl] = p oernx nffreg(p) erghea p pynff OhcSf(shfr.Shfr): qrs __vavg__(frys, gbc): shfr.Shfr.__vavg__(frys) frys.gbc = gbc qrs trgngge(frys, cngu): ybt('--trgngge(%e)\a' % cngu) gel: abqr = pnpur_trg(frys.gbc, cngu) fg = Fgng() fg.fg_zbqr = abqr.zbqr fg.fg_ayvax = abqr.ayvaxf() fg.fg_fvmr = abqr.fvmr() fg.fg_zgvzr = abqr.zgvzr fg.fg_pgvzr = abqr.pgvzr fg.fg_ngvzr = abqr.ngvzr erghea fg rkprcg isf.AbFhpuSvyr: erghea -reeab.RABRAG qrs ernqqve(frys, cngu, bssfrg): 
ybt('--ernqqve(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) lvryq shfr.Qveragel('.') lvryq shfr.Qveragel('..') sbe fho va abqr.fhof(): lvryq shfr.Qveragel(fho.anzr) qrs ernqyvax(frys, cngu): ybt('--ernqyvax(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) erghea abqr.ernqyvax() qrs bcra(frys, cngu, syntf): ybt('--bcra(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) nppzbqr = bf.B_EQBAYL | bf.B_JEBAYL | bf.B_EQJE vs (syntf & nppzbqr) != bf.B_EQBAYL: erghea -reeab.RNPPRF abqr.bcra() qrs eryrnfr(frys, cngu, syntf): ybt('--eryrnfr(%e)\a' % cngu) qrs ernq(frys, cngu, fvmr, bssfrg): ybt('--ernq(%e)\a' % cngu) a = pnpur_trg(frys.gbc, cngu) b = a.bcra() b.frrx(bssfrg) erghea b.ernq(fvmr) vs abg unfngge(shfr, '__irefvba__'): envfr EhagvzrReebe, "lbhe shfr zbqhyr vf gbb byq sbe shfr.__irefvba__" shfr.shfr_clguba_ncv = (0, 2) bcgfcrp = """ ohc shfr [-q] [-s] -- q,qroht vapernfr qroht yriry s,sbertebhaq eha va sbertebhaq """ b = bcgvbaf.Bcgvbaf('ohc shfr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) s = OhcSf(gbc) s.shfr_netf.zbhagcbvag = rkgen[0] vs bcg.qroht: s.shfr_netf.nqq('qroht') vs bcg.sbertebhaq: s.shfr_netf.frgzbq('sbertebhaq') cevag s.zhygvguernqrq s.zhygvguernqrq = Snyfr s.znva() #!/hfe/ova/rai clguba sebz ohc vzcbeg tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ [OHC_QVE=...] ohc vavg [-e ubfg:cngu] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc vavg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") vs bcg.erzbgr: tvg.vavg_ercb() # ybpny ercb tvg.purpx_ercb_be_qvr() pyv = pyvrag.Pyvrag(bcg.erzbgr, perngr=Gehr) pyv.pybfr() ryfr: tvg.vavg_ercb() #!/hfe/ova/rai clguba vzcbeg flf, zngu, fgehpg, tybo sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * CNTR_FVMR=4096 FUN_CRE_CNTR=CNTR_FVMR/200. 
qrs zretr(vqkyvfg, ovgf, gnoyr): pbhag = 0 sbe r va tvg.vqkzretr(vqkyvfg): pbhag += 1 cersvk = tvg.rkgenpg_ovgf(r, ovgf) gnoyr[cersvk] = pbhag lvryq r qrs qb_zvqk(bhgqve, bhgsvyranzr, vasvyranzrf): vs abg bhgsvyranzr: nffreg(bhgqve) fhz = Fun1('\0'.wbva(vasvyranzrf)).urkqvtrfg() bhgsvyranzr = '%f/zvqk-%f.zvqk' % (bhgqve, fhz) vac = [] gbgny = 0 sbe anzr va vasvyranzrf: vk = tvg.CnpxVqk(anzr) vac.nccraq(vk) gbgny += yra(vk) ybt('Zretvat %q vaqrkrf (%q bowrpgf).\a' % (yra(vasvyranzrf), gbgny)) vs (abg bcg.sbepr naq (gbgny < 1024 naq yra(vasvyranzrf) < 3)) \ be (bcg.sbepr naq abg gbgny): ybt('zvqk: abguvat gb qb.\a') erghea cntrf = vag(gbgny/FUN_CRE_CNTR) be 1 ovgf = vag(zngu.prvy(zngu.ybt(cntrf, 2))) ragevrf = 2**ovgf ybt('Gnoyr fvmr: %q (%q ovgf)\a' % (ragevrf*4, ovgf)) gnoyr = [0]*ragevrf gel: bf.hayvax(bhgsvyranzr) rkprcg BFReebe: cnff s = bcra(bhgsvyranzr + '.gzc', 'j+') s.jevgr('ZVQK\0\0\0\2') s.jevgr(fgehpg.cnpx('!V', ovgf)) nffreg(s.gryy() == 12) s.jevgr('\0'*4*ragevrf) sbe r va zretr(vac, ovgf, gnoyr): s.jevgr(r) s.jevgr('\0'.wbva(bf.cngu.onfranzr(c) sbe c va vasvyranzrf)) s.frrx(12) s.jevgr(fgehpg.cnpx('!%qV' % ragevrf, *gnoyr)) s.pybfr() bf.eranzr(bhgsvyranzr + '.gzc', bhgsvyranzr) # guvf vf whfg sbe grfgvat vs 0: c = tvg.CnpxZvqk(bhgsvyranzr) nffreg(yra(c.vqkanzrf) == yra(vasvyranzrf)) cevag c.vqkanzrf nffreg(yra(c) == gbgny) cv = vgre(c) sbe v va zretr(vac, gbgny, ovgf, gnoyr): nffreg(v == cv.arkg()) nffreg(c.rkvfgf(v)) cevag bhgsvyranzr bcgfcrp = """ ohc zvqk [bcgvbaf...] 
-- b,bhgchg= bhgchg zvqk svyranzr (qrsnhyg: nhgb-trarengrq) n,nhgb nhgbzngvpnyyl perngr .zvqk sebz nal havaqrkrq .vqk svyrf s,sbepr nhgbzngvpnyyl perngr .zvqk sebz *nyy* .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zvqk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen naq (bcg.nhgb be bcg.sbepr): b.sngny("lbh pna'g hfr -s/-n naq nyfb cebivqr svyranzrf") tvg.purpx_ercb_be_qvr() vs rkgen: qb_zvqk(tvg.ercb('bowrpgf/cnpx'), bcg.bhgchg, rkgen) ryvs bcg.nhgb be bcg.sbepr: cnguf = [tvg.ercb('bowrpgf/cnpx')] cnguf += tybo.tybo(tvg.ercb('vaqrk-pnpur/*/.')) sbe cngu va cnguf: ybt('zvqk: fpnaavat %f\a' % cngu) vs bcg.sbepr: qb_zvqk(cngu, bcg.bhgchg, tybo.tybo('%f/*.vqk' % cngu)) ryvs bcg.nhgb: z = tvg.CnpxVqkYvfg(cngu) arrqrq = {} sbe cnpx va z.cnpxf: # bayl .vqk svyrf jvgubhg n .zvqk ner bcra vs cnpx.anzr.raqfjvgu('.vqk'): arrqrq[cnpx.anzr] = 1 qry z qb_zvqk(cngu, bcg.bhgchg, arrqrq.xrlf()) ybt('\a') ryfr: b.sngny("lbh zhfg hfr -s be -n be cebivqr vachg svyranzrf") #!/hfe/ova/rai clguba vzcbeg flf, bf, enaqbz sebz ohc vzcbeg bcgvbaf sebz ohc.urycref vzcbeg * qrs enaqoybpx(a): y = [] sbe v va kenatr(a): y.nccraq(pue(enaqbz.enaqenatr(0,256))) erghea ''.wbva(y) bcgfcrp = """ ohc qnzntr [-a pbhag] [-f znkfvmr] [-F frrq] -- JNEAVAT: GUVF PBZZNAQ VF RKGERZRYL QNATREBHF a,ahz= ahzore bs oybpxf gb qnzntr f,fvmr= znkvzhz fvmr bs rnpu qnzntrq oybpx creprag= znkvzhz fvmr bs rnpu qnzntrq oybpx (nf n creprag bs ragver svyr) rdhny fcernq qnzntr rirayl guebhtubhg gur svyr F,frrq= enaqbz ahzore frrq (sbe ercrngnoyr grfgf) """ b = bcgvbaf.Bcgvbaf('ohc qnzntr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg rkgen: b.sngny('svyranzrf rkcrpgrq') vs bcg.frrq != Abar: enaqbz.frrq(bcg.frrq) sbe anzr va rkgen: ybt('Qnzntvat "%f"...\a' % anzr) s = bcra(anzr, 'e+o') fg = bf.sfgng(s.svyrab()) fvmr = fg.fg_fvmr vs bcg.creprag be bcg.fvmr: zf1 = vag(sybng(bcg.creprag be 0)/100.0*fvmr) be fvmr zf2 = bcg.fvmr be fvmr znkfvmr = zva(zf1, zf2) ryfr: znkfvmr = 1 puhaxf = bcg.ahz be 10 
puhaxfvmr = fvmr/puhaxf sbe e va enatr(puhaxf): fm = enaqbz.enaqenatr(1, znkfvmr+1) vs fm > fvmr: fm = fvmr vs bcg.rdhny: bsf = e*puhaxfvmr ryfr: bsf = enaqbz.enaqenatr(0, fvmr - fm + 1) ybt(' %6q olgrf ng %q\a' % (fm, bsf)) s.frrx(bsf) s.jevgr(enaqoybpx(fm)) s.pybfr() #!/hfe/ova/rai clguba vzcbeg flf, fgehpg, zznc sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * fhfcraqrq_j = Abar qrs vavg_qve(pbaa, net): tvg.vavg_ercb(net) ybt('ohc freire: ohcqve vavgvnyvmrq: %e\a' % tvg.ercbqve) pbaa.bx() qrs frg_qve(pbaa, net): tvg.purpx_ercb_be_qvr(net) ybt('ohc freire: ohcqve vf %e\a' % tvg.ercbqve) pbaa.bx() qrs yvfg_vaqrkrf(pbaa, whax): tvg.purpx_ercb_be_qvr() sbe s va bf.yvfgqve(tvg.ercb('bowrpgf/cnpx')): vs s.raqfjvgu('.vqk'): pbaa.jevgr('%f\a' % s) pbaa.bx() qrs fraq_vaqrk(pbaa, anzr): tvg.purpx_ercb_be_qvr() nffreg(anzr.svaq('/') < 0) nffreg(anzr.raqfjvgu('.vqk')) vqk = tvg.CnpxVqk(tvg.ercb('bowrpgf/cnpx/%f' % anzr)) pbaa.jevgr(fgehpg.cnpx('!V', yra(vqk.znc))) pbaa.jevgr(vqk.znc) pbaa.bx() qrs erprvir_bowrpgf(pbaa, whax): tybony fhfcraqrq_j tvg.purpx_ercb_be_qvr() fhttrfgrq = {} vs fhfcraqrq_j: j = fhfcraqrq_j fhfcraqrq_j = Abar ryfr: j = tvg.CnpxJevgre() juvyr 1: af = pbaa.ernq(4) vs abg af: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq yratgu urnqre, tbg RBS\a') a = fgehpg.hacnpx('!V', af)[0] #ybt('rkcrpgvat %q olgrf\a' % a) vs abg a: ybt('ohc freire: erprvirq %q bowrpg%f.\a' % (j.pbhag, j.pbhag!=1 naq "f" be '')) shyycngu = j.pybfr() vs shyycngu: (qve, anzr) = bf.cngu.fcyvg(shyycngu) pbaa.jevgr('%f.vqk\a' % anzr) pbaa.bx() erghea ryvs a == 0kssssssss: ybt('ohc freire: erprvir-bowrpgf fhfcraqrq.\a') fhfcraqrq_j = j pbaa.bx() erghea ohs = pbaa.ernq(a) # bowrpg fvmrf va ohc ner ernfbanoyl fznyy #ybt('ernq %q olgrf\a' % a) vs yra(ohs) < a: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq %q olgrf, tbg %q\a' % (a, yra(ohs))) (glcr, pbagrag) = tvg._qrpbqr_cnpxbow(ohs) fun = tvg.pnyp_unfu(glcr, pbagrag) byqcnpx = j.rkvfgf(fun) # SVKZR: jr bayl fhttrfg n fvatyr 
vaqrk cre plpyr, orpnhfr gur pyvrag # vf pheeragyl qhzo gb qbjaybnq zber guna bar cre plpyr naljnl. # Npghnyyl jr fubhyq svk gur pyvrag, ohg guvf vf n zvabe bcgvzvmngvba # ba gur freire fvqr. vs abg fhttrfgrq naq \ byqcnpx naq (byqcnpx == Gehr be byqcnpx.raqfjvgu('.zvqk')): # SVKZR: jr fubhyqa'g ernyyl unir gb xabj nobhg zvqk svyrf # ng guvf ynlre. Ohg rkvfgf() ba n zvqk qbrfa'g erghea gur # cnpxanzr (fvapr vg qbrfa'g xabj)... cebonoyl jr fubhyq whfg # svk gung qrsvpvrapl bs zvqk svyrf riraghnyyl, nygubhtu vg'yy # znxr gur svyrf ovttre. Guvf zrgubq vf pregnvayl abg irel # rssvpvrag. j.bowpnpur.erserfu(fxvc_zvqk = Gehr) byqcnpx = j.bowpnpur.rkvfgf(fun) ybt('arj fhttrfgvba: %e\a' % byqcnpx) nffreg(byqcnpx) nffreg(byqcnpx != Gehr) nffreg(abg byqcnpx.raqfjvgu('.zvqk')) j.bowpnpur.erserfu(fxvc_zvqk = Snyfr) vs abg fhttrfgrq naq byqcnpx: nffreg(byqcnpx.raqfjvgu('.vqk')) (qve,anzr) = bf.cngu.fcyvg(byqcnpx) vs abg (anzr va fhttrfgrq): ybt("ohc freire: fhttrfgvat vaqrk %f\a" % anzr) pbaa.jevgr('vaqrk %f\a' % anzr) fhttrfgrq[anzr] = 1 ryfr: j._enj_jevgr([ohs]) # ABGERNPURQ qrs ernq_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() e = tvg.ernq_ers(ersanzr) pbaa.jevgr('%f\a' % (e be '').rapbqr('urk')) pbaa.bx() qrs hcqngr_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() arjiny = pbaa.ernqyvar().fgevc() byqiny = pbaa.ernqyvar().fgevc() tvg.hcqngr_ers(ersanzr, arjiny.qrpbqr('urk'), byqiny.qrpbqr('urk')) pbaa.bx() qrs png(pbaa, vq): tvg.purpx_ercb_be_qvr() gel: sbe oybo va tvg.png(vq): pbaa.jevgr(fgehpg.cnpx('!V', yra(oybo))) pbaa.jevgr(oybo) rkprcg XrlReebe, r: ybt('freire: reebe: %f\a' % r) pbaa.jevgr('\0\0\0\0') pbaa.reebe(r) ryfr: pbaa.jevgr('\0\0\0\0') pbaa.bx() bcgfcrp = """ ohc freire """ b = bcgvbaf.Bcgvbaf('ohc freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') ybt('ohc freire: ernqvat sebz fgqva.\a') pbzznaqf = { 'vavg-qve': vavg_qve, 'frg-qve': frg_qve, 'yvfg-vaqrkrf': yvfg_vaqrkrf, 'fraq-vaqrk': fraq_vaqrk, 
'erprvir-bowrpgf': erprvir_bowrpgf, 'ernq-ers': ernq_ers, 'hcqngr-ers': hcqngr_ers, 'png': png, } # SVKZR: guvf cebgbpby vf gbgnyyl ynzr naq abg ng nyy shgher-cebbs. # (Rfcrpvnyyl fvapr jr nobeg pbzcyrgryl nf fbba nf *nalguvat* onq unccraf) pbaa = Pbaa(flf.fgqva, flf.fgqbhg) ye = yvarernqre(pbaa) sbe _yvar va ye: yvar = _yvar.fgevc() vs abg yvar: pbagvahr ybt('ohc freire: pbzznaq: %e\a' % yvar) jbeqf = yvar.fcyvg(' ', 1) pzq = jbeqf[0] erfg = yra(jbeqf)>1 naq jbeqf[1] be '' vs pzq == 'dhvg': oernx ryfr: pzq = pbzznaqf.trg(pzq) vs pzq: pzq(pbaa, erfg) ryfr: envfr Rkprcgvba('haxabja freire pbzznaq: %e\a' % yvar) ybt('ohc freire: qbar\a') #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc wbva [-e ubfg:cngu] [ersf be unfurf...] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc wbva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg rkgen: rkgen = yvarernqre(flf.fgqva) erg = 0 vs bcg.erzbgr: pyv = pyvrag.Pyvrag(bcg.erzbgr) png = pyv.png ryfr: pc = tvg.PngCvcr() png = pc.wbva sbe vq va rkgen: gel: sbe oybo va png(vq): flf.fgqbhg.jevgr(oybo) rkprcg XrlReebe, r: flf.fgqbhg.syhfu() ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, er, reeab, fgng, gvzr, zngu sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, vaqrk, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc fnir [-gp] [-a anzr] -- e,erzbgr= erzbgr ercbfvgbel cngu g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) d,dhvrg qba'g fubj cebterff zrgre fznyyre= bayl onpx hc svyrf fznyyre guna a olgrf """ b = bcgvbaf.Bcgvbaf('ohc fnir', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny("hfr bar be zber bs -g, -p, -a") vs abg rkgen: b.sngny("ab svyranzrf 
tvira") bcg.cebterff = (vfggl naq abg bcg.dhvrg) bcg.fznyyre = cnefr_ahz(bcg.fznyyre be 0) vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() unaqyr_pgey_p() qrs rngfynfu(qve): vs qve.raqfjvgu('/'): erghea qve[:-1] ryfr: erghea qve cnegf = [''] funyvfgf = [[]] qrs _chfu(cneg): nffreg(cneg) cnegf.nccraq(cneg) funyvfgf.nccraq([]) qrs _cbc(sbepr_gerr): nffreg(yra(cnegf) >= 1) cneg = cnegf.cbc() funyvfg = funyvfgf.cbc() gerr = sbepr_gerr be j.arj_gerr(funyvfg) vs funyvfgf: funyvfgf[-1].nccraq(('40000', cneg, gerr)) ryfr: # guvf jnf gur gbcyriry, fb chg vg onpx sbe fnavgl funyvfgf.nccraq(funyvfg) erghea gerr ynfgerznva = Abar qrs cebterff_ercbeg(a): tybony pbhag, fhopbhag, ynfgerznva fhopbhag += a pp = pbhag + fhopbhag cpg = gbgny naq (pp*100.0/gbgny) be 0 abj = gvzr.gvzr() ryncfrq = abj - gfgneg xcf = ryncfrq naq vag(pp/1024./ryncfrq) xcf_senp = 10 ** vag(zngu.ybt(xcf+1, 10) - 1) xcf = vag(xcf/xcf_senp)*xcf_senp vs pp: erznva = ryncfrq*1.0/pp * (gbgny-pp) ryfr: erznva = 0.0 vs (ynfgerznva naq (erznva > ynfgerznva) naq ((erznva - ynfgerznva)/ynfgerznva < 0.05)): erznva = ynfgerznva ryfr: ynfgerznva = erznva ubhef = vag(erznva/60/60) zvaf = vag(erznva/60 - ubhef*60) frpf = vag(erznva - ubhef*60*60 - zvaf*60) vs ryncfrq < 30: erznvafge = '' xcffge = '' ryfr: xcffge = '%qx/f' % xcf vs ubhef: erznvafge = '%qu%qz' % (ubhef, zvaf) ryvs zvaf: erznvafge = '%qz%q' % (zvaf, frpf) ryfr: erznvafge = '%qf' % frpf cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf) %f %f\e' % (cpg, pp/1024, gbgny/1024, spbhag, sgbgny, erznvafge, xcffge)) e = vaqrk.Ernqre(tvg.ercb('ohcvaqrk')) qrs nyernql_fnirq(rag): erghea rag.vf_inyvq() naq 
j.rkvfgf(rag.fun) naq rag.fun qrs jnagerphefr_cer(rag): erghea abg nyernql_fnirq(rag) qrs jnagerphefr_qhevat(rag): erghea abg nyernql_fnirq(rag) be rag.fun_zvffvat() gbgny = sgbgny = 0 vs bcg.cebterff: sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_cer): vs abg (sgbgny % 10024): cebterff('Ernqvat vaqrk: %q\e' % sgbgny) rkvfgf = rag.rkvfgf() unfuinyvq = nyernql_fnirq(rag) rag.frg_fun_zvffvat(abg unfuinyvq) vs abg bcg.fznyyre be rag.fvmr < bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: gbgny += rag.fvmr sgbgny += 1 cebterff('Ernqvat vaqrk: %q, qbar.\a' % sgbgny) unfufcyvg.cebterff_pnyyonpx = cebterff_ercbeg gfgneg = gvzr.gvzr() pbhag = fhopbhag = spbhag = 0 ynfgfxvc_anzr = Abar ynfgqve = '' sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_qhevat): (qve, svyr) = bf.cngu.fcyvg(rag.anzr) rkvfgf = (rag.syntf & vaqrk.VK_RKVFGF) unfuinyvq = nyernql_fnirq(rag) jnfzvffvat = rag.fun_zvffvat() byqfvmr = rag.fvmr vs bcg.ireobfr: vs abg rkvfgf: fgnghf = 'Q' ryvs abg unfuinyvq: vs rag.fun == vaqrk.RZCGL_FUN: fgnghf = 'N' ryfr: fgnghf = 'Z' ryfr: fgnghf = ' ' vs bcg.ireobfr >= 2: ybt('%f %-70f\a' % (fgnghf, rag.anzr)) ryvs abg fgng.F_VFQVE(rag.zbqr) naq ynfgqve != qve: vs abg ynfgqve.fgnegfjvgu(qve): ybt('%f %-70f\a' % (fgnghf, bf.cngu.wbva(qve, ''))) ynfgqve = qve vs bcg.cebterff: cebterff_ercbeg(0) spbhag += 1 vs abg rkvfgf: pbagvahr vs bcg.fznyyre naq rag.fvmr >= bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: nqq_reebe('fxvccvat ynetr svyr "%f"' % rag.anzr) ynfgfxvc_anzr = rag.anzr pbagvahr nffreg(qve.fgnegfjvgu('/')) qvec = qve.fcyvg('/') juvyr cnegf > qvec: _cbc(sbepr_gerr = Abar) vs qve != '/': sbe cneg va qvec[yra(cnegf):]: _chfu(cneg) vs abg svyr: # ab svyranzr cbegvba zrnaf guvf vf n fhoqve. Ohg # fho/cneragqverpgbevrf nyernql unaqyrq va gur cbc/chfu() cneg nobir. 
byqgerr = nyernql_fnirq(rag) # znl or Abar arjgerr = _cbc(sbepr_gerr = byqgerr) vs abg byqgerr: vs ynfgfxvc_anzr naq ynfgfxvc_anzr.fgnegfjvgu(rag.anzr): rag.vainyvqngr() ryfr: rag.inyvqngr(040000, arjgerr) rag.ercnpx() vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr pbagvahr # vg'f abg n qverpgbel vq = Abar vs unfuinyvq: zbqr = '%b' % rag.tvgzbqr vq = rag.fun funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) ryfr: vs fgng.F_VFERT(rag.zbqr): gel: s = unfufcyvg.bcra_abngvzr(rag.anzr) rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = unfufcyvg.fcyvg_gb_oybo_be_gerr(j, [s]) ryfr: vs fgng.F_VFQVE(rag.zbqr): nffreg(0) # unaqyrq nobir ryvs fgng.F_VFYAX(rag.zbqr): gel: ey = bf.ernqyvax(rag.anzr) rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = ('120000', j.arj_oybo(ey)) ryfr: nqq_reebe(Rkprcgvba('fxvccvat fcrpvny svyr "%f"' % rag.anzr)) ynfgfxvc_anzr = rag.anzr vs vq: rag.inyvqngr(vag(zbqr, 8), vq) rag.ercnpx() funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr fhopbhag = 0 vs bcg.cebterff: cpg = gbgny naq pbhag*100.0/gbgny be 100 cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf), qbar. 
\a' % (cpg, pbhag/1024, gbgny/1024, spbhag, sgbgny)) juvyr yra(cnegf) > 1: _cbc(sbepr_gerr = Abar) nffreg(yra(funyvfgf) == 1) gerr = j.arj_gerr(funyvfgf[-1]) vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fnir\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq juvyr fnivat.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc gvpx """ b = bcgvbaf.Bcgvbaf('ohc gvpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") g = gvzr.gvzr() gyrsg = 1 - (g - vag(g)) gvzr.fyrrc(gyrsg) #!/hfe/ova/rai clguba vzcbeg bf, flf, fgng, gvzr sebz ohc vzcbeg bcgvbaf, tvg, vaqrk, qerphefr sebz ohc.urycref vzcbeg * qrs zretr_vaqrkrf(bhg, e1, e2): sbe r va vaqrk.ZretrVgre([e1, e2]): # SVKZR: fubhyqa'g jr erzbir qryrgrq ragevrf riraghnyyl? Jura? 
bhg.nqq_vkragel(r) pynff VgreUrycre: qrs __vavg__(frys, y): frys.v = vgre(y) frys.phe = Abar frys.arkg() qrs arkg(frys): gel: frys.phe = frys.v.arkg() rkprcg FgbcVgrengvba: frys.phe = Abar erghea frys.phe qrs purpx_vaqrk(ernqre): gel: ybt('purpx: purpxvat sbejneq vgrengvba...\a') r = Abar q = {} sbe r va ernqre.sbejneq_vgre(): vs r.puvyqera_a: vs bcg.ireobfr: ybt('%08k+%-4q %e\a' % (r.puvyqera_bsf, r.puvyqera_a, r.anzr)) nffreg(r.puvyqera_bsf) nffreg(r.anzr.raqfjvgu('/')) nffreg(abg q.trg(r.puvyqera_bsf)) q[r.puvyqera_bsf] = 1 vs r.syntf & vaqrk.VK_UNFUINYVQ: nffreg(r.fun != vaqrk.RZCGL_FUN) nffreg(r.tvgzbqr) nffreg(abg r be r.anzr == '/') # ynfg ragel vf *nyjnlf* / ybt('purpx: purpxvat abezny vgrengvba...\a') ynfg = Abar sbe r va ernqre: vs ynfg: nffreg(ynfg > r.anzr) ynfg = r.anzr rkprcg: ybt('vaqrk reebe! ng %e\a' % r) envfr ybt('purpx: cnffrq.\a') qrs hcqngr_vaqrk(gbc): ev = vaqrk.Ernqre(vaqrksvyr) jv = vaqrk.Jevgre(vaqrksvyr) evt = VgreUrycre(ev.vgre(anzr=gbc)) gfgneg = vag(gvzr.gvzr()) unfutra = Abar vs bcg.snxr_inyvq: qrs unfutra(anzr): erghea (0100644, vaqrk.SNXR_FUN) gbgny = 0 sbe (cngu,cfg) va qerphefr.erphefvir_qveyvfg([gbc], kqri=bcg.kqri): vs bcg.ireobfr>=2 be (bcg.ireobfr==1 naq fgng.F_VFQVE(cfg.fg_zbqr)): flf.fgqbhg.jevgr('%f\a' % cngu) flf.fgqbhg.syhfu() cebterff('Vaqrkvat: %q\e' % gbgny) ryvs abg (gbgny % 128): cebterff('Vaqrkvat: %q\e' % gbgny) gbgny += 1 juvyr evt.phe naq evt.phe.anzr > cngu: # qryrgrq cnguf vs evt.phe.rkvfgf(): evt.phe.frg_qryrgrq() evt.phe.ercnpx() evt.arkg() vs evt.phe naq evt.phe.anzr == cngu: # cnguf gung nyernql rkvfgrq vs cfg: evt.phe.sebz_fgng(cfg, gfgneg) vs abg (evt.phe.syntf & vaqrk.VK_UNFUINYVQ): vs unfutra: (evt.phe.tvgzbqr, evt.phe.fun) = unfutra(cngu) evt.phe.syntf |= vaqrk.VK_UNFUINYVQ vs bcg.snxr_vainyvq: evt.phe.vainyvqngr() evt.phe.ercnpx() evt.arkg() ryfr: # arj cnguf jv.nqq(cngu, cfg, unfutra = unfutra) cebterff('Vaqrkvat: %q, qbar.\a' % gbgny) vs ev.rkvfgf(): ev.fnir() jv.syhfu() vs jv.pbhag: je = 
jv.arj_ernqre() vs bcg.purpx: ybt('purpx: orsber zretvat: byqsvyr\a') purpx_vaqrk(ev) ybt('purpx: orsber zretvat: arjsvyr\a') purpx_vaqrk(je) zv = vaqrk.Jevgre(vaqrksvyr) zretr_vaqrkrf(zv, ev, je) ev.pybfr() zv.pybfr() je.pybfr() jv.nobeg() ryfr: jv.pybfr() bcgfcrp = """ ohc vaqrk <-c|z|h> [bcgvbaf...] -- c,cevag cevag gur vaqrk ragevrf sbe gur tvira anzrf (nyfb jbexf jvgu -h) z,zbqvsvrq cevag bayl nqqrq/qryrgrq/zbqvsvrq svyrf (vzcyvrf -c) f,fgnghf cevag rnpu svyranzr jvgu n fgnghf pune (N/Z/Q) (vzcyvrf -c) U,unfu cevag gur unfu sbe rnpu bowrpg arkg gb vgf anzr (vzcyvrf -c) y,ybat cevag zber vasbezngvba nobhg rnpu svyr h,hcqngr (erphefviryl) hcqngr gur vaqrk ragevrf sbe gur tvira svyranzrf k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf snxr-inyvq znex nyy vaqrk ragevrf nf hc-gb-qngr rira vs gurl nera'g snxr-vainyvq znex nyy vaqrk ragevrf nf vainyvq purpx pnershyyl purpx vaqrk svyr vagrtevgl s,vaqrksvyr= gur anzr bs gur vaqrk svyr (qrsnhyg 'vaqrk') i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) """ b = bcgvbaf.Bcgvbaf('ohc vaqrk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg (bcg.zbqvsvrq be bcg['cevag'] be bcg.fgnghf be bcg.hcqngr be bcg.purpx): b.sngny('fhccyl bar be zber bs -c, -f, -z, -h, be --purpx') vs (bcg.snxr_inyvq be bcg.snxr_vainyvq) naq abg bcg.hcqngr: b.sngny('--snxr-{va,}inyvq ner zrnavatyrff jvgubhg -h') vs bcg.snxr_inyvq naq bcg.snxr_vainyvq: b.sngny('--snxr-inyvq vf vapbzcngvoyr jvgu --snxr-vainyvq') tvg.purpx_ercb_be_qvr() vaqrksvyr = bcg.vaqrksvyr be tvg.ercb('ohcvaqrk') unaqyr_pgey_p() vs bcg.purpx: ybt('purpx: fgnegvat vavgvny purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) cnguf = vaqrk.erqhpr_cnguf(rkgen) vs bcg.hcqngr: vs abg cnguf: b.sngny('hcqngr (-h) erdhrfgrq ohg ab cnguf tvira') sbe (ec,cngu) va cnguf: hcqngr_vaqrk(ec) vs bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq: sbe (anzr, rag) va vaqrk.Ernqre(vaqrksvyr).svygre(rkgen be ['']): vs (bcg.zbqvsvrq naq (rag.vf_inyvq() be rag.vf_qryrgrq() be abg 
rag.zbqr)): pbagvahr yvar = '' vs bcg.fgnghf: vs rag.vf_qryrgrq(): yvar += 'Q ' ryvs abg rag.vf_inyvq(): vs rag.fun == vaqrk.RZCGL_FUN: yvar += 'N ' ryfr: yvar += 'Z ' ryfr: yvar += ' ' vs bcg.unfu: yvar += rag.fun.rapbqr('urk') + ' ' vs bcg.ybat: yvar += "%7f %7f " % (bpg(rag.zbqr), bpg(rag.tvgzbqr)) cevag yvar + (anzr be './') vs bcg.purpx naq (bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq be bcg.hcqngr): ybt('purpx: fgnegvat svany purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg sebz ohc vzcbeg bcgvbaf, urycref bcgfcrp = """ ohc eonpxhc-freire -- Guvf pbzznaq vf abg vagraqrq gb or eha znahnyyl. """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc-freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') # trg gur fhopbzznaq'f neti. # Abeznyyl jr pbhyq whfg cnff guvf ba gur pbzznaq yvar, ohg fvapr jr'yy bsgra # or trggvat pnyyrq ba gur bgure raq bs na ffu cvcr, juvpu graqf gb znatyr # neti (ol fraqvat vg ivn gur furyy), guvf jnl vf zhpu fnsre. ohs = flf.fgqva.ernq(4) fm = fgehpg.hacnpx('!V', ohs)[0] nffreg(fm > 0) nffreg(fm < 1000000) ohs = flf.fgqva.ernq(fm) nffreg(yra(ohs) == fm) neti = ohs.fcyvg('\0') # fgqva/fgqbhg ner fhccbfrqyl pbaarpgrq gb 'ohc freire' gung gur pnyyre # fgnegrq sbe hf (bsgra ba gur bgure raq bs na ffu ghaary), fb jr qba'g jnag # gb zvfhfr gurz. Zbir gurz bhg bs gur jnl, gura ercynpr fgqbhg jvgu # n cbvagre gb fgqree va pnfr bhe fhopbzznaq jnagf gb qb fbzrguvat jvgu vg. # # Vg zvtug or avpr gb qb gur fnzr jvgu fgqva, ohg zl rkcrevzragf fubjrq gung # ffu frrzf gb znxr vgf puvyq'f fgqree n ernqnoyr-ohg-arire-ernqf-nalguvat # fbpxrg. Gurl ernyyl fubhyq unir hfrq fuhgqbja(FUHG_JE) ba gur bgure raq # bs vg, ohg cebonoyl qvqa'g. Naljnl, vg'f gbb zrffl, fb yrg'f whfg znxr fher # nalbar ernqvat sebz fgqva vf qvfnccbvagrq. 
# # (Lbh pna'g whfg yrnir fgqva/fgqbhg "abg bcra" ol pybfvat gur svyr # qrfpevcgbef. Gura gur arkg svyr gung bcraf vf nhgbzngvpnyyl nffvtarq 0 be 1, # naq crbcyr *gelvat* gb ernq/jevgr fgqva/fgqbhg trg fperjrq.) bf.qhc2(0, 3) bf.qhc2(1, 4) bf.qhc2(2, 1) sq = bf.bcra('/qri/ahyy', bf.B_EQBAYL) bf.qhc2(sq, 0) bf.pybfr(sq) bf.raiveba['OHC_FREIRE_ERIREFR'] = urycref.ubfganzr() bf.rkrpic(neti[0], neti) flf.rkvg(99) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo, fhocebprff, gvzr sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * cne2_bx = 0 ahyys = bcra('/qri/ahyy') qrs qroht(f): vs bcg.ireobfr: ybt(f) qrs eha(neti): # ng yrnfg va clguba 2.5, hfvat "fgqbhg=2" be "fgqbhg=flf.fgqree" orybj # qbrfa'g npghnyyl jbex, orpnhfr fhocebprff pybfrf sq #2 evtug orsber # rkrpvat sbe fbzr ernfba. Fb jr jbex nebhaq vg ol qhcyvpngvat gur sq # svefg. sq = bf.qhc(2) # pbcl fgqree gel: c = fhocebprff.Cbcra(neti, fgqbhg=sq, pybfr_sqf=Snyfr) erghea c.jnvg() svanyyl: bf.pybfr(sq) qrs cne2_frghc(): tybony cne2_bx ei = 1 gel: c = fhocebprff.Cbcra(['cne2', '--uryc'], fgqbhg=ahyys, fgqree=ahyys, fgqva=ahyys) ei = c.jnvg() rkprcg BFReebe: ybt('sfpx: jneavat: cne2 abg sbhaq; qvfnoyvat erpbirel srngherf.\a') ryfr: cne2_bx = 1 qrs cnei(yiy): vs bcg.ireobfr >= yiy: vs vfggl: erghea [] ryfr: erghea ['-d'] ryfr: erghea ['-dd'] qrs cne2_trarengr(onfr): erghea eha(['cne2', 'perngr', '-a1', '-p200'] + cnei(2) + ['--', onfr, onfr+'.cnpx', onfr+'.vqk']) qrs cne2_irevsl(onfr): erghea eha(['cne2', 'irevsl'] + cnei(3) + ['--', onfr]) qrs cne2_ercnve(onfr): erghea eha(['cne2', 'ercnve'] + cnei(2) + ['--', onfr]) qrs dhvpx_irevsl(onfr): s = bcra(onfr + '.cnpx', 'eo') s.frrx(-20, 2) jnagfhz = s.ernq(20) nffreg(yra(jnagfhz) == 20) s.frrx(0) fhz = Fun1() sbe o va puhaxlernqre(s, bf.sfgng(s.svyrab()).fg_fvmr - 20): fhz.hcqngr(o) vs fhz.qvtrfg() != jnagfhz: envfr InyhrReebe('rkcrpgrq %e, tbg %e' % (jnagfhz.rapbqr('urk'), fhz.urkqvtrfg())) qrs tvg_irevsl(onfr): vs bcg.dhvpx: gel: dhvpx_irevsl(onfr) rkprcg Rkprcgvba, r: 
qroht('reebe: %f\a' % r) erghea 1 erghea 0 ryfr: erghea eha(['tvg', 'irevsl-cnpx', '--', onfr]) qrs qb_cnpx(onfr, ynfg): pbqr = 0 vs cne2_bx naq cne2_rkvfgf naq (bcg.ercnve be abg bcg.trarengr): ierfhyg = cne2_irevsl(onfr) vs ierfhyg != 0: vs bcg.ercnve: eerfhyg = cne2_ercnve(onfr) vs eerfhyg != 0: cevag '%f cne2 ercnve: snvyrq (%q)' % (ynfg, eerfhyg) pbqr = eerfhyg ryfr: cevag '%f cne2 ercnve: fhpprrqrq (0)' % ynfg pbqr = 100 ryfr: cevag '%f cne2 irevsl: snvyrq (%q)' % (ynfg, ierfhyg) pbqr = ierfhyg ryfr: cevag '%f bx' % ynfg ryvs abg bcg.trarengr be (cne2_bx naq abg cne2_rkvfgf): terfhyg = tvg_irevsl(onfr) vs terfhyg != 0: cevag '%f tvg irevsl: snvyrq (%q)' % (ynfg, terfhyg) pbqr = terfhyg ryfr: vs cne2_bx naq bcg.trarengr: cerfhyg = cne2_trarengr(onfr) vs cerfhyg != 0: cevag '%f cne2 perngr: snvyrq (%q)' % (ynfg, cerfhyg) pbqr = cerfhyg ryfr: cevag '%f bx' % ynfg ryfr: cevag '%f bx' % ynfg ryfr: nffreg(bcg.trarengr naq (abg cne2_bx be cne2_rkvfgf)) qroht(' fxvccrq: cne2 svyr nyernql trarengrq.\a') erghea pbqr bcgfcrp = """ ohc sfpx [bcgvbaf...] [svyranzrf...] -- e,ercnve nggrzcg gb ercnve reebef hfvat cne2 (qnatrebhf!) 
t,trarengr trarengr nhgb-ercnve vasbezngvba hfvat cne2 i,ireobfr vapernfr ireobfvgl (pna or hfrq zber guna bapr) dhvpx whfg purpx cnpx fun1fhz, qba'g hfr tvg irevsl-cnpx w,wbof= eha 'a' wbof va cnenyyry cne2-bx vzzrqvngryl erghea 0 vs cne2 vf bx, 1 vs abg qvfnoyr-cne2 vtaber cne2 rira vs vg vf ninvynoyr """ b = bcgvbaf.Bcgvbaf('ohc sfpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) cne2_frghc() vs bcg.cne2_bx: vs cne2_bx: flf.rkvg(0) # 'gehr' va fu ryfr: flf.rkvg(1) vs bcg.qvfnoyr_cne2: cne2_bx = 0 tvg.purpx_ercb_be_qvr() vs abg rkgen: qroht('sfpx: Ab svyranzrf tvira: purpxvat nyy cnpxf.\a') rkgen = tybo.tybo(tvg.ercb('bowrpgf/cnpx/*.cnpx')) pbqr = 0 pbhag = 0 bhgfgnaqvat = {} sbe anzr va rkgen: vs anzr.raqfjvgu('.cnpx'): onfr = anzr[:-5] ryvs anzr.raqfjvgu('.vqk'): onfr = anzr[:-4] ryvs anzr.raqfjvgu('.cne2'): onfr = anzr[:-5] ryvs bf.cngu.rkvfgf(anzr + '.cnpx'): onfr = anzr ryfr: envfr Rkprcgvba('%f vf abg n cnpx svyr!' % anzr) (qve,ynfg) = bf.cngu.fcyvg(onfr) cne2_rkvfgf = bf.cngu.rkvfgf(onfr + '.cne2') vs cne2_rkvfgf naq bf.fgng(onfr + '.cne2').fg_fvmr == 0: cne2_rkvfgf = 0 flf.fgqbhg.syhfu() qroht('sfpx: purpxvat %f (%f)\a' % (ynfg, cne2_bx naq cne2_rkvfgf naq 'cne2' be 'tvg')) vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.wbof: ap = qb_cnpx(onfr, ynfg) pbqr = pbqr be ap pbhag += 1 ryfr: juvyr yra(bhgfgnaqvat) >= bcg.wbof: (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 cvq = bf.sbex() vs cvq: # cnerag bhgfgnaqvat[cvq] = 1 ryfr: # puvyq gel: flf.rkvg(qb_cnpx(onfr, ynfg)) rkprcg Rkprcgvba, r: ybt('rkprcgvba: %e\a' % r) flf.rkvg(99) juvyr yra(bhgfgnaqvat): (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.ireobfr naq vfggl: ybt('sfpx qbar. 
\a') flf.rkvg(pbqr) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg, trgbcg, fhocebprff, fvtany sebz ohc vzcbeg bcgvbaf, ffu sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc eonpxhc vaqrk ... ohc eonpxhc fnir ... ohc eonpxhc fcyvg ... """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc', bcgfcrp, bcgshap=trgbcg.trgbcg) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) < 2: b.sngny('nethzragf rkcrpgrq') pynff FvtRkprcgvba(Rkprcgvba): qrs __vavg__(frys, fvtahz): frys.fvtahz = fvtahz Rkprcgvba.__vavg__(frys, 'fvtany %q erprvirq' % fvtahz) qrs unaqyre(fvtahz, senzr): envfr FvtRkprcgvba(fvtahz) fvtany.fvtany(fvtany.FVTGREZ, unaqyre) fvtany.fvtany(fvtany.FVTVAG, unaqyre) fc = Abar c = Abar erg = 99 gel: ubfganzr = rkgen[0] neti = rkgen[1:] c = ffu.pbaarpg(ubfganzr, 'eonpxhc-freire') netif = '\0'.wbva(['ohc'] + neti) c.fgqva.jevgr(fgehpg.cnpx('!V', yra(netif)) + netif) c.fgqva.syhfu() znva_rkr = bf.raiveba.trg('OHC_ZNVA_RKR') be flf.neti[0] fc = fhocebprff.Cbcra([znva_rkr, 'freire'], fgqva=c.fgqbhg, fgqbhg=c.fgqva) c.fgqva.pybfr() c.fgqbhg.pybfr() svanyyl: juvyr 1: # vs jr trg n fvtany juvyr jnvgvat, jr unir gb xrrc jnvgvat, whfg # va pnfr bhe puvyq qbrfa'g qvr. 
gel: erg = c.jnvg() fc.jnvg() oernx rkprcg FvtRkprcgvba, r: ybt('\aohc eonpxhc: %f\a' % r) bf.xvyy(c.cvq, r.fvtahz) erg = 84 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc arjyvare """ b = bcgvbaf.Bcgvbaf('ohc arjyvare', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") e = er.pbzcvyr(e'([\e\a])') ynfgyra = 0 nyy = '' juvyr 1: y = e.fcyvg(nyy, 1) vs yra(y) <= 1: gel: o = bf.ernq(flf.fgqva.svyrab(), 4096) rkprcg XrlobneqVagreehcg: oernx vs abg o: oernx nyy += o ryfr: nffreg(yra(y) == 3) (yvar, fcyvgpune, nyy) = y #fcyvgpune = '\a' flf.fgqbhg.jevgr('%-*f%f' % (ynfgyra, yvar, fcyvgpune)) vs fcyvgpune == '\e': ynfgyra = yra(yvar) ryfr: ynfgyra = 0 flf.fgqbhg.syhfu() vs ynfgyra be nyy: flf.fgqbhg.jevgr('%-*f\a' % (ynfgyra, nyy)) #!/hfe/ova/rai clguba vzcbeg flf sebz ohc vzcbeg bcgvbaf, tvg, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc znetva """ b = bcgvbaf.Bcgvbaf('ohc znetva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") tvg.purpx_ercb_be_qvr() #tvg.vtaber_zvqk = 1 zv = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) ynfg = '\0'*20 ybatzngpu = 0 sbe v va zv: vs v == ynfg: pbagvahr #nffreg(fge(v) >= ynfg) cz = _unfufcyvg.ovgzngpu(ynfg, v) ybatzngpu = znk(ybatzngpu, cz) ynfg = v cevag ybatzngpu #!/hfe/ova/rai clguba sebz ohc vzcbeg bcgvbaf, qerphefr sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc qerphefr -- k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf d,dhvrg qba'g npghnyyl cevag svyranzrf cebsvyr eha haqre gur clguba cebsvyre """ b = bcgvbaf.Bcgvbaf('ohc qerphefr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar svyranzr rkcrpgrq") vg = qerphefr.erphefvir_qveyvfg(rkgen, bcg.kqri) vs bcg.cebsvyr: vzcbeg pCebsvyr qrs qb_vg(): sbe v va vg: cnff pCebsvyr.eha('qb_vg()') ryfr: vs bcg.dhvrg: sbe v va vg: cnff ryfr: sbe (anzr,fg) va vg: cevag anzr vs fnirq_reebef: 
ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc fcyvg [-gpo] [-a anzr] [--orapu] [svyranzrf...] -- e,erzbgr= erzbgr ercbfvgbel cngu o,oybof bhgchg n frevrf bs oybo vqf g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) A,abbc qba'g npghnyyl fnir gur qngn naljurer d,dhvrg qba'g cevag cebterff zrffntrf i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) pbcl whfg pbcl vachg gb bhgchg, unfufcyvggvat nybat gur jnl orapu cevag orapuznex gvzvatf gb fgqree znk-cnpx-fvmr= znkvzhz olgrf va n fvatyr cnpx znk-cnpx-bowrpgf= znkvzhz ahzore bs bowrpgf va n fvatyr cnpx snabhg= znkvzhz ahzore bs oybof va n fvatyr gerr """ b = bcgvbaf.Bcgvbaf('ohc fcyvg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr be bcg.abbc be bcg.pbcl): b.sngny("hfr bar be zber bs -o, -g, -p, -a, -A, --pbcl") vs (bcg.abbc be bcg.pbcl) naq (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny('-A vf vapbzcngvoyr jvgu -o, -g, -p, -a') vs bcg.ireobfr >= 2: tvg.ireobfr = bcg.ireobfr - 1 bcg.orapu = 1 vs bcg.znk_cnpx_fvmr: unfufcyvg.znk_cnpx_fvmr = cnefr_ahz(bcg.znk_cnpx_fvmr) vs bcg.znk_cnpx_bowrpgf: unfufcyvg.znk_cnpx_bowrpgf = cnefr_ahz(bcg.znk_cnpx_bowrpgf) vs bcg.snabhg: unfufcyvg.snabhg = cnefr_ahz(bcg.snabhg) vs bcg.oybof: unfufcyvg.snabhg = 0 vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") fgneg_gvzr = gvzr.gvzr() ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.abbc be bcg.pbcl: pyv = j = byqers = Abar ryvs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr 
naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() svyrf = rkgen naq (bcra(sa) sbe sa va rkgen) be [flf.fgqva] vs j: funyvfg = unfufcyvg.fcyvg_gb_funyvfg(j, svyrf) gerr = j.arj_gerr(funyvfg) ryfr: ynfg = 0 sbe (oybo, ovgf) va unfufcyvg.unfufcyvg_vgre(svyrf): unfufcyvg.gbgny_fcyvg += yra(oybo) vs bcg.pbcl: flf.fgqbhg.jevgr(fge(oybo)) zrtf = unfufcyvg.gbgny_fcyvg/1024/1024 vs abg bcg.dhvrg naq ynfg != zrtf: cebterff('%q Zolgrf ernq\e' % zrtf) ynfg = zrtf cebterff('%q Zolgrf ernq, qbar.\a' % zrtf) vs bcg.ireobfr: ybt('\a') vs bcg.oybof: sbe (zbqr,anzr,ova) va funyvfg: cevag ova.rapbqr('urk') vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fcyvg\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') vs j: j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() frpf = gvzr.gvzr() - fgneg_gvzr fvmr = unfufcyvg.gbgny_fcyvg vs bcg.orapu: ybt('\aohc: %.2sxolgrf va %.2s frpf = %.2s xolgrf/frp\a' % (fvmr/1024., frpf, fvmr/1024./frpf)) #!/hfe/ova/rai clguba vzcbeg flf, er, fgehpg, zznc sebz ohc vzcbeg tvg, bcgvbaf sebz ohc.urycref vzcbeg * qrs f_sebz_olgrf(olgrf): pyvfg = [pue(o) sbe o va olgrf] erghea ''.wbva(pyvfg) qrs ercbeg(pbhag): svryqf = ['IzFvmr', 'IzEFF', 'IzQngn', 'IzFgx'] q = {} sbe yvar va bcra('/cebp/frys/fgnghf').ernqyvarf(): y = er.fcyvg(e':\f*', yvar.fgevc(), 1) q[y[0]] = y[1] vs pbhag >= 0: r1 = pbhag svryqf = [q[x] sbe x va svryqf] ryfr: r1 = '' cevag ('%9f ' + ('%10f ' * yra(svryqf))) % ghcyr([r1] + svryqf) flf.fgqbhg.syhfu() bcgfcrp = """ ohc zrzgrfg [-a ryrzragf] [-p plpyrf] -- a,ahzore= ahzore bs bowrpgf cre plpyr p,plpyrf= ahzore bs plpyrf gb eha vtaber-zvqk vtaber .zvqk svyrf, hfr bayl .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zrzgrfg', bcgfcrp) (bcg, syntf, rkgen) = 
b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') tvg.vtaber_zvqk = bcg.vtaber_zvqk tvg.purpx_ercb_be_qvr() z = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) plpyrf = bcg.plpyrf be 100 ahzore = bcg.ahzore be 10000 ercbeg(-1) s = bcra('/qri/henaqbz') n = zznc.zznc(-1, 20) ercbeg(0) sbe p va kenatr(plpyrf): sbe a va kenatr(ahzore): o = s.ernq(3) vs 0: olgrf = yvfg(fgehpg.hacnpx('!OOO', o)) + [0]*17 olgrf[2] &= 0ks0 ova = fgehpg.cnpx('!20f', f_sebz_olgrf(olgrf)) ryfr: n[0:2] = o[0:2] n[2] = pue(beq(o[2]) & 0ks0) ova = fge(n[0:20]) #cevag ova.rapbqr('urk') z.rkvfgf(ova) ercbeg((p+1)*ahzore) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * qrs cevag_abqr(grkg, a): cersvk = '' vs bcg.unfu: cersvk += "%f " % a.unfu.rapbqr('urk') vs fgng.F_VFQVE(a.zbqr): cevag '%f%f/' % (cersvk, grkg) ryvs fgng.F_VFYAX(a.zbqr): cevag '%f%f@' % (cersvk, grkg) ryfr: cevag '%f%f' % (cersvk, grkg) bcgfcrp = """ ohc yf -- f,unfu fubj unfu sbe rnpu svyr """ b = bcgvbaf.Bcgvbaf('ohc yf', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) vs abg rkgen: rkgen = ['/'] erg = 0 sbe q va rkgen: gel: a = gbc.yerfbyir(q) vs fgng.F_VFQVE(a.zbqr): sbe fho va a: cevag_abqr(fho.anzr, fho) ryfr: cevag_abqr(q, a) rkprcg isf.AbqrReebe, r: ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er, fgng, ernqyvar, sazngpu sebz ohc vzcbeg bcgvbaf, tvg, fudhbgr, isf sebz ohc.urycref vzcbeg * qrs abqr_anzr(grkg, a): vs fgng.F_VFQVE(a.zbqr): erghea '%f/' % grkg ryvs fgng.F_VFYAX(a.zbqr): erghea '%f@' % grkg ryfr: erghea '%f' % grkg qrs qb_yf(cngu, a): y = [] vs fgng.F_VFQVE(a.zbqr): sbe fho va a: y.nccraq(abqr_anzr(fho.anzr, fho)) ryfr: y.nccraq(abqr_anzr(cngu, a)) cevag pbyhzangr(y, '') qrs jevgr_gb_svyr(vas, bhgs): sbe oybo va puhaxlernqre(vas): bhgs.jevgr(oybo) qrs vachgvgre(): vs bf.vfnggl(flf.fgqva.svyrab()): juvyr 1: gel: lvryq enj_vachg('ohc> ') rkprcg RBSReebe: oernx 
ryfr: sbe yvar va flf.fgqva: lvryq yvar qrs _pbzcyrgre_trg_fhof(yvar): (dglcr, ynfgjbeq) = fudhbgr.hasvavfurq_jbeq(yvar) (qve,anzr) = bf.cngu.fcyvg(ynfgjbeq) #ybt('\apbzcyrgre: %e %e %e\a' % (dglcr, ynfgjbeq, grkg)) a = cjq.erfbyir(qve) fhof = yvfg(svygre(ynzoqn k: k.anzr.fgnegfjvgu(anzr), a.fhof())) erghea (qve, anzr, dglcr, ynfgjbeq, fhof) _ynfg_yvar = Abar _ynfg_erf = Abar qrs pbzcyrgre(grkg, fgngr): tybony _ynfg_yvar tybony _ynfg_erf gel: yvar = ernqyvar.trg_yvar_ohssre()[:ernqyvar.trg_raqvqk()] vs _ynfg_yvar != yvar: _ynfg_erf = _pbzcyrgre_trg_fhof(yvar) _ynfg_yvar = yvar (qve, anzr, dglcr, ynfgjbeq, fhof) = _ynfg_erf vs fgngr < yra(fhof): fa = fhof[fgngr] fa1 = fa.erfbyir('') # qrers flzyvaxf shyyanzr = bf.cngu.wbva(qve, fa.anzr) vs fgng.F_VFQVE(fa1.zbqr): erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr+'/', grezvangr=Snyfr) ryfr: erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr, grezvangr=Gehr) + ' ' erghea grkg + erg rkprcg Rkprcgvba, r: ybt('\areebe va pbzcyrgvba: %f\a' % r) bcgfcrp = """ ohc sgc """ b = bcgvbaf.Bcgvbaf('ohc sgc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) cjq = gbc vs rkgen: yvarf = rkgen ryfr: ernqyvar.frg_pbzcyrgre_qryvzf(' \g\a\e/') ernqyvar.frg_pbzcyrgre(pbzcyrgre) ernqyvar.cnefr_naq_ovaq("gno: pbzcyrgr") yvarf = vachgvgre() sbe yvar va yvarf: vs abg yvar.fgevc(): pbagvahr jbeqf = [jbeq sbe (jbeqfgneg,jbeq) va fudhbgr.dhbgrfcyvg(yvar)] pzq = jbeqf[0].ybjre() #ybt('rkrphgr: %e %e\a' % (pzq, cnez)) gel: vs pzq == 'yf': sbe cnez va (jbeqf[1:] be ['.']): qb_yf(cnez, cjq.erfbyir(cnez)) ryvs pzq == 'pq': sbe cnez va jbeqf[1:]: cjq = cjq.erfbyir(cnez) ryvs pzq == 'cjq': cevag cjq.shyyanzr() ryvs pzq == 'png': sbe cnez va jbeqf[1:]: jevgr_gb_svyr(cjq.erfbyir(cnez).bcra(), flf.fgqbhg) ryvs pzq == 'trg': vs yra(jbeqf) abg va [2,3]: envfr Rkprcgvba('Hfntr: trg [ybpnyanzr]') eanzr = jbeqf[1] (qve,onfr) = bf.cngu.fcyvg(eanzr) yanzr = yra(jbeqf)>2 naq jbeqf[2] be onfr vas = 
cjq.erfbyir(eanzr).bcra() ybt('Fnivat %e\a' % yanzr) jevgr_gb_svyr(vas, bcra(yanzr, 'jo')) ryvs pzq == 'ztrg': sbe cnez va jbeqf[1:]: (qve,onfr) = bf.cngu.fcyvg(cnez) sbe a va cjq.erfbyir(qve).fhof(): vs sazngpu.sazngpu(a.anzr, onfr): gel: ybt('Fnivat %e\a' % a.anzr) vas = a.bcra() bhgs = bcra(a.anzr, 'jo') jevgr_gb_svyr(vas, bhgs) bhgs.pybfr() rkprcg Rkprcgvba, r: ybt(' reebe: %f\a' % r) ryvs pzq == 'uryc' be pzq == '?': ybt('Pbzznaqf: yf pq cjq png trg ztrg uryc dhvg\a') ryvs pzq == 'dhvg' be pzq == 'rkvg' be pzq == 'olr': oernx ryfr: envfr Rkprcgvba('ab fhpu pbzznaq %e' % pzq) rkprcg Rkprcgvba, r: ybt('reebe: %f\a' % r) #envfr #!/hfe/ova/rai clguba vzcbeg flf, zznc sebz ohc vzcbeg bcgvbaf, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc enaqbz [-F frrq] -- F,frrq= bcgvbany enaqbz ahzore frrq (qrsnhyg 1) s,sbepr cevag enaqbz qngn gb fgqbhg rira vs vg'f n ggl """ b = bcgvbaf.Bcgvbaf('ohc enaqbz', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") gbgny = cnefr_ahz(rkgen[0]) vs bcg.sbepr be (abg bf.vfnggl(1) naq abg ngbv(bf.raiveba.trg('OHC_SBEPR_GGL')) & 1): _unfufcyvg.jevgr_enaqbz(flf.fgqbhg.svyrab(), gbgny, bcg.frrq be 0) ryfr: ybt('reebe: abg jevgvat ovanel qngn gb n grezvany. 
Hfr -s gb sbepr.\a') flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc uryc """ b = bcgvbaf.Bcgvbaf('ohc uryc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) == 0: # gur jenccre cebtenz cebivqrf gur qrsnhyg hfntr fgevat bf.rkrpic(bf.raiveba['OHC_ZNVA_RKR'], ['ohc']) ryvs yra(rkgen) == 1: qbpanzr = (rkgen[0]=='ohc' naq 'ohc' be ('ohc-%f' % rkgen[0])) rkr = flf.neti[0] (rkrcngu, rkrsvyr) = bf.cngu.fcyvg(rkr) znacngu = bf.cngu.wbva(rkrcngu, '../Qbphzragngvba/' + qbpanzr + '.[1-9]') t = tybo.tybo(znacngu) vs t: bf.rkrpic('zna', ['zna', '-y', t[0]]) ryfr: bf.rkrpic('zna', ['zna', qbpanzr]) ryfr: b.sngny("rknpgyl bar pbzznaq anzr rkcrpgrq") #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng, reeab, shfr, er, gvzr, grzcsvyr sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * pynff Fgng(shfr.Fgng): qrs __vavg__(frys): frys.fg_zbqr = 0 frys.fg_vab = 0 frys.fg_qri = 0 frys.fg_ayvax = 0 frys.fg_hvq = 0 frys.fg_tvq = 0 frys.fg_fvmr = 0 frys.fg_ngvzr = 0 frys.fg_zgvzr = 0 frys.fg_pgvzr = 0 frys.fg_oybpxf = 0 frys.fg_oyxfvmr = 0 frys.fg_eqri = 0 pnpur = {} qrs pnpur_trg(gbc, cngu): cnegf = cngu.fcyvg('/') pnpur[('',)] = gbc p = Abar znk = yra(cnegf) #ybt('pnpur: %e\a' % pnpur.xrlf()) sbe v va enatr(znk): cer = cnegf[:znk-v] #ybt('pnpur gelvat: %e\a' % cer) p = pnpur.trg(ghcyr(cer)) vs p: erfg = cnegf[znk-v:] sbe e va erfg: #ybt('erfbyivat %e sebz %e\a' % (e, p.shyyanzr())) p = p.yerfbyir(e) xrl = ghcyr(cer + [e]) #ybt('fnivat: %e\a' % (xrl,)) pnpur[xrl] = p oernx nffreg(p) erghea p pynff OhcSf(shfr.Shfr): qrs __vavg__(frys, gbc): shfr.Shfr.__vavg__(frys) frys.gbc = gbc qrs trgngge(frys, cngu): ybt('--trgngge(%e)\a' % cngu) gel: abqr = pnpur_trg(frys.gbc, cngu) fg = Fgng() fg.fg_zbqr = abqr.zbqr fg.fg_ayvax = abqr.ayvaxf() fg.fg_fvmr = abqr.fvmr() fg.fg_zgvzr = abqr.zgvzr fg.fg_pgvzr = abqr.pgvzr fg.fg_ngvzr = abqr.ngvzr erghea fg rkprcg isf.AbFhpuSvyr: erghea -reeab.RABRAG qrs ernqqve(frys, cngu, bssfrg): 
ybt('--ernqqve(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) lvryq shfr.Qveragel('.') lvryq shfr.Qveragel('..') sbe fho va abqr.fhof(): lvryq shfr.Qveragel(fho.anzr) qrs ernqyvax(frys, cngu): ybt('--ernqyvax(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) erghea abqr.ernqyvax() qrs bcra(frys, cngu, syntf): ybt('--bcra(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) nppzbqr = bf.B_EQBAYL | bf.B_JEBAYL | bf.B_EQJE vs (syntf & nppzbqr) != bf.B_EQBAYL: erghea -reeab.RNPPRF abqr.bcra() qrs eryrnfr(frys, cngu, syntf): ybt('--eryrnfr(%e)\a' % cngu) qrs ernq(frys, cngu, fvmr, bssfrg): ybt('--ernq(%e)\a' % cngu) a = pnpur_trg(frys.gbc, cngu) b = a.bcra() b.frrx(bssfrg) erghea b.ernq(fvmr) vs abg unfngge(shfr, '__irefvba__'): envfr EhagvzrReebe, "lbhe shfr zbqhyr vf gbb byq sbe shfr.__irefvba__" shfr.shfr_clguba_ncv = (0, 2) bcgfcrp = """ ohc shfr [-q] [-s] -- q,qroht vapernfr qroht yriry s,sbertebhaq eha va sbertebhaq """ b = bcgvbaf.Bcgvbaf('ohc shfr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) s = OhcSf(gbc) s.shfr_netf.zbhagcbvag = rkgen[0] vs bcg.qroht: s.shfr_netf.nqq('qroht') vs bcg.sbertebhaq: s.shfr_netf.frgzbq('sbertebhaq') cevag s.zhygvguernqrq s.zhygvguernqrq = Snyfr s.znva() #!/hfe/ova/rai clguba sebz ohc vzcbeg tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ [OHC_QVE=...] ohc vavg [-e ubfg:cngu] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc vavg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") vs bcg.erzbgr: tvg.vavg_ercb() # ybpny ercb tvg.purpx_ercb_be_qvr() pyv = pyvrag.Pyvrag(bcg.erzbgr, perngr=Gehr) pyv.pybfr() ryfr: tvg.vavg_ercb() #!/hfe/ova/rai clguba vzcbeg flf, zngu, fgehpg, tybo sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * CNTR_FVMR=4096 FUN_CRE_CNTR=CNTR_FVMR/200. 
qrs zretr(vqkyvfg, ovgf, gnoyr): pbhag = 0 sbe r va tvg.vqkzretr(vqkyvfg): pbhag += 1 cersvk = tvg.rkgenpg_ovgf(r, ovgf) gnoyr[cersvk] = pbhag lvryq r qrs qb_zvqk(bhgqve, bhgsvyranzr, vasvyranzrf): vs abg bhgsvyranzr: nffreg(bhgqve) fhz = Fun1('\0'.wbva(vasvyranzrf)).urkqvtrfg() bhgsvyranzr = '%f/zvqk-%f.zvqk' % (bhgqve, fhz) vac = [] gbgny = 0 sbe anzr va vasvyranzrf: vk = tvg.CnpxVqk(anzr) vac.nccraq(vk) gbgny += yra(vk) ybt('Zretvat %q vaqrkrf (%q bowrpgf).\a' % (yra(vasvyranzrf), gbgny)) vs (abg bcg.sbepr naq (gbgny < 1024 naq yra(vasvyranzrf) < 3)) \ be (bcg.sbepr naq abg gbgny): ybt('zvqk: abguvat gb qb.\a') erghea cntrf = vag(gbgny/FUN_CRE_CNTR) be 1 ovgf = vag(zngu.prvy(zngu.ybt(cntrf, 2))) ragevrf = 2**ovgf ybt('Gnoyr fvmr: %q (%q ovgf)\a' % (ragevrf*4, ovgf)) gnoyr = [0]*ragevrf gel: bf.hayvax(bhgsvyranzr) rkprcg BFReebe: cnff s = bcra(bhgsvyranzr + '.gzc', 'j+') s.jevgr('ZVQK\0\0\0\2') s.jevgr(fgehpg.cnpx('!V', ovgf)) nffreg(s.gryy() == 12) s.jevgr('\0'*4*ragevrf) sbe r va zretr(vac, ovgf, gnoyr): s.jevgr(r) s.jevgr('\0'.wbva(bf.cngu.onfranzr(c) sbe c va vasvyranzrf)) s.frrx(12) s.jevgr(fgehpg.cnpx('!%qV' % ragevrf, *gnoyr)) s.pybfr() bf.eranzr(bhgsvyranzr + '.gzc', bhgsvyranzr) # guvf vf whfg sbe grfgvat vs 0: c = tvg.CnpxZvqk(bhgsvyranzr) nffreg(yra(c.vqkanzrf) == yra(vasvyranzrf)) cevag c.vqkanzrf nffreg(yra(c) == gbgny) cv = vgre(c) sbe v va zretr(vac, gbgny, ovgf, gnoyr): nffreg(v == cv.arkg()) nffreg(c.rkvfgf(v)) cevag bhgsvyranzr bcgfcrp = """ ohc zvqk [bcgvbaf...] 
-- b,bhgchg= bhgchg zvqk svyranzr (qrsnhyg: nhgb-trarengrq) n,nhgb nhgbzngvpnyyl perngr .zvqk sebz nal havaqrkrq .vqk svyrf s,sbepr nhgbzngvpnyyl perngr .zvqk sebz *nyy* .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zvqk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen naq (bcg.nhgb be bcg.sbepr): b.sngny("lbh pna'g hfr -s/-n naq nyfb cebivqr svyranzrf") tvg.purpx_ercb_be_qvr() vs rkgen: qb_zvqk(tvg.ercb('bowrpgf/cnpx'), bcg.bhgchg, rkgen) ryvs bcg.nhgb be bcg.sbepr: cnguf = [tvg.ercb('bowrpgf/cnpx')] cnguf += tybo.tybo(tvg.ercb('vaqrk-pnpur/*/.')) sbe cngu va cnguf: ybt('zvqk: fpnaavat %f\a' % cngu) vs bcg.sbepr: qb_zvqk(cngu, bcg.bhgchg, tybo.tybo('%f/*.vqk' % cngu)) ryvs bcg.nhgb: z = tvg.CnpxVqkYvfg(cngu) arrqrq = {} sbe cnpx va z.cnpxf: # bayl .vqk svyrf jvgubhg n .zvqk ner bcra vs cnpx.anzr.raqfjvgu('.vqk'): arrqrq[cnpx.anzr] = 1 qry z qb_zvqk(cngu, bcg.bhgchg, arrqrq.xrlf()) ybt('\a') ryfr: b.sngny("lbh zhfg hfr -s be -n be cebivqr vachg svyranzrf") #!/hfe/ova/rai clguba vzcbeg flf, bf, enaqbz sebz ohc vzcbeg bcgvbaf sebz ohc.urycref vzcbeg * qrs enaqoybpx(a): y = [] sbe v va kenatr(a): y.nccraq(pue(enaqbz.enaqenatr(0,256))) erghea ''.wbva(y) bcgfcrp = """ ohc qnzntr [-a pbhag] [-f znkfvmr] [-F frrq] -- JNEAVAT: GUVF PBZZNAQ VF RKGERZRYL QNATREBHF a,ahz= ahzore bs oybpxf gb qnzntr f,fvmr= znkvzhz fvmr bs rnpu qnzntrq oybpx creprag= znkvzhz fvmr bs rnpu qnzntrq oybpx (nf n creprag bs ragver svyr) rdhny fcernq qnzntr rirayl guebhtubhg gur svyr F,frrq= enaqbz ahzore frrq (sbe ercrngnoyr grfgf) """ b = bcgvbaf.Bcgvbaf('ohc qnzntr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg rkgen: b.sngny('svyranzrf rkcrpgrq') vs bcg.frrq != Abar: enaqbz.frrq(bcg.frrq) sbe anzr va rkgen: ybt('Qnzntvat "%f"...\a' % anzr) s = bcra(anzr, 'e+o') fg = bf.sfgng(s.svyrab()) fvmr = fg.fg_fvmr vs bcg.creprag be bcg.fvmr: zf1 = vag(sybng(bcg.creprag be 0)/100.0*fvmr) be fvmr zf2 = bcg.fvmr be fvmr znkfvmr = zva(zf1, zf2) ryfr: znkfvmr = 1 puhaxf = bcg.ahz be 10 
puhaxfvmr = fvmr/puhaxf sbe e va enatr(puhaxf): fm = enaqbz.enaqenatr(1, znkfvmr+1) vs fm > fvmr: fm = fvmr vs bcg.rdhny: bsf = e*puhaxfvmr ryfr: bsf = enaqbz.enaqenatr(0, fvmr - fm + 1) ybt(' %6q olgrf ng %q\a' % (fm, bsf)) s.frrx(bsf) s.jevgr(enaqoybpx(fm)) s.pybfr() #!/hfe/ova/rai clguba vzcbeg flf, fgehpg, zznc sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * fhfcraqrq_j = Abar qrs vavg_qve(pbaa, net): tvg.vavg_ercb(net) ybt('ohc freire: ohcqve vavgvnyvmrq: %e\a' % tvg.ercbqve) pbaa.bx() qrs frg_qve(pbaa, net): tvg.purpx_ercb_be_qvr(net) ybt('ohc freire: ohcqve vf %e\a' % tvg.ercbqve) pbaa.bx() qrs yvfg_vaqrkrf(pbaa, whax): tvg.purpx_ercb_be_qvr() sbe s va bf.yvfgqve(tvg.ercb('bowrpgf/cnpx')): vs s.raqfjvgu('.vqk'): pbaa.jevgr('%f\a' % s) pbaa.bx() qrs fraq_vaqrk(pbaa, anzr): tvg.purpx_ercb_be_qvr() nffreg(anzr.svaq('/') < 0) nffreg(anzr.raqfjvgu('.vqk')) vqk = tvg.CnpxVqk(tvg.ercb('bowrpgf/cnpx/%f' % anzr)) pbaa.jevgr(fgehpg.cnpx('!V', yra(vqk.znc))) pbaa.jevgr(vqk.znc) pbaa.bx() qrs erprvir_bowrpgf(pbaa, whax): tybony fhfcraqrq_j tvg.purpx_ercb_be_qvr() fhttrfgrq = {} vs fhfcraqrq_j: j = fhfcraqrq_j fhfcraqrq_j = Abar ryfr: j = tvg.CnpxJevgre() juvyr 1: af = pbaa.ernq(4) vs abg af: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq yratgu urnqre, tbg RBS\a') a = fgehpg.hacnpx('!V', af)[0] #ybt('rkcrpgvat %q olgrf\a' % a) vs abg a: ybt('ohc freire: erprvirq %q bowrpg%f.\a' % (j.pbhag, j.pbhag!=1 naq "f" be '')) shyycngu = j.pybfr() vs shyycngu: (qve, anzr) = bf.cngu.fcyvg(shyycngu) pbaa.jevgr('%f.vqk\a' % anzr) pbaa.bx() erghea ryvs a == 0kssssssss: ybt('ohc freire: erprvir-bowrpgf fhfcraqrq.\a') fhfcraqrq_j = j pbaa.bx() erghea ohs = pbaa.ernq(a) # bowrpg fvmrf va ohc ner ernfbanoyl fznyy #ybt('ernq %q olgrf\a' % a) vs yra(ohs) < a: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq %q olgrf, tbg %q\a' % (a, yra(ohs))) (glcr, pbagrag) = tvg._qrpbqr_cnpxbow(ohs) fun = tvg.pnyp_unfu(glcr, pbagrag) byqcnpx = j.rkvfgf(fun) # SVKZR: jr bayl fhttrfg n fvatyr 
vaqrk cre plpyr, orpnhfr gur pyvrag # vf pheeragyl qhzo gb qbjaybnq zber guna bar cre plpyr naljnl. # Npghnyyl jr fubhyq svk gur pyvrag, ohg guvf vf n zvabe bcgvzvmngvba # ba gur freire fvqr. vs abg fhttrfgrq naq \ byqcnpx naq (byqcnpx == Gehr be byqcnpx.raqfjvgu('.zvqk')): # SVKZR: jr fubhyqa'g ernyyl unir gb xabj nobhg zvqk svyrf # ng guvf ynlre. Ohg rkvfgf() ba n zvqk qbrfa'g erghea gur # cnpxanzr (fvapr vg qbrfa'g xabj)... cebonoyl jr fubhyq whfg # svk gung qrsvpvrapl bs zvqk svyrf riraghnyyl, nygubhtu vg'yy # znxr gur svyrf ovttre. Guvf zrgubq vf pregnvayl abg irel # rssvpvrag. j.bowpnpur.erserfu(fxvc_zvqk = Gehr) byqcnpx = j.bowpnpur.rkvfgf(fun) ybt('arj fhttrfgvba: %e\a' % byqcnpx) nffreg(byqcnpx) nffreg(byqcnpx != Gehr) nffreg(abg byqcnpx.raqfjvgu('.zvqk')) j.bowpnpur.erserfu(fxvc_zvqk = Snyfr) vs abg fhttrfgrq naq byqcnpx: nffreg(byqcnpx.raqfjvgu('.vqk')) (qve,anzr) = bf.cngu.fcyvg(byqcnpx) vs abg (anzr va fhttrfgrq): ybt("ohc freire: fhttrfgvat vaqrk %f\a" % anzr) pbaa.jevgr('vaqrk %f\a' % anzr) fhttrfgrq[anzr] = 1 ryfr: j._enj_jevgr([ohs]) # ABGERNPURQ qrs ernq_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() e = tvg.ernq_ers(ersanzr) pbaa.jevgr('%f\a' % (e be '').rapbqr('urk')) pbaa.bx() qrs hcqngr_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() arjiny = pbaa.ernqyvar().fgevc() byqiny = pbaa.ernqyvar().fgevc() tvg.hcqngr_ers(ersanzr, arjiny.qrpbqr('urk'), byqiny.qrpbqr('urk')) pbaa.bx() qrs png(pbaa, vq): tvg.purpx_ercb_be_qvr() gel: sbe oybo va tvg.png(vq): pbaa.jevgr(fgehpg.cnpx('!V', yra(oybo))) pbaa.jevgr(oybo) rkprcg XrlReebe, r: ybt('freire: reebe: %f\a' % r) pbaa.jevgr('\0\0\0\0') pbaa.reebe(r) ryfr: pbaa.jevgr('\0\0\0\0') pbaa.bx() bcgfcrp = """ ohc freire """ b = bcgvbaf.Bcgvbaf('ohc freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') ybt('ohc freire: ernqvat sebz fgqva.\a') pbzznaqf = { 'vavg-qve': vavg_qve, 'frg-qve': frg_qve, 'yvfg-vaqrkrf': yvfg_vaqrkrf, 'fraq-vaqrk': fraq_vaqrk, 
'erprvir-bowrpgf': erprvir_bowrpgf, 'ernq-ers': ernq_ers, 'hcqngr-ers': hcqngr_ers, 'png': png, } # SVKZR: guvf cebgbpby vf gbgnyyl ynzr naq abg ng nyy shgher-cebbs. # (Rfcrpvnyyl fvapr jr nobeg pbzcyrgryl nf fbba nf *nalguvat* onq unccraf) pbaa = Pbaa(flf.fgqva, flf.fgqbhg) ye = yvarernqre(pbaa) sbe _yvar va ye: yvar = _yvar.fgevc() vs abg yvar: pbagvahr ybt('ohc freire: pbzznaq: %e\a' % yvar) jbeqf = yvar.fcyvg(' ', 1) pzq = jbeqf[0] erfg = yra(jbeqf)>1 naq jbeqf[1] be '' vs pzq == 'dhvg': oernx ryfr: pzq = pbzznaqf.trg(pzq) vs pzq: pzq(pbaa, erfg) ryfr: envfr Rkprcgvba('haxabja freire pbzznaq: %e\a' % yvar) ybt('ohc freire: qbar\a') #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc wbva [-e ubfg:cngu] [ersf be unfurf...] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc wbva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg rkgen: rkgen = yvarernqre(flf.fgqva) erg = 0 vs bcg.erzbgr: pyv = pyvrag.Pyvrag(bcg.erzbgr) png = pyv.png ryfr: pc = tvg.PngCvcr() png = pc.wbva sbe vq va rkgen: gel: sbe oybo va png(vq): flf.fgqbhg.jevgr(oybo) rkprcg XrlReebe, r: flf.fgqbhg.syhfu() ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, er, reeab, fgng, gvzr, zngu sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, vaqrk, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc fnir [-gp] [-a anzr] -- e,erzbgr= erzbgr ercbfvgbel cngu g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) d,dhvrg qba'g fubj cebterff zrgre fznyyre= bayl onpx hc svyrf fznyyre guna a olgrf """ b = bcgvbaf.Bcgvbaf('ohc fnir', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny("hfr bar be zber bs -g, -p, -a") vs abg rkgen: b.sngny("ab svyranzrf 
tvira") bcg.cebterff = (vfggl naq abg bcg.dhvrg) bcg.fznyyre = cnefr_ahz(bcg.fznyyre be 0) vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() unaqyr_pgey_p() qrs rngfynfu(qve): vs qve.raqfjvgu('/'): erghea qve[:-1] ryfr: erghea qve cnegf = [''] funyvfgf = [[]] qrs _chfu(cneg): nffreg(cneg) cnegf.nccraq(cneg) funyvfgf.nccraq([]) qrs _cbc(sbepr_gerr): nffreg(yra(cnegf) >= 1) cneg = cnegf.cbc() funyvfg = funyvfgf.cbc() gerr = sbepr_gerr be j.arj_gerr(funyvfg) vs funyvfgf: funyvfgf[-1].nccraq(('40000', cneg, gerr)) ryfr: # guvf jnf gur gbcyriry, fb chg vg onpx sbe fnavgl funyvfgf.nccraq(funyvfg) erghea gerr ynfgerznva = Abar qrs cebterff_ercbeg(a): tybony pbhag, fhopbhag, ynfgerznva fhopbhag += a pp = pbhag + fhopbhag cpg = gbgny naq (pp*100.0/gbgny) be 0 abj = gvzr.gvzr() ryncfrq = abj - gfgneg xcf = ryncfrq naq vag(pp/1024./ryncfrq) xcf_senp = 10 ** vag(zngu.ybt(xcf+1, 10) - 1) xcf = vag(xcf/xcf_senp)*xcf_senp vs pp: erznva = ryncfrq*1.0/pp * (gbgny-pp) ryfr: erznva = 0.0 vs (ynfgerznva naq (erznva > ynfgerznva) naq ((erznva - ynfgerznva)/ynfgerznva < 0.05)): erznva = ynfgerznva ryfr: ynfgerznva = erznva ubhef = vag(erznva/60/60) zvaf = vag(erznva/60 - ubhef*60) frpf = vag(erznva - ubhef*60*60 - zvaf*60) vs ryncfrq < 30: erznvafge = '' xcffge = '' ryfr: xcffge = '%qx/f' % xcf vs ubhef: erznvafge = '%qu%qz' % (ubhef, zvaf) ryvs zvaf: erznvafge = '%qz%q' % (zvaf, frpf) ryfr: erznvafge = '%qf' % frpf cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf) %f %f\e' % (cpg, pp/1024, gbgny/1024, spbhag, sgbgny, erznvafge, xcffge)) e = vaqrk.Ernqre(tvg.ercb('ohcvaqrk')) qrs nyernql_fnirq(rag): erghea rag.vf_inyvq() naq 
j.rkvfgf(rag.fun) naq rag.fun qrs jnagerphefr_cer(rag): erghea abg nyernql_fnirq(rag) qrs jnagerphefr_qhevat(rag): erghea abg nyernql_fnirq(rag) be rag.fun_zvffvat() gbgny = sgbgny = 0 vs bcg.cebterff: sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_cer): vs abg (sgbgny % 10024): cebterff('Ernqvat vaqrk: %q\e' % sgbgny) rkvfgf = rag.rkvfgf() unfuinyvq = nyernql_fnirq(rag) rag.frg_fun_zvffvat(abg unfuinyvq) vs abg bcg.fznyyre be rag.fvmr < bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: gbgny += rag.fvmr sgbgny += 1 cebterff('Ernqvat vaqrk: %q, qbar.\a' % sgbgny) unfufcyvg.cebterff_pnyyonpx = cebterff_ercbeg gfgneg = gvzr.gvzr() pbhag = fhopbhag = spbhag = 0 ynfgfxvc_anzr = Abar ynfgqve = '' sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_qhevat): (qve, svyr) = bf.cngu.fcyvg(rag.anzr) rkvfgf = (rag.syntf & vaqrk.VK_RKVFGF) unfuinyvq = nyernql_fnirq(rag) jnfzvffvat = rag.fun_zvffvat() byqfvmr = rag.fvmr vs bcg.ireobfr: vs abg rkvfgf: fgnghf = 'Q' ryvs abg unfuinyvq: vs rag.fun == vaqrk.RZCGL_FUN: fgnghf = 'N' ryfr: fgnghf = 'Z' ryfr: fgnghf = ' ' vs bcg.ireobfr >= 2: ybt('%f %-70f\a' % (fgnghf, rag.anzr)) ryvs abg fgng.F_VFQVE(rag.zbqr) naq ynfgqve != qve: vs abg ynfgqve.fgnegfjvgu(qve): ybt('%f %-70f\a' % (fgnghf, bf.cngu.wbva(qve, ''))) ynfgqve = qve vs bcg.cebterff: cebterff_ercbeg(0) spbhag += 1 vs abg rkvfgf: pbagvahr vs bcg.fznyyre naq rag.fvmr >= bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: nqq_reebe('fxvccvat ynetr svyr "%f"' % rag.anzr) ynfgfxvc_anzr = rag.anzr pbagvahr nffreg(qve.fgnegfjvgu('/')) qvec = qve.fcyvg('/') juvyr cnegf > qvec: _cbc(sbepr_gerr = Abar) vs qve != '/': sbe cneg va qvec[yra(cnegf):]: _chfu(cneg) vs abg svyr: # ab svyranzr cbegvba zrnaf guvf vf n fhoqve. Ohg # fho/cneragqverpgbevrf nyernql unaqyrq va gur cbc/chfu() cneg nobir. 
byqgerr = nyernql_fnirq(rag) # znl or Abar arjgerr = _cbc(sbepr_gerr = byqgerr) vs abg byqgerr: vs ynfgfxvc_anzr naq ynfgfxvc_anzr.fgnegfjvgu(rag.anzr): rag.vainyvqngr() ryfr: rag.inyvqngr(040000, arjgerr) rag.ercnpx() vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr pbagvahr # vg'f abg n qverpgbel vq = Abar vs unfuinyvq: zbqr = '%b' % rag.tvgzbqr vq = rag.fun funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) ryfr: vs fgng.F_VFERT(rag.zbqr): gel: s = unfufcyvg.bcra_abngvzr(rag.anzr) rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = unfufcyvg.fcyvg_gb_oybo_be_gerr(j, [s]) ryfr: vs fgng.F_VFQVE(rag.zbqr): nffreg(0) # unaqyrq nobir ryvs fgng.F_VFYAX(rag.zbqr): gel: ey = bf.ernqyvax(rag.anzr) rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = ('120000', j.arj_oybo(ey)) ryfr: nqq_reebe(Rkprcgvba('fxvccvat fcrpvny svyr "%f"' % rag.anzr)) ynfgfxvc_anzr = rag.anzr vs vq: rag.inyvqngr(vag(zbqr, 8), vq) rag.ercnpx() funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr fhopbhag = 0 vs bcg.cebterff: cpg = gbgny naq pbhag*100.0/gbgny be 100 cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf), qbar. 
\a' % (cpg, pbhag/1024, gbgny/1024, spbhag, sgbgny)) juvyr yra(cnegf) > 1: _cbc(sbepr_gerr = Abar) nffreg(yra(funyvfgf) == 1) gerr = j.arj_gerr(funyvfgf[-1]) vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fnir\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq juvyr fnivat.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc gvpx """ b = bcgvbaf.Bcgvbaf('ohc gvpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") g = gvzr.gvzr() gyrsg = 1 - (g - vag(g)) gvzr.fyrrc(gyrsg) #!/hfe/ova/rai clguba vzcbeg bf, flf, fgng, gvzr sebz ohc vzcbeg bcgvbaf, tvg, vaqrk, qerphefr sebz ohc.urycref vzcbeg * qrs zretr_vaqrkrf(bhg, e1, e2): sbe r va vaqrk.ZretrVgre([e1, e2]): # SVKZR: fubhyqa'g jr erzbir qryrgrq ragevrf riraghnyyl? Jura? 
bhg.nqq_vkragel(r) pynff VgreUrycre: qrs __vavg__(frys, y): frys.v = vgre(y) frys.phe = Abar frys.arkg() qrs arkg(frys): gel: frys.phe = frys.v.arkg() rkprcg FgbcVgrengvba: frys.phe = Abar erghea frys.phe qrs purpx_vaqrk(ernqre): gel: ybt('purpx: purpxvat sbejneq vgrengvba...\a') r = Abar q = {} sbe r va ernqre.sbejneq_vgre(): vs r.puvyqera_a: vs bcg.ireobfr: ybt('%08k+%-4q %e\a' % (r.puvyqera_bsf, r.puvyqera_a, r.anzr)) nffreg(r.puvyqera_bsf) nffreg(r.anzr.raqfjvgu('/')) nffreg(abg q.trg(r.puvyqera_bsf)) q[r.puvyqera_bsf] = 1 vs r.syntf & vaqrk.VK_UNFUINYVQ: nffreg(r.fun != vaqrk.RZCGL_FUN) nffreg(r.tvgzbqr) nffreg(abg r be r.anzr == '/') # ynfg ragel vf *nyjnlf* / ybt('purpx: purpxvat abezny vgrengvba...\a') ynfg = Abar sbe r va ernqre: vs ynfg: nffreg(ynfg > r.anzr) ynfg = r.anzr rkprcg: ybt('vaqrk reebe! ng %e\a' % r) envfr ybt('purpx: cnffrq.\a') qrs hcqngr_vaqrk(gbc): ev = vaqrk.Ernqre(vaqrksvyr) jv = vaqrk.Jevgre(vaqrksvyr) evt = VgreUrycre(ev.vgre(anzr=gbc)) gfgneg = vag(gvzr.gvzr()) unfutra = Abar vs bcg.snxr_inyvq: qrs unfutra(anzr): erghea (0100644, vaqrk.SNXR_FUN) gbgny = 0 sbe (cngu,cfg) va qerphefr.erphefvir_qveyvfg([gbc], kqri=bcg.kqri): vs bcg.ireobfr>=2 be (bcg.ireobfr==1 naq fgng.F_VFQVE(cfg.fg_zbqr)): flf.fgqbhg.jevgr('%f\a' % cngu) flf.fgqbhg.syhfu() cebterff('Vaqrkvat: %q\e' % gbgny) ryvs abg (gbgny % 128): cebterff('Vaqrkvat: %q\e' % gbgny) gbgny += 1 juvyr evt.phe naq evt.phe.anzr > cngu: # qryrgrq cnguf vs evt.phe.rkvfgf(): evt.phe.frg_qryrgrq() evt.phe.ercnpx() evt.arkg() vs evt.phe naq evt.phe.anzr == cngu: # cnguf gung nyernql rkvfgrq vs cfg: evt.phe.sebz_fgng(cfg, gfgneg) vs abg (evt.phe.syntf & vaqrk.VK_UNFUINYVQ): vs unfutra: (evt.phe.tvgzbqr, evt.phe.fun) = unfutra(cngu) evt.phe.syntf |= vaqrk.VK_UNFUINYVQ vs bcg.snxr_vainyvq: evt.phe.vainyvqngr() evt.phe.ercnpx() evt.arkg() ryfr: # arj cnguf jv.nqq(cngu, cfg, unfutra = unfutra) cebterff('Vaqrkvat: %q, qbar.\a' % gbgny) vs ev.rkvfgf(): ev.fnir() jv.syhfu() vs jv.pbhag: je = 
jv.arj_ernqre() vs bcg.purpx: ybt('purpx: orsber zretvat: byqsvyr\a') purpx_vaqrk(ev) ybt('purpx: orsber zretvat: arjsvyr\a') purpx_vaqrk(je) zv = vaqrk.Jevgre(vaqrksvyr) zretr_vaqrkrf(zv, ev, je) ev.pybfr() zv.pybfr() je.pybfr() jv.nobeg() ryfr: jv.pybfr() bcgfcrp = """ ohc vaqrk <-c|z|h> [bcgvbaf...] -- c,cevag cevag gur vaqrk ragevrf sbe gur tvira anzrf (nyfb jbexf jvgu -h) z,zbqvsvrq cevag bayl nqqrq/qryrgrq/zbqvsvrq svyrf (vzcyvrf -c) f,fgnghf cevag rnpu svyranzr jvgu n fgnghf pune (N/Z/Q) (vzcyvrf -c) U,unfu cevag gur unfu sbe rnpu bowrpg arkg gb vgf anzr (vzcyvrf -c) y,ybat cevag zber vasbezngvba nobhg rnpu svyr h,hcqngr (erphefviryl) hcqngr gur vaqrk ragevrf sbe gur tvira svyranzrf k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf snxr-inyvq znex nyy vaqrk ragevrf nf hc-gb-qngr rira vs gurl nera'g snxr-vainyvq znex nyy vaqrk ragevrf nf vainyvq purpx pnershyyl purpx vaqrk svyr vagrtevgl s,vaqrksvyr= gur anzr bs gur vaqrk svyr (qrsnhyg 'vaqrk') i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) """ b = bcgvbaf.Bcgvbaf('ohc vaqrk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg (bcg.zbqvsvrq be bcg['cevag'] be bcg.fgnghf be bcg.hcqngr be bcg.purpx): b.sngny('fhccyl bar be zber bs -c, -f, -z, -h, be --purpx') vs (bcg.snxr_inyvq be bcg.snxr_vainyvq) naq abg bcg.hcqngr: b.sngny('--snxr-{va,}inyvq ner zrnavatyrff jvgubhg -h') vs bcg.snxr_inyvq naq bcg.snxr_vainyvq: b.sngny('--snxr-inyvq vf vapbzcngvoyr jvgu --snxr-vainyvq') tvg.purpx_ercb_be_qvr() vaqrksvyr = bcg.vaqrksvyr be tvg.ercb('ohcvaqrk') unaqyr_pgey_p() vs bcg.purpx: ybt('purpx: fgnegvat vavgvny purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) cnguf = vaqrk.erqhpr_cnguf(rkgen) vs bcg.hcqngr: vs abg cnguf: b.sngny('hcqngr (-h) erdhrfgrq ohg ab cnguf tvira') sbe (ec,cngu) va cnguf: hcqngr_vaqrk(ec) vs bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq: sbe (anzr, rag) va vaqrk.Ernqre(vaqrksvyr).svygre(rkgen be ['']): vs (bcg.zbqvsvrq naq (rag.vf_inyvq() be rag.vf_qryrgrq() be abg 
rag.zbqr)): pbagvahr yvar = '' vs bcg.fgnghf: vs rag.vf_qryrgrq(): yvar += 'Q ' ryvs abg rag.vf_inyvq(): vs rag.fun == vaqrk.RZCGL_FUN: yvar += 'N ' ryfr: yvar += 'Z ' ryfr: yvar += ' ' vs bcg.unfu: yvar += rag.fun.rapbqr('urk') + ' ' vs bcg.ybat: yvar += "%7f %7f " % (bpg(rag.zbqr), bpg(rag.tvgzbqr)) cevag yvar + (anzr be './') vs bcg.purpx naq (bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq be bcg.hcqngr): ybt('purpx: fgnegvat svany purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg sebz ohc vzcbeg bcgvbaf, urycref bcgfcrp = """ ohc eonpxhc-freire -- Guvf pbzznaq vf abg vagraqrq gb or eha znahnyyl. """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc-freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') # trg gur fhopbzznaq'f neti. # Abeznyyl jr pbhyq whfg cnff guvf ba gur pbzznaq yvar, ohg fvapr jr'yy bsgra # or trggvat pnyyrq ba gur bgure raq bs na ffu cvcr, juvpu graqf gb znatyr # neti (ol fraqvat vg ivn gur furyy), guvf jnl vf zhpu fnsre. ohs = flf.fgqva.ernq(4) fm = fgehpg.hacnpx('!V', ohs)[0] nffreg(fm > 0) nffreg(fm < 1000000) ohs = flf.fgqva.ernq(fm) nffreg(yra(ohs) == fm) neti = ohs.fcyvg('\0') # fgqva/fgqbhg ner fhccbfrqyl pbaarpgrq gb 'ohc freire' gung gur pnyyre # fgnegrq sbe hf (bsgra ba gur bgure raq bs na ffu ghaary), fb jr qba'g jnag # gb zvfhfr gurz. Zbir gurz bhg bs gur jnl, gura ercynpr fgqbhg jvgu # n cbvagre gb fgqree va pnfr bhe fhopbzznaq jnagf gb qb fbzrguvat jvgu vg. # # Vg zvtug or avpr gb qb gur fnzr jvgu fgqva, ohg zl rkcrevzragf fubjrq gung # ffu frrzf gb znxr vgf puvyq'f fgqree n ernqnoyr-ohg-arire-ernqf-nalguvat # fbpxrg. Gurl ernyyl fubhyq unir hfrq fuhgqbja(FUHG_JE) ba gur bgure raq # bs vg, ohg cebonoyl qvqa'g. Naljnl, vg'f gbb zrffl, fb yrg'f whfg znxr fher # nalbar ernqvat sebz fgqva vf qvfnccbvagrq. 
# # (Lbh pna'g whfg yrnir fgqva/fgqbhg "abg bcra" ol pybfvat gur svyr # qrfpevcgbef. Gura gur arkg svyr gung bcraf vf nhgbzngvpnyyl nffvtarq 0 be 1, # naq crbcyr *gelvat* gb ernq/jevgr fgqva/fgqbhg trg fperjrq.) bf.qhc2(0, 3) bf.qhc2(1, 4) bf.qhc2(2, 1) sq = bf.bcra('/qri/ahyy', bf.B_EQBAYL) bf.qhc2(sq, 0) bf.pybfr(sq) bf.raiveba['OHC_FREIRE_ERIREFR'] = urycref.ubfganzr() bf.rkrpic(neti[0], neti) flf.rkvg(99) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo, fhocebprff, gvzr sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * cne2_bx = 0 ahyys = bcra('/qri/ahyy') qrs qroht(f): vs bcg.ireobfr: ybt(f) qrs eha(neti): # ng yrnfg va clguba 2.5, hfvat "fgqbhg=2" be "fgqbhg=flf.fgqree" orybj # qbrfa'g npghnyyl jbex, orpnhfr fhocebprff pybfrf sq #2 evtug orsber # rkrpvat sbe fbzr ernfba. Fb jr jbex nebhaq vg ol qhcyvpngvat gur sq # svefg. sq = bf.qhc(2) # pbcl fgqree gel: c = fhocebprff.Cbcra(neti, fgqbhg=sq, pybfr_sqf=Snyfr) erghea c.jnvg() svanyyl: bf.pybfr(sq) qrs cne2_frghc(): tybony cne2_bx ei = 1 gel: c = fhocebprff.Cbcra(['cne2', '--uryc'], fgqbhg=ahyys, fgqree=ahyys, fgqva=ahyys) ei = c.jnvg() rkprcg BFReebe: ybt('sfpx: jneavat: cne2 abg sbhaq; qvfnoyvat erpbirel srngherf.\a') ryfr: cne2_bx = 1 qrs cnei(yiy): vs bcg.ireobfr >= yiy: vs vfggl: erghea [] ryfr: erghea ['-d'] ryfr: erghea ['-dd'] qrs cne2_trarengr(onfr): erghea eha(['cne2', 'perngr', '-a1', '-p200'] + cnei(2) + ['--', onfr, onfr+'.cnpx', onfr+'.vqk']) qrs cne2_irevsl(onfr): erghea eha(['cne2', 'irevsl'] + cnei(3) + ['--', onfr]) qrs cne2_ercnve(onfr): erghea eha(['cne2', 'ercnve'] + cnei(2) + ['--', onfr]) qrs dhvpx_irevsl(onfr): s = bcra(onfr + '.cnpx', 'eo') s.frrx(-20, 2) jnagfhz = s.ernq(20) nffreg(yra(jnagfhz) == 20) s.frrx(0) fhz = Fun1() sbe o va puhaxlernqre(s, bf.sfgng(s.svyrab()).fg_fvmr - 20): fhz.hcqngr(o) vs fhz.qvtrfg() != jnagfhz: envfr InyhrReebe('rkcrpgrq %e, tbg %e' % (jnagfhz.rapbqr('urk'), fhz.urkqvtrfg())) qrs tvg_irevsl(onfr): vs bcg.dhvpx: gel: dhvpx_irevsl(onfr) rkprcg Rkprcgvba, r: 
qroht('reebe: %f\a' % r) erghea 1 erghea 0 ryfr: erghea eha(['tvg', 'irevsl-cnpx', '--', onfr]) qrs qb_cnpx(onfr, ynfg): pbqr = 0 vs cne2_bx naq cne2_rkvfgf naq (bcg.ercnve be abg bcg.trarengr): ierfhyg = cne2_irevsl(onfr) vs ierfhyg != 0: vs bcg.ercnve: eerfhyg = cne2_ercnve(onfr) vs eerfhyg != 0: cevag '%f cne2 ercnve: snvyrq (%q)' % (ynfg, eerfhyg) pbqr = eerfhyg ryfr: cevag '%f cne2 ercnve: fhpprrqrq (0)' % ynfg pbqr = 100 ryfr: cevag '%f cne2 irevsl: snvyrq (%q)' % (ynfg, ierfhyg) pbqr = ierfhyg ryfr: cevag '%f bx' % ynfg ryvs abg bcg.trarengr be (cne2_bx naq abg cne2_rkvfgf): terfhyg = tvg_irevsl(onfr) vs terfhyg != 0: cevag '%f tvg irevsl: snvyrq (%q)' % (ynfg, terfhyg) pbqr = terfhyg ryfr: vs cne2_bx naq bcg.trarengr: cerfhyg = cne2_trarengr(onfr) vs cerfhyg != 0: cevag '%f cne2 perngr: snvyrq (%q)' % (ynfg, cerfhyg) pbqr = cerfhyg ryfr: cevag '%f bx' % ynfg ryfr: cevag '%f bx' % ynfg ryfr: nffreg(bcg.trarengr naq (abg cne2_bx be cne2_rkvfgf)) qroht(' fxvccrq: cne2 svyr nyernql trarengrq.\a') erghea pbqr bcgfcrp = """ ohc sfpx [bcgvbaf...] [svyranzrf...] -- e,ercnve nggrzcg gb ercnve reebef hfvat cne2 (qnatrebhf!) 
t,trarengr trarengr nhgb-ercnve vasbezngvba hfvat cne2 i,ireobfr vapernfr ireobfvgl (pna or hfrq zber guna bapr) dhvpx whfg purpx cnpx fun1fhz, qba'g hfr tvg irevsl-cnpx w,wbof= eha 'a' wbof va cnenyyry cne2-bx vzzrqvngryl erghea 0 vs cne2 vf bx, 1 vs abg qvfnoyr-cne2 vtaber cne2 rira vs vg vf ninvynoyr """ b = bcgvbaf.Bcgvbaf('ohc sfpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) cne2_frghc() vs bcg.cne2_bx: vs cne2_bx: flf.rkvg(0) # 'gehr' va fu ryfr: flf.rkvg(1) vs bcg.qvfnoyr_cne2: cne2_bx = 0 tvg.purpx_ercb_be_qvr() vs abg rkgen: qroht('sfpx: Ab svyranzrf tvira: purpxvat nyy cnpxf.\a') rkgen = tybo.tybo(tvg.ercb('bowrpgf/cnpx/*.cnpx')) pbqr = 0 pbhag = 0 bhgfgnaqvat = {} sbe anzr va rkgen: vs anzr.raqfjvgu('.cnpx'): onfr = anzr[:-5] ryvs anzr.raqfjvgu('.vqk'): onfr = anzr[:-4] ryvs anzr.raqfjvgu('.cne2'): onfr = anzr[:-5] ryvs bf.cngu.rkvfgf(anzr + '.cnpx'): onfr = anzr ryfr: envfr Rkprcgvba('%f vf abg n cnpx svyr!' % anzr) (qve,ynfg) = bf.cngu.fcyvg(onfr) cne2_rkvfgf = bf.cngu.rkvfgf(onfr + '.cne2') vs cne2_rkvfgf naq bf.fgng(onfr + '.cne2').fg_fvmr == 0: cne2_rkvfgf = 0 flf.fgqbhg.syhfu() qroht('sfpx: purpxvat %f (%f)\a' % (ynfg, cne2_bx naq cne2_rkvfgf naq 'cne2' be 'tvg')) vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.wbof: ap = qb_cnpx(onfr, ynfg) pbqr = pbqr be ap pbhag += 1 ryfr: juvyr yra(bhgfgnaqvat) >= bcg.wbof: (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 cvq = bf.sbex() vs cvq: # cnerag bhgfgnaqvat[cvq] = 1 ryfr: # puvyq gel: flf.rkvg(qb_cnpx(onfr, ynfg)) rkprcg Rkprcgvba, r: ybt('rkprcgvba: %e\a' % r) flf.rkvg(99) juvyr yra(bhgfgnaqvat): (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.ireobfr naq vfggl: ybt('sfpx qbar. 
\a') flf.rkvg(pbqr) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg, trgbcg, fhocebprff, fvtany sebz ohc vzcbeg bcgvbaf, ffu sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc eonpxhc vaqrk ... ohc eonpxhc fnir ... ohc eonpxhc fcyvg ... """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc', bcgfcrp, bcgshap=trgbcg.trgbcg) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) < 2: b.sngny('nethzragf rkcrpgrq') pynff FvtRkprcgvba(Rkprcgvba): qrs __vavg__(frys, fvtahz): frys.fvtahz = fvtahz Rkprcgvba.__vavg__(frys, 'fvtany %q erprvirq' % fvtahz) qrs unaqyre(fvtahz, senzr): envfr FvtRkprcgvba(fvtahz) fvtany.fvtany(fvtany.FVTGREZ, unaqyre) fvtany.fvtany(fvtany.FVTVAG, unaqyre) fc = Abar c = Abar erg = 99 gel: ubfganzr = rkgen[0] neti = rkgen[1:] c = ffu.pbaarpg(ubfganzr, 'eonpxhc-freire') netif = '\0'.wbva(['ohc'] + neti) c.fgqva.jevgr(fgehpg.cnpx('!V', yra(netif)) + netif) c.fgqva.syhfu() znva_rkr = bf.raiveba.trg('OHC_ZNVA_RKR') be flf.neti[0] fc = fhocebprff.Cbcra([znva_rkr, 'freire'], fgqva=c.fgqbhg, fgqbhg=c.fgqva) c.fgqva.pybfr() c.fgqbhg.pybfr() svanyyl: juvyr 1: # vs jr trg n fvtany juvyr jnvgvat, jr unir gb xrrc jnvgvat, whfg # va pnfr bhe puvyq qbrfa'g qvr. 
gel: erg = c.jnvg() fc.jnvg() oernx rkprcg FvtRkprcgvba, r: ybt('\aohc eonpxhc: %f\a' % r) bf.xvyy(c.cvq, r.fvtahz) erg = 84 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc arjyvare """ b = bcgvbaf.Bcgvbaf('ohc arjyvare', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") e = er.pbzcvyr(e'([\e\a])') ynfgyra = 0 nyy = '' juvyr 1: y = e.fcyvg(nyy, 1) vs yra(y) <= 1: gel: o = bf.ernq(flf.fgqva.svyrab(), 4096) rkprcg XrlobneqVagreehcg: oernx vs abg o: oernx nyy += o ryfr: nffreg(yra(y) == 3) (yvar, fcyvgpune, nyy) = y #fcyvgpune = '\a' flf.fgqbhg.jevgr('%-*f%f' % (ynfgyra, yvar, fcyvgpune)) vs fcyvgpune == '\e': ynfgyra = yra(yvar) ryfr: ynfgyra = 0 flf.fgqbhg.syhfu() vs ynfgyra be nyy: flf.fgqbhg.jevgr('%-*f\a' % (ynfgyra, nyy)) #!/hfe/ova/rai clguba vzcbeg flf sebz ohc vzcbeg bcgvbaf, tvg, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc znetva """ b = bcgvbaf.Bcgvbaf('ohc znetva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") tvg.purpx_ercb_be_qvr() #tvg.vtaber_zvqk = 1 zv = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) ynfg = '\0'*20 ybatzngpu = 0 sbe v va zv: vs v == ynfg: pbagvahr #nffreg(fge(v) >= ynfg) cz = _unfufcyvg.ovgzngpu(ynfg, v) ybatzngpu = znk(ybatzngpu, cz) ynfg = v cevag ybatzngpu bup-0.25/t/testfile2000066400000000000000000004657101225146730500143370ustar00rootroot00000000000000#!/hfe/ova/rai clguba sebz ohc vzcbeg bcgvbaf, qerphefr sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc qerphefr -- k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf d,dhvrg qba'g npghnyyl cevag svyranzrf cebsvyr eha haqre gur clguba cebsvyre """ b = bcgvbaf.Bcgvbaf('ohc qerphefr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar svyranzr rkcrpgrq") vg = qerphefr.erphefvir_qveyvfg(rkgen, bcg.kqri) vs bcg.cebsvyr: vzcbeg pCebsvyr qrs qb_vg(): sbe v va vg: cnff pCebsvyr.eha('qb_vg()') 
ryfr: vs bcg.dhvrg: sbe v va vg: cnff ryfr: sbe (anzr,fg) va vg: cevag anzr vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc fcyvg [-gpo] [-a anzr] [--orapu] [svyranzrf...] -- e,erzbgr= erzbgr ercbfvgbel cngu o,oybof bhgchg n frevrf bs oybo vqf g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) A,abbc qba'g npghnyyl fnir gur qngn naljurer d,dhvrg qba'g cevag cebterff zrffntrf i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) pbcl whfg pbcl vachg gb bhgchg, unfufcyvggvat nybat gur jnl orapu cevag orapuznex gvzvatf gb fgqree znk-cnpx-fvmr= znkvzhz olgrf va n fvatyr cnpx znk-cnpx-bowrpgf= znkvzhz ahzore bs bowrpgf va n fvatyr cnpx snabhg= znkvzhz ahzore bs oybof va n fvatyr gerr """ b = bcgvbaf.Bcgvbaf('ohc fcyvg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr be bcg.abbc be bcg.pbcl): b.sngny("hfr bar be zber bs -o, -g, -p, -a, -A, --pbcl") vs (bcg.abbc be bcg.pbcl) naq (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny('-A vf vapbzcngvoyr jvgu -o, -g, -p, -a') vs bcg.ireobfr >= 2: tvg.ireobfr = bcg.ireobfr - 1 bcg.orapu = 1 vs bcg.znk_cnpx_fvmr: unfufcyvg.znk_cnpx_fvmr = cnefr_ahz(bcg.znk_cnpx_fvmr) vs bcg.znk_cnpx_bowrpgf: unfufcyvg.znk_cnpx_bowrpgf = cnefr_ahz(bcg.znk_cnpx_bowrpgf) vs bcg.snabhg: unfufcyvg.snabhg = cnefr_ahz(bcg.snabhg) vs bcg.oybof: unfufcyvg.snabhg = 0 vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") fgneg_gvzr = gvzr.gvzr() ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.abbc be bcg.pbcl: pyv = j = byqers = Abar ryvs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr 
naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() svyrf = rkgen naq (bcra(sa) sbe sa va rkgen) be [flf.fgqva] vs j: funyvfg = unfufcyvg.fcyvg_gb_funyvfg(j, svyrf) gerr = j.arj_gerr(funyvfg) ryfr: ynfg = 0 sbe (oybo, ovgf) va unfufcyvg.unfufcyvg_vgre(svyrf): unfufcyvg.gbgny_fcyvg += yra(oybo) vs bcg.pbcl: flf.fgqbhg.jevgr(fge(oybo)) zrtf = unfufcyvg.gbgny_fcyvg/1024/1024 vs abg bcg.dhvrg naq ynfg != zrtf: cebterff('%q Zolgrf ernq\e' % zrtf) ynfg = zrtf cebterff('%q Zolgrf ernq, qbar.\a' % zrtf) vs bcg.ireobfr: ybt('\a') vs bcg.oybof: sbe (zbqr,anzr,ova) va funyvfg: cevag ova.rapbqr('urk') vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fcyvg\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') vs j: j.pwba vf punatvat fbzr enaqbz olgrf urer naq gurers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() frpf = gvzr.gvzr() - fgneg_gvzr fvmr = unfufcyvg.gbgny_fcyvg vs bcg.orapu: ybt('\aohc: %.2sxolgrf va %.2s frpf = %.2s xolgrf/frp\a' % (fvmr/1024., frpf, fvmr/1024./frpf)) #!/hfe/ova/rai clguba vzcbeg flf, er, fgehpg, zznc sebz ohc vzcbeg tvg, bcgvbaf sebz ohc.urycref vzcbeg * qrs f_sebz_olgrf(olgrf): pyvfg = [pue(o) sbe o va olgrf] erghea ''.wbva(pyvfg) qrs ercbeg(pbhag): svryqf = ['IzFvmr', 'IzEFF', 'IzQngn', 'IzFgx'] q = {} sbe yvar va bcra('/cebp/frys/fgnghf').ernqyvarf(): y = er.fcyvg(e':\f*', yvar.fgevc(), 1) q[y[0]] = y[1] vs pbhag >= 0: r1 = pbhag svryqf = [q[x] sbe x va svryqf] ryfr: r1 = '' cevag ('%9f ' + ('%10f ' * yra(svryqf))) % ghcyr([r1] + svryqf) flf.fgqbhg.syhfu() bcgfcrp = """ ohc zrzgrfg [-a ryrzragf] [-p plpyrf] -- a,ahzore= ahzore bs bowrpgf cre plpyr p,plpyrf= ahzore bs plpyrf gb eha vtaber-zvqk vtaber .zvqk svyrf, hfr bayl 
.vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zrzgrfg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') tvg.vtaber_zvqk = bcg.vtaber_zvqk tvg.purpx_ercb_be_qvr() z = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) plpyrf = bcg.plpyrf be 100 ahzore = bcg.ahzore be 10000 ercbeg(-1) s = bcra('/qri/henaqbz') n = zznc.zznc(-1, 20) ercbeg(0) sbe p va kenatr(plpyrf): sbe a va kenatr(ahzore): o = s.ernq(3) vs 0: olgrf = yvfg(fgehpg.hacnpx('!OOO', o)) + [0]*17 olgrf[2] &= 0ks0 ova = fgehpg.cnpx('!20f', f_sebz_olgrf(olgrf)) ryfr: n[0:2] = o[0:2] n[2] = pue(beq(o[2]) & 0ks0) ova = fge(n[0:20]) #cevag ova.rapbqr('urk') z.rkvfgf(ova) ercbeg((p+1)*ahzore) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * qrs cevag_abqr(grkg, a): cersvk = '' vs bcg.unfu: cersvk += "%f " % a.unfu.rapbqr('urk') vs fgng.F_VFQVE(a.zbqr): cevag '%f%f/' % (cersvk, grkg) ryvs fgng.F_VFYAX(a.zbqr): cevag '%f%f@' % (cersvk, grkg) ryfr: cevag '%f%f' % (cersvk, grkg) bcgfcrp = """ ohc yf -- f,unfu fubj unfu sbe rnpu svyr """ b = bcgvbaf.Bcgvbaf('ohc yf', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) vs abg rkgen: rkgen = ['/'] erg = 0 sbe q va rkgen: gel: a = gbc.yerfbyir(q) vs fgng.F_VFQVE(a.zbqr): sbe fho va a: cevag_abqr(fho.anzr, fho) ryfr: cevag_abqr(q, a) rkprcg isf.AbqrReebe, r: ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er, fgng, ernqyvar, sazngpu sebz ohc vzcbeg bcgvbaf, tvg, fudhbgr, isf sebz ohc.urycref vzcbeg * qrs abqr_anzr(grkg, a): vs fgng.F_VFQVE(a.zbqr): erghea '%f/' % grkg ryvs fgng.F_VFYAX(a.zbqr): erghea '%f@' % grkg ryfr: erghea '%f' % grkg qrs qb_yf(cngu, a): y = [] vs fgng.F_VFQVE(a.zbqr): sbe fho va a: y.nccraq(abqr_anzr(fho.anzr, fho)) ryfr: y.nccraq(abqr_anzr(cngu, a)) cevag pbyhzangr(y, '') qrs jevgr_gb_svyr(vas, bhgs): sbe oybo va puhaxlernqre(vas): bhgs.jevgr(oybo) qrs vachgvgre(): vs 
bf.vfnggl(flf.fgqva.svyrab()): juvyr 1: gel: lvryq enj_vachg('ohc> ') rkprcg RBSReebe: oernx ryfr: sbe yvar va flf.fgqva: lvryq yvar qrs _pbzcyrgre_trg_fhof(yvar): (dglcr, ynfgjbeq) = fudhbgr.hasvavfurq_jbeq(yvar) (qve,anzr) = bf.cngu.fcyvg(ynfgjbeq) #ybt('\apbzcyrgre: %e %e %e\a' % (dglcr, ynfgjbeq, grkg)) a = cjq.erfbyir(qve) fhof = yvfg(svygre(ynzoqn k: k.anzr.fgnegfjvgu(anzr), a.fhof())) erghea (qve, anzr, dglcr, ynfgjbeq, fhof) _ynfg_yvar = Abar _ynfg_erf = Abar qrs pbzcyrgre(grkg, fgngr): tybony _ynfg_yvar tybony _ynfg_erf gel: yvar = ernqyvar.trg_yvar_ohssre()[:ernqyvar.trg_raqvqk()] vs _ynfg_yvar != yvar: _ynfg_erf = _pbzcyrgre_trg_fhof(yvar) _ynfg_yvar = yvar (qve, anzr, dglcr, ynfgjbeq, fhof) = _ynfg_erf vs fgngr < yra(fhof): fa = fhof[fgngr] fa1 = fa.erfbyir('') # qrers flzyvaxf shyyanzr = bf.cngu.wbva(qve, fa.anzr) vs fgng.F_VFQVE(fa1.zbqr): erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr+'/', grezvangr=Snyfr) ryfr: erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr, grezvangr=Gehr) + ' ' erghea grkg + erg rkprcg Rkprcgvba, r: ybt('\areebe va pbzcyrgvba: %f\a' % r) bcgfcrp = """ ohc sgc """ b = bcgvbaf.Bcgvbaf('ohc sgc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) cjq = gbc vs rkgen: yvarf = rkgen ryfr: ernqyvar.frg_pbzcyrgre_qryvzf(' \g\a\e/') ernqyvar.frg_pbzcyrgre(pbzcyrgre) ernqyvar.cnefr_naq_ovaq("gno: pbzcyrgr") yvarf = vachgvgre() sbe yvar va yvarf: vs abg yvar.fgevc(): pbagvahr jbeqf = [jbeq sbe (jbeqfgneg,jbeq) va fudhbgr.dhbgrfcyvg(yvar)] pzq = jbeqf[0].ybjre() #ybt('rkrphgr: %e %e\a' % (pzq, cnez)) gel: vs pzq == 'yf': sbe cnez va (jbeqf[1:] be ['.']): qb_yf(cnez, cjq.erfbyir(cnez)) ryvs pzq == 'pq': sbe cnez va jbeqf[1:]: cjq = cjq.erfbyir(cnez) ryvs pzq == 'cjq': cevag cjq.shyyanzr() ryvs pzq == 'png': sbe cnez va jbeqf[1:]: jevgr_gb_svyr(cjq.erfbyir(cnez).bcra(), flf.fgqbhg) ryvs pzq == 'trg': vs yra(jbeqf) abg va [2,3]: envfr Rkprcgvba('Hfntr: trg [ybpnyanzr]') eanzr = jbeqf[1] 
(qve,onfr) = bf.cngu.fcyvg(eanzr) yanzr = yra(jbeqf)>2 naq jbeqf[2] be onfr vas = cjq.erfbyir(eanzr).bcra() ybt('Fnivat %e\a' % yanzr) jevgr_gb_svyr(vas, bcra(yanzr, 'jo')) ryvs pzq == 'ztrg': sbe cnez va jbeqf[1:]: (qve,onfr) = bf.cngu.fcyvg(cnez) sbe a va cjq.erfbyir(qve).fhof(): vs sazngpu.sazngpu(a.anzr, onfr): gel: ybt('Fnivat %e\a' % a.anzr) vas = a.bcra() bhgs = bcra(a.anzr, 'jo') jevgr_gb_svyr(vas, bhgs) bhgs.pybfr() rkprcg Rkprcgvba, r: ybt(' reebe: %f\a' % r) ryvs pzq == 'uryc' be pzq == '?': ybt('Pbzznaqf: yf pq cjq png trg ztrg uryc dhvg\a') ryvs pzq == 'dhvg' be pzq == 'rkvg' be pzq == 'olr': oernx ryfr: envfr Rkprcgvba('ab fhpu pbzznaq %e' % pzq) rkprcg Rkprcgvba, r: ybt('reebe: %f\a' % r) #envfr #!/hfe/ova/rai clguba vzcbeg flf, zznc sebz ohc vzcbeg bcgvbaf, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc enaqbz [-F frrq] -- F,frrq= bcgvbany enaqbz ahzore frrq (qrsnhyg 1) s,sbepr cevag enaqbz qngn gb fgqbhg rira vs vg'f n ggl """ b = bcgvbaf.Bcgvbaf('ohc enaqbz', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") gbgny = cnefr_ahz(rkgen[0]) vs bcg.sbepr be (abg bf.vfnggl(1) naq abg ngbv(bf.raiveba.trg('OHC_SBEPR_GGL')) & 1): _unfufcyvg.jevgr_enaqbz(flf.fgqbhg.svyrab(), gbgny, bcg.frrq be 0) ryfr: ybt('reebe: abg jevgvat ovanel qngn gb n grezvany. 
Hfr -s gb sbepr.\a') flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc uryc """ b = bcgvbaf.Bcgvbaf('ohc uryc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) == 0: # gur jenccre cebtenz cebivqrf gur qrsnhyg hfntr fgevat bf.rkrpic(bf.raiveba['OHC_ZNVA_RKR'], ['ohc']) ryvs yra(rkgen) == 1: qbpanzr = (rkgen[0]=='ohc' naq 'ohc' be ('ohc-%f' % rkgen[0])) rkr = flf.neti[0] (rkrcngu, rkrsvyr) = bf.cngu.fcyvg(rkr) znacngu = bf.cngu.wbva(rkrcngu, '../Qbphzragngvba/' + qbpanzr + '.[1-9]') t = tybo.tybo(znacngu) vs t: bf.rkrpic('zna', ['zna', '-y', t[0]]) ryfr: bf.rkrpic('zna', ['zna', qbpanzr]) ryfr: b.sngny("rknpgyl bar pbzznaq anzr rkcrpgrq") #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng, reeab, shfr, er, gvzr, grzcsvyr sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * pynff Fgng(shfr.Fgng): qrs __vavg__(frys): frys.fg_zbqr = 0 frys.fg_vab = 0 frys.fg_qri = 0 frys.fg_ayvax = 0 frys.fg_hvq = 0 frys.fg_tvq = 0 frys.fg_fvmr = 0 frys.fg_ngvzr = 0 frys.fg_zgvzr = 0 frys.fg_pgvzr = 0 frys.fg_oybpxf = 0 frys.fg_oyxfvmr = 0 frys.fg_eqri = 0 pnpur = {} qrs pnpur_trg(gbc, cngu): cnegf = cngu.fcyvg('/') pnpur[('',)] = gbc p = Abar znk = yra(cnegf) #ybt('pnpur: %e\a' % pnpur.xrlf()) sbe v va enatr(znk): cer = cnegf[:znk-v] #ybt('pnpur gelvat: %e\a' % cer) p = pnpur.trg(ghcyr(cer)) vs p: erfg = cnegf[znk-v:] sbe e va erfg: #ybt('erfbyivat %e sebz %e\a' % (e, p.shyyanzr())) p = p.yerfbyir(e) xrl = ghcyr(cer + [e]) #ybt('fnivat: %e\a' % (xrl,)) pnpur[xrl] = p oernx nffreg(p) erghea p pynff OhcSf(shfr.Shfr): qrs __vavg__(frys, gbc): shfr.Shfr.__vavg__(frys) frys.gbc = gbc qrs trgngge(frys, cngu): ybt('--trgngge(%e)\a' % cngu) gel: abqr = pnpur_trg(frys.gbc, cngu) fg = Fgng() fg.fg_zbqr = abqr.zbqr fg.fg_ayvax = abqr.ayvaxf() fg.fg_fvmr = abqr.fvmr() fg.fg_zgvzr = abqr.zgvzr fg.fg_pgvzr = abqr.pgvzr fg.fg_ngvzr = abqr.ngvzr erghea fg rkprcg isf.AbFhpuSvyr: erghea -reeab.RABRAG qrs ernqqve(frys, cngu, bssfrg): 
ybt('--ernqqve(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) lvryq shfr.Qveragel('.') lvryq shfr.Qveragel('..') sbe fho va abqr.fhof(): lvryq shfr.Qveragel(fho.anzr) qrs ernqyvax(frys, cngu): ybt('--ernqyvax(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) erghea abqr.ernqyvax() qrs bcra(frys, cngu, syntf): ybt('--bcra(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) nppzbqr = bf.B_EQBAYL | bf.B_JEBAYL | bf.B_EQJE vs (syntf & nppzbqr) != bf.B_EQBAYL: erghea -reeab.RNPPRF abqr.bcra() qrs eryrnfr(frys, cngu, syntf): ybt('--eryrnfr(%e)\a' % cngu) qrs ernq(frys, cngu, fvmr, bssfrg): ybt('--ernq(%e)\a' % cngu) a = pnpur_trg(frys.gbc, cngu) b = a.bcra() b.frrx(bssfrg) erghea b.ernq(fvmr) vs abg unfngge(shfr, '__irefvba__'): envfr EhagvzrReebe, "lbhe shfr zbqhyr vf gbb byq sbe shfr.__irefvba__" shfr.shfr_clguba_ncv = (0, 2) bcgfcrp = """ ohc shfr [-q] [-s] -- q,qroht vapernfr qroht yriry s,sbertebhaq eha va sbertebhaq """ b = bcgvbaf.Bcgvbaf('ohc shfr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) s = OhcSf(gbc) s.shfr_netf.zbhagcbvag = rkgen[0] vs bcg.qroht: s.shfr_netf.nqq('qroht') vs bcg.sbertebhaq: s.shfr_netf.frgzbq('sbertebhaq') cevag s.zhygvguernqrq s.zhygvguernqrq = Snyfr s.znva() #!/hfe/ova/rai clguba sebz ohc vzcbeg tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ [OHC_QVE=...] ohc vavg [-e ubfg:cngu] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc vavg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") vs bcg.erzbgr: tvg.vavg_ercb() # ybpny ercb tvg.purpx_ercb_be_qvr() pyv = pyvrag.Pyvrag(bcg.erzbgr, perngr=Gehr) pyv.pybfr() ryfr: tvg.vavg_ercb() #!/hfe/ova/rai clguba vzcbeg flf, zngu, fgehpg, tybo sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * CNTR_FVMR=4096 FUN_CRE_CNTR=CNTR_FVMR/200. 
qrs zretr(vqkyvfg, ovgf, gnoyr): pbhag = 0 sbe r va tvg.vqkzretr(vqkyvfg): pbhag += 1 cersvk = tvg.rkgenpg_ovgf(r, ovgf) gnoyr[cersvk] = pbhag lvryq r qrs qb_zvqk(bhgqve, bhgsvyranzr, vasvyranzrf): vs abg bhgsvyranzr: nffreg(bhgqve) fhz = Fun1('\0'.wbva(vasvyranzrf)).urkqvtrfg() bhgsvyranzr = '%f/zvqk-%f.zvqk' % (bhgqve, fhz) vac = [] gbgny = 0 sbe anzr va vasvyranzrf: vk = tvg.CnpxVqk(anzr) vac.nccraq(vk) gbgny += yra(vk) ybt('Zretvat %q vaqrkrf (%q bowrpgf).\a' % (yra(vasvyranzrf), gbgny)) vs (abg bcg.sbepr naq (gbgny < 1024 naq yra(vasvyranzrf) < 3)) \ be (bcg.sbepr naq abg gbgny): ybt('zvqk: abguvat gb qb.\a') erghea cntrf = vag(gbgny/FUN_CRE_CNTR) be 1 ovgf = vag(zngu.prvy(zngu.ybt(cntrf, 2))) ragevrf = 2**ovgf ybt('Gnoyr fvmr: %q (%q ovgf)\a' % (ragevrf*4, ovgf)) gnoyr = [0]*ragevrf gel: bf.hayvax(bhgsvyranzr) rkprcg BFReebe: cnff s = bcra(bhgsvyranzr + '.gzc', 'j+') s.jevgr('ZVQK\0\0\0\2') s.jevgr(fgehpg.cnpx('!V', ovgf)) nffreg(s.gryy() == 12) s.jevgr('\0'*4*ragevrf) sbe r va zretr(vac, ovgf, gnoyr): s.jevgr(r) s.jevgr('\0'.wbva(bf.cngu.onfranzr(c) sbe c va vasvyranzrf)) s.frrx(12) s.jevgr(fgehpg.cnpx('!%qV' % ragevrf, *gnoyr)) s.pybfr() bf.eranzr(bhgsvyranzr + '.gzc', bhgsvyranzr) # guvf vf whfg sbe grfgvat vs 0: c = tvg.CnpxZvqk(bhgsvyranzr) nffreg(yra(c.vqkanzrf) == yra(vasvyranzrf)) cevag c.vqkanzrf nffreg(yra(c) == gbgny) cv = vgre(c) sbe v va zretr(vac, gbgny, ovgf, gnoyr): nffreg(v == cv.arkg()) nffreg(c.rkvfgf(v)) cevag bhgsvyranzr bcgfcrp = """ ohc zvqk [bcgvbaf...] 
-- b,bhgchg= bhgchg zvqk svyranzr (qrsnhyg: nhgb-trarengrq) n,nhgb nhgbzngvpnyyl perngr .zvqk sebz nal havaqrkrq .vqk svyrf s,sbepr nhgbzngvpnyyl perngr .zvqk sebz *nyy* .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zvqk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen naq (bcg.nhgb be bcg.sbepr): b.sngny("lbh pna'g hfr -s/-n naq nyfb cebivqr svyranzrf") tvg.purpx_ercb_be_qvr() vs rkgen: qb_zvqk(tvg.ercb('bowrpgf/cnpx'), bcg.bhgchg, rkgen) ryvs bcg.nhgb be bcg.sbepr: cnguf = [tvg.ercb('bowrpgf/cnpx')] cnguf += tybo.tybo(tvg.ercb('vaqrk-pnpur/*/.')) sbe cngu va cnguf: ybt('zvqk: fpnaavat %f\a' % cngu) vs bcg.sbepr: qb_zvqk(cngu, bcg.bhgchg, tybo.tybo('%f/*.vqk' % cngu)) ryvs bcg.nhgb: z = tvg.CnpxVqkYvfg(cngu) arrqrq = {} sbe cnpx va z.cnpxf: # bayl .vqk svyrf jvgubhg n .zvqk ner bcra vs cnpx.anzr.raqfjvgu('.vqk'): arrqrq[cnpx.anzr] = 1 qry z qb_zvqk(cngu, bcg.bhgchg, arrqrq.xrlf()) ybt('\a') ryfr: b.sngny("lbh zhfg hfr -s be -n be cebivqr vachg svyranzrf") #!/hfe/ova/rai clguba vzcbeg flf, bf, enaqbz sebz ohc vzcbeg bcgvbaf sebz ohc.urycref vzcbeg * qrs enaqoybpx(a): y = [] sbe v va kenatr(a): y.nccraq(pue(enaqbz.enaqenatr(0,256))) erghea ''.wbva(y) bcgfcrp = """ ohc qnzntr [-a pbhag] [-f znkfvmr] [-F frrq] -- JNEAVAT: GUVF PBZZNAQ VF RKGERZRYL QNATREBHF a,ahz= ahzore bs oybpxf gb qnzntr f,fvmr= znkvzhz fvmr bs rnpu qnzntrq oybpx creprag= znkvzhz fvmr bs rnpu qnzntrq oybpx (nf n creprag bs ragver svyr) rdhny fcernq qnzntr rirayl guebhtubhg gur svyr F,frrq= enaqbz ahzore frrq (sbe ercrngnoyr grfgf) """ b = bcgvbaf.Bcgvbaf('ohc qnzntr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg rkgen: b.sngny('svyranzrf rkcrpgrq') vs bcg.frrq != Abar: enaqbz.frrq(bcg.frrq) sbe anzr va rkgen: ybt('Qnzntvat "%f"...\a' % anzr) s = bcra(anzr, 'e+o') fg = bf.sfgng(s.svyrab()) fvmr = fg.fg_fvmr vs bcg.creprag be bcg.fvmr: zf1 = vag(sybng(bcg.creprag be 0)/100.0*fvmr) be fvmr zf2 = bcg.fvmr be fvmr znkfvmr = zva(zf1, zf2) ryfr: znkfvmr = 1 puhaxf = bcg.ahz be 10 
puhaxfvmr = fvmr/puhaxf sbe e va enatr(puhaxf): fm = enaqbz.enaqenatr(1, znkfvmr+1) vs fm > fvmr: fm = fvmr vs bcg.rdhny: bsf = e*puhaxfvmr ryfr: bsf = enaqbz.enaqenatr(0, fvmr - fm + 1) ybt(' %6q olgrf ng %q\a' % (fm, bsf)) s.frrx(bsf) s.jevgr(enaqoybpx(fm)) s.pybfr() #!/hfe/ova/rai clguba vzcbeg flf, fgehpg, zznc sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * fhfcraqrq_j = Abar qrs vavg_qve(pbaa, net): tvg.vavg_ercb(net) ybt('ohc freire: ohcqve vavgvnyvmrq: %e\a' % tvg.ercbqve) pbaa.bx() qrs frg_qve(pbaa, net): tvg.purpx_ercb_be_qvr(net) ybt('ohc freire: ohcqve vf %e\a' % tvg.ercbqve) pbaa.bx() qrs yvfg_vaqrkrf(pbaa, whax): tvg.purpx_ercb_be_qvr() sbe s va bf.yvfgqve(tvg.ercb('bowrpgf/cnpx')): vs s.raqfjvgu('.vqk'): pbaa.jevgr('%f\a' % s) pbaa.bx() qrs fraq_vaqrk(pbaa, anzr): tvg.purpx_ercb_be_qvr() nffreg(anzr.svaq('/') < 0) nffreg(anzr.raqfjvgu('.vqk')) vqk = tvg.CnpxVqk(tvg.ercb('bowrpgf/cnpx/%f' % anzr)) pbaa.jevgr(fgehpg.cnpx('!V', yra(vqk.znc))) pbaa.jevgr(vqk.znc) pbaa.bx() qrs erprvir_bowrpgf(pbaa, whax): tybony fhfcraqrq_j tvg.purpx_ercb_be_qvr() fhttrfgrq = {} vs fhfcraqrq_j: j = fhfcraqrq_j fhfcraqrq_j = Abar ryfr: j = tvg.CnpxJevgre() juvyr 1: af = pbaa.ernq(4) vs abg af: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq yratgu urnqre, tbg RBS\a') a = fgehpg.hacnpx('!V', af)[0] #ybt('rkcrpgvat %q olgrf\a' % a) vs abg a: ybt('ohc freire: erprvirq %q bowrpg%f.\a' % (j.pbhag, j.pbhag!=1 naq "f" be '')) shyycngu = j.pybfr() vs shyycngu: (qve, anzr) = bf.cngu.fcyvg(shyycngu) pbaa.jevgr('%f.vqk\a' % anzr) pbaa.bx() erghea ryvs a == 0kssssssss: ybt('ohc freire: erprvir-bowrpgf fhfcraqrq.\a') fhfcraqrq_j = j pbaa.bx() erghea ohs = pbaa.ernq(a) # bowrpg fvmrf va ohc ner ernfbanoyl fznyy #ybt('ernq %q olgrf\a' % a) vs yra(ohs) < a: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq %q olgrf, tbg %q\a' % (a, yra(ohs))) (glcr, pbagrag) = tvg._qrpbqr_cnpxbow(ohs) fun = tvg.pnyp_unfu(glcr, pbagrag) byqcnpx = j.rkvfgf(fun) # SVKZR: jr bayl fhttrfg n fvatyr 
vaqrk cre plpyr, orpnhfr gur pyvrag # vf pheeragyl qhzo gb qbjaybnq zber guna bar cre plpyr naljnl. # Npghnyyl jr fubhyq svk gur pyvrag, ohg guvf vf n zvabe bcgvzvmngvba # ba gur freire fvqr. vs abg fhttrfgrq naq \ byqcnpx naq (byqcnpx == Gehr be byqcnpx.raqfjvgu('.zvqk')): # SVKZR: jr fubhyqa'g ernyyl unir gb xabj nobhg zvqk svyrf # ng guvf ynlre. Ohg rkvfgf() ba n zvqk qbrfa'g erghea gur # cnpxanzr (fvapr vg qbrfa'g xabj)... cebonoyl jr fubhyq whfg # svk gung qrsvpvrapl bs zvqk svyrf riraghnyyl, nygubhtu vg'yy # znxr gur svyrf ovttre. Guvf zrgubq vf pregnvayl abg irel # rssvpvrag. j.bowpnpur.erserfu(fxvc_zvqk = Gehr) byqcnpx = j.bowpnpur.rkvfgf(fun) ybt('arj fhttrfgvba: %e\a' % byqcnpx) nffreg(byqcnpx) nffreg(byqcnpx != Gehr) nffreg(abg byqcnpx.raqfjvgu('.zvqk')) j.bowpnpur.erserfu(fxvc_zvqk = Snyfr) vs abg fhttrfgrq naq byqcnpx: nffreg(byqcnpx.raqfjvgu('.vqk')) (qve,anzr) = bf.cngu.fcyvg(byqcnpx) vs abg (anzr va fhttrfgrq): ybt("ohc freire: fhttrfgvat vaqrk %f\a" % anzr) pbaa.jevgr('vaqrk %f\a' % anzr) fhttrfgrq[anzr] = 1 ryfr: j._enj_jevgr([ohs]) # ABGERNPURQ qrs ernq_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() e = tvg.ernq_ers(ersanzr) pbaa.jevgr('%f\a' % (e be '').rapbqr('urk')) pbaa.bx() qrs hcqngr_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() arjiny = pbaa.ernqyvar().fgevc() byqiny = pbaa.ernqyvar().fgevc() tvg.hcqngr_ers(ersanzr, arjiny.qrpbqr('urk'), byqiny.qrpbqr('urk')) pbaa.bx() qrs png(pbaa, vq): tvg.purpx_ercb_be_qvr() gel: sbe oybo va tvg.png(vq): pbaa.jevgr(fgehpg.cnpx('!V', yra(oybo))) pbaa.jevgr(oybo) rkprcg XrlReebe, r: ybt('freire: reebe: %f\a' % r) pbaa.jevgr('\0\0\0\0') pbaa.reebe(r) ryfr: pbaa.jevgr('\0\0\0\0') pbaa.bx() bcgfcrp = """ ohc freire """ b = bcgvbaf.Bcgvbaf('ohc freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') ybt('ohc freire: ernqvat sebz fgqva.\a') pbzznaqf = { 'vavg-qve': vavg_qve, 'frg-qve': frg_qve, 'yvfg-vaqrkrf': yvfg_vaqrkrf, 'fraq-vaqrk': fraq_vaqrk, 
'erprvir-bowrpgf': erprvir_bowrpgf, 'ernq-ers': ernq_ers, 'hcqngr-ers': hcqngr_ers, 'png': png, } # SVKZR: guvf cebgbpby vf gbgnyyl ynzr naq abg ng nyy shgher-cebbs. # (Rfcrpvnyyl fvapr jr nobeg pbzcyrgryl nf fbba nf *nalguvat* onq unccraf) pbaa = Pbaa(flf.fgqva, flf.fgqbhg) ye = yvarernqre(pbaa) sbe _yvar va ye: yvar = _yvar.fgevc() vs abg yvar: pbagvahr ybt('ohc freire: pbzznaq: %e\a' % yvar) jbeqf = yvar.fcyvg(' ', 1) pzq = jbeqf[0] erfg = yra(jbeqf)>1 naq jbeqf[1] be '' vs pzq == 'dhvg': oernx ryfr: pzq = pbzznaqf.trg(pzq) vs pzq: pzq(pbaa, erfg) ryfr: envfr Rkprcgvba('haxabja freire pbzznaq: %e\a' % yvar) ybt('ohc freire: qbar\a') #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc wbva [-e ubfg:cngu] [ersf be unfurf...] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc wbva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg rkgen: rkgen = yvarernqre(flf.fgqva) erg = 0 vs bcg.erzbgr: pyv = pyvrag.Pyvrag(bcg.erzbgr) png = pyv.png ryfr: pc = tvg.PngCvcr() png = pc.wbva sbe vq va rkgen: gel: sbe oybo va png(vq): flf.fgqbhg.jevgr(oybo) rkprcg XrlReebe, r: flf.fgqbhg.syhfu() ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, er, reeab, fgng, gvzr, zngu sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, vaqrk, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc fnir [-gp] [-a anzr] -- e,erzbgr= erzbgr ercbfvgbel cngu g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) d,dhvrg qba'g fubj cebterff zrgre fznyyre= bayl onpx hc svyrf fznyyre guna a olgrf """ b = bcgvbaf.Bcgvbaf('ohc fnir', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny("hfr bar be zber bs -g, -p, -a") vs abg rkgen: b.sngny("ab svyranzrf 
tvira") bcg.cebterff = (vfggl naq abg bcg.dhvrg) bcg.fznyyre = cnefr_ahz(bcg.fznyyre be 0) vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() unaqyr_pgey_p() qrs rngfynfu(qve): vs qve.raqfjvgu('/'): erghea qve[:-1] ryfr: erghea qve cnegf = [''] funyvfgf = [[]] qrs _chfu(cneg): nffreg(cneg) cnegf.nccraq(cneg) funyvfgf.nccraq([]) qrs _cbc(sbepr_gerr): nffreg(yra(cnegf) >= 1) cneg = cnegf.cbc() funyvfg = funyvfgf.cbc() gerr = sbepr_gerr be j.arj_gerr(funyvfg) vs funyvfgf: funyvfgf[-1].nccraq(('40000', cneg, gerr)) ryfr: # guvf jnf gur gbcyriry, fb chg vg onpx sbe fnavgl funyvfgf.nccraq(funyvfg) erghea gerr ynfgerznva = Abar qrs cebterff_ercbeg(a): tybony pbhag, fhopbhag, ynfgerznva fhopbhag += a pp = pbhag + fhopbhag cpg = gbgny naq (pp*100.0/gbgny) be 0 abj = gvzr.gvzr() ryncfrq = abj - gfgneg xcf = ryncfrq naq vag(pp/1024./ryncfrq) xcf_senp = 10 ** vag(zngu.ybt(xcf+1, 10) - 1) xcf = vag(xcf/xcf_senp)*xcf_senp vs pp: erznva = ryncfrq*1.0/pp * (gbgny-pp) ryfr: erznva = 0.0 vs (ynfgerznva naq (erznva > ynfgerznva) naq ((erznva - ynfgerznva)/ynfgerznva < 0.05)): erznva = ynfgerznva ryfr: ynfgerznva = erznva ubhef = vag(erznva/60/60) zvaf = vag(erznva/60 - ubhef*60) frpf = vag(erznva - ubhef*60*60 - zvaf*60) vs ryncfrq < 30: erznvafge = '' xcffge = '' ryfr: xcffge = '%qx/f' % xcf vs ubhef: erznvafge = '%qu%qz' % (ubhef, zvaf) ryvs zvaf: erznvafge = '%qz%q' % (zvaf, frpf) ryfr: erznvafge = '%qf' % frpf cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf) %f %f\e' % (cpg, pp/1024, gbgny/1024, spbhag, sgbgny, erznvafge, xcffge)) e = vaqrk.Ernqre(tvg.ercb('ohcvaqrk')) qrs nyernql_fnirq(rag): erghea rag.vf_inyvq() naq 
j.rkvfgf(rag.fun) naq rag.fun qrs jnagerphefr_cer(rag): erghea abg nyernql_fnirq(rag) qrs jnagerphefr_qhevat(rag): erghea abg nyernql_fnirq(rag) be rag.fun_zvffvat() gbgny = sgbgny = 0 vs bcg.cebterff: sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_cer): vs abg (sgbgny % 10024): cebterff('Ernqvat vaqrk: %q\e' % sgbgny) rkvfgf = rag.rkvfgf() unfuinyvq = nyernql_fnirq(rag) rag.frg_fun_zvffvat(abg unfuinyvq) vs abg bcg.fznyyre be rag.fvmr < bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: gbgny += rag.fvmr sgbgny += 1 cebterff('Ernqvat vaqrk: %q, qbar.\a' % sgbgny) unfufcyvg.cebterff_pnyyonpx = cebterff_ercbeg gfgneg = gvzr.gvzr() pbhag = fhopbhag = spbhag = 0 ynfgfxvc_anzr = Abar ynfgqve = '' sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_qhevat): (qve, svyr) = bf.cngu.fcyvg(rag.anzr) rkvfgf = (rag.syntf & vaqrk.VK_RKVFGF) unfuinyvq = nyernql_fnirq(rag) jnfzvffvat = rag.fun_zvffvat() byqfvmr = rag.fvmr vs bcg.ireobfr: vs abg rkvfgf: fgnghf = 'Q' ryvs abg unfuinyvq: vs rag.fun == vaqrk.RZCGL_FUN: fgnghf = 'N' ryfr: fgnghf = 'Z' ryfr: fgnghf = ' ' vs bcg.ireobfr >= 2: ybt('%f %-70f\a' % (fgnghf, rag.anzr)) ryvs abg fgng.F_VFQVE(rag.zbqr) naq ynfgqve != qve: vs abg ynfgqve.fgnegfjvgu(qve): ybt('%f %-70f\a' % (fgnghf, bf.cngu.wbva(qve, ''))) ynfgqve = qve vs bcg.cebterff: cebterff_ercbeg(0) spbhag += 1 vs abg rkvfgf: pbagvahr vs bcg.fznyyre naq rag.fvmr >= bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: nqq_reebe('fxvccvat ynetr svyr "%f"' % rag.anzr) ynfgfxvc_anzr = rag.anzr pbagvahr nffreg(qve.fgnegfjvgu('/')) qvec = qve.fcyvg('/') juvyr cnegf > qvec: _cbc(sbepr_gerr = Abar) vs qve != '/': sbe cneg va qvec[yra(cnegf):]: _chfu(cneg) vs abg svyr: # ab svyranzr cbegvba zrnaf guvf vf n fhoqve. Ohg # fho/cneragqverpgbevrf nyernql unaqyrq va gur cbc/chfu() cneg nobir. 
byqgerr = nyernql_fnirq(rag) # znl or Abar arjgerr = _cbc(sbepr_gerr = byqgerr) vs abg byqgerr: vs ynfgfxvc_anzr naq ynfgfxvc_anzr.fgnegfjvgu(rag.anzr): rag.vainyvqngr() ryfr: rag.inyvqngr(040000, arjgerr) rag.ercnpx() vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr pbagvahr # vg'f abg n qverpgbel vq = Abar vs unfuinyvq: zbqr = '%b' % rag.tvgzbqr vq = rag.fun funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) ryfr: vs fgng.F_VFERT(rag.zbqr): gel: s = unfufcyvg.bcra_abngvzr(rag.anzr) rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = unfufcyvg.fcyvg_gb_oybo_be_gerr(j, [s]) ryfr: vs fgng.F_VFQVE(rag.zbqr): nffreg(0) # unaqyrq nobir ryvs fgng.F_VFYAX(rag.zbqr): gel: ey = bf.ernqyvax(rag.anzr) rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = ('120000', j.arj_oybo(ey)) ryfr: nqq_reebe(Rkprcgvba('fxvccvat fcrpvny svyr "%f"' % rag.anzr)) ynfgfxvc_anzr = rag.anzr vs vq: rag.inyvqngr(vag(zbqr, 8), vq) rag.ercnpx() funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr fhopbhag = 0 vs bcg.cebterff: cpg = gbgny naq pbhag*100.0/gbgny be 100 cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf), qbar. 
\a' % (cpg, pbhag/1024, gbgny/1024, spbhag, sgbgny)) juvyr yra(cnegf) > 1: _cbc(sbepr_gerr = Abar) nffreg(yra(funyvfgf) == 1) gerr = j.arj_gerr(funyvfgf[-1]) vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fnir\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq juvyr fnivat.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc gvpx """ b = bcgvbaf.Bcgvbaf('ohc gvpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") g = gvzr.gvzr() gyrsg = 1 - (g - vag(g)) gvzr.fyrrc(gyrsg) #!/hfe/ova/rai clguba vzcbeg bf, flf, fgng, gvzr sebz ohc vzcbeg bcgvbaf, tvg, vaqrk, qerphefr sebz ohc.urycref vzcbeg * qrs zretr_vaqrkrf(bhg, e1, e2): sbe r va vaqrk.ZretrVgre([e1, e2]): # SVKZR: fubhyqa'g jr erzbir qryrgrq ragevrf riraghnyyl? Jura? 
bhg.nqq_vkragel(r) pynff VgreUrycre: qrs __vavg__(frys, y): frys.v = vgre(y) frys.phe = Abar frys.arkg() qrs arkg(frys): gel: frys.phe = frys.v.arkg() rkprcg FgbcVgrengvba: frys.phe = Abar erghea frys.phe qrs purpx_vaqrk(ernqre): gel: ybt('purpx: purpxvat sbejneq vgrengvba...\a') r = Abar q = {} sbe r va ernqre.sbejneq_vgre(): vs r.puvyqera_a: vs bcg.ireobfr: ybt('%08k+%-4q %e\a' % (r.puvyqera_bsf, r.puvyqera_a, r.anzr)) nffreg(r.puvyqera_bsf) nffreg(r.anzr.raqfjvgu('/')) nffreg(abg q.trg(r.puvyqera_bsf)) q[r.puvyqera_bsf] = 1 vs r.syntf & vaqrk.VK_UNFUINYVQ: nffreg(r.fun != vaqrk.RZCGL_FUN) nffreg(r.tvgzbqr) nffreg(abg r be r.anzr == '/') # ynfg ragel vf *nyjnlf* / ybt('purpx: purpxvat abezny vgrengvba...\a') ynfg = Abar sbe r va ernqre: vs ynfg: nffreg(ynfg > r.anzr) ynfg = r.anzr rkprcg: ybt('vaqrk reebe! ng %e\a' % r) envfr ybt('purpx: cnffrq.\a') qrs hcqngr_vaqrk(gbc): ev = vaqrk.Ernqre(vaqrksvyr) jv = vaqrk.Jevgre(vaqrksvyr) evt = VgreUrycre(ev.vgre(anzr=gbc)) gfgneg = vag(gvzr.gvzr()) unfutra = Abar vs bcg.snxr_inyvq: qrs unfutra(anzr): erghea (0100644, vaqrk.SNXR_FUN) gbgny = 0 sbe (cngu,cfg) va qerphefr.erphefvir_qveyvfg([gbc], kqri=bcg.kqri): vs bcg.ireobfr>=2 be (bcg.ireobfr==1 naq fgng.F_VFQVE(cfg.fg_zbqr)): flf.fgqbhg.jevgr('%f\a' % cngu) flf.fgqbhg.syhfu() cebterff('Vaqrkvat: %q\e' % gbgny) ryvs abg (gbgny % 128): cebterff('Vaqrkvat: %q\e' % gbgny) gbgny += 1 juvyr evt.phe naq evt.phe.anzr > cngu: # qryrgrq cnguf vs evt.phe.rkvfgf(): evt.phe.frg_qryrgrq() evt.phe.ercnpx() evt.arkg() vs evt.phe naq evt.phe.anzr == cngu: # cnguf gung nyernql rkvfgrq vs cfg: evt.phe.sebz_fgng(cfg, gfgneg) vs abg (evt.phe.syntf & vaqrk.VK_UNFUINYVQ): vs unfutra: (evt.phe.tvgzbqr, evt.phe.fun) = unfutra(cngu) evt.phe.syntf |= vaqrk.VK_UNFUINYVQ vs bcg.snxr_vainyvq: evt.phe.vainyvqngr() evt.phe.ercnpx() evt.arkg() ryfr: # arj cnguf jv.nqq(cngu, cfg, unfutra = unfutra) cebterff('Vaqrkvat: %q, qbar.\a' % gbgny) vs ev.rkvfgf(): ev.fnir() jv.syhfu() vs jv.pbhag: je = 
jv.arj_ernqre() vs bcg.purpx: ybt('purpx: orsber zretvat: byqsvyr\a') purpx_vaqrk(ev) ybt('purpx: orsber zretvat: arjsvyr\a') purpx_vaqrk(je) zv = vaqrk.Jevgre(vaqrksvyr) zretr_vaqrkrf(zv, ev, je) ev.pybfr() zv.pybfr() je.pybfr() jv.nobeg() ryfr: jv.pybfr() bcgfcrp = """ ohc vaqrk <-c|z|h> [bcgvbaf...] -- c,cevag cevag gur vaqrk ragevrf sbe gur tvira anzrf (nyfb jbexf jvgu -h) z,zbqvsvrq cevag bayl nqqrq/qryrgrq/zbqvsvrq svyrf (vzcyvrf -c) f,fgnghf cevag rnpu svyranzr jvgu n fgnghf pune (N/Z/Q) (vzcyvrf -c) U,unfu cevag gur unfu sbe rnpu bowrpg arkg gb vgf anzr (vzcyvrf -c) y,ybat cevag zber vasbezngvba nobhg rnpu svyr h,hcqngr (erphefviryl) hcqngr gur vaqrk ragevrf sbe gur tvira svyranzrf k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf snxr-inyvq znex nyy vaqrk ragevrf nf hc-gb-qngr rira vs gurl nera'g snxr-vainyvq znex nyy vaqrk ragevrf nf vainyvq purpx pnershyyl purpx vaqrk svyr vagrtevgl s,vaqrksvyr= gur anzr bs gur vaqrk svyr (qrsnhyg 'vaqrk') i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) """ b = bcgvbaf.Bcgvbaf('ohc vaqrk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg (bcg.zbqvsvrq be bcg['cevag'] be bcg.fgnghf be bcg.hcqngr be bcg.purpx): b.sngny('fhccyl bar be zber bs -c, -f, -z, -h, be --purpx') vs (bcg.snxr_inyvq be bcg.snxr_vainyvq) naq abg bcg.hcqngr: b.sngny('--snxr-{va,}inyvq ner zrnavatyrff jvgubhg -h') vs bcg.snxr_inyvq naq bcg.snxr_vainyvq: b.sngny('--snxr-inyvq vf vapbzcngvoyr jvgu --snxr-vainyvq') tvg.purpx_ercb_be_qvr() vaqrksvyr = bcg.vaqrksvyr be tvg.ercb('ohcvaqrk') unaqyr_pgey_p() vs bcg.purpx: ybt('purpx: fgnegvat vavgvny purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) cnguf = vaqrk.erqhpr_cnguf(rkgen) vs bcg.hcqngr: vs abg cnguf: b.sngny('hcqngr (-h) erdhrfgrq ohg ab cnguf tvira') sbe (ec,cngu) va cnguf: hcqngr_vaqrk(ec) vs bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq: sbe (anzr, rag) va vaqrk.Ernqre(vaqrksvyr).svygre(rkgen be ['']): vs (bcg.zbqvsvrq naq (rag.vf_inyvq() be rag.vf_qryrgrq() be abg 
rag.zbqr)): pbagvahr yvar = '' vs bcg.fgnghf: vs rag.vf_qryrgrq(): yvar += 'Q ' ryvs abg rag.vf_inyvq(): vs rag.fun == vaqrk.RZCGL_FUN: yvar += 'N ' ryfr: yvar += 'Z ' ryfr: yvar += ' ' vs bcg.unfu: yvar += rag.fun.rapbqr('urk') + ' ' vs bcg.ybat: yvar += "%7f %7f " % (bpg(rag.zbqr), bpg(rag.tvgzbqr)) cevag yvar + (anzr be './') vs bcg.purpx naq (bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq be bcg.hcqngr): ybt('purpx: fgnegvat svany purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg sebz ohc vzcbeg bcgvbaf, urycref bcgfcrp = """ ohc eonpxhc-freire -- Guvf pbzznaq vf abg vagraqrq gb or eha znahnyyl. """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc-freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') # trg gur fhopbzznaq'f neti. # Abeznyyl jr pbhyq whfg cnff guvf ba gur pbzznaq yvar, ohg fvapr jr'yy bsgra # or trggvat pnyyrq ba gur bgure raq bs na ffu cvcr, juvpu graqf gb znatyr # neti (ol fraqvat vg ivn gur furyy), guvf jnl vf zhpu fnsre. ohs = flf.fgqva.ernq(4) fm = fgehpg.hacnpx('!V', ohs)[0] nffreg(fm > 0) nffreg(fm < 1000000) ohs = flf.fgqva.ernq(fm) nffreg(yra(ohs) == fm) neti = ohs.fcyvg('\0') # fgqva/fgqbhg ner fhccbfrqyl pbaarpgrq gb 'ohc freire' gung gur pnyyre # fgnegrq sbe hf (bsgra ba gur bgure raq bs na ffu ghaary), fb jr qba'g jnag # gb zvfhfr gurz. Zbir gurz bhg bs gur jnl, gura ercynpr fgqbhg jvgu # n cbvagre gb fgqree va pnfr bhe fhopbzznaq jnagf gb qb fbzrguvat jvgu vg. # # Vg zvtug or avpr gb qb gur fnzr jvgu fgqva, ohg zl rkcrevzragf fubjrq gung # ffu frrzf gb znxr vgf puvyq'f fgqree n ernqnoyr-ohg-arire-ernqf-nalguvat # fbpxrg. Gurl ernyyl fubhyq unir hfrq fuhgqbja(FUHG_JE) ba gur bgure raq # bs vg, ohg cebonoyl qvqa'g. Naljnl, vg'f gbb zrffl, fb yrg'f whfg znxr fher # nalbar ernqvat sebz fgqva vf qvfnccbvagrq. 
# # (Lbh pna'g whfg yrnir fgqva/fgqbhg "abg bcra" ol pybfvat gur svyr # qrfpevcgbef. Gura gur arkg svyr gung bcraf vf nhgbzngvpnyyl nffvtarq 0 be 1, # naq crbcyr *gelvat* gb ernq/jevgr fgqva/fgqbhg trg fperjrq.) bf.qhc2(0, 3) bf.qhc2(1, 4) bf.qhc2(2, 1) va nccebkvzngryl gur fnzr cynprEQBAYL) naq qvfgevo-0) hgvba nf(sq) va gur bevtvany grfg svyrfREFR'] = urycref.ubfganzr() bf.rkrpic(neti[0], neti) flf.rkvg(99) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo, fhocebprff, gvzr sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * cne2_bx = 0 ahyys = bcra('/qri/ahyy') qrs qroht(f): vs bcg.ireobfr: ybt(f) qrs eha(neti): # ng yrnfg va clguba 2.5, hfvat "fgqbhg=2" be "fgqbhg=flf.fgqree" orybj # qbrfa'g npghnyyl jbex, orpnhfr fhocebprff pybfrf sq #2 evtug orsber # rkrpvat sbe fbzr ernfba. Fb jr jbex nebhaq vg ol qhcyvpngvat gur sq # svefg. sq = bf.qhc(2) # pbcl fgqree gel: c = fhocebprff.Cbcra(neti, fgqbhg=sq, pybfr_sqf=Snyfr) erghea c.jnvg() svanyyl: bf.pybfr(sq) qrs cne2_frghc(): tybony cne2_bx ei = 1 gel: c = fhocebprff.Cbcra(['cne2', '--uryc'], fgqbhg=ahyys, fgqree=ahyys, fgqva=ahyys) ei = c.jnvg() rkprcg BFReebe: ybt('sfpx: jneavat: cne2 abg sbhaq; qvfnoyvat erpbirel srngherf.\a') ryfr: cne2_bx = 1 qrs cnei(yiy): vs bcg.ireobfr >= yiy: vs vfggl: erghea [] ryfr: erghea ['-d'] ryfr: erghea ['-dd'] qrs cne2_trarengr(onfr): erghea eha(['cne2', 'perngr', '-a1', '-p200'] + cnei(2) + ['--', onfr, onfr+'.cnpx', onfr+'.vqk']) qrs cne2_irevsl(onfr): erghea eha(['cne2', 'irevsl'] + cnei(3) + ['--', onfr]) qrs cne2_ercnve(onfr): erghea eha(['cne2', 'ercnve'] + cnei(2) + ['--', onfr]) qrs dhvpx_irevsl(onfr): s = bcra(onfr + '.cnpx', 'eo') s.frrx(-20, 2) jnagfhz = s.ernq(20) nffreg(yra(jnagfhz) == 20) s.frrx(0) fhz = Fun1() sbe o va puhaxlernqre(s, bf.sfgng(s.svyrab()).fg_fvmr - 20): fhz.hcqngr(o) vs fhz.qvtrfg() != jnagfhz: envfr InyhrReebe('rkcrpgrq %e, tbg %e' % (jnagfhz.rapbqr('urk'), fhz.urkqvtrfg())) qrs tvg_irevsl(onfr): vs bcg.dhvpx: gel: dhvpx_irevsl(onfr) rkprcg Rkprcgvba, r: 
qroht('reebe: %f\a' % r) erghea 1 erghea 0 ryfr: erghea eha(['tvg', 'irevsl-cnpx', '--', onfr]) qrs qb_cnpx(onfr, ynfg): pbqr = 0 vs cne2_bx naq cne2_rkvfgf naq (bcg.ercnve be abg bcg.trarengr): ierfhyg = cne2_irevsl(onfr) vs ierfhyg != 0: vs bcg.ercnve: eerfhyg = cne2_ercnve(onfr) vs eerfhyg != 0: cevag '%f cne2 ercnve: snvyrq (%q)' % (ynfg, eerfhyg) pbqr = eerfhyg ryfr: cevag '%f cne2 ercnve: fhpprrqrq (0)' % ynfg pbqr = 100 ryfr: cevag '%f cne2 irevsl: snvyrq (%q)' % (ynfg, ierfhyg) pbqr = ierfhyg ryfr: cevag '%f bx' % ynfg ryvs abg bcg.trarengr be (cne2_bx naq abg cne2_rkvfgf): terfhyg = tvg_irevsl(onfr) vs terfhyg != 0: cevag '%f tvg irevsl: snvyrq (%q)' % (ynfg, terfhyg) pbqr = terfhyg ryfr: vs cne2_bx naq bcg.trarengr: cerfhyg = cne2_trarengr(onfr) vs cerfhyg != 0: cevag '%f cne2 perngr: snvyrq (%q)' % (ynfg, cerfhyg) pbqr = cerfhyg ryfr: cevag '%f bx' % ynfg ryfr: cevag '%f bx' % ynfg ryfr: nffreg(bcg.trarengr naq (abg cne2_bx be cne2_rkvfgf)) qroht(' fxvccrq: cne2 svyr nyernql trarengrq.\a') erghea pbqr bcgfcrp = """ ohc sfpx [bcgvbaf...] [svyranzrf...] -- e,ercnve nggrzcg gb ercnve reebef hfvat cne2 (qnatrebhf!) 
t,trarengr trarengr nhgb-ercnve vasbezngvba hfvat cne2 i,ireobfr vapernfr ireobfvgl (pna or hfrq zber guna bapr) dhvpx whfg purpx cnpx fun1fhz, qba'g hfr tvg irevsl-cnpx w,wbof= eha 'a' wbof va cnenyyry cne2-bx vzzrqvngryl erghea 0 vs cne2 vf bx, 1 vs abg qvfnoyr-cne2 vtaber cne2 rira vs vg vf ninvynoyr """ b = bcgvbaf.Bcgvbaf('ohc sfpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) cne2_frghc() vs bcg.cne2_bx: vs cne2_bx: flf.rkvg(0) # 'gehr' va fu ryfr: flf.rkvg(1) vs bcg.qvfnoyr_cne2: cne2_bx = 0 tvg.purpx_ercb_be_qvr() vs abg rkgen: qroht('sfpx: Ab svyranzrf tvira: purpxvat nyy cnpxf.\a') rkgen = tybo.tybo(tvg.ercb('bowrpgf/cnpx/*.cnpx')) pbqr = 0 pbhag = 0 bhgfgnaqvat = {} sbe anzr va rkgen: vs anzr.raqfjvgu('.cnpx'): onfr = anzr[:-5] ryvs anzr.raqfjvgu('.vqk'): onfr = anzr[:-4] ryvs anzr.raqfjvgu('.cne2'): onfr = anzr[:-5] ryvs bf.cngu.rkvfgf(anzr + '.cnpx'): onfr = anzr ryfr: envfr Rkprcgvba('%f vf abg n cnpx svyr!' % anzr) (qve,ynfg) = bf.cngu.fcyvg(onfr) cne2_rkvfgf = bf.cngu.rkvfgf(onfr + '.cne2') vs cne2_rkvfgf naq bf.fgng(onfr + '.cne2').fg_fvmr == 0: cne2_rkvfgf = 0 flf.fgqbhg.syhfu() qroht('sfpx: purpxvat %f (%f)\a' % (ynfg, cne2_bx naq cne2_rkvfgf naq 'cne2' be 'tvg')) vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.wbof: ap = qb_cnpx(onfr, ynfg) pbqr = pbqr be ap pbhag += 1 ryfr: juvyr yra(bhgfgnaqvat) >= bcg.wbof: (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 cvq = bf.sbex() vs cvq: # cnerag bhgfgnaqvat[cvq] = 1 ryfr: # puvyq gel: flf.rkvg(qb_cnpx(onfr, ynfg)) rkprcg Rkprcgvba, r: ybt('rkprcgvba: %e\a' % r) flf.rkvg(99) juvyr yra(bhgfgnaqvat): (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.ireobfr naq vfggl: ybt('sfpx qbar. 
\a') flf.rkvg(pbqr) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg, trgbcg, fhocebprff, fvtany sebz ohc vzcbeg bcgvbaf, ffu sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc eonpxhc vaqrk ... ohc eonpxhc fnir ... ohc eonpxhc fcyvg ... """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc', bcgfcrp, bcgshap=trgbcg.trgbcg) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) < 2: b.sngny('nethzragf rkcrpgrq') pynff FvtRkprcgvba(Rkprcgvba): qrs __vavg__(frys, fvtahz): frys.fvtahz = fvtahz Rkprcgvba.__vavg__(frys, 'fvtany %q erprvirq' % fvtahz) qrs unaqyre(fvtahz, senzr): envfr FvtRkprcgvba(fvtahz) fvtany.fvtany(fvtany.FVTGREZ, unaqyre) fvtany.fvtany(fvtany.FVTVAG, unaqyre) fc = Abar c = Abar erg = 99 gel: ubfganzr = rkgen[0] neti = rkgen[1:] c = ffu.pbaarpg(ubfganzr, 'eonpxhc-freire') netif = '\0'.wbva(['ohc'] + neti) c.fgqva.jevgr(fgehpg.cnpx('!V', yra(netif)) + netif) c.fgqva.syhfu() znva_rkr = bf.raiveba.trg('OHC_ZNVA_RKR') be flf.neti[0] fc = fhocebprff.Cbcra([znva_rkr, 'freire'], fgqva=c.fgqbhg, fgqbhg=c.fgqva) c.fgqva.pybfr() c.fgqbhg.pybfr() svanyyl: juvyr 1: # vs jr trg n fvtany juvyr jnvgvat, jr unir gb xrrc jnvgvat, whfg # va pnfr bhe puvyq qbrfa'g qvr. 
gel: erg = c.jnvg() fc.jnvg() oernx rkprcg FvtRkprcgvba, r: ybt('\aohc eonpxhc: %f\a' % r) bf.xvyy(c.cvq, r.fvtahz) erg = 84 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc arjyvare """ b = bcgvbaf.Bcgvbaf('ohc arjyvare', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") e = er.pbzcvyr(e'([\e\a])') ynfgyra = 0 nyy = '' juvyr 1: y = e.fcyvg(nyy, 1) vs yra(y) <= 1: gel: o = bf.ernq(flf.fgqva.svyrab(), 4096) rkprcg XrlobneqVagreehcg: oernx vs abg o: oernx nyy += o ryfr: nffreg(yra(y) == 3) (yvar, fcyvgpune, nyy) = y #fcyvgpune = '\a' flf.fgqbhg.jevgr('%-*f%f' % (ynfgyra, yvar, fcyvgpune)) vs fcyvgpune == '\e': ynfgyra = yra(yvar) ryfr: ynfgyra = 0 flf.fgqbhg.syhfu() vs ynfgyra be nyy: flf.fgqbhg.jevgr('%-*f\a' % (ynfgyra, nyy)) #!/hfe/ova/rai clguba vzcbeg flf sebz ohc vzcbeg bcgvbaf, tvg, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc znetva """ b = bcgvbaf.Bcgvbaf('ohc znetva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") tvg.purpx_ercb_be_qvr() #tvg.vtaber_zvqk = 1 zv = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) ynfg = '\0'*20 ybatzngpu = 0 sbe v va zv: vs v == ynfg: pbagvahr #nffreg(fge(v) >= ynfg) cz = _unfufcyvg.ovgzngpu(ynfg, v) ybatzngpu = znk(ybatzngpu, cz) ynfg = v cevag ybatzngpu #!/hfe/ova/rai clguba sebz ohc vzcbeg bcgvbaf, qerphefr sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc qerphefr -- k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf d,dhvrg qba'g npghnyyl cevag svyranzrf cebsvyr eha haqre gur clguba cebsvyre """ b = bcgvbaf.Bcgvbaf('ohc qerphefr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar svyranzr rkcrpgrq") vg = qerphefr.erphefvir_qveyvfg(rkgen, bcg.kqri) vs bcg.cebsvyr: vzcbeg pCebsvyr qrs qb_vg(): sbe v va vg: cnff pCebsvyr.eha('qb_vg()') ryfr: vs bcg.dhvrg: sbe v va vg: cnff ryfr: sbe (anzr,fg) va vg: cevag anzr vs fnirq_reebef: 
ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc fcyvg [-gpo] [-a anzr] [--orapu] [svyranzrf...] -- e,erzbgr= erzbgr ercbfvgbel cngu o,oybof bhgchg n frevrf bs oybo vqf g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) A,abbc qba'g npghnyyl fnir gur qngn naljurer d,dhvrg qba'g cevag cebterff zrffntrf i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) pbcl whfg pbcl vachg gb bhgchg, unfufcyvggvat nybat gur jnl orapu cevag orapuznex gvzvatf gb fgqree znk-cnpx-fvmr= znkvzhz olgrf va n fvatyr cnpx znk-cnpx-bowrpgf= znkvzhz ahzore bs bowrpgf va n fvatyr cnpx snabhg= znkvzhz ahzore bs oybof va n fvatyr gerr """ b = bcgvbaf.Bcgvbaf('ohc fcyvg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr be bcg.abbc be bcg.pbcl): b.sngny("hfr bar be zber bs -o, -g, -p, -a, -A, --pbcl") vs (bcg.abbc be bcg.pbcl) naq (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny('-A vf vapbzcngvoyr jvgu -o, -g, -p, -a') vs bcg.ireobfr >= 2: tvg.ireobfr = bcg.ireobfr - 1 bcg.orapu = 1 vs bcg.znk_cnpx_fvmr: unfufcyvg.znk_cnpx_fvmr = cnefr_ahz(bcg.znk_cnpx_fvmr) vs bcg.znk_cnpx_bowrpgf: unfufcyvg.znk_cnpx_bowrpgf = cnefr_ahz(bcg.znk_cnpx_bowrpgf) vs bcg.snabhg: unfufcyvg.snabhg = cnefr_ahz(bcg.snabhg) vs bcg.oybof: unfufcyvg.snabhg = 0 vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") fgneg_gvzr = gvzr.gvzr() ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.abbc be bcg.pbcl: pyv = j = byqers = Abar ryvs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr 
naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() svyrf = rkgen naq (bcra(sa) sbe sa va rkgen) be [flf.fgqva] vs j: funyvfg = unfufcyvg.fcyvg_gb_funyvfg(j, svyrf) gerr = j.arj_gerr(funyvfg) ryfr: ynfg = 0 sbe (oybo, ovgf) va unfufcyvg.unfufcyvg_vgre(svyrf): unfufcyvg.gbgny_fcyvg += yra(oybo) vs bcg.pbcl: flf.fgqbhg.jevgr(fge(oybo)) zrtf = unfufcyvg.gbgny_fcyvg/1024/1024 vs abg bcg.dhvrg naq ynfg != zrtf: cebterff('%q Zolgrf ernq\e' % zrtf) ynfg = zrtf cebterff('%q Zolgrf ernq, qbar.\a' % zrtf) vs bcg.ireobfr: ybt('\a') vs bcg.oybof: sbe (zbqr,anzr,ova) va funyvfg: cevag ova.rapbqr('urk') vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fcyvg\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') vs j: j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() frpf = gvzr.gvzr() - fgneg_gvzr fvmr = unfufcyvg.gbgny_fcyvg vs bcg.orapu: ybt('\aohc: %.2sxolgrf va %.2s frpf = %.2s xolgrf/frp\a' % (fvmr/1024., frpf, fvmr/1024./frpf)) #!/hfe/ova/rai clguba vzcbeg flf, er, fgehpg, zznc sebz ohc vzcbeg tvg, bcgvbaf sebz ohc.urycref vzcbeg * qrs f_sebz_olgrf(olgrf): pyvfg = [pue(o) sbe o va olgrf] erghea ''.wbva(pyvfg) qrs ercbeg(pbhag): svryqf = ['IzFvmr', 'IzEFF', 'IzQngn', 'IzFgx'] q = {} sbe yvar va bcra('/cebp/frys/fgnghf').ernqyvarf(): y = er.fcyvg(e':\f*', yvar.fgevc(), 1) q[y[0]] = y[1] vs pbhag >= 0: r1 = pbhag svryqf = [q[x] sbe x va svryqf] ryfr: r1 = '' cevag ('%9f ' + ('%10f ' * yra(svryqf))) % ghcyr([r1] + svryqf) flf.fgqbhg.syhfu() bcgfcrp = """ ohc zrzgrfg [-a ryrzragf] [-p plpyrf] -- a,ahzore= ahzore bs bowrpgf cre plpyr p,plpyrf= ahzore bs plpyrf gb eha vtaber-zvqk vtaber .zvqk svyrf, hfr bayl .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zrzgrfg', bcgfcrp) (bcg, syntf, rkgen) = 
b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') tvg.vtaber_zvqk = bcg.vtaber_zvqk tvg.purpx_ercb_be_qvr() z = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) plpyrf = bcg.plpyrf be 100 ahzore = bcg.ahzore be 10000 ercbeg(-1) s = bcra('/qri/henaqbz') n = zznc.zznc(-1, 20) ercbeg(0) sbe p va kenatr(plpyrf): sbe a va kenatr(ahzore): o = s.ernq(3) vs 0: olgrf = yvfg(fgehpg.hacnpx('!OOO', o)) + [0]*17 olgrf[2] &= 0ks0 ova = fgehpg.cnpx('!20f', f_sebz_olgrf(olgrf)) ryfr: n[0:2] = o[0:2] n[2] = pue(beq(o[2]) & 0ks0) ova = fge(n[0:20]) #cevag ova.rapbqr('urk') z.rkvfgf(ova) ercbeg((p+1)*ahzore) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * qrs cevag_abqr(grkg, a): cersvk = '' vs bcg.unfu: cersvk += "%f " % a.unfu.rapbqr('urk') vs fgng.F_VFQVE(a.zbqr): cevag '%f%f/' % (cersvk, grkg) ryvs fgng.F_VFYAX(a.zbqr): cevag '%f%f@' % (cersvk, grkg) ryfr: cevag '%f%f' % (cersvk, grkg) bcgfcrp = """ ohc yf -- f,unfu fubj unfu sbe rnpu svyr """ b = bcgvbaf.Bcgvbaf('ohc yf', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) vs abg rkgen: rkgen = ['/'] erg = 0 sbe q va rkgen: gel: a = gbc.yerfbyir(q) vs fgng.F_VFQVE(a.zbqr): sbe fho va a: cevag_abqr(fho.anzr, fho) ryfr: cevag_abqr(q, a) rkprcg isf.AbqrReebe, r: ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er, fgng, ernqyvar, sazngpu sebz ohc vzcbeg bcgvbaf, tvg, fudhbgr, isf sebz ohc.urycref vzcbeg * qrs abqr_anzr(grkg, a): vs fgng.F_VFQVE(a.zbqr): erghea '%f/' % grkg ryvs fgng.F_VFYAX(a.zbqr): erghea '%f@' % grkg ryfr: erghea '%f' % grkg qrs qb_yf(cngu, a): y = [] vs fgng.F_VFQVE(a.zbqr): sbe fho va a: y.nccraq(abqr_anzr(fho.anzr, fho)) ryfr: y.nccraq(abqr_anzr(cngu, a)) cevag pbyhzangr(y, '') qrs jevgr_gb_svyr(vas, bhgs): sbe oybo va puhaxlernqre(vas): bhgs.jevgr(oybo) qrs vachgvgre(): vs bf.vfnggl(flf.fgqva.svyrab()): juvyr 1: gel: lvryq enj_vachg('ohc> ') rkprcg RBSReebe: oernx 
ryfr: sbe yvar va flf.fgqva: lvryq yvar qrs _pbzcyrgre_trg_fhof(yvar): (dglcr, ynfgjbeq) = fudhbgr.hasvavfurq_jbeq(yvar) (qve,anzr) = bf.cngu.fcyvg(ynfgjbeq) #ybt('\apbzcyrgre: %e %e %e\a' % (dglcr, ynfgjbeq, grkg)) a = cjq.erfbyir(qve) fhof = yvfg(svygre(ynzoqn k: k.anzr.fgnegfjvgu(anzr), a.fhof())) erghea (qve, anzr, dglcr, ynfgjbeq, fhof) _ynfg_yvar = Abar _ynfg_erf = Abar qrs pbzcyrgre(grkg, fgngr): tybony _ynfg_yvar tybony _ynfg_erf gel: yvar = ernqyvar.trg_yvar_ohssre()[:ernqyvar.trg_raqvqk()] vs _ynfg_yvar != yvar: _ynfg_erf = _pbzcyrgre_trg_fhof(yvar) _ynfg_yvar = yvar (qve, anzr, dglcr, ynfgjbeq, fhof) = _ynfg_erf vs fgngr < yra(fhof): fa = fhof[fgngr] fa1 = fa.erfbyir('') # qrers flzyvaxf shyyanzr = bf.cngu.wbva(qve, fa.anzr) vs fgng.F_VFQVE(fa1.zbqr): erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr+'/', grezvangr=Snyfr) ryfr: erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr, grezvangr=Gehr) + ' ' erghea grkg + erg rkprcg Rkprcgvba, r: ybt('\areebe va pbzcyrgvba: %f\a' % r) bcgfcrp = """ ohc sgc """ b = bcgvbaf.Bcgvbaf('ohc sgc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) cjq = gbc vs rkgen: yvarf = rkgen ryfr: ernqyvar.frg_pbzcyrgre_qryvzf(' \g\a\e/') ernqyvar.frg_pbzcyrgre(pbzcyrgre) ernqyvar.cnefr_naq_ovaq("gno: pbzcyrgr") yvarf = vachgvgre() sbe yvar va yvarf: vs abg yvar.fgevc(): pbagvahr jbeqf = [jbeq sbe (jbeqfgneg,jbeq) va fudhbgr.dhbgrfcyvg(yvar)] pzq = jbeqf[0].ybjre() #ybt('rkrphgr: %e %e\a' % (pzq, cnez)) gel: vs pzq == 'yf': sbe cnez va (jbeqf[1:] be ['.']): qb_yf(cnez, cjq.erfbyir(cnez)) ryvs pzq == 'pq': sbe cnez va jbeqf[1:]: cjq = cjq.erfbyir(cnez) ryvs pzq == 'cjq': cevag cjq.shyyanzr() ryvs pzq == 'png': sbe cnez va jbeqf[1:]: tvir be gnxr n ovgerfbyir(cnez).bcra(), flf.fgqbhg) ryvs pzq == 'trg': vs yra(jbeqf) abg va [2,3]: envfr Rkprcgvba('Hfntr: trg [ybpnyanzr]') eanzr = jbeqf[1] (qve,onfr) = bf.cngu.fcyvg(eanzr) yanzr = yra(jbeqf)>2 naq jbeqf[2] be onfr vas = 
cjq.erfbyir(eanzr).bcra() ybt('Fnivat %e\a' % yanzr) jevgr_gb_svyr(vas, bcra(yanzr, 'jo')) ryvs pzq == 'ztrg': sbe cnez va jbeqf[1:]: (qve,onfr) = bf.cngu.fcyvg(cnez) sbe a va cjq.erfbyir(qve).fhof(): vs sazngpu.sazngpu(a.anzr, onfr): gel: ybt('Fnivat %e\a' % a.anzr) vas = a.bcra() bhgs = bcra(a.anzr, 'jo') jevgr_gb_svyr(vas, bhgs) bhgs.pybfr() rkprcg Rkprcgvba, r: ybt(' reebe: %f\a' % r) ryvs pzq == 'uryc' be pzq == '?': ybt('Pbzznaqf: yf pq cjq png trg ztrg uryc dhvg\a') ryvs pzq == 'dhvg' be pzq == 'rkvg' be pzq == 'olr': oernx ryfr: envfr Rkprcgvba('ab fhpu pbzznaq %e' % pzq) rkprcg Rkprcgvba, r: ybt('reebe: %f\a' % r) #envfr #!/hfe/ova/rai clguba vzcbeg flf, zznc sebz ohc vzcbeg bcgvbaf, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc enaqbz [-F frrq] -- F,frrq= bcgvbany enaqbz ahzore frrq (qrsnhyg 1) s,sbepr cevag enaqbz qngn gb fgqbhg rira vs vg'f n ggl """ b = bcgvbaf.Bcgvbaf('ohc enaqbz', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") gbgny = cnefr_ahz(rkgen[0]) vs bcg.sbepr be (abg bf.vfnggl(1) naq abg ngbv(bf.raiveba.trg('OHC_SBEPR_GGL')) & 1): _unfufcyvg.jevgr_enaqbz(flf.fgqbhg.svyrab(), gbgny, bcg.frrq be 0) ryfr: ybt('reebe: abg jevgvat ovanel qngn gb n grezvany. 
Hfr -s gb sbepr.\a') flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc uryc """ b = bcgvbaf.Bcgvbaf('ohc uryc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) == 0: # gur jenccre cebtenz cebivqrf gur qrsnhyg hfntr fgevat bf.rkrpic(bf.raiveba['OHC_ZNVA_RKR'], ['ohc']) ryvs yra(rkgen) == 1: qbpanzr = (rkgen[0]=='ohc' naq 'ohc' be ('ohc-%f' % rkgen[0])) rkr = flf.neti[0] (rkrcngu, rkrsvyr) = bf.cngu.fcyvg(rkr) znacngu = bf.cngu.wbva(rkrcngu, '../Qbphzragngvba/' + qbpanzr + '.[1-9]') t = tybo.tybo(znacngu) vs t: bf.rkrpic('zna', ['zna', '-y', t[0]]) ryfr: bf.rkrpic('zna', ['zna', qbpanzr]) ryfr: b.sngny("rknpgyl bar pbzznaq anzr rkcrpgrq") #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng, reeab, shfr, er, gvzr, grzcsvyr sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * pynff Fgng(shfr.Fgng): qrs __vavg__(frys): frys.fg_zbqr = 0 frys.fg_vab = 0 frys.fg_qri = 0 frys.fg_ayvax = 0 frys.fg_hvq = 0 frys.fg_tvq = 0 frys.fg_fvmr = 0 frys.fg_ngvzr = 0 frys.fg_zgvzr = 0 frys.fg_pgvzr = 0 frys.fg_oybpxf = 0 frys.fg_oyxfvmr = 0 frys.fg_eqri = 0 pnpur = {} qrs pnpur_trg(gbc, cngu): cnegf = cngu.fcyvg('/') pnpur[('',)] = gbc p = Abar znk = yra(cnegf) #ybt('pnpur: %e\a' % pnpur.xrlf()) sbe v va enatr(znk): cer = cnegf[:znk-v] #ybt('pnpur gelvat: %e\a' % cer) p = pnpur.trg(ghcyr(cer)) vs p: erfg = cnegf[znk-v:] sbe e va erfg: #ybt('erfbyivat %e sebz %e\a' % (e, p.shyyanzr())) p = p.yerfbyir(e) xrl = ghcyr(cer + [e]) #ybt('fnivat: %e\a' % (xrl,)) pnpur[xrl] = p oernx nffreg(p) erghea p pynff OhcSf(shfr.Shfr): qrs __vavg__(frys, gbc): shfr.Shfr.__vavg__(frys) frys.gbc = gbc qrs trgngge(frys, cngu): ybt('--trgngge(%e)\a' % cngu) gel: abqr = pnpur_trg(frys.gbc, cngu) fg = Fgng() fg.fg_zbqr = abqr.zbqr fg.fg_ayvax = abqr.ayvaxf() fg.fg_fvmr = abqr.fvmr() fg.fg_zgvzr = abqr.zgvzr fg.fg_pgvzr = abqr.pgvzr fg.fg_ngvzr = abqr.ngvzr erghea fg rkprcg isf.AbFhpuSvyr: erghea -reeab.RABRAG qrs ernqqve(frys, cngu, bssfrg): 
ybt('--ernqqve(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) lvryq shfr.Qveragel('.') lvryq shfr.Qveragel('..') sbe fho va abqr.fhof(): lvryq shfr.Qveragel(fho.anzr) qrs ernqyvax(frys, cngu): ybt('--ernqyvax(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) erghea abqr.ernqyvax() qrs bcra(frys, cngu, syntf): ybt('--bcra(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) nppzbqr = bf.B_EQBAYL | bf.B_JEBAYL | bf.B_EQJE vs (syntf & nppzbqr) != bf.B_EQBAYL: erghea -reeab.RNPPRF abqr.bcra() qrs eryrnfr(frys, cngu, syntf): ybt('--eryrnfr(%e)\a' % cngu) qrs ernq(frys, cngu, fvmr, bssfrg): ybt('--ernq(%e)\a' % cngu) a = pnpur_trg(frys.gbc, cngu) b = a.bcra() b.frrx(bssfrg) erghea b.ernq(fvmr) vs abg unfngge(shfr, '__irefvba__'): envfr EhagvzrReebe, "lbhe shfr zbqhyr vf gbb byq sbe shfr.__irefvba__" shfr.shfr_clguba_ncv = (0, 2) bcgfcrp = """ ohc shfr [-q] [-s] -- q,qroht vapernfr qroht yriry s,sbertebhaq eha va sbertebhaq """ b = bcgvbaf.Bcgvbaf('ohc shfr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) s = OhcSf(gbc) s.shfr_netf.zbhagcbvag = rkgen[0] vs bcg.qroht: s.shfr_netf.nqq('qroht') vs bcg.sbertebhaq: s.shfr_netf.frgzbq('sbertebhaq') cevag s.zhygvguernqrq s.zhygvguernqrq = Snyfr s.znva() #!/hfe/ova/rai clguba sebz ohc vzcbeg tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ [OHC_QVE=...] ohc vavg [-e ubfg:cngu] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc vavg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") vs bcg.erzbgr: tvg.vavg_ercb() # ybpny ercb tvg.purpx_ercb_be_qvr() pyv = pyvrag.Pyvrag(bcg.erzbgr, perngr=Gehr) pyv.pybfr() ryfr: tvg.vavg_ercb() #!/hfe/ova/rai clguba vzcbeg flf, zngu, fgehpg, tybo sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * CNTR_FVMR=4096 FUN_CRE_CNTR=CNTR_FVMR/200. 
qrs zretr(vqkyvfg, ovgf, gnoyr): pbhag = 0 sbe r va tvg.vqkzretr(vqkyvfg): pbhag += 1 cersvk = tvg.rkgenpg_ovgf(r, ovgf) gnoyr[cersvk] = pbhag lvryq r qrs qb_zvqk(bhgqve, bhgsvyranzr, vasvyranzrf): vs abg bhgsvyranzr: nffreg(bhgqve) fhz = Fun1('\0'.wbva(vasvyranzrf)).urkqvtrfg() bhgsvyranzr = '%f/zvqk-%f.zvqk' % (bhgqve, fhz) vac = [] gbgny = 0 sbe anzr va vasvyranzrf: vk = tvg.CnpxVqk(anzr) vac.nccraq(vk) gbgny += yra(vk) ybt('Zretvat %q vaqrkrf (%q bowrpgf).\a' % (yra(vasvyranzrf), gbgny)) vs (abg bcg.sbepr naq (gbgny < 1024 naq yra(vasvyranzrf) < 3)) \ be (bcg.sbepr naq abg gbgny): ybt('zvqk: abguvat gb qb.\a') erghea cntrf = vag(gbgny/FUN_CRE_CNTR) be 1 ovgf = vag(zngu.prvy(zngu.ybt(cntrf, 2))) ragevrf = 2**ovgf ybt('Gnoyr fvmr: %q (%q ovgf)\a' % (ragevrf*4, ovgf)) gnoyr = [0]*ragevrf gel: bf.hayvax(bhgsvyranzr) rkprcg BFReebe: cnff s = bcra(bhgsvyranzr + '.gzc', 'j+') s.jevgr('ZVQK\0\0\0\2') s.jevgr(fgehpg.cnpx('!V', ovgf)) nffreg(s.gryy() == 12) s.jevgr('\0'*4*ragevrf) sbe r va zretr(vac, ovgf, gnoyr): s.jevgr(r) s.jevgr('\0'.wbva(bf.cngu.onfranzr(c) sbe c va vasvyranzrf)) s.frrx(12) s.jevgr(fgehpg.cnpx('!%qV' % ragevrf, *gnoyr)) s.pybfr() bf.eranzr(bhgsvyranzr + '.gzc', bhgsvyranzr) # guvf vf whfg sbe grfgvat vs 0: c = tvg.CnpxZvqk(bhgsvyranzr) nffreg(yra(c.vqkanzrf) == yra(vasvyranzrf)) cevag c.vqkanzrf nffreg(yra(c) == gbgny) cv = vgre(c) sbe v va zretr(vac, gbgny, ovgf, gnoyr): nffreg(v == cv.arkg()) nffreg(c.rkvfgf(v)) cevag bhgsvyranzr bcgfcrp = """ ohc zvqk [bcgvbaf...] 
-- b,bhgchg= bhgchg zvqk svyranzr (qrsnhyg: nhgb-trarengrq) n,nhgb nhgbzngvpnyyl perngr .zvqk sebz nal havaqrkrq .vqk svyrf s,sbepr nhgbzngvpnyyl perngr .zvqk sebz *nyy* .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zvqk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen naq (bcg.nhgb be bcg.sbepr): b.sngny("lbh pna'g hfr -s/-n naq nyfb cebivqr svyranzrf") tvg.purpx_ercb_be_qvr() vs rkgen: qb_zvqk(tvg.ercb('bowrpgf/cnpx'), bcg.bhgchg, rkgen) ryvs bcg.nhgb be bcg.sbepr: cnguf = [tvg.ercb('bowrpgf/cnpx')] cnguf += tybo.tybo(tvg.ercb('vaqrk-pnpur/*/.')) sbe cngu va cnguf: ybt('zvqk: fpnaavat %f\a' % cngu) vs bcg.sbepr: qb_zvqk(cngu, bcg.bhgchg, tybo.tybo('%f/*.vqk' % cngu)) ryvs bcg.nhgb: z = tvg.CnpxVqkYvfg(cngu) arrqrq = {} sbe cnpx va z.cnpxf: # bayl .vqk svyrf jvgubhg n .zvqk ner bcra vs cnpx.anzr.raqfjvgu('.vqk'): arrqrq[cnpx.anzr] = 1 qry z qb_zvqk(cngu, bcg.bhgchg, arrqrq.xrlf()) ybt('\a') ryfr: b.sngny("lbh zhfg hfr -s be -n be cebivqr vachg svyranzrf") #!/hfe/ova/rai clguba vzcbeg flf, bf, enaqbz sebz ohc vzcbeg bcgvbaf sebz ohc.urycref vzcbeg * qrs enaqoybpx(a): y = [] sbe v va kenatr(a): y.nccraq(pue(enaqbz.enaqenatr(0,256))) erghea ''.wbva(y) bcgfcrp = """ ohc qnzntr [-a pbhag] [-f znkfvmr] [-F frrq] -- JNEAVAT: GUVF PBZZNAQ VF RKGERZRYL QNATREBHF a,ahz= ahzore bs oybpxf gb qnzntr f,fvmr= znkvzhz fvmr bs rnpu qnzntrq oybpx creprag= znkvzhz fvmr bs rnpu qnzntrq oybpx (nf n creprag bs ragver svyr) rdhny fcernq qnzntr rirayl guebhtubhg gur svyr F,frrq= enaqbz ahzore frrq (sbe ercrngnoyr grfgf) """ b = bcgvbaf.Bcgvbaf('ohc qnzntr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg rkgen: b.sngny('svyranzrf rkcrpgrq') vs bcg.frrq != Abar: enaqbz.frrq(bcg.frrq) sbe anzr va rkgen: ybt('Qnzntvat "%f"...\a' % anzr) s = bcra(anzr, 'e+o') fg = bf.sfgng(s.svyrab()) fvmr = fg.fg_fvmr vs bcg.creprag be bcg.fvmr: zf1 = vag(sybng(bcg.creprag be 0)/100.0*fvmr) be fvmr zf2 = bcg.fvmr be fvmr znkfvmr = zva(zf1, zf2) ryfr: znkfvmr = 1 puhaxf = bcg.ahz be 10 
puhaxfvmr = fvmr/puhaxf sbe e va enatr(puhaxf): fm = enaqbz.enaqenatr(1, znkfvmr+1) vs fm > fvmr: fm = fvmr vs bcg.rdhny: bsf = e*puhaxfvmr ryfr: bsf = enaqbz.enaqenatr(0, fvmr - fm + 1) ybt(' %6q olgrf ng %q\a' % (fm, bsf)) s.frrx(bsf) s.jevgr(enaqoybpx(fm)) s.pybfr() #!/hfe/ova/rai clguba vzcbeg flf, fgehpg, zznc sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * fhfcraqrq_j = Abar qrs vavg_qve(pbaa, net): tvg.vavg_ercb(net) ybt('ohc freire: ohcqve vavgvnyvmrq: %e\a' % tvg.ercbqve) pbaa.bx() qrs frg_qve(pbaa, net): tvg.purpx_ercb_be_qvr(net) ybt('ohc freire: ohcqve vf %e\a' % tvg.ercbqve) pbaa.bx() qrs yvfg_vaqrkrf(pbaa, whax): tvg.purpx_ercb_be_qvr() sbe s va bf.yvfgqve(tvg.ercb('bowrpgf/cnpx')): vs s.raqfjvgu('.vqk'): pbaa.jevgr('%f\a' % s) pbaa.bx() qrs fraq_vaqrk(pbaa, anzr): tvg.purpx_ercb_be_qvr() nffreg(anzr.svaq('/') < 0) nffreg(anzr.raqfjvgu('.vqk')) vqk = tvg.CnpxVqk(tvg.ercb('bowrpgf/cnpx/%f' % anzr)) pbaa.jevgr(fgehpg.cnpx('!V', yra(vqk.znc))) pbaa.jevgr(vqk.znc) pbaa.bx() qrs erprvir_bowrpgf(pbaa, whax): tybony fhfcraqrq_j tvg.purpx_ercb_be_qvr() fhttrfgrq = {} vs fhfcraqrq_j: j = fhfcraqrq_j fhfcraqrq_j = Abar ryfr: j = tvg.CnpxJevgre() juvyr 1: af = pbaa.ernq(4) vs abg af: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq yratgu urnqre, tbg RBS\a') a = fgehpg.hacnpx('!V', af)[0] #ybt('rkcrpgvat %q olgrf\a' % a) vs abg a: ybt('ohc freire: erprvirq %q bowrpg%f.\a' % (j.pbhag, j.pbhag!=1 naq "f" be '')) shyycngu = j.pybfr() vs shyycngu: (qve, anzr) = bf.cngu.fcyvg(shyycngu) pbaa.jevgr('%f.vqk\a' % anzr) pbaa.bx() erghea ryvs a == 0kssssssss: ybt('ohc freire: erprvir-bowrpgf fhfcraqrq.\a') fhfcraqrq_j = j pbaa.bx() erghea ohs = pbaa.ernq(a) # bowrpg fvmrf va ohc ner ernfbanoyl fznyy #ybt('ernq %q olgrf\a' % a) vs yra(ohs) < a: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq %q olgrf, tbg %q\a' % (a, yra(ohs))) (glcr, pbagrag) = tvg._qrpbqr_cnpxbow(ohs) fun = tvg.pnyp_unfu(glcr, pbagrag) byqcnpx = j.rkvfgf(fun) # SVKZR: jr bayl fhttrfg n fvatyr 
vaqrk cre plpyr, orpnhfr gur pyvrag # vf pheeragyl qhzo gb qbjaybnq zber guna bar cre plpyr naljnl. # Npghnyyl jr fubhyq svk gur pyvrag, ohg guvf vf n zvabe bcgvzvmngvba # ba gur freire fvqr. vs abg fhttrfgrq naq \ byqcnpx naq (byqcnpx == Gehr be byqcnpx.raqfjvgu('.zvqk')): # SVKZR: jr fubhyqa'g ernyyl unir gb xabj nobhg zvqk svyrf # ng guvf ynlre. Ohg rkvfgf() ba n zvqk qbrfa'g erghea gur # cnpxanzr (fvapr vg qbrfa'g xabj)... cebonoyl jr fubhyq whfg # svk gung qrsvpvrapl bs zvqk svyrf riraghnyyl, nygubhtu vg'yy # znxr gur svyrf ovttre. Guvf zrgubq vf pregnvayl abg irel # rssvpvrag. j.bowpnpur.erserfu(fxvc_zvqk = Gehr) byqcnpx = j.bowpnpur.rkvfgf(fun) ybt('arj fhttrfgvba: %e\a' % byqcnpx) nffreg(byqcnpx) nffreg(byqcnpx != Gehr) nffreg(abg byqcnpx.raqfjvgu('.zvqk')) j.bowpnpur.erserfu(fxvc_zvqk = Snyfr) vs abg fhttrfgrq naq byqcnpx: nffreg(byqcnpx.raqfjvgu('.vqk')) (qve,anzr) = bf.cngu.fcyvg(byqcnpx) vs abg (anzr va fhttrfgrq): ybt("ohc freire: fhttrfgvat vaqrk %f\a" % anzr) pbaa.jevgr('vaqrk %f\a' % anzr) fhttrfgrq[anzr] = 1 ryfr: j._enj_jevgr([ohs]) # ABGERNPURQ qrs ernq_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() e = tvg.ernq_ers(ersanzr) pbaa.jevgr('%f\a' % (e be '').rapbqr('urk')) pbaa.bx() qrs hcqngr_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() arjiny = pbaa.ernqyvar().fgevc() byqiny = pbaa.ernqyvar().fgevc() tvg.hcqngr_ers(ersanzr, arjiny.qrpbqr('urk'), byqiny.qrpbqr('urk')) pbaa.bx() qrs png(pbaa, vq): tvg.purpx_ercb_be_qvr() gel: sbe oybo va tvg.png(vq): pbaa.jevgr(fgehpg.cnpx('!V', yra(oybo))) pbaa.jevgr(oybo) rkprcg XrlReebe, r: ybt('freire: reebe: %f\a' % r) pbaa.jevgr('\0\0\0\0') pbaa.reebe(r) ryfr: pbaa.jevgr('\0\0\0\0') pbaa.bx() bcgfcrp = """ ohc freire """ b = bcgvbaf.Bcgvbaf('ohc freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') ybt('ohc freire: ernqvat sebz fgqva.\a') pbzznaqf = { 'vavg-qve': vavg_qve, 'frg-qve': frg_qve, 'yvfg-vaqrkrf': yvfg_vaqrkrf, 'fraq-vaqrk': fraq_vaqrk, 
'erprvir-bowrpgf': erprvir_bowrpgf, 'ernq-ers': ernq_ers, 'hcqngr-ers': hcqngr_ers, 'png': png, } # SVKZR: guvf cebgbpby vf gbgnyyl ynzr naq abg ng nyy shgher-cebbs. # (Rfcrpvnyyl fvapr jr nobeg pbzcyrgryl nf fbba nf *nalguvat* onq unccraf) pbaa = Pbaa(flf.fgqva, flf.fgqbhg) ye = yvarernqre(pbaa) sbe _yvar va ye: yvar = _yvar.fgevc() vs abg yvar: pbagvahr ybt('ohc freire: pbzznaq: %e\a' % yvar) jbeqf = yvar.fcyvg(' ', 1) pzq = jbeqf[0] erfg = yra(jbeqf)>1 naq jbeqf[1] be '' vs pzq == 'dhvg': oernx ryfr: pzq = pbzznaqf.trg(pzq) vs pzq: pzq(pbaa, erfg) ryfr: envfr Rkprcgvba('haxabja freire pbzznaq: %e\a' % yvar) ybt('ohc freire: qbar\a') #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc wbva [-e ubfg:cngu] [ersf be unfurf...] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc wbva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg rkgen: rkgen = yvarernqre(flf.fgqva) erg = 0 vs bcg.erzbgr: pyv = pyvrag.Pyvrag(bcg.erzbgr) png = pyv.png ryfr: pc = tvg.PngCvcr() png = pc.wbva sbe vq va rkgen: gel: sbe oybo va png(vq): flf.fgqbhg.jevgr(oybo) rkprcg XrlReebe, r: flf.fgqbhg.syhfu() ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, er, reeab, fgng, gvzr, zngu sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, vaqrk, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc fnir [-gp] [-a anzr] -- e,erzbgr= erzbgr ercbfvgbel cngu g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) d,dhvrg qba'g fubj cebterff zrgre fznyyre= bayl onpx hc svyrf fznyyre guna a olgrf """ b = bcgvbaf.Bcgvbaf('ohc fnir', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny("hfr bar be zber bs -g, -p, -a") vs abg rkgen: b.sngny("ab svyranzrf 
tvira") bcg.cebterff = (vfggl naq abg bcg.dhvrg) bcg.fznyyre = cnefr_ahz(bcg.fznyyre be 0) vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() unaqyr_pgey_p() qrs rngfynfu(qve): vs qve.raqfjvgu('/'): erghea qve[:-1] ryfr: erghea qve cnegf = [''] funyvfgf = [[]] qrs _chfu(cneg): nffreg(cneg) cnegf.nccraq(cneg) funyvfgf.nccraq([]) qrs _cbc(sbepr_gerr): nffreg(yra(cnegf) >= 1) cneg = cnegf.cbc() funyvfg = funyvfgf.cbc() gerr = sbepr_gerr be j.arj_gerr(funyvfg) vs funyvfgf: funyvfgf[-1].nccraq(('40000', cneg, gerr)) ryfr: # guvf jnf gur gbcyriry, fb chg vg onpx sbe fnavgl funyvfgf.nccraq(funyvfg) erghea gerr ynfgerznva = Abar qrs cebterff_ercbeg(a): tybony pbhag, fhopbhag, ynfgerznva fhopbhag += a pp = pbhag + fhopbhag cpg = gbgny naq (pp*100.0/gbgny) be 0 abj = gvzr.gvzr() ryncfrq = abj - gfgneg xcf = ryncfrq naq vag(pp/1024./ryncfrq) xcf_senp = 10 ** vag(zngu.ybt(xcf+1, 10) - 1) xcf = vag(xcf/xcf_senp)*xcf_senp vs pp: erznva = ryncfrq*1.0/pp * (gbgny-pp) ryfr: erznva = 0.0 vs (ynfgerznva naq (erznva > ynfgerznva) naq ((erznva - ynfgerznva)/ynfgerznva < 0.05)): erznva = ynfgerznva ryfr: ynfgerznva = erznva ubhef = vag(erznva/60/60) zvaf = vag(erznva/60 - ubhef*60) frpf = vag(erznva - ubhef*60*60 - zvaf*60) vs ryncfrq < 30: erznvafge = '' xcffge = '' ryfr: xcffge = '%qx/f' % xcf vs ubhef: erznvafge = '%qu%qz' % (ubhef, zvaf) ryvs zvaf: erznvafge = '%qz%q' % (zvaf, frpf) ryfr: erznvafge = '%qf' % frpf cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf) %f %f\e' % (cpg, pp/1024, gbgny/1024, spbhag, sgbgny, erznvafge, xcffge)) e = vaqrk.Ernqre(tvg.ercb('ohcvaqrk')) qrs nyernql_fnirq(rag): erghea rag.vf_inyvq() naq 
j.rkvfgf(rag.fun) naq rag.fun qrs jnagerphefr_cer(rag): erghea abg nyernql_fnirq(rag) qrs jnagerphefr_qhevat(rag): erghea abg nyernql_fnirq(rag) be rag.fun_zvffvat() gbgny = sgbgny = 0 vs bcg.cebterff: sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_cer): vs abg (sgbgny % 10024): cebterff('Ernqvat vaqrk: %q\e' % sgbgny) rkvfgf = rag.rkvfgf() unfuinyvq = nyernql_fnirq(rag) rag.frg_fun_zvffvat(abg unfuinyvq) vs abg bcg.fznyyre be rag.fvmr < bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: gbgny += rag.fvmr sgbgny += 1 cebterff('Ernqvat vaqrk: %q, qbar.\a' % sgbgny) unfufcyvg.cebterff_pnyyonpx = cebterff_ercbeg gfgneg = gvzr.gvzr() pbhag = fhopbhag = spbhag = 0 ynfgfxvc_anzr = Abar ynfgqve = '' sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_qhevat): (qve, svyr) = bf.cngu.fcyvg(rag.anzr) rkvfgf = (rag.syntf & vaqrk.VK_RKVFGF) unfuinyvq = nyernql_fnirq(rag) jnfzvffvat = rag.fun_zvffvat() byqfvmr = rag.fvmr vs bcg.ireobfr: vs abg rkvfgf: fgnghf = 'Q' ryvs abg unfuinyvq: vs rag.fun == vaqrk.RZCGL_FUN: fgnghf = 'N' ryfr: fgnghf = 'Z' ryfr: fgnghf = ' ' vs bcg.ireobfr >= 2: ybt('%f %-70f\a' % (fgnghf, rag.anzr)) ryvs abg fgng.F_VFQVE(rag.zbqr) naq ynfgqve != qve: vs abg ynfgqve.fgnegfjvgu(qve): ybt('%f %-70f\a' % (fgnghf, bf.cngu.wbva(qve, ''))) ynfgqve = qve vs bcg.cebterff: cebterff_ercbeg(0) spbhag += 1 vs abg rkvfgf: pbagvahr vs bcg.fznyyre naq rag.fvmr >= bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: nqq_reebe('fxvccvat ynetr svyr "%f"' % rag.anzr) ynfgfxvc_anzr = rag.anzr pbagvahr nffreg(qve.fgnegfjvgu('/')) qvec = qve.fcyvg('/') juvyr cnegf > qvec: _cbc(sbepr_gerr = Abar) vs qve != '/': sbe cneg va qvec[yra(cnegf):]: _chfu(cneg) vs abg svyr: # ab svyranzr cbegvba zrnaf guvf vf n fhoqve. Ohg # fho/cneragqverpgbevrf nyernql unaqyrq va gur cbc/chfu() cneg nobir. 
byqgerr = nyernql_fnirq(rag) # znl or Abar arjgerr = _cbc(sbepr_gerr = byqgerr) vs abg byqgerr: vs ynfgfxvc_anzr naq ynfgfxvc_anzr.fgnegfjvgu(rag.anzr): rag.vainyvqngr() ryfr: rag.inyvqngr(040000, arjgerr) rag.ercnpx() vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr pbagvahr # vg'f abg n qverpgbel vq = Abar vs unfuinyvq: zbqr = '%b' % rag.tvgzbqr vq = rag.fun funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) ryfr: vs fgng.F_VFERT(rag.zbqr): gel: s = unfufcyvg.bcra_abngvzr(rag.anzr) rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = unfufcyvg.fcyvg_gb_oybo_be_gerr(j, [s]) ryfr: vs fgng.F_VFQVE(rag.zbqr): nffreg(0) # unaqyrq nobir ryvs fgng.F_VFYAX(rag.zbqr): gel: ey = bf.ernqyvax(rag.anzr) rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = ('120000', j.arj_oybo(ey)) ryfr: nqq_reebe(Rkprcgvba('fxvccvat fcrpvny svyr "%f"' % rag.anzr)) ynfgfxvc_anzr = rag.anzr vs vq: rag.inyvqngr(vag(zbqr, 8), vq) rag.ercnpx() funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr fhopbhag = 0 vs bcg.cebterff: cpg = gbgny naq pbhag*100.0/gbgny be 100 cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf), qbar. 
\a' % (cpg, pbhag/1024, gbgny/1024, spbhag, sgbgny)) juvyr yra(cnegf) > 1: _cbc(sbepr_gerr = Abar) nffreg(yra(funyvfgf) == 1) gerr = j.arj_gerr(funyvfgf[-1]) vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fnir\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq juvyr fnivat.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc gvpx """ b = bcgvbaf.Bcgvbaf('ohc gvpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") g = gvzr.gvzr() gyrsg = 1 - (g - vag(g)) gvzr.fyrrc(gyrsg) #!/hfe/ova/rai clguba vzcbeg bf, flf, fgng, gvzr sebz ohc vzcbeg bcgvbaf, tvg, vaqrk, qerphefr sebz ohc.urycref vzcbeg * qrs zretr_vaqrkrf(bhg, e1, e2): sbe r va vaqrk.ZretrVgre([e1, e2]): # SVKZR: fubhyqa'g jr erzbir qryrgrq ragevrf riraghnyyl? Jura? 
bhg.nqq_vkragel(r) pynff VgreUrycre: qrs __vavg__(frys, y): frys.v = vgre(y) frys.phe = Abar frys.arkg() qrs arkg(frys): gel: frys.phe = frys.v.arkg() rkprcg FgbcVgrengvba: frys.phe = Abar erghea frys.phe qrs purpx_vaqrk(ernqre): gel: ybt('purpx: purpxvat sbejneq vgrengvba...\a') r = Abar q = {} sbe r va ernqre.sbejneq_vgre(): vs r.puvyqera_a: vs bcg.ireobfr: ybt('%08k+%-4q %e\a' % (r.puvyqera_bsf, r.puvyqera_a, r.anzr)) nffreg(r.puvyqera_bsf) nffreg(r.anzr.raqfjvgu('/')) nffreg(abg q.trg(r.puvyqera_bsf)) q[r.puvyqera_bsf] = 1 vs r.syntf & vaqrk.VK_UNFUINYVQ: nffreg(r.fun != vaqrk.RZCGL_FUN) nffreg(r.tvgzbqr) nffreg(abg r be r.anzr == '/') # ynfg ragel vf *nyjnlf* / ybt('purpx: purpxvat abezny vgrengvba...\a') ynfg = Abar sbe r va ernqre: vs ynfg: nffreg(ynfg > r.anzr) ynfg = r.anzr rkprcg: ybt('vaqrk reebe! ng %e\a' % r) envfr ybt('purpx: cnffrq.\a') qrs hcqngr_vaqrk(gbc): ev = vaqrk.Ernqre(vaqrksvyr) jv = vaqrk.Jevgre(vaqrksvyr) evt = VgreUrycre(ev.vgre(anzr=gbc)) gfgneg = vag(gvzr.gvzr()) unfutra = Abar vs bcg.snxr_inyvq: qrs unfutra(anzr): erghea (0100644, vaqrk.SNXR_FUN) gbgny = 0 sbe (cngu,cfg) va qerphefr.erphefvir_qveyvfg([gbc], kqri=bcg.kqri): vs bcg.ireobfr>=2 be (bcg.ireobfr==1 naq fgng.F_VFQVE(cfg.fg_zbqr)): flf.fgqbhg.jevgr('%f\a' % cngu) flf.fgqbhg.syhfu() cebterff('Vaqrkvat: %q\e' % gbgny) ryvs abg (gbgny % 128): cebterff('Vaqrkvat: %q\e' % gbgny) gbgny += 1 juvyr evt.phe naq evt.phe.anzr > cngu: # qryrgrq cnguf vs evt.phe.rkvfgf(): evt.phe.frg_qryrgrq() evt.phe.ercnpx() evt.arkg() vs evt.phe naq evt.phe.anzr == cngu: # cnguf gung nyernql rkvfgrq vs cfg: evt.phe.sebz_fgng(cfg, gfgneg) vs abg (evt.phe.syntf & vaqrk.VK_UNFUINYVQ): vs unfutra: (evt.phe.tvgzbqr, evt.phe.fun) = unfutra(cngu) evt.phe.syntf |= vaqrk.VK_UNFUINYVQ vs bcg.snxr_vainyvq: evt.phe.vainyvqngr() evt.phe.ercnpx() evt.arkg() ryfr: # arj cnguf jv.nqq(cngu, cfg, unfutra = unfutra) cebterff('Vaqrkvat: %q, qbar.\a' % gbgny) vs ev.rkvfgf(): ev.fnir() jv.syhfu() vs jv.pbhag: je = 
jv.arj_ernqre() vs bcg.purpx: ybt('purpx: orsber zretvat: byqsvyr\a') purpx_vaqrk(ev) ybt('purpx: orsber zretvat: arjsvyr\a') purpx_vaqrk(je) zv = vaqrk.Jevgre(vaqrksvyr) zretr_vaqrkrf(zv, ev, je) ev.pybfr() zv.pybfr() je.pybfr() jv.nobeg() ryfr: jv.pybfr() bcgfcrp = """ ohc vaqrk <-c|z|h> [bcgvbaf...] -- c,cevag cevag gur vaqrk ragevrf sbe gur tvira anzrf (nyfb jbexf jvgu -h) z,zbqvsvrq cevag bayl nqqrq/qryrgrq/zbqvsvrq svyrf (vzcyvrf -c) f,fgnghf cevag rnpu svyranzr jvgu n fgnghf pune (N/Z/Q) (vzcyvrf -c) U,unfu cevag gur unfu sbe rnpu bowrpg arkg gb vgf anzr (vzcyvrf -c) y,ybat cevag zber vasbezngvba nobhg rnpu svyr h,hcqngr (erphefviryl) hcqngr gur vaqrk ragevrf sbe gur tvira svyranzrf k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf snxr-inyvq znex nyy vaqrk ragevrf nf hc-gb-qngr rira vs gurl nera'g snxr-vainyvq znex nyy vaqrk ragevrf nf vainyvq purpx pnershyyl purpx vaqrk svyr vagrtevgl s,vaqrksvyr= gur anzr bs gur vaqrk svyr (qrsnhyg 'vaqrk') i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) """ b = bcgvbaf.Bcgvbaf('ohc vaqrk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg (bcg.zbqvsvrq be bcg['cevag'] be bcg.fgnghf be bcg.hcqngr be bcg.purpx): b.sngny('fhccyl bar be zber bs -c, -f, -z, -h, be --purpx') vs (bcg.snxr_inyvq be bcg.snxr_vainyvq) naq abg bcg.hcqngr: b.sngny('--snxr-{va,}inyvq ner zrnavatyrff jvgubhg -h') vs bcg.snxr_inyvq naq bcg.snxr_vainyvq: b.sngny('--snxr-inyvq vf vapbzcngvoyr jvgu --snxr-vainyvq') tvg.purpx_ercb_be_qvr() vaqrksvyr = bcg.vaqrksvyr be tvg.ercb('ohcvaqrk') unaqyr_pgey_p() vs bcg.purpx: ybt('purpx: fgnegvat vavgvny purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) cnguf = vaqrk.erqhpr_cnguf(rkgen) vs bcg.hcqngr: vs abg cnguf: b.sngny('hcqngr (-h) erdhrfgrq ohg ab cnguf tvira') sbe (ec,cngu) va cnguf: hcqngr_vaqrk(ec) vs bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq: sbe (anzr, rag) va vaqrk.Ernqre(vaqrksvyr).svygre(rkgen be ['']): vs (bcg.zbqvsvrq naq (rag.vf_inyvq() be rag.vf_qryrgrq() be abg 
rag.zbqr)): pbagvahr yvar = '' vs bcg.fgnghf: vs rag.vf_qryrgrq(): yvar += 'Q ' ryvs abg rag.vf_inyvq(): vs rag.fun == vaqrk.RZCGL_FUN: yvar += 'N ' ryfr: yvar += 'Z ' ryfr: yvar += ' ' vs bcg.unfu: yvar += rag.fun.rapbqr('urk') + ' ' vs bcg.ybat: yvar += "%7f %7f " % (bpg(rag.zbqr), bpg(rag.tvgzbqr)) cevag yvar + (anzr be './') vs bcg.purpx naq (bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq be bcg.hcqngr): ybt('purpx: fgnegvat svany purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg sebz ohc vzcbeg bcgvbaf, urycref bcgfcrp = """ ohc eonpxhc-freire -- Guvf pbzznaq vf abg vagraqrq gb or eha znahnyyl. """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc-freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') # trg gur fhopbzznaq'f neti. # Abeznyyl jr pbhyq whfg cnff guvf ba gur pbzznaq yvar, ohg fvapr jr'yy bsgra # or trggvat pnyyrq ba gur bgure raq bs na ffu cvcr, juvpu graqf gb znatyr # neti (ol fraqvat vg ivn gur furyy), guvf jnl vf zhpu fnsre. ohs = flf.fgqva.ernq(4) fm = fgehpg.hacnpx('!V', ohs)[0] nffreg(fm > 0) nffreg(fm < 1000000) ohs = flf.fgqva.ernq(fm) nffreg(yra(ohs) == fm) neti = ohs.fcyvg('\0') # fgqva/fgqbhg ner fhccbfrqyl pbaarpgrq gb 'ohc freire' gung gur pnyyre # fgnegrq sbe hf (bsgra ba gur bgure raq bs na ffu ghaary), fb jr qba'g jnag # gb zvfhfr gurz. Zbir gurz bhg bs gur jnl, gura ercynpr fgqbhg jvgu # n cbvagre gb fgqree va pnfr bhe fhopbzznaq jnagf gb qb fbzrguvat jvgu vg. # # Vg zvtug or avpr gb qb gur fnzr jvgu fgqva, ohg zl rkcrevzragf fubjrq gung # ffu frrzf gb znxr vgf puvyq'f fgqree n ernqnoyr-ohg-arire-ernqf-nalguvat # fbpxrg. Gurl ernyyl fubhyq unir hfrq fuhgqbja(FUHG_JE) ba gur bgure raq # bs vg, ohg cebonoyl qvqa'g. Naljnl, vg'f gbb zrffl, fb yrg'f whfg znxr fher # nalbar ernqvat sebz fgqva vf qvfnccbvagrq. 
# # (Lbh pna'g whfg yrnir fgqva/fgqbhg "abg bcra" ol pybfvat gur svyr # qrfpevcgbef. Gura gur arkg svyr gung bcraf vf nhgbzngvpnyyl nffvtarq 0 be 1, # naq crbcyr *gelvat* gb ernq/jevgr fgqva/fgqbhg trg fperjrq.) bf.qhc2(0, 3) bf.qhc2(1, 4) bf.qhc2(2, 1) sq = bf.bcra('/qri/ahyy', bf.B_EQBAYL) bf.qhc2(sq, 0) bf.pybfr(sq) bf.raiveba['OHC_FREIRE_ERIREFR'] = urycref.ubfganzr() bf.rkrpic(neti[0], neti) flf.rkvg(99) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo, fhocebprff, gvzr sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * cne2_bx = 0 ahyys = bcra('/qri/ahyy') qrs qroht(f): vs bcg.ireobfr: ybt(f) qrs eha(neti): # ng yrnfg va clguba 2.5, hfvat "fgqbhg=2" be "fgqbhg=flf.fgqree" orybj # qbrfa'g npghnyyl jbex, orpnhfr fhocebprff pybfrf sq #2 evtug orsber # rkrpvat sbe fbzr ernfba. Fb jr jbex nebhaq vg ol qhcyvpngvat gur sq # svefg. sq = bf.qhc(2) # pbcl fgqree gel: c = fhocebprff.Cbcra(neti, fgqbhg=sq, pybfr_sqf=Snyfr) erghea c.jnvg() svanyyl: bf.pybfr(sq) qrs cne2_frghc(): tybony cne2_bx ei = 1 gel: c = fhocebprff.Cbcra(['cne2', '--uryc'], fgqbhg=ahyys, fgqree=ahyys, fgqva=ahyys) ei = c.jnvg() rkprcg BFReebe: ybt('sfpx: jneavat: cne2 abg sbhaq; qvfnoyvat erpbirel srngherf.\a') ryfr: cne2_bx = 1 qrs cnei(yiy): vs bcg.ireobfr >= yiy: vs vfggl: erghea [] ryfr: erghea ['-d'] ryfr: erghea ['-dd'] qrs cne2_trarengr(onfr): erghea eha(['cne2', 'perngr', '-a1', '-p200'] + cnei(2) + ['--', onfr, onfr+'.cnpx', onfr+'.vqk']) qrs cne2_irevsl(onfr): erghea eha(['cne2', 'irevsl'] + cnei(3) + ['--', onfr]) qrs cne2_ercnve(onfr): erghea eha(['cne2', 'ercnve'] + cnei(2) + ['--', onfr]) qrs dhvpx_irevsl(onfr): s = bcra(onfr + '.cnpx', 'eo') s.frrx(-20, 2) jnagfhz = s.ernq(20) nffreg(yra(jnagfhz) == 20) s.frrx(0) fhz = Fun1() sbe o va puhaxlernqre(s, bf.sfgng(s.svyrab()).fg_fvmr - 20): fhz.hcqngr(o) vs fhz.qvtrfg() != jnagfhz: envfr InyhrReebe('rkcrpgrq %e, tbg %e' % (jnagfhz.rapbqr('urk'), fhz.urkqvtrfg())) qrs tvg_irevsl(onfr): vs bcg.dhvpx: gel: dhvpx_irevsl(onfr) rkprcg Rkprcgvba, r: 
qroht('reebe: %f\a' % r) erghea 1 erghea 0 ryfr: erghea eha(['tvg', 'irevsl-cnpx', '--', onfr]) qrs qb_cnpx(onfr, ynfg): pbqr = 0 vs cne2_bx naq cne2_rkvfgf naq (bcg.ercnve be abg bcg.trarengr): ierfhyg = cne2_irevsl(onfr) vs ierfhyg != 0: vs bcg.ercnve: eerfhyg = cne2_ercnve(onfr) vs eerfhyg != 0: cevag '%f cne2 ercnve: snvyrq (%q)' % (ynfg, eerfhyg) pbqr = eerfhyg ryfr: cevag '%f cne2 ercnve: fhpprrqrq (0)' % ynfg pbqr = 100 ryfr: cevag '%f cne2 irevsl: snvyrq (%q)' % (ynfg, ierfhyg) pbqr = ierfhyg ryfr: cevag '%f bx' % ynfg ryvs abg bcg.trarengr be (cne2_bx naq abg cne2_rkvfgf): terfhyg = tvg_irevsl(onfr) vs terfhyg != 0: cevag '%f tvg irevsl: snvyrq (%q)' % (ynfg, terfhyg) pbqr = terfhyg ryfr: vs cne2_bx naq bcg.trarengr: cerfhyg = cne2_trarengr(onfr) vs cerfhyg != 0: cevag '%f cne2 perngr: snvyrq (%q)' % (ynfg, cerfhyg) pbqr = cerfhyg ryfr: cevag '%f bx' % ynfg ryfr: cevag '%f bx' % ynfg ryfr: nffreg(bcg.trarengr naq (abg cne2_bx be cne2_rkvfgf)) qroht(' fxvccrq: cne2 svyr nyernql trarengrq.\a') erghea pbqr bcgfcrp = """ ohc sfpx [bcgvbaf...] [svyranzrf...] -- e,ercnve nggrzcg gb ercnve reebef hfvat cne2 (qnatrebhf!) 
t,trarengr trarengr nhgb-ercnve vasbezngvba hfvat cne2 i,ireobfr vapernfr ireobfvgl (pna or hfrq zber guna bapr) dhvpx whfg purpx cnpx fun1fhz, qba'g hfr tvg irevsl-cnpx w,wbof= eha 'a' wbof va cnenyyry cne2-bx vzzrqvngryl erghea 0 vs cne2 vf bx, 1 vs abg qvfnoyr-cne2 vtaber cne2 rira vs vg vf ninvynoyr """ b = bcgvbaf.Bcgvbaf('ohc sfpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) cne2_frghc() vs bcg.cne2_bx: vs cne2_bx: flf.rkvg(0) # 'gehr' va fu ryfr: flf.rkvg(1) vs bcg.qvfnoyr_cne2: cne2_bx = 0 tvg.purpx_ercb_be_qvr() vs abg rkgen: qroht('sfpx: Ab svyranzrf tvira: purpxvat nyy cnpxf.\a') rkgen = tybo.tybo(tvg.ercb('bowrpgf/cnpx/*.cnpx')) pbqr = 0 pbhag = 0 bhgfgnaqvat = {} sbe anzr va rkgen: vs anzr.raqfjvgu('.cnpx'): onfr = anzr[:-5] ryvs anzr.raqfjvgu('.vqk'): onfr = anzr[:-4] ryvs anzr.raqfjvgu('.cne2'): onfr = anzr[:-5] ryvs bf.cngu.rkvfgf(anzr + '.cnpx'): onfr = anzr ryfr: envfr Rkprcgvba('%f vf abg n cnpx svyr!' % anzr) (qve,ynfg) = bf.cngu.fcyvg(onfr) cne2_rkvfgf = bf.cngu.rkvfgf(onfr + '.cne2') vs cne2_rkvfgf naq bf.fgng(onfr + '.cne2').fg_fvmr == 0: cne2_rkvfgf = 0 flf.fgqbhg.syhfu() qroht('sfpx: purpxvat %f (%f)\a' % (ynfg, cne2_bx naq cne2_rkvfgf naq 'cne2' be 'tvg')) vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.wbof: ap = qb_cnpx(onfr, ynfg) pbqr = pbqr be ap pbhag += 1 ryfr: juvyr yra(bhgfgnaqvat) >= bcg.wbof: (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 cvq = bf.sbex() vs cvq: # cnerag bhgfgnaqvat[cvq] = 1 ryfr: # puvyq gel: flf.rkvg(qb_cnpx(onfr, ynfg)) rkprcg Rkprcgvba, r: ybt('rkprcgvba: %e\a' % r) flf.rkvg(99) juvyr yra(bhgfgnaqvat): (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.ireobfr naq vfggl: ybt('sfpx qbar. 
\a') flf.rkvg(pbqr) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg, trgbcg, fhocebprff, fvtany sebz ohc vzcbeg bcgvbaf, ffu sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc eonpxhc vaqrk ... ohc eonpxhc fnir ... ohc eonpxhc fcyvg ... """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc', bcgfcrp, bcgshap=trgbcg.trgbcg) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) < 2: b.sngny('nethzragf rkcrpgrq') pynff FvtRkprcgvba(Rkprcgvba): qrs __vavg__(frys, fvtahz): frys.fvtahz = fvtahz Rkprcgvba.__vavg__(frys, 'fvtany %q erprvirq' % fvtahz) qrs unaqyre(fvtahz, senzr): envfr FvtRkprcgvba(fvtahz) fvtany.fvtany(fvtany.FVTGREZ, unaqyre) fvtany.fvtany(fvtany.FVTVAG, unaqyre) fc = Abar c = Abar erg = 99 gel: ubfganzr = rkgen[0] neti = rkgen[1:] c = ffu.pbaarpg(ubfganzr, 'eonpxhc-freire') netif = '\0'.wbva(['ohc'] + neti) c.fgqva.jevgr(fgehpg.cnpx('!V', yra(netif)) + netif) c.fgqva.syhfu() znva_rkr = bf.raiveba.trg('OHC_ZNVA_RKR') be flf.neti[0] fc = fhocebprff.Cbcra([znva_rkr, 'freire'], fgqva=c.fgqbhg, fgqbhg=c.fgqva) c.fgqva.pybfr() c.fgqbhg.pybfr() svanyyl: juvyr 1: # vs jr trg n fvtany juvyr jnvgvat, jr unir gb xrrc jnvgvat, whfg # va pnfr bhe puvyq qbrfa'g qvr. 
gel: erg = c.jnvg() fc.jnvg() oernx rkprcg FvtRkprcgvba, r: ybt('\aohc eonpxhc: %f\a' % r) bf.xvyy(c.cvq, r.fvtahz) erg = 84 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc arjyvare """ b = bcgvbaf.Bcgvbaf('ohc arjyvare', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") e = er.pbzcvyr(e'([\e\a])') ynfgyra = 0 nyy = '' juvyr 1: y = e.fcyvg(nyy, 1) vs yra(y) <= 1: gel: o = bf.ernq(flf.fgqva.svyrab(), 4096) rkprcg XrlobneqVagreehcg: oernx vs abg o: oernx nyy += o ryfr: nffreg(yra(y) == 3) (yvar, fcyvgpune, nyy) = y #fcyvgpune = '\a' flf.fgqbhg.jevgr('%-*f%f' % (ynfgyra, yvar, fcyvgpune)) vs fcyvgpune == '\e': ynfgyra = yra(yvar) ryfr: ynfgyra = 0 flf.fgqbhg.syhfu() vs ynfgyra be nyy: flf.fgqbhg.jevgr('%-*f\a' % (ynfgyra, nyy)) #!/hfe/ova/rai clguba vzcbeg flf sebz ohc vzcbeg bcgvbaf, tvg, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc znetva """ b = bcgvbaf.Bcgvbaf('ohc znetva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") tvg.purpx_ercb_be_qvr() #tvg.vtaber_zvqk = 1 zv = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) ynfg = '\0'*20 ybatzngpu = 0 sbe v va zv: vs v == ynfg: pbagvahr #nffreg(fge(v) >= ynfg) cz = _unfufcyvg.ovgzngpu(ynfg, v) ybatzngpu = znk(ybatzngpu, cz) ynfg = v cevag ybatzngpu #!/hfe/ova/rai clguba sebz ohc vzcbeg bcgvbaf, qerphefr sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc qerphefr -- k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf d,dhvrg qba'g npghnyyl cevag svyranzrf cebsvyr eha haqre gur clguba cebsvyre """ b = bcgvbaf.Bcgvbaf('ohc qerphefr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar svyranzr rkcrpgrq") vg = qerphefr.erphefvir_qveyvfg(rkgen, bcg.kqri) vs bcg.cebsvyr: vzcbeg pCebsvyr qrs qb_vg(): sbe v va vg: cnff pCebsvyr.eha('qb_vg()') ryfr: vs bcg.dhvrg: sbe v va vg: cnff ryfr: sbe (anzr,fg) va vg: cevag anzr vs fnirq_reebef: 
ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc fcyvg [-gpo] [-a anzr] [--orapu] [svyranzrf...] -- e,erzbgr= erzbgr ercbfvgbel cngu o,oybof bhgchg n frevrf bs oybo vqf g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) A,abbc qba'g npghnyyl fnir gur qngn naljurer d,dhvrg qba'g cevag cebterff zrffntrf i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) pbcl whfg pbcl vachg gb bhgchg, unfufcyvggvat nybat gur jnl orapu cevag orapuznex gvzvatf gb fgqree znk-cnpx-fvmr= znkvzhz olgrf va n fvatyr cnpx znk-cnpx-bowrpgf= znkvzhz ahzore bs bowrpgf va n fvatyr cnpx snabhg= znkvzhz ahzore bs oybof va n fvatyr gerr """ b = bcgvbaf.Bcgvbaf('ohc fcyvg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr be bcg.abbc be bcg.pbcl): b.sngny("hfr bar be zber bs -o, -g, -p, -a, -A, --pbcl") vs (bcg.abbc be bcg.pbcl) naq (bcg.oybof be bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny('-A vf vapbzcngvoyr jvgu -o, -g, -p, -a') vs bcg.ireobfr >= 2: tvg.ireobfr = bcg.ireobfr - 1 bcg.orapu = 1 vs bcg.znk_cnpx_fvmr: unfufcyvg.znk_cnpx_fvmr = cnefr_ahz(bcg.znk_cnpx_fvmr) vs bcg.znk_cnpx_bowrpgf: unfufcyvg.znk_cnpx_bowrpgf = cnefr_ahz(bcg.znk_cnpx_bowrpgf) vs bcg.snabhg: unfufcyvg.snabhg = cnefr_ahz(bcg.snabhg) vs bcg.oybof: unfufcyvg.snabhg = 0 vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") fgneg_gvzr = gvzr.gvzr() ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.abbc be bcg.pbcl: pyv = j = byqers = Abar ryvs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr 
naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() svyrf = rkgen naq (bcra(sa) sbe sa va rkgen) be [flf.fgqva] vs j: funyvfg = unfufcyvg.fcyvg_gb_funyvfg(j, svyrf) gerr = j.arj_gerr(funyvfg) ryfr: ynfg = 0 sbe (oybo, ovgf) va unfufcyvg.unfufcyvg_vgre(svyrf): unfufcyvg.gbgny_fcyvg += yra(oybo) vs bcg.pbcl: flf.fgqbhg.jevgr(fge(oybo)) zrtf = unfufcyvg.gbgny_fcyvg/1024/1024 vs abg bcg.dhvrg naq ynfg != zrtf: cebterff('%q Zolgrf ernq\e' % zrtf) ynfg = zrtf cebterff('%q Zolgrf ernq, qbar.\a' % zrtf) vs bcg.ireobfr: ybt('\a') vs bcg.oybof: sbe (zbqr,anzr,ova) va funyvfg: cevag ova.rapbqr('urk') vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fcyvg\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') vs j: j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() frpf = gvzr.gvzr() - fgneg_gvzr fvmr = unfufcyvg.gbgny_fcyvg vs bcg.orapu: ybt('\aohc: %.2sxolgrf va %.2s frpf = %.2s xolgrf/frp\a' % (fvmr/1024., frpf, fvmr/1024./frpf)) #!/hfe/ova/rai clguba vzcbeg flf, er, fgehpg, zznc sebz ohc vzcbeg tvg, bcgvbaf sebz ohc.urycref vzcbeg * qrs f_sebz_olgrf(olgrf): pyvfg = [pue(o) sbe o va olgrf] erghea ''.wbva(pyvfg) qrs ercbeg(pbhag): svryqf = ['IzFvmr', 'IzEFF', 'IzQngn', 'IzFgx'] q = {} sbe yvar va bcra('/cebp/frys/fgnghf').ernqyvarf(): y = er.fcyvg(e':\f*', yvar.fgevc(), 1) q[y[0]] = y[1] vs pbhag >= 0: r1 = pbhag svryqf = [q[x] sbe x va svryqf] ryfr: r1 = '' cevag ('%9f ' + ('%10f ' * yra(svryqf))) % ghcyr([r1] + svryqf) flf.fgqbhg.syhfu() bcgfcrp = """ ohc zrzgrfg [-a ryrzragf] [-p plpyrf] -- a,ahzore= ahzore bs bowrpgf cre plpyr p,plpyrf= ahzore bs plpyrf gb eha vtaber-zvqk vtaber .zvqk svyrf, hfr bayl .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zrzgrfg', bcgfcrp) (bcg, syntf, rkgen) = 
b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') tvg.vtaber_zvqk = bcg.vtaber_zvqk tvg.purpx_ercb_be_qvr() z = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) plpyrf = bcg.plpyrf be 100 ahzore = bcg.ahzore be 10000 ercbeg(-1) s = bcra('/qri/henaqbz') n = zznc.zznc(-1, 20) ercbeg(0) sbe p va kenatr(plpyrf): sbe a va kenatr(ahzore): o = s.ernq(3) vs 0: olgrf = yvfg(fgehpg.hacnpx('!OOO', o)) + [0]*17 olgrf[2] &= 0ks0 ova = fgehpg.cnpx('!20f', f_sebz_olgrf(olgrf)) ryfr: n[0:2] = o[0:2] n[2] = pue(beq(o[2]) & 0ks0) ova = fge(n[0:20]) #cevag ova.rapbqr('urk') z.rkvfgf(ova) ercbeg((p+1)*ahzore) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * qrs cevag_abqr(grkg, a): cersvk = '' vs bcg.unfu: cersvk += "%f " % a.unfu.rapbqr('urk') vs fgng.F_VFQVE(a.zbqr): cevag '%f%f/' % (cersvk, grkg) ryvs fgng.F_VFYAX(a.zbqr): cevag '%f%f@' % (cersvk, grkg) ryfr: cevag '%f%f' % (cersvk, grkg) bcgfcrp = """ ohc yf -- f,unfu fubj unfu sbe rnpu svyr """ b = bcgvbaf.Bcgvbaf('ohc yf', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) vs abg rkgen: rkgen = ['/'] erg = 0 sbe q va rkgen: gel: a = gbc.yerfbyir(q) vs fgng.F_VFQVE(a.zbqr): sbe fho va a: cevag_abqr(fho.anzr, fho) ryfr: cevag_abqr(q, a) rkprcg isf.AbqrReebe, r: ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er, fgng, ernqyvar, sazngpu sebz ohc vzcbeg bcgvbaf, tvg, fudhbgr, isf sebz ohc.urycref vzcbeg * qrs abqr_anzr(grkg, a): vs fgng.F_VFQVE(a.zbqr): erghea '%f/' % grkg ryvs fgng.F_VFYAX(a.zbqr): erghea '%f@' % grkg ryfr: erghea '%f' % grkg qrs qb_yf(cngu, a): y = [] vs fgng.F_VFQVE(a.zbqr): sbe fho va a: y.nccraq(abqr_anzr(fho.anzr, fho)) ryfr: y.nccraq(abqr_anzr(cngu, a)) cevag pbyhzangr(y, '') qrs jevgr_gb_svyr(vas, bhgs): sbe oybo va puhaxlernqre(vas): bhgs.jevgr(oybo) qrs vachgvgre(): vs bf.vfnggl(flf.fgqva.svyrab()): juvyr 1: gel: lvryq enj_vachg('ohc> ') rkprcg RBSReebe: oernx 
ryfr: sbe yvar va flf.fgqva: lvryq yvar qrs _pbzcyrgre_trg_fhof(yvar): (dglcr, ynfgjbeq) = fudhbgr.hasvavfurq_jbeq(yvar) (qve,anzr) = bf.cngu.fcyvg(ynfgjbeq) #ybt('\apbzcyrgre: %e %e %e\a' % (dglcr, ynfgjbeq, grkg)) a = cjq.erfbyir(qve) fhof = yvfg(svygre(ynzoqn k: k.anzr.fgnegfjvgu(anzr), a.fhof())) erghea (qve, anzr, dglcr, ynfgjbeq, fhof) _ynfg_yvar = Abar _ynfg_erf = Abar qrs pbzcyrgre(grkg, fgngr): tybony _ynfg_yvar tybony _ynfg_erf gel: yvar = ernqyvar.trg_yvar_ohssre()[:ernqyvar.trg_raqvqk()] vs _ynfg_yvar != yvar: _ynfg_erf = _pbzcyrgre_trg_fhof(yvar) _ynfg_yvar = yvar (qve, anzr, dglcr, ynfgjbeq, fhof) = _ynfg_erf vs fgngr < yra(fhof): fa = fhof[fgngr] fa1 = fa.erfbyir('') # qrers flzyvaxf shyyanzr = bf.cngu.wbva(qve, fa.anzr) vs fgng.F_VFQVE(fa1.zbqr): erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr+'/', grezvangr=Snyfr) ryfr: erg = fudhbgr.jung_gb_nqq(dglcr, ynfgjbeq, shyyanzr, grezvangr=Gehr) + ' ' erghea grkg + erg rkprcg Rkprcgvba, r: ybt('\areebe va pbzcyrgvba: %f\a' % r) bcgfcrp = """ ohc sgc """ b = bcgvbaf.Bcgvbaf('ohc sgc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) cjq = gbc vs rkgen: yvarf = rkgen ryfr: ernqyvar.frg_pbzcyrgre_qryvzf(' \g\a\e/') ernqyvar.frg_pbzcyrgre(pbzcyrgre) ernqyvar.cnefr_naq_ovaq("gno: pbzcyrgr") yvarf = vachgvgre() sbe yvar va yvarf: vs abg yvar.fgevc(): pbagvahr jbeqf = [jbeq sbe (jbeqfgneg,jbeq) va fudhbgr.dhbgrfcyvg(yvar)] pzq = jbeqf[0].ybjre() #ybt('rkrphgr: %e %e\a' % (pzq, cnez)) gel: vs pzq == 'yf': sbe cnez va (jbeqf[1:] be ['.']): qb_yf(cnez, cjq.erfbyir(cnez)) ryvs pzq == 'pq': sbe cnez va jbeqf[1:]: cjq = cjq.erfbyir(cnez) ryvs pzq == 'cjq': cevag cjq.shyyanzr() ryvs pzq == 'png': sbe cnez va jbeqf[1:]: jevgr_gb_svyr(cjq.erfbyir(cnez).bcra(), flf.fgqbhg) ryvs pzq == 'trg': vs yra(jbeqf) abg va [2,3]: envfr Rkprcgvba('Hfntr: trg [ybpnyanzr]') eanzr = jbeqf[1] (qve,onfr) = bf.cngu.fcyvg(eanzr) yanzr = yra(jbeqf)>2 naq jbeqf[2] be onfr vas = 
cjq.erfbyir(eanzr).bcra() ybt('Fnivat %e\a' % yanzr) jevgr_gb_svyr(vas, bcra(yanzr, 'jo')) ryvs pzq == 'ztrg': sbe cnez va jbeqf[1:]: (qve,onfr) = bf.cngu.fcyvg(cnez) sbe a va cjq.erfbyir(qve).fhof(): vs sazngpu.sazngpu(a.anzr, onfr): gel: ybt('Fnivat %e\a' % a.anzr) vas = a.bcra() bhgs = bcra(a.anzr, 'jo') jevgr_gb_svyr(vas, bhgs) bhgs.pybfr() rkprcg Rkprcgvba, r: ybt(' reebe: %f\a' % r) ryvs pzq == 'uryc' be pzq == '?': ybt('Pbzznaqf: yf pq cjq png trg ztrg uryc dhvg\a') ryvs pzq == 'dhvg' be pzq == 'rkvg' be pzq == 'olr': oernx ryfr: envfr Rkprcgvba('ab fhpu pbzznaq %e' % pzq) rkprcg Rkprcgvba, r: ybt('reebe: %f\a' % r) #envfr #!/hfe/ova/rai clguba vzcbeg flf, zznc sebz ohc vzcbeg bcgvbaf, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc enaqbz [-F frrq] -- F,frrq= bcgvbany enaqbz ahzore frrq (qrsnhyg 1) s,sbepr cevag enaqbz qngn gb fgqbhg rira vs vg'f n ggl """ b = bcgvbaf.Bcgvbaf('ohc enaqbz', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") gbgny = cnefr_ahz(rkgen[0]) vs bcg.sbepr be (abg bf.vfnggl(1) naq abg ngbv(bf.raiveba.trg('OHC_SBEPR_GGL')) & 1): _unfufcyvg.jevgr_enaqbz(flf.fgqbhg.svyrab(), gbgny, bcg.frrq be 0) ryfr: ybt('reebe: abg jevgvat ovanel qngn gb n grezvany. 
Hfr -s gb sbepr.\a') flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc uryc """ b = bcgvbaf.Bcgvbaf('ohc uryc', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) == 0: # gur jenccre cebtenz cebivqrf gur qrsnhyg hfntr fgevat bf.rkrpic(bf.raiveba['OHC_ZNVA_RKR'], ['ohc']) ryvs yra(rkgen) == 1: qbpanzr = (rkgen[0]=='ohc' naq 'ohc' be ('ohc-%f' % rkgen[0])) rkr = flf.neti[0] (rkrcngu, rkrsvyr) = bf.cngu.fcyvg(rkr) znacngu = bf.cngu.wbva(rkrcngu, '../Qbphzragngvba/' + qbpanzr + '.[1-9]') t = tybo.tybo(znacngu) vs t: bf.rkrpic('zna', ['zna', '-y', t[0]]) ryfr: bf.rkrpic('zna', ['zna', qbpanzr]) ryfr: b.sngny("rknpgyl bar pbzznaq anzr rkcrpgrq") #!/hfe/ova/rai clguba vzcbeg flf, bf, fgng, reeab, shfr, er, gvzr, grzcsvyr sebz ohc vzcbeg bcgvbaf, tvg, isf sebz ohc.urycref vzcbeg * pynff Fgng(shfr.Fgng): qrs __vavg__(frys): frys.fg_zbqr = 0 frys.fg_vab = 0 frys.fg_qri = 0 frys.fg_ayvax = 0 frys.fg_hvq = 0 frys.fg_tvq = 0 frys.fg_fvmr = 0 frys.fg_ngvzr = 0 frys.fg_zgvzr = 0 frys.fg_pgvzr = 0 frys.fg_oybpxf = 0 frys.fg_oyxfvmr = 0 frys.fg_eqri = 0 pnpur = {} qrs pnpur_trg(gbc, cngu): cnegf = cngu.fcyvg('/') pnpur[('',)] = gbc p = Abar znk = yra(cnegf) #ybt('pnpur: %e\a' % pnpur.xrlf()) sbe v va enatr(znk): cer = cnegf[:znk-v] #ybt('pnpur gelvat: %e\a' % cer) p = pnpur.trg(ghcyr(cer)) vs p: erfg = cnegf[znk-v:] sbe e va erfg: #ybt('erfbyivat %e sebz %e\a' % (e, p.shyyanzr())) p = p.yerfbyir(e) xrl = ghcyr(cer + [e]) #ybt('fnivat: %e\a' % (xrl,)) pnpur[xrl] = p oernx nffreg(p) erghea p pynff OhcSf(shfr.Shfr): qrs __vavg__(frys, gbc): shfr.Shfr.__vavg__(frys) frys.gbc = gbc qrs trgngge(frys, cngu): ybt('--trgngge(%e)\a' % cngu) gel: abqr = pnpur_trg(frys.gbc, cngu) fg = Fgng() fg.fg_zbqr = abqr.zbqr fg.fg_ayvax = abqr.ayvaxf() fg.fg_fvmr = abqr.fvmr() fg.fg_zgvzr = abqr.zgvzr fg.fg_pgvzr = abqr.pgvzr fg.fg_ngvzr = abqr.ngvzr erghea fg rkprcg isf.AbFhpuSvyr: erghea -reeab.RABRAG qrs ernqqve(frys, cngu, bssfrg): 
ybt('--ernqqve(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) lvryq shfr.Qveragel('.') lvryq shfr.Qveragel('..') sbe fho va abqr.fhof(): lvryq shfr.Qveragel(fho.anzr) qrs ernqyvax(frys, cngu): ybt('--ernqyvax(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) erghea abqr.ernqyvax() qrs bcra(frys, cngu, syntf): ybt('--bcra(%e)\a' % cngu) abqr = pnpur_trg(frys.gbc, cngu) nppzbqr = bf.B_EQBAYL | bf.B_JEBAYL | bf.B_EQJE vs (syntf & nppzbqr) != bf.B_EQBAYL: erghea -reeab.RNPPRF abqr.bcra() qrs eryrnfr(frys, cngu, syntf): ybt('--eryrnfr(%e)\a' % cngu) qrs ernq(frys, cngu, fvmr, bssfrg): ybt('--ernq(%e)\a' % cngu) a = pnpur_trg(frys.gbc, cngu) b = a.bcra() b.frrx(bssfrg) erghea b.ernq(fvmr) vs abg unfngge(shfr, '__irefvba__'): envfr EhagvzrReebe, "lbhe shfr zbqhyr vf gbb byq sbe shfr.__irefvba__" shfr.shfr_clguba_ncv = (0, 2) bcgfcrp = """ ohc shfr [-q] [-s] -- q,qroht vapernfr qroht yriry s,sbertebhaq eha va sbertebhaq """ b = bcgvbaf.Bcgvbaf('ohc shfr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) != 1: b.sngny("rknpgyl bar nethzrag rkcrpgrq") tvg.purpx_ercb_be_qvr() gbc = isf.ErsYvfg(Abar) s = OhcSf(gbc) s.shfr_netf.zbhagcbvag = rkgen[0] vs bcg.qroht: s.shfr_netf.nqq('qroht') vs bcg.sbertebhaq: s.shfr_netf.frgzbq('sbertebhaq') cevag s.zhygvguernqrq s.zhygvguernqrq = Snyfr s.znva() #!/hfe/ova/rai clguba sebz ohc vzcbeg tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ [OHC_QVE=...] ohc vavg [-e ubfg:cngu] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc vavg', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") vs bcg.erzbgr: tvg.vavg_ercb() # ybpny ercb tvg.purpx_ercb_be_qvr() pyv = pyvrag.Pyvrag(bcg.erzbgr, perngr=Gehr) pyv.pybfr() ryfr: tvg.vavg_ercb() #!/hfe/ova/rai clguba vzcbeg flf, zngu, fgehpg, tybo sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * CNTR_FVMR=4096 FUN_CRE_CNTR=CNTR_FVMR/200. 
qrs zretr(vqkyvfg, ovgf, gnoyr): pbhag = 0 sbe r va tvg.vqkzretr(vqkyvfg): pbhag += 1 cersvk = tvg.rkgenpg_ovgf(r, ovgf) gnoyr[cersvk] = pbhag lvryq r qrs qb_zvqk(bhgqve, bhgsvyranzr, vasvyranzrf): vs abg bhgsvyranzr: nffreg(bhgqve) fhz = Fun1('\0'.wbva(vasvyranzrf)).urkqvtrfg() bhgsvyranzr = '%f/zvqk-%f.zvqk' % (bhgqve, fhz) vac = [] gbgny = 0 sbe anzr va vasvyranzrf: vk = tvg.CnpxVqk(anzr) vac.nccraq(vk) gbgny += yra(vk) ybt('Zretvat %q vaqrkrf (%q bowrpgf).\a' % (yra(vasvyranzrf), gbgny)) vs (abg bcg.sbepr naq (gbgny < 1024 naq yra(vasvyranzrf) < 3)) \ be (bcg.sbepr naq abg gbgny): ybt('zvqk: abguvat gb qb.\a') erghea cntrf = vag(gbgny/FUN_CRE_CNTR) be 1 ovgf = vag(zngu.prvy(zngu.ybt(cntrf, 2))) ragevrf = 2**ovgf ybt('Gnoyr fvmr: %q (%q ovgf)\a' % (ragevrf*4, ovgf)) gnoyr = [0]*ragevrf gel: bf.hayvax(bhgsvyranzr) rkprcg BFReebe: cnff s = bcra(bhgsvyranzr + '.gzc', 'j+') s.jevgr('ZVQK\0\0\0\2') s.jevgr(fgehpg.cnpx('!V', ovgf)) nffreg(s.gryy() == 12) s.jevgr('\0'*4*ragevrf) sbe r va zretr(vac, ovgf, gnoyr): s.jevgr(r) s.jevgr('\0'.wbva(bf.cngu.onfranzr(c) sbe c va vasvyranzrf)) s.frrx(12) s.jevgr(fgehpg.cnpx('!%qV' % ragevrf, *gnoyr)) s.pybfr() bf.eranzr(bhgsvyranzr + '.gzc', bhgsvyranzr) # guvf vf whfg sbe grfgvat vs 0: c = tvg.CnpxZvqk(bhgsvyranzr) nffreg(yra(c.vqkanzrf) == yra(vasvyranzrf)) cevag c.vqkanzrf nffreg(yra(c) == gbgny) cv = vgre(c) sbe v va zretr(vac, gbgny, ovgf, gnoyr): nffreg(v == cv.arkg()) nffreg(c.rkvfgf(v)) cevag bhgsvyranzr bcgfcrp = """ ohc zvqk [bcgvbaf...] 
-- b,bhgchg= bhgchg zvqk svyranzr (qrsnhyg: nhgb-trarengrq) n,nhgb nhgbzngvpnyyl perngr .zvqk sebz nal havaqrkrq .vqk svyrf s,sbepr nhgbzngvpnyyl perngr .zvqk sebz *nyy* .vqk svyrf """ b = bcgvbaf.Bcgvbaf('ohc zvqk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen naq (bcg.nhgb be bcg.sbepr): b.sngny("lbh pna'g hfr -s/-n naq nyfb cebivqr svyranzrf") tvg.purpx_ercb_be_qvr() vs rkgen: qb_zvqk(tvg.ercb('bowrpgf/cnpx'), bcg.bhgchg, rkgen) ryvs bcg.nhgb be bcg.sbepr: cnguf = [tvg.ercb('bowrpgf/cnpx')] cnguf += tybo.tybo(tvg.ercb('vaqrk-pnpur/*/.')) sbe cngu va cnguf: ybt('zvqk: fpnaavat %f\a' % cngu) vs bcg.sbepr: qb_zvqk(cngu, bcg.bhgchg, tybo.tybo('%f/*.vqk' % cngu)) ryvs bcg.nhgb: z = tvg.CnpxVqkYvfg(cngu) arrqrq = {} sbe cnpx va z.cnpxf: # bayl .vqk svyrf jvgubhg n .zvqk ner bcra vs cnpx.anzr.raqfjvgu('.vqk'): arrqrq[cnpx.anzr] = 1 qry z qb_zvqk(cngu, bcg.bhgchg, arrqrq.xrlf()) ybt('\a') ryfr: b.sngny("lbh zhfg hfr -s be -n be cebivqr vachg svyranzrf") #!/hfe/ova/rai clguba vzcbeg flf, bf, enaqbz sebz ohc vzcbeg bcgvbaf sebz ohc.urycref vzcbeg * qrs enaqoybpx(a): y = [] sbe v va kenatr(a): y.nccraq(pue(enaqbz.enaqenatr(0,256))) erghea ''.wbva(y) bcgfcrp = """ ohc qnzntr [-a pbhag] [-f znkfvmr] [-F frrq] -- JNEAVAT: GUVF PBZZNAQ VF RKGERZRYL QNATREBHF a,ahz= ahzore bs oybpxf gb qnzntr f,fvmr= znkvzhz fvmr bs rnpu qnzntrq oybpx creprag= znkvzhz fvmr bs rnpu qnzntrq oybpx (nf n creprag bs ragver svyr) rdhny fcernq qnzntr rirayl guebhtubhg gur svyr F,frrq= enaqbz ahzore frrq (sbe ercrngnoyr grfgf) """ b = bcgvbaf.Bcgvbaf('ohc qnzntr', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg rkgen: b.sngny('svyranzrf rkcrpgrq') vs bcg.frrq != Abar: enaqbz.frrq(bcg.frrq) sbe anzr va rkgen: ybt('Qnzntvat "%f"...\a' % anzr) s = bcra(anzr, 'e+o') fg = bf.sfgng(s.svyrab()) fvmr = fg.fg_fvmr vs bcg.creprag be bcg.fvmr: zf1 = vag(sybng(bcg.creprag be 0)/100.0*fvmr) be fvmr zf2 = bcg.fvmr be fvmr znkfvmr = zva(zf1, zf2) ryfr: znkfvmr = 1 puhaxf = bcg.ahz be 10 
puhaxfvmr = fvmr/puhaxf sbe e va enatr(puhaxf): fm = enaqbz.enaqenatr(1, znkfvmr+1) vs fm > fvmr: fm = fvmr vs bcg.rdhny: bsf = e*puhaxfvmr ryfr: bsf = enaqbz.enaqenatr(0, fvmr - fm + 1) ybt(' %6q olgrf ng %q\a' % (fm, bsf)) s.frrx(bsf) s.jevgr(enaqoybpx(fm)) s.pybfr() #!/hfe/ova/rai clguba vzcbeg flf, fgehpg, zznc sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * fhfcraqrq_j = Abar qrs vavg_qve(pbaa, net): tvg.vavg_ercb(net) ybt('ohc freire: ohcqve vavgvnyvmrq: %e\a' % tvg.ercbqve) pbaa.bx() qrs frg_qve(pbaa, net): tvg.purpx_ercb_be_qvr(net) ybt('ohc freire: ohcqve vf %e\a' % tvg.ercbqve) pbaa.bx() qrs yvfg_vaqrkrf(pbaa, whax): tvg.purpx_ercb_be_qvr() sbe s va bf.yvfgqve(tvg.ercb('bowrpgf/cnpx')): vs s.raqfjvgu('.vqk'): pbaa.jevgr('%f\a' % s) pbaa.bx() qrs fraq_vaqrk(pbaa, anzr): tvg.purpx_ercb_be_qvr() nffreg(anzr.svaq('/') < 0) nffreg(anzr.raqfjvgu('.vqk')) vqk = tvg.CnpxVqk(tvg.ercb('bowrpgf/cnpx/%f' % anzr)) pbaa.jevgr(fgehpg.cnpx('!V', yra(vqk.znc))) pbaa.jevgr(vqk.znc) pbaa.bx() qrs erprvir_bowrpgf(pbaa, whax): tybony fhfcraqrq_j tvg.purpx_ercb_be_qvr() fhttrfgrq = {} vs fhfcraqrq_j: j = fhfcraqrq_j fhfcraqrq_j = Abar ryfr: j = tvg.CnpxJevgre() juvyr 1: af = pbaa.ernq(4) vs abg af: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq yratgu urnqre, tbg RBS\a') a = fgehpg.hacnpx('!V', af)[0] #ybt('rkcrpgvat %q olgrf\a' % a) vs abg a: ybt('ohc freire: erprvirq %q bowrpg%f.\a' % (j.pbhag, j.pbhag!=1 naq "f" be '')) shyycngu = j.pybfr() vs shyycngu: (qve, anzr) = bf.cngu.fcyvg(shyycngu) pbaa.jevgr('%f.vqk\a' % anzr) pbaa.bx() erghea ryvs a == 0kssssssss: ybt('ohc freire: erprvir-bowrpgf fhfcraqrq.\a') fhfcraqrq_j = j pbaa.bx() erghea ohs = pbaa.ernq(a) # bowrpg fvmrf va ohc ner ernfbanoyl fznyy #ybt('ernq %q olgrf\a' % a) vs yra(ohs) < a: j.nobeg() envfr Rkprcgvba('bowrpg ernq: rkcrpgrq %q olgrf, tbg %q\a' % (a, yra(ohs))) (glcr, pbagrag) = tvg._qrpbqr_cnpxbow(ohs) fun = tvg.pnyp_unfu(glcr, pbagrag) byqcnpx = j.rkvfgf(fun) # SVKZR: jr bayl fhttrfg n fvatyr 
vaqrk cre plpyr, orpnhfr gur pyvrag # vf pheeragyl qhzo gb qbjaybnq zber guna bar cre plpyr naljnl. # Npghnyyl jr fubhyq svk gur pyvrag, ohg guvf vf n zvabe bcgvzvmngvba # ba gur freire fvqr. vs abg fhttrfgrq naq \ byqcnpx naq (byqcnpx == Gehr be byqcnpx.raqfjvgu('.zvqk')): # SVKZR: jr fubhyqa'g ernyyl unir gb xabj nobhg zvqk svyrf # ng guvf ynlre. Ohg rkvfgf() ba n zvqk qbrfa'g erghea gur # cnpxanzr (fvapr vg qbrfa'g xabj)... cebonoyl jr fubhyq whfg # svk gung qrsvpvrapl bs zvqk svyrf riraghnyyl, nygubhtu vg'yy # znxr gur svyrf ovttre. Guvf zrgubq vf pregnvayl abg irel # rssvpvrag. j.bowpnpur.erserfu(fxvc_zvqk = Gehr) byqcnpx = j.bowpnpur.rkvfgf(fun) ybt('arj fhttrfgvba: %e\a' % byqcnpx) nffreg(byqcnpx) nffreg(byqcnpx != Gehr) nffreg(abg byqcnpx.raqfjvgu('.zvqk')) j.bowpnpur.erserfu(fxvc_zvqk = Snyfr) vs abg fhttrfgrq naq byqcnpx: nffreg(byqcnpx.raqfjvgu('.vqk')) (qve,anzr) = bf.cngu.fcyvg(byqcnpx) vs abg (anzr va fhttrfgrq): ybt("ohc freire: fhttrfgvat vaqrk %f\a" % anzr) pbaa.jevgr('vaqrk %f\a' % anzr) fhttrfgrq[anzr] = 1 ryfr: j._enj_jevgr([ohs]) # ABGERNPURQ qrs ernq_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() e = tvg.ernq_ers(ersanzr) pbaa.jevgr('%f\a' % (e be '').rapbqr('urk')) pbaa.bx() qrs hcqngr_ers(pbaa, ersanzr): tvg.purpx_ercb_be_qvr() arjiny = pbaa.ernqyvar().fgevc() byqiny = pbaa.ernqyvar().fgevc() tvg.hcqngr_ers(ersanzr, arjiny.qrpbqr('urk'), byqiny.qrpbqr('urk')) pbaa.bx() qrs png(pbaa, vq): tvg.purpx_ercb_be_qvr() gel: sbe oybo va tvg.png(vq): pbaa.jevgr(fgehpg.cnpx('!V', yra(oybo))) pbaa.jevgr(oybo) rkprcg XrlReebe, r: ybt('freire: reebe: %f\a' % r) pbaa.jevgr('\0\0\0\0') pbaa.reebe(r) ryfr: pbaa.jevgr('\0\0\0\0') pbaa.bx() bcgfcrp = """ ohc freire """ b = bcgvbaf.Bcgvbaf('ohc freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') ybt('ohc freire: ernqvat sebz fgqva.\a') pbzznaqf = { 'vavg-qve': vavg_qve, 'frg-qve': frg_qve, 'yvfg-vaqrkrf': yvfg_vaqrkrf, 'fraq-vaqrk': fraq_vaqrk, 
'erprvir-bowrpgf': erprvir_bowrpgf, 'ernq-ers': ernq_ers, 'hcqngr-ers': hcqngr_ers, 'png': png, } # SVKZR: guvf cebgbpby vf gbgnyyl ynzr naq abg ng nyy shgher-cebbs. # (Rfcrpvnyyl fvapr jr nobeg pbzcyrgryl nf fbba nf *nalguvat* onq unccraf) pbaa = Pbaa(flf.fgqva, flf.fgqbhg) ye = yvarernqre(pbaa) sbe _yvar va ye: yvar = _yvar.fgevc() vs abg yvar: pbagvahr ybt('ohc freire: pbzznaq: %e\a' % yvar) jbeqf = yvar.fcyvg(' ', 1) pzq = jbeqf[0] erfg = yra(jbeqf)>1 naq jbeqf[1] be '' vs pzq == 'dhvg': oernx ryfr: pzq = pbzznaqf.trg(pzq) vs pzq: pzq(pbaa, erfg) ryfr: envfr Rkprcgvba('haxabja freire pbzznaq: %e\a' % yvar) ybt('ohc freire: qbar\a') #!/hfe/ova/rai clguba vzcbeg flf, gvzr, fgehpg sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, pyvrag sebz ohc.urycref vzcbeg * sebz fhocebprff vzcbeg CVCR bcgfcrp = """ ohc wbva [-e ubfg:cngu] [ersf be unfurf...] -- e,erzbgr= erzbgr ercbfvgbel cngu """ b = bcgvbaf.Bcgvbaf('ohc wbva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg rkgen: rkgen = yvarernqre(flf.fgqva) erg = 0 vs bcg.erzbgr: pyv = pyvrag.Pyvrag(bcg.erzbgr) png = pyv.png ryfr: pc = tvg.PngCvcr() png = pc.wbva sbe vq va rkgen: gel: sbe oybo va png(vq): flf.fgqbhg.jevgr(oybo) rkprcg XrlReebe, r: flf.fgqbhg.syhfu() ybt('reebe: %f\a' % r) erg = 1 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, er, reeab, fgng, gvzr, zngu sebz ohc vzcbeg unfufcyvg, tvg, bcgvbaf, vaqrk, pyvrag sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc fnir [-gp] [-a anzr] -- e,erzbgr= erzbgr ercbfvgbel cngu g,gerr bhgchg n gerr vq p,pbzzvg bhgchg n pbzzvg vq a,anzr= anzr bs onpxhc frg gb hcqngr (vs nal) i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) d,dhvrg qba'g fubj cebterff zrgre fznyyre= bayl onpx hc svyrf fznyyre guna a olgrf """ b = bcgvbaf.Bcgvbaf('ohc fnir', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) tvg.purpx_ercb_be_qvr() vs abg (bcg.gerr be bcg.pbzzvg be bcg.anzr): b.sngny("hfr bar be zber bs -g, -p, -a") vs abg rkgen: b.sngny("ab svyranzrf 
tvira") bcg.cebterff = (vfggl naq abg bcg.dhvrg) bcg.fznyyre = cnefr_ahz(bcg.fznyyre be 0) vf_erirefr = bf.raiveba.trg('OHC_FREIRE_ERIREFR') vs vf_erirefr naq bcg.erzbgr: b.sngny("qba'g hfr -e va erirefr zbqr; vg'f nhgbzngvp") ersanzr = bcg.anzr naq 'ersf/urnqf/%f' % bcg.anzr be Abar vs bcg.erzbgr be vf_erirefr: pyv = pyvrag.Pyvrag(bcg.erzbgr) byqers = ersanzr naq pyv.ernq_ers(ersanzr) be Abar j = pyv.arj_cnpxjevgre() ryfr: pyv = Abar byqers = ersanzr naq tvg.ernq_ers(ersanzr) be Abar j = tvg.CnpxJevgre() unaqyr_pgey_p() qrs rngfynfu(qve): vs qve.raqfjvgu('/'): erghea qve[:-1] ryfr: erghea qve cnegf = [''] funyvfgf = [[]] qrs _chfu(cneg): nffreg(cneg) cnegf.nccraq(cneg) funyvfgf.nccraq([]) qrs _cbc(sbepr_gerr): nffreg(yra(cnegf) >= 1) cneg = cnegf.cbc() funyvfg = funyvfgf.cbc() gerr = sbepr_gerr be j.arj_gerr(funyvfg) vs funyvfgf: funyvfgf[-1].nccraq(('40000', cneg, gerr)) ryfr: # guvf jnf gur gbcyriry, fb chg vg onpx sbe fnavgl funyvfgf.nccraq(funyvfg) erghea gerr ynfgerznva = Abar qrs cebterff_ercbeg(a): tybony pbhag, fhopbhag, ynfgerznva fhopbhag += a pp = pbhag + fhopbhag cpg = gbgny naq (pp*100.0/gbgny) be 0 abj = gvzr.gvzr() ryncfrq = abj - gfgneg xcf = ryncfrq naq vag(pp/1024./ryncfrq) xcf_senp = 10 ** vag(zngu.ybt(xcf+1, 10) - 1) xcf = vag(xcf/xcf_senp)*xcf_senp vs pp: erznva = ryncfrq*1.0/pp * (gbgny-pp) ryfr: erznva = 0.0 vs (ynfgerznva naq (erznva > ynfgerznva) naq ((erznva - ynfgerznva)/ynfgerznva < 0.05)): erznva = ynfgerznva ryfr: ynfgerznva = erznva ubhef = vag(erznva/60/60) zvaf = vag(erznva/60 - ubhef*60) frpf = vag(erznva - ubhef*60*60 - zvaf*60) vs ryncfrq < 30: erznvafge = '' xcffge = '' ryfr: xcffge = '%qx/f' % xcf vs ubhef: erznvafge = '%qu%qz' % (ubhef, zvaf) ryvs zvaf: erznvafge = '%qz%q' % (zvaf, frpf) ryfr: erznvafge = '%qf' % frpf cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf) %f %f\e' % (cpg, pp/1024, gbgny/1024, spbhag, sgbgny, erznvafge, xcffge)) e = vaqrk.Ernqre(tvg.ercb('ohcvaqrk')) qrs nyernql_fnirq(rag): erghea rag.vf_inyvq() naq 
j.rkvfgf(rag.fun) naq rag.fun qrs jnagerphefr_cer(rag): erghea abg nyernql_fnirq(rag) qrs jnagerphefr_qhevat(rag): erghea abg nyernql_fnirq(rag) be rag.fun_zvffvat() gbgny = sgbgny = 0 vs bcg.cebterff: sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_cer): vs abg (sgbgny % 10024): cebterff('Ernqvat vaqrk: %q\e' % sgbgny) rkvfgf = rag.rkvfgf() unfuinyvq = nyernql_fnirq(rag) rag.frg_fun_zvffvat(abg unfuinyvq) vs abg bcg.fznyyre be rag.fvmr < bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: gbgny += rag.fvmr sgbgny += 1 cebterff('Ernqvat vaqrk: %q, qbar.\a' % sgbgny) unfufcyvg.cebterff_pnyyonpx = cebterff_ercbeg gfgneg = gvzr.gvzr() pbhag = fhopbhag = spbhag = 0 ynfgfxvc_anzr = Abar ynfgqve = '' sbe (genafanzr,rag) va e.svygre(rkgen, jnagerphefr=jnagerphefr_qhevat): (qve, svyr) = bf.cngu.fcyvg(rag.anzr) rkvfgf = (rag.syntf & vaqrk.VK_RKVFGF) unfuinyvq = nyernql_fnirq(rag) jnfzvffvat = rag.fun_zvffvat() byqfvmr = rag.fvmr vs bcg.ireobfr: vs abg rkvfgf: fgnghf = 'Q' ryvs abg unfuinyvq: vs rag.fun == vaqrk.RZCGL_FUN: fgnghf = 'N' ryfr: fgnghf = 'Z' ryfr: fgnghf = ' ' vs bcg.ireobfr >= 2: ybt('%f %-70f\a' % (fgnghf, rag.anzr)) ryvs abg fgng.F_VFQVE(rag.zbqr) naq ynfgqve != qve: vs abg ynfgqve.fgnegfjvgu(qve): ybt('%f %-70f\a' % (fgnghf, bf.cngu.wbva(qve, ''))) ynfgqve = qve vs bcg.cebterff: cebterff_ercbeg(0) spbhag += 1 vs abg rkvfgf: pbagvahr vs bcg.fznyyre naq rag.fvmr >= bcg.fznyyre: vs rkvfgf naq abg unfuinyvq: nqq_reebe('fxvccvat ynetr svyr "%f"' % rag.anzr) ynfgfxvc_anzr = rag.anzr pbagvahr nffreg(qve.fgnegfjvgu('/')) qvec = qve.fcyvg('/') juvyr cnegf > qvec: _cbc(sbepr_gerr = Abar) vs qve != '/': sbe cneg va qvec[yra(cnegf):]: _chfu(cneg) vs abg svyr: # ab svyranzr cbegvba zrnaf guvf vf n fhoqve. Ohg # fho/cneragqverpgbevrf nyernql unaqyrq va gur cbc/chfu() cneg nobir. 
byqgerr = nyernql_fnirq(rag) # znl or Abar arjgerr = _cbc(sbepr_gerr = byqgerr) vs abg byqgerr: vs ynfgfxvc_anzr naq ynfgfxvc_anzr.fgnegfjvgu(rag.anzr): rag.vainyvqngr() ryfr: rag.inyvqngr(040000, arjgerr) rag.ercnpx() vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr pbagvahr # vg'f abg n qverpgbel vq = Abar vs unfuinyvq: zbqr = '%b' % rag.tvgzbqr vq = rag.fun funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) ryfr: vs fgng.F_VFERT(rag.zbqr): gel: s = unfufcyvg.bcra_abngvzr(rag.anzr) rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = unfufcyvg.fcyvg_gb_oybo_be_gerr(j, [s]) ryfr: vs fgng.F_VFQVE(rag.zbqr): nffreg(0) # unaqyrq nobir ryvs fgng.F_VFYAX(rag.zbqr): gel: ey = bf.ernqyvax(rag.anzr) rkprcg BFReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr rkprcg VBReebe, r: nqq_reebe(r) ynfgfxvc_anzr = rag.anzr ryfr: (zbqr, vq) = ('120000', j.arj_oybo(ey)) ryfr: nqq_reebe(Rkprcgvba('fxvccvat fcrpvny svyr "%f"' % rag.anzr)) ynfgfxvc_anzr = rag.anzr vs vq: rag.inyvqngr(vag(zbqr, 8), vq) rag.ercnpx() funyvfgf[-1].nccraq((zbqr, tvg.znatyr_anzr(svyr, rag.zbqr, rag.tvgzbqr), vq)) vs rkvfgf naq jnfzvffvat: pbhag += byqfvmr fhopbhag = 0 vs bcg.cebterff: cpg = gbgny naq pbhag*100.0/gbgny be 100 cebterff('Fnivat: %.2s%% (%q/%qx, %q/%q svyrf), qbar. 
\a' % (cpg, pbhag/1024, gbgny/1024, spbhag, sgbgny)) juvyr yra(cnegf) > 1: _cbc(sbepr_gerr = Abar) nffreg(yra(funyvfgf) == 1) gerr = j.arj_gerr(funyvfgf[-1]) vs bcg.gerr: cevag gerr.rapbqr('urk') vs bcg.pbzzvg be bcg.anzr: zft = 'ohc fnir\a\aTrarengrq ol pbzznaq:\a%e' % flf.neti ers = bcg.anzr naq ('ersf/urnqf/%f' % bcg.anzr) be Abar pbzzvg = j.arj_pbzzvg(byqers, gerr, zft) vs bcg.pbzzvg: cevag pbzzvg.rapbqr('urk') j.pybfr() # zhfg pybfr orsber jr pna hcqngr gur ers vs bcg.anzr: vs pyv: pyv.hcqngr_ers(ersanzr, pbzzvg, byqers) ryfr: tvg.hcqngr_ers(ersanzr, pbzzvg, byqers) vs pyv: pyv.pybfr() vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq juvyr fnivat.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, gvzr sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc gvpx """ b = bcgvbaf.Bcgvbaf('ohc gvpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") g = gvzr.gvzr() gyrsg = 1 - (g - vag(g)) gvzr.fyrrc(gyrsg) #!/hfe/ova/rai clguba vzcbeg bf, flf, fgng, gvzr sebz ohc vzcbeg bcgvbaf, tvg, vaqrk, qerphefr sebz ohc.urycref vzcbeg * qrs zretr_vaqrkrf(bhg, e1, e2): sbe r va vaqrk.ZretrVgre([e1, e2]): # SVKZR: fubhyqa'g jr erzbir qryrgrq ragevrf riraghnyyl? Jura? 
bhg.nqq_vkragel(r) pynff VgreUrycre: qrs __vavg__(frys, y): frys.v = vgre(y) frys.phe = Abar frys.arkg() qrs arkg(frys): gel: frys.phe = frys.v.arkg() rkprcg FgbcVgrengvba: frys.phe = Abar erghea frys.phe qrs purpx_vaqrk(ernqre): gel: ybt('purpx: purpxvat sbejneq vgrengvba...\a') r = Abar q = {} sbe r va ernqre.sbejneq_vgre(): vs r.puvyqera_a: vs bcg.ireobfr: ybt('%08k+%-4q %e\a' % (r.puvyqera_bsf, r.puvyqera_a, r.anzr)) nffreg(r.puvyqera_bsf) nffreg(r.anzr.raqfjvgu('/')) nffreg(abg q.trg(r.puvyqera_bsf)) q[r.puvyqera_bsf] = 1 vs r.syntf & vaqrk.VK_UNFUINYVQ: nffreg(r.fun != vaqrk.RZCGL_FUN) nffreg(r.tvgzbqr) nffreg(abg r be r.anzr == '/') # ynfg ragel vf *nyjnlf* / ybt('purpx: purpxvat abezny vgrengvba...\a') ynfg = Abar sbe r va ernqre: vs ynfg: nffreg(ynfg > r.anzr) ynfg = r.anzr rkprcg: ybt('vaqrk reebe! ng %e\a' % r) envfr ybt('purpx: cnffrq.\a') qrs hcqngr_vaqrk(gbc): ev = vaqrk.Ernqre(vaqrksvyr) jv = vaqrk.Jevgre(vaqrksvyr) evt = VgreUrycre(ev.vgre(anzr=gbc)) gfgneg = vag(gvzr.gvzr()) unfutra = Abar vs bcg.snxr_inyvq: qrs unfutra(anzr): erghea (0100644, vaqrk.SNXR_FUN) gbgny = 0 sbe (cngu,cfg) va qerphefr.erphefvir_qveyvfg([gbc], kqri=bcg.kqri): vs bcg.ireobfr>=2 be (bcg.ireobfr==1 naq fgng.F_VFQVE(cfg.fg_zbqr)): flf.fgqbhg.jevgr('%f\a' % cngu) flf.fgqbhg.syhfu() cebterff('Vaqrkvat: %q\e' % gbgny) ryvs abg (gbgny % 128): cebterff('Vaqrkvat: %q\e' % gbgny) gbgny += 1 juvyr evt.phe naq evt.phe.anzr > cngu: # qryrgrq cnguf vs evt.phe.rkvfgf(): evt.phe.frg_qryrgrq() evt.phe.ercnpx() evt.arkg() vs evt.phe naq evt.phe.anzr == cngu: # cnguf gung nyernql rkvfgrq vs cfg: evt.phe.sebz_fgng(cfg, gfgneg) vs abg (evt.phe.syntf & vaqrk.VK_UNFUINYVQ): vs unfutra: (evt.phe.tvgzbqr, evt.phe.fun) = unfutra(cngu) evt.phe.syntf |= vaqrk.VK_UNFUINYVQ vs bcg.snxr_vainyvq: evt.phe.vainyvqngr() evt.phe.ercnpx() evt.arkg() ryfr: # arj cnguf jv.nqq(cngu, cfg, unfutra = unfutra) cebterff('Vaqrkvat: %q, qbar.\a' % gbgny) vs ev.rkvfgf(): ev.fnir() jv.syhfu() vs jv.pbhag: je = 
jv.arj_ernqre() vs bcg.purpx: ybt('purpx: orsber zretvat: byqsvyr\a') purpx_vaqrk(ev) ybt('purpx: orsber zretvat: arjsvyr\a') purpx_vaqrk(je) zv = vaqrk.Jevgre(vaqrksvyr) zretr_vaqrkrf(zv, ev, je) ev.pybfr() zv.pybfr() je.pybfr() jv.nobeg() ryfr: jv.pybfr() bcgfcrp = """ ohc vaqrk <-c|z|h> [bcgvbaf...] -- c,cevag cevag gur vaqrk ragevrf sbe gur tvira anzrf (nyfb jbexf jvgu -h) z,zbqvsvrq cevag bayl nqqrq/qryrgrq/zbqvsvrq svyrf (vzcyvrf -c) f,fgnghf cevag rnpu svyranzr jvgu n fgnghf pune (N/Z/Q) (vzcyvrf -c) U,unfu cevag gur unfu sbe rnpu bowrpg arkg gb vgf anzr (vzcyvrf -c) y,ybat cevag zber vasbezngvba nobhg rnpu svyr h,hcqngr (erphefviryl) hcqngr gur vaqrk ragevrf sbe gur tvira svyranzrf k,kqri,bar-svyr-flfgrz qba'g pebff svyrflfgrz obhaqnevrf snxr-inyvq znex nyy vaqrk ragevrf nf hc-gb-qngr rira vs gurl nera'g snxr-vainyvq znex nyy vaqrk ragevrf nf vainyvq purpx pnershyyl purpx vaqrk svyr vagrtevgl s,vaqrksvyr= gur anzr bs gur vaqrk svyr (qrsnhyg 'vaqrk') i,ireobfr vapernfr ybt bhgchg (pna or hfrq zber guna bapr) """ b = bcgvbaf.Bcgvbaf('ohc vaqrk', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs abg (bcg.zbqvsvrq be bcg['cevag'] be bcg.fgnghf be bcg.hcqngr be bcg.purpx): b.sngny('fhccyl bar be zber bs -c, -f, -z, -h, be --purpx') vs (bcg.snxr_inyvq be bcg.snxr_vainyvq) naq abg bcg.hcqngr: b.sngny('--snxr-{va,}inyvq ner zrnavatyrff jvgubhg -h') vs bcg.snxr_inyvq naq bcg.snxr_vainyvq: b.sngny('--snxr-inyvq vf vapbzcngvoyr jvgu --snxr-vainyvq') tvg.purpx_ercb_be_qvr() vaqrksvyr = bcg.vaqrksvyr be tvg.ercb('ohcvaqrk') unaqyr_pgey_p() vs bcg.purpx: ybt('purpx: fgnegvat vavgvny purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) cnguf = vaqrk.erqhpr_cnguf(rkgen) vs bcg.hcqngr: vs abg cnguf: b.sngny('hcqngr (-h) erdhrfgrq ohg ab cnguf tvira') sbe (ec,cngu) va cnguf: hcqngr_vaqrk(ec) vs bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq: sbe (anzr, rag) va vaqrk.Ernqre(vaqrksvyr).svygre(rkgen be ['']): vs (bcg.zbqvsvrq naq (rag.vf_inyvq() be rag.vf_qryrgrq() be abg 
rag.zbqr)): pbagvahr yvar = '' vs bcg.fgnghf: vs rag.vf_qryrgrq(): yvar += 'Q ' ryvs abg rag.vf_inyvq(): vs rag.fun == vaqrk.RZCGL_FUN: yvar += 'N ' ryfr: yvar += 'Z ' ryfr: yvar += ' ' vs bcg.unfu: yvar += rag.fun.rapbqr('urk') + ' ' vs bcg.ybat: yvar += "%7f %7f " % (bpg(rag.zbqr), bpg(rag.tvgzbqr)) cevag yvar + (anzr be './') vs bcg.purpx naq (bcg['cevag'] be bcg.fgnghf be bcg.zbqvsvrq be bcg.hcqngr): ybt('purpx: fgnegvat svany purpx.\a') purpx_vaqrk(vaqrk.Ernqre(vaqrksvyr)) vs fnirq_reebef: ybt('JNEAVAT: %q reebef rapbhagrerq.\a' % yra(fnirq_reebef)) flf.rkvg(1) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg sebz ohc vzcbeg bcgvbaf, urycref bcgfcrp = """ ohc eonpxhc-freire -- Guvf pbzznaq vf abg vagraqrq gb or eha znahnyyl. """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc-freire', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny('ab nethzragf rkcrpgrq') # trg gur fhopbzznaq'f neti. # Abeznyyl jr pbhyq whfg cnff guvf ba gur pbzznaq yvar, ohg fvapr jr'yy bsgra # or trggvat pnyyrq ba gur bgure raq bs na ffu cvcr, juvpu graqf gb znatyr # neti (ol fraqvat vg ivn gur furyy), guvf jnl vf zhpu fnsre. ohs = flf.fgqva.ernq(4) fm = fgehpg.hacnpx('!V', ohs)[0] nffreg(fm > 0) nffreg(fm < 1000000) ohs = flf.fgqva.ernq(fm) nffreg(yra(ohs) == fm) neti = ohs.fcyvg('\0') # fgqva/fgqbhg ner fhccbfrqyl pbaarpgrq gb 'ohc freire' gung gur pnyyre # fgnegrq sbe hf (bsgra ba gur bgure raq bs na ffu ghaary), fb jr qba'g jnag # gb zvfhfr gurz. Zbir gurz bhg bs gur jnl, gura ercynpr fgqbhg jvgu # n cbvagre gb fgqree va pnfr bhe fhopbzznaq jnagf gb qb fbzrguvat jvgu vg. # # Vg zvtug or avpr gb qb gur fnzr jvgu fgqva, ohg zl rkcrevzragf fubjrq gung # ffu frrzf gb znxr vgf puvyq'f fgqree n ernqnoyr-ohg-arire-ernqf-nalguvat # fbpxrg. Gurl ernyyl fubhyq unir hfrq fuhgqbja(FUHG_JE) ba gur bgure raq # bs vg, ohg cebonoyl qvqa'g. Naljnl, vg'f gbb zrffl, fb yrg'f whfg znxr fher # nalbar ernqvat sebz fgqva vf qvfnccbvagrq. 
# # (Lbh pna'g whfg yrnir fgqva/fgqbhg "abg bcra" ol pybfvat gur svyr # qrfpevcgbef. Gura gur arkg svyr gung bcraf vf nhgbzngvpnyyl nffvtarq 0 be 1, # naq crbcyr *gelvat* gb ernq/jevgr fgqva/fgqbhg trg fperjrq.) bf.qhc2(0, 3) bf.qhc2(1, 4) bf.qhc2(2, 1) sq = bf.bcra('/qri/ahyy', bf.B_EQBAYL) bf.qhc2(sq, 0) bf.pybfr(sq) bf.raiveba['OHC_FREIRE_ERIREFR'] = urycref.ubfganzr() bf.rkrpic(neti[0], neti) flf.rkvg(99) #!/hfe/ova/rai clguba vzcbeg flf, bf, tybo, fhocebprff, gvzr sebz ohc vzcbeg bcgvbaf, tvg sebz ohc.urycref vzcbeg * cne2_bx = 0 ahyys = bcra('/qri/ahyy') qrs qroht(f): vs bcg.ireobfr: ybt(f) qrs eha(neti): # ng yrnfg va clguba 2.5, hfvat "fgqbhg=2" be "fgqbhg=flf.fgqree" orybj # qbrfa'g npghnyyl jbex, orpnhfr fhocebprff pybfrf sq #2 evtug orsber # rkrpvat sbe fbzr ernfba. Fb jr jbex nebhaq vg ol qhcyvpngvat gur sq # svefg. sq = bf.qhc(2) # pbcl fgqree gel: c = fhocebprff.Cbcra(neti, fgqbhg=sq, pybfr_sqf=Snyfr) erghea c.jnvg() svanyyl: bf.pybfr(sq) qrs cne2_frghc(): tybony cne2_bx ei = 1 gel: c = fhocebprff.Cbcra(['cne2', '--uryc'], fgqbhg=ahyys, fgqree=ahyys, fgqva=ahyys) ei = c.jnvg() rkprcg BFReebe: ybt('sfpx: jneavat: cne2 abg sbhaq; qvfnoyvat erpbirel srngherf.\a') ryfr: cne2_bx = 1 qrs cnei(yiy): vs bcg.ireobfr >= yiy: vs vfggl: erghea [] ryfr: erghea ['-d'] ryfr: erghea ['-dd'] qrs cne2_trarengr(onfr): erghea eha(['cne2', 'perngr', '-a1', '-p200'] + cnei(2) + ['--', onfr, onfr+'.cnpx', onfr+'.vqk']) qrs cne2_irevsl(onfr): erghea eha(['cne2', 'irevsl'] + cnei(3) + ['--', onfr]) qrs cne2_ercnve(onfr): erghea eha(['cne2', 'ercnve'] + cnei(2) + ['--', onfr]) qrs dhvpx_irevsl(onfr): s = bcra(onfr + '.cnpx', 'eo') s.frrx(-20, 2) jnagfhz = s.ernq(20) nffreg(yra(jnagfhz) == 20) s.frrx(0) fhz = Fun1() sbe o va puhaxlernqre(s, bf.sfgng(s.svyrab()).fg_fvmr - 20): fhz.hcqngr(o) vs fhz.qvtrfg() != jnagfhz: envfr InyhrReebe('rkcrpgrq %e, tbg %e' % (jnagfhz.rapbqr('urk'), fhz.urkqvtrfg())) qrs tvg_irevsl(onfr): vs bcg.dhvpx: gel: dhvpx_irevsl(onfr) rkprcg Rkprcgvba, r: 
qroht('reebe: %f\a' % r) erghea 1 erghea 0 ryfr: erghea eha(['tvg', 'irevsl-cnpx', '--', onfr]) qrs qb_cnpx(onfr, ynfg): pbqr = 0 vs cne2_bx naq cne2_rkvfgf naq (bcg.ercnve be abg bcg.trarengr): ierfhyg = cne2_irevsl(onfr) vs ierfhyg != 0: vs bcg.ercnve: eerfhyg = cne2_ercnve(onfr) vs eerfhyg != 0: cevag '%f cne2 ercnve: snvyrq (%q)' % (ynfg, eerfhyg) pbqr = eerfhyg ryfr: cevag '%f cne2 ercnve: fhpprrqrq (0)' % ynfg pbqr = 100 ryfr: cevag '%f cne2 irevsl: snvyrq (%q)' % (ynfg, ierfhyg) pbqr = ierfhyg ryfr: cevag '%f bx' % ynfg ryvs abg bcg.trarengr be (cne2_bx naq abg cne2_rkvfgf): terfhyg = tvg_irevsl(onfr) vs terfhyg != 0: cevag '%f tvg irevsl: snvyrq (%q)' % (ynfg, terfhyg) pbqr = terfhyg ryfr: vs cne2_bx naq bcg.trarengr: cerfhyg = cne2_trarengr(onfr) vs cerfhyg != 0: cevag '%f cne2 perngr: snvyrq (%q)' % (ynfg, cerfhyg) pbqr = cerfhyg ryfr: cevag '%f bx' % ynfg ryfr: cevag '%f bx' % ynfg ryfr: nffreg(bcg.trarengr naq (abg cne2_bx be cne2_rkvfgf)) qroht(' fxvccrq: cne2 svyr nyernql trarengrq.\a') erghea pbqr bcgfcrp = """ ohc sfpx [bcgvbaf...] [svyranzrf...] -- e,ercnve nggrzcg gb ercnve reebef hfvat cne2 (qnatrebhf!) 
t,trarengr trarengr nhgb-ercnve vasbezngvba hfvat cne2 i,ireobfr vapernfr ireobfvgl (pna or hfrq zber guna bapr) dhvpx whfg purpx cnpx fun1fhz, qba'g hfr tvg irevsl-cnpx w,wbof= eha 'a' wbof va cnenyyry cne2-bx vzzrqvngryl erghea 0 vs cne2 vf bx, 1 vs abg qvfnoyr-cne2 vtaber cne2 rira vs vg vf ninvynoyr """ b = bcgvbaf.Bcgvbaf('ohc sfpx', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) cne2_frghc() vs bcg.cne2_bx: vs cne2_bx: flf.rkvg(0) # 'gehr' va fu ryfr: flf.rkvg(1) vs bcg.qvfnoyr_cne2: cne2_bx = 0 tvg.purpx_ercb_be_qvr() vs abg rkgen: qroht('sfpx: Ab svyranzrf tvira: purpxvat nyy cnpxf.\a') rkgen = tybo.tybo(tvg.ercb('bowrpgf/cnpx/*.cnpx')) pbqr = 0 pbhag = 0 bhgfgnaqvat = {} sbe anzr va rkgen: vs anzr.raqfjvgu('.cnpx'): onfr = anzr[:-5] ryvs anzr.raqfjvgu('.vqk'): onfr = anzr[:-4] ryvs anzr.raqfjvgu('.cne2'): onfr = anzr[:-5] ryvs bf.cngu.rkvfgf(anzr + '.cnpx'): onfr = anzr ryfr: envfr Rkprcgvba('%f vf abg n cnpx svyr!' % anzr) (qve,ynfg) = bf.cngu.fcyvg(onfr) cne2_rkvfgf = bf.cngu.rkvfgf(onfr + '.cne2') vs cne2_rkvfgf naq bf.fgng(onfr + '.cne2').fg_fvmr == 0: cne2_rkvfgf = 0 flf.fgqbhg.syhfu() qroht('sfpx: purpxvat %f (%f)\a' % (ynfg, cne2_bx naq cne2_rkvfgf naq 'cne2' be 'tvg')) vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.wbof: ap = qb_cnpx(onfr, ynfg) pbqr = pbqr be ap pbhag += 1 ryfr: juvyr yra(bhgfgnaqvat) >= bcg.wbof: (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 cvq = bf.sbex() vs cvq: # cnerag bhgfgnaqvat[cvq] = 1 ryfr: # puvyq gel: flf.rkvg(qb_cnpx(onfr, ynfg)) rkprcg Rkprcgvba, r: ybt('rkprcgvba: %e\a' % r) flf.rkvg(99) juvyr yra(bhgfgnaqvat): (cvq,ap) = bf.jnvg() ap >>= 8 vs cvq va bhgfgnaqvat: qry bhgfgnaqvat[cvq] pbqr = pbqr be ap pbhag += 1 vs abg bcg.ireobfr: cebterff('sfpx (%q/%q)\e' % (pbhag, yra(rkgen))) vs abg bcg.ireobfr naq vfggl: ybt('sfpx qbar. 
\a') flf.rkvg(pbqr) #!/hfe/ova/rai clguba vzcbeg flf, bf, fgehpg, trgbcg, fhocebprff, fvtany sebz ohc vzcbeg bcgvbaf, ffu sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc eonpxhc vaqrk ... ohc eonpxhc fnir ... ohc eonpxhc fcyvg ... """ b = bcgvbaf.Bcgvbaf('ohc eonpxhc', bcgfcrp, bcgshap=trgbcg.trgbcg) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs yra(rkgen) < 2: b.sngny('nethzragf rkcrpgrq') pynff FvtRkprcgvba(Rkprcgvba): qrs __vavg__(frys, fvtahz): frys.fvtahz = fvtahz Rkprcgvba.__vavg__(frys, 'fvtany %q erprvirq' % fvtahz) qrs unaqyre(fvtahz, senzr): envfr FvtRkprcgvba(fvtahz) fvtany.fvtany(fvtany.FVTGREZ, unaqyre) fvtany.fvtany(fvtany.FVTVAG, unaqyre) fc = Abar c = Abar erg = 99 gel: ubfganzr = rkgen[0] neti = rkgen[1:] c = ffu.pbaarpg(ubfganzr, 'eonpxhc-freire') netif = '\0'.wbva(['ohc'] + neti) c.fgqva.jevgr(fgehpg.cnpx('!V', yra(netif)) + netif) c.fgqva.syhfu() znva_rkr = bf.raiveba.trg('OHC_ZNVA_RKR') be flf.neti[0] fc = fhocebprff.Cbcra([znva_rkr, 'freire'], fgqva=c.fgqbhg, fgqbhg=c.fgqva) c.fgqva.pybfr() c.fgqbhg.pybfr() svanyyl: juvyr 1: # vs jr trg n fvtany juvyr jnvgvat, jr unir gb xrrc jnvgvat, whfg # va pnfr bhe puvyq qbrfa'g qvr. 
gel: erg = c.jnvg() fc.jnvg() oernx rkprcg FvtRkprcgvba, r: ybt('\aohc eonpxhc: %f\a' % r) bf.xvyy(c.cvq, r.fvtahz) erg = 84 flf.rkvg(erg) #!/hfe/ova/rai clguba vzcbeg flf, bf, er sebz ohc vzcbeg bcgvbaf bcgfcrp = """ ohc arjyvare """ b = bcgvbaf.Bcgvbaf('ohc arjyvare', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") e = er.pbzcvyr(e'([\e\a])') ynfgyra = 0 nyy = '' juvyr 1: y = e.fcyvg(nyy, 1) vs yra(y) <= 1: gel: o = bf.ernq(flf.fgqva.svyrab(), 4096) rkprcg XrlobneqVagreehcg: oernx vs abg o: oernx nyy += o ryfr: nffreg(yra(y) == 3) (yvar, fcyvgpune, nyy) = y #fcyvgpune = '\a' flf.fgqbhg.jevgr('%-*f%f' % (ynfgyra, yvar, fcyvgpune)) vs fcyvgpune == '\e': ynfgyra = yra(yvar) ryfr: ynfgyra = 0 flf.fgqbhg.syhfu() vs ynfgyra be nyy: flf.fgqbhg.jevgr('%-*f\a' % (ynfgyra, nyy)) #!/hfe/ova/rai clguba vzcbeg flf sebz ohc vzcbeg bcgvbaf, tvg, _unfufcyvg sebz ohc.urycref vzcbeg * bcgfcrp = """ ohc znetva """ b = bcgvbaf.Bcgvbaf('ohc znetva', bcgfcrp) (bcg, syntf, rkgen) = b.cnefr(flf.neti[1:]) vs rkgen: b.sngny("ab nethzragf rkcrpgrq") tvg.purpx_ercb_be_qvr() #tvg.vtaber_zvqk = 1 zv = tvg.CnpxVqkYvfg(tvg.ercb('bowrpgf/cnpx')) ynfg = '\0'*20 ybatzngpu = 0 sbe v va zv: vs v == ynfg: pbagvahr #nffreg(fge(v) >= ynfg) cz = _unfufcyvg.ovgzngpu(ynfg, v) ybatzngpu = znk(ybatzngpu, cz) ynfg = v cevag ybatzngpu bup-0.25/t/unknown-owner000077500000000000000000000006761225146730500152640ustar00rootroot00000000000000#!/usr/bin/env python import grp import pwd import sys def usage(): print >> sys.stderr, "Usage: unknown-owners (--user | --group)" if len(sys.argv) != 2: usage() sys.exit(1) if sys.argv[1] == '--user': max_name_len = max([len(x.pw_name) for x in pwd.getpwall()]) elif sys.argv[1] == '--group': max_name_len = max([len(x.gr_name) for x in grp.getgrall()]) else: usage() sys.exit(1) print 'x' * (max_name_len + 1) bup-0.25/wvtest-bup.sh000066400000000000000000000006731225146730500147150ustar00rootroot00000000000000# Include in your 
test script like this: # # #!/usr/bin/env bash # . ./wvtest-bup.sh . ./wvtest.sh _wvtop="$(pwd)" wvmktempdir () { local script_name="$(basename $0)" mkdir -p "$_wvtop/t/tmp" || exit $? mktemp -d "$_wvtop/t/tmp/$script_name-XXXXXXX" || exit $? } wvmkmountpt () { local script_name="$(basename $0)" mkdir -p "$_wvtop/t/mnt" || exit $? mktemp -d "$_wvtop/t/mnt/$script_name-XXXXXXX" || exit $? } bup-0.25/wvtest.py000077500000000000000000000164031225146730500141500ustar00rootroot00000000000000#!/usr/bin/env python # # WvTest: # Copyright (C)2007-2012 Versabanq Innovations Inc. and contributors. # Licensed under the GNU Library General Public License, version 2. # See the included file named LICENSE for license information. # You can get wvtest from: http://github.com/apenwarr/wvtest # import atexit import inspect import os import re import sys import traceback # NOTE # Why do we do we need the "!= main" check? Because if you run # wvtest.py as a main program and it imports your test files, then # those test files will try to import the wvtest module recursively. # That actually *works* fine, because we don't run this main program # when we're imported as a module. But you end up with two separate # wvtest modules, the one that gets imported, and the one that's the # main program. Each of them would have duplicated global variables # (most importantly, wvtest._registered), and so screwy things could # happen. Thus, we make the main program module *totally* different # from the imported module. Then we import wvtest (the module) into # wvtest (the main program) here and make sure to refer to the right # versions of global variables. # # All this is done just so that wvtest.py can be a single file that's # easy to import into your own applications. if __name__ != '__main__': # we're imported as a module _registered = [] _tests = 0 _fails = 0 def wvtest(func): """ Use this decorator (@wvtest) in front of any function you want to run as part of the unit test suite. 
Then run: python wvtest.py path/to/yourtest.py [other test.py files...] to run all the @wvtest functions in the given file(s). """ _registered.append(func) return func def _result(msg, tb, code): global _tests, _fails _tests += 1 if code != 'ok': _fails += 1 (filename, line, func, text) = tb filename = os.path.basename(filename) msg = re.sub(r'\s+', ' ', str(msg)) sys.stderr.flush() print '! %-70s %s' % ('%s:%-4d %s' % (filename, line, msg), code) sys.stdout.flush() def _check(cond, msg = 'unknown', tb = None): if tb == None: tb = traceback.extract_stack()[-3] if cond: _result(msg, tb, 'ok') else: _result(msg, tb, 'FAILED') return cond def _code(): (filename, line, func, text) = traceback.extract_stack()[-3] text = re.sub(r'^\w+\((.*)\)(\s*#.*)?$', r'\1', text); return text def WVMSG(message): ''' Issues a notification. ''' return _result(message, traceback.extract_stack()[-3], 'ok') def WVPASS(cond = True): ''' Counts a test failure unless cond is true. ''' return _check(cond, _code()) def WVFAIL(cond = True): ''' Counts a test failure unless cond is false. ''' return _check(not cond, 'NOT(%s)' % _code()) def WVPASSEQ(a, b): ''' Counts a test failure unless a == b. ''' return _check(a == b, '%s == %s' % (repr(a), repr(b))) def WVPASSNE(a, b): ''' Counts a test failure unless a != b. ''' return _check(a != b, '%s != %s' % (repr(a), repr(b))) def WVPASSLT(a, b): ''' Counts a test failure unless a < b. ''' return _check(a < b, '%s < %s' % (repr(a), repr(b))) def WVPASSLE(a, b): ''' Counts a test failure unless a <= b. ''' return _check(a <= b, '%s <= %s' % (repr(a), repr(b))) def WVPASSGT(a, b): ''' Counts a test failure unless a > b. ''' return _check(a > b, '%s > %s' % (repr(a), repr(b))) def WVPASSGE(a, b): ''' Counts a test failure unless a >= b. ''' return _check(a >= b, '%s >= %s' % (repr(a), repr(b))) def WVEXCEPT(etype, func, *args, **kwargs): ''' Counts a test failure unless func throws an 'etype' exception. 
You have to spell out the function name and arguments, rather than calling the function yourself, so that WVEXCEPT can run before your test code throws an exception. ''' try: func(*args, **kwargs) except etype, e: return _check(True, 'EXCEPT(%s)' % _code()) except: _check(False, 'EXCEPT(%s)' % _code()) raise else: return _check(False, 'EXCEPT(%s)' % _code()) def _check_unfinished(): if _registered: for func in _registered: print 'WARNING: not run: %r' % (func,) WVFAIL('wvtest_main() not called') if _fails: sys.exit(1) atexit.register(_check_unfinished) def _run_in_chdir(path, func, *args, **kwargs): oldwd = os.getcwd() oldpath = sys.path try: os.chdir(path) sys.path += [path, os.path.split(path)[0]] return func(*args, **kwargs) finally: os.chdir(oldwd) sys.path = oldpath if sys.version_info >= (2,6,0): _relpath = os.path.relpath; else: # Implementation for Python 2.5, taken from CPython (tag v2.6, # file Lib/posixpath.py, hg-commit 95fff5a6a276). Update # ./LICENSE When this code is eventually removed. def _relpath(path, start=os.path.curdir): if not path: raise ValueError("no path specified") start_list = os.path.abspath(start).split(os.path.sep) path_list = os.path.abspath(path).split(os.path.sep) # Work out how much of the filepath is shared by start and path. 
i = len(os.path.commonprefix([start_list, path_list])) rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:] if not rel_list: return curdir return os.path.join(*rel_list) def _runtest(fname, f): mod = inspect.getmodule(f) relpath = _relpath(mod.__file__, os.getcwd()).replace('.pyc', '.py') print print 'Testing "%s" in %s:' % (fname, relpath) sys.stdout.flush() try: _run_in_chdir(os.path.split(mod.__file__)[0], f) except Exception, e: print print traceback.format_exc() tb = sys.exc_info()[2] wvtest._result(e, traceback.extract_tb(tb)[1], 'EXCEPTION') def _run_registered_tests(): import wvtest as _wvtestmod while _wvtestmod._registered: t = _wvtestmod._registered.pop(0) _runtest(t.func_name, t) print def wvtest_main(extra_testfiles=[]): import wvtest as _wvtestmod _run_registered_tests() for modname in extra_testfiles: if not os.path.exists(modname): print 'Skipping: %s' % modname continue if modname.endswith('.py'): modname = modname[:-3] print 'Importing: %s' % modname path, mod = os.path.split(os.path.abspath(modname)) nicename = modname.replace(os.path.sep, '.') while nicename.startswith('.'): nicename = modname[1:] _run_in_chdir(path, __import__, nicename, None, None, []) _run_registered_tests() print print 'WvTest: %d tests, %d failures.' % (_wvtestmod._tests, _wvtestmod._fails) if __name__ == '__main__': import wvtest as _wvtestmod sys.modules['wvtest'] = _wvtestmod sys.modules['wvtest.wvtest'] = _wvtestmod wvtest_main(sys.argv[1:]) bup-0.25/wvtest.sh000066400000000000000000000034331225146730500141260ustar00rootroot00000000000000# # Include this file in your shell script by using: # #!/bin/sh # . ./wvtest.sh # # we don't quote $TEXT in case it contains newlines; newlines # aren't allowed in test output. However, we set -f so that # at least shell glob characters aren't processed. 
_wvtextclean() { ( set -f; echo $* ) } if [ -n "$BASH_VERSION" ]; then _wvfind_caller() { LVL=$1 WVCALLER_FILE=${BASH_SOURCE[2]} WVCALLER_LINE=${BASH_LINENO[1]} } else _wvfind_caller() { LVL=$1 WVCALLER_FILE="unknown" WVCALLER_LINE=0 } fi _wvcheck() { CODE="$1" TEXT=$(_wvtextclean "$2") OK=ok if [ "$CODE" -ne 0 ]; then OK=FAILED fi echo "! $WVCALLER_FILE:$WVCALLER_LINE $TEXT $OK" >&2 if [ "$CODE" -ne 0 ]; then exit $CODE else return 0 fi } WVPASS() { TEXT="$*" _wvfind_caller if "$@"; then _wvcheck 0 "$TEXT" return 0 else _wvcheck 1 "$TEXT" # NOTREACHED return 1 fi } WVFAIL() { TEXT="$*" _wvfind_caller if "$@"; then _wvcheck 1 "NOT($TEXT)" # NOTREACHED return 1 else _wvcheck 0 "NOT($TEXT)" return 0 fi } _wvgetrv() { ( "$@" >&2 ) echo -n $? } WVPASSEQ() { _wvfind_caller _wvcheck $(_wvgetrv [ "$#" -eq 2 ]) "exactly 2 arguments" echo "Comparing:" >&2 echo "$1" >&2 echo "--" >&2 echo "$2" >&2 _wvcheck $(_wvgetrv [ "$1" = "$2" ]) "'$1' = '$2'" } WVPASSNE() { _wvfind_caller _wvcheck $(_wvgetrv [ "$#" -eq 2 ]) "exactly 2 arguments" echo "Comparing:" >&2 echo "$1" >&2 echo "--" >&2 echo "$2" >&2 _wvcheck $(_wvgetrv [ "$1" != "$2" ]) "'$1' != '$2'" } WVPASSRC() { RC=$? _wvfind_caller _wvcheck $(_wvgetrv [ $RC -eq 0 ]) "return code($RC) == 0" } WVFAILRC() { RC=$? _wvfind_caller _wvcheck $(_wvgetrv [ $RC -ne 0 ]) "return code($RC) != 0" } WVSTART() { echo >&2 _wvfind_caller echo "Testing \"$*\" in $WVCALLER_FILE:" >&2 } bup-0.25/wvtestrun000077500000000000000000000070271225146730500142500ustar00rootroot00000000000000#!/usr/bin/env perl # # WvTest: # Copyright (C)2007-2009 Versabanq Innovations Inc. and contributors. # Licensed under the GNU Library General Public License, version 2. # See the included file named LICENSE for license information. 
# use strict; use warnings; use Time::HiRes qw(time); # always flush $| = 1; if (@ARGV < 1) { print STDERR "Usage: $0 \n"; exit 127; } print STDERR "Testing \"all\" in @ARGV:\n"; my $pid = open(my $fh, "-|"); if (!$pid) { # child setpgrp(); open STDERR, '>&STDOUT' or die("Can't dup stdout: $!\n"); exec(@ARGV); exit 126; # just in case } my $istty = -t STDOUT; my @log = (); my ($gpasses, $gfails) = (0,0); sub bigkill($) { my $pid = shift; if (@log) { print "\n" . join("\n", @log) . "\n"; } print STDERR "\n! Killed by signal FAILED\n"; ($pid > 0) || die("pid is '$pid'?!\n"); local $SIG{CHLD} = sub { }; # this will wake us from sleep() faster kill 15, $pid; sleep(2); if ($pid > 1) { kill 9, -$pid; } kill 9, $pid; exit(125); } # parent local $SIG{INT} = sub { bigkill($pid); }; local $SIG{TERM} = sub { bigkill($pid); }; local $SIG{ALRM} = sub { print STDERR "Alarm timed out! No test results for too long.\n"; bigkill($pid); }; sub colourize($) { my $result = shift; my $pass = ($result eq "ok"); if ($istty) { my $colour = $pass ? "\e[32;1m" : "\e[31;1m"; return "$colour$result\e[0m"; } else { return $result; } } sub mstime($$$) { my ($floatsec, $warntime, $badtime) = @_; my $ms = int($floatsec * 1000); my $str = sprintf("%d.%03ds", $ms/1000, $ms % 1000); if ($istty && $ms > $badtime) { return "\e[31;1m$str\e[0m"; } elsif ($istty && $ms > $warntime) { return "\e[33;1m$str\e[0m"; } else { return "$str"; } } sub resultline($$) { my ($name, $result) = @_; return sprintf("! %-65s %s", $name, colourize($result)); } my $allstart = time(); my ($start, $stop); sub endsect() { $stop = time(); if ($start) { printf " %s %s\n", mstime($stop - $start, 500, 1000), colourize("ok"); } } while (<$fh>) { chomp; s/\r//g; if (/^\s*Testing "(.*)" in (.*):\s*$/) { alarm(120); my ($sect, $file) = ($1, $2); endsect(); printf("! 
%s %s: ", $file, $sect); @log = (); $start = $stop; } elsif (/^!\s*(.*?)\s+(\S+)\s*$/) { alarm(120); my ($name, $result) = ($1, $2); my $pass = ($result eq "ok"); if (!$start) { printf("\n! Startup: "); $start = time(); } push @log, resultline($name, $result); if (!$pass) { $gfails++; if (@log) { print "\n" . join("\n", @log) . "\n"; @log = (); } } else { $gpasses++; print "."; } } else { push @log, $_; } } endsect(); my $newpid = waitpid($pid, 0); if ($newpid != $pid) { die("waitpid returned '$newpid', expected '$pid'\n"); } my $code = $?; my $ret = ($code >> 8); # return death-from-signal exits as >128. This is what bash does if you ran # the program directly. if ($code && !$ret) { $ret = $code | 128; } if ($ret && @log) { print "\n" . join("\n", @log) . "\n"; } if ($code != 0) { print resultline("Program returned non-zero exit code ($ret)", "FAILED"); } my $gtotal = $gpasses+$gfails; printf("\nWvTest: %d test%s, %d failure%s, total time %s.\n", $gtotal, $gtotal==1 ? "" : "s", $gfails, $gfails==1 ? "" : "s", mstime(time() - $allstart, 2000, 5000)); print STDERR "\nWvTest result code: $ret\n"; exit( $ret ? $ret : ($gfails ? 125 : 0) );