pax_global_header00006660000000000000000000000064126076437330014525gustar00rootroot0000000000000052 comment=cce7db5ebd352690917792848c2aea92c08efdd5 lsyncd-release-2.1.6/000077500000000000000000000000001260764373300144455ustar00rootroot00000000000000lsyncd-release-2.1.6/.gitignore000066400000000000000000000003101260764373300164270ustar00rootroot00000000000000# compiled stuff *.o *.out lsyncd # cmake AdditionalInfo.txt config.h Makefile build/ CMakeCache.txt CMakeFiles/ cmake_install.cmake install_manifest.txt # generated C code defaults.c runner.c lsyncd-release-2.1.6/CMakeLists.txt000066400000000000000000000055531260764373300172150ustar00rootroot00000000000000# preamble project( Lsyncd ) cmake_minimum_required( VERSION 2.8 ) set( LSYNCD_VERSION 2.1.6 ) set( CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/" ) # finding Lua find_package(Lua REQUIRED) include_directories ( ${LUA_INCLUDE_DIR} ) # setting Lsyncd sources set( LSYNCD_SRC lsyncd.c runner.c defaults.c ) # selecting the file notification mechanisms to compile against option( WITH_INOTIFY "Compile with inotify file notifications (Linux)" ON ) option( WITH_FSEVENTS "Compile with inotify file notifications (OSX)" OFF ) if( WITH_INOTIFY ) set( LSYNCD_SRC ${LSYNCD_SRC} inotify.c ) endif( WITH_INOTIFY ) if( WITH_FSEVENTS ) set( LSYNCD_SRC ${LSYNCD_SRC} fsevents.c ) option( XNU_DIR "Path to the xnu sources" ) # if( NOT XNU_DIR/bsd/sys/fsevents.h ) # message( SEND_ERROR "Cannot find bsd/sys/fsevents.h in XNU_DIR" ) # endif( ) include_directories( ${XNU_DIR} ) endif( WITH_FSEVENTS ) # generating the config.h file configure_file ( "${PROJECT_SOURCE_DIR}/config.h.in" "${PROJECT_BINARY_DIR}/config.h" ) include_directories("${PROJECT_BINARY_DIR}") # building and compiling the part of lsyncd written in Lua # also called "runner" add_custom_command( OUTPUT runner.c COMMAND ${CMAKE_COMMAND} -E echo "Generating built-in runner linkable" COMMAND ${LUA_EXECUTABLE} ${PROJECT_SOURCE_DIR}/bin2carray.lua runner.out runner runner.c DEPENDS runner.out ) # this supposes the Lua compiler 'luac' is sitting right next to the Lua interpreter 'lua' add_custom_command( OUTPUT runner.out COMMAND ${CMAKE_COMMAND} -E echo "Compiling built-in runner" COMMAND ${LUA_EXECUTABLE}c -o runner.out ${PROJECT_SOURCE_DIR}/lsyncd.lua DEPENDS ${PROJECT_SOURCE_DIR}/lsyncd.lua ) # building and compiling the built-in default configs: # rsync rysnc-ssh and direct add_custom_command( OUTPUT defaults.c COMMAND ${CMAKE_COMMAND} -E echo "Generating built-in default configs" COMMAND ${LUA_EXECUTABLE} ${PROJECT_SOURCE_DIR}/bin2carray.lua defaults.out defaults defaults.c DEPENDS defaults.out ) set( DEFAULT_CONFIGS ${PROJECT_SOURCE_DIR}/default.lua ${PROJECT_SOURCE_DIR}/default-rsync.lua ${PROJECT_SOURCE_DIR}/default-rsyncssh.lua ${PROJECT_SOURCE_DIR}/default-direct.lua ) add_custom_command( OUTPUT defaults.out COMMAND ${CMAKE_COMMAND} -E echo "Compiling built-in default configs" COMMAND ${LUA_EXECUTABLE}c -o defaults.out ${DEFAULT_CONFIGS} DEPENDS ${DEFAULT_CONFIGS} ) # the manpage add_custom_command( OUTPUT doc/lsyncd.1 COMMAND ${CMAKE_COMMAND} -E echo "Updating the manpage" COMMAND a2x --format=manpage doc/lsyncd.1.txt DEPENDS doc/lsyncd.1.txt ) add_custom_target( manpage ALL DEPENDS doc/lsyncd.1 ) # compiling and linking it all together add_executable( lsyncd ${LSYNCD_SRC} ) target_link_libraries( lsyncd ${LUA_LIBRARIES} ) install( TARGETS lsyncd RUNTIME DESTINATION bin ) install( FILES doc/lsyncd.1 DESTINATION man) 
lsyncd-release-2.1.6/COPYING000066400000000000000000000431201260764373300155000ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. 
(Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. 
If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Lsyncd version 2.X, Copyright (C) 2013 Axel Kittenberger Lsyncd comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Lsyncd' (which makes synchronises directories) written by Axel Kittenberger. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. 
If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. lsyncd-release-2.1.6/ChangeLog000066400000000000000000000261661260764373300162320ustar00rootroot0000000000000015-10-2015: 2.1.6 enhancement: Lsyncd now locks its pidfile enhancement: added ssh.identifyFile and ssh.options options enhancement: added rsync inplace option fix: ignore blank lines and rsync commenits in exclude files (David Reiss) fix: don't tread exclude lines with embedded "+" chars as inclusions (David Reiss) fix: crash when debugging inotify (Michael Ploujnikov) fix: fixed Finished/Retrying error messages being swapped around (Jun Saito) fix: properly encapsulate filenames on ssh mv commands to avoid shell command injections. fix: postcmd example (Timo Teräs) change: closes also on INT signals change: now removes its pidfile on INT and TERM signals change: changed build system from autotools to cmake 07-06-2013: 2.1.5 enhancement: Added rsync options: bwlimit, timeout fix: Specifying ssh port no longer overwrites the last rsync option fix: rsync option password_file is now accepted fix: onAttrib is accepted again fix: -log Exec now prints now fully all arguments fix: configure script lua detection now includes math lib to workaround wrongly created "needs COMPAT_ALL" messages. fix: repaired variable replacement for layer 3 scripts fix: config.delay is now checked to a number >= 0 change: a2x is no longer checked by configure script. should not be needed when building from tarball 24-11-2012: 2.1.4 fix: making ssh custom port changes work with ssh and rsync 23-11-2012: 2.1.3 fix: fixed 2 crash conditions due to failure to read 'uSettings' 03-11-2012: 2.1.2 fix: added excludeFrom to checkgauge (thx to DavidWittman) fix: fixed rsync option computation enhancement: added password_file file option to rsync 27-10-2012: 2.1.1 fix: fix rsync.rsh, rsync.rsync_path, rsync.tmp_dir, rsync._extra parameters thanks go to Birger Schmidt for this fix. 23-10-2012: 2.1.0 fix: fail startup if settings.inist is false and one of the target hosts fails fix: in case of waiting for processes during restart only logs this state now once a minute rather than filling the log crazy about it enhancement: rsyncOpts has been replaced by rsync = {...} parameter lists enhancement: default.rsyncssh has now a ssh = {...} parameter similar to default.rsync to add option to ssh calls. Ditto for xargs = {...} enhancement: the default.* implementations have a checkgauge erroring on any unknown parameters to the sync{} call enhancement: the delete parameter now takes: true, false, 'running' and 'startup' improvement: Dennis Schridde provided various improvements for Lsyncd's autoconf building change: Lsyncd is now Lua 5.2 compatible change: Lsyncd now exits with exitcode 143 on TERM signal change: settings is now be used as call like settings{...} instead of settings = {...} 04-04-2012: 2.0.7 fix: closed a memory leak due to not correct configured weak tables fix: default.direct, do not use on OSX unrecognized option -t on modify fix: default.direct, typo leading to compile error fix: when using settings.inotifyMode = "Modify" events were longer ignored fix: layer 3 function generator was messed up. 
change: now uses a2x to generate the manpage (thus more compatible across distros) change: removed --with-default-runner since it was broken, and will be replaced by something more generic in future 16-02-2012: 2.0.6 fix: no longer stops syslogging on HUP signals fix: OSX event watcher no longer misses moves into and out of the watch tree fix: not refinding a relative path to the config file in case of HUP. fix: rsync doing error 13 and killing Lsyncd. see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=659941 fix: no event creation during shutdown (might loop before) fix: no logging due to wrong log levels fix: without-inotify compile option now works to compile on OSX fix: in case of HUP-reset imply insist=true, since startup is known to be configured correctly. fix: a series of typos in comments, manpage etc. fix: moves to and from deleted directories (deleted when Lsyncd gets notified) were not correctly translated fix: added --ignore-errors to rsync when deleting files, so it will not miss a delete even when another part of the tree made an IO-error. fix: default.direct now not using -p for mkdir since if the dir is not there it should fail fix: default.direct now not using -t for cp since OSX binutils doesn't understand it fix: some files might have been missed in splitting move events (especially on tests on OSX) change: complain if any "rsyncOps" is given change: splitted the default configurations in their own files. more cleanly seperated from the Lsyncd runner, and highlights it are just Layer 1 configurations that happen to be provided by default. change: Beautified the code, no extra spaces at line end, ' instead of ", supposing 100 char width to view, change: Lsyncd now remembers the absolute path of its config file during HUPs enhancement: Defaults now respect a 'delete=false' flag when set as parameter to sync{} default.rsync: does not add --delete to rsync default.rsyncssh: does not add --delete to rsync, and does not use rm via ssh tunnel default.direct: does not add --delete to startup rsync and does not use rm 25-08-2011: 2.0.5 fix: Lsyncd will now terminate if it inotify watching exceeds its preset limit. fix: rsync error exit code 12 now results in retries. fix: Lsyncd 2.0.5 should now compile better on a bit elder GNU/Linux versions. fix: examples are now installed in the configured document directory. fix: partial transfers during startup are considered ok. Lsyncd will rework the specified files anyway. change: Layer 1 interface init() now receives an "Init" event instead of an inlet. change: builtin Lua code is now loaded via a c-array. Now more portable and correct memory flags are set. change: Lsyncd will now bail on unknown error codes. enhancement: settings.maxProcesses can now be set as a global limit of childprocesses enhancement: Lsyncd will refuse to start when any startup rsync does not work cleanly and returns "again" unless settings.insist or --insist is specified, then Lsyncd will keep retrying until it works. enhancement: option "rsyncBinary" determines which rsync Lsyncd calls. Default is "/usr/bin/rsync" enhancement: fsevents ("inotify for OSX") fixed for Snowleopard. and is available to configure by default. enhancement: settings.inotifyMode: the actualy Modification event Lsyncd reacts to can now be configured. Default is to react on file closing in write mode. enhancement: readdir(path) is available to userscripts, reads the contents of a directory. 
27-03-2011: 2.0.4 enhancement: new setting options logident, logfacility fix: moving filenames with spaces through ssh fix: excludes containing chars % $ ( ) . [ ] + - fix: various typos change: api, settings.statusInterval instead of settings.statusIntervall 25-02-2011: 2.0.3 enhancement: new default target --direct using /bin/ binaries to keep to local dirs in sync (and by default not preserving ownership) example: added a new example how to remotely execute a command after each rsync operations fix: possible crash with long rsyncOps table exceeding lua stack. fix: excludes now properly match relative and absolute paths fix: call to nil function when manually adding blanket delays fix: on ReiserFS not delivering proper dir stats and when encountering a symlink, aquire the node stat for the symlink instead from the linked node. change: leave lua apichecking enabled by default. 20-01-2011: 2.0.2 fix: exclude rules not terminated with '/' now match a file or dir named exactly the same not starting with. fix: pass exclude rules to the startup sync fix: when matching exclusion to not add inotify use same partial path than on syncs fix: properly close pipes that needed more than one write. 11-01-2011: 2.0.1 fix: write pidfile after daemonize() fix: fixed weak tables that allowed garbage collector to collect event lists too eraly. improvement: allow multiple arguments as table for rsyncOps change: added OSX fsevents interface, disabled in autoconf by default since still very experimental and limited to OSX 10.5 only. 02-12-2010: 2.0.0 a complete recoding! change: format of command line arguments changed completly. improvement: the config files format is not LUA instead of XML allowing a range from easy configuration up until complete scripts enhancement: difference actions on different kind of events effents are combined logically. enhancement: allow moves to moved on the target (rsyncssh) enhancement: excludes now allow simple file patterns (?, * and **) enhancement: optionally there is now a statusfile improvement: multiple target queues do not interfer with each other improvement: Lsyncd2 will no longer wait for child processes to return the monitor queue is constantly empties as long Lsyncd runs improvement: Lsyncd2 can now retry operations on network failure and queue events up. improvement: There are now several debugging categories that can be turned on individually. change: manpage is now written in asciidoc change: most more complex logic of Lsyncd is now written in Lua. 04-10-2010: 1.39 enhancement: call action for multiple targets simultanously fix: correctly accept from config xml fix: correctly close and free the inotify file descriptor in case of restart due to HUP signal or OVERFLOW condition fix: when delay=0 a bug always called rsync file filter even when in directory mode 01-09-2010: 1.38 enhancement: implemented file filters for singular operations enhancement: added --singular parameter for single file calls fix: fixed --dryrun messages improvement: now restarts on an inotify OVERFLOW message, just like if being kill -HUPed internal: printout the actual binary called when --debug specified 05-08-2010: 1.37 enhancement: react on HUP signals (interpreted as complete restart) enhancement: inotifies are configureable enhancement: --no-startup skips the startup calls fix : fixed delayed blocking handling. 
improvement: made logging output more readable internal: made all global variables local internal: renamed "tackles" to "delays" internal: removed the need of the "tosync" stack internal: use more pointers instead of indexes 11-07-2010: 1.34 fix: logging segfault on 64bit systems changed: man page location, spellings 05-06-2010: 1.33 fix: exclude file argument passing to rsync fix: allow exclude files specified for individual sources fix/enhancement: exclusions will be compared with extended path files allowing sub dirs to be excluded. enhancement: allow delays and call aggregation 05-01-2009: Release of lsyncd 1.26 fix: segfault on multitargets changed meaning of "version" tag in lsyncd.conf.xml 14-12-2008: Release of lsyncd 1.25 fix: mv dir and cp -r working fix: working with reiserfs enhancement: config files enhancement: multiple targets enhancement: pidfiles optimized: memory usage improved documentation lots of smaller stuff here and there ... Thanks to all contributors! 05-12-2007: Release of lsyncd 1.0 lsyncd-release-2.1.6/INSTALL000066400000000000000000000017011260764373300154750ustar00rootroot00000000000000INSTALLING ========== Prerequisites ------------- CMake Lsyncd now uses CMake as its configuration tool Common compiler stuff The C compiler, make, binutils, etc. Lua For building Lsyncd the Lua interpreter 'lua' and the Lua compiler 'luac' are needed. They aren't needed in the deployed binary, though. Use Lua 5.1 or Lua 5.2 at your choice. Liblua The Lua library. Note that you likely need the package "liblua-dev" or something like that. Use Lua 5.1 or Lua 5.2 at your choice. Note: this has to be exactly the same version as the Lua compiler used above! Building -------- Building with a separate build directory: mkdir build cd build cmake .. make sudo make install Building in-tree: cmake . make On OSX you additionally need to get the xnu sources. For example: cmake -DWITH_INOTIFY=OFF -DWITH_FSEVENTS=ON -DXNU_DIR=/path/to/xnu-VERSION make FIXME make install not yet done lsyncd-release-2.1.6/README.md000066400000000000000000000055561260764373300157330ustar00rootroot00000000000000Lsyncd -- Live Syncing (Mirror) Daemon ====================================== Description ----------- Lsyncd watches a local directory tree's event monitor interface (inotify or fsevents). It aggregates and combines events for a few seconds and then spawns one (or more) process(es) to synchronize the changes. By default this is [rsync](http://rsync.samba.org/). Lsyncd is thus a light-weight live mirror solution that is comparatively easy to install; it requires no new filesystems or block devices and does not hamper local filesystem performance. Rsync+ssh is an advanced action configuration that uses SSH to execute file and directory moves directly on the target instead of re-transmitting the moved data over the wire. Fine-grained customization can be achieved through the config file. Custom action configs can even be written from scratch in cascading layers ranging from shell scripts to code written in the [Lua language](http://www.lua.org/). This way simple, powerful and flexible configurations can be achieved. See the manual for details: [Lsyncd21Manual](https://github.com/axkibe/lsyncd/wiki/Manual-to-Lsyncd-2.1.x) License: [GPLv2](http://www.fsf.org/licensing/licenses/info/GPLv2.html) or any later GPL version. When to use ----------- Lsyncd is designed to synchronize a local directory tree with a low profile of expected changes to a remote mirror.
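For example, a minimal config file might look like the sketch below; the paths, log locations and the rsync share shown are placeholders for illustration, not defaults shipped with Lsyncd:

```lua
-- hypothetical example config; adjust the settings{} values and the
-- source/target paths to your environment
settings {
	logfile    = '/var/log/lsyncd.log',
	statusFile = '/var/log/lsyncd.status',
}

sync {
	default.rsync,
	source = '/home',
	target = 'remotehost.org::share/',
	delay  = 15,
	rsync  = {
		archive  = true,
		compress = true,
	},
}
```
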
Lsyncd is especially useful to sync data from a secure area to a not-so-secure area. Other synchronization tools ------------------------ [DRBD](http://www.drbd.org) operates on block device level. This makes it useful for synchronizing systems that are under heavy load. Lsyncd on the other hand does not require you to change block devices and/or mount points, allows you to change uid/gid of the transferred files, separates the receiver through the one-way nature of rsync. DRBD is likely the better option if you are syncing databases. [GlusterFS](http://www.gluster.org) and [BindFS](http://bindfs.org/) use a FUSE-Filesystem to interject kernel/userspace filesystem events. Lsyncd usage examples --------------------- ```lsyncd -rsync /home remotehost.org::share/``` This watches and rsyncs the local directory /home with all sub-directories and transfers them to 'remotehost' using the rsync-share 'share'. ```lsyncd -rsyncssh /home remotehost.org backup-home/``` This will also rsync/watch '/home', but it uses a ssh connection to make moves local on the remotehost instead of re-transmitting the moved file over the wire. Some more complicated examples, tips and tricks you can find in the [Lsyncd21Manual](https://github.com/axkibe/lsyncd/wiki/Manual-to-Lsyncd-2.1.x). Disclaimer ---------- Besides the usual disclaimer in the license, we want to specifically emphasize that the authors, and any organizations the authors are associated with, can not be held responsible for data-loss caused by possible malfunctions of Lsyncd. lsyncd-release-2.1.6/bin2carray.lua000077500000000000000000000023531260764373300172120ustar00rootroot00000000000000#!/usr/bin/lua --============================================================================ -- bin2carray.lua -- -- License: GPLv2 (see COPYING) or any later version -- -- Authors: Axel Kittenberger -- -- Transforms a binary file (the compiled lsyncd runner script) in a c array -- so it can be included into the executable in a portable way. --============================================================================ if #arg < 3 then error("Usage: "..arg[0].." [infile] [varname] [outfile]") end fin, err = io.open(arg[1], "rb") if fin == nil then error("Cannot open '"..arg[1].."' for reading: "..err) end fout, err = io.open(arg[3], "w") if fout == nil then error("Cannot open '"..arg[3].."'for writing: "..err) end fout:write("/* created by "..arg[0].." from file "..arg[1].." 
*/\n") fout:write("#include <stddef.h>\n") fout:write("const char "..arg[2].."_out[] = {\n") while true do local block = fin:read(16) if block == nil then break end for i = 1, #block do local val = string.format("%x", block:byte(i)) if #val < 2 then val = "0" ..val end fout:write("0x",val,",") end fout:write("\n") end fout:write("};\n\nsize_t "..arg[2].."_size = sizeof("..arg[2].."_out);\n"); fin:close(); fout:close(); lsyncd-release-2.1.6/cmake/000077500000000000000000000000001260764373300155255ustar00rootroot00000000000000lsyncd-release-2.1.6/cmake/FindLua.cmake000066400000000000000000000076561260764373300200660ustar00rootroot00000000000000# Locate Lua library # This module defines # LUA_EXECUTABLE, if found # LUA_FOUND, if false, do not try to link to Lua # LUA_LIBRARIES # LUA_INCLUDE_DIR, where to find lua.h # LUA_VERSION_STRING, the version of Lua found (since CMake 2.8.8) # # Note that the expected include convention is # #include "lua.h" # and not # #include <lua/lua.h> # This is because the lua location is not standardized and may exist # in locations other than lua/ #============================================================================= # Copyright 2007-2009 Kitware, Inc. # Modified to support Lua 5.2 by LuaDist 2012 # # Distributed under the OSI-approved BSD License (the "License"); # see accompanying file Copyright.txt for details. # # This software is distributed WITHOUT ANY WARRANTY; without even the # implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the License for more information. #============================================================================= # (To distribute this file outside of CMake, substitute the full # License text for the above reference.) # # The required version of Lua can be specified using the # standard syntax, e.g.
FIND_PACKAGE(Lua 5.1) # Otherwise the module will search for any available Lua implementation # Always search for non-versioned lua first (recommended) SET(_POSSIBLE_LUA_INCLUDE include include/lua) SET(_POSSIBLE_LUA_EXECUTABLE lua) SET(_POSSIBLE_LUA_LIBRARY lua) # Determine possible naming suffixes (there is no standard for this) IF(Lua_FIND_VERSION_MAJOR AND Lua_FIND_VERSION_MINOR) SET(_POSSIBLE_SUFFIXES "${Lua_FIND_VERSION_MAJOR}${Lua_FIND_VERSION_MINOR}" "${Lua_FIND_VERSION_MAJOR}.${Lua_FIND_VERSION_MINOR}" "-${Lua_FIND_VERSION_MAJOR}.${Lua_FIND_VERSION_MINOR}") ELSE(Lua_FIND_VERSION_MAJOR AND Lua_FIND_VERSION_MINOR) SET(_POSSIBLE_SUFFIXES "52" "5.2" "-5.2" "51" "5.1" "-5.1") ENDIF(Lua_FIND_VERSION_MAJOR AND Lua_FIND_VERSION_MINOR) # Set up possible search names and locations FOREACH(_SUFFIX ${_POSSIBLE_SUFFIXES}) LIST(APPEND _POSSIBLE_LUA_INCLUDE "include/lua${_SUFFIX}") LIST(APPEND _POSSIBLE_LUA_EXECUTABLE "lua${_SUFFIX}") LIST(APPEND _POSSIBLE_LUA_LIBRARY "lua${_SUFFIX}") ENDFOREACH(_SUFFIX) # Find the lua executable FIND_PROGRAM(LUA_EXECUTABLE NAMES ${_POSSIBLE_LUA_EXECUTABLE} ) # Find the lua header FIND_PATH(LUA_INCLUDE_DIR lua.h HINTS $ENV{LUA_DIR} PATH_SUFFIXES ${_POSSIBLE_LUA_INCLUDE} PATHS ~/Library/Frameworks /Library/Frameworks /usr/local /usr /sw # Fink /opt/local # DarwinPorts /opt/csw # Blastwave /opt ) # Find the lua library FIND_LIBRARY(LUA_LIBRARY NAMES ${_POSSIBLE_LUA_LIBRARY} HINTS $ENV{LUA_DIR} PATH_SUFFIXES lib64 lib PATHS ~/Library/Frameworks /Library/Frameworks /usr/local /usr /sw /opt/local /opt/csw /opt ) IF(LUA_LIBRARY) # include the math library for Unix IF(UNIX AND NOT APPLE) FIND_LIBRARY(LUA_MATH_LIBRARY m) SET( LUA_LIBRARIES "${LUA_LIBRARY};${LUA_MATH_LIBRARY}" CACHE STRING "Lua Libraries") # For Windows and Mac, don't need to explicitly include the math library ELSE(UNIX AND NOT APPLE) SET( LUA_LIBRARIES "${LUA_LIBRARY}" CACHE STRING "Lua Libraries") ENDIF(UNIX AND NOT APPLE) ENDIF(LUA_LIBRARY) # Determine Lua version IF(LUA_INCLUDE_DIR AND EXISTS "${LUA_INCLUDE_DIR}/lua.h") FILE(STRINGS "${LUA_INCLUDE_DIR}/lua.h" lua_version_str REGEX "^#define[ \t]+LUA_RELEASE[ \t]+\"Lua .+\"") STRING(REGEX REPLACE "^#define[ \t]+LUA_RELEASE[ \t]+\"Lua ([^\"]+)\".*" "\\1" LUA_VERSION_STRING "${lua_version_str}") UNSET(lua_version_str) ENDIF() INCLUDE(FindPackageHandleStandardArgs) # handle the QUIETLY and REQUIRED arguments and set LUA_FOUND to TRUE if # all listed variables are TRUE FIND_PACKAGE_HANDLE_STANDARD_ARGS(Lua REQUIRED_VARS LUA_LIBRARIES LUA_INCLUDE_DIR VERSION_VAR LUA_VERSION_STRING) MARK_AS_ADVANCED(LUA_INCLUDE_DIR LUA_LIBRARIES LUA_LIBRARY LUA_MATH_LIBRARY LUA_EXECUTABLE) lsyncd-release-2.1.6/config.h.in000066400000000000000000000002541260764373300164710ustar00rootroot00000000000000/* Lsyncd Version */ #define PACKAGE_VERSION "@LSYNCD_VERSION@" /* File event notification mechanims available */ #cmakedefine WITH_INOTIFY 1 #cmakedefine WITH_FSEVENTS 1 lsyncd-release-2.1.6/default-direct.lua000066400000000000000000000103521260764373300200450ustar00rootroot00000000000000--~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- default-direct.lua -- -- Keeps two directories with /bin/cp, /bin/rm and /bin/mv in sync. -- Startup still uses rsync tough. -- -- A (Layer 1) configuration. -- -- Note: -- this is infact just a configuration using Layer 1 configuration -- like any other. It only gets compiled into the binary by default. 
-- -- You can simply use a modified one, by copying everything into a -- config file of yours and name it differently. -- -- License: GPLv2 (see COPYING) or any later version -- Authors: Axel Kittenberger -- --~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if not default then error('default not loaded') end if not default.rsync then error('default-direct (currently) needs default.rsync loaded') end if default.direct then error('default-direct already loaded') end local direct = { } default.direct = direct -- -- known configuration parameters -- direct.checkgauge = { -- -- inherits rsync config params -- default.rsync.checkgauge, rsyncExitCodes = true, onMove = true, } -- -- Spawns rsync for a list of events -- direct.action = function(inlet) -- gets all events ready for syncing local event, event2 = inlet.getEvent() local config = inlet.getConfig() if event.etype == 'Create' then if event.isdir then spawn( event, '/bin/mkdir', event.targetPath ) else -- 'cp -t', not supported on OSX spawn( event, '/bin/cp', event.sourcePath, event.targetPathdir ) end elseif event.etype == 'Modify' then if event.isdir then error("Do not know how to handle 'Modify' on dirs") end spawn(event, '/bin/cp', event.sourcePath, event.targetPathdir ) elseif event.etype == 'Delete' then if config.delete ~= true and config.delete ~= 'running' then inlet.discardEvent(event) return end local tp = event.targetPath -- extra security check if tp == '' or tp == '/' or not tp then error('Refusing to erase your harddisk!') end spawn(event, '/bin/rm', '-rf', tp) elseif event.etype == 'Move' then local tp = event.targetPath -- extra security check if tp == '' or tp == '/' or not tp then error('Refusing to erase your harddisk!') end local command = '/bin/mv $1 $2 || /bin/rm -rf $1' if config.delete ~= true and config.delete ~= 'running' then command = '/bin/mv $1 $2' end spawnShell( event, command, event.targetPath, event2.targetPath ) else log('Warn', 'ignored an event of type "',event.etype, '"') inlet.discardEvent(event) end end -- -- Called when collecting a finished child process -- direct.collect = function(agent, exitcode) local config = agent.config if not agent.isList and agent.etype == 'Init' then local rc = config.rsyncExitCodes[exitcode] if rc == 'ok' then log('Normal', 'Startup of "',agent.source,'" finished: ', exitcode) elseif rc == 'again' then if settings.insist then log('Normal', 'Retrying startup of "',agent.source,'": ', exitcode) else log('Error', 'Temporary or permanent failure on startup of "', agent.source, '". Terminating since "insist" is not set.'); terminate(-1) -- ERRNO end elseif rc == 'die' then log('Error', 'Failure on startup of "',agent.source,'": ', exitcode) else log('Error', 'Unknown exitcode on startup of "', agent.source,': "',exitcode) rc = 'die' end return rc end -- everything else is just as it is, -- there is no network to retry something. return end -- -- Spawns the recursive startup sync -- (currently) identical to default rsync. -- direct.init = default.rsync.init -- -- Checks the configuration. -- direct.prepare = function( config, level ) default.rsync.prepare( config, level + 1 ) end -- -- Default delay is very short. -- direct.delay = 1 -- -- Let the core not split move events. -- direct.onMove = true -- -- Rsync configuration for startup. -- direct.rsync = default.rsync.rsync direct.rsyncExitCodes = default.rsyncExitCodes -- -- By default do deletes. 
-- direct.delete = true -- -- On many system multiple disk operations just rather slow down -- than speed up. direct.maxProcesses = 1 lsyncd-release-2.1.6/default-rsync.lua000066400000000000000000000252561260764373300177420ustar00rootroot00000000000000--~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- default-rsync.lua -- -- Syncs with rsync ("classic" Lsyncd) -- A (Layer 1) configuration. -- -- Note: -- this is infact just a configuration using Layer 1 configuration -- like any other. It only gets compiled into the binary by default. -- You can simply use a modified one, by copying everything into a -- config file of yours and name it differently. -- -- License: GPLv2 (see COPYING) or any later version -- Authors: Axel Kittenberger -- --~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if not default then error( 'default not loaded' ) end if default.rsync then error( 'default-rsync already loaded' ) end local rsync = { } default.rsync = rsync -- uses default collect -- -- used to ensure there aren't typos in the keys -- rsync.checkgauge = { -- unsets default user action handlers onCreate = false, onModify = false, onDelete = false, onStartup = false, onMove = false, delete = true, exclude = true, excludeFrom = true, target = true, rsync = { acls = true, archive = true, binary = true, bwlimit = true, checksum = true, compress = true, copy_links = true, cvs_exclude = true, dry_run = true, executability = true, group = true, hard_links = true, ignore_times = true, inplace = true, ipv4 = true, ipv6 = true, keep_dirlinks = true, links = true, one_file_system = true, owner = true, password_file = true, perms = true, protect_args = true, prune_empty_dirs = true, quiet = true, rsh = true, rsync_path = true, sparse = true, temp_dir = true, timeout = true, times = true, update = true, verbose = true, whole_file = true, xattrs = true, _extra = true, }, } -- -- Spawns rsync for a list of events -- -- Exlcusions are already handled by not having -- events for them. -- rsync.action = function( inlet ) -- -- gets all events ready for syncing -- local elist = inlet.getEvents( function(event) return event.etype ~= 'Init' and event.etype ~= 'Blanket' end ) -- -- Replaces what rsync would consider filter rules by literals -- local function sub( p ) if not p then return end return p: gsub( '%?', '\\?' ): gsub( '%*', '\\*' ): gsub( '%[', '\\[' ): gsub( '%]', '\\]' ) end -- -- Gets the list of paths for the event list -- -- Deletes create multi match patterns -- local paths = elist.getPaths( function( etype, path1, path2 ) if string.byte( path1, -1 ) == 47 and etype == 'Delete' then return sub( path1 )..'***', sub( path2 ) else return sub( path1 ), sub( path2 ) end end ) -- -- stores all filters by integer index -- local filterI = { } -- -- Stores all filters with path index -- local filterP = { } -- -- Adds one path to the filter -- local function addToFilter( path ) if filterP[ path ] then return end filterP[ path ] = true table.insert( filterI, path ) end -- -- Adds a path to the filter. -- -- Rsync needs to have entries for all steps in the path, -- so the file for example d1/d2/d3/f1 needs following filters: -- 'd1/', 'd1/d2/', 'd1/d2/d3/' and 'd1/d2/d3/f1' -- for _, path in ipairs( paths ) do if path and path ~= '' then addToFilter(path) local pp = string.match( path, '^(.*/)[^/]+/?' ) while pp do addToFilter(pp) pp = string.match( pp, '^(.*/)[^/]+/?' 
) end end end local filterS = table.concat( filterI, '\n' ) local filter0 = table.concat( filterI, '\000' ) log( 'Normal', 'Calling rsync with filter-list of new/modified files/dirs\n', filterS ) local config = inlet.getConfig( ) local delete = nil if config.delete == true or config.delete == 'running' then delete = { '--delete', '--ignore-errors' } end spawn( elist, config.rsync.binary, '<', filter0, config.rsync._computed, '-r', delete, '--force', '--from0', '--include-from=-', '--exclude=*', config.source, config.target ) end -- -- Spawns the recursive startup sync -- rsync.init = function(event) local config = event.config local inlet = event.inlet local excludes = inlet.getExcludes( ) local delete = nil local target = config.target if not target then if not config.host then error('Internal fail, Neither target nor host is configured') end target = config.host .. ':' .. config.targetdir end if config.delete == true or config.delete == 'startup' then delete = { '--delete', '--ignore-errors' } end if #excludes == 0 then -- start rsync without any excludes log( 'Normal', 'recursive startup rsync: ', config.source, ' -> ', target ) spawn( event, config.rsync.binary, delete, config.rsync._computed, '-r', config.source, target ) else -- start rsync providing an exclude list -- on stdin local exS = table.concat( excludes, '\n' ) log( 'Normal', 'recursive startup rsync: ', config.source, ' -> ', target, ' excluding\n', exS ) spawn( event, config.rsync.binary, '<', exS, '--exclude-from=-', delete, config.rsync._computed, '-r', config.source, target ) end end -- -- Prepares and checks a syncs configuration on startup. -- rsync.prepare = function( config, -- the configuration level, -- additional error level for inherited use ( by rsyncssh ) skipTarget -- used by rsyncssh, do not check for target ) -- First let default.prepare test the checkgauge default.prepare( config, level + 6 ) if not skipTarget and not config.target then error( 'default.rsync needs "target" configured', level ) end if config.rsyncOps then error( '"rsyncOps" is outdated please use the new rsync = { ... } syntax.', level ) end if config.rsyncOpts and config.rsync._extra then error( '"rsyncOpts" is outdated in favor of the new rsync = { ... } syntax\n"' + 'for which you provided the _extra attribute as well.\n"' + 'Please remove rsyncOpts from your config.', level ) end if config.rsyncOpts then log( 'Warn', '"rsyncOpts" is outdated. Please use the new rsync = { ... } syntax."' ) config.rsync._extra = config.rsyncOpts config.rsyncOpts = nil end if config.rsyncBinary and config.rsync.binary then error( '"rsyncBinary is outdated in favor of the new rsync = { ... } syntax\n"'+ 'for which you provided the binary attribute as well.\n"' + "Please remove rsyncBinary from your config.'", level ) end if config.rsyncBinary then log( 'Warn', '"rsyncBinary" is outdated. Please use the new rsync = { ... 
} syntax."' ) config.rsync.binary = config.rsyncBinary config.rsyncOpts = nil end -- checks if the _computed argument exists already if config.rsync._computed then error( 'please do not use the internal rsync._computed parameter', level ) end -- computes the rsync arguments into one list local crsync = config.rsync; -- everything implied by archive = true local archiveFlags = { recursive = true, links = true, perms = true, times = true, group = true, owner = true, devices = true, specials = true, hard_links = false, acls = false, xattrs = false, } -- if archive is given the implications are filled in if crsync.archive then for k, v in pairs( archiveFlags ) do if crsync[ k ] == nil then crsync[ k ] = v end end end crsync._computed = { true } local computed = crsync._computed local computedN = 2 local shortFlags = { acls = 'A', checksum = 'c', compress = 'z', copy_links = 'L', cvs_exclude = 'C', dry_run = 'n', executability = 'E', group = 'g', hard_links = 'H', ignore_times = 'I', ipv4 = '4', ipv6 = '6', keep_dirlinks = 'K', links = 'l', one_file_system = 'x', owner = 'o', perms = 'p', protect_args = 's', prune_empty_dirs = 'm', quiet = 'q', sparse = 'S', times = 't', update = 'u', verbose = 'v', whole_file = 'W', xattrs = 'X', } local shorts = { '-' } local shortsN = 2 if crsync._extra then for k, v in ipairs( crsync._extra ) do computed[ computedN ] = v computedN = computedN + 1 end end for k, flag in pairs( shortFlags ) do if crsync[ k ] then shorts[ shortsN ] = flag shortsN = shortsN + 1 end end if crsync.devices and crsync.specials then shorts[ shortsN ] = 'D' shortsN = shortsN + 1 else if crsync.devices then computed[ computedN ] = '--devices' computedN = computedN + 1 end if crsync.specials then computed[ computedN ] = '--specials' computedN = computedN + 1 end end if crsync.bwlimit then computed[ computedN ] = '--bwlimit=' .. crsync.bwlimit computedN = computedN + 1 end if crsync.inplace then computed[ computedN ] = '--inplace' computedN = computedN + 1 end if crsync.password_file then computed[ computedN ] = '--password-file=' .. crsync.password_file computedN = computedN + 1 end if crsync.rsh then computed[ computedN ] = '--rsh=' .. crsync.rsh computedN = computedN + 1 end if crsync.rsync_path then computed[ computedN ] = '--rsync-path=' .. crsync.rsync_path computedN = computedN + 1 end if crsync.temp_dir then computed[ computedN ] = '--temp-dir=' .. crsync.temp_dir computedN = computedN + 1 end if crsync.timeout then computed[ computedN ] = '--timeout=' .. crsync.timeout computedN = computedN + 1 end if shortsN ~= 2 then computed[ 1 ] = table.concat( shorts, '' ) else computed[ 1 ] = { } end -- appends a / to target if not present if not skipTarget and string.sub(config.target, -1) ~= '/' then config.target = config.target..'/' end end -- -- By default do deletes. -- rsync.delete = true -- -- Rsyncd exitcodes -- rsync.exitcodes = default.rsyncExitCodes -- -- Calls rsync with this default options -- rsync.rsync = { -- The rsync binary to be called. binary = '/usr/bin/rsync', links = true, times = true, protect_args = true } -- -- Default delay -- rsync.delay = 15 lsyncd-release-2.1.6/default-rsyncssh.lua000066400000000000000000000221201260764373300204430ustar00rootroot00000000000000-- -- default-rsyncssh.lua -- -- Improved rsync - sync with rsync, but moves and deletes executed over ssh. -- A (Layer 1) configuration. -- -- Note: -- this is infact just a configuration using Layer 1 configuration -- like any other. It only gets compiled into the binary by default. 
-- You can simply use a modified one, by copying everything into a -- config file of yours and name it differently. -- -- License: GPLv2 (see COPYING) or any later version -- Authors: Axel Kittenberger -- -- if not default then error( 'default not loaded' ); end if not default.rsync then error( 'default.rsync not loaded' ); end if default.rsyncssh then error( 'default-rsyncssh already loaded' ); end -- -- rsyncssh extends default.rsync -- local rsyncssh = { default.rsync } default.rsyncssh = rsyncssh -- -- used to ensure there aren't typos in the keys -- rsyncssh.checkgauge = { -- unsets the inherited value of from default.rsync target = false, onMove = true, -- rsyncssh users host and targetdir host = true, targetdir = true, sshExitCodes = true, rsyncExitCodes = true, -- ssh settings ssh = { binary = true, identityFile = true, options = true, port = true, _extra = true }, -- xargs settings xargs = { binary = true, delimiter = true, _extra = true } } -- -- Spawns rsync for a list of events -- rsyncssh.action = function( inlet ) local event, event2 = inlet.getEvent( ) local config = inlet.getConfig( ) -- makes move local on target host -- if the move fails, it deletes the source if event.etype == 'Move' then local path1 = config.targetdir .. event.path local path2 = config.targetdir .. event2.path path1 = "'" .. path1:gsub ('\'', '\'"\'"\'') .. "'" path2 = "'" .. path2:gsub ('\'', '\'"\'"\'') .. "'" log( 'Normal', 'Moving ', event.path, ' -> ', event2.path ) spawn( event, config.ssh.binary, config.ssh._computed, config.host, 'mv', path1, path2, '||', 'rm', '-rf', path1 ) return end -- uses ssh to delete files on remote host -- instead of constructing rsync filters if event.etype == 'Delete' then if config.delete ~= true and config.delete ~= 'running' then inlet.discardEvent(event) return end -- gets all other deletes ready to be -- executed local elist = inlet.getEvents( function( e ) return e.etype == 'Delete' end ) -- returns the paths of the delete list local paths = elist.getPaths( function( etype, path1, path2 ) if path2 then return config.targetdir..path1, config.targetdir..path2 else return config.targetdir..path1 end end ) -- ensures none of the paths is '/' for _, v in pairs( paths ) do if string.match(v, '^%s*/+%s*$') then log('Error', 'refusing to `rm -rf /` the target!') terminate(-1) -- ERRNO end end log( 'Normal', 'Deleting list\n', table.concat( paths, '\n' ) ) local params = { } spawn( elist, config.ssh.binary, '<', table.concat(paths, config.xargs.delimiter), params, config.ssh._computed, config.host, config.xargs.binary, config.xargs._extra ) return end -- -- for everything else a rsync is spawned -- local elist = inlet.getEvents( function( e ) -- TODO use a table return e.etype ~= 'Move' and e.etype ~= 'Delete' and e.etype ~= 'Init' and e.etype ~= 'Blanket' end ) local paths = elist.getPaths( ) -- -- removes trailing slashes from dirs. -- for k, v in ipairs( paths ) do if string.byte( v, -1 ) == 47 then paths[k] = string.sub( v, 1, -2 ) end end local sPaths = table.concat( paths, '\n' ) local zPaths = table.concat( paths, '\000' ) log( 'Normal', 'Rsyncing list\n', sPaths ) spawn( elist, config.rsync.binary, '<', zPaths, config.rsync._computed, '--from0', '--files-from=-', config.source, config.host .. ':' .. 
config.targetdir ) end ----- -- Called when collecting a finished child process -- rsyncssh.collect = function( agent, exitcode ) local config = agent.config if not agent.isList and agent.etype == 'Init' then local rc = config.rsyncExitCodes[exitcode] if rc == 'ok' then log('Normal', 'Startup of "',agent.source,'" finished: ', exitcode) elseif rc == 'again' then if settings('insist') then log('Normal', 'Retrying startup of "',agent.source,'": ', exitcode) else log('Error', 'Temporary or permanent failure on startup of "', agent.source, '". Terminating since "insist" is not set.'); terminate(-1) -- ERRNO end elseif rc == 'die' then log('Error', 'Failure on startup of "',agent.source,'": ', exitcode) else log('Error', 'Unknown exitcode on startup of "', agent.source,': "',exitcode) rc = 'die' end return rc end if agent.isList then local rc = config.rsyncExitCodes[exitcode] if rc == 'ok' then log('Normal', 'Finished (list): ',exitcode) elseif rc == 'again' then log('Normal', 'Retrying (list): ',exitcode) elseif rc == 'die' then log('Error', 'Failure (list): ', exitcode) else log('Error', 'Unknown exitcode (list): ',exitcode) rc = 'die' end return rc else local rc = config.sshExitCodes[exitcode] if rc == 'ok' then log('Normal', 'Finished ',agent.etype,' ',agent.sourcePath,': ',exitcode) elseif rc == 'again' then log('Normal', 'Retrying ',agent.etype,' ',agent.sourcePath,': ',exitcode) elseif rc == 'die' then log('Normal', 'Failure ',agent.etype,' ',agent.sourcePath,': ',exitcode) else log('Error', 'Unknown exitcode ',agent.etype,' ',agent.sourcePath,': ',exitcode) rc = 'die' end return rc end end -- -- checks the configuration. -- rsyncssh.prepare = function( config, level ) default.rsync.prepare( config, level + 1, true ) if not config.host then error( 'default.rsyncssh needs "host" configured', level ) end if not config.targetdir then error( 'default.rsyncssh needs "targetdir" configured', level ) end -- -- computes the ssh options -- if config.ssh._computed then error( 'please do not use the internal rsync._computed parameter', level ) end local cssh = config.ssh; cssh._computed = { } local computed = cssh._computed local computedN = 1 local rsyncc = config.rsync._computed if cssh.identityFile then computed[ computedN ] = '-i' computed[ computedN + 1 ] = cssh.identityFile computedN = computedN + 2 if not config.rsync._rshIndex then config.rsync._rshIndex = #rsyncc + 1 rsyncc[ config.rsync._rshIndex ] = '--rsh=ssh' end rsyncc[ config.rsync._rshIndex ] = rsyncc[ config.rsync._rshIndex ] .. ' -i ' .. cssh.identityFile end if cssh.options then for k, v in pairs( cssh.options ) do computed[ computedN ] = '-o' computed[ computedN + 1 ] = k .. '=' .. v computedN = computedN + 2 if not config.rsync._rshIndex then config.rsync._rshIndex = #rsyncc + 1 rsyncc[ config.rsync._rshIndex ] = '--rsh=ssh' end rsyncc[ config.rsync._rshIndex ] = table.concat( { rsyncc[ config.rsync._rshIndex ], ' -o ', k, '=', v }, '' ) end end if cssh.port then computed[ computedN ] = '-p' computed[ computedN + 1 ] = cssh.port computedN = computedN + 2 if not config.rsync._rshIndex then config.rsync._rshIndex = #rsyncc + 1 rsyncc[ config.rsync._rshIndex ] = '--rsh=ssh' end rsyncc[ config.rsync._rshIndex ] = rsyncc[ config.rsync._rshIndex ] .. ' -p ' .. cssh.port end if cssh._extra then for k, v in ipairs( cssh._extra ) do computed[ computedN ] = v computedN = computedN + 1 end end -- appends a slash to the targetdir if missing if string.sub( config.targetdir, -1 ) ~= '/' then config.targetdir = config.targetdir .. 
'/' end end -- -- allow processes -- rsyncssh.maxProcesses = 1 -- -- The core should not split move events -- rsyncssh.onMove = true -- -- default delay -- rsyncssh.delay = 15 -- -- no default exit codes -- rsyncssh.exitcodes = false -- -- rsync exit codes -- rsyncssh.rsyncExitCodes = default.rsyncExitCodes -- -- ssh exit codes -- rsyncssh.sshExitCodes = default.sshExitCodes -- -- xargs calls configuration -- -- xargs is used to delete multiple remote files, when ssh access is -- available this is simpler than to build filters for rsync for this. -- rsyncssh.xargs = { -- -- the binary called (on target host) binary = '/usr/bin/xargs', -- -- delimiter, uses null by default, you might want to override this for older -- by for example '\n' delimiter = '\000', -- -- extra parameters _extra = { '-0', 'rm -rf' } } -- -- ssh calls configuration -- -- ssh is used to move and delete files on the target host -- rsyncssh.ssh = { -- -- the binary called -- binary = '/usr/bin/ssh', -- -- if set adds this key to ssh -- identityFile = nil, -- -- if set adds this special options to ssh -- options = nil, -- -- if set connect to this port -- port = nil, -- -- extra parameters -- _extra = { } } lsyncd-release-2.1.6/default.lua000066400000000000000000000151561260764373300166040ustar00rootroot00000000000000--============================================================================ -- default.lua Live (Mirror) Syncing Demon -- -- The default table for the user to access. -- This default layer 1 functions provide the higher layer functionality. -- -- License: GPLv2 (see COPYING) or any later version -- Authors: Axel Kittenberger --============================================================================ if default then error( 'default already loaded' ) end default = { } -- -- Only this items are inherited from the default -- table -- default._merge = { action = true, checkgauge = true, collect = true, delay = true, init = true, maxDelays = true, maxProcesses = true, prepare = true, } -- -- used to ensure there aren't typos in the keys -- default.checkgauge = { action = true, checkgauge = true, collect = true, delay = true, exitcodes = true, init = true, maxDelays = true, maxProcesses = true, onAttrib = true, onCreate = true, onModify = true, onDelete = true, onStartup = true, onMove = true, prepare = true, source = true, target = true, } -- -- On default action the user's on*** scripts are called. -- default.action = function( inlet ) -- in case of moves getEvent returns the origin and dest of the move local event, event2 = inlet.getEvent( ) local config = inlet.getConfig( ) local func = config[ 'on'.. event.etype ] if type( func ) == 'function' then func( event, event2 ) end -- if function didnt change the wait status its not interested -- in this event -> drop it. if event.status == 'wait' then inlet.discardEvent( event ) end end -- -- Default collector. 
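--
-- default.action above dispatches each event to the user's on* handler and
-- discards events the handler leaves in 'wait' state; the collector below
-- then evaluates the exit code of whatever the handler spawned.
-- An illustrative sketch only (paths are assumed, not part of the shipped
-- defaults) of a user config relying on that dispatch:
--
--   sync {
--       source   = '/home/user/watched/',
--       target   = '/mnt/mirror/',
--       onCreate = function( event )
--           -- spawning moves the event out of the 'wait' state,
--           -- so default.action will not discard it
--           spawn( event, '/bin/cp', '-r', event.sourcePath, event.targetPath )
--       end,
--       onDelete = function( event )
--           spawn( event, '/bin/rm', '-rf', event.targetPath )
--       end,
--   }
--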
-- -- Called when collecting a finished child process -- default.collect = function( agent, exitcode ) local config = agent.config local rc if config.exitcodes then rc = config.exitcodes[exitcode] elseif exitcode == 0 then rc = 'ok' else rc = 'die' end -- TODO synchronize with similar code before if not agent.isList and agent.etype == 'Init' then if rc == 'ok' then log('Normal', 'Startup of "',agent.source,'" finished.') return 'ok' elseif rc == 'again' then if settings('insist') then log( 'Normal', 'Retrying startup of "', agent.source, '": ', exitcode ) return 'again' else log( 'Error', 'Temporary or permanent failure on startup of "', agent.source, '". Terminating since "insist" is not set.' ) terminate( -1 ) end elseif rc == 'die' then log( 'Error', 'Failure on startup of "', agent.source, '".' ) terminate( -1 ) else log( 'Error', 'Unknown exitcode "', exitcode, '" on startup of "', agent.source, '".' ) return 'die' end end if agent.isList then if rc == 'ok' then log( 'Normal', 'Finished a list after exitcode: ', exitcode ) elseif rc == 'again' then log( 'Normal', 'Retrying a list after exitcode = ', exitcode ) elseif rc == 'die' then log( 'Error', 'Failure with a list width exitcode = ', exitcode ) else log( 'Error', 'Unknown exitcode "',exitcode,'" with a list' ) rc = 'die' end else if rc == 'ok' then log('Normal', 'Finished ',agent.etype,' on ',agent.sourcePath,' = ',exitcode) elseif rc == 'again' then log('Normal', 'Retrying ',agent.etype,' on ',agent.sourcePath,' = ',exitcode) elseif rc == 'die' then log('Error', 'Failure with ',agent.etype,' on ',agent.sourcePath,' = ',exitcode) else log('Normal', 'Unknown exitcode "',exitcode,'" with ', agent.etype, ' on ',agent.sourcePath,' = ',exitcode) rc = 'die' end end return rc end -- -- Called on the Init event sent -- on (re)initialization of Lsyncd for every sync -- default.init = function(event) local config = event.config local inlet = event.inlet -- user functions -- calls a startup if given by user script. if type(config.onStartup) == 'function' then local startup = config.onStartup(event) -- TODO honor some return codes of startup like "warmstart". end if event.status == 'wait' then -- user script did not spawn anything -- thus the blanket event is deleted again. inlet.discardEvent(event) end end -- -- The collapsor tries not to have more than these delays. -- So it dealy stack does not grow too large, -- since calculation for stacking events is n*log(n) (or so) -- default.maxDelays = 1000 -- -- The maximum number of processes Lsyncd will -- simultanously spawn for this sync. -- default.maxProcesses = 1 -- -- Exitcodes of rsync and what to do. -- TODO move to rsync -- default.rsyncExitCodes = { -- -- if another config provides the same table -- this will not be inherited (merged) into that one -- -- if it does not, integer keys are to be copied -- verbatim -- _merge = false, _verbatim = true, [ 0 ] = 'ok', [ 1 ] = 'die', [ 2 ] = 'die', [ 3 ] = 'again', [ 4 ] = 'die', [ 5 ] = 'again', [ 6 ] = 'again', [ 10 ] = 'again', [ 11 ] = 'again', [ 12 ] = 'again', [ 14 ] = 'again', [ 20 ] = 'again', [ 21 ] = 'again', [ 22 ] = 'again', -- partial transfers are ok, since Lsyncd has registered the event that -- caused the transfer to be partial and will recall rsync. [ 23 ] = 'ok', [ 24 ] = 'ok', [ 25 ] = 'die', [ 30 ] = 'again', [ 35 ] = 'again', [ 255 ] = 'again', } -- -- Exitcodes of ssh and what to do. 
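--
-- Both default.rsyncExitCodes above and default.sshExitCodes below map raw
-- exit values to the verdicts 'ok', 'again' (requeue and retry) or 'die'
-- that default.collect acts upon. An illustrative sketch only (the helper
-- script is assumed) of a user config shipping its own table the same way:
--
--   myMirror = {
--       onModify  = '/usr/local/bin/push-change.sh ^sourcePath',
--       exitcodes = {
--           [ 0 ] = 'ok',
--           [ 1 ] = 'again',   -- transient failure, retry the event
--           [ 2 ] = 'die',     -- hard failure, give up
--       },
--   }
--   sync { myMirror, source = '/home/user/watched/', target = '/mnt/mirror/' }
--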
-- default.sshExitCodes = { -- -- if another config provides the same table -- this will not be inherited (merged) into that one -- -- if it does not, integer keys are to be copied -- verbatim -- _merge = false, _verbatim = true, [ 0 ] = 'ok', [ 255 ] = 'again', } -- -- Minimum seconds between two writes of a status file. -- default.statusInterval = 10 -- -- checks all keys to be in the checkgauge -- local function check( config, gauge, subtable, level ) for k, v in pairs( config ) do if not gauge[k] then error( 'Parameter "' .. subtable .. k .. '" unknown.' .. ' (if this is not a typo add it to checkgauge)', level ); end if type( gauge [ k ] ) == 'table' then if type( v ) ~= 'table' then error( 'Parameter "' .. subtable .. k .. '" must be a table.', level ) end check( config[ k ], gauge[ k ], subtable .. k .. '.', level + 1 ) end end end default.prepare = function( config, level ) local gauge = config.checkgauge if not gauge then return end check( config, gauge, '', level + 1 ) end lsyncd-release-2.1.6/doc/000077500000000000000000000000001260764373300152125ustar00rootroot00000000000000lsyncd-release-2.1.6/doc/lsyncd.1000066400000000000000000000113071260764373300165720ustar00rootroot00000000000000'\" t .\" Title: lsyncd .\" Author: [see the "AUTHOR" section] .\" Generator: DocBook XSL Stylesheets v1.78.1 .\" Date: April 2012 .\" Manual: Lsyncd .\" Source: Lsyncd 2.0.7 .\" Language: English .\" .TH "LSYNCD" "1" "April 2012" "Lsyncd 2\&.0\&.7" "Lsyncd" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .\" http://bugs.debian.org/507673 .\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- .\" disable hyphenation .nh .\" disable justification (adjust text to left margin only) .ad l .\" ----------------------------------------------------------------- .\" * MAIN CONTENT STARTS HERE * .\" ----------------------------------------------------------------- .SH "NAME" lsyncd \- a daemon to continuously synchronize directory trees .SH "SYNOPSIS" .PP config file .RS 4 \ \& \fBlsyncd\fR [\fIOPTIONS\fR] \fICONFIG\-FILE\fR .RE .PP default rsync behaviour .RS 4 \ \& \fBlsyncd\fR [\fIOPTIONS\fR] \-rsync \fISOURCEDIR\fR\fITARGET\fR \&... .RE .PP default rync+ssh behaviour (moves and deletes through ssh) .RS 4 \ \& \fBlsyncd\fR [\fIOPTIONS\fR] \-rsyncssh \fISOURCEDIR\fR\fITARGETHOST\fR\fITARGETDIR\fR \&... .RE .PP default direct behaviour (local file operations/rsync) .RS 4 \ \& \fBlsyncd\fR [\fIOPTIONS\fR] \-direct \fISOURCEDIR\fR\fITARGETDIR\fR \&... .RE .SH "DESCRIPTION" .sp Lsyncd(1) watches local directory trees through an event monitor interface (inotify, fsevents)\&. It aggregates and combines events for a few seconds and then spawns one or more processes to synchronize the changes\&. By default this is rsync(1)\&. Lsyncd is thus a light\-weight asynchronous live mirror solution that is comparatively easy to install not requiring new filesystems or block devices and does not hamper local filesystem performance\&. 
.sp Rsync+ssh is an advanced action configuration that uses a SSH(1) to act file and directory moves directly on the target instead of re\-transmitting the move destination over the wire\&. .sp Fine\-grained customization can be achieved through the CONFIG\-FILE\&. Custom action configs can even be written from scratch in cascading layers ranging from shell scripts to code written in the LUA(1) language\&. This way simplicity can be balanced with powerfulness\&. See the online manual for details on the CONFIG\-FILE https://github\&.com/axkibe/lsyncd/wiki/Manual\-to\-Lsyncd\-2\&.0\&.x \&. .sp Note that under normal configuration Lsyncd will delete pre\-existing files in the target directories that are not present in the respective source directory\&. .SH "OPTIONS" .PP \fB\-delay\fR \fISECS\fR .RS 4 Overrides the default delay times\&. .RE .PP \fB\-help\fR .RS 4 Show a help message\&. .RE .PP \fB\-insist\fR .RS 4 Continues start up even if rsync cannot connect\&. .RE .PP \fB\-log\fR \fILEVEL\fR .RS 4 Controls which kind of events are logged\&. By default Lsyncd logs \fINormal\fR and \fIError\fR Messages\&. \fB\-log scarce\fR will make Lsyncd log Error messages only\&. \fB\-log all\fR will log all debug messages\&. .RE .PP \fB\-log\fR \fICategory\fR .RS 4 Turns on a specific debug message\&. E\&.g\&. \fB\-log Exec\fR will log all processes as they are spawned\&. .RE .PP \fB\-nodaemon\fR .RS 4 Lsyncd will not detach from the invoker and log as well to stdout/err\&. .RE .PP \fB\-pidfile\fR \fIFILE\fR .RS 4 Lsyncd will write its process ID in \fIFILE\fR\&. .RE .PP \fB\-runner\fR \fIFILE\fR .RS 4 Makes the Lsyncd core load the part of Lsyncd written in Lua from \fIFILE\fR\&. .RE .PP \fB\-version\fR .RS 4 Writes version information and exits\&. .RE .SH "EXIT STATUS" .PP \fB0\fR .RS 4 Terminated on a TERM signal(7) .RE .PP \fB\-1\fR .RS 4 Failure (syntax, unrecoverable error condition, internal failure) .RE .SH "SEE ALSO" .sp Online Manual: https://github\&.com/axkibe/lsyncd/wiki/Lsyncd\-2\&.1\&.x\-%E2%80%96\-What\(cqs\-New%3F .SH "VERSION" .sp This man page is for lsyncd(1) version 2\&.0\&.7 .SH "AUTHOR" .sp Axel Kittenberger, 2010\-2012 .SH "COPYING" .sp Copyright (C) 2010\-2012 Axel Kittenberger\&. Free use of this software is granted under the terms of the GNU General Public License (GPL) version 2, or any later version\&. Free redistrubition of this Documentation (/doc directory) is granted under the terms of the Creative Commons 3\&.0 Attribution License (CC\-3\&.0\-BY)\&. lsyncd-release-2.1.6/doc/lsyncd.1.txt000066400000000000000000000063751260764373300174210ustar00rootroot00000000000000lsyncd(1) ========= :doctype: manpage :man source: Lsyncd :man manual: Lsyncd :man version: 2.0.7 :date: April 2012 NAME ---- lsyncd - a daemon to continuously synchronize directory trees SYNOPSIS -------- config file::: {nbsp} *lsyncd* ['OPTIONS'] 'CONFIG-FILE' default rsync behaviour::: {nbsp} *lsyncd* ['OPTIONS'] -rsync 'SOURCEDIR' 'TARGET' ... default rync+ssh behaviour (moves and deletes through ssh)::: {nbsp} *lsyncd* ['OPTIONS'] -rsyncssh 'SOURCEDIR' 'TARGETHOST' 'TARGETDIR' ... default direct behaviour (local file operations/rsync)::: {nbsp} *lsyncd* ['OPTIONS'] -direct 'SOURCEDIR' 'TARGETDIR' ... DESCRIPTION ------------ Lsyncd(1) watches local directory trees through an event monitor interface (inotify, fsevents). It aggregates and combines events for a few seconds and then spawns one or more processes to synchronize the changes. By default this is rsync(1). 
Lsyncd is thus a light-weight asynchronous live mirror solution that is comparatively easy to install not requiring new filesystems or block devices and does not hamper local filesystem performance. Rsync+ssh is an advanced action configuration that uses a SSH(1) to act file and directory moves directly on the target instead of re-transmitting the move destination over the wire. Fine-grained customization can be achieved through the CONFIG-FILE. Custom action configs can even be written from scratch in cascading layers ranging from shell scripts to code written in the LUA(1) language. This way simplicity can be balanced with powerfulness. See the online manual for details on the CONFIG-FILE https://github.com/axkibe/lsyncd/wiki/Manual-to-Lsyncd-2.0.x . Note that under normal configuration Lsyncd will delete pre-existing files in the target directories that are not present in the respective source directory. OPTIONS ------- *-delay* 'SECS':: Overrides the default delay times. *-help*:: Show a help message. *-insist*:: Continues start up even if rsync cannot connect. *-log* 'LEVEL':: Controls which kind of events are logged. By default Lsyncd logs 'Normal' and 'Error' Messages. *-log scarce* will make Lsyncd log Error messages only. *-log all* will log all debug messages. *-log* 'Category':: Turns on a specific debug message. E.g. *-log Exec* will log all processes as they are spawned. *-nodaemon*:: Lsyncd will not detach from the invoker and log as well to stdout/err. *-pidfile* 'FILE':: Lsyncd will write its process ID in 'FILE'. *-runner* 'FILE':: Makes the Lsyncd core load the part of Lsyncd written in Lua from 'FILE'. *-version*:: Writes version information and exits. EXIT STATUS ----------- *0*:: Terminated on a TERM signal(7) *-1*:: Failure (syntax, unrecoverable error condition, internal failure) SEE ALSO -------- Online Manual: https://github.com/axkibe/lsyncd/wiki/Lsyncd-2.1.x-%E2%80%96-What's-New%3F VERSION ------ This man page is for lsyncd(1) version 2.0.7 AUTHOR ------ Axel Kittenberger, 2010-2012 COPYING ------- Copyright \(C) 2010-2012 Axel Kittenberger. Free use of this software is granted under the terms of the GNU General Public License (GPL) version 2, or any later version. Free redistrubition of this Documentation (/doc directory) is granted under the terms of the Creative Commons 3.0 Attribution License (CC-3.0-BY). lsyncd-release-2.1.6/examples/000077500000000000000000000000001260764373300162635ustar00rootroot00000000000000lsyncd-release-2.1.6/examples/lalarm.lua000066400000000000000000000013131260764373300202340ustar00rootroot00000000000000----- -- User configuration file for lsyncd. -- -- While this example does not do anything it shows -- how user custom alarms can be now. It will log -- "Beep!" every 5 seconds. -- settings.nodaemon = true local function noAction (inlet) -- just discard any events that happes in source dir. inlet.discardEvent(inlet.getEvent()) end ----- -- Adds a watch to some not so large directory for this example. local in1 = sync{source="/usr/local/etc/", action = noAction } local function myAlarm(timestamp, extra) log("Normal", extra.message) spawn(extra.inlet.createBlanketEvent(), "/bin/echo", extra.message) alarm(timestamp + 5, myAlarm, extra) end alarm(now() + 5, myAlarm, {inlet = in1, message = "Beep"}) lsyncd-release-2.1.6/examples/lbash.lua000066400000000000000000000020411260764373300200540ustar00rootroot00000000000000----- -- User configuration file for lsyncd. 
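--
-- Related to the note in the manual page above: under the default rsync
-- configuration, files on the target that are missing from the source get
-- deleted. An illustrative sketch only (paths are assumed) of switching that
-- off per sync:
--
--   sync {
--       default.rsync,
--       source = '/home/user/src/',
--       target = '/mnt/mirror/',
--       delete = false,   -- keep target files that no longer exist in the source
--   }
--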
-- -- This example uses local bash commands to keep two local -- directory trees in sync. -- settings = { logfile = "/tmp/lsyncd.log", statusFile = "/tmp/lsyncd.stat", statusIntervall = 1, nodaemon = true, } ----- -- for testing purposes. prefix can be used to slow commands down. -- prefix = "sleep 5 && " -- prefix = "" ----- -- for testing purposes. uses bash command to hold local dirs in sync. -- bash = { delay = 0, maxProcesses = 1, -- calls `cp -r SOURCE/* TARGET` only when there is something in SOURCE -- otherwise it deletes contents in the target if there. onStartup = [[ if [ "$(ls -A ^source)" ]; then cp -r ^source* ^target; else if [ "$(ls -A ^target)" ]; then rm -rf ^target/*; fi fi]], onCreate = prefix..[[cp -r ^sourcePath ^targetPathdir]], onModify = prefix..[[cp -r ^sourcePath ^targetPathdir]], onDelete = prefix..[[rm -rf ^targetPath]], onMove = prefix..[[mv ^o.targetPath ^d.targetPath]], } sync{bash, source="src", target="/path/to/trg/"} lsyncd-release-2.1.6/examples/lecho.lua000066400000000000000000000010261260764373300200570ustar00rootroot00000000000000----- -- User configuration file for lsyncd. -- -- This example uses just echos the operations -- ----- -- for testing purposes. just echos what is happening. -- echo = { maxProcesses = 1, delay = 1, onStartup = "/bin/echo telling about ^source", onAttrib = "/bin/echo attrib ^pathname", onCreate = "/bin/echo create ^pathname", onDelete = "/bin/echo delete ^pathname", onModify = "/bin/echo modify ^pathname", onMove = "/bin/echo move ^o.pathname -> ^d.pathname", } sync{echo, source="src", target="/path/to/trg/"} lsyncd-release-2.1.6/examples/lftp.lua000066400000000000000000000064601260764373300177410ustar00rootroot00000000000000--~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- User configuration file for lsyncd. -- -- Syncs with 'lftp'. -- --~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lftp = { ----- -- Spawns rsync for a list of events -- action = function(inlet) -- gets all events ready for syncing local elist = inlet.getEvents( function(event) return event.etype ~= 'Init' and event.etype ~= 'Blanket' end ) ----- -- replaces filter rule by literals -- local function sub(p) if not p then return end return p:gsub('%?', '\\?'): gsub('%*', '\\*'): gsub('%[', '\\['): gsub('%]', '\\]') end local config = inlet.getConfig() local commands = elist.getPaths( function(etype, path1, path2) if etype == 'Delete' then if string.byte(path1, -1) == 47 then return 'rm -r '.. config.targetdir..sub(path1) else return 'rm '.. config.targetdir..sub(path1) end elseif etype == 'Create' or etype == 'Modify' or etype == 'Attrib' then if string.byte(path1, -1) == 47 then return 'mirror -R '.. config.source..sub(path1)..' '.. config.targetdir..sub(path1) else return 'put '.. config.source..sub(path1).. 
' -o '..config.targetdir..sub(path1) end end end ) if #commands == 0 then spawn(elist, '/bin/true') return end commands = table.concat(commands, ';\n') log('Normal', 'Calling lftp with commands\n', commands) spawn(elist, '/usr/bin/lftp', '<', commands, '-u', config.user..','..config.pass, config.host ) end, ----- -- Spawns the recursive startup sync -- init = function(event) local config = event.config local inlet = event.inlet local excludes = inlet.getExcludes() local delete = nil if config.delete then delete = { '--delete', '--ignore-errors' }; end if #excludes ~= 0 then error('lftp does not work with excludes', 4) end log('Normal', 'recursive startup lftp: ', config.source, ' to host: ', config.host) spawn(event, '/usr/bin/lftp', '-c', 'open -u '..config.user..','..config.pass..' '..config.host..'; '.. 'mirror -R -e '..config.source..' '..config.targetdir..';' ) end, ----- -- Checks the configuration. -- prepare = function(config) if not config.host then error('lftps needs "host" configured', 4); end if not config.user then error('lftps needs "user" configured', 4); end if not config.pass then error('lftps needs "pass" configured', 4); end if not config.targetdir then error('lftp needs "targetdir" configured', 4) end if config.target then error('lftp needs NOT "target" configured', 4) end if config.exclude then error('lftp does not work with excludes', 4) end if config.rsyncOpts then error('lftp needs NOT "rsyncOpts" configured', 4) end if string.sub(config.targetdir, -1) == '/' then error('please make targetdir not end with a /', 4) end end, ----- -- Exit codes for rsync. -- exitcodes = { [ 0] = 'ok', [ 1] = 'ok', }, ----- -- Default delay -- delay = 1, } sync{ lftp, host = 'localhost', user = 'test', pass = 'test', source = 'src', targetdir = '.', } lsyncd-release-2.1.6/examples/lgforce.lua000066400000000000000000000043461260764373300204160ustar00rootroot00000000000000----- -- User configuration file for lsyncd. -- -- This example refers to one common challenge in multiuser unix systems. -- -- You have a shared directory for a set of users and you want -- to ensure all users have read and write permissions on all -- files in there. Unfortunally sometimes users mess with their -- umask, and create files in there that are not read/write/deleteable -- by others. Usually this involves frequent handfixes by a sysadmin, -- or a cron job that recursively chmods/chowns the whole directory. -- -- This is another approach to use lsyncd to continously fix permissions. -- -- One second after a file is created/modified it checks for its permissions -- and forces group permissions on it. -- -- This example regards more the handcraft of bash scripting than lsyncd. -- An alternative to this would be to load a Lua-Posix library and do the -- permission changes right within the onAction handlers. ---- -- forces this group. -- fgroup = "staff" ----- -- script for all changes. -- command = -- checks if the group is the one enforced and sets them if not [[ perm=`stat -c %A ^sourcePathname` if [ `stat -c %G ^sourcePathname` != ]]..fgroup..[[ ]; then /bin/chgrp ]]..fgroup..[[ ^sourcePathname || /bin/true; fi ]] .. -- checks if the group permissions are rw and sets them [[ if [ `expr match $perm "....rw"` == 0 ]; then /bin/chmod g+rw ^sourcePathname || /bin/true; fi ]] .. -- and forces the executable bit for directories. 
[[ if [ -d ^sourcePathname ]; then if [ `expr match $perm "......x"` == 0 ]; then /bin/chmod g+x ^^sourcePathname || /bin/true; fi fi ]] -- on startup recursevily sets all group ownerships -- all group permissions are set to rw -- and to executable flag for directories -- -- the carret as first char tells Lsycnd to call a shell altough it -- starts with a slash otherwisw -- startup = [[^/bin/chgrp -R ]]..fgroup..[[ ^source || /bin/true && /bin/chmod -R g+rw ^source || /bin/true && /usr/bin/find ^source -type d | xargs chmod g+x ]] gforce = { maxProcesses = 99, delay = 1, onStartup = startup, onAttrib = command, onCreate = command, onModify = command, -- does nothing on moves, they won't change permissions onMove = true, } sync{gforce, source="/path/to/share"} lsyncd-release-2.1.6/examples/limagemagic.lua000066400000000000000000000052611260764373300212310ustar00rootroot00000000000000---- -- Lsyncd user-script that creates a "magic" image converter directory. -- -- This configuration will automatically convert all images that are placed -- in the directory 'magicdir' all resulting images are placed in the same -- directory! -- -- Be sure to mkdir 'magicdir' first. ----- -- Fileformats: .jpg .gif .png -- local formats = { jpg=true, gif=true, png=true, } convert = { delay = 0, maxProcesses = 99, action = function(inlet) local event = inlet.getEvent() if event.isdir then -- ignores events on dirs inlet.discardEvent(event) return end -- extract extension and basefilename local p = event.pathname local ext = string.match(p, ".*%.([^.]+)$") local base = string.match(p, "(.*)%.[^.]+$") if not formats[ext] then -- an unknown extenion log("Normal", "not doing something on ."..ext) inlet.discardEvent(event) return end -- autoconvert on create and modify if event.etype == "Create" or event.etype == "Modify" then -- builds one bash command local cmd = "" -- do for all other extensions for k, _ in pairs(formats) do if k ~= ext then -- excludes files to be created, so no -- followup actions will occur inlet.addExclude(base..'.'..k) if cmd ~= "" then cmd = cmd .. " && " end cmd = cmd.. '/usr/bin/convert "'.. event.source..p..'" "'.. event.source..base..'.'..k.. '" || /bin/true' end end log("Normal", "Converting "..p) spawnShell(event, cmd) return end -- deletes all formats if you delete one if event.etype == "Delete" then -- builds one bash command local cmd = "" -- do for all other extensions for k, _ in pairs(formats) do if k ~= ext then -- excludes files to be created, so no -- followup actions will occur inlet.addExclude(base..'.'..k) if cmd ~= "" then cmd = cmd .. " && " end cmd = cmd.. 'rm "'..event.source..base..'.'..k.. '" || /bin/true' end end log("Normal", "Deleting all "..p) spawnShell(event, cmd) return end -- ignores other events. inlet.discardEvent(event) end, ----- -- Removes excludes when convertions are finished -- collect = function(event, exitcode) local p = event.pathname local ext = string.match(p, ".*%.([^.]+)$") local base = string.match(p, "(.*)%.[^.]+$") local inlet = event.inlet if event.etype == "Create" or event.etype == "Modify" or event.etype == "Delete" then for k, _ in pairs(formats) do inlet.rmExclude(base..'.'..k) end end end, ----- -- Does not collapse anything collapse = function() return 3 end, } sync{convert, source="magicdir", subdirs=false} lsyncd-release-2.1.6/examples/lpostcmd.lua000066400000000000000000000053221260764373300206150ustar00rootroot00000000000000----- -- User configuration file for lsyncd. 
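--
-- A stripped-down skeleton (target directory is assumed) of the same layer-2
-- pattern used in limagemagic.lua above: pull the event from the inlet, act
-- on the interesting ones with spawnShell and discard the rest:
--
--   archiver = {
--       delay = 5,
--       maxProcesses = 1,
--       action = function( inlet )
--           local event = inlet.getEvent( )
--           if ( event.etype == 'Create' or event.etype == 'Modify' )
--           and not event.isdir
--           then
--               log( 'Normal', 'Archiving ', event.pathname )
--               spawnShell( event,
--                   'cp "' .. event.sourcePathname .. '" /srv/archive/' )
--           else
--               inlet.discardEvent( event )
--           end
--       end,
--   }
--   sync { archiver, source = '/srv/incoming/' }
--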
-- This needs lsyncd >= 2.0.3 -- -- This configuration will execute a command on the remote host -- after every successfullycompleted rsync operation. -- for example to restart servlets on the target host or so. local rsyncpostcmd = { -- based on default rsync. default.rsync, checkgauge = { default.rsync.checkgauge, host = true, targetdir = true, target = true, postcmd = true, }, -- for this config it is important to keep maxProcesses at 1, so -- the postcmds will only be spawned after the rsync completed maxProcesses = 1, -- called whenever something is to be done action = function(inlet) local event = inlet.getEvent() local config = inlet.getConfig() -- if the event is a blanket event and not the startup, -- its there to spawn the webservice restart at the target. if event.etype == "Blanket" then -- uses rawget to test if "isPostcmd" has been set without -- triggering an error if not. local isPostcmd = rawget(event, "isPostcmd") if isPostcmd then spawn(event, "/usr/bin/ssh", config.host, config.postcmd) return else -- this is the startup, forwards it to default routine. return default.rsync.action(inlet) end error("this should never be reached") end -- for any other event, a blanket event is created that -- will stack on the queue and do the postcmd when its finished local sync = inlet.createBlanketEvent() sync.isPostcmd = true -- the original event is simply forwarded to the normal action handler return default.rsync.action(inlet) end, -- called when a process exited. -- this can be a rsync command, the startup rsync or the postcmd collect = function(agent, exitcode) -- for the ssh commands 255 is network error -> try again local isPostcmd = rawget(agent, "isPostcmd") if not agent.isList and agent.etype == "Blanket" and isPostcmd then if exitcode == 255 then return "again" end return else --- everything else, forward to default collection handler return default.collect(agent,exitcode) end error("this should never be reached") end, -- called before anything else -- builds the target from host and targetdir prepare = function(config, level, skipTarget) if not config.host then error("rsyncpostcmd neets 'host' configured", 4) end if not config.targetdir then error("rsyncpostcmd needs 'targetdir' configured", 4) end if not config.target then config.target = config.host .. ":" .. config.targetdir end return default.rsync.prepare(config, level, skipTarget) end } sync { rsyncpostcmd, source = "src", host = "beetle", targetdir = "/path/to/trg", postcmd = "/usr/local/bin/restart-servelt.sh", } lsyncd-release-2.1.6/examples/lrsync.lua000066400000000000000000000003251260764373300203000ustar00rootroot00000000000000---- -- User configuration file for lsyncd. -- -- Simple example for default rsync. -- settings = { statusFile = "/tmp/lsyncd.stat", statusInterval = 1, } sync{ default.rsync, source="src", target="trg", } lsyncd-release-2.1.6/examples/lrsyncssh.lua000066400000000000000000000003141260764373300210140ustar00rootroot00000000000000---- -- User configuration file for lsyncd. -- -- Simple example for default rsync, but executing moves through on the target. -- sync{default.rsyncssh, source="src", host="localhost", targetdir="dst/"} lsyncd-release-2.1.6/examples/lsayirc.lua000066400000000000000000000077171260764373300204500ustar00rootroot00000000000000----- -- An Lsyncd+IRC-Bot Config -- -- Logs into an IRC channel and tells there everything that happens in the -- watched directory tree. -- -- The challenge coding Lsyncd configs taking use of TCP sockets is -- that they must not block! 
Otherwise Lsyncd will block, no longer -- empty the kernels monitor queue, no longer collecting zombie processes, -- no longer spawning processes (this example doesnt do any, but maybe you -- might want to do that as well), blocking is just bad. -- -- This demo codes just minimal IRC functionality. -- it does not respond to anything else than IRC PING messages. -- -- There is no flood control, if a lot happens the IRC server will disconnect -- the bot. -- -- Requires "luasocket" to be installed require("socket") -- For demo reasons, do not detach settings.nodaemon = true hostname = "irc.freenode.org" --hostname = "127.0.0.1" port = 6667 nick = "lbot01" chan = "##lfile01" -- this blocks until the connection is established -- for once lets say this ok since Lsyncd didnt yet actually -- start. local ircSocket, err = socket.connect(hostname, port) if not ircSocket then log("Error", "Cannot connect to IRC: ", err) terminate(-1) end -- from now on, the socket must not block! ircSocket:settimeout(0) -- Buffers for stuff to send and receive on IRC: local ircWBuf = "" local ircRBuf = "" -- Predeclaration for functions calling each other local writeIRC ----- -- Called when the IRC socket can be written again. -- This happens when writeIRC (see below) couldnt write -- its buffer in one go, call it again so it can continue its task. local function ircWritey(fd) writeIRC() end ---- -- Called when there is data on the socket local function ircReady(socket) local l, err, ircRBuf = ircSocket:receive("*l", ircRBuf) if not l then if err ~= "timeout" then log("Error", "IRC connection failed: ", err) terminate(-1) end else ircRBuf = "" end log("Normal", "ircin :", l) --- answers ping messages local ping = l:match("PING :(.*)") if ping then writeIRC("PONG :", ping, "\n") end end ----- -- Writes on IRC socket -- Do not forget to add an "/n". function writeIRC(...) -- Appends all arbuments into the write buffer ircWBuf = ircWBuf..table.concat({...}) -- Gives it to the socket and sees how much it accepted local s, err = ircSocket:send(ircWBuf) -- If it cant the socket terminated. if not s and err~="timeout" then log("Error", "IRC connection failed: ", err) terminate(-1) end --- logs what has been send, without the linefeed. if (ircWBuf:sub(s, s) == "\n") then log("Normal", "ircout:", ircWBuf:sub(1, s - 1)) else log("Normal", "ircout: ", ircWBuf:sub(1, s), "\\") end ---- reduces the buffer by the amount of data sent. ircWBuf = ircWBuf:sub(s + 1, -1) -- when the write buffer is empty tell the core to no longer -- call ircWritey if data can be written on the socket. There -- is nothing to be written. If there is data in the buffer -- asks to be called as soon it can be written again if ircWBuf == "" then observefd(ircSocket:getfd(), ircReady, nil) else observefd(ircSocket:getfd(), ircReady, ircWritey) end end -- Aquires the nick on IRC and joins the configured channel -- This will also register the ircReady/ircWritey function at the core -- to be called when the socket is ready to be read/written. writeIRC("NICK ", nick, "\n") writeIRC("USER ", nick, " 0 * :lsyncd-sayirc-bot", "\n") writeIRC("JOIN ", chan, "\n") -- As action tells on IRC what the action is, then instead of -- spawning somthing, it discards the event. 
local function action(inlet) -- event2 is the target of a move event local event, event2 = inlet.getEvent() if not event2 then writeIRC("PRIVMSG ",chan," :",event.etype," ", event.path, "\n") else writeIRC("PRIVMSG ",chan," :",event.etype," ", event.path," -> ",event2.path, "\n") end inlet.discardEvent(event) end -- Watch a directory, and use a second for delay to aggregate events a little. sync{source = "src", action = action, delay = 1, onMove = true} lsyncd-release-2.1.6/fsevents.c000066400000000000000000000243011260764373300164460ustar00rootroot00000000000000/** fsevents.c from Lsyncd - Live (Mirror) Syncing Demon * * License: GPLv2 (see COPYING) or any later version * * Authors: Axel Kittenberger * Damian Steward * * ----------------------------------------------------------------------- * * Event interface for MacOS 10.5 (Leopard) /dev/fsevents interface. * * Special thanks go to Amit Singh and his fslogger demonstration that showed * how apples /dev/fsevents can be used. http://osxbook.com/software/fslogger/ * * -- WARNING -- Quoting http://www.osxbook.com/software/fslogger/ -- * * The interface that fslogger [and thus Lsyncd] uses is private to Apple. * Currently, there is a caveat regarding the use of this interface by third * parties (including fslogger [and thus Lsyncd]). While the change * notification interface supports multiple clients, there is a single kernel * buffer for holding events that are to be delivered to one or more * subscribers, with the primary subscriber being Spotlight. Now, the kernel * must hold events until it has notified all subscribers that are interested * in them. Since there is a single buffer, a slow subscriber can cause it to * overflow. If this happens, events will be dropped — for all subscribers, * including Spotlight. Consequently, Spotlight may need to look at the entire * volume to determine "what changed". 
*/ #include "lsyncd.h" #include #include #include #include #include #include #include #include #include #include "bsd/sys/fsevents.h" #include #include #include /* the fsevents pseudo-device */ #define DEV_FSEVENTS "/dev/fsevents" /* buffer for reading from the device */ #define FSEVENT_BUFSIZ 131072 /* limited by MAX_KFS_EVENTS */ #define EVENT_QUEUE_SIZE 4096 #define KFS_NUM_ARGS FSE_MAX_ARGS /* OS 10.5 structuce */ /* an event argument */ struct kfs_event_arg { /* argument type */ u_int16_t type; /* size of argument data that follows this field */ u_int16_t len; union { struct vnode *vp; char *str; void *ptr; int32_t int32; dev_t dev; ino_t ino; int32_t mode; uid_t uid; gid_t gid; uint64_t timestamp; } data; }; /* OS 10.5 structuce */ /* an event */ struct kfs_event { /* event type */ int32_t type; /* pid of the process that performed the operation */ pid_t pid; /* event arguments */ struct kfs_event_arg* args[FSE_MAX_ARGS]; }; /** * fsevents (cloned) filedescriptor */ static int fsevents_fd = -1; /* event names */ /*static const char *eventNames[FSE_MAX_EVENTS] = { "CREATE_FILE", "DELETE", "STAT_CHANGED", "RENAME", "CONTENT_MODIFIED", "EXCHANGE", "FINDER_INFO_CHANGED", "CREATE_DIR", "CHOWN", "XATTR_MODIFIED", "XATTR_REMOVED", };*/ /* argument names*/ /*static const char *argNames[] = { "UNKNOWN", "VNODE", "STRING", "PATH", "INT32", "INT64", "RAW", "INO", "UID", "DEV", "MODE", "GID", "FINFO", };*/ /** * The read buffer */ static size_t const readbuf_size = 131072; static char * readbuf = NULL; /** * The event buffer */ static size_t const eventbuf_size = FSEVENT_BUFSIZ; static char* eventbuf = NULL; /** * Handles one fsevents event */ static void handle_event(lua_State *L, struct kfs_event *event, ssize_t mlen) { int32_t atype; const char *path = NULL; const char *trg = NULL; const char *etype = NULL; int isdir = -1; if (event->type == FSE_EVENTS_DROPPED) { logstring("Fsevents", "Events dropped!"); load_runner_func(L, "overflow"); if (lua_pcall(L, 0, 0, -2)) { exit(-1); // ERRNO } lua_pop(L, 1); hup = 1; return; } atype = event->type & FSE_TYPE_MASK; /*uint32_t aflags = FSE_GET_FLAGS(event->type);*/ if ((atype < FSE_MAX_EVENTS) && (atype >= -1)) { /*printlogf(L, "Fsevents", "got event %s", eventNames[atype]); if (aflags & FSE_COMBINED_EVENTS) { logstring("Fsevents", "combined events"); } if (aflags & FSE_CONTAINS_DROPPED_EVENTS) { logstring("Fsevents", "contains dropped events"); }*/ } else { printlogf( L, "Error", "unknown event(%d) in fsevents.", atype ); exit(-1); // ERRNO } { /* assigns the expected arguments */ int whichArg = 0; while (whichArg < FSE_MAX_ARGS) { struct kfs_event_arg * arg = event->args[whichArg++]; if (arg->type == FSE_ARG_DONE) { break; } switch (arg->type) { case FSE_ARG_STRING : switch(atype) { case FSE_RENAME : if (path) { // for move events second string is target trg = (char *) &arg->data.str; } // fallthrough case FSE_CHOWN : case FSE_CONTENT_MODIFIED : case FSE_CREATE_FILE : case FSE_CREATE_DIR : case FSE_DELETE : case FSE_STAT_CHANGED : if (!path) path = (char *)&arg->data.str; break; } break; case FSE_ARG_MODE : switch(atype) { case FSE_RENAME : case FSE_CHOWN : case FSE_CONTENT_MODIFIED : case FSE_CREATE_FILE : case FSE_CREATE_DIR : case FSE_DELETE : case FSE_STAT_CHANGED : isdir = arg->data.mode & S_IFDIR ? 
1 : 0; break; } break; } } } switch(atype) { case FSE_CHOWN : case FSE_STAT_CHANGED : etype = "Attrib"; break; case FSE_CREATE_DIR : case FSE_CREATE_FILE : etype = "Create"; break; case FSE_DELETE : etype = "Delete"; break; case FSE_RENAME : etype = "Move"; break; case FSE_CONTENT_MODIFIED : etype = "Modify"; break; } if (etype) { if (!path) { printlogf(L, "Error", "Internal fail, fsevents, no path."); exit(-1); } if (isdir < 0) { printlogf(L, "Error", "Internal fail, fsevents, neither dir nor file."); exit(-1); } load_runner_func(L, "fsEventsEvent"); lua_pushstring(L, etype); lua_pushboolean(L, isdir); l_now(L); lua_pushstring(L, path); if (trg) { lua_pushstring(L, trg); } else { lua_pushnil(L); } if (lua_pcall(L, 5, 0, -7)) { exit(-1); // ERRNO } lua_pop(L, 1); } } /** * Called when fsevents has something to read */ static void fsevents_ready(lua_State *L, struct observance *obs) { if (obs->fd != fsevents_fd) { logstring("Error", "Internal, fsevents_fd != ob->fd"); exit(-1); // ERRNO } ptrdiff_t len = read (fsevents_fd, readbuf, readbuf_size); int err = errno; if (len == 0) { return; } if (len < 0) { if (err == EAGAIN) { /* nothing more */ return; } else { printlogf(L, "Error", "Read fail on fsevents"); exit(-1); // ERRNO } } { int off = 0; while (off < len && !hup && !term) { /* deals with alignment issues on 64 bit by copying data bit by bit */ struct kfs_event* event = (struct kfs_event *) eventbuf; event->type = *(int32_t*)(readbuf+off); off += sizeof(int32_t); event->pid = *(pid_t*)(readbuf+off); off += sizeof(pid_t); /* arguments */ int whichArg = 0; int eventbufOff = sizeof(struct kfs_event); size_t ptrSize = sizeof(void*); if ((eventbufOff % ptrSize) != 0) { eventbufOff += ptrSize-(eventbufOff%ptrSize); } while (off < len && whichArg < FSE_MAX_ARGS) { /* assign argument pointer to eventbuf based on known current offset into eventbuf */ uint16_t argLen = 0; event->args[whichArg] = (struct kfs_event_arg *) (eventbuf + eventbufOff); /* copy type */ uint16_t argType = *(uint16_t*)(readbuf + off); event->args[whichArg]->type = argType; off += sizeof(uint16_t); if (argType == FSE_ARG_DONE) { /* done */ break; } else { /* copy data length */ argLen = *(uint16_t *)(readbuf + off); event->args[whichArg]->len = argLen; off += sizeof(uint16_t); /* copy data */ memcpy(&(event->args[whichArg]->data), readbuf + off, argLen); off += argLen; } /* makes sure alignment is correct for 64 bit systems */ size_t argStructLen = sizeof(uint16_t) + sizeof(uint16_t); if ((argStructLen % ptrSize) != 0) { argStructLen += ptrSize-(argStructLen % ptrSize); } argStructLen += argLen; if ((argStructLen % ptrSize) != 0) { argStructLen += ptrSize-(argStructLen % ptrSize); } eventbufOff += argStructLen; whichArg++; } handle_event(L, event, len); } } } /** * Called to close/tidy fsevents */ static void fsevents_tidy(struct observance *obs) { if (obs->fd != fsevents_fd) { logstring("Error", "Internal, fsevents_fd != ob->fd"); exit(-1); // ERRNO } close(fsevents_fd); free(readbuf); readbuf = NULL; free(eventbuf); eventbuf = NULL; } /** * opens and initalizes fsevents. 
*/ extern void open_fsevents(lua_State *L) { int8_t event_list[] = { // action to take for each event FSE_REPORT, // FSE_CREATE_FILE FSE_REPORT, // FSE_DELETE FSE_REPORT, // FSE_STAT_CHANGED FSE_REPORT, // FSE_RENAME FSE_REPORT, // FSE_CONTENT_MODIFIED FSE_REPORT, // FSE_EXCHANGE FSE_REPORT, // FSE_FINDER_INFO_CHANGED FSE_REPORT, // FSE_CREATE_DIR FSE_REPORT, // FSE_CHOWN FSE_REPORT, // FSE_XATTR_MODIFIED FSE_REPORT, // FSE_XATTR_REMOVED }; struct fsevent_clone_args fca = { .event_list = (int8_t *) event_list, .num_events = sizeof(event_list)/sizeof(int8_t), .event_queue_depth = EVENT_QUEUE_SIZE, .fd = &fsevents_fd, }; int fd = open(DEV_FSEVENTS, O_RDONLY); int err = errno; printlogf(L, "Warn", "Using /dev/fsevents which is considered an OSX internal interface."); printlogf(L, "Warn", "Functionality might break across OSX versions (This is for 10.5.X)"); printlogf(L, "Warn", "A hanging Lsyncd might cause Spotlight/Timemachine doing extra work."); if (fd < 0) { printlogf(L, "Error", "Cannot access %s monitor! (%d:%s)", DEV_FSEVENTS, err, strerror(err)); exit(-1); // ERRNO } if (ioctl(fd, FSEVENTS_CLONE, (char *)&fca) < 0) { printlogf(L, "Error", "Cannot control %s monitor! (%d:%s)", DEV_FSEVENTS, errno, strerror(errno)); exit(-1); // ERRNO } if (readbuf) { logstring("Error", "internal fail, inotify readbuf!=NULL in open_inotify()") exit(-1); // ERRNO } readbuf = s_malloc(readbuf_size); eventbuf = s_malloc(eventbuf_size); // fd has been cloned, closes access fd close(fd); close_exec_fd(fsevents_fd); non_block_fd(fsevents_fd); observe_fd(fsevents_fd, fsevents_ready, NULL, fsevents_tidy, NULL); } lsyncd-release-2.1.6/inotify.c000066400000000000000000000240041260764373300162720ustar00rootroot00000000000000/* | inotify.c from Lsyncd - Live (Mirror) Syncing Demon | | License: GPLv2 (see COPYING) or any later version | | Authors: Axel Kittenberger | | ----------------------------------------------------------------------- | | Event interface for Lsyncd to Linux´ inotify. */ #include "lsyncd.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* | Event types. */ static const char * ATTRIB = "Attrib"; static const char * MODIFY = "Modify"; static const char * CREATE = "Create"; static const char * DELETE = "Delete"; static const char * MOVE = "Move"; /* * The inotify file descriptor. */ static int inotify_fd = -1; /* | Standard inotify events to listen to. */ static const uint32_t standard_event_mask = IN_ATTRIB | IN_CLOSE_WRITE | IN_CREATE | IN_DELETE | IN_DELETE_SELF | IN_MOVED_FROM | IN_MOVED_TO | IN_DONT_FOLLOW | IN_ONLYDIR; /* | Adds an inotify watch | | param dir (Lua stack) path to directory | param inotifyMode (Lua stack) which inotify event to react upon | "CloseWrite", "CloseWrite or Modify" | | returns (Lua stack) numeric watch descriptor */ static int l_addwatch( lua_State *L ) { const char *path = luaL_checkstring( L, 1 ); const char *imode = luaL_checkstring( L, 2 ); uint32_t mask = standard_event_mask; // checks the desired inotify reaction mode if (*imode) { if ( !strcmp( imode, "Modify" ) ) { // acts on modify instead of closeWrite mask |= IN_MODIFY; mask &= ~IN_CLOSE_WRITE; } else if ( !strcmp( imode, "CloseWrite" ) ) { // thats default } else if ( !strcmp( imode, "CloseWrite or Modify" ) ) { // acts on modify and closeWrite mask |= IN_MODIFY; } else if ( ! 
strcmp( imode, "CloseWrite after Modify") ) { // might be done in future printlogf( L, "Error", "'CloseWrite after Modify' not implemented." ); exit(-1); } else { printlogf( L, "Error", "'%s' not a valid inotfiyMode.", imode ); exit(-1); } } // kernel call to create the inotify watch int wd = inotify_add_watch( inotify_fd, path, mask ); if (wd < 0) { if (errno == ENOSPC) { printlogf( L, "Error", "%s\n%s", "Terminating since out of inotify watches.", "Consider increasing /proc/sys/fs/inotify/max_user_watches" ); exit(-1); // ERRNO. } printlogf( L, "Inotify", "addwatch( %s )-> %d; err= %d : %s", path, wd, errno, strerror( errno ) ); } else { printlogf(L, "Inotify", "addwatch( %s )-> %d ", path, wd ); } lua_pushinteger( L, wd ); return 1; } /* * Removes an inotify watch. * * param dir (Lua stack) numeric watch descriptor * * return nil */ static int l_rmwatch(lua_State *L) { int wd = luaL_checkinteger(L, 1); inotify_rm_watch(inotify_fd, wd); printlogf(L, "Inotify", "rmwatch()<-%d", wd); return 0; } /* | Lsyncd's core's inotify functions. */ static const luaL_Reg linotfylib[] = { { "addwatch", l_addwatch }, { "rmwatch", l_rmwatch }, { NULL, NULL} }; /* | Buffer for MOVE_FROM events. | Lsyncd buffers MOVE_FROM events to check if | they are followed by MOVE_TO events with identical cookie | then they are condensed into one move event to be sent to the | runner */ static struct inotify_event * move_event_buf = NULL; /* | Memory allocated for move_event_buf */ static size_t move_event_buf_size = 0; /* | True if the buffer is used. */ static bool move_event = false; /* | Handles an inotify event. */ static void handle_event( lua_State *L, struct inotify_event *event ) { const char *event_type = NULL; // used to execute two events in case of unmatched MOVE_FROM buffer struct inotify_event *after_buf = NULL; if( event && ( IN_Q_OVERFLOW & event->mask ) ) { // and overflow happened, tells the runner load_runner_func( L, "overflow" ); if( lua_pcall( L, 0, 0, -2 ) ) { exit( -1 ); } lua_pop( L, 1 ); hup = 1; return; } // cancel on ignored or resetting if( event && ( IN_IGNORED & event->mask ) ) { return; } if( event && event->len == 0 ) { // sometimes inotify sends such strange events, // (e.g. when touching a dir return; } if( event == NULL ) { // a buffered MOVE_FROM is not followed by anything, // thus it is unary event = move_event_buf; event_type = "Delete"; move_event = false; } else if( move_event && ( !( IN_MOVED_TO & event->mask ) || event->cookie != move_event_buf->cookie ) ) { // there is a MOVE_FROM event in the buffer and this is not the match // continue in this function iteration to handle the buffer instead */ logstring( "Inotify", "icore, changing unary MOVE_FROM into DELETE" ) after_buf = event; event = move_event_buf; event_type = "Delete"; move_event = false; } else if( move_event && ( IN_MOVED_TO & event->mask ) && event->cookie == move_event_buf->cookie ) { // this is indeed a matched move */ event_type = "Move"; move_event = false; } else if( IN_MOVED_FROM & event->mask ) { // just the MOVE_FROM, buffers this event, and wait if next event is // a matching MOVED_TO of this was an unary move out of the watched // tree. 
size_t el = sizeof( struct inotify_event ) + event->len; if( move_event_buf_size < el ) { move_event_buf_size = el; move_event_buf = s_realloc( move_event_buf, el ); } memcpy( move_event_buf, event, el ); move_event = true; return; } else if( IN_MOVED_TO & event->mask ) { // must be an unary move-to event_type = CREATE; } else if( IN_ATTRIB & event->mask ) { // just attrib change event_type = ATTRIB; } else if( ( IN_CLOSE_WRITE | IN_MODIFY) & event->mask ) { // modify, or closed after written something // the event type received depends settings.inotifyMode event_type = MODIFY; } else if( IN_CREATE & event->mask ) { // a new file event_type = CREATE; } else if( IN_DELETE & event->mask ) { // rm'ed event_type = DELETE; } else { logstring( "Inotify", "skipped some inotify event." ); return; } // hands the event over to the runner load_runner_func( L, "inotifyEvent" ); if( !event_type ) { logstring( "Error", "internal failure: unknown event in handle_event()" ); exit( -1 ); } lua_pushstring( L, event_type ); if( event_type != MOVE ) { lua_pushnumber( L, event->wd ); } else { lua_pushnumber( L, move_event_buf->wd ); } lua_pushboolean( L, ( event->mask & IN_ISDIR ) != 0 ); l_now( L ); if( event_type == MOVE ) { lua_pushstring( L, move_event_buf->name ); lua_pushnumber( L, event->wd ); lua_pushstring( L, event->name ); } else { lua_pushstring( L, event->name ); lua_pushnil( L ); lua_pushnil( L ); } if( lua_pcall( L, 7, 0, -9 ) ) { exit( -1 ); } lua_pop( L, 1 ); // if there is a buffered event, executes it if (after_buf) { logstring("Inotify", "icore, handling buffered event."); handle_event(L, after_buf); } } /* | buffer to read inotify events into */ static size_t readbuf_size = 2048; static char * readbuf = NULL; /* | Called when the inotify file descriptor became ready. | Reads it contents and forwards all received events | to the runner. */ static void inotify_ready( lua_State *L, struct observance *obs ) { // sanity check if( obs->fd != inotify_fd ) { logstring( "Error", "internal failure, inotify_fd != ob->fd" ); exit( -1 ); } while( true ) { ptrdiff_t len; int err; do { len = read( inotify_fd, readbuf, readbuf_size ); err = errno; if( len < 0 && err == EINVAL ) { // kernel > 2.6.21 indicates that way that way that // the buffer was too small to fit a filename. // double its size and try again. When using a lower // kernel and a filename > 2KB appears lsyncd // will fail. (but does a 2KB filename really happen?) // readbuf_size *= 2; readbuf = s_realloc(readbuf, readbuf_size); } } while( len < 0 && err == EINVAL ); if( len == 0 ) { // no more inotify events break; } if (len < 0) { if (err == EAGAIN) { // nothing more inotify break; } else { printlogf( L, "Error", "Read fail on inotify" ); exit( -1 ); } } { int i = 0; while( i < len && !hup && !term ) { struct inotify_event *event = ( struct inotify_event * ) (readbuf + i); handle_event( L, event ); i += sizeof( struct inotify_event ) + event->len; } } if( !move_event ) { // give it a pause if not endangering splitting a move break; } } // checks if there is an unary MOVE_FROM left in the buffer if( move_event ) { logstring( "Inotify", "handling unary move from." ); handle_event( L, NULL ); } } /* | Registers the inotify functions. */ extern void register_inotify( lua_State *L ) { luaL_register( L, LSYNCD_INOTIFYLIBNAME, linotfylib ); } /* | Cleans up the inotify handling. 
*/ static void inotify_tidy( struct observance *obs ) { if( obs->fd != inotify_fd ) { logstring( "Error", "internal failure: inotify_fd != ob->fd" ); exit( -1 ); } close( inotify_fd ); free( readbuf ); readbuf = NULL; } /* | Initalizes inotify handling */ extern void open_inotify( lua_State *L ) { if( readbuf ) { logstring( "Error", "internal failure, inotify readbuf != NULL in open_inotify()" ) exit(-1); } readbuf = s_malloc( readbuf_size ); inotify_fd = inotify_init( ); if( inotify_fd < 0 ) { printlogf( L, "Error", "Cannot access inotify monitor! ( %d : %s )", errno, strerror(errno) ); exit( -1 ); } printlogf( L, "Inotify", "inotify fd = %d", inotify_fd ); close_exec_fd( inotify_fd ); non_block_fd( inotify_fd ); observe_fd( inotify_fd, inotify_ready, NULL, inotify_tidy, NULL ); } lsyncd-release-2.1.6/lsyncd.c000066400000000000000000001432041260764373300161110ustar00rootroot00000000000000/* | lsyncd.c Live (Mirror) Syncing Demon | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | | This is Lsyncd's core. | | It contains as minimal as possible glues to the operating system needed | for Lsyncd's operation. All high-level logic is coded (when feasable) | into lsyncd.lua | | This code assumes you have a 100 character wide display to view it (when tabstop is 4) | | License: GPLv2 (see COPYING) or any later version | Authors: Axel Kittenberger | */ #include "lsyncd.h" #define SYSLOG_NAMES 1 #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define LUA_USE_APICHECK 1 #include #include #include /* | The Lua part of Lsyncd */ extern const char runner_out[]; extern size_t runner_size; extern const char defaults_out[]; extern size_t defaults_size; /* | Makes sure there is one file system monitor. */ #ifndef WITH_INOTIFY #ifndef WITH_FSEVENTS # error "needing at least one notification system. please rerun cmake" #endif #endif /* | All monitors supported by this Lsyncd. */ static char *monitors[] = { #ifdef WITH_INOTIFY "inotify", #endif #ifdef WITH_FSEVENTS "fsevents", #endif NULL, }; /** | Configuration parameters that matter to the core */ struct settings settings = { .log_file = NULL, .log_syslog = false, .log_ident = NULL, .log_facility = LOG_USER, .log_level = LOG_NOTICE, .nodaemon = false, }; /* | True when Lsyncd daemonized itself. */ static bool is_daemon = false; /* | The config file loaded by Lsyncd. */ char * lsyncd_config_file = NULL; /* | False after first time Lsyncd started up. | | Configuration error messages are thus written to | stdout/stderr only on first start. | | All other resets (HUP or monitor OVERFLOW) run with 'insist' | implictly turned on and thus Lsyncd does not failing on a non | responding target. */ static bool first_time = true; /* | Set by TERM or HUP signal handler | telling Lsyncd should end or reset ASAP. */ volatile sig_atomic_t hup = 0; volatile sig_atomic_t term = 0; volatile sig_atomic_t sigcode = 0; int pidfile_fd = 0; /* | The kernel's clock ticks per second. 
*/ static long clocks_per_sec; /** * signal handler */ void sig_child(int sig) { // nothing } /** * signal handler */ void sig_handler( int sig ) { switch( sig ) { case SIGTERM: case SIGINT: term = 1; sigcode = sig; return; case SIGHUP: hup = 1; return; } } /* | Non glibc builds need a real tms structure for the times( ) call */ #ifdef __GLIBC__ static struct tms * dummy_tms = NULL; #else static struct tms _dummy_tms; static struct tms * dummy_tms = &_dummy_tms; #endif /* | Returns the absolute path of a path. | | This is a wrapper to various C-Library differences. */ char * get_realpath( const char * rpath ) { // uses c-library to get the absolute path #ifdef __GLIBC__ // in case of GLIBC the task is easy. return realpath( rpath, NULL ); #else # warning having to use old style realpath() // otherwise less so and requires PATH_MAX limit char buf[ PATH_MAX] ; char *asw = realpath( rpath, buf ); if( !asw ) { return NULL; } return s_strdup( asw ); #endif } /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~* ( Logging ) *~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ /* | A logging category */ struct logcat { char *name; int priority; }; /* | A table of all enabled logging categories. | Sorted by first letter for faster access. */ static struct logcat * logcats[ 26 ] = { 0, }; /* | Returns a positive priority if category is configured to be logged or -1. */ extern int check_logcat( const char *name ) { struct logcat *lc; if( name[ 0 ] < 'A' || name[ 0 ] > 'Z') { return 99; } lc = logcats[ name[ 0 ] - 'A' ]; if( !lc ) { return 99; } while( lc->name ) { if( !strcmp( lc->name, name ) ) { return lc->priority; } lc++; } return 99; } /* | Adds a logging category | | Returns true if OK. */ static bool add_logcat( const char *name, int priority ) { struct logcat *lc; if( !strcmp( "all", name ) ) { settings.log_level = 99; return true; } if( !strcmp( "scarce", name ) ) { settings.log_level = LOG_WARNING; return true; } // categories must start with a capital letter. if( name[ 0 ] < 'A' || name[ 0 ] > 'Z' ) { return false; } if( !logcats[ name[ 0 ]- 'A' ] ) { // an empty capital letter lc = logcats[name[0]-'A'] = s_calloc(2, sizeof(struct logcat)); } else { // length of letter list int ll = 0; // counts list length for( lc = logcats[name[0]-'A']; lc->name; lc++ ) { ll++; } // enlarges list logcats[ name[ 0 ] - 'A'] = s_realloc( logcats[ name[ 0 ]-'A' ], ( ll + 2 ) * sizeof( struct logcat ) ); // goes to the list end for( lc = logcats[ name[ 0 ] - 'A']; lc->name; lc++ ) { if( !strcmp( name, lc->name ) ) { // already there return true; } } } lc->name = s_strdup( name ); lc->priority = priority; // terminates the list lc[ 1 ].name = NULL; return true; } /* | Logs a string. | | Do not call this directly, but the macro logstring( ) | defined in lsyncd.h */ extern void logstring0( int priority, // the priority of the log message const char * cat, // the category const char * message // the log message ) { if( first_time ) { // lsyncd is in it's intial configuration phase. // thus just print to normal stdout/stderr. if( priority >= LOG_ERR ) { fprintf( stderr, "%s: %s\n", cat, message); } else { printf( "%s: %s\n", cat, message ); } return; } // writes on console if not daemonized if( !is_daemon ) { char ct[ 255 ]; // gets current timestamp hour:minute:second time_t mtime; time( &mtime ); strftime( ct, sizeof( ct ), "%T", localtime( &mtime ) ); FILE * flog = priority <= LOG_ERR ? 
stderr : stdout; fprintf( flog, "%s %s: %s\n", ct, cat, message ); } // writes to file if configured so if (settings.log_file) { FILE * flog = fopen( settings.log_file, "a" ); char * ct; time_t mtime; // gets current timestamp day-time-year time( &mtime ); ct = ctime( &mtime ); // cuts trailing linefeed ct[ strlen( ct ) - 1] = 0; if( flog == NULL ) { fprintf( stderr, "Cannot open logfile [%s]!\n", settings.log_file ); exit( -1 ); } fprintf( flog, "%s %s: %s\n", ct, cat, message ); fclose( flog ); } // sends to syslog if configured so if( settings.log_syslog ) { syslog( priority, "%s, %s", cat, message ); } return; } /* | Lets the core print logmessages comfortably as formated string. | This uses the lua_State for it easy string buffers only. */ extern void printlogf0(lua_State *L, int priority, const char *cat, const char *fmt, ...) { va_list ap; va_start(ap, fmt); lua_pushvfstring(L, fmt, ap); va_end(ap); logstring0(priority, cat, luaL_checkstring(L, -1)); lua_pop(L, 1); return; } /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~* ( Simple memory management ) *~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ // FIXME call the Lua garbace collector in case of out of memory /* | "Secured" calloc */ extern void * s_calloc( size_t nmemb, size_t size ) { void * r = calloc( nmemb, size ); if( r == NULL ) { logstring0( LOG_ERR, "Error", "Out of memory!" ); exit( -1 ); } return r; } /* | "Secured" malloc */ extern void * s_malloc( size_t size ) { void * r = malloc( size ); if( r == NULL ) { logstring0( LOG_ERR, "Error", "Out of memory!" ); exit( -1 ); } return r; } /* | "Secured" realloc */ extern void * s_realloc( void * ptr, size_t size ) { void * r = realloc( ptr, size ); if( r == NULL ) { logstring0( LOG_ERR, "Error", "Out of memory!" ); exit( -1 ); } return r; } /* | "Secured" strdup */ extern char * s_strdup( const char *src ) { char *s = strdup( src ); if( s == NULL ) { logstring0( LOG_ERR, "Error", "Out of memory!" ); exit( -1 ); } return s; } /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~* ( Pipes Management ) *~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ /* | A child process gets text piped through stdin */ struct pipemsg { char * text; // message to send int tlen; // length of text int pos; // position in message }; /* | Called by the core whenever a pipe becomes | writeable again */ static void pipe_writey( lua_State * L, struct observance * observance ) { int fd = observance->fd; struct pipemsg *pm = (struct pipemsg * ) observance->extra; int len = write( fd, pm->text + pm->pos, pm->tlen - pm->pos ); pm->pos += len; if( len < 0 ) { logstring( "Normal", "broken pipe." ); nonobserve_fd( fd ); } else if( pm->pos >= pm->tlen ) { logstring( "Exec", "finished pipe." ); nonobserve_fd(fd); } } /* | Called when cleaning up a pipe. */ static void pipe_tidy( struct observance * observance ) { struct pipemsg *pm = ( struct pipemsg * ) observance->extra; close( observance->fd ); free( pm->text ); free( pm ); } /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~* ( Helper Routines ) *~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ /* | Dummy variable of which it's address is used as | the cores index in the lua registry to | the lua runners function table in the lua registry. */ static int runner; /* | Dummy variable of which it's address is used as | the cores index n the lua registry to | the lua runners error handler. */ static int callError; /* | Sets the close-on-exit flag of a file descriptor. 
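| Uses fcntl( F_GETFD ) / fcntl( F_SETFD ) to add FD_CLOEXEC, so the descriptor
| is not inherited by spawned child processes.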
*/ extern void close_exec_fd( int fd ) { int flags; flags = fcntl( fd, F_GETFD ); if( flags == -1 ) { logstring( "Error", "cannot get descriptor flags!" ); exit( -1 ); } flags |= FD_CLOEXEC; if( fcntl( fd, F_SETFD, flags ) == -1 ) { logstring( "Error", "cannot set descripptor flags!" ); exit( -1 ); } } /* | Sets the non-blocking flag of a file descriptor. */ extern void non_block_fd( int fd ) { int flags; flags = fcntl( fd, F_GETFL ); if( flags == -1 ) { logstring( "Error", "cannot get status flags!" ); exit( -1 ); } flags |= O_NONBLOCK; if( fcntl( fd, F_SETFL, flags ) == -1 ) { logstring( "Error", "cannot set status flags!" ); exit( -1 ); } } /* | Writes a pid file. */ static void write_pidfile( lua_State *L, const char *pidfile ) { pidfile_fd = open( pidfile, O_CREAT | O_RDWR, 0644 ); fcntl( pidfile_fd, F_SETFD, FD_CLOEXEC ); char buf[ 127 ]; if( pidfile_fd < 0 ) { printlogf( L, "Error", "Cannot create pidfile; '%s'", pidfile ); exit( -1 ); } int rc = lockf( pidfile_fd, F_TLOCK, 0 ); if( rc < 0 ) { printlogf( L, "Error", "Cannot lock pidfile; '%s'", pidfile ); exit( -1 ); } snprintf( buf, sizeof( buf ), "%i\n", getpid( ) ); write( pidfile_fd, buf, strlen( buf ) ); //fclose( f ); } /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~* ( Observances ) *~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ /* | List of file descriptor watches. */ static struct observance * observances = NULL; static int observances_len = 0; static int observances_size = 0; /* | List of file descriptors to not observe. | | While working for the oberver lists, it may | not be altered, thus nonobserve stores the | delayed removals. */ static int * nonobservances = NULL; static int nonobservances_len = 0; static int nonobservances_size = 0; /* | True while the observances list is being handled. */ static bool observance_action = false; /* | Core watches a filedescriptor to become ready, | one of read_ready or write_ready may be zero */ extern void observe_fd( int fd, void ( * ready ) (lua_State *, struct observance * ), void ( * writey ) (lua_State *, struct observance * ), void ( * tidy ) (struct observance * ), void *extra ) { int pos; // looks if the fd is already there as pos or // stores the position to insert the new fd in pos for( pos = 0; pos < observances_len; pos++) { if( fd <= observances[ pos ].fd ) { break; } } if( pos < observances_len && observances[ pos ].fd == fd ) { // just updates an existing observance logstring( "Masterloop", "updating fd observance" ); observances[ pos ].ready = ready; observances[ pos ].writey = writey; observances[ pos ].tidy = tidy; observances[ pos ].extra = extra; return; } if( observance_action ) { // FIXME logstring( "Error", "New observances in ready/writey handlers not yet supported" ); exit( -1 ); } if( !tidy ) { logstring( "Error", "internal, tidy() in observe_fd() must not be NULL." ); exit( -1 ); } if( observances_len + 1 > observances_size ) { observances_size = observances_len + 1; observances = s_realloc( observances, observances_size * sizeof( struct observance ) ); } memmove( observances + pos + 1, observances + pos, (observances_len - pos) * sizeof(struct observance) ); observances_len++; observances[ pos ].fd = fd; observances[ pos ].ready = ready; observances[ pos ].writey = writey; observances[ pos ].tidy = tidy; observances[ pos ].extra = extra; } /* | Makes the core no longer watch a filedescriptor. 
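| If called from within a ready/writey handler the removal is queued on the
| nonobservances stack and carried out after the observance walk; otherwise
| the observance is tidied and removed immediately.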
*/ extern void nonobserve_fd( int fd ) { int pos; if( observance_action ) { // this function is called through a ready/writey handler // while the core works through the observance list, thus // it does not alter the list, but stores this actions // on a stack nonobservances_len++; if( nonobservances_len > nonobservances_size ) { nonobservances_size = nonobservances_len; nonobservances = s_realloc( nonobservances, nonobservances_size * sizeof( int ) ); } nonobservances[ nonobservances_len - 1 ] = fd; return; } // looks for the fd for( pos = 0; pos < observances_len; pos++ ) { if( observances[ pos ].fd == fd ) { break; } } if( pos >= observances_len ) { logstring( "Error", "internal fail, not observance file descriptor in nonobserve" ); exit( -1 ); } // tidies up the observance observances[ pos ].tidy( observances + pos ); // and moves the list down memmove( observances + pos, observances + pos + 1, (observances_len - pos) * sizeof( struct observance ) ); observances_len--; } /* | A user observance became read-ready. */ static void user_obs_ready( lua_State * L, struct observance * obs ) { int fd = obs->fd; // pushes the ready table on table lua_pushlightuserdata( L, ( void * ) user_obs_ready ); lua_gettable( L, LUA_REGISTRYINDEX ); // pushes the error handler lua_pushlightuserdata( L, (void *) &callError ); lua_gettable( L, LUA_REGISTRYINDEX ); // pushes the user func lua_pushnumber( L, fd ); lua_gettable( L, -3 ); // gives the ufunc the fd lua_pushnumber( L, fd ); // calls the user function if( lua_pcall( L, 1, 0, -3 ) ) { exit( -1 ); } lua_pop( L, 2 ); } /* | A user observance became write-ready */ static void user_obs_writey( lua_State * L, struct observance * obs ) { int fd = obs->fd; // pushes the writey table on table lua_pushlightuserdata( L, (void *) user_obs_writey ); lua_gettable( L, LUA_REGISTRYINDEX ); // pushes the error handler lua_pushlightuserdata(L, (void *) &callError); lua_gettable( L, LUA_REGISTRYINDEX ); // pushes the user func lua_pushnumber( L, fd ); lua_gettable( L, -3 ); // gives the user func the fd lua_pushnumber( L, fd ); // calls the user function if( lua_pcall( L, 1, 0, -3 ) ) { exit(-1); } lua_pop( L, 2 ); } /* | Tidies up a user observance | FIXME - give the user a chance to do something in that case! */ static void user_obs_tidy( struct observance *obs ) { close( obs->fd ); } /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~* ( Library calls for the runner ) *~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ static void daemonize( lua_State *L ); int l_stackdump( lua_State* L ); /* | Logs a message. 
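| Messages of categories filtered out by the configured log level are skipped;
| non-string arguments ( tables, booleans, nil, jiffy timestamps ) are turned
| into printable placeholders before concatenation.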
| | Params on Lua stack: | | 1: loglevel of massage | 2: the string to log */ static int l_log( lua_State *L ) { const char * cat; // log category const char * message; // log message int priority; // log priority cat = luaL_checkstring( L, 1 ); priority = check_logcat( cat ); // skips filtered messages if( priority > settings.log_level ) { return 0; } // replaces non string values { int i; int top = lua_gettop(L); for( i = 1; i <= top; i++ ) { int t = lua_type( L, i ); switch( t ) { case LUA_TTABLE : lua_pushfstring( L, "(Table: %p)", lua_topointer( L, i ) ); lua_replace( L, i ); break; case LUA_TBOOLEAN : if( lua_toboolean( L, i ) ) { lua_pushstring( L, "(true)" ); } else { lua_pushstring( L, "(false)" ); } lua_replace(L, i); break; case LUA_TUSERDATA: { clock_t *c = ( clock_t * ) luaL_checkudata( L, i, "Lsyncd.jiffies" ); double d = *c; d /= clocks_per_sec; lua_pushfstring( L, "(Timestamp: %f)", d ); lua_replace( L, i ); } break; case LUA_TNIL: lua_pushstring( L, "(nil)" ); lua_replace( L, i ); break; } } } // concates if there is more than one string parameter lua_concat( L, lua_gettop( L ) - 1 ); message = luaL_checkstring( L, 2 ); logstring0( priority, cat, message ); return 0; } /* | Returns (on Lua stack) the current kernels | clock state (jiffies) */ extern int l_now(lua_State *L) { clock_t * j = lua_newuserdata( L, sizeof( clock_t ) ); luaL_getmetatable( L, "Lsyncd.jiffies" ); lua_setmetatable( L, -2 ); *j = times( dummy_tms ); return 1; } /* | Executes a subprocess. Does not wait for it to return. | | Params on Lua stack: | | 1: Path to binary to call | 2: List of string as arguments | or "<" in which case the next argument is a string | that will be piped on stdin. | The arguments will follow that one. | | Returns (Lua stack) the pid on success, 0 on failure. */ static int l_exec( lua_State *L ) { // the binary to call const char *binary = luaL_checkstring(L, 1); // number of arguments int argc = lua_gettop( L ) - 1; // the pid spawned pid_t pid; // the arguments position in the lua arguments int li = 1; // the pipe to text char const * pipe_text = NULL; // the pipes length size_t pipe_len = 0; // the arguments char const ** argv; // pipe file descriptors int pipefd[ 2 ]; int i; // expands tables // and removes nils for( i = 1; i <= lua_gettop( L ); i++ ) { if( lua_isnil( L, i ) ) { lua_remove( L, i ); i--; argc--; continue; } if( lua_istable( L, i ) ) { int tlen; int it; lua_checkstack( L, lua_gettop( L ) + lua_objlen( L, i ) + 1 ); // moves table to top of stack lua_pushvalue( L, i ); lua_remove( L, i ); argc--; tlen = lua_objlen( L, -1 ); for( it = 1; it <= tlen; it++ ) { lua_pushinteger( L, it ); lua_gettable( L, -2 ); lua_insert( L, i ); i++; argc++; } i--; lua_pop( L, 1 ); } } // writes a log message (if needed). if( check_logcat( "Exec" ) <= settings.log_level ) { lua_checkstack( L, lua_gettop( L ) + argc * 3 + 2 ); lua_pushvalue( L, 1 ); for( i = 1; i <= argc; i++ ) { lua_pushstring( L, " [" ); lua_pushvalue( L, i + 1 ); lua_pushstring( L, "]" ); } lua_concat( L, 3 * argc + 1 ); // replaces midfile 0 chars by linefeed size_t len = 0; const char * cs = lua_tolstring( L, -1, &len ); char * s = s_calloc( len + 1, sizeof( char ) ); for( i = 0; i < len; i++ ) { s[ i ] = cs[ i ] ? 
cs[ i ] : '\n'; } logstring0( LOG_DEBUG, "Exec", s ); free( s ); lua_pop( L, 1 ); } if( argc >= 2 && !strcmp( luaL_checkstring( L, 2 ), "<" ) ) { // pipes something into stdin if( !lua_isstring( L, 3 ) ) { logstring( "Error", "in spawn(), expected a string after pipe '<'" ); exit( -1 ); } pipe_text = lua_tolstring( L, 3, &pipe_len ); if( strlen( pipe_text ) > 0 ) { // creates the pipe if( pipe( pipefd ) == -1 ) { logstring( "Error", "cannot create a pipe!" ); exit( -1 ); } // always closes the write end for child processes close_exec_fd( pipefd[ 1 ] ); // sets the write end on non-blocking non_block_fd( pipefd[ 1 ] ); } else { pipe_text = NULL; } argc -= 2; li += 2; } // prepares the arguments argv = s_calloc( argc + 2, sizeof( char * ) ); argv[ 0 ] = binary; for( i = 1; i <= argc; i++ ) { argv[i] = luaL_checkstring( L, i + li ); } argv[ i ] = NULL; // the fork! pid = fork( ); if( pid == 0 ) { // replaces stdin for pipes if( pipe_text ) { dup2( pipefd[ 0 ], STDIN_FILENO ); } // if lsyncd runs as a daemon and has a logfile it will redirect // stdout/stderr of child processes to the logfile. if( is_daemon && settings.log_file ) { if( !freopen( settings.log_file, "a", stdout ) ) { printlogf( L, "Error", "cannot redirect stdout to '%s'.", settings.log_file ); } if( !freopen( settings.log_file, "a", stderr ) ) { printlogf( L, "Error", "cannot redirect stderr to '%s'.", settings.log_file ); } } execv( binary, ( char ** ) argv ); // in a sane world execv does not return! printlogf( L, "Error", "Failed executing [ %s ]!", binary ); exit( -1 ); } if( pipe_text ) { int len; // first closes read-end of pipe, this is for child process only close( pipefd[ 0 ] ); // starts filling the pipe len = write( pipefd[ 1 ], pipe_text, pipe_len ); if( len < 0 ) { logstring( "Normal", "immediatly broken pipe." ); close( pipefd[ 1 ] ); } else if( len == pipe_len ) { // usual and best case, the pipe accepted all input -> close close( pipefd[ 1 ] ); logstring( "Exec", "one-sweeped pipe" ); } else { struct pipemsg *pm; logstring( "Exec", "adding pipe observance" ); pm = s_calloc( 1, sizeof( struct pipemsg ) ); pm->text = s_calloc( pipe_len + 1, sizeof( char ) ); memcpy( pm->text, pipe_text, pipe_len + 1 ); pm->tlen = pipe_len; pm->pos = len; observe_fd( pipefd[ 1 ], NULL, pipe_writey, pipe_tidy, pm ); } } free( argv ); lua_pushnumber( L, pid ); return 1; } /* | Converts a relative directory path to an absolute. | | Params on Lua stack: | 1: a relative path to directory | | Returns on Lua stack: | The absolute path of directory */ static int l_realdir( lua_State *L ) { luaL_Buffer b; const char *rdir = luaL_checkstring(L, 1); char *adir = get_realpath(rdir); if (!adir) { printlogf(L, "Error", "failure getting absolute path of [%s]", rdir); return 0; } { // makes sure its a directory struct stat st; if (stat(adir, &st)) { printlogf(L, "Error", "cannot get absolute path of dir '%s': %s", rdir, strerror(errno)); free(adir); return 0; } if (!S_ISDIR(st.st_mode)) { printlogf(L, "Error", "cannot get absolute path of dir '%s': is not a directory", rdir); free(adir); return 0; } } // returns absolute path with a concated '/' luaL_buffinit(L, &b); luaL_addstring(&b, adir); luaL_addchar(&b, '/'); luaL_pushresult(&b); free(adir); return 1; } /* | Dumps the Lua stack. | For debugging purposes. 
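| Prints the stack size and one Debug log line per slot ( index, type and value ),
| leaving the stack unchanged.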
*/ int l_stackdump( lua_State * L ) { int i; int top = lua_gettop( L ); printlogf( L, "Debug", "total in stack %d", top ); for( i = 1; i <= top; i++ ) { int t = lua_type( L, i ); switch( t ) { case LUA_TSTRING: printlogf( L, "Debug", "%d string: '%s'", i, lua_tostring( L, i ) ); break; case LUA_TBOOLEAN: printlogf( L, "Debug", "%d boolean %s", i, lua_toboolean( L, i ) ? "true" : "false" ); break; case LUA_TNUMBER: printlogf( L, "Debug", "%d number: %g", i, lua_tonumber( L, i ) ); break; default: printlogf( L, "Debug", "%d %s", i, lua_typename( L, t ) ); break; } } return 0; } /* | Reads the directories entries. | | Params on Lua stack: | 1: absolute path to directory | | Returns on Lua stack: | a table of directory names. | names are keys | values are boolean true on dirs. */ static int l_readdir ( lua_State *L ) { const char * dirname = luaL_checkstring( L, 1 ); DIR *d; d = opendir( dirname ); if( d == NULL ) { printlogf( L, "Error", "cannot open dir [%s].", dirname ); return 0; } lua_newtable( L ); while( !hup && !term ) { struct dirent *de = readdir( d ); bool isdir; if( de == NULL ) { // finished break; } // ignores . and .. if( !strcmp( de->d_name, "." ) || !strcmp( de->d_name, ".." ) ) { continue; } if( de->d_type == DT_UNKNOWN ) { // must call stat on some systems :-/ // ( e.g. ReiserFS ) char *entry = s_malloc( strlen( dirname ) + strlen( de->d_name ) + 2 ); struct stat st; strcpy( entry, dirname ); strcat( entry, "/" ); strcat( entry, de->d_name ); lstat( entry, &st ); isdir = S_ISDIR( st.st_mode ); free( entry ); } else { // otherwise readdir can be trusted isdir = de->d_type == DT_DIR; } // adds this entry to the Lua table lua_pushstring( L, de->d_name ); lua_pushboolean( L, isdir ); lua_settable( L, -3 ); } closedir( d ); return 1; } /* | Terminates Lsyncd. | | Params on Lua stack: | 1: exitcode of Lsyncd. | | Does not return. | */ int l_terminate(lua_State *L) { int exitcode = luaL_checkinteger( L, 1 ); exit( exitcode ); return 0; } /* | Configures core parameters. | | Params on Lua stack: | 1: a string, configure option | 2: depends on Param 1 */ static int l_configure( lua_State *L ) { const char * command = luaL_checkstring( L, 1 ); if( !strcmp( command, "running" ) ) { // set by runner after first initialize // from this on log to configurated log end instead of // stdout/stderr first_time = false; if( !settings.nodaemon && !settings.log_file ) { settings.log_syslog = true; const char * log_ident = settings.log_ident ? settings.log_ident : "lsyncd"; openlog( log_ident, 0, settings.log_facility ); } if( !settings.nodaemon && !is_daemon ) { logstring( "Debug", "daemonizing now." 
); daemonize( L ); } if( settings.pidfile ) { write_pidfile( L, settings.pidfile ); } } else if( !strcmp( command, "nodaemon" ) ) { settings.nodaemon = true; } else if( !strcmp( command, "logfile" ) ) { const char * file = luaL_checkstring( L, 2 ); if( settings.log_file ) { free( settings.log_file ); } settings.log_file = s_strdup( file ); } else if( !strcmp( command, "pidfile" ) ) { const char * file = luaL_checkstring( L, 2 ); if( settings.pidfile ) { free( settings.pidfile ); } settings.pidfile = s_strdup( file ); } else if( !strcmp( command, "logfacility" ) ) { if( lua_isstring( L, 2 ) ) { const char * fname = luaL_checkstring( L, 2 ); int i; for( i = 0; facilitynames[ i ].c_name; i++ ) { if( !strcasecmp( fname, facilitynames[ i ].c_name ) ) { break; } } if( !facilitynames[ i ].c_name ) { printlogf( L, "Error", "Logging facility '%s' unknown.", fname ); exit( -1 ); } settings.log_facility = facilitynames[ i ].c_val; } else if (lua_isnumber(L, 2)) { settings.log_facility = luaL_checknumber(L, 2); } else { printlogf( L, "Error", "Logging facility must be a number or string" ); exit( -1 ); } } else if( !strcmp( command, "logident" ) ) { const char * ident = luaL_checkstring( L, 2 ); if (settings.log_ident) { free(settings.log_ident); } settings.log_ident = s_strdup( ident ); } else { printlogf( L, "Error", "Internal error, unknown parameter in l_configure( %s )", command ); exit( -1 ); } return 0; } /* | Allows user scripts to observe filedescriptors | | Params on Lua stack: | 1: file descriptor | 2: function to call when read becomes ready | 3: function to call when write becomes ready */ static int l_observe_fd( lua_State *L ) { int fd = luaL_checknumber( L, 1 ); bool ready = false; bool writey = false; // Stores the user function in the lua registry. // It uses the address of the cores ready/write functions // for the user as key if( !lua_isnoneornil( L, 2 ) ) { lua_pushlightuserdata( L, (void *) user_obs_ready ); lua_gettable( L, LUA_REGISTRYINDEX ); if( lua_isnil( L, -1 ) ) { lua_pop ( L, 1 ); lua_newtable ( L ); lua_pushlightuserdata ( L, (void *) user_obs_ready ); lua_pushvalue ( L, -2 ); lua_settable ( L, LUA_REGISTRYINDEX ); } lua_pushnumber ( L, fd ); lua_pushvalue ( L, 2 ); lua_settable ( L, -3 ); lua_pop ( L, 1 ); ready = true; } if( !lua_isnoneornil( L, 3 ) ) { lua_pushlightuserdata( L, (void *) user_obs_writey ); lua_gettable (L, LUA_REGISTRYINDEX ); if( lua_isnil(L, -1) ) { lua_pop ( L, 1 ); lua_newtable ( L ); lua_pushlightuserdata ( L, (void *) user_obs_writey ); lua_pushvalue ( L, -2 ); lua_settable ( L, LUA_REGISTRYINDEX ); } lua_pushnumber ( L, fd ); lua_pushvalue ( L, 3 ); lua_settable ( L, -3 ); lua_pop ( L, 1 ); writey = true; } // tells the core to watch the fd observe_fd( fd, ready ? user_obs_ready : NULL, writey ? user_obs_writey : NULL, user_obs_tidy, NULL ); return 0; } /* | Removes a user observance | | Params on Lua stack: | 1: exitcode of Lsyncd. 
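| ( The parameter is in fact the file descriptor whose observance is removed,
|   not an exit code. )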
*/ extern int l_nonobserve_fd( lua_State *L ) { int fd = luaL_checknumber( L, 1 ); // removes the read function lua_pushlightuserdata( L, (void *) user_obs_ready ); lua_gettable( L, LUA_REGISTRYINDEX ); if( !lua_isnil( L, -1 ) ) { lua_pushnumber ( L, fd ); lua_pushnil ( L ); lua_settable ( L, -2 ); } lua_pop( L, 1 ); lua_pushlightuserdata( L, (void *) user_obs_writey ); lua_gettable( L, LUA_REGISTRYINDEX ); if ( !lua_isnil( L, -1 ) ) { lua_pushnumber ( L, fd ); lua_pushnil ( L ); lua_settable ( L, -2 ); } lua_pop( L, 1 ); nonobserve_fd( fd ); return 0; } /* | The Lsnycd's core library */ static const luaL_Reg lsyncdlib[] = { { "configure", l_configure }, { "exec", l_exec }, { "log", l_log }, { "now", l_now }, { "nonobserve_fd", l_nonobserve_fd }, { "observe_fd", l_observe_fd }, { "readdir", l_readdir }, { "realdir", l_realdir }, { "stackdump", l_stackdump }, { "terminate", l_terminate }, { NULL, NULL } }; /* | Adds a number in seconds to a jiffy timestamp. */ static int l_jiffies_add( lua_State *L ) { clock_t *p1 = ( clock_t * ) lua_touserdata( L, 1 ); clock_t *p2 = ( clock_t * ) lua_touserdata( L, 2 ); if( p1 && p2 ) { logstring( "Error", "Cannot add two timestamps!" ); exit( -1 ); } { clock_t a1 = p1 ? *p1 : luaL_checknumber( L, 1 ) * clocks_per_sec; clock_t a2 = p2 ? *p2 : luaL_checknumber( L, 2 ) * clocks_per_sec; clock_t *r = ( clock_t * ) lua_newuserdata( L, sizeof( clock_t ) ); luaL_getmetatable( L, "Lsyncd.jiffies" ); lua_setmetatable( L, -2 ); *r = a1 + a2; return 1; } } /* | Subracts two jiffy timestamps resulting in a number in seconds | or substracts a jiffy by a number in seconds resulting a jiffy timestamp. */ static int l_jiffies_sub( lua_State *L ) { clock_t *p1 = ( clock_t * ) lua_touserdata( L, 1 ); clock_t *p2 = ( clock_t * ) lua_touserdata( L, 2 ); if( p1 && p2 ) { // substracting two timestamps result in a timespan in seconds clock_t a1 = *p1; clock_t a2 = *p2; lua_pushnumber(L, ((double) (a1 -a2)) / clocks_per_sec); return 1; } // makes a timestamp earlier by NUMBER seconds clock_t a1 = p1 ? *p1 : luaL_checknumber( L, 1 ) * clocks_per_sec; clock_t a2 = p2 ? *p2 : luaL_checknumber( L, 2 ) * clocks_per_sec; clock_t *r = (clock_t *) lua_newuserdata( L, sizeof( clock_t ) ); luaL_getmetatable( L, "Lsyncd.jiffies" ); lua_setmetatable( L, -2 ); *r = a1 - a2; return 1; } /* | Compares two jiffy timestamps */ static int l_jiffies_eq( lua_State *L ) { clock_t a1 = ( *( clock_t * ) luaL_checkudata( L, 1, "Lsyncd.jiffies" ) ); clock_t a2 = ( *( clock_t * ) luaL_checkudata( L, 2, "Lsyncd.jiffies" ) ); lua_pushboolean( L, a1 == a2 ); return 1; } /* * True if jiffy1 timestamp is eariler than jiffy2 timestamp */ static int l_jiffies_lt( lua_State *L ) { clock_t a1 = ( *( clock_t * ) luaL_checkudata( L, 1, "Lsyncd.jiffies" ) ); clock_t a2 = ( *( clock_t * ) luaL_checkudata( L, 2, "Lsyncd.jiffies" ) ); lua_pushboolean( L, time_before( a1, a2 ) ); return 1; } /* | True if jiffy1 before or equals jiffy2 */ static int l_jiffies_le(lua_State *L) { clock_t a1 = ( *( clock_t * ) luaL_checkudata( L, 1, "Lsyncd.jiffies" ) ); clock_t a2 = ( *( clock_t * ) luaL_checkudata( L, 2, "Lsyncd.jiffies" ) ); lua_pushboolean( L, ( a1 == a2 ) || time_before( a1, a2 ) ); return 1; } /* | Registers the Lsyncd's core library. 
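| Publishes the 'lsyncd' global table, creates the 'Lsyncd.jiffies' metatable
| used for timestamp arithmetic and comparison and, when compiled with inotify
| support, attaches the inotify sub-library to it.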
*/ void register_lsyncd( lua_State *L ) { luaL_register( L, LSYNCD_LIBNAME, lsyncdlib ); lua_setglobal( L, LSYNCD_LIBNAME ); // creates the metatable for the jiffies ( timestamps ) userdata luaL_newmetatable( L, "Lsyncd.jiffies" ); int mt = lua_gettop( L ); lua_pushcfunction( L, l_jiffies_add ); lua_setfield( L, mt, "__add" ); lua_pushcfunction( L, l_jiffies_sub ); lua_setfield( L, mt, "__sub" ); lua_pushcfunction( L, l_jiffies_lt ); lua_setfield( L, mt, "__lt" ); lua_pushcfunction( L, l_jiffies_le ); lua_setfield( L, mt, "__le" ); lua_pushcfunction( L, l_jiffies_eq ); lua_setfield( L, mt, "__eq" ); lua_pop( L, 1 ); // pop(mt) #ifdef WITH_INOTIFY lua_getglobal( L, LSYNCD_LIBNAME ); register_inotify( L ); lua_setfield( L, -2, LSYNCD_INOTIFYLIBNAME ); lua_pop( L, 1 ); #endif if( lua_gettop( L ) ) { logstring( "Error", "internal, stack not empty in lsyncd_register( )" ); exit( -1 ); } } /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~* ( Lsyncd Core ) *~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ /* | Pushes a function from the runner on the stack. | As well as the callError handler. */ extern void load_runner_func( lua_State * L, const char * name ) { printlogf( L, "Call", "%s( )", name ); // pushes the error handler lua_pushlightuserdata( L, (void *) &callError ); lua_gettable( L, LUA_REGISTRYINDEX ); // pushes the function lua_pushlightuserdata( L, (void *) &runner ); lua_gettable( L, LUA_REGISTRYINDEX ); lua_pushstring( L, name ); lua_gettable( L, -2 ); lua_remove( L, -2 ); } /* | Daemonizes. | | Lsyncds own implementation over daemon(0, 0) since | a) OSX keeps bugging about it being deprecated | b) for a reason, since blindly closing stdin/out/err | is unsafe, since they might not have existed and | might actually close the monitors fd! */ static void daemonize( lua_State *L ) { pid_t pid, sid; pid = fork( ); if( pid < 0 ) { printlogf( L, "Error", "Failure in daemonize at fork: %s", strerror( errno ) ); exit( -1 ); } if (pid > 0) { // parent process returns to shell exit( 0 ); } // detaches the new process from the parent process sid = setsid( ); if( sid < 0 ) { printlogf( L, "Error", "Failure in daemonize at setsid: %s", strerror( errno ) ); exit( -1 ); } // goes to root dir if( chdir( "/" ) < 0 ) { printlogf( L, "Error", "Failure in daemonize at chdir( \"/\" ): %s", strerror( errno ) ); exit( -1 ); } // does what clibs daemon( 0, 0 ) cannot do, // checks if there were no stdstreams and it might close used fds if( observances_len && observances->fd < 3 ) { printlogf( L, "Normal", "daemonize not closing stdin/out/err, since there seem to none." ); return; } // disconnects stdstreams if ( !freopen( "/dev/null", "r", stdin ) || !freopen( "/dev/null", "w", stdout ) || !freopen( "/dev/null", "w", stderr ) ) { printlogf( L, "Error", "Failure in daemonize at freopen( /dev/null, std[in|out|err] )" ); } is_daemon = true; } /* | Normal operation happens in here. 
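| Every turn queries the runner for the next alarm, sleeps in pselect( ) on all
| observed file descriptors, dispatches their ready/writey handlers, collects
| finished child processes, reacts to HUP and TERM signals and finally calls
| the runner's cycle( ).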
*/ static void masterloop(lua_State *L) { while( true ) { bool have_alarm; bool force_alarm = false; clock_t now = times( dummy_tms ); clock_t alarm_time = 0; // memory usage debugging // lua_gc( L, LUA_GCCOLLECT, 0 ); // printf( // "gccount: %d\n", // lua_gc( L, LUA_GCCOUNT, 0 ) * 1024 + lua_gc( L, LUA_GCCOUNTB, 0 ) ); // // queries the runner about the soonest alarm // load_runner_func( L, "getAlarm" ); if( lua_pcall( L, 0, 1, -2 ) ) { exit( -1 ); } if( lua_type( L, -1 ) == LUA_TBOOLEAN) { have_alarm = false; force_alarm = lua_toboolean( L, -1 ); } else { have_alarm = true; alarm_time = *( ( clock_t * ) luaL_checkudata( L, -1, "Lsyncd.jiffies" ) ); } lua_pop( L, 2 ); if( force_alarm || ( have_alarm && time_before_eq( alarm_time, now ) ) ) { // there is a delay that wants to be handled already thus instead // of reading/writing from observances it jumps directly to // handling // TODO: Actually it might be smarter to handler observances // eitherway. since event queues might overflow. logstring( "Masterloop", "immediately handling delays." ); } else { // uses select( ) to determine what happens next: // a) a new event on an observance // b) an alarm on timeout // c) the return of a child process struct timespec tv; if( have_alarm ) { // TODO use trunc instead of long converstions double d = ( (double )( alarm_time - now ) ) / clocks_per_sec; tv.tv_sec = d; tv.tv_nsec = ( (d - ( long ) d) ) * 1000000000.0; printlogf( L, "Masterloop", "going into select ( timeout %f seconds )", d ); } else { logstring( "Masterloop", "going into select ( no timeout )" ); } // time for Lsyncd to try to put itself to rest into the big select( ) // this configures: // timeouts, // filedescriptors and // signals // that will wake Lsyncd { fd_set rfds; fd_set wfds; sigset_t sigset; int pi, pr; sigemptyset( &sigset ); FD_ZERO( &rfds ); FD_ZERO( &wfds ); for( pi = 0; pi < observances_len; pi++ ) { struct observance *obs = observances + pi; if ( obs->ready ) { FD_SET( obs->fd, &rfds ); } if ( obs->writey ) { FD_SET( obs->fd, &wfds ); } } if( !observances_len ) { logstring( "Error", "Internal fail, no observances, no monitor!" ); exit( -1 ); } // the great select, this is the very heart beat of Lsyncd // that puts Lsyncd to sleep until anything worth noticing // happens pr = pselect( observances[ observances_len - 1 ].fd + 1, &rfds, &wfds, NULL, have_alarm ? &tv : NULL, &sigset ); // something happened! 
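// ( pr < 0 means pselect( ) failed or was interrupted by a signal;
//   the observances are walked only when pr >= 0. )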
if (pr >= 0) { // walks through the observances calling ready/writey observance_action = true; for( pi = 0; pi < observances_len; pi++ ) { struct observance *obs = observances + pi; // Checks for signals if( hup || term ) { break; } // a file descriptor became read-ready if( obs->ready && FD_ISSET( obs->fd, &rfds ) ) { obs->ready(L, obs); } // Checks for signals, again, better safe than sorry if ( hup || term ) { break; } // FIXME breaks on multiple nonobservances in one beat if( nonobservances_len > 0 && nonobservances[ nonobservances_len - 1 ] == obs->fd ) { continue; } // a file descriptor became write-ready if( obs->writey && FD_ISSET( obs->fd, &wfds ) ) { obs->writey( L, obs ); } } observance_action = false; // works through delayed nonobserve_fd() calls for (pi = 0; pi < nonobservances_len; pi++) { nonobserve_fd( nonobservances[ pi ] ); } nonobservances_len = 0; } } } // collects zombified child processes while( 1 ) { int status; pid_t pid = waitpid( 0, &status, WNOHANG ); if (pid <= 0) { // no more zombies break; } // calls the runner to handle the collection load_runner_func( L, "collectProcess" ); lua_pushinteger( L, pid ); lua_pushinteger( L, WEXITSTATUS( status ) ); if ( lua_pcall( L, 2, 0, -4 ) ) { exit(-1); } lua_pop( L, 1 ); } // reacts on HUP signals if( hup ) { load_runner_func( L, "hup" ); if( lua_pcall( L, 0, 0, -2 ) ) { exit( -1 ); } lua_pop( L, 1 ); hup = 0; } // reacts on TERM and INT signals if( term == 1 ) { load_runner_func( L, "term" ); lua_pushnumber( L, sigcode ); if( lua_pcall( L, 1, 0, -3 ) ) { exit( -1 ); } lua_pop( L, 1 ); term = 2; } // lets the runner do stuff every cycle, // like starting new processes, writing the statusfile etc. load_runner_func( L, "cycle" ); l_now( L ); if( lua_pcall( L, 1, 1, -3 ) ) { exit( -1 ); } if( !lua_toboolean( L, -1 ) ) { // cycle told core to break mainloop lua_pop( L, 2 ); return; } lua_pop( L, 2 ); if( lua_gettop( L ) ) { logstring( "Error", "internal, stack is dirty." ); l_stackdump( L ); exit( -1 ); } } } /* | The effective main for one run. | | HUP signals may cause several runs of the one main. */ int main1( int argc, char *argv[] ) { // the Lua interpreter lua_State * L; // the runner file char * lsyncd_runner_file = NULL; int argp = 1; // load Lua L = luaL_newstate( ); luaL_openlibs( L ); { // checks the lua version const char * version; int major, minor; lua_getglobal( L, "_VERSION" ); version = luaL_checkstring( L, -1 ); if( sscanf( version, "Lua %d.%d", &major, &minor ) != 2 ) { fprintf( stderr, "cannot parse lua library version!\n" ); exit (-1 ); } if( major < 5 || (major == 5 && minor < 1) ) { fprintf( stderr, "Lua library is too old. 
Needs 5.1 at least" ); exit( -1 ); } lua_pop( L, 1 ); } { // logging is prepared quite early int i = 1; add_logcat( "Normal", LOG_NOTICE ); add_logcat( "Warn", LOG_WARNING ); add_logcat( "Error", LOG_ERR ); while( i < argc ) { if( strcmp( argv[ i ], "-log" ) && strcmp( argv[ i ], "--log" ) ) { // arg is neither -log or --log i++; continue; } if( ++i >= argc ) { // -(-)log was last argument break; } if( !add_logcat( argv[ i ], LOG_NOTICE ) ) { printlogf( L, "Error", "'%s' is not a valid logging category", argv[ i ] ); exit( -1 ); } } } // registers Lsycnd's core library register_lsyncd( L ); if( check_logcat( "Debug" ) <= settings.log_level ) { // printlogf doesnt support %ld :-( printf( "kernels clocks_per_sec=%ld\n", clocks_per_sec ); } // checks if the user overrode the default runner file if( argp < argc && !strcmp( argv[ argp ], "--runner" ) ) { if (argp + 1 >= argc) { logstring( "Error", "Lsyncd Lua-runner file missing after --runner " ); exit( -1 ); } lsyncd_runner_file = argv[ argp + 1 ]; argp += 2; } if( lsyncd_runner_file ) { // checks if the runner file exists struct stat st; if( stat( lsyncd_runner_file, &st ) ) { printlogf( L, "Error", "Cannot see a runner at '%s'.", lsyncd_runner_file ); exit( -1 ); } // loads the runner file if( luaL_loadfile(L, lsyncd_runner_file ) ) { printlogf( L, "Error", "error loading '%s': %s", lsyncd_runner_file, lua_tostring( L, -1 ) ); exit( -1 ); } } else { // loads the runner from binary if( luaL_loadbuffer( L, runner_out, runner_size, "runner" ) ) { printlogf( L, "Error", "error loading precompiled runner: %s", lua_tostring( L, -1 ) ); exit( -1 ); } } // prepares the runner executing the script { if( lua_pcall( L, 0, LUA_MULTRET, 0 ) ) { printlogf( L, "Error", "preparing runner: %s", lua_tostring( L, -1 ) ); exit( -1 ); } lua_pushlightuserdata( L, (void *) & runner ); // switches the value ( result of preparing ) and the key &runner lua_insert( L, 1 ); // saves the table of the runners functions in the lua registry lua_settable( L, LUA_REGISTRYINDEX ); // saves the error function extras // &callError is the key lua_pushlightuserdata ( L, (void *) &callError ); // &runner[ callError ] the value lua_pushlightuserdata ( L, (void *) &runner ); lua_gettable ( L, LUA_REGISTRYINDEX ); lua_pushstring ( L, "callError" ); lua_gettable ( L, -2 ); lua_remove ( L, -2 ); lua_settable ( L, LUA_REGISTRYINDEX ); } // asserts the Lsyncd's version matches // between runner and core { const char *lversion; lua_getglobal( L, "lsyncd_version" ); lversion = luaL_checkstring( L, -1 ); if( strcmp( lversion, PACKAGE_VERSION ) ) { printlogf( L, "Error", "Version mismatch '%s' is '%s', but core is '%s'", lsyncd_runner_file ? 
lsyncd_runner_file : "( internal runner )", lversion, PACKAGE_VERSION ); exit( -1 ); } lua_pop( L, 1 ); } // loads the defaults from binary { if( luaL_loadbuffer( L, defaults_out, defaults_size, "defaults" ) ) { printlogf( L, "Error", "loading defaults: %s", lua_tostring( L, -1 ) ); exit( -1 ); } // prepares the defaults if( lua_pcall( L, 0, 0, 0 ) ) { printlogf( L, "Error", "preparing defaults: %s", lua_tostring( L, -1 ) ); exit( -1 ); } } // checks if there is a "-help" or "--help" { int i; for( i = argp; i < argc; i++ ) { if ( !strcmp( argv[ i ], "-help" ) || !strcmp( argv[ i ], "--help" ) ) { load_runner_func( L, "help" ); if( lua_pcall( L, 0, 0, -2 ) ) { exit( -1 ); } lua_pop( L, 1 ); exit( 0 ); } } } // starts the option parser in Lua script { int idx = 1; const char *s; // creates a table with all remaining argv option arguments load_runner_func( L, "configure" ); lua_newtable( L ); while( argp < argc ) { lua_pushnumber ( L, idx++ ); lua_pushstring ( L, argv[ argp++ ] ); lua_settable ( L, -3 ); } // creates a table with the cores event monitor interfaces idx = 0; lua_newtable( L ); while( monitors[ idx ] ) { lua_pushnumber ( L, idx + 1 ); lua_pushstring ( L, monitors[ idx++ ] ); lua_settable ( L, -3 ); } if( lua_pcall( L, 2, 1, -4 ) ) { exit( -1 ); } if( first_time ) { // If not first time, simply retains the config file given s = lua_tostring(L, -1); if( s ) { lsyncd_config_file = s_strdup( s ); } } lua_pop( L, 2 ); } // checks existence of the config file if( lsyncd_config_file ) { struct stat st; // gets the absolute path to the config file // so in case of HUPing the daemon, it finds it again char * apath = get_realpath( lsyncd_config_file ); if( !apath ) { printlogf( L, "Error", "Cannot find config file at '%s'.", lsyncd_config_file ); exit( -1 ); } free( lsyncd_config_file ); lsyncd_config_file = apath; if( stat( lsyncd_config_file, &st ) ) { printlogf( L, "Error", "Cannot find config file at '%s'.", lsyncd_config_file ); exit( -1 ); } // loads and executes the config file if( luaL_loadfile( L, lsyncd_config_file ) ) { printlogf( L, "Error", "error loading %s: %s", lsyncd_config_file, lua_tostring( L, -1 ) ); exit( -1 ); } if( lua_pcall( L, 0, LUA_MULTRET, 0) ) { printlogf( L, "Error", "error preparing %s: %s", lsyncd_config_file, lua_tostring( L, -1 ) ); exit( -1 ); } } #ifdef WITH_INOTIFY open_inotify( L ); #endif #ifdef WITH_FSEVENTS open_fsevents( L ); #endif // adds signal handlers // listens to SIGCHLD, but blocks it until pselect( ) // opens the signal handler up { sigset_t set; sigemptyset( &set ); sigaddset( &set, SIGCHLD ); signal( SIGCHLD, sig_child ); sigprocmask( SIG_BLOCK, &set, NULL ); signal( SIGHUP, sig_handler ); signal( SIGTERM, sig_handler ); signal( SIGINT, sig_handler ); } // runs initializations from runner // it will set the configuration and add watches { load_runner_func( L, "initialize" ); lua_pushboolean( L, first_time ); if( lua_pcall( L, 1, 0, -3 ) ) { exit( -1 ); } lua_pop( L, 1 ); } // // enters the master loop // masterloop( L ); // // cleanup // // tidies up all observances { int i; for( i = 0; i < observances_len; i++ ) { struct observance *obs = observances + i; obs->tidy( obs ); } observances_len = 0; nonobservances_len = 0; } // frees logging categories { int ci; struct logcat *lc; for( ci = 'A'; ci <= 'Z'; ci++ ) { for( lc = logcats[ ci - 'A' ]; lc && lc->name; lc++) { free( lc->name ); lc->name = NULL; } if( logcats[ci - 'A' ] ) { free( logcats[ ci - 'A' ] ); logcats[ ci - 'A' ] = NULL; } } } lua_close( L ); return 0; } /* | Main */ int 
main( int argc, char * argv[ ] ) { // gets a kernel parameter clocks_per_sec = sysconf( _SC_CLK_TCK ); while( !term ) { main1( argc, argv ); } if( pidfile_fd > 0 ) { close( pidfile_fd ); } if( settings.pidfile ) { remove( settings.pidfile ); } // exits with error code responding to the signal it died for return 128 + sigcode; } lsyncd-release-2.1.6/lsyncd.h000066400000000000000000000075601260764373300161220ustar00rootroot00000000000000/** * lsyncd.h Live (Mirror) Syncing Demon * * Interface between the core modules. * * License: GPLv2 (see COPYING) or any later version * Authors: Axel Kittenberger * **/ #ifndef LSYNCD_H #define LSYNCD_H // some older machines need this to see pselect #define _BSD_SOURCE 1 #define _XOPEN_SOURCE 700 #define _DARWIN_C_SOURCE 1 #define LUA_COMPAT_ALL // includes needed for headerfile #include "config.h" #include #include #include #define LUA_USE_APICHECK 1 #include #define LSYNCD_LIBNAME "lsyncd" #define LSYNCD_INOTIFYLIBNAME "inotify" /** * Lsyncd runtime configuration */ extern struct settings { char * log_file; // If not NULL Lsyncd logs into this file. bool log_syslog; // If true Lsyncd sends log messages to syslog char * log_ident; // If not NULL the syslog identity (otherwise "Lsyncd") int log_facility; // The syslog facility int log_level; // -1 logs everything, 0 normal mode, LOG_ERROR errors only. bool nodaemon; // True if Lsyncd shall not daemonize. char * pidfile; // If not NULL Lsyncd writes its pid into this file. } settings; /** * time comparisons - wrap around safe */ #define time_after(a,b) ((long)(b) - (long)(a) < 0) #define time_before(a,b) time_after(b,a) #define time_after_eq(a,b) ((long)(a) - (long)(b) >= 0) #define time_before_eq(a,b) time_after_eq(b,a) // returns (on Lua stack) the current kernels * clock state (jiffies) extern int l_now(lua_State *L); // pushes a runner function and the runner error handler onto Lua stack extern void load_runner_func(lua_State *L, const char *name); // set to 1 on hup signal or term signal extern volatile sig_atomic_t hup; extern volatile sig_atomic_t term; /** * wrappers for heap management, they exit if out-of-memory. */ extern void * s_calloc(size_t nmemb, size_t size); extern void * s_malloc(size_t size); extern void * s_realloc(void *ptr, size_t size); extern char * s_strdup(const char *src); /** * Logging */ // Returns the positive priority if name is configured to be logged, or -1 extern int check_logcat(const char *name); // logs a string #define logstring(cat, message) \ {int p; if ((p = check_logcat(cat)) <= settings.log_level) \ {logstring0(p, cat, message);}} extern void logstring0(int priority, const char *cat, const char *message); // logs a formated string #define printlogf(L, cat, ...) \ {int p; if ((p = check_logcat(cat)) <= settings.log_level) \ {printlogf0(L, p, cat, __VA_ARGS__);}} extern void printlogf0(lua_State *L, int priority, const char *cat, const char *fmt, ...) __attribute__((format(printf, 4, 5))); /** * File-descriptor helpers */ // Sets the non-blocking flag for a file descriptor. extern void non_block_fd(int fd); // Sets the close-on-exit flag for a file descriptor. extern void close_exec_fd(int fd); /** * An observance to be called when a file descritor becomes * read-ready or write-ready. */ struct observance { // The file descriptor to observe. int fd; // Function to call when read becomes ready. void (*ready)(lua_State *, struct observance *); // Function to call when write becomes ready. 
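// May be NULL when the descriptor is observed for reading only.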
void (*writey)(lua_State *, struct observance *); // Function to call to clean up void (*tidy)(struct observance *); // Extra tokens to pass to the functions. void *extra; }; // makes the core observe a file descriptor extern void observe_fd( int fd, void (*ready) (lua_State *, struct observance *), void (*writey)(lua_State *, struct observance *), void (*tidy) (struct observance *), void *extra ); // stops the core to observe a file descriptor extern void nonobserve_fd(int fd); /* * inotify */ #ifdef WITH_INOTIFY extern void register_inotify(lua_State *L); extern void open_inotify(lua_State *L); #endif /* * /dev/fsevents */ #ifdef WITH_FSEVENTS extern void open_fsevents(lua_State *L); #endif #endif lsyncd-release-2.1.6/lsyncd.lua000066400000000000000000002326021260764373300164510ustar00rootroot00000000000000--~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- lsyncd.lua Live (Mirror) Syncing Demon -- -- This is the "runner" part of Lsyncd. It containts all its high-level logic. -- It works closely together with the Lsyncd core in lsyncd.c. This means it -- cannot be runned directly from the standard lua interpreter. -- -- This code assumes your editor is at least 100 chars wide. -- -- License: GPLv2 (see COPYING) or any later version -- Authors: Axel Kittenberger -- --~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- require('profiler') -- profiler.start() -- -- A security measurement. -- The core will exit if version ids mismatch. -- if lsyncd_version then -- ensures the runner is not being loaded twice lsyncd.log( 'Error', 'You cannot use the lsyncd runner as configuration file!' ) lsyncd.terminate( -1 ) end lsyncd_version = '2.1.6' -- -- Hides the core interface from user scripts. -- local _l = lsyncd lsyncd = nil local lsyncd = _l _l = nil -- -- Shortcuts (which user is supposed to be able to use them as well) -- log = lsyncd.log terminate = lsyncd.terminate now = lsyncd.now readdir = lsyncd.readdir -- -- Coping globals to ensure userscripts don't change this. -- local log = log local terminate = terminate local now = now -- -- Predeclarations -- local Monitors -- -- Global: total number of processess running -- local processCount = 0 -- -- Settings specified by command line. -- local clSettings = { } -- -- Settings specified by config scripts. -- local uSettings = { } -- -- A copy of the settings function to see if the -- user script replaced the settings() by a table -- ( pre Lsyncd 2.1 style ) -- local settingsSafe --============================================================================ -- Lsyncd Prototypes --============================================================================ -- -- Array tables error if accessed with a non-number. -- local Array = ( function( ) -- Metatable local mt = { } -- on accessing a nil index. mt.__index = function( t, k ) if type(k) ~= 'number' then error( 'Key "'..k..'" invalid for Array', 2 ) end return rawget( t, k ) end -- on assigning a new index. mt.__newindex = function( t, k, v ) if type( k ) ~= 'number' then error( 'Key "'..k..'" invalid for Array', 2 ) end rawset( t, k, v ) end -- creates a new object local function new( ) local o = { } setmetatable( o, mt ) return o end -- -- Public interface -- return { new = new } end )( ) -- -- Count array tables error if accessed with a non-number. 
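-- ( non-number keys raise an error, just as with Array above )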
-- -- Additionally they maintain their length as 'size' attribute, -- since Lua's # operator does not work on tables whose key values are not -- strictly linear. -- local CountArray = ( function( ) -- -- Metatable -- local mt = { } -- -- Key to native table -- local k_nt = { } -- -- On accessing a nil index. -- mt.__index = function( t, k ) if type( k ) ~= 'number' then error( 'Key "'..k..'" invalid for CountArray', 2 ) end return t[ k_nt ][ k ] end -- -- On assigning a new index. -- mt.__newindex = function( t, k, v ) if type(k) ~= 'number' then error( 'Key "'..k..'" invalid for CountArray', 2 ) end -- value before local vb = t[ k_nt ][ k ] if v and not vb then t._size = t._size + 1 elseif not v and vb then t._size = t._size - 1 end t[ k_nt ][ k ] = v end -- -- Walks through all entries in any order. -- local function walk( self ) return pairs( self[ k_nt ] ) end -- -- Returns the count -- local function size( self ) return self._size end -- -- Creates a new count array -- local function new( ) -- k_nt is native table, private for this object. local o = { _size = 0, walk = walk, size = size, [k_nt] = { } } setmetatable(o, mt) return o end -- -- Public interface -- return { new = new } end )( ) -- -- A queue is optimized for pushing on the right and poping on the left. -- Queue = ( function( ) -- -- Creates a new queue. -- local function new( ) return { first = 1, last = 0, size = 0 }; end -- -- Pushes a value on the queue. -- Returns the last value -- local function push( list, value ) if not value then error('Queue pushing nil value', 2) end local last = list.last + 1 list.last = last list[ last ] = value list.size = list.size + 1 return last end -- -- Removes an item at pos from the Queue. -- local function remove( list, pos ) if list[ pos ] == nil then error('Removing nonexisting item in Queue', 2) end list[ pos ] = nil -- if removing first or last element, -- the queue limits are adjusted. if pos == list.first then local last = list.last while list[ pos ] == nil and pos <= list.last do pos = pos + 1 end list.first = pos elseif pos == list.last then while list[ pos ] == nil and pos >= list.first do pos = pos - 1 end list.last = pos end -- reset the indizies if the queue is empty if list.last < list.first then list.first = 1 list.last = 0 end list.size = list.size - 1 end -- -- Queue iterator (stateless) -- local function iter( list, pos ) pos = pos + 1 while list[ pos ] == nil and pos <= list.last do pos = pos + 1 end if pos > list.last then return nil end return pos, list[ pos ] end -- -- Reverse queue iterator (stateless) -- local function iterReverse( list, pos ) pos = pos - 1 while list[pos] == nil and pos >= list.first do pos = pos - 1 end if pos < list.first then return nil end return pos, list[ pos ] end -- -- Iteraters through the queue -- returning all non-nil pos-value entries. -- local function qpairs( list ) return iter, list, list.first - 1 end -- -- Iteraters backwards through the queue -- returning all non-nil pos-value entries. 
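--
-- A minimal usage sketch of the queue as a whole ( illustration only ):
--
--   local q = Queue.new( )
--   local pos = Queue.push( q, 'job' )             -- appends on the right
--   for p, v in Queue.qpairs( q ) do print( v ) end
--   for p, v in Queue.qpairsReverse( q ) do print( v ) end
--   Queue.remove( q, pos )                         -- holes are skipped by the iterators
--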
-- local function qpairsReverse( list ) return iterReverse, list, list.last + 1 end return { new = new, push = push, remove = remove, qpairs = qpairs, qpairsReverse = qpairsReverse } end )( ) -- -- Locks globals, -- No more globals can be created after this -- local function lockGlobals( ) local t = _G local mt = getmetatable( t ) or { } -- TODO try to remove the underscore exceptions mt.__index = function( t, k ) if k ~= '_' and string.sub(k, 1, 2) ~= '__' then error( 'Access of non-existing global "'..k..'"', 2 ) else rawget( t, k ) end end mt.__newindex = function( t, k, v ) if k ~= '_' and string.sub( k, 1, 2 ) ~= '__' then error('Lsyncd does not allow GLOBALS to be created on the fly. '.. 'Declare "'..k..'" local or declare global on load.', 2) else rawset( t, k, v ) end end setmetatable( t, mt ) end -- -- Holds the information about a delayed event for one Sync. -- local Delay = ( function( ) -- -- Creates a new delay. -- -- Params see below. -- local function new( etype, sync, alarm, path, path2 ) local o = { -- -- Type of event. -- Can be 'Create', 'Modify', 'Attrib', 'Delete' and 'Move' -- etype = etype, -- -- the Sync this delay belongs to -- sync = sync, -- -- Latest point in time this should be catered for. -- This value is in kernel ticks, return of the C's -- times(NULL) call. alarm = alarm, -- -- Path and filename or dirname of the delay relative -- to the syncs root. -- -- for the directories it contains a trailing slash -- path = path, -- -- Used only for Moves. -- Path and file/dirname of a move destination. -- path2 = path2, -- -- Status of the event. Valid stati are: -- -- 'wait' ... the event is ready to be handled. -- -- 'active' ... there is process running catering for this event. -- -- 'blocked' ... this event waits for another to be handled first. -- -- 'done' ... event has been collected. This should never be -- visible as all references should be droped on -- collection, nevertheless the seperate status is -- used as insurrance everything is running correctly. status = 'wait', -- -- Position in the queue -- dpos = -1, } return o end -- -- Public interface -- return { new = new } end )( ) -- -- Combines delays -- local Combiner = ( function( ) -- -- The new delay is absorbed by an older one. -- local function abso( d1, d2 ) log( 'Delay', d2.etype, ':',d2.path, ' absorbed by ', d1.etype,':',d1.path ) return 'absorb' end -- -- The new delay replaces the old one if it's a file -- local function refi( d1, d2 ) -- but a directory blocks if d2.path:byte( -1 ) == 47 then log( 'Delay', d2.etype,':',d2.path, ' blocked by ', d1.etype,':',d1.path ) return 'stack' end log( 'Delay', d2.etype, ':', d2.path, ' replaces ', d1.etype, ':', d1.path ) return 'replace' end -- -- The new delay replaces an older one. -- local function repl( d1, d2 ) log( 'Delay', d2.etype, ':', d2.path, ' replaces ', d1.etype, ':', d1.path ) return 'replace' end -- -- Two delays nullificate each other. -- local function null( d1, d2 ) log( 'Delay', d2.etype,':',d2.path, ' nullifies ', d1.etype,':',d1.path ) return 'remove' end -- -- Table on how to combine events that dont involve a move. 
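-- ( first key: etype of the delay already queued, second key: etype of the new
--   event; the handlers return 'absorb', 'replace' or 'stack' accordingly )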
-- local combineNoMove = { Attrib = { Attrib = abso, Modify = repl, Create = repl, Delete = repl }, Modify = { Attrib = abso, Modify = abso, Create = repl, Delete = repl }, Create = { Attrib = abso, Modify = abso, Create = abso, Delete = repl }, Delete = { Attrib = abso, Modify = abso, Create = refi, Delete = abso }, } -- -- Combines two delays -- local function combine( d1, d2 ) if d1.etype == 'Init' or d1.etype == 'Blanket' then -- everything is blocked by init or blanket delays. if d2.path2 then log( 'Delay', d2.etype,':',d2.path,'->',d2.path2, ' blocked by ', d1.etype,' event' ) else log( 'Delay', d2.etype,':',d2.path, ' blocked by ', d1.etype,' event' ) end return 'stack' end -- two normal events if d1.etype ~= 'Move' and d2.etype ~= 'Move' then if d1.path == d2.path then if d1.status == 'active' then return 'stack' end return combineNoMove[ d1.etype ][ d2.etype ]( d1, d2 ) end -- if one is a parent directory of another, events are blocking if d1.path:byte(-1) == 47 and string.starts(d2.path, d1.path) or d2.path:byte(-1) == 47 and string.starts(d1.path, d2.path) then return 'stack' end return nil end -- non-move event on a move. if d1.etype == 'Move' and d2.etype ~= 'Move' then -- if the from field could be damaged the events are stacked if d1.path == d2.path or d2.path:byte(-1) == 47 and string.starts(d1.path, d2.path) or d1.path:byte(-1) == 47 and string.starts(d2.path, d1.path) then log( 'Delay', d2.etype, ':', d2.path, ' blocked by ', 'Move :', d1.path,'->', d1.path2 ) return 'stack' end -- the event does something with the move destination if d1.path2 == d2.path then if d2.etype == 'Delete' or d2.etype == 'Create' then if d1.status == 'active' then return 'stack' end log( 'Delay', d2.etype, ':', d2.path, ' turns ', 'Move :', d1.path, '->', d1.path2, ' into ', 'Delete:', d1.path ) d1.etype = 'Delete' d1.path2 = nil return 'stack' end -- on 'Attrib' or 'Modify' simply stack on moves return 'stack' end if d2.path :byte(-1) == 47 and string.starts(d1.path2, d2.path) or d1.path2:byte(-1) == 47 and string.starts(d2.path, d1.path2) then log( 'Delay' ,d2.etype, ':', d2.path, ' blocked by ', 'Move:', d1.path, '->', d1.path2 ) return 'stack' end return nil end -- a move upon a non-move event if d1.etype ~= 'Move' and d2.etype == 'Move' then if d1.path == d2.path or d1.path == d2.path2 or d1.path :byte(-1) == 47 and string.starts(d2.path, d1.path) or d1.path :byte(-1) == 47 and string.starts(d2.path2, d1.path) or d2.path :byte(-1) == 47 and string.starts(d1.path, d2.path) or d2.path2:byte(-1) == 47 and string.starts(d1.path, d2.path2) then log( 'Delay', 'Move:', d2.path, '->', d2.path2, ' splits on ', d1.etype, ':', d1.path ) return 'split' end return nil end -- -- a move event upon a move event -- if d1.etype == 'Move' and d2.etype == 'Move' then -- TODO combine moves, if d1.path == d2.path or d1.path == d2.path2 or d1.path2 == d2.path or d2.path2 == d2.path or d1.path :byte(-1) == 47 and string.starts(d2.path, d1.path) or d1.path :byte(-1) == 47 and string.starts(d2.path2, d1.path) or d1.path2:byte(-1) == 47 and string.starts(d2.path, d1.path2) or d1.path2:byte(-1) == 47 and string.starts(d2.path2, d1.path2) or d2.path :byte(-1) == 47 and string.starts(d1.path, d2.path) or d2.path :byte(-1) == 47 and string.starts(d1.path2, d2.path) or d2.path2:byte(-1) == 47 and string.starts(d1.path, d2.path2) or d2.path2:byte(-1) == 47 and string.starts(d1.path2, d2.path2) then log( 'Delay', 'Move:', d2.path, '->', d1.path2, ' splits on Move:', d1.path, '->', d1.path2 ) return 'split' end return nil 
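-- ( moves that do not touch each other's paths are left uncombined )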
end error( 'reached impossible state' ) end -- -- Public interface -- return { combine = combine } end )( ) -- -- Creates inlets for syncs: the user interface for events. -- local InletFactory = ( function( ) -- -- Table to receive the delay of an event -- or the delay list of an event list. -- -- Keys are events and values are delays. -- local e2d = { } -- -- Table to ensure the uniqueness of every event -- related to a delay. -- -- Keys are delay and values are events. -- local e2d2 = { } -- -- Allows the garbage collector to remove not refrenced -- events. -- setmetatable( e2d, { __mode = 'k' } ) setmetatable( e2d2, { __mode = 'v' } ) -- -- Removes the trailing slash from a path. -- local function cutSlash( path ) if string.byte(path, -1) == 47 then return string.sub(path, 1, -2) else return path end end -- -- Gets the path of an event. -- local function getPath( event ) if event.move ~= 'To' then return e2d[ event ].path else return e2d[ event ].path2 end end -- -- Interface for user scripts to get event fields. -- local eventFields = { -- -- Returns a copy of the configuration as called by sync. -- But including all inherited data and default values. -- -- TODO give user a readonly version. -- config = function( event ) return e2d[ event ].sync.config end, ----- -- Returns the inlet belonging to an event. -- inlet = function( event ) return e2d[ event ].sync.inlet end, -- -- Returns the type of the event. -- -- Can be: 'Attrib', 'Create', 'Delete', 'Modify' or 'Move', -- etype = function( event ) return e2d[ event ].etype end, -- -- Events are not lists. -- isList = function( ) return false end, -- -- Returns the status of the event. -- -- Can be: -- 'wait', 'active', 'block'. -- status = function( event ) return e2d[ event ].status end, -- -- Returns true if event relates to a directory -- isdir = function( event ) return string.byte( getPath( event ), -1 ) == 47 end, -- -- Returns the name of the file/dir. -- -- Includes a trailing slash for dirs. -- name = function( event ) return string.match( getPath( event ), '[^/]+/?$' ) end, -- -- Returns the name of the file/dir -- excluding a trailing slash for dirs. -- basename = function( event ) return string.match( getPath( event ), '([^/]+)/?$') end, --- -- Returns the file/dir relative to watch root -- including a trailing slash for dirs. -- path = function( event ) return getPath( event ) end, -- -- Returns the directory of the file/dir relative to watch root -- Always includes a trailing slash. -- pathdir = function( event ) return string.match( getPath( event ), '^(.*/)[^/]+/?' ) or '' end, -- -- Returns the file/dir relativ to watch root -- excluding a trailing slash for dirs. -- pathname = function( event ) return cutSlash( getPath( event ) ) end, --- -- Returns the absolute path of the watch root. -- All symlinks are resolved. -- source = function( event ) return e2d[ event ].sync.source end, -- -- Returns the absolute path of the file/dir -- including a trailing slash for dirs. -- sourcePath = function( event ) return e2d[ event ].sync.source .. getPath( event ) end, -- -- Returns the absolute dir of the file/dir -- including a trailing slash. -- sourcePathdir = function( event ) return e2d[event].sync.source .. ( string.match( getPath( event ), '^(.*/)[^/]+/?' ) or '' ) end, ------ -- Returns the absolute path of the file/dir -- excluding a trailing slash for dirs. -- sourcePathname = function( event ) return e2d[ event ].sync.source .. 
cutSlash( getPath( event ) ) end, -- -- Returns the configured target -- target = function( event ) return e2d[ event ].sync.config.target end, -- -- Returns the relative dir/file appended to the target -- including a trailing slash for dirs. -- targetPath = function( event ) return e2d[ event ].sync.config.target .. getPath( event ) end, -- -- Returns the dir of the dir/file appended to the target -- including a trailing slash. -- targetPathdir = function( event ) return e2d[ event ].sync.config.target .. ( string.match( getPath( event ), '^(.*/)[^/]+/?' ) or '' ) end, -- -- Returns the relative dir/file appended to the target -- excluding a trailing slash for dirs. -- targetPathname = function( event ) return e2d[ event ].sync.config.target .. cutSlash( getPath( event ) ) end, } -- -- Retrievs event fields for the user script. -- local eventMeta = { __index = function( event, field ) local f = eventFields[ field ] if not f then if field == 'move' then -- possibly undefined return nil end error( 'event does not have field "'..field..'"', 2 ) end return f( event ) end } -- -- Interface for user scripts to get list fields. -- local eventListFuncs = { -- -- Returns a list of paths of all events in list. -- -- @param elist -- handle returned by getevents() -- @param mutator -- if not nil called with (etype, path, path2) -- returns one or two strings to add. -- getPaths = function( elist, mutator ) local dlist = e2d[elist] if not dlist then error( 'cannot find delay list from event list.' ) end local result = { } local resultn = 1 for k, d in ipairs( dlist ) do local s1, s2 if mutator then s1, s2 = mutator( d.etype, d.path, d.path2 ) else s1, s2 = d.path, d.path2 end result[ resultn ] = s1 resultn = resultn + 1 if s2 then result[ resultn ] = s2 resultn = resultn + 1 end end return result end } -- -- Retrievs event list fields for the user script -- local eventListMeta = { __index = function( elist, func ) if func == 'isList' then return true end if func == 'config' then return e2d[ elist ].sync.config end local f = eventListFuncs[ func ] if not f then error( 'event list does not have function "' .. func .. '"', 2 ) end return function( ... ) return f( elist, ... ) end end } -- -- Table of all inlets with their syncs. -- local inlets = { } -- -- Allows the garbage collector to remove entries. -- setmetatable( inlets, { __mode = 'v' } ) -- -- Encapsulates a delay into an event for the user script. -- local function d2e( delay ) -- already created? local eu = e2d2[delay] if delay.etype ~= 'Move' then if eu then return eu end local event = { } setmetatable( event, eventMeta ) e2d[ event ] = delay e2d2[ delay ] = event return event else -- moves have 2 events - origin and destination if eu then return eu[1], eu[2] end local event = { move = 'Fr' } local event2 = { move = 'To' } setmetatable( event, eventMeta ) setmetatable( event2, eventMeta ) e2d[ event ] = delay e2d[ event2 ] = delay e2d2[ delay ] = { event, event2 } -- move events have a field 'move' return event, event2 end end -- -- Encapsulates a delay list into an event list for the user script. -- local function dl2el( dlist ) local eu = e2d2[ dlist ] if eu then return eu end local elist = { } setmetatable( elist, eventListMeta ) e2d [ elist ] = dlist e2d2[ dlist ] = elist return elist end -- -- The functions the inlet provides. -- local inletFuncs = { -- -- Adds an exclude. -- addExclude = function( sync, pattern ) sync:addExclude( pattern ) end, -- -- Removes an exclude. 
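		-- ( usage sketch, as seen from a user script: both calls are
		--   reached through the inlet, e.g. inlet.addExclude( '*.tmp' )
		--   and inlet.rmExclude( '*.tmp' ); the pattern '*.tmp' is only
		--   an illustration )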
-- rmExclude = function( sync, pattern ) sync:rmExclude( pattern ) end, -- -- Gets the list of excludes in their original rsynlike patterns form. -- getExcludes = function( sync ) -- creates a copy local e = { } local en = 1; for k, _ in pairs( sync.excludes.list ) do e[ en ] = k; en = en + 1; end return e; end, -- -- Creates a blanketEvent that blocks everything -- and is blocked by everything. -- createBlanketEvent = function( sync ) return d2e( sync:addBlanketDelay( ) ) end, -- -- Discards a waiting event. -- discardEvent = function( sync, event ) local delay = e2d[ event ] if delay.status ~= 'wait' then log( 'Error', 'Ignored cancel of a non-waiting event of type ', event.etype ) return end sync:removeDelay( delay ) end, -- -- Gets the next not blocked event from queue. -- getEvent = function( sync ) return d2e( sync:getNextDelay( now( ) ) ) end, -- -- Gets all events that are not blocked by active events. -- -- @param if not nil a function to test each delay -- getEvents = function( sync, test ) local dlist = sync:getDelays( test ) return dl2el( dlist ) end, -- -- Returns the configuration table specified by sync{} -- getConfig = function( sync ) -- TODO gives a readonly handler only. return sync.config end, } -- -- Forwards access to inlet functions. -- local inletMeta = { __index = function( inlet, func ) local f = inletFuncs[ func ] if not f then error( 'inlet does not have function "'..func..'"', 2 ) end return function( ... ) return f( inlets[ inlet ], ... ) end end, } -- -- Creates a new inlet for Sync. -- local function newInlet( sync ) -- Lsyncd runner controlled variables local inlet = { } -- sets use access methods setmetatable( inlet, inletMeta ) inlets[ inlet ] = sync return inlet end -- -- Returns the delay from a event. -- local function getDelayOrList( event ) return e2d[ event ] end -- -- Returns the sync from an event or list -- local function getSync( event ) return e2d[ event ].sync end -- -- Public interface. -- return { getDelayOrList = getDelayOrList, d2e = d2e, dl2el = dl2el, getSync = getSync, newInlet = newInlet, } end )( ) -- -- A set of exclude patterns -- local Excludes = ( function( ) -- -- Turns a rsync like file pattern to a lua pattern. -- ( at best it can ) -- local function toLuaPattern( p ) local o = p p = string.gsub( p, '%%', '%%%%' ) p = string.gsub( p, '%^', '%%^' ) p = string.gsub( p, '%$', '%%$' ) p = string.gsub( p, '%(', '%%(' ) p = string.gsub( p, '%)', '%%)' ) p = string.gsub( p, '%.', '%%.' ) p = string.gsub( p, '%[', '%%[' ) p = string.gsub( p, '%]', '%%]' ) p = string.gsub( p, '%+', '%%+' ) p = string.gsub( p, '%-', '%%-' ) p = string.gsub( p, '%?', '[^/]' ) p = string.gsub( p, '%*', '[^/]*' ) -- this was a ** before p = string.gsub( p, '%[%^/%]%*%[%^/%]%*', '.*' ) p = string.gsub( p, '^/', '^/' ) if p:sub( 1, 2 ) ~= '^/' then -- if does not begin with '^/' -- then all matches should begin with '/'. p = '/' .. p; end log( 'Exclude', 'toLuaPattern "', o, '" = "', p, '"' ) return p end -- -- Adds a pattern to exclude -- local function add( self, pattern ) if self.list[ pattern ] then -- already in the list return end local lp = toLuaPattern( pattern ) self.list[ pattern ] = lp end -- -- Removes a pattern to exclude. -- local function remove( self, pattern ) if not self.list[ pattern ] then -- already in the list? log( 'Normal', 'Removing not excluded exclude "' .. pattern .. '"' ) return end self.list[pattern] = nil end ----- -- Adds a list of patterns to exclude. 
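	-- ( illustration, kept as a comment: toLuaPattern( '*.bak' ) above
	--   yields '/[^/]*%.bak', since '*' must not cross a slash and a
	--   pattern without a leading '/' gets one prepended; '*.bak' is
	--   only an example pattern )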
-- local function addList(self, plist) for _, v in ipairs(plist) do add(self, v) end end -- -- Loads the excludes from a file -- local function loadFile( self, file ) f, err = io.open( file ) if not f then log( 'Error', 'Cannot open exclude file "', file,'": ', err ) terminate( -1 ) end for line in f:lines() do -- lsyncd 2.0 does not support includes if not string.match(line, '^%s*%+') and not string.match(line, '^%s*#') and not string.match(line, '^%s*$') then local p = string.match( line, '%s*-?%s*(.*)' ) if p then add(self, p) end end end f:close( ) end -- -- Tests if 'path' is excluded. -- local function test( self, path ) for _, p in pairs( self.list ) do if p:byte( -1 ) == 36 then -- ends with $ if path:match( p ) then return true end else -- ends either end with / or $ if path:match(p .. '/') or path:match(p .. '$') then return true end end end return false end -- -- Cretes a new exclude set -- local function new( ) return { list = { }, -- functions add = add, addList = addList, loadFile = loadFile, remove = remove, test = test, } end -- -- Public interface -- return { new = new } end )( ) -- -- Holds information about one observed directory including subdirs. -- local Sync = ( function( ) -- -- Syncs that have no name specified by the user script -- get an incremental default name 'Sync[X]' -- local nextDefaultName = 1 -- -- Adds an exclude. -- local function addExclude( self, pattern ) return self.excludes:add( pattern ) end -- -- Removes an exclude. -- local function rmExclude( self, pattern ) return self.excludes:remove( pattern ) end -- -- Removes a delay. -- local function removeDelay( self, delay ) if self.delays[ delay.dpos ] ~= delay then error( 'Queue is broken, delay not a dpos' ) end Queue.remove( self.delays, delay.dpos ) -- free all delays blocked by this one. if delay.blocks then for i, vd in pairs( delay.blocks ) do vd.status = 'wait' end end end -- -- Returns true if this Sync concerns about 'path' -- local function concerns( self, path ) -- not concerned if watch rootdir doesnt match if not path:starts( self.source ) then return false end -- a sub dir and not concerned about subdirs if self.config.subdirs == false and path:sub( #self.source, -1 ):match( '[^/]+/?' ) then return false end -- concerned if not excluded return not self.excludes:test( path:sub( #self.source ) ) end -- -- Collects a child process -- local function collect( self, pid, exitcode ) local delay = self.processes[ pid ] if not delay then -- not a child of this sync. return end if delay.status then log( 'Delay', 'collected an event' ) if delay.status ~= 'active' then error('collecting a non-active process') end local rc = self.config.collect( InletFactory.d2e( delay ), exitcode ) if rc == 'die' then log( 'Error', 'Critical exitcode.' ); terminate( -1 ) end if rc ~= 'again' then -- if its active again the collecter restarted the event removeDelay( self, delay ) log( 'Delay', 'Finish of ', delay.etype, ' on ', self.source,delay.path, ' = ', exitcode ) else -- sets the delay on wait again delay.status = 'wait' local alarm = self.config.delay -- delays at least 1 second if alarm < 1 then alarm = 1 end delay.alarm = now( ) + alarm end else log( 'Delay', 'collected a list' ) local rc = self.config.collect( InletFactory.dl2el( delay ), exitcode ) if rc == 'die' then log( 'Error', 'Critical exitcode.' 
); terminate( -1 ) end if rc == 'again' then -- sets the delay on wait again delay.status = 'wait' local alarm = self.config.delay -- delays at least 1 second if alarm < 1 then alarm = 1 end alarm = now() + alarm for _, d in ipairs( delay ) do d.alarm = alarm d.status = 'wait' end end for _, d in ipairs( delay ) do if rc ~= 'again' then removeDelay( self, d ) else d.status = 'wait' end end log( 'Delay','Finished list = ',exitcode ) end self.processes[ pid ] = nil end -- -- Stacks a newDelay on the oldDelay, -- the oldDelay blocks the new Delay. -- -- A delay can block 'n' other delays, -- but is blocked at most by one, the latest delay. -- local function stack( oldDelay, newDelay ) newDelay.status = 'block' if not oldDelay.blocks then oldDelay.blocks = { } end table.insert( oldDelay.blocks, newDelay ) end -- -- Puts an action on the delay stack. -- local function delay( self, etype, time, path, path2 ) log( 'Function', 'delay( ', self.config.name, ', ', etype, ', ', path, ', ', path2, ' )' ) -- TODO local function recurse( ) if etype == 'Create' and path:byte( -1 ) == 47 then local entries = lsyncd.readdir( self.source .. path ) if entries then for dirname, isdir in pairs(entries) do local pd = path .. dirname if isdir then pd = pd..'/' end log( 'Delay', 'Create creates Create on ', pd ) delay( self, 'Create', time, pd, nil ) end end end end -- exclusion tests if not path2 then -- simple test for single path events if self.excludes:test(path) then log( 'Exclude', 'excluded ', etype, ' on "', path, '"' ) return end else -- for double paths (move) it might result into a split local ex1 = self.excludes:test( path ) local ex2 = self.excludes:test( path2 ) if ex1 and ex2 then log( 'Exclude', 'excluded "', etype, ' on "', path, '" -> "', path2, '"' ) return elseif not ex1 and ex2 then -- splits the move if only partly excluded log( 'Exclude', 'excluded destination transformed ', etype, ' to Delete ', path ) delay( self, 'Delete', time, path, nil ) return elseif ex1 and not ex2 then -- splits the move if only partly excluded log( 'Exclude', 'excluded origin transformed ', etype, ' to Create.', path2 ) delay( self, 'Create', time, path2, nil ) return end end if etype == 'Move' and not self.config.onMove then -- if there is no move action defined, -- split a move as delete/create -- layer 1 scripts which want moves events have to -- set onMove simply to 'true' log( 'Delay', 'splitting Move into Delete & Create' ) delay( self, 'Delete', time, path, nil ) delay( self, 'Create', time, path2, nil ) return end -- creates the new action local alarm if time and self.config.delay then alarm = time + self.config.delay else alarm = now( ) end -- new delay local nd = Delay.new( etype, self, alarm, path, path2 ) if nd.etype == 'Init' or nd.etype == 'Blanket' then -- always stack init or blanket events on the last event log( 'Delay', 'Stacking ', nd.etype, ' event.' 
) if self.delays.size > 0 then stack( self.delays[ self.delays.last ], nd ) end nd.dpos = Queue.push( self.delays, nd ) recurse( ) return end -- detects blocks and combos by working from back until -- front through the fifo for il, od in Queue.qpairsReverse( self.delays ) do -- asks Combiner what to do local ac = Combiner.combine( od, nd ) if ac then if ac == 'remove' then Queue.remove( self.delays, il ) elseif ac == 'stack' then stack( od, nd ) nd.dpos = Queue.push( self.delays, nd ) elseif ac == 'absorb' then -- nada elseif ac == 'replace' then od.etype = nd.etype od.path = nd.path od.path2 = nd.path2 elseif ac == 'split' then delay( self, 'Delete', time, path, nil ) delay( self, 'Create', time, path2, nil ) else error( 'unknown result of combine()' ) end recurse( ) return end il = il - 1 end if nd.path2 then log( 'Delay','New ',nd.etype,':',nd.path,'->',nd.path2 ) else log( 'Delay','New ',nd.etype,':',nd.path ) end -- no block or combo nd.dpos = Queue.push( self.delays, nd ) recurse( ) end -- -- Returns the soonest alarm for this Sync. -- local function getAlarm( self ) if self.processes:size( ) >= self.config.maxProcesses then return false end -- first checks if more processes could be spawned if self.processes:size( ) < self.config.maxProcesses then -- finds the nearest delay waiting to be spawned for _, d in Queue.qpairs( self.delays ) do if d.status == 'wait' then return d.alarm end end end -- nothing to spawn return false end -- -- Gets all delays that are not blocked by active delays. -- -- @param test function to test each delay -- local function getDelays( self, test ) local dlist = { sync = self} local dlistn = 1 local blocks = { } -- -- inheritly transfers all blocks from delay -- local function getBlocks( delay ) blocks[ delay ] = true if delay.blocks then for i, d in ipairs( delay.blocks ) do getBlocks( d ) end end end for i, d in Queue.qpairs( self.delays ) do if d.status == 'active' or ( test and not test( InletFactory.d2e( d ) ) ) then getBlocks( d ) elseif not blocks[ d ] then dlist[ dlistn ] = d dlistn = dlistn + 1 end end return dlist end -- -- Creates new actions -- local function invokeActions( self, timestamp ) log( 'Function', 'invokeActions( "', self.config.name, '", ', timestamp, ' )' ) if self.processes:size( ) >= self.config.maxProcesses then -- no new processes return end for _, d in Queue.qpairs( self.delays ) do -- if reached the global limit return if uSettings.maxProcesses and processCount >= uSettings.maxProcesses then log('Alarm', 'at global process limit.') return end if self.delays.size < self.config.maxDelays then -- time constrains are only concerned if not maxed -- the delay FIFO already. if d.alarm ~= true and timestamp < d.alarm then -- reached point in stack where delays are in future return end end if d.status == 'wait' then -- found a waiting delay if d.etype ~= 'Init' then self.config.action( self.inlet ) else self.config.init( InletFactory.d2e( d ) ) end if self.processes:size( ) >= self.config.maxProcesses then -- no further processes return end end end end -- -- Gets the next event to be processed. -- local function getNextDelay( self, timestamp ) for i, d in Queue.qpairs( self.delays ) do if self.delays.size < self.config.maxDelays then -- time constrains are only concerned if not maxed -- the delay FIFO already. 
if d.alarm ~= true and timestamp < d.alarm then -- reached point in stack where delays are in future return nil end end if d.status == 'wait' then -- found a waiting delay return d end end end ------ -- Adds and returns a blanket delay thats blocks all. -- Used as custom marker. -- local function addBlanketDelay( self ) local newd = Delay.new( 'Blanket', self, true, '' ) newd.dpos = Queue.push( self.delays, newd ) return newd end -- -- Adds and returns a blanket delay thats blocks all. -- Used as startup marker to call init asap. -- local function addInitDelay( self ) local newd = Delay.new( 'Init', self, true, '' ) newd.dpos = Queue.push( self.delays, newd ) return newd end -- -- Writes a status report about delays in this sync. -- local function statusReport( self, f ) local spaces = ' ' f:write( self.config.name, ' source=', self.source, '\n' ) f:write( 'There are ', self.delays.size, ' delays\n') for i, vd in Queue.qpairs( self.delays ) do local st = vd.status f:write( st, string.sub( spaces, 1, 7 - #st ) ) f:write( vd.etype, ' ' ) f:write( vd.path ) if vd.path2 then f:write( ' -> ',vd.path2 ) end f:write('\n') end f:write( 'Excluding:\n' ) local nothing = true for t, p in pairs( self.excludes.list ) do nothing = false f:write( t,'\n' ) end if nothing then f:write(' nothing.\n') end f:write( '\n' ) end -- -- Creates a new Sync -- local function new( config ) local s = { -- fields config = config, delays = Queue.new( ), source = config.source, processes = CountArray.new( ), excludes = Excludes.new( ), -- functions addBlanketDelay = addBlanketDelay, addExclude = addExclude, addInitDelay = addInitDelay, collect = collect, concerns = concerns, delay = delay, getAlarm = getAlarm, getDelays = getDelays, getNextDelay = getNextDelay, invokeActions = invokeActions, removeDelay = removeDelay, rmExclude = rmExclude, statusReport = statusReport, } s.inlet = InletFactory.newInlet( s ) -- provides a default name if needed if not config.name then config.name = 'Sync' .. nextDefaultName end -- increments defaults if a config name was given or not -- so Sync{n} will be the n-th call to sync{} nextDefaultName = nextDefaultName + 1 -- loads exclusions if config.exclude then local te = type( config.exclude ) if te == 'table' then s.excludes:addList( config.exclude ) elseif te == 'string' then s.excludes:add( config.exclude ) else error( 'type for exclude must be table or string', 2 ) end end if config.delay ~= nil and ( type(config.delay) ~= 'number' or config.delay < 0 ) then error( 'delay must be a number and >= 0', 2 ) end if config.excludeFrom then s.excludes:loadFile( config.excludeFrom ) end return s end -- -- Public interface -- return { new = new } end )( ) -- -- Syncs - a singleton -- -- Syncs maintains all configured syncs. -- local Syncs = ( function( ) -- -- the list of all syncs -- local syncsList = Array.new( ) -- -- The round robin pointer. In case of global limited maxProcesses -- gives every sync equal chances to spawn the next process. -- local round = 1 -- -- The cycle( ) sheduler goes into the next round of roundrobin. -- local function nextRound( ) round = round + 1; if round > #syncsList then round = 1 end return round end -- -- Returns the round -- local function getRound( ) return round end -- -- Returns sync at listpos i -- local function get( i ) return syncsList[ i ]; end -- -- Helper function for inherit -- defined below -- local inheritKV -- -- Recurvely inherits a source table to a destionation table -- copying all keys from source. 
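	-- For example ( with made-up values ): in
	--   sync{ default.rsync, source = '/src/', target = '/dst/' }
	-- the integer-keyed entry default.rsync is merged recursively into
	-- the resulting config, while 'source' and 'target' are copied as
	-- plain keys.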
-- -- table copy source ( cs ) -- table copy destination ( cd ) -- -- All entries with integer keys are inherited as additional -- sources for non-verbatim tables -- local function inherit( cd, cs ) -- -- First copies all entries with non-integer keys -- tables are merged, already present keys are not -- overwritten -- -- For verbatim tables integer keys are treated like -- non integer keys -- for k, v in pairs( cs ) do if ( type( k ) ~= 'number' or cs._verbatim == true ) and ( type( cs._merge ) ~= 'table' or cs._merge[ k ] == true ) then inheritKV( cd, k, v ) end end -- -- recursevely inherits all integer keyed tables -- ( for non-verbatim tables ) -- if cs._verbatim ~= true then local n = nil for k, v in ipairs( cs ) do n = k if type( v ) == 'table' then inherit( cd, v ) else cd[ #cd + 1 ] = v end end end end -- -- Helper to inherit. Inherits one key. -- inheritKV = function( cd, k, v ) -- don't merge inheritance controls if k == '_merge' or k == '_verbatim' then return end local dtype = type( cd [ k ] ) if type( v ) == 'table' then if dtype == 'nil' then cd[ k ] = { } inherit( cd[ k ], v ) elseif dtype == 'table' and v._merge ~= false then inherit( cd[ k ], v ) end elseif dtype == 'nil' then cd[ k ] = v end end -- -- Adds a new sync (directory-tree to observe). -- local function add( config ) -- workaround for backwards compatibility -- FIXME: remove when dropping that if settings ~= settingsSafe then log( 'Warn', 'settings = { ... } is deprecated.\n'.. ' please use settings{ ... } (without the equal sign)' ) for k, v in pairs( settings ) do uSettings[ k ] = v end settings = settingsSafe end -- Creates a new config table which inherits all keys/values -- from integer keyed tables local uconfig = config config = { } inherit( config, uconfig ) -- -- last and least defaults are inherited -- inherit( config, default ) local inheritSettings = { 'delay', 'maxDelays', 'maxProcesses' } -- Lets settings override these values. for _, v in ipairs( inheritSettings ) do if uSettings[ v ] then config[ v ] = uSettings[ v ] end end -- Lets commandline override these values. for _, v in ipairs( inheritSettings ) do if clSettings[ v ] then config[ v ] = clSettings[ v ] end end -- -- lets the userscript 'prepare' function -- check and complete the config -- if type( config.prepare ) == 'function' then -- prepare is given a writeable copy of config config.prepare( config, 4 ) end if not config[ 'source' ] then local info = debug.getinfo( 3, 'Sl' ) log( 'Error', info.short_src,':', info.currentline,': source missing from sync.' ) terminate( -1 ) end -- -- absolute path of source -- local realsrc = lsyncd.realdir( config.source ) if not realsrc then log( 'Error', 'Cannot access source directory: ', config.source ) terminate( -1 ) end config._source = config.source config.source = realsrc if not config.action and not config.onAttrib and not config.onCreate and not config.onModify and not config.onDelete and not config.onMove then local info = debug.getinfo( 3, 'Sl' ) log( 'Error', info.short_src, ':', info.currentline, ': no actions specified.' ) terminate( -1 ) end -- the monitor to use config.monitor = uSettings.monitor or config.monitor or Monitors.default( ) if config.monitor ~= 'inotify' and config.monitor ~= 'fsevents' then local info = debug.getinfo( 3, 'Sl' ) log( 'Error', info.short_src, ':', info.currentline, ': event monitor "', config.monitor, '" unknown.' 
) terminate( -1 ) end --- creates the new sync local s = Sync.new( config ) table.insert( syncsList, s ) return s end -- -- Allows a for-loop to walk through all syncs. -- local function iwalk( ) return ipairs( syncsList ) end -- -- Returns the number of syncs. -- local size = function( ) return #syncsList end -- -- Tests if any sync is interested in a path. -- local function concerns( path ) for _, s in ipairs( syncsList ) do if s:concerns( path ) then return true end end return false end -- -- Public interface -- return { add = add, get = get, getRound = getRound, concerns = concerns, iwalk = iwalk, nextRound = nextRound, size = size } end )( ) -- -- Utility function, -- Returns the relative part of absolute path if it -- begins with root -- local function splitPath( path, root ) local rlen = #root local sp = string.sub( path, 1, rlen ) if sp == root then return string.sub( path, rlen, -1 ) else return nil end end -- -- Interface to inotify. -- -- watches recursively subdirs and sends events. -- -- All inotify specific implementation is enclosed here. -- local Inotify = ( function( ) -- -- A list indexed by inotify watch descriptors yielding -- the directories absolute paths. -- local wdpaths = CountArray.new( ) -- -- The same vice versa, -- all watch descriptors by their absolute paths. -- local pathwds = { } -- -- A list indexed by syncs containing yielding -- the root paths the syncs are interested in. -- local syncRoots = { } -- -- Stops watching a directory -- -- path ... absolute path to unwatch -- core ... if false not actually send the unwatch to the kernel -- (used in moves which reuse the watch) -- local function removeWatch( path, core ) local wd = pathwds[ path ] if not wd then return end if core then lsyncd.inotify.rmwatch( wd ) end wdpaths[ wd ] = nil pathwds[ path ] = nil end -- -- Adds watches for a directory (optionally) including all subdirectories. -- -- @param path absolute path of directory to observe -- @param recurse true if recursing into subdirs -- local function addWatch(path) log( 'Function', 'Inotify.addWatch( ', path, ' )' ) if not Syncs.concerns(path) then log('Inotify', 'not concerning "',path,'"') return end -- registers the watch local inotifyMode = ( uSettings and uSettings.inotifyMode ) or ''; local wd = lsyncd.inotify.addwatch( path, inotifyMode) ; if wd < 0 then log( 'Inotify','Unable to add watch "', path, '"' ) return end do -- If this watch descriptor is registered already -- the kernel reuses it since old dir is gone. local op = wdpaths[ wd ] if op and op ~= path then pathwds[ op ] = nil end end pathwds[ path ] = wd wdpaths[ wd ] = path -- registers and adds watches for all subdirectories local entries = lsyncd.readdir( path ) if not entries then return end for dirname, isdir in pairs( entries ) do if isdir then addWatch( path .. dirname .. '/' ) end end end -- -- Adds a Sync to receive events. -- -- sync: Object to receive events -- rootdir: root dir to watch -- local function addSync( sync, rootdir ) if syncRoots[ sync ] then error( 'duplicate sync in Inotify.addSync()' ) end syncRoots[ sync ] = rootdir addWatch( rootdir ) end -- -- Called when an event has occured. 
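	-- ( illustrative example with hypothetical values: if watch
	--   descriptor 7 maps to '/src/a/' in wdpaths and a directory
	--   'new' is created inside it, the core calls
	--   event( 'Create', 7, true, time, 'new' ) and the path becomes
	--   '/src/a/new/', directories always carrying a trailing slash )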
-- local function event( etype, -- 'Attrib', 'Modify', 'Create', 'Delete', 'Move' wd, -- watch descriptor, matches lsyncd.inotifyadd() isdir, -- true if filename is a directory time, -- time of event filename, -- string filename without path wd2, -- watch descriptor for target if it's a Move filename2 -- string filename without path of Move target ) if isdir then filename = filename .. '/' if filename2 then filename2 = filename2 .. '/' end end if filename2 then log( 'Inotify', 'got event ', etype, ' ', filename, '(', wd, ') to ', filename2, '(', wd2 ,')' ) else log( 'Inotify', 'got event ', etype, ' ', filename, '(', wd, ')' ) end -- looks up the watch descriptor id local path = wdpaths[ wd ] if path then path = path..filename end local path2 = wd2 and wdpaths[ wd2 ] if path2 and filename2 then path2 = path2..filename2 end if not path and path2 and etype == 'Move' then log( 'Inotify', 'Move from deleted directory ', path2, ' becomes Create.' ) path = path2 path2 = nil etype = 'Create' end if not path then -- this is normal in case of deleted subdirs log( 'Inotify', 'event belongs to unknown watch descriptor.' ) return end for sync, root in pairs( syncRoots ) do repeat local relative = splitPath( path, root ) local relative2 = nil if path2 then relative2 = splitPath( path2, root ) end if not relative and not relative2 then -- sync is not interested in this dir break -- continue end -- makes a copy of etype to possibly change it local etyped = etype if etyped == 'Move' then if not relative2 then log( 'Normal', 'Transformed Move to Delete for ', sync.config.name ) etyped = 'Delete' elseif not relative then relative = relative2 relative2 = nil log( 'Normal', 'Transformed Move to Create for ', sync.config.name ) etyped = 'Create' end end if isdir then if etyped == 'Create' then addWatch( path ) elseif etyped == 'Delete' then removeWatch( path, true ) elseif etyped == 'Move' then removeWatch( path, false ) addWatch( path2 ) end end sync:delay( etyped, time, relative, relative2 ) until true end end -- -- Writes a status report about inotify to a file descriptor -- local function statusReport( f ) f:write( 'Inotify watching ', wdpaths:size(), ' directories\n' ) for wd, path in wdpaths:walk( ) do f:write( ' ', wd, ': ', path, '\n' ) end end -- -- Public interface. -- return { addSync = addSync, event = event, statusReport = statusReport, } end)( ) -- -- Interface to OSX /dev/fsevents -- -- This watches all the filesystems at once, -- but needs root access. -- -- All fsevents specific implementation are enclosed here. -- local Fsevents = ( function( ) -- -- A list indexed by syncs yielding -- the root path the sync is interested in. -- local syncRoots = { } -- -- Adds a Sync to receive events. -- -- @param sync Object to receive events -- @param dir dir to watch -- local function addSync( sync, dir ) if syncRoots[ sync ] then error( 'duplicate sync in Fanotify.addSync()' ) end syncRoots[ sync ] = dir end -- -- Called when an event has occured. -- local function event( etype, -- 'Attrib', 'Modify', 'Create', 'Delete', 'Move' isdir, -- true if filename is a directory time, -- time of event path, -- path of file path2 -- path of target in case of 'Move' ) if isdir then path = path .. '/' if path2 then path2 = path2 .. 
'/' end end log( 'Fsevents', etype, ',', isdir, ',', time, ',', path, ',', path2 ) for _, sync in Syncs.iwalk() do repeat local root = sync.source -- TODO combine ifs if not path:starts( root ) then if not path2 or not path2:starts( root ) then break -- continue end end local relative = splitPath( path, root ) local relative2 if path2 then relative2 = splitPath( path2, root ) end -- possibly change etype for this iteration only local etyped = etype if etyped == 'Move' then if not relative2 then log('Normal', 'Transformed Move to Delete for ', sync.config.name) etyped = 'Delete' elseif not relative then relative = relative2 relative2 = nil log('Normal', 'Transformed Move to Create for ', sync.config.name) etyped = 'Create' end end sync:delay( etyped, time, relative, relative2 ) until true end end -- -- Writes a status report about fsevents to a filedescriptor. -- local function statusReport( f ) -- TODO end -- -- Public interface -- return { addSync = addSync, event = event, statusReport = statusReport } end )( ) -- -- Holds information about the event monitor capabilities -- of the core. -- Monitors = ( function( ) -- -- The cores monitor list -- local list = { } -- -- The default event monitor. -- local function default( ) return list[ 1 ] end -- -- Initializes with info received from core -- local function initialize( clist ) for k, v in ipairs( clist ) do list[ k ] = v end end -- -- Public interface -- return { default = default, list = list, initialize = initialize } end)( ) -- -- Writes functions for the user for layer 3 configurations. -- local functionWriter = ( function( ) -- -- All variables known to layer 3 configs. -- transVars = { { '%^pathname', 'event.pathname', 1 }, { '%^pathdir', 'event.pathdir', 1 }, { '%^path', 'event.path', 1 }, { '%^sourcePathname', 'event.sourcePathname', 1 }, { '%^sourcePathdir', 'event.sourcePathdir', 1 }, { '%^sourcePath', 'event.sourcePath', 1 }, { '%^source', 'event.source', 1 }, { '%^targetPathname', 'event.targetPathname', 1 }, { '%^targetPathdir', 'event.targetPathdir', 1 }, { '%^targetPath', 'event.targetPath', 1 }, { '%^target', 'event.target', 1 }, { '%^o%.pathname', 'event.pathname', 1 }, { '%^o%.path', 'event.path', 1 }, { '%^o%.sourcePathname', 'event.sourcePathname', 1 }, { '%^o%.sourcePathdir', 'event.sourcePathdir', 1 }, { '%^o%.sourcePath', 'event.sourcePath', 1 }, { '%^o%.targetPathname', 'event.targetPathname', 1 }, { '%^o%.targetPathdir', 'event.targetPathdir', 1 }, { '%^o%.targetPath', 'event.targetPath', 1 }, { '%^d%.pathname', 'event2.pathname', 2 }, { '%^d%.path', 'event2.path', 2 }, { '%^d%.sourcePathname', 'event2.sourcePathname', 2 }, { '%^d%.sourcePathdir', 'event2.sourcePathdir', 2 }, { '%^d%.sourcePath', 'event2.sourcePath', 2 }, { '%^d%.targetPathname', 'event2.targetPathname', 2 }, { '%^d%.targetPathdir', 'event2.targetPathdir', 2 }, { '%^d%.targetPath', 'event2.targetPath', 2 }, } -- -- Splits a user string into its arguments. -- Returns a table of arguments -- local function splitStr( str -- a string where parameters are seperated by spaces. 
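		-- ( e.g., as an illustration only:
		--   splitStr( 'rsync -av' ) returns { 'rsync', '-av' } )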
) local args = { } while str ~= '' do -- break where argument stops local bp = #str -- in a quote local inQuote = false -- tests characters to be space and not within quotes for i=1, #str do local c = string.sub( str, i, i ) if c == '"' then inQuote = not inQuote elseif c == ' ' and not inQuote then bp = i - 1 break end end local arg = string.sub( str, 1, bp ) arg = string.gsub( arg, '"', '\\"' ) table.insert( args, arg ) str = string.sub( str, bp + 1, -1 ) str = string.match( str, '^%s*(.-)%s*$' ) end return args end -- -- Translates a call to a binary to a lua function. -- TODO this has a little too blocking. -- local function translateBinary( str ) -- splits the string local args = splitStr( str ) -- true if there is a second event local haveEvent2 = false for ia, iv in ipairs( args ) do -- a list of arguments this arg is being split into local a = { { true, iv } } -- goes through all translates for _, v in ipairs( transVars ) do local ai = 1 while ai <= #a do if a[ ai ][ 1 ] then local pre, post = string.match( a[ ai ][ 2 ], '(.*)'..v[1]..'(.*)' ) if pre then if v[3] > 1 then haveEvent2 = true end if pre ~= '' then table.insert( a, ai, { true, pre } ) ai = ai + 1 end a[ ai ] = { false, v[ 2 ] } if post ~= '' then table.insert( a, ai + 1, { true, post } ) end end end ai = ai + 1 end end -- concats the argument pieces into a string. local as = '' local first = true for _, v in ipairs( a ) do if not first then as = as..' .. ' end if v[ 1 ] then as = as .. '"' .. v[ 2 ] .. '"' else as = as .. v[ 2 ] end first = false end args[ ia ] = as end local ft if not haveEvent2 then ft = 'function(event)\n' else ft = 'function(event, event2)\n' end ft = ft .. " log('Normal', 'Event ', event.etype, \n" .. " ' spawns action \"".. str.."\"')\n" .. " spawn(event" for _, v in ipairs( args ) do ft = ft .. ',\n ' .. v end ft = ft .. ')\nend' return ft end -- -- Translates a call using a shell to a lua function -- local function translateShell( str ) local argn = 1 local args = { } local cmd = str local lc = str -- true if there is a second event local haveEvent2 = false for _, v in ipairs( transVars ) do local occur = false cmd = string.gsub( cmd, v[ 1 ], function( ) occur = true return '"$' .. argn .. '"' end ) lc = string.gsub( lc, v[1], ']]..' .. v[2] .. '..[[' ) if occur then argn = argn + 1 table.insert( args, v[ 2 ] ) if v[ 3 ] > 1 then haveEvent2 = true end end end local ft if not haveEvent2 then ft = 'function(event)\n' else ft = 'function(event, event2)\n' end -- TODO do array joining instead ft = ft.. " log('Normal', 'Event ',event.etype,\n".. " [[ spawns shell \""..lc.."\"]])\n".. " spawnShell(event, [["..cmd.."]]" for _, v in ipairs( args ) do ft = ft..',\n '..v end ft = ft .. ')\nend' return ft end -- -- Writes a lua function for a layer 3 user script. -- local function translate( str ) -- trim spaces str = string.match( str, '^%s*(.-)%s*$' ) local ft if string.byte( str, 1, 1 ) == 47 then -- starts with / ft = translateBinary( str ) elseif string.byte( str, 1, 1 ) == 94 then -- starts with ^ ft = translateShell( str:sub( 2, -1 ) ) else ft = translateShell( str ) end log( 'FWrite', 'translated "', str, '" to \n', ft ) return ft end -- -- Public interface. -- return { translate = translate } end )( ) -- -- Writes a status report file at most every [statusintervall] seconds. -- local StatusFile = ( function( ) -- -- Timestamp when the status file has been written. -- local lastWritten = false -- -- Timestamp when a status file should be written. 
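	-- ( behaviour sketch, assuming statusInterval = 10: a write( )
	--   request arriving right after a previous write only records an
	--   alarm of "now + 10" and returns; when the core calls write( )
	--   again at or after that alarm, the report is actually written )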
-- local alarm = false -- -- Returns the alarm when the status file should be written- -- local function getAlarm() return alarm end -- -- Called to check if to write a status file. -- local function write( timestamp ) log( 'Function', 'write( ', timestamp, ' )' ) -- -- takes care not write too often -- if uSettings.statusInterval > 0 then -- already waiting? if alarm and timestamp < alarm then log( 'Statusfile', 'waiting(', timestamp, ' < ', alarm, ')' ) return end -- determines when a next write will be possible if not alarm then local nextWrite = lastWritten and timestamp + uSettings.statusInterval if nextWrite and timestamp < nextWrite then log( 'Statusfile', 'setting alarm: ', nextWrite ) alarm = nextWrite return end end lastWritten = timestamp alarm = false end log( 'Statusfile', 'writing now' ) local f, err = io.open( uSettings.statusFile, 'w' ) if not f then log( 'Error', 'Cannot open status file "' .. uSettings.statusFile .. '" :' .. err ) return end f:write( 'Lsyncd status report at ', os.date( ), '\n\n' ) for i, s in Syncs.iwalk( ) do s:statusReport( f ) f:write( '\n' ) end Inotify.statusReport( f ) f:close( ) end -- -- Public interface -- return { write = write, getAlarm = getAlarm } end )( ) -- -- Lets userscripts make their own alarms. -- local UserAlarms = ( function( ) local alarms = { } -- -- Calls the user function at timestamp. -- local function alarm( timestamp, func, extra ) local idx for k, v in ipairs( alarms ) do if timestamp < v.timestamp then idx = k break end end local a = { timestamp = timestamp, func = func, extra = extra } if idx then table.insert( alarms, idx, a ) else table.insert( alarms, a ) end end -- -- Retrieves the soonest alarm. -- local function getAlarm( ) if #alarms == 0 then return false else return alarms[1].timestamp end end -- -- Calls user alarms. -- local function invoke( timestamp ) while #alarms > 0 and alarms[ 1 ].timestamp <= timestamp do alarms[ 1 ].func( alarms[ 1 ].timestamp, alarms[ 1 ].extra ) table.remove( alarms, 1 ) end end -- -- Public interface -- return { alarm = alarm, getAlarm = getAlarm, invoke = invoke } end )( ) --============================================================================ -- Lsyncd runner's plugs. These functions are called from core. --============================================================================ -- -- Current status of Lsyncd. -- -- 'init' ... on (re)init -- 'run' ... normal operation -- 'fade' ... waits for remaining processes -- local lsyncdStatus = 'init' -- -- The cores interface to the runner. -- local runner = { } -- -- Last time said to be waiting for more child processes -- local lastReportedWaiting = false -- -- Called from core whenever Lua code failed. -- -- Logs a backtrace -- function runner.callError( message ) log('Error', 'in Lua: ', message ) -- prints backtrace local level = 2 while true do local info = debug.getinfo( level, 'Sl' ) if not info then terminate( -1 ) end log( 'Error', 'Backtrace ', level - 1, ' :', info.short_src, ':', info.currentline ) level = level + 1 end end -- -- Called from core whenever a child process has finished and -- the zombie process was collected by core. -- function runner.collectProcess( pid, exitcode ) processCount = processCount - 1 if processCount < 0 then error( 'negative number of processes!' ) end for _, s in Syncs.iwalk() do if s:collect(pid, exitcode) then return end end end -- -- Called from core everytime a masterloop cycle runs through. -- -- This happens in case of -- * an expired alarm. -- * a returned child process. 
-- * received filesystem events. -- * received a HUP, TERM or INT signal. -- function runner.cycle( timestamp -- the current kernel time (in jiffies) ) if lsyncdStatus == 'fade' then if processCount > 0 then if lastReportedWaiting == false or timestamp >= lastReportedWaiting + 60 then lastReportedWaiting = timestamp log( 'Normal', 'waiting for ', processCount, ' more child processes.' ) end return true else return false end end if lsyncdStatus ~= 'run' then error( 'runner.cycle() called while not running!' ) end -- -- goes through all syncs and spawns more actions -- if possibly. But only let Syncs invoke actions if -- not at global limit -- if not uSettings.maxProcesses or processCount < uSettings.maxProcesses then local start = Syncs.getRound( ) local ir = start repeat local s = Syncs.get( ir ) s:invokeActions( timestamp ) ir = ir + 1 if ir > Syncs.size( ) then ir = 1 end until ir == start Syncs.nextRound( ) end UserAlarms.invoke( timestamp ) if uSettings.statusFile then StatusFile.write( timestamp ) end return true end -- -- Called by core if '-help' or '--help' is in -- the arguments. -- function runner.help( ) io.stdout:write( [[ USAGE: runs a config file: lsyncd [OPTIONS] [CONFIG-FILE] default rsync behaviour: lsyncd [OPTIONS] -rsync [SOURCE] [TARGET] default rsync with mv's through ssh: lsyncd [OPTIONS] -rsyncssh [SOURCE] [HOST] [TARGETDIR] default local copying mechanisms (cp|mv|rm): lsyncd [OPTIONS] -direct [SOURCE] [TARGETDIR] OPTIONS: -delay SECS Overrides default delay times -help Shows this -insist Continues startup even if it cannot connect -log all Logs everything (debug) -log scarce Logs errors only -log [Category] Turns on logging for a debug category -logfile FILE Writes log to FILE (DEFAULT: uses syslog) -nodaemon Does not detach and logs to stdout/stderr -pidfile FILE Writes Lsyncds PID into FILE -runner FILE Loads Lsyncds lua part from FILE -version Prints versions and exits LICENSE: GPLv2 or any later version. SEE: `man lsyncd` for further information. ]]) -- -- -monitor NAME Uses operating systems event montior NAME -- (inotify/fanotify/fsevents) os.exit( -1 ) end -- -- Called from core to parse the command line arguments -- -- returns a string as user script to load. -- or simply 'true' if running with rsync bevaiour -- -- terminates on invalid arguments. -- function runner.configure( args, monitors ) Monitors.initialize( monitors ) -- -- a list of all valid options -- -- first paramter is the number of parameters an option takes -- if < 0 the called function has to check the presence of -- optional arguments. -- -- second paramter is the function to call -- local options = { -- log is handled by core already. 
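		-- ( format sketch: a hypothetical entry
		--     foo = { 1, function( arg ) clSettings.foo = arg end },
		--   would declare an option '-foo' taking exactly one argument;
		--   'foo' does not exist and only illustrates the
		--   { #arguments, handler } layout used below )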
delay = { 1, function( secs ) clSettings.delay = secs + 0 end }, insist = { 0, function( ) clSettings.insist = true end }, log = { 1, nil }, logfile = { 1, function( file ) clSettings.logfile = file end }, monitor = { -1, function( monitor ) if not monitor then io.stdout:write( 'This Lsyncd supports these monitors:\n' ) for _, v in ipairs(Monitors.list) do io.stdout:write(' ',v,'\n') end io.stdout:write('\n') lsyncd.terminate(-1) else clSettings.monitor = monitor end end }, nodaemon = { 0, function( ) clSettings.nodaemon = true end }, pidfile = { 1, function( file ) clSettings.pidfile=file end }, rsync = { 2, function( src, trg ) clSettings.syncs = clSettings.syncs or { } table.insert( clSettings.syncs, { 'rsync', src, trg } ) end }, rsyncssh = { 3, function( src, host, tdir ) clSettings.syncs = clSettings.syncs or { } table.insert( clSettings.syncs, { 'rsyncssh', src, host, tdir } ) end }, direct = { 2, function( src, trg ) clSettings.syncs = clSettings.syncs or { } table.insert( clSettings.syncs, { 'direct', src, trg } ) end }, version = { 0, function( ) io.stdout:write( 'Version: ', lsyncd_version, '\n' ) os.exit( 0 ) end } } -- non-opts is filled with all args that were no part dash options local nonopts = { } local i = 1 while i <= #args do local a = args[ i ] if a:sub( 1, 1 ) ~= '-' then table.insert( nonopts, args[ i ] ) else if a:sub( 1, 2 ) == '--' then a = a:sub( 3 ) else a = a:sub( 2 ) end local o = options[ a ] if not o then log( 'Error', 'unknown option command line option ', args[i] ) os.exit( -1 ) end if o[ 1 ] >= 0 and i + o[ 1 ] > #args then log( 'Error', a ,' needs ', o[ 1 ],' arguments' ) os.exit( -1 ) elseif o[1] < 0 then o[ 1 ] = -o[ 1 ] end if o[ 2 ] then if o[ 1 ] == 0 then o[ 2 ]( ) elseif o[ 1 ] == 1 then o[ 2 ]( args[i + 1] ) elseif o[ 1 ] == 2 then o[ 2 ]( args[i + 1], args[i + 2] ) elseif o[ 1 ] == 3 then o[ 2 ]( args[i + 1], args[i + 2], args[i + 3] ) end end i = i + o[1] end i = i + 1 end if clSettings.syncs then if #nonopts ~= 0 then log( 'Error', 'There cannot be command line syncs and config file together.' ) os.exit( -1 ) end else if #nonopts == 0 then runner.help( args[ 0 ] ) elseif #nonopts == 1 then return nonopts[ 1 ] else -- TODO make this possible log( 'Error', 'There can only be one config file in command line.' ) os.exit( -1 ) end end end -- -- Called from core on init or restart after user configuration. -- -- firstTime: -- true when Lsyncd startups the first time, -- false on resets, due to HUP signal or monitor queue overflow. -- function runner.initialize( firstTime ) if settings ~= settingsSafe then log( 'Warn', 'settings = { ... } is deprecated.\n'.. ' please use settings{ ... } (without the equal sign)' ) for k, v in pairs( settings ) do uSettings[ k ] = v end end lastReportedWaiting = false -- -- From this point on, no globals may be created anymore -- lockGlobals( ) -- -- copies simple settings with numeric keys to 'key = true' settings. -- -- FIXME this can be removed when -- Lsyncd 2.0.x backwards compatibility is dropped -- for k, v in ipairs( uSettings ) do if uSettings[ v ] then log( 'Error', 'Double setting "' .. v.. '"' ) os.exit( -1 ) end uSettings[ v ]= true end -- -- all command line settings overwrite config file settings -- for k, v in pairs( clSettings ) do if k ~= 'syncs' then uSettings[ k ] = v end end -- -- implicitly forces 'insist' on Lsyncd resets. -- if not firstTime then uSettings.insist = true end -- -- adds syncs specified by command line. 
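	-- ( for example, with hypothetical paths: a command line
	--   'lsyncd -rsync /src /dst' was stored by runner.configure( ) as
	--   { 'rsync', '/src', '/dst' } and becomes
	--   sync{ default.rsync, source = '/src', target = '/dst' } below )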
-- if clSettings.syncs then for _, s in ipairs( clSettings.syncs ) do if s[ 1 ] == 'rsync' then sync{ default.rsync, source = s[ 2 ], target = s[ 3 ] } elseif s[ 1 ] == 'rsyncssh' then sync{ default.rsyncssh, source = s[ 2 ], host = s[ 3 ], targetdir=s[ 4 ] } elseif s[ 1 ] == 'direct' then sync{ default.direct, source=s[ 2 ], target=s[ 3 ] } end end end if uSettings.nodaemon then lsyncd.configure( 'nodaemon' ) end if uSettings.logfile then lsyncd.configure( 'logfile', uSettings.logfile ) end if uSettings.logident then lsyncd.configure( 'logident', uSettings.logident ) end if uSettings.logfacility then lsyncd.configure( 'logfacility', uSettings.logfacility ) end if uSettings.pidfile then lsyncd.configure( 'pidfile', uSettings.pidfile ) end -- -- Transfers some defaults to uSettings -- if uSettings.statusInterval == nil then uSettings.statusInterval = default.statusInterval end -- makes sure the user gave Lsyncd anything to do if Syncs.size() == 0 then log( 'Error', 'Nothing to watch!' ) os.exit( -1 ) end -- from now on use logging as configured instead of stdout/err. lsyncdStatus = 'run'; lsyncd.configure( 'running' ); local ufuncs = { 'onAttrib', 'onCreate', 'onDelete', 'onModify', 'onMove', 'onStartup', } -- translates layer 3 scripts for _, s in Syncs.iwalk() do -- checks if any user functions is a layer 3 string. local config = s.config for _, fn in ipairs(ufuncs) do if type(config[fn]) == 'string' then local ft = functionWriter.translate(config[fn]) config[fn] = assert(loadstring('return '..ft))() end end end -- runs through the Syncs created by users for _, s in Syncs.iwalk( ) do if s.config.monitor == 'inotify' then Inotify.addSync( s, s.source ) elseif s.config.monitor == 'fsevents' then Fsevents.addSync( s, s.source ) else error( 'sync ' .. s.config.name .. ' has no known event monitor interface.' ) end -- if the sync has an init function, the init delay -- is stacked which causes the init function to be called. if s.config.init then s:addInitDelay( ) end end end -- -- Called by core to query the soonest alarm. -- -- @return false ... no alarm, core can in untimed sleep, or -- true ... immediate action -- times ... the alarm time (only read if number is 1) -- function runner.getAlarm( ) if lsyncdStatus ~= 'run' then return false end local alarm = false -- -- Checks if 'a' is sooner than the 'alarm' up-value. -- local function checkAlarm( a ) if a == nil then error('got nil alarm') end if alarm == true or not a then -- 'alarm' is already immediate or -- a not a new alarm return end -- sets 'alarm' to a if a is sooner if not alarm or a < alarm then alarm = a end end -- -- checks all syncs for their earliest alarm, -- but only if the global process limit is not yet reached. -- if not uSettings.maxProcesses or processCount < uSettings.maxProcesses then for _, s in Syncs.iwalk( ) do checkAlarm( s:getAlarm ( )) end else log( 'Alarm', 'at global process limit.' 
) end -- checks if a statusfile write has been delayed checkAlarm( StatusFile.getAlarm( ) ) -- checks for an userAlarm checkAlarm( UserAlarms.getAlarm( ) ) log( 'Alarm', 'runner.getAlarm returns: ', alarm ) return alarm end -- -- Called when an file system monitor events arrive -- runner.inotifyEvent = Inotify.event runner.fsEventsEvent = Fsevents.event -- -- Collector for every child process that finished in startup phase -- function runner.collector( pid, -- pid of the child process exitcode -- exitcode of the child process ) if exitcode ~= 0 then log('Error', 'Startup process',pid,' failed') terminate( -1 ) end return 0 end -- -- Called by core when an overflow happened. -- function runner.overflow( ) log( 'Normal', '--- OVERFLOW in event queue ---' ) lsyncdStatus = 'fade' end -- -- Called by core on a hup signal. -- function runner.hup( ) log( 'Normal', '--- HUP signal, resetting ---' ) lsyncdStatus = 'fade' end -- -- Called by core on a term signal. -- function runner.term( sigcode ) local sigtexts = { [ 2 ] = 'INT', [ 15 ] = 'TERM' }; local sigtext = sigtexts[ sigcode ]; if not sigtext then sigtext = 'UNKNOWN' end log( 'Normal', '--- ', sigtext, ' signal, fading ---' ) lsyncdStatus = 'fade' end --============================================================================ -- Lsyncd runner's user interface --============================================================================ -- -- Main utility to create new observations. -- -- Returns an Inlet to that sync. -- function sync( opts ) if lsyncdStatus ~= 'init' then error( 'Sync can only be created during initialization.', 2 ) end return Syncs.add( opts ).inlet end -- -- Spawns a new child process. -- function spawn( agent, -- the reason why a process is spawned. -- a delay or delay list for a sync -- it will mark the related files as blocked. binary, -- binary to call ... -- arguments ) if agent == nil or type( agent ) ~= 'table' then error( 'spawning with an invalid agent', 2 ) end if lsyncdStatus == 'fade' then log( 'Normal', 'ignored process spawning while fading' ) return end if type( binary ) ~= 'string' then error( 'calling spawn(agent, binary, ...): binary is not a string', 2 ) end local dol = InletFactory.getDelayOrList( agent ) if not dol then error( 'spawning with an unknown agent', 2 ) end -- -- checks if a spawn is called on an already active event -- if dol.status then -- is an event if dol.status ~= 'wait' then error('spawn() called on an non-waiting event', 2) end else -- is a list for _, d in ipairs(dol) do if d.status ~= 'wait' and d.status ~= 'block' then error('spawn() called on an non-waiting event list', 2) end end end -- -- tries to spawn the process -- local pid = lsyncd.exec( binary, ... ) if pid and pid > 0 then processCount = processCount + 1 if uSettings.maxProcesses and processCount > uSettings.maxProcesses then error( 'Spawned too much processes!' ) end local sync = InletFactory.getSync( agent ) -- delay or list if dol.status then -- is a delay dol.status = 'active' sync.processes[ pid ] = dol else -- is a list for _, d in ipairs( dol ) do d.status = 'active' end sync.processes[ pid ] = dol end end end -- -- Spawns a child process using the default shell. -- function spawnShell( agent, -- the delay(list) to spawn the command for command, -- the shell command ... -- additonal arguments ) return spawn( agent, '/bin/sh', '-c', command, '/bin/sh', ... 
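		-- the second '/bin/sh' becomes $0 of the spawned shell, so the
		-- additional arguments appear as "$1", "$2", ... inside the
		-- command; an illustrative ( hypothetical ) call is
		--   spawnShell( event, 'cp "$1" "$2"', event.sourcePath, event.targetPath )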
) end ----- -- Observes a filedescriptor -- function observefd( fd, -- file descriptor ready, -- called when fd is ready to be read writey -- called when fd is ready to be written ) return lsyncd.observe_fd( fd, ready, writey ) end -- -- Stops observeing a filedescriptor -- function nonobservefd( fd -- file descriptor ) return lsyncd.nonobserve_fd( fd ) end -- -- Calls func at timestamp. -- -- Use now() to receive current timestamp -- add seconds with '+' to it -- alarm = UserAlarms.alarm -- -- Comfort routine also for user. -- Returns true if 'String' starts with 'Start' -- function string.starts( String, Start ) return string.sub( String, 1, #Start )==Start end -- -- Comfort routine also for user. -- Returns true if 'String' ends with 'End' -- function string.ends( String, End ) return End == '' or string.sub( String, -#End ) == End end -- -- The Lsyncd 2.1 settings call -- function settings( a1 ) -- if a1 is a string this is a get operation if type( a1 ) == 'string' then return uSettings[ a1 ] end -- if its a table it sets all the value of the bale for k, v in pairs( a1 ) do if type( k ) ~= 'number' then uSettings[ k ] = v else uSettings[ v ] = true end end end settingsSafe = settings -- -- Returns the core the runners function interface. -- return runner lsyncd-release-2.1.6/m4/000077500000000000000000000000001260764373300147655ustar00rootroot00000000000000lsyncd-release-2.1.6/m4/ax_subst_l.m4000066400000000000000000000007041260764373300173730ustar00rootroot00000000000000 # ax_subst_l.m4 - Substitute every var in the given comma seperated list -*-Autoconf-*- # # Copyright (C) 2012 Dennis Schridde # # This file is free software; the authors give # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # serial 1 # Substitute every var in the given comma seperated list AC_DEFUN([AX_SUBST_L],[ m4_foreach([__var__], [$@], [AC_SUBST(__var__)]) ]) lsyncd-release-2.1.6/tests/000077500000000000000000000000001260764373300156075ustar00rootroot00000000000000lsyncd-release-2.1.6/tests/churn-direct.lua000077500000000000000000000022711260764373300207060ustar00rootroot00000000000000#!/usr/bin/lua -- a heavy duty test. -- makes thousends of random changes to the source tree require( 'posix' ) dofile( 'tests/testlib.lua' ) cwriteln( '****************************************************************' ) cwriteln( ' Testing default.direct with random data activity ' ) cwriteln( '****************************************************************' ) local tdir, srcdir, trgdir = mktemps( ) -- makes some startup data churn( srcdir, 10 ) local logs = { } --local logs = {'-log', 'Exec', '-log', 'Delay' } local pid = spawn( './lsyncd', '-nodaemon', '-direct', srcdir, trgdir, unpack( logs ) ) cwriteln( 'waiting for Lsyncd to startup' ) posix.sleep( 1 ) churn( srcdir, 500 ) cwriteln( 'waiting for Lsyncd to finish its jobs.' ) posix.sleep( 10 ) cwriteln( 'killing the Lsyncd daemon' ) posix.kill( pid ) local _, exitmsg, lexitcode = posix.wait( lpid ) cwriteln( 'Exitcode of Lsyncd = ', exitmsg, ' ', lexitcode ) _, result, code = os.execute( 'diff -r ' .. srcdir .. ' ' .. trgdir ) if result == 'exit' then cwriteln( 'Exitcode of diff = ', code ) else cwriteln( 'Signal terminating diff = ', code ) end if code ~= 0 then os.exit( 1 ) else os.exit( 0 ) end lsyncd-release-2.1.6/tests/churn-rsync.lua000077500000000000000000000023171260764373300205730ustar00rootroot00000000000000#!/usr/bin/lua -- a heavy duty test. 
-- makes thousands of random changes to the source tree require( 'posix' ) dofile( 'tests/testlib.lua' ) cwriteln( '****************************************************************' ) cwriteln( ' Testing default.rsync with random data activity' ) cwriteln( '****************************************************************' ) local tdir, srcdir, trgdir = mktemps( ) -- makes some startup data churn( srcdir, 100 ) local logs = { } -- logs = { "-log", "Delay", "-log", "Fsevents" } local pid = spawn( './lsyncd', '-nodaemon', '-delay', '5', '-rsync', srcdir, trgdir, unpack( logs ) ) cwriteln( 'waiting for Lsyncd to start up' ) posix.sleep( 1 ) churn( srcdir, 500 ) cwriteln( 'waiting for Lsyncd to finish its jobs.' ) posix.sleep( 10 ) cwriteln( 'killing the Lsyncd daemon' ) posix.kill( pid ) local _, exitmsg, lexitcode = posix.wait( pid ) cwriteln( 'Exitcode of Lsyncd = ', exitmsg, ' ', lexitcode ) _, result, code = os.execute( 'diff -r ' .. srcdir .. ' ' .. trgdir ) if result == 'exit' then cwriteln( 'Exitcode of diff = ', code ) else cwriteln( 'Signal terminating diff = ', code ) end if code ~= 0 then os.exit( 1 ) else os.exit( 0 ) end lsyncd-release-2.1.6/tests/churn-rsyncssh.lua000077500000000000000000000025421260764373300213110ustar00rootroot00000000000000#!/usr/bin/lua -- a heavy duty test. -- makes thousands of random changes to the source tree require( 'posix' ) dofile( 'tests/testlib.lua' ) cwriteln( '****************************************************************' ) cwriteln( ' Testing default.rsyncssh with random data activity ' ) cwriteln( '****************************************************************' ) cwriteln( '( this test needs passwordless ssh localhost access )' ) cwriteln( '( for current user )' ) local tdir, srcdir, trgdir = mktemps() -- makes some startup data churn( srcdir, 100 ) local logs = {} logs = { '-log', 'Delay' } local pid = spawn( './lsyncd', '-nodaemon', '-delay', '5', '-rsyncssh', srcdir, 'localhost', trgdir, unpack(logs) ) cwriteln( 'waiting for Lsyncd to start up' ) posix.sleep( 1 ) churn( srcdir, 100 ) cwriteln( 'waiting for Lsyncd to finish its jobs.' ) posix.sleep( 10 ) cwriteln( 'killing the Lsyncd daemon' ) posix.kill(pid) local _, exitmsg, lexitcode = posix.wait(pid) cwriteln( 'Exitcode of Lsyncd = ', exitmsg, ' ', lexitcode ) _, result, code = os.execute( 'diff -r ' .. srcdir .. ' ' .. trgdir ) if result == 'exit' then cwriteln( 'Exitcode of diff = ', code ) else cwriteln( 'Signal terminating diff = ', code ) end if code ~= 0 then os.exit( 1 ) else os.exit( 0 ) end lsyncd-release-2.1.6/tests/exclude-rsync.lua000077500000000000000000000055651260764373300211130ustar00rootroot00000000000000#!/usr/bin/lua require("posix") dofile("tests/testlib.lua") cwriteln("****************************************************************") cwriteln(" Testing excludes ") cwriteln("****************************************************************") local tdir, srcdir, trgdir = mktemps() local logfile = tdir .. "log" local cfgfile = tdir .. "config.lua" local range = 5 local log = {"-log", "all"} writefile(cfgfile, [[ settings = { logfile = "]]..logfile..[[", nodaemon = true, delay = 3, } sync { default.rsync, source = "]]..srcdir..[[", target = "]]..trgdir..[[", exclude = { "erf", "/eaf", "erd/", "/ead/", }, }]]); -- writes all files local function writefiles() posix.mkdir(srcdir .. "d"); writefile(srcdir .. "erf", "erf"); writefile(srcdir .. "eaf", "erf"); writefile(srcdir .. "erd", "erd"); writefile(srcdir .. "ead", "ead"); writefile(srcdir ..
"d/erf", "erf"); writefile(srcdir .. "d/eaf", "erf"); writefile(srcdir .. "d/erd", "erd"); writefile(srcdir .. "d/ead", "ead"); end -- test if the filename exists, fails if this differs from expect local function testfile(filename, expect) local stat, err = posix.stat(filename) if stat and not expect then cwriteln("failure: ",filename," should be excluded"); os.exit(1); end if not stat and expect then cwriteln("failure: ",filename," should not be excluded"); os.exit(1); end end -- test all files local function testfiles() testfile(trgdir .. "erf", false); testfile(trgdir .. "eaf", false); testfile(trgdir .. "erd", true); testfile(trgdir .. "ead", true); testfile(trgdir .. "d/erf", false); testfile(trgdir .. "d/eaf", true); testfile(trgdir .. "d/erd", true); testfile(trgdir .. "d/ead", true); end cwriteln( 'testing startup excludes' ); writefiles( ); cwriteln( 'starting Lsyncd' ); local pid = spawn( './lsyncd', cfgfile, '-log', 'all'); cwriteln( 'waiting for Lsyncd to start' ); posix.sleep( 3 ) cwriteln( 'testing excludes after startup' ); testfiles( ); cwriteln( 'ok, removing sources' ); if srcdir:sub( 1,4 ) ~= '/tmp' then -- just to make sure before rm -rf cwriteln( 'exit before drama, srcdir is "', srcdir, '"' ); os.exit( 1 ); end os.execute( 'rm -rf '..srcdir..'/*' ); cwriteln( 'waiting for Lsyncd to remove destination' ); posix.sleep( 5 ); _, result, code = os.execute( 'diff -urN ' .. srcdir .. ' ' .. trgdir ) if result ~= 'exit' or code ~= 0 then cwriteln( 'fail, target directory not empty!' ); os.exit( 1 ); end cwriteln( 'writing files after startup' ); writefiles( ); cwriteln( 'waiting for Lsyncd to transmit changes' ); posix.sleep( 5 ); testfiles( ); cwriteln( 'killing started Lsyncd' ); posix.kill( pid ); local _, exitmsg, lexitcode = posix.wait( pid ); cwriteln( 'Exitcode of Lsyncd = ', exitmsg, ' ', lexitcode ); if lexitcode == 143 then cwriteln( "OK" ); os.exit( 0 ); else os.exit( 1 ); end -- TODO remove temp lsyncd-release-2.1.6/tests/exclude-rsyncssh.lua000077500000000000000000000061231260764373300216220ustar00rootroot00000000000000#!/usr/bin/lua require( 'posix' ) dofile( 'tests/testlib.lua' ) cwriteln( '****************************************************************' ); cwriteln( ' Testing excludes' ); cwriteln( '****************************************************************' ); cwriteln( ' (this test needs passwordless ssh localhost access ' ); cwriteln( ' for current user)' ); local tdir, srcdir, trgdir = mktemps( ) local logfile = tdir .. 'log' local cfgfile = tdir .. 'config.lua' local range = 5 local log = {} log = {'-log', 'all'} writefile(cfgfile, [[ settings = { logfile = ']]..logfile..[[', nodaemon = true, delay = 3, } sync { default.rsyncssh, host = 'localhost', source = ']]..srcdir..[[', targetdir = ']]..trgdir..[[', exclude = { 'erf', '/eaf', 'erd/', '/ead/', }, }]]); -- writes all files local function writefiles() posix.mkdir(srcdir .. 'd'); writefile(srcdir .. 'erf', 'erf'); writefile(srcdir .. 'eaf', 'erf'); writefile(srcdir .. 'erd', 'erd'); writefile(srcdir .. 'ead', 'ead'); writefile(srcdir .. 'd/erf', 'erf'); writefile(srcdir .. 'd/eaf', 'erf'); writefile(srcdir .. 'd/erd', 'erd'); writefile(srcdir ..
'd/ead', 'ead'); end -- test if the filename exists, fails if this differs from expect local function testfile(filename, expect) local stat, err = posix.stat(filename) if stat and not expect then cwriteln('failure: ',filename,' should be excluded'); os.exit(1); end if not stat and expect then cwriteln('failure: ',filename,' should not be excluded'); os.exit(1); end end -- test all files local function testfiles() testfile( trgdir .. 'erf', false ); testfile( trgdir .. 'eaf', false ); testfile( trgdir .. 'erd', true ); testfile( trgdir .. 'ead', true ); testfile( trgdir .. 'd/erf', false ); testfile( trgdir .. 'd/eaf', true ); testfile( trgdir .. 'd/erd', true ); testfile( trgdir .. 'd/ead', true ); end cwriteln('testing startup excludes'); writefiles(); cwriteln('starting Lsyncd'); local pid = spawn('./lsyncd', cfgfile, unpack(log)); cwriteln('waiting for Lsyncd to start'); posix.sleep(10) cwriteln('testing excludes after startup'); testfiles(); cwriteln('ok, removing sources'); if srcdir:sub(1,4) ~= '/tmp' then -- just to make sure before rm -rf cwriteln('exit before drama, srcdir is "', srcdir, '"'); os.exit(1); end os.execute( 'rm -rf ' .. srcdir .. '/*' ); cwriteln( 'waiting for Lsyncd to remove destination' ); posix.sleep( 5 ); _, result, code = os.execute('diff -urN '..srcdir..' '..trgdir) if result ~= 'exit' or code ~= 0 then cwriteln( 'fail, target directory not empty!' ); os.exit( 1 ); end cwriteln( 'writing files after startup' ); writefiles( ); cwriteln( 'waiting for Lsyncd to transmit changes' ); posix.sleep( 15 ); testfiles( ); cwriteln( 'killing started Lsyncd' ); posix.kill( pid ); local _, exitmsg, lexitcode = posix.wait( pid ); cwriteln( 'Exitcode of Lsyncd = ', exitmsg, ' ', lexitcode ); posix.sleep( 1 ); if lexitcode == 143 then cwriteln( 'OK' ); os.exit( 0 ); else os.exit( 1 ); end -- TODO remove temp lsyncd-release-2.1.6/tests/l4rsyncdata.lua000077500000000000000000000033331260764373300205470ustar00rootroot00000000000000#!/usr/bin/lua require( 'posix' ) dofile( 'tests/testlib.lua' ) cwriteln( '****************************************************************' ) cwriteln( ' Testing layer 4 default rsync with simulated data activity ' ) cwriteln( '****************************************************************' ) local tdir, srcdir, trgdir = mktemps() local logfile = tdir .. 'log' local range = 5 local log = { '-log', 'all' } posix.mkdir( srcdir .. 'd' ) posix.mkdir( srcdir .. 'd/e' ) if not writefile( srcdir .. "d/e/f1", 'test' ) then os.exit( 1 ) end cwriteln( 'starting Lsyncd' ) logs = { } local pid = spawn( './lsyncd', '-logfile', logfile, '-nodaemon', '-delay', '5', "-rsync", srcdir, trgdir, unpack( logs ) ) cwriteln( 'waiting for lsyncd to start' ) posix.sleep( 2 ) cwriteln( '* making some data' ) cwriteln( '* creating d[x]/e/f2' ) for i = 1, range do cwriteln( '[cp -r ' .. srcdir .. 'd ' .. srcdir .. 'd' .. i .. ']' ) os.execute( 'cp -r ' .. srcdir .. 'd ' .. srcdir .. 'd' .. i ) end -- mkdir -p "$S"/m/n -- echo 'test3' > "$S"/m/n/file -- for i in $RANGE; do -- cp -r "$S"/m "$S"/m$i -- echo 'test4' > "$S"/m${i}/n/another -- done cwriteln( '* waiting for Lsyncd to do its job.' ) posix.sleep( 10 ) cwriteln( '* killing Lsyncd' ) posix.kill( pid ) local _, exitmsg, lexitcode = posix.wait(pid) cwriteln( 'Exitcode of Lsyncd = ', exitmsg, ' ', lexitcode) posix.sleep( 1 ) cwriteln( '* differences:' ) _, result, code = os.execute( 'diff -urN ' .. srcdir .. ' ' ..
trgdir ) if result == 'exit' then cwriteln( 'Exitcode of diff = "', code, '"') else cwriteln( 'Signal terminating diff = "', code, '"') end if result ~= 'exit' or code ~= 0 then os.exit( 1 ) else os.exit( 0 ) end -- TODO remove temp lsyncd-release-2.1.6/tests/schedule.lua000077500000000000000000000036561260764373300201170ustar00rootroot00000000000000#!/usr/bin/lua require("posix") dofile("tests/testlib.lua") cwriteln("****************************************************************") cwriteln(" Testing Lsyncd scheduler ") cwriteln("****************************************************************") local tdir, srcdir, trgdir = mktemps() local logfile = tdir .. "log" local cfgfile = tdir .. "config.lua" local logs = {"-log", "all" } writefile(cfgfile, [[ settings = { logfile = "]]..logfile..[[", log = "all", nodaemon = true, maxProcesses = 1 } -- continuously touches a file acircuit = { delay = 0, onStartup = "sleep 3 && touch ^source/a", onCreate = "sleep 3 && touch ^source/a", } -- continuously touches b file bcircuit = { delay = 0, onStartup = "sleep 3 && touch ^source/b", onCreate = "sleep 3 && touch ^source/b", } -- continuously touches c file ccircuit = { delay = 0, onStartup = "sleep 3 && touch ^source/c", onCreate = "sleep 3 && touch ^source/c", } sync {acircuit, source ="]]..srcdir..[[", target = "]]..trgdir..[["} sync {bcircuit, source ="]]..srcdir..[[", target = "]]..trgdir..[["} sync {ccircuit, source ="]]..srcdir..[[", target = "]]..trgdir..[["} ]]); -- test if the filename exists, fails if it is missing local function testfile(filename) local stat, err = posix.stat(filename) if not stat then cwriteln("failure: ",filename," missing") os.exit(1) end end cwriteln("starting Lsyncd") local pid = spawn("./lsyncd", cfgfile, unpack(logs)) cwriteln("waiting for Lsyncd to do a few cycles") posix.sleep(30) cwriteln("look if every circuit got a chance to run") testfile(srcdir.."a") testfile(srcdir.."b") testfile(srcdir.."c") cwriteln("killing started Lsyncd") posix.kill(pid) local _, exitmsg, lexitcode = posix.wait(pid) cwriteln("Exitcode of Lsyncd = ", exitmsg, " ", lexitcode) posix.sleep(1); if lexitcode == 143 then cwriteln("OK") os.exit( 0 ) else os.exit( 1 ) end -- TODO remove temp lsyncd-release-2.1.6/tests/testlib.lua000066400000000000000000000155551260764373300177710ustar00rootroot00000000000000-- common testing environment posix = require('posix') -- escape codes to colorize output on terminal local c1='\027[47;34m' local c0='\027[0m' --- -- writes colorized -- function cwriteln(...) io.write(c1, ...) io.write(c0, '\n') end ----- -- initializes the pseudo random generator -- if the environment variable 'SEED' is set, use that as seed. local seed = os.getenv('SEED') or os.time() math.randomseed(seed) cwriteln('random seed: ', seed) ----- -- creates a tmp directory -- -- @returns the name of the directory -- function mktempd() local f = io.popen('mktemp -td ltest.XXX', 'r') local s = f:read('*a') f:close() s = s:gsub('[\n\r]+', ' ') s = s:match('^%s*(.-)%s*$') return s end ----- -- creates a tmp directory with the -- typical lsyncd test architecture -- -- @returns path of tmpdir -- path of srcdir -- path of trgdir -- function mktemps() local tdir = mktempd()..'/' cwriteln('using ', tdir, ' as test root') local srcdir = tdir..'src/' local trgdir = tdir..'trg/' posix.mkdir(srcdir) posix.mkdir(trgdir) return tdir, srcdir, trgdir end ---- -- Writes a file with 'text' in it. -- and adds a newline.
-- function writefile(filename, text) local f = io.open(filename, 'w') if not f then cwriteln('Cannot open "'..filename..'" for writing.') return false end f:write(text) f:write('\n') f:close() return true end ----- -- spawns a subprocess. -- -- @returns the processes pid -- function spawn(...) args = {...} cwriteln('spawning: ', table.concat(args, ' ')) local pid = posix.fork() if pid < 0 then cwriteln('Error, failed fork!') os.exit(-1) end if pid == 0 then posix.exec(...) -- should not return cwriteln('Error, failed to spawn: ', ...) os.exit(-1); end return pid end ----- -- Makes a lot of random data -- -- @param rootdir ... the directory to make data in -- @param n ... roughly how much data action will be done -- function churn(rootdir, n) -- all dirs created, indexed by integer and path root = {name=''} alldirs = {root} dirsWithFileI = {} dirsWithFileD = {} ----- -- returns the name of a directory -- -- name is internal recursive parameter, keep it nil. -- local function dirname(dir, name) name = name or '' if not dir then return name end return dirname(dir.parent, dir.name .. '/' .. name) end ----- -- Picks a random dir. -- local function pickDir(notRoot) if notRoot then if #alldirs <= 2 then return nil end return alldirs[math.random(2, #alldirs)] end return alldirs[math.random(#alldirs)] end ---- -- Picks a random file. -- -- Returns 3 values: -- * the directory -- * the filename -- * number of files in directory -- local function pickFile() -- picks the random directory if #dirsWithFileI < 1 then return end local rdir = dirsWithFileI[math.random(1, #dirsWithFileI)] if not rdir then return end -- counts the files in there local c = 0 for name, _ in pairs(rdir) do if #name == 2 then c = c + 1 end end -- picks one file at random local cr = math.random(1, c) local fn for name, _ in pairs(rdir) do if #name == 2 then -- filenames are 2 chars wide. cr = cr - 1 if cr == 0 then fn = name break end end end return rdir, fn, c end ----- -- Removes a reference to a file -- -- @param dir --- directory reference -- @param fn --- filename -- @param c --- number of files in dir -- local function rmFileReference(dir, fn, c) dir[fn] = nil if c == 1 then -- if last file from origin dir, it has no files anymore for i, v in ipairs(dirsWithFileI) do if v == dir then table.remove(dirsWithFileI, i) break end end dirsWithFileD[dir] = nil end end ---- -- possible randomized behaviour. -- just gives it a pause -- local function sleep() cwriteln('..zzz..') posix.sleep(1) end ---- -- possible randomized behaviour. -- creates a directory -- local function mkdir() -- chooses a random directory to create it into local rdir = pickDir() -- creates a new random one letter name local nn = string.char(96 + math.random(26)) if not rdir[nn] then local ndir = { name = nn, parent = rdir, } local dn = dirname(ndir) rdir[nn] = dn table.insert(alldirs, ndir) cwriteln('mkdir '..rootdir..dn) posix.mkdir(rootdir..dn) end end ---- -- possible randomized behaviour. -- Creates a file. -- local function mkfile() -- chooses a random directory to create it into local rdir = pickDir() -- creates a new random one letter name local nn = 'f'..string.char(96 + math.random(26)) local fn = dirname(rdir) .. nn cwriteln('mkfile '..rootdir..fn) local f = io.open(rootdir..fn, 'w') if f then for i=1,10 do f:write(string.char(96 + math.random(26))) end f:write('\n') f:close() rdir[nn]=true if not dirsWithFileD[rdir] then table.insert(dirsWithFileI, rdir) dirsWithFileD[rdir]=true end end end ---- -- possible randomized behaviour, -- moves a directory.
-- local function mvdir() if #alldirs <= 2 then return end -- chooses a random directory to move local odir = pickDir(true) -- chooses a random directory to move to local tdir = pickDir() -- makes sure tdir is not a subdir of odir local dd = tdir while dd do if odir == dd then return end dd = dd.parent end -- origin name in the target dir already if tdir[odir.name] ~= nil then return end local on = dirname(odir) local tn = dirname(tdir) cwriteln('mvdir ',rootdir,on,' -> ',rootdir,tn,odir.name) os.rename(rootdir..on, rootdir..tn..odir.name) odir.parent[odir.name] = nil odir.parent = tdir tdir[odir.name] = odir end ---- -- possible randomized behaviour, -- moves a file. -- local function mvfile() local odir, fn, c = pickFile() if not odir then return end -- picks a directory with a file at random -- picks a target directory at random local tdir = pickDir() local on = dirname(odir) local tn = dirname(tdir) cwriteln('mvfile ',rootdir,on,fn,' -> ',rootdir,tn,fn) os.rename(rootdir..on..fn, rootdir..tn..fn) rmFileReference(odir, fn, c) tdir[fn] = true if not dirsWithFileD[tdir] then dirsWithFileD[tdir] = true table.insert(dirsWithFileI, tdir) end end ---- -- possible randomized behaviour, -- removes a file. -- local function rmfile() local dir, fn, c = pickFile() if dir then local dn = dirname(dir) cwriteln('rmfile ',rootdir,dn,fn) posix.unlink(rootdir..dn..fn) rmFileReference(dir, fn, c) end end local dice = { { 10, sleep }, { 20, mkfile }, { 20, mkdir }, { 20, mvdir }, { 20, rmfile }, } cwriteln('making random data') local ndice = 0 for i, d in ipairs(dice) do ndice = ndice + d[1] d[1] = ndice end for ai=1,n do -- throws a die what to do local acn = math.random(ndice) for i, d in ipairs(dice) do if acn <= d[1] then d[2]() break end end end end
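-- An illustrative usage sketch (added note, not part of the original testlib.lua):
-- the helpers above are meant to be combined in a test script under tests/,
-- in the pattern of churn-rsync.lua. The './lsyncd' path, churn counts and
-- sleep intervals below are assumptions mirroring the existing churn-*.lua
-- tests; the example is kept as comments so dofile'ing this library stays
-- side effect free:
--
--   require( 'posix' )
--   dofile( 'tests/testlib.lua' )
--
--   local tdir, srcdir, trgdir = mktemps( )    -- temp root, source and target dirs
--   churn( srcdir, 50 )                        -- some startup data
--   local pid = spawn( './lsyncd', '-nodaemon', '-rsync', srcdir, trgdir )
--   posix.sleep( 1 )
--   churn( srcdir, 100 )                       -- random activity while syncing
--   posix.sleep( 10 )
--   posix.kill( pid )                          -- defaults to SIGTERM
--   posix.wait( pid )
--   local _, result, code = os.execute( 'diff -r ' .. srcdir .. ' ' .. trgdir )
--   os.exit( ( result == 'exit' and code == 0 ) and 0 or 1 )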