dvcs-autosync-0.5/.autosync-example:
[autosync]
path = ~/amw
# If pidfile is not explicitly specified, the name of the used config file
# will be appended with '.pid', e.g. for a config file ~/.autosync, the PID
# will be written to ~/.autosync.pid. This option allows overriding the
# default.
#pidfile = ~/.autosync.pid
# Available synchronization methods: "xmpp" and "none"; "autosync-server"
# may be implemented later (to potentially be interoperable with SparkleShare).
# If xmpp is set as synchronization method, a config section [xmpp] with at
# least two config variables username and password must exist, otherwise
# dvcs-autosync will fail to start.
#syncmethod = none
#syncmethod = autosync-server
syncmethod = xmpp
# Available notification methods: "desktop", "xmpp", "all", and "none".
# Default if not specified is "desktop" and indicates that KDE/Gnome/notify
# notifications (typically in the tray area) should be used to inform the user
# when anything noticeable happens. "none" disables notifications altogether
# and "xmpp" causes notifications to be sent via XMPP to the account specified
# in the "alsonotify" option in section [xmpp]. "all" will send notifications
# to both desktop and xmpp.
notifymethod = all
# There are currently two options for handling file notifications, as neither
# one is perfect. You can choose between the 'conservative' option, which is
# slower but should work in every corner case, and the 'optimized' option,
# which will consume less CPU and I/O resources on a remotely-triggered pull,
# but may miss local changes until the next time autosync is restarted or a
# manual commit is done on the repository.
#
# The problem is that during a pull from the remote repository, changes will
# be applied to the local file system and consequently generate file-changed
# events. These events are in turn translated to add/remove/move commands for
# the DVCS, which would duplicate the remote changes locally in the history and
# obviously doesn't work e.g. for file removes. Therefore, the file/dir changes
# caused by a remote pull must not be translated to local DVCS changes.
# The conservative strategy solves this problem by completely suspending event
# handling while the pull is active. Because it is possible that _real_ local
# changes occur concurrently with the pull, the startup command will be run
# after the pull has finished and event processing has resumed. This is a
# safe option, as all local changes that occurred before or during the pull
# will be picked up by the DVCS client. However, when no local changes occurred
# (which is more probable), then this strategy causes unnecessary I/O overhead.
#
# The optimized strategy also suspends the execution of local DVCS actions
# triggered by file/directory events during the pull, but does not completely
# discard them. Instead, all events that occurred during the pull are recorded
# in an event queue which is replayed after the pull has finished. The
# advantage is that a complete re-scan of the local repository is avoided and
# only those files/directories that saw some modification are re-checked for
# local changes. The disadvantage is that this depends more strongly on the
# change detection capabilities (trivial ones done by dvcs-autosync and more
# complex ones done by the respective DVCS client) and it is therefore not
# guaranteed that all local, concurrent changes are being detected. This option
# is still being evaluated for corner cases where it doesn't work, and
# therefore is not yet the default strategy.
pulllock = conservative
#pulllock = optimized
# The number of seconds to wait for additional events before acting. Setting
# this lower will increase the synchronization speed at the cost of CPU and
# transfer resources.
readfrequency = 5
ignorepath = .git .svn .hg src/packages src/java/openuat
    src/csharp/sparkleshare src/cpp/cross/keepassx src/android/ipv6config
# Note: addcmd, rmcmd, and modifycmd take one argument, movecmd takes two (first the source, then the destination).
# Note: statuscmd should return with code 0 when nothing has changed in the
# local checked-out tree that needs to be committed and non-zero when a commit
# is required.
# Note: commitcmd also takes one argument, which will be substituted by a generated commit message.
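# Example: with "movecmd = git mv %s %s", a detected move of a.txt to b/a.txt
# would run "git mv a.txt b/a.txt"; with "commitcmd = git commit -m %s", the %s
# is replaced by the automatically generated commit message.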
[dvcs]
# for git
statuscmd = git status | grep -iq "nothing to commit"
addcmd = git add %s
rmcmd = git rm -r %s
modifycmd = git add %s
# doesn't work when the source file no longer exists, git expects to move it itself
#movecmd = git mv %s %s
# use this instead, git will figure out that it was a move because the file is similar
movecmd = git rm %s
    git add %s
startupcmd = git add -A
commitcmd = git commit -m %s
pushcmd = git push
pullcmd = git pull
remoteurlcmd = git config --get remote.origin.url
# for mercurial
# next line works with bash as shell, returning 0 if nothing has changed
#statuscmd = test "`hg status`" = ""
#addcmd = hg add
#rmcmd = hg remove
#modifycmd =
#movecmd = hg mv %s %s
#startupcmd = hg addremove
#commitcmd = hg commit -m %s
#pushcmd = hg push
#pullcmd = hg pull -u
#remoteurlcmd = hg showconfig paths.default
[xmpp]
username = your XMPP id here
password = your XMPP password here
alsonotify = if set, another XMPP id that will get notified when something happens
[autosync-server]
server = http://whatever.sync.server
username = your-username
password = your-password

dvcs-autosync-0.5/.autosync-example-windows:
[autosync]
path = ~/Desktop/splashbin
pidfile = ~/.autosync.pid
syncmethod = xmpp
#syncmethod = autosync-server
# There are currently two options for handling file notifications, as neither
# one is perfect. You can choose between the 'conservative' option, which is
# slower but should work in every corner case, and the 'optimized' option,
# which will consume less CPU and I/O resources on a remotely-triggered pull,
# but may miss local changes until the next time autosync is restarted or a
# manual commit is done on the repository.
#
# The problem is that during a pull from the remote repository, changes will
# be applied to the local file system and consequently generate file-changed
# events. These events are in turn translated to add/remove/move commands for
# the DVCS, which would duplicate the remote changes locally in the history and
# obviously doesn't work e.g. for file removes. Therefore, the file/dir changes
# caused by a remote pull must not be translated to local DVCS changes.
# The conservative strategy solves this problem by completely suspending event
# handling while the pull is active. Because it is possible that _real_ local
# changes occur concurrently with the pull, the startup command will be run
# after the pull has finished and event processing has resumed. This is a
# safe option, as all local changes that occurred before or during the pull
# will be picked up by the DVCS client. However, when no local changes occurred
# (which is more probable), then this strategy causes unnecessary I/O overhead.
#
# The optimized strategy also suspends the execution of local DVCS actions
# triggered by file/directory events during the pull, but does not completely
# discard them. Instead, all events that occurred during the pull are recorded
# in an event queue which is replayed after the pull has finished. The
# advantage is that a complete re-scan of the local repository is avoided and
# only those files/directories that saw some modification are re-checked for
# local changes. The disadvantage is that this depends more strongly on the
# change detection capabilities (trivial ones done by dvcs-autosync and more
# complex ones done by the respective DVCS client) and it is therefore not
# guaranteed that all local, concurrent changes are being detected. This option
# is still being evaluated for corner cases where it doesn't work, and
# therefore is not yet the default strategy.
pulllock = conservative
#pulllock = optimized
# The number of seconds to wait for additional events before acting. Setting
# this lower will increase the synchronization speed at the cost of CPU and
# transfer resources.
readfrequency = 5
ignorepath = .git
# Note: addcmd, rmcmd, and modifycmd take one argument, movecmd takes two (first the source, then the destination).
# Note: statuscmd should return with code 0 when nothing has changed in the
# local checked-out tree that needs to be committed and non-zero when a commit
# is required.
# Note: commitcmd also takes one argument, which will be substituted by a generated commit message.
[dvcs]
# for git
statuscmd = git.cmd status | find /I "nothing to commit"
addcmd = git.cmd add %s
rmcmd = git.cmd rm %s
modifycmd = git.cmd add %s
# doesn't work when the source file no longer exists, git expects to move it itself
#movecmd = git mv %s %s
# use this instead, git will figure out that it was a move because the file is similar
movecmd = git.cmd rm %s
    git.cmd add %s
startupcmd = git.cmd add -A
commitcmd = git.cmd commit -m %s
pushcmd = git.cmd push
pullcmd = git.cmd pull
remoteurlcmd = git.cmd config --get remote.origin.url
# for mercurial
#statuscmd = hg status
#addcmd = hg add
#rmcmd = hg remove
#modifycmd =
#movecmd = hg mv %s %s
#startupcmd = hg addremove
#commitcmd = hg commit -m %s
#pushcmd = hg push
#pullcmd = hg pull -u
[xmpp]
username = ascii158-autosync@jabber.org
password = computer
alsonotify =
[autosync-server]
server = http://whatever.sync.server
username = your-username
password = your-password

dvcs-autosync-0.5/.gitignore:
*~
.~lock.*#
*.backup
*.old
*.aux
*.blg
*.bbl
*.bib.bak
*.pyc
.swp.*
qt_temp.*
build
debian/python-module-stampdir/
debian/files
debian/dvcs-autosync/
debian/dvcs-autosync.substvars
debian/dvcs-autosync.*debhelper*

dvcs-autosync-0.5/.project:
dvcs-autosync
org.python.pydev.PyDevBuilder
org.python.pydev.pythonNature

dvcs-autosync-0.5/.pydevproject:
Default
python 2.6

dvcs-autosync-0.5/.settings/org.eclipse.core.resources.prefs:
#Thu May 12 17:31:06 CEST 2011
eclipse.preferences.version=1
encoding/dvcs-autosync=utf-8
encoding/jabberbot.py=utf-8

dvcs-autosync-0.5/CHANGELOG:
Version 0.5
* Added Windows and Mac OS X ports (thanks to Philipp Tölke and Olivier
Guerriat, respectively, for the ports!).
* Added icon for notification popups (patch by evgeni@golov.de via Gitorious,
thanks!).
Version 0.4
* Debian packaging.
* Improved logging and notifications contributed by Dieter Plaetinck.
* Added improvements by René 'Necoro' Neumann to make the embedded Jabberbot
more stable with regards to reconnections. Instead of running a tight loop
while trying to reconnect, ping the server regularly and queue the messages
until the connection can be re-established.
* Support configurable notifications (desktop notifications, XMPP, all, or
none).
Version 0.3
* First public release, announced on vcs-home@lists.madduck.net.

dvcs-autosync-0.5/COPYRIGHT:
# Copyright Rene Mayrhofer, 2010-2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2 or 3 of the License.

dvcs-autosync-0.5/INSTALL_MAC:
Install dvcs-autosync
------------------------
* `git clone git://gitorious.org/~olivierg/dvcs-autosync/olivierg-dvcs-autosync.git`
* `cd olivierg-dvcs-autosync`
* `python setup.py build`
* `sudo python setup.py install`
* `cp .autosync-example ~/.autosync`
* `open ~/.autosync`
* the config file will open in your default text editor; fill it in and save it
Install xmpppy
------------------------
* Download the latest version here: http://xmpppy.sourceforge.net/
* Untar & `cd` to its folder
* `sudo python setup.py install`
Install MacFSEvents
------------------------
* `git clone https://github.com/malthe/macfsevents.git`
* `cd macfsevents`
* `sudo python setup.py install`
Install Growl python binding
------------------------
* Make sure you have Growl installed (or install it from growl.info)
* `cd` to the `growl-python` folder of this repository (or download the Growl SDK from growl.info)
* `sudo python setup.py install`
* After the first launch, you will be able to configure (or disable) notifications in the Growl preferences pane.
Run dvcs-autosync
------------------------
* foreground: `dvcs-autosync`
* background: `screen -dmS autosync dvcs-autosync`

dvcs-autosync-0.5/README:
What does it do?
------------------------
Keeps DVCS repositories in sync whenever changes happen by automatically committing and pushing/pulling.
How does it do it?
------------------------
0. Set up desktop notifications (for these nice bubble-style popups when anything happens) and log into a Jabber/XMPP account specified in the config file.
1. Monitor a specific path (and its subdirectories) for changes with inotify.
At the moment, only one path is supported and multiple script instances have to be run for multiple disjoint paths. This path is assumed to be (part of) a repository. Currently tested with git, but should support most DVCSs (the config file allows specifying the DVCS commands used to interact with it).
Optionally, an [ignores] file is read with one exclusion pattern per line, and files matching any of the patterns are ignored. This will typically be the .gitignore file already existing in the git tree.
2. When changes are detected, check them into the repository that is being monitored (or delete, or move, etc.).
It automatically ignores any patterns listed in .gitignore, and the config file allows excluding other directories (e.g. repositories within the main repository).
3. Wait for a configurable time. When nothing else changes in between, commit.
4. Wait a few seconds longer (again configurable) and, if nothing else is committed, initiate a push (see the sketch below this list).
5. After the push has finished, send an XMPP message to self (that is, to all clients logged in with the same account) to notify other instances of the push.
Furthermore:
* At any time in between, when receiving a proper XMPP message, pull from the repository.
* A PID file is written to [pidfile] for killing the daemon later on.
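
Steps 3 and 4 boil down to a resettable countdown: every new change postpones the commit/push until no further changes arrive for the configured delay. A minimal, purely illustrative sketch of that idea (not the actual implementation; dvcs-autosync uses its own ResettableTimer class internally):

    import threading

    class DelayedPush(object):
        def __init__(self, delay, push):
            self.delay = delay    # seconds to wait after the last change
            self.push = push      # callback that performs the actual push
            self.timer = None

        def change_detected(self):
            # each new change cancels the pending push and restarts the countdown
            if self.timer:
                self.timer.cancel()
            self.timer = threading.Timer(self.delay, self.push)
            self.timer.start()
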
Dependencies
-----------------------
* Python >= 2.6
* patched JabberBot (>= 0.9) (included in this repository)
the patch allows reception of messages from its own XMPP id (patch already pushed upstream and will be included in the next upstream JabberBot version)
* xmpppy (http://xmpppy.sourceforge.net/)
Linux:
* Linux kernel with inotify enabled
* Pyinotify (better performance with version >= 0.9)
Mac OS X:
* MacFSEvents (https://github.com/malthe/macfsevents/)
* Python 2.7 (included in Lion)
Recommended:
* Pynotify (for desktop notifications on linux)
* Growl python binding (for desktop notifications on Mac OS X, included in this repository)
Installation
------------------------
(on Mac OS X, see the INSTALL_MAC file for detailed instructions)
[PREFERRED] PACKAGE INSTALLATION
* Either install the Debian package (generated by dpkg-buildpackage from the source tree) or use the Arch package
* or (on other systems) simply execute (to install to /usr/local/bin and /usr/share/dvcs-autosync):
1. python setup.py build
2. sudo python setup.py install
MANUAL INSTALLATION
* Copy dvcs-autosync to a location in $PATH and jabberbot.py to a location in $PYTHONPATH
* (Quick and dirty: keep both in the same directory and run ./dvcs-autosync later)
Create the repository and do initial push
----------------------
[on the server used to host the central git repository]
$ git init --bare autosync.git
[on the first host using that repository]
$ cd ~ && git clone :autosync.git autosync
$ cd autosync
$ [ populate initial contents and add to index ]
$ git commit -m 'Initial commit'
$ git push origin master
[on each additional host]
$ git clone :autosync.git autosync
Note that these are only examples. You can use arbitrary directories and repositories.
Configuration
-----------------------
* Create an XMPP/Jabber account (for example on jabber.org, or set up your own server)
* Copy the included .autosync-example config file to ~/.autosync (or wherever you want)
* Change it to your needs
Running the program
-----------------------
dvcs-autosync [config file] # config defaults to ~/.autosync
Potential pitfalls
----------------------
* for Jabber login, there probably needs to be a
_xmpp-client._tcp. SRV entry in DNS so that
the Python XMPP module can look up the server and port to use. Without such
an SRV entry, Jabber login may fail even if the account details are correct
and the server is reachable.
* when there are errors
ERROR:pyinotify:add_watch: cannot watch ...
on startup, the cause is either an invalid file or directory name which
cannot be watched for changes, or the number of files a user may watch
concurrently using the kernel inotify interface has reached the set limit.
In the latter case, the limit can be changed by modifying the sysctl variable
fs.inotify.max_user_watches and increasing it to a sufficient value
(e.g. 500000). A small snippet to check this is shown after this list.
* Note that, when keeping changing media files (or other large binaries) in
an automatically synchronized repository, it may grow quickly. In the
current version, dvcs-autosync will never delete any history and keep all
previous versions. This is intentional for documents and text files, but may
be problematic for large binaries. I will try to address this problem in
future versions, e.g. by integrating with git-annex (see TODO).
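
A quick way to check whether the watch limit is the likely cause is to compare the number of directories below the synchronized path (inotify needs one watch per directory) with the current limit. Illustrative snippet (the path is just an example):

    import os

    path = os.path.expanduser('~/autosync')   # replace with your watched path
    watches = sum(len(dirs) for _, dirs, _ in os.walk(path)) + 1
    limit = int(open('/proc/sys/fs/inotify/max_user_watches').read())
    print('%d directories to watch, fs.inotify.max_user_watches is %d' % (watches, limit))
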
Thoughts that should be considered at some point but have not yet been implemented:
------------------------
- The XMPP push message already contains a parameter, namely the repository the push went to. Add another parameter to specify the repository in which the change happened so that others can try to pull directly from there, in case it is quicker. The main use case for this optimization is my standard one: the laptop sitting next to the desktop and both of them syncing each other's home directories. Going via the main, hosted server is quite a bit more inefficient than pulling via 1GB/s LAN....
- Pulls and pushes can and should be optimized. At the moment, I take a conservative locking approach whenever a conflict may occur and performance is reasonable on my main work tree with ca. 16GB (cloned GIT repo), but not stellar. Specifically, actually implement the "optimized" pull lock strategy already described in the example config file.
- Implement another option for synchronization besides XMPP (idea: a simple broadcast reflector on a single TCP port that could even run on e.g. OpenWRT, or re-use whatever the Sparkleshare server does).
- Automatically adding some context to each commit message besides the automatic date/time would be useful for finding out why a change happened. Nepomuk anybody (just kidding, maybe, for now...)?
- Allow to specify commit messages via popups. When ignored, use default commit message.
Disclaimer
------------------------
This is my first Python program that is longer than 100 lines. Please be easy on me with the patches, complaints and "what did you think, doing it this way?" messages. I have tried to comment wherever I found it necessary for my own understanding, but this is neither the best structured nor the most elegant program I ever wrote. Any hints for improving it are greatly welcome, and interoperability patches to work with Sparkleshare even more so. In the future, the two projects should definitely interoperate, which will come down to implementing each other's notification mechanism. My autosync Python script could then be used wherever headless operation might be required and/or Mono is not installed.
I have tested it between three systems and, in this version, it works reasonably well. However, there does seem to be the occasional kink when editors go crazy on temporary file creation, renaming, deleting originals, etc. These might be races, but I don't know for certain yet. Additional test cases are more than welcome. This script should be fairly safe to try, considering that the worst it will do is add a few hundred commits to your DVCS repo and push them to the configured default remote. But, after all, what is the point in using a DVCS if you can't roll back any changes made by you or a buggy script (yes, I did have to do that a number of times while developing the manual inotify event coalescing to cooperate better with git add/remove/mv actions).
Rene Mayrhofer

dvcs-autosync-0.5/TODO:
===============================================================================
This document should be modified to reflect the current state of development.
===============================================================================
TODO - currently known bugs:
----------------------------
* Moving directories is not tracked properly with the current event coalescing. Find out how to do a directory move in a better way or simply pass the "-r" option to git when working on directories.
* definitely coalesce multiple file events within a time window instead of creating one commit for each file, e.g. when moving whole directory hierarchies (don't clutter the commit history with each file commit separately)
* find out why the Jabber msg-to-self doesn't work in some cases
* Mac OS X: file manipulations performed by git pull are tracked and cause false "local change" alerts (stopped by git, which says "no modification")
TODO - short term:
------------------
* autocommit messages should not only include the file path, but also the action performed on the file (as many details as could be helpful for later analysis)
* determine if pulling directly from those repositories which caused the changes is quicker than pulling from the central one
* optimize pulls and pushes during startup
* implement optimistic pull lock for better performance
* Find a good combination of global and file-based event coalescing. This would allow quick synchronization of files that are written and then not touched again, while commits to files that are continuously being worked on would be kept in one (larger) commit. There are (at least) 2 use cases to take into account: copying/moving/removing whole directories that should be kept in one big commit and synced quickly, and one file being open in an editor application and being written to more or less continuously. In the latter case, we need to find a compromise between too many commits and too few (which is a problem when accidentally deleting text that was written an hour earlier and that should be recoverable).
TODO - future versions:
-----------------------
* automatically add some context to commit messages (e.g. location, applications open at the same time, etc.)
* allow to specify a commit/change message via traybar icon/popup message, maybe even in retrospect (rewriting history before pushing with a longer push delay)
* integrate with git-annex to support keeping large binary files in synchronized directories without unnecessarily increasing repository size by keeping their history
Ideas:
------
One approach to the local/global coalescing compromise would be to put all files that are changed within a few seconds into a waiting list for the event processing. Then, keep a timestamp for each file to remember when its last change was. When files have not been changed for a few seconds, move them into a "pending commit" list. When the number of files in the "pending commit" list becomes large (above some threshold) and the remaining still-being-written-to files list becomes small and (that's the important part) _stable_ (in terms of no longer adding/removing files), break them into two groups, commit the pending list and keep the other files until they settle down.
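
A very rough sketch of this idea (illustrative only; names and thresholds are invented, this is not implemented anywhere in dvcs-autosync yet):

    import time

    SETTLE_SECONDS = 5        # example: a file counts as settled after 5s of silence
    PENDING_THRESHOLD = 20    # example: commit once this many files have settled

    last_change = {}          # path -> timestamp of the last event seen for it
    pending = set()           # settled paths waiting to be committed together

    def on_event(path):
        last_change[path] = time.time()

    def periodic_check(commit):
        now = time.time()
        for path, seen in list(last_change.items()):
            if now - seen > SETTLE_SECONDS:
                pending.add(path)
                del last_change[path]
        # commit the settled group when it is large or nothing is still changing
        if pending and (len(pending) >= PENDING_THRESHOLD or not last_change):
            commit(sorted(pending))
            pending.clear()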

dvcs-autosync-0.5/autosync-xdg-launcher.sh:
#!/bin/bash
# This simple script will start dvcs-autosync daemons for all configs found in $XDG_CONF_HOME/autosync/,
# stopping any existing daemons with that config first.
conf=${XDG_CONF_HOME:-$HOME/.config}/autosync
data=${XDG_DATA_HOME:-$HOME/.local/share}/autosync
mkdir -p $data/log
for i in $conf/*
do
  pkill -f "python.*dvcs-autosync .*autosync/$(basename $i)"
  dvcs-autosync $i &> $data/log/$(basename $i).$(date '+%F-%T').log &
done

dvcs-autosync-0.5/debian/changelog:
dvcs-autosync (0.5) unstable; urgency=low
* Added icon for dvcs-autosync for pynotify (notification 'bubbles')
* Bump standards version and fix Git repo URL to the anonymous version.
Closes: #637387: dvcs-autosync: specified Vcs-git is not public accessible
-- René Mayrhofer Wed, 24 Aug 2011 22:15:33 +0200
dvcs-autosync (0.4) unstable; urgency=low
* Initial release (Closes: #620962: ITP: dvcs-autosync)
-- Rene Mayrhofer Tue, 24 May 2011 14:40:12 +0100

dvcs-autosync-0.5/debian/compat:
7

dvcs-autosync-0.5/debian/control:
Source: dvcs-autosync
Section: utils
Priority: optional
Maintainer: Rene Mayrhofer
Build-Depends: cdbs (>= 0.4.49), debhelper (>= 7.0.50~), python-support, git
Standards-Version: 3.9.2
Homepage: http://www.mayrhofer.eu.org/dvcs-autosync
Vcs-Git: git://gitorious.org/dvcs-autosync/dvcs-autosync.git
Vcs-Browser: http://gitorious.org/dvcs-autosync
Package: dvcs-autosync
Architecture: all
Depends: ${shlibs:Depends}, ${misc:Depends}, ${python:Depends},
python-pyinotify, python-xmpp
Recommends: python-notify
Description: Automatically synchronize distributed version control repositories
dvcs-autosync is an open source replacement for Dropbox/Wuala/Box.net/etc.
based on distributed version control systems (DVCS). It offers nearly
instantaneous mutual updates when a file is added or changed on one side, with
the added benefits of (local, distributed) versioning and of not relying on a
centralized service provider; it can be used with any DVCS hosting option,
including a completely separate server.
.
Synchronization of directories is based on DVCS repositories. Git is used for
main development and is being tested most thoroughly as the backend storage,
but other DVCS such as Mercurial are also supported. dvcs-autosync is
comparable to SparkleShare in terms of overall aim, but takes a more
minimalistic approach. A single Python script monitors the configured
directory for live changes, commits these changes to the DVCS (such as git)
and synchronizes with other instances using XMPP messages.

dvcs-autosync-0.5/debian/copyright:
This work was packaged for Debian by:
Rene Mayrhofer on Sat, 19 Mar 2011 22:50:12 +0100
It was downloaded from:
http://www.mayrhofer.eu.org/dvcs-autosync
Upstream Author(s):
Rene Mayrhofer
Copyright:
Copyright (C) 2011 Rene Mayrhofer
License:
dvcs-autosync is distributed under the terms of the GNU GPL (v2 or v3).
The Debian packaging is:
Copyright (C) 2011 Rene Mayrhofer
and is licensed under the GPL version 3,
see "/usr/share/common-licenses/GPL-3".

dvcs-autosync-0.5/debian/docs:
README
TODO
CHANGELOG

dvcs-autosync-0.5/debian/dvcs-autosync.examples:
.autosync-example

dvcs-autosync-0.5/debian/pycompat:
2

dvcs-autosync-0.5/debian/rules:
#!/usr/bin/make -f
DEB_PYTHON_SYSTEM=pysupport
include /usr/share/cdbs/1/rules/debhelper.mk
include /usr/share/cdbs/1/class/python-distutils.mk

dvcs-autosync-0.5/debian/source/format:
3.0 (native)

dvcs-autosync-0.5/dvcs-autosync:
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# ============================================================================
# Copyright René Mayrhofer, 2010-2011
#
# Contributors:
# * Dieter Plaetinck: documentation and bug fixes, launcher script, config
# handling improvements
# * René 'Necoro' Neumann: improvements for embedded Jabberbot with regards to
# disconnects, bug fixes
# * Philipp Tölke: Windows port
# * Olivier Guerriat : Mac OS port, Growl support
# * evgeni@golov.de: Various bugfixes
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2 or 3 of the License.
# ============================================================================
from __future__ import with_statement
__author__ = 'René Mayrhofer '
__version__ = '0.5'
__website__ = 'http://www.mayrhofer.eu.org/dvcs-autosync'
__license__ = 'GPL v2 or v3'
# Imports for various platforms and error handling concerning optional Python modules
import warnings, sys, signal, os, time, subprocess, threading, fnmatch, ConfigParser, logging
# OS detection
detected_os = False
try:
import pyinotify
detected_os = "LINUX"
except:
pass
try:
import win32api
detected_os = "WINDOWS"
except:
pass
try:
from fsevents import Observer
from fsevents import Stream
detected_os = "MAC_OS"
except:
pass
if not detected_os:
logging.error('Unsupported OS, sorry.')
sys.exit(2)
if not hasattr(subprocess, 'check_output'):
# see https://gist.github.com/839684
def check_output(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd)
return output
subprocess.check_output = check_output
# do not care about deprecation warnings right now, as they are only confusing for users
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=DeprecationWarning)
# need to use a private instance of jabberbot for now...
# TODO: remove when we no longer need this
sys.path.insert(0, '/usr/share/dvcs-autosync')
import jabberbot, xmpp
botcmd = jabberbot.botcmd
# some global variables, will be initialized in main
desktopnotifykde = False
desktopnotifygnome = False
desktopnotifygrowl = False
xmppnotify = False
knotify = None
notifier = None
gnotify = None
bot = None
hostname = None
username = None
pidfile = None
icon = None
def printmsg(title, msg, level=logging.INFO):
# there are probably more levels but I couldn't find the appropriate docs
kdelevels = {logging.DEBUG: 'info',
logging.INFO: 'info',
logging.WARNING: 'warning',
logging.ERROR: 'warning',
logging.CRITICAL: 'warning'}
try:
if desktopnotifygnome:
urgencies = {logging.DEBUG: pynotify.URGENCY_LOW,
logging.INFO: pynotify.URGENCY_NORMAL,
logging.WARNING: pynotify.URGENCY_CRITICAL,
logging.ERROR: pynotify.URGENCY_CRITICAL,
logging.CRITICAL: pynotify.URGENCY_CRITICAL}
n = pynotify.Notification(title, msg)
if icon:
n.set_icon_from_pixbuf(icon)
n.set_urgency(urgencies[level])
n.show()
elif desktopnotifykde:
knotify.event(kdelevels[level], 'kde', [], title, msg, [], [], 0, dbus_interface="org.kde.KNotify")
elif desktopnotifygrowl:
gnotify.notify("Every notifications", title, msg, gnotifyIcon)
time.sleep(0.1) # When sending multiple notifications at the same time, Growl seems to only consider the latest. This little delay prevents that.
if xmppnotify and bot and alsonotify:
bot.send(alsonotify, '[%s]: %s' % (title, msg))
except:
pass
logging.log(level, "NOTIFICATION: %s: %s" % (title, msg))
# this helper class has been shamelessly copied from http://socialwire.ca/2010/01/python-resettable-timer-example/
class ResettableTimer(threading.Thread):
"""
The ResettableTimer class is a timer whose counting loop can be reset
arbitrarily. Its duration is configurable. Commands can be specified
for both expiration and update. Its update resolution can also be
specified. Resettable timer keeps counting until the "run" method
is explicitly killed with the "kill" method.
"""
def __init__(self, maxtime, expire, inc=None, update=None, arg=None):
"""
@param maxtime: time in seconds before expiration after resetting
in seconds
@param expire: function called when timer expires
@param inc: amount by which timer increments before
updating in seconds, default is maxtime/2
@param update: function called when timer updates
@param arg: arbitrary argument that will be passed to function expire when timer expires
"""
self.maxtime = maxtime
self.expire = expire
if inc:
self.inc = inc
else:
self.inc = maxtime / 2
if update:
self.update = update
else:
self.update = lambda c : None
self.arg = arg
self.counter = 0
self.active = True
self.stop = False
threading.Thread.__init__(self)
self.setDaemon(True)
def set_counter(self, t):
"""
Set self.counter to t.
@param t: new counter value
"""
self.counter = t
def deactivate(self):
"""
Set self.active to False.
"""
self.active = False
def kill(self):
"""
Will stop the counting loop before next update.
"""
self.stop = True
def reset(self):
"""
Fully rewinds the timer and makes the timer active, such that
the expire and update commands will be called when appropriate.
"""
self.counter = 0
self.active = True
def run(self):
"""
Run the timer loop.
"""
while True:
self.counter = 0
while self.counter < self.maxtime:
self.counter += self.inc
time.sleep(self.inc)
if self.stop:
return
if self.active:
self.update(self.counter)
if self.active:
self.active = False
self.expire(self.arg)
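# Illustrative usage sketch (not used anywhere in this file): every reset()
# rewinds the countdown, so expire(arg) only fires once no reset() happened
# for maxtime seconds:
#   t = ResettableTimer(maxtime=5, expire=lambda arg: logging.info('expired: %s', arg), arg='push')
#   t.start()
#   t.reset()   # call this on every new event to postpone expiration again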
class AutosyncJabberBot(jabberbot.JabberBot):
def __init__(self, username, password, res=None, debug=False, ignoreownmsg=True):
self._running = False
self._unsent = []
jabberbot.JabberBot.__init__(self, username, password, res, debug, False, not ignoreownmsg)
self.PING_FREQUENCY = 30
def _process_thread(self):
self.log.info('Background Jabber bot thread starting')
while self._running:
try:
if self.conn.Process(1) is None:
# Process() does not raise IOErrors
# instead it returns None if there is no data
self.log.warning('Link down')
raise IOError
self.idle_proc()
except IOError:
self.conn = None
self.log.warning('Received IOError while trying to handle incoming messages, trying to reconnect now')
while not self.conn and self._running:
time.sleep(10)
self.conn = self.connect()
# copy self._unsent so that we don't end up in an infinite loop
# this could happen if we try to send a msg, this fails
# and then it gets re-appended to self._unsent -- where we try
# to send it again ... and again ... and again...
unsent = self._unsent
self._unsent = []
for msg in unsent:
self.send(*msg)
def start_serving(self):
self.connect()
if self.conn:
self.log.info('bot connected. serving forever.')
else:
self.log.warning('could not connect to server - aborting.')
return
self._running = True
self._thread = threading.Thread(target=self._process_thread)
self._thread.daemon = True
self._thread.start()
# this is a hack to get other bots to add this one to their "seen" lists
# TODO: still doesn't work, figure out how to use JabberBot to get rid of
# 'AutosyncJabberBot : Ignoring message from unseen guest: rene-sync@doc.to/AutosyncJabberBot on iss'
self.conn.send(xmpp.Presence(to=username))
def stop_serving(self):
self._running = False
if self._thread:
self._thread.join()
def on_ping_timeout(self):
raise IOError, "Ping timeout"
# override the send method so that connection errors can be handled by trying to reconnect
def send(self, user, text, in_reply_to=None, message_type='chat'):
try:
jabberbot.JabberBot.send(self, user, text, in_reply_to, message_type)
except (AttributeError, IOError):
if self.conn is not None: # error is something different
raise
self.log.warning('Received an error while trying to send message. Will send it later.')
self._unsent.append((user, text, in_reply_to, message_type))
@botcmd
def whoami(self, mess, args):
"""Tells you your username"""
return 'You are %s, I am %s/%s' % (mess.getFrom(), self.jid, self.res)
@botcmd
def ping(self, mess, args):
self.log.debug('Received ping command over Jabber channel')
return 'pong'
@botcmd
def pushed(self, mess, args):
self.log.debug('Received pushed command over Jabber channel with args %s from %s' % (args, mess.getFrom()))
if mess.getFrom() == str(self.jid) + '/' + self.res:
self.log.debug('Ignoring own pushed message looped back by server')
else:
self.log.debug('Trying to pull from %s' % args)
with lock:
handler.protected_pull()
@botcmd
def login(self, mess, args):
"""The bot sends a "login" message first. ignore it"""
return
@botcmd
def unknown(self, mess, args):
"""Should somebody say something that is not a command, all bots will
reply with "Unknown command...." to which all bots will reply that thay
do not know the command "Unknown"..."""
return
class FileChangeHandler():
def __init__(self, cwd, ignored):
self.cwd = cwd
self.ignored = ignored
# singleton timer for delayed execution of push
self._push_timer = None
# When set to true, then all events will be ignored.
# This is used to temporarily disable file event handling when a local
# pull operation is active.
self._ignore_events = False
# This is a dictionary of all events that occurred within _coalesce_time seconds.
# Elements in the sets are tuples of FIFO lists of event types which were delivered
# for the respective file path and timers for handling the file, indexed by the
# respective file path.
self._file_events = dict()
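# Example layout (illustrative): after a create immediately followed by a
# modify of the same file, this could look like
#   {'/some/file': [[('IN_CREATE', cmd_add), ('IN_MODIFY', cmd_modify)], <ResettableTimer>]}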
def _exec_cmd(self, commands, parms = None):
j = 0
for command in commands.split('\n'):
cmdarray = command.split(' ')
if parms:
i = 0
while i < len(cmdarray):
if cmdarray[i] == '%s':
logging.debug('Substituting cmd part %s with %s', cmdarray[i], parms[j])
cmdarray[i] = parms[j]
j=j+1
i=i+1
try:
out = subprocess.check_output(cmdarray, cwd=self.cwd, stderr=subprocess.STDOUT)
logging.debug("Command '%s' in '%s'. Output:\n%s" % (" ".join (cmdarray), self.cwd, out))
except subprocess.CalledProcessError, e:
if hasattr(e, 'output'):
printmsg('Command failed', "Command '%s' in '%s' failed. Output:\n%s" % (" ".join (cmdarray), self.cwd, e.output), level=logging.WARNING)
else:
printmsg('Command failed', "Command '%s' in '%s' failed." % (" ".join (cmdarray), self.cwd), level=logging.WARNING)
def _post_action_steps(self, curpath = None):
with lock:
# the status command should return 0 when nothing has changed
retcode = subprocess.call(cmd_status, cwd=self.cwd, shell=True)
if retcode != 0:
if curpath:
commitmsg = 'Autocommit of file %s changed on host %s' % (curpath, hostname)
else:
commitmsg = 'Autocommit of all changes since last autosync startup on host %s' % hostname
self._exec_cmd(cmd_commit, [commitmsg])
if retcode != 0 and syncmethod != 'none':
# reset the timer and start in case it is not yet running (start should be idempotent if it already is)
# this has the effect that, when another change is committed within the timer period (readfrequency seconds),
# then these changes will be pushed in one go
if self._push_timer and self._push_timer.is_alive():
logging.debug('Resetting already active push timer to new timeout of %s seconds until push would occur', readfrequency)
self._push_timer.reset()
else:
logging.debug('Starting push timer with %s seconds until push would occur (if no other changes happen in between)', readfrequency)
self._push_timer = ResettableTimer(maxtime=readfrequency, expire=self._real_push, inc=1, update=self.timer_tick)
self._push_timer.start()
else:
logging.debug('%s reported that there is nothing to commit, not touching commit timer' % cmd_commit.split(' ')[0])
def _queue_action(self, event, action, parms, act_on_dirs=False):
curpath = event.pathname
if self._ignore_events:
logging.debug('Ignoring event %s to %s, it is most probably caused by a remote change being currently pulled', event.maskname, event.pathname)
return
if event.dir and not act_on_dirs:
logging.debug('Ignoring change to directory %s', curpath)
return
if any(fnmatch.fnmatch(curpath, pattern) for pattern in self.ignored):
logging.debug('Ignoring change to file %s because it matches the ignored patterns from .gitignore', curpath)
return
# remember the event for this file, but don't act on it immediately
# this allows e.g. a file that has just been removed and re-created
# immediately afterwards (as many editors do) to be recorded just as
# being modified
with lock:
# each entry in the dict is a tuple of the list of events and a timer
if not self._file_events.has_key(curpath):
self._file_events[curpath] = [list(), None]
# and each entry in the list is a tuple of event name and associated action
self._file_events[curpath][0].append((event.maskname, action))
if self._file_events[curpath][1] and self._file_events[curpath][1].is_alive():
logging.debug('Resetting already active coalesce timer to new timeout of %s seconds until coalescing events for file %s would occur', coalesce_seconds, curpath)
self._file_events[curpath][1].reset()
else:
logging.debug('Starting coalesce timer with %s seconds until coalescing events for file %s would occur (if no other changes happen in between)', coalesce_seconds, curpath)
self._file_events[curpath][1] = ResettableTimer(maxtime=coalesce_seconds, expire=self._filter_and_handle_actions, inc=1, arg=[curpath, parms])
self._file_events[curpath][1].start()
def _filter_and_handle_actions(self, args):
curpath = args[0]
parms = args[1]
logging.info('Coalesce event triggered for file %s', curpath)
with lock:
logging.debug('Considering file %s, which has the following events recorded:', curpath)
events, timer = self._file_events[curpath]
lastevent = None
lastaction = None
for eventtype, action in events:
logging.debug(' Event type=%s, action=%s', eventtype, action)
if not lastevent:
lastevent = eventtype
lastaction = action
# prio 1: add
# prio 2: move
# prio 3: modify
# prio 4: rm
# special case: rm then add --> modify
if lastevent == 'IN_DELETE' and eventtype == 'IN_CREATE':
lastevent = 'IN_MODIFY'
lastaction = cmd_modify
break
# priority ordering
if lastevent == 'IN_MODIFY' and eventtype == 'IN_CREATE':
lastevent = eventtype
lastaction = action
if lastevent == 'IN_DELETE' and eventtype == 'IN_MODIFY':
lastevent = eventtype
lastaction = action
logging.info('Final action for file %s: type=%s, action=%s', curpath, lastevent, lastaction)
# and clear again for next events coalescing
del self._file_events[curpath]
printmsg('Local change', 'Committing changes in %s: %s' % (curpath, lastaction))
self._exec_cmd(lastaction, parms)
self._post_action_steps(curpath)
def timer_tick(self, counter):
logging.debug('Tick %d / %d' % (counter, self._push_timer.maxtime))
def startup(self):
with lock:
logging.info('Running startup command to check for local changes now: %s', cmd_startup)
self._exec_cmd(cmd_startup)
self._post_action_steps()
def _real_push(self, arg):
proc = subprocess.Popen(cmd_remoteurl.split(' '), stdout=subprocess.PIPE, cwd=self.cwd)
(remoteurl, errors) = proc.communicate()
printmsg('Pushing changes', 'Pushing last local changes to remote repository %s' % remoteurl)
with lock:
# TODO: check if we actually need a pull or a check-for-pull here
# or if all race conditions were already ruled out
# if we need a check-for-pull, then something like
# git fetch --dry-run | grep "Unpacking objects:
# might help
#self.protected_pull()
self._exec_cmd(cmd_push)
# and try to notify other instances
if bot:
bot.send(username, 'pushed %s' % remoteurl)
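# Illustrative example (hostnames invented): if remoteurlcmd prints
# "git@example.org:autosync.git", every client logged in with the same XMPP
# account receives the message body "pushed git@example.org:autosync.git",
# which the pushed() bot command handles by running the configured pull command.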
def protected_pull(self):
printmsg('Pulling changes', 'Pulling changes from remote repository')
# need to handle file change notification while applying remote
# changes caused by the pull: either conservative (ignore all
# file notifications while the pull is running) or optimized (replay the
# file changes that were seen during the pull after it has finished)
if conservative_pull_lock:
# conservative strategy: ignore all events from now on
self._ignore_events = True
with lock:
handler._exec_cmd(cmd_pull)
if conservative_pull_lock:
# pull done, now start handling events again
self._ignore_events = False
# and handle those local changes that might have happened while the
# pull ran and we weren't listening by simply doing the startup
# sequence again
self.startup()
# The definition of this class has to be OS arbitrated because pyinotify can't be
# imported under windows and inheriting from pyinotify.ProcessEvent needs it...
if detected_os == "LINUX":
class LinuxFileChangeHandlerAdapter(pyinotify.ProcessEvent):
def my_init(self, handler):
self.handler = handler
def process_IN_DELETE(self, event):
# sanity check - don't remove file if it still exists in the file system!
if os.path.exists(event.pathname):
logging.debug('Ignoring file delete event on %s, as it still exists - it was probably immediately re-created by the application', event.pathname)
return
self.handler._queue_action(event, cmd_rm, [event.pathname])
def process_IN_CREATE(self, event):
# sanity check - don't add file if it (no longer) exists in the file system!
if not os.path.exists(event.pathname):
logging.debug('Ignoring file create event on %s, as it (no longer) exists - it was probably created as a temporary file and immediately removed by the application', event.pathname)
return
self.handler._queue_action(event, cmd_add, [event.pathname])
def process_IN_MODIFY(self, event):
self.handler._queue_action(event, cmd_modify, [event.pathname])
def process_IN_CLOSE_WRITE(self, event):
self.handler._queue_action(event, cmd_modify, [event.pathname])
def process_IN_ATTRIB(self, event):
self.handler._queue_action(event, cmd_modify, [event.pathname])
def process_IN_MOVED_TO(self, event):
try:
if event.src_pathname:
logging.debug('Detected moved file from %s to %s', event.src_pathname, event.pathname)
self.handler._queue_action(event, cmd_move, [event.src_pathname, event.pathname], act_on_dirs=True)
else:
logging.debug('Moved file to %s, but unknown source, will simply add new file', event.pathname)
self.handler._queue_action(event, cmd_add, [event.pathname], act_on_dirs=True)
except AttributeError:
# we don't even have the attribute in the event, so also add
logging.debug('Moved file to %s, but unknown source, will simply add new file', event.pathname)
self.handler._queue_action(event, cmd_add, [event.pathname], act_on_dirs=True)
if detected_os == "WINDOWS":
class WindowsFileChangeHandlerAdapter(threading.Thread):
def __init__(self, path, ignoreabsolutepaths, handler):
threading.Thread.__init__(self)
self.handler = handler
self.ignoreabsolutepaths = ignoreabsolutepaths
self.path = path
# This is to mimic the event-type of inotify
class MyEvent():
def __init__(self, dir, pathname, action):
self.dir = dir
self.pathname = pathname
self.maskname = [ "", "IN_CREATE", "IN_DELETE", "IN_MODIFY", "IN_DELETE", "IN_CREATE"][action]
def run(self):
import win32file, win32con
FILE_LIST_DIRECTORY = 0x0001
path_to_watch = self.path
hDir = win32file.CreateFile (
path_to_watch,
FILE_LIST_DIRECTORY,
win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE,
None,
win32con.OPEN_EXISTING,
win32con.FILE_FLAG_BACKUP_SEMANTICS,
None
)
while 1:
results = win32file.ReadDirectoryChangesW (
hDir,
1024,
True,
win32con.FILE_NOTIFY_CHANGE_FILE_NAME |
win32con.FILE_NOTIFY_CHANGE_DIR_NAME |
win32con.FILE_NOTIFY_CHANGE_ATTRIBUTES |
win32con.FILE_NOTIFY_CHANGE_SIZE |
win32con.FILE_NOTIFY_CHANGE_LAST_WRITE |
win32con.FILE_NOTIFY_CHANGE_SECURITY,
None,
None
)
for action, file in results:
full_filename = os.path.join (path_to_watch, file)
#check if this file is ignored:
if True in [x in full_filename for x in self.ignoreabsolutepaths]:
continue
event = self.MyEvent(os.path.isdir(file), file, action)
if action == 1 or action == 5: #CREATE or MOVE_TO
self.handler._queue_action(event, cmd_add, [event.pathname])
elif action == 2 or action == 4: #DELETE or MOVE_FROM
if os.path.exists(event.pathname):
logging.debug('Ignoring file delete event on %s, as it still exists - it was probably immediately re-created by the application', event.pathname)
continue
self.handler._queue_action(event, cmd_rm, [event.pathname])
elif action == 3: #MODIFY:
self.handler._queue_action(event, cmd_modify, [event.pathname])
if detected_os == "MAC_OS":
class MacOSFileChangeHandlerAdapter(threading.Thread):
def __init__(self, path, ignoreabsolutepaths, handler):
threading.Thread.__init__(self)
self.handler = handler
self.ignoreabsolutepaths = ignoreabsolutepaths
self.path = path
# This is to mimic the event-type of inotify
class MyEvent():
def __init__(self, dir, pathname, action):
self.dir = dir
self.pathname = pathname
masks = { # from doc : http://developer.apple.com/library/mac/#documentation/Darwin/Reference/FSEvents_Ref/FSEvents_h/index.html#HeaderDoc_enums
256:"IN_CREATE", # created
512:"IN_DELETE", # removed
# in doc, but don't seem to be used, included to prevent potential bug
2048:"IN_MODIFY", # renamed
4096:"IN_MODIFY", # modified
0x00000400:'InodeMetaMod',
0x00002000:'FinderInfoMod',
0x00004000:'ChangeOwner',
0x00008000:'XattrMod',
# not in doc, but actually used
64:"IN_DELETE", # before rename
128:"IN_CREATE", # after rename
2:"IN_MODIFY",
}
self.maskname = masks[action]
print self.maskname
def __call__(self, event):
for ignoreabsolutepath in self.ignoreabsolutepaths:
if event.name.startswith(ignoreabsolutepath):
return
event = self.MyEvent(os.path.isdir(event.name), event.name, event.mask)
if event.maskname == "IN_CREATE": #CREATE or MOVE_TO
self.handler._queue_action(event, cmd_add, [event.pathname], act_on_dirs=True)
elif event.maskname == "IN_DELETE": #DELETE or MOVE_FROM
if os.path.exists(event.pathname):
logging.debug('Ignoring file delete event on %s, as it still exists - it was probably immediately re-created by the application', event.pathname)
return
self.handler._queue_action(event, cmd_rm, [event.pathname], act_on_dirs=True)
elif event.maskname == "IN_MODIFY": #MODIFY:
self.handler._queue_action(event, cmd_modify, [event.pathname], act_on_dirs=True)
def run(self):
observer = Observer()
observer.start()
#handler = self.process_event(self)
stream = Stream(self, self.path, file_events=True)
observer.schedule(stream)
def initialize_win32notify(path, ignoreabsolutepaths, handler): # Windows
adapter = WindowsFileChangeHandlerAdapter(path, ignoreabsolutepaths, handler)
adapter.daemon = True
adapter.start()
def initialize_fsevents(path, ignoreabsolutepaths, handler): # Mac OS
adapter = MacOSFileChangeHandlerAdapter(path, ignoreabsolutepaths, handler)
adapter.daemon = True
adapter.start()
def initialize_inotify(ignoreabsolutepaths, path, ignorefilepatterns, readfrequency, handler): # Linux
excl = pyinotify.ExcludeFilter(ignoreabsolutepaths)
wm = pyinotify.WatchManager()
# TODO: frequency doesn't work....
notifier = pyinotify.ThreadedNotifier(wm, LinuxFileChangeHandlerAdapter(handler = handler), read_freq=readfrequency)
#notifier = pyinotify.ThreadedNotifier(wm, handler)
# coalescing events needs pyinotify >= 0.9, so make this optional
try:
notifier.coalesce_events()
except AttributeError as e:
logging.warning('Cannot coalesce events, pyinotify does not seem to support it (maybe too old): %s', e)
mask = pyinotify.IN_DELETE | pyinotify.IN_CREATE | pyinotify.IN_CLOSE_WRITE | pyinotify.IN_ATTRIB | pyinotify.IN_MOVED_FROM | pyinotify.IN_MOVED_TO | pyinotify.IN_DONT_FOLLOW | pyinotify.IN_ONLYDIR
try:
logging.debug('Adding recursive, auto-adding watch for path %s with event mask %d', path, mask)
wd = wm.add_watch(path, mask, rec=True, auto_add=True, quiet=False, exclude_filter=excl)
if wd <= 0:
logging.warning('Unable to add watch for path %s - this will not work', path)
except pyinotify.WatchManagerError, e:
logging.warning("pyinotify.WatchManagerError: %s, %s", e, e.wmd)
logging.info('Start monitoring %s (type c^c to exit)', path)
notifier.start()
def config_get (section, option, optional=False):
ret = None
try:
ret = config.get(section, option)
except ConfigParser.NoSectionError:
if not optional:
printmsg ("Configuration error", "Could not load section %s from configuration at %s" % (section, config_locations), level=logging.ERROR)
sys.exit(2)
except ConfigParser.NoOptionError:
if not optional:
printmsg ("Configuration error", "Could not load option %s from section %s from configuration at %s" % (option, section, config_locations), level=logging.ERROR)
sys.exit(2)
except ConfigParser.ParsingError:
printmsg ("Configuration error", "Could not parse configuration at %s" % config_locations, level=logging.ERROR)
sys.exit(2)
return ret
def signal_handler(signal, frame):
logging.info('You pressed Ctrl+C, exiting gracefully!')
if notifier:
notifier.stop()
if bot:
bot.stop_serving()
# also remove the pidfile after a clean shutdown
if pidfile and os.path.exists(pidfile):
os.remove(pidfile)
sys.exit(0)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
# try to set up desktop notification, first for KDE4, then for Gnome
# the signature is not correct, so rely on pynotify only at the moment
#try:
#import dbus
#knotify = dbus.SessionBus().get_object("org.kde.knotify", "/Notify")
#knotify.event("warning", "autosync application", [],
#'KDE4 notification initialized', 'Initialized KDE4 desktop notification via DBUS',
#[], [], 0, dbus_interface='org.kde.KNotify')
#desktopnotifykde = True
#except:
#print 'KDE4 KNotify does not seem to run or dbus is not installed'
try:
import pynotify, gtk
try:
icon = gtk.IconTheme().load_icon('dvcs-autosync', 48, gtk.ICON_LOOKUP_GENERIC_FALLBACK)
except:
icon = None
if pynotify.init('autosync application'):
logging.info('pynotify initialized successfully, will use desktop notifications')
desktopnotifygnome = True
else:
logging.warning('there was a problem initializing the pynotify module')
except:
logging.info('pynotify does not seem to be installed')
try:
import Growl
gnotifyImagePath = os.path.abspath('/usr/share/icons/hicolor/48x48/apps/dvcs-autosync.png')
if os.path.exists(gnotifyImagePath):
gnotifyIcon = Growl.Image.imageFromPath(gnotifyImagePath)
else:
gnotifyIcon = None
gnotify = Growl.GrowlNotifier( "AutoSync", ["Every notifications"], applicationIcon=gnotifyIcon)
gnotify.register()
logging.info('Growl initialized successfully, will use desktop notifications')
desktopnotifygrowl = True
except:
logging.info('Growl does not seem to be installed')
config = ConfigParser.RawConfigParser()
defaultcfgpath = os.path.expanduser('~/.autosync')
if len(sys.argv) >= 2:
config_locations = [sys.argv[1], defaultcfgpath]
else:
config_locations = [defaultcfgpath]
read_configfiles = config.read(config_locations)
if len(read_configfiles) == 0:
logging.error('No config file specified or config file(s) %s could not be opened' % config_locations)
sys.exit(10)
pathstr = config_get('autosync', 'path')
path = os.path.normpath(os.path.expanduser(pathstr))
if os.path.isdir(path):
logging.info('Watching path %s', path)
else:
logging.error('path %s (expanded from %s) does not exist', path, pathstr)
sys.exit(100)
# ensure that the script is not running twice with the same config file
pidfile = config_get('autosync', 'pidfile', optional=True)
if not pidfile:
# default pidfile name if not specified in config
pidfile = read_configfiles[0] + '.pid'
pidfile = os.path.normpath(os.path.expanduser(pidfile))
logging.debug('Checking/writing pidfile %s' % pidfile)
# does the file already exist?
if os.access(pidfile, os.F_OK):
# check if a process with that PID is still running
pidfd = open(pidfile)
pidfd.seek(0)
old_pid = pidfd.readline()
# Now we check the PID from lock file matches to the current
# process PID
if os.path.exists("/proc/%s" % old_pid):
logging.error('DVCS-autosync already running with config file %s under PID %s, exiting now' % (read_configfiles[0], old_pid))
sys.exit(9)
else:
logging.warning('PID file %s already exists, but no process seems to be running, removing file now' % pidfile)
os.remove(pidfile)
# if we get to here, process is not running and pidfile doesn't exist (anymore)
cur_pid = str(os.getpid())
pidfd = open(pidfile, 'w')
pidfd.write(cur_pid)
pidfd.close()
ignorepaths = config_get('autosync', 'ignorepath')
readfrequency = int(config_get('autosync', 'readfrequency'))
coalesce_seconds = 2
syncmethod = config_get('autosync', 'syncmethod')
    # the desktopnotify* flags were already set in the pynotify/Growl blocks
    # above, but the notifymethod option can still override them here
notifymethod = config_get('autosync', 'notifymethod', optional=True)
if not notifymethod or notifymethod == 'desktop':
xmppnotify = False
logging.info('Using only desktop notification')
elif notifymethod == 'xmpp':
xmppnotify = True
desktopnotifygnome = False
desktopnotifykde = False
desktopnotifygrowl = False
logging.info('Using only XMPP notification')
elif notifymethod == 'all':
xmppnotify = True
logging.info('Using all notification methods')
elif notifymethod == 'none':
xmppnotify = False
desktopnotifygnome = False
desktopnotifykde = False
desktopnotifygrowl = False
logging.info('Disabling all notification methods, will only log to console')
else:
logging.warning('Unknown notifymethod "%s" configured, will keep default (desktop)', notifymethod)
pulllock = config_get('autosync', 'pulllock')
if pulllock == 'conservative':
conservative_pull_lock = True
elif pulllock == 'optimized':
conservative_pull_lock = False
logging.error('Optimized pull strategy not fully implemented yet (event replay queue missing)')
sys.exit(101)
else:
logging.error('Unknown pull lock strategy %s, please use either conservative or optimized', pulllock)
sys.exit(100)
# Read required DVCS commands
cmd_status = config_get('dvcs', 'statuscmd')
cmd_startup = config_get('dvcs', 'startupcmd')
cmd_commit = config_get('dvcs', 'commitcmd')
cmd_push = config_get('dvcs', 'pushcmd')
cmd_pull = config_get('dvcs', 'pullcmd')
cmd_add = config_get('dvcs', 'addcmd')
cmd_rm = config_get('dvcs', 'rmcmd')
cmd_modify = config_get('dvcs', 'modifycmd')
cmd_move = config_get('dvcs', 'movecmd')
cmd_remoteurl = config_get('dvcs', 'remoteurlcmd')
# TODO: this is currently git-specific, should be configurable
ignorefile = os.path.join(path, '.gitignore')
# load the patterns and match them internally with fnmatch
if os.path.exists(ignorefile):
f = open(ignorefile, 'r')
ignorefilepatterns = [pat.strip() for pat in f.readlines()]
f.close()
else:
ignorefilepatterns = []
    # (unfortunately, pyinotify.ExcludeFilter can't be used for this, because it
    # expects regular expressions while .gitignore uses glob patterns)
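    # Illustrative example (the pattern is hypothetical): a .gitignore line such
    # as '*.swp' is matched against changed paths with fnmatch, so editor swap
    # files would be skipped by the change handler.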
logging.info('Ignoring files matching any of the patterns %s', ' '.join(ignorefilepatterns))
    # We can, however, use the inotify exclude filter with our own pathname
    # excludes. The watch path has to be prepended, because the excludes must be
    # absolute path names.
ignoreabsolutepaths = [os.path.normpath(path + os.sep + ignorepath) for ignorepath in ignorepaths.split()]
logging.info('Adding list to inotify exclude filter: %s', ignoreabsolutepaths)
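    # For illustration only (values are hypothetical): with path=/home/user/sync
    # and ignorepath=.git .cache, this yields
    # ['/home/user/sync/.git', '/home/user/sync/.cache'].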
signal.signal(signal.SIGINT, signal_handler)
if syncmethod == 'xmpp':
username = config_get('xmpp', 'username')
password = config_get('xmpp', 'password')
alsonotify = config_get('xmpp', 'alsonotify', optional=True)
if xmppnotify and not alsonotify:
            logging.warning('XMPP notification requested, but alsonotify option not configured, will not send XMPP notifications')
if (detected_os == "LINUX") or (detected_os == "MAC_OS"):
hostname = os.uname()[1]
elif detected_os == "WINDOWS":
hostname = win32api.GetComputerName()
else:
hostname = "UNSUPPORTED_OS"
res = 'AutosyncJabberBot on %s' % hostname
try:
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=DeprecationWarning)
bot = AutosyncJabberBot(username, password, res=res, debug=False, ignoreownmsg=False)
bot.start_serving()
bot.send(username, 'login %s' % res)
printmsg('Autosync Jabber login successful', 'Successfully logged into account %s' % username)
except Exception as e:
logging.error("Exception %s: %s", type(e), e)
printmsg('Autosync Jabber login failed', 'Could not login to Jabber account %s. Will not announce pushes to other running autosync instances.' % username)
elif syncmethod == 'autosync-server':
logging.error('Alternative autosync-server sync method not fully implemented yet')
sys.exit(101)
elif syncmethod == 'none':
logging.info('Synchronization method none configured, will not attempt to synchronize with any repository')
else:
printmsg('No synchronization method configured', 'No or unknown syncmethod configured, will not attempt to synchronize with any repository', level=logging.WARNING)
printmsg('autosync starting', 'Initialization of local file notifications and Jabber login done, starting main loop')
handler = FileChangeHandler(cwd=path, ignored=ignorefilepatterns)
# this is a central lock for guarding repository operations
lock = threading.RLock()
if detected_os == "LINUX":
initialize_inotify(ignoreabsolutepaths, path, ignorefilepatterns, readfrequency, handler)
elif detected_os == "WINDOWS":
initialize_win32notify(path, ignoreabsolutepaths, handler)
elif detected_os == "MAC_OS":
initialize_fsevents(path, ignoreabsolutepaths, handler)
else:
pass #TODO
    logging.info('Executing startup synchronization')
if syncmethod != 'none':
handler.protected_pull()
if not conservative_pull_lock or syncmethod == 'none':
# only need to run the startup command here when not using conservative pull locking - otherwise the protected_pull will already do it
handler.startup()
logging.info('----------------------------------------------------------------')
while True:
time.sleep(10)
dvcs-autosync-0.5/dvcs-autosync.desktop 0000644 0000000 0000000 00000000601 11625302736 015245 0 ustar [Desktop Entry]
Name=DVCS-Autosync
GenericName=File Synchronizer based on Distributed Version Control
Comment=Synchronize and version your files across multiple computers with distributed version control (by default with Git)
Exec=/usr/share/dvcs-autosync/autosync-xdg-launcher.sh
Terminal=false
Type=Application
Icon=dvcs-autosync
Categories=Network;FileTransfer;
StartupNotify=false
dvcs-autosync-0.5/growl-python/ 0000755 0000000 0000000 00000000000 11625302736 013524 5 ustar dvcs-autosync-0.5/growl-python/Growl.py 0000644 0000000 0000000 00000016436 11625302736 015202 0 ustar """
A Python module that enables posting notifications to the Growl daemon.
See http://growl.info for more information.
"""
__version__ = "0.7"
__author__ = "Mark Rowe "
__copyright__ = "(C) 2003 Mark Rowe . Released under the BSD license."
__contributors__ = ["Ingmar J Stein (Growl Team)",
"Rui Carmo (http://the.taoofmac.com)",
"Jeremy Rossi ",
"Peter Hosey (Growl Team)",
]
import _growl
import types
import struct
import hashlib
import socket
GROWL_UDP_PORT=9887
GROWL_PROTOCOL_VERSION=1
GROWL_TYPE_REGISTRATION=0
GROWL_TYPE_NOTIFICATION=1
GROWL_APP_NAME="ApplicationName"
GROWL_APP_ICON="ApplicationIcon"
GROWL_NOTIFICATIONS_DEFAULT="DefaultNotifications"
GROWL_NOTIFICATIONS_ALL="AllNotifications"
GROWL_NOTIFICATIONS_USER_SET="AllowedUserNotifications"
GROWL_NOTIFICATION_NAME="NotificationName"
GROWL_NOTIFICATION_TITLE="NotificationTitle"
GROWL_NOTIFICATION_DESCRIPTION="NotificationDescription"
GROWL_NOTIFICATION_ICON="NotificationIcon"
GROWL_NOTIFICATION_APP_ICON="NotificationAppIcon"
GROWL_NOTIFICATION_PRIORITY="NotificationPriority"
GROWL_NOTIFICATION_STICKY="NotificationSticky"
GROWL_APP_REGISTRATION="GrowlApplicationRegistrationNotification"
GROWL_APP_REGISTRATION_CONF="GrowlApplicationRegistrationConfirmationNotification"
GROWL_NOTIFICATION="GrowlNotification"
GROWL_SHUTDOWN="GrowlShutdown"
GROWL_PING="Honey, Mind Taking Out The Trash"
GROWL_PONG="What Do You Want From Me, Woman"
GROWL_IS_READY="Lend Me Some Sugar; I Am Your Neighbor!"
growlPriority = {"Very Low":-2,"Moderate":-1,"Normal":0,"High":1,"Emergency":2}
class netgrowl:
"""Builds a Growl Network Registration packet.
Defaults to emulating the command-line growlnotify utility."""
__notAllowed__ = [GROWL_APP_ICON, GROWL_NOTIFICATION_ICON, GROWL_NOTIFICATION_APP_ICON]
def __init__(self, hostname, password ):
self.hostname = hostname
self.password = password
self.socket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
def send(self, data):
self.socket.sendto(data, (self.hostname, GROWL_UDP_PORT))
def PostNotification(self, userInfo):
if userInfo.has_key(GROWL_NOTIFICATION_PRIORITY):
priority = userInfo[GROWL_NOTIFICATION_PRIORITY]
else:
priority = 0
if userInfo.has_key(GROWL_NOTIFICATION_STICKY):
sticky = userInfo[GROWL_NOTIFICATION_STICKY]
else:
sticky = False
data = self.encodeNotify(userInfo[GROWL_APP_NAME],
userInfo[GROWL_NOTIFICATION_NAME],
userInfo[GROWL_NOTIFICATION_TITLE],
userInfo[GROWL_NOTIFICATION_DESCRIPTION],
priority,
sticky)
return self.send(data)
def PostRegistration(self, userInfo):
data = self.encodeRegistration(userInfo[GROWL_APP_NAME],
userInfo[GROWL_NOTIFICATIONS_ALL],
userInfo[GROWL_NOTIFICATIONS_DEFAULT])
return self.send(data)
def encodeRegistration(self, application, notifications, defaultNotifications):
data = struct.pack("!BBH",
GROWL_PROTOCOL_VERSION,
GROWL_TYPE_REGISTRATION,
len(application) )
data += struct.pack("BB",
len(notifications),
len(defaultNotifications) )
data += application
for i in notifications:
encoded = i.encode("utf-8")
data += struct.pack("!H", len(encoded))
data += encoded
for i in defaultNotifications:
data += struct.pack("B", i)
return self.encodePassword(data)
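    # Notification packets follow version 1 of the Growl UDP protocol: a
    # "!BBHHHHH" header carries the protocol version, packet type, flags
    # (priority and sticky bits) and the lengths of the notification name,
    # title, description and application name; the UTF-8 encoded payloads
    # follow, and encodePassword() appends an MD5 checksum of the packet
    # (with the password mixed into the hash when one is set).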
def encodeNotify(self, application, notification, title, description,
priority = 0, sticky = False):
application = application.encode("utf-8")
notification = notification.encode("utf-8")
title = title.encode("utf-8")
description = description.encode("utf-8")
flags = (priority & 0x07) * 2
if priority < 0:
flags |= 0x08
if sticky:
flags = flags | 0x0001
data = struct.pack("!BBHHHHH",
GROWL_PROTOCOL_VERSION,
GROWL_TYPE_NOTIFICATION,
flags,
len(notification),
len(title),
len(description),
len(application) )
data += notification
data += title
data += description
data += application
return self.encodePassword(data)
def encodePassword(self, data):
checksum = hashlib.md5()
checksum.update(data)
if self.password:
checksum.update(self.password)
data += checksum.digest()
return data
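# _ImageHook is a lazy-loading metaclass: the first time any attribute of the
# placeholder Image class below is looked up, it imports the real Image
# implementation from the _growlImage extension and delegates to it.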
class _ImageHook(type):
def __getattribute__(self, attr):
global Image
if Image is self:
from _growlImage import Image
return getattr(Image, attr)
class Image(object):
__metaclass__ = _ImageHook
class _RawImage(object):
def __init__(self, data): self.rawImageData = data
class GrowlNotifier(object):
"""
A class that abstracts the process of registering and posting
notifications to the Growl daemon.
You can either pass `applicationName', `notifications',
`defaultNotifications' and `applicationIcon' to the constructor
or you may define them as class-level variables in a sub-class.
    `defaultNotifications' is optional and defaults to the value of
    `notifications'. `applicationIcon' is also optional, but it defaults to a
    generic placeholder icon, so it is better to specify one.
"""
applicationName = 'GrowlNotifier'
notifications = []
defaultNotifications = []
applicationIcon = None
_notifyMethod = _growl
def __init__(self, applicationName=None, notifications=None, defaultNotifications=None, applicationIcon=None, hostname=None, password=None):
if applicationName:
self.applicationName = applicationName
assert self.applicationName, 'An application name is required.'
if notifications:
self.notifications = list(notifications)
assert self.notifications, 'A sequence of one or more notification names is required.'
if defaultNotifications is not None:
self.defaultNotifications = list(defaultNotifications)
elif not self.defaultNotifications:
self.defaultNotifications = list(self.notifications)
if applicationIcon is not None:
self.applicationIcon = self._checkIcon(applicationIcon)
elif self.applicationIcon is not None:
self.applicationIcon = self._checkIcon(self.applicationIcon)
if hostname is not None and password is not None:
self._notifyMethod = netgrowl(hostname, password)
elif hostname is not None or password is not None:
raise KeyError, "Hostname and Password are both required for a network notification"
def _checkIcon(self, data):
if isinstance(data, str):
return _RawImage(data)
else:
return data
def register(self):
if self.applicationIcon is not None:
self.applicationIcon = self._checkIcon(self.applicationIcon)
regInfo = {GROWL_APP_NAME: self.applicationName,
GROWL_NOTIFICATIONS_ALL: self.notifications,
GROWL_NOTIFICATIONS_DEFAULT: self.defaultNotifications,
GROWL_APP_ICON:self.applicationIcon,
}
self._notifyMethod.PostRegistration(regInfo)
def notify(self, noteType, title, description, icon=None, sticky=False, priority=None):
assert noteType in self.notifications
notifyInfo = {GROWL_NOTIFICATION_NAME: noteType,
GROWL_APP_NAME: self.applicationName,
GROWL_NOTIFICATION_TITLE: title,
GROWL_NOTIFICATION_DESCRIPTION: description,
}
if sticky:
notifyInfo[GROWL_NOTIFICATION_STICKY] = 1
if priority is not None:
notifyInfo[GROWL_NOTIFICATION_PRIORITY] = priority
if icon:
notifyInfo[GROWL_NOTIFICATION_ICON] = self._checkIcon(icon)
self._notifyMethod.PostNotification(notifyInfo)
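# Illustrative usage (a sketch, not part of the original module; the names
# mirror how dvcs-autosync itself uses this class):
#
#   notifier = GrowlNotifier("AutoSync", ["Every notifications"])
#   notifier.register()
#   notifier.notify("Every notifications", "autosync", "repository synchronized")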
dvcs-autosync-0.5/growl-python/growlImage.m 0000644 0000000 0000000 00000020250 11625302736 015776 0 ustar /*
* Copyright 2004 Mark Rowe
* Released under the BSD license.
*/
#include "Python.h"
#import <Cocoa/Cocoa.h>
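/*
 * _growlImage wraps an NSImage in a small Python extension type.  It offers
 * class-method constructors (imageFromPath, imageWithData and several icon
 * helpers) and a single 'rawImageData' attribute that returns the image's
 * TIFF representation as a byte string, which is what Growl.py and _growl
 * ultimately hand over to GrowlHelperApp.
 */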
typedef struct
{
PyObject_HEAD
NSImage *theImage;
} ImageObject;
static PyTypeObject ImageObject_Type;
#define ImageObject_Check(v) ((v)->ob_type == &ImageObject_Type)
static ImageObject *
newImageObject(NSImage *img)
{
ImageObject *self;
if (! img)
{
PyErr_SetString(PyExc_TypeError, "Invalid image.");
return NULL;
}
self = PyObject_New(ImageObject, &ImageObject_Type);
if (! self)
return NULL;
self->theImage = [img retain];
return self;
}
static void
ImageObject_dealloc(ImageObject *self)
{
PyObject_Del(self);
}
static PyObject *
ImageObject_getAttr(PyObject *self, PyObject *attr)
{
char *theAttr = PyString_AsString(attr);
NSAutoreleasePool *pool = nil;
if (strcmp(theAttr, "rawImageData") == 0)
{
pool = [[NSAutoreleasePool alloc] init];
NSData *imageData = [((ImageObject *) self)->theImage TIFFRepresentation];
PyObject *pyImageData = PyString_FromStringAndSize([imageData bytes], [imageData length]);
[pool release];
return pyImageData;
}
else
return PyObject_GenericGetAttr(self, attr);
}
static PyObject *
ImageObject_imageFromPath(PyTypeObject *cls, PyObject *args)
{
ImageObject *self;
char *fileName_ = NULL;
NSString *fileName = nil;
NSImage *theImage = nil;
NSAutoreleasePool *pool = nil;
if (! PyArg_ParseTuple(args, "et:imageFromPath",
Py_FileSystemDefaultEncoding, &fileName_))
return NULL;
pool = [[NSAutoreleasePool alloc] init];
fileName = [NSString stringWithUTF8String:fileName_];
theImage = [[[NSImage alloc] initWithContentsOfFile:fileName] autorelease];
self = newImageObject(theImage);
[pool release];
return (PyObject *) self;
}
static PyObject *
ImageObject_imageWithData(PyTypeObject *cls, PyObject *args)
{
ImageObject *self;
char *imageData = NULL;
int imageDataSize = 0;
NSImage *theImage = nil;
NSAutoreleasePool *pool = nil;
if (! PyArg_ParseTuple(args, "s#:imageWithData",
&imageData, &imageDataSize))
return NULL;
pool = [[NSAutoreleasePool alloc] init];
theImage = [[[NSImage alloc] initWithData:[NSData dataWithBytes:imageData
length:imageDataSize]] autorelease];
self = newImageObject(theImage);
[pool release];
return (PyObject *) self;
}
static PyObject *
ImageObject_imageWithIconForFile(PyTypeObject *cls, PyObject *args)
{
ImageObject *self;
char *fileName_ = NULL;
NSString *fileName = nil;
NSImage *theImage = nil;
NSAutoreleasePool *pool = nil;
if (! PyArg_ParseTuple(args, "et:imageWithIconForFile",
Py_FileSystemDefaultEncoding, &fileName_))
return NULL;
pool = [[NSAutoreleasePool alloc] init];
fileName = [NSString stringWithUTF8String:fileName_];
theImage = [[NSWorkspace sharedWorkspace] iconForFile:fileName];
self = newImageObject(theImage);
[pool release];
return (PyObject *) self;
}
static PyObject *
ImageObject_imageWithIconForFileType(PyTypeObject *cls, PyObject *args)
{
ImageObject *self;
char *fileType = NULL;
NSImage *theImage = nil;
NSAutoreleasePool *pool = nil;
if (! PyArg_ParseTuple(args, "s:imageWithIconForFileType",
&fileType))
return NULL;
pool = [[NSAutoreleasePool alloc] init];
theImage = [[NSWorkspace sharedWorkspace] iconForFileType:[NSString stringWithUTF8String:fileType]];
self = newImageObject(theImage);
[pool release];
return (PyObject *) self;
}
static PyObject *
ImageObject_imageWithIconForCurrentApplication(PyTypeObject *cls, PyObject *args)
{
ImageObject *self;
NSAutoreleasePool *pool = nil;
if (! PyArg_ParseTuple(args, ":imageWithIconForCurrentApplication"))
return NULL;
pool = [[NSAutoreleasePool alloc] init];
self = newImageObject([NSApp applicationIconImage]);
[pool release];
return (PyObject *) self;
}
static PyObject *
ImageObject_imageWithIconForApplication(PyTypeObject *cls, PyObject *args)
{
ImageObject *self;
char *appName_ = NULL;
NSString *appName = nil;
NSString *appPath = nil;
NSImage *theImage = nil;
NSAutoreleasePool *pool = nil;
if (! PyArg_ParseTuple(args, "et:imageWithIconForApplication",
Py_FileSystemDefaultEncoding, &appName_))
return NULL;
pool = [[NSAutoreleasePool alloc] init];
appName = [NSString stringWithUTF8String:appName_];
appPath = [[NSWorkspace sharedWorkspace] fullPathForApplication:appName];
if (! appPath)
{
PyErr_Format(PyExc_RuntimeError, "Application named '%s' not found", appName_);
self = NULL;
goto done;
}
theImage = [[NSWorkspace sharedWorkspace] iconForFile:appPath];
self = newImageObject(theImage);
done:
[pool release];
return (PyObject *) self;
}
static PyMethodDef ImageObject_methods[] = {
{"imageFromPath", (PyCFunction)ImageObject_imageFromPath, METH_VARARGS | METH_CLASS},
{"imageWithData", (PyCFunction)ImageObject_imageWithData, METH_VARARGS | METH_CLASS},
{"imageWithIconForFile", (PyCFunction)ImageObject_imageWithIconForFile, METH_VARARGS | METH_CLASS},
{"imageWithIconForFileType", (PyCFunction)ImageObject_imageWithIconForFileType, METH_VARARGS | METH_CLASS},
{"imageWithIconForCurrentApplication", (PyCFunction)ImageObject_imageWithIconForCurrentApplication, METH_VARARGS | METH_CLASS},
{"imageWithIconForApplication", (PyCFunction)ImageObject_imageWithIconForApplication, METH_VARARGS | METH_CLASS},
{NULL, NULL} /* sentinel */
};
static PyTypeObject ImageObject_Type = {
PyObject_HEAD_INIT(NULL)
0, /*ob_size*/
"_growlImage.Image", /*tp_name*/
sizeof(ImageObject), /*tp_basicsize*/
0, /*tp_itemsize*/
/* methods */
(destructor)ImageObject_dealloc, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
0, /*tp_compare*/
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
ImageObject_getAttr, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_CLASS, /*tp_flags*/
0, /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
ImageObject_methods, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
PyType_GenericAlloc, /*tp_alloc*/
0, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
};
static PyMethodDef _growlImage_methods[] = {
{NULL, NULL}
};
PyMODINIT_FUNC
init_growlImage(void)
{
PyObject *m;
if (PyType_Ready(&ImageObject_Type) < 0)
return;
m = Py_InitModule("_growlImage", _growlImage_methods);
PyModule_AddObject(m, "Image", (PyObject *)&ImageObject_Type);
}
dvcs-autosync-0.5/growl-python/libgrowl.c 0000644 0000000 0000000 00000013467 11625302736 015524 0 ustar /*
* Copyright 2004-2005 The Growl Project.
* Created by Jeremy Rossi
* Released under the BSD license.
*/
#include <Python.h>
#include <CoreFoundation/CoreFoundation.h>
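/*
 * growl_PostDictionary converts a Python dict into a CFDictionary and posts it
 * as a distributed notification under the given name, which a running
 * GrowlHelperApp observes.  Supported value types are str, unicode, int, None,
 * lists of strings, and objects exposing a string 'rawImageData' attribute.
 */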
static PyObject * growl_PostDictionary(CFStringRef name, PyObject *self, PyObject *args) {
int i, j;
PyObject *inputDict;
PyObject *pKeys = NULL;
PyObject *pKey, *pValue;
CFMutableDictionaryRef note = CFDictionaryCreateMutable(kCFAllocatorDefault,
/*capacity*/ 0,
&kCFTypeDictionaryKeyCallBacks,
&kCFTypeDictionaryValueCallBacks);
if (!PyArg_ParseTuple(args, "O!", &PyDict_Type, &inputDict))
goto error;
pKeys = PyDict_Keys(inputDict);
for (i = 0; i < PyList_Size(pKeys); ++i) {
CFStringRef convertedKey;
		/* Convert the PyDict key to a CFString to be used as the key in note */
pKey = PyList_GetItem(pKeys, i);
if (!pKey)
// Exception already set
goto error;
pValue = PyDict_GetItem(inputDict, pKey);
if (!pValue) {
			// XXX Need a real error message here.
PyErr_SetString(PyExc_TypeError," ");
goto error;
}
if (PyUnicode_Check(pKey)) {
convertedKey = CFStringCreateWithBytes(kCFAllocatorDefault,
(const UInt8 *)PyUnicode_AS_DATA(pKey),
PyUnicode_GET_DATA_SIZE(pKey),
kCFStringEncodingUnicode,
false);
} else if (PyString_Check(pKey)) {
convertedKey = CFStringCreateWithCString(kCFAllocatorDefault,
PyString_AsString(pKey),
kCFStringEncodingUTF8);
} else {
PyErr_SetString(PyExc_TypeError,"The Dict keys must be strings/unicode");
goto error;
}
		/* Convert the PyDict value to a CFString, CFNumber, CFData or CFArray depending on its Python type */
if (PyString_Check(pValue)) {
CFStringRef convertedValue = CFStringCreateWithCString(kCFAllocatorDefault,
PyString_AS_STRING(pValue),
kCFStringEncodingUTF8);
CFDictionarySetValue(note, convertedKey, convertedValue);
CFRelease(convertedValue);
} else if (PyUnicode_Check(pValue)) {
CFStringRef convertedValue = CFStringCreateWithBytes(kCFAllocatorDefault,
(const UInt8 *)PyUnicode_AS_DATA(pValue),
PyUnicode_GET_DATA_SIZE(pValue),
kCFStringEncodingUnicode,
false);
CFDictionarySetValue(note, convertedKey, convertedValue);
CFRelease(convertedValue);
} else if (PyInt_Check(pValue)) {
long v = PyInt_AS_LONG(pValue);
CFNumberRef convertedValue = CFNumberCreate(kCFAllocatorDefault,
kCFNumberLongType,
&v);
CFDictionarySetValue(note, convertedKey, convertedValue);
CFRelease(convertedValue);
} else if (pValue == Py_None) {
CFDataRef convertedValue = CFDataCreate(kCFAllocatorDefault, NULL, 0);
CFDictionarySetValue(note, convertedKey, convertedValue);
CFRelease(convertedValue);
} else if (PyList_Check(pValue)) {
int size = PyList_Size(pValue);
CFMutableArrayRef listHolder = CFArrayCreateMutable(kCFAllocatorDefault,
size,
&kCFTypeArrayCallBacks);
for (j = 0; j < size; ++j) {
PyObject *lValue = PyList_GetItem(pValue, j);
if (PyString_Check(lValue)) {
CFStringRef convertedValue = CFStringCreateWithCString(kCFAllocatorDefault,
PyString_AS_STRING(lValue),
kCFStringEncodingUTF8);
CFArrayAppendValue(listHolder, convertedValue);
CFRelease(convertedValue);
} else if (PyUnicode_Check(lValue)) {
CFStringRef convertedValue = CFStringCreateWithBytes(kCFAllocatorDefault,
(const UInt8 *)PyUnicode_AS_DATA(lValue),
PyUnicode_GET_DATA_SIZE(lValue),
kCFStringEncodingUnicode,
false);
CFArrayAppendValue(listHolder, convertedValue);
CFRelease(convertedValue);
} else {
CFRelease(convertedKey);
PyErr_SetString(PyExc_TypeError,"The lists must only contain strings");
goto error;
}
}
CFDictionarySetValue(note, convertedKey, listHolder);
CFRelease(listHolder);
} else if (PyObject_HasAttrString(pValue, "rawImageData")) {
PyObject *lValue = PyObject_GetAttrString(pValue, "rawImageData");
if (!lValue) {
goto error;
} else if (PyString_Check(lValue)) {
CFDataRef convertedValue = CFDataCreate(kCFAllocatorDefault,
(const UInt8 *)PyString_AsString(lValue),
PyString_Size(lValue));
CFDictionarySetValue(note, convertedKey, convertedValue);
CFRelease(convertedValue);
} else {
CFRelease(convertedKey);
PyErr_SetString(PyExc_TypeError, "Icon with rawImageData attribute present must ensure it is a string.");
goto error;
}
} else {
CFRelease(convertedKey);
PyErr_SetString(PyExc_TypeError, "Value is not of Str/List");
goto error;
}
CFRelease(convertedKey);
}
Py_BEGIN_ALLOW_THREADS
CFNotificationCenterPostNotification(CFNotificationCenterGetDistributedCenter(),
/*name*/ name,
/*object*/ NULL,
/*userInfo*/ note,
/*deliverImmediately*/ false);
CFRelease(note);
Py_END_ALLOW_THREADS
Py_DECREF(pKeys);
Py_INCREF(Py_None);
return Py_None;
error:
CFRelease(note);
Py_XDECREF(pKeys);
return NULL;
}
static PyObject * growl_PostRegistration(PyObject *self, PyObject *args) {
return growl_PostDictionary(CFSTR("GrowlApplicationRegistrationNotification"), self, args);
}
static PyObject * growl_PostNotification(PyObject *self, PyObject *args) {
return growl_PostDictionary(CFSTR("GrowlNotification"), self, args);
}
static PyMethodDef GrowlMethods[] = {
{"PostNotification", growl_PostNotification, METH_VARARGS, "Send a notification to GrowlHelperApp"},
{"PostRegistration", growl_PostRegistration, METH_VARARGS, "Send a registration to GrowlHelperApp"},
{NULL, NULL, 0, NULL} /* Sentinel */
};
PyMODINIT_FUNC init_growl(void) {
Py_InitModule("_growl", GrowlMethods);
}
dvcs-autosync-0.5/growl-python/setup.py 0000644 0000000 0000000 00000001403 11625302736 015234 0 ustar #!/usr/bin/python
from distutils.core import setup, Extension
import sys
_growl = Extension('_growl',
extra_link_args = ["-framework","CoreFoundation"],
sources = ['libgrowl.c'])
_growlImage = Extension('_growlImage',
extra_link_args = ["-framework","Cocoa"],
sources = ['growlImage.m'])
if sys.platform.startswith("darwin"):
modules = [_growl, _growlImage]
else:
modules = []
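# The _growl and _growlImage extensions link against the CoreFoundation and
# Cocoa frameworks, so they are only built on Mac OS X; elsewhere just the pure
# Python Growl module is installed.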
setup(name="py-Growl",
version="0.0.7",
description="Python bindings for posting notifications to the Growl daemon",
author="Mark Rowe",
author_email="bdash@users.sourceforge.net",
url="http://growl.info",
py_modules=["Growl"],
ext_modules = modules )
dvcs-autosync-0.5/icons/ 0000755 0000000 0000000 00000000000 11625302736 012166 5 ustar dvcs-autosync-0.5/icons/16x16/ 0000755 0000000 0000000 00000000000 11625302736 012753 5 ustar dvcs-autosync-0.5/icons/16x16/dvcs-autosync.png 0000644 0000000 0000000 00000001137 11625302736 016265 0 ustar PNG