./gnumed-server.22.17/server/0000755000175000017500000000000014172057744014113 5ustar ncqncq./gnumed-server.22.17/server/gm-move_backups_offsite.sh0000755000175000017500000000452414172057743021254 0ustar ncqncq#!/bin/bash #============================================================== # # This script can be used to move backups to another host, IOW # storing them "offsite" in the loosest sense of the word. # # # Imagine the following situation: # # 1) a laptop running client and database which is # taken to the office, to patients, etc # 2) a desktop at home with some spare storage # 3) the laptop is occasionally connected to the home # network and thus has access to the desktop machine # # One could add the following two lines to the cron # script on the laptop to make sure database backups # are replicated to the desktop whenever the laptop # has access to it: # # @reboot /usr/bin/gm-move_backups_offsite.sh # 5 0-23 * * * /usr/bin/gm-move_backups_offsite.sh # # author: Karsten Hilbert # license: GPL v2 or later #============================================================== CONF="/etc/gnumed/gnumed-backup.conf" #============================================================== # There really should not be any need to # change anything below this line. #============================================================== # load config file if [ -r ${CONF} ] ; then . ${CONF} else echo "Cannot read configuration file ${CONF}. Aborting." exit 1 fi # sanity check if [ ! -d "${BACKUP_DIR}" ] ; then mkdir "${BACKUP_DIR}" fi LOG="${BACKUP_DIR}/backup.log" HOST=`hostname` BACKUP_FILE_GLOB="backup-*.bz2" # do not run concurrently if test "`ps ax | grep $0 | grep -v grep | grep -v $$`" != "" ; then echo "${HOST}: "`date`": transfer already in progress, exiting" >> ${LOG} exit fi # setup rsync arguments ARGS="--quiet --archive --partial" if test -n ${MAX_OFFSITING_BANDWITH} ; then ARGS="${ARGS} --bwlimit=${MAX_OFFSITING_BANDWIDTH}" fi if test "${OFFSITE_BY_CRC}" = "yes" ; then ARGS="${ARGS} --checksum" fi echo "$HOST: "`date`": attempting backup (rsync ${ARGS}) to ${OFFSITE_BACKUP_HOST}:${OFFSITE_BACKUP_DIR}" >> $LOG if ping -c 3 -i 2 $OFFSITE_BACKUP_HOST > /dev/null; then if rsync ${ARGS} ${BACKUP_DIR}/${BACKUP_FILE_GLOB} ${OFFSITE_BACKUP_HOST}:${OFFSITE_BACKUP_DIR} ; then echo "$HOST: "`date`": success" >> $LOG else echo "$HOST: "`date`": failure: cannot transfer files" >> $LOG fi else echo "$HOST: "`date`": failure: cannot reach target host" >> $LOG fi #============================================================== ./gnumed-server.22.17/server/gm-remove_person.sh0000755000175000017500000000445714172057743017747 0ustar ncqncq#!/bin/bash #============================================================== # This script can be used to remove a person # from a GNUmed database. # # author: Karsten Hilbert # license: GPL v2 or later #============================================================== SQL_FILE="/tmp/gm-remove_person.sql" #============================================================== # There really should not be any need to # change anything below this line. #============================================================== TARGET_DB="$1" PERSON_PK="$2" # You will need to understand what this does # before exerting the power of setting it. # # You may want to start studying here: # # http://en.wikipedia.org/wiki/Database_transaction # # Use the Source, Luke. 
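#
# A typical invocation might look like this (database name and
# person PK are examples only; without a third argument the
# script defaults to "rollback", so nothing is actually removed):
#
#    gm-remove_person.sh gnumed_v22 1234           # dry run, rolled back
#    gm-remove_person.sh gnumed_v22 1234 commit    # actually remove the person
#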
END_TX="$3" if test -z ${PERSON_PK} ; then echo "=============================================================" echo "usage: $0 " echo "" echo " : a GNUmed database (such as \"gnumed_vNN\")" echo " : primary key of a person in that database" echo "=============================================================" exit 1 fi if test -z ${END_TX} ; then END_TX="rollback" fi echo "" echo "Creating removal script ..." echo " ${SQL_FILE}" echo "" > $SQL_FILE ( cat <<-EOF -- GNUmed person removal script \set ON_ERROR_STOP 1 set default_transaction_read_only to off; begin; select dem.remove_person(${PERSON_PK}); ${END_TX}; EOF ) >> $SQL_FILE echo "" echo "Are you sure you want to remove the person #${PERSON_PK}" echo "*irrevocably* from the database \"${TARGET_DB}\" ?" echo "" read -e -p "Remove ? [yes / NO]: " if test "$REPLY" == "yes"; then echo "" echo "Removing person #${PERSON_PK} from database \"${TARGET_DB}\" ..." LOG="gm-remove_person.log" psql -a -U gm-dbo -d ${TARGET_DB} -f ${SQL_FILE} &> ${LOG} if test $? -ne 0 ; then echo "ERROR: failed to remove person." echo " see: ${LOG}" echo "" echo "-----------------------------------------------------" cat ${SQL_FILE} >> ${LOG} exit 1 fi if test "${END_TX}" != "commit"; then echo "" echo "This test seems fine. You should be good to go for real." echo "Learn about END_TX from the source of this script at:" echo "" echo $0 echo "" fi fi rm ${SQL_FILE} #============================================================== ./gnumed-server.22.17/server/gm-upgrade_server0000755000175000017500000000173714172057742017465 0ustar ncqncq#!/bin/sh #============================================================== # This wrapper is intended to be installed into a systemwide # admin-only executable directory such as "/usr/sbin/" # # It should be run as root and will call the upstream script # to actually upgrade an existing database. # # author: Andreas Tille, Karsten Hilbert # license: GPL v2 or later # #============================================================== # those probably need some adjustment from package maintainers: GM_LOG_BASE="/var/log/gnumed/server" GM_SERVER_DIR="/var/lib/gnumed/server/bootstrap" #============================================================== # do not run twice [ "${FLOCKER}" != "$0" ] && exec env FLOCKER="$0" flock --exclusive --nonblock "$0" "$0" "$@" || : cd ${GM_SERVER_DIR} || ( echo "=> Cannot change into ${GM_SERVER_DIR}. Aborting." ; exit 1 ) mkdir -p ${GM_LOG_BASE} export GM_LOG_BASE ./upgrade-db.sh "$@" #============================================================== ./gnumed-server.22.17/server/__init__.py0000644000175000017500000000000114172057744016213 0ustar ncqncq ./gnumed-server.22.17/server/gm-backup_database.sh0000755000175000017500000001636414172057743020155 0ustar ncqncq#!/bin/bash echo "Using <$0> is discouraged. Please switch to ." exit 1 #============================================================== # # This script creates an uncompressed, plain text (SQL) backup # of the database schema, data, and roles which can be used to # restore a GNUmed database from scratch with psql. # # You need to allow root to access the GNUmed database as # user "gm-dbo" by either editing pg_hba.conf or using a # .pgpass file. 
# # # anacron # ------- # The following line could be added to a system's # /etc/anacrontab to make sure it creates daily # database backups for GNUmed: # # 1 15 backup-gnumed- /usr/bin/gm-backup_database.sh # # # cron # ---- # add the following line to a crontab file to run a # database backup at 12:47 and 19:47 every day # # 47 12,19 * * * * /usr/bin/gm-backup_database.sh # # author: Karsten Hilbert # license: GPL v2 or later #============================================================== # Keep this properly updated to refer to the # database you want to currently backup. CONF="/etc/gnumed/gnumed-backup.conf" #============================================================== # There really should not be any need to # change anything below this line. #============================================================== # load config file if [ -r ${CONF} ] ; then . ${CONF} else echo "Cannot read configuration file ${CONF}. Aborting." exit 1 fi # switched off ? (database name empty) if [ "$GM_DATABASE" = "" ] ; then exit 0 fi # FIXME: check PORT/DBO/BACKUP_FILENAME too # sanity check # (his does not work on Mac, so you # may need to comment this out) if ! su -c "psql -t -l -p ${GM_PORT}" -l postgres | grep -q "^[[:space:]]*${GM_DATABASE}" ; then echo "The configuration in ${CONF} is set to backup" echo "the GNUmed database ${GM_DATABASE}. This" echo "database does not exist, however. Aborting." exit 1 fi # are we backing up the latest DB ? OUR_VER=`echo ${GM_DATABASE} | cut -f 2 -d v` if test -z ${GM_HOST} ; then HAS_HIGHER_VER=`sudo -u postgres psql -A -t -d ${GM_DATABASE} -p ${GM_PORT} -c "SELECT exists (select 1 from pg_database where datname like 'gnumed_v%' and substring(datname from 9 for 3)::integer > '${OUR_VER}');"` else HAS_HIGHER_VER=`sudo -u postgres psql -A -t -h ${GM_HOST} -d ${GM_DATABASE} -p ${GM_PORT} -c "SELECT exists (select 1 from pg_database where datname like 'gnumed_v%' and substring(datname from 9 for 3)::integer > '${OUR_VER}');"` fi; if test "${HAS_HIGHER_VER}" = "t" ; then echo "Backing up database ${GM_DATABASE}." echo "" echo "However, a newer database seems to exist:" echo "" sudo -u postgres psql -l -p ${GM_PORT} | grep gnumed_v echo "" echo "Make sure you really want to backup the older database !" fi ; # generate backup file name TS=`date +%Y-%m-%d-%H-%M-%S` if test -z ${GM_HOST} ; then BACKUP_BASENAME="backup-${GM_DATABASE}-${INSTANCE_OWNER}-"`hostname` else BACKUP_BASENAME="backup-${GM_DATABASE}-${INSTANCE_OWNER}-${GM_HOST}" fi ; BACKUP_FILENAME="${BACKUP_BASENAME}-${TS}" cd ${BACKUP_DIR} if test "$?" != "0" ; then echo "Cannot change into backup directory [${BACKUP_DIR}]. Aborting." exit 1 fi # create dumps if test -z ${GM_HOST} ; then # locally # -r -> -g for older versions sudo -u postgres pg_dumpall -r -v -p ${GM_PORT} > ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "-- -----------------------------------------------------" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "-- Below find a list of database roles which were in use" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "-- in the GNUmed database \"${GM_DATABASE}\"." >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "--" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "-- Only those need to be restored to create a working" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "-- copy of your original database. 
All other roles can" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "-- be commented out by prepending '-- ' to the relevant" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "-- lines above." >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "-- In particular, you will very very likely want to" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "-- comment out the 'postgres' role." >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "-- -----------------------------------------------------" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null ROLES=`psql -A -t -d ${GM_DATABASE} -p ${GM_PORT} -U ${GM_DBO} -c "select gm.get_users('${GM_DATABASE}');"` echo "-- ${ROLES}" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null pg_dump -C -v --column-inserts --disable-triggers -p ${GM_PORT} -U ${GM_DBO} -f ${BACKUP_FILENAME}-database.sql ${GM_DATABASE} 2> /dev/null else # remotely if ping -c 3 -i 2 ${GM_HOST} > /dev/null; then # -r -> -g for older versions pg_dumpall -r -v -h ${GM_HOST} -p ${GM_PORT} -U postgres > ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "-- -----------------------------------------------------" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "-- Below find a list of database roles which were in use" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "-- in the GNUmed database \"${GM_DATABASE}\"." >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "--" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "-- Only those need to be restored to create a working" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "-- copy of your original database. All other roles can" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "-- be commented out by prepending '-- ' to the relevant" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "-- lines above." >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "-- In particular, you will very very likely want to" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "-- comment out the 'postgres' role." >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "-- -----------------------------------------------------" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null echo "" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null ROLES=`psql -A -t -d ${GM_DATABASE} -p ${GM_PORT} -U ${GM_DBO} -c "select gm.get_users('${GM_DATABASE}');"` echo "-- ${ROLES}" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null pg_dump -C -v --column-inserts --disable-triggers -h ${GM_HOST} -p ${GM_PORT} -U ${GM_DBO} -f ${BACKUP_FILENAME}-database.sql ${GM_DATABASE} 2> /dev/null else echo "Cannot ping database host ${GM_HOST}." exit 1 fi ; fi ; # tar and test it if test -z ${VERIFY_TAR} ; then tar -cf ${BACKUP_FILENAME}.tar ${BACKUP_FILENAME}-database.sql ${BACKUP_FILENAME}-roles.sql else tar -cWf ${BACKUP_FILENAME}.tar ${BACKUP_FILENAME}-database.sql ${BACKUP_FILENAME}-roles.sql fi ; if test "$?" != "0" ; then echo "Creating backup tar archive [${BACKUP_FILENAME}.tar] failed. Aborting." exit 1 fi rm -f ${BACKUP_FILENAME}-database.sql rm -f ${BACKUP_FILENAME}-roles.sql chown ${BACKUP_OWNER} ${BACKUP_FILENAME}.tar exit 0 #============================================================== ./gnumed-server.22.17/server/gm-set_gm-dbo_password0000755000175000017500000000073214172057744020406 0ustar ncqncq#!/bin/sh #============================================================== # author: Karsten Hilbert # license: GPL v2 or later # # This wrapper allows setting the password for gm-dbo. # # It must be run as root. 
# #============================================================== PASSWD="$1" SQL="ALTER ROLE \\\"gm-dbo\\\" ENCRYPTED PASSWORD '${PASSWD}';" su -c "psql -d template1 -c \"${SQL}\"" -l postgres #============================================================== ./gnumed-server.22.17/server/gm-zip+sign_backups.sh0000755000175000017500000001141114172057743020316 0ustar ncqncq#!/bin/bash #============================================================== # author: Karsten Hilbert # license: GPL v2 or later # # anacron # ------- # The following line could be added to a system's # /etc/anacrontab to make sure it creates daily # database backups for GNUmed: # # 1 15 gnumed--sign-backups /usr/bin/gm-zip+sign_backups.sh # # # cron # ---- # Add the following line to a crontab file to sign # database backups at 12:47 and 19:47 every day: # # 47 12,19 * * * * /usr/bin/gm-zip+sign_backups.sh # # # It is useful to have a PROCMAIL rule for the GNotary server replies # piping them into the stoarage area where the backups are kept. #============================================================== CONF="/etc/gnumed/gnumed-backup.conf" #============================================================== # There really should not be any need to # change anything below this line. #============================================================== set -o pipefail # do not run twice [ "${FLOCKER}" != "$0" ] && exec env FLOCKER="$0" flock --exclusive --nonblock "$0" "$0" "$@" || : # load config file if [ -r ${CONF} ] ; then . ${CONF} else echo "Cannot read configuration file ${CONF}. Aborting." exit 1 fi TS=$(date +%Y-%m-%d-%H-%M-%S) BACKUP_BASENAME="backup-${GM_DATABASE}-${INSTANCE_OWNER}" cd "${BACKUP_DIR}" if test "$?" != "0" ; then echo "Cannot change into backup directory [${BACKUP_DIR}]. Aborting." exit 1 fi shopt -s -q nullglob # no glob matches -> "" AGGREGATE_EXIT_CODE=0 # find any leftover, untested tar files # and test them so they can be compressed for TAR_UNTESTED in "${BACKUP_BASENAME}"-*.tar.untested ; do # test tar --extract --to-stdout --file="${TAR_UNTESTED}" > /dev/null RESULT="$?" if test "${RESULT}" != "0" ; then echo "Verifying backup tar archive [${TAR_UNTESTED}] failed (${RESULT}). Skipping." AGGREGATE_EXIT_CODE=${RESULT} continue fi # rename to final archive name TAR_FINAL=$(basename "${TAR_UNTESTED}" .untested) mv --force "${TAR_UNTESTED}" "${TAR_FINAL}" RESULT="$?" if test "${RESULT}" != "0" ; then echo "Cannot rename tar archive (${RESULT}). Skipping." echo "FILES: ${TAR_UNTESTED} => ${TAR_FINAL}" AGGREGATE_EXIT_CODE=${RESULT} continue fi chown "${BACKUP_OWNER}" "${TAR_FINAL}" done # zip up any backups for TAR_FINAL in "${BACKUP_BASENAME}"-*.tar ; do BZ2_FINAL="${TAR_FINAL}.bz2" BZ2_SCRATCH="${BZ2_FINAL}.partial" BZ2_UNTESTED="${BZ2_FINAL}.untested" # compress tar archive # I have tried "xz -9 -e" and it did not make much of # a difference (48 MB in a 1.2 GB backup) #xz --quiet --extreme --check sha256 --no-warn -${COMPRESSION_LEVEL} ${BACKUP} #xz --quiet --test ${BACKUP}.xz bzip2 --quiet --stdout --keep --compress -"${COMPRESSION_LEVEL}" "${TAR_FINAL}" > "${BZ2_SCRATCH}" RESULT="$?" if test "${RESULT}" != "0" ; then echo "Compressing tar archive [${TAR_FINAL}] into [${BZ2_SCRATCH}] failed (${RESULT}). Skipping." AGGREGATE_EXIT_CODE=${RESULT} rm --force "${BZ2_SCRATCH}" continue fi # rename to "untested" archive name mv --force "${BZ2_SCRATCH}" "${BZ2_UNTESTED}" RESULT="$?" if test "${RESULT}" != "0" ; then echo "Renaming compressed archive [${BZ2_SCRATCH}] to [${BZ2_UNTESTED}] failed (${RESULT}). 
Skipping." AGGREGATE_EXIT_CODE=${RESULT} continue fi # verify compressed archive bzip2 --quiet --test "${BZ2_UNTESTED}" RESULT="$?" if test "${RESULT}" != "0" ; then echo "Verifying compressed archive [${BZ2_UNTESTED}] failed (${RESULT}). Removing." AGGREGATE_EXIT_CODE=${RESULT} rm --force "${BZ2_UNTESTED}" continue fi # rename to final archive name mv --force "${BZ2_UNTESTED}" "${BZ2_FINAL}" RESULT="$?" if test "${RESULT}" != "0" ; then echo "Renaming tested compressed archive [${BZ2_UNTESTED}] to [${BZ2_FINAL}] failed (${RESULT}). Skipping." AGGREGATE_EXIT_CODE=${RESULT} continue fi rm --force "${TAR_FINAL}" chmod "${BACKUP_MASK}" "${BZ2_FINAL}" chown "${BACKUP_OWNER}" "${BZ2_FINAL}" # GNotary support if test -n "${GNOTARY_TAN}" ; then LOCAL_MAILER=$(which mail) #SHA512="SHA 512:"`sha512sum -b ${BACKUP_FILENAME}.tar.bz2` SHA512=$(openssl dgst -sha512 -hex "${BZ2_FINAL}") RMD160=$(openssl dgst -ripemd160 -hex "${BZ2_FINAL}") export REPLYTO=${SIG_RECEIVER} # send mail ( echo " " echo "" echo "" echo " $GNOTARY_TAN" echo " notarize" echo " " echo " ${SHA512}" echo " ${RMD160}" echo " " echo "" echo " " ) | $LOCAL_MAILER -s "gnotarize" "$GNOTARY_SERVER" fi done exit ${AGGREGATE_EXIT_CODE} ./gnumed-server.22.17/server/pycommon/0000755000175000017500000000000014172057745015755 5ustar ncqncq./gnumed-server.22.17/server/pycommon/gmLog2.py0000644000175000017500000002411714172057744017462 0ustar ncqncq"""GNUmed logging framework setup. All error logging, user notification and otherwise unhandled exception handling should go through classes or functions of this module. Theory of operation: This module tailors the standard logging framework to the needs of GNUmed. By importing gmLog2 into your code you'll get the root logger send to a unicode file with messages in a format useful for debugging. The filename is either taken from the command line (--log-file=...) or derived from the name of the main application. The log file will be found in one of the following standard locations: 1) given on the command line as "--log-file=LOGFILE" 2) ~/./.log 3) /dir/of/binary/.log (mainly for DOS/Windows) where is derived from the name of the main application. If you want to specify just a directory for the log file you must end the --log-file definition with a slash. By importing "logging" and getting a logger your modules never need to worry about the real message destination or whether at any given time there's a valid logger available. Your MAIN module simply imports gmLog2 and all other modules will merrily and automagically start logging away. Ad hoc call stack logging recipe: call_stack = inspect.stack() call_stack.reverse() for idx in range(1, len(call_stack)): caller = call_stack[idx] _log.debug('%s[%s] @ [%s] in [%s]', ' '* idx, caller[3], caller[2], caller[1]) del call_stack """ # TODO: # - exception() # - ascii_ctrl2mnemonic() #======================================================================== __author__ = "K. 
Hilbert " __license__ = "GPL v2 or later (details at http://www.gnu.org)" # stdlib import logging import sys import os import io import codecs import locale import datetime as pydt import random import time import calendar _logfile_name = None _logfile = None #_string_encoding = None # table used for cooking non-printables AsciiName = ['<#0-0x00-nul>', '<#1-0x01-soh>', '<#2-0x02-stx>', '<#3-0x03-etx>', '<#4-0x04-eot>', '<#5-0x05-enq>', '<#6-0x06-ack>', '<#7-0x07-bel>', '<#8-0x08-bs>', '<#9-0x09-ht>', '<#10-0x0A-lf>', '<#11-0x0B-vt>', '<#12-0x0C-ff>', '<#13-0x0D-cr>', '<#14-0x0E-so>', '<#15-0x0F-si>', '<#16-0x10-dle>', '<#17-0x11-dc1/xon>', '<#18-0x12-dc2>', '<#19-0x13-dc3/xoff>', '<#20-0x14-dc4>', '<#21-0x15-nak>', '<#22-0x16-syn>', '<#23-0x17-etb>', '<#24-0x18-can>', '<#25-0x19-em>', '<#26-0x1A-sub>', '<#27-0x1B-esc>', '<#28-0x1C-fs>', '<#29-0x1D-gs>', '<#30-0x1E-rs>', '<#31-0x1F-us>' ] # msg = reduce(lambda x, y: x+y, (map(self.__char2AsciiName, list(tmp))), '') # # def __char2AsciiName(self, aChar): # try: # return AsciiName[ord(aChar)] # except IndexError: # return aChar # # def __tracestack(self): # """extract data from the current execution stack # # this is rather fragile, I guess # """ # stack = traceback.extract_stack() # self.__modulename = stack[-4][0] # self.__linenumber = stack[-4][1] # self.__functionname = stack[-4][2] # if (self.__functionname == "?"): # self.__functionname = "Main" #=============================================================== # external API #=============================================================== def flush(): logger = logging.getLogger('gm.logging') logger.critical('-------- synced log file -------------------------------') root_logger = logging.getLogger() for handler in root_logger.handlers: handler.flush() #=============================================================== def log_instance_state(instance): logger = logging.getLogger('gm.logging') logger.debug('state of %s', instance) for attr in [ a for a in dir(instance) if not a.startswith('__') ]: try: val = getattr(instance, attr) except AttributeError: val = '' logger.debug(' %s: %s', attr, val) #=============================================================== def log_stack_trace(message=None, t=None, v=None, tb=None): logger = logging.getLogger('gm.logging') if t is None: t = sys.exc_info()[0] if v is None: v = sys.exc_info()[1] if tb is None: tb = sys.exc_info()[2] if tb is None: logger.debug('sys.exc_info() did not return a traceback object, trying sys.last_traceback') try: tb = sys.last_traceback except AttributeError: logger.debug('no stack to trace (no exception information available)') return # log exception details logger.debug('exception: %s', v) logger.debug('type: %s', t) logger.debug('list of attributes:') for attr in [ a for a in dir(v) if not a.startswith('__') ]: try: val = getattr(v, attr) except AttributeError: val = '' logger.debug(' %s: %s', attr, val) # make sure we don't leave behind a binding # to the traceback as warned against in # sys.exc_info() documentation try: # recurse back to root caller while 1: if not tb.tb_next: break tb = tb.tb_next # put the frames on a stack stack_of_frames = [] frame = tb.tb_frame while frame: stack_of_frames.append(frame) frame = frame.f_back finally: del tb stack_of_frames.reverse() if message is not None: logger.debug(message) logger.debug('stack trace follows:') logger.debug('(locals by frame, outmost frame first)') for frame in stack_of_frames: logger.debug ( '--- frame [%s]: #%s, %s -------------------', frame.f_code.co_name, 
frame.f_lineno, frame.f_code.co_filename ) for varname, value in frame.f_locals.items(): if varname == '__doc__': continue logger.debug('%20s = %s', varname, value) #--------------------------------------------------------------- def log_multiline(level, message=None, line_prefix=None, text=None): if text is None: return if message is None: message = 'multiline text:' if line_prefix is None: line_template = ' > %s' else: line_template = '%s: %%s' % line_prefix lines2log = [message] lines2log.extend([ line_template % line for line in text.split('\n') ]) logger = logging.getLogger('gm.logging') logger.log(level, '\n'.join(lines2log)) #=============================================================== # internal API #=============================================================== __words2hide = [] def add_word2hide(word): if word is None: return if word.strip() == '': return if word not in __words2hide: __words2hide.append(str(word)) #--------------------------------------------------------------- __original_logger_write_func = None def __safe_logger_write_func(s): for word in __words2hide: # throw away up to 4 bits (plus the randint() cost) random.getrandbits(random.randint(1, 4)) # from there generate a replacement string valid for # *this* round of replacements of *this* word, # this approach won't mitigate guessing trivial passwords # from replacements of known data (a known-plaintext attack) # but will make automated searching for replaced strings # in the log more difficult bummer = hex(random.randint(0, sys.maxsize)).lstrip('0x') s = s.replace(word, bummer) __original_logger_write_func(s) #--------------------------------------------------------------- def __setup_logging(): global _logfile if _logfile is not None: return True if not __get_logfile_name(): return False _logfile = io.open(_logfile_name, mode = 'wt', encoding = 'utf8', errors = 'replace') global __original_logger_write_func __original_logger_write_func = _logfile.write _logfile.write = __safe_logger_write_func # setup fmt = '%(asctime)s %(levelname)-8s %(name)-12s [%(thread)d %(threadName)-10s] (%(pathname)s::%(funcName)s() #%(lineno)d): %(message)s' logging.basicConfig ( format = fmt, datefmt = '%Y-%m-%d %H:%M:%S', level = logging.DEBUG, stream = _logfile ) logging.captureWarnings(True) logger = logging.getLogger() logger.log_stack_trace = log_stack_trace logger.log_multiline = log_multiline # start logging #logger = logging.getLogger('gm.logging') logger.critical('-------- start of logging ------------------------------') logger.info('log file is <%s>', _logfile_name) logger.info('log level is [%s]', logging.getLevelName(logger.getEffectiveLevel())) logger.info('log file encoding is ') logger.debug('log file .write() patched from original %s to patched %s', __original_logger_write_func, __safe_logger_write_func) #--------------------------------------------------------------- def __get_logfile_name(): global _logfile_name if _logfile_name is not None: return _logfile_name def_log_basename = os.path.splitext(os.path.basename(sys.argv[0]))[0] default_logfile_name = '%s-%s-%s.log' % ( def_log_basename, pydt.datetime.now().strftime('%Y_%m_%d-%H_%M_%S'), os.getpid() ) # given on command line ? for option in sys.argv[1:]: if option.startswith('--log-file='): (opt_name, value) = option.split('=') (dir_name, file_name) = os.path.split(value) if dir_name == '': dir_name = '.' 
if file_name == '': file_name = default_logfile_name _logfile_name = os.path.abspath(os.path.expanduser(os.path.join(dir_name, file_name))) return True # else store it in ~/.gnumed/logs/def_log_basename/default_logfile_name dir_name = os.path.expanduser(os.path.join('~', '.gnumed', 'logs', def_log_basename)) try: os.makedirs(dir_name) except OSError as e: if (e.errno == 17) and not os.path.isdir(dir_name): raise _logfile_name = os.path.join(dir_name, default_logfile_name) return True #=============================================================== # main #--------------------------------------------------------------- __setup_logging() if __name__ == '__main__': if len(sys.argv) < 2: sys.exit() if sys.argv[1] != 'test': sys.exit() #----------------------------------------------------------- def test(): logger = logging.getLogger('gmLog2.test') logger.error('test %s', [1,2,3]) logger.error("I expected to see %s::test()" % __file__) add_word2hide('super secret passphrase') logger.debug('credentials: super secret passphrase') try: int(None) except Exception: logger.exception('unhandled exception') log_stack_trace() flush() #----------------------------------------------------------- test() ./gnumed-server.22.17/server/pycommon/gmPsql.py0000644000175000017500000001253014172057744017572 0ustar ncqncq# A Python class to replace the PSQL command-line interpreter # NOTE: this is not a full replacement for the interpeter, merely # enough functionality to run gnumed installation scripts # # Copyright (C) 2003, 2004 - 2010 GNUmed developers # Licence: GPL v2 or later #=================================================================== __author__ = "Ian Haywood" __license__ = "GPL v2 or later (details at http://www.gnu.org)" # stdlib import sys import os import re import logging import io _log = logging.getLogger('gm.bootstrapper') unformattable_error_id = 12345 #=================================================================== class Psql: def __init__ (self, conn): """ db : the interpreter to connect to, must be a DBAPI compliant interface """ self.conn = conn self.vars = {'ON_ERROR_STOP': None} #--------------------------------------------------------------- def match(self, pattern): match = re.match(pattern, self.line) if match is None: return 0 self.groups = match.groups() return 1 #--------------------------------------------------------------- def fmt_msg(self, aMsg): try: tmp = "%s:%d: %s" % (self.filename, self.lineno-1, aMsg) tmp = tmp.replace('\r', '') tmp = tmp.replace('\n', '') except UnicodeDecodeError: global unformattable_error_id tmp = "%s:%d: " % (self.filename, self.lineno-1, unformattable_error_id) try: print('ERROR: GNUmed bootstrap #%d:' % unformattable_error_id) print(aMsg) except Exception: pass unformattable_error_id += 1 return tmp #--------------------------------------------------------------- def run (self, filename): """ filename: a file, containg semicolon-separated SQL commands """ _log.debug('processing [%s]', filename) curs = self.conn.cursor() curs.execute('show session authorization') start_auth = curs.fetchall()[0][0] curs.close() _log.debug('session auth: %s', start_auth) if os.access (filename, os.R_OK): sql_file = io.open(filename, mode = 'rt', encoding = 'utf8') else: _log.error("cannot open file [%s]", filename) return 1 self.lineno = 0 self.filename = filename in_string = False bracketlevel = 0 curr_cmd = '' curs = self.conn.cursor() for self.line in sql_file: self.lineno += 1 if len(self.line.strip()) == 0: continue # \set if self.match(r"^\\set (\S+) 
(\S+)"): _log.debug('"\set" found: %s', self.groups) self.vars[self.groups[0]] = self.groups[1] if self.groups[0] == 'ON_ERROR_STOP': # adjusting from string to int so that "1" -> 1 -> True self.vars['ON_ERROR_STOP'] = int(self.vars['ON_ERROR_STOP']) continue # \unset if self.match (r"^\\unset (\S+)"): self.vars[self.groups[0]] = None continue # other '\' commands if self.match (r"^\\(.*)") and not in_string: # most other \ commands are for controlling output formats, don't make # much sense in an installation script, so we gently ignore them _log.warning(self.fmt_msg("psql command \"\\%s\" being ignored " % self.groups[0])) continue # non-'\' commands this_char = self.line[0] # loop over characters in line for next_char in self.line[1:] + ' ': # start/end of string detected if this_char == "'": in_string = not in_string # detect "--"-style comments if this_char == '-' and next_char == '-' and not in_string: break # detect bracketing if this_char == '(' and not in_string: bracketlevel += 1 if this_char == ')' and not in_string: bracketlevel -= 1 # have we: # - found end of command ? # - are not inside a string ? # - are not inside bracket pair ? if not ((in_string is False) and (bracketlevel == 0) and (this_char == ';')): curr_cmd += this_char else: if curr_cmd.strip() != '': try: curs.execute(curr_cmd) try: data = curs.fetchall() _log.debug('cursor data: %s', data) except Exception: # actually: psycopg2.ProgrammingError but no handle pass except Exception as error: _log.exception(curr_cmd) if re.match(r"^NOTICE:.*", str(error)): _log.warning(self.fmt_msg(error)) else: _log.error(self.fmt_msg(error)) if hasattr(error, 'diag'): for prop in dir(error.diag): if prop.startswith('__'): continue val = getattr(error.diag, prop) if val is None: continue _log.error('PG diags %s: %s', prop, val) if self.vars['ON_ERROR_STOP']: self.conn.commit() curs.close() return 1 self.conn.commit() curs.close() curs = self.conn.cursor() curr_cmd = '' this_char = next_char # end of loop over chars # end of loop over lines self.conn.commit() curs.execute('show session authorization') end_auth = curs.fetchall()[0][0] curs.close() _log.debug('session auth after sql file processing: %s', end_auth) if start_auth != end_auth: _log.error('session auth changed before/after processing sql file') return 0 #=================================================================== # testing code if __name__ == '__main__': if len(sys.argv) < 2: sys.exit() if sys.argv[1] != 'test': sys.exit() conn = PgSQL.connect(user='gm-dbo', database = 'gnumed') psql = Psql(conn) psql.run(sys.argv[1]) conn.close() ./gnumed-server.22.17/server/pycommon/gmBorg.py0000644000175000017500000000257314172057744017552 0ustar ncqncq#=================================================== # Thanks to Python Patterns ! # --------------------------- __author__ = "Karsten.Hilbert@gmx.net" __license__ = "GPL v2 or later" #=================================================== class cBorg(object): """A generic Borg mixin for new-style classes. 
- mixin this class with your class' ancestors to borg it - there may be many _instances_ of this - PER CHILD CLASS - but they all share _state_ """ _instances = {} def __new__(cls, *args, **kargs): # look up subclass instance cache if cBorg._instances.get(cls) is None: #cBorg._instances[cls] = object.__new__(cls, *args, **kargs) cBorg._instances[cls] = object.__new__(cls) return cBorg._instances[cls] #=================================================== if __name__ == '__main__': import sys if len(sys.argv) < 2: sys.exit() if sys.argv[1] != 'test': sys.exit() class A(cBorg): pass class B(cBorg): pass class C(cBorg): def __init__(self, val='default'): self.x = val print("testing new-style classes borg") a1 = A() a2 = A() a1.a = 5 print(a1.a, "==", a2.a) a3 = A() print(a1.a, "==", a2.a, "==", a3.a) b1 = B() b1.a = 10 print(b1.a) print(a1.a) b2 = B() print(b2.a) c1 = C(val = 'non-default') print(c1.x) c2 = C(val = 'non-default 2') print(c2.x) c3 = C() print(c3.x) #=================================================== ./gnumed-server.22.17/server/pycommon/gmNull.py0000644000175000017500000001102014172057744017556 0ustar ncqncq"""null.py This is a sample implementation of the 'Null Object' design pattern. Roughly, the goal with Null objects is to provide an 'intelligent' replacement for the often used primitive data type None in Python or Null (or Null pointers) in other languages. These are used for many purposes including the important case where one member of some group of otherwise similar elements is special for whatever reason. Most often this results in conditional statements to distinguish between ordinary elements and the primitive Null value. Among the advantages of using Null objects are the following: - Superfluous conditional statements can be avoided by providing a first class object alternative for the primitive value None. - Code readability is improved. - Null objects can act as a placeholder for objects with behaviour that is not yet implemented. - Null objects can be replaced for any other class. - Null objects are very predictable at what they do. To cope with the disadvantage of creating large numbers of passive objects that do nothing but occupy memory space Null objects are often combined with the Singleton pattern. For more information use any internet search engine and look for combinations of these words: Null, object, design and pattern. Dinu C. Gherman, August 2001 Karsten Hilbert July 2004 """ #============================================================== __author__ = "Dinu C. Gherman, Karsten Hilbert" __license__ = "GPL v2 or later (details at http://www.gnu.org)" import logging _log = logging.getLogger('cNull') #============================================================== class cNull(object): """A class for implementing Null objects. This class ignores all parameters passed when constructing or calling instances and traps all attribute and method requests. Instances of it always (and reliably) do 'nothing'. The code might benefit from implementing some further special Python methods depending on the context in which its instances are used. Especially when comparing and coercing Null objects the respective methods' implementation will depend very much on the environment and, hence, these special methods are not provided here. """ # object constructing def __init__(self, *args, **kwargs): "Ignore parameters." _log.debug('args: %s', args) _log.debug('kwargs: %s', kwargs) # object calling def __call__(self, *args, **kwargs): "Ignore method calls." 
_log.debug('args: %s', args) _log.debug('kwargs: %s', kwargs) return self # attribute handling def __getattr__(self, attribute): "Ignore attribute requests." _log.debug('%s.%s', self, attribute) return self def __setattr__(self, attribute, value): "Ignore attribute setting." _log.debug('%s.%s = %s', self, attribute, value) return self def __delattr__(self, attribute): "Ignore deleting attributes." _log.debug('%s.%s', self, attribute) return self # item handling def __getitem__(self, item): "Ignore item requests." _log.debug('%s[%s]', self, item) return self def __setitem__(self, item, value): "Ignore item setting." _log.debug('%s[%s] = %s', self, item, value) return self def __delitem__(self, item): "Ignore deleting items." _log.debug('%s[%s]', self, item) return self # misc. def __repr__(self): "Return a string representation." return "" % id(self) def __str__(self): "Convert to a string and return it." return '' def __nonzero__(self): _log.debug('returns 0') return 0 def __len__(self): _log.debug('0') return 0 #============================================================== def test(): "Perform some decent tests, or rather: demos." # constructing and calling n = cNull() n = cNull('value') n = cNull('value', param='value') n() n('value') n('value', param='value') # attribute handling n.attr1 n.attr1.attr2 n.method1() n.method1().method2() n.method('value') n.method(param='value') n.method('value', param='value') n.attr1.method1() n.method1().attr1 n.attr1 = 'value' n.attr1.attr2 = 'value' n['1'] n['2'] = '123' del n['3'] del n.attr1 del n.attr1.attr2.attr3 # representation and conversion to a string tmp = '' % id(n) assert repr(n) == tmp assert str(n) == '' # comparing if n == 1: print("Null object == 1") else: print("Null object != 1") #-------------------------------------------------------------- if __name__ == '__main__': import sys if len(sys.argv) < 2: sys.exit() if sys.argv[1] != 'test': sys.exit() test() ./gnumed-server.22.17/server/pycommon/gmBusinessDBObject.py0000644000175000017500000007045214172057744022012 0ustar ncqncq__doc__ = """GNUmed database object business class. Overview -------- This class wraps a source relation (table, view) which represents an entity that makes immediate business sense such as a vaccination or a medical document. In many if not most cases this source relation is a denormalizing view. The data in that view will in most cases, however, originate from several normalized tables. One instance of this class represents one row of said source relation. Note, however, that this class does not *always* simply wrap a single table or view. It can also encompass several relations (views, tables, sequences etc) that taken together form an object meaningful to *business* logic. Initialization -------------- There are two ways to initialize an instance with values. One way is to pass a "primary key equivalent" object into __init__(). Refetch_payload() will then pull the data from the backend. Another way would be to fetch the data outside the instance and pass it in via the argument. In that case the instance will not initially connect to the database which may offer a great boost to performance. Values API ---------- Field values are cached for later access. They can be accessed by a dictionary API, eg: old_value = object['field'] object['field'] = new_value The field names correspond to the respective column names in the "main" source relation. 
Accessing non-existant field names will raise an error, so does trying to set fields not listed in self.__class__._updatable_fields. To actually store updated values in the database one must explicitly call save_payload(). The class will in many cases be enhanced by accessors to related data that is not directly part of the business object itself but are closely related, such as codes linked to a clinical narrative entry (eg a diagnosis). Such accessors in most cases start with get_*. Related setters start with set_*. The values can be accessed via the object['field'] syntax, too, but they will be cached independantly. Concurrency handling -------------------- GNUmed connections always run transactions in isolation level "serializable". This prevents transactions happening at the *very same time* to overwrite each other's data. All but one of them will abort with a concurrency error (eg if a transaction runs a select-for-update later than another one it will hang until the first transaction ends. Then it will succeed or fail depending on what the first transaction did). This is standard transactional behaviour. However, another transaction may have updated our row between the time we first fetched the data and the time we start the update transaction. This is noticed by getting the XMIN system column for the row when initially fetching the data and using that value as a where condition value when updating the row later. If the row had been updated (xmin changed) or deleted (primary key disappeared) in the meantime the update will touch zero rows (as no row with both PK and XMIN matching is found) even if the query itself syntactically succeeds. When detecting a change in a row due to XMIN being different one needs to be careful how to represent that to the user. The row may simply have changed but it also might have been deleted and a completely new and unrelated row which happens to have the same primary key might have been created ! This row might relate to a totally different context (eg. patient, episode, encounter). One can offer all the data to the user: self.payload_most_recently_fetched - contains the data at the last successful refetch self.payload_most_recently_attempted_to_store - contains the modified payload just before the last failure of save_payload() - IOW what is currently in the database self._payload - contains the currently active payload which may or may not contain changes For discussion on this see the thread starting at: http://archives.postgresql.org/pgsql-general/2004-10/msg01352.php and here http://groups.google.com/group/pgsql.general/browse_thread/thread/e3566ba76173d0bf/6cf3c243a86d9233 (google for "XMIN semantic at peril") Problem cases with XMIN: 1) not unlikely - a very old row is read with XMIN - vacuum comes along and sets XMIN to FrozenTransactionId - now XMIN changed but the row actually didn't ! - an update with "... where xmin = old_xmin ..." fails although there is no need to fail 2) quite unlikely - a row is read with XMIN - a long time passes - the original XMIN gets frozen to FrozenTransactionId - another writer comes along and changes the row - incidentally the exact same old row gets the old XMIN *again* - now XMIN is (again) the same but the data changed ! - a later update fails to detect the concurrent change !! TODO: The solution is to use our own column for optimistic locking which gets updated by an AFTER UPDATE trigger. 
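The TODO above speaks of an AFTER UPDATE trigger. The following is
merely a sketch of the idea, written with a BEFORE UPDATE trigger
instead so that the incremented version is stored together with the
row itself (table and column names are made up for illustration,
this is not the actual GNUmed audit machinery):

	ALTER TABLE clin.some_table
		ADD COLUMN row_version integer NOT NULL DEFAULT 0;

	CREATE FUNCTION clin.trf_bump_row_version()
		RETURNS trigger AS $$
	BEGIN
		NEW.row_version := OLD.row_version + 1;
		RETURN NEW;
	END;
	$$ LANGUAGE plpgsql;

	CREATE TRIGGER tr_bump_row_version
		BEFORE UPDATE ON clin.some_table
		FOR EACH ROW EXECUTE PROCEDURE clin.trf_bump_row_version();

The optimistic update issued by the client would then guard on that
column rather than on XMIN:

	UPDATE clin.some_table SET ...
	WHERE pk = %(pk)s AND row_version = %(row_version)s;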
""" #============================================================ __author__ = "K.Hilbert " __license__ = "GPL v2 or later" import sys import inspect import logging import datetime if __name__ == '__main__': sys.path.insert(0, '../../') from Gnumed.pycommon import gmExceptions from Gnumed.pycommon import gmPG2 from Gnumed.pycommon.gmDateTime import pydt_strftime from Gnumed.pycommon.gmTools import tex_escape_string from Gnumed.pycommon.gmTools import xetex_escape_string from Gnumed.pycommon.gmTools import compare_dict_likes from Gnumed.pycommon.gmTools import format_dict_like from Gnumed.pycommon.gmTools import dicts2table from Gnumed.pycommon.gmTools import u_left_arrow _log = logging.getLogger('gm.db') #============================================================ class cBusinessDBObject(object): """Represents business objects in the database. Rules: - instances ARE ASSUMED TO EXIST in the database - PK construction (aPK_obj): DOES verify its existence on instantiation (fetching data fails) - Row construction (row): allowed by using a dict of pairs field name: field value (PERFORMANCE improvement) - does NOT verify FK target existence - does NOT create new entries in the database - does NOT lazy-fetch fields on access Class scope SQL commands and variables: <_cmd_fetch_payload> - must return exactly one row - WHERE clause argument values are expected in self.pk_obj (taken from __init__(aPK_obj)) - must return xmin of all rows that _cmds_store_payload will be updating, so views must support the xmin columns of their underlying tables <_cmds_store_payload> - one or multiple "update ... set ... where xmin_* = ... and pk* = ..." statements which actually update the database from the data in self._payload, - the last query must refetch at least the XMIN values needed to detect concurrent updates, their field names had better be the same as in _cmd_fetch_payload, - the last query CAN return other fields which is particularly useful when those other fields are computed in the backend and may thus change upon save but will not have been set by the client code explicitely - this is only really of concern if the saved subclass is to be reused after saving rather than re-instantiated - when subclasses tend to live a while after save_payload() was called and they support computed fields (say, _(some_column) you need to return *all* columns (see cEncounter) <_updatable_fields> - a list of fields available for update via object['field'] A template for new child classes: *********** start of template *********** #------------------------------------------------------------ from Gnumed.pycommon import gmBusinessDBObject from Gnumed.pycommon import gmPG2 #============================================================ # short description #------------------------------------------------------------ # search/replace "" " -> 3 "s # # search-replace get_XXX, use plural form _SQL_get_XXX = u"" " SELECT *, (xmin AS xmin_XXX) FROM XXX.v_XXX WHERE %s "" " class cXxxXxx(gmBusinessDBObject.cBusinessDBObject): "" "Represents ..."" " _cmd_fetch_payload = _SQL_get_XXX % u"pk_XXX = %s" _cmds_store_payload = [ u"" " -- typically the underlying table name UPDATE xxx.xxx SET -- typically "table_col = %(view_col)s" xxx = %(xxx)s, xxx = gm.nullify_empty_string(%(xxx)s) WHERE pk = %(pk_XXX)s AND xmin = %(xmin_XXX)s RETURNING xmin AS xmin_XXX -- also return columns which are calculated in the view used by -- the initial SELECT such that they will further on contain their -- updated value: --, ... --, ... 
"" " ] # view columns that can be updated: _updatable_fields = [ u'xxx', u'xxx' ] #-------------------------------------------------------- # def format(self): # return u'%s' % self #------------------------------------------------------------ def get_XXX(order_by=None): if order_by is None: order_by = u'true' else: order_by = u'true ORDER BY %s' % order_by cmd = _SQL_get_XXX % order_by rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd}], get_col_idx = True) return [ cXxxXxx(row = {'data': r, 'idx': idx, 'pk_field': 'pk_XXX'}) for r in rows ] #------------------------------------------------------------ def create_xxx(xxx=None, xxx=None): args = { u'xxx': xxx, u'xxx': xxx } cmd = u"" " INSERT INTO xxx.xxx ( xxx, xxx, xxx ) VALUES ( %(xxx)s, %(xxx)s, gm.nullify_empty_string(%(xxx)s) ) RETURNING pk --RETURNING * "" " rows, idx = gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': args}], return_data = True, get_col_idx = False) #rows, idx = gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': args}], return_data = True, get_col_idx = True) return cXxxXxx(aPK_obj = rows[0]['pk']) #return cXxxXxx(row = {'data': r, 'idx': idx, 'pk_field': 'pk_XXX'}) #------------------------------------------------------------ def delete_xxx(pk_XXX=None): args = {'pk': pk_XXX} cmd = u"DELETE FROM xxx.xxx WHERE pk = %(pk)s" gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': args}]) return True #------------------------------------------------------------ #------------------------------------------------------------ # widget code #------------------------------------------------------------ def edit_xxx(parent=None, xxx=None, single_entry=False, presets=None): #------------------------------------------------------------ def delete_xxx() #------------------------------------------------------------ def manage_xxx() #------------------------------------------------------------ # remember to add in clinical item generic workflows *********** end of template *********** """ #-------------------------------------------------------- def __init__(self, aPK_obj=None, row=None, link_obj=None): """Init business object. Call from child classes: super(cChildClass, self).__init__(aPK_obj = aPK_obj, row = row, link_obj = link_obj) """ # initialize those "too early" because checking descendants might # fail which will then call __str__ in stack trace logging if --debug # was given which in turn needs those instance variables self.pk_obj = '' self._idx = {} self._payload = [] # the cache for backend object values (mainly table fields) self._ext_cache = {} # the cache for extended method's results self._is_modified = False # sanity check child implementions self.__class__._cmd_fetch_payload self.__class__._cmds_store_payload self.__class__._updatable_fields if aPK_obj is not None: self.__init_from_pk(aPK_obj = aPK_obj, link_obj = link_obj) else: self._init_from_row_data(row = row) self._is_modified = False #-------------------------------------------------------- def __init_from_pk(self, aPK_obj=None, link_obj=None): """Creates a new clinical item instance by its PK. 
aPK_obj can be: - a simple value * the primary key WHERE condition must be a simple column - a dictionary of values * the primary key WHERE condition must be a subselect consuming the dict and producing the single-value primary key """ self.pk_obj = aPK_obj result = self.refetch_payload(link_obj = link_obj) if result is True: self.payload_most_recently_fetched = {} for field in self._idx: self.payload_most_recently_fetched[field] = self._payload[self._idx[field]] return True if result is False: raise gmExceptions.ConstructorError("[%s:%s]: error loading instance" % (self.__class__.__name__, self.pk_obj)) #-------------------------------------------------------- def _init_from_row_data(self, row=None): """Creates a new clinical item instance given its fields. row must be a dict with the fields: - idx: a dict mapping field names to position - data: the field values in a list (as returned by cursor.fetchone() in the DB-API) - pk_field: the name of the primary key field OR - pk_obj: a dictionary suitable for passed to cursor.execute and holding the primary key values, used for composite PKs row = { 'data': rows[0], 'idx': idx, 'pk_field': 'pk_XXX (the PK column name)', 'pk_obj': {'pk_col1': pk_col1_val, 'pk_col2': pk_col2_val} } rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = True) objects = [ cChildClass(row = {'data': r, 'idx': idx, 'pk_field': 'the PK column name'}) for r in rows ] """ assert ('data' in row), "[%s:??]: 'data' missing from argument: %s" % (self.__class__.__name__, row) assert ('idx' in row), "[%s:??]: 'idx' missing from argument: %s" % (self.__class__.__name__, row) assert (len(row['idx']) == len(row['data'])), "[%s:??]: 'idx'<->'data' field count mismatch: %s" % (self.__class__.__name__, row) faulty_pk = (('pk_field' not in row) and ('pk_obj' not in row)) assert not faulty_pk, "[%s:??]: either 'pk_field' or 'pk_obj' must exist in argument: %s" % (self.__class__.__name__, row) self._idx = row['idx'] self._payload = row['data'] if 'pk_field' in row: self.pk_obj = row['data'][row['idx'][row['pk_field']]] else: self.pk_obj = row['pk_obj'] self.payload_most_recently_fetched = {} for field in self._idx: self.payload_most_recently_fetched[field] = self._payload[self._idx[field]] #-------------------------------------------------------- def __del__(self): if '_is_modified' in self.__dict__: if self._is_modified: _log.critical('[%s:%s]: loosing payload changes' % (self.__class__.__name__, self.pk_obj)) _log.debug('most recently fetched: %s' % self.payload_most_recently_fetched) _log.debug('modified: %s' % self._payload) #-------------------------------------------------------- def __str__(self): lines = [] try: for attr in self._idx: if self._payload[self._idx[attr]] is None: lines.append('%s: NULL' % attr) else: lines.append('%s: %s [%s]' % ( attr, self._payload[self._idx[attr]], type(self._payload[self._idx[attr]]) )) return '[%s:%s]:\n%s' % (self.__class__.__name__, self.pk_obj, '\n'.join(lines)) except Exception: return 'likely nascent [%s @ %s], cannot show payload and primary key' %(self.__class__.__name__, id(self)) #-------------------------------------------------------- def __getitem__(self, attribute): # use try: except KeyError: as it is faster and we want this as fast as possible # 1) backend payload cache try: return self._payload[self._idx[attribute]] except KeyError: pass # 2) extension method results ... 
getter = getattr(self, 'get_%s' % attribute, None) if not callable(getter): _log.warning('[%s]: no attribute [%s]' % (self.__class__.__name__, attribute)) _log.warning('[%s]: valid attributes: %s', self.__class__.__name__, list(self._idx)) _log.warning('[%s]: no getter method [get_%s]' % (self.__class__.__name__, attribute)) methods = [ m for m in inspect.getmembers(self, inspect.ismethod) if m[0].startswith('get_') ] _log.warning('[%s]: valid getter methods: %s' % (self.__class__.__name__, str(methods))) raise KeyError('[%s]: cannot read from key [%s]' % (self.__class__.__name__, attribute)) self._ext_cache[attribute] = getter() return self._ext_cache[attribute] #-------------------------------------------------------- def __setitem__(self, attribute, value): # 1) backend payload cache if attribute in self.__class__._updatable_fields: try: if self._payload[self._idx[attribute]] != value: self._payload[self._idx[attribute]] = value self._is_modified = True return except KeyError: _log.warning('[%s]: cannot set attribute <%s> despite marked settable' % (self.__class__.__name__, attribute)) _log.warning('[%s]: supposedly settable attributes: %s' % (self.__class__.__name__, str(self.__class__._updatable_fields))) raise KeyError('[%s]: cannot write to key [%s]' % (self.__class__.__name__, attribute)) # 2) setters providing extensions if hasattr(self, 'set_%s' % attribute): setter = getattr(self, "set_%s" % attribute) if not callable(setter): raise AttributeError('[%s] setter [set_%s] not callable' % (self.__class__.__name__, attribute)) try: del self._ext_cache[attribute] except KeyError: pass if type(value) == tuple: if setter(*value): self._is_modified = True return raise AttributeError('[%s]: setter [%s] failed for [%s]' % (self.__class__.__name__, setter, value)) if setter(value): self._is_modified = True return # 3) don't know what to do with _log.error('[%s]: cannot find attribute <%s> or setter method [set_%s]' % (self.__class__.__name__, attribute, attribute)) _log.warning('[%s]: settable attributes: %s' % (self.__class__.__name__, str(self.__class__._updatable_fields))) methods = [ m for m in inspect.getmembers(self, inspect.ismethod) if m[0].startswith('set_') ] _log.warning('[%s]: valid setter methods: %s' % (self.__class__.__name__, str(methods))) raise AttributeError('[%s]: cannot set [%s]' % (self.__class__.__name__, attribute)) #-------------------------------------------------------- # external API #-------------------------------------------------------- def same_payload(self, another_object=None): raise NotImplementedError('comparison between [%s] and [%s] not implemented' % (self, another_object)) #-------------------------------------------------------- def is_modified(self): return self._is_modified #-------------------------------------------------------- def get_fields(self): try: return list(self._idx) except AttributeError: return 'nascent [%s @ %s], cannot return keys' %(self.__class__.__name__, id(self)) #-------------------------------------------------------- def get_updatable_fields(self): return self.__class__._updatable_fields #-------------------------------------------------------- def fields_as_dict(self, date_format='%Y %b %d %H:%M', none_string='', escape_style=None, bool_strings=None): if bool_strings is None: bools = {True: 'True', False: 'False'} else: bools = {True: bool_strings[0], False: bool_strings[1]} data = {} for field in self._idx: # FIXME: harden against BYTEA fields #if type(self._payload[self._idx[field]]) == ... 
# data[field] = _('<%s bytes of binary data>') % len(self._payload[self._idx[field]]) # continue val = self._payload[self._idx[field]] if val is None: data[field] = none_string continue if isinstance(val, bool): data[field] = bools[val] continue if isinstance(val, datetime.datetime): if date_format is None: data[field] = val continue data[field] = pydt_strftime(val, format = date_format) if escape_style in ['latex', 'tex']: data[field] = tex_escape_string(data[field]) elif escape_style in ['xetex', 'xelatex']: data[field] = xetex_escape_string(data[field]) continue try: data[field] = str(val, encoding = 'utf8', errors = 'replace') except TypeError: try: data[field] = str(val) except (UnicodeDecodeError, TypeError): val = '%s' % str(val) data[field] = val.decode('utf8', 'replace') if escape_style in ['latex', 'tex']: data[field] = tex_escape_string(data[field]) elif escape_style in ['xetex', 'xelatex']: data[field] = xetex_escape_string(data[field]) return data #-------------------------------------------------------- def get_patient(self): _log.error('[%s:%s]: forgot to override get_patient()' % (self.__class__.__name__, self.pk_obj)) return None #-------------------------------------------------------- def _get_patient_pk(self): try: return self._payload[self._idx['pk_patient']] except KeyError: pass try: return self._payload[self._idx['pk_identity']] except KeyError: return None patient_pk = property(_get_patient_pk) #-------------------------------------------------------- def _get_staff_id(self): try: return self._payload[self._idx['pk_staff']] except KeyError: _log.debug('[%s]: .pk_staff should be added to the view', self.__class__.__name__) try: return self._payload[self._idx['pk_provider']] except KeyError: pass mod_by = None try: mod_by = self._payload[self._idx['modified_by_raw']] except KeyError: _log.debug('[%s]: .modified_by_raw should be added to the view', self.__class__.__name__) if mod_by is not None: # find by DB account args = {'db_u': mod_by} cmd = "SELECT pk FROM dem.staff WHERE db_user = %(db_u)s" rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = False) if len(rows) > 0: # logically, they are all the same provider, because they share the DB account return rows[0][0] mod_by = self._payload[self._idx['modified_by']] # is .modified_by a "" ? 
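		# (that is, does .modified_by look like "<some_db_account>" - in which
		#  case it is resolved via dem.staff.db_user below - rather than a
		#  staff short_alias ?)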
if mod_by.startswith('<') and mod_by.endswith('>'): # find by DB account args = {'db_u': mod_by.lstrip('<').rstrip('>')} cmd = "SELECT pk FROM dem.staff WHERE db_user = %(db_u)s" rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = False) if len(rows) > 0: # logically, they are all the same provider, because they share the DB account return rows[0][0] # .modified_by is probably dem.staff.short_alias args = {'alias': mod_by} cmd = "SELECT pk FROM dem.staff WHERE short_alias = %(alias)s" rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = False) if len(rows) > 0: # logically, they are all the same provider, because they share the DB account return rows[0][0] _log.error('[%s]: cannot retrieve staff ID for [%s]', self.__class__.__name__, mod_by) return None staff_id = property(_get_staff_id) #-------------------------------------------------------- def format(self, *args, **kwargs): return format_dict_like ( self.fields_as_dict(none_string = ''), tabular = True, value_delimiters = None ).split('\n') #-------------------------------------------------------- def _get_revision_history(self, query, args, title): rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': query, 'args': args}], get_col_idx = True) lines = [] if rows == 0: lines.append('%s (no versions)' % title) else: lines.append('%s (%s versions)' % (title, rows[0]['row_version'] + 1)) column_labels = [ 'rev %s (%s)' % (r['row_version'], pydt_strftime(r['audit__action_when'], format = '%Y %b %d %H:%M', none_str = 'live row')) for r in rows ] lines.extend (dicts2table ( rows, left_margin = 1, eol = None, keys2ignore = ['audit__action_when', 'row_version', 'pk_audit'], show_only_changes = True, column_labels = column_labels, date_format = '%Y %b %d %H:%M', equality_value = u_left_arrow )) return lines #-------------------------------------------------------- def refetch_payload(self, ignore_changes=False, link_obj=None): """Fetch field values from backend.""" if self._is_modified: compare_dict_likes(self.original_payload, self.fields_as_dict(date_format = None, none_string = None), 'original payload', 'modified payload') if ignore_changes: _log.critical('[%s:%s]: loosing payload changes' % (self.__class__.__name__, self.pk_obj)) #_log.debug('most recently fetched: %s' % self.payload_most_recently_fetched) #_log.debug('modified: %s' % self._payload) else: _log.critical('[%s:%s]: cannot reload, payload changed' % (self.__class__.__name__, self.pk_obj)) return False if isinstance(self.pk_obj, dict): args = self.pk_obj else: args = [self.pk_obj] rows, self._idx = gmPG2.run_ro_queries ( link_obj = link_obj, queries = [{'cmd': self.__class__._cmd_fetch_payload, 'args': args}], get_col_idx = True ) if len(rows) == 0: _log.error('[%s:%s]: no such instance' % (self.__class__.__name__, self.pk_obj)) return False if len(rows) > 1: raise AssertionError('[%s:%s]: %s instances !' % (self.__class__.__name__, self.pk_obj, len(rows))) self._payload = rows[0] return True #-------------------------------------------------------- def __noop(self): pass #-------------------------------------------------------- def save(self, conn=None): return self.save_payload(conn = conn) #-------------------------------------------------------- def save_payload(self, conn=None): """Store updated values (if any) in database. 
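		A typical entry in _cmds_store_payload (illustrative only - the table,
		column, and alias names below are made up) carries the row version in
		the WHERE clause and returns the new version, schematically:

			UPDATE clin.substance_intake SET narrative = %(narrative)s
			WHERE pk = %(pk_substance_intake)s AND xmin = %(xmin_substance_intake)s
			RETURNING xmin AS xmin_substance_intake

		so that concurrent edits are detected (no row updated because the xmin
		changed under us) and the values returned by the query can refresh the
		cached payload.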
Optionally accepts a pre-existing connection - returns a tuple (, ) - True: success - False: an error occurred * data is (error, message) * for error meanings see gmPG2.run_rw_queries() """ if not self._is_modified: return (True, None) args = {} for field in self._idx: args[field] = self._payload[self._idx[field]] self.payload_most_recently_attempted_to_store = args close_conn = self.__noop if conn is None: conn = gmPG2.get_connection(readonly=False) close_conn = conn.close queries = [] for query in self.__class__._cmds_store_payload: queries.append({'cmd': query, 'args': args}) rows, idx = gmPG2.run_rw_queries ( link_obj = conn, queries = queries, return_data = True, get_col_idx = True ) # success ? if len(rows) == 0: # nothing updated - this can happen if: # - someone else updated the row so XMIN does not match anymore # - the PK went away (rows were deleted from under us) # - another WHERE condition of the UPDATE did not produce any rows to update # - savepoints are used since subtransactions may relevantly change the xmin/xmax ... return (False, ('cannot update row', _('[%s:%s]: row not updated (nothing returned), row in use ?') % (self.__class__.__name__, self.pk_obj))) # update cached values from should-be-first-and-only # result row of last query, # update all fields returned such that computed # columns see their new values (given they are # returned by the query) row = rows[0] for key in idx: try: self._payload[self._idx[key]] = row[idx[key]] except KeyError: conn.rollback() close_conn() _log.error('[%s:%s]: cannot update instance, XMIN-refetch key mismatch on [%s]' % (self.__class__.__name__, self.pk_obj, key)) _log.error('payload keys: %s' % str(self._idx)) _log.error('XMIN-refetch keys: %s' % str(idx)) _log.error(args) raise # only at conn.commit() time will data actually # get committed (and thusly trigger based notifications # be sent out), so reset the local modification flag # right before that self._is_modified = False conn.commit() close_conn() # update to new "original" payload self.payload_most_recently_fetched = {} for field in self._idx: self.payload_most_recently_fetched[field] = self._payload[self._idx[field]] return (True, None) #============================================================ if __name__ == '__main__': if len(sys.argv) < 2: sys.exit() if sys.argv[1] != 'test': sys.exit() #-------------------------------------------------------- class cTestObj(cBusinessDBObject): _cmd_fetch_payload = None _cmds_store_payload = None _updatable_fields = [] #---------------------------------------------------- def get_something(self): pass #---------------------------------------------------- def set_something(self): pass #-------------------------------------------------------- from Gnumed.pycommon import gmI18N gmI18N.activate_locale() gmI18N.install_domain() data = { 'pk_field': 'bogus_pk', 'idx': {'bogus_pk': 0, 'bogus_field': 1, 'bogus_date': 2}, 'data': [-1, 'bogus_data', datetime.datetime.now()] #'data': {'bogus_pk': -1, 'bogus_field': 'bogus_data', 'bogus_date': datetime.datetime.now()} } obj = cTestObj(row=data) print(obj.format()) #print(obj['wrong_field']) #obj['wrong_field'] = 1 print(obj.fields_as_dict()) #============================================================ ./gnumed-server.22.17/server/pycommon/gmCrypto.py0000644000175000017500000005054014172057744020136 0ustar ncqncq# -*- coding: utf-8 -*- __doc__ = """GNUmed crypto tools. First and only rule: DO NOT REIMPLEMENT ENCRYPTION Use existing tools. 
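The helpers in this module therefore do not implement any cryptography in
Python at all - they shell out (via gmShellAPI) to external tools: 7z for
(encrypted) zip archives and AES-encrypted files, GnuPG for symmetric
encryption and decryption, and qpdf for PDF encryption.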
""" #=========================================================================== __author__ = "K. Hilbert " __license__ = "GPL v2 or later (details at http://www.gnu.org)" # std libs import sys import os import logging import tempfile # GNUmed libs if __name__ == '__main__': sys.path.insert(0, '../../') from Gnumed.pycommon import gmLog2 from Gnumed.pycommon import gmShellAPI from Gnumed.pycommon import gmTools from Gnumed.pycommon import gmMimeLib _log = logging.getLogger('gm.encryption') #=========================================================================== # archiving methods #--------------------------------------------------------------------------- def create_encrypted_zip_archive_from_dir(source_dir, comment=None, overwrite=True, passphrase=None, verbose=False): """Use 7z to create an encrypted ZIP archive of a directory. will be included into the archive included as a file containing the comment remove existing archive before creation, avoiding *updating* of those, and thereby including unintended data minimum length of 5 The resulting zip archive will always be named "datawrapper.zip" for confidentiality reasons. If callers want another name they will have to shutil.move() the zip file themselves. This archive will be compressed and AES256 encrypted with the given passphrase. Therefore, the result will not decrypt with earlier versions of unzip software. On Windows, 7z oder WinZip are needed. The zip format does not support header encryption thereby allowing attackers to gain knowledge of patient details by observing the names of files and directories inside the encrypted archive. To reduce that attack surface, GNUmed will create _another_ zip archive inside "datawrapper.zip", which eventually wraps up the patient data as "data.zip". That archive is not compressed and not encrypted, and can thus be unpacked with any old unzipper. Note that GNUmed does NOT remember the passphrase for you. You will have to take care of that yourself, and possibly also safely hand over the passphrase to any receivers of the zip archive. 
""" if len(passphrase) < 5: _log.error(' must be at least 5 characters/signs/digits') return None gmLog2.add_word2hide(passphrase) source_dir = os.path.abspath(source_dir) if not os.path.isdir(source_dir): _log.error(' does not exist or is not a directory: %s', source_dir) return False for cmd in ['7z', '7z.exe']: found, binary = gmShellAPI.detect_external_binary(binary = cmd) if found: break if not found: _log.warning('no 7z binary found') return None sandbox_dir = gmTools.mk_sandbox_dir() archive_path_inner = os.path.join(sandbox_dir, 'data') if not gmTools.mkdir(archive_path_inner): _log.error('cannot create scratch space for inner achive: %s', archive_path_inner) archive_fname_inner = 'data.zip' archive_name_inner = os.path.join(archive_path_inner, archive_fname_inner) archive_path_outer = gmTools.gmPaths().tmp_dir archive_fname_outer = 'datawrapper.zip' archive_name_outer = os.path.join(archive_path_outer, archive_fname_outer) # remove existing archives so they don't get *updated* rather than newly created if overwrite: if not gmTools.remove_file(archive_name_inner, force = True): _log.error('cannot remove existing archive [%s]', archive_name_inner) return False if not gmTools.remove_file(archive_name_outer, force = True): _log.error('cannot remove existing archive [%s]', archive_name_outer) return False # 7z does not support ZIP comments so create a text file holding the comment if comment is not None: tmp, fname = os.path.split(source_dir.rstrip(os.sep)) comment_filename = os.path.join(sandbox_dir, '000-%s-comment.txt' % fname) with open(comment_filename, mode = 'wt', encoding = 'utf8', errors = 'replace') as comment_file: comment_file.write(comment) # create inner (data) archive: uncompressed, unencrypted, similar to a tar archive args = [ binary, 'a', # create archive '-sas', # be smart about archive name extension '-bd', # no progress indicator '-mx0', # no compression (only store files) '-mcu=on', # UTF8 filenames '-l', # store content of links, not links '-scsUTF-8', # console charset '-tzip' # force ZIP format ] if verbose: args.append('-bb3') args.append('-bt') else: args.append('-bb1') args.append(archive_name_inner) args.append(source_dir) if comment is not None: args.append(comment_filename) success, exit_code, stdout = gmShellAPI.run_process(cmd_line = args, encoding = 'utf8', verbose = verbose) if not success: _log.error('cannot create inner archive') return None # create "decompress instructions" file instructions_filename = os.path.join(archive_path_inner, '000-on_Windows-open_with-WinZip_or_7z_tools') open(instructions_filename, mode = 'wt').close() # create outer (wrapper) archive: compressed, encrypted args = [ binary, 'a', # create archive '-sas', # be smart about archive name extension '-bd', # no progress indicator '-mx9', # best available zip compression ratio '-mcu=on', # UTF8 filenames '-l', # store content of links, not links '-scsUTF-8', # console charset '-tzip', # force ZIP format '-mem=AES256', # force useful encryption '-p%s' % passphrase # set passphrase ] if verbose: args.append('-bb3') args.append('-bt') else: args.append('-bb1') args.append(archive_name_outer) args.append(archive_path_inner) success, exit_code, stdout = gmShellAPI.run_process(cmd_line = args, encoding = 'utf8', verbose = verbose) if success: return archive_name_outer _log.error('cannot create outer archive') return None #--------------------------------------------------------------------------- def create_zip_archive_from_dir(source_dir, archive_name=None, comment=None, 
overwrite=True, verbose=False): source_dir = os.path.abspath(source_dir) if not os.path.isdir(source_dir): _log.error(' does not exist or is not a directory: %s', source_dir) return False for cmd in ['7z', '7z.exe']: found, binary = gmShellAPI.detect_external_binary(binary = cmd) if found: break if not found: _log.warning('no 7z binary found') return None if archive_name is None: # do not assume we can write to "sourcedir/../" archive_path = gmTools.gmPaths().tmp_dir # but do take archive name from source_dir tmp, archive_fname = os.path.split(source_dir.rstrip(os.sep) + '.zip') archive_name = os.path.join(archive_path, archive_fname) # remove any existing archives so they don't get *updated* # rather than newly created if overwrite: if not gmTools.remove_file(archive_name, force = True): _log.error('cannot remove existing archive [%s]', archive_name) return False # 7z does not support ZIP comments so create # a text file holding the comment ... if comment is not None: comment_filename = os.path.abspath(archive_name) + '.comment.txt' if gmTools.remove_file(comment_filename, force = True): with open(comment_filename, mode = 'wt', encoding = 'utf8', errors = 'replace') as comment_file: comment_file.write(comment) else: _log.error('cannot remove existing archive comment file [%s]', comment_filename) comment = None # compress args = [ binary, 'a', # create archive '-sas', # be smart about archive name extension '-bd', # no progress indicator '-mx9', # best available zip compression ratio '-mcu=on', # UTF8 filenames '-l', # store content of links, not links '-scsUTF-8', # console charset '-tzip' # force ZIP format ] if verbose: args.append('-bb3') args.append('-bt') else: args.append('-bb1') args.append(archive_name) args.append(source_dir) if comment is not None: args.append(comment_filename) success, exit_code, stdout = gmShellAPI.run_process(cmd_line = args, encoding = 'utf8', verbose = verbose) if comment is not None: gmTools.remove_file(comment_filename) if success: return archive_name return None #=========================================================================== # file decryption methods #--------------------------------------------------------------------------- def gpg_decrypt_file(filename=None, passphrase=None, verbose=False, target_ext=None): assert (filename is not None), ' must not be None' _log.debug('attempting GPG decryption') for cmd in ['gpg2', 'gpg', 'gpg2.exe', 'gpg.exe']: found, binary = gmShellAPI.detect_external_binary(binary = cmd) if found: break if not found: _log.warning('no gpg binary found') return None basename = os.path.splitext(filename)[0] filename_decrypted = gmTools.get_unique_filename(prefix = '%s-decrypted-' % basename, suffix = target_ext) args = [ binary, '--utf8-strings', '--display-charset', 'utf-8', '--batch', '--no-greeting', '--enable-progress-filter', '--decrypt', '--output', filename_decrypted ##'--use-embedded-filename' # not all encrypted files carry a filename ] if verbose: args.extend ([ '--verbose', '--verbose', '--debug-level', '8', '--debug', 'packet,mpi,crypto,filter,iobuf,memory,cache,memstat,trust,hashing,clock,lookup,extprog' ##'--debug-all', # will log passphrase ##'--debug, 'ipc', # will log passphrase ##'--debug-level', 'guru', # will log passphrase ##'--debug-level', '9', # will log passphrase ]) args.append(filename) success, exit_code, stdout = gmShellAPI.run_process(cmd_line = args, verbose = verbose, encoding = 'utf-8') if success: return filename_decrypted return None 
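#---------------------------------------------------------------------------
# Illustrative call of gpg_decrypt_file() (the file name is made up; note
# that this implementation does not pass <passphrase> on to gpg, so gpg's
# own agent/pinentry is expected to ask for it):
#
#	decrypted_fname = gpg_decrypt_file(filename = '/tmp/referral.pdf.asc', target_ext = '.pdf')
#	if decrypted_fname is None:
#		...		# no gpg binary found, or gpg returned an error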
#=========================================================================== # file encryption methods #--------------------------------------------------------------------------- def gpg_encrypt_file_symmetric(filename=None, comment=None, verbose=False, passphrase=None, remove_unencrypted=False): #add short decr instr to comment assert (filename is not None), ' must not be None' _log.debug('attempting symmetric GPG encryption') for cmd in ['gpg2', 'gpg', 'gpg2.exe', 'gpg.exe']: found, binary = gmShellAPI.detect_external_binary(binary = cmd) if found: break if not found: _log.warning('no gpg binary found') return None filename_encrypted = filename + '.asc' args = [ binary, '--utf8-strings', '--display-charset', 'utf-8', '--batch', '--no-greeting', '--enable-progress-filter', '--symmetric', '--cipher-algo', 'AES256', '--armor', '--output', filename_encrypted ] if comment is not None: args.extend(['--comment', comment]) if verbose: args.extend ([ '--verbose', '--verbose', '--debug-level', '8', '--debug', 'packet,mpi,crypto,filter,iobuf,memory,cache,memstat,trust,hashing,clock,lookup,extprog', ##'--debug-all', # will log passphrase ##'--debug, 'ipc', # will log passphrase ##'--debug-level', 'guru', # will log passphrase ##'--debug-level', '9', # will log passphrase ]) pwd_fname = None if passphrase is not None: pwd_file = tempfile.NamedTemporaryFile(mode = 'w+t', encoding = 'utf8', delete = False) pwd_fname = pwd_file.name args.extend ([ '--pinentry-mode', 'loopback', '--passphrase-file', pwd_fname ]) pwd_file.write(passphrase) pwd_file.close() args.append(filename) try: success, exit_code, stdout = gmShellAPI.run_process(cmd_line = args, verbose = verbose, encoding = 'utf-8') finally: if pwd_fname is not None: os.remove(pwd_fname) if not success: return None if not remove_unencrypted: return filename_encrypted if gmTools.remove_file(filename): return filename_encrypted gmTools.remove_file(filename_encrypted) return None #--------------------------------------------------------------------------- def aes_encrypt_file(filename=None, passphrase=None, comment=None, verbose=False, remove_unencrypted=False): assert (filename is not None), ' must not be None' assert (passphrase is not None), ' must not be None' if len(passphrase) < 5: _log.error(' must be at least 5 characters/signs/digits') return None gmLog2.add_word2hide(passphrase) #add 7z/winzip url to comment.txt _log.debug('attempting 7z AES encryption') for cmd in ['7z', '7z.exe']: found, binary = gmShellAPI.detect_external_binary(binary = cmd) if found: break if not found: _log.warning('no 7z binary found, trying gpg') return None if comment is not None: archive_path, archive_name = os.path.split(os.path.abspath(filename)) comment_filename = gmTools.get_unique_filename ( prefix = '%s.7z.comment-' % archive_name, tmp_dir = archive_path, suffix = '.txt' ) with open(comment_filename, mode = 'wt', encoding = 'utf8', errors = 'replace') as comment_file: comment_file.write(comment) else: comment_filename = '' filename_encrypted = '%s.7z' % filename args = [binary, 'a', '-bb3', '-mx0', "-p%s" % passphrase, filename_encrypted, filename, comment_filename] encrypted, exit_code, stdout = gmShellAPI.run_process(cmd_line = args, encoding = 'utf8', verbose = verbose) gmTools.remove_file(comment_filename) if not encrypted: return None if not remove_unencrypted: return filename_encrypted if gmTools.remove_file(filename): return filename_encrypted gmTools.remove_file(filename_encrypted) return None 
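#---------------------------------------------------------------------------
# Illustrative call of aes_encrypt_file() (paths and passphrase are made up):
#
#	encrypted_fname = aes_encrypt_file (
#		filename = '/tmp/referral.pdf',
#		passphrase = 'five+chars+at+least',	# must be at least 5 characters
#		comment = 'referral letter',		# written to a sidecar .txt which is added to the archive
#		remove_unencrypted = False
#	)
#	# -> '/tmp/referral.pdf.7z' on success, None on failure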
#--------------------------------------------------------------------------- def encrypt_pdf(filename=None, passphrase=None, verbose=False, remove_unencrypted=False): assert (filename is not None), ' must not be None' assert (passphrase is not None), ' must not be None' if len(passphrase) < 5: _log.error(' must be at least 5 characters/signs/digits') return None gmLog2.add_word2hide(passphrase) _log.debug('attempting PDF encryption') for cmd in ['qpdf', 'qpdf.exe']: found, binary = gmShellAPI.detect_external_binary(binary = cmd) if found: break if not found: _log.warning('no qpdf binary found') return None filename_encrypted = '%s.encrypted.pdf' % os.path.splitext(filename)[0] args = [ binary, '--verbose', '--encrypt', passphrase, '', '128', '--print=full', '--modify=none', '--extract=n', '--use-aes=y', '--', filename, filename_encrypted ] success, exit_code, stdout = gmShellAPI.run_process(cmd_line = args, encoding = 'utf8', verbose = verbose) if not success: return None if not remove_unencrypted: return filename_encrypted if gmTools.remove_file(filename): return filename_encrypted gmTools.remove_file(filename_encrypted) return None #--------------------------------------------------------------------------- def encrypt_file_symmetric(filename=None, passphrase=None, comment=None, verbose=False, remove_unencrypted=False, convert2pdf=False): """Encrypt with a symmetric cipher. - True: convert to PDF, if possible, and encrypt that. """ assert (filename is not None), ' must not be None' if convert2pdf: _log.debug('PDF encryption preferred, attempting conversion if needed') pdf_fname = gmMimeLib.convert_file ( filename = filename, target_mime = 'application/pdf', target_filename = filename + '.pdf', verbose = verbose ) if pdf_fname is not None: _log.debug('successfully converted to PDF') # remove non-pdf file gmTools.remove_file(filename) filename = pdf_fname # try PDF-inherent AES encrypted_filename = encrypt_pdf ( filename = filename, passphrase = passphrase, verbose = verbose, remove_unencrypted = remove_unencrypted ) if encrypted_filename is not None: return encrypted_filename # try 7z based AES encrypted_filename = aes_encrypt_file ( filename = filename, passphrase = passphrase, comment = comment, verbose = verbose, remove_unencrypted = remove_unencrypted ) if encrypted_filename is not None: return encrypted_filename # try GPG based AES return gpg_encrypt_file_symmetric(filename = filename, passphrase = passphrase, comment = comment, verbose = verbose, remove_unencrypted = remove_unencrypted) #--------------------------------------------------------------------------- def encrypt_file(filename=None, receiver_key_ids=None, passphrase=None, comment=None, verbose=False, remove_unencrypted=False, convert2pdf=False): """Encrypt an arbitrary file. 
True: remove unencrypted source file if encryption succeeded True: attempt conversion to PDF of input file before encryption success: the PDF is encrypted (and the non-PDF source file is removed) failure: the source file is encrypted """ assert (filename is not None), ' must not be None' # cannot do asymmetric if receiver_key_ids is None: _log.debug('no receiver key IDs: cannot try asymmetric encryption') return encrypt_file_symmetric ( filename = filename, passphrase = passphrase, comment = comment, verbose = verbose, remove_unencrypted = remove_unencrypted, convert2pdf = convert2pdf ) # asymmetric not implemented yet return None #--------------------------------------------------------------------------- def encrypt_directory_content(directory=None, receiver_key_ids=None, passphrase=None, comment=None, verbose=False, remove_unencrypted=True, convert2pdf=False): assert (directory is not None), 'source must not be None' _log.debug('encrypting content of [%s]', directory) try: items = os.listdir(directory) except OSError: return False for item in items: full_item = os.path.join(directory, item) if os.path.isdir(full_item): subdir_encrypted = encrypt_directory_content ( directory = full_item, receiver_key_ids = receiver_key_ids, passphrase = passphrase, comment = comment, verbose = verbose ) if subdir_encrypted is False: return False continue fname_encrypted = encrypt_file ( filename = full_item, receiver_key_ids = receiver_key_ids, passphrase = passphrase, comment = comment, verbose = verbose, remove_unencrypted = remove_unencrypted, convert2pdf = convert2pdf ) if fname_encrypted is None: return False return True #--------------------------------------------------------------------------- def pdf_is_encrypted(filename:str=None) -> bool: pass #=========================================================================== # main #--------------------------------------------------------------------------- if __name__ == '__main__': if len(sys.argv) < 2: sys.exit() if sys.argv[1] != 'test': sys.exit() # for testing: logging.basicConfig(level = logging.DEBUG) from Gnumed.pycommon import gmI18N gmI18N.activate_locale() gmI18N.install_domain() #----------------------------------------------------------------------- def test_gpg_decrypt(): print(gpg_decrypt_file(filename = sys.argv[2], verbose = True)) #----------------------------------------------------------------------- def test_gpg_encrypt_symmetric(): print(gpg_encrypt_file_symmetric(filename = sys.argv[2], passphrase = sys.argv[3], verbose = True, comment = 'GNUmed testing')) #----------------------------------------------------------------------- def test_aes_encrypt(): print(aes_encrypt_file(filename = sys.argv[2], passphrase = sys.argv[3], comment = sys.argv[4], verbose = True)) #----------------------------------------------------------------------- def test_encrypt_pdf(): print(encrypt_pdf(filename = sys.argv[2], passphrase = sys.argv[3], verbose = True)) #----------------------------------------------------------------------- def test_encrypt_file(): print(encrypt_file(filename = sys.argv[2], passphrase = sys.argv[3], verbose = True, convert2pdf = True)) #----------------------------------------------------------------------- def test_zip_archive_from_dir(): print(create_zip_archive_from_dir ( sys.argv[2], #archive_name=None, comment = 'GNUmed test archive', overwrite = True, verbose = True )) #----------------------------------------------------------------------- def test_encrypted_zip_archive_from_dir(): 
print(create_encrypted_zip_archive_from_dir ( sys.argv[2], comment = 'GNUmed test archive', overwrite = True, passphrase = sys.argv[3], verbose = True )) #----------------------------------------------------------------------- # encryption #test_aes_encrypt() #test_encrypt_pdf() #test_gpg_encrypt_symmetric() test_encrypt_file() # decryption #test_gpg_decrypt() #test_zip_archive_from_dir() #test_encrypted_zip_archive_from_dir() ./gnumed-server.22.17/server/pycommon/gmShellAPI.py0000644000175000017500000003021414172057744020253 0ustar ncqncq__doc__ = """GNUmed general tools.""" #=========================================================================== __author__ = "K. Hilbert " __license__ = "GPL v2 or later (details at http://www.gnu.org)" # stdlib import os import sys import logging import subprocess import shlex _log = logging.getLogger('gm.shell') #=========================================================================== def is_cmd_in_path(cmd=None): _log.debug('cmd: [%s]', cmd) dirname = os.path.dirname(cmd) _log.debug('dir: [%s]', dirname) if dirname != '': _log.info('command with full or relative path, not searching in PATH for binary') return (None, None) #env_paths = str(os.environ['PATH'], encoding = sys.getfilesystemencoding(), errors = 'replace') env_paths = os.environ['PATH'] _log.debug('${PATH}: %s', env_paths) for path in env_paths.split(os.pathsep): candidate = os.path.join(path, cmd) if os.access(candidate, os.X_OK): _log.debug('found [%s]', candidate) return (True, candidate) else: _log.debug('not found: %s', candidate) _log.debug('command not found in PATH') return (False, None) #=========================================================================== def is_executable_by_wine(cmd=None): if not cmd.startswith('wine'): _log.debug('not a WINE call: %s', cmd) return (False, None) exe_path = cmd.encode(sys.getfilesystemencoding()) exe_path = exe_path[4:].strip().strip('"').strip() # [wine "/standard/unix/path/to/binary.exe"] ? if os.access(exe_path, os.R_OK): _log.debug('WINE call with UNIX path: %s', exe_path) return (True, cmd) # detect [winepath] found, full_winepath_path = is_cmd_in_path(cmd = r'winepath') if not found: _log.error('[winepath] not found, cannot check WINE call for Windows path conformance: %s', exe_path) return (False, None) # [wine "drive:\a\windows\path\to\binary.exe"] ? cmd_line = r'%s -u "%s"' % ( full_winepath_path.encode(sys.getfilesystemencoding()), exe_path ) _log.debug('converting Windows path to UNIX path: %s' % cmd_line) cmd_line = shlex.split(cmd_line) try: winepath = subprocess.Popen ( cmd_line, stdout = subprocess.PIPE, stderr = subprocess.PIPE, universal_newlines = True ) except OSError: _log.exception('cannot run ') return (False, None) stdout, stderr = winepath.communicate() full_path = stdout.strip('\r\n') _log.debug('UNIX path: %s', full_path) if winepath.returncode != 0: _log.error(' returned [%s], failed to convert path', winepath.returncode) return (False, None) if os.access(full_path, os.R_OK): _log.debug('WINE call with Windows path') return (True, cmd) _log.warning('Windows path [%s] not verifiable under UNIX: %s', exe_path, full_path) return (False, None) #=========================================================================== def detect_external_binary(binary=None): """ is the name of the executable with or without .exe/.bat""" _log.debug('searching for [%s]', binary) binary = binary.lstrip() # is it a sufficiently qualified, directly usable, explicit path ? 
if os.access(binary, os.X_OK): _log.debug('found: executable explicit path') return (True, binary) # can it be found in PATH ? found, full_path = is_cmd_in_path(cmd = binary) if found: if os.access(full_path, os.X_OK): _log.debug('found: executable in ${PATH}') return (True, full_path) # does it seem to be a call via WINE ? is_wine_call, full_path = is_executable_by_wine(cmd = binary) if is_wine_call: _log.debug('found: is valid WINE call') return (True, full_path) # maybe we can be a bit smart about Windows ? if os.name == 'nt': # try .exe (but not if already .bat or .exe) if not (binary.endswith('.exe') or binary.endswith('.bat')): exe_binary = binary + r'.exe' _log.debug('re-testing as %s', exe_binary) found_dot_exe_binary, full_path = detect_external_binary(binary = exe_binary) if found_dot_exe_binary: return (True, full_path) # not found with .exe, so try .bat: bat_binary = binary + r'.bat' _log.debug('re-testing as %s', bat_binary) found_bat_binary, full_path = detect_external_binary(binary = bat_binary) if found_bat_binary: return (True, full_path) else: _log.debug('not running under Windows, not testing .exe/.bat') return (False, None) #=========================================================================== def find_first_binary(binaries=None): found = False binary = None for cmd in binaries: _log.debug('looking for [%s]', cmd) if cmd is None: continue found, binary = detect_external_binary(binary = cmd) if found: break return (found, binary) #=========================================================================== def run_command_in_shell(command=None, blocking=False, acceptable_return_codes=None): """Runs a command in a subshell via standard-C system(). The shell command to run including command line options. This will make the code *block* until the shell command exits. It will likely only work on UNIX shells where "cmd &" makes sense. 
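	An illustrative non-blocking call (the command itself is made up):

		run_command_in_shell(command = 'xdg-open /tmp/some.pdf', blocking = False)

	which, on POSIX, amounts to running "xdg-open /tmp/some.pdf &" via os.system().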
http://stackoverflow.com/questions/35817/how-to-escape-os-system-calls-in-python """ if acceptable_return_codes is None: acceptable_return_codes = [0] _log.debug('shell command >>>%s<<<', command) _log.debug('blocking: %s', blocking) _log.debug('acceptable return codes: %s', str(acceptable_return_codes)) # FIXME: command should be checked for shell exploits command = command.strip() if os.name == 'nt': # http://stackoverflow.com/questions/893203/bat-files-nonblocking-run-launch if blocking is False: if not command.startswith('start '): command = 'start "GNUmed" /B "%s"' % command # elif blocking is True: # if not command.startswith('start '): # command = 'start "GNUmed" /WAIT /B "%s"' % command else: # what the following hack does is this: the user indicated # whether she wants non-blocking external display of files # - the real way to go about this is to have a non-blocking command # in the line in the mailcap file for the relevant mime types # - as non-blocking may not be desirable when *not* displaying # files from within GNUmed the really right way would be to # add a "test" clause to the non-blocking mailcap entry which # yields true if and only if GNUmed is running # - however, this is cumbersome at best and not supported in # some mailcap implementations # - so we allow the user to attempt some control over the process # from within GNUmed by setting a configuration option # - leaving it None means to use the mailcap default or whatever # was specified in the command itself # - True means: tack " &" onto the shell command if necessary # - False means: remove " &" from the shell command if its there # - all this, of course, only works in shells which support # detaching jobs with " &" (so, most POSIX shells) if blocking is True: command = command.rstrip(' &') elif blocking is False: if not command.strip().endswith('&'): command += ' &' _log.info('running shell command >>>%s<<<', command) # FIXME: use subprocess.Popen() ret_val = os.system(command.encode(sys.getfilesystemencoding())) _log.debug('os.system() returned: [%s]', ret_val) exited_normally = False if not hasattr(os, 'WIFEXITED'): _log.error('platform does not support exit status differentiation') if ret_val in acceptable_return_codes: _log.info('os.system() return value contained in acceptable return codes') _log.info('continuing and hoping for the best') return True return exited_normally _log.debug('exited via exit(): %s', os.WIFEXITED(ret_val)) if os.WIFEXITED(ret_val): _log.debug('exit code: [%s]', os.WEXITSTATUS(ret_val)) exited_normally = (os.WEXITSTATUS(ret_val) in acceptable_return_codes) _log.debug('normal exit: %s', exited_normally) _log.debug('dumped core: %s', os.WCOREDUMP(ret_val)) _log.debug('stopped by signal: %s', os.WIFSIGNALED(ret_val)) if os.WIFSIGNALED(ret_val): try: _log.debug('STOP signal was: [%s]', os.WSTOPSIG(ret_val)) except AttributeError: _log.debug('platform does not support os.WSTOPSIG()') try: _log.debug('TERM signal was: [%s]', os.WTERMSIG(ret_val)) except AttributeError: _log.debug('platform does not support os.WTERMSIG()') return exited_normally #=========================================================================== def run_first_available_in_shell(binaries=None, args=None, blocking=False, run_last_one_anyway=False, acceptable_return_codes=None): found, binary = find_first_binary(binaries = binaries) if not found: _log.warning('cannot find any of: %s', binaries) if run_last_one_anyway: binary = binaries[-1] _log.debug('falling back to trying to run [%s] anyway', binary) else: return 
False return run_command_in_shell(command = '%s %s' % (binary, args), blocking = blocking, acceptable_return_codes = acceptable_return_codes) #=========================================================================== def _log_output(level, stdout=None, stderr=None): lines2log = ['process output:'] if stdout is not None: lines2log.extend([ ' STDOUT: %s' % line for line in stdout.split('\n') ]) if stderr is not None: lines2log.extend([ ' STDERR: %s' % line for line in stderr.split('\n') ]) _log.log(level, '\n'.join(lines2log)) #=========================================================================== def run_process(cmd_line=None, timeout=None, encoding='utf8', input_data=None, acceptable_return_codes=None, verbose=False): assert (cmd_line is not None), ' must not be None' if acceptable_return_codes is None: acceptable_return_codes = [0] _log.info('running: %s' % cmd_line) try: if input_data is None: proc_result = subprocess.run ( args = cmd_line, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, timeout = timeout, encoding = encoding, errors = 'replace' ) else: proc_result = subprocess.run ( args = cmd_line, input = input_data, stdout = subprocess.PIPE, stderr = subprocess.PIPE, timeout = timeout, encoding = encoding, errors = 'replace' ) except (subprocess.TimeoutExpired, FileNotFoundError): _log.exception('there was a problem running external process') return False, -1, '' _log.info('exit code [%s]', proc_result.returncode) if verbose: _log_output(logging.DEBUG, stdout = proc_result.stdout, stderr = proc_result.stderr) if proc_result.returncode not in acceptable_return_codes: _log.error('there was a problem executing the external process') _log.debug('expected one of: %s', acceptable_return_codes) if not verbose: _log_output(logging.ERROR, stdout = proc_result.stdout, stderr = proc_result.stderr) return False, proc_result.returncode, '' return True, proc_result.returncode, proc_result.stdout #=========================================================================== # main #--------------------------------------------------------------------------- if __name__ == '__main__': if len(sys.argv) < 2: sys.exit() if sys.argv[1] != 'test': sys.exit() logging.basicConfig(level = logging.DEBUG) #--------------------------------------------------------- def test_detect_external_binary(): found, path = detect_external_binary(binary = sys.argv[2]) if found: print("found as:", path) else: print(sys.argv[2], "not found") #--------------------------------------------------------- def test_run_command_in_shell(): print("-------------------------------------") print("running:", sys.argv[2]) if run_command_in_shell(command=sys.argv[2], blocking=False): print("-------------------------------------") print("success") else: print("-------------------------------------") print("failure, consult log") #--------------------------------------------------------- def test_is_cmd_in_path(): print(is_cmd_in_path(cmd = sys.argv[2])) #--------------------------------------------------------- def test_is_executable_by_wine(): print(is_executable_by_wine(cmd = sys.argv[2])) #--------------------------------------------------------- #test_run_command_in_shell() #test_detect_external_binary() test_is_cmd_in_path() #test_is_executable_by_wine() #=========================================================================== ./gnumed-server.22.17/server/pycommon/gmWorkerThread.py0000644000175000017500000000725414172057744021263 0ustar ncqncq__doc__ = """GNUmed worker threads.""" 
#===================================================================== __author__ = "K.Hilbert " __license__ = "GPL v2 or later" import sys import logging import threading import datetime as dt import copy # wx.CallAfter() does not seem to work with multiprocessing ! #import multiprocessing if __name__ == '__main__': sys.path.insert(0, '../../') _log = logging.getLogger('gm.worker') #===================================================================== def execute_in_worker_thread(payload_function=None, payload_kwargs=None, completion_callback=None, worker_name=None): """Create a thread and have it execute . - if not None - better be prepared to receive the result of . """ _log.debug('worker [%s]', worker_name) # decouple from calling thread __payload_kwargs = copy.deepcopy(payload_kwargs) worker_thread = None #------------------------------- def _run_payload(): try: if payload_kwargs is None: payload_result = payload_function() else: payload_result = payload_function(**__payload_kwargs) _log.debug('finished running payload function: %s', payload_function) except Exception: _log.exception('error running payload function: %s', payload_function) return if completion_callback is None: return try: completion_callback(payload_result) _log.debug('finished running completion callback') except Exception: _log.exception('error running completion callback: %s', completion_callback) _log.info('worker thread [name=%s, PID=%s] shuts down', worker_thread.name, worker_thread.ident) return #------------------------------- if not callable(payload_function): raise ValueError('<%s> is not callable', payload_function) if completion_callback is not None: if not callable(completion_callback): raise ValueError('<%s> is not callable', completion_callback) if worker_name is None: __thread_name = dt.datetime.now().strftime('%f-%S') else: __thread_name = '%sThread-%s' % ( worker_name, dt.datetime.now().strftime('%f') ) _log.debug('creating thread "%s"', __thread_name) _log.debug(' "%s" payload function: %s', __thread_name, payload_function) _log.debug(' "%s" results callback: %s', __thread_name, completion_callback) #worker_thread = multiprocessing.Process ( worker_thread = threading.Thread ( target = _run_payload, name = __thread_name ) # we don't want hung workers to prevent us from exiting GNUmed worker_thread.daemon = True _log.info('starting thread "%s"', __thread_name) worker_thread.start() _log.debug(' "%s" ident (= PID): %s', worker_thread.name, worker_thread.ident) # from here on, another thread executes _run_payload() # which executes payload_function() and, eventually, # completion_callback() if available, # return thread ident so people can join() it if needed return worker_thread.ident #===================================================================== # main #===================================================================== if __name__ == "__main__": if len(sys.argv) < 2: sys.exit() if sys.argv[1] != 'test': sys.exit() import time import random from Gnumed.pycommon import gmLog2 def test_print_dots(ident=None): def slowly_print_dots(info=None): for i in range(5): print('* (#%s in %s)' % (i, info)) time.sleep(1 + (random.random()*4)) return '%s' % time.localtime() def print_dot_end_time(time_str): print('done: %s' % time_str) execute_in_worker_thread ( payload_function = slowly_print_dots, payload_kwargs = {'info': ident}, completion_callback = print_dot_end_time ) test_print_dots('A') test_print_dots('B') 
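#=====================================================================
# Note: execute_in_worker_thread() returns the thread *ident*, not the
# Thread object, so a caller wanting to wait for completion has to look
# the thread up again. Illustrative sketch only (some_payload is a made-up
# callable, not part of this module):
#
#	import threading
#
#	ident = execute_in_worker_thread(payload_function = some_payload)
#	workers = [ t for t in threading.enumerate() if t.ident == ident ]
#	if workers:
#		workers[0].join(timeout = 10)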
./gnumed-server.22.17/server/pycommon/gmPrinting.py0000644000175000017500000002305014172057744020444 0ustar ncqncq__doc__ = """GNUmed printing.""" __author__ = "K.Hilbert " __license__ = 'GPL v2 or later (details at http://www.gnu.org)' # ======================================================================= import logging import sys import os import io import time if __name__ == '__main__': sys.path.insert(0, '../../') from Gnumed.pycommon import gmShellAPI from Gnumed.pycommon import gmTools from Gnumed.pycommon import gmLog2 _log = logging.getLogger('gm.printing') known_printjob_types = [ 'medication_list', 'generic_document' ] external_print_APIs = [ 'gm-print_doc', 'os_startfile', # win, mostly 'gsprint', # win 'acrobat_reader', # win 'gtklp', # Linux 'Internet_Explorer', # win 'Mac_Preview' # MacOSX ] #======================================================================= # internal print API #----------------------------------------------------------------------- def print_files(filenames=None, jobtype=None, print_api=None, verbose=False): _log.debug('printing "%s": %s', jobtype, filenames) for fname in filenames: try: open(fname, 'r').close() except Exception: _log.exception('cannot open [%s], aborting', fname) return False if jobtype not in known_printjob_types: print("unregistered print job type <%s>" % jobtype) _log.warning('print job type "%s" not registered', jobtype) if print_api not in external_print_APIs: _log.warning('print API "%s" unknown, trying all', print_api) if print_api == 'os_startfile': return _print_files_by_os_startfile(filenames = filenames) if print_api == 'gm-print_doc': return _print_files_by_shellscript(filenames = filenames, jobtype = jobtype, verbose = verbose) if print_api == 'gsprint': return _print_files_by_gsprint_exe(filenames = filenames, verbose = verbose) if print_api == 'acrobat_reader': return _print_files_by_acroread_exe(filenames = filenames, verbose = verbose) if print_api == 'gtklp': return _print_files_by_gtklp(filenames = filenames, verbose = verbose) if print_api == 'Internet_Explorer': return _print_files_by_IE(filenames = filenames) if print_api == 'Mac_Preview': return _print_files_by_mac_preview(filenames = filenames, verbose = verbose) # else try all if (sys.platform == 'darwin') or (os.name == 'mac'): if _print_files_by_mac_preview(filenames = filenames, verbose = verbose): return True elif os.name == 'posix': if _print_files_by_gtklp(filenames = filenames, verbose = verbose): return True elif os.name == 'nt': if _print_files_by_shellscript(filenames = filenames, jobtype = jobtype, verbose = verbose): return True if _print_files_by_gsprint_exe(filenames = filenames, verbose = verbose): return True if _print_files_by_acroread_exe(filenames = filenames, verbose = verbose): return True if _print_files_by_os_startfile(filenames = filenames): return True if _print_files_by_IE(filenames = filenames): return True return False if _print_files_by_shellscript(filenames = filenames, jobtype = jobtype, verbose = verbose): return True return False #======================================================================= # external print APIs #----------------------------------------------------------------------- def _print_files_by_mac_preview(filenames=None, verbose=False): # if os.name != 'mac': # does not work if sys.platform != 'darwin': _log.debug('MacOSX only available under MacOSX/Darwin') return False for filename in filenames: cmd_line = [ 'open', # "open" must be in the PATH '-a Preview', # action = Preview filename ] success, 
returncode, stdout = gmShellAPI.run_process(cmd_line = cmd_line, verbose = verbose) if not success: return False return True #----------------------------------------------------------------------- def _print_files_by_IE(filenames=None): if os.name != 'nt': _log.debug('Internet Explorer only available under Windows') return False try: from win32com import client as dde_client except ImportError: _log.exception(' Python module not available for use in printing') return False try: i_explorer = dde_client.Dispatch("InternetExplorer.Application") for filename in filenames: if i_explorer.Busy: time.sleep(1) i_explorer.Navigate(os.path.normpath(filename)) if i_explorer.Busy: time.sleep(1) i_explorer.Document.printAll() i_explorer.Quit() except Exception: _log.exception('error calling IE via DDE') return False return True #----------------------------------------------------------------------- def _print_files_by_gtklp(filenames=None, verbose=False): # if os.name != 'posix': if sys.platform != 'linux': _log.debug(' only available under Linux') return False cmd_line = ['gtklp', '-i', '-# 1'] cmd_line.extend(filenames) success, returncode, stdout = gmShellAPI.run_process(cmd_line = cmd_line, verbose = verbose) if not success: return False return True #----------------------------------------------------------------------- def _print_files_by_gsprint_exe(filenames=None, verbose=False): """Use gsprint.exe from Ghostscript tools. Windows only. - docs: http://pages.cs.wisc.edu/~ghost/gsview/gsprint.htm - download: http://www.cs.wisc.edu/~ghost/ """ if os.name != 'nt': _log.debug(' only available under Windows') return False conf_filename = gmTools.get_unique_filename ( prefix = 'gm2gsprint-', suffix = '.cfg' ).encode(sys.getfilesystemencoding()) for filename in filenames: conf_file = io.open(conf_filename, mode = 'wt', encoding = 'utf8') conf_file.write('-color\n') conf_file.write('-query\n') # printer setup dialog conf_file.write('-all\n') # all pages conf_file.write('-copies 1\n') conf_file.write('%s\n' % os.path.normpath(filename)) conf_file.close() cmd_line = ['gsprint.exe', '-config', conf_filename] # "gsprint.exe" must be in the PATH success, returncode, stdout = gmShellAPI.run_process(cmd_line = cmd_line, verbose = verbose) if not success: return False return True #----------------------------------------------------------------------- def _print_files_by_acroread_exe(filenames, verbose=False): """Use Adobe Acrobat Reader. Windows only. 
- docs: http://www.robvanderwoude.com/printfiles.php#PrintPDF """ if os.name != 'nt': _log.debug('Acrobat Reader only used under Windows') return False for filename in filenames: cmd_line = [ 'AcroRd32.exe', # "AcroRd32.exe" must be in the PATH '/s', # no splash '/o', # no open-file dialog '/h', # minimized '/p', # go straight to printing dialog os.path.normpath(filename) ] success, returncode, stdout = gmShellAPI.run_process(cmd_line = cmd_line, verbose = verbose) if not success: # retry with "acroread.exe" cmd_line[0] = r'acroread.exe' # "acroread.exe" must be in the PATH success, returncode, stdout = gmShellAPI.run_process(cmd_line = cmd_line, verbose = verbose) if not success: return False return True #----------------------------------------------------------------------- def _print_files_by_os_startfile(filenames=None): try: os.startfile except AttributeError: _log.error('platform does not support "os.startfile()"') return False for filename in filenames: fname = os.path.normcase(os.path.normpath(filename)) _log.debug('%s -> %s', filename, fname) try: try: os.startfile(fname, 'print') except WindowsError as e: _log.exception('no action defined for this type of file') if e.winerror == 1155: # try (default) action os.startfile(fname) except Exception: _log.exception('os.startfile() failed') gmLog2.log_stack_trace() return False return True #----------------------------------------------------------------------- def _print_files_by_shellscript(filenames=None, jobtype=None, verbose=False): paths = gmTools.gmPaths() local_script = os.path.join(paths.local_base_dir, '..', 'external-tools', 'gm-print_doc') candidates = ['gm-print_doc', local_script, 'gm-print_doc.bat'] found, binary = gmShellAPI.find_first_binary(binaries = candidates) if not found: binary = r'gm-print_doc.bat' cmd_line = [binary, jobtype] cmd_line.extend(filenames) success, returncode, stdout = gmShellAPI.run_process(cmd_line = cmd_line, verbose = verbose) if not success: return False return True #======================================================================= # main #----------------------------------------------------------------------- if __name__ == '__main__': if len(sys.argv) < 2: sys.exit() if sys.argv[1] != 'test': sys.exit() from Gnumed.pycommon import gmLog2 from Gnumed.pycommon import gmI18N gmI18N.activate_locale() gmI18N.install_domain() #-------------------------------------------------------------------- def test_print_files(): return print_files(filenames = [sys.argv[2]], jobtype = sys.argv[3]) #-------------------------------------------------------------------- def test_print_files_by_shellscript(): print_files(filenames = [sys.argv[2], sys.argv[2]], jobtype = 'generic_document', print_api = 'gm-print_doc') #-------------------------------------------------------------------- def test_print_files_by_gtklp(): print_files(filenames = [sys.argv[2], sys.argv[2]], jobtype = 'generic_document', print_api = 'gtklp') #-------------------------------------------------------------------- def test_print_files_by_mac_preview(): print("testing printing via Mac Preview") _print_files_by_mac_preview(filenames = [sys.argv[0]]) #-------------------------------------------------------------------- print(test_print_files()) #test_print_files_by_gtklp() #test_print_files_by_mac_preview() ./gnumed-server.22.17/server/pycommon/gmMimeLib.py0000644000175000017500000004517714172057744020206 0ustar ncqncq# -*- coding: utf-8 -*- """This module encapsulates mime operations. 
http://www.dwheeler.com/essays/open-files-urls.html """ #======================================================================================= __author__ = "Karsten Hilbert " __license__ = "GPL" # stdlib import sys import os import mailcap import mimetypes import subprocess import shutil import logging import io # GNUmed if __name__ == '__main__': sys.path.insert(0, '../../') from Gnumed.pycommon import gmShellAPI from Gnumed.pycommon import gmTools from Gnumed.pycommon import gmCfg2 from Gnumed.pycommon import gmWorkerThread _log = logging.getLogger('gm.mime') #======================================================================================= def guess_mimetype(filename=None): """Guess mime type of arbitrary file. filenames are supposed to be in Unicode """ worst_case = "application/octet-stream" _log.debug('guessing mime type of [%s]', filename) # 1) use Python libextractor try: import extractor xtract = extractor.Extractor() props = xtract.extract(filename = filename) for prop, val in props: if (prop == 'mimetype') and (val != worst_case): return val except ImportError: _log.debug('module (python wrapper for libextractor) not installed') except OSError as exc: # winerror 126, errno 22 if exc.errno != 22: raise _log.exception('module (python wrapper for libextractor) not installed') ret_code = -1 # 2) use "file" system command # -i get mime type # -b don't display a header mime_guesser_cmd = 'file -i -b "%s"' % filename # this only works on POSIX with 'file' installed (which is standard, however) # it might work on Cygwin installations aPipe = os.popen(mime_guesser_cmd, 'r') if aPipe is None: _log.debug("cannot open pipe to [%s]" % mime_guesser_cmd) else: pipe_output = aPipe.readline().replace('\n', '').strip() ret_code = aPipe.close() if ret_code is None: _log.debug('[%s]: <%s>' % (mime_guesser_cmd, pipe_output)) if pipe_output not in ['', worst_case]: return pipe_output.split(';')[0].strip() else: _log.error('[%s] on %s (%s): failed with exit(%s)' % (mime_guesser_cmd, os.name, sys.platform, ret_code)) # 3) use "extract" shell level libextractor wrapper mime_guesser_cmd = 'extract -p mimetype "%s"' % filename aPipe = os.popen(mime_guesser_cmd, 'r') if aPipe is None: _log.debug("cannot open pipe to [%s]" % mime_guesser_cmd) else: pipe_output = aPipe.readline()[11:].replace('\n', '').strip() ret_code = aPipe.close() if ret_code is None: _log.debug('[%s]: <%s>' % (mime_guesser_cmd, pipe_output)) if pipe_output not in ['', worst_case]: return pipe_output else: _log.error('[%s] on %s (%s): failed with exit(%s)' % (mime_guesser_cmd, os.name, sys.platform, ret_code)) # If we and up here we either have an insufficient systemwide # magic number file or we suffer from a deficient operating system # alltogether. It can't get much worse if we try ourselves. 
_log.info("OS level mime detection failed, falling back to built-in magic") import gmMimeMagic mime_type = gmTools.coalesce(gmMimeMagic.filedesc(filename), worst_case) del gmMimeMagic _log.debug('"%s" -> <%s>' % (filename, mime_type)) return mime_type #----------------------------------------------------------------------------------- def get_viewer_cmd(aMimeType = None, aFileName = None, aToken = None): """Return command for viewer for this mime type complete with this file""" if aFileName is None: _log.error("You should specify a file name for the replacement of %s.") # last resort: if no file name given replace %s in original with literal '%s' # and hope for the best - we certainly don't want the module default "/dev/null" aFileName = """%s""" mailcaps = mailcap.getcaps() (viewer, junk) = mailcap.findmatch(mailcaps, aMimeType, key = 'view', filename = '%s' % aFileName) # FIXME: we should check for "x-token" flags _log.debug("<%s> viewer: [%s]" % (aMimeType, viewer)) return viewer #----------------------------------------------------------------------------------- def get_editor_cmd(mimetype=None, filename=None): if filename is None: _log.error("You should specify a file name for the replacement of %s.") # last resort: if no file name given replace %s in original with literal '%s' # and hope for the best - we certainly don't want the module default "/dev/null" filename = """%s""" mailcaps = mailcap.getcaps() (editor, junk) = mailcap.findmatch(mailcaps, mimetype, key = 'edit', filename = '%s' % filename) # FIXME: we should check for "x-token" flags _log.debug("<%s> editor: [%s]" % (mimetype, editor)) return editor #----------------------------------------------------------------------------------- def guess_ext_by_mimetype(mimetype=''): """Return file extension based on what the OS thinks a file of this mimetype should end in.""" # ask system first ext = mimetypes.guess_extension(mimetype) if ext is not None: _log.debug('<%s>: %s' % (mimetype, ext)) return ext _log.error("<%s>: no suitable file extension known to the OS" % mimetype) # try to help the OS a bit cfg = gmCfg2.gmCfgData() ext = cfg.get ( group = 'extensions', option = mimetype, source_order = [('user-mime', 'return'), ('system-mime', 'return')] ) if ext is not None: _log.debug('<%s>: %s (%s)' % (mimetype, ext, candidate)) return ext _log.error("<%s>: no suitable file extension found in config files" % mimetype) return ext #----------------------------------------------------------------------------------- def guess_ext_for_file(aFile=None): if aFile is None: return None (path_name, f_ext) = os.path.splitext(aFile) if f_ext != '': return f_ext # try to guess one mime_type = guess_mimetype(aFile) f_ext = guess_ext_by_mimetype(mime_type) if f_ext is None: _log.error('unable to guess file extension for mime type [%s]' % mime_type) return None return f_ext #----------------------------------------------------------------------------------- def adjust_extension_by_mimetype(filename): mimetype = guess_mimetype(filename) mime_suffix = guess_ext_by_mimetype(mimetype) if mime_suffix is None: return filename old_name, old_ext = os.path.splitext(filename) if old_ext == '': new_filename = filename + mime_suffix elif old_ext.lower() == mime_suffix.lower(): return filename new_filename = old_name + mime_suffix _log.debug('[%s] -> [%s]', filename, new_filename) try: os.rename(filename, new_filename) return new_filename except OSError: _log.exception('cannot rename, returning original filename') return filename 
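#-----------------------------------------------------------------------------------
# Illustrative use of the extension helpers above (file names are made up):
#
#	guess_ext_for_file('/tmp/scan0001')				# -> '.pdf' if the content is detected as application/pdf
#	adjust_extension_by_mimetype('/tmp/scan0001')	# -> renames the file to '/tmp/scan0001.pdf' and returns that name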
#----------------------------------------------------------------------------------- _system_startfile_cmd = None open_cmds = { 'xdg-open': 'xdg-open "%s"', # nascent standard on Linux 'kfmclient': 'kfmclient exec "%s"', # KDE 'gnome-open': 'gnome-open "%s"', # GNOME 'exo-open': 'exo-open "%s"', 'op': 'op "%s"', 'open': 'open "%s"', # MacOSX: "open -a AppName file" (-a allows to override the default app for the file type) 'cmd.exe': 'cmd.exe /c "%s"' # Windows #'run-mailcap' #'explorer' } def _get_system_startfile_cmd(filename): global _system_startfile_cmd if _system_startfile_cmd == '': return False, None if _system_startfile_cmd is not None: return True, _system_startfile_cmd % filename open_cmd_candidates = list(open_cmds) for candidate in open_cmd_candidates: found, binary = gmShellAPI.detect_external_binary(binary = candidate) if not found: continue _system_startfile_cmd = open_cmds[candidate] _log.info('detected local startfile cmd: [%s]', _system_startfile_cmd) return True, _system_startfile_cmd % filename _system_startfile_cmd = '' return False, None #----------------------------------------------------------------------------------- def convert_file(filename=None, target_mime=None, target_filename=None, target_extension=None, verbose=False): """Convert file from one format into another. target_mime: a mime type """ assert (target_mime is not None), ' must not be None' assert (filename is not None), ' must not be None' assert (filename != target_filename), ' must be different from ' source_mime = guess_mimetype(filename = filename) if source_mime.lower() == target_mime.lower(): _log.debug('source file [%s] already target mime type [%s]', filename, target_mime) if target_filename is None: return filename shutil.copyfile(filename, target_filename) return target_filename converted_ext = guess_ext_by_mimetype(target_mime) if converted_ext is None: if target_filename is not None: tmp, converted_ext = os.path.splitext(target_filename) if converted_ext is None: converted_ext = target_extension # can still stay None converted_ext = gmTools.coalesce(converted_ext, '').strip().lstrip('.') converted_fname = gmTools.get_unique_filename(suffix = converted_ext) _log.debug('attempting conversion: [%s] -> [<%s>:%s]', filename, target_mime, gmTools.coalesce(target_filename, converted_fname)) script_name = 'gm-convert_file' paths = gmTools.gmPaths() local_script = os.path.join(paths.local_base_dir, '..', 'external-tools', script_name) candidates = [ script_name, local_script ] #, script_name + u'.bat' found, binary = gmShellAPI.find_first_binary(binaries = candidates) if not found: # try anyway binary = script_name# + r'.bat' _log.debug('<%s> API: SOURCEFILE TARGET_MIMETYPE TARGET_EXTENSION TARGET_FILENAME' % binary) cmd_line = [ binary, filename, target_mime, converted_ext, converted_fname ] success, returncode, stdout = gmShellAPI.run_process(cmd_line = cmd_line, verbose = True) if not success: _log.error('conversion returned error exit code') if not os.path.exists(converted_fname): return None _log.info('conversion target file found') stats = os.stat(converted_fname) if stats.st_size == 0: return None _log.info('conversion target file size > 0') achieved_mime = guess_mimetype(filename = converted_fname) if achieved_mime != target_mime: _log.error('target: [%s], achieved: [%s]', target_mime, achieved_mime) return None _log.info('conversion target file mime type [%s], as expected, might be usable', achieved_mime) # we may actually have something despite a non-0 exit code if target_filename is 
None: return converted_fname shutil.copyfile(converted_fname, target_filename) return target_filename #----------------------------------------------------------------------------------- def __run_file_describer(filename=None): base_name = 'gm-describe_file' paths = gmTools.gmPaths() local_script = os.path.join(paths.local_base_dir, '..', 'external-tools', base_name) candidates = [base_name, local_script] #, base_name + '.bat' found, binary = gmShellAPI.find_first_binary(binaries = candidates) if not found: _log.error('cannot find <%s(.bat)>', base_name) return (False, _('<%s(.bat)> not found') % base_name) cmd_line = [binary, filename] _log.debug('describing: %s', cmd_line) try: proc_result = subprocess.run ( args = cmd_line, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, #timeout = timeout, encoding = 'utf8', errors = 'backslashreplace' ) except (subprocess.TimeoutExpired, FileNotFoundError): _log.exception('there was a problem running external process') return (False, _('problem with <%s>') % binary) _log.info('exit code [%s]', proc_result.returncode) if proc_result.returncode != 0: _log.error('[%s] failed', binary) _log.error('STDERR:\n%s', proc_result.stderr) _log.error('STDOUT:\n%s', proc_result.stdout) return (False, _('problem with <%s>') % binary) return (True, proc_result.stdout) #----------------------------------------------------------------------------------- def describe_file(filename, callback=None): if callback is None: return __run_file_describer(filename) payload_kwargs = {'filename': filename} gmWorkerThread.execute_in_worker_thread ( payload_function = __run_file_describer, payload_kwargs = payload_kwargs, completion_callback = callback ) #----------------------------------------------------------------------------------- def call_viewer_on_file(aFile = None, block=None): """Try to find an appropriate viewer with all tricks and call it. block: try to detach from viewer or not, None means to use mailcap default """ if not os.path.isdir(aFile): # is the file accessible at all ? try: open(aFile).close() except Exception: _log.exception('cannot read [%s]', aFile) msg = _('[%s] is not a readable file') % aFile return False, msg # try to detect any of the UNIX openers found, startfile_cmd = _get_system_startfile_cmd(aFile) if found: if gmShellAPI.run_command_in_shell(command = startfile_cmd, blocking = block): return True, '' mime_type = guess_mimetype(aFile) viewer_cmd = get_viewer_cmd(mime_type, aFile) if viewer_cmd is not None: if gmShellAPI.run_command_in_shell(command = viewer_cmd, blocking = block): return True, '' _log.warning("no viewer found via standard mailcap system") if os.name == "posix": _log.warning("you should add a viewer for this mime type to your mailcap file") _log.info("let's see what the OS can do about that") # does the file already have an extension ? (path_name, f_ext) = os.path.splitext(aFile) # no if f_ext in ['', '.tmp']: # try to guess one f_ext = guess_ext_by_mimetype(mime_type) if f_ext is None: _log.warning("no suitable file extension found, trying anyway") file_to_display = aFile f_ext = '?unknown?' 
else: file_to_display = aFile + f_ext shutil.copyfile(aFile, file_to_display) # yes else: file_to_display = aFile file_to_display = os.path.normpath(file_to_display) _log.debug("file %s (ext %s) -> file %s" % (aFile, mime_type, f_ext, file_to_display)) try: os.startfile(file_to_display) return True, '' except AttributeError: _log.exception('os.startfile() does not exist on this platform') except Exception: _log.exception('os.startfile(%s) failed', file_to_display) msg = _("Unable to display the file:\n\n" " [%s]\n\n" "Your system does not seem to have a (working)\n" "viewer registered for the file type\n" " [%s]" ) % (file_to_display, mime_type) return False, msg #----------------------------------------------------------------------------------- def call_editor_on_file(filename=None, block=True): """Try to find an appropriate editor with all tricks and call it. block: try to detach from editor or not, None means to use mailcap default. """ if not os.path.isdir(filename): # is the file accessible at all ? try: open(filename).close() except Exception: _log.exception('cannot read [%s]', filename) msg = _('[%s] is not a readable file') % filename return False, msg mime_type = guess_mimetype(filename) editor_cmd = get_editor_cmd(mime_type, filename) if editor_cmd is not None: if gmShellAPI.run_command_in_shell(command = editor_cmd, blocking = block): return True, '' viewer_cmd = get_viewer_cmd(mime_type, filename) if viewer_cmd is not None: if gmShellAPI.run_command_in_shell(command = viewer_cmd, blocking = block): return True, '' _log.warning("no editor or viewer found via standard mailcap system") if os.name == "posix": _log.warning("you should add an editor and/or viewer for this mime type to your mailcap file") _log.info("let's see what the OS can do about that") # does the file already have a useful extension ? (path_name, f_ext) = os.path.splitext(filename) if f_ext in ['', '.tmp']: # try to guess one f_ext = guess_ext_by_mimetype(mime_type) if f_ext is None: _log.warning("no suitable file extension found, trying anyway") file_to_display = filename f_ext = '?unknown?' 
else: file_to_display = filename + f_ext shutil.copyfile(filename, file_to_display) else: file_to_display = filename file_to_display = os.path.normpath(file_to_display) _log.debug("file %s (ext %s) -> file %s" % (filename, mime_type, f_ext, file_to_display)) # try to detect any of the UNIX openers (will only find viewers) found, startfile_cmd = _get_system_startfile_cmd(filename) if found: if gmShellAPI.run_command_in_shell(command = startfile_cmd, blocking = block): return True, '' # last resort: hand over to Python itself try: os.startfile(file_to_display) return True, '' except AttributeError: _log.exception('os.startfile() does not exist on this platform') except Exception: _log.exception('os.startfile(%s) failed', file_to_display) msg = _("Unable to edit/view the file:\n\n" " [%s]\n\n" "Your system does not seem to have a (working)\n" "editor or viewer registered for the file type\n" " [%s]" ) % (file_to_display, mime_type) return False, msg #======================================================================================= if __name__ == "__main__": if len(sys.argv) < 2: sys.exit() if sys.argv[1] != 'test': sys.exit() from Gnumed.pycommon import gmI18N # for testing: logging.basicConfig(level = logging.DEBUG) filename = sys.argv[2] _get_system_startfile_cmd(filename) #-------------------------------------------------------- def test_edit(): mimetypes = [ 'application/x-latex', 'application/x-tex', 'text/latex', 'text/tex', 'text/plain' ] for mimetype in mimetypes: editor_cmd = get_editor_cmd(mimetype, filename) if editor_cmd is not None: break if editor_cmd is None: # LaTeX code is text: also consider text *viewers* # since pretty much any of them will be an editor as well for mimetype in mimetypes: editor_cmd = get_viewer_cmd(mimetype, filename) if editor_cmd is not None: break if editor_cmd is None: return False result = gmShellAPI.run_command_in_shell(command = editor_cmd, blocking = True) return result #-------------------------------------------------------- def test_describer(): status, desc = describe_file(filename) print(status) print(desc) #-------------------------------------------------------- def test_convert_file(): print(convert_file ( filename = filename, target_mime = sys.argv[3] )) #-------------------------------------------------------- # print(_system_startfile_cmd) # print(guess_mimetype(filename)) # print(get_viewer_cmd(guess_mimetype(filename), filename)) # print(get_editor_cmd(guess_mimetype(filename), filename)) # print(get_editor_cmd('application/x-latex', filename)) # print(get_editor_cmd('application/x-tex', filename)) # print(get_editor_cmd('text/latex', filename)) # print(get_editor_cmd('text/tex', filename)) # print(get_editor_cmd('text/plain', filename)) #print(guess_ext_by_mimetype(mimetype=filename)) # call_viewer_on_file(aFile = filename, block = True) #call_editor_on_file(filename) #test_describer() #print(test_edit()) test_convert_file() ./gnumed-server.22.17/server/pycommon/__init__.py0000644000175000017500000000025514172057744020067 0ustar ncqncq #===================================================================== # $Log: __init__.py,v $ # Revision 1.1 2004-02-25 09:30:13 ncq # - moved here from python-common # ./gnumed-server.22.17/server/pycommon/gmI18N.py0000644000175000017500000004622214172057744017337 0ustar ncqncq__doc__ = """GNUmed client internationalization/localization. All i18n/l10n issues should be handled through this modules. 
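A minimal calling sketch (illustrative only; the text domain shown is an
assumption, see "Theory of operation" below for how the domain and the
target language are normally derived):

	from Gnumed.pycommon import gmI18N
	gmI18N.activate_locale()
	gmI18N.install_domain(domain = 'gnumed')
	# _() is now installed into builtins and available to all modules:
	print(_('some translatable text'))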
Theory of operation: To activate proper locale settings and translation services you need to - import this module - call activate_locale() - call install_domain() The translating method gettext.gettext() will then be installed into the global (!) namespace as _(). Your own modules thus need not do _anything_ (not even import gmI18N) to have _() available to them for translating strings. You need to make sure, however, that gmI18N is imported in your main module before any of the modules using it. In order to resolve circular references involving modules that absolutely _have_ to be imported before this module you can explicitly import gmI18N into them at the very beginning. The text domain (i.e. the name of the message catalog file) is derived from the name of the main executing script unless explicitly passed to install_domain(). The language you want to translate to is derived from environment variables by the locale system unless explicitly passed to install_domain(). This module searches for message catalog files in 3 main locations: - standard POSIX places (/usr/share/locale/ ...) - below "${YOURAPPNAME_DIR}/po/" - below "/../po/" For DOS/Windows I don't know of standard places so probably only the last option will work. I don't know a thing about classic Mac behaviour. New Macs are POSIX, of course. It will then try to install candidates and *verify* whether the translation works by checking for the translation of a tag within itself (this is similar to the self-compiling compiler inserting a backdoor into its self-compiled copies). If none of this works it will fall back to making _() a noop. @copyright: authors """ #=========================================================================== __author__ = "H. Herb , I. Haywood , K. Hilbert " __license__ = "GPL v2 or later (details at http://www.gnu.org)" # stdlib import sys import os.path import os import locale import gettext import logging import codecs import builtins import re as regex builtins._ = lambda x:x _log = logging.getLogger('gm.i18n') system_locale = '' system_locale_level = {} _translate_via_gettext = lambda x:x _substitutes_regex = regex.compile(r'%\(.+?\)s') # *************************************************************************** # *************************************************************************** # The following line is needed to check for successful # installation of the desired message catalog. # -- do not remove or change this line -------------------------------------- __orig_tag__ = 'Translate this or i18n into will not work properly !' # *************************************************************************** # *************************************************************************** #=========================================================================== def __split_locale_into_levels(): """Split locale into language, country and variant parts. 
- we have observed the following formats in the wild: - de_DE@euro - ec_CA.UTF-8 - en_US:en - German_Germany.1252 """ _log.debug('splitting canonical locale [%s] into levels', system_locale) global system_locale_level system_locale_level['full'] = system_locale # trim '@' part system_locale_level['country'] = regex.split('@|:|\.', system_locale, 1)[0] # trim '_@' part system_locale_level['language'] = system_locale.split('_', 1)[0] _log.debug('system locale levels: %s', system_locale_level) #--------------------------------------------------------------------------- def __log_locale_settings(message=None): _setlocale_categories = {} for category in 'LC_ALL LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT LC_IDENTIFICATION'.split(): try: _setlocale_categories[category] = getattr(locale, category) except Exception: _log.warning('this OS does not have locale.%s', category) _getlocale_categories = {} for category in 'LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT LC_IDENTIFICATION'.split(): try: _getlocale_categories[category] = getattr(locale, category) except Exception: pass if message is not None: _log.debug(message) _log.debug('current locale settings:') _log.debug('locale.getlocale(): %s' % str(locale.getlocale())) for category in _getlocale_categories: _log.debug('locale.getlocale(%s): %s' % (category, locale.getlocale(_getlocale_categories[category]))) for category in _setlocale_categories: _log.debug('(locale.setlocale(%s): %s)' % (category, locale.setlocale(_setlocale_categories[category]))) try: _log.debug('locale.getdefaultlocale() - default (user) locale: %s' % str(locale.getdefaultlocale())) except ValueError: _log.exception('the OS locale setup seems faulty') _log.debug('encoding sanity check (also check "locale.nl_langinfo(CODESET)" below):') pref_loc_enc = locale.getpreferredencoding(do_setlocale = False) loc_enc = locale.getlocale()[1] py_str_enc = sys.getdefaultencoding() sys_fs_enc = sys.getfilesystemencoding() _log.debug('sys.getdefaultencoding(): [%s]' % py_str_enc) _log.debug('locale.getpreferredencoding(): [%s]' % pref_loc_enc) _log.debug('locale.getlocale()[1]: [%s]' % loc_enc) _log.debug('sys.getfilesystemencoding(): [%s]' % sys_fs_enc) if loc_enc is not None: loc_enc = loc_enc.upper() loc_enc_compare = loc_enc.replace('-', '') else: loc_enc_compare = loc_enc if pref_loc_enc.upper().replace('-', '') != loc_enc_compare: _log.warning('encoding suggested by locale (%s) does not match encoding currently set in locale (%s)' % (pref_loc_enc, loc_enc)) _log.warning('this might lead to encoding errors') for enc in [pref_loc_enc, loc_enc, py_str_enc, sys_fs_enc]: if enc is not None: try: codecs.lookup(enc) _log.debug(' module CAN handle encoding [%s]' % enc) except LookupError: _log.warning(' module can NOT handle encoding [%s]' % enc) _log.debug('on Linux you can determine a likely candidate for the encoding by running "locale charmap"') _log.debug('locale related environment variables (${LANG} is typically used):') for var in 'LANGUAGE LC_ALL LC_CTYPE LANG'.split(): try: _log.debug('${%s}=%s' % (var, os.environ[var])) except KeyError: _log.debug('${%s} not set' % (var)) _log.debug('database of locale conventions:') data = locale.localeconv() for key in data: if loc_enc is None: _log.debug('locale.localeconv(%s): %s', key, data[key]) else: try: _log.debug('locale.localeconv(%s): %s', key, str(data[key])) except UnicodeDecodeError: 
_log.debug('locale.localeconv(%s): %s', key, str(data[key], loc_enc)) _nl_langinfo_categories = {} for category in 'CODESET D_T_FMT D_FMT T_FMT T_FMT_AMPM RADIXCHAR THOUSEP YESEXPR NOEXPR CRNCYSTR ERA ERA_D_T_FMT ERA_D_FMT ALT_DIGITS'.split(): try: _nl_langinfo_categories[category] = getattr(locale, category) except Exception: _log.warning('this OS does not support nl_langinfo category locale.%s' % category) try: for category in _nl_langinfo_categories: if loc_enc is None: _log.debug('locale.nl_langinfo(%s): %s' % (category, locale.nl_langinfo(_nl_langinfo_categories[category]))) else: try: _log.debug('locale.nl_langinfo(%s): %s', category, str(locale.nl_langinfo(_nl_langinfo_categories[category]))) except UnicodeDecodeError: _log.debug('locale.nl_langinfo(%s): %s', category, str(locale.nl_langinfo(_nl_langinfo_categories[category]), loc_enc)) except Exception: _log.exception('this OS does not support nl_langinfo') _log.debug('gmI18N.get_encoding(): %s', get_encoding()) #--------------------------------------------------------------------------- def _translate_safely(term): """This wraps _(). It protects against translation errors such as a different number of "%s". """ translation = _translate_via_gettext(term) # different number of %s substitutes ? if translation.count('%s') != term.count('%s'): _log.error('count("%s") mismatch, returning untranslated string') _log.error('original : %s', term) _log.error('translation: %s', translation) return term substitution_keys_in_original = set(_substitutes_regex.findall(term)) substitution_keys_in_translation = set(_substitutes_regex.findall(translation)) if not substitution_keys_in_translation.issubset(substitution_keys_in_original): _log.error('"%(...)s" keys in translation not a subset of keys in original, returning untranslated string') _log.error('original : %s', term) _log.error('translation: %s', translation) return term return translation #--------------------------------------------------------------------------- # external API #--------------------------------------------------------------------------- def activate_locale(): """Get system locale from environment.""" global system_locale __log_locale_settings('unmodified startup locale settings (could be [C])') loc, enc = None, None # activate user-preferred locale try: loc = locale.setlocale(locale.LC_ALL, '') _log.debug("activating user-default locale with returns: [%s]" % loc) except AttributeError: _log.exception('Windows does not support locale.LC_ALL') except Exception: _log.exception('error activating user-default locale') _log.log_stack_trace() __log_locale_settings('locale settings after activating user-default locale') # assume en_EN if we did not find any locale settings if loc in [None, 'C']: _log.error('the current system locale is still [None] or [C], falling back to [en_EN]') system_locale = "en_EN" else: system_locale = loc # generate system locale levels __split_locale_into_levels() return True #--------------------------------------------------------------------------- def install_domain(domain=None, language=None, prefer_local_catalog=False): """Install a text domain suitable for the main script.""" # text domain directly specified ? 
if domain is None: _log.info('domain not specified, deriving from script name') # get text domain from name of script domain = os.path.splitext(os.path.basename(sys.argv[0]))[0] _log.info('text domain is [%s]' % domain) # http://www.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html _log.debug('searching message catalog file for system locale [%s]' % system_locale) _log.debug('checking process environment:') for env_var in ['LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG']: tmp = os.getenv(env_var) if env_var is None: _log.debug(' ${%s} not set' % env_var) else: _log.debug(' ${%s} = [%s]' % (env_var, tmp)) # language codes to try lang_candidates = [] # first: explicit language or default system language # language=None: unadulterated default language for user (locale.getlocale()[0] value) # language != None: explicit language setting as passed in by the caller lang_candidates.append(language) if language is not None: _log.info('explicit request for target language [%s]' % language) # next: try default language for user if explicit language fails lang_candidates.append(None) # next try locale.getlocale()[0], if different (this can be strange on, say, Windows: Hungarian_Hungary) if locale.getlocale()[0] not in lang_candidates: lang_candidates.append(locale.getlocale()[0]) # next try locale.get*default*locale()[0], if different if locale.getdefaultlocale()[0] not in lang_candidates: lang_candidates.append(locale.getdefaultlocale()[0]) _log.debug('languages to try for translation: %s (None: implicit system default)', lang_candidates) initial_lang = os.getenv('LANG') _log.info('initial ${LANG} setting: %s', initial_lang) # loop over language candidates for lang_candidate in lang_candidates: # setup baseline _log.debug('resetting ${LANG} to initial user default [%s]', initial_lang) if initial_lang is None: del os.environ['LANG'] lang2log = '$LANG=<>' else: os.environ['LANG'] = initial_lang lang2log = '$LANG(default)=%s' % initial_lang # setup candidate language if lang_candidate is not None: _log.info('explicitely overriding system locale language [%s] by setting ${LANG} to [%s]', initial_lang, lang_candidate) os.environ['LANG'] = lang_candidate lang2log = '$LANG(explicit)=%s' % lang_candidate if __install_domain(domain = domain, prefer_local_catalog = prefer_local_catalog, language = lang2log): return True # install a dummy translation class _log.warning("falling back to NullTranslations() class") # this shouldn't fail dummy = gettext.NullTranslations() dummy.install() return True #--------------------------------------------------------------------------- def __install_domain(domain, prefer_local_catalog, language='?'): # only used for logging # search for message catalog candidate_PO_dirs = [] # - locally if prefer_local_catalog: _log.debug('prioritizing local message catalog') # - one level above path to binary # last resort for inferior operating systems such as DOS/Windows # strip one directory level # this is a rather neat trick :-) loc_dir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..', 'po')) _log.debug('looking one level above binary install directory: %s', loc_dir) candidate_PO_dirs.append(loc_dir) # - in path to binary loc_dir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), 'po')) _log.debug('looking in binary install directory: %s', loc_dir) candidate_PO_dirs.append(loc_dir) # - standard places if os.name == 'posix': _log.debug('system is POSIX, looking in standard locations (see Python Manual)') # if this is reported to segfault/fail/except on 
some # systems we may have to assume "sys.prefix/share/locale/" candidate_PO_dirs.append(gettext.bindtextdomain(domain)) else: _log.debug('No use looking in standard POSIX locations - not a POSIX system.') # - $(_DIR)/ env_key = "%s_DIR" % os.path.splitext(os.path.basename(sys.argv[0]))[0].upper() _log.debug('looking at ${%s}' % env_key) if env_key in os.environ: loc_dir = os.path.abspath(os.path.join(os.environ[env_key], 'po')) _log.debug('${%s} = "%s" -> [%s]' % (env_key, os.environ[env_key], loc_dir)) candidate_PO_dirs.append(loc_dir) else: _log.info("${%s} not set" % env_key) # - locally if not prefer_local_catalog: # - one level above path to binary # last resort for inferior operating systems such as DOS/Windows # strip one directory level # this is a rather neat trick :-) loc_dir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..', 'po')) _log.debug('looking above binary install directory [%s]' % loc_dir) candidate_PO_dirs.append(loc_dir) # - in path to binary loc_dir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), 'po' )) _log.debug('looking in binary install directory [%s]' % loc_dir) candidate_PO_dirs.append(loc_dir) # now try to actually install it for candidate_PO_dir in candidate_PO_dirs: _log.debug('trying with (base=%s, %s, domain=%s)', candidate_PO_dir, language, domain) _log.debug(' -> %s.mo', os.path.join(candidate_PO_dir, language, domain)) if not os.path.exists(candidate_PO_dir): continue try: gettext.install(domain, candidate_PO_dir) except Exception: _log.exception('installing text domain [%s] failed from [%s]', domain, candidate_PO_dir) continue global _ # does it translate ? if _(__orig_tag__) == __orig_tag__: _log.debug('does not translate: [%s] => [%s]', __orig_tag__, _(__orig_tag__)) continue _log.debug('found msg catalog: [%s] => [%s]', __orig_tag__, _(__orig_tag__)) global _translate_via_gettext _translate_via_gettext = builtins._ builtins._ = _translate_safely return True return False #=========================================================================== _encoding_mismatch_already_logged = False _current_encoding = None def get_encoding(): """Try to get a sane encoding. On MaxOSX locale.setlocale(locale.LC_ALL, '') does not have the desired effect, so that locale.getlocale()[1] still returns None. So in that case try to fallback to locale.getpreferredencoding(). 
- what Python itself uses to convert string <-> unicode when no other encoding was specified - ascii by default - can be set in site.py and sitecustomize.py - what the current locale is *actually* using as the encoding for text conversion - what the current locale would *recommend* using as the encoding for text conversion """ global _current_encoding if _current_encoding is not None: return _current_encoding enc = sys.getdefaultencoding() if enc != 'ascii': _current_encoding = enc return _current_encoding enc = locale.getlocale()[1] if enc is not None: _current_encoding = enc return _current_encoding global _encoding_mismatch_already_logged if not _encoding_mismatch_already_logged: _log.debug('*actual* encoding of locale is None, using encoding *recommended* by locale') _encoding_mismatch_already_logged = True return locale.getpreferredencoding(do_setlocale=False) #=========================================================================== # Main #--------------------------------------------------------------------------- if __name__ == "__main__": if len(sys.argv) == 1: sys.exit() if sys.argv[1] != 'test': sys.exit() logging.basicConfig(level = logging.DEBUG) #---------------------------------------------------------------------- def test_strcoll(): candidates = [ # (u'a', u'a'), # (u'a', u'b'), # (u'1', u'1'), # (u'1', u'2'), # (u'A', u'A'), # (u'a', u'A'), ('\u270d', '\u270d'), ('4', '\u270d' + '4'), ('4.4', '\u270d' + '4.4'), ('44', '\u270d' + '44'), ('4', '\u270d' + '9'), ('4', '\u270d' + '2'), # (u'9', u'\u270d' + u'9'), # (u'9', u'\u270d' + u'4'), ] for cands in candidates: print(cands[0], '', cands[1], '=', locale.strcoll(cands[0], cands[1])) # print(cands[1], u'', cands[0], '=', locale.strcoll(cands[1], cands[0])) #---------------------------------------------------------------------- print("======================================================================") print("GNUmed i18n") print("") print("authors:", __author__) print("license:", __license__) print("======================================================================") activate_locale() print("system locale: ", system_locale, "; levels:", system_locale_level) print("likely encoding:", get_encoding()) if len(sys.argv) > 2: install_domain(domain = sys.argv[2]) else: install_domain() test_strcoll() # ********************************************************************* # # == do not remove this line ========================================== # # it is needed to check for successful installation of # # the desired message catalog # # ********************************************************************* # tmp = _('Translate this or i18n into will not work properly !') # # ********************************************************************* # # ********************************************************************* # ./gnumed-server.22.17/server/pycommon/gmExceptions.py0000644000175000017500000000426314172057744021000 0ustar ncqncq############################################################################ # gmExceptions - classes for exceptions GNUmed modules may throw # -------------------------------------------------------------------------- # # @author: Dr. 
Horst Herb # @copyright: author # @license: GPL v2 or later (details at http://www.gnu.org) # @dependencies: nil # @change log: # 07.02.2002 hherb first draft, untested ############################################################################ class AccessDenied(Exception): def __init__(self, msg, source=None, code=None, details=None): self.errmsg = msg self.source = source self.code = code self.details = details #---------------------------------- def __str__(self): txt = self.errmsg if self.source is not None: txt += '\nSource: %s' % self.source if self.code is not None: txt += '\nCode: %s' % self.code if self.details is not None: txt += '\n%s' % self.details return txt #---------------------------------- def __repr__(self): txt = self.errmsg if self.source is not None: txt += '\nSource: %s' % source if self.code is not None: txt += '\nCode: %s' % self.code if self.details is not None: txt += '\n%s' % self.details return txt #------------------------------------------------------------ class ConnectionError(Exception): #raised whenever the database backend connection fails def __init__(self, errmsg): self.errmsg=errmsg def __str__(self): return self.errmsg #------------------------------------------------------------ # constructor errors class ConstructorError(Exception): """Raised when a constructor fails.""" def __init__(self, errmsg = None): if errmsg is None: self.errmsg = "%s.__init__() failed" % self.__class__.__name__ else: self.errmsg = errmsg def __str__(self): return self.errmsg # business DB-object exceptions class NoSuchBusinessObjectError(ConstructorError): """Raised when a business db-object can not be found.""" def __init__(self, errmsg = None): if errmsg is None: self.errmsg = "no such business DB-object found" else: self.errmsg = errmsg def __str__(self): return self.errmsg #===================================================================== ./gnumed-server.22.17/server/pycommon/gmCfg.py0000644000175000017500000004502314172057744017355 0ustar ncqncq"""GNUmed configuration handling. This source of configuration information is supported: - database tables Theory of operation: It is helpful to have a solid log target set up before importing this module in your code. This way you will be able to see even those log messages generated during module import. Once your software has established database connectivity you can set up a config source from the database. You can limit the option applicability by the constraints "workplace", "user", and "cookie". The basic API for handling items is get()/set(). The database config objects auto-sync with the backend. 
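A minimal usage sketch (the option name, workplace name and default value are
purely illustrative and not options that necessarily exist in any database):

	cfg = cCfgSQL()
	value = cfg.get2 (
		option = 'some.plugin.option',
		workplace = 'Front desk',
		bias = 'user',
		default = 'a sensible default'
	)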
@copyright: GPL v2 or later """ # TODO: # - optional arg for set -> type #================================================================== __author__ = "Karsten Hilbert " # standard modules import sys, pickle, decimal, logging, re as regex # gnumed modules if __name__ == '__main__': sys.path.insert(0, '../../') from Gnumed.pycommon import gmPG2, gmTools _log = logging.getLogger('gm.cfg') # don't change this without knowing what you do as # it will already be in many databases cfg_DEFAULT = "xxxDEFAULTxxx" #================================================================== def get_all_options(order_by=None): if order_by is None: order_by = '' else: order_by = 'ORDER BY %s' % order_by cmd = """ SELECT * FROM ( SELECT vco.*, cs.value FROM cfg.v_cfg_options vco JOIN cfg.cfg_string cs ON (vco.pk_cfg_item = cs.fk_item) UNION ALL SELECT vco.*, cn.value::text FROM cfg.v_cfg_options vco JOIN cfg.cfg_numeric cn ON (vco.pk_cfg_item = cn.fk_item) UNION ALL SELECT vco.*, csa.value::text FROM cfg.v_cfg_options vco JOIN cfg.cfg_str_array csa ON (vco.pk_cfg_item = csa.fk_item) UNION ALL SELECT vco.*, cd.value::text FROM cfg.v_cfg_options vco JOIN cfg.cfg_data cd ON (vco.pk_cfg_item = cd.fk_item) ) as option_list %s""" % order_by rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd}], get_col_idx = False) return rows #================================================================== # FIXME: make a cBorg around this class cCfgSQL: # def __init__(self): # pass #----------------------------------------------- # external API #----------------------------------------------- def get(self, option=None, workplace=None, cookie=None, bias=None, default=None, sql_return_type=None): return self.get2 ( option = option, workplace = workplace, cookie = cookie, bias = bias, default = default, sql_return_type = sql_return_type ) #----------------------------------------------- def get2(self, option=None, workplace=None, cookie=None, bias=None, default=None, sql_return_type=None): """Retrieve configuration option from backend. @param bias: Determine the direction into which to look for config options. 'user': When no value is found for "current_user/workplace" look for a value for "current_user" regardless of workspace. The corresponding concept is: "Did *I* set this option anywhere on this site ? If so, reuse the value." 'workplace': When no value is found for "current_user/workplace" look for a value for "workplace" regardless of user. The corresponding concept is: "Did anyone set this option for *this workplace* ? If so, reuse that value." @param default: if no value is found for the option this value is returned instead, also the option is set to this value in the backend, if a missing option will NOT be created in the backend @param sql_return_type: a PostgreSQL type the value of the option is to be cast to before returning, if None no cast will be applied, you will want to make sure that sql_return_type and type(default) are compatible """ if None in [option, workplace]: raise ValueError('neither