kinterbasdb-3.3.0/PKG-INFO
Metadata-Version: 1.0
Name: kinterbasdb
Version: 3.3.0
Summary: Python DB API 2.0 extension for Firebird and Interbase
Home-page: http://kinterbasdb.sourceforge.net
Author: Originally by Alexander Kuznetsov; rewritten and expanded by
        David S. Rushby with contributions from several others (see
        docs/license.txt for details).
Author-email: woodsplitter@rocketmail.com
License: see docs/license.txt
Description: KInterbasDB implements Python Database API 2.0-compliant support
        for the open source relational database Firebird and some versions of
        its proprietary cousin Borland Interbase(R).

        In addition to the minimal feature set of the standard Python DB API,
        KInterbasDB also exposes nearly the entire native client API of the
        database engine.
Platform: UNKNOWN

kinterbasdb-3.3.0/typeconv_datetime_stdlib.py
# KInterbasDB Python Package - Type Conv : DateTime/Python 2.3+ Standard Library
#
# Version 3.1
#
# The following contributors hold Copyright (C) over their respective
# portions of code (see license.txt for details):
#
# [Original Author (maintained through version 2.0-0.3.1):]
#   1998-2001 [alex]  Alexander Kuznetsov
# [Maintainers (after version 2.0-0.3.1):]
#   2001-2002 [maz]   Marek Isalski
#   2002-2004 [dsr]   David Rushby
# [Contributors:]
#   2001      [eac]   Evgeny A. Cherkashin
#   2001-2002 [janez] Janez Jere

__all__ = (
    # kinterbasdb-native date and time converters:
    'date_conv_in', 'date_conv_out',
    'time_conv_in', 'time_conv_out',
    'timestamp_conv_in', 'timestamp_conv_out',

    # DB API 2.0 standard date and time type constructors:
    'Date', 'Time', 'Timestamp',
    'DateFromTicks', 'TimeFromTicks', 'TimestampFromTicks',
)

from kinterbasdb.k_exceptions import *

# THIS CONVERSION MODULE IS NOT EXPECTED TO WORK WITH < PYTHON 2.3,
# since it uses the standard datetime module for its date/time operations.
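
# For example (illustrative values), the converters defined below behave as:
#   date_conv_in(datetime.date(2003, 12, 31))  ->  (2003, 12, 31)
#   time_conv_out((23, 59, 59, 0))             ->  datetime.time(23, 59, 59)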
import datetime as dt ################################################################################ ## DATE AND TIME ################################################################################ # kinterbasdb-native date and time converters: def date_conv_in(dtObj): # Allow implicit param conv: if dtObj is None or isinstance(dtObj, basestring): return dtObj if not isinstance(dtObj, dt.date): raise InterfaceError( 'Required type: %s ; supplied type: %s' % ( str(dt.date), str(type(dtObj)) ) ) return dtObj.timetuple()[:3] def date_conv_out(dateTuple): if dateTuple is None: return None return dt.date(*dateTuple) def time_conv_in(tmObj): # Allow implicit param conv: if tmObj is None or isinstance(tmObj, basestring): return tmObj if not isinstance(tmObj, dt.time): raise InterfaceError( 'Required type: %s ; supplied type: %s' % ( str(dt.time), str(type(tmObj)) ) ) timeTuple = (tmObj.hour, tmObj.minute, tmObj.second, tmObj.microsecond) return timeTuple def time_conv_out(timeTuple): if timeTuple is None: return None return dt.time(*timeTuple) def timestamp_conv_in(tsObj): # Allow implicit param conv: if tsObj is None or isinstance(tsObj, basestring): return tsObj if not isinstance(tsObj, dt.datetime): raise InterfaceError( 'Required type: %s ; supplied type: %s' % ( str(dt.datetime), str(type(tsObj)) ) ) timestampTuple = ( tsObj.year, tsObj.month, tsObj.day, tsObj.hour, tsObj.minute, tsObj.second, tsObj.microsecond ) return timestampTuple def timestamp_conv_out(timestampTuple): if timestampTuple is None: return None return dt.datetime(*timestampTuple) # DB API 2.0 standard date and time type constructors: def _makeFilteredConstructor(underlyingConstructor): def Constructor(*args, **kwargs): try: return underlyingConstructor(*args, **kwargs) except ValueError, e: raise DataError(str(e)) return Constructor Date = _makeFilteredConstructor(dt.date) Time = _makeFilteredConstructor(dt.time) Timestamp = _makeFilteredConstructor(dt.datetime) DateFromTicks = _makeFilteredConstructor(dt.date.fromtimestamp) TimeFromTicks = _makeFilteredConstructor(lambda ticks: TimestampFromTicks(ticks).time()) TimestampFromTicks = _makeFilteredConstructor(dt.datetime.fromtimestamp) kinterbasdb-3.3.0/_kiconversion_blob_streaming.c0000644000175000001440000007027311130647414021302 0ustar pcisarusers/* KInterbasDB Python Package - Implementation of Streaming Blob Conversion * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ /* This source file is designed to be directly included in _kiconversion.c, * without the involvement of a header file. */ static int BlobReader_close_without_unlink(BlobReader *, boolean); #ifdef ENABLE_CONNECTION_TIMEOUT #define BLOBREADER_REQUIRE_OPEN(self) \ if (!BlobReader_is_open(self)) { \ if (BlobReader_con_timed_out(self)) { \ raise_exception(ConnectionTimedOut, "This BlobReader's Connection" \ " timed out; the BlobReader can no longer be used." 
\ ); \ } else { \ raise_exception(ProgrammingError, \ "I/O operation on closed BlobReader" \ ); \ } \ return NULL; \ } #else #define BLOBREADER_REQUIRE_OPEN(self) \ if (!BlobReader_is_open(self)) { \ raise_exception(ProgrammingError, \ "I/O operation on closed BlobReader" \ ); \ return NULL; \ } #endif #define BLOBREADER_BYTES_AVAILABLE(self) ((self)->total_size - (self)->pos) #define DECLARE_AND_INITIALIZE_TRANS(self) \ Transaction *trans; \ \ assert (self != NULL); \ \ trans = (self)->trans; \ assert (trans != NULL); \ assert (Transaction_is_not_closed(trans)); /* Retarded C rule against introducing new declarations except at the beginning * of a scope forces us to repeat code here: */ #define DECLARE_AND_INITIALIZE_TRANS_AND_SV(self) \ Transaction *trans; \ ISC_STATUS *sv; \ \ assert (self != NULL); \ \ trans = (self)->trans; \ assert (trans != NULL); \ assert (Transaction_is_not_closed(trans)); \ sv = Transaction_get_sv(trans); \ assert (sv != NULL); /*** BlobReaderTracker MEMBER FUNC DEFINITIONS AND SUPPORTING FUNCS: BEGIN ***/ #include "_kisupport_lifo_linked_list.h" LIFO_LINKED_LIST_DEFINE_BASIC_METHODS_PYALLOC_NOQUAL( BlobReaderTracker, BlobReader ) /*** BlobReaderTracker MEMBER FUNC DEFINITIONS AND SUPPORTING FUNCS: END *****/ /******************** BlobReader METHODS:BEGIN ********************/ static void BlobReader_create_references_to_superiors(BlobReader *self, Transaction *trans ) { assert (self != NULL); assert (self->trans == NULL); assert (self->con_python_wrapper == NULL); assert (trans != NULL); Py_INCREF(trans); self->trans = trans; /* TAG:TRANSACTION_SUBORDINATE_OBJECT_SURVIVAL_BYPASS: */ /* If (Transaction_is_main(self->trans)), the Transaction will not own a * reference to its connection, so we always create an artificial reference, * to ensure that the connection will remain alive at least as long as this * BlobReader does. */ self->con_python_wrapper = Transaction_get_con_python_wrapper(trans); assert (self->con_python_wrapper != NULL); Py_INCREF(self->con_python_wrapper); } /* BlobReader_set_superior_references */ static void BlobReader_clear_references_to_superiors(BlobReader *self) { assert (self != NULL); assert (self->trans != NULL); assert (self->con_python_wrapper != NULL); /* TAG:TRANSACTION_SUBORDINATE_OBJECT_SURVIVAL_BYPASS: */ Py_DECREF(self->trans); self->trans = NULL; Py_DECREF(self->con_python_wrapper); self->con_python_wrapper = NULL; } /* BlobReader_clear_references_to_superiors */ /* This constructor is not accessible to the Python client programmer (i.e., * it's not registered in BlobReaderType's tp_init slot or otherwise exposed) * because there's no reason to instantiate BlobReader objects from Python. * An attempt to do so raises a TypeError. */ static BlobReader *BlobReader_create(Transaction *trans) { BlobReader *self = PyObject_New(BlobReader, &BlobReaderType); if (self == NULL) { goto fail; } self->state = BLOBREADER_STATE_LIMBO; /* blob_handle is assigned a meaningful value by BlobReader_open. */ self->blob_handle = NULL_BLOB_HANDLE; self->total_size = -1; self->max_segment_size = 0; self->pos = -1; self->iter_chunk_size = DTT_BLOB_CHUNK_SIZE_UNSPECIFIED; self->trans = NULL; /* Initial nullification of mem. 
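   * These fields are nulled before any fallible step so that teardown code
   * (pyob_BlobReader___del__ tests self->trans against NULL) can run safely
   * even if construction is aborted partway through.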
*/ self->con_python_wrapper = NULL; BlobReader_create_references_to_superiors(self, trans); assert (self->trans != NULL); assert (self->con_python_wrapper != NULL); return self; fail: Py_XDECREF(self); return NULL; } /* BlobReader_create */ static int BlobReader_open(BlobReader *self, ISC_QUAD *blob_id) { /* Returns 0 if the op succeeds; -1 on error. */ DECLARE_AND_INITIALIZE_TRANS_AND_SV(self) assert ((self)->blob_handle == NULL_BLOB_HANDLE); /* Based on the ID of the blob, open a handle to it. The handle will be used * in all future operations on the blob. */ { /* Notice that we call Transaction_get_handle_p while holding the GIL. */ isc_db_handle *db_handle_p = Transaction_get_db_handle_p(trans); isc_tr_handle *tr_handle_p = Transaction_get_handle_p(trans); ENTER_GDAL isc_open_blob2(sv, db_handle_p, tr_handle_p, &self->blob_handle, blob_id, /* Last two params indicate "no blob parameter buffer supplied": */ 0, NULL ); LEAVE_GDAL } if (DB_API_ERROR(sv)) { raise_sql_exception(OperationalError, "BlobReader_open.isc_open_blob2: ", sv ); return -1; } /* Determine the total size and max segment size of the blob. */ if (0 != _blob_info_total_size_and_max_segment_size(sv, &self->blob_handle, &self->total_size, &self->max_segment_size ) ) { return -1; } assert (self->total_size >= 0); /* max_segment_size is guaranteed to be >= 0 because it's unsigned. */ if (BlobReaderTracker_add(&trans->open_blobreaders, self) == -1) { return -1; } self->pos = 0; self->state = BLOBREADER_STATE_OPEN; return 0; } /* BlobReader_open */ static int _BlobReader_close_handle_only( BlobReader *self, boolean allowed_to_raise ) { /* Returns 0 if the op succeeds; -1 on error. */ int status = -1; DECLARE_AND_INITIALIZE_TRANS_AND_SV(self) assert (self->blob_handle != NULL_BLOB_HANDLE); { /* This code can be reached when the CTT is timing out a connection. In * that case, we want the GIL to remain held during the entire timeout * operation. */ OPEN_LOCAL_GIL_MANIPULATION_INFRASTRUCTURE #ifdef ENABLE_CONNECTION_TIMEOUT const boolean should_manip_gil = NOT_RUNNING_IN_CONNECTION_TIMEOUT_THREAD; if (should_manip_gil) { #endif LEAVE_GIL_WITHOUT_AFFECTING_DB_AND_WITHOUT_STARTING_CODE_BLOCK #ifdef ENABLE_CONNECTION_TIMEOUT } #endif ENTER_GDAL_WITHOUT_LEAVING_PYTHON isc_close_blob(sv, &self->blob_handle); LEAVE_GDAL_WITHOUT_ENTERING_PYTHON #ifdef ENABLE_CONNECTION_TIMEOUT if (should_manip_gil) { #endif ENTER_GIL_WITHOUT_AFFECTING_DB_AND_WITHOUT_ENDING_CODE_BLOCK #ifdef ENABLE_CONNECTION_TIMEOUT } #endif CLOSE_LOCAL_GIL_MANIPULATION_INFRASTRUCTURE } /* end of lock manipulation scope */ if (!DB_API_ERROR(sv)) { assert (self->blob_handle == NULL_BLOB_HANDLE); status = 0; } else { if (allowed_to_raise) { raise_sql_exception(OperationalError, "_BlobReader_close: ", sv); } else { self->blob_handle = NULL_BLOB_HANDLE; } } return status; } /* _BlobReader_close_handle_only */ static int _BlobReader_close(BlobReader *self, boolean should_unlink_self, boolean allowed_to_raise ) { /* Returns 0 if the op succeeds; -1 on error. */ int status = 0; DECLARE_AND_INITIALIZE_TRANS(self) assert (BlobReader_is_open(self)); #ifdef ENABLE_CONNECTION_TIMEOUT assert ( Connection_timeout_enabled(Transaction_get_con(trans)) ? CURRENT_THREAD_OWNS_CON_TP(Transaction_get_con(trans)) : TRUE ); #endif /* ENABLE_CONNECTION_TIMEOUT */ if (_BlobReader_close_handle_only(self, allowed_to_raise) != 0) { /* If we're supposed to do our best to close regardless of any problems * encountered, forge ahead despite the error closing the blob handle. 
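   * (When allowed_to_raise is FALSE -- e.g., during connection-timeout
   * teardown -- the handle is simply nulled and the pending exception
   * suppressed, as the SUPPRESS_EXCEPTION branch below shows.)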
*/ status = -1; if (allowed_to_raise) { goto fail; } else { self->blob_handle = NULL_BLOB_HANDLE; SUPPRESS_EXCEPTION; } } assert (self->blob_handle == NULL_BLOB_HANDLE); if (should_unlink_self) { if (BlobReaderTracker_remove(&trans->open_blobreaders, self, TRUE) != 0) { status = -1; if (allowed_to_raise) { goto fail; } else { SUPPRESS_EXCEPTION; } } } self->pos = -1; self->state = BLOBREADER_STATE_CLOSED; goto clean; fail: assert (allowed_to_raise ? !!PyErr_Occurred() : !PyErr_Occurred()); /* Fall through to clean: */ clean: assert (!allowed_to_raise ? self->state == BLOBREADER_STATE_CLOSED : TRUE); return status; } /* _BlobReader_close */ static int BlobReader_close_without_unlink( BlobReader *self, boolean allowed_to_raise ) { return _BlobReader_close(self, FALSE, allowed_to_raise); } /* BlobReader_close_without_unlink */ static int BlobReader_untrack(BlobReader *self, boolean allowed_to_raise) { int res = -1; assert (self->trans != NULL); assert (self->con_python_wrapper != NULL); assert (BlobReader_is_open(self)); if (BlobReader_close_without_unlink(self, allowed_to_raise) == 0) { res = 0; } /* 2007.01.17: Closing a BlobReader no longer clears its superior refs: */ assert (self->trans != NULL); assert (self->con_python_wrapper != NULL); assert (!allowed_to_raise ? self->state != BLOBREADER_STATE_OPEN : TRUE); return res; } /* BlobReader_close_untrack */ static int BlobReader_close_with_unlink( BlobReader *self, boolean allowed_to_raise ) { #ifdef ENABLE_CONNECTION_TIMEOUT assert (NOT_RUNNING_IN_CONNECTION_TIMEOUT_THREAD); #endif /* ENABLE_CONNECTION_TIMEOUT */ return _BlobReader_close(self, TRUE, allowed_to_raise); } /* BlobReader_close_with_unlink */ static PyObject *pyob_BlobReader_close(BlobReader *self) { /* Thin Python wrapper over BlobReader_close_with_unlink. 
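   *
   * From Python this surfaces as the file-like close() method, e.g. (a
   * sketch, assuming `reader` is a BlobReader obtained from a fetch with
   * chunked blob retrieval configured):
   *     data = reader.read()
   *     reader.close()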
*/ PyObject *ret = NULL; CConnection *con = NULL; #ifdef ENABLE_CONNECTION_TIMEOUT boolean con_timeout_was_enabled; boolean con_activation_succeeded = TRUE; #endif /* ENABLE_CONNECTION_TIMEOUT */ BLOBREADER_REQUIRE_OPEN(self); assert (self->trans != NULL); assert (self->con_python_wrapper != NULL); con = Transaction_get_con(self->trans); assert (con != NULL); #ifdef ENABLE_CONNECTION_TIMEOUT { con_timeout_was_enabled = (boolean) Connection_timeout_enabled(con); if (con_timeout_was_enabled) { assert (!CURRENT_THREAD_OWNS_CON_TP(con)); ACQUIRE_CON_TP_WITH_GIL_HELD(con); CON_ACTIVATE__FORBID_TRANSPARENT_RESUMPTION__ALREADY_LOCKED( con, goto fail_without_passivating ); assert (CURRENT_THREAD_OWNS_CON_TP(con)); } } #endif /* ENABLE_CONNECTION_TIMEOUT */ if (BlobReader_close_with_unlink(self, TRUE) == 0) { assert (!BlobReader_is_open(self)); ret = Py_None; Py_INCREF(Py_None); } else { goto fail; } /* 2007.01.17: Closing a BlobReader no longer clears its superior refs: */ assert (self->trans != NULL); assert (self->con_python_wrapper != NULL); goto clean; #ifdef ENABLE_CONNECTION_TIMEOUT fail_without_passivating: con_activation_succeeded = FALSE; /* Fall through to fail: */ #endif /* ENABLE_CONNECTION_TIMEOUT */ fail: assert (PyErr_Occurred()); assert (ret == NULL); assert (con_activation_succeeded); /* Fall through to clean: */ clean: #ifdef ENABLE_CONNECTION_TIMEOUT if (con_timeout_was_enabled) { if (con_activation_succeeded) { assert (CURRENT_THREAD_OWNS_CON_TP(con)); CON_PASSIVATE__ALREADY_LOCKED(con); } TP_UNLOCK(con->timeout); assert (!CURRENT_THREAD_OWNS_CON_TP(con)); } #endif /* ENABLE_CONNECTION_TIMEOUT */ CON_MUST_NOT_BE_ACTIVE(con); return ret; } /* pyob_BlobReader_close */ static PyObject *BlobReader_read(BlobReader *self, int req_chunk_size) { const int bytes_available = BLOBREADER_BYTES_AVAILABLE(self); /* The caller should've already ensured that self is open: */ assert (BlobReader_is_open(self)); assert (self->trans != NULL); CON_MUST_ALREADY_BE_ACTIVE(Transaction_get_con(self->trans)); /* If the requested number of bytes was negative (including the default value * of -1), read all bytes available. * If the requested number of bytes exceeds the available number, reduce the * requested number. */ if (req_chunk_size < 0 || req_chunk_size > bytes_available) { req_chunk_size = bytes_available; } if (req_chunk_size == 0) { /* No data left; match Python file-like interface by returning the empty * string. */ return PyString_FromStringAndSize("", 0); } else { /* The documentation for isc_get_segment says: * "isc_segment indicates the buffer is not large enough to hold the * entire current segment; the next call to isc_get_segment() gets the * next chunk of the oversized segment rather than getting the next * segment." * conv_out_blob_materialized_in_single_chunk exploits this behavior to * return exactly req_chunk_size bytes, at which point the generated Python * string is filled and isc_get_segment raises the isc_segment code. Any * subsequent call to isc_get_segment will start where the previous one * left off, which is exactly what we need. 
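   * For instance (illustrative figures): with a single 10000-byte segment
   * and req_chunk_size == 4096, the first call fills exactly 4096 bytes and
   * hits the isc_segment code; the next call resumes at byte 4096 of that
   * same segment.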
*/ PyObject *py_str = conv_out_blob_materialized_in_single_chunk( Transaction_get_sv(self->trans), &self->blob_handle, self->max_segment_size, req_chunk_size, TRUE ); if (py_str == NULL) { return NULL; } self->pos += req_chunk_size; return py_str; } } /* BlobReader_read */ static PyObject *pyob_BlobReader_read(BlobReader *self, PyObject *args) { PyObject *ret = NULL; int req_chunk_size = -1; BLOBREADER_REQUIRE_OPEN(self); assert (self->trans != NULL); CON_ACTIVATE__FORBID_TRANSPARENT_RESUMPTION( Transaction_get_con(self->trans), return NULL ); if (!PyArg_ParseTuple(args, "|i", &req_chunk_size)) { goto fail; } ret = BlobReader_read(self, req_chunk_size); if (ret == NULL) { goto fail; } goto clean; fail: assert (PyErr_Occurred()); assert (ret == NULL); /* Fall through to clean: */ clean: { CConnection *con = Transaction_get_con(self->trans); CON_PASSIVATE(con); CON_MUST_NOT_BE_ACTIVE(con); } return ret; } /* pyob_BlobReader_read */ static PyObject *pyob_BlobReader__iter_read_chunk(BlobReader *self) { /* Supporting method for pyob_BlobReader_chunks (this method is called to * return each chunk). */ PyObject *ret = NULL; BLOBREADER_REQUIRE_OPEN(self); assert (self->trans != NULL); CON_ACTIVATE__FORBID_TRANSPARENT_RESUMPTION( Transaction_get_con(self->trans), return NULL ); if (self->iter_chunk_size == DTT_BLOB_CHUNK_SIZE_UNSPECIFIED) { raise_exception(ProgrammingError, "This method is private and must not be" " called directly." ); goto fail; } ret = BlobReader_read(self, self->iter_chunk_size); if (ret == NULL) { goto fail; } goto clean; fail: assert (PyErr_Occurred()); assert (ret == NULL); /* Fall through to clean: */ clean: { CConnection *con = Transaction_get_con(self->trans); CON_PASSIVATE(con); CON_MUST_NOT_BE_ACTIVE(con); } return ret; } /* pyob_BlobReader__iter_read_chunk */ static PyObject *pyob_BlobReader_chunks(BlobReader *self, PyObject *args) { /* This method is equivalent to the following Python method definition: * def chunks(self, chunk_size): * return iter(lambda: self.read(chunk_size), '') */ PyObject *it = NULL; int req_chunk_size = -1; BLOBREADER_REQUIRE_OPEN(self); assert (self->trans != NULL); CON_ACTIVATE__FORBID_TRANSPARENT_RESUMPTION( Transaction_get_con(self->trans), return NULL ); if (self->iter_chunk_size != DTT_BLOB_CHUNK_SIZE_UNSPECIFIED) { raise_exception(ProgrammingError, "At most one iterator can be opened on" " a given BlobReader via the chunks() method." 
); goto fail; } if (!PyArg_ParseTuple(args, "i", &req_chunk_size)) { goto fail; } if (req_chunk_size <= 0) { raise_exception(ProgrammingError, "chunk size must be > 0"); goto fail; } self->iter_chunk_size = req_chunk_size; { PyObject *bound_method__iter_read_chunk; PyObject *sentinel; bound_method__iter_read_chunk = PyObject_GetAttr( (PyObject *) self, blob_streaming__method_name__iter_read_chunk ); if (bound_method__iter_read_chunk == NULL) { goto fail; } sentinel = PyString_FromStringAndSize("", 0); if (sentinel == NULL) { Py_DECREF(bound_method__iter_read_chunk); goto fail; } it = PyCallIter_New(bound_method__iter_read_chunk, sentinel); Py_DECREF(bound_method__iter_read_chunk); Py_DECREF(sentinel); } goto clean; fail: assert (PyErr_Occurred()); assert (it == NULL); /* Fall through to clean: */ clean: { CConnection *con = Transaction_get_con(self->trans); CON_PASSIVATE(con); CON_MUST_NOT_BE_ACTIVE(con); } return it; } /* pyob_BlobReader_chunks */ static PyObject *pyob_BlobReader_tell(BlobReader *self) { BLOBREADER_REQUIRE_OPEN(self); /* self->pos is of type ISC_LONG, which is always 32-bit, even on 64-bit * platforms, so using PyInt_FromLong is fine: */ return PyInt_FromLong(self->pos); } /* pyob_BlobReader_tell */ static PyObject *pyob_BlobReader_mode_get(BlobReader *self, void *closure) { /* BlobReaders only support one mode ("rb"), so we just return a new * reference to a perpetually cached "constant" Python string with that * value. */ Py_INCREF(blob_streaming__reader_mode__rb); return blob_streaming__reader_mode__rb; } /* pyob_BlobReader_mode_get */ static PyObject *pyob_BlobReader_closed_get(BlobReader *self, void *closure) { /* Although a BlobReader has three states, if we have to choose a boolean, * it's more appropriate to consider the "limbo" state closed than open. */ return PyBool_FromLong(!BlobReader_is_open(self)); } /* pyob_BlobReader_closed_get */ static PyObject *pyob_BlobReader_repr(BlobReader *self) { if (BlobReader_is_open(self)) { return PyString_FromFormat("<%s at %p (open; %ld of %ld bytes read)>", self->ob_type->tp_name, (void *) self, /* Up-cast in anticipation of possible enlargement of ISC_LONG: */ (long) self->pos, (long) self->total_size ); } else { return PyString_FromFormat("<%s at %p (closed)>", self->ob_type->tp_name, (void *) self ); } } /* pyob_BlobReader_repr */ static void pyob_BlobReader___del__(BlobReader *self) { assert (NOT_RUNNING_IN_CONNECTION_TIMEOUT_THREAD); if (self->trans != NULL) { /* TAG:TRANSACTION_SUBORDINATE_OBJECT_SURVIVAL_BYPASS: */ /* We need to make sure that the Transaction and the connection that * underlies it stay alive at least until we've completely the process of * removing their references to self. * * Fortunately, as of the changes on 2007.01.17, this is easy: * BlobReader_create_references_to_superiors creates references to *both* * the Transaction and the kinterbasdb.Connection that underlies it. * BlobReader_create_references_to_superiors creates that artificial * reference to the kinterbasdb.Connection in case * Transaction_is_main(self->trans) * , which would mean that self->trans doesn't actually hold a physical * reference to its connection. * * BlobReader_clear_references_to_superiors, which is not invoked until the * end of this very code block, clears both of those references in the * appropriate order. * * So, we do not need to do anything special to ensure that the Transaction * survives long enough for self to be torn down in an orderly manner. 
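   *
   * In short: self holds strong references to both self->trans and
   * self->con_python_wrapper, so neither object can be destroyed before this
   * destructor has finished clearing those references.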
*/ CConnection *con; Transaction *trans = self->trans; assert (trans->ob_refcnt >= 1); assert (self->con_python_wrapper != NULL); /* We can make the following assertion about the kinterbasdb.Connection * instance's reference count because we know that self owns a reference * *directly* to the kinterbasdb.Connectioin, regardless of whether * self->trans actually owns a physical reference to it (see comment above * for details): */ assert (self->con_python_wrapper->ob_refcnt >= 1); con = Transaction_get_con(trans); assert (con == NULL ? !BlobReader_is_open(self) : TRUE); if (con != NULL) { #ifdef ENABLE_CONNECTION_TIMEOUT assert ( Connection_timeout_enabled(con) ? !CURRENT_THREAD_OWNS_CON_TP(con) : TRUE ); ACQUIRE_CON_TP_WITH_GIL_HELD(con); assert ( Connection_timeout_enabled(con) ? CURRENT_THREAD_OWNS_CON_TP(con) : TRUE ); #endif /* ENABLE_CONNECTION_TIMEOUT */ if (BlobReader_is_open(self)) { BlobReader_close_with_unlink(self, FALSE); } /* As of 2007.01.17, BlobReader_close_with_unlink should no longer clear * self->trans: */ assert (self->trans != NULL); assert (self->trans == trans); assert (trans->ob_refcnt >= 1); assert (self->con_python_wrapper != NULL); #ifdef ENABLE_CONNECTION_TIMEOUT assert ( Connection_timeout_enabled(con) ? CURRENT_THREAD_OWNS_CON_TP(con) : TRUE ); RELEASE_CON_TP(con); assert ( Connection_timeout_enabled(con) ? !CURRENT_THREAD_OWNS_CON_TP(con) : TRUE ); #endif /* ENABLE_CONNECTION_TIMEOUT */ } /* end of if (con != NULL) block */ BlobReader_clear_references_to_superiors(self); assert (self->trans == NULL); assert (self->con_python_wrapper == NULL); /* Note that if more code is ever added after this point, IT IS NO LONGER * SAFE TO REFER TO trans OR con, so code such as this should be enabled: * trans = NULL; * con = NULL; */ } /* end of if (self->trans != NULL) block */ assert (!BlobReader_is_open(self)); assert (self->trans == NULL); assert (self->con_python_wrapper == NULL); assert (self->blob_handle == NULL_BLOB_HANDLE); PyObject_Del(self); } /* pyob_BlobReader___del__ */ static PyMethodDef BlobReader_methods[] = { {"close", (PyCFunction) pyob_BlobReader_close, METH_NOARGS, "'close' method of file-like interface" }, {"read", (PyCFunction) pyob_BlobReader_read, METH_VARARGS, "'read' method of file-like interface" }, {"chunks", (PyCFunction) pyob_BlobReader_chunks, METH_VARARGS, "Return an iterator over M chunks of the indicated size and N of a" " potentially smaller size. Both M and N can be 0; N is never greater" " than 1. This method can only be called once on a given BlobReader." 
}, /* Private support method for the chunks method: */ {"_iter_read_chunk", (PyCFunction) pyob_BlobReader__iter_read_chunk, METH_NOARGS, NULL }, {"tell", (PyCFunction) pyob_BlobReader_tell, METH_NOARGS, "'tell' method of file-like interface" }, {NULL} /* sentinel */ }; static PyGetSetDef BlobReader_getters_setters[] = { {"mode", (getter) pyob_BlobReader_mode_get, NULL, "'mode' read-only property of file-like interface", NULL }, {"closed", (getter) pyob_BlobReader_closed_get, NULL, "'closed' read-only property of file-like interface", NULL }, {NULL} /* sentinel */ }; /******************** BlobReader METHODS:END ********************/ /************ CHUNKED BLOB WRITING FUNCTIONS:BEGIN **************/ static InputStatus conv_in_blob_from_pyfilelike( PyObject *py_filelike, ISC_QUAD *blob_id, ISC_STATUS *status_vector, isc_db_handle db_handle, isc_tr_handle trans_handle ) { isc_blob_handle blob_handle = NULL_BLOB_HANDLE; isc_blob_handle *blob_handle_ptr = &blob_handle; PyObject *read_method; PyObject *chunk = NULL; assert (py_filelike != NULL); /* Retrieve bound method py_filelike.read and call that (potentially numerous * times) instead of looking up the method repeatedly. */ read_method = PyObject_GetAttr(py_filelike, blob_streaming__method_name_read ); if (read_method == NULL) { goto fail; } /* Create a blob and retrieve its handle into blob_handle. */ ENTER_GDAL isc_create_blob2(status_vector, &db_handle, &trans_handle, blob_handle_ptr, blob_id, /* Last two params indicate "no blob parameter buffer supplied". */ 0, NULL ); LEAVE_GDAL if (DB_API_ERROR(status_vector)) { raise_sql_exception(OperationalError, "conv_in_blob_from_pyfilelike.isc_create_blob2: ", status_vector ); goto fail; } for (;;) { Py_ssize_t chunk_len; char *chunk_buf; /* Call the previously retrieved bound method with a perpetually cached * argument tuple for best performance. */ chunk = PyObject_CallObject(read_method, blob_streaming__1Tuple_containing_MAX_BLOB_SEGMENT_SIZE ); if (chunk == NULL) { goto fail_with_blob_cancel; } if (!PyString_CheckExact(chunk)) { /* unicode object is not acceptable. */ raise_exception(ProgrammingError, "File-like object's read method must" " return object of type str (conceptually, a byte buffer)." ); goto fail_with_blob_cancel; } chunk_len = PyString_GET_SIZE(chunk); if (chunk_len == 0) { Py_DECREF(chunk); /* the empty string */ chunk = NULL; break; /* File-like object is exhausted. */ } else if (chunk_len > MAX_BLOB_SEGMENT_SIZE || chunk_len > INT_MAX) { PyObject *err_msg; if (chunk_len > INT_MAX) { err_msg = PyString_FromString("The database API does not yet" " officially support blobs larger than 2 GB." 
); } else { err_msg = PyString_FromFormat("Requested %d bytes from file-like" " object's read method; received too many" " (" Py_ssize_t_STRING_FORMAT ").", (int) MAX_BLOB_SEGMENT_SIZE, chunk_len ); } if (err_msg != NULL) { raise_exception(ProgrammingError, PyString_AS_STRING(err_msg)); Py_DECREF(err_msg); } goto fail_with_blob_cancel; } chunk_buf = PyString_AS_STRING(chunk); ENTER_GDAL isc_put_segment(status_vector, blob_handle_ptr, (unsigned short) chunk_len, chunk_buf ); LEAVE_GDAL if (DB_API_ERROR(status_vector)) { raise_sql_exception(OperationalError, "conv_in_blob_from_pyfilelike.isc_put_segment: ", status_vector ); goto fail_with_blob_cancel; } Py_DECREF(chunk); } ENTER_GDAL isc_close_blob(status_vector, blob_handle_ptr); LEAVE_GDAL if (!DB_API_ERROR(status_vector)) { blob_handle = NULL_BLOB_HANDLE; } else { raise_sql_exception(OperationalError, "conv_in_blob_from_pyfilelike.isc_close_blob: ", status_vector ); goto fail_with_blob_cancel; } assert (blob_handle == NULL_BLOB_HANDLE); assert (chunk == NULL); assert (read_method != NULL); Py_DECREF(read_method); return INPUT_OK; fail_with_blob_cancel: /* This function encountered an error while it had an open blob handle, so * try to cancel the blob: */ ENTER_GDAL isc_cancel_blob(status_vector, blob_handle_ptr); /*if (DB_API_ERROR(status_vector)) {*/ /* The database client library wasn't even able to do cancel the blob, so * just forget about it (don't free, because it's a handle, which is not * necessarily a pointer). */ /*}*/ blob_handle = NULL_BLOB_HANDLE; LEAVE_GDAL /* Fall through to regular failure: */ fail: assert (PyErr_Occurred()); assert (blob_handle == NULL_BLOB_HANDLE); Py_XDECREF(read_method); Py_XDECREF(chunk); return INPUT_ERROR; } /* conv_in_blob_from_pyfilelike */ /************ CHUNKED BLOB WRITING FUNCTIONS:END **************/ kinterbasdb-3.3.0/_kiconversion_blob.c0000644000175000001440000003314711130647414017230 0ustar pcisarusers/* KInterbasDB Python Package - Implementation of Materialized Blob Conversion * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ /* This source file is designed to be directly included in _kiconversion.c, * without the involvement of a header file. */ /******************** FUNCTION PROTOTYPES:BEGIN ********************/ static int _blob_info_total_size_and_max_segment_size( ISC_STATUS *status_vector, isc_blob_handle *blob_handle_ptr, ISC_LONG *total_size, unsigned short *max_segment_size ); /******************** FUNCTION PROTOTYPES:END ********************/ /******************** INPUT FUNCTIONS:BEGIN ********************/ /* This implementation of conv_in_blob_from_pybuffer uses the Python raw buffer * interface rather than slicing and converting each segment to a string before * passing it to isc_put_segment, as the previous implementation did. 
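   * The saving (a sketch of the per-segment cost being avoided):
   *     segment_str = buf[i:i+n]; isc_put_segment(..., segment_str)
   * for every segment, versus pointing isc_put_segment directly at offsets
   * within the buffer object's existing byte array, which copies nothing.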
*/ static InputStatus conv_in_blob_from_pybuffer( PyObject *py_buf, ISC_QUAD *blob_id, ISC_STATUS *status_vector, isc_db_handle db_handle, isc_tr_handle trans_handle ) { isc_blob_handle blob_handle = NULL_BLOB_HANDLE; isc_blob_handle *blob_handle_ptr = &blob_handle; PyBufferProcs *bufferProcs; char *py_buf_start_ptr; int bytes_written_so_far; unsigned short bytes_to_write_this_time; int total_size; char *err_preamble = ""; boolean err_should_cancel_blob = TRUE; /* This function is only called by kinterbasdb's internals, so it's * acceptable to apply the type check only in non-production builds. */ assert (PyBuffer_Check(py_buf)); { const Py_ssize_t total_size_ss = PySequence_Length(py_buf); if (total_size_ss == -1) { return INPUT_ERROR; } else if (total_size_ss > INT_MAX) { raise_exception(NotSupportedError, "The database API does not yet" " officially support blobs larger than 2 GB." ); return INPUT_ERROR; } total_size = (int) total_size_ss; } /* Get a pointer to the PyBufferObject's getreadbuffer method, then call * that method, which will make py_buf_start_ptr point to the start of * the PyBufferObject's raw data buffer. */ bufferProcs = py_buf->ob_type->tp_as_buffer; /* Since this function is only called by kinterbasdb's internals, it's * acceptable to check for a NULL bf_getreadbuffer only in non-production * builds. */ assert (bufferProcs->bf_getreadbuffer != NULL); (*bufferProcs->bf_getreadbuffer)(py_buf, 0, (void **) &py_buf_start_ptr); /* Within this ENTER/LEAVE_GDAL block, a Python object (py_buf) is manipulated, * even though the GIL is not held. However, no Python API calls are made; * in fact, py_buf is only manipulated in the sense that its internal binary * buffer (pointed to by py_buf_start_ptr) is read. Since the code * surrounding the ENTER/LEAVE_GDAL block holds a reference to py_buf, and * thereby ensures that py_buf will not be destroyed prematurely, this code * should be safe. */ /* Create a blob and retrieve its handle into blob_handle. */ ENTER_GDAL isc_create_blob2(status_vector, &db_handle, &trans_handle, blob_handle_ptr, blob_id, /* Last two params indicate "no blob parameter buffer supplied". */ 0, NULL ); if (DB_API_ERROR(status_vector)) { LEAVE_GDAL_WITHOUT_ENDING_CODE_BLOCK err_preamble = "conv_in_blob_from_pybuffer.isc_create_blob2: "; err_should_cancel_blob = FALSE; goto fail; } /* Copy the data from py_buf's internal byte buffer into the database in * chunks of size MAX_BLOB_SEGMENT_SIZE (all but the last chunk, which may be * smaller). 
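   * E.g., if MAX_BLOB_SEGMENT_SIZE were 65535 and total_size were 150000
   * (illustrative figures only), the loop below would issue writes of
   * 65535, 65535, and finally 18930 bytes.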
*/ bytes_written_so_far = 0; bytes_to_write_this_time = MAX_BLOB_SEGMENT_SIZE; while (bytes_written_so_far < total_size) { if (total_size - bytes_written_so_far < MAX_BLOB_SEGMENT_SIZE) { bytes_to_write_this_time = (unsigned short) (total_size - bytes_written_so_far); } isc_put_segment(status_vector, blob_handle_ptr, bytes_to_write_this_time, py_buf_start_ptr + bytes_written_so_far ); if (DB_API_ERROR(status_vector)) { LEAVE_GDAL_WITHOUT_ENDING_CODE_BLOCK err_preamble = "conv_in_blob_from_pybuffer.isc_put_segment: "; goto fail; } bytes_written_so_far += bytes_to_write_this_time; } isc_close_blob(status_vector, blob_handle_ptr); if (DB_API_ERROR(status_vector)) { LEAVE_GDAL_WITHOUT_ENDING_CODE_BLOCK err_preamble = "conv_in_blob_from_pybuffer.isc_close_blob: "; goto fail; } LEAVE_GDAL return INPUT_OK; fail: assert (DB_API_ERROR(status_vector)); raise_sql_exception(OperationalError, err_preamble, status_vector); if (err_should_cancel_blob) { ENTER_GDAL isc_cancel_blob(status_vector, blob_handle_ptr); LEAVE_GDAL } return INPUT_ERROR; } /* conv_in_blob_from_pybuffer */ /* DSR created this version of conv_in_blob_from_pystring on 2002.02.23 to * replace the previous implementation, which broke with strings of length * >= 2^16. * This function just creates a Python buffer object from the Python str object * it receives. This "conversion" is QUITE A CHEAP OPERATION. It involves no * memory copying because it simply creates a "read-only reference" into the * string's existing character buffer. */ static InputStatus conv_in_blob_from_pystring( PyObject *str, ISC_QUAD *blob_id, ISC_STATUS *status_vector, isc_db_handle db_handle, isc_tr_handle trans_handle ) { PyObject *pyBuffer; InputStatus result; /* This function is only called by kinterbasdb's internals, so it's * acceptable to apply the type check only in non-production builds. */ assert (PyString_Check(str)); pyBuffer = PyBuffer_FromObject(str, 0, PyString_GET_SIZE(str)); if (pyBuffer == NULL) { return INPUT_ERROR; } result = conv_in_blob_from_pybuffer(pyBuffer, blob_id, status_vector, db_handle, trans_handle ); /* *Must* DECREF the buffer we've created; even though its creation doesn't * involve copying the string's internal buffer, the string will never be * garbage collected if the buffer is not DECREFed. */ Py_DECREF(pyBuffer); /* conv_in_blob_from_pybuffer will take care of raising an exception if it * must; we'll just pass its return value upward. */ return result; } /* conv_in_blob_from_pystring */ /******************** INPUT FUNCTIONS:END ********************/ /******************** OUTPUT FUNCTIONS:BEGIN ********************/ static PyObject *conv_out_blob_materialized_in_single_chunk( ISC_STATUS *status_vector, isc_blob_handle *blob_handle_ptr, const unsigned short max_segment_size, const int bytes_requested, boolean allow_incomplete_segment_read ) { ISC_LONG bytes_read_so_far = 0; unsigned short bytes_actually_read; ISC_STATUS blob_stat; /* 2007.02.10: FB 2.1 fix: int->ISC_STATUS */ char *py_str_start_ptr; /* Create an empty PyStringObject large enough to hold the entire chunk. */ PyObject *py_str = PyString_FromStringAndSize(NULL, bytes_requested); if (py_str == NULL) { return NULL; } /* Set py_str_start_ptr to point the beginning of py_str's internal buffer. */ py_str_start_ptr = PyString_AS_STRING(py_str); /* DSR documented his concerns about this GIL-handling scheme in a lengthy * comment in function conv_in_blob_from_pybuffer. 
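   * The same reasoning applies here: the surrounding code holds a reference
   * to py_str, so filling its internal buffer while the GIL is released is
   * safe. */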
*/ ENTER_GDAL /* Now, transfer the blob's contents from the database into the preallocated * Python string named py_str. Use repeated calls to isc_get_segment to * effect the transfer. */ assert (bytes_read_so_far == 0); while (bytes_read_so_far < bytes_requested) { blob_stat = isc_get_segment(status_vector, blob_handle_ptr, &bytes_actually_read, (unsigned short) MIN( (long)max_segment_size, bytes_requested - bytes_read_so_far ), py_str_start_ptr + bytes_read_so_far ); /* Since clients of this function are required to refrain from submitting * requests for more bytes than are available, it is not necessary to check * for isc_segstr_eof. * * But isc_segment can arise under normal circumstances; it simply means * that the requested number of bytes did not consume the last processed * segment entirely. The database API's retrieval function, * isc_get_segment, is smart enough to pick up where it left off during the * next call to this function. */ if (blob_stat != 0) { if (blob_stat == isc_segment && allow_incomplete_segment_read) { /* Record the success (from our perspective) of the most recent read, * then exit the read loop. */ bytes_read_so_far += bytes_actually_read; break; } LEAVE_GDAL_WITHOUT_ENDING_CODE_BLOCK raise_sql_exception(OperationalError, "conv_out_blob_materialized_in_single_chunk.isc_get_segment: segment" " retrieval error: ", status_vector ); Py_DECREF(py_str); return NULL; } bytes_read_so_far += bytes_actually_read; } LEAVE_GDAL assert (bytes_read_so_far == bytes_requested); return py_str; } /* conv_out_blob_materialized_in_single_chunk */ static PyObject *conv_out_blob_materialized( ISC_QUAD *blob_id, ISC_STATUS *status_vector, isc_db_handle db_handle, isc_tr_handle trans_handle ) { isc_blob_handle blob_handle = NULL_BLOB_HANDLE; ISC_LONG total_size = -1; unsigned short max_segment_size = 0; /* Based on the blob's ID, open a handle to it. */ ENTER_GDAL isc_open_blob2(status_vector, &db_handle, &trans_handle, &blob_handle, blob_id, /* Last two params indicate "no blob parameter buffer supplied": */ 0, NULL ); LEAVE_GDAL if (DB_API_ERROR(status_vector)) { raise_sql_exception(OperationalError, "conv_out_blob_materialized.isc_open_blob2: ", status_vector ); return NULL; } /* Before actually reading any of the blob's contents, determine the total * size of the blob and the size of its largest segment. */ if (_blob_info_total_size_and_max_segment_size( status_vector, &blob_handle, &total_size, &max_segment_size ) != 0 ) { return NULL; } /* Handle the very remote possibility that passing an ISC_LONG to * PyString_FromStringAndSize would cause an overflow (on most current * platforms, ISC_LONG and int are identical, so no overflow is possible). */ if (total_size > INT_MAX) { raise_exception(InternalError, "conv_out_blob_materialized:" " The size of the requested blob exceeds the capacity of a Python str" " object; use chunked retrieval instead." ); return NULL; } { PyObject *py_str = conv_out_blob_materialized_in_single_chunk(status_vector, &blob_handle, max_segment_size, (int) total_size, FALSE ); /* Close the blob regardless of whether an exception arose while reading * it. Don't check to see whether the close op succeeds; reading was the * important part, and it's already finished. 
*/ ENTER_GDAL isc_close_blob(status_vector, &blob_handle); LEAVE_GDAL return py_str; } } /* conv_out_blob_materialized */ /******************** OUTPUT FUNCTIONS:END ********************/ /******************** UTILITY FUNCTIONS:BEGIN ********************/ /* _blob_info_total_size_and_max_segment_size inserts into its arguments * total_size and max_segment_size the total size and maximum segment size * (respectively) of the specified blob. * Returns 0 if successful, otherwise -1. * * See IB6 API Guide chapter entitled "Working with Blob Data". */ static int _blob_info_total_size_and_max_segment_size( ISC_STATUS *status_vector, isc_blob_handle *blob_handle_ptr, ISC_LONG *total_size, unsigned short *max_segment_size ) { char blob_info_items[] = { isc_info_blob_total_length, isc_info_blob_max_segment }; char result_buffer[ISC_INFO_BUFFER_SIZE]; short length; char *ptr; char item; ENTER_GDAL isc_blob_info(status_vector, blob_handle_ptr, sizeof(blob_info_items), blob_info_items, sizeof(result_buffer), result_buffer ); LEAVE_GDAL if (DB_API_ERROR(status_vector)) { raise_sql_exception(InternalError, "_blob_info_total_size_and_max_segment_size.isc_blob_info: ", status_vector ); return -1; }; /* Extract the values returned in the result buffer. */ ptr = result_buffer; while (*ptr != isc_info_end) { item = *ptr++; ENTER_GDAL length = (short) isc_vax_integer(ptr, sizeof(short)); LEAVE_GDAL ptr += sizeof(short); switch (item) { case isc_info_blob_total_length: ENTER_GDAL *total_size = isc_vax_integer(ptr, length); LEAVE_GDAL break; case isc_info_blob_max_segment: ENTER_GDAL *max_segment_size = (unsigned short) isc_vax_integer(ptr, length); LEAVE_GDAL break; case isc_info_truncated: raise_sql_exception(InternalError, "_blob_info_total_size_and_max_segment_size: isc_blob_info return" " truncated: ", status_vector ); return -1; } ptr += length; } return 0; } /* _blob_info_total_size_and_max_segment_size */ /******************** UTILITY FUNCTIONS:END ********************/ kinterbasdb-3.3.0/_kicore_transaction_support.c0000644000175000001440000001726711130647414021203 0ustar pcisarusers/* KInterbasDB Python Package - Implementation of Low-Level Transaction * Operations * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. 
Cherkashin * 2001-2002 [janez] Janez Jere */ /*************************** DECLARATIONS : begin ****************************/ static isc_tr_handle begin_transaction( /* Either: */ isc_db_handle db_handle, char *tpb, Py_ssize_t tpb_len, /* Or: */ ISC_TEB *tebs, short teb_count, ISC_STATUS *status_vector ); static PyObject *trans___s__trans_handle; static PyObject *trans___s__default_tpb_str_; static PyObject *trans___s_SAVEPOINT_SPACE; static PyObject *trans___s_ROLLBACK_TO_SPACE; /**************************** DECLARATIONS : end *****************************/ /*********************** RAW TRANSACTION OPS : begin *************************/ static int init_kidb_transaction_support(void) { #define INIT_TRANS_STRING_CONST(s) \ trans___s_ ## s = PyString_FromString(#s); \ if (trans___s_ ## s == NULL) { goto fail; } INIT_TRANS_STRING_CONST(_trans_handle); INIT_TRANS_STRING_CONST(_default_tpb_str_); /* Can't use INIT_TRANS_STRING_CONST for these, because they contain * spaces: */ trans___s_SAVEPOINT_SPACE = PyString_FromString("SAVEPOINT "); if (trans___s_SAVEPOINT_SPACE == NULL) { goto fail; } trans___s_ROLLBACK_TO_SPACE = PyString_FromString("ROLLBACK TO "); if (trans___s_ROLLBACK_TO_SPACE == NULL) { goto fail; } return 0; fail: return -1; } /* init_kidb_transaction_support */ static isc_tr_handle begin_transaction( /* Either: */ isc_db_handle db_handle, char *tpb, Py_ssize_t tpb_len, /* Or: */ ISC_TEB *tebs, short teb_count, ISC_STATUS *status_vector ) { isc_tr_handle trans_handle = NULL_TRANS_HANDLE; /* (db_handle+tpb+tpb_len) and (tebs+teb_count) are mutually exclusive * parameters. */ assert ( db_handle != NULL_DB_HANDLE ? tebs == NULL : tebs != NULL && tpb == NULL ); /* 2003.02.21: A huge TPB such as 'con.begin(tpb='x'*50000)' crashes the * FB 1.0.2 server process, but responsibly raises an error with FB 1.5b2. * Since kinterbasdb only exposes some 20 TPB component values, many of which * are mutually exclusive, I decided to impose a reasonable limit right * here. */ if (tpb_len > 255) { raise_exception(ProgrammingError, "Transaction parameter buffer (TPB) too" " large. len(tpb) must be <= 255." ); goto fail; } ENTER_GDAL if (tebs == NULL) { isc_start_transaction(status_vector, &trans_handle, /* Only one database handle is being passed. */ 1, &db_handle, (unsigned short) tpb_len, /* Cast is safe b/c already checked val. 
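           * (tpb_len <= 255 was enforced above, so narrowing to
           * unsigned short cannot truncate.)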
*/ tpb ); } else { isc_start_multiple(status_vector, &trans_handle, teb_count, tebs); } LEAVE_GDAL if (DB_API_ERROR(status_vector)) { raise_sql_exception(OperationalError, "begin transaction: ", status_vector); goto fail; } assert (trans_handle != NULL_TRANS_HANDLE); return trans_handle; fail: assert (PyErr_Occurred()); return NULL_TRANS_HANDLE; } /* begin_transaction */ static TransactionalOperationResult prepare_transaction( isc_tr_handle *trans_handle_p, ISC_STATUS *status_vector ) { assert (trans_handle_p != NULL); if (*trans_handle_p == NULL_TRANS_HANDLE) { raise_exception(ProgrammingError, "Attempted to prepare closed" " transaction" ); return OP_RESULT_ERROR; } ENTER_GDAL isc_prepare_transaction(status_vector, trans_handle_p); LEAVE_GDAL if (DB_API_ERROR(status_vector)) { raise_sql_exception(OperationalError, "prepare: ", status_vector); return OP_RESULT_ERROR; } return OP_RESULT_OK; } /* prepare_transaction */ static TransactionalOperationResult commit_transaction( isc_tr_handle *trans_handle_p, boolean retaining, ISC_STATUS *status_vector ) { assert (trans_handle_p != NULL); if (*trans_handle_p == NULL_TRANS_HANDLE) { /* As discussed on the Python DB-SIG in message: * http://mail.python.org/pipermail/db-sig/2003-February/003158.html * , allow a transaction to be committed even if its existence is only * implicit. */ return OP_RESULT_OK; } { /* This code can be reached when the CTT is timing out a connection. In * that case, we want the GIL to remain held during the entire timeout * operation. */ OPEN_LOCAL_GIL_MANIPULATION_INFRASTRUCTURE #ifdef ENABLE_CONNECTION_TIMEOUT const boolean should_manip_gil = NOT_RUNNING_IN_CONNECTION_TIMEOUT_THREAD; if (should_manip_gil) { #endif LEAVE_GIL_WITHOUT_AFFECTING_DB_AND_WITHOUT_STARTING_CODE_BLOCK #ifdef ENABLE_CONNECTION_TIMEOUT } #endif ENTER_GDAL_WITHOUT_LEAVING_PYTHON if (!retaining) { isc_commit_transaction(status_vector, trans_handle_p); } else { isc_commit_retaining(status_vector, trans_handle_p); assert (*trans_handle_p != NULL_TRANS_HANDLE); } LEAVE_GDAL_WITHOUT_ENTERING_PYTHON #ifdef ENABLE_CONNECTION_TIMEOUT if (should_manip_gil) { #endif ENTER_GIL_WITHOUT_AFFECTING_DB_AND_WITHOUT_ENDING_CODE_BLOCK #ifdef ENABLE_CONNECTION_TIMEOUT } #endif CLOSE_LOCAL_GIL_MANIPULATION_INFRASTRUCTURE } /* end of lock manipulation scope */ if (DB_API_ERROR(status_vector)) { raise_sql_exception(OperationalError, "commit: ", status_vector); return OP_RESULT_ERROR; } return OP_RESULT_OK; } /* commit_transaction */ static TransactionalOperationResult rollback_transaction( isc_tr_handle *trans_handle_p, boolean retaining, boolean allowed_to_raise, ISC_STATUS *status_vector ) { assert (trans_handle_p != NULL); /* If there is not an active transaction, rolling back is meaningless, but * acceptable. */ if (*trans_handle_p == NULL_TRANS_HANDLE) { return OP_RESULT_OK; } { /* This code can be reached when the CTT is timing out a connection. In * that case, we want the GIL to remain held during the entire timeout * operation. 
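   *
   * (commit_transaction above follows the identical locking pattern; in both
   * functions the "retaining" variant -- isc_rollback_retaining here,
   * isc_commit_retaining there -- leaves the transaction handle valid
   * afterward.)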
     */
    OPEN_LOCAL_GIL_MANIPULATION_INFRASTRUCTURE

    #ifdef ENABLE_CONNECTION_TIMEOUT
      const boolean should_manip_gil = NOT_RUNNING_IN_CONNECTION_TIMEOUT_THREAD;
      if (should_manip_gil) {
    #endif
        LEAVE_GIL_WITHOUT_AFFECTING_DB_AND_WITHOUT_STARTING_CODE_BLOCK
    #ifdef ENABLE_CONNECTION_TIMEOUT
      }
    #endif

    ENTER_GDAL_WITHOUT_LEAVING_PYTHON
    if (!retaining) {
      isc_rollback_transaction(status_vector, trans_handle_p);
    } else {
      isc_rollback_retaining(status_vector, trans_handle_p);
      assert (*trans_handle_p != NULL_TRANS_HANDLE);
    }
    LEAVE_GDAL_WITHOUT_ENTERING_PYTHON

    #ifdef ENABLE_CONNECTION_TIMEOUT
      if (should_manip_gil) {
    #endif
        ENTER_GIL_WITHOUT_AFFECTING_DB_AND_WITHOUT_ENDING_CODE_BLOCK
    #ifdef ENABLE_CONNECTION_TIMEOUT
      }
    #endif

    CLOSE_LOCAL_GIL_MANIPULATION_INFRASTRUCTURE
  } /* end of lock manipulation scope */

  if (DB_API_ERROR(status_vector)) {
    raise_sql_exception(OperationalError, "rollback: ", status_vector);
    if (allowed_to_raise) {
      return OP_RESULT_ERROR;
    } else {
      SUPPRESS_EXCEPTION;
    }
  }

  return OP_RESULT_OK;
} /* rollback_transaction */
/************************ RAW TRANSACTION OPS : end **************************/

kinterbasdb-3.3.0/typeconv_fixed_stdlib.py
# KInterbasDB Python Package - Type Conv : Fixed/Standard Library
#
# Version 3.3
#
# The following contributors hold Copyright (C) over their respective
# portions of code (see license.txt for details):
#
# [Original Author (maintained through version 2.0-0.3.1):]
#   1998-2001 [alex]  Alexander Kuznetsov
# [Maintainers (after version 2.0-0.3.1):]
#   2001-2002 [maz]   Marek Isalski
#   2002-2006 [dsr]   David Rushby
# [Contributors:]
#   2001      [eac]   Evgeny A. Cherkashin
#   2001-2002 [janez] Janez Jere

__all__ = (
    # kinterbasdb-native fixed point converters (old, precision_mode style):
    'fixed_conv_in_imprecise', 'fixed_conv_in_precise',
    'fixed_conv_out_imprecise', 'fixed_conv_out_precise',
)

import sys
from kinterbasdb.k_exceptions import *

################################################################################
## FIXED POINT
################################################################################

_tenTo = [10**x for x in range(20)]
del x

# The fixed point input functions are GUARANTEED to receive a single parameter
# that is a 2-tuple of the form: (original input object, scale).
# The original input object may, of course, be None.

def fixed_conv_in_imprecise((val, scale)):
    if val is None or isinstance(val, basestring): # Allow implicit param conv.
        return val
    if not isinstance(val, (float, int, long)):
        raise InterfaceError(
            'float required as input for fixed point field in imprecise mode.'
        )
    absScale = abs(scale)
    return int(round(val, absScale) * _tenTo[absScale])

def fixed_conv_in_precise((val, scale)):
    if val is None or isinstance(val, basestring): # Allow implicit param conv.
        return val
    if not isinstance(val, (int, long)):
        raise InterfaceError(
            'int or long required as input for fixed point field in precise mode.'
        )
    # $val is already a scaled integer; just pass it through.
    return val

# The fixed point output functions receive a single parameter that is either
#  - None (if the SQL value is NULL), or
#  - a 2-tuple of the form: (scaled integer value, scale)

def fixed_conv_out_imprecise(x):
    # Return a floating point representation of the scaled integer.
    if x is None:
        return None
    (val, scale) = x
    if scale == 0:
        return val # Don't convert to float if no decimal places.
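    # E.g. (illustrative figures): a scaled pair (12345, -2) comes out as
    # 12345 / 10**2 == 123.45.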
    return float(val) / _tenTo[abs(scale)]

def fixed_conv_out_precise(x):
    # Simply return the scaled integer, not interpreted in any way.
    if x is None:
        return None
    return x[0]

kinterbasdb-3.3.0/_kiservices.h
/* KInterbasDB Python Package - Header File for Services Manager Support
 *
 * Version 3.3
 *
 * The following contributors hold Copyright (C) over their respective
 * portions of code (see license.txt for details):
 *
 * [Original Author (maintained through version 2.0-0.3.1):]
 *   1998-2001 [alex]  Alexander Kuznetsov
 * [Maintainers (after version 2.0-0.3.1):]
 *   2001-2002 [maz]   Marek Isalski
 *   2002-2007 [dsr]   David Rushby
 * [Contributors:]
 *   2001      [eac]   Evgeny A. Cherkashin
 *   2001-2002 [janez] Janez Jere
 */

#ifndef _KISERVICES_H
#define _KISERVICES_H

#include "_kinterbasdb.h"

static PyTypeObject ServicesConnectionType;

typedef struct { /* definition of type ServicesConnectionObject */
  PyObject_HEAD /* Python API - infrastructural macro. */

  isc_svc_handle service_handle;

  /* Buffer used by Interbase API to store error status of calls. */
  ISC_STATUS status[STATUS_VECTOR_SIZE];
} ServicesConnectionObject;

#endif /* not def _KISERVICES_H */

kinterbasdb-3.3.0/services.py
# KInterbasDB Python Package - Python Wrapper for Services API
#
# Version 3.3
#
# The following contributors hold Copyright (C) over their respective
# portions of code (see license.txt for details):
#
# [Original Author (maintained through version 2.0-0.3.1):]
#   1998-2001 [alex]  Alexander Kuznetsov
# [Maintainers (after version 2.0-0.3.1):]
#   2001-2002 [maz]   Marek Isalski
#   2002-2006 [dsr]   David Rushby
# [Contributors:]
#   2001      [eac]   Evgeny A. Cherkashin
#   2001-2002 [janez] Janez Jere

# This module facilitates interaction with the database Services Manager via
# the low-level C module _kiservices (and thence, the database's C API).
# Like the C module named _kinterbasdb that underlies the main kinterbasdb
# module, the underlying C module here (_kiservices)
#   **SHOULD NOT BE USED DIRECTLY**
# in typical Python programs; it is subject to change without notice.
#
# The names of private members in this module begin with a leading underscore;
# the same caveat about unannounced modification applies to them.

import os.path, struct, sys, warnings

import kinterbasdb
# Acquire references to kinterbasdb's DB API exception classes:
from k_exceptions import *
from kinterbasdb import _kiservices as _ksrv

# The following SHUT_* constants are to be passed as the $shutdownMethod
# parameter to Connection.shutdown:
SHUT_FORCE = _ksrv.isc_spb_prp_shutdown_db
SHUT_DENY_NEW_TRANSACTIONS = _ksrv.isc_spb_prp_deny_new_transactions
SHUT_DENY_NEW_ATTACHMENTS = _ksrv.isc_spb_prp_deny_new_attachments

# The following WRITE_* constants are to be passed as the $mode parameter
# to Connection.setWriteMode:
WRITE_FORCED = _ksrv.isc_spb_prp_wm_sync
WRITE_BUFFERED = _ksrv.isc_spb_prp_wm_async

# The following ACCESS_* constants are to be passed as the $mode parameter
# to Connection.setAccessMode:
ACCESS_READ_WRITE = _ksrv.isc_spb_prp_am_readwrite
ACCESS_READ_ONLY = _ksrv.isc_spb_prp_am_readonly

def connect(host='service_mgr',
    user=os.environ.get('ISC_USER', 'sysdba'),
    password=os.environ.get('ISC_PASSWORD', None)
  ):
    """
    Establishes a connection to the Services Manager.

    The $user and $password parameters must refer to an administrative user
    such as sysdba.  In fact, $user can be left blank, in which case it will
    default to sysdba.
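
    For example (a sketch; the host and password shown are placeholders):
        from kinterbasdb import services
        con = services.connect(host='localhost', password='masterkey')
        print con.getServerVersion()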
The $password parameter is required. If the $host parameter is not supplied, the connection will default to the local host. NOTE: By definition, a Services Manager connection is bound to a particular host. Therefore, the database specified as a parameter to methods such as getStatistics MUST NOT include the host name of the database server. """ if not _ksrv.is_initialized(): kinterbasdb._ensureInitialized() # Now that we know kinterbasdb has been intialized, grant the # _kiservices module access to some global variables in kinterbasdb._k. # This awkward step is necessary because _kiservices and _k are # compiled to separate extension modules (i.e., shared libraries), and # Python's importation mechanism doesn't provide an automatic way to # access global variables in an extension module. _ksrv.initialize_from(kinterbasdb._k) assert _ksrv.is_initialized() if password is None: raise ProgrammingError('A password is required to use the Services' ' Manager.' ) _checkString(host) _checkString(user) _checkString(password) # The database engine's Services API requires that connection strings # conform to one of the following formats: # 1. 'service_mgr' - Connects to the Services Manager on localhost. # 2. 'hostname:service_mgr' - Connects to the Services Manager on the # server named hostname. # # This Python function glosses over the database engine's rules as follows: # - If the $host parameter is not supplied, the connection defaults to # the local host. # - If the $host parameter is supplied, the ':service_mgr' suffix is # optional (the suffix will be appended automatically if necessary). # # Of course, this scheme would collapse if someone actually had a host # named 'service_mgr', and supplied the connection string 'service_mgr' # with the intent of connecting to that host. In that case, the connection # would be attempted to the local host, not to the host named # 'service_mgr'. An easy workaround would be to supply the following # connection string: # 'service_mgr:service_mgr'. if not host.endswith('service_mgr'): if not host.endswith(':'): host += ':' host += 'service_mgr' return Connection(host, user, password) class Connection(object): def __init__(self, *args, **keywords_args): self._C_conn = None self._C_conn = apply(_ksrv.connect, args, keywords_args) def close(self): if self._C_conn is None: return _ksrv.close(self._C_conn) del self._C_conn ## Query methods: ## def getServiceManagerVersion(self): return self._QI(_ksrv.isc_info_svc_version) def getServerVersion(self): return self._QS(_ksrv.isc_info_svc_server_version) def getArchitecture(self): return self._QS(_ksrv.isc_info_svc_implementation) def getHomeDir(self): return self._QS(_ksrv.isc_info_svc_get_env) def getSecurityDatabasePath(self): return self._QS(_ksrv.isc_info_svc_user_dbpath) def getLockFileDir(self): return self._QS(_ksrv.isc_info_svc_get_env_lock) def getCapabilityMask(self): return self._QI(_ksrv.isc_info_svc_capabilities) def getMessageFileDir(self): return self._QS(_ksrv.isc_info_svc_get_env_msg) def getConnectionCount(self): return self._get_isc_info_svc_svr_db_info()[0] def getAttachedDatabaseNames(self): return self._get_isc_info_svc_svr_db_info()[1] def getLog(self): """ Note: Current versions of the database server do not rotate the log file, so it can become VERY large, and take a long time to retrieve. 
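
        Usage sketch (assuming `con` is an established services.Connection):
            logText = con.getLog()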
""" reqBuf = _ServiceActionRequestBuilder(_ksrv.isc_action_svc_get_ib_log) return self._actAndReturnTextualResults(reqBuf) # This capability no longer exists in FB 1.5rc4 and later: #def getServerConfig(self): return self._get_isc_info_svc_get_config() def getLimboTransactionIDs(self, database): _checkString(database) reqBuf = _ServiceActionRequestBuilder() reqBuf.addOptionMask(_ksrv.isc_spb_rpr_list_limbo_trans) raw = self._repairAction(database, reqBuf) nBytes = len(raw) transIDs = [] i = 0 while i < nBytes: byte = ord(raw[i]) if byte in (_ksrv.isc_spb_single_tra_id, _ksrv.isc_spb_multi_tra_id): # The transaction ID is a 32-bit integer that begins # immediately after position i. transID = struct.unpack('i', raw[i+1:i+5])[0] i += 5 # Advance past the marker byte and the 32-bit integer. transIDs.append(transID) else: raise InternalError('Unable to process buffer contents' ' beginning at position %d.' % i ) return transIDs def _resolveLimboTransaction(self, resolution, database, transactionID): _checkString(database) reqBuf = _ServiceActionRequestBuilder() reqBuf.addNumeric(resolution, transactionID) self._repairAction(database, reqBuf) def commitLimboTransaction(self, database, transactionID): return self._resolveLimboTransaction(_ksrv.isc_spb_rpr_commit_trans, database, transactionID ) def rollbackLimboTransaction(self, database, transactionID): return self._resolveLimboTransaction(_ksrv.isc_spb_rpr_rollback_trans, database, transactionID ) # Database statistics retrieval methods: def getStatistics(self, database, showOnlyDatabaseLogPages=0, showOnlyDatabaseHeaderPages=0, showUserDataPages=1, showUserIndexPages=1, # 2004.06.06: False by default b/c gstat behaves that way: showSystemTablesAndIndexes=0 ): _checkString(database) reqBuf = _ServiceActionRequestBuilder(_ksrv.isc_action_svc_db_stats) optionMask = 0 if showUserDataPages: optionMask |= _ksrv.isc_spb_sts_data_pages if showOnlyDatabaseLogPages: optionMask |= _ksrv.isc_spb_sts_db_log if showOnlyDatabaseHeaderPages: optionMask |= _ksrv.isc_spb_sts_hdr_pages if showUserIndexPages: optionMask |= _ksrv.isc_spb_sts_idx_pages if showSystemTablesAndIndexes: optionMask |= _ksrv.isc_spb_sts_sys_relations reqBuf.addDatabaseName(database) reqBuf.addOptionMask(optionMask) return self._actAndReturnTextualResults(reqBuf) ## Action methods: ## # Backup and Restore methods: def backup(self, sourceDatabase, destFilenames, destFileSizes=(), #factor=None, # YYY:What is this? # Backup operation optionMask: ignoreChecksums=0, ignoreLimboTransactions=0, metadataOnly=0, garbageCollect=1, #oldDescriptions=0, kinterbasdb doesn't even support IB < 5.5 transportable=1, convertExternalTablesToInternalTables=1, expand=1 # YYY:What is this? ): # Begin parameter validation section. _checkString(sourceDatabase) destFilenames = _requireStrOrTupleOfStr(destFilenames) destFilenamesCount = len(destFilenames) # 2004.07.17: YYY: Temporary warning: # Current (1.5.1) versions of the database engine appear to hang the # Services API request when it contains more than 11 destFilenames if destFilenamesCount > 11: warnings.warn( 'Current versions of the database engine appear to hang when' ' passed a request to generate a backup with more than 11' ' constituents.', RuntimeWarning ) if destFilenamesCount > 9999: raise ProgrammingError("The database engine cannot output a" " single source database to more than 9999 backup files." 
) _validateCompanionStringNumericSequences(destFilenames, destFileSizes, 'destination filenames', 'destination file sizes' ) if len(_excludeElementsOfTypes(destFileSizes, (int, long))) > 0: raise TypeError("Every element of destFileSizes must be an int" " or long." ) destFileSizesCount = len(destFileSizes) # The following should have already been checked by # _validateCompanionStringNumericSequences. assert destFileSizesCount == destFilenamesCount - 1 # End parameter validation section. # Begin option bitmask setup section. optionMask = 0 if ignoreChecksums: optionMask |= _ksrv.isc_spb_bkp_ignore_checksums if ignoreLimboTransactions: optionMask |= _ksrv.isc_spb_bkp_ignore_limbo if metadataOnly: optionMask |= _ksrv.isc_spb_bkp_metadata_only if not garbageCollect: optionMask |= _ksrv.isc_spb_bkp_no_garbage_collect if not transportable: optionMask |= _ksrv.isc_spb_bkp_non_transportable if convertExternalTablesToInternalTables: optionMask |= _ksrv.isc_spb_bkp_convert if expand: optionMask |= _ksrv.isc_spb_bkp_expand # End option bitmask setup section. # Construct the request buffer. request = _ServiceActionRequestBuilder(_ksrv.isc_action_svc_backup) # Source database filename: request.addDatabaseName(sourceDatabase) # Backup filenames and sizes: request.addSequenceOfStringNumericPairs( _ksrv.isc_spb_bkp_file, destFilenames, _ksrv.isc_spb_bkp_length, destFileSizes ) # Options bitmask: request.addNumeric(_ksrv.isc_spb_options, optionMask) # Tell the service to make its output available to us. request.addCode(_ksrv.isc_spb_verbose) # Done constructing the request buffer. return self._actAndReturnTextualResults(request) def restore(self, sourceFilenames, destFilenames, destFilePages=(), pageSize=None, cacheBuffers=None, accessModeReadOnly=0, replace=0, create=1, deactivateIndexes=0, doNotRestoreShadows=0, doNotEnforceConstraints=0, commitAfterEachTable=0, # If $useAllPageSpace is 1, entirely fill each page rather than # reserving 20% of each page for future use: useAllPageSpace=0 ): # Begin parameter validation section. sourceFilenames = _requireStrOrTupleOfStr(sourceFilenames) destFilenames = _requireStrOrTupleOfStr(destFilenames) _validateCompanionStringNumericSequences(destFilenames, destFilePages, 'destination filenames', 'destination file page counts' ) # End parameter validation section. # Begin option bitmask setup section. optionMask = 0 if replace: optionMask |= _ksrv.isc_spb_res_replace if create: optionMask |= _ksrv.isc_spb_res_create if deactivateIndexes: optionMask |= _ksrv.isc_spb_res_deactivate_idx if doNotRestoreShadows: optionMask |= _ksrv.isc_spb_res_no_shadow if doNotEnforceConstraints: optionMask |= _ksrv.isc_spb_res_no_validity if commitAfterEachTable: optionMask |= _ksrv.isc_spb_res_one_at_a_time if useAllPageSpace: optionMask |= _ksrv.isc_spb_res_use_all_space # End option bitmask setup section. # Construct the request buffer. 
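        # (For orientation, a typical invocation of this method might look
        # like the following -- a sketch only; the file paths are
        # hypothetical:
        #     con.restore('/backups/employee.fbk', '/data/employee.fdb',
        #         replace=1)
        # The request buffer assembled below encodes such arguments for the
        # Services API.)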
request = _ServiceActionRequestBuilder(_ksrv.isc_action_svc_restore) # Backup filenames: request.addSequenceOfStrings(_ksrv.isc_spb_bkp_file, sourceFilenames) # Database filenames: request.addSequenceOfStringNumericPairs( _ksrv.isc_spb_dbname, destFilenames, _ksrv.isc_spb_res_length, destFilePages ) # Page size of the restored database: if pageSize: request.addNumeric(_ksrv.isc_spb_res_page_size, pageSize) # cacheBuffers is the number of default cache buffers to configure for # attachments to the restored database: if cacheBuffers: request.addNumeric(_ksrv.isc_spb_res_buffers, cacheBuffers) # accessModeReadOnly controls whether the restored database is # "mounted" in read only or read-write mode: if accessModeReadOnly: accessMode = _ksrv.isc_spb_prp_am_readonly else: accessMode = _ksrv.isc_spb_prp_am_readwrite request.addNumeric(_ksrv.isc_spb_res_access_mode, accessMode, numCType='B' ) # Options bitmask: request.addNumeric(_ksrv.isc_spb_options, optionMask) # Tell the service to make its output available to us. request.addCode(_ksrv.isc_spb_verbose) # Done constructing the request buffer. _ksrv.action_thin(self._C_conn, request.render()) # Return the results to the caller synchronously. return self._collectUnformattedResults() # Database property alteration methods: def setDefaultPageBuffers(self, database, n): _checkString(database) return self._propertyActionWithSingleNumericCode(database, _ksrv.isc_spb_prp_page_buffers, n ) def setSweepInterval(self, database, n): _checkString(database) return self._propertyActionWithSingleNumericCode(database, _ksrv.isc_spb_prp_sweep_interval, n ) def shutdown(self, database, shutdownMethod, timeout): _checkString(database) if shutdownMethod not in ( SHUT_FORCE, SHUT_DENY_NEW_TRANSACTIONS, SHUT_DENY_NEW_ATTACHMENTS ): raise ValueError('shutdownMethod must be one of the following' ' constants: services.SHUT_FORCE,' ' services.SHUT_DENY_NEW_TRANSACTIONS,' ' services.SHUT_DENY_NEW_ATTACHMENTS.' ) return self._propertyActionWithSingleNumericCode(database, shutdownMethod, timeout ) def bringOnline(self, database): _checkString(database) reqBuf = _ServiceActionRequestBuilder() reqBuf.addOptionMask(_ksrv.isc_spb_prp_db_online) return self._propertyAction(database, reqBuf) def setShouldReservePageSpace(self, database, shouldReserve): _checkString(database) if shouldReserve: reserveCode = _ksrv.isc_spb_prp_res else: reserveCode = _ksrv.isc_spb_prp_res_use_full return self._propertyActionWithSingleNumericCode(database, _ksrv.isc_spb_prp_reserve_space, reserveCode, numCType='b' ) def setWriteMode(self, database, mode): _checkString(database) if mode not in (WRITE_FORCED, WRITE_BUFFERED): raise ValueError('mode must be one of the following constants:' ' services.WRITE_FORCED, services.WRITE_BUFFERED.' ) return self._propertyActionWithSingleNumericCode(database, _ksrv.isc_spb_prp_write_mode, mode, numCType='b' ) def setAccessMode(self, database, mode): _checkString(database) if mode not in (ACCESS_READ_WRITE, ACCESS_READ_ONLY): raise ValueError('mode must be one of the following constants:' ' services.ACCESS_READ_WRITE, services.ACCESS_READ_ONLY.' 
) return self._propertyActionWithSingleNumericCode(database, _ksrv.isc_spb_prp_access_mode, mode, numCType='b' ) def setSQLDialect(self, database, dialect): _checkString(database) # The IB 6 API Guide says that dialect "must be 1 or 3", but other # dialects may become valid in future versions, so don't require # dialect in (1, 3) return self._propertyActionWithSingleNumericCode(database, _ksrv.isc_spb_prp_set_sql_dialect, dialect ) def activateShadowFile(self, database): _checkString(database) reqBuf = _ServiceActionRequestBuilder() reqBuf.addOptionMask(_ksrv.isc_spb_prp_activate) return self._propertyAction(database, reqBuf) # Database repair/maintenance methods: def sweep(self, database, markOutdatedRecordsAsFreeSpace=1): _checkString(database) reqBuf = _ServiceActionRequestBuilder() optionMask = 0 if markOutdatedRecordsAsFreeSpace: optionMask |= _ksrv.isc_spb_rpr_sweep_db reqBuf.addOptionMask(optionMask) return self._repairAction(database, reqBuf) def repair(self, database, readOnlyValidation=0, ignoreChecksums=0, removeReferencesToUnavailableShadowFiles=0, markCorruptedRecordsAsUnavailable=0, releaseUnassignedPages=1, releaseUnassgnedRecordFragments=1 ): _checkString(database) # YYY: With certain option combinations, this method raises errors # that may not be very comprehensible to a Python programmer who's not # well versed with IB/FB. Should option combination filtering be # done right here instead of leaving that responsibility to the # database engine? # I think not, since any filtering done in this method is liable to # become outdated, or to inadvertently enforce an unnecessary, # crippling constraint on a certain option combination that the # database engine would have allowed. reqBuf = _ServiceActionRequestBuilder() optionMask = 0 if readOnlyValidation: optionMask |= _ksrv.isc_spb_rpr_check_db if ignoreChecksums: optionMask |= _ksrv.isc_spb_rpr_ignore_checksum if removeReferencesToUnavailableShadowFiles: optionMask |= _ksrv.isc_spb_rpr_kill_shadows if markCorruptedRecordsAsUnavailable: optionMask |= _ksrv.isc_spb_rpr_mend_db if releaseUnassignedPages: optionMask |= _ksrv.isc_spb_rpr_validate_db if releaseUnassgnedRecordFragments: optionMask |= _ksrv.isc_spb_rpr_full reqBuf.addOptionMask(optionMask) return self._repairAction(database, reqBuf) # 2003.07.12: Removed method resolveLimboTransactions (dropped plans to # support that operation from kinterbasdb since transactions IDs are not # exposed at the Python level and I don't consider limbo transaction # resolution compelling enough to warrant exposing transaction IDs). # User management methods: def getUsers(self, username=None): """ By default, lists all users. Specify parameter $username to list only the user with that username. """ if username is not None: _checkString(username) reqBuf = _ServiceActionRequestBuilder( _ksrv.isc_action_svc_display_user ) if username: username = username.upper() # 2002.12.11 reqBuf.addString(_ksrv.isc_spb_sec_username, username) self._act(reqBuf) raw = self._QR(_ksrv.isc_info_svc_get_users) users = [] curUser = None pos = 1 # Ignore raw[0]. 
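        # The raw result buffer is a flat sequence of one-byte cluster codes,
        # each followed by its payload (a length-prefixed string or a
        # VAX-byte-order integer); the loop below walks it one cluster at a
        # time, starting a new User whenever isc_spb_sec_username appears.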
upper_limit = len(raw) - 1 while pos < upper_limit: cluster = ord(raw[pos]) pos += 1 if cluster == _ksrv.isc_spb_sec_username: if curUser is not None: users.append(curUser) curUser = None (username, pos) = _extract_sized_string(raw, pos) curUser = User(username) elif cluster == _ksrv.isc_spb_sec_firstname: (firstName, pos) = _extract_sized_string(raw, pos) curUser.firstName = firstName elif cluster == _ksrv.isc_spb_sec_middlename: (middleName, pos) = _extract_sized_string(raw, pos) curUser.middleName = middleName elif cluster == _ksrv.isc_spb_sec_lastname: (lastName, pos) = _extract_sized_string(raw, pos) curUser.lastName = lastName elif cluster == _ksrv.isc_spb_sec_groupid: (groupId, pos) = _extract_long_unsigned(raw, pos) curUser.groupId = groupId elif cluster == _ksrv.isc_spb_sec_userid: (userId, pos) = _extract_long_unsigned(raw, pos) curUser.userId = userId # Handle the last user: if curUser is not None: users.append(curUser) curUser = None return users def addUser(self, user): """ Parameter $user must be an instance of services.User with *at least* its username and password attributes specified as non-empty values. All other $user attributes are optional. This method ignores the userId and groupId attributes of $user regardless of their values. """ if not user.username: raise ProgrammingError('You must specify a username.') else: _checkString(user.username) if not user.password: raise ProgrammingError('You must specify a password.') else: _checkString(user.password) reqBuf = _ServiceActionRequestBuilder(_ksrv.isc_action_svc_add_user) reqBuf.addString(_ksrv.isc_spb_sec_username, user.username) reqBuf.addString(_ksrv.isc_spb_sec_password, user.password) if user.firstName: reqBuf.addString(_ksrv.isc_spb_sec_firstname, user.firstName) if user.middleName: reqBuf.addString(_ksrv.isc_spb_sec_middlename, user.middleName) if user.lastName: reqBuf.addString(_ksrv.isc_spb_sec_lastname, user.lastName) self._actAndReturnTextualResults(reqBuf) def modifyUser(self, user): reqBuf = _ServiceActionRequestBuilder(_ksrv.isc_action_svc_modify_user) reqBuf.addString(_ksrv.isc_spb_sec_username, user.username) reqBuf.addString(_ksrv.isc_spb_sec_password, user.password) # Change the optional attributes whether they're empty or not. reqBuf.addString(_ksrv.isc_spb_sec_firstname, user.firstName) reqBuf.addString(_ksrv.isc_spb_sec_middlename, user.middleName) reqBuf.addString(_ksrv.isc_spb_sec_lastname, user.lastName) self._actAndReturnTextualResults(reqBuf) def removeUser(self, user): """ Accepts either an instance of services.User or a string username, and deletes the specified user. """ if isinstance(user, User): username = user.username else: _checkString(user) username = user reqBuf = _ServiceActionRequestBuilder(_ksrv.isc_action_svc_delete_user) reqBuf.addString(_ksrv.isc_spb_sec_username, username) self._actAndReturnTextualResults(reqBuf) def userExists(self, user): """ Returns a boolean that indicates whether the specified user exists. The $user parameter can be an instance of services.User or a string username. 
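        Sketch (assumes $con is an open services.Connection; the username is
        hypothetical):
            if not con.userExists('EXAMPLEUSER'):
                ... # e.g., create the user via con.addUser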
""" if isinstance(user, User): username = user.username else: _checkString(user) username = user # 2002.12.11: bug fix: return len(self.getUsers(username=username)) > 0 ## Private methods: ## def _act(self, requestBuffer): return _ksrv.action_thin(self._C_conn, requestBuffer.render()) def _actAndReturnTextualResults(self, requestBuffer): self._act(requestBuffer) return self._collectUnformattedResults() def _repairAction(self, database, partialReqBuf): # Begin constructing the request buffer (incorporate the one passed as # param $partialReqBuf). fullReqBuf = _ServiceActionRequestBuilder(_ksrv.isc_action_svc_repair) # The filename of the database must be specified regardless of the # action sub-action being perfomed. fullReqBuf.addDatabaseName(database) # Incorporate the caller's partial request buffer. fullReqBuf.extend(partialReqBuf) _ksrv.action_thin(self._C_conn, fullReqBuf.render()) # Return the results to the caller synchronously (in this case, there # won't be any textual results, but issuing this call will helpfully # cause the program to block until the Services Manager is finished # with the action). return self._collectUnformattedResults() def _propertyAction(self, database, partialReqBuf): # Begin constructing the request buffer (incorporate the one passed as # param $partialReqBuf). fullReqBuf = _ServiceActionRequestBuilder( _ksrv.isc_action_svc_properties ) # The filename of the database must be specified regardless of the # action sub-action being perfomed. fullReqBuf.addDatabaseName(database) # Incorporate the caller's partial request buffer. fullReqBuf.extend(partialReqBuf) _ksrv.action_thin(self._C_conn, fullReqBuf.render()) # Return the results to the caller synchronously. # Since they don't produce output, is the following useful? # LATER: Yes, because it blocks until there's been some resolution of # the action. return self._collectUnformattedResults() def _propertyActionWithSingleNumericCode(self, database, code, num, numCType='I' ): reqBuf = _ServiceActionRequestBuilder() reqBuf.addNumeric(code, num, numCType=numCType) return self._propertyAction(database, reqBuf) def _Q(self, code, resultType): return _ksrv.query_base(self._C_conn, code, resultType) def _QI(self, code): return self._Q(code, _ksrv.QUERY_TYPE_PLAIN_INTEGER) def _QS(self, code): return self._Q(code, _ksrv.QUERY_TYPE_PLAIN_STRING) def _QR(self, code): return self._Q(code, _ksrv.QUERY_TYPE_RAW) def _collectUnformattedResults(self, lineSep='\n'): # YYY: It might be desirable to replace this function with a more # performant version based on _ksrv.isc_info_svc_to_eof rather than # _ksrv.isc_info_svc_line; the function's interface is transparent # either way. # This enhancement should be a very low priority; the Service Manager # API is not typically used for performance-intensive operations. resultLines = [] while 1: try: line = self._QS(_ksrv.isc_info_svc_line) except OperationalError: # YYY: It is routine for actions such as RESTORE to raise an # exception at the end of their output. We ignore any such # exception and assume that it was expected, which is somewhat # risky. For example, suppose the network connection is broken # while the client is receiving the action's output... break if not line: break resultLines.append(line) return lineSep.join(resultLines) def _get_isc_info_svc_svr_db_info(self): num_attachments = -1 databases = [] raw = self._QR(_ksrv.isc_info_svc_svr_db_info) assert raw[-1] == chr(_ksrv.isc_info_flag_end) pos = 1 # Ignore raw[0]. 
        upper_limit = len(raw) - 1
        while pos < upper_limit:
            cluster = ord(raw[pos])
            pos += 1

            if cluster == _ksrv.isc_spb_num_att: # Number of attachments.
                (num_attachments, pos) = _extract_long_unsigned(raw, pos)
            elif cluster == _ksrv.isc_spb_num_db: # Number of databases
                                                  # attached to.
                # Do nothing except to advance pos; the number of databases
                # can be had from len(databases).
                (_, pos) = _extract_long_unsigned(raw, pos)
            elif cluster == _ksrv.isc_spb_dbname:
                (db_name, pos) = _extract_sized_string(raw, pos)
                databases.append(db_name)

        return (num_attachments, databases)

    def _get_isc_info_svc_get_config(self):
        config = {}

        raw = self._QR(_ksrv.isc_info_svc_get_config)
        assert raw[-1] == chr(_ksrv.isc_info_flag_end)

        def _store_ulong(key, raw, pos, config=config):
            (val, pos) = _extract_long_unsigned(raw, pos)
            config[key] = val
            return pos

        pos = 1 # Ignore raw[0].
        upper_limit = len(raw) - 1
        while pos < upper_limit:
            cluster = ord(raw[pos])
            pos += 1
            # These are all unsigned long values; no unique parsing is
            # needed.
            pos = _store_ulong(cluster, raw, pos)

        return config


class User(object):
    def __init__(self, username=None):
        if username:
            _checkString(username)
            self.username = username.upper()
        else:
            self.username = None

        # The password is not returned by user output methods, but must be
        # specified to add a user.
        self.password = None

        self.firstName = None
        self.middleName = None
        self.lastName = None

        # The user id and group id are not fully supported.  For details,
        # see the documentation of the "User Management Methods" of
        # services.Connection.
        self.userId = None
        self.groupId = None

    def __str__(self):
        return '<kinterbasdb.services.User %s>' % (
            (self.username is None and 'without a name')
            or 'named "%s"' % self.username
          )

###############################################################################
# TOTALLY PRIVATE SECTION : BEGIN                                             #
###############################################################################
# Client programmers of this module MUST NOT RELY on anything within this
# section.  Note, however, that the private content in this module is not
# limited to this section.  There are private members in other sections,
# always denoted by a leading underscore.

def _requireStrOrTupleOfStr(x):
    if isinstance(x, str):
        x = (x,)
    elif isinstance(x, unicode):
        # We know the following call to _checkString will raise an exception,
        # but calling it anyway allows us to centralize the error message
        # generation:
        _checkString(x)

    for el in x:
        _checkString(el)

    return x

def _excludeElementsOfTypes(seq, theTypesToExclude):
    if not isinstance(theTypesToExclude, tuple):
        theTypesToExclude = tuple(theTypesToExclude)
    return [
        element for element in seq
        if not isinstance(element, theTypesToExclude)
      ]

def _validateCompanionStringNumericSequences(
    strings, numbers, stringCaption, numberCaption
  ):
    # The core constraint here is that len(numbers) must equal
    # len(strings) - 1.
    stringsCount = len(strings)
    numbersCount = len(numbers)

    requiredNumbersCount = stringsCount - 1
    if numbersCount != requiredNumbersCount:
        raise ValueError(
            'Since you passed %d %s, you must %s corresponding %s.'
% (stringsCount, stringCaption, (requiredNumbersCount > 0 and 'pass %d' % requiredNumbersCount ) or 'not pass any', numberCaption ) ) def _extract_long_unsigned(s, index): new_index = index + _ksrv.SIZEOF_SHORT_UNSIGNED return ( _ksrv.vax(s[index:new_index]), new_index ) def _extract_sized_string(s, index): (s_len, index) = _extract_long_unsigned(s, index) new_index = index + s_len return ( s[index:new_index], new_index ) # Rather tricky conversion functions: def _vax_inverse(i, format): # Apply the inverse of _ksrv.isc_vax_integer to a Python integer; return # the raw bytes of the resulting value. iRaw = struct.pack(format, i) iConv = _ksrv.vax(iRaw) iConvRaw = struct.pack(format, iConv) return iConvRaw def _renderSizedIntegerForSPB(i, format): # In order to prepare the Python integer i for inclusion in a Services # API action request buffer, the byte sequence of i must be reversed, which # will make i unrepresentible as a normal Python integer. # Therefore, the rendered version of i must be stored in a raw byte # buffer. # This function returns a 2-tuple containing: # 1. the calculated struct.pack-compatible format string for i # 2. the Python string containing the SPB-compatible raw binary rendering # of i # # Example: # To prepare the Python integer 12345 for storage as an unsigned int in a # SPB, use code such as this: # (iPackFormat, iRawBytes) = _renderSizedIntegerForSPB(12345, 'I') # spbBytes = struct.pack(iPackFormat, iRawBytes) # destFormat = '%ds' % struct.calcsize(format) destVal = _vax_inverse(i, format) return (destFormat, destVal) def _string2spb(spb, code, s): sLen = len(s) _numeric2spb(spb, code, sLen, numCType='H') format = str(sLen) + 's' # The length, then 's'. spb.append( struct.pack(format, s) ) def _numeric2spb(spb, code, num, numCType='I'): # numCType is one of the pack format characters specified by the Python # standard library module 'struct'. _code2spb(spb, code) (numericFormat, numericBytes) = _renderSizedIntegerForSPB(num, numCType) spb.append( struct.pack(numericFormat, numericBytes) ) def _code2spb(spb, code): (format, bytes) = _renderSizedIntegerForSPB(code, 'b') spb.append( struct.pack(format, bytes) ) class _ServiceActionRequestBuilder(object): # This private class helps public facilities in this module to build # the binary action request buffers required by the database Services API # using high-level, easily comprehensible syntax. def __init__(self, clusterIdentifier=None): self._buffer = [] if clusterIdentifier: self.addCode(clusterIdentifier) def __str__(self): return self.render() def extend(self, otherRequestBuilder): self._buffer.append(otherRequestBuilder.render()) def addCode(self, code): _code2spb(self._buffer, code) def addString(self, code, s): _checkString(s) _string2spb(self._buffer, code, s) def addSequenceOfStrings(self, code, stringSequence): for s in stringSequence: self.addString(code, s) def addSequenceOfStringNumericPairs(self, stringCode, stringSequence, numericCode, numericSequence ): stringCount = len(stringSequence) numericCount = len(numericSequence) if numericCount != stringCount - 1: raise ValueError("Numeric sequence must contain exactly one less" " element than its companion string sequence." 
) i = 0 while i < stringCount: self.addString(stringCode, stringSequence[i]) if i < numericCount: self.addNumeric(numericCode, numericSequence[i]) i += 1 def addNumeric(self, code, n, numCType='I'): _numeric2spb(self._buffer, code, n, numCType=numCType) def addOptionMask(self, optionMask): self.addNumeric(_ksrv.isc_spb_options, optionMask) def addDatabaseName(self, databaseName): # 2003.07.20: Issue a warning for a hostname-containing databaseName # because it will cause isc_service_start to raise an inscrutable error # message with Firebird 1.5 (though it would not have raised an error # at all with Firebird 1.0 and earlier). colonIndex = databaseName.find(':') if colonIndex != -1: # This code makes no provision for platforms other than Windows # that allow colons in paths (such as MacOS). Some of # kinterbasdb's current implementation (e.g., event handling) is # constrained to Windows or POSIX anyway. if not sys.platform.lower().startswith('win') or ( # This client process is running on Windows. # # Files that don't exist might still be valid if the connection # is to a server other than the local machine. not os.path.exists(databaseName) # "Guess" that if the colon falls within the first two # characters of the string, the pre-colon portion refers to a # Windows drive letter rather than to a remote host. # This isn't guaranteed to be correct. and colonIndex > 1 ): warnings.warn( ' Unlike conventional DSNs, Services API database names' ' must not include the host name; remove the "%s" from' ' your database name.' ' (Firebird 1.0 will accept this, but Firebird 1.5 will' ' raise an error.)' % databaseName[:colonIndex+1], UserWarning ) self.addString(_ksrv.isc_spb_dbname, databaseName) def render(self): return ''.join(self._buffer) def _checkString(s): try: if isinstance(s, str): # In str instances, Python allows any character in the "default # encoding", which is typically not ASCII. Since Firebird's # Services API only works (properly) with ASCII, we need to make # sure there are no non-ASCII characters in s, even though we # already know s is a str instance. s.encode('ASCII') else: if isinstance(s, unicode): # Raise a more specific error message than the general case. raise UnicodeError else: raise TypeError('String argument to Services API must be' ' of type str, not %s.' % type(s) ) except UnicodeError: raise TypeError("The database engine's Services API only works" " properly with ASCII string parameters, so str instances that" " contain non-ASCII characters, and all unicode instances, are" " disallowed." ) ############################################################################### # TOTALLY PRIVATE SECTION : END # ############################################################################### kinterbasdb-3.3.0/_kiservices.c0000644000175000001440000010616711130647414015673 0ustar pcisarusers/* KInterbasDB Python Package - Implementation of Services Manager Support * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ #include "_kiservices.h" #define SPB_BOILERPLATE_SIZE 2 #define INFINITE_TIMEOUT -1 /* Set the following to a very small value if you want to test result buffer * truncation handling. 
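 * (For example, ((unsigned short) 8) would force the enlarge-and-retry loop
 * in pyob_query_base to run several times for virtually every query,
 * exercising the isc_info_truncated branch.)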
*/ #define SERVICE_RESULT_BUFFER_INITIAL_SIZE ((unsigned short) 1024) /* Types of query output handling that pyob_query_base can be instructed to * perform (these constants are made accessible to Python in function * _init_kiservices_ibase_header_constants): */ #define QUERY_TYPE_PLAIN_STRING 1 #define QUERY_TYPE_PLAIN_INTEGER 2 #define QUERY_TYPE_RAW 3 /* The following is a version of the ibase.h-standard macro ADD_SPB_NUMERIC * that doesn't generate compiler warnings. This custom version is probably a * little slower, but this macro isn't used in performance-intensive areas * anyway. */ #define ADD_SPB_NUMERIC_DSR(buf_pos, data) \ memcpy(buf_pos, &data, sizeof(unsigned long)); \ buf_pos += sizeof(unsigned long); /******************** GLOBAL VARIABLES:BEGIN ***********************/ boolean initialized = FALSE; #include "_kilock.h" #ifdef ENABLE_CONCURRENCY /* Values are transferred from the _kinterbasdb shared lib in * pyob_initialize_from before any of kinterbasdb's Services API code is * actually executed: */ int global_concurrency_level = UNKNOWN_CONCURRENCY_LEVEL; PyThread_type_lock _global_db_client_lock = NULL; #endif /* def ENABLE_CONCURRENCY */ /* Global references to the DB API exception objects from _kinterbasdb. * References to the exception objects are transferred here by the * pyob_initialize_from function so that code in this module can access these * exceptions in order to raise them. */ static PyObject *Warning = NULL; static PyObject *Error = NULL; static PyObject *InterfaceError = NULL; static PyObject *DatabaseError = NULL; static PyObject *DataError = NULL; static PyObject *OperationalError = NULL; static PyObject *TransactionConflict = NULL; static PyObject *IntegrityError = NULL; static PyObject *InternalError = NULL; static PyObject *ProgrammingError = NULL; static PyObject *NotSupportedError = NULL; #include "_kinterbasdb_exception_functions.c" /******************** GLOBAL VARIABLES:END ***********************/ /******************** PRIVATE FUNCTION PROTOTYPES:BEGIN ********************/ static PyObject *pyob_initialize_from(PyObject *self, PyObject *args); static PyObject *pyob_SConnection_connect(PyObject *self, PyObject *args); static PyObject *pyob_SConnection_close(PyObject *self, PyObject *args); static int SConnection_close( ServicesConnectionObject *con, boolean allowed_to_raise ); static void pyob_SConnection___del__(PyObject *con); static PyObject *pyob_isc_vax_integer(PyObject *self, PyObject *args); static PyObject *pyob_query_base(PyObject *self, PyObject *args); /******************** PRIVATE FUNCTION PROTOTYPES:END ********************/ /****** SERVICES MANAGER CONNECTION CREATION/DELETION FUNCTIONS:BEGIN *******/ static PyObject *pyob_SConnection_connect(PyObject *self, PyObject *args) { ServicesConnectionObject *con = NULL; char *service_manager_name = NULL; Py_ssize_t service_manager_name_len = -1; char *username = NULL; Py_ssize_t username_len = -1; char *password = NULL; Py_ssize_t password_len = -1; char *spb = NULL; char *spb_walk = NULL; size_t spb_length; if (!PyArg_ParseTuple(args, "z#z#z#", &service_manager_name, &service_manager_name_len, &username, &username_len, &password, &password_len ) ) { goto fail; } if (service_manager_name_len + username_len + password_len > 118) { raise_exception(ProgrammingError, "The combined length of the host, user," " and password cannot exceed 118 bytes." 
); goto fail; } con = PyObject_New(ServicesConnectionObject, &ServicesConnectionType); if (con == NULL) { goto fail; } con->service_handle = NULL_SVC_HANDLE; spb_length = SPB_BOILERPLATE_SIZE + 1 /* the code isc_spb_user_name */ + 1 /* the one-byte length username_len */ + PYTHON_SIZE_TO_SIZE_T(username_len) /* the contents of username */ + 1 /* the code isc_spb_password */ + 1 /* the one-byte length password_len */ + PYTHON_SIZE_TO_SIZE_T(password_len) /* the contents of password */ ; if (spb_length > USHRT_MAX) { /* If the size of the username, the password, and a few other bytes exceeds * USHRT_MAX, it's certainly due to client programmer error: */ raise_exception(ProgrammingError, "Service parameter buffer created to" " hold username and password were too large." ); goto fail; } spb = kimem_main_malloc(spb_length); if (spb == NULL) { goto fail; } spb_walk = spb; /* SPB_BOILERPLATE_SIZE refers to the next two entries: */ *spb_walk++ = isc_spb_version; *spb_walk++ = isc_spb_current_version; *spb_walk++ = isc_spb_user_name; /* Cast is safe b/c already checked val: */ *spb_walk++ = (char) username_len; strncpy(spb_walk, username, PYTHON_SIZE_TO_SIZE_T(username_len)); spb_walk += PYTHON_SIZE_TO_SIZE_T(username_len); *spb_walk++ = isc_spb_password; /* Cast is safe b/c already checked val: */ *spb_walk++ = (char) password_len; strncpy(spb_walk, password, PYTHON_SIZE_TO_SIZE_T(password_len)); spb_walk += PYTHON_SIZE_TO_SIZE_T(password_len); assert (spb_length == (size_t) (spb_walk - spb)); LEAVE_GIL_WITHOUT_AFFECTING_DB ENTER_GDAL_WITHOUT_LEAVING_PYTHON ENTER_GCDL_WITHOUT_LEAVING_PYTHON isc_service_attach(con->status, /* Cast is safe b/c already checked val: */ (unsigned short) service_manager_name_len, service_manager_name, &con->service_handle, /* Cast is safe b/c already checked val: */ (unsigned short) spb_length, spb ); LEAVE_GCDL_WITHOUT_ENTERING_PYTHON LEAVE_GDAL_WITHOUT_ENTERING_PYTHON ENTER_GIL_WITHOUT_AFFECTING_DB if (DB_API_ERROR(con->status)) { raise_sql_exception(OperationalError, "_kiservices.pyob_SConnection_connect: ", con->status ); goto fail; } goto cleanup; fail: assert (PyErr_Occurred()); Py_XDECREF((PyObject *) con); con = NULL; /* Fall through to cleanup. */ cleanup: if (spb != NULL) { kimem_main_free(spb); } return (PyObject *) con; } /* pyob_SConnection_connect */ static int SConnection_close( ServicesConnectionObject *con, boolean allowed_to_raise ) { if (con->service_handle != NULL_SVC_HANDLE) { LEAVE_GIL_WITHOUT_AFFECTING_DB ENTER_GDAL_WITHOUT_LEAVING_PYTHON ENTER_GCDL_WITHOUT_LEAVING_PYTHON isc_service_detach(con->status, &con->service_handle); LEAVE_GCDL_WITHOUT_ENTERING_PYTHON LEAVE_GDAL_WITHOUT_ENTERING_PYTHON ENTER_GIL_WITHOUT_AFFECTING_DB /* Set NULL to prevent segfault on "double jeopardy disconnect" (where * exception is raised by con.close(), then con.__del__ calls this function * again with an invalid service handle). * Note: isc_service_detach apparently follows the model of other * detach/close functions in the Firebird C API, i.e. we need not manually * free the service handle's memory. 
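 * (Only the ServicesConnectionObject struct itself belongs to us; it is
 * released later, in pyob_SConnection___del__, via PyObject_Del.)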
*/ con->service_handle = NULL_SVC_HANDLE; if (DB_API_ERROR(con->status)) { raise_sql_exception(OperationalError, "_kiservices could not cleanly" " disconnect from the service manager: ", con->status ); if (allowed_to_raise) { goto fail; } else { SUPPRESS_EXCEPTION; } } } assert (con->service_handle == NULL_SVC_HANDLE); return 0; fail: assert (PyErr_Occurred()); return -1; } /* SConnection_close */ static PyObject *pyob_SConnection_close(PyObject *self, PyObject *args) { ServicesConnectionObject *con = NULL; if (!PyArg_ParseTuple(args, "O!", &ServicesConnectionType, &con)) { goto fail; } if (SConnection_close(con, TRUE) != 0) { goto fail; } RETURN_PY_NONE; fail: assert (PyErr_Occurred()); return NULL; } /* pyob_SConnection_close */ static void pyob_SConnection___del__(PyObject *con) { ServicesConnectionObject *_con = (ServicesConnectionObject *) con; SConnection_close(_con, FALSE); /* FALSE -> Ignore any errors in closing. */ /* Due to the fact that it was called from this destructor context, where * error recovery isn't possible, the SConnection_close function should've set * _con->service_handle to NULL regardless of whether the close operation * succeeded. */ assert (_con->service_handle == NULL_SVC_HANDLE); /* Free the memory of the ServicesConnectionObject struct itself: */ PyObject_Del(con); } /* pyob_SConnection___del__ */ /****** SERVICES MANAGER CONNECTION CREATION/DELETION FUNCTIONS:END *******/ /*********************** ACTION FUNCTIONS:BEGIN *****************************/ static PyObject *pyob_action_thin(PyObject *self, PyObject *args) { ServicesConnectionObject *con = NULL; char *request_buf = NULL; Py_ssize_t req_buf_size = -1; if (!PyArg_ParseTuple(args, "O!s#", &ServicesConnectionType, &con, &request_buf, &req_buf_size ) ) { goto fail; } if (req_buf_size > USHRT_MAX) { PyObject *err_msg = PyString_FromFormat( "The size of the request buffer must not exceed %d.", USHRT_MAX ); if (err_msg == NULL) { goto fail; } raise_exception(ProgrammingError, PyString_AS_STRING(err_msg)); Py_DECREF(err_msg); goto fail; } ENTER_GDAL isc_service_start(con->status, &con->service_handle, NULL, /* Cast is safe b/c already checked val: */ (unsigned short) req_buf_size, request_buf ); LEAVE_GDAL if (DB_API_ERROR(con->status)) { raise_sql_exception(OperationalError, "Unable to perform the requested" " Services API action: ", con->status ); goto fail; } RETURN_PY_NONE; fail: assert (PyErr_Occurred()); return NULL; } /* pyob_action_thin */ /*********************** ACTION FUNCTIONS:END *****************************/ /********* SERVICES MANAGER QUERY FUNCTIONS:BEGIN **********/ static PyObject *pyob_query_base(PyObject *self, PyObject *args) { ServicesConnectionObject *con = NULL; char req_items[] = " "; int req_item; #define Q_P_STR_REQ_ITEM_COUNT ((unsigned short) 1) int query_return_type; long timeout = INFINITE_TIMEOUT; char spb[6]; char *spb_walk = spb; char *raw_result = NULL; size_t raw_result_size; char *raw_result_walk; PyObject *py_ret = NULL; if (!PyArg_ParseTuple(args, "O!ii|l", &ServicesConnectionType, &con, &req_item, &query_return_type, &timeout ) ) { goto fail; } if (req_item < 0 || req_item > UCHAR_MAX) { PyObject *err_msg = PyString_FromFormat("The service query request_buf" " code must fall between 0 and %d, inclusive.", UCHAR_MAX ); if (err_msg != NULL) { raise_exception(ProgrammingError, PyString_AS_STRING(err_msg)); Py_DECREF(err_msg); } goto fail; } req_items[0] = (char) req_item; if (timeout != INFINITE_TIMEOUT) { *spb_walk++ = isc_info_svc_timeout; ADD_SPB_NUMERIC_DSR(spb_walk, 
timeout); } raw_result_size = SERVICE_RESULT_BUFFER_INITIAL_SIZE; /* Loop, enlarging the raw_result buffer, until the query's results can fit * in raw_result. */ for (;;) { if (raw_result_size > USHRT_MAX) { raise_exception(InternalError, "Database C API constrains maximum" " result buffer size to USHRT_MAX." ); goto fail; } raw_result = kimem_main_realloc(raw_result, raw_result_size); if (raw_result == NULL) { goto fail; } memset(raw_result, 0, raw_result_size); ENTER_GDAL isc_service_query(con->status, &con->service_handle, NULL, (unsigned short) (spb_walk - spb), spb, Q_P_STR_REQ_ITEM_COUNT, req_items, /* Cast is safe b/c already checked raw_result_size: */ (unsigned short) raw_result_size, raw_result ); LEAVE_GDAL if (DB_API_ERROR(con->status)) { raise_sql_exception(OperationalError, "_kiservices could not query: ", con->status ); goto fail; } if (raw_result[0] == isc_info_truncated) { /* We need to allocate a bigger buffer because the service results * couldn't fit in the one we initially supplied. */ raw_result_size *= 4; continue; } break; /* raw_result was big enough; move on. */ } /* ends for loop */ raw_result_walk = raw_result; assert (*raw_result_walk == req_items[0]); raw_result_walk++; switch (query_return_type) { case QUERY_TYPE_PLAIN_STRING: { /* The database C API currently constrains the size of result strings to * USHRT_MAX or less. */ unsigned short res_len; ENTER_GDAL res_len = (unsigned short) isc_vax_integer( raw_result_walk, sizeof(unsigned short) ); raw_result_walk += sizeof(unsigned short); LEAVE_GDAL py_ret = PyString_FromStringAndSize(NULL, res_len); if (py_ret == NULL) { goto fail; } strncpy(PyString_AS_STRING(py_ret), raw_result_walk, res_len); raw_result_walk += res_len; assert (*raw_result_walk == isc_info_end); break; } case QUERY_TYPE_PLAIN_INTEGER: { unsigned int return_value_as_uint; ENTER_GDAL return_value_as_uint = (unsigned int) isc_vax_integer( raw_result_walk, sizeof(unsigned int) ); LEAVE_GDAL py_ret = PyInt_FromLong(return_value_as_uint); if (py_ret == NULL) { goto fail; } raw_result_walk += sizeof (unsigned int); assert (*raw_result_walk == isc_info_end); break; } case QUERY_TYPE_RAW: raw_result_walk = raw_result + (raw_result_size - 1); while (*raw_result_walk == '\0') { raw_result_walk--; } /* The return string might contain NULL bytes (Python strings have no * problem with that). */ py_ret = PyString_FromStringAndSize(raw_result, SIZE_T_TO_PYTHON_SIZE(raw_result_walk - raw_result) ); break; default: PyErr_SetString(PyExc_TypeError, "_kiservices.query_base is not equipped" " to handle this query type." ); goto fail; } goto cleanup; /* Success. */ fail: assert (PyErr_Occurred()); if (py_ret != NULL) { Py_DECREF(py_ret); py_ret = NULL; } cleanup: if (raw_result != NULL) { kimem_main_free(raw_result); } return py_ret; } /* pyob_query_base */ /********* SERVICES MANAGER QUERY FUNCTIONS:END **********/ /************** SERVICES UTILITIY FUNCTIONS:BEGIN ***************/ static PyObject *pyob_isc_vax_integer(PyObject *self, PyObject *args) { /* isc_vax_integer reverses the byte order of an integer. This Python * wrapper is used in services.py when parsing the raw return buffer from a * Services Manager query. 
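 * (Conceptually: the two raw little-endian bytes 0x01 0x00, passed with a
 * length of 2, come back as the Python integer 1.)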
*/ char *raw_bytes; Py_ssize_t raw_len; int result; if (!PyArg_ParseTuple( args, "s#", &raw_bytes, &raw_len)) { goto fail; } if (raw_len != 4 && raw_len != 2 && raw_len != 1) { raise_exception(InternalError, "pyob_isc_vax_integer: len(buf) must be in (1,2,4)" ); goto fail; } ENTER_GDAL result = isc_vax_integer(raw_bytes, /* Cast is safe b/c already checked val: */ (unsigned short) raw_len ); LEAVE_GDAL return PyInt_FromLong(result); fail: assert (PyErr_Occurred()); return NULL; } /* pyob_isc_vax_integer */ /************** SERVICES UTILITIY FUNCTIONS:END *****************/ /********* PYTHON TYPE OBJECT SETUP:BEGIN **********/ static PyTypeObject ServicesConnectionType = { PyObject_HEAD_INIT(NULL) 0, "_kiservices.ServicesConnection", sizeof( ServicesConnectionObject ), 0, pyob_SConnection___del__, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; /* ServicesConnectionType */ /********* PYTHON TYPE OBJECT SETUP:END **********/ /****************** MODULE ADMINISTRATION FUNCTIONS:BEGIN ********************/ static PyObject *pyob_is_initialized(PyObject *self) { return PyBool_FromLong(initialized); } /* pyob_is_initialized */ static PyObject *pyob_initialize_from(PyObject *self, PyObject *args) { /* Makes kinterbasdb's global thread lock and references to its Python DB API * exception classes readily available to this C code. */ PyObject *source = NULL; if (!PyArg_ParseTuple(args, "O", &source)) { goto fail; } #ifdef ENABLE_CONCURRENCY { PyObject *cl = PyObject_CallMethod(source, "concurrency_level_get", NULL); if (cl == NULL) { goto fail; } assert (PyInt_Check(cl)); global_concurrency_level = (int) PyInt_AS_LONG(cl); Py_DECREF(cl); } { PyObject *lock_wrapper = PyObject_GetAttrString(source, "_global_db_client_lock__python_Wrapper" ); if (lock_wrapper == NULL) { goto fail; } assert (_global_db_client_lock == NULL); _global_db_client_lock = PyCObject_AsVoidPtr(lock_wrapper); Py_DECREF(lock_wrapper); if (_global_db_client_lock == NULL) { goto fail; } } #endif /* def ENABLE_CONCURRENCY */ #define LOAD_EXC_PTR(ex_name) \ /* PyObject_GetAttrString returns a new reference; no need for us to \ * INCREF. */ \ ex_name = PyObject_GetAttrString(source, #ex_name); \ if (ex_name == NULL) { goto fail; } LOAD_EXC_PTR(Warning) LOAD_EXC_PTR(Error) LOAD_EXC_PTR(InterfaceError) LOAD_EXC_PTR(DatabaseError) LOAD_EXC_PTR(DataError) LOAD_EXC_PTR(OperationalError) LOAD_EXC_PTR(TransactionConflict) LOAD_EXC_PTR(IntegrityError) LOAD_EXC_PTR(InternalError) LOAD_EXC_PTR(ProgrammingError) LOAD_EXC_PTR(NotSupportedError) initialized = TRUE; RETURN_PY_NONE; fail: assert (PyErr_Occurred()); return NULL; } /* initialize_from */ int _init_kiservices_ibase_header_constants(PyObject *module) { /* Makes a bunch of Services-API-related database engine constants available * at the Python level. */ /* 2005.06.24: I analyzed the potential memory savings of writing this code * so that at most one Python int object with a given value is created. * Using the following program * from kinterbasdb.services import _ksrv * for x in sorted( * getattr(_ksrv, nm) for nm in dir(_ksrv) if nm.startswith('isc_') * ): * print x * I found that the vast majority of the 330 integers loaded when compiling * against FB 1.5 are in the range already cached by Python, so there's * virtually no memory savings to be gained by manual optimization here. */ /* SICN is a shortcut for entering header constants into the module's * namespace. 
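 * (Expanded, SICN("x", 1) is simply a checked call to
 * PyModule_AddIntConstant(module, "x", 1) that jumps to the fail label on
 * error.)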
*/ #define SICN(name, value) \ if (PyModule_AddIntConstant(module, name, value) == -1) { goto fail; } /* SIC is a further shortcut for values that we want to expose under the same * name they have in C. */ #define SIC(token) \ SICN(#token, token) SICN("SIZEOF_SHORT", sizeof(short)); SICN("SIZEOF_SHORT_UNSIGNED", sizeof(unsigned short)); SICN("SIZEOF_LONG", sizeof(long)); SICN("SIZEOF_LONG_UNSIGNED", sizeof(unsigned long)); /* Locally defined header constants that must be accessible from Python: */ SIC(QUERY_TYPE_PLAIN_STRING); SIC(QUERY_TYPE_PLAIN_INTEGER); SIC(QUERY_TYPE_RAW); /* YYY: Cull these constants. They've never been accessible to the client * programmer, so there's no danger of breaking client code in doing so. The * constants are used only in services.py, and only a small fraction of those * listed here are used there. */ /* Drawn from ibase.h: */ SIC(isc_spb_version1); SIC(isc_spb_current_version); SIC(isc_spb_version); SIC(isc_spb_current_version); SIC(isc_spb_user_name); SIC(isc_spb_sys_user_name); SIC(isc_spb_sys_user_name_enc); SIC(isc_spb_password); SIC(isc_spb_password_enc); SIC(isc_spb_command_line); SIC(isc_spb_dbname); SIC(isc_spb_verbose); SIC(isc_spb_options); SIC(isc_spb_connect_timeout); SIC(isc_spb_dummy_packet_interval); SIC(isc_spb_sql_role_name); SIC(isc_dpb_sys_user_name); SIC(isc_dpb_sys_user_name_enc); SIC(isc_dpb_password); SIC(isc_dpb_password_enc); SIC(isc_dpb_connect_timeout); SIC(isc_dpb_dummy_packet_interval); SIC(isc_dpb_user_name); SIC(isc_dpb_sql_role_name); SIC(isc_info_end); SIC(isc_info_truncated); SIC(isc_info_error); SIC(isc_info_data_not_ready); SIC(isc_info_flag_end); SIC(isc_info_db_id); SIC(isc_info_reads); SIC(isc_info_writes); SIC(isc_info_fetches); SIC(isc_info_marks); SIC(isc_info_implementation); SIC(isc_info_base_level); SIC(isc_info_page_size); SIC(isc_info_num_buffers); SIC(isc_info_limbo); SIC(isc_info_current_memory); SIC(isc_info_max_memory); SIC(isc_info_window_turns); SIC(isc_info_license); SIC(isc_info_allocation); SIC(isc_info_attachment_id); SIC(isc_info_read_seq_count); SIC(isc_info_read_idx_count); SIC(isc_info_insert_count); SIC(isc_info_update_count); SIC(isc_info_delete_count); SIC(isc_info_backout_count); SIC(isc_info_purge_count); SIC(isc_info_expunge_count); SIC(isc_info_sweep_interval); SIC(isc_info_ods_version); SIC(isc_info_ods_minor_version); SIC(isc_info_no_reserve); SIC(isc_info_logfile); SIC(isc_info_cur_logfile_name); SIC(isc_info_cur_log_part_offset); SIC(isc_info_num_wal_buffers); SIC(isc_info_wal_buffer_size); SIC(isc_info_wal_ckpt_length); SIC(isc_info_wal_cur_ckpt_interval); SIC(isc_info_wal_prv_ckpt_fname); SIC(isc_info_wal_prv_ckpt_poffset); SIC(isc_info_wal_recv_ckpt_fname); SIC(isc_info_wal_recv_ckpt_poffset); SIC(isc_info_wal_grpc_wait_usecs); SIC(isc_info_wal_num_io); SIC(isc_info_wal_avg_io_size); SIC(isc_info_wal_num_commits); SIC(isc_info_wal_avg_grpc_size); SIC(isc_info_forced_writes); SIC(isc_info_user_names); SIC(isc_info_page_errors); SIC(isc_info_record_errors); SIC(isc_info_bpage_errors); SIC(isc_info_dpage_errors); SIC(isc_info_ipage_errors); SIC(isc_info_ppage_errors); SIC(isc_info_tpage_errors); SIC(isc_info_set_page_buffers); SIC(isc_info_db_sql_dialect); SIC(isc_info_db_read_only); SIC(isc_info_db_size_in_pages); #ifdef isc_info_firebird_version SIC(isc_info_db_class); SIC(isc_info_firebird_version); SIC(isc_info_oldest_transaction); SIC(isc_info_oldest_active); SIC(isc_info_oldest_snapshot); SIC(isc_info_next_transaction); SIC(isc_info_db_provider); SIC(isc_info_db_last_value); 
SIC(isc_info_version); SIC(isc_info_isc_version); #endif /* isc_info_firebird_version */ SIC(isc_info_db_impl_rdb_vms); SIC(isc_info_db_impl_rdb_eln); SIC(isc_info_db_impl_rdb_eln_dev); SIC(isc_info_db_impl_rdb_vms_y); SIC(isc_info_db_impl_rdb_eln_y); SIC(isc_info_db_impl_jri); SIC(isc_info_db_impl_jsv); SIC(isc_info_db_impl_isc_apl_68K); SIC(isc_info_db_impl_isc_vax_ultr); SIC(isc_info_db_impl_isc_vms); SIC(isc_info_db_impl_isc_sun_68k); SIC(isc_info_db_impl_isc_os2); SIC(isc_info_db_impl_isc_sun4); SIC(isc_info_db_impl_isc_hp_ux); SIC(isc_info_db_impl_isc_sun_386i); SIC(isc_info_db_impl_isc_vms_orcl); SIC(isc_info_db_impl_isc_mac_aux); SIC(isc_info_db_impl_isc_rt_aix); SIC(isc_info_db_impl_isc_mips_ult); SIC(isc_info_db_impl_isc_xenix); SIC(isc_info_db_impl_isc_dg); SIC(isc_info_db_impl_isc_hp_mpexl); SIC(isc_info_db_impl_isc_hp_ux68K); SIC(isc_info_db_impl_isc_sgi); SIC(isc_info_db_impl_isc_sco_unix); SIC(isc_info_db_impl_isc_cray); SIC(isc_info_db_impl_isc_imp); SIC(isc_info_db_impl_isc_delta); SIC(isc_info_db_impl_isc_next); SIC(isc_info_db_impl_isc_dos); #ifdef isc_info_firebird_version SIC(isc_info_db_impl_m88K); SIC(isc_info_db_impl_unixware); SIC(isc_info_db_impl_isc_winnt_x86); SIC(isc_info_db_impl_isc_epson); SIC(isc_info_db_impl_alpha_osf); SIC(isc_info_db_impl_alpha_vms); SIC(isc_info_db_impl_netware_386); SIC(isc_info_db_impl_win_only); SIC(isc_info_db_impl_ncr_3000); SIC(isc_info_db_impl_winnt_ppc); SIC(isc_info_db_impl_dg_x86); SIC(isc_info_db_impl_sco_ev); SIC(isc_info_db_impl_i386); SIC(isc_info_db_impl_freebsd); SIC(isc_info_db_impl_netbsd); SIC(isc_info_db_impl_darwin); SIC(isc_info_db_impl_last_value); #endif /* isc_info_firebird_version */ SIC(isc_info_db_impl_isc_a); SIC(isc_info_db_impl_isc_apl_68K); SIC(isc_info_db_impl_isc_u); SIC(isc_info_db_impl_isc_vax_ultr); SIC(isc_info_db_impl_isc_v); SIC(isc_info_db_impl_isc_vms); SIC(isc_info_db_impl_isc_s); SIC(isc_info_db_impl_isc_sun_68k); SIC(isc_info_db_class_access); SIC(isc_info_db_class_y_valve); SIC(isc_info_db_class_rem_int); SIC(isc_info_db_class_rem_srvr); SIC(isc_info_db_class_pipe_int); SIC(isc_info_db_class_pipe_srvr); SIC(isc_info_db_class_sam_int); SIC(isc_info_db_class_sam_srvr); SIC(isc_info_db_class_gateway); SIC(isc_info_db_class_cache); #ifdef isc_info_firebird_version SIC(isc_info_db_class_classic_access); SIC(isc_info_db_class_server_access); SIC(isc_info_db_class_last_value); SIC(isc_info_db_code_rdb_eln); SIC(isc_info_db_code_rdb_vms); SIC(isc_info_db_code_interbase); SIC(isc_info_db_code_firebird); SIC(isc_info_db_code_last_value); #endif /* isc_info_firebird_version */ SIC(isc_info_number_messages); SIC(isc_info_max_message); SIC(isc_info_max_send); SIC(isc_info_max_receive); SIC(isc_info_state); SIC(isc_info_message_number); SIC(isc_info_message_size); SIC(isc_info_request_cost); SIC(isc_info_access_path); SIC(isc_info_req_select_count); SIC(isc_info_req_insert_count); SIC(isc_info_req_update_count); SIC(isc_info_req_delete_count); SIC(isc_info_rsb_end); SIC(isc_info_rsb_begin); SIC(isc_info_rsb_type); SIC(isc_info_rsb_relation); SIC(isc_info_rsb_plan); SIC(isc_info_rsb_unknown); SIC(isc_info_rsb_indexed); SIC(isc_info_rsb_navigate); SIC(isc_info_rsb_sequential); SIC(isc_info_rsb_cross); SIC(isc_info_rsb_sort); SIC(isc_info_rsb_first); SIC(isc_info_rsb_boolean); SIC(isc_info_rsb_union); SIC(isc_info_rsb_aggregate); SIC(isc_info_rsb_merge); SIC(isc_info_rsb_ext_sequential); SIC(isc_info_rsb_ext_indexed); SIC(isc_info_rsb_ext_dbkey); SIC(isc_info_rsb_left_cross); SIC(isc_info_rsb_select); 
SIC(isc_info_rsb_sql_join); SIC(isc_info_rsb_simulate); SIC(isc_info_rsb_sim_cross); SIC(isc_info_rsb_once); SIC(isc_info_rsb_procedure); SIC(isc_info_rsb_and); SIC(isc_info_rsb_or); SIC(isc_info_rsb_dbkey); SIC(isc_info_rsb_index); SIC(isc_info_req_active); SIC(isc_info_req_inactive); SIC(isc_info_req_send); SIC(isc_info_req_receive); SIC(isc_info_req_select); SIC(isc_info_req_sql_stall); SIC(isc_info_blob_num_segments); SIC(isc_info_blob_max_segment); SIC(isc_info_blob_total_length); SIC(isc_info_blob_type); SIC(isc_info_tra_id); SIC(isc_action_svc_backup); SIC(isc_action_svc_restore); SIC(isc_action_svc_repair); SIC(isc_action_svc_add_user); SIC(isc_action_svc_delete_user); SIC(isc_action_svc_modify_user); SIC(isc_action_svc_display_user); SIC(isc_action_svc_properties); SIC(isc_action_svc_add_license); SIC(isc_action_svc_remove_license); SIC(isc_action_svc_db_stats); SIC(isc_action_svc_get_ib_log); SIC(isc_info_svc_svr_db_info); SIC(isc_info_svc_get_license); SIC(isc_info_svc_get_license_mask); SIC(isc_info_svc_get_config); SIC(isc_info_svc_version); SIC(isc_info_svc_server_version); SIC(isc_info_svc_implementation); SIC(isc_info_svc_capabilities); SIC(isc_info_svc_user_dbpath); SIC(isc_info_svc_get_env); SIC(isc_info_svc_get_env_lock); SIC(isc_info_svc_get_env_msg); SIC(isc_info_svc_line); SIC(isc_info_svc_to_eof); SIC(isc_info_svc_timeout); SIC(isc_info_svc_get_licensed_users); SIC(isc_info_svc_limbo_trans); SIC(isc_info_svc_running); SIC(isc_info_svc_get_users); SIC(isc_spb_sec_userid); SIC(isc_spb_sec_groupid); SIC(isc_spb_sec_username); SIC(isc_spb_sec_password); SIC(isc_spb_sec_groupname); SIC(isc_spb_sec_firstname); SIC(isc_spb_sec_middlename); SIC(isc_spb_sec_lastname); /* Won't bother with license-related constants. */ /* */ SIC(isc_action_svc_backup); SIC(isc_spb_bkp_file); SIC(isc_spb_bkp_factor); SIC(isc_spb_bkp_length); /* begin bitmask components */ SIC(isc_spb_bkp_ignore_checksums); SIC(isc_spb_bkp_ignore_limbo); SIC(isc_spb_bkp_metadata_only); SIC(isc_spb_bkp_no_garbage_collect); SIC(isc_spb_bkp_old_descriptions); SIC(isc_spb_bkp_non_transportable); SIC(isc_spb_bkp_convert); SIC(isc_spb_bkp_expand); /* end bitmask components */ /* */ SIC(isc_action_svc_properties); SIC(isc_spb_prp_page_buffers); SIC(isc_spb_prp_sweep_interval); SIC(isc_spb_prp_shutdown_db); SIC(isc_spb_prp_deny_new_attachments); SIC(isc_spb_prp_deny_new_transactions); SIC(isc_spb_prp_reserve_space); SIC(isc_spb_prp_write_mode); SIC(isc_spb_prp_access_mode); SIC(isc_spb_prp_set_sql_dialect); SIC(isc_spb_prp_activate); SIC(isc_spb_prp_db_online); SIC(isc_spb_prp_reserve_space); SIC(isc_spb_prp_res_use_full); SIC(isc_spb_prp_res); SIC(isc_spb_prp_write_mode); SIC(isc_spb_prp_wm_async); SIC(isc_spb_prp_wm_sync); SIC(isc_spb_prp_access_mode); SIC(isc_spb_prp_am_readonly); SIC(isc_spb_prp_am_readwrite); SIC(isc_action_svc_repair); SIC(isc_spb_rpr_commit_trans); SIC(isc_spb_rpr_rollback_trans); SIC(isc_spb_rpr_recover_two_phase); SIC(isc_spb_tra_id); SIC(isc_spb_single_tra_id); SIC(isc_spb_multi_tra_id); SIC(isc_spb_tra_state); SIC(isc_spb_tra_state_limbo); SIC(isc_spb_tra_state_commit); SIC(isc_spb_tra_state_rollback); SIC(isc_spb_tra_state_unknown); SIC(isc_spb_tra_host_site); SIC(isc_spb_tra_remote_site); SIC(isc_spb_tra_db_path); SIC(isc_spb_tra_advise); SIC(isc_spb_tra_advise_commit); SIC(isc_spb_tra_advise_rollback); SIC(isc_spb_tra_advise_unknown); SIC(isc_spb_rpr_validate_db); SIC(isc_spb_rpr_sweep_db); SIC(isc_spb_rpr_mend_db); SIC(isc_spb_rpr_list_limbo_trans); SIC(isc_spb_rpr_check_db); 
SIC(isc_spb_rpr_ignore_checksum); SIC(isc_spb_rpr_kill_shadows); SIC(isc_spb_rpr_full); SIC(isc_action_svc_restore); SIC(isc_spb_res_buffers); SIC(isc_spb_res_page_size); SIC(isc_spb_res_length); SIC(isc_spb_res_access_mode); SIC(isc_spb_res_deactivate_idx); SIC(isc_spb_res_no_shadow); SIC(isc_spb_res_no_validity); SIC(isc_spb_res_one_at_a_time); SIC(isc_spb_res_replace); SIC(isc_spb_res_create); SIC(isc_spb_res_use_all_space); SIC(isc_spb_res_access_mode); SIC(isc_spb_res_am_readonly); SIC(isc_spb_prp_am_readonly); SIC(isc_spb_res_am_readwrite); SIC(isc_spb_prp_am_readwrite); SIC(isc_info_svc_svr_db_info); SIC(isc_spb_num_att); SIC(isc_spb_num_db); SIC(isc_spb_sts_data_pages); SIC(isc_spb_sts_db_log); SIC(isc_spb_sts_hdr_pages); SIC(isc_spb_sts_idx_pages); SIC(isc_spb_sts_sys_relations); SIC(isc_info_sql_select); SIC(isc_info_sql_bind); SIC(isc_info_sql_num_variables); SIC(isc_info_sql_describe_vars); SIC(isc_info_sql_describe_end); SIC(isc_info_sql_sqlda_seq); SIC(isc_info_sql_message_seq); SIC(isc_info_sql_type); SIC(isc_info_sql_sub_type); SIC(isc_info_sql_scale); SIC(isc_info_sql_length); SIC(isc_info_sql_null_ind); SIC(isc_info_sql_field); SIC(isc_info_sql_relation); SIC(isc_info_sql_owner); SIC(isc_info_sql_alias); SIC(isc_info_sql_sqlda_start); SIC(isc_info_sql_stmt_type); SIC(isc_info_sql_get_plan); SIC(isc_info_sql_records); SIC(isc_info_sql_batch_fetch); SIC(isc_info_sql_stmt_select); SIC(isc_info_sql_stmt_insert); SIC(isc_info_sql_stmt_update); SIC(isc_info_sql_stmt_delete); SIC(isc_info_sql_stmt_ddl); SIC(isc_info_sql_stmt_get_segment); SIC(isc_info_sql_stmt_put_segment); SIC(isc_info_sql_stmt_exec_procedure); SIC(isc_info_sql_stmt_start_trans); SIC(isc_info_sql_stmt_commit); SIC(isc_info_sql_stmt_rollback); SIC(isc_info_sql_stmt_select_for_upd); SIC(isc_info_sql_stmt_set_generator); #ifdef isc_info_sql_stmt_savepoint SIC(isc_info_sql_stmt_savepoint); #endif /* The following symbols are no longer available in FB 1.5rc4. As far as DSR * knows, they were included here for completeness, rather than because * kinterbasdb itself actually used them. */ #ifdef ISCCFG_LOCKMEM_KEY SIC(ISCCFG_LOCKMEM_KEY); SIC(ISCCFG_LOCKSIG_KEY); SIC(ISCCFG_EVNTMEM_KEY); SIC(ISCCFG_DBCACHE_KEY); SIC(ISCCFG_PRIORITY_KEY); SIC(ISCCFG_IPCMAP_KEY); SIC(ISCCFG_MEMMIN_KEY); SIC(ISCCFG_MEMMAX_KEY); SIC(ISCCFG_LOCKORDER_KEY); SIC(ISCCFG_ANYLOCKMEM_KEY); SIC(ISCCFG_ANYLOCKSEM_KEY); SIC(ISCCFG_ANYLOCKSIG_KEY); SIC(ISCCFG_ANYEVNTMEM_KEY); SIC(ISCCFG_LOCKHASH_KEY); SIC(ISCCFG_DEADLOCK_KEY); SIC(ISCCFG_LOCKSPIN_KEY); SIC(ISCCFG_CONN_TIMEOUT_KEY); SIC(ISCCFG_DUMMY_INTRVL_KEY); /* Since ISCCFG_TRACE_POOLS_KEY is marked "For internal use only" in ibase.h, * it is not loaded into the module's namespace. 
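 * (The surrounding #ifdef guards allow this file to compile against both
 * older headers that retain these ISCCFG_* symbols and newer Firebird
 * headers that have dropped them.)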
*/ SIC(ISCCFG_REMOTE_BUFFER_KEY); #endif /* ISCCFG_LOCKMEM_KEY */ #ifdef ISCCFG_NO_NAGLE_KEY SIC(ISCCFG_NO_NAGLE_KEY); #endif #ifdef ISCCFG_CPU_AFFINITY_KEY SIC(ISCCFG_CPU_AFFINITY_KEY); #endif return 0; fail: assert (PyErr_Occurred()); return -1; } /* _init_kiservices_ibase_header_constants */ static PyMethodDef _kiservices_GlobalMethods[] = { {"is_initialized", (PyCFunction) pyob_is_initialized, METH_NOARGS}, {"initialize_from", pyob_initialize_from, METH_VARARGS}, {"connect", pyob_SConnection_connect, METH_VARARGS}, {"close", pyob_SConnection_close, METH_VARARGS}, {"action_thin", pyob_action_thin, METH_VARARGS}, {"query_base", pyob_query_base, METH_VARARGS}, {"vax", pyob_isc_vax_integer, METH_VARARGS}, {NULL, NULL} }; DL_EXPORT(void) init_kiservices(void) { /* This function is called automatically when the module is first imported. * Python provides no way to recover from errors during C extension module * initialization, so error handling here is lax. */ PyObject *module = Py_InitModule("_kiservices", _kiservices_GlobalMethods); if (module == NULL) { goto fail; } if (init_kidb_exception_support() != 0) { PyErr_SetString(PyExc_ImportError, "Unable to initialize kinterbasdb exception support code." ); return; } /* Cause a bunch of constants defined at the C level to be loaded into the * module dictionary so that they'll be accessible to the services.py module * implemented on top of this C module. The Python-facing API implemented by * services.py is high-level enough that client programmers should not need * access to these constants. */ if (_init_kiservices_ibase_header_constants(module) != 0) { goto fail; } fail: /* There's really nothing we can do. */ return; } /* init_kiservices */ /****************** MODULE ADMINISTRATION FUNCTIONS:END ********************/ kinterbasdb-3.3.0/README0000644000175000001440000000032611130647414014067 0ustar pcisarusersKInterbasDB Python Database Module - README All of the KInterbasDB documentation (including the installation guide, usage guide, and change log) resides in the docs subdirectory. Start with docs/index.html kinterbasdb-3.3.0/_kisupport_lifo_linked_list.h0000644000175000001440000002077711130647414021155 0ustar pcisarusers/* KInterbasDB Python Package - Implementation of LIFO Linked List * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ #ifndef _KISUPPORT_LIFO_LINKED_LIST_H #define _KISUPPORT_LIFO_LINKED_LIST_H #define LIFO_LINKED_LIST_DEFINE_CONS( \ ListType, ListTypeQualified, ContainedType, ContainedTypeQualified, \ allocation_func \ ) \ static int _ ## ListType ## _cons(ListTypeQualified **list_slot, \ ContainedTypeQualified *cont, ListTypeQualified *next \ ) \ { \ /* Maintain a pointer to the previous occupant of the 'into' slot so we \ * can restore it if the mem alloc fails. 
*/ \ ListTypeQualified *prev_occupant = *list_slot; \ *list_slot = allocation_func(sizeof(ListType)); \ if (*list_slot == NULL) { \ *list_slot = prev_occupant; \ return -1; \ } \ /* Initialize the two fields of the new node: */ \ (*list_slot)->contained = cont; \ (*list_slot)->next = next; \ return 0; \ } #define LIFO_LINKED_LIST_DEFINE_ADD( \ ListType, ListTypeQualified, ContainedType, ContainedTypeQualified \ ) \ static int ListType ## _add( \ ListTypeQualified **list_slot, ContainedTypeQualified *cont \ ) \ { \ assert (list_slot != NULL); \ \ if (*list_slot == NULL) { \ if (_ ## ListType ## _cons(list_slot, cont, NULL) != 0) { \ return -1; \ } \ } else { \ ListTypeQualified *prev_head = *list_slot; \ if (_ ## ListType ## _cons(list_slot, cont, prev_head) != 0) { \ return -1; \ } \ } \ \ assert ((*list_slot)->contained == cont); \ \ return 0; \ } #define LIFO_LINKED_LIST_DEFINE_REMOVE( \ ListType, ListTypeQualified, ContainedType, ContainedTypeQualified, \ deallocation_func \ ) \ static int ListType ## _remove( \ ListTypeQualified **list_slot, ContainedTypeQualified *cont, \ boolean object_if_missing \ ) \ { \ ListTypeQualified *nodeBack; \ ListTypeQualified *nodeForward; \ \ /* Traverse the linked list looking for a node whose ->contained points to \ * cont: */ \ nodeBack = nodeForward = *list_slot; \ while (nodeForward != NULL && nodeForward->contained != cont) { \ nodeBack = nodeForward; \ nodeForward = nodeForward->next; \ } \ if (nodeForward == NULL) { \ if (!object_if_missing) { \ return 0; \ } else { \ /* Note that this calls the Py_* API; GIL must be held. */ \ raise_exception(InternalError, \ # ListType "_remove: node was not in list" \ ); \ return -1; \ } \ } \ \ /* Unlink nodeForward: */ \ if (nodeBack == nodeForward) { \ /* We found the desired node in the first position of the linked \ * list. */ \ *list_slot = nodeForward->next; \ } else { \ nodeBack->next = nodeForward->next; \ } \ deallocation_func((ListType *) nodeForward); \ \ return 0; \ } #define LIFO_LINKED_LIST_DEFINE_RELEASE__WITH_ALLOWED_TO_RAISE_SPEC( \ allowed_to_raise, \ ListType, ListTypeQualified, ContainedType, ContainedTypeQualified, \ deallocation_func \ ) \ static int ListType ## _release(ListTypeQualified **list_slot) \ { \ ListTypeQualified *list; \ assert (list_slot != NULL); \ list = *list_slot; \ if (list == NULL) { \ /* It's already been released. */ \ return 0; \ } \ \ do { \ assert (list->contained != NULL); \ \ /* Direct the contained object not to try to unlink itself upon \ * closure, because that's the very thing we're doing! */ \ if (ContainedType ## _untrack(list->contained, allowed_to_raise) != 0) { \ /* We weren't able to collect this node properly. Make sure \ * list_slot points to this node (which is currently the head of the \ * linked list) so that the entire linked list isn't lost. 
*/ \ list_slot = &list; \ return -1; \ } else { \ ListTypeQualified *next_list = list->next; \ deallocation_func((ListType *) list); \ list = next_list; \ } \ } while (list != NULL); \ \ *list_slot = NULL; \ return 0; \ } #define LIFO_LINKED_LIST_DEFINE_RELEASE( \ ListType, ListTypeQualified, ContainedType, ContainedTypeQualified, \ deallocation_func \ ) \ LIFO_LINKED_LIST_DEFINE_RELEASE__WITH_ALLOWED_TO_RAISE_SPEC(TRUE, \ ListType, ListTypeQualified, ContainedType, ContainedTypeQualified, \ deallocation_func \ ) #define LIFO_LINKED_LIST_DEFINE_TRAVERSE( \ ListType, ListTypeQualified, ContainedType, ContainedTypeQualified \ ) \ static int ListType ## _traverse(ListTypeQualified *node_start, \ ListType ## MappedFunction modifier \ ) \ { \ ListTypeQualified *node_cur = node_start; \ ListTypeQualified *node_prev = NULL; \ \ while (node_cur != NULL) { \ if (modifier(node_prev, node_cur) != 0) { \ goto fail; \ } \ node_prev = node_cur; \ node_cur = node_cur->next; \ } \ \ return 0; \ \ fail: \ assert (PyErr_Occurred()); \ return -1; \ } #define LIFO_LINKED_LIST_DEFINE_TRAVERSE_NOQUAL(ListType, ContainedType) \ LIFO_LINKED_LIST_DEFINE_TRAVERSE( \ ListType, ListType, ContainedType, ContainedType \ ) #define LIFO_LINKED_LIST_DEFINE_BASIC_METHODS_( \ ListType, ListTypeQualified, ContainedType, ContainedTypeQualified, \ allocation_func, deallocation_func \ ) \ LIFO_LINKED_LIST_DEFINE_CONS( \ ListType, ListTypeQualified, ContainedType, ContainedTypeQualified, \ allocation_func \ ) \ LIFO_LINKED_LIST_DEFINE_ADD( \ ListType, ListTypeQualified, ContainedType, ContainedTypeQualified \ ) \ LIFO_LINKED_LIST_DEFINE_REMOVE( \ ListType, ListTypeQualified, ContainedType, ContainedTypeQualified, \ deallocation_func \ ) \ LIFO_LINKED_LIST_DEFINE_RELEASE( \ ListType, ListTypeQualified, ContainedType, ContainedTypeQualified, \ deallocation_func \ ) #define LIFO_LINKED_LIST_DEFINE_BASIC_METHODS_PYALLOC( \ ListType, ListTypeQualified, ContainedType, ContainedTypeQualified \ ) \ /* The GIL must be held during method calls to this linked list type. */ \ LIFO_LINKED_LIST_DEFINE_BASIC_METHODS_( \ ListType, ListTypeQualified, ContainedType, ContainedTypeQualified, \ kimem_main_malloc, kimem_main_free \ ) #define LIFO_LINKED_LIST_DEFINE_BASIC_METHODS_PYALLOC_NOQUAL( \ ListType, ContainedType \ ) \ LIFO_LINKED_LIST_DEFINE_BASIC_METHODS_PYALLOC( \ ListType, ListType, ContainedType, ContainedType \ ) #define LIFO_LINKED_LIST_DEFINE_BASIC_METHODS_SYSALLOC( \ ListType, ListTypeQualified, ContainedType, ContainedTypeQualified \ ) \ /* The GIL does not need to be held during method calls to this linked \ * list type. 
*/ \ LIFO_LINKED_LIST_DEFINE_BASIC_METHODS_( \ ListType, ListTypeQualified, ContainedType, ContainedTypeQualified, \ kimem_plain_malloc, kimem_plain_free \ ) #define LIFO_LINKED_LIST_DEFINE_BASIC_METHODS_SYSALLOC_NOQUAL( \ ListType, ContainedType \ ) \ LIFO_LINKED_LIST_DEFINE_BASIC_METHODS_SYSALLOC( \ ListType, ListType, ContainedType, ContainedType \ ) PyObject *pyob_TrackerToList(AnyTracker *tracker) { PyObject *py_list = PyList_New(0); if (py_list == NULL) { goto fail; } while (tracker != NULL) { PyObject *element = (PyObject *) tracker->contained; assert (element != NULL); if (PyList_Append(py_list, element) != 0) { goto fail; } tracker = tracker->next; } return py_list; fail: assert (PyErr_Occurred()); Py_XDECREF(py_list); return NULL; } // pyob_TrackerToList #endif /* not def _KISUPPORT_LIFO_LINKED_LIST_H */ kinterbasdb-3.3.0/MANIFEST.in0000644000175000001440000000740511133044261014744 0ustar pcisarusers# Manifest file for building a kinterbasdb source or RPM distribution # # I listed the files individually because typically, several files relevant # only to the developers end up sitting around the source directories; there's # no reason to pass them on to users. # # This command is a start, but doesn't catch all, of course: # dir /B *.c *.h *.py | sort > temp.txt # setup-related: include MANIFEST.in include setup.cfg include setup.py include version.txt # core-Python: include __init__.py include _array_descriptor.py include _connection_timeout.py include _request_buffer_builder.py include k_exceptions.py include services.py include typeconv_23plus.py include typeconv_23plus_lowmem.py include typeconv_24plus.py include typeconv_backcompat.py include typeconv_datetime_mx.py include typeconv_datetime_naked.py include typeconv_datetime_stdlib.py include typeconv_fixed_decimal.py include typeconv_fixed_fixedpoint.py include typeconv_fixed_stdlib.py include typeconv_naked.py include typeconv_text_unicode.py # core-C: include _kiconversion.c include _kiconversion_array.c include _kiconversion_blob.c include _kiconversion_blob_nonstandard.c include _kiconversion_blob_streaming.c include _kiconversion_field_precision.c include _kiconversion_from_db.c include _kiconversion_to_db.c include _kiconversion_type_translation.c include _kicore_connection.c include _kicore_connection_timeout.c include _kicore_connection_timeout.h include _kicore_create_drop_db.c include _kicore_cursor.c include _kicore_preparedstatement.c include _kicore_transaction.c include _kicore_transaction_distributed.c include _kicore_transaction_support.c include _kicore_xsqlda.c include _kievents.c include _kievents.h include _kievents_infra.c include _kilock.h include _kimem.h include _kinterbasdb.c include _kinterbasdb.h include _kinterbasdb_constants.c include _kinterbasdb_exception_functions.c include _kinterbasdb_exception_functions_without_python.c include _kinterbasdb_exception_functions_without_python.h include _kinterbasdb_exceptions.h include _kiservices.c include _kiservices.h include _kisupport.h include _kisupport_lifo_linked_list.h include _kisupport_platform.c include _kisupport_platform.h include _kisupport_platform_posix.c include _kisupport_platform_windows.c include _kisupport_threadsafe_fifo_queue.c include _kisupport_threadsafe_fifo_queue.h include _kisupport_time.c # documentation: include README include docs/index.html include docs/installation.html include docs/tutorial.html include docs/Python-DB-API-2.0.html include docs/python-db-api-compliance.html include docs/beyond-python-db-api.html include 
docs/concurrency.html include docs/thread-safety-overview.html include docs/links.html include docs/changelog.html include docs/license.html include docs/genindex.html include docs/modindex.html include docs/objects.inv include docs/search.html include docs/searchindex.js include docs/_static/contents.png include docs/_static/default.css include docs/_static/doctools.js include docs/_static/file.png include docs/_static/jquery.js include docs/_static/minus.png include docs/_static/navigation.png include docs/_static/plus.png include docs/_static/pygments.css include docs/_static/rightsidebar.css include docs/_static/searchtools.js include docs/_static/sphinxdoc.css include docs/_static/stickysidebar.css include docs/_static/traditional.css include docs/_sources/index.txt include docs/_sources/installation.txt include docs/_sources/tutorial.txt include docs/_sources/Python-DB-API-2.0.txt include docs/_sources/python-db-api-compliance.txt include docs/_sources/beyond-python-db-api.txt include docs/_sources/concurrency.txt include docs/_sources/thread-safety-overview.txt include docs/_sources/links.txt include docs/_sources/changelog.txt include docs/_sources/license.txt global-exclude CVS kinterbasdb-3.3.0/k_exceptions.py0000644000175000001440000000246311130647414016260 0ustar pcisarusers# KInterbasDB Python Package - Standard DB API Exceptions Import-Cycle-Breaker # # Version 3.1 # # The following contributors hold Copyright (C) over their respective # portions of code (see license.txt for details): # # [Original Author (maintained through version 2.0-0.3.1):] # 1998-2001 [alex] Alexander Kuznetsov # [Maintainers (after version 2.0-0.3.1):] # 2001-2002 [maz] Marek Isalski # 2002-2004 [dsr] David Rushby # [Contributors:] # 2001 [eac] Evgeny A. Cherkashin # 2001-2002 [janez] Janez Jere __all__ = ( 'Warning', 'Error', 'InterfaceError', 'DatabaseError', 'DataError', 'OperationalError', 'IntegrityError', 'InternalError', 'ProgrammingError', 'NotSupportedError', ) # The main module of kinterbasdb will initialize these exception references. Warning = None Error = None InterfaceError = None DatabaseError = None DataError = None OperationalError = None IntegrityError = None InternalError = None ProgrammingError = None NotSupportedError = None kinterbasdb-3.3.0/typeconv_naked.py0000644000175000001440000000222611130647414016573 0ustar pcisarusers# KInterbasDB Python Package - Type Conv : Minimal # # Version 3.1 # # The following contributors hold Copyright (C) over their respective # portions of code (see license.txt for details): # # [Original Author (maintained through version 2.0-0.3.1):] # 1998-2001 [alex] Alexander Kuznetsov # [Maintainers (after version 2.0-0.3.1):] # 2001-2002 [maz] Marek Isalski # 2002-2004 [dsr] David Rushby # [Contributors:] # 2001 [eac] Evgeny A. Cherkashin # 2001-2002 [janez] Janez Jere # This module can be conveniently activated as the process-wide default via: # kinterbasdb.init(type_conv=0) from kinterbasdb import typeconv_datetime_naked from kinterbasdb import typeconv_fixed_stdlib _underlying_modules = (typeconv_datetime_naked, typeconv_fixed_stdlib) # Load the required members from the underlying modules into the namespace of # this module. 
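#
# For illustration, this module can be activated process-wide and then used
# as follows (a hypothetical session; the DSN and credentials are
# placeholders):
#
#   import kinterbasdb
#   kinterbasdb.init(type_conv=0)
#   con = kinterbasdb.connect(dsn='localhost:/tmp/test.fdb',
#       user='sysdba', password='masterkey')
#   cur = con.cursor()
#   cur.execute("select cast('2003-12-31' as date) from rdb$database")
#   print cur.fetchone()  # with naked converters, the date arrives as a
#                         # raw tuple such as ((2003, 12, 31),)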
globalz = globals() for m in _underlying_modules: for req_member in m.__all__: globalz[req_member] = getattr(m, req_member) del globalz kinterbasdb-3.3.0/_kinterbasdb_exception_functions.c0000644000175000001440000003104511130647414022152 0ustar pcisarusers/* KInterbasDB Python Package - Implementation of "Exception-Raising" Functions * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ /******************** EXCEPTION FUNCTIONS:BEGIN ********************/ #ifndef _KINTERBASDB_EXCEPTION_FUNCTIONS_C #define _KINTERBASDB_EXCEPTION_FUNCTIONS_C #include "_kinterbasdb_exceptions.h" static PyObject *exc_support__str_join; static PyObject *exc_support__str_splitlines; static PyObject *exc_support__str_startswith; static PyObject *exc_support__str_exception_header_start; static PyObject *exc_support__str_newline; static PyObject *exc_support__str_spaces_2; static PyObject *exc_support__str_tb_caption; /* This file is included in both _kinterbasdb.c and _kiservices.c, which are * compiled in separate compilation units. init_kidb_exception_support * therefore needs to be called once in *each* compilation unit. * The required functions could be made non-static and the _kiservices dynamic * lib could then be linked against the _kinterbasdb dynamic lib, but that's * not worth the complications to the build process. */ static int init_kidb_exception_support(void) { #define IKES_INIT_CACHED_STRING(var, str_const) \ var = PyString_FromString(str_const); \ if (var == NULL) { goto fail; } IKES_INIT_CACHED_STRING(exc_support__str_join, "join"); IKES_INIT_CACHED_STRING(exc_support__str_splitlines, "splitlines"); IKES_INIT_CACHED_STRING(exc_support__str_startswith, "startswith"); IKES_INIT_CACHED_STRING(exc_support__str_exception_header_start, "exception " ); IKES_INIT_CACHED_STRING(exc_support__str_newline, "\n"); IKES_INIT_CACHED_STRING(exc_support__str_spaces_2, " "); IKES_INIT_CACHED_STRING(exc_support__str_tb_caption, "SQL traceback (most recent call last):" ); return 0; fail: /* This function is indirectly called by the module loader, which makes no * provision for error recovery. */ return -1; } /* init_kidb_exception_support */ void raise_sql_exception_exc_type_filter( PyObject *exc_type, const char *preamble, ISC_STATUS *status_vector, PyObject *filter ) { /* Given the type of exception to raise (exc_type), an introductory message * (preamble), and the status vector into which a Firebird API function has * stored numeric error codes (status_vector), set a Python exception bearing * the following payload: * (int numeric database API error code, str error message) */ const ISC_STATUS *status_vector_start = status_vector; char buf[MAX_ISC_ERROR_MESSAGE_BUFFER_SIZE]; /* Buffer overflow potential here is ultra-low, since only a single error * message supplied by the database library's error code interpretation * function is placed in the fixed-size buffer, and in FB 2.0 and later, * the interpretation function checks the size of the output buffer. 
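 *
 * For context, a typical call site of this family of functions elsewhere in
 * kinterbasdb follows this pattern (a sketch; the surrounding names are
 * illustrative):
 *
 *   if (DB_API_ERROR(status_vector)) {
 *     raise_sql_exception(OperationalError, "isc_attach_database: ",
 *       status_vector
 *     );
 *     goto fail;
 *   }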
*/ ISC_LONG db_error_code; #ifdef USE_MODERN_INTERP_FUNC const #endif ISC_STATUS *ptr_status_vector = #ifdef USE_MODERN_INTERP_FUNC (const ISC_STATUS *) #endif status_vector ; Py_ssize_t i; PyObject *py_msg = NULL; PyObject *py_msg_lines = NULL; memset(buf, '\0', MAX_ISC_ERROR_MESSAGE_BUFFER_SIZE); py_msg_lines = PyList_New(0); if (py_msg_lines == NULL) { goto fail; } if (preamble != NULL) { PyObject *py_preamble = PyString_FromString(preamble); if (py_preamble == NULL) { goto fail; } else { const int append_res = PyList_Append(py_msg_lines, py_preamble); Py_DECREF(py_preamble); if (append_res != 0) { goto fail; } } } #define RSE__CLEAR_SEGMENT_PYVARS \ if (py_segment != NULL) { \ Py_DECREF(py_segment); \ py_segment = NULL; \ } \ \ if (py_segment_lines != NULL) { \ Py_DECREF(py_segment_lines); \ py_segment_lines = NULL; \ } #define RSE__FAIL \ assert (PyErr_Occurred()); \ RSE__CLEAR_SEGMENT_PYVARS; \ goto fail; { PyObject *py_segment = NULL; PyObject *py_segment_lines = NULL; ENTER_GDAL db_error_code = isc_sqlcode(status_vector); LEAVE_GDAL for (;;) { ISC_STATUS interp_result; ENTER_GDAL interp_result = #ifdef USE_MODERN_INTERP_FUNC fb_interpret(buf, MAX_ISC_ERROR_MESSAGE_BUFFER_SIZE, &ptr_status_vector ) #else isc_interprete(buf, &ptr_status_vector) #endif ; LEAVE_GDAL if (interp_result == 0) { /* No more messages available. */ break; } py_segment = PyString_FromString(buf); if (py_segment == NULL) { RSE__FAIL; } py_segment_lines = PyObject_CallMethodObjArgs(py_segment, exc_support__str_splitlines, NULL ); if (py_segment_lines == NULL) { RSE__FAIL; } assert (PyList_CheckExact(py_segment_lines)); if ( ptr_status_vector - status_vector_start + 1 < STATUS_VECTOR_SIZE && *(ptr_status_vector + 1) == isc_stack_trace ) { if (PyList_Append(py_segment_lines, exc_support__str_tb_caption) != 0) { RSE__FAIL; } } /* If an exception-type-filter was provided, call it now. */ if (filter != NULL) { ISC_STATUS raw_code = 0; PyObject *py_raw_code = NULL; PyObject *py_sql_code = NULL; PyObject *py_filter_res = NULL; /* Extract the "raw" code from the status vector: */ { /* These constants can be found in the src/include/gen/codes.h file * in a Firebird source distribution, but they're not included in * the normal headers. */ const long gds_arg_end = 0; const long gds_arg_gds = 1; ISC_STATUS *sv = status_vector; Py_ssize_t sv_index = 0; /* Solely for bounds assertion below. */ while (*sv != gds_arg_end) { assert (sv_index < STATUS_VECTOR_SIZE); if (*sv == gds_arg_gds) { raw_code = status_vector[1]; break; } sv++; sv_index++; } } /* Create filter arguments: */ /* 2007.02.10: FB 2.1 fix: ISC_STATUS can be > INT_MAX in FB 2.1: */ py_raw_code = PythonIntOrLongFrom64BitValue(raw_code); if (py_raw_code == NULL) { goto exception_type_filter_finish; } py_sql_code = PyInt_FromLong(db_error_code); if (py_sql_code == NULL) { goto exception_type_filter_finish; } /* Call filter: */ py_filter_res = PyObject_CallFunctionObjArgs(filter, py_raw_code, py_sql_code, py_segment_lines, NULL ); if (py_filter_res == NULL) { goto exception_type_filter_finish; } if (py_filter_res != Py_None) { /* The filter has indicated that a different exception type should be * used. */ /* We don't own a reference to exc_type, so although we'll keep a * reference to py_filter_res, we'll discard our ownership of it. * Because this feature is only used by kinterbasdb's internals, we * can guarantee the exception type won't be collected when we DECREF * it. 
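 *
 * (For reference, the filter contract as exercised above: the filter is
 * invoked as filter(raw_code, sql_code, message_lines), and it returns
 * either None, meaning "keep the original exc_type", or an alternate
 * exception type object to be raised instead.)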
*/ assert (py_filter_res->ob_refcnt >= 2); exc_type = py_filter_res; } Py_DECREF(py_filter_res); /* Fall through to finish: */ exception_type_filter_finish: Py_XDECREF(py_raw_code); Py_XDECREF(py_sql_code); if (PyErr_Occurred()) { RSE__FAIL; } } for (i = 0; i < PyList_GET_SIZE(py_segment_lines); i++) { PyObject *line = PyList_GET_ITEM(py_segment_lines, i); boolean should_indent = TRUE; /* If the current line is the first in this segment, and it starts with * 'exception ', don't indent it: */ if (i == 0) { PyObject *py_is_exception_header_line = PyObject_CallMethodObjArgs( line, exc_support__str_startswith, exc_support__str_exception_header_start, NULL ); if (py_is_exception_header_line == NULL) { RSE__FAIL; } else { const int is_exception_header_line = PyObject_IsTrue(py_is_exception_header_line); Py_DECREF(py_is_exception_header_line); if (is_exception_header_line == -1) { RSE__FAIL; } else if (is_exception_header_line == 1) { should_indent = FALSE; } } } /* If the current line is the kinterbasdb-injected header for a SQL * traceback, don't indent it: */ if (should_indent) { const int cmp_res = PyObject_Compare(exc_support__str_tb_caption, line ); if (PyErr_Occurred()) { RSE__FAIL; } if (cmp_res == 0) { should_indent = FALSE; } } if (!should_indent) { if (PyList_Append(py_msg_lines, line) != 0) { RSE__FAIL; } } else { /* Establish a new reference to the string consisting of two spaces, * then concatenate the original line to it (our reference to the * original line is borrowed, so there's no need to use * PyString_ConcatAndDel). */ PyObject *line_indented = exc_support__str_spaces_2; Py_INCREF(line_indented); PyString_Concat(&line_indented, line); if (line_indented == NULL) { RSE__FAIL; } { const int append_res = PyList_Append(py_msg_lines, line_indented); Py_DECREF(line_indented); if (append_res != 0) { RSE__FAIL; } } } } /* for line in py_segment_lines */ RSE__CLEAR_SEGMENT_PYVARS; } } /* py_msg = '\n'.join(py_msg_lines) */ py_msg = PyObject_CallMethodObjArgs(exc_support__str_newline, exc_support__str_join, py_msg_lines, NULL ); if (py_msg == NULL) { goto fail; } { /* Raise an exception whose payload consists of a tuple of the form * (error_code, error_message): */ PyObject *exc_tuple = Py_BuildValue("(lO)", (long) db_error_code, py_msg); if (exc_tuple == NULL) { goto fail; } PyErr_SetObject(exc_type, exc_tuple); Py_DECREF(exc_tuple); } goto exit; fail: assert (PyErr_Occurred()); /* Fall through to exit: */ exit: Py_XDECREF(py_msg); Py_XDECREF(py_msg_lines); return; } /* raise_sql_exception */ void raise_sql_exception( PyObject *exc_type, const char *preamble, ISC_STATUS *status_vector ) { raise_sql_exception_exc_type_filter(exc_type, preamble, status_vector, NULL); } static void raise_exception_with_numeric_error_code( PyObject *exc_type, const LONG_LONG error_code, /* 2007.02.10: FB 2.1 fix: long->LONG_LONG */ const char *description ) { /* raise_exception_with_numeric_error_code allows the database API error code * to be set directly, rather than having it extracted from a status vector. * Thus, raise_exception_with_numeric_error_code might be said to "fall * midway between raise_sql_exception and raise_exception". 
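 *
 * A representative call (a sketch; the numeric error code shown is
 * illustrative):
 *
 *   raise_exception_with_numeric_error_code(OperationalError,
 *     (LONG_LONG) 335544721, "Unable to complete network request."
 *   );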
*/ PyObject *exceptionTuple = Py_BuildValue("(Ls)", error_code, description); if (exceptionTuple == NULL) { return; } PyErr_SetObject(exc_type, exceptionTuple); Py_DECREF(exceptionTuple); } /* raise_exception_with_numeric_error_code */ void raise_exception(PyObject *exc_type, const char *description) { raise_exception_with_numeric_error_code(exc_type, 0, description); } /* raise_exception */ #define SUPPRESS_EXCEPTION \ suppress_python_exception_if_any(__FILE__, __LINE__) #define SUPPRESS_DB_API_ERROR(sv, preamble) \ suppress_database_api_error(sv, preamble, __FILE__, __LINE__) static void suppress_python_exception_if_any( const char *file_name, const int line ) { if (PyErr_Occurred()) { fprintf(stderr, "kinterbasdb ignoring exception\n"); fprintf(stderr, " on line %d\n", line); fprintf(stderr, " of file %s:\n ", file_name); PyErr_Print(); /* PyErr_Print cleared the exception: */ assert (!PyErr_Occurred()); } } /* suppress_python_exception_if_any */ /******************** EXCEPTION FUNCTIONS:END ********************/ #endif /* _KINTERBASDB_EXCEPTION_FUNCTIONS_C */ kinterbasdb-3.3.0/_connection_timeout.py0000644000175000001440000000461411130647414017631 0ustar pcisarusers# KInterbasDB Python Package - Python Wrapper for Core # # Version 3.3 # # The following contributors hold Copyright (C) over their respective # portions of code (see license.txt for details): # # [Original Author (maintained through version 2.0-0.3.1):] # 1998-2001 [alex] Alexander Kuznetsov # [Maintainers (after version 2.0-0.3.1):] # 2001-2002 [maz] Marek Isalski # 2002-2006 [dsr] David Rushby # [Contributors:] # 2001 [eac] Evgeny A. Cherkashin # 2001-2002 [janez] Janez Jere # This module is private. import threading _lock = threading.Lock() _objects = {} def startTimeoutThreadIfNecessary(cttMain, cttStopFunc): _lock.acquire() try: if 'ctt' not in _objects: startedEvent = threading.Event() # Start the CTT: ctt = ConnectionTimeoutThread(cttMain, startedEvent) ctt.start() _objects['ctt'] = ctt # Wait for the CTT to indicate that it has finished starting: startedEvent.wait() del startedEvent _objects['stopFunc'] = cttStopFunc finally: _lock.release() def isTimeoutThreadStarted(): _lock.acquire() try: return 'ctt' in _objects finally: _lock.release() def stopConnectionTimeoutThread(): _lock.acquire() try: _objects['stopFunc']() assert not _objects['ctt'].isAlive() _objects.clear() finally: _lock.release() class ConnectionTimeoutThread(threading.Thread): # This class exists to facilitate: # - Proper bootstrapping of Python thread state onto the # ConnectionTimeoutThread. # - Communication between Python code and the ConnectionTimeoutThread. # # All of the "real functionality" of the ConnectionTimeoutThread is written # in C, and most of it executes with the GIL released.
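    #
    # The startup handshake driven by startTimeoutThreadIfNecessary() above,
    # in miniature (a generic sketch, not part of this module's API):
    #
    #   started = threading.Event()
    #   worker = ConnectionTimeoutThread(cttMain, started)
    #   worker.start()
    #   started.wait()  # blocks until cttMain signals that startup finished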
def __init__(self, cttMain, startedEvent): threading.Thread.__init__(self, name='kinterbasdb_ConTimeoutThread') self.setDaemon(True) self._cttMain = cttMain self._startedEvent = startedEvent def run(self): startedEvent = self._startedEvent del self._startedEvent cttMain = self._cttMain del self._cttMain cttMain(self, startedEvent) kinterbasdb-3.3.0/_kisupport_platform.c0000644000175000001440000000022211130647414017451 0ustar pcisarusers#include "_kisupport.h" #ifdef PLATFORM_WINDOWS #include "_kisupport_platform_windows.c" #else #include "_kisupport_platform_posix.c" #endif kinterbasdb-3.3.0/_kicore_transaction.c0000644000175000001440000016245311130647414017405 0ustar pcisarusers/* KInterbasDB Python Package - Implementation of Transaction class * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ static int TransactionTracker_add(TransactionTracker **list_slot, Transaction *cont ); static int TransactionTracker_remove(TransactionTracker **list_slot, Transaction *cont, boolean ); static int Transaction_ensure_active(Transaction *self, PyObject *py_tpb); static void Transaction_stats_clear(Transaction *self); static int Transaction_close_with_unlink( Transaction *self, boolean allowed_to_raise ); static int Transaction_close_without_unlink( Transaction *self, boolean allowed_to_raise ); static PyObject *pyob_Transaction_cursor(Transaction *self); static PyObject *pyob_Transaction_convert_and_validate_tpb( PyObject *py_tpb_raw ); #define Transaction_has_been_untracked(trans) ((trans)->con == NULL) #ifdef ENABLE_CONNECTION_TIMEOUT #define TRANS_REQUIRE_OPEN_(self, failure_action) \ if (!Transaction_is_not_closed(self)) { \ if (Transaction_con_timed_out(self)) { \ raise_exception(ConnectionTimedOut, "This Transaction's Connection" \ " timed out; the Transaction can no longer be used." \ ); \ } else { \ raise_exception(ProgrammingError, \ "I/O operation on closed Transaction" \ ); \ } \ failure_action; \ } else { \ /* If the transaction claims it's open, verify that its con and \ * con_python_wrapper members are not NULL: */ \ assert ((self)->con != NULL); \ assert ((self)->con_python_wrapper != NULL); \ } #else #define TRANS_REQUIRE_OPEN_(self, failure_action) \ if (!Transaction_is_not_closed(self)) { \ raise_exception(ProgrammingError, \ "I/O operation on closed Transaction" \ ); \ failure_action; \ } #endif #define TRANS_REQUIRE_OPEN(self) TRANS_REQUIRE_OPEN_(self, return NULL) /************* Transaction INITIALIZATION AND DESTRUCTION:BEGIN **************/ static void Transaction_struct_raw_init(Transaction *self) { /* Nullify all of the self's fields first, so that if one of the field * initializations that requires additional allocation fails, the cleanup * code can check each field without fear of referring to uninitialized * memory. 
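 *
 * The same defensive idiom in miniature (a generic sketch, not code from
 * this file):
 *
 *   obj->a = NULL;                 // nullify every field first...
 *   obj->b = NULL;
 *   obj->a = malloc(SIZE_A);       // ...then perform the allocations
 *   if (obj->a == NULL) { goto fail; }
 *   obj->b = malloc(SIZE_B);
 *   if (obj->b == NULL) { goto fail; }
 *   return 0;
 * fail:
 *   free(obj->a);                  // safe: free(NULL) is a no-op
 *   free(obj->b);
 *   return -1;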
*/ self->state = TR_STATE_CREATED; self->con = NULL; self->con_python_wrapper = NULL; self->trans_handle = NULL_TRANS_HANDLE; self->group = NULL; self->default_tpb = NULL; self->open_cursors = NULL; self->open_blobreaders = NULL; self->n_physical_transactions_started = 0; self->n_prepared_statements_executed_since_current_phys_start = 0; } /* Transaction_struct_raw_init */ static PyObject *pyob_Transaction_new(PyTypeObject *subtype, PyObject *args, PyObject *kwargs ) { Transaction *self = (Transaction *) subtype->tp_alloc(subtype, 0); if (self == NULL) { goto fail; } Transaction_struct_raw_init(self); return (PyObject *) self; fail: /* Lack of assert (PyErr_Occurred()) here is deliberate. */ Py_XDECREF(self); return NULL; } /* pyob_Transaction_new */ static int Transaction_init(Transaction *self, PyObject *args, PyObject *kwargs ) { static char *kwarg_list[] = {"con", "tpb", NULL}; CConnection *con_owned_ref = NULL; CConnection *con_unowned_ref = NULL; PyObject *default_tpb_raw = NULL; { /* Scope for the ambiguously named object con_wrapper. */ PyObject *con_wrapper; assert (self->state == TR_STATE_CREATED); if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|O", kwarg_list, &con_wrapper, &default_tpb_raw )) { goto fail; } /* Validate the provided connection. If the caller supplied a CConnection * instance directly, instead of its Python wrapper object, then we can * short-circuit. */ if (PyObject_TypeCheck(con_wrapper, &ConnectionType)) { Py_INCREF(con_wrapper); con_owned_ref = (CConnection *) con_wrapper; } else { PyObject *con_maybe = PyObject_GetAttr(con_wrapper, shared___s__C_con); if (con_maybe == NULL) { goto fail; } if (!PyObject_TypeCheck(con_maybe, &ConnectionType)) { raise_exception(InternalError, "Transaction_init: _C_con is not of" " type ConnectionType." ); Py_DECREF(con_maybe); goto fail; } /* In this case, ownership of the reference is passed into con_owned_ref * and from there into self->con. When it makes the transition from * con_owned_ref to self->con, this function loses ownership over the * reference, and therefore uses con_unowned_ref to refer to it from that * point forward. 
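 *
 * The owned-versus-borrowed handoff in miniature (a generic sketch):
 *
 *   PyObject *owned = PyObject_GetAttr(o, name);  // new (owned) reference
 *   if (owned == NULL) { goto fail; }
 *   self->member = owned;  // the struct now owns the reference...
 *   owned = NULL;          // ...so the local alias relinquishes it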
*/ con_owned_ref = (CConnection *) con_maybe; } } /* end of scope for the ambiguously named object con_wrapper */ /* Validate default_tpb_raw and store it in self->default_tpb, if it was * specified: */ assert (self->default_tpb == NULL); if (default_tpb_raw != NULL && default_tpb_raw != Py_None) { self->default_tpb = pyob_Transaction_convert_and_validate_tpb( default_tpb_raw ); if (self->default_tpb == NULL) { goto fail; } assert (PyString_CheckExact(self->default_tpb)); } assert (con_owned_ref != NULL); CON_ACTIVATE(con_owned_ref, goto fail); /* CON_ACTIVATE should've verified this already: */ assert (con_owned_ref != null_connection); assert (con_unowned_ref == NULL); self->con = con_unowned_ref = con_owned_ref; con_owned_ref = NULL; /* Create owned reference to the kinterbasdb.Connection Python wrapper * instance that sits on top of CConnection self->con: */ assert (con_unowned_ref->python_wrapper_obj != NULL); Py_INCREF(con_unowned_ref->python_wrapper_obj); self->con_python_wrapper = con_unowned_ref->python_wrapper_obj; /* self->con_python_wrapper is supposed to be an instance of Python class * kinterbasdb.Connection, not an instance of CConnection: */ assert (!PyObject_TypeCheck(self->con_python_wrapper, &ConnectionType)); /* Enter self in the connection's Transaction tracker: */ if (TransactionTracker_add(&con_unowned_ref->transactions, self) != 0) { goto fail_with_passivation; } self->state = TR_STATE_RESOLVED; CON_PASSIVATE(con_unowned_ref); assert (con_owned_ref == NULL); assert (self->con != NULL); assert (self->con_python_wrapper != NULL); return 0; fail_with_passivation: assert (PyErr_Occurred()); assert (con_owned_ref == NULL); assert (con_unowned_ref != NULL); CON_PASSIVATE(con_unowned_ref); /* Fall through to fail: */ fail: assert (PyErr_Occurred()); if (con_owned_ref != NULL) { assert (con_unowned_ref == NULL); Py_DECREF(con_owned_ref); } return -1; } /* Transaction_init */ static void Transaction_delete(Transaction *self) { /* If the Transaction object succeeded in being completely constructed, then * its state will have moved past TR_STATE_CREATED. If that happened, then * the transaction should already have been properly closed by the time it * reaches this point: */ assert ( self->state == TR_STATE_CREATED ? TRUE : !Transaction_is_not_closed(self) ); assert (self->con == NULL); assert (self->con_python_wrapper == NULL); assert (self->trans_handle == NULL_TRANS_HANDLE); assert (self->group == NULL); if (self->default_tpb != NULL) { Py_DECREF(self->default_tpb); self->default_tpb = NULL; } assert (self->open_cursors == NULL); assert (self->open_blobreaders == NULL); /* No action on the following fields: * - n_physical_transactions_started * - n_prepared_statements_executed_since_current_phys_start */ } /* Transaction_delete */ static void pyob_Transaction___del__(Transaction *self) { CConnection *con = self->con; assert (NOT_RUNNING_IN_CONNECTION_TIMEOUT_THREAD); if (con != NULL) { /* Make sure con stays alive until we're done with it: */ PyObject *con_python_wrapper = con->python_wrapper_obj; boolean should_manipulate_con_refcnt; assert (con_python_wrapper != NULL); /* If this destructor is being called as a result of the execution of * con's destructor, we most definitely must not manipulate con's * reference count, which would cause con to be "resurrected" and then * its destructor to execute recursively! * Also, if self is con's main_trans, we must not manipulate the reference * count. 
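 *
 * The hazard in miniature (a generic sketch): within a tp_dealloc cascade
 * the object's ob_refcnt is already 0, so a bare INCREF/DECREF pair would
 * drive it back to 0 and re-enter the destructor:
 *
 *   if (con->ob_refcnt != 0) {  // only safe when con is not mid-dealloc
 *     Py_INCREF(con);
 *     // ... use con ...
 *     Py_DECREF(con);           // would recurse if ob_refcnt had been 0
 *   }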
*/ should_manipulate_con_refcnt = (con->ob_refcnt != 0 && !Transaction_is_main(self)); if (should_manipulate_con_refcnt) { Py_INCREF(con_python_wrapper); Py_INCREF(con); } { #ifdef ENABLE_CONNECTION_TIMEOUT const boolean needed_to_acquire_tp = !CURRENT_THREAD_OWNS_CON_TP(con); if (needed_to_acquire_tp) { ACQUIRE_CON_TP_WITH_GIL_HELD(con); } /* If the Connection Timeout Thread (CTT) caused this Transaction to be * untracked while this thread was waiting for the lock, this thread should * not close the Transaction again. */ if (!Transaction_has_been_untracked(self)) { #endif /* ENABLE_CONNECTION_TIMEOUT */ if (Transaction_is_not_closed(self)) { /* Close self, in the process removing self from the connection's * Transaction tracker: */ assert (self->con != NULL); assert (self->con->transactions != NULL); Transaction_close_with_unlink(self, FALSE); } else { /* self won't be in the connection's Transaction tracker because self * has already been moved to a non-open state. */ assert (self->con == NULL); assert (self->con_python_wrapper == NULL); Transaction_close_without_unlink(self, FALSE); } assert (self->con == NULL); assert (self->con_python_wrapper == NULL); assert (self->trans_handle == NULL_TRANS_HANDLE); assert (self->group == NULL); assert (self->open_cursors == NULL); assert (self->open_blobreaders == NULL); #ifdef ENABLE_CONNECTION_TIMEOUT } if (needed_to_acquire_tp) { RELEASE_CON_TP(con); } #endif /* ENABLE_CONNECTION_TIMEOUT */ } if (should_manipulate_con_refcnt) { Py_DECREF(con); Py_DECREF(con_python_wrapper); } con = NULL; } Transaction_delete(self); /* Low-level deletion of members. */ /* Release the Transaction struct itself: */ self->ob_type->tp_free((PyObject *) self); } /* pyob_Transaction___del__ */ /************** Transaction INITIALIZATION AND DESTRUCTION:END ***************/ /************** Transaction METHODS INACCESSIBLE TO PYTHON:BEGIN *************/ static CConnection *Transaction_get_con(Transaction *trans) { assert (trans != NULL); return trans->con; } /* Transaction_get_con */ static PyObject *Transaction_get_con_python_wrapper(Transaction *trans) { assert (trans != NULL); /* trans->con and trans->con_python_wrapper should be maintained in * tight synch: */ assert ( trans->con != NULL ? trans->con_python_wrapper != NULL : trans->con_python_wrapper == NULL ); return trans->con_python_wrapper; } /* Transaction_get_con_python_wrapper */ static isc_tr_handle *Transaction_get_handle_p(Transaction *self) { assert (self != NULL); /* Note that the GIL must be held when the function is called. */ if (self->trans_handle != NULL_TRANS_HANDLE) { assert (self->group == NULL); return &self->trans_handle; } else { PyObject *group = self->group; isc_tr_handle *native_handle_addr = NULL; if (group != NULL) { PyObject *py_trans_handle = PyObject_GetAttr(group, trans___s__trans_handle ); if (py_trans_handle == NULL) { goto fail; } /* The Python layer should not allow this function to be called if the * ConnectionGroup has not yet established a transaction handle. */ assert (py_trans_handle != Py_None); if (!StandaloneTransactionHandle_Check(py_trans_handle)) { raise_exception(InternalError, "ConnectionGroup._trans_handle should" " be a StandaloneTransactionHandle object." ); Py_DECREF(py_trans_handle); goto fail; } native_handle_addr = &((StandaloneTransactionHandle *) py_trans_handle)->native_handle; /* Obviously, this function assumes that the caller will not use the * returned address after the death of the StandaloneTransactionHandle * object. 
*/ Py_DECREF(py_trans_handle); } return native_handle_addr; } assert (FALSE); /* Should never reach this point. */ fail: assert (PyErr_Occurred()); return NULL; } /* Transaction_get_handle_p */ static void Transaction_reconsider_state(Transaction *self) { /* Although a Transaction normally keeps its state and trans_handle members * consistent, there are ways for them to become inconsistent. For example, * con.execute_immediate('rollback') * could leave con.main_transaction.state == TR_STATE_UNRESOLVED, but * con.main_transaction.trans_handle == NULL_TRANS_HANDLE. * This method is provided so that client code of self that has just executed * a statement can give self a chance to bring state and trans_handle back * into synch. */ assert (self != NULL); { isc_tr_handle *trans_handle_p = Transaction_get_handle_p(self); if (trans_handle_p == NULL || *trans_handle_p == NULL_TRANS_HANDLE) { if (self->state != TR_STATE_RESOLVED) { assert (self->state == TR_STATE_UNRESOLVED); self->state = TR_STATE_RESOLVED; } } else { /* Handle indicates active transaction: */ if (self->state != TR_STATE_UNRESOLVED) { assert (self->state == TR_STATE_RESOLVED); self->state = TR_STATE_UNRESOLVED; } } } } /* Transaction_reconsider_state */ static ISC_STATUS *Transaction_get_sv(Transaction *self) { assert (self != NULL); /* This shouldn't even be called when the Transaction isn't open, so we * validate with assertion instead of exception. */ assert (Transaction_get_con(self) != NULL); return Transaction_get_con(self)->status_vector; } /* Transaction_get_sv */ static isc_db_handle *Transaction_get_db_handle_p(Transaction *self) { assert (self != NULL); /* This shouldn't even be called when the Transaction isn't open, so we * validate with assertion instead of exception. */ assert (Transaction_get_con(self) != NULL); assert (!Connection_is_closed(Transaction_get_con(self))); return &Transaction_get_con(self)->db_handle; } /* Transaction_get_db_handle_p */ static unsigned short Transaction_get_dialect(Transaction *self) { assert (self != NULL); assert (Transaction_get_con(self) != NULL); assert (!Connection_is_closed(Transaction_get_con(self))); return Transaction_get_con(self)->dialect; } /* Transaction_get_dialect */ static int Transaction_execute_immediate(Transaction *self, PyObject *py_sql_raw ) { int status = -1; PyObject *py_sql_as_str = NULL; Py_ssize_t sql_len; assert (self != NULL); assert (py_sql_raw != NULL); /* Caller should've already verified that self isn't closed (though it's * acceptable if self is not *active* when this function is called): */ assert (Transaction_is_not_closed(self)); assert (self->con != NULL); /* Caller should've already activated the connection: */ CON_MUST_ALREADY_BE_ACTIVE(self->con); if (PyString_CheckExact(py_sql_raw)) { /* The INCREF is logically unnecessary, but we perform it for symmetry with * the unicode-converting branch: */ Py_INCREF(py_sql_raw); py_sql_as_str = py_sql_raw; } else if (PyUnicode_CheckExact(py_sql_raw)) { py_sql_as_str = PyUnicode_AsASCIIString(py_sql_raw); if (py_sql_as_str == NULL) { goto fail; } } else { assert (py_sql_as_str == NULL); raise_exception(ProgrammingError, "SQL argument to execute_immediate must" " be str." 
); goto fail; } assert (py_sql_as_str != NULL); sql_len = PyString_GET_SIZE(py_sql_as_str); if (!_check_statement_length(sql_len)) { assert (PyErr_Occurred()); goto fail; } /* Start a physical transaction, if self doesn't already have one: */ if (Transaction_ensure_active(self, NULL) != 0) { assert (PyErr_Occurred()); goto fail; } { /* Note that we call Transaction_get_handle_p while holding the GIL: */ isc_tr_handle *trans_handle_p = Transaction_get_handle_p(self); char *sql = PyString_AS_STRING(py_sql_as_str); CConnection *con = self->con; assert (con != NULL); ENTER_GDAL isc_dsql_execute_immediate(con->status_vector, &con->db_handle, trans_handle_p, /* Cast is safe because sql_len has already been constrained: */ (unsigned short) sql_len, sql, con->dialect, NULL ); LEAVE_GDAL Transaction_reconsider_state(self); if (DB_API_ERROR(con->status_vector)) { raise_sql_exception_exc_type_filter(ProgrammingError, "isc_dsql_execute_immediate: ", con->status_vector, pyob_Cursor_execute_exception_type_filter ); goto fail; } } /* end of scope surrounding isc_dsql_execute_immediate call */ assert (!PyErr_Occurred()); status = 0; goto clean; fail: assert (PyErr_Occurred()); assert (status == -1); /* Fall through to clean: */ clean: Py_XDECREF(py_sql_as_str); return status; } /* Transaction_execute_immediate */ static PyObject *pyob_Transaction_execute_immediate( Transaction *self, PyObject *args ) { PyObject *py_res = NULL; PyObject *py_sql; CConnection *con; TRANS_REQUIRE_OPEN(self); assert (self->con != NULL); con = self->con; CON_ACTIVATE(con, return NULL); /* We extract py_sql from the args tuple here, but don't validate it. * Transaction_execute_immediate will do that. */ if (!PyArg_ParseTuple(args, "O", &py_sql)) { goto fail; } if (Transaction_execute_immediate(self, py_sql) != 0) { goto fail; } assert (!PyErr_Occurred()); py_res = Py_None; Py_INCREF(Py_None); goto clean; fail: assert (PyErr_Occurred()); assert (py_res == NULL); /* Fall through to clean: */ clean: CON_PASSIVATE(con); CON_MUST_NOT_BE_ACTIVE(con); return py_res; } /* pyob_Transaction_execute_immediate */ static PyObject *pyob_Transaction_convert_and_validate_tpb( PyObject *py_tpb_raw ) { /* On success, returns a new reference to a str that contains the rendered * TPB. On failure, sets an exception and returns NULL. */ PyObject *tpb = PyObject_CallFunctionObjArgs(pyob_validate_tpb, py_tpb_raw, NULL ); if (tpb == NULL) { assert (PyErr_Occurred()); goto fail; } /* Keep in mind that tpb contains an owned reference here. */ if (!PyString_CheckExact(tpb)) { /* tpb isn't a str, so presumably it's a kinterbasdb.TPB instance. Execute * the equivalent of this Python statement: * tpb = tpb.render() */ { PyObject *tpb_str = PyObject_CallMethod(tpb, "render", NULL); Py_DECREF(tpb); tpb = tpb_str; } if (tpb == NULL) { assert (PyErr_Occurred()); goto fail; } else if (!PyString_CheckExact(tpb)) { raise_exception(ProgrammingError, "TPB must be an instance of str or" " kinterbasdb.TPB." ); goto fail; } } assert (tpb != NULL); assert (PyString_CheckExact(tpb)); return tpb; fail: assert (PyErr_Occurred()); Py_XDECREF(tpb); return NULL; } /* pyob_Transaction_convert_and_validate_tpb */ static PyObject *pyob_Transaction_get_default_tpb(Transaction *self) { /* This function "bubbles upward" from the Transaction to the Connection to * the kinterbasdb module, returning a new reference to a str (memory buffer) * representation of the first non-NULL default TPB that it finds. 
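 *
 * In other words, the lookup order for a transaction's effective TPB is
 * (for reference):
 *
 *   1. self->default_tpb (on a Transaction, always a rendered str);
 *   2. the connection's _default_tpb_str_ property, which in turn reflects
 *      the kinterbasdb module-level default_tpb when the connection has no
 *      explicit default of its own.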
*/ if (self->default_tpb != NULL) { /* The default_tpb member of a Transaction object, unlike the default_tpb * member of a Connection object, is always a str: */ assert (PyString_CheckExact(self->default_tpb)); Py_INCREF(self->default_tpb); return self->default_tpb; } else { /* The bubbling from the connection to the module level is implicit, since * the connection will have inherited the module-level default_tpb if * appropriate. */ PyObject *con_default_tpb = PyObject_GetAttr(self->con_python_wrapper, trans___s__default_tpb_str_ ); if (con_default_tpb == NULL) { goto fail; } /* The _default_tpb_str_ property of the connection should have rendered * the TPB to a str if necessary: */ assert (PyString_CheckExact(con_default_tpb)); /* PyObject_GetAttr already supplied a new reference; no need to INCREF. */ return con_default_tpb; } assert (FALSE); /* Shouldn't reach this point. */ fail: assert (PyErr_Occurred()); return NULL; } /* pyob_Transaction_get_default_tpb */ static void Transaction_clear_connection_references(Transaction *self) { const boolean is_main = Transaction_is_main(self); assert (self->con != NULL); if (!is_main) { Py_DECREF(self->con); } self->con = NULL; assert (self->con_python_wrapper != NULL); if (!is_main) { Py_DECREF(self->con_python_wrapper); } self->con_python_wrapper = NULL; } /* Transaction_clear_connection_references */ #define define_Transaction_close_tracker(tracker_type, tracker_member_name) \ static int Transaction_close_ ## tracker_member_name (Transaction *self) { \ return tracker_type ## _release(&self->tracker_member_name); \ } /* Defines Transaction_close_open_cursors(...): */ define_Transaction_close_tracker(CursorTracker, open_cursors) /* Defines Transaction_close_open_blobreaders(...): */ define_Transaction_close_tracker(BlobReaderTracker, open_blobreaders) static int Transaction_close_open_blobreaders_ignoring_errors( Transaction *self ) { /* This function is essentially equivalent to * Transaction_close_open_blobreaders, except that it doesn't stop when it * encounters errors, and in particular, it informs BlobReader_untrack that * it is !allowed_to_raise. * * It would be better to add a 'boolean allowed_to_raise' parameter to * *Tracker_release(...), but that's impractical with the current macro-based * implementation. */ int status = 0; BlobReaderTracker *br_node = self->open_blobreaders; while (br_node != NULL) { BlobReader *br = br_node->contained; assert (br != NULL); if (BlobReader_untrack(br, FALSE /* !allowed_to_raise*/) != 0) { status = -1; /* There shouldn't be a Python exception, because we ordered * BlobReader_untrack not to raise one: */ assert (!PyErr_Occurred()); } { /* Note that we free br_node as we advance to the next node: */ BlobReaderTracker *br_node_next = br_node->next; kimem_main_free(br_node); br_node = br_node_next; } } self->open_blobreaders = NULL; return status; } /* Transaction_close_open_blobreaders_ignoring_errors */ static int Transaction_close_without_unlink( Transaction *self, boolean allowed_to_raise ) { /* The "without unlink" part of this function's name refers to not unlinking * Transaction self from self's connection. It does *not* refer to not * unlinking self's dependent objects (BlobReaders and Cursors) from self. 
*/ int status = 0; /* YYY: Note that Transaction_close_open_cursors doesn't really support * !allowed_to_raise: */ if (Transaction_close_open_cursors(self) != 0) { HANDLE_ERROR_WHEN_POSSIBLY_NOT_ALLOWED_TO_RAISE(allowed_to_raise) } if (allowed_to_raise) { if (Transaction_close_open_blobreaders(self) != 0) { assert (PyErr_Occurred()); goto fail; } } else { if (Transaction_close_open_blobreaders_ignoring_errors(self) != 0) { /* Transaction_close_open_blobreaders_ignoring_errors shouldn't raise a * Python exception, even if it encounters an error: */ assert (!PyErr_Occurred()); /* Set status to error, but keep going, because !allowed_to_raise: */ status = OP_RESULT_ERROR; } } if (Transaction_is_active(self)) { const TransactionalOperationResult resolution_status = Transaction_commit_or_rollback(OP_ROLLBACK, self, FALSE, allowed_to_raise ); if (resolution_status != OP_RESULT_OK) { if (allowed_to_raise) { goto fail; } else { /* Do the best we can, since we're not allowed to raise an exception: */ self->trans_handle = NULL_TRANS_HANDLE; SUPPRESS_EXCEPTION; } } } self->state = TR_STATE_CLOSED; return status; fail: assert (PyErr_Occurred()); return -1; } /* Transaction_close_without_unlink */ static int Transaction_untrack(Transaction *self, boolean allowed_to_raise) { /* We're here because the superior object (a CConnection) ordered a purge of * its tracker. * Since self might have subordinate objects (Cursors and BlobReaders) that * will release their references to their superior object (self), we must * ensure that if self becomes eligible for destruction as a result of this * untracking operation, self remains alive at least long enough to complete * the untracking in an orderly manner. * So, note the artificial INCREF(self)/DECREF(self) in this method. */ int status = -1; assert (self != NULL); assert (self->ob_refcnt > 0); Py_INCREF(self); /* Note that we assert Transaction_is_not_closed, but we do not require * Transaction_is_active, because Transactions can remain in their trackers * even when they don't have an open physical transaction. */ assert (Transaction_is_not_closed(self)); if (Transaction_close_without_unlink(self, allowed_to_raise) != 0) { if (allowed_to_raise) { goto fail; } } assert (allowed_to_raise ? !Transaction_is_not_closed(self) : TRUE); assert (allowed_to_raise ? 
!Transaction_is_active(self) : TRUE); Transaction_clear_connection_references(self); assert (!PyErr_Occurred()); assert (Transaction_has_been_untracked(self)); status = 0; goto clean; fail: assert (PyErr_Occurred()); assert (status == -1); /* Fall through to clean: */ clean: Py_DECREF(self); return status; } /* Transaction_untrack */ static int Transaction_close_with_unlink( Transaction *self, boolean allowed_to_raise ) { int status = 0; assert (self->con != NULL); assert (self->con->transactions != NULL); if (Transaction_close_without_unlink(self, allowed_to_raise) != 0) { HANDLE_ERROR_WHEN_POSSIBLY_NOT_ALLOWED_TO_RAISE(allowed_to_raise); } assert (!Transaction_is_not_closed(self)); /* Remove self from the connection's transaction tracker: */ if (TransactionTracker_remove(&self->con->transactions, self, TRUE) != 0) { HANDLE_ERROR_WHEN_POSSIBLY_NOT_ALLOWED_TO_RAISE(allowed_to_raise); } Transaction_clear_connection_references(self); assert (Transaction_has_been_untracked(self)); return status; fail: assert (PyErr_Occurred()); return -1; } /* Transaction_close_with_unlink */ static int Transaction_ensure_active(Transaction *self, PyObject *py_tpb) { /* tpb can be set to NULL to cause this method to use the default TPB. */ int status = -1; PyObject *py_tpb_owned_ref = NULL; CConnection *con; assert (self != NULL); TRANS_REQUIRE_OPEN_(self, goto fail); assert (self->con != NULL); assert (self->con_python_wrapper != NULL); con = self->con; #ifdef ENABLE_CONNECTION_TIMEOUT /* This function does not activate the connection, so it should only be * called when the connection has already been activated: */ assert ( Connection_timeout_enabled(con) ? con->timeout->state == CONOP_ACTIVE : TRUE ); #endif if (!Transaction_is_active(self)) { if (self->group == NULL) { if (py_tpb != NULL) { py_tpb_owned_ref = pyob_Transaction_convert_and_validate_tpb(py_tpb); } else { py_tpb_owned_ref = pyob_Transaction_get_default_tpb(self); } if (py_tpb_owned_ref == NULL) { goto fail; } assert (PyString_CheckExact(py_tpb_owned_ref)); { char *tpb_ptr = PyString_AS_STRING(py_tpb_owned_ref); const Py_ssize_t tpb_len = PyString_GET_SIZE(py_tpb_owned_ref); self->trans_handle = begin_transaction( con->db_handle, tpb_ptr, tpb_len, NULL, -1, /* all TEB-related params are null */ con->status_vector ); } if (self->trans_handle == NULL_TRANS_HANDLE) { goto fail; } assert (self->state == TR_STATE_RESOLVED); self->state = TR_STATE_UNRESOLVED; } else { if (py_tpb != NULL) { raise_exception(ProgrammingError, "Cannot specify custom TPB when" " starting a distributed transaction." ); goto fail; } else { /* Call the 'begin' method of self->group: */ PyObject *py_ret = PyObject_CallMethod(self->group, "begin", NULL); if (py_ret == NULL) { goto fail; } Py_DECREF(py_ret); } } ++self->n_physical_transactions_started; } /* end of if !Transaction_is_active(self) block */ assert (Transaction_is_active(self)); assert (self->group != NULL ? 
self->trans_handle == NULL_TRANS_HANDLE : TRUE); assert (Transaction_get_handle_p(self) != NULL); assert (*Transaction_get_handle_p(self) != NULL_TRANS_HANDLE); assert (!PyErr_Occurred()); status = 0; goto clean; fail: assert (PyErr_Occurred()); assert (status == -1); /* Fall through to clean: */ clean: Py_XDECREF(py_tpb_owned_ref); return status; } /* Transaction_ensure_active */ static TransactionalOperationResult Transaction_commit_or_rollback( const WhichTransactionOperation op, Transaction *self, const boolean retaining, const boolean allowed_to_raise ) { TransactionalOperationResult status = OP_RESULT_OK; assert (self != NULL); assert (self->con != NULL); /* Either the Connection should have been marked active, if this method is * not being called from the CTT, or its TP should be held, if this method * is being called from the CTT. * * The fact that we call a method of self->group if self->group != NULL, and * the group might call a method of self->con that attempts to acquire the * lock again, is not a problem, because Connections that have timeout * enabled are forbidden from joining ConnectionGroup. */ #ifndef NDEBUG if (!RUNNING_IN_CONNECTION_TIMEOUT_THREAD) { CON_MUST_ALREADY_BE_ACTIVE(self->con); } else { assert (!allowed_to_raise); assert (CURRENT_THREAD_OWNS_CON_TP(self->con)); } #endif assert (Transaction_is_active(self)); assert (Transaction_get_handle_p(self) != NULL); assert (*Transaction_get_handle_p(self) != NULL_TRANS_HANDLE); if (allowed_to_raise) { if (Transaction_close_open_blobreaders(self) != 0) { assert (PyErr_Occurred()); return OP_RESULT_ERROR; } } else { if (Transaction_close_open_blobreaders_ignoring_errors(self) != 0) { /* Transaction_close_open_blobreaders_ignoring_errors shouldn't raise a * Python exception, even if it encounters an error: */ assert (!PyErr_Occurred()); /* Set status to error, but keep going, because !allowed_to_raise: */ status = OP_RESULT_ERROR; } } if (self->group != NULL) { assert (self->trans_handle == NULL_TRANS_HANDLE); /* Since connections with timeout enabled aren't allowed to participate in * distributed transactions, there's no danger that calling a method of * the group will cause an attempt to activate the connection again before * we've deactivated it. */ assert (!Connection_timeout_enabled(self->con)); { /* Call the appropriate method of self->group: */ const char *method_name = (op == OP_COMMIT ? 
"commit" : "rollback"); PyObject *py_ret = PyObject_CallMethod(self->group, (char *) method_name, NULL ); if (py_ret != NULL) { Py_DECREF(py_ret); status = OP_RESULT_OK; } else { if (allowed_to_raise) { assert (PyErr_Occurred()); return OP_RESULT_ERROR; } else { /* Set error return code, but supress Python exception and keep * going: */ status = OP_RESULT_ERROR; SUPPRESS_EXCEPTION; } } } } else { switch (op) { case OP_COMMIT: status = commit_transaction(Transaction_get_handle_p(self), retaining, self->con->status_vector ); break; case OP_ROLLBACK: status = rollback_transaction(Transaction_get_handle_p(self), retaining, TRUE, self->con->status_vector ); break; } if (status == OP_RESULT_ERROR && !allowed_to_raise) { /* Allow error return code, but supress Python exception and keep * going: */ SUPPRESS_EXCEPTION; } } if (status == OP_RESULT_OK) { if (!retaining) { self->trans_handle = NULL_TRANS_HANDLE; Transaction_stats_clear(self); self->state = TR_STATE_RESOLVED; assert (!Transaction_is_active(self)); } } return status; } /* Transaction_commit_or_rollback */ static PyObject *_pyob_Transaction_commit_or_rollback( const WhichTransactionOperation op, Transaction *self, PyObject *args, PyObject *kwargs ) { /* NOTE: Although this method normally activates/passivates self->con, it * must parse the arguments and make a decision on the basis of their values * before it can know whether it can even requires that self be open. That * is necessary because the DB API requires that .commit() and .rollback() be * accepted even when there is no transaction. * All failure (or success) exits up until the CON_ACTIVATE call should exit * via 'return' rather than via 'goto', but all thereafter should pass * through clean:, in order to passivate the connection. */ PyObject *py_res = NULL; static char *kwarg_list[] = {"retaining", "savepoint", NULL}; int retaining_int = FALSE; PyObject *py_savepoint_name = NULL; assert (self != NULL); /* Some internal callers pass NULL for args and kwargs, which we should * interpret as a request to use the default options: */ if (args == NULL && kwargs == NULL) { assert (retaining_int == FALSE); assert (py_savepoint_name == NULL); } else { PyObject *py_retaining = Py_False; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO", kwarg_list, &py_retaining, &py_savepoint_name )) { return NULL; } retaining_int = PyObject_IsTrue(py_retaining); if (retaining_int == -1) { return NULL; } if (py_savepoint_name == Py_None) { /* Py_None and NULL are equivalent in this context. */ py_savepoint_name = NULL; } if (py_savepoint_name != NULL && !PyString_CheckExact(py_savepoint_name)) { raise_exception(ProgrammingError, "Savepoint name must be a str."); return NULL; } } /* end of use-default-args vs. validate-supplied-args block */ if (!Transaction_is_active(self)) { /* DB API standard requires that a commit or rollback on a nonexistent * transaction succeed. That does *not* apply to rollback-to-savepoint, * however. Rolling back a transaction that doesn't exist is excusable, * but it would be dangerous to allow the user to rollback to a nonexistent * savepoint. */ if (py_savepoint_name != NULL) { /* We've already ensured that py_savepoint_name is a str. 
*/ PyObject *py_err_msg = PyString_FromFormat( "Cannot roll back to savepoint \"%s\", because there is no active" " transaction.", PyString_AS_STRING(py_savepoint_name) ); if (py_err_msg != NULL) { raise_exception(ProgrammingError, PyString_AS_STRING(py_err_msg)); Py_DECREF(py_err_msg); } return NULL; } else { assert (self->trans_handle == NULL_TRANS_HANDLE); RETURN_PY_NONE; } } TRANS_REQUIRE_OPEN(self); assert (self->con != NULL); CON_ACTIVATE(self->con, return NULL); /* The implementation of .rollback(savepoint=...) is totally different from * that of a normal commit or rollback. */ if (op == OP_ROLLBACK && py_savepoint_name != NULL) { /* PyString_Concat will steal a ref to py_sql and store a new reference to * the concatenated str in py_sql. */ PyObject *py_sql = trans___s_ROLLBACK_TO_SPACE; /* Immortal str. */ Py_INCREF(py_sql); /* The type of py_savepoint_name should've been validated earlier: */ assert (PyString_CheckExact(py_savepoint_name)); PyString_Concat(&py_sql, py_savepoint_name); if (py_sql == NULL) { goto fail; } else { const int exec_res = Transaction_execute_immediate(self, py_sql); Py_DECREF(py_sql); if (exec_res != 0) { goto fail; } } } else { const boolean is_retaining = (boolean) retaining_int; const TransactionalOperationResult trans_op_res = Transaction_commit_or_rollback(op, self, is_retaining, TRUE /* allowed_to_raise */ ); if (trans_op_res != OP_RESULT_OK) { goto fail; } #ifndef NDEBUG if (!is_retaining) { assert (!Transaction_is_active(self)); assert (self->trans_handle == NULL_TRANS_HANDLE); } else { assert (Transaction_is_active(self)); assert (Transaction_get_handle_p(self) != NULL); assert (*Transaction_get_handle_p(self) != NULL_TRANS_HANDLE); } #endif /* assertion block */ } /* end of "is it rollback-to-savepoint or other-resolution-op"? block */ assert (!PyErr_Occurred()); Py_INCREF(Py_None); py_res = Py_None; goto clean; fail: assert (PyErr_Occurred()); assert (py_res == NULL); /* Fall through to clean: */ clean: CON_PASSIVATE(self->con); CON_MUST_NOT_BE_ACTIVE(self->con); return py_res; } /* _pyob_Transaction_commit_or_rollback */ static TransactionalOperationResult Transaction_rollback_without_connection_activation( Transaction *self, boolean allowed_to_raise ) { assert (self != NULL); { /* Note that this method DOES NOT activates or deactivate the * connection. 
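 * The caller is expected to have arranged safe access itself; for example,
 * the connection-timeout thread holds the connection's TP lock instead of
 * activating the connection (see the assertions in
 * Transaction_commit_or_rollback).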
*/ TransactionalOperationResult status = Transaction_commit_or_rollback( OP_ROLLBACK, self, FALSE, allowed_to_raise ); if (status != OP_RESULT_OK && !allowed_to_raise) { SUPPRESS_EXCEPTION; } return status; } } /* Transaction_rollback_without_connection_activation */ static void Transaction_stats_record_ps_executed(Transaction *self) { assert (self != NULL); ++self->n_prepared_statements_executed_since_current_phys_start; } /* Transaction_stats_record_ps_executed */ static LONG_LONG Transaction_stats_n_executed_since_phys_start( Transaction *self ) { assert (self != NULL); return self->n_prepared_statements_executed_since_current_phys_start; } /* Transaction_stats_n_executed_since_phys_start */ static void Transaction_stats_clear(Transaction *self) { assert (self != NULL); self->n_prepared_statements_executed_since_current_phys_start = 0; } /* Transaction_stats_clear */ /*************** Transaction METHODS INACCESSIBLE TO PYTHON:END **************/ /*************** Transaction METHODS ACCESSIBLE TO PYTHON:BEGIN **************/ static PyObject *pyob_Transaction_close(Transaction *self) { PyObject *res = NULL; CConnection *con; assert (self != NULL); con = self->con; TRANS_REQUIRE_OPEN(self); if (Transaction_is_main(self)) { raise_exception(ProgrammingError, "A Connection's main_transaction cannot" " be close()d independently of the Connection itself." ); return NULL; } assert (con != NULL); /* We need to retain a reference to con until the very end of this function, * so that we know the connection won't be garbage collected before we can * unlock its TP lock. */ Py_INCREF(con); #ifdef ENABLE_CONNECTION_TIMEOUT ACQUIRE_CON_TP_WITH_GIL_HELD(con); #endif /* ENABLE_CONNECTION_TIMEOUT */ if (Transaction_close_with_unlink(self, TRUE) != 0) { goto fail; } /* Although we hold a reference to the connection in a local variable within * this function, the reference held as a member of the Transaction object * should've been cleared: */ assert (self->con == NULL); res = Py_None; Py_INCREF(Py_None); goto clean; fail: assert (PyErr_Occurred()); /* Fall through to clean: */ clean: #ifdef ENABLE_CONNECTION_TIMEOUT RELEASE_CON_TP(con); #endif /* ENABLE_CONNECTION_TIMEOUT */ Py_DECREF(con); return res; } /* pyob_Transaction_close */ static PyObject *pyob_Transaction_cursor(Transaction *self) { /* Perform the equivalent of the following Python statement: * return Cursor(self) */ /* Note that we don't TRANS_REQUIRE_OPEN(self) here, because the Cursor * constructor must check that condition anyway. */ PyObject *py_cur; #ifndef NDEBUG Py_ssize_t self_orig_refcount = self->ob_refcnt; #endif py_cur = PyObject_CallFunctionObjArgs((PyObject *) &CursorType, self, NULL ); /* Validate the handling by the Cursor constructor of self's reference * count: */ assert ( py_cur != NULL ? 
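/* On success, the Cursor constructor must hold exactly one new reference to
 * self; on failure, self's reference count must be unchanged: */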
self->ob_refcnt == self_orig_refcount + 1 : self->ob_refcnt == self_orig_refcount ); return py_cur; } /* pyob_Transaction_cursor */ static PyObject *pyob_Transaction_begin(Transaction *self, PyObject *args, PyObject *kwargs ) { static char *kwarg_list[] = {"tpb", NULL}; PyObject *py_res = NULL; PyObject *py_tpb = NULL; CConnection *con; assert (self != NULL); TRANS_REQUIRE_OPEN(self); assert (self->con != NULL); con = self->con; CON_ACTIVATE(con, return NULL); if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O", kwarg_list, &py_tpb )) { goto fail; } /* Py_None and NULL are equivalent in this context: */ if (py_tpb == Py_None) { py_tpb = NULL; } /* Raise a more informative error message if the previous transaction is * still active when the client attempts to start another. The old approach * was to go ahead and try to start the new transaction regardless. If there * was already an active transaction, the resulting exception made no mention * of it, which was very confusing. */ if (Transaction_is_active(self)) { raise_exception_with_numeric_error_code(ProgrammingError, -901, "Previous transaction still active; cannot start new transaction." " Use commit() or rollback() to resolve the old transaction first." ); goto fail; } if (Transaction_ensure_active(self, py_tpb) != 0) { assert (PyErr_Occurred()); goto fail; } assert (Transaction_is_active(self)); Py_INCREF(Py_None); py_res = Py_None; goto clean; fail: assert (PyErr_Occurred()); /* Fall through to clean: */ clean: CON_PASSIVATE(con); CON_MUST_NOT_BE_ACTIVE(con); return py_res; } /* pyob_Transaction_begin */ static PyObject *pyob_Transaction_prepare(Transaction *self) { PyObject *py_res = NULL; CConnection *con; assert (self != NULL); TRANS_REQUIRE_OPEN(self); assert (self->con != NULL); con = self->con; CON_ACTIVATE__FORBID_TRANSPARENT_RESUMPTION(con, return NULL); if (self->group == NULL) { if ( prepare_transaction(&self->trans_handle, con->status_vector) != OP_RESULT_OK ) { goto fail; } } else { /* Call the 'prepare' method of self->group: */ PyObject *py_ret = PyObject_CallMethod(self->group, "prepare", NULL); if (py_ret == NULL) { goto fail; } Py_DECREF(py_ret); } assert (!PyErr_Occurred()); py_res = Py_None; Py_INCREF(Py_None); goto clean; fail: assert (PyErr_Occurred()); assert (py_res == NULL); /* Fall through to clean: */ clean: CON_PASSIVATE(con); CON_MUST_NOT_BE_ACTIVE(con); return py_res; } /* pyob_Transaction_prepare */ static PyObject *pyob_Transaction_commit(Transaction *self, PyObject *args, PyObject *kwargs ) { return _pyob_Transaction_commit_or_rollback(OP_COMMIT, self, args, kwargs); } /* pyob_Transaction_commit */ static PyObject *pyob_Transaction_savepoint(Transaction *self, PyObject *args) { PyObject *py_res = NULL; PyObject *py_sp_name = NULL; PyObject *py_sql = NULL; TRANS_REQUIRE_OPEN(self); assert (self->con != NULL); CON_ACTIVATE(self->con, return NULL); if (!PyArg_ParseTuple(args, "O!", &PyString_Type, &py_sp_name)) { goto fail; } /* PyString_Concat will steal a ref to py_sql and store a new reference to * the concatenated str in py_sql. */ py_sql = trans___s_SAVEPOINT_SPACE; /* Immortal str. 
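 * The statement ultimately executed is, e.g., "SAVEPOINT sp1"; at the
 * Python level (a sketch): con.savepoint('sp1').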
*/ Py_INCREF(py_sql); PyString_Concat(&py_sql, py_sp_name); if (py_sql == NULL) { goto fail; } if (Transaction_execute_immediate(self, py_sql) != 0) { goto fail; } assert (!PyErr_Occurred()); Py_INCREF(Py_None); py_res = Py_None; goto clean; fail: assert (PyErr_Occurred()); assert (py_res == NULL); /* Fall through to clean: */ clean: Py_XDECREF(py_sql); CON_PASSIVATE(self->con); CON_MUST_NOT_BE_ACTIVE(self->con); return py_res; } /* pyob_Transaction_savepoint */ static PyObject *pyob_Transaction_rollback(Transaction *self, PyObject *args, PyObject *kwargs ) { return _pyob_Transaction_commit_or_rollback(OP_ROLLBACK, self, args, kwargs); } /* pyob_Transaction_rollback */ static PyObject *pyob_Transaction_transaction_info(Transaction *self, PyObject *args ) { /* Note that we call pyob_Connection_x_info, which activates the connection, * so we mustn't do so explicitly. */ PyObject *py_res = NULL; PyObject *args_with_con_prepended = NULL; TRANS_REQUIRE_OPEN(self); if (!Transaction_is_active(self)) { raise_exception(ProgrammingError, "Transaction must be active to issue" " info queries." ); return NULL; } assert (PyTuple_CheckExact(args)); { const Py_ssize_t args_len = PyTuple_GET_SIZE(args); args_with_con_prepended = PyTuple_New(args_len + 1); if (args_with_con_prepended == NULL) { goto fail; } { PyObject *con = (PyObject *) self->con; assert (con != NULL); Py_INCREF(con); /* PyTuple_SET_ITEM steals a reference. */ PyTuple_SET_ITEM(args_with_con_prepended, 0, con); } { Py_ssize_t i; for (i = 0; i < args_len; ++i) { PyObject *el = PyTuple_GET_ITEM(args, i); Py_INCREF(el); /* PyTuple_SET_ITEM steals a reference. */ PyTuple_SET_ITEM(args_with_con_prepended, i+1, el); } } } assert (args_with_con_prepended != NULL); assert (PyTuple_CheckExact(args_with_con_prepended)); assert (PyTuple_GET_SIZE(args_with_con_prepended) == PyTuple_GET_SIZE(args) + 1 ); py_res = pyob_Connection_x_info( FALSE, /* Requesting transaction info, not database info. */ &self->trans_handle, NULL, args_with_con_prepended ); if (py_res == NULL) { goto fail; } assert (!PyErr_Occurred()); assert (py_res != NULL); goto clean; fail: assert (PyErr_Occurred()); if (py_res != NULL) { Py_DECREF(py_res); py_res = NULL; } /* Fall through to clean: */ clean: Py_XDECREF(args_with_con_prepended); return py_res; } /* pyob_Transaction_transaction_info */ static PyObject *pyob_Transaction_trans_info(Transaction *self, PyObject *args) { /* This method is just a pass-through to Python function _trans_info in * __init__.py, which is accessible at the C level via pyob_trans_info. * * _trans_info calls self.transaction_info, which indirectly activates the * connection, so we mustn't do so explicitly. */ PyObject *py_res = NULL; TRANS_REQUIRE_OPEN(self); /* Validate the args tuple. It's supposed to contain a single element, the * request. */ assert (PyTuple_CheckExact(args)); if (PyTuple_GET_SIZE(args) != 1) { raise_exception(ProgrammingError, "trans_info requires exactly one" " argument, which can be either a sequence of integer request codes," " or a single integer request code." 
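/* Typical call (a sketch, assuming the standard isc_info_tra_id constant is
 * exposed at the kinterbasdb module level):
 *   trans.trans_info(kinterbasdb.isc_info_tra_id) */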
); goto fail; } py_res = PyObject_CallFunctionObjArgs(pyob_trans_info, self, PyTuple_GET_ITEM(args, 0), NULL ); if (py_res == NULL) { goto fail; } assert (!PyErr_Occurred()); assert (py_res != NULL); return py_res; fail: assert (PyErr_Occurred()); Py_XDECREF(py_res); return NULL; } /* pyob_Transaction_trans_info */ static void Transaction_dist_trans_indicate_resultion( Transaction *self, PyObject *group, const boolean is_resolved ) { assert (self != NULL); /* A Transaction's own trans_handle should always remain null when it's * participating in distributed transactions. */ assert (self->trans_handle == NULL_TRANS_HANDLE); assert (self->group != NULL); assert (self->group == group); if (is_resolved) { assert (self->state == TR_STATE_UNRESOLVED); self->state = TR_STATE_RESOLVED; Transaction_stats_clear(self); } else { assert (self->state == TR_STATE_RESOLVED); self->state = TR_STATE_UNRESOLVED; } } /* Transaction_dist_trans_indicate_resultion */ /**************** Transaction METHODS ACCESSIBLE TO PYTHON:END ***************/ /**************** Transaction ATTRIBUTE GET/SET METHODS:BEGIN ****************/ static PyObject *pyob_Transaction_connection_get(Transaction *self, void *closure ) { PyObject *py_res = Py_None; if (Transaction_get_con(self) != NULL) { assert (self->con_python_wrapper != NULL); py_res = self->con_python_wrapper; } else { assert (self->con_python_wrapper == NULL); } Py_INCREF(py_res); return py_res; } /* pyob_Transaction_connection_get */ static PyObject *pyob_Transaction_closed_get(Transaction *self, void *closure) { return PyBool_FromLong(!Transaction_is_not_closed(self)); } static PyObject *pyob_Transaction_group_get(Transaction *self, void *closure) { PyObject *group = self->group; if (group == NULL) { RETURN_PY_NONE; } else { Py_INCREF(group); return group; } } /* pyob_Transaction_group_get */ static int pyob_Transaction_group_set(Transaction *self, PyObject *group, void *closure ) { if (group == Py_None) { self->group = NULL; } else { /* Due to the specifics of the ConnectionGroup class, there should be a * trans._set_group(None) call between any trans._set_group(group) * calls. */ if (self->group != NULL) { raise_exception(InternalError, "Attempt to set transaction group when" " previous setting had not been cleared." ); goto fail; } /* The ConnectionGroup code always calls trans._set_group(None) when a * transaction is removed from its group, including when that removal is * invoked implicitly by ConnectionGroup.__del__. Therefore, the * transaction can avoid creating a cycle by never owning a reference to * its group, yet knowing that ->group will never refer to a dead * object. */ self->group = group; } return 0; fail: assert (PyErr_Occurred()); return -1; } /* pyob_Transaction_group_set */ static PyObject *pyob_Transaction_n_physical_get(Transaction *self, void *closure ) { return PythonIntOrLongFrom64BitValue(self->n_physical_transactions_started); } static PyObject *pyob_Transaction_resolution_get(Transaction *self, void *closure ) { return PyInt_FromLong(self->state == TR_STATE_UNRESOLVED ? 
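/* 0 while an unresolved physical transaction is open; 1 once resolved: */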
0 : 1); } /* pyob_Transaction_resolution_get */ static PyObject *pyob_Transaction_cursors_get(Transaction *self, void *closure ) { TRANS_REQUIRE_OPEN(self); return pyob_TrackerToList((AnyTracker *) self->open_cursors); } /* pyob_Transaction_cursors_get */ /******************* Transaction ATTRIBUTE GET/SET METHODS:END ********************/ /************* Transaction CLASS DEFINITION AND INITIALIZATION:BEGIN **************/ static PyMethodDef Transaction_methods[] = { {"close", (PyCFunction) pyob_Transaction_close, METH_NOARGS}, {"cursor", (PyCFunction) pyob_Transaction_cursor, METH_NOARGS}, {"_execute_immediate", (PyCFunction) pyob_Transaction_execute_immediate, METH_VARARGS }, {"begin", (PyCFunction) pyob_Transaction_begin, METH_VARARGS|METH_KEYWORDS }, {"prepare", (PyCFunction) pyob_Transaction_prepare, METH_NOARGS}, {"commit", (PyCFunction) pyob_Transaction_commit, METH_VARARGS|METH_KEYWORDS }, {"savepoint", (PyCFunction) pyob_Transaction_savepoint, METH_VARARGS}, {"rollback", (PyCFunction) pyob_Transaction_rollback, METH_VARARGS|METH_KEYWORDS }, {"transaction_info", (PyCFunction) pyob_Transaction_transaction_info, METH_VARARGS }, {"trans_info", (PyCFunction) pyob_Transaction_trans_info, METH_VARARGS}, {NULL} /* sentinel */ }; static PyGetSetDef Transaction_getters_setters[] = { {"connection", (getter) pyob_Transaction_connection_get, NULL, "The kinterbasdb.Connection associated with this transaction." }, {"closed", (getter) pyob_Transaction_closed_get, NULL, "Whether the Transaction object has been closed (explicitly or" " implicitly)." }, {"n_physical", (getter) pyob_Transaction_n_physical_get, NULL, "Number of physical transactions that have been started via this" " Transaction object during its lifetime." }, {"resolution", (getter) pyob_Transaction_resolution_get, NULL, "Zero if this Transaction object is currently managing an open" " physical transaction. One if the physical transaction has been" " resolved normally. Note that this is an int property rather than a" " bool, and is named 'resolution' rather than 'resolved', so that the" " non-zero values other than one can be assigned to convey specific" " information about the state of the transaction, in a future" " implementation." }, {"cursors", (getter) pyob_Transaction_cursors_get, NULL, "List of non-close()d Cursor objects associated with this Transaction." 
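/* Read-only access from Python (a sketch):
 *   t = con.main_transaction
 *   t.closed, t.n_physical, t.resolution, t.cursors */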
}, /* Package-private property used by kinterbasdb.ConnectionGroup: */ {"_group", (getter) pyob_Transaction_group_get, (setter) pyob_Transaction_group_set, NULL }, {NULL} /* sentinel */ }; PyTypeObject TransactionType = { /* new-style class */ PyObject_HEAD_INIT(NULL) 0, /* ob_size */ "kinterbasdb.Transaction", /* tp_name */ sizeof(Transaction), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor) pyob_Transaction___del__, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_ITER, /* tp_flags */ 0, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ Transaction_methods, /* tp_methods */ NULL, /* tp_members */ Transaction_getters_setters, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ (initproc) Transaction_init, /* tp_init */ 0, /* tp_alloc */ pyob_Transaction_new, /* tp_new */ 0, /* tp_free */ 0, /* tp_is_gc */ 0, /* tp_bases */ 0, /* tp_mro */ 0, /* tp_cache */ 0, /* tp_subclasses */ 0 /* tp_weaklist */ }; static int init_kidb_transaction(void) { /* TransactionType is a new-style class, so PyType_Ready must be called * before its getters and setters will function. */ if (PyType_Ready(&TransactionType) < 0) { goto fail; } return 0; fail: /* This function is indirectly called by the module loader, which makes no * provision for error recovery. */ return -1; } /* init_kidb_transaction */ /*********** Transaction CLASS DEFINITION AND INITIALIZATION:END *************/ /** TransactionTracker MEMBER FUNC DEFINITIONS AND SUPPORTING FUNCS: BEGIN ***/ #include "_kisupport_lifo_linked_list.h" LIFO_LINKED_LIST_DEFINE_BASIC_METHODS_PYALLOC_NOQUAL( TransactionTracker, Transaction ) /*** TransactionTracker MEMBER FUNC DEFINITIONS AND SUPPORTING FUNCS: END ****/ kinterbasdb-3.3.0/typeconv_fixed_fixedpoint.py0000644000175000001440000000325611130647414021045 0ustar pcisarusers# KInterbasDB Python Package - Type Conv : Fixed/fixedpoint Module (3rd-party) # # Version 3.3 # # The following contributors hold Copyright (C) over their respective # portions of code (see license.txt for details): # # [Original Author (maintained through version 2.0-0.3.1):] # 1998-2001 [alex] Alexander Kuznetsov # [Maintainers (after version 2.0-0.3.1):] # 2001-2002 [maz] Marek Isalski # 2002-2006 [dsr] David Rushby # [Contributors:] # 2001 [eac] Evgeny A. 
Cherkashin # 2001-2002 [janez] Janez Jere __all__ = ( # kinterbasdb-native fixed point converters (old, precison_mode style): 'fixed_conv_in_imprecise', 'fixed_conv_in_precise', 'fixed_conv_out_imprecise', 'fixed_conv_out_precise', ) import sys # The fixedpoint module resides at: http://fixedpoint.sourceforge.net/ from fixedpoint import FixedPoint from kinterbasdb.k_exceptions import * _tenTo = [10**x for x in range(20)] del x ################################################################################ ## FIXED POINT ################################################################################ def fixed_conv_in_precise((val, scale)): # Allow implicit param conv: if val is None or isinstance(val, basestring): return val return int(val * _tenTo[abs(scale)]) fixed_conv_in_imprecise = fixed_conv_in_precise def fixed_conv_out_precise(x): if x is None: return None absScale = abs(x[1]) return FixedPoint(x[0], absScale) / _tenTo[absScale] fixed_conv_out_imprecise = fixed_conv_out_precise kinterbasdb-3.3.0/_kinterbasdb_exceptions.h0000644000175000001440000000475411130647414020261 0ustar pcisarusers/* KInterbasDB Python Package - Header File with References to DB-API Exceptions * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ /* This module provides access to the standard DB API exceptions types defined * by kinterbasdb (the types are accessible through global variables). * A typical kinterbasdb exception might be raised like this: * raise_exception(ProgrammingError, "This is the error message."); * * WARNING: * The global pointers to DB API exception types *declared* here (not * *defined* here) are only meant to be implicitly referenced in code that * compiles into the main kinterbasdb shared library (_kinterbasdb.dll or * _kinterbasdb.so). * Code such as _kiservices.c that compiles into a separate shared library * cannot refer to the exception types simply by including this header file. * Instead, such code must import the main _kinterbasdb shared library (where * the exception types reside) and refer to the exception types via * _kinterbasdb. For an example, see services.py and _kiservices.c. */ #ifndef _KINTERBASDB_EXCEPTIONS_H #define _KINTERBASDB_EXCEPTIONS_H /* YYY: 2006.01.21: MinGW used to have a problem with FB 2.0's fb_interpret * function, but no longer (with MinGW-GCC-3.4.5 and FB 2.0.0.12174). 
#if (defined(FIREBIRD_2_0_OR_LATER) && !defined(COMPILER_IS_MINGW_WIN32)) */ #ifdef FIREBIRD_2_0_OR_LATER #define USE_MODERN_INTERP_FUNC #endif #ifndef isc_stack_trace #define isc_stack_trace 335544842L #endif #include "Python.h" extern PyObject *Warning; extern PyObject *Error; extern PyObject *InterfaceError; extern PyObject *DatabaseError; extern PyObject *DataError; extern PyObject *OperationalError; extern PyObject *TransactionConflict; extern PyObject *ConduitWasClosed; extern PyObject *ConnectionTimedOut; extern PyObject *IntegrityError; extern PyObject *InternalError; extern PyObject *ProgrammingError; extern PyObject *NotSupportedError; #endif /* _KINTERBASDB_EXCEPTIONS_H */ kinterbasdb-3.3.0/_kisupport_platform.h0000644000175000001440000000411211130647414017460 0ustar pcisarusers/* KInterbasDB Python Package - Header File for Platform-Specific Support * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ #ifndef _KISUPPORT_PLATFORM_H #define _KISUPPORT_PLATFORM_H #include "_kinterbasdb.h" #include "pythread.h" #ifdef PLATFORM_WINDOWS #include typedef DWORD PlatformThreadIdType; typedef HANDLE PlatformThreadRefType; #define THREAD_REF_INVALID NULL typedef LPTHREAD_START_ROUTINE PlatformThreadFuncType; #define THREAD_FUNC_MODIFIER WINAPI typedef DWORD PlatformThreadFuncReturnType; #define THREAD_FUNC_RETURN_SUCCESS 0 #define THREAD_FUNC_RETURN_FAILURE 1 typedef CRITICAL_SECTION PlatformMutexType; #else /* If not Windows, assume POSIX. */ #include #include #include typedef pthread_t PlatformThreadIdType; typedef pthread_t PlatformThreadRefType; #define THREAD_REF_INVALID -1 typedef void * (*PlatformThreadFuncType)(void *); #define THREAD_FUNC_MODIFIER typedef void * PlatformThreadFuncReturnType; /* THREAD_FUNC_RETURN_SUCCESS is a meaningless, but non-NULL, pointer * value: */ #define THREAD_FUNC_RETURN_SUCCESS \ ((PlatformThreadFuncReturnType) (&null_connection)) #define THREAD_FUNC_RETURN_FAILURE \ ((PlatformThreadFuncReturnType) NULL) typedef pthread_mutex_t PlatformMutexType; static void millis_into_future_to_abstime( long millis, struct timespec *abstime ); #endif #define THREAD_ID_NONE ((PlatformThreadIdType) 0) #endif /* if not def _KISUPPORT_PLATFORM_H */ kinterbasdb-3.3.0/_kiconversion_from_db.c0000644000175000001440000001452611130647414017722 0ustar pcisarusers/* KInterbasDB Python Package - Implementation of Parameter Conversion DB->Py * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ /* This source file is designed to be directly included in _kiconversion.c, * without the involvement of a header file. 
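 * Worked example of the time conversion defined just below: an ISC_TIME
 * stores ten-thousandths of a second since midnight, so 12:34:56.7891 is
 * stored as 452967891; 452967891 % 10000 = 7891, and 7891 * 100 = 789100
 * microseconds.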
*/ /* The undocumented portion of ISC_TIME is ten-thousandths of a second: */ #define MICROSECONDS_FROM_ISC_TIME(t) \ (((t) % 10000) * 100) #define MICROSECONDS_FROM_ISC_TIMESTAMP_PTR(tp) \ (((tp)->timestamp_time % 10000) * 100) static PyObject *conv_out_char(char *data, size_t size) { return PyString_FromStringAndSize(data, SIZE_T_TO_PYTHON_SIZE(size)); } /* conv_out_char */ static PyObject *conv_out_varchar(char *data) { /* The first sizeof(short) bytes contain the size of the string. */ return PyString_FromStringAndSize( data + sizeof(short), (int) *( (short *)data ) ); } /* conv_out_varchar */ static PyObject *_conv_out_integer_types( PyObject *py_raw, boolean is_fixed_point, short scale ) { /* Reference ownership note: * This function is passed ownership of py_raw. In the non-fixed point case, * we return that reference. For fixed point, we transfer ownership of the * py_raw reference to the containing tuple, then return our owned reference * to the tuple. */ assert (py_raw != NULL); if (!is_fixed_point) { /* Simply return the integer value. */ return py_raw; } else { /* We're converting a fixed-point rather than an integer; return a 2-tuple * of the form: (value, scale) */ PyObject *fixed_tuple; PyObject *py_scale; fixed_tuple = PyTuple_New(2); if (fixed_tuple == NULL) { return NULL; } py_scale = PyInt_FromLong(scale); if (py_scale == NULL) { Py_DECREF(fixed_tuple); return NULL; } PyTuple_SET_ITEM(fixed_tuple, 0, py_raw); /* "steals" ref to py_raw */ PyTuple_SET_ITEM(fixed_tuple, 1, py_scale); /* "steals" ref to py_scale */ return fixed_tuple; } } /* _conv_out_integer_types */ static PyObject *conv_out_short_long( char *data, short data_type, boolean is_fixed_point, short scale ) { /* 2004.04.16:64BC: On x86_64/1.5.1pre1, ISC_LONG is actually an int. */ long conv_long = (long)( data_type == SQL_SHORT ? ( *((ISC_SHORT *) data) ) : ( *((ISC_LONG *) data) ) ); PyObject *py_int = PyInt_FromLong(conv_long); if (py_int == NULL) { return NULL; } return _conv_out_integer_types(py_int, is_fixed_point, scale); } /* conv_out_short_long */ #ifdef INTERBASE_6_OR_LATER static PyObject *conv_out_int64( char *data, boolean is_fixed_point, short scale ) { const LONG_LONG dataLL = *((LONG_LONG *) data); PyObject *py_int = PythonIntOrLongFrom64BitValue(dataLL); if (py_int == NULL) { return NULL; } return _conv_out_integer_types(py_int, is_fixed_point, scale); } /* conv_out_int64 */ #endif /* INTERBASE_6_OR_LATER */ static PyObject *conv_out_floating(const double raw, const unsigned short dialect, const short scale ) { /* It's possible that a user would define a field as DECIMAL/NUMERIC with * a scale of zero, but the API provides no way for us to detect that. */ if (dialect >= 3 || scale == 0) { return PyFloat_FromDouble(raw); }{ /* The value being converted is from a field that's logically fixed-point * rather than floating-point. This can only arise in pre-dialect-3 * databases. */ /* This is a perfect application for Py_BuildValue, but it doesn't * support 64-bit integers. 
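 * Worked example: a pre-dialect-3 NUMERIC(9,2) value of 123.45 reaches this
 * function as the double 123.45 with scale -2, and is returned as the tuple
 * (12345, -2): the value scaled by 10**2, paired with the scale.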
*/ PyObject *fixed_tuple; PyObject *py_scaled_integer; PyObject *py_scale; fixed_tuple = PyTuple_New(2); if (fixed_tuple != NULL) { py_scaled_integer = PythonIntOrLongFrom64BitValue( (LONG_LONG) (raw * pow(10.0f, (double) -scale)) ); if (py_scaled_integer == NULL) { Py_DECREF(fixed_tuple); fixed_tuple = NULL; } else { py_scale = PyInt_FromLong(scale); if (py_scale == NULL) { Py_DECREF(fixed_tuple); fixed_tuple = NULL; Py_DECREF(py_scaled_integer); } else { PyTuple_SET_ITEM(fixed_tuple, 0, py_scaled_integer); /* "steals" ref */ PyTuple_SET_ITEM(fixed_tuple, 1, py_scale); /* "steals" ref */ } } } return fixed_tuple; }} /* conv_out_floating */ /* Date/time types: */ static PyObject *conv_out_timestamp(char *data) { struct tm c_tm; int ported_ints[2]; /* sizeof(ported_ints) == sizeof(ISC_TIMESTAMP) */ int microseconds; ENTER_GDAL ported_ints[0] = isc_vax_integer(data, sizeof(int)); ported_ints[1] = isc_vax_integer(data + sizeof(int), sizeof(int)); isc_decode_timestamp((ISC_TIMESTAMP *) &ported_ints, &c_tm); microseconds = MICROSECONDS_FROM_ISC_TIMESTAMP_PTR((ISC_TIMESTAMP *) data); LEAVE_GDAL return Py_BuildValue("(iiiiiii)", c_tm.tm_year+1900, c_tm.tm_mon+1, c_tm.tm_mday, c_tm.tm_hour, c_tm.tm_min, c_tm.tm_sec, microseconds ); } /* conv_out_timestamp */ #ifdef INTERBASE_6_OR_LATER static PyObject *conv_out_date(char *data) { struct tm c_tm; int data_as_portable_int; ENTER_GDAL data_as_portable_int = isc_vax_integer(data, sizeof(int)); isc_decode_sql_date((ISC_DATE *) &data_as_portable_int, &c_tm); LEAVE_GDAL return Py_BuildValue("(iii)", c_tm.tm_year+1900, c_tm.tm_mon+1, c_tm.tm_mday); } /* conv_out_date */ static PyObject *conv_out_time(char *data) { struct tm c_tm; int data_as_portable_int; int microseconds; ENTER_GDAL data_as_portable_int = isc_vax_integer(data, sizeof(int)); isc_decode_sql_time((ISC_TIME *) &data_as_portable_int, &c_tm); microseconds = MICROSECONDS_FROM_ISC_TIME(*((ISC_TIME *) data)); LEAVE_GDAL return Py_BuildValue("(iiii)", c_tm.tm_hour, c_tm.tm_min, c_tm.tm_sec, microseconds ); } /* conv_out_time */ #endif /* INTERBASE_6_OR_LATER */ #define conv_out_boolean(data) \ PyBool_FromLong( *((ISC_BOOLEAN *) (data)) ) kinterbasdb-3.3.0/_kisupport_platform_windows.c0000644000175000001440000000515711130647414021237 0ustar pcisarusers/* KInterbasDB Python Package - Implementation of Platform Infrastructure for * Windows * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ /* This source file is designed to be directly included in _kievents.c, * without the involvement of a header file. */ /* NOTE: THE CODE IN THIS FILE IS TYPICALLY EXECUTED WHEN THE GIL IS NOT HELD, * SO IT MUST NOT CALL THE PYTHON C API! 
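 * The wrappers defined below follow the usual pattern (a sketch):
 *   PlatformMutexType m;
 *   Mutex_init(&m);
 *   Mutex_lock(&m);
 *   ... critical section ...
 *   Mutex_unlock(&m);
 *   Mutex_close(&m);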
*/ #ifdef ENABLE_CONNECTION_TIMEOUT static PlatformThreadRefType Thread_current_ref() { return GetCurrentThread(); } /* Thread_current_ref */ #endif /* ENABLE_CONNECTION_TIMEOUT */ static PlatformThreadIdType Thread_current_id() { return GetCurrentThreadId(); } /* Thread_current_id */ static boolean Thread_ids_equal( PlatformThreadIdType a, PlatformThreadIdType b ) { return (boolean) (a == b); } /* Thread_ids_equal */ static PlatformThreadRefType Thread_create( PlatformThreadFuncType func, void *func_arg, PlatformThreadIdType *store_thread_id ) { HANDLE t = CreateThread( NULL, /* default security attributes */ 0, /* default stack size */ func, /* thread should run this func */ func_arg, /* passing this arg */ 0, /* default creation flags (start immediately) */ store_thread_id ); return t ? t : THREAD_REF_INVALID; } /* Thread_create */ static long Thread_join(PlatformThreadRefType t) { return WaitForSingleObject(t, INFINITE) == WAIT_OBJECT_0 ? 0 : -1; } /* Thread_join */ #ifdef ENABLE_CONNECTION_TIMEOUT static void sleep_millis(unsigned int millis) { Sleep(millis); } /* sleep_millis */ #endif /* ENABLE_CONNECTION_TIMEOUT */ static long Mutex_init(PlatformMutexType *cs) { InitializeCriticalSection(cs); return 0; } /* Mutex_init */ static long Mutex_close(PlatformMutexType *cs) { DeleteCriticalSection(cs); return 0; } /* Mutex_destroy */ static long Mutex_lock(PlatformMutexType *cs) { EnterCriticalSection(cs); return 0; } /* Mutex_lock */ static long Mutex_unlock(PlatformMutexType *cs) { LeaveCriticalSection(cs); return 0; } /* Mutex_unlock */ kinterbasdb-3.3.0/typeconv_datetime_naked.py0000644000175000001440000000601511130647414020447 0ustar pcisarusers# KInterbasDB Python Package - Type Conv : DateTime/Minimal # # Version 3.3 # # The following contributors hold Copyright (C) over their respective # portions of code (see license.txt for details): # # [Original Author (maintained through version 2.0-0.3.1):] # 1998-2001 [alex] Alexander Kuznetsov # [Maintainers (after version 2.0-0.3.1):] # 2001-2002 [maz] Marek Isalski # 2002-2006 [dsr] David Rushby # [Contributors:] # 2001 [eac] Evgeny A. Cherkashin # 2001-2002 [janez] Janez Jere __all__ = ( # kinterbasdb-native date and time converters: 'date_conv_in', 'date_conv_out', 'time_conv_in', 'time_conv_out', 'timestamp_conv_in', 'timestamp_conv_out', # DB API 2.0 standard date and time type constructors: 'Date', 'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks', 'TimestampFromTicks', ) import sys, time from kinterbasdb.k_exceptions import * ################################################################################ ## DATE AND TIME ################################################################################ # kinterbasdb-native date and time converters: def date_conv_in(dateObj): # Allow implicit param conv from string: if dateObj is None or isinstance(dateObj, basestring): return dateObj if not isinstance(dateObj, tuple): raise InterfaceError( 'Cannot convert object of type %s to native kinterbasdb tuple.' % str(type(dateObj)) ) return dateObj def date_conv_out(dateTuple): return dateTuple def time_conv_in(timeObj): # Allow implicit param conv from string: if timeObj is None or isinstance(timeObj, basestring): return timeObj if not isinstance(timeObj, tuple): raise InterfaceError( 'Cannot convert object of type %s to native kinterbasdb tuple.' 
% str(type(timeObj)) ) return timeObj def time_conv_out(timeTuple): return timeTuple def timestamp_conv_in(timestampObj): # Allow implicit param conv from string: if timestampObj is None or isinstance(timestampObj, basestring): return timestampObj if not isinstance(timestampObj, tuple): raise InterfaceError( 'Cannot convert object of type %s to native kinterbasdb tuple.' % str(type(timestampObj)) ) return timestampObj def timestamp_conv_out(timestampTuple): return timestampTuple # DB API 2.0 standard date and time type constructors: def Date(year, month, day): return (year, month, day) def Time(hour, minute, second): return (hour, minute, second) def Timestamp(year, month, day, hour, minute, second): return (year, month, day, hour, minute, second) def DateFromTicks(ticks): return time.localtime(ticks)[:3] def TimeFromTicks(ticks): return time.localtime(ticks)[3:6] def TimestampFromTicks(ticks): return time.localtime(ticks)[:6] kinterbasdb-3.3.0/_kilock.h0000644000175000001440000001750611130647414015003 0ustar pcisarusers/* KInterbasDB Python Package - Header File for Central Concurrency Facilities * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ /************** CONCEPTUAL NOTES (for Python client programmers) ************** * See docs/usage.html#special_issue_concurrency */ /************* IMPLEMENTATION NOTES (for KInterbasDB maintainers) ************* * * 1. The GDAL (which is active at Level 1 and inactive at Level 2) and the * GCDL (which is inactive at Level 1 and active at Level 2) are conceptually * separate, but can be implemented as a single lock object due to the fact * that the two conceptual locks are never active at the same time. * To avoid confusion in the bulk of the kinterbasdb code, and to prepare * for the potential introduction of concurrency levels greater than 2, the * macros for manipulating the GDAL and the GCDL are arranged to imply that * the two conceptually separate locks are truly different objects. */ #ifndef _KILOCK_H #define _KILOCK_H #include "_kinterbasdb.h" #ifdef ENABLE_CONCURRENCY #include "pythread.h" extern PyThread_type_lock _global_db_client_lock; /* The following aliases are defined due to factors discussed in * Implementation Note #1 (see above). */ #define global_GDAL _global_db_client_lock #define global_GCDL _global_db_client_lock /*************************** Python GIL ************************/ /* The next two macros are for manipulating the GIL using an explicit thread * state. For example, a thread that's bootstrapped from C, and only enters * the GIL occasionally, might use these. */ #define ENTER_GIL_USING_THREADSTATE(ts) \ debug_print3("GIL-> ?ACQUIRE: %ld file %s line %d\n", \ PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); \ PyEval_RestoreThread(ts); \ debug_print3("GIL-> !!ACQUIRED: %ld file %s line %d\n", \ PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); #define LEAVE_GIL_USING_THREADSTATE(ts) \ /* Notice that ts is not actually used in the current implementation of \ * this macro. 
*/ \ debug_print3("GIL-> ?RELEASE: %ld file %s line %d\n", \ PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); \ PyEval_SaveThread(); \ debug_print3("GIL-> !RELEASED: %ld file %s line %d\n", \ PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); #define LEAVE_GIL_WITHOUT_AFFECTING_DB \ debug_print3("GIL-> ?RELEASE: %ld file %s line %d\n", \ PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); \ Py_BEGIN_ALLOW_THREADS \ debug_print3("GIL-> !RELEASED: %ld file %s line %d\n", \ PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); #define ENTER_GIL_WITHOUT_AFFECTING_DB \ debug_print3("GIL-> ?ACQUIRE: %ld file %s line %d\n", \ PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); \ Py_END_ALLOW_THREADS \ debug_print3("GIL-> !!ACQUIRED: %ld file %s line %d\n", \ PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); #define OPEN_LOCAL_GIL_MANIPULATION_INFRASTRUCTURE \ { PyThreadState *_save = NULL; #define CLOSE_LOCAL_GIL_MANIPULATION_INFRASTRUCTURE } #define LEAVE_GIL_WITHOUT_AFFECTING_DB_AND_WITHOUT_STARTING_CODE_BLOCK \ debug_print3("GIL-> ?RELEASE: %ld file %s line %d\n", \ PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); \ Py_UNBLOCK_THREADS \ debug_print3("GIL-> !!RELEASED: %ld file %s line %d\n", \ PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); #define ENTER_GIL_WITHOUT_AFFECTING_DB_AND_WITHOUT_ENDING_CODE_BLOCK \ debug_print3("GIL-> ?ACQUIRE: %ld file %s line %d\n", \ PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); \ Py_BLOCK_THREADS \ debug_print3("GIL-> !!ACQUIRED: %ld file %s line %d\n", \ PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); /*************************** GDAL ************************/ #define ENTER_GDAL \ { \ LEAVE_GIL_WITHOUT_AFFECTING_DB \ ENTER_GDAL_WITHOUT_LEAVING_PYTHON #define LEAVE_GDAL \ LEAVE_GDAL_WITHOUT_ENTERING_PYTHON \ ENTER_GIL_WITHOUT_AFFECTING_DB \ } #define ENTER_GDAL_WITHOUT_LEAVING_PYTHON \ if (global_concurrency_level == 1) { \ debug_print3("GDAL-> ?ACQUIRE: %ld file %s line %d\n", \ PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); \ PyThread_acquire_lock(global_GDAL, WAIT_LOCK); \ debug_print3("GDAL-> !!ACQUIRED: %ld file %s line %d\n", \ PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); \ } #define LEAVE_GDAL_WITHOUT_ENTERING_PYTHON \ if (global_concurrency_level == 1) { \ debug_print3("GDAL-> ?RELEASE: %ld file %s line %d\n", \ PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); \ PyThread_release_lock(global_GDAL); \ debug_print3("GDAL-> !RELEASED: %ld file %s line %d\n", \ PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); \ } #define LEAVE_GDAL_WITHOUT_ENDING_CODE_BLOCK \ LEAVE_GDAL_WITHOUT_ENTERING_PYTHON \ ENTER_GIL_WITHOUT_AFFECTING_DB_AND_WITHOUT_ENDING_CODE_BLOCK /*************************** GCDL ************************/ #define ENTER_GCDL \ { \ LEAVE_GIL_WITHOUT_AFFECTING_DB \ ENTER_GCDL_WITHOUT_LEAVING_PYTHON #define LEAVE_GCDL \ LEAVE_GCDL_WITHOUT_ENTERING_PYTHON \ ENTER_GIL_WITHOUT_AFFECTING_DB \ } #ifdef ENABLE_FREE_CONNECTION_AND_DISCONNECTION /* Connection and disconnection are not serialized. */ #define ENTER_GCDL_WITHOUT_LEAVING_PYTHON #define LEAVE_GCDL_WITHOUT_ENTERING_PYTHON #else /* Serialize connection and disconnection. If we're operating at a * concurrency_level < 2, the GDAL already performs this serialization. 
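 * Call sites bracket client-library calls with these macro pairs; for
 * example, elsewhere in kinterbasdb (_kiconversion_array.c):
 *   ENTER_GDAL
 *   isc_array_put_slice(status_vector, ...);
 *   LEAVE_GDAL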
*/ #define ENTER_GCDL_WITHOUT_LEAVING_PYTHON \ if (global_concurrency_level > 1) { \ debug_print3("GCDL-> ?ACQUIRE: %ld file %s line %d\n", \ PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); \ PyThread_acquire_lock(global_GCDL, WAIT_LOCK); \ debug_print3("GCDL-> !!ACQUIRED: %ld file %s line %d\n", \ PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); \ } #define LEAVE_GCDL_WITHOUT_ENTERING_PYTHON \ if (global_concurrency_level > 1) { \ debug_print3("GCDL-> ?RELEASE: %ld file %s line %d\n", \ PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); \ PyThread_release_lock(global_GCDL); \ debug_print3("GCDL-> !RELEASED: %ld file %s line %d\n", \ PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); \ } #endif /* ENABLE_FREE_CONNECTION_AND_DISCONNECTION */ #else /* ndef ENABLE_CONCURRENCY */ #define ENTER_GDAL #define LEAVE_GDAL #define ENTER_GCDL #define LEAVE_GCDL #define ENTER_GIL_USING_THREADSTATE(ts) #define LEAVE_GIL_USING_THREADSTATE(ts) #define LEAVE_GIL_WITHOUT_AFFECTING_DB #define ENTER_GIL_WITHOUT_AFFECTING_DB #define ENTER_GIL_WITHOUT_AFFECTING_DB_AND_WITHOUT_ENDING_CODE_BLOCK #define ENTER_GDAL_WITHOUT_LEAVING_PYTHON #define LEAVE_GDAL_WITHOUT_ENTERING_PYTHON #define LEAVE_GDAL_WITHOUT_ENDING_CODE_BLOCK #define ENTER_GCDL_WITHOUT_LEAVING_PYTHON #define LEAVE_GCDL_WITHOUT_ENTERING_PYTHON #endif /* def ENABLE_CONCURRENCY */ #endif /* not def _KILOCK_H */ kinterbasdb-3.3.0/_kiconversion_array.c0000644000175000001440000012554411130647414017433 0ustar pcisarusers/* KInterbasDB Python Package - Implementation of Array Conversion (both ways) * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ /* This source file is designed to be directly included in _kiconversion.c, * without the involvement of a header file. */ /* Note: kinterbasdb's array-handling code does not implicitly transform array * shape (i.e., pad too-short sequences with NULL), because the database engine * does not support NULL array elements (see IB 6 API guide, page 153). */ /* The isc_array_lookup_bounds function, at least in FB 1.5 and 2.0, has a * concurrency bug that causes severe problems if the function is called from * multiple threads at once. (In the lookup_desc function in array.epp, a * global database handle is set to the one the client code has passed in, and * then some GPRE-generated code is run. Even synchronizing all calls to * isc_array_lookup_bounds doesn't fix the problem, because lookup_desc tries * to clear open requests its previous handle.) * * On 2006.01.30, DSR reimplemented the functionality of * isc_array_lookup_bounds in kinterbasdb (chiefly in _array_descriptor.py) in * order to bypass the concurrency bug. Defining * USE_OFFICIAL_ISC_ARRAY_LOOKUP_BOUNDS will restore the old code, which is * slower, in addition to being buggy. */ /* #define USE_OFFICIAL_ISC_ARRAY_LOOKUP_BOUNDS */ /******************** HARD-CODED LIMITS:BEGIN ********************/ /* MAXIMUM_NUMBER_OF_ARRAY_DIMENSIONS is an IB/Firebird engine constraint, not * something we could overcome here in kinterbasdb. 
*/ #define MAXIMUM_NUMBER_OF_ARRAY_DIMENSIONS 16 /******************** HARD-CODED LIMITS:END ********************/ /******************** CONVENIENCE DEFS:BEGIN ********************/ #define ARRAY_ROW_MAJOR 0 #define ARRAY_COLUMN_MAJOR 1 #define DIMENSION_SIZE_END_MARKER -1 /* VARCHAR array elements are stored differently from the way conventional * VARCHAR fields are stored. Instead of 2 bytes at the beginning containing * the size of the string value, array-element VARCHARs apparently have 2 null * bytes at the end that are not used. */ #define _ADJUST_ELEMENT_SIZE_FOR_VARCHAR_IF_NEC(data_type, size_of_el) \ if (data_type == blr_varying || data_type == blr_varying2) { \ size_of_el += 2; \ } #define SQLSUBTYPE_DETERMINATION_ERROR -999 /******************** CONVENIENCE DEFS:END ********************/ /******************** FUNCTION PROTOTYPES:BEGIN ********************/ /* Output functions: */ static PyObject *conv_out_array( Cursor *cursor, short sqlvar_index, ISC_QUAD *array_id, ISC_STATUS *status_vector, isc_db_handle *db_handle, isc_tr_handle *trans_handle_p, char *rel_name, short rel_name_length, char *field_name, short field_name_length ); static PyObject *conv_out_array_element( Cursor *cursor, short sqlvar_index, char *data, short data_type, size_t size_of_single_element, short scale, ISC_STATUS *status_vector, isc_db_handle *db_handle, isc_tr_handle *trans_handle, char *rel_name, short rel_name_length, char *field_name, short field_name_length ); static PyObject *_extract_db_array_buffer_to_pyseq( Cursor *cursor, short sqlvar_index, char **data_slot, short *dimension_sizes_ptr, /* Boilerplate parameters: */ short data_type, size_t size_of_single_element, short scale, ISC_STATUS *status_vector, isc_db_handle *db_handle, isc_tr_handle *trans_handle_p, char *rel_name, short rel_name_length, char *field_name, short field_name_length ); /* Input functions: */ static InputStatus conv_in_array( PyObject *py_input, ISC_QUAD **array_id_slot, Cursor *cursor, short sqlvar_index, char *rel_name, short rel_name_length, char *field_name, short field_name_length ); static InputStatus conv_in_array_element( PyObject *py_input, char **data_slot, unsigned short dialect, short data_type, short data_subtype, size_t size_of_single_element, short scale, PyObject *converter, ISC_STATUS *status_vector, isc_db_handle *db_handle, isc_tr_handle *trans_handle_p, Cursor *cur ); static InputStatus _extract_pyseq_to_db_array_buffer( PyObject *py_seq, short *dimension_sizes_ptr, /* Boilerplate parameters: */ char **data_slot, unsigned short dialect, short data_type, short data_subtype, size_t size_of_single_element, short scale, PyObject *converter, ISC_STATUS *status_vector, isc_db_handle *db_handle, isc_tr_handle *trans_handle_p, Cursor *cur ); /* Functions common to both input and output: */ static ISC_ARRAY_DESC *_look_up_array_descriptor( Transaction *trans, /* These strings aren't null-terminated: */ char *sqlvar_rel_name, short sqlvar_rel_name_length, char *sqlvar_field_name, short sqlvar_field_name_length ); static short *_extract_dimensions_sizes( ISC_ARRAY_DESC *desc, /* output param: */ int *total_number_of_elements ); static short _determine_sqlsubtype_for_array( Transaction *trans, char *rel_name, short rel_name_length, char *field_name, short field_name_length ); /******************** FUNCTION PROTOTYPES:END ********************/ /******************** INPUT FUNCTIONS:BEGIN ********************/ static InputStatus conv_in_array( PyObject *py_input, ISC_QUAD **array_id_slot, Cursor *cursor, short 
sqlvar_index, char *rel_name, short rel_name_length, char *field_name, short field_name_length ) { InputStatus status = INPUT_OK; ISC_ARRAY_DESC *desc; short *dimensions = NULL; unsigned short number_of_dimensions; int total_number_of_elements = 0; short data_type = -1; size_t size_of_single_element; char *source_buf = NULL; char *source_buf_walker = NULL; size_t source_buf_size; ISC_STATUS *status_vector = cursor->status_vector; isc_db_handle *db_handle = Transaction_get_db_handle_p(cursor->trans); isc_tr_handle *trans_handle_p = Transaction_get_handle_p(cursor->trans); PyObject *converter = NULL; short data_subtype = -1; /* Read the database array descriptor for this field. */ desc = _look_up_array_descriptor(cursor->trans, rel_name, rel_name_length, field_name, field_name_length ); if (desc == NULL) { goto fail; } data_type = desc->array_desc_dtype; number_of_dimensions = desc->array_desc_dimensions; size_of_single_element = desc->array_desc_length; _ADJUST_ELEMENT_SIZE_FOR_VARCHAR_IF_NEC(data_type, size_of_single_element); /* Populate the short-array named dimensions. (The _extract_dimensions_sizes * function also sets total_number_of_elements to its appropriate value.) */ dimensions = _extract_dimensions_sizes(desc, &total_number_of_elements); if (dimensions == NULL) { goto fail; } /* The database engine doesn't allow zero-element arrays. */ assert (total_number_of_elements > 0); /* Validate the incoming Python sequence to ensure that its shape matches * that defined by the database array descriptor for this field. See comment * near top of this file for explanation of why kinterbasdb doesn't * automatically None-pad sequences that have too few elements. */ source_buf_size = size_of_single_element * total_number_of_elements; source_buf = kimem_main_malloc(source_buf_size); if (source_buf == NULL) { goto fail; } source_buf_walker = source_buf; assert (data_type != -1); data_subtype = _determine_sqlsubtype_for_array(cursor->trans, rel_name, rel_name_length, field_name, field_name_length ); if (data_subtype == SQLSUBTYPE_DETERMINATION_ERROR) { goto fail; } Transaction_stats_record_ps_executed(cursor->trans); { short scale = desc->array_desc_scale; /* Find the dynamic type translator (if any) for this array's type. Note * that the translator is applied to individual elements of the array, not * to the array as a whole. */ converter = cursor_get_in_converter(cursor, sqlvar_index, data_type, data_subtype, scale, TRUE ); if (converter == NULL) { goto fail; } /* At this point, converter is either a Python callable, if there was a * registered converter for this array's element type, or Py_None if there * was not. */ assert (source_buf_walker != NULL); status = _extract_pyseq_to_db_array_buffer( py_input, dimensions, /* For conversion: */ &source_buf_walker, Transaction_get_dialect(cursor->trans), data_type, data_subtype, size_of_single_element, scale, converter, status_vector, db_handle, trans_handle_p, cursor ); if (status != INPUT_OK) { goto fail; } } /* Successful completion requires the entire buffer to have been filled: */ assert (((size_t) (source_buf_walker - source_buf)) == source_buf_size); /* Call isc_array_put_slice to store the incoming value in the database. * A NULL array id tells isc_array_put_slice to "create or replace" the * existing array in the database (kinterbasdb does not support modifying * segments of an existing array--instead, it replaces the old array value in * the database with a new one). 
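 * At the Python level, this corresponds to binding a nested sequence whose
 * shape exactly matches the column's declared dimensions (a sketch; table
 * and column names are hypothetical):
 *   cur.execute("INSERT INTO t (arr) VALUES (?)", ([[1, 2], [3, 4]],))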
*/ assert (*array_id_slot == NULL); { ISC_QUAD *array_id; array_id = *array_id_slot = kimem_main_malloc(sizeof(ISC_QUAD)); if (array_id == NULL) { goto fail; } /* "Nullify" the array id: */ /* 2003.01.25: In FB 1.5a5, isc_quad_high/isc_quad_low no longer work, * but gds_quad_high/gds_quad_low work with both 1.0 and 1.5a5. */ array_id->gds_quad_high = 0; array_id->gds_quad_low = 0; ENTER_GDAL isc_array_put_slice(status_vector, db_handle, trans_handle_p, array_id, desc, source_buf, (ISC_LONG *) &source_buf_size ); LEAVE_GDAL if (DB_API_ERROR(status_vector)) { goto fail_operationerror; } } /* array_id_slot (a pointer to a pointer passed in by the caller) is freshly * initialized by isc_array_put_slice. In effect, it is "passed back" to the * caller, so that it can be accessed in the XSQLVAR when the statement is * executed. */ /* We've stored the array successfully; now clean up. */ goto cleanup; fail_operationerror: raise_sql_exception(OperationalError, "Array input conversion: ", status_vector ); /* Fall through to fail. */ fail: status = INPUT_ERROR; /* Fall through to cleanup. */ cleanup: assert (status == INPUT_OK ? PyErr_Occurred() == NULL : PyErr_Occurred() != NULL ); #ifdef USE_OFFICIAL_ISC_ARRAY_LOOKUP_BOUNDS if (desc != NULL) { kimem_main_free(desc); } #endif if (dimensions != NULL) { kimem_main_free(dimensions); } if (source_buf != NULL) { kimem_main_free(source_buf); } if (status != INPUT_OK && *array_id_slot != NULL) { kimem_main_free(*array_id_slot); *array_id_slot = NULL; } return status; } /* conv_in_array */ #define CONV_IN_ARRAY_ELEMENT_TRY_INPUT_CONVERSION(conversion_code) \ TRY_INPUT_CONVERSION( (conversion_code), fail ); /* A "standard" DB type code is like SQL_LONG rather than blr_long. */ #define CONV_IN_ARRAY_ELEMENT_CONVERT_INTEGER_TYPE(standard_db_type_code) \ CONV_IN_ARRAY_ELEMENT_TRY_INPUT_CONVERSION( \ conv_in_internal_integer_types_array(py_input_converted, data_slot, \ dialect, standard_db_type_code, data_subtype, scale, cur \ ) \ ); static InputStatus conv_in_array_element( PyObject *py_input, char **data_slot, unsigned short dialect, short data_type, short data_subtype, size_t size_of_single_element, short scale, PyObject *converter, ISC_STATUS *status_vector, isc_db_handle *db_handle, isc_tr_handle *trans_handle_p, Cursor *cur ) { InputStatus status; PyObject *py_input_converted; assert (py_input != NULL); assert (*data_slot != NULL); py_input_converted = dynamically_type_convert_input_obj_if_necessary( py_input, TRUE, /* it IS an array element */ dialect, data_type, data_subtype, scale, converter ); if (py_input_converted == NULL) { goto fail; } switch (data_type) { case blr_text: case blr_text2: CONV_IN_ARRAY_ELEMENT_TRY_INPUT_CONVERSION( conv_in_text_array(data_slot, size_of_single_element, ' ') ); break; case blr_varying: case blr_varying2: CONV_IN_ARRAY_ELEMENT_TRY_INPUT_CONVERSION( conv_in_text_array(data_slot, size_of_single_element, /* Unlike normal VARCHAR field values, VARCHAR array elements are * stored at a constant length, but padded with null characters: */ '\0' ) ); break; case blr_short: CONV_IN_ARRAY_ELEMENT_CONVERT_INTEGER_TYPE(SQL_SHORT); break; case blr_long: CONV_IN_ARRAY_ELEMENT_CONVERT_INTEGER_TYPE(SQL_LONG); break; #ifdef INTERBASE_6_OR_LATER case blr_int64: CONV_IN_ARRAY_ELEMENT_CONVERT_INTEGER_TYPE(SQL_INT64); break; #endif /* INTERBASE_6_OR_LATER */ case blr_float: CONV_IN_ARRAY_ELEMENT_TRY_INPUT_CONVERSION( conv_in_float_array(py_input_converted, data_slot, cur) ); break; case blr_double: case blr_d_float: 
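/* VAX D-float elements are converted along the same path as ordinary
 * doubles: */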
CONV_IN_ARRAY_ELEMENT_TRY_INPUT_CONVERSION( conv_in_double_array(py_input_converted, data_slot, cur) ); break; case blr_timestamp: CONV_IN_ARRAY_ELEMENT_TRY_INPUT_CONVERSION( conv_in_timestamp_array(py_input_converted, data_slot, cur) ); break; #ifdef INTERBASE_6_OR_LATER case blr_sql_date: CONV_IN_ARRAY_ELEMENT_TRY_INPUT_CONVERSION( conv_in_date_array(py_input_converted, data_slot, cur) ); break; case blr_sql_time: CONV_IN_ARRAY_ELEMENT_TRY_INPUT_CONVERSION( conv_in_time_array(py_input_converted, data_slot, cur) ); break; #endif /* INTERBASE_6_OR_LATER */ case blr_boolean_dtype: CONV_IN_ARRAY_ELEMENT_TRY_INPUT_CONVERSION( conv_in_boolean_array(py_input_converted, data_slot) ); break; /* Currently, none of the following types is supported, because it's not * clear how one would create such a field via SQL DDL. As far as I can * tell, this makes the types below useless for a client interface such as * kinterbasdb. */ case blr_quad: case blr_blob: case blr_blob_id: raise_exception(NotSupportedError, "kinterbasdb does not support arrays of arrays or arrays of blobs" " because it's not clear how one would create such a field via SQL." ); goto fail; /* NULL-terminated string: */ case blr_cstring: case blr_cstring2: raise_exception(NotSupportedError, "kinterbasdb does not support blr_cstring arrays because it's not" " clear how one would create such a field via SQL, or even why it" " would be desirable (in light of the existence of CHAR and VARCHAR" " arrays)." ); goto fail; default: raise_exception(NotSupportedError, "kinterbasdb does not support the input conversion of arrays of this" " type. " KIDB_REPORT " " KIDB_HOME_PAGE ); goto fail; } /* end of switch on data_type */ /* Success: */ status = INPUT_OK; goto cleanup; fail: status = INPUT_ERROR; /* Fall through to cleanup. */ cleanup: assert (status == INPUT_OK ? 
PyErr_Occurred() == NULL : PyErr_Occurred() != NULL ); Py_XDECREF(py_input_converted); return status; } /* conv_in_array_element */ #define _EXTRACT_SEQ__GET_SEQ_EL_WITH_EXCEPTION(seq, index, target) \ target = PySequence_GetItem(seq, index); \ if (target == NULL) { \ PyObject *err_msg = PyString_FromFormat("Array input conversion:" \ " unable to retrieve element %d of input sequence.", index \ ); \ if (err_msg == NULL) { goto fail; } \ raise_exception(InterfaceError, PyString_AS_STRING(err_msg)); \ Py_DECREF(err_msg); \ goto fail; \ } static InputStatus _extract_pyseq_to_db_array_buffer( /* For validation: */ PyObject *py_seq, short *dimension_sizes_ptr, /* For conversion: */ char **data_slot, unsigned short dialect, short data_type, short data_subtype, size_t size_of_single_element, short scale, PyObject *converter, ISC_STATUS *status_vector, isc_db_handle *db_handle, isc_tr_handle *trans_handle_p, Cursor *cur ) { InputStatus status; int i; int py_seq_len; int required_length_of_this_dimension = (int) *dimension_sizes_ptr; assert (*data_slot != NULL); assert (required_length_of_this_dimension > 0); if (!PySequence_Check(py_seq) || PyString_Check(py_seq)) { PyObject *input_py_obj_type = PyObject_Type(py_seq); if (input_py_obj_type != NULL) { PyObject *input_py_obj_type_repr = PyObject_Repr(input_py_obj_type); if (input_py_obj_type_repr != NULL) { PyObject *err_msg = PyString_FromFormat( "Array input conversion: type error:" " input sequence must be a Python sequence other than string, not" " a %s", PyString_AS_STRING(input_py_obj_type_repr) ); if (err_msg != NULL) { raise_exception(InterfaceError, PyString_AS_STRING(err_msg)); Py_DECREF(err_msg); } Py_DECREF(input_py_obj_type_repr); } Py_DECREF(input_py_obj_type); } goto fail; } /* if not appropriate seq */ { const Py_ssize_t py_seq_len_ss = PySequence_Length(py_seq); if (py_seq_len_ss == -1) { goto fail; } else if (py_seq_len_ss > INT_MAX) { raise_exception(NotSupportedError, "The database API does not yet" " officially support arrays larger than 2 GB." ); goto fail; } py_seq_len = (int) py_seq_len_ss; } if (py_seq_len != required_length_of_this_dimension) { PyObject *err_msg = PyString_FromFormat("Array input conversion: the input" " sequence is not appropriately shaped (current dimension requires" " input sequence of exactly %d elements, but actual input sequence" " has%s%d elements).%s", required_length_of_this_dimension, (py_seq_len < required_length_of_this_dimension ? " only " : ""), py_seq_len, (py_seq_len < required_length_of_this_dimension ? " kinterbasdb" " cannot automatically pad too-short input sequences because the" " database engine does not allow elements of an array to be NULL." : "" ) ); if (err_msg != NULL) { raise_exception(InterfaceError, PyString_AS_STRING(err_msg)); Py_DECREF(err_msg); } goto fail; } else { short *next_dimension_size_ptr = dimension_sizes_ptr + 1; if (*next_dimension_size_ptr == DIMENSION_SIZE_END_MARKER) { /* py_seq contains "leaf objects" (input values rather than * subsequences). Convert each "leaf object" from its Pythonic form to * its DB-internal representation; store the result in the raw array * source buffer. 
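*
* Worked example (added commentary; values hypothetical): with
* dimension_sizes_ptr pointing at {2, 3, DIMENSION_SIZE_END_MARKER} and
* py_seq == [[1, 2, 3], [4, 5, 6]], the outer call validates the length-2
* sequence and recurses; each inner call sees {3, END_MARKER}, so it takes
* the leaf branch below, converting three elements and advancing *data_slot
* by size_of_single_element after each one.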
*/ int conv_status_for_this_value; for (i = 0; i < py_seq_len; i++) { PyObject *py_input; _EXTRACT_SEQ__GET_SEQ_EL_WITH_EXCEPTION(py_seq, i, py_input); /* MEAT: */ assert (*data_slot != NULL); conv_status_for_this_value = conv_in_array_element( py_input, data_slot, dialect, data_type, data_subtype, size_of_single_element, scale, converter, status_vector, db_handle, trans_handle_p, cur ); /* PySequence_GetItem creates new ref; discard it: */ Py_DECREF(py_input); if (conv_status_for_this_value != INPUT_OK) { goto fail; } /* Move the raw-array-source-buffer pointer to the next slot. */ *data_slot += size_of_single_element; } /* end of convert-each-value loop */ } else { /* py_seq does NOT contain "leaf objects", so recursively validate each * subsequence of py_seq. */ for (i = 0; i < py_seq_len; i++) { int status_for_this_sub_seq; PyObject *sub_seq; _EXTRACT_SEQ__GET_SEQ_EL_WITH_EXCEPTION(py_seq, i, sub_seq); /* MEAT: */ status_for_this_sub_seq = _extract_pyseq_to_db_array_buffer( sub_seq, next_dimension_size_ptr, data_slot, dialect, data_type, data_subtype, size_of_single_element, scale, converter, status_vector, db_handle, trans_handle_p, cur ); /* PySequence_GetItem creates new ref; discard it: */ Py_DECREF(sub_seq); if (status_for_this_sub_seq != INPUT_OK) { goto fail; } } /* end of validate-each-subsequence loop */ } /* end of this-sequence-contains-leaves if block */ } /* end of this-sequence-was-appropriate length if block */ status = INPUT_OK; goto cleanup; fail: status = INPUT_ERROR; /* Fall through to cleanup. */ cleanup: assert (status == INPUT_OK ? PyErr_Occurred() == NULL : PyErr_Occurred() != NULL ); return status; } /* _extract_pyseq_to_db_array_buffer */ /******************** INPUT FUNCTIONS:END ********************/ /******************** OUTPUT FUNCTIONS:BEGIN ********************/ static PyObject *conv_out_array( Cursor *cursor, short sqlvar_index, ISC_QUAD *array_id, ISC_STATUS *status_vector, isc_db_handle *db_handle, isc_tr_handle *trans_handle_p, char *rel_name, short rel_name_length, char *field_name, short field_name_length ) { /* Arrays are always stored in row-major order inside the DB, and are also * retrieved that way unless the app requests otherwise (via * desc->array_desc_flags). */ PyObject *result = NULL; char *output_buf = NULL, *output_buf_walker; size_t output_buf_size = 0; size_t size_of_single_element = 0; short scale = -1; short data_type = -1; unsigned short number_of_dimensions; short *dimensions = NULL; int total_number_of_elements; /* Will be set later. */ ISC_ARRAY_DESC *desc = _look_up_array_descriptor(cursor->trans, rel_name, rel_name_length, field_name, field_name_length ); if (desc == NULL) { goto fail; } number_of_dimensions = desc->array_desc_dimensions; assert (number_of_dimensions >= 1); data_type = desc->array_desc_dtype; size_of_single_element = desc->array_desc_length; _ADJUST_ELEMENT_SIZE_FOR_VARCHAR_IF_NEC(data_type, size_of_single_element); scale = desc->array_desc_scale; /* Populate the short-array named dimensions. (The _extract_dimensions_sizes * function also sets total_number_of_elements to its appropriate value.) */ dimensions = _extract_dimensions_sizes(desc, &total_number_of_elements); if (dimensions == NULL) { goto fail; } /* The database engine doesn't allow zero-element arrays. 
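*
* For example (added commentary): a column declared INTEGER[2,3] has the
* default bounds 1..2 and 1..3, so _extract_dimensions_sizes yields
* dimensions == {2, 3, DIMENSION_SIZE_END_MARKER} and sets
* total_number_of_elements to 6.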
*/ assert (total_number_of_elements > 0); output_buf_size = size_of_single_element * total_number_of_elements; output_buf = kimem_main_malloc(output_buf_size); if (output_buf == NULL) { goto fail; } output_buf_walker = output_buf; ENTER_GDAL isc_array_get_slice(status_vector, db_handle, trans_handle_p, array_id, desc, (void *) output_buf, (ISC_LONG *) &output_buf_size ); LEAVE_GDAL if (DB_API_ERROR(status_vector)) { goto fail_operationerror; } /* The MEAT: */ result = _extract_db_array_buffer_to_pyseq( cursor, sqlvar_index, /* Pointer to pointer to the first element of the output buffer: */ &output_buf_walker, /* Pointer to array containing the element counts for successive * dimensions: */ dimensions, /* Boilerplate parameters: */ data_type, size_of_single_element, scale, /* Pass through utility stuff from above: */ status_vector, db_handle, trans_handle_p, rel_name, rel_name_length, field_name, field_name_length ); if (result == NULL) { goto fail; } /* Output buffer should've been filled entirely. */ assert (((size_t) (output_buf_walker - output_buf)) == output_buf_size); /* We've retrieved the array successfully; now clean up. */ goto cleanup; fail_operationerror: raise_sql_exception(OperationalError, "Array output conversion: ", status_vector ); /* Fall through to fail. */ fail: if (result != NULL) { Py_DECREF(result); result = NULL; } /* Fall though to cleanup. */ cleanup: #ifdef USE_OFFICIAL_ISC_ARRAY_LOOKUP_BOUNDS if (desc != NULL) { kimem_main_free(desc); } #endif if (dimensions != NULL) { kimem_main_free(dimensions); } if (output_buf != NULL) { kimem_main_free(output_buf); } assert (PyErr_Occurred() ? result == NULL : result != NULL); return result; } /* conv_out_array */ static PyObject *_extract_db_array_buffer_to_pyseq( Cursor *cursor, short sqlvar_index, char **data_slot, short *dimension_sizes_ptr, /* Boilerplate parameters (capitalized to differentiate them): */ short data_type, size_t size_of_single_element, short scale, ISC_STATUS *status_vector, isc_db_handle *db_handle, isc_tr_handle *trans_handle_p, char *rel_name, short rel_name_length, char *field_name, short field_name_length ) { short elCount = *dimension_sizes_ptr; short *next_dimension_size_ptr = dimension_sizes_ptr + 1; short i; PyObject *seq = PyList_New(elCount); if (seq == NULL) { goto fail; } if (*next_dimension_size_ptr == DIMENSION_SIZE_END_MARKER) { /* We're dealing with a "leaf" which contains actual output values rather * than subsequences. */ for (i = 0; i < elCount; i++) { PyObject *val = conv_out_array_element( cursor, sqlvar_index, *data_slot, data_type, size_of_single_element, scale, status_vector, db_handle, trans_handle_p, rel_name, rel_name_length, field_name, field_name_length ); if (val == NULL) { goto fail; } /* Move the raw-array-desination-buffer pointer to the next slot. */ *data_slot += size_of_single_element; /* PyList_SET_ITEM steals ref to val; no need to DECREF. */ PyList_SET_ITEM(seq, i, val); } } else { /* We're dealing with a subsequence rather than a leaf, so recurse. */ for (i = 0; i < elCount; i++ ) { PyObject *subList = _extract_db_array_buffer_to_pyseq( cursor, sqlvar_index, data_slot, next_dimension_size_ptr, /* Boilerplate parameters (capitalized to differentiate them): */ data_type, size_of_single_element, scale, status_vector, db_handle, trans_handle_p, rel_name, rel_name_length, field_name, field_name_length ); if (subList == NULL) { goto fail; } /* PyList_SET_ITEM steals ref to subList; no need to DECREF. 
*/ PyList_SET_ITEM(seq, i, subList); } } assert (PyList_GET_SIZE(seq) == elCount); return seq; fail: assert (PyErr_Occurred()); Py_XDECREF(seq); return NULL; } /* _extract_db_array_buffer_to_pyseq */ static PyObject *conv_out_array_element( Cursor *cursor, short sqlvar_index, char *data, short data_type, size_t size_of_single_element, short scale, ISC_STATUS *status_vector, isc_db_handle *db_handle, isc_tr_handle *trans_handle_p, char *rel_name, short rel_name_length, char *field_name, short field_name_length ) { PyObject *result = NULL; PyObject *converter = NULL; const unsigned short dialect = Transaction_get_dialect(cursor->trans); const short data_subtype = _determine_sqlsubtype_for_array(cursor->trans, rel_name, rel_name_length, field_name, field_name_length ); if (data_subtype == SQLSUBTYPE_DETERMINATION_ERROR) { goto fail; } Transaction_stats_record_ps_executed(cursor->trans); converter = cursor_get_out_converter(cursor, sqlvar_index, data_type, data_subtype, scale, TRUE ); /* cursor_get_out_converter returns NULL on error, borrowed reference to * Py_None if there was no converter. */ if (converter == NULL) { goto fail; } switch (data_type) { case blr_text: case blr_text2: result = conv_out_char(data, size_of_single_element); break; case blr_varying: case blr_varying2: { /* VARCHAR array elements are stored differently from the way * conventional VARCHAR fields are stored (see documentary note about * _ADJUST_ELEMENT_SIZE_FOR_VARCHAR_IF_NEC). */ const size_t len_before_null = strlen(data); result = conv_out_char(data, (len_before_null <= size_of_single_element ? len_before_null : size_of_single_element ) ); } break; /* NULL-terminated string: */ case blr_cstring: case blr_cstring2: result = PyString_FromString(data); break; case blr_short: result = conv_out_short_long(data, SQL_SHORT, IS_FIXED_POINT__ARRAY_EL(dialect, data_type, data_subtype, scale), scale ); break; case blr_long: result = conv_out_short_long(data, SQL_LONG, IS_FIXED_POINT__ARRAY_EL(dialect, data_type, data_subtype, scale), scale ); break; #ifdef INTERBASE_6_OR_LATER case blr_int64: result = conv_out_int64(data, IS_FIXED_POINT__ARRAY_EL(dialect, data_type, data_subtype, scale), scale ); break; #endif /* INTERBASE_6_OR_LATER */ case blr_float: result = conv_out_floating(*((float *) data), dialect, scale); break; case blr_double: case blr_d_float: result = conv_out_floating(*((double *) data), dialect, scale); break; case blr_timestamp: result = conv_out_timestamp(data); break; #ifdef INTERBASE_6_OR_LATER case blr_sql_date: result = conv_out_date(data); break; case blr_sql_time: result = conv_out_time(data); break; #endif /* INTERBASE_6_OR_LATER */ case blr_boolean_dtype: result = conv_out_boolean(data); break; case blr_quad: /* ISC_QUAD structure; since the DB engine doesn't support arrays of * arrays, assume that this item refers to a blob id. */ case blr_blob: case blr_blob_id: raise_exception(NotSupportedError, "kinterbasdb does not support arrays of arrays or arrays of blobs" " because it's not clear how one would create such a field via SQL." ); goto fail; default: raise_exception(NotSupportedError, "kinterbasdb does not support the output conversion of arrays of this" " type. " KIDB_REPORT " " KIDB_HOME_PAGE ); goto fail; } assert (converter != NULL); /* Can't be NULL; may be None. */ /* Obviously mustn't invoke the converter if the original value was not * loaded properly from the database. 
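*
* For instance (added commentary): if a dynamic type translator is
* registered for fixed-point output (see typeconv_fixed_decimal.py in this
* package), each raw scaled-integer element retrieved above is passed
* through it here and comes back as a decimal.Decimal.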
*/ if (result != NULL) { /* Replacing the PyObject pointer in result is not a refcount leak; see the * comments in dynamically_type_convert_output_obj_if_necessary. */ result = dynamically_type_convert_output_obj_if_necessary( result, converter, data_type, data_subtype ); } else { assert (PyErr_Occurred()); } return result; fail: assert (PyErr_Occurred()); Py_XDECREF(result); /* The reference to 'converter' is borrowed; no need to DECREF. */ return NULL; } /* conv_out_array_element */ /******************** OUTPUT FUNCTIONS:END ********************/ /******************** UTILITY FUNCTIONS:BEGIN ********************/ #ifdef USE_OFFICIAL_ISC_ARRAY_LOOKUP_BOUNDS static ISC_ARRAY_DESC *_look_up_array_descriptor( Transaction *trans, /* These strings aren't null-terminated: */ char *sqlvar_rel_name, short sqlvar_rel_name_length, char *sqlvar_field_name, short sqlvar_field_name_length ) { ISC_STATUS *status_vector = Transaction_get_sv(trans); isc_db_handle *db_handle = Transaction_get_db_handle_p(trans); isc_tr_handle *trans_handle_p = Transaction_get_handle_p(trans); /* isc_array_lookup_* functions require null-terminated strings, but the * relevant strings from the XSQLVAR structure are not null-terminated. */ char *null_terminated_table_name = NULL, *null_terminated_field_name = NULL; /* Begin initial memory allocation section. */ ISC_ARRAY_DESC *desc = kimem_main_malloc(sizeof(ISC_ARRAY_DESC)); if (desc == NULL) { goto fail; } null_terminated_table_name = kimem_main_malloc(sqlvar_rel_name_length + 1); if (null_terminated_table_name == NULL) { goto fail; } null_terminated_field_name = kimem_main_malloc(sqlvar_field_name_length + 1); if (null_terminated_field_name == NULL) { goto fail; } /* End initial memory allocation section. */ /* Copy the non-null-terminated strings sqlvar_rel_name and * sqlvar_field_name into the null-terminated strings null_terminated_*_name. */ memcpy(null_terminated_table_name, sqlvar_rel_name, sqlvar_rel_name_length); null_terminated_table_name[sqlvar_rel_name_length] = '\0'; memcpy(null_terminated_field_name, sqlvar_field_name, sqlvar_field_name_length); null_terminated_field_name[sqlvar_field_name_length] = '\0'; ENTER_GDAL isc_array_lookup_bounds(status_vector, db_handle, trans_handle_p, null_terminated_table_name, null_terminated_field_name, desc ); LEAVE_GDAL if (DB_API_ERROR(status_vector)) { raise_sql_exception(OperationalError, "Array descriptor lookup: ", status_vector ); goto fail; } /* Successfully completed. */ assert (desc != NULL); goto cleanup; fail: assert (PyErr_Occurred()); if (desc != NULL) { kimem_main_free(desc); desc = NULL; } /* Fall through to cleanup. 
*/ cleanup: if (null_terminated_table_name != NULL) { kimem_main_free(null_terminated_table_name); } if (null_terminated_field_name != NULL) { kimem_main_free(null_terminated_field_name); } return desc; } /* _look_up_array_descriptor */ #else static ISC_ARRAY_DESC *_look_up_array_descriptor( Transaction *trans, /* These strings aren't null-terminated: */ char *sqlvar_rel_name, short sqlvar_rel_name_length, char *sqlvar_field_name, short sqlvar_field_name_length ) { PyObject *py_desc = NULL; ISC_ARRAY_DESC *desc = NULL; PyObject *py_rel_name = NULL; PyObject *py_field_name = NULL; assert (trans != NULL); assert (trans->con_python_wrapper != NULL); py_rel_name = PyString_FromStringAndSize( sqlvar_rel_name, sqlvar_rel_name_length ); if (py_rel_name == NULL) { goto fail; } py_field_name = PyString_FromStringAndSize( sqlvar_field_name, sqlvar_field_name_length ); if (py_field_name == NULL) { goto fail; } py_desc = PyObject_CallFunctionObjArgs(py_look_up_array_descriptor, trans->con_python_wrapper, py_rel_name, py_field_name, NULL ); if (py_desc == NULL) { goto fail; } if (!PyString_CheckExact(py_desc)) { raise_exception(InternalError, "py_look_up_array_descriptor returned wrong type." ); goto fail; } /* desc is just a pointer to the internal buffer of py_desc. The * connection's cache will maintain a reference to py_desc at least as long * as we need to use desc, so there are no object lifetime problems. */ assert (py_desc->ob_refcnt > 1); desc = (ISC_ARRAY_DESC *) PyString_AS_STRING(py_desc); goto clean; fail: assert (PyErr_Occurred()); desc = NULL; /* Fall through to clean: */ clean: Py_XDECREF(py_desc); /* Yes, this is correct--see lifetime note above. */ Py_XDECREF(py_rel_name); Py_XDECREF(py_field_name); return desc; } /* _look_up_array_descriptor */ #endif static short *_extract_dimensions_sizes( ISC_ARRAY_DESC *desc, /* output param: */ int *total_number_of_elements ) { int dimension; unsigned short number_of_dimensions = desc->array_desc_dimensions; ISC_ARRAY_BOUND bounds_of_current_dimension; /* Populate the short-array dimensions, and calculate the total number of * elements: */ short *dimensions = kimem_main_malloc((number_of_dimensions + 1) * sizeof(short)); if (dimensions == NULL) { goto fail; } *total_number_of_elements = 1; for (dimension = 0; dimension < number_of_dimensions; dimension++) { bounds_of_current_dimension = desc->array_desc_bounds[dimension]; dimensions[dimension] = (bounds_of_current_dimension.array_bound_upper + 1) - bounds_of_current_dimension.array_bound_lower ; *total_number_of_elements *= dimensions[dimension]; } /* The final element is set to a flag value (for pointer-walking * convenience). */ dimensions[number_of_dimensions] = DIMENSION_SIZE_END_MARKER; return dimensions; fail: assert (PyErr_Occurred()); if (dimensions != NULL) { kimem_main_free(dimensions); } return NULL; } /* _extract_dimensions_sizes */ static short _determine_sqlsubtype_for_array( Transaction *trans, char *rel_name, short rel_name_length, char *field_name, short field_name_length ) { #ifdef USE_OFFICIAL_ISC_ARRAY_LOOKUP_BOUNDS ISC_STATUS *status_vector = Transaction_get_sv(trans); isc_db_handle *db_handle = Transaction_get_db_handle_p(trans); isc_tr_handle *trans_handle_p = Transaction_get_handle_p(trans); short out_var_sqlind = SQLIND_NULL; /* Returns the subtype on success, or SQLSUBTYPE_DETERMINATION_ERROR on error. 
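*
* Background note (added commentary): for numeric element types,
* RDB$FIELD_SUB_TYPE is conventionally 0 (or NULL) for a plain integer
* field, 1 for NUMERIC, and 2 for DECIMAL; the NULL case is mapped to 0
* below so callers can treat the result uniformly.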
*/ const char *subtype_determination_statement = "SELECT FIELD_SPEC.RDB$FIELD_SUB_TYPE" " FROM RDB$FIELDS FIELD_SPEC, RDB$RELATION_FIELDS REL_FIELDS" " WHERE" " FIELD_SPEC.RDB$FIELD_NAME = REL_FIELDS.RDB$FIELD_SOURCE" " AND REL_FIELDS.RDB$RELATION_NAME = ?" " AND REL_FIELDS.RDB$FIELD_NAME = ?" ; XSQLDA *out_da = NULL; XSQLDA *in_da = NULL; XSQLVAR *in_var = NULL; XSQLVAR *out_var = NULL; isc_stmt_handle stmt_handle_sqlsubtype = NULL; /* Guilty until proven innocent: */ short sqlsubtype = SQLSUBTYPE_DETERMINATION_ERROR; /* There's no need to use dynamic allocation here, but since this code is * disabled anyway due to the concurrency bug in isc_array_lookup_bounds, * I won't bother optimizing it: */ in_da = kimem_xsqlda_malloc(XSQLDA_LENGTH(2)); if (in_da == NULL) { /* Weren't calling one of the Python-supplied malloc impls; need to set * MemoryError. */ PyErr_NoMemory(); goto fail; } in_da->version = SQLDA_VERSION_KIDB; in_da->sqln = 2; in_da->sqld = 2; in_da->sqlvar ->sqltype = SQL_TEXT; (in_da->sqlvar + 1)->sqltype = SQL_TEXT; /* Set the names of the relation.field for which we're determining * sqlsubtype. */ in_var = in_da->sqlvar; /* First input variable. */ in_var->sqllen = rel_name_length; in_var->sqldata = rel_name; in_var++; /* Second input variable. */ in_var->sqllen = field_name_length; in_var->sqldata = field_name; /* Set up the output structures. We know at design time exactly how they * should be configured; there's no convoluted dance of dynamism here, as * there is in servicing a generic Python-level query. */ /* There's no need to use dynamic allocation here, but since this code is * disabled anyway due to the concurrency bug in isc_array_lookup_bounds, * I won't bother optimizing it: */ out_da = (XSQLDA *) kimem_xsqlda_malloc(XSQLDA_LENGTH(1)); if (out_da == NULL) { /* Weren't calling one of the Python-supplied malloc impls; need to set * MemoryError. */ PyErr_NoMemory(); goto fail; } out_da->version = SQLDA_VERSION_KIDB; out_da->sqln = 1; out_var = out_da->sqlvar; out_var->sqldata = (char *) kimem_main_malloc(sizeof(short)); if (out_var->sqldata == NULL) { goto fail; } out_var->sqlind = &out_var_sqlind; ENTER_GDAL isc_dsql_allocate_statement(status_vector, db_handle, &stmt_handle_sqlsubtype ); LEAVE_GDAL if (DB_API_ERROR(status_vector)) { goto fail; } ENTER_GDAL isc_dsql_prepare(status_vector, trans_handle_p, &stmt_handle_sqlsubtype, 0, (char *) subtype_determination_statement, 3, out_da ); LEAVE_GDAL if (DB_API_ERROR(status_vector)) { goto fail; } ENTER_GDAL isc_dsql_execute2(status_vector, trans_handle_p, &stmt_handle_sqlsubtype, 3, in_da, out_da ); LEAVE_GDAL if (DB_API_ERROR(status_vector)) { goto fail; } if (out_var_sqlind == SQLIND_NULL) { sqlsubtype = 0; } else { sqlsubtype = *((short *) out_var->sqldata); } goto cleanup; fail: if (DB_API_ERROR(status_vector)) { raise_sql_exception(InternalError, "_determine_sqlsubtype_for_array: ", status_vector ); } assert (PyErr_Occurred()); /* Fall through to cleanup. */ cleanup: if (stmt_handle_sqlsubtype != NULL) { /* The isc_dsql_free_statement call here is relatively safe because under * normal circumstances the connection is unlikely to be severed between * the time the statement handle is allocated with * isc_dsql_allocate_statement (earlier in this same function) and the * time it's freed (here). 
*/ ENTER_GDAL isc_dsql_free_statement(status_vector, &stmt_handle_sqlsubtype, DSQL_drop); LEAVE_GDAL } if (in_da != NULL) { kimem_xsqlda_free(in_da); } if (out_da != NULL) { if (out_da->sqlvar->sqldata != NULL) { kimem_main_free(out_da->sqlvar->sqldata); } kimem_xsqlda_free(out_da); } return sqlsubtype; #else /* !USE_OFFICIAL_ISC_ARRAY_LOOKUP_BOUNDS: */ PyObject *py_sqlsubtype = NULL; short sqlsubtype = SQLSUBTYPE_DETERMINATION_ERROR; PyObject *py_rel_name = NULL; PyObject *py_field_name = NULL; assert (trans != NULL); assert (trans->con_python_wrapper != NULL); py_rel_name = PyString_FromStringAndSize(rel_name, rel_name_length); if (py_rel_name == NULL) { goto fail; } py_field_name = PyString_FromStringAndSize(field_name, field_name_length); if (py_field_name == NULL) { goto fail; } py_sqlsubtype = PyObject_CallFunctionObjArgs(py_look_up_array_subtype, trans->con_python_wrapper, py_rel_name, py_field_name, NULL ); if (py_sqlsubtype == NULL) { goto fail; } if (py_sqlsubtype == Py_None) { sqlsubtype = 0; } else { if (!PyInt_CheckExact(py_sqlsubtype)) { raise_exception(InternalError, "py_look_up_array_subtype returned wrong type." ); goto fail; } else { const long sqlsubtype_long = PyInt_AS_LONG(py_sqlsubtype); assert (sqlsubtype_long >= 0); assert (sqlsubtype_long <= SHRT_MAX); sqlsubtype = (short) sqlsubtype_long; } } goto clean; fail: assert (PyErr_Occurred()); assert (sqlsubtype == -1); /* Fall through to clean: */ clean: Py_XDECREF(py_sqlsubtype); Py_XDECREF(py_rel_name); Py_XDECREF(py_field_name); return sqlsubtype; #endif } /* _determine_sqlsubtype_for_array */ /******************** UTILITY FUNCTIONS:END ********************/ kinterbasdb-3.3.0/typeconv_fixed_decimal.py0000644000175000001440000000651511130647414020273 0ustar pcisarusers# KInterbasDB Python Package - Type Conv : Fixed/Python 2.4+ Standard Library # # Version 3.3 # # The following contributors hold Copyright (C) over their respective # portions of code (see license.txt for details): # # [Original Author (maintained through version 2.0-0.3.1):] # 1998-2001 [alex] Alexander Kuznetsov # [Maintainers (after version 2.0-0.3.1):] # 2001-2002 [maz] Marek Isalski # 2002-2006 [dsr] David Rushby # [Contributors:] # 2001 [eac] Evgeny A. Cherkashin # 2001-2002 [janez] Janez Jere __all__ = ( # kinterbasdb-native fixed point converters (old, precison_mode style): 'fixed_conv_in_imprecise', 'fixed_conv_in_precise', 'fixed_conv_out_imprecise', 'fixed_conv_out_precise', ) import sys # We import the decimal module lazily so that client programs that use # kinterbasdb.init(type_conv=200), but don't actually use any fixed point # fields, don't pay the rather large memory overhead of the decimal module. Decimal = None from kinterbasdb.k_exceptions import * _tenTo = [10**x for x in range(20)] del x _passThroughTypes = ( type(None), str, # str, not basestring, for implicit param conv. ) _simpleScaleTypes = ( int, long, float, ) ################################################################################ ## FIXED POINT ################################################################################ def fixed_conv_in_precise((val, scale)): global Decimal if Decimal is None: from decimal import Decimal if isinstance(val, _passThroughTypes): res = val elif isinstance(val, Decimal): # Input conversion process: # 1. Scale val up by the appropriate power of ten # -> Decimal object # 2. Ask the resulting Decimal object to represent itself as an # integral, which will invoke whatever rounding policy happens to # be in place # -> Decimal object # 3. 
Convert the result of the previous step to an int (or a long, if # the number is too large to fit into a native integer). # -> int or long # # Note: # The final step would not be compatible with Python < 2.3 when # handling large numbers (the int function in < 2.3 would raise an # exception if the number couldn't fit into a native integer). Since # the decimal module didn't appear until 2.4 and kinterbasdb 3.2 will # not officially support < 2.3, this is not a problem. res = int((val * _tenTo[abs(scale)]).to_integral()) elif isinstance(val, _simpleScaleTypes): res = int(val * _tenTo[abs(scale)]) else: raise TypeError('Objects of type %s are not acceptable input for' ' a fixed-point column.' % str(type(val)) ) return res fixed_conv_in_imprecise = fixed_conv_in_precise def fixed_conv_out_precise(x): global Decimal if Decimal is None: from decimal import Decimal if x is None: return None # x[0] is an integer or long, and we divide it by a power of ten, so we're # assured a lossless result: return Decimal(x[0]) / _tenTo[abs(x[1])] fixed_conv_out_imprecise = fixed_conv_out_precise kinterbasdb-3.3.0/_kimem.h0000644000175000001440000001016611130647414014624 0ustar pcisarusers/* KInterbasDB Python Package - Header File for Memory Management Wrappers * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ /* This header file is intended to provide centralized aliases for the various * sets of memory handling functions that kinterbasdb uses. The centralization * will have two main benefits: * - facilitate debug tracing of memory operations * - reduce the likelihood of "series mismatches" between allocation and * freeing functions for the same piece of memory by providing more * descriptive names (such as kimem_xsqlda_malloc for XSQLDA memory * allocation). * This issue is especially crucial with Python 2.3, which makes the * specialized pymalloc memory allocater (available via the * PyObject_[Malloc|Realloc|Free] series) the default. */ #ifndef _KIMEM_H #define _KIMEM_H /*************************** PLAIN ***********************************/ /* kimem_plain_* is kinterbasdb's simplest series of memory handlers. * Typically, these will simply be aliases for the libc C memory handlers. * * kimem_plain_* should NEVER resolve to pymalloc's memory handlers, nor to any * other memory allocator that's expected to be incompatible with "generic" * memory allocated "by some third party". * * Also, unlike kimem_main_*, this series must be threadsafe. */ #define kimem_plain_malloc malloc #define kimem_plain_realloc realloc #define kimem_plain_free free /*************************** MAIN ***********************************/ /* Series for pymalloc, the specialized Python-oriented memory handler that * became standard in Python 2.3. * * Unless there's a specific reason not to (as noted elsewhere in this file, * including in the WARNING just below), any kinterbasdb [|de|re]allocation of * raw memory should use this series. * * WARNING: * Members of the kimem_main_* series must only be called when the GIL is * held, since they rely on pymalloc, which assumes the GIL is held. 
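*
* Minimal usage sketch (added commentary, illustrative only):
*
*   char *a = kimem_main_malloc(n);    (fine: GIL is held)
*   Py_BEGIN_ALLOW_THREADS
*   char *b = kimem_plain_malloc(n);   (fine: libc malloc needs no GIL)
*   ... calling kimem_main_malloc here would be a bug: GIL released ...
*   Py_END_ALLOW_THREADS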
*/ #define kimem_main_malloc PyObject_Malloc #define kimem_main_realloc PyObject_Realloc #define kimem_main_free PyObject_Free /*************************** DB_CLIENT **********************************/ /* This series is implemented by the database client library. The client * library sometimes gives us deallocation responsibility for chunks of memory * that were allocated using its own memory handler. */ #define kimem_db_client_malloc isc_malloc #define kimem_db_client_realloc isc_realloc #define kimem_db_client_free isc_free /*************************** XSQLDA ***********************************/ /* The kimem_xsqlda_* memory management aliases were established because * trouble arises when XSQLDA structures are allocated/freed with pymalloc. * Probably, some isc_* functions that handle XSQLDAs or their XSQLVARs make * changes to those structures' memory that are not obvious, and in a way that * requires those structures to have been allocated with the standard C malloc. * * NOTES: * - These memory handlers are for the XSQLDA structures themselves (which * contain XSQLVARs), not for handling memory indirectly related to the * XSQLDA, such as XSQLVAR.sqldata and XSQLVAR.sqlind (those latter work just * fine with pymalloc). */ #define kimem_xsqlda_malloc kimem_plain_malloc #define kimem_xsqlda_realloc kimem_plain_realloc #define kimem_xsqlda_free kimem_plain_free /*****************************************************************************/ #endif /* not def _KIMEM_H */ kinterbasdb-3.3.0/typeconv_text_unicode.py0000644000175000001440000001761611130647414020214 0ustar pcisarusers# KInterbasDB Python Package - Type Conv : Text/Unicode # # Version 3.3 # # The following contributors hold Copyright (C) over their respective # portions of code (see license.txt for details): # # [Original Author (maintained through version 2.0-0.3.1):] # 1998-2001 [alex] Alexander Kuznetsov # [Maintainers (after version 2.0-0.3.1):] # 2001-2002 [maz] Marek Isalski # 2002-2006 [dsr] David Rushby # [Contributors:] # 2001 [eac] Evgeny A. Cherkashin # 2001-2002 [janez] Janez Jere __all__ = ( 'unicode_conv_in', 'unicode_conv_out', 'DB_TO_PYTHON_ENCODING_MAP', ) import sys from kinterbasdb.k_exceptions import * # The database character set codes (the *keys* in DB_TO_PYTHON_ENCODING_MAP) # are defined on pages 221-225 of the Interbase 6 Data Definition Guide. # The Python codec names (the *values* in DB_TO_PYTHON_ENCODING_MAP) are # defined in section 4.9.2 "Standard Encodings" of the Python Library # Reference. 
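#
# Illustrative example (added commentary; the numeric ID below is
# hypothetical): a site using an unofficial character set with ID 100 could
# register a codec before performing any TEXT_UNICODE conversions:
#   kinterbasdb.typeconv_text_unicode.DB_TO_PYTHON_ENCODING_MAP[100] = 'cp1252'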
# # The character sets supported by a given database can be determined with the # following query: # select rdb$character_set_id, rdb$character_set_name # from rdb$character_sets order by rdb$character_set_id DB_TO_PYTHON_ENCODING_MAP = { # The following three database character set codes are not handled by # kinterbasdb's TEXT_UNICODE dynamic type translation (they're handled by # TEXT instead, and deal with plain Python strings): # 0 -> 'NONE' # 1 -> 'OCTETS' # 2 -> 'ASCII' # DB CODE : PYTHON NAME : DB NAME # --------------------------------------------------------------------------- 3: 'utf_8', #: 'UNICODE_FSS' 4: 'utf_8', #: 'UTF8' (Firebird 2.0+) 5: 'shift_jis', #: 'SJIS_0208' 6: 'euc_jp', #: 'EUCJ_0208' 9: 'cp737', #: 'DOS737' 10: 'cp437', #: 'DOS437' 11: 'cp850', #: 'DOS850' 12: 'cp865', #: 'DOS865' 13: 'cp860', #: 'DOS860' 14: 'cp863', #: 'DOS863' 15: 'cp775', #: 'DOS775' # 16: NOT SUPPORTED, #: 'DOS858' 17: 'cp862', #: 'DOS862' 18: 'cp864', #: 'DOS864' # 19: NOT SUPPORTED, #: 'NEXT' 21: 'iso8859_1', #: 'ISO8859_1' 22: 'iso8859_2', #: 'ISO8859_2' 23: 'iso8859_3', #: 'ISO8859_3' 34: 'iso8859_4', #: 'ISO8859_4' 35: 'iso8859_5', #: 'ISO8859_5' 36: 'iso8859_6', #: 'ISO8859_6' 37: 'iso8859_7', #: 'ISO8859_7' 38: 'iso8859_8', #: 'ISO8859_8' 39: 'iso8859_9', #: 'ISO8859_9' 40: 'iso8859_13', #: 'ISO8859_13' 44: 'euc_kr', #: 'KSC_5601' 45: 'cp852', #: 'DOS852' 46: 'cp857', #: 'DOS857' 47: 'cp861', #: 'DOS861' 48: 'cp866', #: 'DOS866' 49: 'cp869', #: 'DOS869' # 50: NOT SUPPORTED, #: 'CYRL' 51: 'cp1250', #: 'WIN1250' 52: 'cp1251', #: 'WIN1251' 53: 'cp1252', #: 'WIN1252' 54: 'cp1253', #: 'WIN1253' 55: 'cp1254', #: 'WIN1254' 56: 'big5', #: 'BIG_5' 57: 'gb2312', #: 'GB_2312' 58: 'cp1255', #: 'WIN1255' 59: 'cp1256', #: 'WIN1256' 60: 'cp1257', #: 'WIN1257' 63: 'koi8_r', #: 'KOI8-R' (Firebird 2.0+) 64: 'koi8_u', #: 'KOI8-U' (Firebird 2.0+) 65: 'cp1258', #: 'WIN1258' (Firebird 2.0+) } DB_CHAR_SET_NAME_TO_PYTHON_ENCODING_MAP = { # DB CHAR SET NAME : PYTHON CODEC NAME (CANONICAL) # --------------------------------------------------------------------------- 'OCTETS' : None, # Allow to pass through unchanged. 
'UNICODE_FSS' : 'utf_8', 'UTF8' : 'utf_8', # (Firebird 2.0+) 'SJIS_0208' : 'shift_jis', 'EUCJ_0208' : 'euc_jp', 'DOS737' : 'cp737', 'DOS437' : 'cp437', 'DOS850' : 'cp850', 'DOS865' : 'cp865', 'DOS860' : 'cp860', 'DOS863' : 'cp863', 'DOS775' : 'cp775', 'DOS862' : 'cp862', 'DOS864' : 'cp864', 'ISO8859_1' : 'iso8859_1', 'ISO8859_2' : 'iso8859_2', 'ISO8859_3' : 'iso8859_3', 'ISO8859_4' : 'iso8859_4', 'ISO8859_5' : 'iso8859_5', 'ISO8859_6' : 'iso8859_6', 'ISO8859_7' : 'iso8859_7', 'ISO8859_8' : 'iso8859_8', 'ISO8859_9' : 'iso8859_9', 'ISO8859_13' : 'iso8859_13', 'KSC_5601' : 'euc_kr', 'DOS852' : 'cp852', 'DOS857' : 'cp857', 'DOS861' : 'cp861', 'DOS866' : 'cp866', 'DOS869' : 'cp869', 'WIN1250' : 'cp1250', 'WIN1251' : 'cp1251', 'WIN1252' : 'cp1252', 'WIN1253' : 'cp1253', 'WIN1254' : 'cp1254', 'BIG_5' : 'big5', 'GB_2312' : 'gb2312', 'WIN1255' : 'cp1255', 'WIN1256' : 'cp1256', 'WIN1257' : 'cp1257', 'KOI8-R' : 'koi8_r', # (Firebird 2.0+) 'KOI8-U' : 'koi8_u', # (Firebird 2.0+) 'WIN1258' : 'cp1258', # (Firebird 2.0+) } # The inverse of DB_CHAR_SET_NAME_TO_PYTHON_ENCODING_MAP: PYTHON_ENCODING_TO_DB_CHAR_SET_NAME_MAP = dict( (val, key) for (key, val) in DB_CHAR_SET_NAME_TO_PYTHON_ENCODING_MAP.iteritems() ) DB_CHAR_SET_NAME_TO_DB_CHAR_SET_ID_MAP = {} # Example entry: 'WIN1251': 52 for dbCharSetID, pythonEncodingName in DB_TO_PYTHON_ENCODING_MAP.iteritems(): dbCharSetName = PYTHON_ENCODING_TO_DB_CHAR_SET_NAME_MAP[pythonEncodingName] DB_CHAR_SET_NAME_TO_DB_CHAR_SET_ID_MAP[dbCharSetName] = dbCharSetID del dbCharSetID, pythonEncodingName, dbCharSetName _UNKNOWN_CHARSET_MSG = ( "Don't know how to %s value %s charset with numeric ID %d." " If you are using an unofficial character set, you should add a" " corresponding entry to kinterbasdb's translation table, as in:\n" " kinterbasdb.typeconv_text_unicode.DB_TO_PYTHON_ENCODING_MAP[%d] = 'XX'\n" "where XX is the name of a Python codec." " Standard Python codecs are listed in section 4.9.2 ('Standard" " Encodings') of the Python documentation." ) def unicode_conv_in((unicodeString, dbCharacterSetCode)): if unicodeString is None: return None # Modulate dbCharacterSetCode by 256 to get rid of collation info. dbCharacterSetCode %= 256 pyEncodingName = DB_TO_PYTHON_ENCODING_MAP.get(dbCharacterSetCode, None) if pyEncodingName is not None: return unicodeString.encode(pyEncodingName) else: raise OperationalError( _UNKNOWN_CHARSET_MSG % ( 'encode', 'to', dbCharacterSetCode, dbCharacterSetCode )) def unicode_conv_out((rawString, dbCharacterSetCode)): if rawString is None: return None # Modulate dbCharacterSetCode by 256 to get rid of collation info. dbCharacterSetCode %= 256 pyEncodingName = DB_TO_PYTHON_ENCODING_MAP.get(dbCharacterSetCode, None) if pyEncodingName is not None: return rawString.decode(pyEncodingName) else: raise OperationalError( _UNKNOWN_CHARSET_MSG % ( 'decode', 'from', dbCharacterSetCode, dbCharacterSetCode )) kinterbasdb-3.3.0/_kisupport.h0000644000175000001440000000342311130647414015560 0ustar pcisarusers/* KInterbasDB Python Package - Header File for Support * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. 
Cherkashin * 2001-2002 [janez] Janez Jere */ #ifndef _KISUPPORT_H #define _KISUPPORT_H #include "_kisupport_platform.h" #ifdef ENABLE_CONNECTION_TIMEOUT static PlatformThreadRefType Thread_current_ref(void); #endif /* ENABLE_CONNECTION_TIMEOUT */ static PlatformThreadIdType Thread_current_id(void); static boolean Thread_ids_equal( PlatformThreadIdType a, PlatformThreadIdType b ); static PlatformThreadRefType Thread_create( PlatformThreadFuncType, void *, PlatformThreadIdType * ); static long Thread_join(PlatformThreadRefType); #ifdef ENABLE_CONNECTION_TIMEOUT static void sleep_millis(unsigned int millis); #endif /* ENABLE_CONNECTION_TIMEOUT */ static long Mutex_init(PlatformMutexType *); static long Mutex_close(PlatformMutexType *); static long Mutex_lock(PlatformMutexType *); static long Mutex_unlock(PlatformMutexType *); typedef enum { WR_WAIT_CANCELLED = -2, WR_WAIT_ERROR = -1, WR_WAIT_OK = 0, WR_WAIT_TIMEOUT = 1 } WaitResult; #define WAIT_INFINITELY_FLOAT -1.0 #define WAIT_INFINITELY_LONG -1 #define MILLI_TIMEOUT_IS_INFINITE(millis) ((millis) == WAIT_INFINITELY_LONG) #endif /* if not def _KISUPPORT_H */ kinterbasdb-3.3.0/_kicore_connection_timeout.c0000644000175000001440000014254211130647414020762 0ustar pcisarusers/* KInterbasDB Python Package - Implementation of Connection Timeout * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ #include "_kicore_connection_timeout.h" /*************************** PRELIMINARIES: BEGIN ****************************/ static double ConnectionTimeoutParams_active_secs(ConnectionTimeoutParams *tp); static double ConnectionTimeoutParams_idle_secs(ConnectionTimeoutParams *tp); /* Inform the ConnectionTimeoutThread that a new connection has been * added to the tracker: */ #ifdef PLATFORM_WINDOWS #define WAKE_TIMEOUT_THREAD \ SetEvent(global_ctm.reconsider_wait_interval) #else #define WAKE_TIMEOUT_THREAD \ pthread_cond_signal(&global_ctm.reconsider_wait_interval) #endif #define CLEAR_CTT_REFS \ global_ctm.timeout_thread_py = NULL; \ global_ctm.timeout_thread = THREAD_REF_INVALID; \ global_ctm.timeout_thread_id = THREAD_ID_NONE #define ASSERT_CTT_REFS_ARE_CLEAR \ assert (global_ctm.timeout_thread_py == NULL); \ assert (global_ctm.timeout_thread == THREAD_REF_INVALID); \ assert (global_ctm.timeout_thread_id == THREAD_ID_NONE) static boolean TP_TRYLOCK(ConnectionTimeoutParams *tp) { const boolean acquired = (boolean) PyThread_acquire_lock(tp->lock, NOWAIT_LOCK); if (acquired) { TP_RECORD_OWNERSHIP(tp); debug_print4("TP(%p)-> ?ACQUIRE: %ld file %s line %d\n", tp, PyThread_get_thread_ident(), __FILE__, __LINE__ ); debug_print4("TP(%p)-> !!ACQUIRED: %ld file %s line %d\n", tp, PyThread_get_thread_ident(), __FILE__, __LINE__ ); } return acquired; } /* TP_TRYLOCK */ /**************************** PRELIMINARIES: END *****************************/ /*********************** CACHED PYTHON OBJECTS: BEGIN ************************/ static PyObject *con_timeout__s_period; static PyObject *con_timeout__s_callback_before; static PyObject *con_timeout__s_callback_after; static PyObject *con_timeout__s_dsn; static PyObject *con_timeout__s_has_transaction; static PyObject *con_timeout__s_active_secs; static 
PyObject *con_timeout__s_idle_secs; /************************ CACHED PYTHON OBJECTS: END *************************/ /************************* MISC SUPPORT CODE: BEGIN **************************/ static int init_kidb_connection_timeout(PyObject *k_mod) { #define CT_CACHE_STRING(s) \ con_timeout__s_ ## s = PyString_FromString(#s); \ if (con_timeout__s_ ## s == NULL) { goto fail; } CT_CACHE_STRING(period); CT_CACHE_STRING(callback_before); CT_CACHE_STRING(callback_after); CT_CACHE_STRING(dsn); CT_CACHE_STRING(has_transaction); CT_CACHE_STRING(active_secs); CT_CACHE_STRING(idle_secs); if (CTM_initialize() != 0) { PyErr_SetString(PyExc_ImportError, "Unable to initialize CTM."); goto fail; } return 0; fail: assert (PyErr_Occurred()); return -1; } /* init_kidb_connection_timeout */ static ConnectionTimeoutParams *c_timeout_from_py(PyObject *py_timeout) { /* The GIL *IS HELD* when this function is called. */ ConnectionTimeoutParams *tp = NULL; PyObject *py_period = NULL; PyObject *py_callback_before = NULL; PyObject *py_callback_after = NULL; long timeout_period_ms; if (!PyDict_Check(py_timeout)) { raise_exception(ProgrammingError, "The 'timeout' keyword argument to" " kinterbasdb.connect must be either None (the default--no timeout)" " or a dict." ); goto fail; } py_period = PyDict_GetItem(py_timeout, con_timeout__s_period); { LONG_LONG timeout_ms_LL = py_seconds_to_milliseconds(py_period, ProgrammingError, "The timeout dict, if supplied, must contain a" " 'period' entry, the value of which must be a number of seconds" " between 0.001 (one millisecond) and 1209600 (the number of" " seconds in 14 days). The Python object %s is not acceptable.", 1, MS_IN_14_DAYS ); if (PyErr_Occurred()) { goto fail; } /* py_seconds_to_milliseconds constrained the user-supplied timeout to * between 1 and MS_IN_14_DAYS (inclusive), so the following cast is * safe: */ assert (timeout_ms_LL >= 1 && timeout_ms_LL <= MS_IN_14_DAYS); timeout_period_ms = (long) timeout_ms_LL; } py_period = NULL; /* Ref was borrowed. */ #define VALIDATE_CALLBACK(before_or_after) \ py_callback_ ## before_or_after = PyDict_GetItem(py_timeout, \ con_timeout__s_callback_ ## before_or_after \ ); /* BorRef */ \ if (py_callback_ ## before_or_after != NULL) { \ if (py_callback_ ## before_or_after == Py_None) { \ py_callback_ ## before_or_after = NULL; \ } else { \ if (!PyCallable_Check(py_callback_ ## before_or_after)) { \ raise_exception(ProgrammingError, "The optional '" \ # before_or_after " callback', if specified, must be" \ " either a callable object or None." \ ); \ goto fail; \ } \ } \ } VALIDATE_CALLBACK(before); VALIDATE_CALLBACK(after); /* If py_timeout contains any keys other than * 'period', 'callback_before', 'callback_after' * then we complain. This is to prevent the user from accidentally using * the wrong key for (e.g.) a callback, and having it never called. */ { PyObject *key; Py_ssize_t pos = 0; while (PyDict_Next(py_timeout, &pos, &key, NULL)) { if ( ( PyObject_Compare(key, con_timeout__s_period) != 0 && PyObject_Compare(key, con_timeout__s_callback_before) != 0 && PyObject_Compare(key, con_timeout__s_callback_after) != 0 ) || PyErr_Occurred() ) { PyObject *key_repr = PyObject_Repr(key); if (key_repr != NULL) { PyObject *err_msg = PyString_FromFormat( "Unrecognized key %s in connection timeout dict." 
" The following keys are allowed:" " 'period', 'callback_before', 'callback_after'.", PyString_AS_STRING(key_repr) ); if (err_msg != NULL) { raise_exception(ProgrammingError, PyString_AS_STRING(err_msg)); Py_DECREF(err_msg); } Py_DECREF(key_repr); } goto fail; } } } /* On the basis of timeout_period_ms and the Python callbacks, create a * ConnectionTimeoutParams structure: */ tp = ConnectionTimeoutParams_create(timeout_period_ms, py_callback_before, py_callback_after ); if (tp == NULL) { assert (PyErr_Occurred()); goto fail; } return tp; fail: assert (PyErr_Occurred()); if (tp != NULL) { if (ConnectionTimeoutParams_destroy(&tp) == 0) { assert (tp == NULL); } } return NULL; } /* c_timeout_from_py */ static const char *ConnectionOpState_describe(ConnectionOpState state) { char *desc = NULL; switch (state) { case CONOP_IDLE: desc = "IDLE"; break; case CONOP_ACTIVE: desc = "ACTIVE"; break; case CONOP_TIMED_OUT_TRANSPARENTLY: desc = "TIMED_OUT_TRANSPARENTLY"; break; case CONOP_TIMED_OUT_NONTRANSPARENTLY: desc = "TIMED_OUT_NONTRANSPARENTLY"; break; case CONOP_PERMANENTLY_CLOSED: desc = "PERMANENTLY_CLOSED"; break; } return desc; } /* ConnectionOpState_describe */ /************************** MISC SUPPORT CODE: END ***************************/ /****** ConnectionTracker MEMBER FUNC DEFS AND SUPPORTING FUNCS: BEGIN *******/ /* A "function" with the signature of CConnection_untrack must be present to * satisfy the _kisupport_lifo_linked_list.h infrastructure, but in this case, * it doesn't need to do anything: */ #define CConnection_untrack(con, allowed_to_raise) 0 #include "_kisupport_lifo_linked_list.h" /* Note that the ConnectionTracker is defined with ..._SYSALLOC, so its methods * can safely be called when the GIL is not held. */ LIFO_LINKED_LIST_DEFINE_BASIC_METHODS_SYSALLOC( ConnectionTracker, volatile ConnectionTracker, CConnection, volatile CConnection ) /******* ConnectionTracker MEMBER FUNC DEFS AND SUPPORTING FUNCS: END ********/ /*** ConnectionTimeoutManager MEMBER FUNC DEFS AND SUPPORTING FUNCS: BEGIN ***/ static int CTM_initialize(void) { /* The GIL *IS HELD* when this function is called. */ if (Mutex_init(&global_ctm.lock) != 0) { goto fail; } #ifdef PLATFORM_WINDOWS /* Auto-reset event, initially non-signalled: */ global_ctm.reconsider_wait_interval = CreateEvent(NULL, FALSE, FALSE, NULL ); if (global_ctm.reconsider_wait_interval == NULL) { goto fail; } #else if (pthread_cond_init(&global_ctm.reconsider_wait_interval, NULL) != 0) { goto fail; } #endif global_ctm.n_cons = 0; global_ctm.cons = NULL; global_ctm.soonest_next_connection_might_timeout = 0; /* The ConnectionTimeoutThread is not actually started until a connection * with timeout enabled is created. */ CLEAR_CTT_REFS; global_ctm.ctt_should_stop = FALSE; return 0; fail: return -1; } /* CTM_initialize */ static int CTM_add(volatile CConnection *con, ConnectionTimeoutParams *tp) { /* The GIL *IS NOT HELD* when this function is called. */ /* This thread also holds tp, and acquires the CTM lock, but there's no risk * of a deadlock because the CTT can't possibly be holding the CTM lock and * trying to acquire tp (because con is not even in the connection tracker * yet). 
*/ int status = 0; assert (NOT_RUNNING_IN_CONNECTION_TIMEOUT_THREAD); assert (tp != NULL); assert (CURRENT_THREAD_OWNS_TP(tp)); /* It is the responsibility of this method to actually finalize the * association between a connection and its timeout parameter structure; that * should not have been done already: */ assert (con->timeout == NULL); CTM_LOCK; /* Critical section within these brackets: */ { #ifndef NDEBUG ConnectionOpState state = #endif ConnectionTimeoutParams_trans_while_already_locked(tp, CONOP_ACTIVE, CONOP_IDLE ); assert (state == CONOP_IDLE); assert (tp->connected_at > 0); assert (tp->connected_at <= time_millis()); assert (tp->last_active > 0); assert (tp->last_active <= time_millis()); assert (tp->soonest_might_time_out > tp->last_active); status = ConnectionTracker_add(&global_ctm.cons, con); assert (!Connection_timeout_enabled(con)); if (status == 0) { ++global_ctm.n_cons; assert (global_ctm.n_cons > 0); /* In essence: * global_ctm.soonest_next_connection_might_timeout = SOONER_OF( * tp->soonest_might_time_out, * global_ctm.soonest_next_connection_might_timeout * ); */ if (global_ctm.soonest_next_connection_might_timeout == 0 || ( global_ctm.soonest_next_connection_might_timeout - tp->soonest_might_time_out ) > 0 ) { global_ctm.soonest_next_connection_might_timeout = tp->soonest_might_time_out; } /* Associate the ConnectionTimeoutParams object with the connection in * order to indicate that the connection is officially "being tracked * for timeout." */ con->timeout = tp; assert (Connection_timeout_enabled(con)); debug_print1("CTM_add will now wake up CTT (global_ctm.n_cons: %d)\n", (int) global_ctm.n_cons ); WAKE_TIMEOUT_THREAD; } } CTM_UNLOCK; return status; } /* CTM_add */ static int CTM_remove(volatile CConnection *con) { /* The GIL *IS NOT HELD* when this function is called. */ int status; assert (NOT_RUNNING_IN_CONNECTION_TIMEOUT_THREAD); CTM_LOCK; /* Critical section within these brackets: */ { status = ConnectionTracker_remove(&global_ctm.cons, con, TRUE); if (status == 0) { assert (global_ctm.n_cons > 0); --global_ctm.n_cons; } } CTM_UNLOCK; return status; } /* CTM_remove */ static int CTM_apply_timeout(PyThreadState *tstate) { /* The GIL *IS NOT HELD* when this function is called, although this function * sometimes acquires it. */ /* This function should only be called by the ConnectionTimeoutThread, and * that thread should already hold global_ctm's lock before calling this * function. */ int status = 0; Py_ssize_t n_cons_tried_to_time_out = 0; Py_ssize_t n_cons_timed_out = 0; LONG_LONG soonest_timeout_in_next_pass = 0; #define UPDATE_STINP_IF_SP_SMTO_IS_SOONER(tp) \ /* We're not timing tp's connection out during this pass, but as we \ * sweep, we need to determine the soonest that another sweep might be \ * needed. If tp->soonest_might_time_out is sooner than anything we've \ * previously seen, record it. 
*/ \ if ( soonest_timeout_in_next_pass == 0 \ || (tp)->soonest_might_time_out - soonest_timeout_in_next_pass < 0 \ ) \ { soonest_timeout_in_next_pass = (tp)->soonest_might_time_out; } const LONG_LONG official_sweep_time = time_millis(); volatile ConnectionTracker *ct_prev = NULL; volatile ConnectionTracker *ct = global_ctm.cons; assert (RUNNING_IN_CONNECTION_TIMEOUT_THREAD); while (ct != NULL) { volatile CConnection *con = ct->contained; ConnectionTimeoutParams *tp; assert (con != NULL); assert (con->ob_refcnt > 0); assert (con->state == CON_STATE_OPEN); tp = con->timeout; assert (tp != NULL); TP_LOCK(tp); /* Critical section (over tp) within these brackets: */ { if ( tp->state == CONOP_IDLE && tp->soonest_might_time_out - official_sweep_time <= 0 ) { /* Time the connection out unless the user-supplied callback vetoes. */ boolean should_time_out = TRUE; boolean timeout_was_transparent = FALSE; boolean had_transaction; const double active_secs = ConnectionTimeoutParams_active_secs(tp); const double idle_secs = ConnectionTimeoutParams_idle_secs(tp); PyObject *py_dsn = NULL; PyObject *py_active_secs = NULL; PyObject *py_idle_secs = NULL; assert (active_secs >= 0.0); assert (idle_secs >= 0.0); /* It might seem inefficient to acquire and release the GIL around each * connection timeout, but in fact it is not. In a real program, a * given connection is likely to be checked to see whether it should be * timed out *MANY* more times than it is actually timed out. * Therefore, it is desirable for the checking process to avoid holding * the GIL insofar as possible; the acquisition and release of the GIL * which are required to actually time a connection out are executed * relatively rarely. */ ENTER_GIL_USING_THREADSTATE(tstate); /* GIL must be held before Connection_has_any_open_transaction is * called: */ had_transaction = Connection_has_any_open_transaction(DV_CCON(con)); assert (con->dsn != NULL && con->dsn_len > 0); if (tp->py_callback_before != NULL || tp->py_callback_after != NULL) { py_dsn = PyString_FromStringAndSize(con->dsn, con->dsn_len); if (py_dsn == NULL) { SUPPRESS_EXCEPTION; } /* py_active_secs and py_idle_secs will be checked for successful * construction later. */ py_active_secs = PyFloat_FromDouble(active_secs); py_idle_secs = PyFloat_FromDouble(idle_secs); } /* Call the user-supplied "before" callback, if any. The single * argument to the callback is a dict of the form: * {'dsn': dsn, 'has_transaction': boolean, * 'active_secs': float, 'idle_secs': float} * The connection itself is deliberately not exposed, because * kinterbasdb Connections aren't designed to be manipulated by * multiple threads except for those few operations necessary for the * ConnectionTimeoutThread to time the connection out (and to avoid * doing so unless the connection is truly idle). */ if (tp->py_callback_before == NULL) { /* If the user didn't supply a callback, and there's an active * transaction, then the timeout is non-transparent: */ timeout_was_transparent = !had_transaction; } else if (py_dsn != NULL) { /* Notice that we don't enter this block if we couldn't create * py_dsn, even if the user has supplied a callback. */ boolean continue_callback_attempt = ( py_active_secs != NULL && py_idle_secs != NULL ); PyObject *py_callback_dict = NULL; PyObject *py_has_transaction = NULL; /* The user-supplied callback should've been validated much * earlier. 
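*
* Sketch of a user-supplied "before" callback (added commentary; the client
* code is hypothetical and assumes the CT_* verdict constants are exposed
* at the kinterbasdb module level):
*
*   def cb_before(info):
*       # info == {'dsn': ..., 'has_transaction': bool,
*       #          'active_secs': float, 'idle_secs': float}
*       if info['has_transaction']:
*           return kinterbasdb.CT_VETO    # counts as activity; no timeout
*       return kinterbasdb.CT_COMMIT      # resolve work, then time out
*
*   con = kinterbasdb.connect(dsn=..., user=..., password=...,
*       timeout={'period': 120.0, 'callback_before': cb_before})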
*/ assert (PyCallable_Check(DV_PYO(tp->py_callback_before))); py_has_transaction = PyBool_FromLong(had_transaction); /* PyBool_FromLong is never supposed to fail: */ assert (py_has_transaction != NULL); if (continue_callback_attempt) { py_callback_dict = PyDict_New(); if (py_callback_dict == NULL) { continue_callback_attempt = FALSE; assert (should_time_out); } else { if ( PyDict_SetItem(py_callback_dict, con_timeout__s_dsn, py_dsn ) == 0 && PyDict_SetItem(py_callback_dict, con_timeout__s_has_transaction, py_has_transaction) == 0 && PyDict_SetItem(py_callback_dict, con_timeout__s_active_secs, py_active_secs ) == 0 && PyDict_SetItem(py_callback_dict, con_timeout__s_idle_secs, py_idle_secs ) == 0 ) { PyObject *py_callback_res = PyObject_CallFunctionObjArgs( DV_PYO(tp->py_callback_before), py_callback_dict, NULL ); if (py_callback_res == NULL) { /* If an exception arose, time the connection out anyway. */ SUPPRESS_EXCEPTION; assert (should_time_out); assert (!timeout_was_transparent); } else { /* We stick with the default action if the user-supplied * callback returned an object of the wrong type, or of the * right type but wrong value. */ CTCallbackVerdict verdict = CT_DEFAULT; if (PyInt_CheckExact(py_callback_res)) { long verdict_L = PyInt_AS_LONG(py_callback_res); if ( verdict_L == CT_VETO || verdict_L == CT_ROLLBACK || verdict_L == CT_COMMIT || verdict_L == CT_NONTRANSPARENT ) { verdict = (CTCallbackVerdict) verdict_L; } } switch (verdict) { case CT_VETO: should_time_out = FALSE; /* A veto is considered "activity"; we won't initiate * another timeout attempt for this connection until at * least tp->timeout_period seconds have passed. */ _ConnectionTimeoutParams_touch(tp); break; case CT_ROLLBACK: case CT_COMMIT: case CT_NONTRANSPARENT: if (!had_transaction) { /* If the connection didn't have a transaction * originally, it shouldn't now. */ assert (!Connection_has_any_open_transaction( DV_CCON(con) )); timeout_was_transparent = (verdict != CT_NONTRANSPARENT); } else { const WhichTransactionOperation op = ( verdict == CT_COMMIT ? OP_COMMIT : OP_ROLLBACK ); const TransactionalOperationResult trans_res_status = Connection_resolve_all_transactions_from_CTT(con, op); /* Connection_resolve_all_transactions_from_CTT should * have already suppressed any Python exception: */ assert (!PyErr_Occurred()); if (trans_res_status == OP_RESULT_OK) { assert (!Connection_has_any_open_transaction( DV_CCON(con) )); timeout_was_transparent = (verdict != CT_NONTRANSPARENT); } else { assert (!timeout_was_transparent); } } assert (should_time_out); break; default: /* This should never be reached, because verdict should * not have received the value of verdict_L unless that * value was a recognized member of CTCallbackVerdict: */ assert (FALSE); } Py_DECREF(py_callback_res); } /* end of is-py_callback_res-null block */ } else { SUPPRESS_EXCEPTION; continue_callback_attempt = FALSE; assert (should_time_out); } /* If an exception arose, it should've been cleared so the sweep * can continue. */ assert (!PyErr_Occurred()); } /* end of is-py_callback_dict-null block */ } /* end of initial should-continue_callback_attempt block */ Py_XDECREF(py_callback_dict); Py_XDECREF(py_has_transaction); } /* end of did-user-supply-callback block */ /* If the callback did not veto the timeout, close the connection. Use * Connection_close_from_CTT so that this thread doesn't need to * release global_ctm.lock during the connection's closure. 
This * thread removes the connection from the tracker manually (see block * below, after GIL release). */ if (should_time_out) { ++n_cons_tried_to_time_out; if (Connection_close_from_CTT(con) == 0) { const ConnectionOpState desired_state = timeout_was_transparent ? CONOP_TIMED_OUT_TRANSPARENTLY : CONOP_TIMED_OUT_NONTRANSPARENTLY ; #ifndef NDEBUG const ConnectionOpState achieved_state = #endif ConnectionTimeoutParams_trans_while_already_locked(tp, CONOP_IDLE, desired_state ); assert (achieved_state == desired_state); ++n_cons_timed_out; } else { /* Note that we'll remove the connection from the tracker even if * our attempt to close it failed. The only reason for failure * would be a network problem, and if that has occurred, the * connection is already [effectively] closed, so it shouldn't * remain in the tracker. * We set the state to CONOP_TIMED_OUT_NONTRANSPARENTLY because * even though the connection didn't actually time out, it can be * recovered with no greater disruption than recovering from a * non-transparent timeout. */ if (PyErr_Occurred()) { SUPPRESS_EXCEPTION; } else { #ifndef NDEBUG ConnectionOpState achieved_state = #endif ConnectionTimeoutParams_trans_while_already_locked(tp, CONOP_IDLE, CONOP_TIMED_OUT_NONTRANSPARENTLY ); assert (achieved_state == CONOP_TIMED_OUT_NONTRANSPARENTLY); } } } assert (!PyErr_Occurred()); /* Call the user-supplied "after" callback, if any. The single * argument to the callback is a dict of the form: * {'dsn': dsn, * 'active_secs': float, 'idle_secs': float} * The connection itself is deliberately not exposed, for reasons * explained previously. */ if (!should_time_out) { /* The timeout attempt was vetoed by the user-supplied * callback_before, so we need to consider its soonest_might_time_out * timestamp when computing the CTT's after-sweep sleep duration. */ UPDATE_STINP_IF_SP_SMTO_IS_SOONER(tp); } else { if ( tp->py_callback_after != NULL && py_dsn != NULL && py_active_secs != NULL && py_idle_secs != NULL ) { PyObject *py_callback_dict = PyDict_New(); if (py_callback_dict == NULL) { SUPPRESS_EXCEPTION; } else { if ( PyDict_SetItem(py_callback_dict, con_timeout__s_dsn, py_dsn ) == 0 && PyDict_SetItem(py_callback_dict, con_timeout__s_active_secs, py_active_secs ) == 0 && PyDict_SetItem(py_callback_dict, con_timeout__s_idle_secs, py_idle_secs ) == 0 ) { assert (!Connection_has_any_open_transaction(DV_CCON(con))); { PyObject *py_callback_res = PyObject_CallFunctionObjArgs( DV_PYO(tp->py_callback_after), py_callback_dict, NULL ); if (py_callback_res == NULL) { SUPPRESS_EXCEPTION; } else { Py_DECREF(py_callback_res); } } } else { SUPPRESS_EXCEPTION; } Py_DECREF(py_callback_dict); } } } assert (!PyErr_Occurred()); if (py_dsn != NULL) { Py_DECREF(py_dsn); py_dsn = NULL; } if (py_active_secs != NULL) { Py_DECREF(py_active_secs); py_active_secs = NULL; } if (py_idle_secs != NULL) { Py_DECREF(py_idle_secs); py_idle_secs = NULL; } LEAVE_GIL_USING_THREADSTATE(tstate); /* Remove the timed-out connection from the tracker, unless the * callback prevented us from actually timing it out. */ if (should_time_out) { if (ct_prev == NULL) { /* ct is the first node. */ assert (ct == global_ctm.cons); global_ctm.cons = ct->next; kimem_plain_free(DV_CT(ct)); ct = NULL; } else { /* ct is not the first node.
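 * Unlink ct by pointing its predecessor at its successor.  Resetting ct
 * to NULL (head case above) or to ct_prev (interior case) makes the
 * advance step at the bottom of the loop land on the removed node's
 * successor.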
*/ ct_prev->next = ct->next; kimem_plain_free(DV_CT(ct)); ct = ct_prev; } assert (global_ctm.n_cons > 0); --global_ctm.n_cons; } } else { /* We're not timing this connection out during this pass, but as we * sweep, we need to determine the soonest that another sweep might be * needed. */ UPDATE_STINP_IF_SP_SMTO_IS_SOONER(tp); } } TP_UNLOCK(tp); ct_prev = ct; if (ct == NULL) { ct = global_ctm.cons; } else { ct = ct->next; } } /* end of loop across each tracked connection */ if (global_ctm.n_cons == 0) { /* All tracked connections were timed out. */ global_ctm.soonest_next_connection_might_timeout = 0; } else { assert (soonest_timeout_in_next_pass > 0); global_ctm.soonest_next_connection_might_timeout = soonest_timeout_in_next_pass; } return status; } /* CTM_apply_timeout */ static PyObject *pyob_CTM_halt(PyObject *self) { PyObject *timeout_thread_py = NULL; int status = -1; if (global_ctm.timeout_thread_py == NULL) { RETURN_PY_NONE; } LEAVE_GIL_WITHOUT_AFFECTING_DB CTM_LOCK; /* Critical section within these brackets: */ { assert (NOT_RUNNING_IN_CONNECTION_TIMEOUT_THREAD); status = ConnectionTracker_release(&global_ctm.cons); /* The ConnectionTracker_release call should never fail, because * CConnection_untrack doesn't actually do anything. */ assert (status == 0); assert (global_ctm.cons == NULL); /* We take local responsibility for the artificial reference created by * the ConnectionTimeoutThread in pyob_ConnectionTimeoutThread_main. */ timeout_thread_py = global_ctm.timeout_thread_py; global_ctm.ctt_should_stop = TRUE; WAKE_TIMEOUT_THREAD; } CTM_UNLOCK; ENTER_GIL_WITHOUT_AFFECTING_DB if (status == 0) { assert (timeout_thread_py != NULL); { PyObject *join_result = PyObject_CallMethod(timeout_thread_py, "join", NULL ); if (join_result != NULL) { ASSERT_CTT_REFS_ARE_CLEAR; Py_DECREF(join_result); } else { status = -1; } } Py_DECREF(timeout_thread_py); } if (status == 0) { RETURN_PY_NONE; } else { raise_exception(OperationalError, "Unable to cleanly stop" " ConnectionTimeoutThread." ); return NULL; } } /* pyob_CTM_halt */ /**** ConnectionTimeoutManager MEMBER FUNC DEFS AND SUPPORTING FUNCS: END ****/ /******************** ConnectionTimeoutThread DEFS: BEGIN ********************/ static PlatformThreadFuncReturnType THREAD_FUNC_MODIFIER ConnectionTimeoutThread_main(void *context) { /* The GIL *IS NOT HELD* when this function is called, although this * function's subordinates sometimes acquire it (via the PyThreadState* * context). */ PyThreadState *tstate = (PyThreadState *) context; assert (tstate != NULL); CTM_LOCK; for (;;) { while (global_ctm.n_cons == 0 && !global_ctm.ctt_should_stop) { /* The CTM lock should be held at this point. */ debug_print("CTT will now wait indefinitely for new connection.\n"); /* At this point, no connections with timeout enabled have been * registered. This thread will wait until one of the following occurs: * - a connection with timeout enabled arrives in the tracker (via * CTM_add) * - this thread is asked to terminate itself (via pyob_CTM_halt) */ #ifdef PLATFORM_WINDOWS /* Note: Compared to pthread condition variables, Windows Event * objects contain many pitfalls for the unwary. Foremost among them * is that the process of releasing the lock associated with the event, * then waiting for the event, is not an atomic operation, whereas * pthread_cond_wait performs those steps atomically. * Generally speaking, that problem can be addressed by using either * a busy-wait loop or the atomic function SignalObjectAndWait, but the * latter is not available on Win9x. 
* The lack of atomicity doesn't actually matter in this case, * because we know that there is exactly one thread waiting on the * Event (that is, ConnectionTimeoutThread). */ CTM_UNLOCK; WaitForSingleObject(global_ctm.reconsider_wait_interval, INFINITE); CTM_LOCK; #else pthread_cond_wait(&global_ctm.reconsider_wait_interval, &global_ctm.lock ); #endif /* The CTM lock should be held at this point. */ debug_print("CTT awakened from indefinite wait for new connection.\n"); } /* The CTM lock should be held at this point. */ if (global_ctm.ctt_should_stop) { /* This thread, the ConnectionTimeoutThread, has been ordered to * terminate itself. */ debug_print("CTT was ordered to terminate itself.\n"); CLEAR_CTT_REFS; CTM_UNLOCK; break; } assert (global_ctm.n_cons > 0); assert (global_ctm.soonest_next_connection_might_timeout > 0); debug_print1("CTT now has %d con(s) to process.\n", (int) global_ctm.n_cons ); if (global_ctm.soonest_next_connection_might_timeout <= time_millis()) { int timeout_result; debug_print("CTT calling CTM_apply_timeout.\n"); timeout_result = CTM_apply_timeout(tstate); debug_print("CTT finished CTM_apply_timeout call.\n"); /* At present, CTM_apply_timeout never indicates that an error occurred, * because this thread needs to remain active even if a user-supplied * callback raises an exception, or an attempt to close a connection * raises an exception. */ assert (timeout_result == 0); /* Having just finished a call to CTM_apply_timeout, we yield the CTM * lock briefly. This prevents the ConnectionTimeoutThread from hogging * the CTM if the client programmer has requested extremely short * timeouts, yet the connections with such timeouts are remaining active. * (The test suite provoked such behavior, although a real application is * unlikely to). */ CTM_UNLOCK; sleep_millis(10); CTM_LOCK; } { /* CTM_add updates global_ctm.soonest_next_connection_might_timeout if * the newly arrived connection might require attention sooner than any * previously tracked connection. Upon being awakened when CTM_add sets * the event, then, the ConnectionTimeoutThread need only examine * global_ctm.soonest_next_connection_might_timeout to determine how long * it should go back to sleep, if at all. */ boolean wait_ended_because_new_connection_arrived = TRUE; while (wait_ended_because_new_connection_arrived) { /* If the CTM_apply_timeout call or a Connection's non-timeout closure * caused there to be no more connections to monitor, we can * immediately go back to waiting (indefinitely) for the * reconsider_wait_interval event. * We also need to exit this timed-wait loop if this thread has been * ordered to terminate. */ if (global_ctm.n_cons == 0 || global_ctm.ctt_should_stop) { break; } /* Wait until the soonest point in time that one of the connections * currently tracked by the CTM might possibly time out. * If at any time during that waiting period a new connection is added * to the tracker, stop waiting and reorient to accommodate its timeout * period, if it requires action sooner than any of the previously * tracked connections. */ assert (global_ctm.soonest_next_connection_might_timeout > 0); { LONG_LONG max_wait_ms_LL = ( global_ctm.soonest_next_connection_might_timeout - time_millis() ); long max_wait_ms; if (max_wait_ms_LL <= 0) { break; } else if (max_wait_ms_LL < 10) { /* Wait for no fewer than 10ms: */ max_wait_ms_LL = 10; } /* Validation code should have ensured that no connection's timeout * period was longer than a certain threshold, which must be less * than LONG_MAX.
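 *
 * For reference, a self-contained sketch of the relative-to-absolute
 * conversion that millis_into_future_to_abstime must perform for
 * pthread_cond_timedwait -- the function name is real, but this body is
 * an assumption, not a quote of the actual _kisupport implementation:
 *
 *   #include <sys/time.h>
 *   #include <time.h>
 *
 *   static void millis_into_future_to_abstime_sketch(long ms,
 *     struct timespec *abstime
 *   ) {
 *     struct timeval now;
 *     long usec;
 *     gettimeofday(&now, NULL);  // pthread_cond_timedwait expects an
 *                                // absolute wall-clock deadline
 *     usec = now.tv_usec + (ms % 1000) * 1000;
 *     abstime->tv_sec  = now.tv_sec + ms / 1000 + usec / 1000000;
 *     abstime->tv_nsec = (usec % 1000000) * 1000;
 *   }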
*/ assert (max_wait_ms_LL <= LONG_MAX); max_wait_ms = (long) max_wait_ms_LL; debug_print1("CTT will now 'sleep' up to %ld ms.\n", max_wait_ms); #ifdef PLATFORM_WINDOWS { DWORD wait_result; CTM_UNLOCK; wait_result = WaitForSingleObject( global_ctm.reconsider_wait_interval, (DWORD) max_wait_ms ); CTM_LOCK; assert (wait_result != WAIT_FAILED); wait_ended_because_new_connection_arrived = (boolean) (wait_result != WAIT_TIMEOUT); } #else { int wait_result; struct timespec abstime; millis_into_future_to_abstime(max_wait_ms, &abstime); wait_result = pthread_cond_timedwait( &global_ctm.reconsider_wait_interval, &global_ctm.lock, &abstime ); assert (wait_result != EINVAL); assert (wait_result != EPERM); wait_ended_because_new_connection_arrived = (boolean) (wait_result != ETIMEDOUT); } #endif } /* end of scope block for wait time calculation */ } /* end of while(wait_ended_because_new_connection_arrived) loop */ } /* end of scope block for wait_ended_because_new_connection_arrived */ } /* end of for(;;) loop */ return THREAD_FUNC_RETURN_SUCCESS; } /* ConnectionTimeoutThread_main */ static PyObject *pyob_ConnectionTimeoutThread_main( PyObject *self, PyObject *args ) { /* The GIL *IS HELD* when this function is called. */ PyThreadState *tstate = PyThreadState_Get(); boolean main_succeeded = FALSE; PyObject *py_ctt_ref; PyObject *started_event; PyObject *event_set_result = NULL; if (!PyArg_ParseTuple(args, "OO", &py_ctt_ref, &started_event)) { return NULL; } CTM_LOCK; /* At this point, no CTT thread should be running. */ assert (global_ctm.timeout_thread_py == NULL); global_ctm.timeout_thread_py = py_ctt_ref; /* Create an artificial reference to self so that pyob_CTM_halt can be sure * the Python Thread object will not be garbage collected before * pyob_CTM_halt has joined it. */ Py_INCREF(global_ctm.timeout_thread_py); py_ctt_ref = NULL; global_ctm.timeout_thread = Thread_current_ref(); global_ctm.timeout_thread_id = Thread_current_id(); debug_print1("CTT thread ID is %ld\n", global_ctm.timeout_thread_id); CTM_UNLOCK; /* Now that we've filled in the relevant global_ctm fields, call * event.set() to allow the thread that started this thread to proceed. */ event_set_result = PyObject_CallMethod(started_event, "set", NULL); if (event_set_result == NULL) { goto fail; } Py_DECREF(event_set_result); /* Release the GIL before calling the "real" main function of this thread. * That function does enter the GIL at times, but most of the time it * operates GIL-free. */ LEAVE_GIL_WITHOUT_AFFECTING_DB main_succeeded = (boolean) ( ConnectionTimeoutThread_main(tstate) == THREAD_FUNC_RETURN_SUCCESS ); ENTER_GIL_WITHOUT_AFFECTING_DB if (main_succeeded) { RETURN_PY_NONE; } /* Else, fall through to fail: */ fail: assert (PyErr_Occurred()); return NULL; } /* pyob_ConnectionTimeoutThread_main */ /********************* ConnectionTimeoutThread DEFS: END *********************/ /*** ConnectionTimeoutParams MEMBER FUNC DEFS AND SUPPORTING FUNCS: BEGIN ****/ static ConnectionTimeoutParams *ConnectionTimeoutParams_create( long period, PyObject *py_callback_before, PyObject *py_callback_after ) { /* The GIL *IS HELD* when this function is called. 
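 *
 * (The period and the two callbacks originate at the Python level as the
 * 'timeout' argument to kinterbasdb.connect; a hedged sketch of the shape
 * described in the kinterbasdb documentation, with hypothetical callback
 * names:
 *
 *   con = kinterbasdb.connect(dsn=..., user=..., password=...,
 *       timeout={'period': 120.0,  # seconds
 *                'callback_before': before_timeout,
 *                'callback_after': after_timeout})
 * )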
*/ ConnectionTimeoutParams *tp = NULL; /* The range of period and type of the callbacks should have been validated * already; the checks below should be redundant: */ assert (TIMEOUT_PERIOD_IS_IN_RANGE(period)); assert (py_callback_before == NULL || PyCallable_Check(py_callback_before)); assert (py_callback_after == NULL || PyCallable_Check(py_callback_after )); tp = kimem_main_malloc(sizeof(ConnectionTimeoutParams)); if (tp == NULL) { goto fail; } tp->state = CONOP_ACTIVE; tp->connected_at = 0; tp->last_active = 0; tp->timeout_period = period; tp->soonest_might_time_out = 0; Py_XINCREF(py_callback_before); tp->py_callback_before = py_callback_before; Py_XINCREF(py_callback_after); tp->py_callback_after = py_callback_after; tp->lock = PyThread_allocate_lock(); if (tp->lock == NULL) { goto fail; } tp->owner = THREAD_ID_NONE; return tp; fail: assert (PyErr_Occurred()); if (tp != NULL) { _ConnectionTimeoutParams_destroy_(&tp, FALSE); } return NULL; } /* ConnectionTimeoutParams_create */ static int _ConnectionTimeoutParams_destroy_( ConnectionTimeoutParams **tp_, boolean should_destroy_lock ) { /* The GIL *IS HELD* when this function is called. */ ConnectionTimeoutParams *tp = *tp_; Py_XDECREF(tp->py_callback_before); Py_XDECREF(tp->py_callback_after); if (should_destroy_lock) { PyThread_free_lock(tp->lock); } kimem_main_free(tp); *tp_ = NULL; return 0; } /* _ConnectionTimeoutParams_destroy_ */ static int ConnectionTimeoutParams_destroy(ConnectionTimeoutParams **tp_) { /* The GIL *IS HELD* when this function is called. */ return _ConnectionTimeoutParams_destroy_(tp_, TRUE); } /* ConnectionTimeoutParams_destroy */ static ConnectionOpState ConnectionTimeoutParams_trans_while_already_locked( ConnectionTimeoutParams *tp, ConnectionOpState expected_old_state, ConnectionOpState requested_new_state ) { /* The GIL *MIGHT OR MIGHT NOT BE HELD* when this function is called * (therefore, this function should not attempt to acquire * global_ctm.lock because of deadlock risk). */ assert (tp != NULL); assert (CURRENT_THREAD_OWNS_TP(tp)); if (tp->state == expected_old_state) { tp->state = requested_new_state; if (requested_new_state == CONOP_IDLE) { /* We're going from a state of some kind of activity into one of * idleness, so we need to update tp's activity stamps. */ _ConnectionTimeoutParams_touch(tp); } } return tp->state; } /* ConnectionTimeoutParams_trans_while_already_locked */ static ConnectionOpState ConnectionTimeoutParams_trans( ConnectionTimeoutParams *tp, ConnectionOpState expected_old_state, ConnectionOpState requested_new_state ) { /* The GIL *IS HELD* when this function is called. */ ConnectionOpState achieved_state; assert (tp != NULL); assert (!CURRENT_THREAD_OWNS_TP(tp)); ACQUIRE_TP_WITH_GIL_HELD(tp); /* Critical section within these brackets: */ { achieved_state = ConnectionTimeoutParams_trans_while_already_locked(tp, expected_old_state, requested_new_state ); } TP_UNLOCK(tp); return achieved_state; } /* ConnectionTimeoutParams_trans */ static void _ConnectionTimeoutParams_touch(ConnectionTimeoutParams *tp) { /* tp->lock must be held when this function is called. 
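 * For example, with timeout_period == 120000 (milliseconds), a touch at
 * last_active == t guarantees that the sweep in CTM_apply_timeout will
 * not consider this connection for timeout before t + 120000.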
*/ tp->last_active = time_millis(); tp->soonest_might_time_out = tp->last_active + tp->timeout_period; } /* _ConnectionTimeoutParams_touch */ static double ConnectionTimeoutParams_active_secs(ConnectionTimeoutParams *tp) { return ((double) (tp->last_active - tp->connected_at)) / 1000.0; } /* ConnectionTimeoutParams_active_secs */ static double ConnectionTimeoutParams_idle_secs(ConnectionTimeoutParams *tp) { return ((double) (time_millis() - tp->last_active)) / 1000.0; } /* ConnectionTimeoutParams_idle_secs */ /**** ConnectionTimeoutParams MEMBER FUNC DEFS AND SUPPORTING FUNCS: END *****/ /****** CConnection ACTIVATION AND DEACTIVATION INFRASTRUCTURE: BEGIN ********/ static int Connection_activate(CConnection *con, const boolean con_tp_already_locked, const boolean allow_transparent_resumption ) { /* The GIL must be held when this function is called. */ int status = 0; if (!Connection_timeout_enabled(con)) { if (con->state != CON_STATE_OPEN) { raise_exception(ProgrammingError, "Invalid connection state. The" " connection must be open to perform this operation." ); status = -1; } } else { ConnectionTimeoutParams *tp = con->timeout; ConnectionOpState achieved_state; assert (tp != NULL); assert (NOT_RUNNING_IN_CONNECTION_TIMEOUT_THREAD); /* In checked build, verify that the caller's claim about lock ownership * (delivered via boolean con_tp_already_locked) matches reality: */ assert ( con_tp_already_locked ? CURRENT_THREAD_OWNS_CON_TP(con) : !CURRENT_THREAD_OWNS_CON_TP(con) ); if (!con_tp_already_locked) { ACQUIRE_TP_WITH_GIL_HELD(tp); } assert (CURRENT_THREAD_OWNS_CON_TP(con)); achieved_state = ConnectionTimeoutParams_trans_while_already_locked(tp, CONOP_IDLE, CONOP_ACTIVE ); switch (achieved_state) { case CONOP_ACTIVE: /* Everything's fine. */ break; case CONOP_TIMED_OUT_TRANSPARENTLY: if (allow_transparent_resumption) { /* Temporarily disassociate tp from con, then call * Connection_attach_from_members, which will reassociate tp and con * iff it's successful. */ assert (tp == con->timeout); con->timeout = NULL; tp->state = CONOP_ACTIVE; status = Connection_attach_from_members(con, tp); if (status != 0) { PyObject *ex_type; PyObject *ex_value; PyObject *ex_traceback; PyObject *prev_msg = NULL; PyObject *new_msg = NULL; assert (PyErr_Occurred()); PyErr_Fetch(&ex_type, &ex_value, &ex_traceback); prev_msg = PyObject_Str(ex_value); if (prev_msg != NULL) { new_msg = PyString_FromFormat("Attempt to reattach" " transparently-timed-out connection failed with error: %s", PyString_AS_STRING(prev_msg) ); } if (new_msg == NULL) { /* We didn't succeed in creating the new error message (probably * due to low memory), so restore the previous exception. * (PyErr_Restore reclaims our owned reference to each of its * arguments.) 
*/ PyErr_Restore(ex_type, ex_value, ex_traceback); } else { raise_exception(OperationalError, PyString_AS_STRING(new_msg)); Py_DECREF(new_msg); /* The previous exception objects are now obsolete: */ Py_XDECREF(ex_type); Py_XDECREF(ex_value); Py_XDECREF(ex_traceback); } Py_XDECREF(prev_msg); } else { const ConnectionOpState achieved_state = ConnectionTimeoutParams_trans_while_already_locked(tp, CONOP_IDLE, CONOP_ACTIVE ); if (achieved_state != CONOP_ACTIVE) { PyObject *err_msg; status = -1; { const char *achieved_state_desc = ConnectionOpState_describe( achieved_state ); assert (achieved_state_desc != NULL); err_msg = PyString_FromFormat("Unable to reactivate" " transparently-timed-out connection: Could not" " transition from state IDLE to ACTIVE (achieved state %s" " instead).", achieved_state_desc ); } if (err_msg != NULL) { raise_exception(OperationalError, PyString_AS_STRING(err_msg)); Py_DECREF(err_msg); } } } if (status == 0) { /* tp and con should've been reassociated: */ assert (con->timeout == tp); assert (tp->state == CONOP_ACTIVE); } else { assert (PyErr_Occurred()); if (con->timeout == NULL) { /* Connection_attach_from_members did not succeed, so it didn't * associate tp with con. We must make the reassociation so that * the failure to transparently reconnect con is recorded: */ con->timeout = tp; } tp->state = CONOP_TIMED_OUT_NONTRANSPARENTLY; } break; } /* Else (!allow_transparent_resumption), fall through to next: */ case CONOP_TIMED_OUT_NONTRANSPARENTLY: status = -1; raise_exception(ConnectionTimedOut, "A transaction was still" " unresolved when this connection timed out, so it cannot be" " transparently reactivated." ); break; case CONOP_IDLE: status = -1; raise_exception(OperationalError, "Unable to activate idle" " connection." ); break; case CONOP_PERMANENTLY_CLOSED: status = -1; raise_exception(ProgrammingError, "Cannot operate on a permanently" " closed connection." ); break; } if (!con_tp_already_locked) { TP_UNLOCK(tp); } } return status; } /* Connection_activate */ static PyObject *Connection__read_activity_stamps( PyObject *self, PyObject *args ) { /* The GIL must be held when this function is called. */ /* This function returns a 2-tuple of Python ints (or longs) of the form: * (last_active, soonest_might_time_out) * If timeout is not enabled for the connection, this method returns None. * This function is not part of the public Connection API; it is intended * solely for verification purposes (see test_connection_timeouts.py in the * test suite). */ PyObject *py_ret; /* 2-tuple. */ CConnection *con; if (!PyArg_ParseTuple(args, "O!", &ConnectionType, &con)) { return NULL; } if (!Connection_timeout_enabled(con)) { RETURN_PY_NONE; } ACQUIRE_CON_TP_WITH_GIL_HELD(con); py_ret = Py_BuildValue("LL", con->timeout->last_active, con->timeout->soonest_might_time_out ); TP_UNLOCK(con->timeout); return py_ret; } /* Connection__read_activity_stamps */ /******* CConnection ACTIVATION AND DEACTIVATION INFRASTRUCTURE: END *********/ kinterbasdb-3.3.0/typeconv_backcompat.py0000644000175000001440000000223411130647414017614 0ustar pcisarusers# KInterbasDB Python Package - Type Conv : Backward-Compatible # # Version 3.1 # # The following contributors hold Copyright (C) over their respective # portions of code (see license.txt for details): # # [Original Author (maintained through version 2.0-0.3.1):] # 1998-2001 [alex] Alexander Kuznetsov # [Maintainers (after version 2.0-0.3.1):] # 2001-2002 [maz] Marek Isalski # 2002-2004 [dsr] David Rushby # [Contributors:] # 2001 [eac] Evgeny A. 
Cherkashin # 2001-2002 [janez] Janez Jere # This module can be conveniently activated as the process-wide default via: # kinterbasdb.init(type_conv=1) from kinterbasdb import typeconv_datetime_mx from kinterbasdb import typeconv_fixed_stdlib _underlying_modules = (typeconv_datetime_mx, typeconv_fixed_stdlib) # Load the required members from the underlying modules into the namespace of # this module. globalz = globals() for m in _underlying_modules: for req_member in m.__all__: globalz[req_member] = getattr(m, req_member) del globalz kinterbasdb-3.3.0/_kievents.h0000644000175000001440000002145511130647414015355 0ustar pcisarusers/* KInterbasDB Python Package - Header File for Events Support * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ /* Throughout these declarations, note the use of the 'volatile' modifier to * ensure that an altered variable's value is written back to main memory * instead of mistakenly written only to one CPU's cache on a * multiprocessor. */ #ifndef _KIEVENTS_H #define _KIEVENTS_H #include "_kinterbasdb.h" #ifdef ENABLE_DB_EVENT_SUPPORT #include "_kinterbasdb_exception_functions_without_python.h" #include "_kisupport.h" #include "_kisupport_threadsafe_fifo_queue.h" #ifdef FIREBIRD_2_0_OR_LATER #define EVENT_CALLBACK_FUNCTION ISC_EVENT_CALLBACK #else #define EVENT_CALLBACK_FUNCTION isc_callback #endif #define DV_VOID(void_ptr) DEVOLATILE(void *, void_ptr) #define DV_STR(char_ptr) DEVOLATILE(char *, char_ptr) #define DV_STR_PTR(char_ptr_ptr) DEVOLATILE(char **, char_ptr_ptr) #define DV_Q(q) DEVOLATILE(ThreadSafeFIFOQueue *, q) #define DV_EOTC(eotc) DEVOLATILE(EventOpThreadContext *, eotc) #define DV_ERB(erb) DEVOLATILE(EventRequestBlock *, erb) #define DV_CALCTX(callctx) DEVOLATILE(EventCallbackThreadContext *, callctx) #define DV_ISC_STATUS(ISC_STATUS_) DEVOLATILE(ISC_STATUS, ISC_STATUS_) #define DV_STATVEC(ISC_STATUS_ptr) DEVOLATILE(ISC_STATUS *, ISC_STATUS_ptr) #define DV_DB_HANDLE_PTR(isc_db_handle_ptr) DEVOLATILE(isc_db_handle *, isc_db_handle_ptr) #define DV_LONG_PTR(long_ptr) DEVOLATILE(long *, long_ptr) #define DV_ISC_LONG_PTR(ISC_LONG_ptr) DEVOLATILE(ISC_LONG *, ISC_LONG_ptr) #define DV_THREADID_PTR(thread_id_ptr) \ DEVOLATILE(PlatformThreadIdType *, thread_id_ptr) /******************** HARD-CODED LIMITS:BEGIN ********************/ /* EVENT_BLOCK_SIZE is a limitation imposed by the DB client library, but * kinterbasdb transparently eliminates it from the Python programmer's * perspective. */ #define EVENT_BLOCK_SIZE 15 /******************** HARD-CODED LIMITS:END ********************/ /******************** MODULE TYPE DEFINITIONS:BEGIN ********************/ typedef enum { ECALL_UNINITIALIZED = 1, ECALL_DUMMY = 2, ECALL_NORMAL = 3, ECALL_DEAD = 4 } EventCallbackThreadState; typedef struct { PlatformMutexType lock; volatile EventCallbackThreadState state; volatile int block_number; volatile PlatformThreadIdType op_thread_id; /* op_q is a pointer to the operation request queue in the * EventOpThreadContext. The queue's memory and member cleanup is managed * by the EventOpThreadContext, not by EventCallbackThreadContext. 
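 *
 * For orientation, the queue topology described piecemeal throughout
 * this header:
 *
 *   EventCallbackThread(s) --EventOpNodes--------> op_q --> EventOpThread
 *   EventConduit methods ---EventOpNodes--------> op_q --> EventOpThread
 *   EventOpThread ---AdminResponseNodes--> admin_response_q --> requester
 *   EventOpThread ---EventFiredNodes-----> event_q --> EventConduit.wait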
*/ volatile ThreadSafeFIFOQueue *op_q; } EventCallbackThreadContext; typedef struct { #define NULL_EVENT_ID -1 volatile ISC_LONG event_id; volatile char *req_buf; volatile short req_buf_len; /* The EventOpThread never accesses the members of this structure. The only * threads that do so are the thread that creates/destroys the * EventConduit, as well as (obviously) the respective EventCallbackThreads * themselves. */ volatile EventCallbackThreadContext callback_ctx; } EventRequestBlock; typedef enum { /* Requested by the thread that creates the event conduit: */ OP_CONNECT, OP_REGISTER, OP_DIE, /* Requested by the event callback (which runs for all except its final * iteration in a thread started by the database client library). * During this operation, the EventOpThread: * 1) Calculates and posts to the EventFiredQueue an EventFiredNode that * contains the counts of the events that the EventOpNode indicates * occurred. The EventConduit.wait method is where EventFiredNodes are * consumed from the EventFiredQueue. * 2) Re-registers the event callback to be called by the database client * library the next time the server detects the occurrence of any of the * events that were specified by the client programmer in the * event_names parameter to the EventConduit constructor. */ OP_RECORD_AND_REREGISTER, /* Sent by the event callback thread in case of error: */ OP_CALLBACK_ERROR } EventOpThreadOpCode; typedef enum { OPTHREADSTATE_NONE = 1, OPTHREADSTATE_WAITING_FOR_CONNECTION_REQUEST = 2, OPTHREADSTATE_WAITING_FOR_REGISTER_REQUEST = 3, OPTHREADSTATE_READY = 4, OPTHREADSTATE_FATALLY_WOUNDED = 5, OPTHREADSTATE_DEAD = 6 } EventOpThreadState; typedef struct { PlatformMutexType lock; volatile EventOpThreadState state; volatile PlatformThreadIdType event_op_thread_id; volatile int n_event_blocks; /* error_info is used to transfer error information from one piece of code * that uses the EventOpThreadContext to another. This member is always * manipulated by the EventOpThread, except during its initialization and * (potentially) its destruction. */ NonPythonSQLErrorInfo *error_info; /* Members to support the database API calls: */ volatile EventRequestBlock *er_blocks; volatile isc_db_handle db_handle; volatile ISC_STATUS sv[STATUS_VECTOR_SIZE]; /* Members used to communicate with other threads: */ /* Threads that want the EventOpThread to do something post EventOpNodes * bearing an EventOpThreadOpCode to this queue: */ ThreadSafeFIFOQueue op_q; /* The thread(s) that make "administrative requests" of the EventOpThread * wait for acknowledgement on this queue; the EventOpThread provides those * acknowledgements in the form of AdminResponseNodes. Although it's * possible for different threads to wait on this queue, they won't do it * simultaneously, so there's no risk of a waiting thread receiving * acknowledgement of an operation that another thread requested. */ ThreadSafeFIFOQueue admin_response_q; /* event_q is a pointer to the EventConduit's event queue. The EventOpThread * posts EventFiredNodes to this queue in reaction to EventOpNodes that it * receives via op_q from the EventCallbackThread. The EventConduit.wait * method is where EventFiredNodes are consumed from event_q.*/ volatile ThreadSafeFIFOQueue *event_q; } EventOpThreadContext; typedef enum { CONDUIT_STATE_CREATED, CONDUIT_STATE_OPEN, CONDUIT_STATE_CLOSED } EventConduitState; typedef struct { PyObject_HEAD /* Python API - infrastructural macro.
*/ /* Since EventConduits are Python objects, access to them is implicitly * serialized by the GIL. Destructive operations on those members of an * EventConduit that might be in use in multiple threads are not performed * until the destructor. Therefore, no explicit lock is necessary. */ EventConduitState state; PyObject *py_event_names; int n_event_names; int n_event_blocks; PyObject *py_event_counts_dict_template; ThreadSafeFIFOQueue event_q; /* The only time a thread other than the EventOpThread accesses the members * of op_thread_context is during the creation or destruction of the * EventConduit. */ EventOpThreadContext op_thread_context; /* This thread reference is used during EventConduit destruction to ensure * that the EventOpThread has actually exited before the thread that's * destroying the EventOpThread's context pulls the rug out from under it. */ PlatformThreadRefType op_thread_ref; } EventConduit; /* Node types used as elements of the various ThreadSafeFIFOQueues: */ typedef struct { volatile EventOpThreadOpCode op_code; #define NO_TAG -1 volatile int tag; volatile void *payload; /* Might be NULL. */ } EventOpNode; typedef struct { volatile EventOpThreadOpCode op_code; volatile long status; volatile ISC_STATUS sql_error_code; volatile char *message; /* Might be NULL. */ } AdminResponseNode; #ifdef FIREBIRD_2_0_OR_LATER #define UPDATED_BUF_SIGNEDNESS unsigned #else #define UPDATED_BUF_SIGNEDNESS #endif typedef struct { volatile int block_number; UPDATED_BUF_SIGNEDNESS char *updated_buf; } EventCallbackOutputNode; typedef struct { volatile int block_number; volatile long counts[EVENT_BLOCK_SIZE]; #define COUNTS_BUF_SIZE (sizeof(long) * EVENT_BLOCK_SIZE) } EventFiredNode; typedef struct { volatile char *dsn; volatile short dsn_len; volatile char *dpb; volatile short dpb_len; volatile short dialect; } ConnParamsNode; /******************** MODULE TYPE DEFINITIONS:END ********************/ #endif /* ENABLE_DB_EVENT_SUPPORT */ #endif /* if not def _KIEVENTS_H */ kinterbasdb-3.3.0/_kievents.c0000644000175000001440000010606611130647414015352 0ustar pcisarusers/* KInterbasDB Python Package - Implementation of Events Support * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ #ifdef ENABLE_DB_EVENT_SUPPORT #include "_kisupport_threadsafe_fifo_queue.c" #include "_kievents.h" #include "_kievents_infra.c" static PyObject *pyob_EventConduit_close(EventConduit *); static int _update_event_count_dict(PyObject *, PyObject *, Py_ssize_t, Py_ssize_t, long * ); static long _event_context_allocate_event_count_buffers( PyObject *, Py_ssize_t, Py_ssize_t, EventRequestBlock * ); /* Global variables that are "private" to the event subsystem: */ PyObject *events__PyInt_zero; #define EN_OFFSET_FROM_BLOCK_NUMBER(bn) (EVENT_BLOCK_SIZE * (bn)) #define EN_UPPER_LIMIT_FROM_BLOCK_NUMBER(bn, n_event_names) ( \ (EN_OFFSET_FROM_BLOCK_NUMBER(bn) + EVENT_BLOCK_SIZE) > (n_event_names) \ ? 
n_event_names : (EN_OFFSET_FROM_BLOCK_NUMBER(bn) + EVENT_BLOCK_SIZE) \ ) /************* EventConduit METHODS INACCESSIBLE TO PYTHON:BEGIN *************/ #define EventConduit_is_closed(self) ((self)->state != CONDUIT_STATE_OPEN) static long _EventConduit_require_open( EventConduit *self, char *failure_message ) { /* If self is not an open event conduit, raises the supplied error message * (or a default if no error message is supplied). * Returns 0 if the conduit was open; -1 if it was closed. */ if (!EventConduit_is_closed(self)) { return 0; } if (failure_message == NULL) { failure_message = "Invalid EventConduit state. The conduit must be OPEN" " to perform this operation."; } raise_exception(ConduitWasClosed, failure_message); return -1; } /* _EventConduit_require_open */ #define CONDUIT_REQUIRE_OPEN_WITH_FAILURE(self, failure_action) \ if (_EventConduit_require_open(self, NULL) != 0) { failure_action; } #define CONDUIT_REQUIRE_OPEN(self) \ CONDUIT_REQUIRE_OPEN_WITH_FAILURE(self, return NULL) /************** EventConduit METHODS INACCESSIBLE TO PYTHON:END **************/ /************** EventConduit METHODS ACCESSIBLE TO PYTHON:BEGIN **************/ static PyObject *pyob_EventConduit_create(PyObject *self_, PyObject *py_args) { EventConduit *self = NULL; PyObject *py_event_names = NULL; int py_event_names_length; int i; CConnection *originating_con; PyObject *py_event_names_orig; PyObject *py_con_params; boolean init_event_q = FALSE; boolean init_op_thread_context = FALSE; boolean started_op_thread = FALSE; if (!PyArg_ParseTuple(py_args, "O!O!O", &ConnectionType, &originating_con, &PyTuple_Type, &py_con_params, &py_event_names_orig )) { return NULL; } CON_ACTIVATE(originating_con, return NULL); /* Validate py_event_names_orig, storing a tuple representation of it in * py_event_names: */ /* Execute the equivalent of the following Python statement: * py_event_names = tuple(py_event_names_orig) * This allows the client programmer to pass any iterable; the tuple * constructor takes care of extracting the values. Importantly, it also * prevents the client programmer from passing a mutable sequence object and * then modifying it while the EventConduit is operating. * * We use separate variables for py_event_names and py_event_names_orig so * this function's error handler can safely Py_XDECREF(py_event_names). */ py_event_names = PyObject_CallFunctionObjArgs( (PyObject *) &PyTuple_Type, py_event_names_orig, NULL ); if (py_event_names == NULL) { goto fail; } { /* The database engine doesn't work properly with anywhere near INT_MAX * events, so the limit imposed here is no real loss. */ const Py_ssize_t py_event_names_length_ss = PyTuple_GET_SIZE( py_event_names ); if (py_event_names_length_ss > INT_MAX) { raise_exception(NotSupportedError, "At most INT_MAX events supported."); goto fail; } py_event_names_length = (int) py_event_names_length_ss; } /* Disallow zero event names: */ if (py_event_names_length == 0) { raise_exception(ProgrammingError, "Can't wait for zero events."); goto fail; } /* Disallow non-string event names, and also multiple events with the same * name: */ { PyObject *py_event_name_dict = PyDict_New(); if (py_event_name_dict == NULL) { goto fail; } for (i = 0; i < py_event_names_length; i++) { PyObject *en = PyTuple_GET_ITEM(py_event_names, i); if (!PyString_CheckExact(en)) { raise_exception(ProgrammingError, "All event names must be str" " objects."
); goto done_checking_event_names; } if (PyDict_GetItem(py_event_name_dict, en) != NULL) { PyObject *py_err_msg = PyString_FromFormat("The following event name" " appears more than once in the supplied event_names sequence:" " \"%s\"", PyString_AS_STRING(en) ); if (py_err_msg != NULL) { raise_exception(ProgrammingError, PyString_AS_STRING(py_err_msg)); Py_DECREF(py_err_msg); } goto done_checking_event_names; } /* We're using py_event_name_dict as a set, so the key matters, but the * value does not. */ if (PyDict_SetItem(py_event_name_dict, en, Py_None) != 0) { goto done_checking_event_names; } } /* Fall through to done_checking_event_names: */ done_checking_event_names: Py_DECREF(py_event_name_dict); if (PyErr_Occurred()) { goto fail; } } /* Validate py_con_params. * kinterbasdb itself, rather than the client programmer, supplies * py_con_params, so we're not very uptight about validating it. */ /* The PyArg_ParseTuple call toward the beginning of this function should've * already ensured the following: */ assert (PyTuple_CheckExact(py_con_params)); if ( PyTuple_GET_SIZE(py_con_params) != 3 || !PyString_CheckExact(PyTuple_GET_ITEM(py_con_params, 0)) || !PyString_CheckExact(PyTuple_GET_ITEM(py_con_params, 1)) || !PyInt_CheckExact (PyTuple_GET_ITEM(py_con_params, 2)) ) { raise_exception(InternalError, "py_con_params is invalid."); goto fail; } /* The parameters passed to this method from the Python level are now * validated; create and initialize the EventConduit object. */ self = PyObject_New(EventConduit, &EventConduitType); if (self == NULL) { goto fail; } /* Nullify self's members for safe cleanup before initializing them: */ self->state = CONDUIT_STATE_CREATED; self->py_event_names = NULL; self->n_event_names = -1; self->n_event_blocks = -1; self->py_event_counts_dict_template = NULL; self->op_thread_ref = THREAD_REF_INVALID; /* Done nullifying. */ /* Pass ownership of py_event_names to self. */ assert (PyTuple_CheckExact(py_event_names)); self->py_event_names = py_event_names; py_event_names = NULL; self->n_event_names = py_event_names_length; self->n_event_blocks = py_event_names_length / EVENT_BLOCK_SIZE; if (py_event_names_length % EVENT_BLOCK_SIZE != 0) { ++self->n_event_blocks; } self->py_event_counts_dict_template = PyDict_New(); if (self->py_event_counts_dict_template == NULL) { goto fail; } { PyObject *py_ecdt = self->py_event_counts_dict_template; PyObject *py_en = self->py_event_names; for (i = 0; i < py_event_names_length; i++) { if (PyDict_SetItem(py_ecdt, PyTuple_GET_ITEM(py_en, i), events__PyInt_zero ) != 0 ) { goto fail; } } } if (ThreadSafeFIFOQueue_init(&self->event_q) != 0) { raise_exception(OperationalError, "Unable to initialize event_q."); goto fail; } init_event_q = TRUE; { EventOpThreadContext *eotc = &self->op_thread_context; /* Initialize the context structure for the EventOpThread: */ if (EventOpThreadContext_init(eotc, &self->event_q, self->n_event_blocks) != 0 ) { raise_exception(OperationalError, "Unable to initialize op_thread_context."
); goto fail; } init_op_thread_context = TRUE; assert (self->n_event_blocks == eotc->n_event_blocks); assert (&self->event_q == eotc->event_q); /* Start the EventOpThread: */ self->op_thread_ref = Thread_create(EventOpThread_main, eotc, DV_THREADID_PTR(&eotc->event_op_thread_id) ); if (self->op_thread_ref == THREAD_REF_INVALID) { raise_exception(OperationalError, "Unable to create EventOpThread."); goto fail; } started_op_thread = TRUE; /* Send an opcode to the EventOpThread requesting that it establish a * private database connection. This thread then blocks until the * EventOpThread indicates completion by posting to the admin_response_q. */ { ConnParamsNode* payload = kimem_plain_malloc(sizeof(ConnParamsNode)); if (payload == NULL) { goto fail; } payload->dsn = NULL; payload->dpb = NULL; /* The types of the elements of py_con_params were validated earlier. * Additionally, when the CConnection they're derived from was * originally created (in _kicore_connection.c/pyob_Connection_connect), the * contents of the elements were validated. Therefore, we can be certain * that py_con_params contains appropriate values. */ { PyObject *param; #define _CONVERT_STR_PARAM(tuple_pos, name) \ param = PyTuple_GET_ITEM(py_con_params, tuple_pos); \ payload->name ## _len = (short) PyString_GET_SIZE(param); \ assert (payload->name ## _len > 0); \ /* Note that there's no need for a trailing null byte at the end of \ * the buffer: */ \ payload->name = kimem_plain_malloc(payload->name ## _len); \ if (payload->name == NULL) { goto fail_freeing_payload; } \ memcpy(DV_STR(payload->name), PyString_AS_STRING(param), \ payload->name ## _len \ ); _CONVERT_STR_PARAM(0, dsn); _CONVERT_STR_PARAM(1, dpb); param = PyTuple_GET_ITEM(py_con_params, 2); assert (PyInt_CheckExact(param)); payload->dialect = (short) PyInt_AS_LONG(param); } { int req_status = -1; int res_status = -1; ISC_STATUS sql_error_code = 0; char *message = NULL; LEAVE_GIL_WITHOUT_AFFECTING_DB /* DB lock is immaterial. */ req_status = EventOpQueue_request(&eotc->op_q, OP_CONNECT, NO_TAG, payload ); if (req_status == 0) { res_status = AdminResponseQueue_require(&eotc->admin_response_q, OP_CONNECT, 0, &sql_error_code, &message, WAIT_INFINITELY_LONG ); } ENTER_GIL_WITHOUT_AFFECTING_DB /* DB lock is immaterial. */ if (req_status != 0 || res_status != 0) { if (message != NULL) { PyObject *err_msg = PyString_FromFormat("While creating" " EventConduit, OP_CONNECT failed:\n%s", message ); kimem_plain_free(message); if (err_msg != NULL) { raise_exception_with_numeric_error_code(OperationalError, sql_error_code, PyString_AS_STRING(err_msg) ); Py_DECREF(err_msg); } } else { raise_exception(OperationalError, "EventOpThread could not" " establish private database connection." ); } /* If the EventOpQueue_request call succeeded, then the queue took * ownership of the payload. */ if (req_status == 0) { goto fail; } else { goto fail_freeing_payload; } } } goto connect_request_succeeded; fail_freeing_payload: if (payload->dsn != NULL) { kimem_plain_free(DV_STR(payload->dsn)); } if (payload->dpb != NULL) { kimem_plain_free(DV_STR(payload->dpb)); } kimem_plain_free(payload); goto fail; } connect_request_succeeded: /* On success, the EventOpThread will have freed payload's memory. 
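 *
 * (The handshake just completed -- and repeated below for OP_REGISTER, and
 * in pyob_EventConduit_close for OP_DIE -- always has the same shape:
 *
 *   LEAVE_GIL_WITHOUT_AFFECTING_DB
 *   EventOpQueue_request(&eotc->op_q, OP_X, tag, payload);     // ask
 *   AdminResponseQueue_require(&eotc->admin_response_q, OP_X,  // block for
 *       0, &sql_error_code, &message, WAIT_INFINITELY_LONG);   // the ack
 *   ENTER_GIL_WITHOUT_AFFECTING_DB
 *   // ...then raise a Python exception if either step failed.
 * )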
*/ for (i = 0; i < eotc->n_event_blocks; i++) { if (_event_context_allocate_event_count_buffers( DV_PYO(self->py_event_names), EN_OFFSET_FROM_BLOCK_NUMBER(i), EN_UPPER_LIMIT_FROM_BLOCK_NUMBER(i, self->n_event_names), DV_ERB(eotc->er_blocks + i) ) != 0 ) { goto fail; } } /* Now that the EventOpThread's private connection has been established and * the members of EventOpThreadContext necessary to support event * registration have been initialized, we can direct the EventOpThread to * actually register for event notification. */ { int req_status = -1; int res_status = -1; ISC_STATUS sql_error_code = 0; char *message = NULL; LEAVE_GIL_WITHOUT_AFFECTING_DB /* DB lock is immaterial. */ req_status = EventOpQueue_request(&eotc->op_q, OP_REGISTER, NO_TAG, NULL ); if (req_status == 0) { res_status = AdminResponseQueue_require(&eotc->admin_response_q, OP_REGISTER, 0, &sql_error_code, &message, WAIT_INFINITELY_LONG ); } ENTER_GIL_WITHOUT_AFFECTING_DB /* DB lock is immaterial. */ if (req_status != 0 || res_status != 0) { if (message != NULL) { PyObject *err_msg = PyString_FromFormat("While creating" " EventConduit, OP_REGISTER failed:\n%s", message ); kimem_plain_free(message); if (err_msg != NULL) { raise_exception_with_numeric_error_code(OperationalError, sql_error_code, PyString_AS_STRING(err_msg) ); Py_DECREF(err_msg); } } else { raise_exception(OperationalError, "EventOpThread failed to register" " for event notification." ); } goto fail; } } assert (EventOpThreadContext_has_state(eotc, OPTHREADSTATE_READY)); } /* Success: */ assert (self != NULL); assert (!PyErr_Occurred()); self->state = CONDUIT_STATE_OPEN; goto clean; fail: assert (PyErr_Occurred()); if (self != NULL) { if (self->py_event_names != NULL) { Py_DECREF(self->py_event_names); self->py_event_names = NULL; } if (self->py_event_counts_dict_template != NULL) { Py_DECREF(self->py_event_counts_dict_template); self->py_event_counts_dict_template = NULL; } if (init_op_thread_context) { EventOpThreadContext *eotc = &self->op_thread_context; /* Must wait until the EventOpThread has actually exited before yanking * support structures from under it: */ if (started_op_thread) { Thread_join(self->op_thread_ref); } ENTER_GDAL EventOpThreadContext_free_er_blocks(eotc); LEAVE_GDAL EventOpThreadContext_close(eotc); #ifdef FIREBIRD_1_0_ONLY /* 2005.12.13: * YYY: KInterbasDB's event-handling system is not working with FB * 1.0.x at present, although it works with other versions of the * database engine. KInterbasDB's attempt to establish an event * conduit causes an internal error in the FB 1.0.3 server, after * which the server becomes totally unresponsive, both to * isc_detach_database requests and even to server process shutdown * requests (the server has to be killed). * This workaround is intended to prevent the Python client process * from hanging permanently due to the server's refusal to respond to * an isc_detach_database request. */ Connection_close(originating_con, TRUE, FALSE); #endif } if (init_event_q) { /* self's destructor will close self->event_q, but we ensure that it's * in the expected state: */ assert (!(self->event_q).closed); } /* DECREFing self here won't cause pyob_EventConduit_close to be executed * because pyob_EventConduit___del__ will detect that self's state flag * was not yet set to CONDUIT_STATE_OPEN.
*/ assert (EventConduit_is_closed(self)); Py_DECREF(self); self = NULL; } /* This doesn't constitute a "double DECREF" because local variable * py_event_names is nullified as soon as ownership of the Python object is * transferred to self->py_event_names: */ Py_XDECREF(py_event_names); /* Fall through to clean: */ assert (PyErr_Occurred()); assert (self == NULL); clean: CON_PASSIVATE(originating_con); CON_MUST_NOT_BE_ACTIVE(originating_con); return (PyObject *) self; } /* pyob_EventConduit_create */ static void pyob_EventConduit___del__(EventConduit *self) { if (!EventConduit_is_closed(self)) { PyObject *py_close_result = pyob_EventConduit_close(self); if (py_close_result != NULL) { Py_DECREF(py_close_result); } else { SUPPRESS_EXCEPTION; } } /* 2005.09.27: Moved the closure of event_q here from * pyob_EventConduit_close. */ if (!(self->event_q).closed) { if (ThreadSafeFIFOQueue_close(&self->event_q) != 0) { SUPPRESS_EXCEPTION; } } /* Release the EventConduit struct itself: */ PyObject_Del(self); } /* pyob_EventConduit___del__ */ static PyObject *pyob_EventConduit_closed_get( EventConduit *self, void *closure ) { /* Obviously, if the result is FALSE, there's no guarantee that it will still * be valid by the time it's returned to the caller. */ return PyBool_FromLong(EventConduit_is_closed(self)); } /* pyob_EventConduit_closed_get */ static PyObject *pyob_EventConduit_close(EventConduit *self) { /* Note the explicit serialization in this method. */ PyObject *res = NULL; EventOpThreadContext *eotc = &self->op_thread_context; CONDUIT_REQUIRE_OPEN_WITH_FAILURE(self, goto fail); if (self->py_event_names != NULL) { Py_DECREF(self->py_event_names); self->py_event_names = NULL; } self->n_event_names = -1; self->n_event_blocks = -1; if (self->py_event_counts_dict_template != NULL) { Py_DECREF(self->py_event_counts_dict_template); self->py_event_counts_dict_template = NULL; } if (!EventOpThreadContext_has_state(eotc, OPTHREADSTATE_DEAD)) { int status = -1; ISC_STATUS sql_error_code = 0; char *message = NULL; LEAVE_GIL_WITHOUT_AFFECTING_DB /* DB lock is immaterial. */ status = EventOpQueue_request(&eotc->op_q, OP_DIE, NO_TAG, NULL); if (status == 0) { status = AdminResponseQueue_require(&eotc->admin_response_q, OP_DIE, 0, &sql_error_code, &message, WAIT_INFINITELY_LONG ); /* Regardless of whether the EventOpThread shuts down normally, we need * to be sure it's finished before pulling the rug out from under it. */ Thread_join(self->op_thread_ref); } ENTER_GIL_WITHOUT_AFFECTING_DB /* DB lock is immaterial. */ if (status != 0) { if (message != NULL) { raise_exception_with_numeric_error_code(OperationalError, sql_error_code, message ); kimem_plain_free(message); } goto fail; } } /* Close the members of EventOpThreadContext that're necessary for the most * basic communication with the thread(s) that're calling methods of the * supervising EventConduit: */ if (EventOpThreadContext_close(eotc) != 0) { goto fail; } /* 2005.09.27: */ /* Previously, the following call was issued here on the event queue: * ThreadSafeFIFOQueue_close(&self->event_q); * However, that method closes its target destructively (freeing container * memory, synchronization primitives, etc.). Therefore, calling it here * caused memory corruption if there were one or more threads wait()ing for * events when another thread called close(). * Since our earlier closure of the EventOpThreadContext will already have * indirectly *cancelled* the event queue, we needn't do anything more to the * event queue here. 
Instead, we wait to call ThreadSafeFIFOQueue_close * until EventConduit's destructor. */ assert (ThreadSafeFIFOQueue_is_cancelled(&self->event_q)); self->state = CONDUIT_STATE_CLOSED; res = Py_None; Py_INCREF(Py_None); goto exit; fail: assert (res == NULL); if (!PyErr_Occurred()) { raise_exception(OperationalError, "Unspecified error while closing."); } /* Fall through to exit: */ exit: return res; } /* pyob_EventConduit_close */ static PyObject *pyob_EventConduit_flush(EventConduit *self) { /* This method needs no explicit serialization because the underlying * ThreadSafeFIFOQueue self->event_q provides enough. */ LONG_LONG n_items_flushed = -1; CONDUIT_REQUIRE_OPEN(self); if (ThreadSafeFIFOQueue_flush(&self->event_q, &n_items_flushed) != 0) { raise_exception(OperationalError, "Underlying event queue flush failed."); goto fail; } assert (n_items_flushed >= 0); return PythonIntOrLongFrom64BitValue(n_items_flushed); fail: assert (PyErr_Occurred()); return NULL; } /* pyob_EventConduit_flush */ static PyObject *pyob_EventConduit_wait( EventConduit *self, PyObject *args, PyObject *kwargs ) { /* This method needs no explicit serialization because the underlying * ThreadSafeFIFOQueue self->event_q provides enough. */ static char *kwarg_list[] = {"timeout", NULL}; PyObject *py_timeout = NULL; long timeout_ms = WAIT_INFINITELY_LONG; WaitResult wait_res; EventFiredNode *n = NULL; PyObject *py_count_dict = NULL; CONDUIT_REQUIRE_OPEN(self); if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O", kwarg_list, &py_timeout ) ) { goto fail; } if (py_timeout != NULL && py_timeout != Py_None) { LONG_LONG timeout_ms_LL = py_seconds_to_milliseconds(py_timeout, ProgrammingError, "'timeout' parameter to EventConduit.wait must be either None or -1.0" " to wait infinitely, or a non-negative number specifying the maximum" " number of seconds to wait before timing out. The Python object %s" " is not an acceptable input value.", -1, LONG_MAX ); if (PyErr_Occurred()) { goto fail; } /* py_seconds_to_milliseconds constrained the user-supplied timeout to * between -1 and LONG_MAX (inclusive), so the following cast is safe: */ timeout_ms = (long) timeout_ms_LL; } LEAVE_GIL_WITHOUT_AFFECTING_DB /* DB lock is immaterial. */ wait_res = EventFiredQueue_get(&self->event_q, timeout_ms, &n); ENTER_GIL_WITHOUT_AFFECTING_DB /* DB lock is immaterial. */ if (wait_res == WR_WAIT_TIMEOUT) { RETURN_PY_NONE; } else if (wait_res != WR_WAIT_OK) { if (wait_res == WR_WAIT_CANCELLED) { raise_exception(ConduitWasClosed, "Event conduit was closed before wait completed." ); } else { raise_exception(OperationalError, "Unspecified fatal error while waiting for events." ); } goto fail; } assert (n != NULL); assert (n->block_number >= 0 && n->block_number <= self->n_event_blocks); /* Convert the raw q_item to a Python-accessible value. 
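 *
 * (The dict built below is what EventConduit.wait returns to client code.
 * A sketch of typical Python-level usage, with hypothetical event names --
 * event_conduit being the Connection method that, per the kinterbasdb
 * docs, reaches pyob_EventConduit_create above:
 *
 *   conduit = con.event_conduit(['order_created', 'order_shipped'])
 *   counts = conduit.wait(timeout=30.0)  # None if the wait timed out
 *   if counts is not None:
 *       print counts   # e.g. {'order_created': 2, 'order_shipped': 0}
 *   conduit.close()
 * )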
*/ py_count_dict = PyDict_Copy(self->py_event_counts_dict_template); if (py_count_dict == NULL) { goto fail; } if (_update_event_count_dict(py_count_dict, DV_PYO(self->py_event_names), EN_OFFSET_FROM_BLOCK_NUMBER(n->block_number), EN_UPPER_LIMIT_FROM_BLOCK_NUMBER(n->block_number, self->n_event_names), DV_LONG_PTR(n->counts) ) != 0 ) { goto fail; } assert (py_count_dict != NULL); goto exit; fail: assert (PyErr_Occurred()); Py_XDECREF(py_count_dict); /* Fall through to exit: */ exit: if (n != NULL) { EventFiredNode_del(n); } return py_count_dict; } /* pyob_EventConduit_wait */ /*************** EventConduit METHODS ACCESSIBLE TO PYTHON:END ***************/ /************************** UTILITY FUNCTIONS:BEGIN **************************/ static long _event_context_allocate_event_count_buffers( PyObject *py_event_names, Py_ssize_t en_offset, Py_ssize_t en_upper_limit, EventRequestBlock *erb ) { /* Build event count buffers using isc_event_block, for up to * EVENT_BLOCK_SIZE events. * I know of no way to gracefully and *portably* "apply(function, sequence)" * in C, so this is really ugly. */ long res = -1; char *en[EVENT_BLOCK_SIZE]; long allocated_buf_len = -1; #ifdef FIREBIRD_2_0_OR_LATER ISC_UCHAR * #else char * #endif res_buf_slot__discarded = NULL; short n_to_allocate; assert (en_upper_limit - en_offset <= SHRT_MAX); n_to_allocate = (short) (en_upper_limit - en_offset); /* Before calling this function, EventConduit_create should have verified the * following: */ assert (erb != NULL); assert (erb->req_buf == NULL); assert (en_offset >= 0); assert (en_upper_limit >= 1); assert (en_offset < en_upper_limit); assert (n_to_allocate >= 1 && n_to_allocate <= EVENT_BLOCK_SIZE); assert (py_event_names != NULL); assert (PyTuple_CheckExact(py_event_names)); assert (en_upper_limit <= PyTuple_GET_SIZE(py_event_names)); { Py_ssize_t py_en_tuple_index = en_offset; for (; py_en_tuple_index < en_upper_limit; py_en_tuple_index++) { PyObject *s = PyTuple_GET_ITEM(py_event_names, py_en_tuple_index); /* EventConduit_create should've already verified that every element of * py_event_names is a string: */ assert (PyString_CheckExact(s)); en[py_en_tuple_index % EVENT_BLOCK_SIZE] = PyString_AS_STRING(s); } } ENTER_GDAL #ifdef FIREBIRD_2_0_OR_LATER #define _CONVERT_REQBUF_POINTER(p) ((ISC_UCHAR **) DV_STR_PTR(p)) #else #define _CONVERT_REQBUF_POINTER(p) DV_STR_PTR(p) #endif #define ISC_EVENT_BLOCK_BEGIN \ allocated_buf_len = isc_event_block( \ _CONVERT_REQBUF_POINTER(&erb->req_buf), \ &res_buf_slot__discarded, n_to_allocate, #define ISC_EVENT_BLOCK_END ); break; #define EN_STR(i) en[i] switch (n_to_allocate) { case 1: ISC_EVENT_BLOCK_BEGIN EN_STR(0) ISC_EVENT_BLOCK_END case 2: ISC_EVENT_BLOCK_BEGIN EN_STR(0), EN_STR(1) ISC_EVENT_BLOCK_END case 3: ISC_EVENT_BLOCK_BEGIN EN_STR(0), EN_STR(1), EN_STR(2) ISC_EVENT_BLOCK_END case 4: ISC_EVENT_BLOCK_BEGIN EN_STR(0), EN_STR(1), EN_STR(2), EN_STR(3) ISC_EVENT_BLOCK_END case 5: ISC_EVENT_BLOCK_BEGIN EN_STR(0), EN_STR(1), EN_STR(2), EN_STR(3), EN_STR(4) ISC_EVENT_BLOCK_END case 6: ISC_EVENT_BLOCK_BEGIN EN_STR(0), EN_STR(1), EN_STR(2), EN_STR(3), EN_STR(4), EN_STR(5) ISC_EVENT_BLOCK_END case 7: ISC_EVENT_BLOCK_BEGIN EN_STR(0), EN_STR(1), EN_STR(2), EN_STR(3), EN_STR(4), EN_STR(5), EN_STR(6) ISC_EVENT_BLOCK_END case 8: ISC_EVENT_BLOCK_BEGIN EN_STR(0), EN_STR(1), EN_STR(2), EN_STR(3), EN_STR(4), EN_STR(5), EN_STR(6), EN_STR(7) ISC_EVENT_BLOCK_END case 9: ISC_EVENT_BLOCK_BEGIN EN_STR(0), EN_STR(1), EN_STR(2), EN_STR(3), EN_STR(4), EN_STR(5), EN_STR(6), EN_STR(7), EN_STR(8) 
ISC_EVENT_BLOCK_END case 10: ISC_EVENT_BLOCK_BEGIN EN_STR(0), EN_STR(1), EN_STR(2), EN_STR(3), EN_STR(4), EN_STR(5), EN_STR(6), EN_STR(7), EN_STR(8), EN_STR(9) ISC_EVENT_BLOCK_END case 11: ISC_EVENT_BLOCK_BEGIN EN_STR(0), EN_STR(1), EN_STR(2), EN_STR(3), EN_STR(4), EN_STR(5), EN_STR(6), EN_STR(7), EN_STR(8), EN_STR(9), EN_STR(10) ISC_EVENT_BLOCK_END case 12: ISC_EVENT_BLOCK_BEGIN EN_STR(0), EN_STR(1), EN_STR(2), EN_STR(3), EN_STR(4), EN_STR(5), EN_STR(6), EN_STR(7), EN_STR(8), EN_STR(9), EN_STR(10), EN_STR(11) ISC_EVENT_BLOCK_END case 13: ISC_EVENT_BLOCK_BEGIN EN_STR(0), EN_STR(1), EN_STR(2), EN_STR(3), EN_STR(4), EN_STR(5), EN_STR(6), EN_STR(7), EN_STR(8), EN_STR(9), EN_STR(10), EN_STR(11), EN_STR(12) ISC_EVENT_BLOCK_END case 14: ISC_EVENT_BLOCK_BEGIN EN_STR(0), EN_STR(1), EN_STR(2), EN_STR(3), EN_STR(4), EN_STR(5), EN_STR(6), EN_STR(7), EN_STR(8), EN_STR(9), EN_STR(10), EN_STR(11), EN_STR(12), EN_STR(13) ISC_EVENT_BLOCK_END case 15: ISC_EVENT_BLOCK_BEGIN EN_STR(0), EN_STR(1), EN_STR(2), EN_STR(3), EN_STR(4), EN_STR(5), EN_STR(6), EN_STR(7), EN_STR(8), EN_STR(9), EN_STR(10), EN_STR(11), EN_STR(12), EN_STR(13), EN_STR(14) ISC_EVENT_BLOCK_END default: /* No default case is necessary because the length of py_event_names was * already validated. */ assert (FALSE); } LEAVE_GDAL if ( allocated_buf_len <= 0 || erb->req_buf == NULL || res_buf_slot__discarded == NULL ) { raise_exception(OperationalError, "isc_event_block: event buffers could" " not be allocated." ); goto fail; } else if (allocated_buf_len > SHRT_MAX) { raise_exception(OperationalError, "isc_event_block: allocated event" " buffers have size > SHRT_MAX." ); goto fail; } erb->req_buf_len = (short) allocated_buf_len; res = 0; goto exit; fail: assert (PyErr_Occurred()); assert (res == -1); if (erb->req_buf != NULL) { ENTER_GDAL kimem_db_client_free(DV_STR(erb->req_buf)); erb->req_buf = NULL; LEAVE_GDAL } /* Fall through to exit: */ exit: /* isc_event_block requires that it be allowed to allocate this buffer, but * because of the migration to a queue-centric thread communication * architecture, we don't actually use it. */ if (res_buf_slot__discarded != NULL) { ENTER_GDAL kimem_db_client_free((void *) res_buf_slot__discarded); LEAVE_GDAL } return res; } /* _event_context_allocate_event_count_buffers */ static int _update_event_count_dict(PyObject *py_count_dict, PyObject *py_event_names, Py_ssize_t en_offset, Py_ssize_t en_upper_limit, long *counts ) { Py_ssize_t en_pos; Py_ssize_t counts_pos; /* These conditions should've been verified earlier: */ assert (py_count_dict != NULL); assert (py_event_names != NULL); assert (PyTuple_CheckExact(py_event_names)); assert (PyTuple_GET_SIZE(py_event_names) > 0); assert (en_upper_limit <= PyTuple_GET_SIZE(py_event_names)); assert (en_offset >= 0); assert (en_offset < en_upper_limit); for (en_pos = en_offset, counts_pos = 0; en_pos < en_upper_limit; en_pos++, counts_pos++ ) { long cur_count; assert (counts_pos >= 0 && counts_pos < EVENT_BLOCK_SIZE); cur_count = counts[counts_pos]; if (cur_count == 0) { /* The count of 0 is already present in py_count_dict, and already zero; * we needn't do anything. 
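 * (Concretely: for event names ('A', 'B', 'C') and counts [2, 0, 1], the
 * result is {'A': 2, 'B': 0, 'C': 1}; 'B' simply keeps the zero inherited
 * from py_event_counts_dict_template.)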
*/ assert ( PyObject_Compare( events__PyInt_zero, PyDict_GetItem(py_count_dict, PyTuple_GET_ITEM(py_event_names, en_pos) ) ) == 0 ); continue; } else { int status; PyObject *py_key = PyTuple_GET_ITEM(py_event_names, en_pos); PyObject *py_value = PyInt_FromLong(cur_count); if (py_value == NULL) { goto fail; } assert (PyString_CheckExact(py_key)); status = PyDict_SetItem(py_count_dict, py_key, py_value); Py_DECREF(py_value); if (status != 0) { goto fail; } } } return 0; fail: assert (PyErr_Occurred()); return -1; } /* _update_event_count_dict */ /*************************** UTILITY FUNCTIONS:END ***************************/ /********** EventConduit CLASS DEFINITION AND INITIALIZATION:BEGIN ***********/ static PyMethodDef EventConduit_methods[] = { {"wait", (PyCFunction) pyob_EventConduit_wait, METH_VARARGS|METH_KEYWORDS}, {"flush", (PyCFunction) pyob_EventConduit_flush, METH_NOARGS}, {"close", (PyCFunction) pyob_EventConduit_close, METH_NOARGS}, {NULL} /* sentinel */ }; static PyGetSetDef EventConduit_getters_setters[] = { {"closed", (getter) pyob_EventConduit_closed_get, NULL, NULL, }, {NULL} /* sentinel */ }; PyTypeObject EventConduitType = { /* new-style class */ PyObject_HEAD_INIT(NULL) 0, /* ob_size */ "kinterbasdb.EventConduit", /* tp_name */ sizeof(EventConduit), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor) pyob_EventConduit___del__, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ 0, /* tp_flags */ 0, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ EventConduit_methods, /* tp_methods */ NULL, /* tp_members */ EventConduit_getters_setters, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ /* Currently using pyob_EventConduit_create instead of a conventional * __init__ method: */ 0, /* tp_init */ 0, /* tp_alloc */ 0, /* tp_new */ }; static int init_kidb_event_system(void) { /* EventConduitType is a new-style class, so PyType_Ready must be called * before its getters and setters will function. */ if (PyType_Ready(&EventConduitType) < 0) { goto fail; } events__PyInt_zero = PyInt_FromLong(0); if (events__PyInt_zero == NULL) { goto fail; } return 0; fail: /* This function is indirectly called by the module loader, which makes no * provision for error recovery. */ return -1; } /* init_kidb_event_system */ /*********** EventConduit CLASS DEFINITION AND INITIALIZATION:END ************/ #endif /* ENABLE_DB_EVENT_SUPPORT */ kinterbasdb-3.3.0/_kiconversion_type_translation.c0000644000175000001440000007400011130647414021702 0ustar pcisarusers/* KInterbasDB Python Package - Implementation of Dynamic Type Translation * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. 
Cherkashin * 2001-2002 [janez] Janez Jere */ static PyObject *dynamically_type_convert_input_obj_if_necessary( PyObject *py_input, boolean is_array_element, unsigned short dialect, short data_type, short data_subtype, short scale, PyObject *converter ); /* Infinitely persistent global variables: */ PyObject *_type_names_all_supported; PyObject *cached_type_name_TEXT; PyObject *cached_type_name_TEXT_UNICODE; PyObject *cached_type_name_BLOB; PyObject *cached_type_name_INTEGER; PyObject *cached_type_name_FIXED; PyObject *cached_type_name_FLOATING; PyObject *cached_type_name_TIMESTAMP; PyObject *cached_type_name_DATE; PyObject *cached_type_name_TIME; PyObject *cached_type_name_BOOLEAN; #define DTT_DISALLOW_POSITIONAL -1 #define DTT_POSITIONAL_IN_RANGE(i) (i >= 0 && i <= SHRT_MAX) #define IS_UNICODE_CHAR_OR_VARCHAR(data_type, data_subtype) \ ((boolean) ( \ ((data_type) == SQL_VARYING || (data_type) == SQL_TEXT) \ && (data_subtype) > 2 \ )) #define CACHED_TYPE_NAME__TEXT_OR_TEXT_UNICODE(data_subtype) \ ( ((data_subtype) <= 2) ? \ cached_type_name_TEXT \ : cached_type_name_TEXT_UNICODE \ ) static int init_kidb_type_translation(void) { _type_names_all_supported = PyList_New(0); if (_type_names_all_supported == NULL) { goto fail; } /* ICTN stands for Init Constant Type Name: */ #define _ICTN(ptr, name) \ if ((ptr = PyString_FromString(name)) == NULL) { goto fail; } \ if (PyList_Append(_type_names_all_supported, ptr) != 0) { goto fail; } _ICTN( cached_type_name_TEXT, "TEXT" ); _ICTN( cached_type_name_TEXT_UNICODE, "TEXT_UNICODE" ); _ICTN( cached_type_name_BLOB, "BLOB" ); _ICTN( cached_type_name_INTEGER, "INTEGER" ); _ICTN( cached_type_name_FIXED, "FIXED" ); _ICTN( cached_type_name_FLOATING, "FLOATING" ); _ICTN( cached_type_name_TIMESTAMP, "TIMESTAMP" ); _ICTN( cached_type_name_DATE, "DATE" ); _ICTN( cached_type_name_TIME, "TIME" ); _ICTN( cached_type_name_BOOLEAN, "BOOLEAN" ); return 0; fail: assert (PyErr_Occurred()); /* Don't free any allocated memory because this function is called during * module import, and Python provides no way to recover from an error * during that process anyway. */ return -1; } /* init_kidb_type_translation */ static PyObject *_get_cached_type_name_for_conventional_code( unsigned short dialect, short data_type, short data_subtype, short scale ) { switch (data_type) { case SQL_TEXT: case SQL_VARYING: return CACHED_TYPE_NAME__TEXT_OR_TEXT_UNICODE(data_subtype); case SQL_BLOB: return cached_type_name_BLOB; case SQL_SHORT: case SQL_LONG: #ifdef INTERBASE_6_OR_LATER case SQL_INT64: #endif /* INTERBASE_6_OR_LATER */ return IS_FIXED_POINT__CONVENTIONAL(dialect, data_type, data_subtype, scale) ? cached_type_name_FIXED : cached_type_name_INTEGER ; case SQL_FLOAT: case SQL_DOUBLE: case SQL_D_FLOAT: /* 2005.07.11: Internal floating point value can "logically" represent * fixed-point value in dialect < 3 databases.
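* (Worked example, consistent with the precision-10-18 comment in
* dynamically_type_convert_input_obj_if_necessary: in a dialect 1 database, a column declared
* NUMERIC(15,2) is stored as a double with scale -2, so it reaches this function as SQL_DOUBLE
* with a nonzero scale and must be reported as 'FIXED' rather than 'FLOATING'.)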
*/ if (dialect < 3 && scale != 0) { return cached_type_name_FIXED; } else { return cached_type_name_FLOATING; } case SQL_TIMESTAMP: return cached_type_name_TIMESTAMP; #ifdef INTERBASE_6_OR_LATER case SQL_TYPE_DATE: return cached_type_name_DATE; case SQL_TYPE_TIME: return cached_type_name_TIME; #endif /* INTERBASE_6_OR_LATER */ case SQL_BOOLEAN: return cached_type_name_BOOLEAN; default: { PyObject *err_msg = PyString_FromFormat( "Unable to determine conventional type name from these parameters:" " dialect: %d, data_type: %d, data_subtype: %d, scale: %d", (int) dialect, (int) data_type, (int) data_subtype, (int) scale ); if (err_msg != NULL) { raise_exception(InternalError, PyString_AS_STRING(err_msg)); Py_DECREF(err_msg); } } return NULL; } } /* _get_cached_type_name_for_conventional_code */ static PyObject *_get_cached_type_name_for_array_code( unsigned short dialect, short data_type, short data_subtype, short scale ) { switch (data_type) { case blr_text: case blr_text2: case blr_varying: case blr_varying2: case blr_cstring: case blr_cstring2: return CACHED_TYPE_NAME__TEXT_OR_TEXT_UNICODE(data_subtype); case blr_short: case blr_long: #ifdef INTERBASE_6_OR_LATER case blr_int64: #endif /* INTERBASE_6_OR_LATER */ return IS_FIXED_POINT__ARRAY_EL(dialect, data_type, data_subtype, scale) ? cached_type_name_FIXED : cached_type_name_INTEGER ; case blr_float: case blr_double: case blr_d_float: /* 2005.07.11: Internal floating point value can "logically" represent * fixed-point value in dialect < 3 databases. */ if (dialect < 3 && scale != 0) { return cached_type_name_FIXED; } else { return cached_type_name_FLOATING; } case blr_timestamp: return cached_type_name_TIMESTAMP; #ifdef INTERBASE_6_OR_LATER case blr_sql_date: return cached_type_name_DATE; case blr_sql_time: return cached_type_name_TIME; #endif /* INTERBASE_6_OR_LATER */ case blr_boolean_dtype: return cached_type_name_BOOLEAN; case blr_quad: /* The database engine does not support arrays of arrays. */ case blr_blob: case blr_blob_id: /* The database engine does not support arrays of blobs. */ raise_exception(InternalError, "_get_cached_type_name_for_array_code:" " This code was written under the assumption that the database" " engine does not support arrays of arrays or arrays of blobs." ); return NULL; default: { PyObject *err_msg = PyString_FromFormat( "Unable to determine array type name from these parameters:" " dialect: %d, data_type: %d, data_subtype: %d, scale: %d", (int) dialect, (int) data_type, (int) data_subtype, (int) scale ); if (err_msg != NULL) { raise_exception(InternalError, PyString_AS_STRING(err_msg)); Py_DECREF(err_msg); } } return NULL; } } /* _get_cached_type_name_for_array_code */ static PyObject *_get_converter( PyObject *trans_dict, short sqlvar_index, unsigned short dialect, short data_type, short data_subtype, short scale, boolean is_array_field ) { /* Returns a borrowed reference to the converter if one is registered in * $trans_dict for the specified type (the registered converter might be * Py_None to mandate "naked" translation). * If no converter was present, returns NULL. * Also returns NULL on error, so use PyErr_Occurred() to determine whether * the NULL return value indicates "missing" or "error." */ PyObject *converter = NULL; if (trans_dict != NULL) { PyObject *type_name = NULL; /* Positional DTT settings take precedence over typal settings, but of * course positional settings only apply to cursors, not connections. 
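*
* Hypothetical client-side sketch (Python; the converter names are invented for illustration):
*   cur.set_type_trans_out({0: conv_col0, 'FIXED': conv_fixed})
* Column 0 is translated by conv_col0 even if it is a FIXED column; every other FIXED column
* falls back to conv_fixed, and a converter registered on the connection is consulted only if
* the cursor's dict has no applicable entry.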
*/ if (sqlvar_index != DTT_DISALLOW_POSITIONAL) { PyObject *py_sqlvar_index = PyInt_FromLong(sqlvar_index); if (py_sqlvar_index == NULL) { goto fail; } converter = PyDict_GetItem(trans_dict, py_sqlvar_index); Py_DECREF(py_sqlvar_index); if (converter != NULL) { return converter; } /* Else, fall through to typal lookup. */ } type_name = ( is_array_field ? _get_cached_type_name_for_array_code(dialect, data_type, data_subtype, scale ) : _get_cached_type_name_for_conventional_code(dialect, data_type, data_subtype, scale ) ); if (type_name == NULL) { goto fail; } converter = PyDict_GetItem(trans_dict, type_name); if (converter != NULL) { return converter; } } /* Converter couldn't be found (not an error). */ assert (converter == NULL); return NULL; fail: /* An exception should've already been set: */ assert (PyErr_Occurred()); return NULL; } /* _get_converter */ #define _make__connection_get_DIRECTION_converter(direction) \ static PyObject *connection_get_ ## direction ## _converter( \ CConnection *con, \ short data_type, short data_subtype, short scale, \ boolean is_array_field \ ) \ { \ PyObject *converter = _get_converter(con->type_trans_ ## direction, \ /* Positional DTT doesn't apply to connections: */ \ DTT_DISALLOW_POSITIONAL, \ con->dialect, \ data_type, data_subtype, scale, is_array_field \ ); \ if (converter == NULL && !PyErr_Occurred()) { \ /* No converter was present; return borrowed ref to None. */ \ converter = Py_None; \ } \ return converter; \ } _make__connection_get_DIRECTION_converter(out) _make__connection_get_DIRECTION_converter(in) #define _make__cursor_get_DIRECTION_converter(direction) \ static PyObject *cursor_get_ ## direction ## _converter( \ Cursor *cursor, short sqlvar_index, \ short data_type, short data_subtype, short scale, \ boolean is_array_field \ ) \ { \ PyObject *trans_dict = cursor->type_trans_ ## direction; \ PyObject *converter = _get_converter(trans_dict, sqlvar_index, \ Transaction_get_dialect(cursor->trans), \ data_type, data_subtype, scale, is_array_field \ ); \ \ if (converter != NULL || PyErr_Occurred()) { \ return converter; \ } \ \ /* Fall back on the connection's translation dictionary, if any. */ \ return connection_get_ ## direction ## _converter( \ Transaction_get_con(cursor->trans), \ data_type, data_subtype, scale, is_array_field \ ); \ } _make__cursor_get_DIRECTION_converter(in) _make__cursor_get_DIRECTION_converter(out) #define _make__cursor_get_INOROUT_converter_for_type_name(direction) \ static PyObject *cursor_get_ ## direction ## _converter_for_type_name( \ Cursor *cursor, PyObject *type_name \ ) \ { \ /* Returns borrowed ref. */ \ PyObject *converter; \ PyObject *trans_dict = cursor->type_trans_ ## direction; \ if (trans_dict != NULL) { \ converter = PyDict_GetItem(trans_dict, type_name); \ if (converter != NULL || PyErr_Occurred()) { \ return converter; \ } \ } \ /* Fall back on the connection's translation dictionary, if any. */ \ trans_dict = Transaction_get_con(cursor->trans)->type_trans_ ## direction; \ if (trans_dict != NULL) { \ converter = PyDict_GetItem(trans_dict, type_name); \ if (converter != NULL || PyErr_Occurred()) { \ return converter; \ } \ } \ return Py_None; \ } _make__cursor_get_INOROUT_converter_for_type_name(in) _make__cursor_get_INOROUT_converter_for_type_name(out) static PyObject *connection_get_translator_output_type( CConnection *con, PyObject *translator_key ) { /* Helper function for cursor_get_translator_output_type. 
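* Returns a borrowed reference to the recorded return type, or NULL if the connection has no
* record for translator_key (PyDict_GetItem sets no exception in that case).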
*/ assert (PyString_Check(translator_key)); { PyObject *output_type_dict = con->output_type_trans_return_type_dict; if (output_type_dict != NULL) { return PyDict_GetItem(output_type_dict, translator_key); } return NULL; } } /* connection_get_translator_output_type */ /* cursor_get_translator_output_type's search might "bubble" to its connection * in a manner similar to the "bubble" in cursor_get_(in|out)_converter. */ static PyObject *cursor_get_translator_output_type( Cursor *cursor, short sqlvar_index, PyObject *translator_key ) { /* If a record of the return type of the output translator is found, return a * borrowed reference to that type. Otherwise, return NULL (which simply * means "not found"--it doesn't mean there was an error unless * PyErr_Occurred()). */ assert (PyString_Check(translator_key)); { PyObject *output_type_dict = cursor->output_type_trans_return_type_dict; if (output_type_dict != NULL) { PyObject *output_type = NULL; /* Positional DTT settings take precedence over typal. */ PyObject *py_sqlvar_index = PyInt_FromLong(sqlvar_index); if (py_sqlvar_index == NULL) { return NULL; } output_type = PyDict_GetItem(output_type_dict, py_sqlvar_index); Py_DECREF(py_sqlvar_index); if (output_type == NULL) { output_type = PyDict_GetItem(output_type_dict, translator_key); } if (output_type != NULL) { return output_type; } /* Else, fall through and search the connection's output type dict. */ } return connection_get_translator_output_type( Transaction_get_con(cursor->trans), translator_key ); } } /* cursor_get_translator_output_type */ typedef enum { DTT_KEYS_ALL_VALID = 1, DTT_KEYS_INVALID = 0, DTT_KEYS_VALIDATION_PROBLEM = -1 } DTTKeyValidationResult; static DTTKeyValidationResult _validate_dtt_keys(PyObject *trans_dict, boolean allow_positional ) { /* Returns: * - DTT_KEYS_ALL_VALID if all keys are valid (that is, all are recognized * type name strings or integers between 0 and SHRT_MAX, inclusive). * - DTT_KEYS_INVALID if at least one key is invalid. * - DTT_KEYS_VALIDATION_PROBLEM upon error in validation process. */ DTTKeyValidationResult status = DTT_KEYS_VALIDATION_PROBLEM; Py_ssize_t key_count; Py_ssize_t i; PyObject *keys = PyDict_Keys(trans_dict); if (keys == NULL) { goto fail; } key_count = PyList_GET_SIZE(keys); for (i = 0; i < key_count; i++) { /* PyList_GET_ITEM "returns" a borrowed ref, and can't fail as long as the * first argument is of the correct type, which it certainly is here. */ PyObject *k = PyList_GET_ITEM(keys, i); if (allow_positional && PyInt_Check(k)) { const long c_k = PyInt_AS_LONG(k); if (!DTT_POSITIONAL_IN_RANGE(c_k)) { PyObject *err_msg = PyString_FromFormat("Positional DTT keys must be" " between 0 and %d (inclusive); %ld is outside that range.", SHRT_MAX, c_k ); if (err_msg == NULL) { goto fail; } raise_exception(ProgrammingError, PyString_AS_STRING(err_msg)); Py_DECREF(err_msg); status = DTT_KEYS_INVALID; goto fail; } } else if (PyUnicode_Check(k)) { /* Unicode DTT keys are forbidden here in a special case because if * they're included in the general validation case below, confusing error * messages that have to do with encoding rather than DTT tend to arise. */ raise_exception(ProgrammingError, "unicode objects are not allowed as" " dynamic type translation keys." ); status = DTT_KEYS_INVALID; goto fail; } else { const int contains = PySequence_Contains(_type_names_all_supported, k); if (contains == -1) { goto fail; } else if (contains == 0) { /* k was not in the master list of supported type names. 
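* For example, a key of 'VARCHAR' lands here: character columns must be registered under
* 'TEXT' or 'TEXT_UNICODE' instead (see init_kidb_type_translation for the complete list of
* recognized names).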
*/ PyObject *err_msg = NULL; char *msg_template = NULL; PyObject *str_all_supported = NULL; PyObject *str_k = PyObject_Str(k); if (str_k == NULL) { goto fail; } str_all_supported = PyObject_Str(_type_names_all_supported); if (str_all_supported == NULL) { Py_DECREF(str_k); goto fail; } if (allow_positional) { msg_template = "Translator key '%s' is not valid. The key must be" " either a zero-based integer index (for positional DTT) or one" " of %s (for type-based DTT)."; } else { msg_template = "Translator key '%s' is not valid. The key must be" " one of %s."; } err_msg = PyString_FromFormat(msg_template, PyString_AS_STRING(str_k), PyString_AS_STRING(str_all_supported) ); Py_DECREF(str_k); Py_DECREF(str_all_supported); if (err_msg == NULL) { goto fail; } raise_exception(ProgrammingError, PyString_AS_STRING(err_msg)); Py_DECREF(err_msg); status = DTT_KEYS_INVALID; goto fail; } /* else, contains == 1, which is the 'success' condition. */ } } status = DTT_KEYS_ALL_VALID; goto cleanup; fail: assert (PyErr_Occurred()); /* Fall through to cleanup. */ cleanup: Py_XDECREF(keys); return status; } /* _validate_dtt_keys */ #define DICT_IS_NONE_OR_EMPTY(d) ((d) == Py_None || PyDict_Size(d) == 0) /* Generic programming the ugly way: */ #define _make__type_trans_setter(direction_is_IN, \ func_name, member_name, \ type_name, type_infra_name, is_for_cursor \ ) \ static PyObject *func_name(PyObject *self, PyObject *args) { \ type_name *target; \ PyObject *trans_dict; \ \ if (is_for_cursor) { \ target = (type_name *) self; \ if (!PyArg_ParseTuple(args, "O!", &PyDict_Type, &trans_dict)) \ { return NULL; } \ } else { \ if (!PyArg_ParseTuple(args, "O!O!", \ &type_infra_name, &target, &PyDict_Type, &trans_dict \ )) \ { return NULL; } \ } \ if (_validate_dtt_keys(trans_dict, is_for_cursor) != DTT_KEYS_ALL_VALID) \ { return NULL; } \ \ { \ /* If a 'BLOB' entry is specified and the supplied translator is a \ * dict, validate its contents. */ \ PyObject *blob_trans = PyDict_GetItem(trans_dict, cached_type_name_BLOB); \ if (blob_trans != NULL && PyDict_Check(blob_trans)) { \ BlobMode _throwaway_mode; \ boolean _throwaway_treat_subtype_text_as_text; \ if ( validate_nonstandard_blob_config_dict(blob_trans, \ &_throwaway_mode, &_throwaway_treat_subtype_text_as_text \ ) \ != DTT_BLOB_CONFIG_VALID \ ) \ { return NULL; } \ } \ } \ \ if (!direction_is_IN) { \ PyObject *output_type_trans_return_type_dict = \ PyObject_CallFunctionObjArgs( \ py__make_output_translator_return_type_dict_from_trans_dict, \ trans_dict, \ NULL \ ); \ if (output_type_trans_return_type_dict == NULL) { return NULL; } \ if ( output_type_trans_return_type_dict != Py_None \ && !PyDict_Check(output_type_trans_return_type_dict) \ ) \ { \ raise_exception(InternalError, "Return value of" \ " py__make_output_translator_return_type_dict_from_trans_dict was" \ " not a dict or None." \ ); \ Py_DECREF(output_type_trans_return_type_dict); \ return NULL; \ } \ \ Py_XDECREF(target->output_type_trans_return_type_dict); \ /* If the new output_type_trans_return_type_dict is None or empty, \ * set the target's corresponding member to NULL rather than recording \ * the useless incoming value. 
\ * Note that output_type_trans_return_type_dict might be empty when \ * trans_dict is *not* empty, because when a translator is set to None \ * (which indicates that kinterbasdb should return its internal \ * representation of the value), output_type_trans_return_type_dict \ * will not contain an entry for that translation key (instead, \ * XSQLDA2Description will supply a default type). */ \ if (DICT_IS_NONE_OR_EMPTY(output_type_trans_return_type_dict)) { \ Py_DECREF(output_type_trans_return_type_dict); \ target->output_type_trans_return_type_dict = NULL; \ } else { \ target->output_type_trans_return_type_dict = \ output_type_trans_return_type_dict; \ } \ \ { /* Flush cached description tuples, because we've changed the output \ * type translators, and that might invalidate the description \ * tuples. */ \ if (type_name ## _clear_ps_description_tuples(target) != 0) { \ return NULL; \ } \ } \ } \ \ Py_XDECREF(target->member_name); /* Free old translation dict, if any. */ \ if (DICT_IS_NONE_OR_EMPTY(trans_dict)) { \ target->member_name = NULL; \ } else { \ /* Corresponding DECREF is executed in target's destructor or, if the \ * DTT settings are changed again during the life of target, by the \ * XDECREF above. */ \ Py_INCREF(trans_dict); \ target->member_name = trans_dict; \ } \ \ Py_INCREF(Py_None); \ return Py_None; \ } /* end of _make__type_trans_setter */ #define _make__type_trans_getter(func_name, member_name, \ type_name, type_infra_name, is_for_cursor \ ) \ static PyObject *func_name(PyObject *self, PyObject *args) { \ type_name *target; \ if (is_for_cursor) { \ target = (type_name *) self; \ } else { \ if (!PyArg_ParseTuple(args, "O!", &type_infra_name, &target)) { \ return NULL; \ } \ } \ \ if (target->member_name != NULL) { \ /* Copy the dict so that the type translation settings can't be \ * modified except via a set_type_trans_* method. 
*/ \ return PyDict_Copy(target->member_name); \ } else { \ Py_INCREF(Py_None); \ return Py_None; \ } \ } /* end of _make__type_trans_getter */ /* Getters/setters for CConnection: */ /* Out: */ _make__type_trans_setter(FALSE, pyob_Connection_set_type_trans_out, type_trans_out, CConnection, ConnectionType, FALSE ) _make__type_trans_getter( pyob_Connection_get_type_trans_out, type_trans_out, CConnection, ConnectionType, FALSE ) /* In: */ _make__type_trans_setter(TRUE, pyob_Connection_set_type_trans_in, type_trans_in, CConnection, ConnectionType, FALSE ) _make__type_trans_getter( pyob_Connection_get_type_trans_in, type_trans_in, CConnection, ConnectionType, FALSE ) /* Getters/setters for Cursor: */ /* Out: */ _make__type_trans_setter(FALSE, pyob_Cursor_set_type_trans_out, type_trans_out, Cursor, CursorType, TRUE ) _make__type_trans_getter( pyob_Cursor_get_type_trans_out, type_trans_out, Cursor, CursorType, TRUE ) /* In: */ _make__type_trans_setter(TRUE, pyob_Cursor_set_type_trans_in, type_trans_in, Cursor, CursorType, TRUE ) _make__type_trans_getter( pyob_Cursor_get_type_trans_in, type_trans_in, Cursor, CursorType, TRUE ) static PyObject *dynamically_type_convert_input_obj_if_necessary( PyObject *py_input, boolean is_array_element, unsigned short dialect, short data_type, short data_subtype, short scale, PyObject *converter ) { /* if $converter is None, returns: * a new reference to the original py_input * else: * the return value of the converter (which is a new reference) */ assert (py_input != NULL); if (converter == Py_None) { Py_INCREF(py_input); return py_input; }{ boolean is_fixed_point; PyObject *py_converted = NULL; PyObject *py_argument_to_converter; PyObject *argz = PyTuple_New(1); if (argz == NULL) { goto fail; } is_fixed_point = ( is_array_element ? IS_FIXED_POINT__ARRAY_EL(dialect, data_type, data_subtype, scale) : IS_FIXED_POINT__CONVENTIONAL(dialect, data_type, data_subtype, scale) ); /* Next, set py_argument_to_converter, the single argument that the converter * will receive (though it's only one argument, it might be a sequence). */ /* Special case for fixed point fields: pass the original input object and * the scale figure in a 2-tuple, rather than just the original input object, * as with most other field types. */ if (is_fixed_point) { /* Reference ownership of this new 2-tuple is passed to argz via * PyTuple_SET_ITEM. argz will then delete this new 2-tuple when argz * itself is deleted. The refcount of py_input is INCd when it enters * the new 2-tuple; DECd when the 2-tuple is deleted. */ py_argument_to_converter = Py_BuildValue("(Oi)", py_input, scale); } else if (IS_UNICODE_CHAR_OR_VARCHAR(data_type, data_subtype)) { py_argument_to_converter = Py_BuildValue("(Oi)", py_input, data_subtype); } else { /* We currently hold only a borrowed reference to py_input, since it's * an input parameter rather than a newly created object. * Therefore, we must now artificially INCREF py_input so that * PyTuple_SET_ITEM(argz, ...) can "steal" ownership of a reference to * py_input and then discard that reference when argz is destroyed. */ Py_INCREF(py_input); py_argument_to_converter = py_input; } if (py_argument_to_converter == NULL) { goto fail; } PyTuple_SET_ITEM(argz, 0, py_argument_to_converter); py_converted = PyObject_CallObject(converter, argz); /* The MEAT. 
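* Sketch of a hypothetical 'FIXED' in-converter (Python; the name is invented), matching the
* argument shapes assembled above -- fixed-point converters receive a 2-tuple (value, scale),
* unicode text converters receive (value, charset_code), and most other types receive the
* bare value:
*   def fixed_conv_in((value, scale)):
*       # scale is negative, e.g. -2 for NUMERIC(9,2); return the scaled
*       # Python int this function expects back:
*       return int(round(value * 10 ** -scale))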
*/ if (py_converted == NULL) { goto fail; } /* Special case for fixed-point values with precision 10-18 in dialects < 3: * The value returned by the converter is a scaled Python int; we need to * convert it to a non-scaled Python float, because in dialects < 3, * logically fixed-point fields with precisions 10-18 are actually stored as * floating point. Those with precisions 1-9 are stored internally as * integers, similar to the way they're stored in dialect 3; the clause * && data_subtype == SUBTYPE_NONE * in the condition below prevents this special case from applying to the * precision 1-9 fields. */ if ( dialect < 3 && is_fixed_point && scale != 0 && data_subtype == SUBTYPE_NONE && py_converted != Py_None ) { PyObject *py_conv_unscaled; PyObject *py_conv_as_py_float = PyNumber_Float(py_converted); if (py_conv_as_py_float == NULL) { goto fail; } py_conv_unscaled = PyFloat_FromDouble( PyFloat_AS_DOUBLE(py_conv_as_py_float) / pow(10.0f, (double) -scale) ); Py_DECREF(py_conv_as_py_float); if (py_conv_unscaled == NULL) { goto fail; } /* Replace py_converted with py_conv_unscaled: */ Py_DECREF(py_converted); py_converted = py_conv_unscaled; } goto cleanup; fail: assert (PyErr_Occurred()); Py_XDECREF(py_converted); /* Fall through to cleanup. */ cleanup: /* Notice that we do not DECREF py_argument_to_converter, because ownership * of the reference to py_argument_to_converter will have been passed to * the container argz. */ Py_XDECREF(argz); return py_converted; }} /* dynamically_type_convert_input_obj_if_necessary */ static PyObject *dynamically_type_convert_output_obj_if_necessary( PyObject *db_plain_output, PyObject *converter, short data_type, short data_subtype ) { /* Unlike dynamically_type_convert_input_obj_if_necessary, this function * does NOT return a new reference. * if converter is None: * returns the passed reference to the original value db_plain_output * else: * returns the return value of the converter (which is a new reference), * BUT ALSO deletes the passed reference to db_plain_output, in effect * "replacing" the db_plain_output reference with one to py_converted. * The passed reference to db_plain_output is deleted EVEN if this function * encounters an error. */ assert (converter != NULL); /* If dealing with non-standard blob, this function never should've been * called: */ assert (data_type == SQL_BLOB ? !PyDict_Check(converter) : TRUE); if (converter == Py_None) { return db_plain_output; } else { PyObject *py_converted; boolean is_unicode_char_or_varchar = IS_UNICODE_CHAR_OR_VARCHAR( data_type, data_subtype ); PyObject *argz = PyTuple_New(1); if (argz == NULL) { goto fail; } if (!is_unicode_char_or_varchar) { /* The following statement "steals" the ref to db_plain_output, which is * appropriate behavior in this situation. */ PyTuple_SET_ITEM(argz, 0, db_plain_output); } else { /* If it's a unicode CHAR or VARCHAR, create a 2-tuple containing: * ( * the raw (encoded) string, * the database engine's internal character set code * ). */ PyObject *db_charset_code; PyObject *tuple_of_raw_string_and_charset_code = PyTuple_New(2); if (tuple_of_raw_string_and_charset_code == NULL) { goto fail; } db_charset_code = PyInt_FromLong(data_subtype); if (db_charset_code == NULL) { Py_DECREF(tuple_of_raw_string_and_charset_code); goto fail; } /* The following statements "steal" the refs to the element values, which * is appropriate behavior in this situation.
Reference ownership of * db_plain_output and db_charset_code is handed off to the container * tuple_of_raw_string_and_charset_code; in turn, reference ownership of * tuple_of_raw_string_and_charset_code is handed off to the container * argz. * When argz is released at the end of this function, the release * "cascades", releasing the three other references mentioned above. */ PyTuple_SET_ITEM(tuple_of_raw_string_and_charset_code, 0, db_plain_output); PyTuple_SET_ITEM(tuple_of_raw_string_and_charset_code, 1, db_charset_code); PyTuple_SET_ITEM(argz, 0, tuple_of_raw_string_and_charset_code); } py_converted = PyObject_CallObject(converter, argz); /* The MEAT. */ Py_DECREF(argz); return py_converted; fail: assert (PyErr_Occurred()); /* Yes, DECing db_plain_output here is correct (see comment at start): */ Py_DECREF(db_plain_output); Py_XDECREF(argz); return NULL; } } /* dynamically_type_convert_output_obj_if_necessary */ kinterbasdb-3.3.0/_kisupport_time.c0000644000175000001440000000502611130647414016572 0ustar pcisarusers/* KInterbasDB Python Package - Time Functions * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ #ifdef ENABLE_CONNECTION_TIMEOUT static LONG_LONG time_millis(void) { #ifdef PLATFORM_WINDOWS struct _timeb tstruct; _ftime(&tstruct); /* _ftime doesn't return an error code */ return (((LONG_LONG) tstruct.time) * 1000) + tstruct.millitm; #else struct timeval tstruct; gettimeofday(&tstruct, NULL); return ( (((LONG_LONG) tstruct.tv_sec ) * 1000) + (((LONG_LONG) tstruct.tv_usec) / 1000) ); #endif } /* time_millis */ #endif /* ENABLE_CONNECTION_TIMEOUT */ static LONG_LONG py_seconds_to_milliseconds(PyObject *py_secs, PyObject *exc_type, char *err_template, LONG_LONG min, LONG_LONG max ) { LONG_LONG millis; if (py_secs == NULL) { goto fail; } else if (PyFloat_Check(py_secs)) { millis = (LONG_LONG) (PyFloat_AS_DOUBLE(py_secs) * 1000.0); } else if (PyInt_Check(py_secs)) { millis = PyInt_AS_LONG(py_secs) * 1000; } else if (PyLong_Check(py_secs)) { millis = PyLong_AsLongLong(py_secs); if ( PyErr_Occurred() || millis > LONG_LONG_MAX / 1000 || millis < LONG_LONG_MIN / 1000 ) { goto fail; } /* overflow */ millis *= 1000; } else { goto fail; } if (millis < min || millis > max) { goto fail; } return millis; fail: if (!PyErr_Occurred()) { PyObject *py_secs_repr = py_secs == NULL ? PyString_FromString("") : PyObject_Repr(py_secs) ; if (py_secs_repr != NULL) { PyObject *err_msg = PyString_FromFormat(err_template, PyString_AS_STRING(py_secs_repr) ); if (err_msg != NULL) { raise_exception(exc_type, PyString_AS_STRING(err_msg)); Py_DECREF(err_msg); } Py_DECREF(py_secs_repr); } } assert (PyErr_Occurred()); return 0; /* The return value is not an error code! 
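* Callers must therefore use PyErr_Occurred() to distinguish a failure from a legitimate
* result of 0.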
*/ } /* py_seconds_to_milliseconds */ kinterbasdb-3.3.0/typeconv_23plus_lowmem.py0000644000175000001440000000365311130647414020226 0ustar pcisarusers# KInterbasDB Python Package - Type Conv : Even More Progressive # # Version 3.3 # # The following contributors hold Copyright (C) over their respective # portions of code (see license.txt for details): # # [Original Author (maintained through version 2.0-0.3.1):] # 1998-2001 [alex] Alexander Kuznetsov # [Maintainers (after version 2.0-0.3.1):] # 2001-2002 [maz] Marek Isalski # 2002-2004 [dsr] David Rushby # [Contributors:] # 2001 [eac] Evgeny A. Cherkashin # 2001-2002 [janez] Janez Jere # This module can be conveniently activated as the process-wide default via: # kinterbasdb.init(type_conv=199) # # This module represents date/time values via datetime, and fixed point values # (imprecisely) as floats. It is designed *solely* for client programs that # want to use the standard library datetime module for date/time values, but # don't care about the representation of fixed point values and don't want to # pay the substantial memory overhead for importing the standard library # decimal module. # # !!IF YOU CARE ABOUT THE PRECISION OF NUMERIC AND DECIMAL VALUES, DO NOT USE # THIS MODULE; USE THE typeconv_24plus MODULE INSTEAD!! typeconv_24plus can be # conveniently activated as the process-wide default via: # kinterbasdb.init(type_conv=200) from kinterbasdb import typeconv_datetime_stdlib from kinterbasdb import typeconv_fixed_stdlib # uses floats, not Decimals from kinterbasdb import typeconv_text_unicode _underlying_modules = ( typeconv_datetime_stdlib, typeconv_fixed_stdlib, typeconv_text_unicode ) # Load the required members from the underlying modules into the namespace of # this module. globalz = globals() for m in _underlying_modules: for req_member in m.__all__: globalz[req_member] = getattr(m, req_member) del globalz kinterbasdb-3.3.0/_kisupport_platform_posix.c0000644000175000001440000000657511130647414020714 0ustar pcisarusers/* KInterbasDB Python Package - Implementation of Platform Infrastructure for * POSIX * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ /* This source file is designed to be directly included in _kievents.c, * without the involvement of a header file. */ /* NOTE: THE CODE IN THIS FILE IS TYPICALLY EXECUTED WHEN THE GIL IS NOT HELD, * SO IT MUST NOT CALL THE PYTHON C API! */ /* With pthreads, there's no difference between a thread reference and a thread * ID. */ static PlatformThreadRefType Thread_current_ref() { return pthread_self(); } /* Thread_current_ref */ static PlatformThreadIdType Thread_current_id() { return pthread_self(); } /* Thread_current_id */ static boolean Thread_ids_equal( PlatformThreadIdType a, PlatformThreadIdType b ) { return (pthread_equal(a, b) != 0); } /* Thread_ids_equal */ static PlatformThreadRefType Thread_create( PlatformThreadFuncType func, void *func_arg, PlatformThreadIdType *store_thread_id ) { /* Unlike on Windows, PlatformThreadRefType and the thread ID type are the * same with pthreads. */ int status = pthread_create(store_thread_id, NULL, func, func_arg); return status == 0 ? 
*store_thread_id : THREAD_REF_INVALID; } /* Thread_create */ static long Thread_join(PlatformThreadRefType t) { return pthread_join(t, NULL); } /* Thread_join */ static void sleep_millis(unsigned int millis) { const unsigned int seconds = millis / 1000; const unsigned int useconds = (millis % 1000) * 1000; if (seconds != 0) { sleep(seconds); } if (useconds != 0) { usleep((useconds_t) useconds); } } /* sleep_millis */ static long Mutex_init(PlatformMutexType *m) { return (pthread_mutex_init(m, NULL) == 0 ? 0 : -1); } /* Mutex_initialize */ static long Mutex_close(PlatformMutexType *m) { return (pthread_mutex_destroy(m) == 0 ? 0 : -1); } /* Mutex_close */ static long Mutex_lock(PlatformMutexType *m) { return (pthread_mutex_lock(m) == 0 ? 0 : -1); } /* Mutex_lock */ static long Mutex_unlock(PlatformMutexType *m) { return (pthread_mutex_unlock(m) == 0 ? 0 : -1); } /* Mutex_unlock */ static void millis_into_future_to_abstime( long millis, struct timespec *abstime ) { struct timeval now; const long rel_secs = millis / 1000; const long rel_millis = millis % 1000; const long rel_nanos = rel_millis * 1000000; /* 1: use $now to get the absolute time: */ gettimeofday(&now, NULL); /* 2: transfer the values from $now to $abstime: */ abstime->tv_sec = now.tv_sec; abstime->tv_nsec = now.tv_usec * 1000; /* 3: add the relative timeout to $abstime */ abstime->tv_sec += rel_secs; { const long total_nanos = abstime->tv_nsec + rel_nanos; abstime->tv_sec += total_nanos / 1000000000; abstime->tv_nsec = total_nanos % 1000000000; } } /* millis_into_future_to_abstime */ kinterbasdb-3.3.0/_kinterbasdb.c0000644000175000001440000007431511130647414016013 0ustar pcisarusers/* KInterbasDB Python Package - Implementation of Core * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ #include "_kinterbasdb.h" #if (defined(ENABLE_DB_EVENT_SUPPORT) || defined(ENABLE_CONNECTION_TIMEOUT)) #include "_kisupport.h" #include "_kisupport_platform.c" #endif #ifdef ENABLE_DB_EVENT_SUPPORT #ifndef ENABLE_CONCURRENCY /* setup.py should've already enforced this: */ #error "ENABLE_CONCURRENCY is required for ENABLE_DB_EVENT_SUPPORT." #endif #include "_kievents.h" #endif #include "_kilock.h" /****************** "PRIVATE" DECLARATIONS:BEGIN *******************/ static int Connection_require_open(CConnection *self, char *failure_message); static int CConnection_clear_ps_description_tuples(CConnection *); /****************** "PRIVATE" DECLARATIONS:END *******************/ /******************** GLOBAL VARIABLES:BEGIN ********************/ /* These min/max constants are initialized in init_kinterbasdb, and retained * throughout the life of the process. */ static PyObject *py_SHRT_MIN = NULL; static PyObject *py_SHRT_MAX = NULL; static PyObject *py_INT_MIN = NULL; static PyObject *py_INT_MAX = NULL; static PyObject *py_LONG_MIN = NULL; static PyObject *py_LONG_MAX = NULL; static PyObject *py_LONG_LONG_MIN = NULL; static PyObject *py_LONG_LONG_MAX = NULL; static PyObject *py_PY_SSIZE_T_MIN = NULL; static PyObject *py_PY_SSIZE_T_MAX = NULL; /* Global references to this module's exception type objects, as documented in * the Python DB API (initialized by init_kidb_exceptions). 
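* The hierarchy established there (each class indented under its base):
*   Warning
*   Error
*     InterfaceError
*     DatabaseError
*       DataError
*       OperationalError
*         TransactionConflict
*         ConduitWasClosed    (only with ENABLE_DB_EVENT_SUPPORT)
*         ConnectionTimedOut  (only with ENABLE_CONNECTION_TIMEOUT)
*       IntegrityError
*       InternalError
*       ProgrammingError
*       NotSupportedError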
*/ static PyObject *Warning = NULL; static PyObject *Error = NULL; static PyObject *InterfaceError = NULL; static PyObject *DatabaseError = NULL; static PyObject *DataError = NULL; static PyObject *OperationalError = NULL; static PyObject *TransactionConflict = NULL; #ifdef ENABLE_DB_EVENT_SUPPORT static PyObject *ConduitWasClosed = NULL; #endif #ifdef ENABLE_CONNECTION_TIMEOUT static PyObject *ConnectionTimedOut = NULL; #endif static PyObject *IntegrityError = NULL; static PyObject *InternalError = NULL; static PyObject *ProgrammingError = NULL; static PyObject *NotSupportedError = NULL; /* 2005.11.04: */ static int global_concurrency_level = UNKNOWN_CONCURRENCY_LEVEL; #ifdef ENABLE_CONCURRENCY PyThread_type_lock _global_db_client_lock = NULL; /* _global_db_client_lock is not a PyObject *, but we need to pass it to the * _kiservices module through Python, so we wrap it in a PyCObject: */ PyObject *_global_db_client_lock__python_Wrapper = NULL; #endif /* Reference to an object of ConnectionType that is used as a strongly typed * None: */ static CConnection *null_connection; /* This Python callable is used by the Cursor implementation to transform * ordinary fetch* rows into fetch*map rows. */ static PyObject *py_RowMapping_constructor = NULL; static PyObject *py__make_output_translator_return_type_dict_from_trans_dict = NULL; static PyObject *py_look_up_array_descriptor = NULL; static PyObject *py_look_up_array_subtype = NULL; static PyObject *pyob_Cursor_execute_exception_type_filter = NULL; static PyObject *pyob_validate_tpb = NULL; static PyObject *pyob_trans_info = NULL; /* These Python string objects are created when the module loads, then used by * multiple subsystems throughout the life of the process: */ static PyObject *shared___s__C_con; /* _C_con */ static PyObject *shared___s__main_trans; /* _main_trans */ static PyObject *shared___s_ascii; /* ascii (canonical name of ASCII codec) */ static PyObject *shared___s_charset; /* _charset */ static PyObject *shared___s_DB_CHAR_SET_NAME_TO_PYTHON_ENCODING_MAP; static PyObject *shared___s_execute_immediate; /* execute_immediate */ static PyObject *shared___s_strict; /* strict */ /******************** GLOBAL VARIABLES:END ********************/ /******************** IMPLEMENTATION:BEGIN ********************/ PyObject *pyob_TrackerToList(AnyTracker *tracker); /* CConnection-related: */ #ifdef ENABLE_CONNECTION_TIMEOUT static int Connection_close_from_CTT(volatile CConnection *con); /* When a Connection times out, it cuts loose certain terminal objects * (BlobReaders and PreparedStatements) which cannot under any circumstance * be transparently resumed. An attempt by the client programmer to use * such an object should raise ConnectionTimedOut rather than * ProgrammingError. To implement that, we copy pointers to all BlobReaders * and PreparedStatements attached to a Connection before we attempt to time * the Connection out. After timing it out, we then update the state * indicator of the non-resumable objects so that they can raise * ConnectionTimedOut if the client programmer attempts to use them again. 
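* (Open transactions, by contrast, are resolved rather than merely flagged; see
* Connection_resolve_all_transactions_from_CTT, declared below.)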
*/ static BlobReader **Connection_copy_BlobReader_pointers( volatile CConnection *con, Py_ssize_t *count ); static void Connection_former_BlobReaders_flag_timeout_and_free( BlobReader **blob_readers, Py_ssize_t count ); #define Connection_former_BlobReader_pointers_free(blob_readers) \ kimem_main_free(blob_readers) static PreparedStatement **Connection_copy_PreparedStatement_pointers( volatile CConnection *con, Py_ssize_t *count ); static void Connection_former_PreparedStatements_flag_timeout_and_free( PreparedStatement **prepared_statements, Py_ssize_t count ); #define Connection_former_PreparedStatement_pointers_free(prepared_statements) \ kimem_main_free(prepared_statements) #endif /* ENABLE_CONNECTION_TIMEOUT */ static int Connection_attach_from_members(CConnection *con #ifdef ENABLE_CONNECTION_TIMEOUT , struct _ConnectionTimeoutParams *tp #endif ); static PyObject *pyob_Connection_x_info( boolean for_isc_database_info, isc_tr_handle *trans_handle_p, PyObject *self, PyObject *args ); static boolean Connection_has_any_open_transaction(CConnection *con); /* Transaction-related: */ boolean Transaction_is_main(Transaction *self) { assert (self != NULL); assert (self->con != NULL); return (self->con->main_trans == self); } /* Transaction_is_main */ static PyObject *pyob_Transaction_get_default_tpb(Transaction *self); typedef enum { OP_COMMIT = 1, OP_ROLLBACK = 0 } WhichTransactionOperation; typedef enum { OP_RESULT_OK = 0, OP_RESULT_ERROR = -1 } TransactionalOperationResult; static void Transaction_dist_trans_indicate_resultion( Transaction *self, PyObject *group, const boolean is_resolved ); static int change_resolution_of_all_con_main_trans( PyObject *group, PyObject *cons, const boolean is_resolved ); static TransactionalOperationResult Transaction_commit_or_rollback( const WhichTransactionOperation op, Transaction *self, const boolean retaining, const boolean allowed_to_raise ); /* BlobReader-related: */ static int BlobReader_untrack(BlobReader *, boolean); /* These streaming blob support functions are declared here rather than in * _kinterbasdb.h so as to avoid compiler warnings that they're "declared but * never defined" when the _kiservices compilation unit is built. 
*/ static int validate_nonstandard_blob_config_dict(PyObject *, BlobMode *, boolean * ); static int BlobReaderTracker_release(BlobReaderTracker **); static boolean _check_statement_length(Py_ssize_t); static PyObject *pyob_Cursor_close(Cursor *self); static int _Cursor_require_open(Cursor *self, char *failure_message); static int Cursor_clear(Cursor *, boolean); static void Cursor_clear_and_leave_open(Cursor *); static int Cursor_ensure_PSCache(Cursor *self); static int Cursor_close_prepared_statements(Cursor *self, const boolean allowed_to_raise, const boolean clear_ps_superior_refs ); static int Cursor_close_without_unlink(Cursor *, boolean); static int Cursor_close_with_unlink(Cursor *, boolean); static int CursorTracker_release(CursorTracker **); typedef int (* CursorTrackerMappedFunction)(CursorTracker *, CursorTracker *); static int Cursor_clear_ps_description_tuples(Cursor *); typedef int (* PSTrackerMappedFunction)(PSTracker *, PSTracker *); static int PSTracker_traverse(PSTracker *, PSTrackerMappedFunction); /* This header file needs to be included whether connection timeout is enabled * or not, because (potentially empty versions of) some macros must be * provided: */ #include "_kicore_connection_timeout.h" #include "_kisupport_time.c" #include "_kinterbasdb_exception_functions.c" #include "_kicore_transaction_support.c" #include "_kicore_transaction_distributed.c" #include "_kicore_transaction.c" #include "_kiconversion.c" #ifdef ENABLE_CONNECTION_TIMEOUT static TransactionalOperationResult Connection_resolve_all_transactions_from_CTT( volatile CConnection *con, const WhichTransactionOperation trans_op ); #include "_kicore_connection_timeout.c" #endif #include "_kicore_connection.c" #include "_kicore_xsqlda.c" #include "_kicore_preparedstatement.c" #include "_kicore_cursor.c" #include "_kicore_create_drop_db.c" #ifdef ENABLE_DB_EVENT_SUPPORT #include "_kinterbasdb_exception_functions_without_python.c" #include "_kievents.c" #endif static PyObject *pyob_provide_refs_to_python_entities( PyObject *self, PyObject *args ) { if (!PyArg_ParseTuple(args, "OOOOOOO", &py_RowMapping_constructor, &py__make_output_translator_return_type_dict_from_trans_dict, &py_look_up_array_descriptor, &py_look_up_array_subtype, &pyob_Cursor_execute_exception_type_filter, &pyob_validate_tpb, &pyob_trans_info )) { return NULL; } #define REQ_CALLABLE(py_func) \ if (!PyCallable_Check(py_func)) { \ raise_exception(InternalError, #py_func " is not callable."); \ py_func = NULL; \ return NULL; \ } REQ_CALLABLE(py_RowMapping_constructor); REQ_CALLABLE(py__make_output_translator_return_type_dict_from_trans_dict); REQ_CALLABLE(py_look_up_array_descriptor); REQ_CALLABLE(py_look_up_array_subtype); REQ_CALLABLE(pyob_Cursor_execute_exception_type_filter); REQ_CALLABLE(pyob_validate_tpb); REQ_CALLABLE(pyob_trans_info); /* We need these objects to live forever: */ Py_INCREF(py_RowMapping_constructor); Py_INCREF(py__make_output_translator_return_type_dict_from_trans_dict); Py_INCREF(py_look_up_array_descriptor); Py_INCREF(py_look_up_array_subtype); Py_INCREF(pyob_Cursor_execute_exception_type_filter); Py_INCREF(pyob_validate_tpb); Py_INCREF(pyob_trans_info); RETURN_PY_NONE; } /* pyob_provide_refs_to_python_entities */ static PyObject *pyob_concurrency_level_get(PyObject *self) { if (global_concurrency_level == UNKNOWN_CONCURRENCY_LEVEL) { raise_exception(ProgrammingError, "The concurrency level has not been set;" " that must be done explicitly or implicitly via the" " kinterbasdb.init function." 
); return NULL; } return PyInt_FromLong(global_concurrency_level); } /* pyob_concurrency_level_get */ static PyObject *pyob_concurrency_level_set(PyObject *self, PyObject *args) { int tentative_level; if (!PyArg_ParseTuple(args, "i", &tentative_level)) { return NULL; } if (global_concurrency_level != UNKNOWN_CONCURRENCY_LEVEL) { raise_exception(ProgrammingError, "The concurrency level cannot be changed" " once it has been set. Use kinterbasdb.init(concurrency_level=?) to" " set the concurrency level legally." ); return NULL; } #ifndef ENABLE_CONCURRENCY if (tentative_level != 0) { #else if (tentative_level != 1 && tentative_level != 2) { #endif raise_exception(ProgrammingError, "Illegal concurrency level."); return NULL; } global_concurrency_level = tentative_level; RETURN_PY_NONE; } /* pyob_concurrency_level_set */ #ifdef ENABLE_CONCURRENCY static PyObject *pyob_Thread_current_id(PyObject *self) { LONG_LONG thread_id = (LONG_LONG) Thread_current_id(); return PythonIntOrLongFrom64BitValue(thread_id); } /* pyob_Thread_current_id */ #endif static PyObject *pyob_isc_portable_integer(PyObject *self, PyObject *args) { char *raw_bytes; Py_ssize_t raw_len; LONG_LONG result; if (!PyArg_ParseTuple(args, "s#", &raw_bytes, &raw_len)) { goto fail; } if (raw_len != 8 && raw_len != 4 && raw_len != 2 && raw_len != 1) { raise_exception(InternalError, "pyob_isc_portable_integer: len(buf) must be in (1,2,4,8)" ); goto fail; } ENTER_GDAL result = isc_portable_integer( #ifndef INTERBASE_7_OR_LATER (unsigned char *) #endif raw_bytes, (short) raw_len /* Cast is safe b/c already checked val. */ ); LEAVE_GDAL return PythonIntOrLongFrom64BitValue(result); fail: assert (PyErr_Occurred()); return NULL; } /* pyob_isc_portable_integer */ static PyObject *pyob_raw_timestamp_to_tuple(PyObject *self, PyObject *args) { char *raw_bytes; Py_ssize_t raw_len; if (!PyArg_ParseTuple(args, "s#", &raw_bytes, &raw_len)) { return NULL; } if (raw_len != 8) { raise_exception(ProgrammingError, "raw_timestamp_to_tuple argument must" " be str of length 8." 
); return NULL; } return conv_out_timestamp(raw_bytes); } /* pyob_raw_timestamp_to_tuple */ /******************** IMPLEMENTATION:END ********************/ /********************** MODULE INFRASTRUCTURE:BEGIN ***********************/ static PyObject *init_kidb_basic_header_constants(PyObject *, PyObject *); static int init_shared_string_constants(void) { #define INIT_SHARED_STRING_CONST(s) \ shared___s_ ## s = PyString_FromString(#s); \ if (shared___s_ ## s == NULL) { goto fail; } INIT_SHARED_STRING_CONST(_C_con); INIT_SHARED_STRING_CONST(_main_trans); INIT_SHARED_STRING_CONST(ascii); INIT_SHARED_STRING_CONST(charset); INIT_SHARED_STRING_CONST(DB_CHAR_SET_NAME_TO_PYTHON_ENCODING_MAP); INIT_SHARED_STRING_CONST(execute_immediate); INIT_SHARED_STRING_CONST(strict); return 0; fail: assert (PyErr_Occurred()); return -1; } /* init_shared_string_constants */ static PyMethodDef kinterbasdb_GlobalMethods[] = { { "init_kidb_basic_header_constants", init_kidb_basic_header_constants, METH_VARARGS }, { "create_database", pyob_create_database, METH_VARARGS }, /*********** CConnection methods: ***********/ { "Connection_drop_database", pyob_Connection_drop_database,METH_VARARGS }, { "Connection_connect", pyob_Connection_connect, METH_VARARGS }, { "Connection_python_wrapper_obj_set", pyob_Connection_python_wrapper_obj_set, METH_VARARGS }, { "Connection_close", pyob_Connection_close, METH_VARARGS }, { "Connection_database_info", pyob_Connection_database_info,METH_VARARGS }, { "Connection_transaction_info", pyob_Connection_transaction_info, METH_VARARGS }, /* Dynamic type trans getters/setters for CConnection: */ { "set_Connection_type_trans_out", pyob_Connection_set_type_trans_out, METH_VARARGS }, { "get_Connection_type_trans_out", pyob_Connection_get_type_trans_out, METH_VARARGS }, { "set_Connection_type_trans_in", pyob_Connection_set_type_trans_in, METH_VARARGS }, { "get_Connection_type_trans_in", pyob_Connection_get_type_trans_in, METH_VARARGS }, { "Connection_closed_get",pyob_Connection_closed_get, METH_VARARGS }, #ifdef INTERBASE_6_OR_LATER { "Connection_dialect_get", pyob_Connection_dialect_get, METH_VARARGS }, { "Connection_dialect_set", pyob_Connection_dialect_set, METH_VARARGS }, #endif /* INTERBASE_6_OR_LATER */ { "Connection_main_trans_get", pyob_Connection_main_trans_get, METH_VARARGS }, { "Connection_transactions_get", pyob_Connection_transactions_get, METH_VARARGS }, { "Connection_has_active_transaction", pyob_Connection_has_active_transaction, METH_VARARGS }, #ifdef ENABLE_DB_EVENT_SUPPORT { "EventConduit_create", pyob_EventConduit_create, METH_VARARGS }, #endif /* ENABLE_DB_EVENT_SUPPORT */ /* Can apply to either a CConnection or a Cursor: */ { "is_purportedly_open", pyob_CursorOrConnection_is_purportedly_open, METH_VARARGS }, /* StandaloneTransactionHandle methods: */ { "distributed_begin", pyob_distributed_begin, METH_VARARGS }, { "distributed_prepare", pyob_distributed_prepare, METH_VARARGS }, { "distributed_commit", pyob_distributed_commit, METH_VARARGS }, { "distributed_rollback", pyob_distributed_rollback, METH_VARARGS }, /* Module-level functions that allow the Python layer to manipulate certain * global variables within the C layer: */ { "provide_refs_to_python_entities", pyob_provide_refs_to_python_entities, METH_VARARGS }, /* Concurrency level: */ { "concurrency_level_get",(PyCFunction) pyob_concurrency_level_get, METH_NOARGS }, { "concurrency_level_set", pyob_concurrency_level_set, METH_VARARGS }, #ifdef ENABLE_CONCURRENCY { "thread_id", (PyCFunction) pyob_Thread_current_id, 
METH_NOARGS }, #endif #ifdef ENABLE_CONNECTION_TIMEOUT /* Connection timeouts: */ { "ConnectionTimeoutThread_main", pyob_ConnectionTimeoutThread_main, METH_VARARGS }, { "CTM_halt", (PyCFunction) pyob_CTM_halt, METH_NOARGS }, { "Connection__read_activity_stamps", Connection__read_activity_stamps, METH_VARARGS }, #endif /* ENABLE_CONNECTION_TIMEOUT */ /* Connection_timeout_enabled should be available whether the feature is * compiled in or not: */ { "Connection_timeout_enabled", pyob_Connection_timeout_enabled, METH_VARARGS }, /* General utility functions: */ { "portable_int", pyob_isc_portable_integer, METH_VARARGS }, { "raw_timestamp_to_tuple", pyob_raw_timestamp_to_tuple, METH_VARARGS }, /* The end: */ { NULL, NULL } }; static int init_kidb_exceptions(PyObject *d) { /* Python provides no way to recover from errors encountered during C * extension module import, so the error handling here is lame. */ #define DEFINE_EXC(targetVar, superType) \ targetVar = PyErr_NewException("kinterbasdb." #targetVar, superType, \ NULL \ ); \ if (targetVar == NULL) { goto fail; } \ if (PyDict_SetItemString(d, #targetVar, targetVar) != 0) { goto fail; } DEFINE_EXC(Warning, PyExc_StandardError); DEFINE_EXC(Error, PyExc_StandardError); DEFINE_EXC(InterfaceError, Error); DEFINE_EXC(DatabaseError, Error); DEFINE_EXC(DataError, DatabaseError); DEFINE_EXC(OperationalError, DatabaseError); DEFINE_EXC(TransactionConflict, OperationalError); #ifdef ENABLE_DB_EVENT_SUPPORT DEFINE_EXC(ConduitWasClosed, OperationalError); #endif #ifdef ENABLE_CONNECTION_TIMEOUT DEFINE_EXC(ConnectionTimedOut, OperationalError); #endif DEFINE_EXC(IntegrityError, DatabaseError); DEFINE_EXC(InternalError, DatabaseError); DEFINE_EXC(ProgrammingError, DatabaseError); DEFINE_EXC(NotSupportedError, DatabaseError); return 0; fail: /* Not much we can do due to Python's feature-poor module initialization * infrastructure.
*/ return -1; } /* init_kidb_exceptions */ PyTypeObject ConnectionType = { PyObject_HEAD_INIT(NULL) 0, "kinterbasdb.ConnectionType", sizeof(CConnection), 0, (destructor) pyob_Connection___del__, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; PyTypeObject BlobReaderType = { /* new-style class */ PyObject_HEAD_INIT(NULL) 0, /* ob_size */ "kinterbasdb.BlobReader", /* tp_name */ sizeof(BlobReader), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor) pyob_BlobReader___del__, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ (reprfunc) pyob_BlobReader_repr, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ 0, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ BlobReader_methods, /* tp_methods */ NULL, /* tp_members */ BlobReader_getters_setters, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ /* This type does not need to be instantiable from the Python level: */ 0, /* tp_init */ 0, /* tp_alloc */ 0, /* tp_new */ }; PyTypeObject StandaloneTransactionHandleType = { PyObject_HEAD_INIT(NULL) 0, "kinterbasdb.StandaloneTransactionHandle", sizeof(StandaloneTransactionHandle), 0, (destructor) StandaloneTransactionHandle___del__, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; DL_EXPORT(void) init_kinterbasdb(void) { PyObject *m, *d; m = Py_InitModule("_kinterbasdb", kinterbasdb_GlobalMethods); if (m == NULL) { goto fail; } d = PyModule_GetDict(m); if (d == NULL) { goto fail; } ConnectionType.ob_type = &PyType_Type; CursorType.ob_type = &PyType_Type; StandaloneTransactionHandleType.ob_type = &PyType_Type; #ifdef VERBOSE_DEBUGGING if (PyModule_AddIntConstant(m, "VERBOSE_DEBUGGING", 1) != 0) { goto fail; } #endif if (PyModule_AddIntConstant(m, "FB_API_VER", FB_API_VER) != 0) { goto fail; } #ifdef ENABLE_CONCURRENCY { /* See documentation in _kilock.h */ _global_db_client_lock = PyThread_allocate_lock(); if (_global_db_client_lock == NULL) { goto fail; } _global_db_client_lock__python_Wrapper = PyCObject_FromVoidPtr( _global_db_client_lock, NULL /* No automatic collection. */ ); if (_global_db_client_lock__python_Wrapper == NULL) { goto fail; } PyObject_SetAttrString(m, "_global_db_client_lock__python_Wrapper", _global_db_client_lock__python_Wrapper ); } #endif /* def ENABLE_CONCURRENCY */ if (PyModule_AddIntConstant(m, "DEFAULT_CONCURRENCY_LEVEL", DEFAULT_CONCURRENCY_LEVEL ) != 0 ) { goto fail; } /* Initialize module-global Python objects to hold min/max values for integer * types. 
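* (The SHRT/INT/LONG bounds fit in a C long and become Python ints via PyInt_FromLong; the
* 64-bit LONG_LONG bounds require PyLong_FromLongLong; the Py_ssize_t bounds use
* PyInt_FromSsize_t.)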
*/ #define INITIALIZE_INT_BOUNDARY_CONST(name) \ py_ ## name = PyInt_FromLong(name); \ if (py_ ## name == NULL) { goto fail; } #define INITIALIZE_LONG_BOUNDARY_CONST(name) \ py_ ## name = PyLong_FromLongLong(name); \ if (py_ ## name == NULL) { goto fail; } #define INITIALIZE_PY_SSIZE_T_BOUNDARY_CONST(name) \ py_ ## name = PyInt_FromSsize_t(name); \ if (py_ ## name == NULL) { goto fail; } INITIALIZE_INT_BOUNDARY_CONST(SHRT_MIN); INITIALIZE_INT_BOUNDARY_CONST(SHRT_MAX); INITIALIZE_INT_BOUNDARY_CONST(INT_MIN); INITIALIZE_INT_BOUNDARY_CONST(INT_MAX); INITIALIZE_INT_BOUNDARY_CONST(LONG_MIN); INITIALIZE_INT_BOUNDARY_CONST(LONG_MAX); INITIALIZE_LONG_BOUNDARY_CONST(LONG_LONG_MIN); INITIALIZE_LONG_BOUNDARY_CONST(LONG_LONG_MAX); INITIALIZE_PY_SSIZE_T_BOUNDARY_CONST(PY_SSIZE_T_MIN); INITIALIZE_PY_SSIZE_T_BOUNDARY_CONST(PY_SSIZE_T_MAX); /* Expose the just-created integer boundary constants to the Python level * (in the private _k namespace) to facilitate bounds checking in Python. * * PyModule_AddObject steals a reference to the object being added, but we * need these to live forever, even if a meddling client programmer comes in * and deletes them from the namespace of the _k module, so INCREF. */ #define EXPOSE_INT_BOUNDARY_CONSTANT(name) \ if (PyModule_AddObject(m, #name, py_ ## name) != 0) { \ goto fail; \ } else { \ Py_INCREF(py_ ## name); \ } EXPOSE_INT_BOUNDARY_CONSTANT(SHRT_MIN); EXPOSE_INT_BOUNDARY_CONSTANT(SHRT_MAX); EXPOSE_INT_BOUNDARY_CONSTANT(INT_MIN); EXPOSE_INT_BOUNDARY_CONSTANT(INT_MAX); EXPOSE_INT_BOUNDARY_CONSTANT(LONG_MIN); EXPOSE_INT_BOUNDARY_CONSTANT(LONG_MAX); EXPOSE_INT_BOUNDARY_CONSTANT(LONG_LONG_MIN); EXPOSE_INT_BOUNDARY_CONSTANT(LONG_LONG_MAX); EXPOSE_INT_BOUNDARY_CONSTANT(PY_SSIZE_T_MIN); EXPOSE_INT_BOUNDARY_CONSTANT(PY_SSIZE_T_MAX); #ifdef ENABLE_CONNECTION_TIMEOUT if (init_kidb_connection_timeout(m) != 0) { return; } #endif if (init_shared_string_constants() != 0) { PyErr_SetString(PyExc_ImportError, "Unable to initialize shared strings."); return; } if (init_kidb_exceptions(d) != 0) { PyErr_SetString(PyExc_ImportError, "Unable to initialize kinterbasdb exceptions." ); return; } if (init_kidb_exception_support() != 0) { PyErr_SetString(PyExc_ImportError, "Unable to initialize kinterbasdb exception support code." ); return; } if (init_kidb_transaction_support() != 0) { PyErr_SetString(PyExc_ImportError, "Unable to initialize kinterbasdb transaction support code." ); return; } #define _INIT_C_TYPE_AND_SYS(type_name, init_func) { \ int status = init_func(); \ if (status == 0) { \ /* Expose the class in the namespace of the _k module. \ * PyModule_AddObject steals a ref, so the artificial INCREF prevents \ * a client programmer from messing up the refcount with a statement \ * like 'del kinterbasdb._k.ClassName'. 
*/ \ Py_INCREF(&type_name ## Type); \ status = PyModule_AddObject(m, \ #type_name, (PyObject *) &type_name ## Type \ ); \ } \ if (status != 0) { \ assert (PyErr_Occurred()); \ /* Python's module loader doesn't really give us any error handling \ * options: */ \ return; \ } \ } _INIT_C_TYPE_AND_SYS(Transaction, init_kidb_transaction); _INIT_C_TYPE_AND_SYS(BlobReader, init_kidb_nonstandard_blob_support); _INIT_C_TYPE_AND_SYS(PreparedStatement, init_kidb_preparedstatement); _INIT_C_TYPE_AND_SYS(Cursor, init_kidb_cursor); #ifdef ENABLE_DB_EVENT_SUPPORT _INIT_C_TYPE_AND_SYS(EventConduit, init_kidb_event_system); /* Expose the following event-related constants solely for the sake of code * that tests boundary conditions: */ if (PyModule_AddIntConstant(m, "EVENT_BLOCK_SIZE", EVENT_BLOCK_SIZE) != 0) { return; } #endif if (init_kidb_type_translation() != 0) { PyErr_SetString(PyExc_ImportError, "Unable to initialize kinterbasdb type translation." ); return; } /* DSR added null_connection when moving connection state detection from the * Python level to the C level. From the perspective of the Python-level * kinterbasdb code, _kinterbasdb.null_connection is a null value like * Python's None, except that it is of type ConnectionType instead of * NoneType. * The kinterbasdb.Connection Python class can set its reference to its * equivalent C type (self._C_conn) to _kinterbasdb.null_connection to * indicate that the underlying connection is no longer valid. Then the * pyob_* functions in this C code that demand an argument of ConnectionType * are satisfied by null_connection long enough to detect that it is not open * (and that therefore the requested operation is not allowed). */ null_connection = Connection_create(); if (null_connection == NULL) { PyErr_SetString(PyExc_ImportError, "Unable to create null_connection."); return; } PyDict_SetItemString(d, "null_connection", (PyObject *) null_connection); return; fail: /* Python's module loader doesn't support clean recovery from errors, so * there's nothing we can do except attempt to avoid a segfault by * returning if a required object cannot be created. */ return; } /* init_kinterbasdb */ #include "_kinterbasdb_constants.c" /********************** MODULE INFRASTRUCTURE:END ***********************/ kinterbasdb-3.3.0/_kinterbasdb_constants.c0000644000175000001440000003345711130647414020111 0ustar pcisarusers/* KInterbasDB Python Package - Implementation of DB API Constant Transfer to * Python Objects * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ /* This file is designed to be included directly into _kinterbasdb.c without * the involvement of a header file. */ /* YYY: In the macros below, theoretically ought to DECREF the created * constant (though in practice, that'll slow down module init a little, and * the objects never die, so there's no practical difference). */ /* SIC is just a shortcut for entering integer database info constants into * dict d. */ #define SIC(name) \ py_const = PyInt_FromLong(name); \ if (py_const == NULL || PyDict_SetItemString(d, #name, py_const) != 0) { \ return; \ } /* SIC_TO doesn't require that $name be defined; the caller must specify the * value. 
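* For example, this module later uses SIC_TO to publish a flag value for a constant that Interbase's headers lack: * SIC_TO(isc_info_firebird_version, -1);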
*/ #define SIC_TO(name, value) \ py_const = PyInt_FromLong(value); \ if (py_const == NULL || PyDict_SetItemString(d, #name, py_const) != 0) { \ return; \ } /* SET_PARAMBUF_CONST is just a shortcut for defining transaction parameter buffer * constants (or those of a similar format, such as database parameter buffer * constants), which were previously defined in kinterbasdb.py as strings * containing octal escape codes. * * For example, if the definition of isc_some_dumb_const were: * #define isc_some_dumb_const 16 * then the brittle version of the Python code would feature this line: * isc_some_dumb_const = '\020' * * The point of SET_PARAMBUF_CONST is to enter into dict d the equivalent of the * Python string '\020', when passed the name of isc_some_dumb_const. */ #define SET_PARAMBUF_CONST(name) \ convArray[0] = (char) name; \ py_const = PyString_FromStringAndSize(convArray, 1); \ if (py_const == NULL || PyDict_SetItemString(d, #name, py_const) != 0) { \ return; \ } static void _init_kidb_general(PyObject *d) { /* These constants are defined by kinterbasdb rather than by the database * engine's C API. */ PyObject *py_const; SIC(DIST_TRANS_MAX_DATABASES); #ifdef ENABLE_CONNECTION_TIMEOUT SIC(CT_VETO); SIC(CT_ROLLBACK); SIC(CT_COMMIT); SIC(CT_DEFAULT); SIC(CT_NONTRANSPARENT); #endif /* ENABLE_CONNECTION_TIMEOUT */ } /* _init_kidb_general */ static void _init_kidb_ibase_header_constants_general(PyObject *d) { char convArray[1]; PyObject *py_const; /* When testing for the presence of certain constants, it'd be better to use * #ifdef than to hard-code assumptions about which versions of the engine * have which constants, but some of the constants are defined as enums * rather than via the preprocessor. Since the preprocessor never sees * enum-defined constants, #ifdef guards around them would always (wrongly) * exclude them. */ #ifdef FIREBIRD_1_0_OR_LATER SIC(isc_info_isc_version); #endif /* isc_info_version is apparently a deprecated form of isc_info_isc_version.
*/ SIC(isc_info_version); SET_PARAMBUF_CONST(isc_dpb_version1); SET_PARAMBUF_CONST(isc_dpb_cdd_pathname); SET_PARAMBUF_CONST(isc_dpb_allocation); SET_PARAMBUF_CONST(isc_dpb_journal); SET_PARAMBUF_CONST(isc_dpb_page_size); SET_PARAMBUF_CONST(isc_dpb_num_buffers); SET_PARAMBUF_CONST(isc_dpb_buffer_length); SET_PARAMBUF_CONST(isc_dpb_debug); SET_PARAMBUF_CONST(isc_dpb_garbage_collect); SET_PARAMBUF_CONST(isc_dpb_verify); SET_PARAMBUF_CONST(isc_dpb_sweep); SET_PARAMBUF_CONST(isc_dpb_enable_journal); SET_PARAMBUF_CONST(isc_dpb_disable_journal); SET_PARAMBUF_CONST(isc_dpb_dbkey_scope); SET_PARAMBUF_CONST(isc_dpb_number_of_users); SET_PARAMBUF_CONST(isc_dpb_trace); SET_PARAMBUF_CONST(isc_dpb_no_garbage_collect); SET_PARAMBUF_CONST(isc_dpb_damaged); SET_PARAMBUF_CONST(isc_dpb_license); SET_PARAMBUF_CONST(isc_dpb_sys_user_name); SET_PARAMBUF_CONST(isc_dpb_encrypt_key); SET_PARAMBUF_CONST(isc_dpb_activate_shadow); SET_PARAMBUF_CONST(isc_dpb_sweep_interval); SET_PARAMBUF_CONST(isc_dpb_delete_shadow); SET_PARAMBUF_CONST(isc_dpb_force_write); SET_PARAMBUF_CONST(isc_dpb_begin_log); SET_PARAMBUF_CONST(isc_dpb_quit_log); SET_PARAMBUF_CONST(isc_dpb_no_reserve); SET_PARAMBUF_CONST(isc_dpb_user_name); SET_PARAMBUF_CONST(isc_dpb_password); SET_PARAMBUF_CONST(isc_dpb_password_enc); SET_PARAMBUF_CONST(isc_dpb_sys_user_name_enc); SET_PARAMBUF_CONST(isc_dpb_interp); SET_PARAMBUF_CONST(isc_dpb_online_dump); SET_PARAMBUF_CONST(isc_dpb_old_file_size); SET_PARAMBUF_CONST(isc_dpb_old_num_files); SET_PARAMBUF_CONST(isc_dpb_old_file); SET_PARAMBUF_CONST(isc_dpb_old_start_page); SET_PARAMBUF_CONST(isc_dpb_old_start_seqno); SET_PARAMBUF_CONST(isc_dpb_old_start_file); SET_PARAMBUF_CONST(isc_dpb_drop_walfile); SET_PARAMBUF_CONST(isc_dpb_old_dump_id); SET_PARAMBUF_CONST(isc_dpb_wal_backup_dir); SET_PARAMBUF_CONST(isc_dpb_wal_chkptlen); SET_PARAMBUF_CONST(isc_dpb_wal_numbufs); SET_PARAMBUF_CONST(isc_dpb_wal_bufsize); SET_PARAMBUF_CONST(isc_dpb_wal_grp_cmt_wait); SET_PARAMBUF_CONST(isc_dpb_lc_messages); SET_PARAMBUF_CONST(isc_dpb_lc_ctype); SET_PARAMBUF_CONST(isc_dpb_cache_manager); SET_PARAMBUF_CONST(isc_dpb_shutdown); SET_PARAMBUF_CONST(isc_dpb_online); SET_PARAMBUF_CONST(isc_dpb_shutdown_delay); SET_PARAMBUF_CONST(isc_dpb_reserved); SET_PARAMBUF_CONST(isc_dpb_overwrite); SET_PARAMBUF_CONST(isc_dpb_sec_attach); SET_PARAMBUF_CONST(isc_dpb_disable_wal); SET_PARAMBUF_CONST(isc_dpb_connect_timeout); SET_PARAMBUF_CONST(isc_dpb_dummy_packet_interval); SET_PARAMBUF_CONST(isc_dpb_gbak_attach); SET_PARAMBUF_CONST(isc_dpb_sql_role_name); SET_PARAMBUF_CONST(isc_dpb_set_page_buffers); SET_PARAMBUF_CONST(isc_dpb_working_directory); SET_PARAMBUF_CONST(isc_dpb_sql_dialect); SET_PARAMBUF_CONST(isc_dpb_set_db_readonly); SET_PARAMBUF_CONST(isc_dpb_set_db_sql_dialect); SET_PARAMBUF_CONST(isc_dpb_gfix_attach); SET_PARAMBUF_CONST(isc_dpb_gstat_attach); #ifdef isc_dpb_set_db_charset SET_PARAMBUF_CONST(isc_dpb_set_db_charset); #endif #ifdef isc_dpb_gsec_attach SET_PARAMBUF_CONST(isc_dpb_gsec_attach); #endif #ifdef isc_dpb_address_path SET_PARAMBUF_CONST(isc_dpb_address_path); #endif } /* _init_kidb_ibase_header_constants_general */ static void _init_kidb_ibase_header_constants_transaction_parameters(PyObject *d) { char convArray[1]; PyObject *py_const; SET_PARAMBUF_CONST(isc_tpb_version3); SET_PARAMBUF_CONST(isc_tpb_consistency); SET_PARAMBUF_CONST(isc_tpb_concurrency); SET_PARAMBUF_CONST(isc_tpb_shared); SET_PARAMBUF_CONST(isc_tpb_protected); SET_PARAMBUF_CONST(isc_tpb_exclusive); SET_PARAMBUF_CONST(isc_tpb_wait); 
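/* At the Python level, these one-byte strings are concatenated to build a * complete transaction parameter buffer. A rough sketch of the usual idiom * (the connection object and parameter choices are illustrative; consult * kinterbasdb's documentation as to whether isc_tpb_version3 must be * prepended explicitly): * customTPB = ( kinterbasdb.isc_tpb_read_committed * + kinterbasdb.isc_tpb_rec_version * + kinterbasdb.isc_tpb_nowait ) * con.begin(tpb=customTPB) */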
SET_PARAMBUF_CONST(isc_tpb_nowait); SET_PARAMBUF_CONST(isc_tpb_read); SET_PARAMBUF_CONST(isc_tpb_write); SET_PARAMBUF_CONST(isc_tpb_lock_read); SET_PARAMBUF_CONST(isc_tpb_lock_write); SET_PARAMBUF_CONST(isc_tpb_verb_time); SET_PARAMBUF_CONST(isc_tpb_commit_time); SET_PARAMBUF_CONST(isc_tpb_ignore_limbo); SET_PARAMBUF_CONST(isc_tpb_read_committed); SET_PARAMBUF_CONST(isc_tpb_autocommit); SET_PARAMBUF_CONST(isc_tpb_rec_version); SET_PARAMBUF_CONST(isc_tpb_no_rec_version); SET_PARAMBUF_CONST(isc_tpb_restart_requests); SET_PARAMBUF_CONST(isc_tpb_no_auto_undo); #ifdef HAVE__isc_tpb_lock_timeout SET_PARAMBUF_CONST(isc_tpb_lock_timeout); #endif } /* _init_kidb_ibase_header_constants_transaction_parameters */ static void _init_kidb_ibase_header_constants_database_info(PyObject *d) { PyObject *py_const; SIC(isc_info_db_id); SIC(isc_info_reads); SIC(isc_info_writes); SIC(isc_info_fetches); SIC(isc_info_marks); SIC(isc_info_implementation); SIC(isc_info_base_level); SIC(isc_info_page_size); SIC(isc_info_num_buffers); SIC(isc_info_limbo); SIC(isc_info_current_memory); SIC(isc_info_max_memory); SIC(isc_info_window_turns); SIC(isc_info_license); SIC(isc_info_allocation); SIC(isc_info_attachment_id); SIC(isc_info_read_seq_count); SIC(isc_info_read_idx_count); SIC(isc_info_insert_count); SIC(isc_info_update_count); SIC(isc_info_delete_count); SIC(isc_info_backout_count); SIC(isc_info_purge_count); SIC(isc_info_expunge_count); SIC(isc_info_sweep_interval); SIC(isc_info_ods_version); SIC(isc_info_ods_minor_version); SIC(isc_info_no_reserve); SIC(isc_info_logfile); SIC(isc_info_cur_logfile_name); SIC(isc_info_cur_log_part_offset); SIC(isc_info_num_wal_buffers); SIC(isc_info_wal_buffer_size); SIC(isc_info_wal_ckpt_length); SIC(isc_info_wal_cur_ckpt_interval); SIC(isc_info_wal_prv_ckpt_fname); SIC(isc_info_wal_prv_ckpt_poffset); SIC(isc_info_wal_recv_ckpt_fname); SIC(isc_info_wal_recv_ckpt_poffset); SIC(isc_info_wal_grpc_wait_usecs); SIC(isc_info_wal_num_io); SIC(isc_info_wal_avg_io_size); SIC(isc_info_wal_num_commits); SIC(isc_info_wal_avg_grpc_size); SIC(isc_info_forced_writes); SIC(isc_info_user_names); SIC(isc_info_page_errors); SIC(isc_info_record_errors); SIC(isc_info_bpage_errors); SIC(isc_info_dpage_errors); SIC(isc_info_ipage_errors); SIC(isc_info_ppage_errors); SIC(isc_info_tpage_errors); SIC(isc_info_set_page_buffers); #ifdef INTERBASE_6_OR_LATER SIC(isc_info_db_sql_dialect); /* Mis-cased version of the above has now been deprecated and is maintained * only for compatibility: */ SIC(isc_info_db_SQL_dialect); SIC(isc_info_db_read_only); SIC(isc_info_db_size_in_pages); #endif /* INTERBASE_6_OR_LATER */ #ifdef FIREBIRD_1_0_OR_LATER SIC(frb_info_att_charset); SIC(isc_info_db_class); SIC(isc_info_firebird_version); SIC(isc_info_oldest_transaction); SIC(isc_info_oldest_active); SIC(isc_info_oldest_snapshot); SIC(isc_info_next_transaction); SIC(isc_info_db_provider); #else /* isc_info_firebird_version is used in __init__.py, but it's not available * with Interbase, so we set its value to a flag. */ SIC_TO(isc_info_firebird_version, -1); #endif /* FIREBIRD_1_0_OR_LATER */ #ifdef FIREBIRD_1_5_OR_LATER SIC(isc_info_active_transactions); #endif /* FIREBIRD_1_5_OR_LATER */ /* This HAVE__? silliness is necessary because ibase.h is increasingly not * using the preprocessor for constants definitions, so setup.py has to test * for the presence of the constants, then use the preprocessor to indicate * the result. 
*/ #ifdef HAVE__isc_info_active_tran_count SIC(isc_info_active_tran_count); #endif #ifdef HAVE__isc_info_creation_date SIC(isc_info_creation_date); #endif } /* _init_kidb_ibase_header_constants_database_info */ static void _init_kidb_ibase_header_constants_transaction_info(PyObject *d) { PyObject *py_const; SIC(isc_info_tra_id); #ifdef HAVE__isc_info_tra_oldest_interesting SIC(isc_info_tra_oldest_interesting); #endif #ifdef HAVE__isc_info_tra_oldest_snapshot SIC(isc_info_tra_oldest_snapshot); #endif #ifdef HAVE__isc_info_tra_oldest_active SIC(isc_info_tra_oldest_active); #endif #ifdef HAVE__isc_info_tra_isolation SIC(isc_info_tra_isolation); #endif #ifdef HAVE__isc_info_tra_access SIC(isc_info_tra_access); #endif #ifdef HAVE__isc_info_tra_lock_timeout SIC(isc_info_tra_lock_timeout); #endif #ifdef HAVE__isc_info_tra_consistency SIC(isc_info_tra_consistency); #endif #ifdef HAVE__isc_info_tra_concurrency SIC(isc_info_tra_concurrency); #endif #ifdef HAVE__isc_info_tra_read_committed SIC(isc_info_tra_read_committed); #endif #ifdef HAVE__isc_info_tra_no_rec_version SIC(isc_info_tra_no_rec_version); #endif #ifdef HAVE__isc_info_tra_rec_version SIC(isc_info_tra_rec_version); #endif #ifdef HAVE__isc_info_tra_readonly SIC(isc_info_tra_readonly); #endif #ifdef HAVE__isc_info_tra_readwrite SIC(isc_info_tra_readwrite); #endif } /* _init_kidb_ibase_header_constants_transaction_info */ static void _init_kidb_ibase_header_constants_preparedstatement_properties( PyObject *d ) { /* These constants are intended to allow the client programmer to compare the * properties of PreparedStatement to symbolic names. */ PyObject *py_const; /* statement_type: */ SIC(isc_info_sql_stmt_select); SIC(isc_info_sql_stmt_insert); SIC(isc_info_sql_stmt_update); SIC(isc_info_sql_stmt_delete); SIC(isc_info_sql_stmt_ddl); SIC(isc_info_sql_stmt_get_segment); SIC(isc_info_sql_stmt_put_segment); SIC(isc_info_sql_stmt_exec_procedure); SIC(isc_info_sql_stmt_start_trans); SIC(isc_info_sql_stmt_commit); SIC(isc_info_sql_stmt_rollback); SIC(isc_info_sql_stmt_select_for_upd); SIC(isc_info_sql_stmt_set_generator); #ifdef FIREBIRD_1_5_OR_LATER SIC(isc_info_sql_stmt_savepoint); #endif } /* _init_kidb_ibase_header_constants_preparedstatement_properties */ static PyObject *init_kidb_basic_header_constants(PyObject *self, PyObject *args) { PyObject *dest_dict; if (!PyArg_ParseTuple(args, "O!", &PyDict_Type, &dest_dict)) { return NULL; } _init_kidb_general(dest_dict); if (PyErr_Occurred()) { return NULL; } _init_kidb_ibase_header_constants_general(dest_dict); if (PyErr_Occurred()) { return NULL; } _init_kidb_ibase_header_constants_transaction_parameters(dest_dict); if (PyErr_Occurred()) { return NULL; } _init_kidb_ibase_header_constants_database_info(dest_dict); if (PyErr_Occurred()) { return NULL; } _init_kidb_ibase_header_constants_transaction_info(dest_dict); if (PyErr_Occurred()) { return NULL; } _init_kidb_ibase_header_constants_preparedstatement_properties(dest_dict); if (PyErr_Occurred()) { return NULL; } RETURN_PY_NONE; } /* init_kidb_ibase_header_constants */ kinterbasdb-3.3.0/_array_descriptor.py0000644000175000001440000001103011130647414017266 0ustar pcisarusersimport struct _FB_MAX_ENTITY_NAME_LENGTH = 32 _MAX_ARRAY_DIMENSIONS = 16 def look_up_array_descriptor(con, relName, fieldName): return _loadArrayFieldCacheEntry(con, relName, fieldName)[0] def look_up_array_subtype(con, relName, fieldName): return _loadArrayFieldCacheEntry(con, relName, fieldName)[1] # Supporting code: def _loadArrayFieldCacheEntry(con, relName, fieldName): # 
Given a connection and a 2-tuple of (relation name, field name) for an # array field, create and return an array descriptor for that field. # For a given field and connection, the descriptor is created only once, # then cached for all future lookups. fieldKey = (relName, fieldName) arrayFieldMetaCache = getattr(con, '_array_field_meta_cache', None) if arrayFieldMetaCache is not None: entry = arrayFieldMetaCache.get(fieldKey, None) if entry is not None: # Cache hit: return entry else: con._array_field_meta_cache = {} # Cache miss: cur = con.cursor() try: cur.execute( "SELECT " # These fields aren't placed directly in the descriptor: " FIELD_SPEC.RDB$FIELD_NAME," # internal field name " FIELD_SPEC.RDB$FIELD_SUB_TYPE," # field subtype # These fields are placed directly in the descriptor: " FIELD_SPEC.RDB$FIELD_TYPE," # array_desc_dtype " FIELD_SPEC.RDB$FIELD_SCALE," # array_desc_scale " FIELD_SPEC.RDB$FIELD_LENGTH," # array_desc_length " FIELD_SPEC.RDB$DIMENSIONS " # array_desc_dimensions "FROM " "RDB$FIELDS FIELD_SPEC " "JOIN RDB$RELATION_FIELDS REL_FIELDS " "ON FIELD_SPEC.RDB$FIELD_NAME = REL_FIELDS.RDB$FIELD_SOURCE " "WHERE " "REL_FIELDS.RDB$RELATION_NAME = ? " "AND REL_FIELDS.RDB$FIELD_NAME = ?", (relName, fieldName) ) basicSpecs = cur.fetchone() assert basicSpecs is not None cur.execute( "SELECT RDB$LOWER_BOUND, RDB$UPPER_BOUND " "FROM RDB$FIELD_DIMENSIONS " "WHERE RDB$FIELD_NAME = ? " "ORDER BY RDB$DIMENSION", (basicSpecs[0],) ) boundsForEachDimension = cur.fetchall() assert boundsForEachDimension finally: cur.close() # Flatten boundsForEachDimension: cBoundsSource = [] for lowerBound, upperBound in boundsForEachDimension: cBoundsSource.append(lowerBound) cBoundsSource.append(upperBound) desc = _createArrayDescriptor( basicSpecs[2], basicSpecs[3], basicSpecs[4], fieldName, relName, basicSpecs[5], cBoundsSource ) subType = basicSpecs[1] cacheEntry = (desc, subType) con._array_field_meta_cache[fieldKey] = cacheEntry return cacheEntry _FMT__ISC_ARRAY_DESC = ( 'B' # unsigned char array_desc_dtype 'b' # char array_desc_scale 'H' # unsigned short array_desc_length + str(_FB_MAX_ENTITY_NAME_LENGTH) + 's' # char array_desc_field_name[_FB_MAX_ENTITY_NAME_LENGTH] + str(_FB_MAX_ENTITY_NAME_LENGTH) + 's' # char array_desc_relation_name[_FB_MAX_ENTITY_NAME_LENGTH] 'h' # short array_desc_dimensions 'h' # short array_desc_flags + str(_MAX_ARRAY_DIMENSIONS * 2) + 'h' # ISC_ARRAY_BOUND array_desc_bounds[16] # (an ISC_ARRAY_BOUND consists of two shorts) ) def _createArrayDescriptor(array_desc_dtype, array_desc_scale, array_desc_length, array_desc_field_name, array_desc_relation_name, array_desc_dimensions, array_desc_bounds, ): array_desc_field_name = _padEntityName(array_desc_field_name) array_desc_relation_name = _padEntityName(array_desc_relation_name) if len(array_desc_bounds) < _MAX_ARRAY_DIMENSIONS * 2: array_desc_bounds += [0] * (_MAX_ARRAY_DIMENSIONS * 2 - len(array_desc_bounds)) assert len(array_desc_bounds) == _MAX_ARRAY_DIMENSIONS * 2 packArgs = [ array_desc_dtype , array_desc_scale, array_desc_length, array_desc_field_name, array_desc_relation_name, array_desc_dimensions, 0, # array_desc_flags is always zero initially ] + array_desc_bounds return struct.pack(_FMT__ISC_ARRAY_DESC, *packArgs) def _padEntityName(en): en = en.strip() en_len = len(en) assert en_len <= _FB_MAX_ENTITY_NAME_LENGTH return en + ((_FB_MAX_ENTITY_NAME_LENGTH - en_len) * '\0') kinterbasdb-3.3.0/_kicore_preparedstatement.c0000644000175000001440000014124111130647414020577 0ustar pcisarusers/* KInterbasDB Python Package - 
Implementation of Cursor * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ /****************** DECLARATIONS:BEGIN *******************/ static int PreparedStatement_close_without_unlink( PreparedStatement *, boolean ); static int PSTracker_remove(PSTracker **, PreparedStatement *cont, boolean); static int PreparedStatement_isc_close(PreparedStatement *, boolean); static PyObject *_generic_single_item_isc_dsql_sql_info_request( isc_stmt_handle *, ISC_STATUS *, const char, const short ); static int _determine_statement_type(isc_stmt_handle *, ISC_STATUS *); typedef int (*PSCacheMappedFunction)( PSCache *, unsigned short, PreparedStatement * ); #define PSCache_has_been_deleted(self) \ ((self)->container == NULL) /* Note that PS_STATE_CLOSED has a very different meaning from the "closed" * states of most other objects: */ #define PreparedStatement_is_open(self) \ ((self)->state == PS_STATE_OPEN || (self)->state == PS_STATE_CLOSED) /* Python strings are immutable, and Python's garbage collector does not * relocate memory, so if two PyObject pointers point to the same memory * location, the two objects are certainly equal--in fact, they're IDentical * (id(self->sql) == id(sql)). * * If the pointers refer to different memory locations, the two objects are * still equal if their contents match. */ #define PreparedStatement_matches_sql(self_, sql_) \ ( (self_)->sql == (sql_) \ || PyObject_Compare((self_)->sql, (sql_)) == 0 \ ) /****************** DECLARATIONS:END *******************/ /*************** PSCache (PreparedStatementList) METHODS:BEGIN ***************/ #define PSCACHE_NULLIFY(self) \ (self)->container = NULL; \ (self)->capacity = 0; \ (self)->start = 0; \ (self)->most_recently_found = NULL; static int PSCache_initialize(PSCache *self, unsigned short capacity) { unsigned short i; self->container = kimem_main_malloc(sizeof(PreparedStatement *) * capacity); if (self->container == NULL) { return -1; } self->capacity = capacity; for (i = 0; i < capacity; i++) { self->container[i] = NULL; } self->most_recently_found = NULL; return 0; } /* PSCache_initialize */ #if (PREP_STMT_CACHE_CAPACITY == 1) #define _PSCache_index_next(i) 0 #define _PSCache_index_prev(i) 0 #else #define _PSCache_index_next(i) \ ((i + 1) % self->capacity) #define _PSCache_index_prev(i) \ (i != 0 ? 
i - 1 : self->capacity - 1) #endif #define PSCache_first(self) \ _PSCache_index_prev(self->start) #define PSCache_is_empty(self) \ (PSCache_first(self) == NULL) static int PSCache_append(PSCache *self, PreparedStatement *ps) { const unsigned short i = self->start; assert (ps != NULL); assert (PreparedStatement_is_open(ps)); /* Any PreparedStatement added to a Cursor's PSCache should be for internal * use, so we can enforce specific expectations about its reference * count: */ assert (ps->for_internal_use); assert (ps->ob_refcnt == 1); assert (ps->sql != NULL); assert (OBJECT_IS_VALID_TYPE_FOR_PREPARED_STATEMENT_SQL(ps->sql)); assert (ps->cur != NULL); if (Cursor_ensure_PSCache(ps->cur) != 0) { return -1; } { PreparedStatement *prev_occupant = self->container[i]; if (prev_occupant != NULL) { /* We should never be appending to the cache a PreparedStatement that's * already present. The following assertion isn't a robust check, of * course. */ assert (prev_occupant != ps); /* We mustn't delete a PreparedStatement, yet have it still referenced by * the self->most_recently_found shortcut pointer. */ if (prev_occupant == self->most_recently_found) { self->most_recently_found = NULL; } /* Since all PreparedStatements stored in a PSCache are for internal use, * and internal use is tightly controllable, we know that removing * prev_occupant from the cache will cause it to be deleted. */ assert (prev_occupant->ob_refcnt == 1); Py_DECREF(prev_occupant); } } Py_INCREF(ps); self->container[i] = ps; self->start = _PSCache_index_next(i); return 0; } /* PSCache_append */ static PreparedStatement *PSCache_find_prep_stmt_for_sql(PSCache *self, PyObject *sql ) { /* Returns a borrowed reference to a PreparedStatement that matches sql, or * NULL if there is no such PreparedStatement in the cache. */ assert (!PSCache_has_been_deleted(self)); assert (sql != NULL); assert (OBJECT_IS_VALID_TYPE_FOR_PREPARED_STATEMENT_SQL(sql)); if (self->most_recently_found != NULL) { assert (self->most_recently_found->sql != NULL); if (PreparedStatement_matches_sql(self->most_recently_found, sql)) { return self->most_recently_found; } }{ PreparedStatement *matching_ps = NULL; /* self->start is the index where the next appended statement should be * placed, but we want to start with the most recently appended statement and * work backward from there. */ unsigned short i = _PSCache_index_prev(self->start); const unsigned short i_orig = i; do { PreparedStatement *ps = self->container[i]; if (ps == NULL) { break; } /* None left to search. */ assert (OBJECT_IS_VALID_TYPE_FOR_PREPARED_STATEMENT_SQL(ps->sql)); if (PreparedStatement_matches_sql(ps, sql)) { matching_ps = self->most_recently_found = ps; break; } i = _PSCache_index_prev(i); } while (i != i_orig); return matching_ps; }} /* PSCache_find_prep_stmt_for_sql */ static int PSCache_traverse(PSCache *self, PSCacheMappedFunction modifier) { unsigned short i = _PSCache_index_prev(self->start); const unsigned short i_orig = i; assert (!PSCache_has_been_deleted(self)); do { PreparedStatement *ps = self->container[i]; if (ps == NULL) { break; }{ /* None left to search. */ const int modifier_result = modifier(self, i, ps); if (modifier_result != 0) { return modifier_result; } i = _PSCache_index_prev(i); }} while (i != i_orig); return 0; } /* PSCache_traverse */ static void PSCache_clear(PSCache *self) { /* Empties the PSCache, but does not release any of the dynamically allocated * memory of its members (we're talking about PSCache *structure members*, * not *container elements*).
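* The loop below walks the ring buffer in the same order as the other traversals in this file: self->start names the next append slot, so iteration proceeds backward from _PSCache_index_prev(self->start). For example, with capacity 4 and start == 1, the occupied slots are visited in the order 0, 3, 2, 1.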
*/ unsigned short i; assert (!PSCache_has_been_deleted(self)); i = self->start; for (;;) { i = _PSCache_index_prev(i); { PreparedStatement *ps = self->container[i]; if (ps == NULL) { break; } /* Only internal PreparedStatements should be in the PSCache: */ assert (ps->for_internal_use); /* At this point, each internal PreparedStatement should be referenced * once by the tracker: */ assert (ps->ob_refcnt != 0); /* But nowhere else: */ assert (ps->ob_refcnt == 1); Py_DECREF(ps); self->container[i] = NULL; } } self->start = 0; self->most_recently_found = NULL; } /* PSCache_clear */ static void PSCache_delete(PSCache *self) { /* Opposite of PSCache_initialize. */ assert (!PSCache_has_been_deleted(self)); assert (self->container != NULL); PSCache_clear(self); /* Must ensure that a dangling reference to a deleted member of ->container * wasn't left behind. */ assert (self->most_recently_found == NULL); kimem_main_free(self->container); self->container = NULL; self->capacity = 0; } /* PSCache_delete */ /**************** PSCache (PreparedStatementList) METHODS:END ****************/ /********** PreparedStatement METHODS INACCESSIBLE TO PYTHON:BEGIN ***********/ #define PS_REQ_OPEN_WITH_FAILURE(ps, failure_action) \ if (_PreparedStatement_require_open(ps, NULL) != 0) { failure_action; } #define PS_REQ_OPEN(ps) \ PS_REQ_OPEN_WITH_FAILURE(ps, return NULL) #define PS_REQ_OPEN2(ps, failure_message) \ if (_PreparedStatement_require_open(ps, failure_message) != 0) { \ return NULL; \ } static int _PreparedStatement_require_open( PreparedStatement *self, char *failure_message ) { /* If self is not open, raises an exception with the supplied error message * (or a default if no error message is supplied). * Returns 0 if self was open; -1 if it was closed. */ if (PreparedStatement_is_open(self)) { return 0; } if (self->state == PS_STATE_CONNECTION_TIMED_OUT) { raise_exception(ConnectionTimedOut, "This PreparedStatement's connection timed out, and" " PreparedStatements cannot transparently survive a timeout." ); } else { raise_exception(ProgrammingError, failure_message != NULL ? failure_message : "The PreparedStatement must be OPEN to perform this" " operation." ); } return -1; } /* _PreparedStatement_require_open */ static void PreparedStatement_create_references_to_superiors( PreparedStatement *self, const boolean for_internal_use, Cursor *cur ) { assert (self != NULL); assert (self->cur == NULL); assert (cur != NULL); /* The internal-use indicator that we've been provided should match the one * set in the corresponding member of self: */ assert (for_internal_use == self->for_internal_use); /* A non-internal (user-accessible) PreparedStatement owns a reference to * its Cursor, while its Cursor does not own a reference to it. Reference * ownership for an internal PreparedStatement is just the opposite.
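* Presumably this asymmetry exists to avoid an uncollectable reference cycle: for any Cursor/PreparedStatement pair, exactly one side holds a strong reference, so the pair can always be torn down regardless of which object the client code releases last.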
*/ if (!for_internal_use) { Py_INCREF(cur); } self->cur = cur; } /* PreparedStatement_create_references_to_superiors */ static void PreparedStatement_clear_references_to_superiors( PreparedStatement *self ) { assert (self != NULL); { Cursor *cur = self->cur; assert (cur != NULL); self->cur = NULL; if (!self->for_internal_use) { Py_DECREF(cur); } } } /* PreparedStatement_clear_references_to_superiors */ static PreparedStatement *PreparedStatement_create( Cursor *cur, boolean for_internal_use ) { PreparedStatement *self; self = PyObject_New(PreparedStatement, &PreparedStatementType); if (self == NULL) { goto fail; } self->state = PS_STATE_CREATED; self->for_internal_use = for_internal_use; self->stmt_handle = NULL_STMT_HANDLE; self->sql = NULL; assert (cur != NULL); self->cur = NULL; PreparedStatement_create_references_to_superiors( self, for_internal_use, cur ); assert (self->cur == cur); self->statement_type = NULL_STATEMENT_TYPE; self->in_sqlda = NULL; self->in_sqlda_sqlind_array = NULL; self->out_sqlda = NULL; self->in_var_orig_spec = NULL; self->out_buffer = NULL; self->description = NULL; /* Notice that this constructor does not add self to any caches or trackers. * It is the caller's responsibility to do so, if appropriate. */ return self; fail: Py_XDECREF(self); return NULL; } /* PreparedStatement_create */ static int PreparedStatement_open(PreparedStatement* self, Cursor *cur, PyObject *sql ) { ISC_STATUS *sv; Transaction *trans; CConnection *con; assert (cur != NULL); sv = cur->status_vector; trans = cur->trans; assert (trans != NULL); con = Transaction_get_con(trans); assert (con != NULL); CON_MUST_ALREADY_BE_ACTIVE(con); /* The caller should've already validated sql; sql should now be a non-NULL * str. */ assert (sql != NULL); assert (OBJECT_IS_VALID_TYPE_FOR_PREPARED_STATEMENT_SQL(sql)); Py_INCREF(sql); self->sql = sql; /* Allocate new statement handle: */ assert(self->stmt_handle == NULL_STMT_HANDLE); ENTER_GDAL isc_dsql_allocate_statement(sv, &con->db_handle, &self->stmt_handle); LEAVE_GDAL if (DB_API_ERROR(sv)) { raise_sql_exception(OperationalError, "isc_dsql_allocate_statement: ", sv); goto fail; } assert(self->stmt_handle != NULL_STMT_HANDLE); /* Allocate enough space for a default number of XSQLVARs in the OUTput * XSQLDA. The XSQLDA must be allocated prior to calling isc_dsql_prepare * below, and if there are too many output variables for the default size, * it may need to be reallocated and re-initialized with isc_dsql_describe * later. */ assert (self->out_sqlda == NULL); if (reallocate_sqlda(&self->out_sqlda, FALSE, NULL) < 0) { goto fail; } assert (self->out_sqlda != NULL); /* Ask the database engine to compile the statement. */ { /* Note that we call Transaction_get_handle_p while holding the GIL. */ isc_tr_handle *trans_handle_addr = Transaction_get_handle_p(trans); char *sql_raw_buffer = PyString_AS_STRING(sql); const Py_ssize_t sql_len = PyString_GET_SIZE(sql); if (!_check_statement_length(sql_len)) { goto fail; } ENTER_GDAL isc_dsql_prepare(sv, trans_handle_addr, &self->stmt_handle, (unsigned short) sql_len, sql_raw_buffer, con->dialect, self->out_sqlda ); LEAVE_GDAL if (DB_API_ERROR(sv)) { raise_sql_exception(ProgrammingError, "isc_dsql_prepare: ", sv); goto fail; } } /* Determine the database API's internal statement type code for the current * statement and cache that code. 
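* For example, an INSERT statement yields isc_info_sql_stmt_insert; the cached code is what the statement_type property getter later exposes to Python client code.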
*/ assert (self->statement_type == NULL_STATEMENT_TYPE); self->statement_type = _determine_statement_type(&self->stmt_handle, sv); if (self->statement_type == -1) { goto fail; } assert (self->statement_type != NULL_STATEMENT_TYPE); /* Now that we know how many output variables there are, it might be * necessary to resize the output XSQLDA to accommodate them. */ { const int sqlda_realloc_res = reallocate_sqlda(&self->out_sqlda, FALSE, NULL); if (sqlda_realloc_res == 0) { /* No actual reallocation was necessary, so there's no need to rebind. */ } else if (sqlda_realloc_res == 1) { /* The default number of XSQLVARs allocated earlier was insufficient, so * the XSQLDA's parameter information must be rebound. */ ENTER_GDAL isc_dsql_describe(sv, &self->stmt_handle, con->dialect, self->out_sqlda /* OUTPUT */ ); LEAVE_GDAL if (DB_API_ERROR(sv)) { raise_sql_exception(OperationalError, "isc_dsql_describe for OUTput params: ", sv ); goto fail; } } else { /* reallocate_sqlda raised an error. */ goto fail; } } /* If there are any output variables, allocate a buffer to hold the raw * values from the database, before they're converted to Python values. This * buffer will be allocated to exactly the required size, and will then * remain alive throughout the life of this PreparedStatement, accepting * every row of raw output ever fetched with the statement. */ assert (self->out_buffer == NULL); if (self->out_sqlda->sqld > 0) { self->out_buffer = allocate_output_buffer(self->out_sqlda); if (self->out_buffer == NULL) { goto fail; } } /* Bind information about the INput XSQLDA's variables. */ /* Allocate enough space for a default number of XSQLVARs in the INput * XSQLDA. The XSQLDA must be allocated prior to calling * isc_dsql_describe_bind below, and it may need to be reallocated afterward * if the default number of XSQLVARs was insufficient. */ assert (self->in_sqlda == NULL); if (reallocate_sqlda(&self->in_sqlda, TRUE, &self->in_sqlda_sqlind_array) < 0 ) { goto fail; } assert (self->in_sqlda != NULL); ENTER_GDAL isc_dsql_describe_bind(sv, &self->stmt_handle, con->dialect, self->in_sqlda /* INPUT */ ); LEAVE_GDAL if (DB_API_ERROR(sv)) { raise_sql_exception(OperationalError, "isc_dsql_describe_bind for INput params: ", sv ); goto fail; } { const int sqlda_realloc_res = reallocate_sqlda(&self->in_sqlda, TRUE, &self->in_sqlda_sqlind_array ); if (sqlda_realloc_res == 0) { /* No actual reallocation was necessary, so there's no need to rebind. */ } else if (sqlda_realloc_res == 1) { /* The default number of XSQLVARs allocated earlier was insufficient, so * the XSQLDA's parameter information must be rebound. */ ENTER_GDAL isc_dsql_describe_bind(sv, &self->stmt_handle, con->dialect, self->in_sqlda /* INPUT */ ); LEAVE_GDAL if (DB_API_ERROR(sv)) { raise_sql_exception(OperationalError, "isc_dsql_describe_bind for INput params: ", sv ); goto fail; } } else { /* reallocate_sqlda raised an error. */ goto fail; } } /* Record the original type and size information for input parameters so that * information can be restored if implicit input parameter conversion from * string is invoked. */ { XSQLVAR *sqlvar; OriginalXSQLVARSpecificationCache *spec_cache; short var_no; const short input_param_count = self->in_sqlda->sqld; self->in_var_orig_spec = kimem_plain_malloc( sizeof(OriginalXSQLVARSpecificationCache) * input_param_count ); if (self->in_var_orig_spec == NULL) { /* We weren't calling one of the Python-supplied memory allocators; we need * to set the Python exception (MemoryError) ourselves.
PyErr_NoMemory(); goto fail; } for ( sqlvar = self->in_sqlda->sqlvar, var_no = 0, spec_cache = self->in_var_orig_spec; var_no < input_param_count; sqlvar++, var_no++, spec_cache++ ) { spec_cache->sqltype = sqlvar->sqltype; spec_cache->sqllen = sqlvar->sqllen; } } /* The description tuple should still be NULL; it will be computed lazily * only if the client programmer asks for it. */ assert (self->description == NULL); assert (!PyErr_Occurred()); self->state = PS_STATE_OPEN; return 0; fail: assert (PyErr_Occurred()); /* The state flag should indicate that the PreparedStatement has been * created but not successfully opened. */ assert (self->state == PS_STATE_CREATED); return -1; } /* PreparedStatement_open */ static int PreparedStatement_compare( PreparedStatement *a, PreparedStatement *b ) { PyObject *a_sql = a->sql; PyObject *b_sql = b->sql; Cursor *a_cur = a->cur; Cursor *b_cur = b->cur; if ( (a_sql == NULL || b_sql == NULL) || (a_cur == NULL || b_cur == NULL) || (a_cur->trans != b_cur->trans) ) { /* Not equal. */ return -1; } else { /* Equality comes down to the equality of their SQL strings. */ return PyObject_Compare(a_sql, b_sql); } } /* PreparedStatement_compare */ static PyObject *PreparedStatement_description_tuple_get( PreparedStatement *self ) { /* Lazily creates a Python DB API Cursor.description tuple for this * PreparedStatement. * Returns a borrowed reference to the description tuple, or NULL on error. */ assert (self->out_sqlda != NULL); assert (self->cur != NULL); if (self->description == NULL) { /* The object created by the following call will be DECREFed by * PreparedStatement_clear_description_tuple, which may be called directly * by the PreparedStatement destructor or indirectly by * (Connection|Cursor).set_type_trans_out. */ self->description = XSQLDA2Description(self->out_sqlda, self->cur); } return self->description; } /* PreparedStatement_description_tuple_get */ static PyObject *pyob_PreparedStatement_description_get( PreparedStatement *self, void *closure ) { /* DB API description tuple read-only property. */ PS_REQ_OPEN(self); { PyObject *py_result = PreparedStatement_description_tuple_get(self); /* PreparedStatement_description_tuple_get returns a *borrowed* reference * to the description tuple; we must increment it (with Py_XINCREF, because * it might be NULL) before returning it. */ Py_XINCREF(py_result); return py_result; } } /* pyob_PreparedStatement_description_get */ /* CLEAR-DESCRIPTION-TUPLE SUPPORT CODE : BEGIN */ /* The description tuple contains information about a statement's output * variables that might change if (Connection|Cursor).set_type_trans_out is * called. Therefore, set_type_trans_out was changed to trigger the clearing * of the description tuple of every PreparedStatement subordinate to the * object on which the method was invoked (a Connection or Cursor). This group * of functions supports that operation. * * Note that the description tuples are cleared, not reconstructed. A given * description tuple will only be reconstructed if the user actually requests * it. */ static int PreparedStatement_clear_description_tuple(PreparedStatement *self) { /* Clears an individual PreparedStatement's description tuple. */ if (self->description != NULL) { Py_DECREF(self->description); self->description = NULL; } return 0; } /* PreparedStatement_clear_description_tuple */ static int PSCacheMapped_clear_description_tuple( PSCache* cache, unsigned short cache_index, PreparedStatement *ps ) { /* Called on each element of a Cursor's PSCache's container.
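* PSCache_traverse applies a PSCacheMappedFunction such as this one to each cached statement, starting from the most recently appended and stopping early if the callback returns nonzero.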
*/ assert (ps != NULL); return PreparedStatement_clear_description_tuple(ps); } /* PSCacheMapped_clear_description_tuple */ static int PSTrackerMapped_clear_description_tuple( PSTracker *node_prev, PSTracker *node_cur ) { /* Called on each element of a Cursor's PSTracker. */ PreparedStatement *ps; assert (node_cur != NULL); ps = node_cur->contained; assert (ps != NULL); return PreparedStatement_clear_description_tuple(ps); } /* PSTrackerMapped_clear_description_tuple */ static int Cursor_clear_ps_description_tuples(Cursor *self) { /* Clears the description tuple of every PreparedStatement subordinate to * this Cursor. */ PSCache *psc = &self->ps_cache_internal; if (!PSCache_has_been_deleted(psc)) { if (PSCache_traverse(psc, PSCacheMapped_clear_description_tuple) != 0) { goto fail; } } if (self->ps_tracker != NULL) { if (PSTracker_traverse(self->ps_tracker, PSTrackerMapped_clear_description_tuple ) != 0 ) { goto fail; } } return 0; fail: assert (PyErr_Occurred()); return -1; } /* Cursor_clear_ps_description_tuples */ static int CConnection_clear_ps_description_tuples( CConnection *self ) { /* Clears the description tuple of every PreparedStatement subordinate to * this Connection. */ TransactionTracker *trans_node = self->transactions; while (trans_node != NULL) { Transaction *trans = trans_node->contained; assert (trans != NULL); { CursorTracker *cur_node = trans->open_cursors; while (cur_node != NULL) { Cursor *cur = cur_node->contained; assert (cur != NULL); if (Cursor_clear_ps_description_tuples(cur) != 0) { return -1; } cur_node = cur_node->next; } } trans_node = trans_node->next; } return 0; } /* CConnection_clear_ps_description_tuples */ /* CLEAR-DESCRIPTION-TUPLE SUPPORT CODE : END */ static int PreparedStatement_isc_close(PreparedStatement *self, boolean allowed_to_raise ) { /* IB6 API Guide page 334: "A cursor need only be closed in this manner [with * DSQL_close] if it was previously opened and associated with stmt_handle by * isc_dsql_set_cursor_name()." * * That's not accurate. A statement handle also needs to be closed in this * manner if it's about to be executed again. If it's going to be closed * *permanently*, calling isc_dsql_free_statement with DSQL_drop (as is done * in PreparedStatement_isc_drop) is sufficient. */ ISC_STATUS *sv; assert (self->cur != NULL); assert (self->cur->trans != NULL); assert (Transaction_get_con(self->cur->trans) != NULL); CON_MUST_ALREADY_BE_ACTIVE(Transaction_get_con(self->cur->trans)); sv = self->cur->status_vector; assert (PreparedStatement_is_open(self)); ENTER_GDAL isc_dsql_free_statement(sv, &self->stmt_handle, /* DSQL_close means "close the open result set associated with this * statement; we're clearing it for another execution." 
*/ DSQL_close ); LEAVE_GDAL if (DB_API_ERROR(sv)) { raise_sql_exception(OperationalError, "Error while trying to close PreparedStatement's associated result" " set: ", sv ); if (allowed_to_raise) { return -1; } else { SUPPRESS_EXCEPTION; } } self->state = PS_STATE_CLOSED; /* We "closed" this prepared statement in a sense that matters only to the * server, not to the Python client programmer: */ assert (PreparedStatement_is_open(self)); assert (self->stmt_handle != NULL_STMT_HANDLE); return 0; } /* PreparedStatement_isc_close */ static int PreparedStatement_isc_drop(PreparedStatement *self, boolean allowed_to_raise ) { ISC_STATUS *sv; assert (self->cur != NULL); assert (self->cur->trans != NULL); #if (defined(ENABLE_CONNECTION_TIMEOUT) && !defined(NDEBUG)) { CConnection *con = Transaction_get_con(self->cur->trans); assert (con != NULL); if (Connection_timeout_enabled(con)) { assert (CURRENT_THREAD_OWNS_CON_TP(con)); if (RUNNING_IN_CONNECTION_TIMEOUT_THREAD) { assert (con->timeout->state == CONOP_IDLE); } } } #endif sv = self->cur->status_vector; /* Notice that unlike in PreparedStatement_isc_close, there's no * assert (PreparedStatement_is_open(self)); * statement here. That's because it's possible for a statement handle to * be allocated, but for preparation to fail later in the process, as when * there's a syntax error. In that case, self's state won't have been moved * to PS_STATE_OPEN. */ { /* This code can be reached when the CTT is timing out a connection. In * that case, we want the GIL to remain held during the entire timeout * operation. */ OPEN_LOCAL_GIL_MANIPULATION_INFRASTRUCTURE #ifdef ENABLE_CONNECTION_TIMEOUT const boolean should_manip_gil = NOT_RUNNING_IN_CONNECTION_TIMEOUT_THREAD; if (should_manip_gil) { #endif LEAVE_GIL_WITHOUT_AFFECTING_DB_AND_WITHOUT_STARTING_CODE_BLOCK #ifdef ENABLE_CONNECTION_TIMEOUT } #endif ENTER_GDAL_WITHOUT_LEAVING_PYTHON isc_dsql_free_statement(sv, &self->stmt_handle, /* DSQL_drop means "free resources allocated for this statement; we're * closing it permanently." 
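* Contrast DSQL_close in PreparedStatement_isc_close above, which merely closes the statement's open result set so that the handle can be executed again.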
*/ DSQL_drop ); LEAVE_GDAL_WITHOUT_ENTERING_PYTHON #ifdef ENABLE_CONNECTION_TIMEOUT if (should_manip_gil) { #endif ENTER_GIL_WITHOUT_AFFECTING_DB_AND_WITHOUT_ENDING_CODE_BLOCK #ifdef ENABLE_CONNECTION_TIMEOUT } #endif CLOSE_LOCAL_GIL_MANIPULATION_INFRASTRUCTURE } /* end of lock manipulation scope */ if (DB_API_ERROR(sv)) { raise_sql_exception(OperationalError, "Error while trying to drop PreparedStatement's statement handle: ", sv ); if (allowed_to_raise) { return -1; } else { SUPPRESS_EXCEPTION; } } self->stmt_handle = NULL_STMT_HANDLE; self->state = PS_STATE_DROPPED; assert (!PreparedStatement_is_open(self)); return 0; } /* PreparedStatement_isc_drop */ static int PreparedStatement_close_without_unlink(PreparedStatement *self, boolean allowed_to_raise ) { if (self->sql != NULL) { Py_DECREF(self->sql); self->sql = NULL; } if (self->in_sqlda != NULL) { kimem_xsqlda_free(self->in_sqlda); self->in_sqlda = NULL; } if (self->in_sqlda_sqlind_array != NULL) { kimem_main_free(self->in_sqlda_sqlind_array); self->in_sqlda_sqlind_array = NULL; } if (self->out_sqlda != NULL) { kimem_xsqlda_free(self->out_sqlda); self->out_sqlda = NULL; } if (self->in_var_orig_spec != NULL) { kimem_plain_free(self->in_var_orig_spec); self->in_var_orig_spec = NULL; } if (self->out_buffer != NULL) { kimem_main_free(self->out_buffer); self->out_buffer = NULL; } PreparedStatement_clear_description_tuple(self); /* Save the operations that might fail for last: */ if (self->cur != NULL) { /* If self is the cursor's current PreparedStatement, need to ensure that * the cursor realizes self has closed so it won't try to perform * operations using self in the future. */ if (self->cur->ps_current == self) { if (self->cur->state != CURSOR_STATE_CLOSED) { Cursor_clear_and_leave_open(self->cur); } /* The lack of Py_DECREF here is deliberate, because the ps_current * member of Cursor never (conceptually) contains an owned reference. */ self->cur->ps_current = NULL; } /* Note that we don't DECREF or clear self->cur here; that's for * PreparedStatement_close_with_unlink. */ } if (self->stmt_handle != NULL_STMT_HANDLE) { assert (self->cur != NULL); if (PreparedStatement_isc_drop(self, allowed_to_raise) != 0) { goto fail; } } assert (self->stmt_handle == NULL_STMT_HANDLE); /* We don't clear self->cur here or remove self from cur's tracker. That's * up to PreparedStatement_close_with_unlink. 
*/ self->state = PS_STATE_DROPPED; return 0; fail: assert (PyErr_Occurred()); return -1; } /* PreparedStatement_close_without_unlink */ static int PreparedStatement_untrack_with_superior_ref_clear_control( PreparedStatement *self, const boolean allowed_to_raise, const boolean clear_superior_refs ) { if (PreparedStatement_close_without_unlink(self, allowed_to_raise) != 0) { return -1; } assert (self->state == PS_STATE_DROPPED); assert (self->cur != NULL); if (clear_superior_refs) { PreparedStatement_clear_references_to_superiors(self); assert (self->cur == NULL); } return 0; } /* PreparedStatement_untrack_with_superior_ref_clear_control */ static int PreparedStatement_untrack(PreparedStatement *self, boolean allowed_to_raise ) { return PreparedStatement_untrack_with_superior_ref_clear_control( self, allowed_to_raise, TRUE ); } /* PreparedStatement_untrack */ static int PreparedStatement_close_with_unlink(PreparedStatement *self, boolean allowed_to_raise ) { if (self->state != PS_STATE_DROPPED) { if (PreparedStatement_close_without_unlink(self, allowed_to_raise) != 0) { goto fail; } } if (self->cur != NULL) { /* If self was for internal use, there's no need to manually remove self * from cur->cur->ps_cache_internal, because the cur will only kill one of * its internal use PreparedStatements as part of the act of removing it * from cur->ps_cache_internal. */ if (!self->for_internal_use) { /* Remove self from the cursor's open prepared statement tracker. * Normally self will be in the ps_tracker, but PreparedStatements are * not inserted until the end of the preparation process, so if an error * occurred, self won't be there. This can occur routinely (e.g., if * user submits erroneous SQL to Cursor.prep), so with the final boolean * parameter, we direct PSTracker_remove not to complain if the self is * missing. */ if (PSTracker_remove(&self->cur->ps_tracker, self, FALSE) != 0) { if (allowed_to_raise) { goto fail; } else { SUPPRESS_EXCEPTION; } } } PreparedStatement_clear_references_to_superiors(self); assert (self->cur == NULL); } assert (allowed_to_raise ? self->state == PS_STATE_DROPPED : TRUE); return 0; fail: assert (PyErr_Occurred()); return -1; } /* PreparedStatement_close_with_unlink */ /*********** PreparedStatement METHODS INACCESSIBLE TO PYTHON:END ************/ /*********** PreparedStatement METHODS ACCESSIBLE TO PYTHON:BEGIN ************/ static void pyob_PreparedStatement___del__(PreparedStatement *self) { assert ( !self->for_internal_use ? NOT_RUNNING_IN_CONNECTION_TIMEOUT_THREAD : TRUE ); if (self->cur != NULL) { Cursor *cur = self->cur; /* We want to make sure that cur remains alive until we're done destroying * self, but if this destructor is being called as a result of the * execution of cur's destructor, we most definitely must not manipulate * cur's reference count, which would cause cur to be "resurrected" and * then for its destructor to execute again! * Also, if self is for internal use, it never physically owns a reference * to the cursor, so the reference count should not be manipulated. 
const boolean should_manipulate_cursor_refcnt = ( !self->for_internal_use && cur->ob_refcnt != 0 ); PyObject *con_python_wrapper = NULL; CConnection *con; assert (cur->trans != NULL); con = Transaction_get_con(cur->trans); assert (con != NULL); con_python_wrapper = Transaction_get_con_python_wrapper(cur->trans); assert (con_python_wrapper != NULL); { /* hoop-jump for C90's restrictive scoping */ const boolean needed_to_acquire_tp = !CURRENT_THREAD_OWNS_CON_TP(con); /* Make sure con stays alive until we're done with it. Note that cur * might not stay alive until the end of this destructor. */ if (should_manipulate_cursor_refcnt) { assert (cur->ob_refcnt != 0); Py_INCREF(cur); } Py_INCREF(con); Py_INCREF(con_python_wrapper); #ifdef ENABLE_CONNECTION_TIMEOUT if (needed_to_acquire_tp) { ACQUIRE_CON_TP_WITH_GIL_HELD(con); } #endif if (PreparedStatement_close_with_unlink(self, TRUE) == 0) { assert (self->cur == NULL); } else { SUPPRESS_EXCEPTION; } #ifdef ENABLE_CONNECTION_TIMEOUT if (needed_to_acquire_tp) { RELEASE_CON_TP(con); } #endif /* ENABLE_CONNECTION_TIMEOUT */ } /* hoop-jump for C90's restrictive scoping */ if (should_manipulate_cursor_refcnt) { assert (cur->ob_refcnt != 0); Py_DECREF(cur); } Py_DECREF(con); Py_DECREF(con_python_wrapper); } /* end of if (self->cur != NULL) block */ /* Release the PreparedStatement struct itself: */ PyObject_Del(self); } /* pyob_PreparedStatement___del__ */ /************ PreparedStatement METHODS ACCESSIBLE TO PYTHON:END *************/ /************ PreparedStatement ATTRIBUTE GET/SET METHODS:BEGIN **************/ static PyObject *pyob_PreparedStatement_sql_get( PreparedStatement *self, void *closure ) { PyObject *py_result; PS_REQ_OPEN(self); py_result = self->sql != NULL ? self->sql : Py_None; Py_INCREF(py_result); return py_result; } /* pyob_PreparedStatement_sql_get */ static PyObject *pyob_PreparedStatement_statement_type_get( PreparedStatement *self, void *closure ) { /* A PreparedStatement cannot function even minimally without knowing its own * statement type, so kinterbasdb should never have allowed a PS that doesn't * know its statement type to become accessible to Python client code. * * Because a PreparedStatement's underlying Cursor could close without the * PS's consent, though, we ensure that the PS is open before returning the * statement_type. */ int statement_type; PS_REQ_OPEN(self); statement_type = self->statement_type; if (statement_type != NULL_STATEMENT_TYPE) { return PyInt_FromLong(statement_type); } else { raise_exception(InternalError, "This PreparedStatement does not know its" " own statement_type; kinterbasdb should not have allowed it to become" " accessible to client code." ); return NULL; } } /* pyob_PreparedStatement_statement_type_get */ static PyObject *pyob_PreparedStatement_plan_get( PreparedStatement *self, void *closure ) { PyObject *ret = NULL; PS_REQ_OPEN(self); assert (self->cur != NULL); #ifdef ENABLE_CONNECTION_TIMEOUT CUR_ACTIVATE__FORBID_TRANSPARENT_RESUMPTION(self->cur, return NULL); #endif /* ENABLE_CONNECTION_TIMEOUT */ assert (self->cur->trans != NULL); assert (Transaction_get_con(self->cur->trans) != NULL); CON_MUST_ALREADY_BE_ACTIVE(Transaction_get_con(self->cur->trans)); ret = _generic_single_item_isc_dsql_sql_info_request( &self->stmt_handle, self->cur->status_vector, isc_info_sql_get_plan, 1 /* Skip 1 byte (newline character at beginning of plan string.
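* For example, a raw plan string resembling "\nPLAN (SOME_TABLE NATURAL)" reaches Python client code as "PLAN (SOME_TABLE NATURAL)"; SOME_TABLE is just an illustrative relation name.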
*/ ); if (ret == NULL) { goto fail; } goto clean; fail: assert (PyErr_Occurred()); if (ret != NULL) { Py_DECREF(ret); ret = NULL; } /* Fall through to clean: */ clean: #ifdef ENABLE_CONNECTION_TIMEOUT CUR_PASSIVATE(self->cur); CON_MUST_NOT_BE_ACTIVE(Transaction_get_con(self->cur->trans)); #endif /* ENABLE_CONNECTION_TIMEOUT */ return ret; } /* pyob_PreparedStatement_plan_get */ #define _make_n_direction_params_getter(direction) \ /* direction is expected to be 'in' or 'out'. */ \ static PyObject *pyob_PreparedStatement_n_ ## direction ## put_params_get( \ PreparedStatement *self, void *closure \ ) \ { \ PS_REQ_OPEN(self); \ assert (self->cur != NULL); \ { \ XSQLDA *sqlda = self->direction ## _sqlda; \ \ if (sqlda != NULL) { \ return PyInt_FromLong(sqlda->sqld); \ } else { \ raise_exception(InternalError, "Unexpected PreparedStatement state:" \ " the PS is considered 'open', but has no " # direction "put" \ "_sqlda." \ ); \ return NULL; \ } \ } \ } _make_n_direction_params_getter(in) _make_n_direction_params_getter(out) /************* PreparedStatement ATTRIBUTE GET/SET METHODS:END ***************/ /************************** UTILITY FUNCTIONS:BEGIN **************************/ static boolean _check_statement_length(Py_ssize_t length) { /* Although the sql-statement-length parameter to such Firebird API functions * as isc_dsql_prepare and isc_dsql_execute_immediate is an unsigned short, * the documentation says that the length can be left zero for null- * terminated strings, in which case the database engine will figure out the * length itself. * As of 2003.02.13, Firebird cannot handle SQL statements longer than the * maximum value of an unsigned short even if zero is passed as the length. * * Test the length and raise an exception if it's too long for safe passage * to SQL-handling isc_* functions. Return TRUE if OK; FALSE otherwise. */ assert (length >= 0); if (length <= (Py_ssize_t) USHRT_MAX) { return TRUE; } else { PyObject *py_length = PyLong_FromUnsignedLongLong( (unsigned LONG_LONG) length ); if (py_length != NULL) { PyObject *py_length_str = PyObject_Str(py_length); if (py_length_str != NULL) { PyObject *err_msg = PyString_FromFormat( "SQL statement of %s bytes is too long (max %d allowed). Consider" " using bound parameters to shorten the SQL code, rather than" " passing large values as part of the SQL string.", PyString_AS_STRING(py_length_str), USHRT_MAX ); if (err_msg != NULL) { raise_exception(ProgrammingError, PyString_AS_STRING(err_msg)); Py_DECREF(err_msg); } Py_DECREF(py_length_str); } Py_DECREF(py_length); } return FALSE; } } /* _check_statement_length */ static PyObject *_generic_single_item_isc_dsql_sql_info_request( isc_stmt_handle *stmt_handle, ISC_STATUS *sv, const char request_code, const short skip_bytes_at_beginning_of_result ) { char req_buf[] = {0}; char *res_buf = NULL; /* The size of the buffer is automatically increased if necessary. */ unsigned short res_buf_size = 128; short result_length = -1; PyObject *py_result = NULL; ENTER_GDAL req_buf[0] = request_code; for (;;) { assert (res_buf == NULL); res_buf = kimem_plain_malloc(res_buf_size); if (res_buf == NULL) { LEAVE_GDAL_WITHOUT_ENDING_CODE_BLOCK /* Was calling kimem_plain_malloc; need to set MemoryError explicitly. 
*/ PyErr_NoMemory(); goto fail; } isc_dsql_sql_info(sv, stmt_handle, sizeof(req_buf), req_buf, res_buf_size, res_buf ); if (DB_API_ERROR(sv)) { LEAVE_GDAL_WITHOUT_ENDING_CODE_BLOCK raise_sql_exception(OperationalError, "isc_dsql_sql_info failed: ", sv); goto fail; } { const char first_byte = res_buf[0]; if (first_byte == isc_info_truncated) { /* The result buffer wasn't large enough. Free the original result * buffer and double the integer that will determine its size in the * next pass, then jump to the next pass. */ kimem_plain_free(res_buf); res_buf = NULL; res_buf_size *= 2; continue; } else if (first_byte == isc_info_end) { /* No result is available for this request; return None. */ LEAVE_GDAL_WITHOUT_ENDING_CODE_BLOCK Py_INCREF(Py_None); py_result = Py_None; goto exit; } else if (first_byte != request_code) { LEAVE_GDAL_WITHOUT_ENDING_CODE_BLOCK { PyObject *err_msg = PyString_FromFormat( "Unexpected code in result buffer. Expected %c; got %c.", request_code, first_byte ); if (err_msg != NULL) { raise_exception(InternalError, PyString_AS_STRING(err_msg)); Py_DECREF(err_msg); } goto fail; } } } result_length = (short) isc_vax_integer(res_buf + 1, sizeof(short)); break; } LEAVE_GDAL assert (result_length >= 0); if (skip_bytes_at_beginning_of_result > result_length) { raise_exception(InternalError, "byte skip directive would overflow result." ); goto fail; } { const short read_n_bytes = result_length - skip_bytes_at_beginning_of_result; if (read_n_bytes != 0) { py_result = PyString_FromStringAndSize( res_buf + 3 + skip_bytes_at_beginning_of_result, read_n_bytes ); } else { py_result = PyString_FromStringAndSize("", 0); } } if (py_result != NULL) { goto exit; } /* Fall through to fail: */ fail: assert (PyErr_Occurred()); if (py_result != NULL) { Py_DECREF(py_result); py_result = NULL; } /* Fall through to exit: */ exit: if (res_buf != NULL) { kimem_plain_free(res_buf); } return py_result; } /* _generic_single_item_isc_dsql_sql_info_request */ static int _determine_statement_type( isc_stmt_handle *stmt_handle, ISC_STATUS *status_vector ) { /* Given a pointer to the handle of a prepared statement, dynamically * determine the database engine's internal statement type code. * * This function is essentially a special case of * _generic_single_item_isc_dsql_sql_info_request, optimized for speed * (it must be called for each and every statement preparation in * kinterbasdb). */ int stmt_type; short stmt_type_length; static const char sql_info_stmt_type_req[] = {isc_info_sql_stmt_type}; #define sql_info_res_buf_size 8 char sql_info_res_buf[sql_info_res_buf_size]; ENTER_GDAL isc_dsql_sql_info(status_vector, stmt_handle, sizeof(sql_info_stmt_type_req), #ifndef FIREBIRD_1_5_OR_LATER (char *) /* Cast away constness. */ #endif sql_info_stmt_type_req, sql_info_res_buf_size, sql_info_res_buf ); if (DB_API_ERROR(status_vector)) { LEAVE_GDAL_WITHOUT_ENDING_CODE_BLOCK raise_sql_exception(OperationalError, "_determine_statement_type: ", status_vector ); goto fail; } { const char first_byte = sql_info_res_buf[0]; if (first_byte == isc_info_truncated) { /* Since we know the required size of the information item we're * requesting, this should never actually happen (and therefore we don't * need to adjust the size dynamically). */ LEAVE_GDAL_WITHOUT_ENDING_CODE_BLOCK raise_exception(InternalError, "_determine_statement_type: statically" " sized result buffer was too small." 
); goto fail; } else if (first_byte != isc_info_sql_stmt_type) { LEAVE_GDAL_WITHOUT_ENDING_CODE_BLOCK raise_exception(InternalError, "_determine_statement_type: expected" " first byte of result buffer to be isc_info_sql_stmt_type." ); goto fail; } } stmt_type_length = (short) isc_vax_integer(sql_info_res_buf + 1, sizeof(short) ) ; stmt_type = (int) isc_vax_integer(sql_info_res_buf + 3, stmt_type_length); LEAVE_GDAL return stmt_type; fail: assert (PyErr_Occurred()); return -1; } /* _determine_statement_type */ /*************************** UTILITY FUNCTIONS:END ***************************/ /******** PreparedStatement CLASS DEFINITION AND INITIALIZATION:BEGIN ********/ static PyMethodDef PreparedStatement_methods[] = { {NULL} /* sentinel */ }; static PyGetSetDef PreparedStatement_getters_setters[] = { {"sql", (getter) pyob_PreparedStatement_sql_get, NULL, "ASCII representation of the raw SQL string that underlies this" " PreparedStatement." }, {"statement_type", (getter) pyob_PreparedStatement_statement_type_get, NULL, "An int that matches one of the kinterbasdb.isc_info_sql_stmt_*" " constants. For example, the following comparison is True:" "\n cur.prep(\"insert into test values (?,?)\").statement_type ==" " kinterbasdb.isc_info_sql_stmt_insert" }, {"plan", (getter) pyob_PreparedStatement_plan_get, NULL, "The PLAN string generated by the optimizer." }, {"n_input_params", (getter) pyob_PreparedStatement_n_input_params_get, NULL, "The number of input parameters expected by the statement." }, {"n_output_params", (getter) pyob_PreparedStatement_n_output_params_get, NULL, "The number of output parameters returned by the statement." }, {"description", (getter) pyob_PreparedStatement_description_get, NULL, "A DB API description tuple (of the same format as Cursor.description)" " that describes the statement's output parameters." }, {NULL} /* sentinel */ }; PyTypeObject PreparedStatementType = { /* new-style class */ PyObject_HEAD_INIT(NULL) 0, /* ob_size */ "kinterbasdb.PreparedStatement", /* tp_name */ sizeof(PreparedStatement), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor) pyob_PreparedStatement___del__, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ (cmpfunc) PreparedStatement_compare,/* tp_compare */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ 0, /* tp_flags */ 0, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ PreparedStatement_methods, /* tp_methods */ NULL, /* tp_members */ PreparedStatement_getters_setters, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ /* Currently using PreparedStatement_create only at the C level, so there's * no conventional __init__ method: */ 0, /* tp_init */ 0, /* tp_alloc */ 0, /* tp_new */ }; static int init_kidb_preparedstatement(void) { /* PreparedStatementType is a new-style class, so PyType_Ready must be called * before its getters and setters will function. */ if (PyType_Ready(&PreparedStatementType) < 0) { goto fail; } return 0; fail: /* This function is indirectly called by the module loader, which makes no * provision for error recovery. 
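 *
 * (For exposition, the expected call shape inside the module's init
 * function is simply:
 *   if (init_kidb_preparedstatement() < 0) return;  // abort module load
 * PyType_Ready fills in the type's inherited slots and must precede any
 * attribute access on instances of the type.)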
*/ return -1; } /********* PreparedStatement CLASS DEFINITION AND INITIALIZATION:END *********/ /* PSTracker MEMBER FUNC DEFINITIONS AND SUPPORTING FUNCS: BEGIN */ #include "_kisupport_lifo_linked_list.h" LIFO_LINKED_LIST_DEFINE_BASIC_METHODS_PYALLOC_NOQUAL(PSTracker, PreparedStatement) LIFO_LINKED_LIST_DEFINE_TRAVERSE_NOQUAL(PSTracker, PreparedStatement) /* PSTracker MEMBER FUNC DEFINITIONS AND SUPPORTING FUNCS: END */ kinterbasdb-3.3.0/_kievents_infra.c0000644000175000001440000011057711130647414016533 0ustar pcisarusers/* KInterbasDB Python Package - Implementation of Events Support * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ #ifdef ENABLE_DB_EVENT_SUPPORT /* This file is designed to be directly included in _kievents.c; the code here * has been placed in this file for organizational rather than compilational * purposes. * * Notes: * - NO CODE IN THIS FILE *EVER* HOLDS THE GIL, OR MANIPULATES PYTHON OBJECTS * IN ANY WAY. */ #define REQUIRE_EXECUTING_IN_EVENT_OP_THREAD(EventOpThreadContext_) \ assert (Thread_ids_equal( \ Thread_current_id(), (EventOpThreadContext_)->event_op_thread_id \ )); static int EventOpThreadContext_close_all_except_admin_comm_objects( EventOpThreadContext *self ); static int EventOpThreadContext_record_error( EventOpThreadContext *self, const char *preamble ); /************************* EventOpNode METHODS:BEGIN *************************/ static void EventOpNode_del(void *_n) { /* EventOpNodes can have complex payloads, the components of which might need * to be freed. */ EventOpNode *n = (EventOpNode *) _n; assert (n != NULL); if (n->payload != NULL) { switch (n->op_code) { case OP_RECORD_AND_REREGISTER: { EventCallbackOutputNode *cbn = (EventCallbackOutputNode *) n->payload; if (cbn->updated_buf != NULL) { kimem_plain_free(cbn->updated_buf); } } break; case OP_CONNECT: { ConnParamsNode *p = (ConnParamsNode *) n->payload; if (p->dsn != NULL) { kimem_plain_free(DV_STR(p->dsn)); } if (p->dpb != NULL) { kimem_plain_free(DV_STR(p->dpb)); } } break; /* Didn't use 'default:' here because want compiler warning if new opcode * is added but this switch isn't updated to handle it. */ case OP_DIE: case OP_CALLBACK_ERROR: case OP_REGISTER: break; } kimem_plain_free(DV_VOID(n->payload)); } /* Note that this function frees the memory of the EventOpNode struct * itself, leaving the caller holding an invalid pointer. */ kimem_plain_free(n); } /* EventOpNode_del */ /************************** EventOpNode METHODS:END **************************/ /************************* EventOpQueue METHODS:BEGIN ************************/ /* EventOpQueue doesn't actually exist as a type distinct from * ThreadSafeFIFOQueue, but for organizational purposes, it's convenient to * pretend that it is a distinct type. 
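 *
 * Ownership convention for these queue wrappers (see EventOpQueue_request
 * below): a successful put transfers the node, its payload, and a
 * destructor to the queue; after a *failed* put, the node shell is freed
 * here but the payload remains the caller's responsibility. Sketch of the
 * intended call shape (build_payload and free_payload are hypothetical):
 *
 *   void *payload = build_payload();
 *   if (EventOpQueue_request(q, OP_CONNECT, NO_TAG, payload) != 0) {
 *     free_payload(payload);  // put failed; we still own the payload
 *   }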
*/ static int EventOpQueue_request(ThreadSafeFIFOQueue *self, EventOpThreadOpCode op_code, int tag, void *payload ) { EventOpNode *n = (EventOpNode *) kimem_plain_malloc(sizeof(EventOpNode)); if (n == NULL) { goto fail; } n->op_code = op_code; n->tag = tag; n->payload = payload; if (ThreadSafeFIFOQueue_put(self, n, EventOpNode_del) != 0) { goto fail; } return 0; fail: if (n != NULL) { /* Notice that we free n itself, but we don't call EventOpNode_del, which * would free n->payload. Since this call failed, the caller is * responsible for freeing n->payload. */ kimem_plain_free(n); } return -1; } /* EventOpQueue_request */ /************************** EventOpQueue METHODS:END *************************/ /*********************** AdminResponseNode METHODS:BEGIN *********************/ static void AdminResponseNode_del(void *_n) { AdminResponseNode *n = (AdminResponseNode *) _n; assert (n != NULL); if (n->message != NULL) { kimem_plain_free(DV_STR(n->message)); } /* Note that this function frees the memory of the AdminResponseNode struct * itself, leaving the caller holding an invalid pointer. */ kimem_plain_free(n); } /* AdminResponseNode_del */ /************************ AdminResponseNode METHODS:END **********************/ /*********************** AdminResponseQueue METHODS:BEGIN ********************/ /* AdminResponseQueue doesn't actually exist as a type distinct from * ThreadSafeFIFOQueue, but for organizational purposes, it's convenient to * pretend that it is a distinct type. */ static int AdminResponseQueue_post(ThreadSafeFIFOQueue *self, EventOpThreadOpCode op_code, long status, ISC_STATUS sql_error_code, char *msg ) { AdminResponseNode *n = (AdminResponseNode *) kimem_plain_malloc( sizeof(AdminResponseNode) ); if (n == NULL) { goto fail; } n->op_code = op_code; n->status = status; n->sql_error_code = sql_error_code; if (msg == NULL) { n->message = NULL; } else { const size_t msg_len = strlen(msg); if (msg_len > 0) { n->message = kimem_plain_malloc(msg_len + 1); if (n->message == NULL) { goto fail; } strncpy(DV_STR(n->message), msg, msg_len + 1); assert (n->message[msg_len] == '\0'); } } if (ThreadSafeFIFOQueue_put(self, n, AdminResponseNode_del) != 0) { goto fail; } return 0; fail: if (n != NULL) { /* Notice that we free n itself, but *not* n->message. Since this call * failed, the caller is responsible for freeing n->message. */ kimem_plain_free(n); } return -1; } /* AdminResponseQueue_post */ static int AdminResponseQueue_require(ThreadSafeFIFOQueue *self, EventOpThreadOpCode op_code, long status, ISC_STATUS *sql_error_code, char **message, long timeout_millis ) { /* This method gets an AdminResponseNode from the queue, potentially waiting * up to timeout_millis. A timeout is considered failure. * * Once the node has been retrieved, if its op_code and status members do not * match the provided values, this function will set *message to the node's * message, then release the node (but not its message, obviously) and return * -1. Note that the node's message might be NULL. * * Otherwise, both the node and its message will be released, and this * function will return 0. */ int res = -1; AdminResponseNode *n = NULL; WaitResult wait_res; assert (self != NULL); assert (*sql_error_code == 0); assert (*message == NULL); wait_res = ThreadSafeFIFOQueue_get(self, timeout_millis, (void **) &n); if (wait_res == WR_WAIT_OK) { assert (n != NULL); res = (n->op_code == op_code && n->status == status) ? 
0 : -1; } if (n != NULL) { if (res != 0) { *sql_error_code = DV_ISC_STATUS(n->sql_error_code); *message = DV_STR(n->message); } else { if (n->message != NULL) { kimem_plain_free(DV_STR(n->message)); n->message = NULL; } } kimem_plain_free(n); n = NULL; } return res; } /* AdminResponseQueue_require */ /************************ AdminResponseQueue METHODS:END *********************/ /************************ EventFiredNode METHODS:BEGIN ***********************/ static void EventFiredNode_del(void *_n) { assert (_n != NULL); /* At present, the counts member is a fixed-size array, so it doesn't need to * be freed separately from the EventFiredNode struct. */ /* Note that this function frees the memory of the EventFiredNode struct * itself, leaving the caller holding an invalid pointer. */ kimem_plain_free(_n); } /* EventFiredNode_del */ /************************* EventFiredNode METHODS:END ************************/ /************************ EventFiredQueue METHODS:BEGIN **********************/ /* EventFiredQueue doesn't actually exist as a type distinct from * ThreadSafeFIFOQueue, but for organizational purposes, it's convenient to * pretend that it is a distinct type. */ static WaitResult EventFiredQueue_get(ThreadSafeFIFOQueue *self, long timeout_millis, EventFiredNode **n ) { WaitResult wait_res = WR_WAIT_ERROR; assert (self != NULL); assert (*n == NULL); wait_res = ThreadSafeFIFOQueue_get(self, timeout_millis, (void **) n); assert (wait_res != WR_WAIT_OK ? *n == NULL : TRUE); return wait_res; } /* EventFiredQueue_get */ static int EventFiredQueue_post(ThreadSafeFIFOQueue *self, int block_number, ISC_ULONG *counts ) { int res = -1; int i; EventFiredNode *count_node = (EventFiredNode *) kimem_plain_malloc(sizeof(EventFiredNode)); if (count_node == NULL) { goto fail; } count_node->block_number = block_number; for (i = 0; i < EVENT_BLOCK_SIZE; i++) { count_node->counts[i] = (long) counts[i]; } if (ThreadSafeFIFOQueue_put(self, count_node, EventFiredNode_del) != 0) { goto fail; } res = 0; goto exit; fail: assert (res == -1); if (count_node != NULL) { kimem_plain_free(count_node); } /* Fall through to exit: */ exit: return res; } /* EventFiredQueue_post */ /************************* EventFiredQueue METHODS:END ***********************/ /****************** EventCallbackThreadContext METHODS:BEGIN *****************/ static int EventCallbackThreadContext_init(EventCallbackThreadContext *self, ThreadSafeFIFOQueue *op_q, int block_number ) { /* This method is called by the thread that creates the EventConduit, *not* * by the EventCallbackThread itself (the EventCallbackThread needs its * context to be initialized before it can run). */ assert (self->state == ECALL_UNINITIALIZED); self->state = ECALL_DUMMY; self->block_number = block_number; self->op_thread_id = 0; self->op_q = op_q; if (Mutex_init(&self->lock) != 0) { goto fail; } return 0; fail: return -1; } /* EventCallbackThreadContext_init */ static int EventCallbackThreadContext_close(EventCallbackThreadContext *self) { /* This method is called by the thread that's closing the EventConduit, *not* * by the EventCallbackThread itself. And obviously this method isn't * thread-safe. 
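 *
 * (The safe ordering, judging from the teardown code elsewhere in this
 * file: the event registration has been cancelled and the EventOpThread
 * has terminated before this close runs, so no other thread can touch the
 * context while it is being torn down.)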
*/ self->state = ECALL_DEAD; self->op_q = NULL; if (Mutex_close(&self->lock) != 0) { return -1; } return 0; } /* EventCallbackThreadContext_close */ static void EventCallbackThreadContext__event_callback( EventCallbackThreadContext *self, const unsigned short updated_buf_len, const UPDATED_BUF_SIGNEDNESS char *updated_buf ) { /* In all but one of its iterations in a given context, this function is * called by a thread (the EventCallbackThread) that's started and managed by * the DB client library. Such calls simply pass notification of the event * to the EventOpThread by posting an EventCallbackOutputNode to its op_q. * * In the exceptional case, this function is executed by the EventOpThread * when it calls isc_cancel_events. */ boolean succeeded = FALSE; EventCallbackOutputNode *n = NULL; if (Mutex_lock(&self->lock) != 0) { return; } /* Critical section within these brackets: */ { if (Thread_ids_equal(Thread_current_id(), self->op_thread_id)) { self->state = ECALL_DEAD; succeeded = TRUE; } else if (self->state == ECALL_DEAD) { /* The EventOpThread must've cancelled the registration while we (the * EventCallbackThread) were waiting for self->lock; just exit. */ succeeded = TRUE; } else { assert (self->state == ECALL_DUMMY || self->state == ECALL_NORMAL); n = kimem_plain_malloc(sizeof(EventCallbackOutputNode)); if (n != NULL) { boolean allocation_succeeded = TRUE; n->block_number = self->block_number; if (updated_buf_len == 0) { n->updated_buf = NULL; } else { n->updated_buf = kimem_plain_malloc(updated_buf_len); allocation_succeeded = (n->updated_buf != NULL); if (allocation_succeeded) { memcpy(n->updated_buf, updated_buf, updated_buf_len); } } if (allocation_succeeded) { /* Enqueue the op request (note that we send self->state in the * event op node's ->tag so that the EventOpThread will know * whether it's receiving a dummy notification or the real thing). */ if (EventOpQueue_request(DV_Q(self->op_q), OP_RECORD_AND_REREGISTER, self->state, n ) == 0 ) { succeeded = TRUE; if (self->state == ECALL_DUMMY) { self->state = ECALL_NORMAL; } } else { assert (succeeded == FALSE); self->state = ECALL_DEAD; } } } } if (!succeeded) { if (n != NULL) { if (n->updated_buf != NULL) { kimem_plain_free(n->updated_buf); } kimem_plain_free(n); } /* If the op queue has already been cancelled, then obviously the * EventOpThread (which spends most of its time waiting on that very * queue) knows that the event registration that this callback was * attempting to support is no longer valid. Otherwise, post an * OP_CALLBACK_ERROR. */ if (!ThreadSafeFIFOQueue_is_cancelled(DV_Q(self->op_q))) { if (EventOpQueue_request(DV_Q(self->op_q), OP_CALLBACK_ERROR, NO_TAG, NULL ) != 0 ) { /* In a low memory situation, it's entirely possible that posting a * failure message to the op queue will fail. In such a case, we * resort to forcibly cancelling the op queue so its listener (the * EventOpThread) won't deadlock. * * If even that cancellation attempt doesn't work, we resort to * taking down the entire process (it's better to do something * drastic than to let a deadlock go undiagnosed.
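 *
 * The complete escalation ladder around this point, in order:
 *   1. try to post OP_CALLBACK_ERROR to the op queue;
 *   2. if that post fails (e.g., allocation failure), cancel the op queue
 *      so the EventOpThread's blocking get returns instead of waiting
 *      forever;
 *   3. if even cancellation fails, print a diagnostic and exit(1).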
*/ if (ThreadSafeFIFOQueue_cancel(DV_Q(self->op_q)) != 0) { fprintf(stderr, "EventCallbackThreadContext__event_callback" " killing process after fatal error to avoid deadlock.\n" ); exit(1); } } } } } Mutex_unlock(&self->lock); } /* EventCallbackThreadContext__event_callback */ /******************* EventCallbackThreadContext METHODS:END ******************/ /********************* EventOpThreadContext METHODS:BEGIN ********************/ static int EventOpThreadContext_init(EventOpThreadContext *self, /* Pointer to the queue into which EventFiredNodes should be posted. */ ThreadSafeFIFOQueue *event_q, int n_event_blocks ) { /* This method is called by the thread that creates the EventConduit, *not* * by the EventOpThread itself (the EventOpThread needs its context to be * initialized before it can run). */ int i; PlatformMutexType *lock = &self->lock; ThreadSafeFIFOQueue *op_q = &self->op_q; ThreadSafeFIFOQueue *admin_response_q = &self->admin_response_q; boolean init_lock = FALSE; boolean init_op_q = FALSE; boolean init_admin_response_q = FALSE; int init_n_erbs = 0; /* Nullify self's fields: */ self->state = OPTHREADSTATE_NONE; self->event_op_thread_id = 0; self->n_event_blocks = n_event_blocks; self->er_blocks = NULL; self->db_handle = NULL_DB_HANDLE; self->error_info = NULL; self->event_q = event_q; /* Initialize self's fields: */ /* lock: */ if (Mutex_init(lock) != 0) { goto fail; } init_lock = TRUE; /* op_q: */ if (ThreadSafeFIFOQueue_init(op_q) != 0) { goto fail; } init_op_q = TRUE; /* admin_response_q: */ if (ThreadSafeFIFOQueue_init(admin_response_q) != 0) { goto fail; } init_admin_response_q = TRUE; /* er_blocks: */ self->er_blocks = kimem_plain_malloc( sizeof(EventRequestBlock) * n_event_blocks ); if (self->er_blocks == NULL) { goto fail; } for (i = 0; i < n_event_blocks; i++) { EventRequestBlock *erb = DV_ERB(self->er_blocks + i); erb->event_id = NULL_EVENT_ID; erb->req_buf = NULL; erb->req_buf_len = -1; erb->callback_ctx.state = ECALL_UNINITIALIZED; } for (i = 0; i < n_event_blocks; i++) { EventRequestBlock *erb = DV_ERB(self->er_blocks + i); if (EventCallbackThreadContext_init(DV_CALCTX(&erb->callback_ctx), op_q, i) != 0 ) { goto fail; } ++init_n_erbs; } return 0; fail: if (init_lock) { Mutex_close(lock); } if (init_op_q) { ThreadSafeFIFOQueue_close(op_q); } if (init_admin_response_q) { ThreadSafeFIFOQueue_close(admin_response_q); } if (self->er_blocks != NULL) { for (i = 0; i < init_n_erbs; i++) { EventRequestBlock *erb = DV_ERB(self->er_blocks + i); EventCallbackThreadContext_close(DV_CALCTX(&erb->callback_ctx)); } kimem_plain_free(DV_ERB(self->er_blocks)); } return -1; } /* EventOpThreadContext_init */ static int EventOpThreadContext_change_state_while_already_locked( EventOpThreadContext *self, EventOpThreadState new_state, char *msg ) { /* The caller must already hold self->lock. 
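 *
 * (Callers reach this function either while directly holding the lock, or
 * via EventOpThreadContext_change_state below, which acquires the lock
 * around this call. Note also the monotonicity contract asserted below:
 * transitions only move "forward" through EventOpThreadState, so a DEAD or
 * FATALLY_WOUNDED context is never revived.) */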
*/ #ifndef NDEBUG const EventOpThreadState old_state = self->state; #endif ISC_STATUS sql_error_code = 0; assert (new_state > old_state); self->state = new_state; switch (new_state) { case OPTHREADSTATE_FATALLY_WOUNDED: EventOpThreadContext_close_all_except_admin_comm_objects(self); /* This non-routine death requires an explicit post to avoid stalling the * administrative thread(s): */ if (self->error_info != NULL && self->error_info->msg != NULL) { msg = self->error_info->msg; sql_error_code = self->error_info->code; } if (msg == NULL) { msg = "EventOpThread encountered unspecified fatal error."; } if (AdminResponseQueue_post(&self->admin_response_q, OP_DIE, -1, sql_error_code, msg ) != 0 ) { /* Posting to the admin response queue didn't work (perhaps because of * a low memory condition), so cancel it, if that hasn't already been * done by another thread. */ if (!ThreadSafeFIFOQueue_is_cancelled(&self->admin_response_q)) { if (ThreadSafeFIFOQueue_cancel(&self->admin_response_q) != 0) { /* Not even cancelling worked, so we know something is dreadfully * wrong. Kill the process to avoid deadlock. */ fprintf(stderr, "EventOpThreadContext_change_state_while_already_locked" " killing process after fatal error to avoid deadlock.\n" ); exit(1); } } } break; case OPTHREADSTATE_DEAD: EventOpThreadContext_close_all_except_admin_comm_objects(self); /* It is the EventConduit's responsibility to release our most basic * members via EventOpThreadContext_close. */ break; default: break; } return 0; } /* EventOpThreadContext_change_state_while_already_locked */ static int EventOpThreadContext_change_state(EventOpThreadContext *self, EventOpThreadState new_state, char *msg ) { int res = -1; if (Mutex_lock(&self->lock) != 0) { goto exit; } /* Critical section within these brackets: */ { res = EventOpThreadContext_change_state_while_already_locked( self, new_state, msg ); } if (Mutex_unlock(&self->lock) != 0) { goto exit; } exit: return res; } /* EventOpThreadContext_change_state */ static boolean EventOpThreadContext_has_state(EventOpThreadContext *self, EventOpThreadState requested_state ) { /* This function has the obvious limitation that the state could change * between the time the calling thread reads it and the time the calling * thread acts upon it. However, this function is only used by the * EventOpThread's supervising thread in situations where this potential for * inconsistency is not a problem. */ boolean has_requested_state; if (Mutex_lock(&self->lock) != 0) { goto fail; } /* Critical section within these brackets: */ { has_requested_state = (boolean) (self->state == requested_state); } if (Mutex_unlock(&self->lock) != 0) { goto fail; } return has_requested_state; fail: return FALSE; } /* EventOpThreadContext_has_state */ static int EventOpThreadContext_free_er_blocks(EventOpThreadContext *self) { /* The caller should hold the GDAL while calling. 
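 *
 * (Both API touch-points below -- isc_cancel_events and
 * kimem_db_client_free -- are subject to the GDAL discipline, so the
 * requirement is hoisted to the caller rather than entering the GDAL once
 * per event block here.)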
*/ int i; if (self->er_blocks != NULL) { for (i = 0; i < self->n_event_blocks; i++) { EventRequestBlock *erb = DV_ERB(self->er_blocks + i); /* event_id: */ if (erb->event_id != NULL_EVENT_ID) { isc_cancel_events(DV_STATVEC(self->sv), DV_DB_HANDLE_PTR(&self->db_handle), DV_ISC_LONG_PTR(&erb->event_id) ); if (DB_API_ERROR(self->sv)) { EventOpThreadContext_record_error(self, "EventOpThreadContext_free_er_blocks: " ); goto fail; } erb->event_id = NULL_EVENT_ID; } /* req_buf: */ if (erb->req_buf != NULL) { /* The kimem_db_client_* functions require that the GDAL be held when * they're called, but the function we're currently in places the same * requirement on its callers, so we needn't acquire the GDAL * here. */ kimem_db_client_free(DV_STR(erb->req_buf)); erb->req_buf = NULL; } /* req_buf_len: */ erb->req_buf_len = -1; if (EventCallbackThreadContext_close(DV_CALCTX(&erb->callback_ctx)) != 0 ) { goto fail; } } kimem_plain_free(DV_ERB(self->er_blocks)); self->er_blocks = NULL; } return 0; fail: return -1; } /* EventOpThreadContext_free_er_blocks */ static int EventOpThreadContext_close_DB_API_members( EventOpThreadContext *self ) { /* This function assumes that the caller is already holding self->lock, and * that it's running in the EventOpThread itself. * * It is safe to call this function multiple times on the same * EventOpThreadContext. */ int res = -1; REQUIRE_EXECUTING_IN_EVENT_OP_THREAD(self); ENTER_GDAL_WITHOUT_LEAVING_PYTHON if (EventOpThreadContext_free_er_blocks(self) != 0) { goto fail; } /* db_handle: */ if (self->db_handle != NULL_DB_HANDLE) { #ifdef FIREBIRD_1_0_ONLY /* See lengthy note about workaround for apparent FB 1.0.x bug in the * 'fail' block of _kievents.c/pyob_EventConduit_create. */ if (self->state != OPTHREADSTATE_FATALLY_WOUNDED) { #endif ENTER_GCDL_WITHOUT_LEAVING_PYTHON /* YYY:2006.08.08: * With FB 1.0, 1.5 or 2.0, a crash sometimes occurs when this thread * attempts to detach its connection, *even though* this is the same thread * that created the connection, and the only thread to ever perform any * operations on it. * It is widely agreed that the FB 1.5.x client library has a bug that can * cause problems such as this; it seems to me that the FB 1.0 and 2.0 * client libraries have the same problem. * I've observed this problem on both Win32 and Linux. * In the KInterbasDB test suite, * __fb_client_lib_event_on_isc_detach_database.py is a simple example of * code that provokes the problem. */ isc_detach_database(DV_STATVEC(self->sv), DV_DB_HANDLE_PTR(&self->db_handle)); LEAVE_GCDL_WITHOUT_ENTERING_PYTHON if (DB_API_ERROR(self->sv)) { EventOpThreadContext_record_error(self, "EventOpThreadContext_close_DB_API_members: " ); goto fail; } #ifdef FIREBIRD_1_0_ONLY } #endif self->db_handle = NULL_DB_HANDLE; } res = 0; goto exit; fail: assert (res == -1); /* Fall through to exit. */ exit: LEAVE_GDAL_WITHOUT_ENTERING_PYTHON return res; } /* EventOpThreadContext_close_DB_API_members */ static int EventOpThreadContext_reject_all_requests( EventOpThreadContext *self ) { if (ThreadSafeFIFOQueue_cancel(&self->op_q) != 0) { goto fail; } /* Note that we do *not* close self->admin_response_q. That's only closed * when the supervising thread is finished with this EventOpThreadContext * entirely.
*/ if (ThreadSafeFIFOQueue_cancel(DV_Q(self->event_q)) != 0) { goto fail; } return 0; fail: return -1; } /* EventOpThreadContext_reject_all_requests */ static int EventOpThreadContext_close_all_except_admin_comm_objects( EventOpThreadContext *self ) { if (EventOpThreadContext_reject_all_requests(self) != 0) { goto fail; } if (EventOpThreadContext_close_DB_API_members(self) != 0) { goto fail; } return 0; fail: return -1; } /* EventOpThreadContext_close_all_except_admin_comm_objects */ static int EventOpThreadContext_close(EventOpThreadContext *self) { /* EventOpThreadContext_close_all_except_admin_comm_objects should've been * called previously, and it should've caused all of the EventRequestBlocks * to be released. */ assert (self->er_blocks == NULL); if (ThreadSafeFIFOQueue_cancel(&self->admin_response_q) != 0) { goto fail; } if (Mutex_close(&self->lock) != 0) { goto fail; } if (self->error_info != NULL) { NonPythonSQLErrorInfo_destroy(self->error_info); self->error_info = NULL; } return 0; fail: return -1; } /* EventOpThreadContext_close */ static int EventOpThreadContext_record_error( EventOpThreadContext *self, const char *preamble ) { /* The caller should hold the GDAL while calling. */ NonPythonSQLErrorInfo *se = extract_sql_error_without_python( DV_STATVEC(self->sv), preamble ); if (se == NULL) { goto fail; } if (self->error_info != NULL) { NonPythonSQLErrorInfo_destroy(self->error_info); } self->error_info = se; return 0; fail: return -1; } /********************** EventOpThreadContext METHODS:END *********************/ /************************ EventOpThread METHODS:BEGIN ************************/ /* The functions in this section are to be called only by the EventOpThread. */ static int EventOpThread_connect(EventOpThreadContext *ctx, EventOpNode *n, char **msg ) { int res = -1; ConnParamsNode *cp = (ConnParamsNode *) n->payload; assert (cp != NULL); assert (cp->dsn != NULL); assert (cp->dsn_len > 0); assert (cp->dpb != NULL); assert (cp->dpb_len > 0); if (Mutex_lock(&ctx->lock) != 0) { goto fail; } /* Critical section within these brackets: */ { REQUIRE_EXECUTING_IN_EVENT_OP_THREAD(ctx); assert (ctx->db_handle == NULL_DB_HANDLE); ENTER_GDAL_WITHOUT_LEAVING_PYTHON /* This thread never holds the GIL. */ ENTER_GCDL_WITHOUT_LEAVING_PYTHON isc_attach_database(DV_STATVEC(ctx->sv), cp->dsn_len, DV_STR(cp->dsn), DV_DB_HANDLE_PTR(&ctx->db_handle), cp->dpb_len, DV_STR(cp->dpb) ); LEAVE_GCDL_WITHOUT_ENTERING_PYTHON LEAVE_GDAL_WITHOUT_ENTERING_PYTHON /* This thread never holds the GIL. */ if (DB_API_ERROR(ctx->sv)) { ENTER_GDAL_WITHOUT_LEAVING_PYTHON EventOpThreadContext_record_error(ctx, "EventOpThread_connect: "); LEAVE_GDAL_WITHOUT_ENTERING_PYTHON goto unlock; } assert (ctx->db_handle != NULL_DB_HANDLE); EventOpThreadContext_change_state_while_already_locked(ctx, OPTHREADSTATE_READY, NULL ); res = 0; } unlock: if (Mutex_unlock(&ctx->lock) != 0) { goto fail; } goto exit; fail: assert (res == -1); /* Fall through to exit: */ exit: return res; } /* EventOpThread_connect */ static int EventOpThread_register( EventOpThreadContext *ctx, int block_number ) { int res = -1; if (Mutex_lock(&ctx->lock) != 0) { goto fail; } /* Critical section within these brackets: */ { EventRequestBlock *erb; REQUIRE_EXECUTING_IN_EVENT_OP_THREAD(ctx); erb = DV_ERB(ctx->er_blocks + block_number); ENTER_GDAL_WITHOUT_LEAVING_PYTHON /* This thread never holds the GIL.
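 *
 * For orientation, the steady-state event cycle driven from here is:
 *
 *   isc_que_events(...)                 // register; returns immediately
 *     -> __event_callback(updated_buf)  // invoked from the client
 *                                       //   library's callback thread
 *     -> OP_RECORD_AND_REREGISTER       // posted to this thread's op_q
 *     -> isc_event_counts(...)          // diff counts, refresh req_buf
 *     -> isc_que_events(...)            // re-register for the next firing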
*/ isc_que_events(DV_STATVEC(ctx->sv), DV_DB_HANDLE_PTR(&ctx->db_handle), DV_ISC_LONG_PTR(&erb->event_id), erb->req_buf_len, #ifdef FIREBIRD_2_0_OR_LATER (ISC_UCHAR *) #endif DV_STR(erb->req_buf), (EVENT_CALLBACK_FUNCTION) EventCallbackThreadContext__event_callback, DV_CALCTX(&erb->callback_ctx) ); LEAVE_GDAL_WITHOUT_ENTERING_PYTHON /* This thread never holds the GIL. */ if (DB_API_ERROR(ctx->sv)) { ENTER_GDAL_WITHOUT_LEAVING_PYTHON EventOpThreadContext_record_error(ctx, "EventOpThread_register: "); LEAVE_GDAL_WITHOUT_ENTERING_PYTHON goto unlock; } res = 0; } unlock: if (Mutex_unlock(&ctx->lock) != 0) { goto fail; } goto exit; fail: assert (res == -1); /* Fall through to exit: */ exit: return res; } /* EventOpThread_register */ static int EventOpThread_record_and_reregister(EventOpThreadContext *ctx, EventOpNode *n ) { int res = -1; ISC_ULONG counts_dest[EVENT_BLOCK_SIZE]; EventCallbackOutputNode *cb_node = (EventCallbackOutputNode *) n->payload; /* The payload is an EventCallbackOutputNode bearing (in its 'counts' member) * a buffer that contains event counts updated to reflect the event firing * that caused this EventOpNode to be created. * * While in the critical section, we use isc_event_counts to calculate the * difference between the event counts before and after this firing; that * difference is stored temporarily in counts_dest (would've used ctx->sv, as * recommended by docs, but Firebird's x86-64 port is incompatible with that * approach). * * isc_event_counts also updates the appropriate EventRequestBlock's req_buf * to reflect the new counts, so that buffer will be ready for another call * to isc_que_events. */ if (Mutex_lock(&ctx->lock) != 0) { goto fail; } /* Critical section within these brackets: */ { EventRequestBlock *erb; REQUIRE_EXECUTING_IN_EVENT_OP_THREAD(ctx); /* If the payload is NULL or the EventCallbackThread encountered any other * error, this thread needs to die also. */ if ( cb_node == NULL || !(n->tag == ECALL_DUMMY || n->tag == ECALL_NORMAL) ) { goto fail_with_unlock; } assert ( cb_node->block_number >= 0 && cb_node->block_number <= ctx->n_event_blocks ); assert (cb_node->updated_buf != NULL); erb = DV_ERB(ctx->er_blocks + cb_node->block_number); /* isc_event_counts (implemented as gds__event_counts in jrd/utl.cpp) is a * simple function; the only lock we need to hold while calling it is a * lock on ctx. */ isc_event_counts(counts_dest, erb->req_buf_len, #ifdef FIREBIRD_2_0_OR_LATER (ISC_UCHAR *) #endif DV_STR(erb->req_buf), cb_node->updated_buf ); /* erb->req_buf must be updated via isc_event_counts even after the dummy * run, but of course we don't post to the event_q after the dummy run. */ if (n->tag != ECALL_DUMMY) { if (EventFiredQueue_post(DV_Q(ctx->event_q), cb_node->block_number, counts_dest ) != 0 ) { goto fail_with_unlock; } } else { /* Don't report back to the thread supervising the EventOpThread until * the dummy run associated with the last EventRequestBlock completes. * There's no deadlock danger in this strategy, because any callback that * encounters an error will take the necessary measures to ensure that * the EventOpThread is informed, and the EventOpThread will in turn make * sure the supervising thread is informed. */ if (cb_node->block_number == ctx->n_event_blocks - 1) { if (AdminResponseQueue_post(&ctx->admin_response_q, OP_REGISTER, 0, 0, NULL ) != 0 ) { goto fail_with_unlock; } } } } if (Mutex_unlock(&ctx->lock) != 0) { goto fail; } /* We must not hold ctx->lock while calling EventOpThread_register. 
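 *
 * (EventOpThread_register acquires ctx->lock itself, and nothing here
 * suggests the platform mutexes are recursive, so calling it with the lock
 * still held would risk self-deadlock on this thread.)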
*/ if (EventOpThread_register(ctx, cb_node->block_number) != 0) { goto fail; } res = 0; /* Only now do we consider this operation a success. */ goto exit; fail_with_unlock: Mutex_unlock(&ctx->lock); /* Fall through to fail: */ fail: assert (res == -1); /* Fall through to exit: */ exit: return res; } /* EventOpThread_record_and_reregister */ static PlatformThreadFuncReturnType THREAD_FUNC_MODIFIER EventOpThread_main( void *_context ) { EventOpThreadContext *ctx = (EventOpThreadContext *) _context; PlatformThreadFuncReturnType status = THREAD_FUNC_RETURN_FAILURE; EventOpNode *n = NULL; char *msg = NULL; /* Some operations are only supposed to run once; these boolean flags are * used to detect a second request for such an op. */ boolean OP_CONNECT_has_already_been_requested = FALSE; boolean OP_REGISTER_has_already_been_requested = FALSE; boolean OP_DIE_has_already_been_requested = FALSE; int i; if (Mutex_lock(&ctx->lock) != 0) { goto die_with_error; } /* Critical section within these brackets: */ { REQUIRE_EXECUTING_IN_EVENT_OP_THREAD(ctx); assert (ctx->state == OPTHREADSTATE_NONE); EventOpThreadContext_change_state_while_already_locked( ctx, OPTHREADSTATE_WAITING_FOR_CONNECTION_REQUEST, NULL ); { const PlatformThreadIdType thread_id = Thread_current_id(); for (i = 0; i < ctx->n_event_blocks; i++) { EventRequestBlock *erb = DV_ERB(ctx->er_blocks + i); erb->callback_ctx.op_thread_id = thread_id; } } } if (Mutex_unlock(&ctx->lock) != 0) { goto die_with_error; } #define _RELEASE_EVENT_OP_NODE_IF_NECESSARY(node) \ if (node != NULL) { \ EventOpNode_del(node); \ node = NULL; \ } /* Note that ctx->lock is not *directly* manipulated within this loop (though * it is manipulated by functions that the code here calls). */ for (;;) { assert (n == NULL); if (ThreadSafeFIFOQueue_get(&ctx->op_q, WAIT_INFINITELY_LONG, (void **) &n) != WR_WAIT_OK ) { goto die_with_error; } switch(n->op_code) { case OP_RECORD_AND_REREGISTER: /* The routine case. */ if (EventOpThread_record_and_reregister(ctx, n) != 0) { goto die_with_error; } break; case OP_CALLBACK_ERROR: { ISC_STATUS sql_error_code = 0; char *msg = "EventOpThread detected fatal error in EventCallbackThread."; if (ctx->error_info != NULL && ctx->error_info->msg != NULL) { sql_error_code = ctx->error_info->code; msg = ctx->error_info->msg; } AdminResponseQueue_post(&ctx->admin_response_q, OP_CALLBACK_ERROR, -1, sql_error_code, msg ); goto die_with_error; } break; case OP_CONNECT: /* Should happen only once per EventOpThread. */ if (OP_CONNECT_has_already_been_requested) { goto die_with_error; } OP_CONNECT_has_already_been_requested = TRUE; if (EventOpThread_connect(ctx, n, &msg) != 0) { goto die_with_error; } if (AdminResponseQueue_post(&ctx->admin_response_q, OP_CONNECT, 0, 0, NULL ) != 0 ) { goto die_with_error; } break; case OP_REGISTER: /* Should happen only once per EventOpThread. */ if (OP_REGISTER_has_already_been_requested) { goto die_with_error; } OP_REGISTER_has_already_been_requested = TRUE; for (i = 0; i < ctx->n_event_blocks; i++) { if (EventOpThread_register(ctx, i) != 0) { goto die_with_error; } } /* Notice that we don't immediately indicate to the admin thread that * the OP_REGISTER was successful. Instead, we move to the next loop * iteration and wait for the EventCallbackThread to indicate the * success of the dummy run in each EventCallbackThreadContext; only * once we know that outcome do we report back to the waiting admin * thread (that reporting back is performed in * EventOpThread_record_and_reregister). 
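 *
 * The complete registration handshake, for reference:
 *   admin thread:     posts OP_REGISTER, then blocks on admin_response_q
 *   this thread:      calls isc_que_events for every block (the dummy run)
 *   callback thread:  fires once per block with the initial counts
 *   this thread:      on the dummy notification for the *last* block,
 *                     posts the OP_REGISTER acknowledgement
 *   admin thread:     wakes up; registration is complete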
*/ break; case OP_DIE: /* Should happen only once per EventOpThread. */ if (OP_DIE_has_already_been_requested) { goto die_with_error; } OP_DIE_has_already_been_requested = TRUE; if (EventOpThreadContext_change_state(ctx, OPTHREADSTATE_DEAD, "EventOpThread received OP_DIE request." ) != 0 ) { goto die_with_error; } AdminResponseQueue_post(&ctx->admin_response_q, OP_DIE, 0, 0, NULL); status = THREAD_FUNC_RETURN_SUCCESS; goto exit; } msg = NULL; _RELEASE_EVENT_OP_NODE_IF_NECESSARY(n); } die_with_error: assert (status == THREAD_FUNC_RETURN_FAILURE); EventOpThreadContext_change_state(ctx, OPTHREADSTATE_FATALLY_WOUNDED, msg); exit: _RELEASE_EVENT_OP_NODE_IF_NECESSARY(n); return status; } /* EventOpThread_main */ /************************* EventOpThread METHODS:END *************************/ #endif /* ENABLE_DB_EVENT_SUPPORT */ kinterbasdb-3.3.0/_kicore_connection_timeout.h0000644000175000001440000003637011130647414020770 0ustar pcisarusers/* KInterbasDB Python Package - Header File for Connection Timeout * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ #ifndef _KICORE_CONNECTION_TIMEOUT_H #define _KICORE_CONNECTION_TIMEOUT_H #ifdef ENABLE_CONNECTION_TIMEOUT #include "_kinterbasdb.h" #include "_kisupport.h" /* Type definitions: */ typedef enum { CONOP_IDLE, /* There is no operation active on the Connection at present, and it has * not timed out. */ CONOP_ACTIVE, /* There is an operation active, and the Connection has not timed out. */ CONOP_TIMED_OUT_TRANSPARENTLY, /* The Connection was closed by the ConnectionTimeoutThread, and can be * transparently resurrected because no transaction was in progress when * the timeout occurred. */ CONOP_TIMED_OUT_NONTRANSPARENTLY, /* The Connection was closed by the ConnectionTimeoutThread, and cannot be * transparently resurrected because a transaction was in progress when the * timeout occurred. The next attempt to use the Connection (directly, by * calling a method of the Connection, or indirectly, by calling a method * of a dependent object such as a Cursor) will raise a * kinterbasdb.ConnectionTimedOut exception. */ CONOP_PERMANENTLY_CLOSED } ConnectionOpState; /* The user-supplied before_callback returns one of these codes to indicate how * the ConnectionTimeoutThread should proceed: */ typedef enum { CT_VETO, /* Don't time the connection out, and don't check it again until at least a * full ->timeout_period has elapsed. */ CT_ROLLBACK, /* Roll back the connection's unresolved transaction (if any) before timing * the connection out. */ CT_COMMIT, /* Commit the connection's unresolved transaction (if any) before timing * the connection out. */ CT_NONTRANSPARENT /* Roll back the connection's unresolved transaction (if any) and time the * connection out nontransparently (so that future attempts to use it will * raise a kinterbasdb.ConnectionTimedOut exception). */ } CTCallbackVerdict; const CTCallbackVerdict CT_DEFAULT = CT_NONTRANSPARENT; typedef struct _ConnectionTimeoutParams { /* Each Connection for which timeout is enabled has a ConnectionTimeoutParams * instance associated with it. 
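 *
 * Putting the verdict codes above together, the ConnectionTimeoutThread's
 * per-connection decision runs roughly like this sketch (call_before,
 * commit_pending, rollback_pending and mark_nontransparent are
 * placeholders for logic that lives in the corresponding .c file):
 *
 *   CTCallbackVerdict v = (tp->py_callback_before != NULL)
 *     ? call_before(tp) : CT_DEFAULT;
 *   switch (v) {
 *     case CT_VETO:           return;  // recheck a full period later
 *     case CT_COMMIT:         commit_pending(con); break;
 *     case CT_ROLLBACK:       rollback_pending(con); break;
 *     case CT_NONTRANSPARENT: rollback_pending(con);
 *                             mark_nontransparent(tp); break;
 *   }
 *   // ...then detach the connection and record the timed-out state.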
*/ PyThread_type_lock lock; PlatformThreadIdType owner; volatile ConnectionOpState state; /* state and timeout_period are placed together so that on a 32-bit platform, * they won't need padding (reduces size of struct from 48 to 40 bytes on * Win32). */ volatile long timeout_period; volatile LONG_LONG connected_at; volatile LONG_LONG last_active; /* soonest_might_time_out = last_active + timeout_period: */ volatile LONG_LONG soonest_might_time_out; /* User-supplied callbacks: */ volatile PyObject *py_callback_before; volatile PyObject *py_callback_after; } ConnectionTimeoutParams; /* The global ConnectionTimeoutManager uses a linked list of these to keep * track of all connections on which timeout is enabled: */ typedef struct _ConnectionTracker { volatile CConnection *contained; volatile struct _ConnectionTracker *next; } ConnectionTracker; /* Cast away volatile qualifier: */ #define DV_CT(ct_ptr) ((ConnectionTracker *) (ct)) struct ConnectionTimeoutManager { /* There's only one instance of ConnectionTimeoutManager (named global_ctm) * in the entire process. */ PlatformMutexType lock; /* There are too many differences between Windows Events and pthread * condition variables for us to handle them with a uniform abstraction. */ #ifdef PLATFORM_WINDOWS HANDLE #else pthread_cond_t #endif reconsider_wait_interval; volatile Py_ssize_t n_cons; /* Number of nodes in linked list cons. */ volatile ConnectionTracker *cons; /* The soonest point in time (expressed in milliseconds since the epoch) that * one of the connections tracked in cons might need to be timed out: */ volatile LONG_LONG soonest_next_connection_might_timeout; /* timeout_thread_py is a reference to a Python object, but a typical client * of global_ctm does not hold the GIL. The only operation performed on * timeout_thread_py without the GIL held is to nullify it, so no problem. */ PyObject *timeout_thread_py; PlatformThreadRefType timeout_thread; PlatformThreadIdType timeout_thread_id; volatile boolean ctt_should_stop; } global_ctm; /* Misc. 
constraints: */ #define RUNNING_IN_CONNECTION_TIMEOUT_THREAD \ (Thread_ids_equal(Thread_current_id(), global_ctm.timeout_thread_id)) #define NOT_RUNNING_IN_CONNECTION_TIMEOUT_THREAD \ (!RUNNING_IN_CONNECTION_TIMEOUT_THREAD) #define S_IN_14_DAYS 1209600 #define MS_IN_14_DAYS 1209600000 #define TIMEOUT_PERIOD_IS_IN_RANGE(period) \ ((period) > 0 && (period) <= MS_IN_14_DAYS) /* ConnectionTimeoutManager method prototypes: */ static int CTM_initialize(void); static int CTM_add(volatile CConnection *con, ConnectionTimeoutParams *tp); static int CTM_remove(volatile CConnection *con); #define CTM_LOCK \ debug_print3("CTM-> ?ACQUIRE: %ld file %s line %d\n", \ PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); \ Mutex_lock(&global_ctm.lock); \ debug_print3("CTM-> !!ACQUIRED: %ld file %s line %d\n", \ PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); #define CTM_UNLOCK \ debug_print3("CTM-> ?RELEASE: %ld file %s line %d\n", \ PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); \ Mutex_unlock(&global_ctm.lock); \ debug_print3("CTM-> !!RELEASED: %ld file %s line %d\n", \ PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); /* ConnectionTimeoutParams method prototypes: */ static ConnectionTimeoutParams *ConnectionTimeoutParams_create( long period, PyObject *py_callback_before, PyObject *py_callback_after ); static int _ConnectionTimeoutParams_destroy_( ConnectionTimeoutParams **tp_, boolean should_destroy_lock ); static int ConnectionTimeoutParams_destroy(ConnectionTimeoutParams **tp_); static ConnectionOpState ConnectionTimeoutParams_trans_while_already_locked( ConnectionTimeoutParams *tp, ConnectionOpState expected_old_state, ConnectionOpState requested_new_state ); static ConnectionOpState ConnectionTimeoutParams_trans( ConnectionTimeoutParams *tp, ConnectionOpState expected_old_state, ConnectionOpState requested_new_state ); static void _ConnectionTimeoutParams_touch(ConnectionTimeoutParams *tp); /* Connection activation and deactivation: */ #define TP_RECORD_OWNERSHIP(tp) \ (tp)->owner = Thread_current_id() #define TP_RELEASE_OWNERSHIP(tp) \ (tp)->owner = THREAD_ID_NONE static boolean CURRENT_THREAD_OWNS_TP(ConnectionTimeoutParams *tp) { assert (tp != NULL); return Thread_ids_equal(Thread_current_id(), tp->owner); } static boolean CURRENT_THREAD_OWNS_CON_TP(CConnection *con) { assert (con != NULL); if (!Connection_timeout_enabled(con)) { return TRUE; } else { ConnectionTimeoutParams *tp = con->timeout; assert (tp != NULL); return CURRENT_THREAD_OWNS_TP(tp); } } /* TP_LOCK must not be called when the GIL is held; use * ACQUIRE_TP_WITH_GIL_HELD for that. Violating this rule could result in a * deadlock between the violating thread and the ConnectionTimeoutThread. 
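 *
 * The cycle being avoided, concretely:
 *   routine thread:  holds the GIL, blocks waiting for tp->lock
 *   timeout thread:  holds tp->lock, blocks waiting for the GIL
 * Neither can proceed. ACQUIRE_TP_WITH_GIL_HELD (below) breaks the cycle
 * by releasing the GIL before waiting on tp->lock.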
*/ #define TP_LOCK(tp) \ debug_print4("TP(%p)-> ?ACQUIRE: %ld file %s line %d\n", \ tp, PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); \ PyThread_acquire_lock((tp)->lock, WAIT_LOCK); \ TP_RECORD_OWNERSHIP(tp); \ debug_print4("TP(%p)-> !!ACQUIRED: %ld file %s line %d\n", \ tp, PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); static boolean TP_TRYLOCK(ConnectionTimeoutParams *tp); #define TP_UNLOCK(tp) \ debug_print4("TP(%p)-> ?RELEASE: %ld file %s line %d\n", \ tp, PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); \ TP_RELEASE_OWNERSHIP(tp); \ PyThread_release_lock((tp)->lock); \ debug_print4("TP(%p)-> !!RELEASED: %ld file %s line %d\n", \ tp, PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); /* ACQUIRE_TP_WITH_GIL_HELD: * (This macro is meant to be used only by the thread that performs routine * operations on a connection (ROT), not by the ConnectionTimeoutThread (CTT).) * * There can be at most two threads vying for control of a connection at once: * - the ROT * - the CTT * * ACQUIRE_TP_WITH_GIL_HELD, invoked by the ROT, first tries to acquire the * lock over the ConnectionTimeoutParams object tp. If that attempt fails, the * ROT knows that the CTT was holding the lock. The CTT sometimes tries to * acquire the GIL while it is holding a ConnectionTimeoutParams lock, so in * order to avoid deadlock when the ROT detects that the CTT is holding * tp->lock, the ROT must release the GIL and wait to acquire tp->lock before * reacquiring the GIL. */ #define ACQUIRE_TP_WITH_GIL_HELD(tp) \ debug_print4("TP(%p)-> ?TRY-ACQUIRE: %ld file %s line %d\n", \ tp, PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); \ if (TP_TRYLOCK(tp)) { \ debug_print4("TP(%p)-> ?TRY-ACQUIRED: %ld file %s line %d\n", \ tp, PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); \ } else { \ debug_print4("TP(%p)-> ?TRY-ACQUIRE-FAILED: %ld file %s line %d\n", \ tp, PyThread_get_thread_ident(), __FILE__, __LINE__ \ ); \ { \ PyThreadState *tstate = PyThreadState_Get(); \ LEAVE_GIL_USING_THREADSTATE(tstate); \ TP_LOCK(tp); \ ENTER_GIL_USING_THREADSTATE(tstate); \ } \ } #define ACQUIRE_CON_TP_WITH_GIL_HELD(con) \ if (Connection_timeout_enabled(con)) { \ ACQUIRE_TP_WITH_GIL_HELD((con)->timeout); \ } /* RELEASE_CON_TP is the inverse of ACQUIRE_CON_TP_WITH_GIL_HELD. */ #define RELEASE_CON_TP(con) \ if (Connection_timeout_enabled(con)) { \ TP_UNLOCK((con)->timeout); \ } static int Connection_activate(CConnection *con, const boolean con_tp_already_locked, const boolean allow_transparent_resumption ); /* Connection Activation and passivation macros: */ #define _CON_ACTIVATE( \ con, failure_action, con_tp_already_locked, allow_transparent_resumption \ ) \ /* The GIL must be held when this macro is used. 
*/ \ assert (con != NULL); \ if (Connection_activate(con, con_tp_already_locked, \ allow_transparent_resumption \ ) != 0 \ ) \ { \ assert (PyErr_Occurred()); \ failure_action; \ } #define CON_ACTIVATE(con, failure_action) \ _CON_ACTIVATE(con, failure_action, FALSE, TRUE) #define CON_ACTIVATE__FORBID_TRANSPARENT_RESUMPTION(con, failure_action) \ _CON_ACTIVATE(con, failure_action, FALSE, FALSE) #define CON_ACTIVATE__ALREADY_LOCKED(con, failure_action) \ _CON_ACTIVATE(con, failure_action, TRUE, TRUE) #define CON_ACTIVATE__FORBID_TRANSPARENT_RESUMPTION__ALREADY_LOCKED( \ con, failure_action \ ) \ _CON_ACTIVATE(con, failure_action, TRUE, FALSE) #define _CON_PASSIVATE(con, state_trans_func) \ if (Connection_timeout_enabled(con)) { \ LONG_LONG orig_last_active; \ ConnectionOpState achieved_state; \ \ assert ((con)->timeout->state == CONOP_ACTIVE); \ orig_last_active = (con)->timeout->last_active; \ /* This should never fail: */ \ achieved_state = state_trans_func( \ (con)->timeout, CONOP_ACTIVE, CONOP_IDLE \ ); \ assert (achieved_state == CONOP_IDLE); \ /* The last_active timestamp should now be later than it was (but it \ * might be mathematically the same if the timer resolution is * course). */ \ assert ((con)->timeout->last_active - orig_last_active >= 0); \ } #define CON_PASSIVATE(con) \ _CON_PASSIVATE(con, ConnectionTimeoutParams_trans) #define CON_PASSIVATE__ALREADY_LOCKED(con) \ _CON_PASSIVATE(con, ConnectionTimeoutParams_trans_while_already_locked) #define CON_MUST_ALREADY_BE_ACTIVE(con) \ /* This macro performs no locking because it's only intended to be used in \ * places where the connection is expected to *retain a state of \ * CONOP_ACTIVE set earlier while a lock was held* (the \ * ConnectionTimeoutThread never times out a connection that has state \ * CONOP_ACTIVE, so the fact that the lock has been released since is no \ * problem). */ \ assert ((con) != NULL); \ assert ( \ !Connection_timeout_enabled(con) \ || (con)->timeout->state == CONOP_ACTIVE \ ); #define CON_MUST_NOT_BE_ACTIVE(con) \ assert (con != NULL); \ assert ( \ !Connection_timeout_enabled(con) \ || (con)->timeout->state != CONOP_ACTIVE \ ); #else /* not defined(ENABLE_CONNECTION_TIMEOUT) */ #define RUNNING_IN_CONNECTION_TIMEOUT_THREAD FALSE #define NOT_RUNNING_IN_CONNECTION_TIMEOUT_THREAD TRUE /* Connection Activation and Passivation macros: */ #define _CON_ACTIVATE( \ con, failure_action, \ con_tp_already_locked, allow_transparent_resumption \ ) \ assert (con != NULL); \ if (con == null_connection || (con)->state != CON_STATE_OPEN) { \ raise_exception(ProgrammingError, "Invalid connection state. The" \ " connection must be open to perform this operation." 
\ ); \ failure_action; \ } #define CON_ACTIVATE(con, failure_action) \ _CON_ACTIVATE(con, failure_action, FALSE, FALSE) #define CON_ACTIVATE__FORBID_TRANSPARENT_RESUMPTION(con, failure_action) \ CON_ACTIVATE(con, failure_action) #define CON_PASSIVATE(con) #define CON_MUST_ALREADY_BE_ACTIVE(con) #define CON_MUST_NOT_BE_ACTIVE(con) #endif /* defined(ENABLE_CONNECTION_TIMEOUT) */ /* Cursor activation and deactivation: */ #define CUR_REQUIRE_OPEN_WITH_FAILURE(cursor, failure_action) \ if (_Cursor_require_open(cursor, NULL) != 0) { failure_action; } #define CUR_REQUIRE_OPEN(cursor) \ CUR_REQUIRE_OPEN_WITH_FAILURE(cursor, return NULL) #define CUR_REQUIRE_OPEN2(cursor, failure_message) \ if (_Cursor_require_open(cursor, failure_message) != 0) { return NULL; } #define _CUR_ACTIVATE(cursor, failure_action, allow_transparent_resumption) \ assert (cursor != NULL); \ if ((cursor)->trans != NULL) { \ /* If cursor has a connection at all, _CON_ACTIVATE should be executed \ * before CUR_REQUIRE_OPEN_WITH_FAILURE, so that a ConnectionTimedOut \ * exception will be thrown (instead of ProgrammingError) where \ * appropriate: */ \ CConnection *con = Transaction_get_con((cursor)->trans); \ if (con != NULL) { \ _CON_ACTIVATE(con, failure_action, FALSE, allow_transparent_resumption); \ } \ } \ CUR_REQUIRE_OPEN_WITH_FAILURE(cursor, failure_action); \ #define CUR_ACTIVATE(cursor, failure_action) \ _CUR_ACTIVATE(cursor, failure_action, TRUE) #define CUR_ACTIVATE__FORBID_TRANSPARENT_RESUMPTION(cursor, failure_action) \ _CUR_ACTIVATE(cursor, failure_action, FALSE) #define CUR_PASSIVATE(cursor) \ assert (cursor != NULL); \ assert ((cursor)->trans != NULL); \ assert (Transaction_get_con((cursor)->trans) != NULL); \ CON_PASSIVATE(Transaction_get_con((cursor)->trans)) #endif /* not def _KICORE_CONNECTION_TIMEOUT_H */ kinterbasdb-3.3.0/_kiconversion_field_precision.c0000644000175000001440000003471711130647414021454 0ustar pcisarusers/* KInterbasDB Python Package - Implementation of Field Precision Determination * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ /* This source file is designed to be directly included in _kiconversion.c, * without the involvement of a header file. 
*/ #define ENTITY_TYPE_UNKNOWN 0 #define ENTITY_TYPE_TABLE 1 #define ENTITY_TYPE_STORED_PROCEDURE 2 #define ENTITY_TYPE_LAST ENTITY_TYPE_STORED_PROCEDURE static PyObject *determine_field_precision( short entity_type_code, char *entity_name, short entity_name_length, char *field_name, short field_name_length, Cursor *cursor ) { /* Returns: * - a new reference to a PyObject * containing the precision figure on * success (may be the PyInt zero) * - a new reference to a PyObject * containing the PyInt zero on routine * inability to determine precision (as in the case of dynamic fields) * - NULL on error (will already have set an exception) */ int status = -1; PyObject *precision = NULL; PyObject *result_cache = NULL; PyObject *result_cache_this_entity = NULL; const char *sql_statement_table = "SELECT FIELD_SPEC.RDB$FIELD_PRECISION" " FROM RDB$FIELDS FIELD_SPEC, RDB$RELATION_FIELDS REL_FIELDS" " WHERE" " FIELD_SPEC.RDB$FIELD_NAME = REL_FIELDS.RDB$FIELD_SOURCE" " AND REL_FIELDS.RDB$RELATION_NAME = ?" " AND REL_FIELDS.RDB$FIELD_NAME = ?" ; const unsigned short sql_statement_table_length = (unsigned short) strlen(sql_statement_table); const char *sql_statement_stored_procedure = "SELECT FIELD_SPEC.RDB$FIELD_PRECISION" " FROM RDB$FIELDS FIELD_SPEC, RDB$PROCEDURE_PARAMETERS REL_FIELDS" " WHERE" " FIELD_SPEC.RDB$FIELD_NAME = REL_FIELDS.RDB$FIELD_SOURCE" " AND RDB$PROCEDURE_NAME = ?" " AND RDB$PARAMETER_NAME = ?" " AND RDB$PARAMETER_TYPE = 1" /* 1 is the parameter type of output * parameters */ ; const unsigned short sql_statement_stored_procedure_length = (unsigned short) strlen(sql_statement_stored_procedure); /* The following variables are all just shortcuts to members of * result_cache: */ XSQLDA *in_da = NULL; XSQLVAR *in_var = NULL; XSQLDA *out_da = NULL; XSQLVAR *out_var = NULL; CursorDescriptionCache *cache = Transaction_get_con(cursor->trans)->desc_cache; PyObject *exception_type = NULL; /* Default to normal table. */ if (entity_type_code == ENTITY_TYPE_UNKNOWN) { entity_type_code = ENTITY_TYPE_TABLE; } if (entity_name_length == 0 || field_name_length == 0) { /* Either or both of the entity name and the field name are not supplied, * so we cannot determine this output field's precision. This is not * an exceptional situation; it occurs routinely in queries with * dynamically computed fields (e.g., select count(*) from some_table). */ return PyInt_FromLong(0); } /* Special case for the automagic RDB$DB_KEY field, which the engine isn't * able to find the precision of. The engine mangles the field name to * "DB_KEY" instead of "RDB$DB_KEY", but I'm testing for either here in the * interest of future-proofing. */ if ( (field_name_length == 6 && strncmp(field_name, "DB_KEY", 6) == 0) || (field_name_length == 10 && strncmp(field_name, "RDB$DB_KEY", 10) == 0) ) { return PyInt_FromLong(0); } /* If the cache has not yet been allocated and prepared, do so now. * If it has already been allocated, just set some local "shortcut pointers" * and proceed directly to the query execution. */ if (cache != NULL) { /* If the precison figure for this entity.field is already cached, just * retrieve it from the cache dictionary and return. */ result_cache = cache->result_cache; assert (result_cache != NULL); result_cache_this_entity = PyDict_GetItemString(result_cache, entity_name); /* borrowed ref */ if (result_cache_this_entity != NULL) { precision = PyDict_GetItemString(result_cache_this_entity, field_name); if (precision != NULL) { /* Cache hit. 
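 * The cache consulted here is a two-level dictionary shaped like
 *   result_cache = { entity_name: { field_name: precision } }
 * so a hit costs two hash lookups and no SQL round trip.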
* PyDict_GetItemString returned borrowed ref, so we need to INCREF. */ Py_INCREF(precision); return precision; } } else { /* There is not even a cache for this entity, so there cannot possibly be * one for this entity.field. Create a new dictionary to hold the cached * precision figures for this entity. */ result_cache_this_entity = PyDict_New(); if (result_cache_this_entity == NULL) { goto fail; } status = PyDict_SetItemString(result_cache, entity_name, result_cache_this_entity ); /* PyDict_SetItemString established its own ref. */ Py_DECREF(result_cache_this_entity); if (status == -1) { goto fail; } } /* The precision figure was not cached; fall through and query the system * tables. */ in_da = cache->in_da; out_da = cache->out_da; out_var = cache->out_da->sqlvar; } else { /* cache itself was NULL, so we're starting from scratch by allocating the * cache structure. Won't need to explicitly deallocate it on error, * because CConnection's field cleanup code will take care of that. */ cache = Transaction_get_con(cursor->trans)->desc_cache = kimem_main_malloc( sizeof(CursorDescriptionCache) ); if (cache == NULL) { goto fail; } cache->in_da = (XSQLDA *) cache->in_da_mem; cache->out_da = (XSQLDA *) cache->out_da_mem; cache->out_var_sqldata = -1; cache->out_var_sqlind = SQLIND_NULL; out_var = cache->out_da->sqlvar; out_var->sqldata = (char *) &cache->out_var_sqldata; out_var->sqlind = &cache->out_var_sqlind; /* The dictionary result_cache will cache entity-specific dictionaries that * will contain the field precision figures that have been determined via * queries to system tables. * Notice that we attach result_cache to the CursorDescriptionCache object, * which is tracked by the CConnection, so on error, deallocation of * result_cache will be handled implicitly by the CConnection's * field cleanup code. */ result_cache = cache->result_cache = PyDict_New(); if (result_cache == NULL) { goto fail; } /* There was no cache at all, so there could not have been a cache for this * entity. Create one. */ result_cache_this_entity = PyDict_New(); if (result_cache_this_entity == NULL) { goto fail; } status = PyDict_SetItemString(result_cache, entity_name, result_cache_this_entity ); Py_DECREF(result_cache_this_entity); if (status == -1) { goto fail; } /* Set up the query output structures. We know at design time exactly how * they should be configured, so there's no convoluted dynamism here, as * there is in servicing an arbitrary query that originated in Python * client code. */ out_da = cache->out_da; out_da->version = SQLDA_VERSION_KIDB; out_da->sqln = 1; /* Set up the input structures (again, their configuration is mostly * static). */ in_da = cache->in_da; in_da->version = SQLDA_VERSION_KIDB; in_da->sqln = 2; in_da->sqld = 2; /* Set the type flags of the input variables (they never change): */ in_da->sqlvar ->sqltype = SQL_TEXT; (in_da->sqlvar + 1)->sqltype = SQL_TEXT; /* Allocate the statement structures. MUST set statement handles to NULL * before calls to isc_dsql_allocate_statement. 
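 * (The client API treats a non-NULL statement handle as one that is
 * already allocated, and rejects the allocation call rather than
 * overwrite it -- hence the explicit NULL_STMT_HANDLE initialization.)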
*/ ENTER_GDAL cache->stmt_handle_table = NULL_STMT_HANDLE; cache->stmt_handle_stored_procedure = NULL_STMT_HANDLE; isc_dsql_allocate_statement(cursor->status_vector, Transaction_get_db_handle_p(cursor->trans), &cache->stmt_handle_table ); if (DB_API_ERROR(cursor->status_vector)) { LEAVE_GDAL_WITHOUT_ENDING_CODE_BLOCK goto fail_with_operationalerror; } isc_dsql_allocate_statement(cursor->status_vector, Transaction_get_db_handle_p(cursor->trans), &cache->stmt_handle_stored_procedure ); LEAVE_GDAL if (DB_API_ERROR(cursor->status_vector)) { goto fail_with_operationalerror; } /* Prepare the statements. */ { isc_tr_handle *trans_handle_addr = Transaction_get_handle_p(cursor->trans); ENTER_GDAL isc_dsql_prepare(cursor->status_vector, trans_handle_addr, &cache->stmt_handle_table, sql_statement_table_length, (char *) sql_statement_table, Transaction_get_dialect(cursor->trans), out_da ); if (DB_API_ERROR(cursor->status_vector)) { LEAVE_GDAL_WITHOUT_ENDING_CODE_BLOCK goto fail_with_operationalerror; } isc_dsql_prepare(cursor->status_vector, trans_handle_addr, &cache->stmt_handle_stored_procedure, sql_statement_stored_procedure_length, (char *) sql_statement_stored_procedure, Transaction_get_dialect(cursor->trans), out_da ); LEAVE_GDAL if (DB_API_ERROR(cursor->status_vector)) { goto fail_with_operationalerror; } } } /* We are now done (allocating and preparing new)/(loading references to * existing) description cache structures. */ assert (in_da != NULL); assert (out_da != NULL); assert (out_var != NULL); assert (out_var == out_da->sqlvar); /* Set the names of the relation and field for which we're determining * precision. */ in_var = in_da->sqlvar; /* First input variable. */ assert (in_var->sqltype == SQL_TEXT); in_var->sqllen = entity_name_length; in_var->sqldata = entity_name; in_var++; /* Second input variable. */ assert (in_var->sqltype == SQL_TEXT); in_var->sqllen = field_name_length; in_var->sqldata = field_name; /* Execute the prepared statement. */ switch (entity_type_code) { case ENTITY_TYPE_TABLE: { isc_tr_handle *trans_handle_addr = Transaction_get_handle_p(cursor->trans); ENTER_GDAL isc_dsql_execute2(cursor->status_vector, trans_handle_addr, &cache->stmt_handle_table, Transaction_get_dialect(cursor->trans), in_da, out_da ); LEAVE_GDAL } break; case ENTITY_TYPE_STORED_PROCEDURE: { isc_tr_handle *trans_handle_addr = Transaction_get_handle_p(cursor->trans); ENTER_GDAL isc_dsql_execute2(cursor->status_vector, trans_handle_addr, &cache->stmt_handle_stored_procedure, Transaction_get_dialect(cursor->trans), in_da, out_da ); LEAVE_GDAL } break; default: raise_exception(InternalError, "determine_field_precision called with" " invalid entity type directive."); goto fail; } if (DB_API_ERROR(cursor->status_vector)) { /* If we've recursed as far as possible, yet have run out of entity types * to try, we must give up and raise an error. */ if (entity_type_code == ENTITY_TYPE_LAST) { exception_type = InternalError; goto fail; } else { /* Recursively try the next alternative entity type. */ precision = determine_field_precision( (short) (entity_type_code + 1), entity_name, entity_name_length, field_name, field_name_length, cursor ); if (precision == NULL) { goto fail; } } } else { Transaction_stats_record_ps_executed(cursor->trans); /* Both PyInt_FromLong and PyDict_SetItemString create new references, so * although we want to store one reference to the int object and return * another reference to the same object, there's no need to * INCREF(precision) * here. 
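 * A NULL RDB$FIELD_PRECISION (typical for non-numeric fields) is
 * deliberately mapped to 0 below, consistent with the zero-precision
 * convention documented at the top of this function.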
*/ precision = PyInt_FromLong( cache->out_var_sqlind == SQLIND_NULL ? 0 : cache->out_var_sqldata ); if (precision == NULL) { goto fail; } /* Cache the precision figure. */ if (PyDict_SetItemString(result_cache_this_entity, field_name, precision) == -1 ) { Py_DECREF(precision); goto fail; } } assert (precision != NULL); assert (PyInt_CheckExact(precision)); return precision; fail_with_operationalerror: exception_type = OperationalError; /* Fall through to fail: */ fail: /* If no exception_type was specified, there should already be a Python * exception set. */ if (exception_type == NULL) { assert (PyErr_Occurred()); } else { raise_sql_exception(exception_type, "Unable to determine field precison from system tables: ", cursor->status_vector ); } return NULL; } /* determine_field_precision */ static void free_field_precision_cache( CursorDescriptionCache *cache, boolean should_try_to_free_stmt_handles, ISC_STATUS *status_vector ) { if (cache == NULL) { return; } /* Added should_try_to_free_stmt_handles flag so that the connection can * prevent this function from calling isc_dsql_free_statement if the * connection knows that it has lost its database handle (in some versions of * the Firebird client library, isc_dsql_free_statement causes a segfault * when the connection under which the handles were established is no longer * valid). */ if (!should_try_to_free_stmt_handles) { cache->stmt_handle_table = NULL_STMT_HANDLE; cache->stmt_handle_stored_procedure = NULL_STMT_HANDLE; } else { assert (cache->stmt_handle_table != NULL_STMT_HANDLE); assert (cache->stmt_handle_stored_procedure != NULL_STMT_HANDLE); ENTER_GDAL isc_dsql_free_statement(status_vector, &cache->stmt_handle_table, DSQL_drop ); isc_dsql_free_statement(status_vector, &cache->stmt_handle_stored_procedure, DSQL_drop ); LEAVE_GDAL } /* Free the master cache dictionary, which will of course free its * subordinate (entity-specific) dictionaries: */ Py_XDECREF(cache->result_cache); /* Free the passed cache object itself: */ kimem_main_free(cache); } /* free_field_precision_cache */ kinterbasdb-3.3.0/_kiconversion_to_db.c0000644000175000001440000007557211130647414017411 0ustar pcisarusers/* KInterbasDB Python Package - Implementation of Parameter Conversion Py->DB * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ /* This source file is designed to be directly included in _kiconversion.c, * without the involvement of a header file. 
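 *
 * Python-level view of what this file implements: each parameter passed
 * to Cursor.execute is routed to the conv_in_* routine matching the
 * XSQLVAR type of its slot. Illustrative sketch (table and column names
 * hypothetical; the 7-tuple is the post-translation form the timestamp
 * routine below expects):
 *
 *   cur.execute("INSERT INTO T (C_CHAR, C_INT, C_TS) VALUES (?, ?, ?)",
 *               ('abc', 123, (2009, 1, 13, 11, 52, 28, 0)))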
*/ /******************** FUNCTION PROTOTYPES:BEGIN ********************/ static void _complain_PyObject_to_database_field_type_mismatch( PyObject *py_input, char *database_field_type_name_raw, XSQLVAR *sqlvar, boolean is_array_element ); static int _try_to_accept_string_and_convert(PyObject *o, XSQLVAR *sqlvar, Cursor *cur ); static int _PyObject2XSQLVAR_check_range_SQL_INTEGER( unsigned short dialect, short data_type, short data_subtype, short scale, PyObject *n, PyObject *min, PyObject *max ); static int _PyObject2XSQLVAR_check_range_SQL_CHARACTER(PyObject *o, size_t actualLength, size_t maxLength ); boolean ISC_TIME_from_PyInt(PyObject *py_int, ISC_TIME *t); /******************** FUNCTION PROTOTYPES:END ********************/ /******************** CONVENIENCE DEFS:BEGIN ********************/ #define TRY_TO_ACCEPT_STRING_AND_CONVERT(py_input, sqlvar, cur) \ if (_try_to_accept_string_and_convert(py_input, sqlvar, cur) == INPUT_OK) { \ return INPUT_OK; \ } /* Else, do not immediately return or break. */ /* Don't allocate new memory if we're converting a database array element: */ #define ALLOC_IF_NOT_ARRAY_THEN_SET(buf_ptr, datatype, value) \ if (!is_array_element) { \ buf_ptr = (char *) kimem_main_malloc(sizeof(datatype)); \ if (buf_ptr == NULL) { goto fail; } \ } \ /* value may contain a Python API call; we must check for a Python error \ * after evaluating value. */ \ { \ datatype temp = (datatype)(value); \ if (PyErr_Occurred()) { \ /* Error-handling code elsewhere will take care of freeing buf_ptr, for \ * which we allocated space just above. */ \ goto fail; \ } \ *( (datatype *) buf_ptr ) = temp; \ } /******************** CONVENIENCE DEFS:END ********************/ #define conv_in_text_conventional(py_input, sqlvar, data_type) \ _conv_in_text( \ FALSE, /* This is not an array element. */ \ py_input, \ /* For non-array-element conversion: */ \ sqlvar, data_type, \ /* For array-element conversion; irrelevant here: */ \ NULL, 0, '\0' \ ) #define conv_in_text_array(data_slot, size_of_single_element, pad_char) \ _conv_in_text( \ TRUE, /* This is an array element. */ \ py_input, \ /* For non-array-element conversion: */ \ NULL, -1, \ /* For array-element conversion; irrelevant here: */ \ data_slot, size_of_single_element, pad_char \ ) /* The _conv_in_text function should not be called except via the * conv_in_text_(conventional|array) macros defined above. */ static InputStatus _conv_in_text( /* Common: */ boolean is_array_element, PyObject *py_input, /* For non-array-element conversion: */ XSQLVAR *sqlvar, short data_type, /* For array-element conversion: */ char **data_slot, size_t defined_field_size, char array_value_pad_char ) { if (!PyString_Check(py_input)) { _complain_PyObject_to_database_field_type_mismatch(py_input, "str", sqlvar, is_array_element ); goto fail; } { size_t size_of_incoming_string = PyString_GET_SIZE(py_input); size_t max_allowed_length = ( is_array_element ? defined_field_size : sqlvar->sqllen ); /* Don't allow truncation; raise an exception if py_input is too long. */ if (_PyObject2XSQLVAR_check_range_SQL_CHARACTER( py_input, size_of_incoming_string, max_allowed_length ) != INPUT_OK ) { goto fail; } if (!is_array_element) { /* This is not an array element; we're free to use sqlvar. 
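 * (The length check just above rejects oversized values before anything
 * is sent to the server; from Python the symptom is a ProgrammingError
 * bearing SQL code -802 -- e.g., hypothetically, binding 'abcd' to a
 * CHAR(3) column.)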
*/ assert (sqlvar != NULL); assert (data_slot == NULL); /* Coerce this sqlvar's type to SQL_TEXT (CHAR) so that we don't have to * allocate a new buffer of size * sizeof(short) + size_of_incoming_string * just to have sizeof(short) extra bytes at the beginning to denote * the length of the incoming value (as we normally would with a * SQL_VARYING). */ if (data_type != SQL_TEXT) { data_type = SQL_TEXT; /* Reset the XSQLVAR's type code, retaining its original null flag. */ sqlvar->sqltype = SQL_TEXT | XSQLVAR_SQLTYPE_READ_NULL_FLAG(sqlvar); } sqlvar->sqllen = (short) size_of_incoming_string; /* !MUST! set the * sqllen to prevent the database engine from bulldozing its way out * to the field's defined length and corrupting the value in the * database. * The database engine assumes that an incoming CHAR buffer is sqllen * bytes long (sqllen is initially set to the defined length of the * CHAR field). The incoming buffer might not be long enough because * we haven't allocated a full-sized buffer for the incoming value. * Instead, we're using the pre-existing, null-terminated buffer * inside the Python string object py_input). * !Note that this XSQLVAR's original settings are later restored * to prevent the database client library from concluding that the * defined maximum length of this field is *really* * size_of_incoming_string, or that this field is *really* a CHAR if * sqltype originally indicated VARCHAR. * In essence, this amounts to API abuse for the sake of a very * significant optimization. */ sqlvar->sqldata = PyString_AS_STRING(py_input); } else { /* This is an array element. */ assert (sqlvar == NULL); assert (data_slot != NULL); /* Because we don't have an XSQLVAR structure to abuse, we must actually * *copy* the incoming bytes into the array source buffer. */ memcpy(*data_slot, PyString_AS_STRING(py_input), size_of_incoming_string); memset( (*data_slot) + size_of_incoming_string, array_value_pad_char, defined_field_size - size_of_incoming_string ); } } /* end of namespace-block for size_of_incoming_string. */ return INPUT_OK; fail: assert (PyErr_Occurred()); return INPUT_ERROR; } /* _conv_in_text */ #define conv_in_internal_integer_types_conventional(py_input, sqlvar, \ dialect, data_type, data_subtype, scale, cur \ ) \ _conv_in_internal_integer_types(FALSE, py_input, &sqlvar->sqldata, \ dialect, data_type, data_subtype, scale, \ sqlvar, cur \ ) #define conv_in_internal_integer_types_array(py_input, data_slot, \ dialect, data_type, data_subtype, scale, cur \ ) \ _conv_in_internal_integer_types(TRUE, py_input, data_slot, \ dialect, data_type, data_subtype, scale, \ NULL, cur \ ) /* The _conv_in_internal_integer_types function should not be called except * via the _conv_in_internal_integer_types_(conventional|array) macros defined * above. 
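 *
 * At this layer a fixed-point (NUMERIC/DECIMAL) parameter is expected to
 * have already been scaled to a plain Python int/long by the active
 * 'FIXED' input translator: for a slot with scale -2, the value 12.34
 * should arrive here as the integer 1234. The range checks below
 * therefore compare the *scaled* magnitude against the limits of the
 * internal storage type (SHORT, LONG, or INT64).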
*/ static InputStatus _conv_in_internal_integer_types( boolean is_array_element, PyObject *py_input, char **data_slot, unsigned short dialect, short data_type, short data_subtype, short scale, XSQLVAR *sqlvar, Cursor *cur ) { PyObject *minN, *maxN; const boolean isSQLShort = (boolean) (data_type == SQL_SHORT); const boolean isSQLLong = (boolean) (data_type == SQL_LONG); const boolean isPyInt = (boolean) PyInt_Check(py_input); const boolean isPyLong = (boolean) PyLong_Check(py_input); assert (!is_array_element || sqlvar == NULL); if (!(isPyInt || isPyLong)) { if (!is_array_element) { TRY_TO_ACCEPT_STRING_AND_CONVERT(py_input, sqlvar, cur); } _complain_PyObject_to_database_field_type_mismatch(py_input, "database-internal numeric type", sqlvar, is_array_element ); goto fail; } /* End of block that ensures that py_input is of an appropriate type. */ /* The next step is to ensure that the scaled value is not too large for * storage in its internal format. If it is not too large, we will finally * transfer the value from its Pythonic representation to the data_slot. */ if (isSQLShort) { minN = py_SHRT_MIN; maxN = py_SHRT_MAX; } else if (isSQLLong) { /* On non-Windows x86_64, a SQL_LONG is actually stored as an int, not a * long. */ minN = py_INT_MIN; maxN = py_INT_MAX; #ifdef INTERBASE_6_OR_LATER } else { /* data_type must be SQL_INT64 */ minN = py_LONG_LONG_MIN; maxN = py_LONG_LONG_MAX; #endif /* INTERBASE_6_OR_LATER */ } if (_PyObject2XSQLVAR_check_range_SQL_INTEGER( dialect, data_type, data_subtype, scale, py_input, minN, maxN ) != INPUT_OK ) { goto fail; } if (isSQLShort) { if (isPyInt) { ALLOC_IF_NOT_ARRAY_THEN_SET(*data_slot, short, (short) PyInt_AS_LONG(py_input)); } else { /* Must be PyLong */ ALLOC_IF_NOT_ARRAY_THEN_SET(*data_slot, short, (short) PyLong_AsLong(py_input)); } } else if (isSQLLong) { /* On non-Windows x86_64, a SQL_LONG is actually stored as an int, not a * long. */ if (isPyInt) { ALLOC_IF_NOT_ARRAY_THEN_SET(*data_slot, int, PyInt_AS_LONG(py_input)); } else { /* Must be PyLong */ ALLOC_IF_NOT_ARRAY_THEN_SET(*data_slot, int, PyLong_AsLong(py_input)); } #ifdef INTERBASE_6_OR_LATER } else { /* data_type must be SQL_INT64 */ if (isPyInt) { /* There is no PyInt_AsLongLong because a PyInt's value is stored * internally as a C long. 
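 * The C long value is simply widened to LONG_LONG in the assignment
 * below, which is lossless.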
*/ ALLOC_IF_NOT_ARRAY_THEN_SET(*data_slot, LONG_LONG, PyInt_AS_LONG(py_input)); } else { /* Must be PyLong */ ALLOC_IF_NOT_ARRAY_THEN_SET(*data_slot, LONG_LONG, PyLong_AsLongLong(py_input)); } #endif /* INTERBASE_6_OR_LATER */ } return INPUT_OK; fail: assert (PyErr_Occurred()); return INPUT_ERROR; } /* _conv_in_internal_integer_types */ #define _create_func_conv_in_floating(floating_type) \ static InputStatus _conv_in_ ## floating_type ( \ boolean is_array_element, PyObject *py_input, char **data_slot, \ XSQLVAR *sqlvar, Cursor *cur \ ) \ { \ assert (!is_array_element || sqlvar == NULL); \ \ if (PyFloat_Check(py_input)) { \ ALLOC_IF_NOT_ARRAY_THEN_SET(*data_slot, floating_type, PyFloat_AS_DOUBLE(py_input)); \ } else if (PyInt_Check(py_input)) { \ ALLOC_IF_NOT_ARRAY_THEN_SET(*data_slot, floating_type, PyInt_AS_LONG(py_input)); \ } else if (PyLong_Check(py_input)) { \ ALLOC_IF_NOT_ARRAY_THEN_SET(*data_slot, floating_type, PyLong_AsLong(py_input)); \ } else { \ if (!is_array_element) { \ TRY_TO_ACCEPT_STRING_AND_CONVERT(py_input, sqlvar, cur); \ } \ _complain_PyObject_to_database_field_type_mismatch(py_input, \ #floating_type, sqlvar, is_array_element \ ); \ goto fail; \ } \ \ return INPUT_OK; \ \ fail: \ assert (PyErr_Occurred()); \ return INPUT_ERROR; \ } /* Use a macro to create functions _conv_in_float and _conv_in_double: */ _create_func_conv_in_floating(float) _create_func_conv_in_floating(double) #define conv_in_float_conventional(py_input, sqlvar, cur) \ _conv_in_float(FALSE, py_input, &sqlvar->sqldata, sqlvar, cur) #define conv_in_float_array(py_input, data_slot, cur) \ _conv_in_float(TRUE, py_input, data_slot, NULL, cur) #define conv_in_double_conventional(py_input, sqlvar, cur) \ _conv_in_double(FALSE, py_input, &sqlvar->sqldata, sqlvar, cur) #define conv_in_double_array(py_input, data_slot, cur) \ _conv_in_double(TRUE, py_input, data_slot, NULL, cur) /* Date/time types: */ #define _DATETIME_INPUT_EL(index, ERROR_LABEL) \ el = PySequence_Fast_GET_ITEM(py_input_as_tuple, index); /* borrowed ref */ \ if (!PyInt_Check(el)) { goto ERROR_LABEL; } #define conv_in_timestamp_conventional(py_input, sqlvar, cur) \ _conv_in_timestamp(FALSE, py_input, &(sqlvar)->sqldata, sqlvar, cur) #define conv_in_timestamp_array(py_input, data_slot, cur) \ _conv_in_timestamp(TRUE, py_input, data_slot, NULL, cur) /* The _conv_in_timestamp function should not be called except via the * conv_in_timestamp_(conventional|array) macros defined above. */ static InputStatus _conv_in_timestamp( boolean is_array_element, PyObject *py_input, char **data_slot, XSQLVAR *sqlvar, Cursor *cur ) { struct tm c_tm; PyObject *py_input_as_tuple = NULL; ISC_TIME microseconds; assert (is_array_element ? sqlvar == NULL : sqlvar != NULL && sqlvar->sqldata == NULL ); /* If py_input is a string, or is a non-sequence, then it's an invalid * input value--unless the string happens to be a valid TIMESTAMP literal * that the database server will accept for implicit type conversion. */ if ( PyString_Check(py_input) || PyUnicode_Check(py_input) || !PySequence_Check(py_input) ) { if (!is_array_element) { TRY_TO_ACCEPT_STRING_AND_CONVERT(py_input, sqlvar, cur); } goto fail_with_type_complaint; } else { /* Only borrowed references are stored in el, so there's no need to DECREF * it: */ PyObject *el = NULL; /* We already know that py_input is a sequence, so there's no need to pass * an error message to PySequence_Fast. 
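 * The accepted sequence shape here is the 7-tuple
 * (year, month, day, hour, minute, second, microsecond), e.g.
 * (2009, 1, 13, 11, 52, 28, 0) -- the form the timestamp input
 * translators are expected to produce from a datetime value.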
*/ py_input_as_tuple = PySequence_Fast(py_input, ""); if (py_input_as_tuple == NULL) { goto fail_with_type_complaint; } if (PySequence_Fast_GET_SIZE(py_input_as_tuple) != 7) { _complain_PyObject_to_database_field_type_mismatch(py_input, "TIMESTAMP", sqlvar, is_array_element ); goto fail_with_type_complaint; } #define _TIMESTAMP_INPUT_EL(index) \ _DATETIME_INPUT_EL(index, fail) _TIMESTAMP_INPUT_EL(0); c_tm.tm_year = PyInt_AS_LONG(el) - 1900; _TIMESTAMP_INPUT_EL(1); c_tm.tm_mon = PyInt_AS_LONG(el) - 1; _TIMESTAMP_INPUT_EL(2); c_tm.tm_mday = PyInt_AS_LONG(el); _TIMESTAMP_INPUT_EL(3); c_tm.tm_hour = PyInt_AS_LONG(el); _TIMESTAMP_INPUT_EL(4); c_tm.tm_min = PyInt_AS_LONG(el); _TIMESTAMP_INPUT_EL(5); c_tm.tm_sec = PyInt_AS_LONG(el); _TIMESTAMP_INPUT_EL(6); if (!ISC_TIME_from_PyInt(el, &microseconds)) { goto fail; } } if (!is_array_element) { *data_slot = (char *) kimem_main_malloc(sizeof(ISC_TIMESTAMP)); if (*data_slot == NULL) { goto fail; } } assert (*data_slot != NULL); { ISC_TIMESTAMP *t = (ISC_TIMESTAMP *) *data_slot; ENTER_GDAL isc_encode_timestamp(&c_tm, t); LEAVE_GDAL t->timestamp_time += microseconds / 100; /* millionths -> ten-thousandths */ } Py_XDECREF(py_input_as_tuple); return INPUT_OK; fail_with_type_complaint: _complain_PyObject_to_database_field_type_mismatch(py_input, "TIMESTAMP", sqlvar, is_array_element ); /* Fall through to fail: */ fail: assert (PyErr_Occurred()); Py_XDECREF(py_input_as_tuple); if (!is_array_element && *data_slot != NULL) { kimem_main_free(*data_slot); *data_slot = NULL; } return INPUT_ERROR; } /* _conv_in_timestamp */ #ifdef INTERBASE_6_OR_LATER #define conv_in_date_conventional(py_input, sqlvar, cur) \ _conv_in_date(FALSE, py_input, &sqlvar->sqldata, sqlvar, cur) #define conv_in_date_array(py_input, data_slot, cur) \ _conv_in_date(TRUE, py_input, data_slot, NULL, cur) /* The _conv_in_date function should not be called except via the * conv_in_date_(conventional|array) macros defined above. */ static InputStatus _conv_in_date( boolean is_array_element, PyObject *py_input, char **data_slot, XSQLVAR *sqlvar, Cursor *cur ) { struct tm c_tm; PyObject *py_input_as_tuple = NULL; assert (is_array_element ? sqlvar == NULL : sqlvar != NULL && sqlvar->sqldata == NULL ); /* If py_input is a string, or is a non-sequence, then it's an invalid * input value--unless the string happens to be a valid DATE literal * that the database server will accept for implicit type conversion. */ if ( PyString_Check(py_input) || PyUnicode_Check(py_input) || !PySequence_Check(py_input) ) { if (!is_array_element) { TRY_TO_ACCEPT_STRING_AND_CONVERT(py_input, sqlvar, cur); } goto fail_with_type_complaint; } else { /* Only borrowed references are stored in el, so there's no need to DECREF * it: */ PyObject *el = NULL; /* We already know that py_input is a sequence, so there's no need to pass * an error message to PySequence_Fast. 
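 * The accepted sequence shape here is the 3-tuple (year, month, day),
 * e.g. (2009, 1, 13).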
*/ py_input_as_tuple = PySequence_Fast(py_input, ""); if (py_input_as_tuple == NULL) { goto fail_with_type_complaint; } if (PySequence_Fast_GET_SIZE(py_input_as_tuple) != 3) { goto fail_with_type_complaint; } #define _DATE_INPUT_EL(index) \ _DATETIME_INPUT_EL(index, fail) _DATE_INPUT_EL(0); c_tm.tm_year = PyInt_AS_LONG(el) - 1900; _DATE_INPUT_EL(1); c_tm.tm_mon = PyInt_AS_LONG(el) - 1; _DATE_INPUT_EL(2); c_tm.tm_mday = PyInt_AS_LONG(el); } if (!is_array_element) { *data_slot = (char *) kimem_main_malloc(sizeof(ISC_DATE)); if (*data_slot == NULL) { goto fail; } } assert (*data_slot != NULL); ENTER_GDAL isc_encode_sql_date(&c_tm, (ISC_DATE *) *data_slot); LEAVE_GDAL Py_XDECREF(py_input_as_tuple); return INPUT_OK; fail_with_type_complaint: _complain_PyObject_to_database_field_type_mismatch(py_input, "DATE", sqlvar, is_array_element ); /* Fall through to fail: */ fail: assert (PyErr_Occurred()); Py_XDECREF(py_input_as_tuple); if (!is_array_element && *data_slot != NULL) { kimem_main_free(*data_slot); *data_slot = NULL; } return INPUT_ERROR; } /* _conv_in_date */ #define conv_in_time_conventional(py_input, sqlvar, cur) \ _conv_in_time(FALSE, py_input, &(sqlvar)->sqldata, sqlvar, cur) #define conv_in_time_array(py_input, data_slot, cur) \ _conv_in_time(TRUE, py_input, data_slot, NULL, cur) /* The _conv_in_time function should not be called except via the * conv_in_time_(conventional|array) macros defined above. */ static InputStatus _conv_in_time( boolean is_array_element, PyObject *py_input, char **data_slot, XSQLVAR *sqlvar, Cursor *cur ) { struct tm c_tm; PyObject *py_input_as_tuple = NULL; ISC_TIME microseconds; assert (is_array_element ? sqlvar == NULL : sqlvar != NULL && sqlvar->sqldata == NULL ); /* If py_input is a string, or is a non-sequence, then it's an invalid * input value--unless the string happens to be a valid TIME literal * that the database server will accept for implicit type conversion. */ if ( PyString_Check(py_input) || PyUnicode_Check(py_input) || !PySequence_Check(py_input) ) { if (!is_array_element) { TRY_TO_ACCEPT_STRING_AND_CONVERT(py_input, sqlvar, cur); } goto fail_with_type_complaint; } else { /* Only borrowed references are stored in el, so there's no need to DECREF * it: */ PyObject *el = NULL; /* We already know that py_input is a sequence, so there's no need to pass * an error message to PySequence_Fast. 
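 * The accepted sequence shape here is the 4-tuple
 * (hour, minute, second, microsecond), e.g. (11, 52, 28, 0).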
*/ py_input_as_tuple = PySequence_Fast(py_input, ""); if (py_input_as_tuple == NULL) { goto fail_with_type_complaint; } if (PySequence_Fast_GET_SIZE(py_input_as_tuple) != 4) { goto fail_with_type_complaint; } #define _TIME_INPUT_EL(index) \ _DATETIME_INPUT_EL(index, fail) _TIME_INPUT_EL(0); c_tm.tm_hour = PyInt_AS_LONG(el); _TIME_INPUT_EL(1); c_tm.tm_min = PyInt_AS_LONG(el); _TIME_INPUT_EL(2); c_tm.tm_sec = PyInt_AS_LONG(el); _TIME_INPUT_EL(3); if (!ISC_TIME_from_PyInt(el, &microseconds)) { goto fail; } } if (!is_array_element) { *data_slot = (char *) kimem_main_malloc(sizeof(ISC_TIME)); if (*data_slot == NULL) { goto fail; } } assert (*data_slot != NULL); { ISC_TIME *t = (ISC_TIME *) *data_slot; ENTER_GDAL isc_encode_sql_time(&c_tm, t); LEAVE_GDAL *t += microseconds / 100; /* millionths -> ten-thousandths */ } Py_XDECREF(py_input_as_tuple); return INPUT_OK; fail_with_type_complaint: _complain_PyObject_to_database_field_type_mismatch(py_input, "TIME", sqlvar, is_array_element ); /* Fall through to fail: */ fail: assert (PyErr_Occurred()); Py_XDECREF(py_input_as_tuple); if (!is_array_element && *data_slot != NULL) { kimem_main_free(*data_slot); *data_slot = NULL; } return INPUT_ERROR; } /* _conv_in_time */ #endif /* INTERBASE_6_OR_LATER */ static InputStatus conv_in_blob_materialized( Cursor *cursor, XSQLVAR *sqlvar, PyObject *py_input ) { /* No need for overflow check here in materialized blob input because at * present, Python strings/buffers and database blobs have the same maximum * size: 2147483647 bytes. */ ISC_STATUS *status_vector = cursor->status_vector; isc_db_handle db_handle = *Transaction_get_db_handle_p(cursor->trans); isc_tr_handle trans_handle = *Transaction_get_handle_p(cursor->trans); /* Next statement allocates space for the blob's id, not for the blob's * contents (the contents are written segment-at-a-time in * conv_in_blob_from_pybuffer). */ sqlvar->sqldata = kimem_main_malloc(sizeof(ISC_QUAD)); if (sqlvar->sqldata == NULL) { goto fail; } if (PyString_Check(py_input)) { if (conv_in_blob_from_pystring(py_input, (ISC_QUAD *) sqlvar->sqldata, status_vector, db_handle, trans_handle ) == INPUT_ERROR ) { goto fail; } } else if (PyBuffer_Check(py_input)) { if (conv_in_blob_from_pybuffer(py_input, (ISC_QUAD *) sqlvar->sqldata, status_vector, db_handle, trans_handle ) == INPUT_ERROR ) { goto fail; } } else { _complain_PyObject_to_database_field_type_mismatch(py_input, "str", sqlvar, FALSE /* Arrays of blobs are not supported by the engine. 
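 * Hence is_array_element is hard-coded to FALSE in this call. At the
 * Python level, materialized blob input accepts either a str or a
 * buffer (checked above); hypothetically:
 *   cur.execute("INSERT INTO T (B) VALUES (?)", (buffer('raw bytes'),))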
*/ ); goto fail; } return INPUT_OK; fail: assert (PyErr_Occurred()); if (sqlvar->sqldata != NULL) { kimem_main_free(sqlvar->sqldata); sqlvar->sqldata = NULL; } return INPUT_ERROR; } /* conv_in_blob_materialized */ #define conv_in_boolean_conventional(py_input, sqlvar) \ _conv_in_boolean(FALSE, py_input, &(sqlvar)->sqldata) #define conv_in_boolean_array(py_input, data_slot) \ _conv_in_boolean(TRUE, py_input, data_slot) static InputStatus _conv_in_boolean(boolean is_array_element, PyObject *py_input, char **data_slot ) { ALLOC_IF_NOT_ARRAY_THEN_SET(*data_slot, short, (short) PyObject_IsTrue(py_input) ); return INPUT_OK; fail: assert (PyErr_Occurred()); return INPUT_ERROR; } /* conv_in_boolean */ /******************** UTILITY FUNCTIONS:BEGIN ********************/ static void _complain_PyObject_to_database_field_type_mismatch( PyObject *py_input, char *database_field_type_name_raw, XSQLVAR *sqlvar, boolean is_array_element ) { /* This function doesn't return any indicator if it failed to finish * successfully because it's only called if the conversion has already * failed; a secondary failure will change the Python exception, but won't * make any difference here at the C level. */ PyObject *database_field_type_name = NULL; PyObject *field_name = NULL; PyObject *input_type = NULL; PyObject *input_type_repr = NULL; PyObject *input_repr = NULL; PyObject *err_msg = NULL; assert (py_input != NULL); assert (database_field_type_name_raw != NULL); /* If it's an array element, there's no sqlvar. */ assert (!is_array_element || sqlvar == NULL); database_field_type_name = PyString_FromString(database_field_type_name_raw); if (database_field_type_name == NULL) { goto exit; } /* sqlvar->aliasname is not null-terminated. */ field_name = (sqlvar == NULL || sqlvar->aliasname_length == 0 ? PyString_FromString("[name not known at this stage of query execution]") : PyString_FromStringAndSize(sqlvar->aliasname, sqlvar->aliasname_length) ); if (field_name == NULL) { goto exit; } input_type = PyObject_Type(py_input); if (input_type == NULL) { goto exit; } input_type_repr = PyObject_Repr(input_type); if (input_type_repr == NULL) { goto exit; } input_repr = PyObject_Repr(py_input); if (input_repr == NULL) { goto exit; } err_msg = PyString_FromFormat( "Error while attempting to convert object of type %s to %s for storage" " in %sfield %s. The invalid input object is: %s", PyString_AS_STRING(input_type_repr), PyString_AS_STRING(database_field_type_name), (is_array_element ? "element of array " : ""), PyString_AS_STRING(field_name), PyString_AS_STRING(input_repr) ); if (err_msg == NULL) { goto exit; } raise_exception(InterfaceError, PyString_AS_STRING(err_msg)); exit: Py_XDECREF(database_field_type_name); Py_XDECREF(field_name); Py_XDECREF(input_type); Py_XDECREF(input_type_repr); Py_XDECREF(input_repr); Py_XDECREF(err_msg); } /* _complain_PyObject_to_database_field_type_mismatch */ static InputStatus _try_to_accept_string_and_convert( PyObject *py_input, XSQLVAR *sqlvar, Cursor *cur ) { if (PyUnicode_Check(py_input)) { /* Pretend that we received a str instead of a unicode. */ PyObject *py_str = PyUnicode_AsASCIIString(py_input); if (py_str == NULL) { goto fail; } { PyObject *release_list = cur->objects_to_release_after_execute; assert (release_list != NULL); { const int status = PyList_Append(release_list, py_str); /* Either the list now owns a reference to py_str, or the append call * failed and the list does not own a reference. Either way, we don't * want to retain ownership of a reference here. 
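 * (Net effect visible from Python: a unicode parameter bound to a
 * non-text slot is accepted only if it encodes cleanly as ASCII --
 * u'123.45' behaves like '123.45' -- and the temporary str is kept
 * alive on the cursor until the execute completes.)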
*/ Py_DECREF(py_str); if (status != 0) { goto fail; } } /* cur->objects_to_release_after_execute owns the str created from the * unicode, and will make sure that it persists long enough for our * purposes, then will take care of releasing it. */ py_input = py_str; } } else if (!PyString_Check(py_input)) { goto fail; } { const Py_ssize_t string_length = PyString_GET_SIZE(py_input); if (string_length > SHRT_MAX) { goto fail; } /* Reset the XSQLVAR's type code, retaining its original null flag. */ sqlvar->sqltype = SQL_TEXT | XSQLVAR_SQLTYPE_READ_NULL_FLAG(sqlvar); sqlvar->sqllen = (short) string_length; /* Cast is safe; see above. */ /* Refer to the existing buffer inside py_input; do not allocate new * memory. */ sqlvar->sqldata = PyString_AS_STRING(py_input); } return INPUT_OK; fail: /* Lack of 'assert (PyErr_Occurred());' is deliberate; error code without * Python exception is sufficient in this case. */ return INPUT_ERROR; } /* _try_to_accept_string_and_convert */ static InputStatus _PyObject2XSQLVAR_check_range_SQL_CHARACTER( PyObject *py_s, size_t actual_length, size_t max_length ) { /* Client code should've already enforced this: */ assert (PyString_CheckExact(py_s)); if (actual_length > max_length) { /* PyString_FromFormat doesn't support the standard format code for * size_t, so we go through contortions: */ PyObject *py_actual_length_long = PyLong_FromUnsignedLongLong( (unsigned LONG_LONG) actual_length ); if (py_actual_length_long != NULL) { PyObject *py_max_length_long = PyLong_FromUnsignedLongLong( (unsigned LONG_LONG) max_length ); if (py_max_length_long != NULL) { PyObject *py_actual_length_str = PyObject_Str(py_actual_length_long); if (py_actual_length_str != NULL) { PyObject *py_max_length_str = PyObject_Str(py_max_length_long); if (py_max_length_str != NULL) { PyObject *err_msg = PyString_FromFormat( "String overflow: value %s bytes long cannot fit in character" " field of maximum length %s (value is '%s').", PyString_AS_STRING(py_actual_length_str), PyString_AS_STRING(py_max_length_str), PyString_AS_STRING(py_s) ); if (err_msg != NULL) { raise_exception_with_numeric_error_code(ProgrammingError, -802, /* -802 is the IB error code for an overflow */ PyString_AS_STRING(err_msg) ); Py_DECREF(err_msg); } Py_DECREF(py_max_length_str); } Py_DECREF(py_actual_length_str); } Py_DECREF(py_max_length_long); } Py_DECREF(py_actual_length_long); } assert (PyErr_Occurred()); return INPUT_ERROR; } return INPUT_OK; } /* _PyObject2XSQLVAR_check_range_SQL_CHARACTER */ static InputStatus _PyObject2XSQLVAR_check_range_SQL_INTEGER( unsigned short dialect, short data_type, short data_subtype, short scale, PyObject *n, PyObject *min, PyObject *max ) { assert (n != NULL); assert (min != NULL); assert (max != NULL); if (PyObject_Compare(n, min) < 0 || PyObject_Compare(n, max) > 0) { const char *external_data_type_name = get_external_data_type_name(dialect, data_type, data_subtype, scale ); const char *internal_data_type_name = get_internal_data_type_name(data_type); PyObject *n_str = NULL; PyObject *min_str = NULL; PyObject *max_str = NULL; PyObject *err_msg = NULL; n_str = PyObject_Str(n); if (n_str == NULL) { goto exit; } min_str = PyObject_Str(min); if (min_str == NULL) { goto exit; } max_str = PyObject_Str(max); if (max_str == NULL) { goto exit; } err_msg = PyString_FromFormat( "numeric overflow: value %s (%s scaled for %d decimal places) is of" " too great a magnitude to fit into its internal storage type %s," " which has range [%s, %s].", PyString_AS_STRING(n_str), external_data_type_name, 
abs(scale), internal_data_type_name, PyString_AS_STRING(min_str), PyString_AS_STRING(max_str) ); if (err_msg == NULL) { goto exit; } raise_exception_with_numeric_error_code(ProgrammingError, -802, /* -802 is the IB error code for an overflow */ PyString_AS_STRING(err_msg) ); exit: Py_XDECREF(n_str); Py_XDECREF(min_str); Py_XDECREF(max_str); Py_XDECREF(err_msg); return INPUT_ERROR; } return INPUT_OK; } /* _PyObject2XSQLVAR_check_range_SQL_INTEGER */ boolean ISC_TIME_from_PyInt(PyObject *py_int, ISC_TIME *t) { long val = PyInt_AS_LONG(py_int); if (val < 0 || val > UINT_MAX) { raise_exception(PyExc_ValueError, "Python integer intended for ISC_TIME" " variable does not fit." ); return FALSE; } *t = (unsigned int) val; return TRUE; } /* ISC_TIME_from_PyInt */ /******************** UTILITY FUNCTIONS:END ********************/ kinterbasdb-3.3.0/__init__.py0000644000175000001440000025774311133100174015327 0ustar pcisarusers# KInterbasDB Python Package - Python Wrapper for Core # # Version 3.3 # # The following contributors hold Copyright (C) over their respective # portions of code (see license.txt for details): # # [Original Author (maintained through version 2.0-0.3.1):] # 1998-2001 [alex] Alexander Kuznetsov # [Maintainers (after version 2.0-0.3.1):] # 2001-2002 [maz] Marek Isalski # 2002-2006 [dsr] David Rushby # [Contributors:] # 2001 [eac] Evgeny A. Cherkashin # 2001-2002 [janez] Janez Jere # The doc strings throughout this module explain what API *guarantees* # kinterbasdb makes. # Notably, the fact that users can only rely on the return values of certain # functions/methods to be sequences or mappings, not instances of a specific # class. This policy is still compliant with the DB API spec, and is more # future-proof than implying that all of the classes defined herein can be # relied upon not to change. Module members whose names begin with an # underscore cannot be expected to have stable interfaces. __version__ = (3, 3, 0, 'pre-alpha', 0) __timestamp__ = '2009.01.13.11.52.28.UTC' import os, struct, sys _FS_ENCODING = sys.getfilesystemencoding() if sys.platform.lower().startswith('win'): import os.path # Better out-of-box support for embedded DB engine on Windows: if the # client library is detected in the same directory as kinterbasdb, or in # the 'embedded' subdirectory of kinterbasdb's directory, or in the same # directory as the Python executable, give that library instance precedence # over the location listed in the registry. # # 2007.03.19: Overhauled to support non-ASCII paths properly, in response # to: # http://sourceforge.net/forum/forum.php?thread_id=1695175&forum_id=30917 def _findAndLoadFBDLLDir(): clientLibDir = None curWorkingDir = os.getcwdu() kinterbasdbDir = os.path.dirname(os.path.abspath(__file__)).decode( _FS_ENCODING ) pythonDir = os.path.dirname(sys.executable).decode(_FS_ENCODING) for clientLibName in ( u'firebird.dll', # Vulcan u'fbclient.dll', # FB 1.5, 2.x ): for location in ( os.path.join(curWorkingDir, clientLibName), os.path.join(kinterbasdbDir, clientLibName), os.path.join(os.path.join(kinterbasdbDir, u'embedded'), clientLibName ), os.path.join(pythonDir, clientLibName), ): if os.path.isfile(location): clientLibDir = os.path.dirname(location) break if clientLibDir: break if clientLibDir: origOSPath = os.environ['PATH'].decode(_FS_ENCODING) os.environ['PATH'] = ( origOSPath + os.pathsep + clientLibDir ).encode(_FS_ENCODING) # At least with FB 1.5.2, the FIREBIRD environment variable must # also be set in order for all features to work properly. 
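            # (Illustrative embedded layout this branch supports -- paths
            # hypothetical:
            #   C:\myapp\python.exe
            #   C:\myapp\kinterbasdb\fbclient.dll
            # clientLibDir was appended to PATH above and is recorded in
            # FIREBIRD below.)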
os.environ['FIREBIRD'] = clientLibDir.encode(_FS_ENCODING) else: # FB 1.5 RC7 and later, when installed via the packaged installer # or the "instreg.exe" command-line tool, record their installation # dir in the registry. If no client library was detected earlier, # we'll add the "bin" subdirectory of the directory from the # registry to the *end* of the PATH, so it'll be used as a last # resort. import _winreg reg = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE) try: try: dbInstPathsKey = _winreg.OpenKey(reg, u'SOFTWARE\\Firebird Project\\Firebird Server' u'\\Instances' ) try: instPath = _winreg.QueryValueEx( dbInstPathsKey, u'DefaultInstance' )[0] finally: dbInstPathsKey.Close() except WindowsError: # Versions of IB/FB prior to FB 1.5 RC7 don't have this reg # entry, but they install the client library into a system # library directory, so there's no problem. pass else: origOSPath = os.environ['PATH'].decode(_FS_ENCODING) fbBinPath = os.pathsep + os.path.join(instPath, u'bin') os.environ['PATH'] = (origOSPath + fbBinPath).encode( _FS_ENCODING ) finally: reg.Close() _findAndLoadFBDLLDir() # The underlying C module: import _kinterbasdb as _k # Import database API constants into the namespace of this module as Python # objects: _k.init_kidb_basic_header_constants(globals()) # Export utility members: FB_API_VER = _k.FB_API_VER portable_int = _k.portable_int raw_timestamp_to_tuple = _k.raw_timestamp_to_tuple DEFAULT_CONCURRENCY_LEVEL = _k.DEFAULT_CONCURRENCY_LEVEL get_concurrency_level = _k.concurrency_level_get # Initialize the k_exceptions so that other Python modules in kinterbasdb can # have access to kinterbasdb's exceptions without a circular import. import k_exceptions Warning = k_exceptions.Warning = _k.Warning Error = k_exceptions.Error = _k.Error InterfaceError = k_exceptions.InterfaceError = _k.InterfaceError DatabaseError = k_exceptions.DatabaseError = _k.DatabaseError DataError = k_exceptions.DataError = _k.DataError OperationalError = k_exceptions.OperationalError = _k.OperationalError IntegrityError = k_exceptions.IntegrityError = _k.IntegrityError InternalError = k_exceptions.InternalError = _k.InternalError ProgrammingError = k_exceptions.ProgrammingError = _k.ProgrammingError TransactionConflict \ = k_exceptions.TransactionConflict = _k.TransactionConflict NotSupportedError = k_exceptions.NotSupportedError = _k.NotSupportedError _EVENT_HANDLING_SUPPORTED = hasattr(_k, 'ConduitWasClosed') if _EVENT_HANDLING_SUPPORTED: ConduitWasClosed = k_exceptions.ConduitWasClosed = _k.ConduitWasClosed _CONNECTION_TIMEOUT_SUPPORTED = hasattr(_k, 'ConnectionTimedOut') if _CONNECTION_TIMEOUT_SUPPORTED: ConnectionTimedOut = k_exceptions.ConnectionTimedOut = \ _k.ConnectionTimedOut import _connection_timeout _ALL_EXCEPTION_CLASSES = [ Warning, Error, InterfaceError, DatabaseError, DataError, OperationalError, IntegrityError, InternalError, ProgrammingError, NotSupportedError, ] if _EVENT_HANDLING_SUPPORTED: _ALL_EXCEPTION_CLASSES.append(ConduitWasClosed) if _CONNECTION_TIMEOUT_SUPPORTED: _ALL_EXCEPTION_CLASSES.append(ConnectionTimedOut) _ALL_EXCEPTION_CLASSES = tuple(_ALL_EXCEPTION_CLASSES) ########################################## ## PUBLIC CONSTANTS: BEGIN ## ########################################## # Note: Numerous database API constants were imported into the global # namespace of this module by an earlier call to # _k.init_kidb_basic_header_constants. See _kinterbasdb_constants.c for more # info. 
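# Because paramstyle is declared as 'qmark' just below, parameterized
# statements use '?' placeholders; illustrative sketch (table name
# hypothetical):
#   cur.execute("SELECT * FROM T WHERE ID = ?", (1,))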
apilevel = '2.0' threadsafety = 1 paramstyle = 'qmark' # Named positional constants to be used as indices into the description # attribute of a cursor (these positions are defined by the DB API spec). # For example: # nameOfFirstField = cursor.description[0][kinterbasdb.DESCRIPTION_NAME] DESCRIPTION_NAME = 0 DESCRIPTION_TYPE_CODE = 1 DESCRIPTION_DISPLAY_SIZE = 2 DESCRIPTION_INTERNAL_SIZE = 3 DESCRIPTION_PRECISION = 4 DESCRIPTION_SCALE = 5 DESCRIPTION_NULL_OK = 6 # Default transaction parameter buffer: default_tpb = ( # isc_tpb_version3 is a *purely* infrastructural value. kinterbasdb will # gracefully handle user-specified TPBs that don't start with # isc_tpb_version3 (as well as those that do start with it). isc_tpb_version3 + isc_tpb_write # Access mode + isc_tpb_read_committed + isc_tpb_rec_version # Isolation level + isc_tpb_wait # Lock resolution strategy # + isc_tpb_shared # Table reservation # access method ) from _request_buffer_builder import RequestBufferBuilder as _RequestBufferBuilder _request_buffer_builder.portable_int = portable_int ########################################## ## PUBLIC CONSTANTS: END ## ########################################## ################################################### ## DYNAMIC TYPE TRANSLATION CONFIGURATION: BEGIN ## ################################################### # Added deferred loading of dynamic type converters to facilitate the # elimination of all dependency on the mx package. The implementation is quite # ugly due to backward compatibility constraints. BASELINE_TYPE_TRANSLATION_FACILITIES = ( # Date and time translator names: 'date_conv_in', 'date_conv_out', 'time_conv_in', 'time_conv_out', 'timestamp_conv_in', 'timestamp_conv_out', # Fixed point translator names: 'fixed_conv_in_imprecise', 'fixed_conv_in_precise', 'fixed_conv_out_imprecise', 'fixed_conv_out_precise', # Optional unicode converters: 'OPT:unicode_conv_in', 'OPT:unicode_conv_out', # DB API 2.0 standard date and time type constructors: 'Date', 'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks', 'TimestampFromTicks', ) # The next three will be modified by the init function as appropriate: _MINIMAL_TYPE_TRANS_TYPES = ('DATE', 'TIME', 'TIMESTAMP', 'FIXED',) _NORMAL_TYPE_TRANS_IN = None _NORMAL_TYPE_TRANS_OUT = None initialized = False _guessTextualBlobEncodingWhenUsingFB20AndEarlier = False def init(type_conv=200, concurrency_level=_k.DEFAULT_CONCURRENCY_LEVEL): global initialized, _MINIMAL_TYPE_TRANS_TYPES, \ _NORMAL_TYPE_TRANS_IN, _NORMAL_TYPE_TRANS_OUT, \ _guessTextualBlobEncodingWhenUsingFB20AndEarlier if initialized: raise ProgrammingError('Cannot initialize module more than once.') if _k.DEFAULT_CONCURRENCY_LEVEL == 0: if concurrency_level != 0: raise ProgrammingError('Support for concurrency was disabled at' ' compile time, so only Level 0 is available.' ) # Since only Level 0 is available and it's already active, there's no # need to do anything. else: if concurrency_level not in (1,2): raise ProgrammingError('Only Levels 1 and 2 are accessible at' ' runtime; Level 0 can only be activated at compile time.' 
) _k.concurrency_level_set(concurrency_level) _k.provide_refs_to_python_entities( _RowMapping, _make_output_translator_return_type_dict_from_trans_dict, _look_up_array_descriptor, _look_up_array_subtype, _Cursor_execute_exception_type_filter, _validateTPB, _trans_info, ) globalz = globals() if not isinstance(type_conv, int): typeConvModule = type_conv else: typeConvOptions = { 0: 'typeconv_naked', 1: 'typeconv_backcompat', # the default 100: 'typeconv_23plus', 199: 'typeconv_23plus_lowmem', 200: 'typeconv_24plus', # considered "ideal" for KIDB 3.2 # Code 300 is considered "ideal" for KIDB 3.3 and FB 2.1+. It # causes textual blobs to be handled the same way as other textual # types, so unicode encoding/decoding is performed automagically. # When converting in the input direction, this doesn't work with # any version of FB prior to FB 2.1, because the API doesn't make # the blob's character set ID available. # Note that we use the same typeConvModule for code 300 as for # code 200; the textual-blob-related changes are implemented by # adding 'BLOB' entries to _NORMAL_TYPE_TRANS_*. 300: 'typeconv_24plus', } chosenTypeConvModuleName = typeConvOptions[type_conv] typeConvModule = __import__('kinterbasdb.' + chosenTypeConvModuleName, globalz, locals(), (chosenTypeConvModuleName,) ) extraMinTypeTransEntries = [] # 2007.02.10 if type_conv > 1: extraMinTypeTransEntries.append('TEXT_UNICODE') if type_conv >= 300: _guessTextualBlobEncodingWhenUsingFB20AndEarlier = True extraMinTypeTransEntries.append('BLOB') if extraMinTypeTransEntries: _MINIMAL_TYPE_TRANS_TYPES = _MINIMAL_TYPE_TRANS_TYPES \ + tuple(extraMinTypeTransEntries) for name in BASELINE_TYPE_TRANSLATION_FACILITIES: if not name.startswith('OPT:'): typeConvModuleMember = getattr(typeConvModule, name) else: # Members whose entries in BASELINE_TYPE_TRANSLATION_FACILITIES # begin with 'OPT:' are not required. name = name[4:] try: typeConvModuleMember = getattr(typeConvModule, name) except AttributeError: continue globalz[name] = typeConvModuleMember # Modify the initial, empty version of the DB API type singleton DATETIME, # transforming it into a fully functional version. # The fact that the object is *modifed* rather than replaced is crucial to # the preservation of compatibility with the 'from kinterbasdb import *' # form of importation. DATETIME.values = ( # Date, Time, and Timestamp refer to functions just loaded from the # typeConvModule in the loop above. type(Date(2003,12,31)), type(Time(23,59,59)), type(Timestamp(2003,12,31,23,59,59)) ) _NORMAL_TYPE_TRANS_IN = { 'DATE': date_conv_in, 'TIME': time_conv_in, 'TIMESTAMP': timestamp_conv_in, 'FIXED': fixed_conv_in_imprecise, } _NORMAL_TYPE_TRANS_OUT = { 'DATE': date_conv_out, 'TIME': time_conv_out, 'TIMESTAMP': timestamp_conv_out, 'FIXED': fixed_conv_out_imprecise, } if type_conv > 1: _NORMAL_TYPE_TRANS_IN['TEXT_UNICODE'] = unicode_conv_in _NORMAL_TYPE_TRANS_OUT['TEXT_UNICODE'] = unicode_conv_out if type_conv >= 300: # 2007.02.10 textBlobsAsTextConfig = { 'mode': 'materialize', 'treat_subtype_text_as_text': True } _NORMAL_TYPE_TRANS_IN['BLOB'] = textBlobsAsTextConfig _NORMAL_TYPE_TRANS_OUT['BLOB'] = textBlobsAsTextConfig initialized = True def _ensureInitialized(): if not initialized: init() # The following constructors will be replaced when kinterbasdb.init is called, # whether implicitly or explicitly. If one of the constructors is called # before kinterbasdb.init, it will trigger its own replacement by calling # _ensureInitialized. 
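# (Mechanism sketch: init() rebinds these module-level names to the real
# constructors taken from the selected typeconv module, so each stub below
# runs at most once -- its apparently recursive call actually resolves to
# the freshly installed replacement.)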
def Date(year, month, day): _ensureInitialized() return Date(year, month, day) def Time(hour, minute, second): _ensureInitialized() return Time(hour, minute, second) def Timestamp(year, month, day, hour, minute, second): _ensureInitialized() return Timestamp(year, month, day, hour, minute, second) def DateFromTicks(ticks): _ensureInitialized() return DateFromTicks(ticks) def TimeFromTicks(ticks): _ensureInitialized() return TimeFromTicks(ticks) def TimestampFromTicks(ticks): _ensureInitialized() return TimestampFromTicks(ticks) ################################################### ## DYNAMIC TYPE TRANSLATION CONFIGURATION: END ## ################################################### ############################################ ## PUBLIC DB-API TYPE CONSTRUCTORS: BEGIN ## ############################################ # All date/time constructors are loaded dynamically by the init function. # Changed from buffer to str in 3.1, with the possible addition of a lazy BLOB # reader at some point in the future: Binary = str # DBAPITypeObject implementation is the DB API's suggested implementation. class DBAPITypeObject: # Purposely remains a "classic class". def __init__(self, *values): self.values = values def __cmp__(self, other): if other in self.values: return 0 if other < self.values: return 1 else: return -1 STRING = DBAPITypeObject(str, unicode) BINARY = DBAPITypeObject(str, buffer) NUMBER = DBAPITypeObject(int, long, float) # DATETIME is loaded in a deferred manner (in the init function); this initial # version remains empty only temporarily. DATETIME = DBAPITypeObject() ROWID = DBAPITypeObject() ############################################ ## PUBLIC DB-API TYPE CONSTRUCTORS: END ## ############################################ ########################################## ## PUBLIC FUNCTIONS: BEGIN ## ########################################## def connect(*args, **keywords_args): """ Minimal arguments: keyword args $dsn, $user, and $password. Establishes a kinterbasdb.Connection to a database. See the docstring of kinterbasdb.Connection for details. """ return Connection(*args, **keywords_args) def create_database(*args): """ Creates a new database with the supplied "CREATE DATABASE" statement. Returns an active kinterbasdb.Connection to the newly created database. Parameters: $sql: string containing the CREATE DATABASE statement. Note that you may need to specify a username and password as part of this statement (see the Firebird SQL Reference for syntax). $dialect: (optional) the SQL dialect under which to execute the statement """ _ensureInitialized() # For a more general-purpose immediate execution facility (the non-"CREATE # DATABASE" variant of isc_dsql_execute_immediate, for those who care), see # Connection.execute_immediate. # 2007.04.05: If the path specified in this CREATE DATABASE statement needs # to be Unicode, we should accept that. if len(args) >= 1 and isinstance(args[0], unicode): args = (args[0].encode(_FS_ENCODING),) + args[1:] C_con = _k.create_database(*args) return Connection(_CConnection=C_con) def raw_byte_to_int(raw_byte): """ Convert the byte in the single-character Python string $raw_byte into a Python integer. This function is essentially equivalent to the built-in function ord, but is different in intent (see the database_info method). """ _ensureInitialized() if len(raw_byte) != 1: raise ValueError('raw_byte must be exactly one byte, not %d bytes.' 
% len(raw_byte) ) return struct.unpack('b', raw_byte)[0] ########################################## ## PUBLIC FUNCTIONS: END ## ########################################## ########################################## ## PUBLIC CLASSES: BEGIN ## ########################################## # BlobReader, PreparedStatement and Cursor can't be instantiated from Python, # but are exposed here to support isinstance(o, kinterbasdb.Class) and the # like. Transaction = _k.Transaction # 2007.01 BlobReader = _k.BlobReader Cursor = _k.Cursor PreparedStatement = _k.PreparedStatement if _EVENT_HANDLING_SUPPORTED: EventConduit = _k.EventConduit class Connection(object): """ Represents a connection between the database client (the Python process) and the database server. The basic functionality of this class is documented by the Python DB API Specification 2.0, while the large amount of additional functionality is documented by the KInterbasDB Usage Guide (docs/usage.html). """ def __init__(self, *args, **keywords_args): # self._C_con is the instance of ConnectionType that represents this # connection in the underlying C module _k. _ensureInitialized() # Optional DB API Extension: Make the module's exception classes # available as connection attributes to ease cross-module portability: for exc_class in _ALL_EXCEPTION_CLASSES: setattr(self, exc_class.__name__, exc_class) # Inherit the module-level default TPB. self._default_tpb = default_tpb # Allow other code WITHIN THIS MODULE to obtain an instance of # ConnectionType some other way and provide it to us instead of us # creating one via Connection_connect. (The create_database function # uses this facility, for example.) if '_CConnection' in keywords_args: C_con = self._C_con = keywords_args['_CConnection'] assert C_con is not None # Since we were given a pre-existing CConnection instance rather # than creating it ourselves, we need to explicitly give it a # reference to its Python companion (self), and allow it to # establish a main_transaction: _k.Connection_python_wrapper_obj_set(C_con, self) # Nullify the private properties that we don't have in this case: self._charset = None self._C_con_params = None else: n_nonkeyword = len(args) n_keyword = len(keywords_args) if n_nonkeyword == 0 and n_keyword == 0: raise ProgrammingError( 'connect() requires at least 3 keyword arguments.' ) elif n_keyword > 0 and n_nonkeyword == 0: source_dict = keywords_args # The typical case. else: # This case is for backward compatibility ONLY: import warnings # Lazy import. warnings.warn('The non-keyword-argument form of the connect()' ' function is deprecated. Use' ' connect(dsn=..., user=..., password=...) rather than' ' connect(..., ..., ...)', DeprecationWarning ) if n_keyword > 0: raise ProgrammingError('Do not specify both non-keyword' ' args and keyword args (keyword-only is preferred).' ) elif n_nonkeyword != 3: raise ProgrammingError('If using non-keyword args, must' ' provide exactly 3: dsn, user, password.' ) else: # Transform the argument tuple into an argument dict. source_dict = {'dsn': args[0], 'user': args[1], 'password': args[2] } timeout = keywords_args.pop('timeout', None) if timeout is not None: if not _CONNECTION_TIMEOUT_SUPPORTED: raise ProgrammingError("The connection timeout feature is" " disabled in this build." ) _connection_timeout.startTimeoutThreadIfNecessary( _k.ConnectionTimeoutThread_main, _k.CTM_halt ) # Pre-render the requisite buffers (plus the dialect), then send # them down to the C level. 
_k.Connection_connect() will give us a # C-level connection structure (self._C_con, of type # ConnectionType) in return. self will then serve as a proxy for # self._C_con. # # Notice that once rendered by _build_connect_structures, the # connection parameters are retained in self._C_con_params in case # kinterbasdb's internals need to clone this connection. b = _DPBBuilder() b.buildFromParamDict(source_dict) self._charset = b.charset self._C_con_params = (b.dsn, b.dpb, b.dialect) self._C_con = _k.Connection_connect(self, b.dsn, b.dpb, b.dialect, timeout ) self._normalize_type_trans() # 2003.03.30: Moved precision_mode up to the Python level (it's # deprecated). self._precision_mode = 0 def __del__(self): # This method should not call the Python implementation of close(). self._close_physical_connection(raiseExceptionOnError=False) def drop_database(self): """ Drops the database to which this connection is attached. Unlike plain file deletion, this method behaves responsibly, in that it removes shadow files and other ancillary files for this database. """ self._ensure_group_membership(False, "Cannot drop database via" " connection that is part of a ConnectionGroup." ) _k.Connection_drop_database(self._C_con) def begin(self, tpb=None): """ Starts a transaction explicitly. This is never *required*; a transaction will be started implicitly if necessary. Parameters: $tpb: Optional transaction parameter buffer (TPB) populated with kinterbasdb.isc_tpb_* constants. See the Interbase API guide for these constants' meanings. """ return self._main_trans.begin(tpb=tpb) def prepare(self): """ Manually triggers the first phase of a two-phase commit (2PC). Use of this method is optional; if preparation is not triggered manually, it will be performed implicitly by commit() in a 2PC. See also the method ConnectionGroup.prepare. """ self._main_trans.prepare() def commit(self, retaining=False): """ Commits (permanently applies the actions that have taken place as part of) the active transaction. Parameters: $retaining (optional boolean that defaults to False): If True, the transaction is immediately cloned after it has been committed. This retains system resources associated with the transaction and leaves undisturbed the state of any cursors open on this connection. In effect, retaining commit keeps the transaction "open" across commits. See IB 6 API Guide pages 75 and 291 for more info. """ return self._main_trans.commit(retaining=retaining) def savepoint(self, name): """ Establishes a SAVEPOINT named $name. To rollback to this SAVEPOINT, use rollback(savepoint=name). Example: con.savepoint('BEGINNING_OF_SOME_SUBTASK') ... con.rollback(savepoint='BEGINNING_OF_SOME_SUBTASK') """ return self._main_trans.savepoint(name) def rollback(self, retaining=False, savepoint=None): """ Rolls back (cancels the actions that have taken place as part of) the active transaction. Parameters: $retaining (optional boolean that defaults to False): If True, the transaction is immediately cloned after it has been rolled back. This retains system resources associated with the transaction and leaves undisturbed the state of any cursors open on this connection. In effect, retaining rollback keeps the transaction "open" across rollbacks. See IB 6 API Guide pages 75 and 373 for more info. $savepoint (string name of the SAVEPOINT): If a savepoint name is supplied, only rolls back as far as that savepoint, rather than rolling back the entire transaction. 
""" return self._main_trans.rollback( retaining=retaining, savepoint=savepoint ) def execute_immediate(self, sql): """ Executes a statement without caching its prepared form. The statement must NOT be of a type that returns a result set. In most cases (especially cases in which the same statement--perhaps a parameterized statement--is executed repeatedly), it is better to create a cursor using the connection's cursor() method, then execute the statement using one of the cursor's execute methods. """ if isinstance(sql, unicode): # 2007.04.05 if self._charset: from kinterbasdb import typeconv_text_unicode as ttu pyCS = ttu.DB_CHAR_SET_NAME_TO_PYTHON_ENCODING_MAP[ self._charset ] else: pyCS = 'ascii' sql = sql.encode(pyCS) return self._main_trans._execute_immediate(sql) def database_info(self, request, result_type): """ Wraps the Interbase C API function isc_database_info. For documentation, see the IB 6 API Guide section entitled "Requesting information about an attachment" (p. 51). Note that this method is a VERY THIN wrapper around the IB C API function isc_database_info. This method does NOT attempt to interpret its results except with regard to whether they are a string or an integer. For example, requesting isc_info_user_names will return a string containing a raw succession of length-name pairs. A thicker wrapper might interpret those raw results and return a Python tuple, but it would need to handle a multitude of special cases in order to cover all possible isc_info_* items. Note: Some of the information available through this method would be more easily retrieved with the Services API (see submodule kinterbasdb.services). Parameters: $result_type must be either: 's' if you expect a string result, or 'i' if you expect an integer result """ # Note: Server-side implementation for most of isc_database_info is in # jrd/inf.cpp. res = _k.Connection_database_info(self._C_con, request, result_type) # 2004.12.12: # The result buffers for a few request codes don't follow the generic # conventions, so we need to return their full contents rather than # omitting the initial infrastructural bytes. if ( result_type == 's' and request not in _DATABASE_INFO__KNOWN_LOW_LEVEL_EXCEPTIONS ): res = res[3:] return res def db_info(self, request): # Contributed by Pavel Cisar; incorporated 2004.09.10; heavily modified # 2004.12.12. """ Higher-level convenience wrapper around the database_info method that parses the output of database_info into Python-friendly objects instead of returning raw binary buffers in the case of complex result types. If an unrecognized code is requested, ValueError is raised. Parameters: $request must be either: - A single kinterbasdb.isc_info_* info request code. In this case, a single result is returned. - A sequence of such codes. In this case, a mapping of (info request code -> result) is returned. """ # Notes: # # - IB 6 API Guide page 391: "In InterBase, integer values... # are returned in result buffers in a generic format where # the least significant byte is first, and the most # significant byte last." # We process request as a sequence of info codes, even if only one code # was supplied by the caller. requestIsSingleton = isinstance(request, int) if requestIsSingleton: request = (request,) results = {} for infoCode in request: if infoCode == isc_info_base_level: # (IB 6 API Guide page 52) buf = self.database_info(infoCode, 's') # Ignore the first byte. 
                baseLevel = struct.unpack('B', buf[1])[0]
                results[infoCode] = baseLevel
            elif infoCode == isc_info_db_id:
                # (IB 6 API Guide page 52)
                buf = self.database_info(infoCode, 's')
                pos = 0
                conLocalityCode = struct.unpack('B', buf[pos])[0]
                pos += 1
                dbFilenameLen = struct.unpack('B', buf[pos])[0]
                pos += 1
                dbFilename = buf[pos:pos+dbFilenameLen]
                pos += dbFilenameLen
                siteNameLen = struct.unpack('B', buf[pos])[0]
                pos += 1
                siteName = buf[pos:pos+siteNameLen]
                pos += siteNameLen
                results[infoCode] = (conLocalityCode, dbFilename, siteName)
            elif infoCode == isc_info_implementation:
                # (IB 6 API Guide page 52)
                buf = self.database_info(infoCode, 's')
                # Skip the first byte.
                pos = 1
                implNumber = struct.unpack('B', buf[pos])[0]
                pos += 1
                classNumber = struct.unpack('B', buf[pos])[0]
                pos += 1
                results[infoCode] = (implNumber, classNumber)
            elif infoCode in (isc_info_version, isc_info_firebird_version):
                # (IB 6 API Guide page 53)
                buf = self.database_info(infoCode, 's')
                # Skip the first byte.
                pos = 1
                versionStringLen = struct.unpack('B', buf[pos])[0]
                pos += 1
                versionString = buf[pos:pos+versionStringLen]
                results[infoCode] = versionString
            elif infoCode == isc_info_user_names:
                # (IB 6 API Guide page 54)
                #
                # The isc_info_user_names results buffer does not exactly
                # match the format declared on page 54 of the IB 6 API Guide.
                # The buffer is formatted as a sequence of clusters, each of
                # which begins with the byte isc_info_user_names, followed by
                # a two-byte cluster length, followed by a one-byte username
                # length, followed by a single username.
                # I don't understand why the lengths are represented
                # redundantly (the two-byte cluster length is always one
                # greater than the one-byte username length), but perhaps it's
                # an attempt to adhere to the general format of an information
                # cluster declared on page 51 while also [trying, but failing
                # to] adhere to the isc_info_user_names-specific format
                # declared on page 54.
                buf = self.database_info(infoCode, 's')
                usernames = []
                pos = 0
                while pos < len(buf):
                    if struct.unpack('B', buf[pos])[0] != isc_info_user_names:
                        raise OperationalError('While trying to service'
                            ' isc_info_user_names request, found unexpected'
                            ' results buffer contents at position %d of [%s]'
                            % (pos, buf)
                          )
                    pos += 1
                    # The two-byte cluster length:
                    nameClusterLen = struct.unpack('<H', buf[pos:pos+2])[0]
                    pos += 2
                    # The one-byte username length:
                    nameLen = struct.unpack('B', buf[pos])[0]
                    assert nameLen == nameClusterLen - 1
                    pos += 1
                    usernames.append(buf[pos:pos+nameLen])
                    pos += nameLen
                # The return value is a mapping of
                # username -> number of connections by that user.
                res = {}
                for un in usernames:
                    res[un] = res.get(un, 0) + 1
                results[infoCode] = res
            elif infoCode in _DATABASE_INFO_CODES_WITH_INT_RESULT:
                results[infoCode] = self.database_info(infoCode, 'i')
            elif infoCode in _DATABASE_INFO_CODES_WITH_COUNT_RESULTS:
                buf = self.database_info(infoCode, 's')
                countsByRelId = _extractDatabaseInfoCounts(buf)
                # Decided not to convert the relation IDs to relation names
                # for two reasons:
                #  1) Performance + Principle of Least Surprise
                #     If the client program is trying to do some delicate
                #     performance measurements, it's not helpful for
                #     kinterbasdb to be issuing unexpected queries behind the
                #     scenes.
                #  2) Field RDB$RELATIONS.RDB$RELATION_NAME is a CHAR field,
                #     which means its values emerge from the database with
                #     trailing whitespace, yet it's not safe in general to
                #     strip that whitespace because actual relation names can
                #     have trailing whitespace (think
                #     'create table "table1 " (f1 int)').
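                # Illustrative sketch (not executed here): a client that does
                # want relation *names* can perform the mapping itself,
                # accepting the trailing-whitespace caveat described above.
                # Assumes `con` is this Connection and `cur` is a cursor on
                # it:
                #
                #   cur.execute('select rdb$relation_id, rdb$relation_name'
                #       ' from rdb$relations')
                #   relNames = dict([(relId, relName)
                #       for relId, relName in cur.fetchall()])
                #   counts = con.db_info(isc_info_read_seq_count)
                #   countsByName = dict([(relNames.get(relId, relId), n)
                #       for relId, n in counts.items()])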
                results[infoCode] = countsByRelId
            elif infoCode in _DATABASE_INFO_CODES_WITH_TIMESTAMP_RESULT:
                buf = self.database_info(infoCode, 's')
                timestampTuple = raw_timestamp_to_tuple(buf)
                registeredConverter = self.get_type_trans_out()['TIMESTAMP']
                timestamp = registeredConverter(timestampTuple)
                results[infoCode] = timestamp
            else:
                raise ValueError('Unrecognized database info code %s'
                    % str(infoCode)
                  )

        if requestIsSingleton:
            return results[request[0]]
        else:
            return results

    def transaction_info(self, request, result_type):
        return self._main_trans.transaction_info(request, result_type)

    def trans_info(self, request):
        return self._main_trans.trans_info(request)

    def trans(self, tpb=None):
        """Creates a new Transaction that operates within the context of this
        connection.  Cursors can be created within that Transaction via its
        .cursor() method."""
        return Transaction(con=self, tpb=tpb)

    def cursor(self):
        """Creates a new cursor that operates within the context of this
        connection's main_transaction."""
        return self._main_trans.cursor()

    def close(self):
        """Closes the connection to the database server."""
        self._ensure_group_membership(False, "Cannot close a connection that"
            " is a member of a ConnectionGroup."
          )
        self._close_physical_connection(raiseExceptionOnError=True)

    # closed read-only property:
    def _closed_get(self):
        return _k.Connection_closed_get(self._C_con)
    closed = property(_closed_get)

    def _close_physical_connection(self, raiseExceptionOnError=True):
        # Sever the physical connection to the database server and replace
        # our underlying _kinterbasdb.ConnectionType object with a null
        # instance of that type, so that post-close() method calls on this
        # connection will raise ProgrammingErrors, as required by the DB API
        # Spec.
        try:
            if getattr(self, '_C_con', None) is not None:
                if (    _k
                    and self._C_con is not _k.null_connection
                    and not _k.Connection_closed_get(self._C_con)
                   ):
                    try:
                        _k.Connection_close(self._C_con)
                    except ProgrammingError:
                        if raiseExceptionOnError:
                            raise
                self._C_con = _k.null_connection
            elif raiseExceptionOnError:
                raise ProgrammingError('Connection is already closed.')
        except:
            if raiseExceptionOnError:
                raise

    def _has_db_handle(self):
        return self._C_con is not _k.null_connection

    def _has_transaction(self):
        # Does this connection currently have an active transaction
        # (including a distributed transaction)?
        return _k.Connection_has_active_transaction(self._C_con)

    def _normalize_type_trans(self):
        # Set the type translation dictionaries to their "normal" form--the
        # minimum required for standard kinterbasdb operation.
        self.set_type_trans_in(_NORMAL_TYPE_TRANS_IN)
        self.set_type_trans_out(_NORMAL_TYPE_TRANS_OUT)

    def _enforce_min_trans(self, trans_dict, translator_source):
        # Any $trans_dict that the Python programmer supplies for a
        # Connection must have entries for at least the types listed in
        # _MINIMAL_TYPE_TRANS_TYPES, because kinterbasdb uses dynamic type
        # translation even if it is not explicitly configured by the Python
        # client programmer.
        # The Cursor.set_type_trans* methods need not impose the same
        # requirement, because "translator resolution" will bubble upward
        # from the cursor to its connection.
        # This method inserts the required translators into the incoming
        # $trans_dict if that $trans_dict does not already contain them.
        # Note that $translator_source will differ between in/out translators.
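        # Illustrative sketch (not executed here; `my_timestamp_conv_out` is
        # a hypothetical callable): because of the backfilling performed
        # below, client code may supply only the entries it wants to
        # override:
        #
        #   con.set_type_trans_out({'TIMESTAMP': my_timestamp_conv_out})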
for type_name in _MINIMAL_TYPE_TRANS_TYPES: if type_name not in trans_dict: trans_dict[type_name] = translator_source[type_name] def set_type_trans_out(self, trans_dict): """ Changes the outbound type translation map. For more information, see the "Dynamic Type Translation" section of the KInterbasDB Usage Guide. """ _trans_require_dict(trans_dict) self._enforce_min_trans(trans_dict, _NORMAL_TYPE_TRANS_OUT) return _k.set_Connection_type_trans_out(self._C_con, trans_dict) def get_type_trans_out(self): """ Retrieves the outbound type translation map. For more information, see the "Dynamic Type Translation" section of the KInterbasDB Usage Guide. """ return _k.get_Connection_type_trans_out(self._C_con) def set_type_trans_in(self, trans_dict): """ Changes the inbound type translation map. For more information, see the "Dynamic Type Translation" section of the KInterbasDB Usage Guide. """ _trans_require_dict(trans_dict) self._enforce_min_trans(trans_dict, _NORMAL_TYPE_TRANS_IN) return _k.set_Connection_type_trans_in(self._C_con, trans_dict) def get_type_trans_in(self): """ Retrieves the inbound type translation map. For more information, see the "Dynamic Type Translation" section of the KInterbasDB Usage Guide. """ return _k.get_Connection_type_trans_in(self._C_con) if not _EVENT_HANDLING_SUPPORTED: def event_conduit(self, event_names): raise NotSupportedError("Event handling was not enabled when" " kinterbasdb's C layer was compiled." ) else: def event_conduit(self, event_names): return _k.EventConduit_create(self._C_con, self._C_con_params, event_names ) # default_tpb read-write property: def _default_tpb_get(self): return self._default_tpb def _default_tpb_set(self, value): self._default_tpb = _validateTPB(value) default_tpb = property(_default_tpb_get, _default_tpb_set) # The C layer of KInterbasDB uses this read-only property when it needs a # TPB that's strictly a memory buffer, rather than potentially a TPB # instance. def __default_tpb_str_get_(self): defTPB = self.default_tpb if not isinstance(defTPB, str): defTPB = defTPB.render() return defTPB _default_tpb_str_ = property(__default_tpb_str_get_) # dialect read-write property: def _dialect_get(self): return _k.Connection_dialect_get(self._C_con) def _dialect_set(self, value): _k.Connection_dialect_set(self._C_con, value) dialect = property(_dialect_get, _dialect_set) # precision_mode read-write property (deprecated): def _precision_mode_get(self): # Postpone this warning until a later version: #import warnings # Lazy import. #warnings.warn( # 'precision_mode is deprecated in favor of dynamic type' # ' translation (see the [set|get]_type_trans_[in|out] methods).', # DeprecationWarning # ) return self._precision_mode def _precision_mode_set(self, value): # Postpone this warning until a later version: #import warnings # Lazy import. #warnings.warn( # 'precision_mode is deprecated in favor of dynamic type' # ' translation (see the [set|get]_type_trans_[in|out] methods).', # DeprecationWarning # ) value = bool(value) # Preserve the previous DTT settings that were in place before this # call to the greatest extent possible (although dynamic type # translation and the precision_mode attribute really aren't meant to # be used together). 
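        # Illustrative sketch (not executed here): the two branches below are
        # equivalent to explicit dynamic type translation calls such as:
        #
        #   con.set_type_trans_in({'FIXED': fixed_conv_in_precise})
        #   con.set_type_trans_out({'FIXED': fixed_conv_out_precise})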
trans_in = self.get_type_trans_in() trans_out = self.get_type_trans_out() if value: # precise: trans_in['FIXED'] = fixed_conv_in_precise trans_out['FIXED'] = fixed_conv_out_precise else: # imprecise: trans_in['FIXED'] = fixed_conv_in_imprecise trans_out['FIXED'] = fixed_conv_out_imprecise self.set_type_trans_in(trans_in) self.set_type_trans_out(trans_out) self._precision_mode = value precision_mode = property(_precision_mode_get, _precision_mode_set) # server_version read-only property: def _server_version_get(self): return self.db_info(isc_info_version) server_version = property(_server_version_get) # charset read-only property: def _charset_get(self): return self._charset def _charset_set(self, value): # More informative error message: raise AttributeError("A connection's 'charset' property can be" " specified upon Connection creation as a keyword argument to" " kinterbasdb.connect, but it cannot be modified thereafter." ) charset = property(_charset_get, _charset_set) def _guessBlobCharSetIDFromConnectionCharSet(self): # 2007.02.10: HACK if _guessTextualBlobEncodingWhenUsingFB20AndEarlier: from kinterbasdb.typeconv_text_unicode \ import DB_CHAR_SET_NAME_TO_DB_CHAR_SET_ID_MAP return DB_CHAR_SET_NAME_TO_DB_CHAR_SET_ID_MAP.get( self._charset, None ) else: return None # group read-only property: def _group_get(self): return self._main_trans._group group = property(_group_get) def _set_group(self, group): # This package-private method allows ConnectionGroup's membership # management functionality to bypass the conceptually read-only nature # of the Connection.group property. self._main_trans._group = group def _ensure_group_membership(self, must_be_member, err_msg): if must_be_member: if self.group is None: raise ProgrammingError(err_msg) else: if not hasattr(self, 'group'): return if self.group is not None: raise ProgrammingError(err_msg) def _timeout_enabled_get(self): return _k.Connection_timeout_enabled(self._C_con) _timeout_enabled = property(_timeout_enabled_get) def _main_trans_get(self): # NOTE: We do not store a reference to the Transaction object within # this Python Connection class, because that would defeat the # Python-ref-cycle-free nature of the underlying C code. return _k.Connection_main_trans_get(self._C_con) _main_trans = property(_main_trans_get) def _main_transaction_get(self): # NOTE: We do not store a reference to the # ExternallyVisibleMainTransaction object within this Python Connection # class, because that would defeat the Python-ref-cycle-free nature of # the underlying C code. return ExternallyVisibleMainTransaction(self._main_trans) main_transaction = property(_main_transaction_get) def _transactions_get(self): return _k.Connection_transactions_get(self._C_con) transactions = property(_transactions_get) def _activity_stamps(self): return _k.Connection__read_activity_stamps(self._C_con) class ExternallyVisibleMainTransaction(Transaction): # Accept an internal (Transaction_is_main(t)) kinterbasdb.Transaction # instance and wrap it, impersonating it as closely as possible. Since # the internal Transaction object does not own a reference to its # Connection (*unlike* Transaction objects explicitly created by client # code), this proxy ensures that the Connection remains alive at least as # long as externally held references to its internal Transaction. 
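    # Illustrative sketch (not executed here): the _rawMainTrans_con
    # reference established in __init__ is what makes the following safe even
    # after the client drops its only direct reference to the Connection:
    #
    #   t = con.main_transaction
    #   del con
    #   t.begin()  # The proxy still keeps the underlying Connection alive.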
    def __init__(self, rawMainTrans):
        Transaction.__setattr__(self, '_rawMainTrans', rawMainTrans)
        Transaction.__setattr__(self, '_rawMainTrans_con',
            rawMainTrans.connection
          )

    def __getattribute__(self, name):
        return getattr(
            Transaction.__getattribute__(self, '_rawMainTrans'), name
          )

    def __cmp__(self, other):
        return cmp(Transaction.__getattribute__(self, '_rawMainTrans'), other)

    def __hash__(self):
        return hash(Transaction.__getattribute__(self, '_rawMainTrans'))


class ConnectionGroup(object):
    # XXX: ConnectionGroup objects currently are not thread-safe.  Since
    # separate Connections can be manipulated simultaneously by different
    # threads in kinterbasdb, it would make sense for a container of multiple
    # connections to be safely manipulable simultaneously by multiple
    # threads.

    # XXX: Adding two connections to the same database freezes the DB client
    # library.  However, I've no way to detect with certainty whether any
    # given con1 and con2 are connected to the same database, what with
    # database aliases, IP host name aliases, remote-vs-local protocols, etc.
    # Therefore, a warning must be added to the docs.

    def __init__(self, connections=()):
        _ensureInitialized()
        self._cons = []
        self._trans_handle = None
        for con in connections:
            self.add(con)

    def __del__(self):
        self.disband()

    def disband(self):
        # Notice that the ConnectionGroup rollback()s itself BEFORE releasing
        # its Connection references.
        if getattr(self, '_trans_handle', None) is not None:
            self.rollback()
        if hasattr(self, '_cons'):
            self.clear()

    # Membership methods:
    def add(self, con):
        ### CONSTRAINTS ON $con: ###
        # con must be an instance of kinterbasdb.Connection:
        if not isinstance(con, Connection):
            raise TypeError('con must be an instance of'
                ' kinterbasdb.Connection'
              )
        # con cannot already be a member of this group:
        if con in self:
            raise ProgrammingError('con is already a member of this group.')
        # con cannot belong to more than one group at a time:
        if con.group:
            raise ProgrammingError('con is already a member of another group;'
                ' it cannot belong to more than one group at once.'
              )
        # con cannot be added if it has an active transaction:
        if con._has_transaction():
            raise ProgrammingError('con already has an active transaction;'
                ' that must be resolved before con can join this group.'
              )
        # con must be connected to a database; it must not have been closed.
        if not con._has_db_handle():
            raise ProgrammingError('con has been closed; it cannot join a'
                ' group.'
              )
        if con._timeout_enabled:
            raise ProgrammingError('Connections with timeout enabled cannot'
                ' participate in distributed transactions.'
              )

        ### CONSTRAINTS ON $self: ###
        # self cannot accept new members while self has an unresolved
        # transaction:
        self._require_transaction_state(False,
            'Cannot add connection to group that has an unresolved'
            ' transaction.'
          )
        # self cannot have more than DIST_TRANS_MAX_DATABASES members:
        if self.count() >= DIST_TRANS_MAX_DATABASES:
            raise ProgrammingError('The database engine limits the number of'
                ' database handles that can participate in a single'
                ' distributed transaction to %d or fewer; this group already'
                ' has %d members.'
                % (DIST_TRANS_MAX_DATABASES, self.count())
              )

        ### CONSTRAINTS FINISHED ###

        # Can't set con.group directly (read-only); must use package-private
        # method.
        con._set_group(self)
        self._cons.append(con)

    def remove(self, con):
        if con not in self:
            raise ProgrammingError('con is not a member of this group.')
        assert con.group is self
        self._require_transaction_state(False,
            'Cannot remove connection from group that has an unresolved'
            ' transaction.'
) con._set_group(None) self._cons.remove(con) def clear(self): self._require_transaction_state(False, 'Cannot clear group that has an unresolved transaction.' ) for con in self.members(): self.remove(con) assert self.count() == 0 def members(self): return self._cons[:] # return a *copy* of the internal list def count(self): return len(self._cons) def contains(self, con): return con in self._cons __contains__ = contains # alias to support the 'in' operator def __iter__(self): return iter(self._cons) # Transactional methods: def _require_transaction_state(self, must_be_active, err_msg=''): trans_handle = self._trans_handle if ( (must_be_active and trans_handle is None) or (not must_be_active and trans_handle is not None) ): raise ProgrammingError(err_msg) def _require_non_empty_group(self, operation_name): if self.count() == 0: raise ProgrammingError('Cannot %s distributed transaction with' ' an empty ConnectionGroup.' % operation_name ) def begin(self): self._require_transaction_state(False, 'Must resolve current transaction before starting another.' ) self._require_non_empty_group('start') self._trans_handle = _k.distributed_begin(self, self._cons) def prepare(self): """ Manually triggers the first phase of a two-phase commit (2PC). Use of this method is optional; if preparation is not triggered manually, it will be performed implicitly by commit() in a 2PC. """ self._require_non_empty_group('prepare') self._require_transaction_state(True, 'This group has no transaction to prepare.' ) _k.distributed_prepare(self._trans_handle) def commit(self, retaining=False): self._require_non_empty_group('commit') # The consensus among Python DB API experts is that transactions should # always be started implicitly, even if that means allowing a commit() # or rollback() without an actual transaction. if self._trans_handle is None: return _k.distributed_commit(self, self._trans_handle, self._cons, retaining) self._trans_handle = None def rollback(self, retaining=False): self._require_non_empty_group('roll back') # The consensus among Python DB API experts is that transactions should # always be started implicitly, even if that means allowing a commit() # or rollback() without an actual transaction. if self._trans_handle is None: return _k.distributed_rollback(self, self._trans_handle, self._cons, retaining) self._trans_handle = None ########################################## ## PUBLIC CLASSES: END ## ########################################## class _RowMapping(object): """ An internal kinterbasdb class that wraps a row of results in order to map field name to field value. kinterbasdb makes ABSOLUTELY NO GUARANTEES about the return value of the fetch(one|many|all) methods except that it is a sequence indexed by field position, and no guarantees about the return value of the fetch(one|many|all)map methods except that it is a mapping of field name to field value. Therefore, client programmers should NOT rely on the return value being an instance of a particular class or type. """ def __init__(self, description, row): self._description = description fields = self._fields = {} pos = 0 for fieldSpec in description: # It's possible for a result set from the database engine to return # multiple fields with the same name, but kinterbasdb's key-based # row interface only honors the first (thus setdefault, which won't # store the position if it's already present in self._fields). 
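            # Illustrative sketch (not executed here; the table and column
            # names are placeholders): _RowMapping instances are what the
            # fetch(one|many|all)map methods return:
            #
            #   cur.execute('select emp_no, full_name from employee')
            #   row = cur.fetchonemap()
            #   print row['FULL_NAME']  # Also matches row['full_name'].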
            fields.setdefault(fieldSpec[DESCRIPTION_NAME], row[pos])
            pos += 1

    def __len__(self):
        return len(self._fields)

    def __getitem__(self, fieldName):
        fields = self._fields
        # Straightforward, unnormalized lookup will work if the fieldName is
        # already uppercase and/or if it refers to a database field whose
        # name is case-sensitive.
        if fieldName in fields:
            return fields[fieldName]
        else:
            fieldNameNormalized = _normalizeDatabaseIdentifier(fieldName)
            try:
                return fields[fieldNameNormalized]
            except KeyError:
                raise KeyError('Result set has no field named "%s". The field'
                    ' name must be one of: (%s)'
                    % (fieldName, ', '.join(fields.keys()))
                  )

    def get(self, fieldName, defaultValue=None):
        try:
            return self[fieldName]
        except KeyError:
            return defaultValue

    def __contains__(self, fieldName):
        try:
            self[fieldName]
        except KeyError:
            return False
        else:
            return True

    def __str__(self):
        # Return an easily readable dump of this row's field names and their
        # corresponding values.
        return '<result set row with %s>' % ', '.join([
            '%s = %s' % (fieldName, self[fieldName])
            for fieldName in self._fields.keys()
          ])

    def keys(self):
        # Note that this is an *ordered* list of keys.
        return [fieldSpec[DESCRIPTION_NAME] for fieldSpec in self._description]

    def values(self):
        # Note that this is an *ordered* list of values.
        return [self[fieldName] for fieldName in self.keys()]

    def items(self):
        return [(fieldName, self[fieldName]) for fieldName in self.keys()]

    def iterkeys(self):
        for fieldDesc in self._description:
            yield fieldDesc[DESCRIPTION_NAME]
    __iter__ = iterkeys

    def itervalues(self):
        for fieldName in self:
            yield self[fieldName]

    def iteritems(self):
        for fieldName in self:
            yield fieldName, self[fieldName]


class _DPBBuilder(object):
    def buildFromParamDict(self, d):
        dsn = d.get('dsn', None)
        host = d.get('host', None)
        database = d.get('database', None)
        user = d.get('user', os.environ.get('ISC_USER', None))
        password = d.get('password', os.environ.get('ISC_PASSWORD', None))
        role = d.get('role', None)
        charset = d.get('charset', None)
        dialect = d.get('dialect', 0)
        dpbEntries = d.get('dpb_entries', ())

        self.dsn = self.buildDSNFrom(dsn, host, database)
        del dsn, host, database

        # Build the database parameter buffer (DPB).  self._dpb is a list of
        # binary strings that will be rolled up into a single binary string
        # and passed, ultimately, to the C function isc_attach_database as
        # the 'dpb' argument.
        self.initializeDPB()
        self.addStringIfProvided(isc_dpb_user_name, user)
        self.addStringIfProvided(isc_dpb_password, password)
        self.addStringIfProvided(isc_dpb_sql_role_name, role)
        self.charset = (charset and charset.upper()) or None
        if self.charset:
            self.addString(isc_dpb_lc_ctype, charset)
        self.processUserSuppliedDPBEntries(dpbEntries)
        self.renderDPB()

        # Leave dialect alone; the C code will validate it.
        self.dialect = dialect

        assert self.dsn is not None
        assert self.dpb is not None
        assert self.dialect is not None
        assert hasattr(self, 'charset')

    def buildDSNFrom(self, dsn, host, database):
        if (    (not dsn and not host and not database)
             or (dsn and (host or database))
             or (host and not database)
           ):
            raise ProgrammingError(
                "Must supply one of:\n"
                " 1. keyword argument dsn='host:/path/to/database'\n"
                " 2. both keyword arguments host='host' and"
                    " database='/path/to/database'\n"
                " 3. only keyword argument database='/path/to/database'"
              )
        if not dsn:
            if host and host.endswith(':'):
                raise ProgrammingError('Host must not end with a colon.'
                    ' You should specify host="%s" rather than host="%s".'
                    % (host[:-1], host)
                  )
            elif host:
                dsn = '%s:%s' % (host, database)
            else:
                dsn = database

        if _FS_ENCODING: # 2007.03.19
            dsn = dsn.encode(_FS_ENCODING)

        assert dsn, 'Internal error in _build_connect_structures DSN prep.'
        return dsn

    def initializeDPB(self):
        # Start with requisite DPB boilerplate, a single byte that informs
        # the database API what version of DPB it's dealing with:
        self._dpb = [ struct.pack('c', isc_dpb_version1) ]

    def renderDPB(self):
        self.dpb = ''.join(self._dpb)

    def addString(self, codeAsByte, s):
        # Append a string parameter to the end of the DPB.  A string
        # parameter is represented in the DPB by the following binary
        # sequence:
        #  - a 1-byte byte code telling the purpose of the upcoming string
        #  - 1 byte telling the length of the upcoming string
        #  - the string itself
        # See IB 6 API guide page 44 for documentation of what's going on
        # here.
        self._validateCode(codeAsByte)
        sLen = len(s)
        if sLen >= 256:
            # Because the length is denoted in the DPB by a single byte.
            raise ProgrammingError('Individual component of database'
                ' parameter buffer is too large.  Components must be less'
                ' than 256 bytes.'
              )
        format = 'cc%ds' % sLen # like 'cc50s' for a 50-byte string
        newEntry = struct.pack(format, codeAsByte, chr(sLen), s)
        self._dpb.append(newEntry)

    def addStringIfProvided(self, codeAsByte, value):
        if value:
            self.addString(codeAsByte, value)

    def addInt(self, codeAsByte, value):
        self._validateCode(codeAsByte)
        if not isinstance(value, (int, long)) or value < 0 or value > 255:
            raise ProgrammingError('The value for an integer DPB code must be'
                ' an int or long with a value between 0 and 255.'
              )
        newEntry = struct.pack('ccc', codeAsByte, '\x01', chr(value))
        self._dpb.append(newEntry)

    def processUserSuppliedDPBEntries(self, dpbEntries):
        # 'dpb_entries' is supposed to be a sequence of 2- or 3-tuples,
        # containing:
        #   (code, value[, type])
        # kinterbasdb doesn't need the type specified for codes that it
        # already recognizes, but for future codes, the user can specify the
        # type, which controls how the code is inserted into the DPB.
        for i, entry in enumerate(dpbEntries):
            codeAsByte = entry[0]
            value = entry[1]
            if len(entry) > 2:
                typeCode = entry[2]
            else:
                typeCode = None

            if typeCode is None:
                if codeAsByte in _DPB_CODES_WITH_STRING_VALUE:
                    typeCode = 's'
                elif codeAsByte in _DPB_CODE_WITH_INT_VALUE:
                    typeCode = 'i'
                else:
                    raise ProgrammingError('kinterbasdb cannot automatically'
                        ' recognize DPB code %s.  You need to supply a type'
                        ' code (either \'s\' or \'i\') as the third element'
                        ' of user-supplied DPB entry #%d.'
                        % (repr(codeAsByte), i + 1)
                      )

            if typeCode == 's':
                self.addString(codeAsByte, value)
            elif typeCode == 'i':
                self.addInt(codeAsByte, value)
            else:
                raise ProgrammingError('The supplied DPB type code must be'
                    ' either \'s\' or \'i\'.'
                  )

    def _validateCode(self, code):
        if not isinstance(code, str) or len(code) != 1:
            raise ProgrammingError('DPB code must be single-character str.')

# All DPB codes as of FB 2.0.0b1:
# Note: Many of these codes duplicate the functionality provided by the
# Services API, so I've only attempted to add automatic recognition support
# for the most useful parameters.
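# Worked example (illustrative): given the packing scheme implemented by
# _DPBBuilder.addString above, a user name entry for the 6-byte string
# 'sysdba' renders as a type byte, a length byte, and the string itself:
#
#   >>> entry = struct.pack('cc%ds' % 6, isc_dpb_user_name, chr(6), 'sysdba')
#   >>> len(entry)  # 1 (type) + 1 (length) + 6 (string)
#   8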
# isc_dpb_version1 # isc_dpb_cdd_pathname # isc_dpb_allocation # isc_dpb_journal # isc_dpb_page_size # isc_dpb_num_buffers # isc_dpb_buffer_length # isc_dpb_debug # isc_dpb_garbage_collect # isc_dpb_verify # isc_dpb_sweep # isc_dpb_enable_journal # isc_dpb_disable_journal # isc_dpb_dbkey_scope # isc_dpb_number_of_users # isc_dpb_trace # isc_dpb_no_garbage_collect # isc_dpb_damaged # isc_dpb_license # isc_dpb_sys_user_name : s # isc_dpb_encrypt_key # isc_dpb_activate_shadow # isc_dpb_sweep_interval # isc_dpb_delete_shadow # isc_dpb_force_write # isc_dpb_begin_log # isc_dpb_quit_log # isc_dpb_no_reserve # isc_dpb_user_name # isc_dpb_password : s # isc_dpb_password_enc # isc_dpb_sys_user_name_enc # isc_dpb_interp # isc_dpb_online_dump # isc_dpb_old_file_size # isc_dpb_old_num_files # isc_dpb_old_file # isc_dpb_old_start_page # isc_dpb_old_start_seqno # isc_dpb_old_start_file # isc_dpb_drop_walfile # isc_dpb_old_dump_id # isc_dpb_wal_backup_dir # isc_dpb_wal_chkptlen # isc_dpb_wal_numbufs # isc_dpb_wal_bufsize # isc_dpb_wal_grp_cmt_wait # isc_dpb_lc_messages : s # isc_dpb_lc_ctype # isc_dpb_cache_manager # isc_dpb_shutdown # isc_dpb_online # isc_dpb_shutdown_delay # isc_dpb_reserved # isc_dpb_overwrite # isc_dpb_sec_attach # isc_dpb_disable_wal # isc_dpb_connect_timeout : i # isc_dpb_dummy_packet_interval : i # isc_dpb_gbak_attach # isc_dpb_sql_role_name # isc_dpb_set_page_buffers # isc_dpb_working_directory # isc_dpb_sql_dialect : i # isc_dpb_set_db_readonly # isc_dpb_set_db_sql_dialect : i # isc_dpb_gfix_attach # isc_dpb_gstat_attach # isc_dpb_set_db_charset : s _DPB_CODES_WITH_STRING_VALUE = [ isc_dpb_user_name, isc_dpb_password, isc_dpb_lc_messages, ] if 'isc_dpb_set_db_charset' in globals(): _DPB_CODES_WITH_STRING_VALUE.append(isc_dpb_set_db_charset) _DPB_CODE_WITH_INT_VALUE = [ isc_dpb_connect_timeout, isc_dpb_dummy_packet_interval, isc_dpb_sql_dialect, isc_dpb_set_db_sql_dialect, ] def _trans_require_dict(obj): if not isinstance(obj, dict): raise TypeError( "The dynamic type translation table must be a dictionary, not a %s" % ( (hasattr(obj, '__class__') and obj.__class__.__name__) or str(type(obj)) ) ) _OUT_TRANS_FUNC_SAMPLE_ARGS = { 'TEXT': 'sample', 'TEXT_UNICODE': ('sample', 3), 'BLOB': 'sample', 'INTEGER': 1, 'FLOATING': 1.0, 'FIXED': (10, -1), 'DATE': (2003,12,31), 'TIME': (23,59,59), 'TIMESTAMP': (2003,12,31,23,59,59), } def _make_output_translator_return_type_dict_from_trans_dict(trans_dict): # This Python function is called from the C level; don't remove it. # # Calls each output translator in trans_dict, passing the translator sample # arguments and recording its return type. # Returns a mapping of translator key -> return type. trans_return_types = {} for (trans_key, translator) in trans_dict.items(): if isinstance(trans_key, int): # The type entry in Cursor.description is not updated properly to # reflect *positional* DTT settings, and I can think of no # reasonable way to correct that. continue # Follow this path for any 'BLOB' DTT with a dict translator--the # contents of the dict will be validated later, at the C level. if trans_key == 'BLOB' and isinstance(translator, dict): if translator.get('mode', None) == 'stream': trans_return_types[trans_key] = BlobReader continue if translator is None: # Don't make an entry for "naked" translators; the # Cursor.description creation code will fall back on the default # type. continue try: sample_arg = _OUT_TRANS_FUNC_SAMPLE_ARGS[trans_key] except KeyError: raise ProgrammingError( "Cannot translate type '%s'. 
Type must be one of %s."
                % (trans_key, _OUT_TRANS_FUNC_SAMPLE_ARGS.keys())
              )

        return_val = translator(sample_arg)
        return_type = type(return_val)
        trans_return_types[trans_key] = return_type
    return trans_return_types


class TPB(_RequestBufferBuilder):
    def __init__(self):
        _RequestBufferBuilder.__init__(self)

        self._access_mode = isc_tpb_write
        self._isolation_level = isc_tpb_concurrency
        self._lock_resolution = isc_tpb_wait
        self._lock_timeout = None
        self._table_reservation = None

    def copy(self):
        # A shallow copy of self would be entirely safe except that
        # .table_reservation is a complex object that needs to be copied
        # separately.
        import copy
        other = copy.copy(self)
        if self._table_reservation is not None:
            other._table_reservation = copy.copy(self._table_reservation)
        return other

    def render(self):
        # YYY: Optimization:  Could memoize the rendered TPB str.
        self.clear()
        self._addCode(isc_tpb_version3)
        self._addCode(self._access_mode)
        il = self._isolation_level
        if not isinstance(il, tuple):
            il = (il,)
        for code in il:
            self._addCode(code)
        self._addCode(self._lock_resolution)
        if self._lock_timeout is not None:
            self._addCode(isc_tpb_lock_timeout)
            self._addRaw(struct.pack(
                # One byte tells the size of the following value; an unsigned
                # int tells the number of seconds to wait before timing out.
                '<bI', 4, self._lock_timeout
              ))
        if self._table_reservation is not None:
            self._addRaw(self._table_reservation.render())
        return _RequestBufferBuilder.render(self)

    # access_mode property:
    def _get_access_mode(self):
        return self._access_mode
    def _set_access_mode(self, access_mode):
        if access_mode not in (isc_tpb_read, isc_tpb_write):
            raise ProgrammingError('Access mode must be one of'
                ' (isc_tpb_read, isc_tpb_write).'
              )
        self._access_mode = access_mode
    access_mode = property(_get_access_mode, _set_access_mode)

    # isolation_level property:
    def _get_isolation_level(self):
        return self._isolation_level
    def _set_isolation_level(self, isolation_level):
        if isinstance(isolation_level, tuple):
            if len(isolation_level) != 2:
                raise ProgrammingError('The tuple variant of isolation level'
                    ' must have two elements: isc_tpb_read_committed in the'
                    ' first element and one of (isc_tpb_rec_version,'
                    ' isc_tpb_no_rec_version) in the second.'
                  )
            isolation_level, suboption = isolation_level
        elif isolation_level == isc_tpb_read_committed:
            suboption = isc_tpb_rec_version

        if isolation_level not in (
              isc_tpb_concurrency, isc_tpb_consistency, isc_tpb_read_committed
            ):
            raise ProgrammingError('Isolation level must be one of'
                ' (isc_tpb_concurrency, isc_tpb_consistency,'
                ' isc_tpb_read_committed).'
              )
        if isolation_level == isc_tpb_read_committed:
            if suboption not in (isc_tpb_rec_version, isc_tpb_no_rec_version):
                raise ProgrammingError('With isolation level'
                    ' isc_tpb_read_committed, the suboption must be one of'
                    ' (isc_tpb_rec_version, isc_tpb_no_rec_version).'
                  )
            isolation_level = (isolation_level, suboption)
        self._isolation_level = isolation_level
    isolation_level = property(_get_isolation_level, _set_isolation_level)

    # lock_resolution property:
    def _get_lock_resolution(self):
        return self._lock_resolution
    def _set_lock_resolution(self, lock_resolution):
        if lock_resolution not in (isc_tpb_wait, isc_tpb_nowait):
            raise ProgrammingError('Lock resolution must be one of'
                ' (isc_tpb_wait, isc_tpb_nowait).'
              )
        self._lock_resolution = lock_resolution
    lock_resolution = property(_get_lock_resolution, _set_lock_resolution)

    # lock_timeout property:
    def _get_lock_timeout(self):
        return self._lock_timeout
    def _set_lock_timeout(self, lock_timeout):
        if lock_timeout is not None and (
               (not isinstance(lock_timeout, (int, long)))
            or lock_timeout < 0 or lock_timeout > UINT_MAX
          ):
            raise ProgrammingError('Lock timeout must be either None'
                ' or a non-negative int number of seconds between 0 and'
                ' %d.' % UINT_MAX
              )
        self._lock_timeout = lock_timeout
    lock_timeout = property(_get_lock_timeout, _set_lock_timeout)

    # table_reservation property (an instance of TableReservation):
    def _get_table_reservation(self):
        if self._table_reservation is None:
            self._table_reservation = TableReservation()
        return self._table_reservation
    def _set_table_reservation_access(self, _):
        raise ProgrammingError('Instead of changing the value of the'
            ' .table_reservation object itself, you must change its'
            ' *elements* by manipulating it as though it were a dictionary'
            ' that mapped'
            '\n  "TABLE_NAME": (sharingMode, accessMode)'
            '\nFor example:'
            '\n  tpbBuilder.table_reservation["MY_TABLE"] ='
            ' (kinterbasdb.isc_tpb_protected, kinterbasdb.isc_tpb_lock_write)'
          )
    table_reservation = property(
        _get_table_reservation, _set_table_reservation_access
      )


class TableReservation(object):
    _MISSING = object()
    _SHARING_MODE_STRS = {
        isc_tpb_shared:    'isc_tpb_shared',
        isc_tpb_protected: 'isc_tpb_protected',
        isc_tpb_exclusive: 'isc_tpb_exclusive',
      }
    _ACCESS_MODE_STRS = {
        isc_tpb_lock_read:  'isc_tpb_lock_read',
        isc_tpb_lock_write: 'isc_tpb_lock_write',
      }

    def __init__(self):
        self._res = {}

    def copy(self):
        # A shallow copy is fine.
        import copy
        return copy.copy(self)

    def render(self):
        if not self:
            return ''
        frags = []
        _ = frags.append
        for tableName, resDefs in self.iteritems():
            tableNameLenWithTerm = len(tableName) + 1
            for (sharingMode, accessMode) in resDefs:
                _(accessMode)
                _(struct.pack('<b%ds' % tableNameLenWithTerm,
                    tableNameLenWithTerm, tableName + '\0'
                  ))
                _(sharingMode)
        return ''.join(frags)

    def __len__(self):
        return len(self._res)

    def __getitem__(self, key):
        key = self._validateKey(key)
        if key in self._res:
            return self._res[key]
        else:
            nonNormalizedKey = key
            key = _normalizeDatabaseIdentifier(key)
            try:
                return self._res[key]
            except KeyError:
                raise KeyError('No table named "%s" is present.'
                    % nonNormalizedKey
                  )

    def get(self, key, default=None):
        try:
            return self[key]
        except (KeyError, TypeError):
            return default

    def __contains__(self, key):
        return (self.get(key, TableReservation._MISSING)
            is not TableReservation._MISSING)

    def __str__(self):
        if not self:
            return '<TableReservation with no entries>'
        frags = ['<TableReservation with entries:\n']
        for tableName, resDefs in self.iteritems():
            frags.append('  "%s":\n' % tableName)
            for resDef in resDefs:
                sharingModeStr = TableReservation._SHARING_MODE_STRS[resDef[0]]
                accessModeStr = TableReservation._ACCESS_MODE_STRS[resDef[1]]
                frags.append('    (%s, %s)\n'
                    % (sharingModeStr, accessModeStr)
                  )
        frags.append('>')
        return ''.join(frags)

    def keys(self):
        return self._res.keys()

    def values(self):
        return self._res.values()

    def items(self):
        return self._res.items()

    def iterkeys(self):
        return self._res.iterkeys()

    def itervalues(self):
        return self._res.itervalues()

    def iteritems(self):
        return self._res.iteritems()

    def __setitem__(self, key, value):
        key = self._validateKey(key)
        key = _normalizeDatabaseIdentifier(key)
        # If the += operator is being applied, the form of value will be like:
        #   [(sharingMode0, accessMode0), ..., newSharingMode, newAccessMode]
        # For the sake of convenience, we detect this situation and handle it
        # "naturally".
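        # Illustrative sketch (not executed here) of the += form handled
        # below:
        #
        #   tpb = kinterbasdb.TPB()
        #   tpb.table_reservation['MY_TABLE'] = \
        #       (isc_tpb_shared, isc_tpb_lock_read)
        #   tpb.table_reservation['MY_TABLE'] += \
        #       (isc_tpb_protected, isc_tpb_lock_write)
        #   # 'MY_TABLE' now carries both reservation entries.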
if isinstance(value, list) and len(value) >= 3: otherValues = value[:-2] value = tuple(value[-2:]) else: otherValues = None if ( (not isinstance(value, tuple)) or len(value) != 2 or value[0] not in (isc_tpb_shared, isc_tpb_protected, isc_tpb_exclusive) or value[1] not in (isc_tpb_lock_read, isc_tpb_lock_write) ): raise ValueError('Table reservation entry must be a 2-tuple of' ' the following form:\n' 'element 0: sharing mode (one of (isc_tpb_shared,' ' isc_tpb_protected, isc_tpb_exclusive))\n' 'element 1: access mode (one of (isc_tpb_lock_read,' ' isc_tpb_lock_write))\n' '%s is not acceptable.' % str(value) ) if otherValues is None: value = [value] else: otherValues.append(value) value = otherValues self._res[key] = value def _validateKey(self, key): keyMightBeAcceptable = isinstance(key, basestring) if keyMightBeAcceptable and isinstance(key, unicode): try: key = key.encode('ASCII') except UnicodeEncodeError: keyMightBeAcceptable = False if not keyMightBeAcceptable: raise TypeError('Only str keys are allowed.') return key def _validateTPB(tpb): if isinstance(tpb, TPB): # TPB's accessor methods perform their own validation, and its # render method takes care of infrastructural trivia. return tpb elif not (isinstance(tpb, str) and len(tpb) > 0): raise ProgrammingError('TPB must be non-unicode string of length > 0') # The kinterbasdb documentation promises (or at least strongly implies) # that if the user tries to set a TPB that does not begin with # isc_tpb_version3, kinterbasdb will automatically supply that # infrastructural value. This promise might cause problems in the future, # when isc_tpb_version3 is superseded. A possible solution would be to # check the first byte against all known isc_tpb_versionX version flags, # like this: # if tpb[0] not in (isc_tpb_version3, ..., isc_tpb_versionN): # tpb = isc_tpb_version3 + tpb # That way, compatibility with old versions of the DB server would be # maintained, but client code could optionally specify a newer TPB version. if tpb[0] != isc_tpb_version3: tpb = isc_tpb_version3 + tpb return tpb def _normalizeDatabaseIdentifier(ident): if ident.startswith('"') and ident.endswith('"'): # Quoted name; leave the case of the field name untouched, but # strip the quotes. return ident[1:-1] else: # Everything else is normalized to uppercase to support case- # insensitive lookup. 
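        # For example (illustrative):
        #   _normalizeDatabaseIdentifier('full_name')     -> 'FULL_NAME'
        #   _normalizeDatabaseIdentifier('"Mixed Case"')  -> 'Mixed Case'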
        return ident.upper()

# Contributed by Pavel Cisar; incorporated 2004.09.10:
# Connection.db_info support:

# Conditionally add codes that aren't supported by all modern versions of the
# database engine:
def _addDatabaseInfoCodeIfPresent(name, addToList):
    globalz = globals()
    if name in globalz:
        addToList.append(globalz[name])

# Int codes:
_DATABASE_INFO_CODES_WITH_INT_RESULT = [
    isc_info_allocation, isc_info_no_reserve, isc_info_db_sql_dialect,
    isc_info_ods_minor_version, isc_info_ods_version, isc_info_page_size,
    isc_info_current_memory, isc_info_forced_writes, isc_info_max_memory,
    isc_info_num_buffers, isc_info_sweep_interval, isc_info_limbo,
    isc_info_attachment_id, isc_info_fetches, isc_info_marks, isc_info_reads,
    isc_info_writes, isc_info_set_page_buffers, isc_info_db_read_only,
    isc_info_db_size_in_pages, isc_info_page_errors, isc_info_record_errors,
    isc_info_bpage_errors, isc_info_dpage_errors, isc_info_ipage_errors,
    isc_info_ppage_errors, isc_info_tpage_errors,
  ]
def _addIntDatabaseInfoCodeIfPresent(name):
    _addDatabaseInfoCodeIfPresent(name, _DATABASE_INFO_CODES_WITH_INT_RESULT)

_addIntDatabaseInfoCodeIfPresent('isc_info_oldest_transaction')
_addIntDatabaseInfoCodeIfPresent('isc_info_oldest_active')
_addIntDatabaseInfoCodeIfPresent('isc_info_oldest_snapshot')
_addIntDatabaseInfoCodeIfPresent('isc_info_next_transaction')
_addIntDatabaseInfoCodeIfPresent('isc_info_active_tran_count')

del _addIntDatabaseInfoCodeIfPresent
_DATABASE_INFO_CODES_WITH_INT_RESULT = tuple(
    _DATABASE_INFO_CODES_WITH_INT_RESULT
  )

_DATABASE_INFO_CODES_WITH_COUNT_RESULTS = (
    isc_info_backout_count, isc_info_delete_count, isc_info_expunge_count,
    isc_info_insert_count, isc_info_purge_count, isc_info_read_idx_count,
    isc_info_read_seq_count, isc_info_update_count
  )

# Timestamp codes:
_DATABASE_INFO_CODES_WITH_TIMESTAMP_RESULT = []
def _addTimestampDatabaseInfoCodeIfPresent(name):
    _addDatabaseInfoCodeIfPresent(name,
        _DATABASE_INFO_CODES_WITH_TIMESTAMP_RESULT
      )

_addTimestampDatabaseInfoCodeIfPresent('isc_info_creation_date')
del _addTimestampDatabaseInfoCodeIfPresent
_DATABASE_INFO_CODES_WITH_TIMESTAMP_RESULT = tuple(
    _DATABASE_INFO_CODES_WITH_TIMESTAMP_RESULT
  )

_DATABASE_INFO__KNOWN_LOW_LEVEL_EXCEPTIONS = (isc_info_user_names,)

def _extractDatabaseInfoCounts(buf):
    # Extract a raw binary sequence of (unsigned short, signed int) pairs
    # into a corresponding Python dictionary.
    uShortSize = struct.calcsize('<H')
    intSize = struct.calcsize('<i')
    pairSize = uShortSize + intSize
    pairCount = len(buf) / pairSize

    counts = {}
    for i in range(pairCount):
        bufForThisPair = buf[i*pairSize:(i+1)*pairSize]
        relationId = struct.unpack('<H', bufForThisPair[:uShortSize])[0]
        count = struct.unpack('<i', bufForThisPair[uShortSize:])[0]
        counts[relationId] = count
    return counts

# Transaction conflict codes, for reference:
#--------------------------------------------------------------------------
# SQLCODE  ISC code
#  -904    335544451  # update_conflict: Update conflicts with concurrent update
#  -615    335544475  # relation_lock: Lock on table conflicts with existing lock
#  -615    335544476  # record_lock: Requested record lock conflicts with existing lock
#  -901    335544510  # lock_timeout: lock time-out on wait transaction
#--------------------------------------------------------------------------

def _trans_info(trans, request):
    # We process request as a sequence of info codes, even if only one code
    # was supplied by the caller.
    requestIsSingleton = isinstance(request, int)
    if requestIsSingleton:
        request = (request,)

    results = {}
    for infoCode in request:
        # The globals().get(...)
workaround is here because only recent versions # of FB expose constant isc_info_tra_isolation: if infoCode == globals().get('isc_info_tra_isolation', -1): buf = trans.transaction_info(infoCode, 's') buf = buf[1 + struct.calcsize('h'):] if len(buf) == 1: results[infoCode] = portable_int(buf) else: # For isolation level isc_info_tra_read_committed, the # first byte indicates the isolation level # (isc_info_tra_read_committed), while the second indicates # the record version flag (isc_info_tra_rec_version or # isc_info_tra_no_rec_version). isolationLevelByte, recordVersionByte = struct.unpack('cc', buf) isolationLevel = portable_int(isolationLevelByte) recordVersion = portable_int(recordVersionByte) results[infoCode] = (isolationLevel, recordVersion) else: # At the time of this writing (2006.02.09), # isc_info_tra_isolation is the only known return value of # isc_transaction_info that's not a simple integer. results[infoCode] = trans.transaction_info(infoCode, 'i') if requestIsSingleton: return results[request[0]] else: return results kinterbasdb-3.3.0/typeconv_datetime_mx.py0000644000175000001440000001447711130647414020024 0ustar pcisarusers# KInterbasDB Python Package - Type Conv : DateTime/eGenix mx.DateTime # # Version 3.3 # # The following contributors hold Copyright (C) over their respective # portions of code (see license.txt for details): # # [Original Author (maintained through version 2.0-0.3.1):] # 1998-2001 [alex] Alexander Kuznetsov # [Maintainers (after version 2.0-0.3.1):] # 2001-2002 [maz] Marek Isalski # 2002-2006 [dsr] David Rushby # [Contributors:] # 2001 [eac] Evgeny A. Cherkashin # 2001-2002 [janez] Janez Jere __all__ = ( # kinterbasdb-native date and time converters: 'date_conv_in', 'date_conv_out', 'time_conv_in', 'time_conv_out', 'timestamp_conv_in', 'timestamp_conv_out', # DB API 2.0 standard date and time type constructors: 'Date', 'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks', 'TimestampFromTicks', ) import sys from kinterbasdb.k_exceptions import * # This conversion module uses mx.DateTime for its date/time operations. try: from mx import DateTime as mxDT except ImportError: raise ImportError('kinterbasdb uses the mx.DateTime module (from the' ' "eGenix mx Base Package") by default for date/time/timestamp' ' representation, but you do not have this package installed.' 
'\nYou can either download the eGenix mx Base Package from' '\nhttp://www.egenix.com/files/python/eGenix-mx-Extensions.html#Download-mxBASE' '\nor tell kinterbasdb to use the Python standard library datetime' ' module instead, as explained at' '\nhttp://kinterbasdb.sourceforge.net/dist_docs/usage.html#faq_fep_is_mxdatetime_required' ) ################################################################################ ## DATE AND TIME ################################################################################ # kinterbasdb-native date and time converters: def date_conv_in(mxDateTimeObj): # Allow implicit param conv: if mxDateTimeObj is None or isinstance(mxDateTimeObj, basestring): return mxDateTimeObj if not isinstance(mxDateTimeObj, mxDT.DateTimeType): raise InterfaceError( 'Required type: %s ; supplied type: %s' % ( str(mxDT.DateTimeType), str(type(mxDateTimeObj)) ) ) return mxDateTimeObj.tuple()[:3] def date_conv_out(dateTuple): if dateTuple is None: return None return mxDT.DateTime(*dateTuple) def time_conv_in(mxDateTimeOrTimeDeltaObj): # Allow implicit param conv: if ( mxDateTimeOrTimeDeltaObj is None or isinstance(mxDateTimeOrTimeDeltaObj, basestring) ): return mxDateTimeOrTimeDeltaObj if isinstance(mxDateTimeOrTimeDeltaObj, mxDT.DateTimeType): timeTuple = mxDateTimeOrTimeDeltaObj.tuple()[3:6] elif isinstance(mxDateTimeOrTimeDeltaObj, mxDT.DateTimeDeltaType): timeTuple = mxDateTimeOrTimeDeltaObj.tuple()[1:] else: raise InterfaceError( 'Cannot convert object of type %s to native kinterbasdb tuple.' % str(type(mxDateTimeOrTimeDeltaObj)) ) secondsFrac = timeTuple[2] seconds = int(secondsFrac) microseconds = int((secondsFrac - seconds) * 1000000) return (timeTuple[0], timeTuple[1], seconds, microseconds) def time_conv_out(timeTuple): if timeTuple is None: return None if len(timeTuple) != 4: return mxDT.Time(*timeTuple) else: (hour, minute, second, micros) = timeTuple secondsFrac = second + micros / 1000000.0 return mxDT.Time(hour, minute, secondsFrac) def timestamp_conv_in(mxDateTimeObj): # Allow implicit param conv: if mxDateTimeObj is None or isinstance(mxDateTimeObj, basestring): return mxDateTimeObj if not isinstance(mxDateTimeObj, mxDT.DateTimeType): raise InterfaceError( 'Required type: %s ; supplied type: %s' % ( str(mxDT.DateTimeType), str(type(mxDateTimeObj)) ) ) timestampTuple = mxDateTimeObj.tuple() secondsFrac = timestampTuple[5] seconds = int(secondsFrac) microseconds = int((secondsFrac - seconds) * 1000000) return timestampTuple[:5] + (seconds, microseconds) def timestamp_conv_out(timestampTuple): if timestampTuple is None: return None if len(timestampTuple) == 7: (year, month, day, hour, minute, second, micros) = timestampTuple secondsFrac = second + micros / 1000000.0 else: (year, month, day, hour, minute, second) = timestampTuple secondsFrac = second return mxDT.DateTime(year, month, day, hour, minute, secondsFrac) # DB API 2.0 standard date and time type constructors: def Date(year, month, day): try: theDate = mxDT.DateTime(year, month, day) except mxDT.Error, e: raise DataError(str(e)) return theDate def Time(hour, minute, second): # mx DateTimeDeltas roll over when provided with an hour greater than # 23, a minute greater than 59, and so on. That is not acceptable for our # purposes. 
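    # For example (illustrative): with the range checks below in place,
    # Time(25, 0, 0) raises DataError instead of silently rolling over into
    # the next day.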
if hour < 0 or hour > 23: raise DataError("hour must be between 0 and 23") if minute < 0 or minute > 59: raise DataError("minute must be between 0 and 59") if second < 0 or second > 59: raise DataError("second must be between 0 and 59") try: theTime = mxDT.TimeDelta(hour, minute, second) except mxDT.Error, e: raise DataError(str(e)) return theTime def Timestamp(year, month, day, hour, minute, second): args = (year, month, day, hour, minute, second) # Yes, I know about the # *args syntactical shortcut, but it's not particularly readable. # mx mxDT's Timestamp constructor accepts negative values in the # spirit of Python's negative list indexes, but I see no reason to allow # that behavior in this DB API-compliance-obsessed module. if 0 < len(filter(lambda x: x < 0, args)): raise DataError("Values less than zero not allowed in Timestamp." " (Received arguments %s)" % repr(args) ) try: theStamp = mxDT.DateTime(*args) except mxDT.Error, e: raise DataError(str(e)) return theStamp DateFromTicks = mxDT.DateFromTicks TimeFromTicks = mxDT.TimeFromTicks TimestampFromTicks = mxDT.TimestampFromTicks kinterbasdb-3.3.0/setup.py0000644000175000001440000016204511132651540014725 0ustar pcisarusers# This horrible tangle of code will soon be blown away and replaced with # something more respectable. Yep, RSN. import re, struct, sys, time from StringIO import StringIO # Exclude old versions of Python: if not hasattr(sys, 'version_info') or sys.version_info < (2,3): raise NotImplementedError('This version of kinterbasdb requires Python 2.3' ' or later.' ) import ConfigParser, os, os.path, re, shutil import distutils.core import distutils.ccompiler import distutils.sysconfig import distutils.util class BuildError(Exception): pass def doCommand(cmd, header='COMMAND EXECUTION ERROR'): print '\t' + cmd taskOutStream = os.popen(cmd) taskOutput = taskOutStream.read() taskRetCode = taskOutStream.close() if taskRetCode is not None: raise BuildError('\n%s\n Command [%s] died with error:\n[%s]\n' % (header, cmd, taskOutput) ) return taskOutput def doLibConvCmd(cmd): return doCommand(cmd, LIBCONVERSION_ERROR_HEADER) def determineWindowsSystemDir(): if sys.platform.lower() == 'cygwin': return doCommand('cygpath --sysdir')[:-1] # Trailing newline. else: # (normal win32) # If I were willing to introduce a win32all dependency into this build # script, this function would be replaced by win32api.GetSystemDirectory. winDir = os.environ.get('SYSTEMROOT', os.environ.get('WINDIR', 'C:\\Windows')) winSysDir = os.path.join(winDir, 'system32') if not os.path.isdir(winSysDir): winSysDir = os.path.join(winDir, 'system') return winSysDir # Be careful about changing these messages; the build documentation refers to them. 
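# Illustrative sketch (not executed here): doCommand, defined above, returns
# the command's output and raises BuildError if the command exits with a
# nonzero status:
#
#   winSysDir = doCommand('cygpath --sysdir')[:-1]  # Strip trailing newline.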
PYTHON_SYSTEM_ERROR_HEADER = 'PYTHON SYSTEM ERROR:'
COMPILER_CONFIGURATION_ERROR_HEADER = 'COMPILER CONFIGURATION ERROR:'
KIDB_DISTRIBUTION_ERROR_HEADER = 'KINTERBASDB DISTRIBUTION ERROR:'
LIBCONVERSION_ERROR_HEADER = 'LIBRARY CONVERSION ERROR:'
AUTODETECTION_ERROR_HEADER = 'LIBRARY AUTODETECTION ERROR:'
MANUAL_SPECIFICATION_ERROR_HEADER = 'LIBRARY MANUAL SPECIFICATION ERROR:'

DISTUTILS_URL = 'http://www.python.org/sigs/distutils-sig/distutils.html'

VERSION_FILE = 'version.txt'
CONFIG_FILE = 'setup.cfg'

DEBUG = int(os.environ.get('KINTERBASDB_DEBUG', 0))

# Module name and version number:
kinterbasdb_name = 'kinterbasdb'
# Retrieve the kinterbasdb version number from a central text file for the
# sake of maintainability:
try:
    kinterbasdb_version = open(VERSION_FILE).read().strip()
except IOError:
    raise BuildError(
        "\n%s\n"
        " File 'version.txt' is missing; cannot determine kinterbasdb"
        " version."
        % KIDB_DISTRIBUTION_ERROR_HEADER
      )

argJam = ' '.join(sys.argv[1:]).lower()
shouldSkipBuild = argJam.find('--skip-build') != -1
platformIsWindows = sys.platform.lower().startswith('win')

def isConfigValTrue(v):
    return str(v).strip().lower() in ('1', 'true', 'yes')

# These config parameters are user-specifiable via setup.cfg:
CHECKED_BUILD = 0
VERBOSE_DEBUGGING = 0
ENABLE_CONCURRENCY = 1
ENABLE_FREE_CONNECTION_AND_DISCONNECTION = 0
ENABLE_DB_EVENT_SUPPORT = 1
ENABLE_CONNECTION_TIMEOUT = 1
ENABLE_FIELD_PRECISION_DETERMINATION = 1
ENABLE_DB_ARRAY_SUPPORT = 1
ENABLE_DB_SERVICES_API = 1

DATABASE_IS_FIREBIRD = None
DATABASE_HOME_DIR = None
DATABASE_INCLUDE_DIR = None
DATABASE_LIB_DIR = None
DATABASE_LIB_NAME = None

# These config parameters are not drawn from setup.cfg:
CUSTOM_PREPROCESSOR_DEFS = []
PLATFORM_SPECIFIC_INCLUDE_DIRS = []
PLATFORM_SPECIFIC_LIB_DIRS = []
PLATFORM_SPECIFIC_LIB_NAMES = []
PLATFORM_SPECIFIC_EXTRA_COMPILER_ARGS = []
PLATFORM_SPECIFIC_EXTRA_LINKER_ARGS = []

# Create a list of all macro definitions that must be passed to distutils.
allMacroDefs = []
# Create a list of all extra options to pass to the compiler, linker.
allExtraCompilerArgs = []
extraCompilerArgsForExtension__kinterbasdb = []
extraCompilerArgsForExtension__kiservices = []

if not shouldSkipBuild:
    # Update the timestamp in kinterbasdb's __init__.py to reflect the time
    # (UTC zone) when this build script was run:
    initModule = file('__init__.py', 'rb')
    try:
        initModuleCode = initModule.read()
    finally:
        initModule.close()
    reTimestamp = re.compile(r"^(__timestamp__\s+=\s+')(.*?)(')$", re.MULTILINE)
    initModuleCode = reTimestamp.sub(
        r'\g<1>%s\g<3>'
          % time.strftime('%Y.%m.%d.%H.%M.%S.UTC', time.gmtime()),
        initModuleCode
      )
    initModule = file('__init__.py', 'wb')
    try:
        initModule.write(initModuleCode)
    finally:
        initModule.close()

# See if the user manually specified various build options in the setup config
# file.  If so, skip autodetection for the options that the user has specified.
config = ConfigParser.ConfigParser()
config.read(CONFIG_FILE)
if config.has_section('manual_config'):
    def _boolConfOpt(name):
        if config.has_option('manual_config', name):
            return config.getboolean('manual_config', name)
        else:
            return False
    CHECKED_BUILD = _boolConfOpt('checked_build')
    VERBOSE_DEBUGGING = _boolConfOpt('verbose_debugging')
    ENABLE_CONCURRENCY = _boolConfOpt('enable_concurrency')
    if ENABLE_CONCURRENCY:
        try:
            import thread
        except ImportError:
            print ('Warning: This Python interpreter was built with threading'
                ' disabled, so kinterbasdb\'s concurrency support has been'
                ' disabled.'
) ENABLE_CONCURRENCY = False ENABLE_FREE_CONNECTION_AND_DISCONNECTION = _boolConfOpt( 'enable_free_connection_and_disconnection' ) if ENABLE_FREE_CONNECTION_AND_DISCONNECTION and not ENABLE_CONCURRENCY: print ('Warning: Free connection and disconnection support has been' ' disabled because concurrency is disabled. (See setup.cfg' ' settings enable_concurrency and' ' enable_free_connection_and_disconnection.)' ) ENABLE_FREE_CONNECTION_AND_DISCONNECTION = False ENABLE_DB_EVENT_SUPPORT = _boolConfOpt('enable_db_event_support') if ENABLE_DB_EVENT_SUPPORT and not ENABLE_CONCURRENCY: print ('Warning: Database event support has been disabled because' ' concurrency is disabled. (See setup.cfg settings' ' enable_concurrency and enable_db_event_support.)' ) ENABLE_DB_EVENT_SUPPORT = False ENABLE_CONNECTION_TIMEOUT = _boolConfOpt('enable_connection_timeout') if ENABLE_CONNECTION_TIMEOUT and not ENABLE_CONCURRENCY: print ('Warning: Connection timeout support has been disabled because' ' concurrency is disabled. (See setup.cfg settings' ' enable_concurrency and enable_connection_timeout.)' ) ENABLE_CONNECTION_TIMEOUT = False ENABLE_FIELD_PRECISION_DETERMINATION = _boolConfOpt('enable_field_precision_determination') ENABLE_DB_ARRAY_SUPPORT = _boolConfOpt('enable_db_array_support') ENABLE_DB_SERVICES_API = _boolConfOpt('enable_db_services_api') if config.has_option('manual_config', 'database_is_firebird'): DATABASE_IS_FIREBIRD = config.get('manual_config', 'database_is_firebird') if config.has_option('manual_config', 'database_home_dir'): DATABASE_HOME_DIR = config.get('manual_config', 'database_home_dir') if config.has_option('manual_config', 'database_include_dir'): DATABASE_INCLUDE_DIR = config.get('manual_config', 'database_include_dir') if config.has_option('manual_config', 'database_lib_dir'): DATABASE_LIB_DIR = config.get('manual_config', 'database_lib_dir') if config.has_option('manual_config', 'database_lib_name'): DATABASE_LIB_NAME = config.get('manual_config', 'database_lib_name') if DATABASE_HOME_DIR and not (DATABASE_INCLUDE_DIR or DATABASE_LIB_DIR): DATABASE_INCLUDE_DIR = os.path.join(DATABASE_HOME_DIR, 'include') DATABASE_LIB_DIR = os.path.join(DATABASE_HOME_DIR, 'lib') if DATABASE_INCLUDE_DIR is not None: fbHeaderPath = os.path.join(DATABASE_INCLUDE_DIR, 'ibase.h') if os.path.exists(fbHeaderPath): if 'FB_API_VER' in file(fbHeaderPath, 'rb').read(): if not DATABASE_IS_FIREBIRD: DATABASE_IS_FIREBIRD = True if DEBUG: print "*** CONFIG OPTIONS SPECIFIED IN %s SECTION 'manual_config' ***" % CONFIG_FILE for key in config.options('manual_config'): print '%s:' % (key) print ' %s' % (config.get('manual_config', key)) ALL_AUTODETECT_OPTIONS_MANUALLY_SPECIFIED = ( DATABASE_IS_FIREBIRD is not None and DATABASE_HOME_DIR and DATABASE_INCLUDE_DIR and DATABASE_LIB_DIR and DATABASE_LIB_NAME ) def verifyAutodetectedDatabaseIncludeDir(): if not os.path.exists(DATABASE_INCLUDE_DIR): sys.stderr.write( "%s\n" " Cannot autodetect the database header file directory.\n" " (Tried %s)\n" " Try specifying the 'database_include_dir' option in\n" " the 'manual_config' section of the setup config file (%s).\n" % (AUTODETECTION_ERROR_HEADER, DATABASE_INCLUDE_DIR, CONFIG_FILE) ) sys.exit(1) def verifyUserSpecifiedDatabaseIncludeDir(): if not os.path.exists(DATABASE_INCLUDE_DIR): sys.stderr.write( "%s\n" " The user-specified database header file directory\n" " %s\n" " does not exist.\n" " Try modifying the 'database_include_dir' option in\n" " the 'manual_config' section of the setup config file (%s),\n" " or comment out 
that option to force this script to\n"
            " autodetect it.\n"
            % (MANUAL_SPECIFICATION_ERROR_HEADER, DATABASE_INCLUDE_DIR, CONFIG_FILE)
          )
        sys.exit(1)

def verifyAutodetectedDatabaseLibraryDir():
    if not os.path.exists(DATABASE_LIB_DIR):
        sys.stderr.write(
            "%s\n"
            " Cannot autodetect the database lib directory.\n"
            " (Tried %s)\n"
            " Try specifying the 'database_lib_dir' option in\n"
            " the 'manual_config' section of the setup config file (%s).\n"
            % (AUTODETECTION_ERROR_HEADER, DATABASE_LIB_DIR, CONFIG_FILE)
          )
        sys.exit(1)

def verifyUserSpecifiedDatabaseLibraryDir():
    if not os.path.exists(DATABASE_LIB_DIR):
        sys.stderr.write(
            "%s\n"
            " The user-specified database lib directory\n"
            " %s\n"
            " does not exist.\n"
            " Try modifying the 'database_lib_dir' option in\n"
            " the 'manual_config' section of the setup config file (%s),\n"
            " or comment out that option to force this script to\n"
            " autodetect it.\n"
            % (MANUAL_SPECIFICATION_ERROR_HEADER, DATABASE_LIB_DIR, CONFIG_FILE)
          )
        sys.exit(1)

def findSpacelessDirName(d):
    # On Windows, try to find the spaceless version of the provided directory
    # name.
    # This function was added on 2002.03.14 as part of an ugly hack to
    # surmount a bug in the distutils package.
    # Sometimes distutils causes None to be fed to this function.
    if not d:
        return d
    d = os.path.normpath(os.path.abspath(d))
    if ' ' not in d:
        return d
    # If d doesn't exist, its short equivalent can't be determined.
    # However, for the purposes of this program (which is solely for
    # convenience anyway) it's better just to risk feeding the
    # compiler/linker a path with a space in it than it is to raise
    # an exception when there's still a *chance* of success.
    if not os.path.isdir(d):
        return d

    try:
        import win32api
        return os.path.normcase(win32api.GetShortPathName(d))
    except ImportError:
        # Since win32api is not available, we'll revert to a lame,
        # manual approximation of GetShortPathName.
        pass

    ds = d.split(os.sep) # Split into components.
    if DEBUG:
        print 'ds is', ds
    ds[0] = ds[0] + '\\' # Add slash back onto drive designation so that
                         # it's like c:\ rather than just c:

    dsNoSpaces = [] # Will contain a version of the directory components
                    # with all spaces removed.
    for x in range(len(ds)):
        dir = ds[x]
        if DEBUG:
            print 'dir is', dir
        if ' ' not in dir:
            shortDir = dir
        else:
            fullDir = apply(os.path.join, ds[:x + 1])
            if DEBUG:
                print 'fullDir is', fullDir
            # Must deal with names like 'abc de' that have their space
            # before the sixth character.
            dirNoSpaces = dir.replace(' ', '')
            if len(dirNoSpaces) < 6:
                shortDirBase = dirNoSpaces
            else:
                shortDirBase = dirNoSpaces[:6]
            # Search for shortDirBase~i until we find it.
            shortDir = None
            i = 1
            while i < 9:
                # This code doesn't handle situations where there are
                # more than nine directories with the same prefix.
                maybeShortDir = '%s~%d' % (shortDirBase, i)
                fullMaybeShortDir = os.path.join(
                    os.path.dirname(fullDir), maybeShortDir)
                # Increment before the existence test; otherwise a
                # nonexistent candidate would cause an infinite loop.
                i = i + 1
                if not os.path.isdir(fullMaybeShortDir):
                    continue
                # There follows a *really* lame approximation of
                # os.path.samefile, which is not available on Windows.
                if os.listdir(fullMaybeShortDir) == os.listdir(fullDir):
                    shortDir = maybeShortDir
                    break
            if shortDir is None:
                raise Exception('Unable to find shortened version of'
                    ' directory named %s' % d
                  )
        dsNoSpaces.append(shortDir)

    if DEBUG:
        print 'dsNoSpaces is', dsNoSpaces
    return os.path.normcase(apply(os.path.join, dsNoSpaces))

# Perform generic compilation parameter setup, then switch to platform-
# specific.
origWorkingDir = os.path.abspath(os.curdir)

# Autodetect Python directory info.
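# For reference, a hypothetical setup.cfg fragment that would satisfy every
# 'manual_config' option consulted above (the paths are illustrative
# placeholders, not values this script detects or requires):
#
#   [manual_config]
#   database_is_firebird = 1
#   database_home_dir = /opt/firebird
#   database_include_dir = /opt/firebird/include
#   database_lib_dir = /opt/firebird/lib
#   database_lib_name = fbclient
#
# Likewise, findSpacelessDirName (above) maps a long Windows path such as
# r'C:\Program Files\Firebird' to its 8.3 alias, typically something like
# r'c:\progra~1\firebird' (the exact alias depends on the filesystem).
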
if DEBUG: print '*** PYTHON SETTINGS ***' pythonHomeDir = sys.exec_prefix pythonPkgDir = distutils.sysconfig.get_python_lib() if DEBUG: print '\tPython home dir:', pythonHomeDir print '\tPython package dir:', pythonPkgDir # Begin platform-specific compilation parameter setup: compilerIsMSVC = 0 compilerIsMinGW = 0 compilerIsGCC = 0 if platformIsWindows: ALL_AUTODETECT_WINDOWS_REGISTRY_OPTIONS_MANUALLY_SPECIFIED = ( DATABASE_HOME_DIR and DATABASE_INCLUDE_DIR and DATABASE_LIB_DIR and DATABASE_LIB_NAME ) CUSTOM_PREPROCESSOR_DEFS.append( ('WIN32', None) ) pyVersionSuffix = ''.join( [str(n) for n in sys.version_info[:2]] ) pyLibName = 'python%s.lib' % pyVersionSuffix # 2003.03.28: Accomodate source dists of Python on Windows (give the # PCBuild\pythonVV.lib file (if any) precedence over the # libs\pythonVV.lib file (if any)): pyLibsDir = os.path.join(pythonHomeDir, 'PCbuild') if not os.path.exists(os.path.join(pyLibsDir, pyLibName)): pyLibsDir = os.path.join(pythonHomeDir, 'libs') pyConventionalLibPath = os.path.join(pyLibsDir, pyLibName) # If this is a source distribution of Python, add a couple of necessary # include and lib directories. pcbuildDir = os.path.join( os.path.dirname(distutils.sysconfig.get_python_inc()), 'PCBuild' ) if os.path.exists(pcbuildDir): PLATFORM_SPECIFIC_LIB_DIRS.append(pcbuildDir) pySrcDistExtraIncludeDir = os.path.join( os.path.dirname(distutils.sysconfig.get_python_inc()), 'PC' ) PLATFORM_SPECIFIC_INCLUDE_DIRS.append(pySrcDistExtraIncludeDir) # Verify the various directories (such as include and library dirs) that # will be used during compilation. # Open the registry in preparation for reading various installation # directories from it. try: import _winreg except ImportError: # If the user has manually specified all of the options that would # require registry access to autodetect, we can proceed despite the # lack of _winreg. if not ALL_AUTODETECT_WINDOWS_REGISTRY_OPTIONS_MANUALLY_SPECIFIED: sys.stderr.write( "%s\n" " Cannot import the standard package '_winreg'.\n" " _winreg did not join the standard library until\n" " Python 2.0. If you are using a source distribution\n" " of Python 2.0 or later, you may have simply forgotten\n" " to compile the _winreg C extension.\n" " You can get around the lack of _winreg by manually\n" " specifying all of the configuration options in the\n" " 'manual_config' section of the setup config file (%s)." % (AUTODETECTION_ERROR_HEADER, CONFIG_FILE) ) sys.exit(1) if not ALL_AUTODETECT_WINDOWS_REGISTRY_OPTIONS_MANUALLY_SPECIFIED: try: r = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE) except WindowsError, e: sys.stderr.write( "%s\n" " Unable to connect to the HKEY_LOCAL_MACHINE section of\n" " the Windows registry.\n" " The specific error encountered is:\n" " %s" % (AUTODETECTION_ERROR_HEADER, str(e)) ) sys.exit(1) if DEBUG: print '*** DATABASE SETTINGS ***' # Autodetect database directory info if the user did not specify it. if not DATABASE_HOME_DIR: def findDatabaseHomeDir(databaseInfoKey, databaseHomeValueName): databaseCurrentVersionKey = _winreg.OpenKey(r, databaseInfoKey) try: return _winreg.QueryValueEx( databaseCurrentVersionKey, databaseHomeValueName )[0] finally: _winreg.CloseKey(databaseCurrentVersionKey) # Try to find Firebird first; revert to Interbase only if necessary. try: try: # 2003.11.10: Firebird 1.5 RC7 changed the registry structure. 
                DATABASE_HOME_DIR = findDatabaseHomeDir(
                    r'SOFTWARE\Firebird Project\Firebird Server\Instances',
                    'DefaultInstance'
                  )
                DATABASE_IS_FIREBIRD = 1
            except WindowsError:
                try:
                    # Firebird 1.0-Firebird 1.5 RC6:
                    DATABASE_HOME_DIR = findDatabaseHomeDir(
                        r'SOFTWARE\FirebirdSQL\Firebird\CurrentVersion',
                        'RootDirectory'
                      )
                    DATABASE_IS_FIREBIRD = 1
                except WindowsError:
                    # Revert to Interbase.
                    DATABASE_IS_FIREBIRD = 0
                    DATABASE_HOME_DIR = findDatabaseHomeDir(
                        r'SOFTWARE\Borland\InterBase\CurrentVersion',
                        'RootDirectory'
                      )
        except WindowsError, e:
            sys.stderr.write(
                "%s\n"
                " Unable to retrieve database directory from the Windows"
                " registry.\n"
                " Try specifying the 'database_home_dir' option in the\n"
                " 'manual_config' section of the setup config file (%s).\n"
                " The specific error encountered is:\n"
                " %s" % (AUTODETECTION_ERROR_HEADER, CONFIG_FILE, str(e))
              )
            sys.exit(1)

    if DATABASE_INCLUDE_DIR:
        verifyUserSpecifiedDatabaseIncludeDir()
        databaseSDKDir = os.path.dirname(DATABASE_INCLUDE_DIR)
    else:
        databaseSDKDir = os.path.join(DATABASE_HOME_DIR, 'SDK')
        if DATABASE_IS_FIREBIRD or not os.path.exists(databaseSDKDir):
            databaseSDKDir = DATABASE_HOME_DIR
        DATABASE_INCLUDE_DIR = os.path.join(databaseSDKDir, 'include')
        if DEBUG:
            print (
                '\tDATABASE_INCLUDE_DIR exists at\n\t %s: %d'
                % (DATABASE_INCLUDE_DIR, os.path.exists(DATABASE_INCLUDE_DIR))
              )
        verifyAutodetectedDatabaseIncludeDir()

    if DATABASE_LIB_DIR:
        verifyUserSpecifiedDatabaseLibraryDir()
    else:
        DATABASE_LIB_DIR = os.path.join(databaseSDKDir, 'lib')
        verifyAutodetectedDatabaseLibraryDir()

    # Try to "autodetect" library.
    if not DATABASE_LIB_NAME:
        if os.path.exists(os.path.join(DATABASE_LIB_DIR, 'firebird.lib')):
            DATABASE_LIB_NAME = 'firebird' # Vulcan
        elif os.path.exists(os.path.join(DATABASE_LIB_DIR, 'fbclient_ms.lib')):
            DATABASE_LIB_NAME = 'fbclient_ms' # FB 1.5 or 2.0
        else:
            DATABASE_LIB_NAME = 'gds32' # Default (IB5.5/IB6/FB1)

    # Perform compiler-specific setup.
    # I should discover a way to ask distutils what compiler it's
    # configured to use--the current code would only detect a compiler
    # change via the command line.  I've looked at the compiler subpackage
    # of distutils, and can't find any such facility (though it can be
    # queried for the system's default compiler).
    customCompilerName = 'msvc'
    match = re.search(r'--compiler=(?P<cname>\S+)', argJam)
    if match:
        customCompilerName = match.group('cname')
    else:
        match = re.search(r'-c\s*(?P<cname>\S+)', argJam)
        if match:
            customCompilerName = match.group('cname')

    compilerIsMSVC = customCompilerName.lower().startswith('msvc')
    compilerIsMinGW = customCompilerName.lower().startswith('mingw')

    if customCompilerName == 'msvc':
        # 2008.11.25: commented out this part because it causes problems with
        # VS2003 and newer; running vcvars32.bat before the build is enough.
        #   Philippe Makowski
        # # 2004.11.05: SF 1056684:
        # # If MSVC is assumed to be the compiler (which it is, on Windows,
        # # unless the user explicitly indicates otherwise), but we're not
        # # actually compiling (as when this script is invoked with
        # # 'setup.py install --skip-build'), skip the registry lookups that'll
        # # break if MSVC is not installed.
# if not shouldSkipBuild: # # 2004.10.24: # # Autodetect support files for "Microsoft Visual C++ Toolkit 2003": # if sys.version_info[:2] >= (2,4): # directoriesKey = _winreg.OpenKey(r, # r'SOFTWARE\Microsoft\MicrosoftSDK\Directories' # ) # try: # windowsSDKDir = _winreg.QueryValueEx( # directoriesKey, # 'Install Dir' # )[0] # finally: # _winreg.CloseKey(directoriesKey) # # windowsSDKLibDir = os.path.join(windowsSDKDir, 'Lib') # PLATFORM_SPECIFIC_LIB_DIRS.append(windowsSDKLibDir) # # End "Microsoft Visual C++ Toolkit 2003" support code. # else: # # 2004.10.28: Better support for building with VStudio 6 when # # "Register Environment Variables" was not selected during the # # installation process (vcvars32.bat doesn't quite paper over # # all of the differences). # vcKey = _winreg.OpenKey(r, # r'SOFTWARE\Microsoft\DevStudio\6.0\Products\Microsoft Visual C++' # ) # try: # vcDir = _winreg.QueryValueEx(vcKey, 'ProductDir')[0] # finally: # _winreg.CloseKey(vcKey) # # PLATFORM_SPECIFIC_INCLUDE_DIRS.append(os.path.join(vcDir, 'Include')) # PLATFORM_SPECIFIC_LIB_DIRS.append(os.path.join(vcDir, 'Lib')) if not DATABASE_IS_FIREBIRD: DATABASE_LIB_DIR = os.path.join(databaseSDKDir, r'lib_ms') if not os.path.exists(DATABASE_LIB_DIR): DATABASE_LIB_DIR = os.path.join(databaseSDKDir, r'lib') if not DATABASE_LIB_NAME or DATABASE_LIB_NAME == 'gds32': DATABASE_LIB_NAME = 'gds32_ms' elif customCompilerName == 'bcpp': print ' *** BCPP LIBRARY GENERATION : begin ***' COMPILER_EXE_NAME = 'bcc32.exe' # Try to find the home directory of the Borland compiler by searching # each directory listed in the PATH. # If I were willing to depend on win32all, win32api.FindExecutable # would replace this code. osPath = os.environ['PATH'].split(os.pathsep) bccHome = None for dir in osPath: if os.path.exists(os.path.join(dir, COMPILER_EXE_NAME)): bccHome = os.path.split(dir)[0] break else: # Couldn't find it. sys.stderr.write( "%s\n" " Unable to find the home directory of the Borland" " compiler.\n" " You must add the 'bin' subdirectory of the" " compiler's\n" " home directory to your PATH.\n" " One way to do this is to type a command of the" " format\n" " SET PATH=%%PATH%%;c:\\EXAMPLE_DIR\\bin\n" " in the same command prompt you use to run the" " kinterbasdb setup script." % (COMPILER_CONFIGURATION_ERROR_HEADER,) ) sys.exit(1) # Override the default behavior of distutils.bcppcompiler.BCPPCompiler # in order to force it to issue the correct command. 
        from distutils.bcppcompiler import BCPPCompiler

        def _makeDirNameSpaceless(kwargs, argName):
            x = kwargs.get(argName, None)
            if x is None:
                return
            elif isinstance(x, str):
                kwargs[argName] = findSpacelessDirName(x)
            else: # sequence of strings
                kwargs[argName] = [ findSpacelessDirName(d) for d in x ]

        class BCPP_UGLY_Hack(BCPPCompiler):
            def compile (self, *args, **kwargs):
                bccIncludePreargDir = findSpacelessDirName(r'%s\include' % bccHome)
                bccLibPreargDir = findSpacelessDirName(r'%s\lib' % bccHome)
                bccLibPSDKPreargDir = findSpacelessDirName(r'%s\lib\psdk' % bccHome)
                kwargs['extra_preargs'] = (
                    [ r'-I"%s"' % bccIncludePreargDir,
                      r'-L"%s"' % bccLibPreargDir,
                      r'-L"%s"' % bccLibPSDKPreargDir
                    ] + kwargs.get('extra_preargs', [])
                  )
                for argName in ('output_dir', 'include_dirs'):
                    _makeDirNameSpaceless(kwargs, argName)
                return BCPPCompiler.compile(self, *args, **kwargs)

            def link (self, *args, **kwargs):
                ilinkLibPreargDir = findSpacelessDirName(r'%s\lib' % bccHome)
                ilinkLibPSDKPreargDir = findSpacelessDirName(r'%s\lib\psdk' % bccHome)
                myPreArgs = [ r'/L"%s"' % ilinkLibPreargDir,
                              r'/L"%s"' % ilinkLibPSDKPreargDir
                  ]
                if 'extra_preargs' not in kwargs:
                    argsAsList = list(args)
                    argsAsList[9] = myPreArgs # see distutils.ccompiler.py
                    args = tuple(argsAsList)
                else:
                    kwargs['extra_preargs'] = myPreArgs + kwargs.get('extra_preargs', [])
                for argName in (
                    'output_dir', 'library_dirs', 'runtime_library_dirs',
                    'build_temp'
                  ):
                    _makeDirNameSpaceless(kwargs, argName)
                return BCPPCompiler.link(self, *args, **kwargs)

        # Force distutils to use our BCPP_UGLY_Hack class rather than the
        # default BCPPCompiler class.
        compilerSetupTuple = distutils.ccompiler.compiler_class['bcpp']
        import distutils.bcppcompiler
        distutils.bcppcompiler.BCPP_UGLY_Hack = BCPP_UGLY_Hack
        distutils.ccompiler.compiler_class['bcpp'] = (
            compilerSetupTuple[0], 'BCPP_UGLY_Hack', compilerSetupTuple[2]
          )

        # Use the Borland command-line library conversion tool coff2omf to
        # create a Borland-compiler-compatible library file,
        # "pythonVV_bcpp.lib", from the standard "pythonVV.lib".
        libName = os.path.join(pyLibsDir, 'python%s_bcpp.lib' % pyVersionSuffix)
        if not os.path.exists(libName):
            print 'setup.py is trying to create %s' % libName
            coff2omfCommand = ('coff2omf %s %s' % (pyConventionalLibPath, libName))
            os.system(coff2omfCommand)
            # Do this test instead of checking the return value of
            # os.system, which will not reliably indicate an error
            # condition on Win9x.
            if not os.path.exists(libName):
                sys.stderr.write(
                    "%s\n"
                    " Unable to create a Borland-compatible Python"
                    " library file using the\n"
                    " coff2omf utility.\n"
                    " Tried command:\n"
                    " %s" % (COMPILER_CONFIGURATION_ERROR_HEADER, coff2omfCommand)
                  )
                sys.exit(1)
        assert os.path.isfile(libName)
        print ' *** BCPP LIBRARY GENERATION : end ***'
    elif compilerIsMinGW: # 2003.08.05:
        print ' *** MINGW LIBRARY GENERATION : begin ***'
        # Use the MinGW tools pexports and dlltool to create a GCC-compatible
        # library file for Python.
        # Firebird 1.5 already includes a suitable library file
        # (fbclient_ms.lib).  Versions earlier than FB 1.5 don't work with
        # MinGW, so override IB6/FB1-like lib names.
        if DATABASE_LIB_NAME in ('gds32', 'gds32_ms'):
            if DATABASE_LIB_NAME is not None:
                print (
                    '\tIgnoring your "%s" library name setting in favor\n'
                    '\tof "fbclient_ms", which is the proper choice for\n'
                    '\tMinGW.'
% DATABASE_LIB_NAME ) DATABASE_LIB_NAME = 'fbclient_ms' winSysDir = determineWindowsSystemDir() # Python (pythonVV.lib -> libpythonVV.a): pyDLL = 'python%s.dll' % pyVersionSuffix pyDLLPathPossibilies = [os.path.join(d, pyDLL) for d in (pythonHomeDir, pcbuildDir, winSysDir) ] for pyDLLPath in pyDLLPathPossibilies: if os.path.isfile(pyDLLPath): break else: raise BuildError("""\n%s\n Can't find Python DLL "%s".""" % (LIBCONVERSION_ERROR_HEADER, pyDLL) ) libName = 'libpython%s.a' % pyVersionSuffix libUltimateDest = os.path.join(pyLibsDir, libName) defFilename = 'python%s.def' % pyVersionSuffix if os.path.isfile(libUltimateDest): print ('\tMinGW-compatible Python library already exists at:\n\t %s' % libUltimateDest ) else: print ( '\n\tsetup.py is trying to create MinGW-compatible Python' ' library at:\n' '\t "%s"' % libUltimateDest ) os.chdir(os.path.dirname(pyDLLPath)) try: doLibConvCmd('pexports %s > %s' % (pyDLL, defFilename)) doLibConvCmd( 'dlltool --dllname %s --def %s --output-lib %s' % (pyDLL, defFilename, libName) ) os.remove(defFilename) # With source builds of some versions of Python, the Python DLL # is located in the same directory that distutils declares to # be the "library directory", so the generated library file # shouldn't be moved. if os.path.dirname(libUltimateDest).lower() != os.path.abspath(os.curdir).lower(): shutil.copyfile(libName, libUltimateDest) os.remove(libName) finally: os.chdir(origWorkingDir) assert os.path.isfile(libUltimateDest) print ' *** MINGW LIBRARY GENERATION : end ***\n' if DEBUG: print '\tDATABASE_LIB_DIR exists at\n\t %s: %d' \ % (DATABASE_LIB_DIR, os.path.exists(DATABASE_LIB_DIR)) print '\tDATABASE_LIB_NAME is\n\t %s' % DATABASE_LIB_NAME elif sys.platform.lower() == 'cygwin': # 2003.08.05: print ' *** CYGWIN LIBRARY GENERATION : begin ***' if DATABASE_LIB_NAME != 'fbclient': if DATABASE_LIB_NAME is not None: print ( '\tIgnoring your "%s" library name setting in favor of\n' '\t "fbclient", which is the proper choice for Cygwin.' % DATABASE_LIB_NAME ) DATABASE_LIB_NAME = 'fbclient' winSysDir = determineWindowsSystemDir() # 2003.11.10: Switched to FB 1.5 RC7+ reg structure. # baseRegLoc = '/proc/registry/HKEY_LOCAL_MACHINE/SOFTWARE/FirebirdSQL/Firebird/CurrentVersion' regInstLoc = ( '/proc/registry/HKEY_LOCAL_MACHINE/SOFTWARE/Firebird Project' '/Firebird Server/Instances/DefaultInstance' ) # Read the location of Firebird from the Windows registry. try: fbDir = doLibConvCmd('cat "%s"' % regInstLoc)[:-1] # Trailing null byte. except BuildError: raise BuildError("\n%s\n Windows registry settings for Firebird 1.5" " were not found. Try running Firebird's instreg.exe utility." " (Firebird 1.5 before RC7, Firebird 1.0, and Interbase are not" " supported.)" % AUTODETECTION_ERROR_HEADER ) if fbDir.endswith('\\'): fbDir = fbDir[:-1] # Trailing backslash. fbDir = fbDir.replace('\\', '/') fbDir = doLibConvCmd('cygpath --unix %s' % fbDir)[:-1] # Trailing null byte. 
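    # As a sketch of what the cygpath conversion above produces (the input
    # path is hypothetical): a registry value such as
    #   C:\Program Files\Firebird\Firebird_1_5
    # would come back from 'cygpath --unix' as
    #   /cygdrive/c/Program Files/Firebird/Firebird_1_5
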
if DATABASE_INCLUDE_DIR: verifyUserSpecifiedDatabaseIncludeDir() else: DATABASE_INCLUDE_DIR = os.path.join(fbDir, 'include') verifyAutodetectedDatabaseIncludeDir() libUltimateDest = '/usr/lib/libfbclient.a' libUltimateDir, libName = os.path.split(libUltimateDest) if os.path.isfile(libUltimateDest): print ('\tCygwin-compatible Firebird library already exists at:\n\t %s' % libUltimateDest ) else: print ( '\n\tsetup.py is trying to create cygwin-compatible Firebird' ' library at:\n' '\t "%s"' % libUltimateDest ) fbClientLibFilename = os.path.join(fbDir, 'lib', 'fbclient_ms.lib') origCurDir = os.path.abspath(os.curdir) os.chdir(winSysDir) try: # Create def file containing symbols from fbclient_ms.lib. doLibConvCmd( '''(echo EXPORTS; nm %s | grep " T _"''' ''' | sed 's/.* T _//g' | sed 's/@.*$//g')''' ''' > fbclient.def''' % fbClientLibFilename ) # End lame pexports substitute. # Create lib file from DLL and the just-generated def file. doLibConvCmd( 'dlltool --dllname fbclient.dll --def fbclient.def --output-lib %s' % libName ) os.remove('fbclient.def') # Move the lib file to a location where cygwin-GCC's linker will # find it. shutil.copyfile(libName, libUltimateDest) os.remove(libName) finally: os.chdir(origCurDir) assert os.path.isfile(libUltimateDest) print ' *** CYGWIN LIBRARY GENERATION : end ***\n' elif sys.platform.lower() == 'darwin': # Based on Patch 909886 (Piet van oostrum) PLATFORM_SPECIFIC_EXTRA_LINKER_ARGS.extend(['-framework', 'Firebird']) # Don't override the include dir specified in setup.cfg, if any: if not DATABASE_INCLUDE_DIR: DATABASE_INCLUDE_DIR = '/Library/Frameworks/Firebird.framework/Headers' else: # not win32, cygwin, or darwin # If the platform isn't Linux, issue a warning. if not sys.platform.lower().startswith('linux'): sys.stderr.write("Warning: The kinterbasdb setup code was not" " specifically written to support your platform (%s), and" " may not work properly.\n" % sys.platform ) # Is libcrypt necessary on all POSIX OSes, or just Linux? # Until someone informs me otherwise, I'll assume all. if os.name == 'posix': PLATFORM_SPECIFIC_LIB_NAMES.append('crypt') # Verify the various directories (such as include and library dirs) that # will be used during compilation. # Assumption: # This is a Unix-like OS, where a proper installation routine would have # placed the database [header, library] files in system-wide dirs. # We have no way of knowing beyond the shadow of a doubt whether that # has happened (as opposed to the situation on Windows, where we can # consult the registry to determine where a binary installer placed its # files), so we'll just let the compiler complain if it can't find the # [header, library] files. # If, on the other hand, the user manually specified the directories, we # verify that they exist before invoking the compiler. if DATABASE_INCLUDE_DIR: # the user manually specified it verifyUserSpecifiedDatabaseIncludeDir() if DATABASE_LIB_DIR: # the user manually specified it verifyUserSpecifiedDatabaseLibraryDir() # 2003.04.12: # On FreeBSD 4, the header and library files apparently are not made # visible by default. # This script attempts to "autodetect" an installation at the default # location, but only if: # - no DATABASE_HOME_DIR has been manually specified # - the default installation directory actually exists # # This "autodetection" will probably work for some other Unixes as well. 
if not DATABASE_HOME_DIR: DEFAULT_FREEBSD_HOME_DIR = '/usr/local/firebird' if os.path.isdir(DEFAULT_FREEBSD_HOME_DIR): DATABASE_HOME_DIR = DEFAULT_FREEBSD_HOME_DIR if not DATABASE_INCLUDE_DIR: DATABASE_INCLUDE_DIR = os.path.join(DATABASE_HOME_DIR, 'include') if not DATABASE_LIB_DIR: DATABASE_LIB_DIR = os.path.join(DATABASE_HOME_DIR, 'lib') if not DATABASE_LIB_NAME: # 2003.07.29: If the user hasn't specified the name of the database # library, this script will now guess its way from the most recent # known library back to the oldest, most conservative option. # The goal of this smarter probing is to allow kinterbasdb to build out # of the box with Firebird 1.5, without *requiring* the user to modify # setup.cfg to specify the correct library name. # # YYY: This isn't the most proper way to probe for libraries using # distutils, but I must admit that propriety isn't my highest priority # in this setup script. import distutils.command.config as cmd_conf import distutils.dist as dist_dist class _ConfigUglyHack(cmd_conf.config): # _ConfigUglyHack circumvents a distutils problem brought to light # on Unix by this script's abuse of the distutils. def try_link(self, *args, **kwargs): self.compiler.exe_extension = '' # ('' rather than None) return cmd_conf.config.try_link(self, *args, **kwargs) cfg = _ConfigUglyHack(dist_dist.Distribution()) for possibleLib in ('fbclient', 'fbembed'): if cfg.check_lib(possibleLib): DATABASE_LIB_NAME = possibleLib break else: DATABASE_LIB_NAME = 'gds' # On any non-Windows platform, assume that GCC is the compiler: compilerIsGCC = ((compilerIsMinGW or not platformIsWindows) and 1) or 0 if compilerIsMSVC: # MSVC's /Wall generates warnings for just about everything one could # imagine (even just to indicate that it has inlined a function). Some of # these are useful, but most aren't, so it's off by default. #PLATFORM_SPECIFIC_EXTRA_COMPILER_ARGS.append('/Wall') # Generate warnings for potential 64-bit portability problems (this option # is not available in MSVC 6): if not (sys.version_info < (2,4) or 'MSC V.1200 ' in sys.version.upper()): PLATFORM_SPECIFIC_EXTRA_COMPILER_ARGS.append('/Wp64') # Enable the pooling of read-only strings (reduces executable size # considerably, esp. when checked_build=1): PLATFORM_SPECIFIC_EXTRA_COMPILER_ARGS.append('/GF') if CHECKED_BUILD: # Generate debugging symbol files (.pdb): PLATFORM_SPECIFIC_EXTRA_COMPILER_ARGS.append('/Zi') buildDir = os.path.abspath(os.path.join( os.path.dirname(__file__), 'build' )) libDir = 'lib.%s-%s' % ( distutils.util.get_platform(), '.'.join( [str(n) for n in sys.version_info[:2]] ) ) libPath = os.path.join(buildDir, libDir) for extensionName in ('_kinterbasdb', '_kiservices'): extraCompilerArgsForExt = globals()[ 'extraCompilerArgsForExtension_' + extensionName ] extraCompilerArgsForExt.append( r'/Fd"%s\kinterbasdb\%s.pdb"' % (libPath, extensionName) ) elif compilerIsGCC: if CHECKED_BUILD: PLATFORM_SPECIFIC_EXTRA_COMPILER_ARGS.append('-pedantic') # Include debugging symbols: PLATFORM_SPECIFIC_EXTRA_COMPILER_ARGS.append('-g') # Prevent GCC from complaining about various things that are perfectly # legitimate, but not part of the C90 standard: PLATFORM_SPECIFIC_EXTRA_COMPILER_ARGS.append('-std=c99') # By default, distutils includes the -fno-strict-aliasing flag on # *nix-GCC, but not on MinGW-GCC. 
    PLATFORM_SPECIFIC_EXTRA_COMPILER_ARGS.append('-fno-strict-aliasing')

    if not platformIsWindows:
        # Python's distutils infrastructure should already include this
        # argument if Python itself was compiled with threading enabled,
        # but make sure:
        PLATFORM_SPECIFIC_EXTRA_COMPILER_ARGS.append('-pthread')

    PLATFORM_SPECIFIC_EXTRA_COMPILER_ARGS.append('-O3')

# Now finished with platform-specific compilation parameter setup.

# Create a list of all INCLUDE dirs to be passed to setup():
allIncludeDirs = []
# Add Python include directory:
allIncludeDirs.append(distutils.sysconfig.get_python_inc())
if len(PLATFORM_SPECIFIC_INCLUDE_DIRS) > 0:
    allIncludeDirs.extend(PLATFORM_SPECIFIC_INCLUDE_DIRS)
if DATABASE_INCLUDE_DIR:
    allIncludeDirs.append(DATABASE_INCLUDE_DIR)

# Create a list of all LIB names to be passed to setup():
allLibNames = []
if DATABASE_LIB_NAME:
    allLibNames.append(DATABASE_LIB_NAME)
allLibNames.extend(PLATFORM_SPECIFIC_LIB_NAMES)

# Create a list of all LIB directories to be passed to setup():
allLibDirs = []
if len(PLATFORM_SPECIFIC_LIB_DIRS) > 0:
    allLibDirs.extend(PLATFORM_SPECIFIC_LIB_DIRS)
if DATABASE_LIB_DIR:
    allLibDirs.append(DATABASE_LIB_DIR)

if len(CUSTOM_PREPROCESSOR_DEFS) > 0:
    allMacroDefs.extend(CUSTOM_PREPROCESSOR_DEFS)

if CHECKED_BUILD:
    # Activate assertions by undefining NDEBUG.
    # (A 1-tuple, lacking a second element, is the distutils convention for
    # "undefine this macro".)
    allMacroDefs.append(('NDEBUG',))
else:
    allMacroDefs.append(('NDEBUG', 1))

preprocConfigFile = file('__ki_platform_config.h', 'wb')

def defineConfigItem(name, value=None):
    print >> preprocConfigFile, '#define %s%s' % (
        name, (value and ' %s' % value) or ''
      )

for sizeOfName, sizeOfVal in (
    ('SIZEOF_POINTER', struct.calcsize('P')),
    ('SIZEOF_INT', struct.calcsize('i')),
    ('SIZEOF_LONG', struct.calcsize('l')),
  ):
    defineConfigItem(sizeOfName, sizeOfVal)

for boolOptName in (
    'VERBOSE_DEBUGGING',
    'ENABLE_CONCURRENCY',
    'ENABLE_FREE_CONNECTION_AND_DISCONNECTION',
    'ENABLE_DB_EVENT_SUPPORT',
    'ENABLE_CONNECTION_TIMEOUT',
    'ENABLE_FIELD_PRECISION_DETERMINATION',
    'ENABLE_DB_ARRAY_SUPPORT',
    'ENABLE_DB_SERVICES_API',
  ):
    if eval(boolOptName):
        defineConfigItem(boolOptName)

if len(PLATFORM_SPECIFIC_EXTRA_COMPILER_ARGS) > 0:
    allExtraCompilerArgs.extend(PLATFORM_SPECIFIC_EXTRA_COMPILER_ARGS)

allExtraLinkerArgs = [] # MSVC will produce .pdbs with '/DEBUG'.
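# For orientation: defineConfigItem (above) writes plain '#define' lines, so
# on a hypothetical 64-bit Linux build with concurrency enabled, the start of
# the generated __ki_platform_config.h would look something like:
#
#   #define SIZEOF_POINTER 8
#   #define SIZEOF_INT 4
#   #define SIZEOF_LONG 8
#   #define ENABLE_CONCURRENCY
#
# (HAVE__* items are appended later by the 'config' command's probes.)
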
if len(PLATFORM_SPECIFIC_EXTRA_LINKER_ARGS) > 0: allExtraLinkerArgs.extend(PLATFORM_SPECIFIC_EXTRA_LINKER_ARGS) # Work around the quirks in compiling against an uninstalled build of the FB # server on POSIX: if ( DATABASE_LIB_DIR is not None and os.path.split(os.path.dirname(os.path.dirname(DATABASE_LIB_DIR)))[1] == 'gen' ): if 'gds' in allLibNames: allLibNames.remove('gds') allLibNames.append('fbclient') allLibDirs.append(os.path.join(DATABASE_LIB_DIR, 'lib')) extensionModules = [ distutils.core.Extension( "kinterbasdb._kinterbasdb", sources=["_kinterbasdb.c"], libraries=allLibNames, include_dirs=allIncludeDirs, library_dirs=allLibDirs, define_macros=allMacroDefs, extra_compile_args=allExtraCompilerArgs + extraCompilerArgsForExtension__kinterbasdb, extra_link_args=allExtraLinkerArgs ), ] allPythonModules = [ 'kinterbasdb.__init__', 'kinterbasdb.k_exceptions', 'kinterbasdb.typeconv_naked', 'kinterbasdb.typeconv_backcompat', 'kinterbasdb.typeconv_23plus', 'kinterbasdb.typeconv_fixed_stdlib', 'kinterbasdb.typeconv_fixed_fixedpoint', 'kinterbasdb.typeconv_datetime_naked', 'kinterbasdb.typeconv_datetime_stdlib', 'kinterbasdb.typeconv_datetime_mx', 'kinterbasdb.typeconv_text_unicode', 'kinterbasdb._array_descriptor', 'kinterbasdb._connection_timeout', 'kinterbasdb._request_buffer_builder', # 2006.02.16 ] if kinterbasdb_version >= '3.2': allPythonModules.extend([ 'kinterbasdb.typeconv_23plus_lowmem', # 2005.11.28 'kinterbasdb.typeconv_24plus', 'kinterbasdb.typeconv_fixed_decimal', ]) # 2003.02.18: # Build somewhat differently if we're dealing with an IB version before 6.0. # (Innocent until proven guilty.) isIBLessThan_6_0 = 0 for incDir in allIncludeDirs: headerFilename = os.path.join(incDir, 'ibase.h') if os.path.exists(headerFilename): # Using the isc_decode_sql_time symbol as the detector is kinda arbitrary. if 'isc_decode_sql_time' not in open(headerFilename).read(): isIBLessThan_6_0 = 1 break # Only include the services module if dealing with >= IB 6.0. if isIBLessThan_6_0: print >> sys.stderr, \ 'WARNING: Not building the kinterbasdb._services module because' \ ' IB < 6.0 does not support it.' else: if ENABLE_DB_SERVICES_API: allPythonModules.append('kinterbasdb.services') extensionModules.append( distutils.core.Extension( "kinterbasdb._kiservices", sources=["_kiservices.c"], libraries=allLibNames, include_dirs=allIncludeDirs, library_dirs=allLibDirs, define_macros=allMacroDefs, extra_compile_args=allExtraCompilerArgs + extraCompilerArgsForExtension__kiservices, extra_link_args=allExtraLinkerArgs ) ) # Now we've detected and verified all compilation parameters, and are ready to # compile. if DEBUG: print '*** SETTINGS DETECTION PHASE COMPLETE; READY FOR BUILD ***' print ("\tThe DEBUG flag is enabled, so the setup script stops\n" "\t before actually invoking the distutils setup procedure." ) sys.exit(0) from distutils.command.config import config as DefaultConfigCommand from distutils.command.build_ext import build_ext as DefaultBuildExtCommand class ConfigCommand(DefaultConfigCommand): def run(self): print '-' * 79 print 'WILL NOW PROBE DATABASE API FOR FEATURES.' print 'COMPILER ERRORS THAT ARISE DURING THIS PHASE ARE NOT A PROBLEM.' print '-' * 79 # Suppress the probe-compilation output: origStdOut = sys.stdout origStdErr = sys.stderr sys.stdout = sys.stderr = StringIO() try: self._run() finally: sys.stdout = origStdOut sys.stderr = origStdErr print '-' * 79 print 'FINISHED PROBING DATABASE API FOR FEATURES.' 
        print '-' * 79

    def _run(self):
        if ENABLE_DB_EVENT_SUPPORT:
            # Sometime after FB 2.0b1, FPTR_EVENT_CALLBACK was renamed to
            # ISC_EVENT_CALLBACK.  This code allows kinterbasdb to compile
            # properly either way.
            if not self.try_compile(
                body='/* THIS IS PROBE CODE; THE ERRORS ARE NOT A PROBLEM! */\n'
                  'void dummy_callback(void *x, ISC_USHORT y, const ISC_UCHAR *z) {\n'
                  '}\n'
                  'int main() {\n'
                  '  ISC_EVENT_CALLBACK x = dummy_callback;\n'
                  '  return 0;\n'
                  '}\n'
                  '/* THIS IS PROBE CODE; THE ERRORS ARE NOT A PROBLEM! */\n',
                headers=['ibase.h'],
                include_dirs=allIncludeDirs
              ):
                defineConfigItem('ISC_EVENT_CALLBACK', 'FPTR_EVENT_CALLBACK')

        # This HAVE__? silliness is necessary because ibase.h is increasingly
        # not using the preprocessor for constant definitions, so setup.py has
        # to test for the presence of the constants, then use the preprocessor
        # to indicate the result.
        def probeIntConst(name):
            if self.try_compile(
                body='/* THIS IS PROBE CODE; THE ERRORS ARE NOT A PROBLEM! */\n'
                  'int main(void) {\n'
                  '  /* The cast is present because we are testing for the'
                  '   * presence of the constant; its type and value do not'
                  '   * matter. */'
                  '  const int x = (int) %s;\n'
                  '  return 0;\n'
                  '}\n'
                  '/* THIS IS PROBE CODE; THE ERRORS ARE NOT A PROBLEM! */\n'
                    % name,
                headers=['ibase.h'],
                include_dirs=allIncludeDirs
              ):
                defineConfigItem('HAVE__%s' % name)

        for intConstName in '''
            isc_tpb_lock_timeout
            isc_info_active_tran_count
            isc_info_creation_date
            isc_info_tra_oldest_interesting
            isc_info_tra_oldest_snapshot
            isc_info_tra_oldest_active
            isc_info_tra_isolation
            isc_info_tra_access
            isc_info_tra_lock_timeout
            isc_info_tra_consistency
            isc_info_tra_concurrency
            isc_info_tra_read_committed
            isc_info_tra_no_rec_version
            isc_info_tra_rec_version
            isc_info_tra_readonly
            isc_info_tra_readwrite
          '''.split():
            probeIntConst(intConstName)

        def probeCType(cTypeName, includeFiles=()):
            probeCTypeBody = (
                '/* THIS IS PROBE CODE; THE ERRORS ARE NOT A PROBLEM! */\n'
                '%s\n'
                'int main(void) {\n'
                '  %s x;\n'
                '  return 0;\n'
                '}\n'
                '/* THIS IS PROBE CODE; THE ERRORS ARE NOT A PROBLEM! */\n'
                % ('\n'.join(['#include %s' % f for f in includeFiles]),
                   cTypeName
                  )
              )
            shavedIncludeFiles = [
                f[1:-1] for f in includeFiles
                if f.startswith('<') or f.startswith('"')
              ]
            if self.try_compile(
                body=probeCTypeBody,
                headers=['ibase.h'] + shavedIncludeFiles,
                include_dirs=allIncludeDirs
              ):
                defineConfigItem('HAVE__%s' % cTypeName)

        for cTypeName, includeFiles in (
            # Linux distributions in the Red Hat family define useconds_t in
            # sys/types.h, but some distributions don't:
            ('useconds_t', ['<sys/types.h>']),
          ):
            probeCType(cTypeName, includeFiles=includeFiles)

        preprocConfigFile.close()

        DefaultConfigCommand.run(self)

class BuildExtCommand(DefaultBuildExtCommand):
    def finalize_options(self):
        # Notice that the finalize_options method of the 'build' command is
        # called before the config command is called, so as to ensure that
        # self.compiler will have been computed by the time we need it.
        DefaultBuildExtCommand.finalize_options(self)
        configCommand = self.get_finalized_command('config')
        if not configCommand.compiler:
            configCommand.compiler = self.compiler
        self.run_command('config')

# The MEAT:
distutils.core.setup(
    name=kinterbasdb_name,
    version=kinterbasdb_version,
    author='''Originally by Alexander Kuznetsov ;
rewritten and expanded by David S.
Rushby with contributions from several others (see docs/license.txt for details).''', author_email='woodsplitter@rocketmail.com', url='http://kinterbasdb.sourceforge.net', description='Python DB API 2.0 extension for Firebird and Interbase', long_description= 'KInterbasDB implements Python Database API 2.0-compliant support\n' 'for the open source relational database Firebird and some versions\n' 'of its proprietary cousin Borland Interbase(R).\n\n' 'In addition to the minimal feature set of the standard Python DB API,\n' 'KInterbasDB also exposes nearly the entire native client API of the\n' 'database engine.', # 'kinterbasdb allows Python to access the Firebird and Interbase\n' # 'relational databases according to the interface defined by the\n' # 'Python Database API Specification version 2.0.', license='see docs/license.txt', packages=['kinterbasdb'], package_dir={'kinterbasdb': os.curdir}, package_data = {'kinterbasdb': ['docs/*.html','docs/_static/*','docs/_sources/*']}, py_modules=allPythonModules, # 2005.12.08: cmdclass={'config': ConfigCommand, 'build_ext': BuildExtCommand,}, ext_modules=extensionModules, ) print '\nSucceeded:\n ', sys.executable, ' '.join(sys.argv) kinterbasdb-3.3.0/_kiconversion.c0000644000175000001440000012152311130647414016226 0ustar pcisarusers/* KInterbasDB Python Package - Implementation of Parameter Conversion * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ /* This source file is designed to be directly included in _kinterbasdb.c, * without the involvement of a header file. */ /******************** CONVENIENCE DEFS:BEGIN ********************/ #define NUMBER_OF_DECIMAL_PLACES_FROM_SCALE(scale) (-(scale)) /* Macros to determine whether a field is a fixed point field based on the * data_type, data_subtype, and scale. Because the Firebird C API is a * monstrosity, this must be done separately for array fields. */ #ifdef INTERBASE_6_OR_LATER #define _DATA_TYPE_IS_INT64_CONVENTIONAL(data_type) ((data_type) == SQL_INT64) #define _DATA_TYPE_IS_INT64_ARRAY(data_type) ((data_type) == blr_int64) #else #define _DATA_TYPE_IS_INT64_CONVENTIONAL(data_type) (FALSE) #define _DATA_TYPE_IS_INT64_ARRAY(data_type) (FALSE) #endif #define IS_FIXED_POINT__CONVENTIONAL(dialect, data_type, data_subtype, scale) \ (boolean)(( \ ((data_subtype) != SUBTYPE_NONE || (scale) != 0) \ && ( (data_type) == SQL_SHORT \ || (data_type) == SQL_LONG \ || _DATA_TYPE_IS_INT64_CONVENTIONAL(data_type) \ ) \ ) || ( \ /* Special case for fixed-point values with precisions between 10 and 18 \ * in dialect < 3 databases, which are stored internally as double. */ \ ((dialect) < 3 && (scale) != 0 && \ ((data_type) == SQL_DOUBLE || (data_type) == SQL_D_FLOAT) \ ) \ )) #define IS_FIXED_POINT__ARRAY_EL(dialect, data_type, data_subtype, scale) \ (boolean)(( \ ((data_subtype) != SUBTYPE_NONE || (scale) != 0) \ && ( (data_type) == blr_short \ || (data_type) == blr_long \ || _DATA_TYPE_IS_INT64_ARRAY(data_type) \ ) \ ) || ( \ /* Special case for fixed-point values with precisions between 10 and 18 \ * in dialect < 3 databases, which are stored internally as double. 
*/ \ ((dialect) < 3 && (scale) != 0 && \ ((data_type) == blr_double || (data_type) == blr_d_float) \ ) \ )) #define TRY_INPUT_CONVERSION(conversion_code, label) \ if (INPUT_OK != (conversion_code)) { goto label; } /******************** CONVENIENCE DEFS:END ********************/ static const char *get_external_data_type_name(const unsigned short dialect, const short data_type, const short data_subtype, const short scale ); static const char *get_internal_data_type_name(short data_type); #include "_kiconversion_type_translation.c" #include "_kiconversion_from_db.c" #include "_kiconversion_blob_nonstandard.c" #include "_kiconversion_blob.c" #include "_kiconversion_blob_streaming.c" #include "_kiconversion_to_db.c" #ifdef ENABLE_DB_ARRAY_SUPPORT #include "_kiconversion_array.c" #endif /* ENABLE_DB_ARRAY_SUPPORT */ #define PyObject2XSQLVAR_TRY_INPUT_CONVERSION(conversion_code) \ TRY_INPUT_CONVERSION( (conversion_code), fail ); static InputStatus PyObject2XSQLVAR( Cursor *cur, short sqlvar_index, XSQLVAR *sqlvar, PyObject *py_input ) { int status = INPUT_ERROR; PyObject *py_input_converted = NULL; const short data_type = XSQLVAR_SQLTYPE_IGNORING_NULL_FLAG(sqlvar); const short data_subtype = sqlvar->sqlsubtype; const short scale = sqlvar->sqlscale; const unsigned short dialect = Transaction_get_dialect(cur->trans); boolean is_nonstandard_blob = FALSE; assert (py_input != NULL); /* With input parameters, we don't know beforehand just how much space the * incoming value might occupy (because of implicit parameter conversion). * Therefore, we must allocate+free an input buffer each time we receive an * individual incoming parameter value, rather than preallocating an input * buffer for all input XSQLVARs, as kinterbasdb 3.0 did. */ assert(sqlvar->sqldata == NULL); /* Space for the sqlind flag should already have been allocated in * reallocate_sqlda. */ assert (sqlvar->sqlind != NULL); /* Give the registered dynamic type translator a chance to convert the input * value before it's passed to the storage code. * Arrays are excluded because conceptually, they're just containers. Their * individual elements will be passed through the any applicable dynamic type * translator in kiconversion_array.c */ if (data_type == SQL_ARRAY) { /* Artificially INCREF py_input_converted because it's not actually a new * reference returned from a converter, but we need reference-ownership- * symmetry with the non-array branch. */ py_input_converted = py_input; Py_INCREF(py_input_converted); } else { /* Find the dynamic type translator (if any) for this field's type. */ PyObject *translator = cursor_get_in_converter( cur, sqlvar_index, data_type, data_subtype, scale, FALSE /* not an array element */ ); if (translator == NULL) { goto fail; } is_nonstandard_blob = (boolean) (data_type == SQL_BLOB && PyDict_Check(translator)); if (!is_nonstandard_blob) { py_input_converted = dynamically_type_convert_input_obj_if_necessary( py_input, FALSE, /* not an array element */ dialect, data_type, data_subtype, scale, translator ); } else { BlobMode mode; boolean treat_subtype_text_as_text; if ( validate_nonstandard_blob_config_dict(translator, &mode, &treat_subtype_text_as_text ) != DTT_BLOB_CONFIG_VALID ) { goto fail; } /* No other modes supported at the moment; this assertion should flag areas * that need to be adjusted if other modes are added in the future. 
*/ assert (mode == blob_mode_materialize || mode == blob_mode_stream); if (data_subtype == isc_blob_text && treat_subtype_text_as_text) { /* We've been directed to handle textual blobs as if they were VARCHAR, * and this is a textual blob, so use materialized mode even if we were * in streaming mode. Act as though we'd executed the following * statement: * mode = blob_mode_materialize; */ PyObject *py_converter_override; PyObject *py_blob_charset_id; boolean is_unicode_charset; /* 2007.02.10: */ /* It's a textual blob that we've been ordered to treat like normal * text, so we revert the is_nonstandard_blob flag to FALSE so that the * converted input will be passed to the normal blob handler. */ is_nonstandard_blob = FALSE; if (get_blob_converter_override_for_direction(TRUE, cur, sqlvar, &py_converter_override, &py_blob_charset_id, &is_unicode_charset ) != 0 ) { goto fail; } assert (py_converter_override != NULL); assert (py_blob_charset_id != NULL); if (py_converter_override == Py_None) { /* There's no translator for the corresponding textual DTT slot * (i.e., TEXT or TEXT_UNICODE), so just INCREF the input object and * leave it unmodified. */ py_input_converted = py_input; Py_INCREF(py_input_converted); } else { if (!is_unicode_charset) { /* Pass single string (py_input): */ py_input_converted = PyObject_CallFunctionObjArgs( py_converter_override, py_input, NULL ); } else { /* Pass 2-tuple of (py_input, py_blob_charset_id): */ PyObject *tuple_of_unicode_obj_and_charset_code = PyTuple_New(2); if (tuple_of_unicode_obj_and_charset_code == NULL) { goto fail; } Py_INCREF(py_input); PyTuple_SET_ITEM(tuple_of_unicode_obj_and_charset_code, 0, py_input); Py_INCREF(py_blob_charset_id); PyTuple_SET_ITEM(tuple_of_unicode_obj_and_charset_code, 1, py_blob_charset_id ); py_input_converted = PyObject_CallFunctionObjArgs( py_converter_override, tuple_of_unicode_obj_and_charset_code, NULL ); Py_DECREF(tuple_of_unicode_obj_and_charset_code); } } assert (py_blob_charset_id != NULL); Py_DECREF(py_blob_charset_id); } else { /* Either it's not a textual blob, or it is a textual blob that we've * been ordered not to auto-[enc|de]code. * Mode blob_mode_stream expects py_input to be a file-like object (if * it isn't, an exception will be raised later). * Mode blob_mode_materialize with treat_subtype_text_as_text FALSE * functions the same way naked translation would (i.e., it passes * the input string/buffer through unchanged). */ if (mode == blob_mode_materialize) { /* For now, materialized mode with treat_subtype_text_as_text FALSE * behaves the same way as "conventional" naked translation. */ is_nonstandard_blob = FALSE; } py_input_converted = py_input; /* Need artificial INCREF to maintain symmetry with the cases that * actually create a new object via DTT: */ Py_INCREF(py_input_converted); } } if (py_input_converted == NULL) { goto fail; } } /* py_input_converted is now a new reference that must be released at the end * of this function. */ assert (py_input_converted != NULL); /* If the input Python object is None, set the input XSQLVAR to NULL whether * the field's definition allows NULLs or not. This is done to give BEFORE * triggers a chance to act on the incoming value without premature * interference by kinterbasdb. */ if (py_input_converted == Py_None) { if (!XSQLVAR_IS_ALLOWED_TO_BE_NULL(sqlvar)) { sqlvar->sqltype += 1; assert(XSQLVAR_IS_ALLOWED_TO_BE_NULL(sqlvar)); } XSQLVAR_SET_NULL(sqlvar); assert (XSQLVAR_IS_NULL(sqlvar)); /* Enforce symmetry between XSQLVAR nullifying macros. 
*/ assert (sqlvar->sqldata == NULL); /* sqldata was null and will remain so. */ goto succeed_allowing_null; } /* It is now certain the sqlvar will not represent a NULL value; make that * understanding explicit. */ XSQLVAR_SET_NOT_NULL(sqlvar); assert (!XSQLVAR_IS_NULL(sqlvar)); /* Enforce symmetry between XSQLVAR nullifying macros. */ switch (data_type) { case SQL_VARYING: /* (SQL_VARYING -> VARCHAR) */ case SQL_TEXT: /* (SQL_TEXT -> CHAR) */ PyObject2XSQLVAR_TRY_INPUT_CONVERSION( conv_in_text_conventional(py_input_converted, sqlvar, data_type) ); break; case SQL_SHORT: case SQL_LONG: #ifdef INTERBASE_6_OR_LATER case SQL_INT64: #endif /* INTERBASE_6_OR_LATER */ PyObject2XSQLVAR_TRY_INPUT_CONVERSION( conv_in_internal_integer_types_conventional(py_input_converted, sqlvar, dialect, data_type, data_subtype, scale, cur ) ); break; case SQL_FLOAT: PyObject2XSQLVAR_TRY_INPUT_CONVERSION( conv_in_float_conventional(py_input_converted, sqlvar, cur) ); break; case SQL_DOUBLE: case SQL_D_FLOAT: PyObject2XSQLVAR_TRY_INPUT_CONVERSION( conv_in_double_conventional(py_input_converted, sqlvar, cur) ); break; /* Handle TIMESTAMP, DATE, and TIME fields: */ case SQL_TIMESTAMP: /* TIMESTAMP */ PyObject2XSQLVAR_TRY_INPUT_CONVERSION( conv_in_timestamp_conventional(py_input_converted, sqlvar, cur) ); break; #ifdef INTERBASE_6_OR_LATER case SQL_TYPE_DATE: /* DATE */ PyObject2XSQLVAR_TRY_INPUT_CONVERSION( conv_in_date_conventional(py_input_converted, sqlvar, cur) ); break; case SQL_TYPE_TIME: /* TIME */ PyObject2XSQLVAR_TRY_INPUT_CONVERSION( conv_in_time_conventional(py_input_converted, sqlvar, cur) ); break; #endif /* INTERBASE_6_OR_LATER */ case SQL_BOOLEAN: PyObject2XSQLVAR_TRY_INPUT_CONVERSION( conv_in_boolean_conventional(py_input_converted, sqlvar) ); break; case SQL_BLOB: /* 2005.06.19: Added support for non-materialized blob input. */ if (!is_nonstandard_blob) { PyObject2XSQLVAR_TRY_INPUT_CONVERSION( conv_in_blob_materialized(cur, sqlvar, py_input_converted) ); } else { /* Next statement allocates space for the blob's id, not for the blob's * contents (the contents are written segment-at-a-time in * conv_in_blob_from_pyfilelike). */ sqlvar->sqldata = kimem_main_malloc(sizeof(ISC_QUAD)); if (sqlvar->sqldata == NULL) { goto fail; } PyObject2XSQLVAR_TRY_INPUT_CONVERSION( conv_in_blob_from_pyfilelike( py_input_converted, (ISC_QUAD *) sqlvar->sqldata, cur->status_vector, *Transaction_get_db_handle_p(cur->trans), *Transaction_get_handle_p(cur->trans) ) ); } break; case SQL_ARRAY: #ifdef ENABLE_DB_ARRAY_SUPPORT /* In the SQL_ARRAY case, the input object has not actually been passed * through a dynamic type translator, and it never will be, because it's * merely a container (a Python sequence). However, dynamic type * translation *is* applied to each of the input sequence's elements by * conv_in_array. */ PyObject2XSQLVAR_TRY_INPUT_CONVERSION( conv_in_array( py_input_converted, (ISC_QUAD **) &sqlvar->sqldata, cur, sqlvar_index, sqlvar->relname, sqlvar->relname_length, sqlvar->sqlname, sqlvar->sqlname_length ) ); #else raise_exception(InternalError, "This build of kinterbasdb has database" " array support disabled." ); goto fail; #endif /* ENABLE_DB_ARRAY_SUPPORT */ break; default: raise_exception(NotSupportedError, "Database engine type is currently not supported." " " KIDB_REPORT " " KIDB_HOME_PAGE ); goto fail; } /* end of switch */ /* Fall through to success. */ assert (sqlvar->sqldata != NULL); succeed_allowing_null: /* As in the case of py_input == Py_None. 
*/ status = INPUT_OK; goto cleanup; fail: status = INPUT_ERROR; if (sqlvar->sqldata != NULL) { kimem_main_free(sqlvar->sqldata); sqlvar->sqldata = NULL; } cleanup: if (py_input_converted != NULL && PyString_Check(py_input_converted)) { /* If py_input_converted is a string, sqlvar->sqldata contains only a * pointer to py_input_converted's internal character buffer, not a pointer * to a copy of the buffer. Therefore, we must ensure that * py_input_converted is not garbage collected until the database engine * has had a chance to read its internal buffer. */ assert(cur->objects_to_release_after_execute != NULL); if (PyList_Append(cur->objects_to_release_after_execute, py_input_converted) != 0) { status = INPUT_ERROR; } /* Decref py_input_converted so that if it was a new string object created * by an input-dynamic-type-translator, the only remaining reference to it * will then be held by cur->objects_to_release_after_execute. * When cur->objects_to_release_after_execute is released in * free_XSQLVAR_dynamically_allocated_memory (which is called at the end of * pyob_Cursor_execute), py_input_converted will be released as a consequence. */ } else { /* If py_input_converted was any other type of *new* input-dynamic-type- * translator-created object than a string, it can be released right now, * because sqlvar->sqldata now contains an independent C-level *copy* of * the input value. * If there was no relevant dynamic type translator, py_input_converted *is* * py_input with an artificially incremented reference count, so it's still * proper to decref it here. */ } Py_XDECREF(py_input_converted); return status; } /* PyObject2XSQLVAR */ static const char *get_external_data_type_name(const unsigned short dialect, const short data_type, const short data_subtype, const short scale ) { switch (data_type) { case SQL_TEXT: return "CHAR"; case SQL_VARYING: return "VARCHAR"; case SQL_SHORT: case SQL_LONG: #ifdef INTERBASE_6_OR_LATER case SQL_INT64: #endif /* INTERBASE_6_OR_LATER */ switch (data_subtype) { case SUBTYPE_NONE: /* The database engine doesn't always set data_subtype correctly, * so call IS_FIXED_POINT__CONVENTIONAL to second-guess the engine. 
*/ if (IS_FIXED_POINT__CONVENTIONAL(dialect, data_type, data_subtype, scale)) { return "NUMERIC/DECIMAL"; } else { switch (data_type) { case SQL_SHORT: return "SMALLINT"; case SQL_LONG: return "INTEGER"; #ifdef INTERBASE_6_OR_LATER case SQL_INT64: return "BIGINT"; #endif /* INTERBASE_6_OR_LATER */ } } case SUBTYPE_NUMERIC: return "NUMERIC"; case SUBTYPE_DECIMAL: return "DECIMAL"; } case SQL_FLOAT: return "FLOAT"; case SQL_DOUBLE: case SQL_D_FLOAT: return "DOUBLE"; case SQL_TIMESTAMP: return "TIMESTAMP"; #ifdef INTERBASE_6_OR_LATER case SQL_TYPE_DATE: return "DATE"; case SQL_TYPE_TIME: return "TIME"; #endif /* INTERBASE_6_OR_LATER */ case SQL_BLOB: return "BLOB"; default: return "UNKNOWN"; } } /* get_external_data_type_name */ static const char *get_internal_data_type_name(short data_type) { switch (data_type) { case SQL_TEXT: return "SQL_TEXT"; case SQL_VARYING: return "SQL_VARYING"; case SQL_SHORT: return "SQL_SHORT"; case SQL_LONG: return "SQL_LONG"; #ifdef INTERBASE_6_OR_LATER case SQL_INT64: return "SQL_INT64"; #endif /* INTERBASE_6_OR_LATER */ case SQL_FLOAT: return "SQL_FLOAT"; case SQL_DOUBLE: case SQL_D_FLOAT: return "SQL_DOUBLE"; case SQL_TIMESTAMP: return "SQL_TIMESTAMP"; #ifdef INTERBASE_6_OR_LATER case SQL_TYPE_DATE: return "SQL_TYPE_DATE"; case SQL_TYPE_TIME: return "SQL_TYPE_TIME"; #endif /* INTERBASE_6_OR_LATER */ case SQL_BLOB: return "SQL_BLOB"; default: return "UNKNOWN"; } } /* get_internal_data_type_name */ static InputStatus convert_input_parameters(Cursor *cur, PyObject *params) { /* Assumption: the type of argument $params has already been screened by * Cursor_execute; we know it is a sequence. */ PreparedStatement *ps = cur->ps_current; XSQLDA *sqlda = ps->in_sqlda; int conversion_status; Py_ssize_t i; XSQLVAR *cur_sqlvar; OriginalXSQLVARSpecificationCache *cur_spec_cache; const Py_ssize_t num_required_statement_params = sqlda->sqld; Py_ssize_t num_supplied_statement_params = PySequence_Length(params); if (num_supplied_statement_params > MAX_XSQLVARS_IN_SQLDA) { /* These num_supplied-related contortions are here because * PyString_FromFormat lacks the standard %zd format code for displaying * Py_ssize_t values. */ PyObject *num_supplied = PyLong_FromUnsignedLongLong( (unsigned LONG_LONG) num_supplied_statement_params ); if (num_supplied != NULL) { PyObject *num_supplied_str = PyObject_Str(num_supplied); if (num_supplied_str != NULL) { PyObject *err_msg = PyString_FromFormat( "Statement parameter sequence contains %s parameters, but only %d" " are allowed.", PyString_AS_STRING(num_supplied_str), MAX_XSQLVARS_IN_SQLDA ); if (err_msg != NULL) { raise_exception(ProgrammingError, PyString_AS_STRING(err_msg)); Py_DECREF(err_msg); } Py_DECREF(num_supplied_str); } Py_DECREF(num_supplied); } goto fail; } /* For the sake of the safety of free_XSQLVAR_dynamically_allocated_memory * in case this function encounters a conversion error, do an initial pass * to set the appropriate pointers to NULL. */ for ( i = 0, cur_sqlvar = sqlda->sqlvar, cur_spec_cache = ps->in_var_orig_spec; i < num_required_statement_params; i++, cur_sqlvar++, cur_spec_cache++ ) { /* 2003.02.13: Was previously setting 'cur_sqlvar->sqlind = NULL;' here, * but that's no longer valid because sqlind is allocated in * reallocate_sqlda and not deallocated until Cursor_delete. 
*/ assert (cur_sqlvar->sqlind != NULL); cur_sqlvar->sqldata = NULL; /* Also restore the original sqlvar specification flags before attempting * the conversion of this input row (they would have been reset if the * Python object previously inbound to this XSQLVAR was implicitly * converted from string -> whatever DB type the field really was). */ cur_sqlvar->sqltype = cur_spec_cache->sqltype; cur_sqlvar->sqllen = cur_spec_cache->sqllen; } /* This supplied-vs-required param count check must come AFTER the * set-all-sqlvar-pointers null loop above, so that the caller of this * function can safely call free_XSQLVAR_dynamically_allocated_memory in ALL * cases in which this function returns an error. */ if (num_supplied_statement_params != num_required_statement_params) { /* This code goes through contortions to convert the Py_ssize_t variables * to Python longs, then build string representations of the longs, because * PyString_FromFormat does not support the standard %zd code for * displaying Py_ssize_t variables. */ PyObject *n_req = PyLong_FromUnsignedLongLong( (unsigned LONG_LONG) num_required_statement_params ); if (n_req != NULL) { PyObject *n_req_str = PyObject_Str(n_req); if (n_req_str != NULL) { PyObject *n_sup = PyLong_FromUnsignedLongLong( (unsigned LONG_LONG) num_supplied_statement_params ); if (n_sup != NULL) { PyObject *n_sup_str = PyObject_Str(n_sup); if (n_sup_str != NULL) { PyObject *err_msg = PyString_FromFormat("Incorrect number of input" " parameters. Expected %s; received %s.", PyString_AS_STRING(n_req_str), PyString_AS_STRING(n_sup_str) ); if (err_msg != NULL) { raise_exception(ProgrammingError, PyString_AS_STRING(err_msg)); Py_DECREF(err_msg); } Py_DECREF(n_sup_str); } Py_DECREF(n_sup); } Py_DECREF(n_req_str); } Py_DECREF(n_req); } goto fail; } for ( i = 0, cur_sqlvar = sqlda->sqlvar; i < num_required_statement_params; i++, cur_sqlvar++ ) { PyObject *cur_param = PySequence_GetItem(params, SIZE_T_TO_PYTHON_SIZE(i)); if (cur_param == NULL) { goto fail; } conversion_status = PyObject2XSQLVAR(cur, /* The cast from Py_ssize_t to short is safe because we've already * validated that there are few enough parameters: */ (short) i, cur_sqlvar, cur_param ); /* PySequence_GetItem returns a new reference, which must be released. */ Py_DECREF(cur_param); if (conversion_status != INPUT_OK) { goto fail; } } return INPUT_OK; fail: assert (PyErr_Occurred()); return INPUT_ERROR; } /* convert_input_parameters */ #if (defined(ENABLE_FIELD_PRECISION_DETERMINATION) && !defined(INTERBASE_7_OR_LATER)) #include "_kiconversion_field_precision.c" #endif PyObject *XSQLDA2Description(XSQLDA *sqlda, Cursor *cur) { /* Creates a Python DB API Cursor.description tuple from a database XSQLDA * and a cursor object. */ const short sqlvar_count = sqlda->sqld; short sqlvar_index; XSQLVAR *sqlvar; short data_type; int display_size = -1; PyObject *py_descs_for_all_fields = NULL; PyObject *py_desc_for_this_field = NULL; PyObject *py_field_name = NULL; PyObject *py_type = NULL; PyObject *py_display_size = NULL; PyObject *py_internal_size = NULL; PyObject *py_precision = NULL; PyObject *py_scale = NULL; /* DB API Spec 2.0: "This attribute will be None for operations that do not * return rows..." 
*/ if (sqlvar_count == 0) { Py_INCREF(Py_None); return Py_None; } py_descs_for_all_fields = PyTuple_New(sqlvar_count); if (py_descs_for_all_fields == NULL) { goto fail; } for (sqlvar_index = 0; sqlvar_index < sqlvar_count; sqlvar_index++) { sqlvar = sqlda->sqlvar + sqlvar_index; /* The length of py_desc_for_this_field is defined by the Python DB API. */ py_desc_for_this_field = PyTuple_New(7); if (py_desc_for_this_field == NULL) { goto fail; } data_type = XSQLVAR_SQLTYPE_IGNORING_NULL_FLAG(sqlvar); py_internal_size = PyInt_FromLong(sqlvar->sqllen); if (py_internal_size == NULL) { goto fail; } py_scale = PyInt_FromLong(sqlvar->sqlscale); if (py_scale == NULL) { goto fail; } #ifndef ENABLE_FIELD_PRECISION_DETERMINATION py_precision = PyInt_FromLong(0); #else #ifdef INTERBASE_7_OR_LATER py_precision = PyInt_FromLong(sqlvar->sqlprecision); #else py_precision = determine_field_precision( ENTITY_TYPE_UNKNOWN, sqlvar->relname, sqlvar->relname_length, sqlvar->sqlname, sqlvar->sqlname_length, cur ); #endif #endif if (py_precision == NULL) { goto fail; } /* Make the description's type slot adapt to dynamic type translation * instead of returning the same type regardless. */ if (data_type != SQL_ARRAY) { PyObject *translator_key = _get_cached_type_name_for_conventional_code( Transaction_get_dialect(cur->trans), data_type, sqlvar->sqlsubtype, sqlvar->sqlscale ); if (translator_key == NULL) { goto fail; } py_type = cursor_get_translator_output_type(cur, sqlvar_index, translator_key); /* If there is no registered converter for $translator_key, $py_type will * be NULL. That's fine; a default will be supplied below. */ if (py_type == NULL && PyErr_Occurred()) { goto fail; } } /* I've investigated the py_type-punning warning raised here by * GCC -Wall -fstrict-aliasing and concluded that the behavior that causes * it (casting a PyTypeObject* to a PyObject*) is unavoidable when using * the Python C API, but not unsafe (see the definitions in Python's * object.h). */ #define DEFAULT_TYPE_IS(default_type) \ if (py_type == NULL) { py_type = (PyObject *) &default_type; } switch (data_type) { case SQL_TEXT: case SQL_VARYING: DEFAULT_TYPE_IS(PyString_Type); display_size = (int) sqlvar->sqllen; break; case SQL_SHORT: DEFAULT_TYPE_IS(PyInt_Type); display_size = 6; break; case SQL_LONG: DEFAULT_TYPE_IS(PyInt_Type); display_size = 11; break; #ifdef INTERBASE_6_OR_LATER case SQL_INT64: DEFAULT_TYPE_IS(PyLong_Type); display_size = 20; break; #endif /* INTERBASE_6_OR_LATER */ case SQL_DOUBLE: case SQL_FLOAT: case SQL_D_FLOAT: DEFAULT_TYPE_IS(PyFloat_Type); display_size = 17; break; case SQL_BLOB: /* The next statement predates DSR's involvement with kinterbasdb. He * doesn't regard it as such a hot idea, but has left it alone for the * sake of backward compatibility. */ Py_DECREF(py_scale); py_scale = PyInt_FromLong(sqlvar->sqlsubtype); if (py_scale == NULL) { goto fail; } DEFAULT_TYPE_IS(PyString_Type); display_size = 0; break; case SQL_TIMESTAMP: DEFAULT_TYPE_IS(PyTuple_Type); display_size = 22; break; #ifdef INTERBASE_6_OR_LATER case SQL_TYPE_DATE: DEFAULT_TYPE_IS(PyTuple_Type); display_size = 10; break; case SQL_TYPE_TIME: DEFAULT_TYPE_IS(PyTuple_Type); display_size = 11; break; #endif /* INTERBASE_6_OR_LATER */ case SQL_BOOLEAN: DEFAULT_TYPE_IS(PyBool_Type); display_size = 5; break; case SQL_ARRAY: DEFAULT_TYPE_IS(PyList_Type); display_size = -1; /* Can't determine display size inexpensively. */ break; default: /* Notice that py_type gets set to None, *not* NoneType. 
*/ py_type = Py_None; display_size = -1; /* Can't determine display size. */ } /* end switch on data py_type */ py_display_size = PyInt_FromLong(display_size); if (py_display_size == NULL) { goto fail; } /* If there is an alias, place the alias, rather than the real column name, * in the column name field of the descriptor tuple. Before this fix, the * presence of an alias made no difference whatsoever in the descriptor * setup, and was thus inaccessible to the client programmer. */ /* Use strncmp instead of strcmp because the sqlname fields are not * null-terminated. */ if ( ( sqlvar->aliasname_length != sqlvar->sqlname_length ) || ( strncmp(sqlvar->sqlname, sqlvar->aliasname, sqlvar->sqlname_length) != 0 ) ) { py_field_name = PyString_FromStringAndSize( sqlvar->aliasname, sqlvar->aliasname_length ); } else { py_field_name = PyString_FromStringAndSize( sqlvar->sqlname, sqlvar->sqlname_length ); } if (py_field_name == NULL) { goto fail; } assert (py_type != NULL); /* No Python API calls between here and the end of the loop can raise an * exception, so artificially INCREF py_type to allow PyTuple_SET_ITEM to * steal a reference to it. * This step should be done here because py_type can be set in so many * places (anywhere the DEFAULT_TYPE_IS macro is called, and elsewhere). */ Py_INCREF(py_type); PyTuple_SET_ITEM( py_desc_for_this_field, 0, py_field_name ); PyTuple_SET_ITEM( py_desc_for_this_field, 1, py_type ); PyTuple_SET_ITEM( py_desc_for_this_field, 2, py_display_size ); PyTuple_SET_ITEM( py_desc_for_this_field, 3, py_internal_size ); PyTuple_SET_ITEM( py_desc_for_this_field, 4, py_precision ); PyTuple_SET_ITEM( py_desc_for_this_field, 5, py_scale ); PyTuple_SET_ITEM( py_desc_for_this_field, 6, PyBool_FromLong(XSQLVAR_SQLTYPE_READ_NULL_FLAG(sqlvar)) ); PyTuple_SET_ITEM(py_descs_for_all_fields, sqlvar_index, py_desc_for_this_field); /* Nullify intermediate PyObject pointers so that if an error arises during * the next iteration, the error handler at the end of the function can * uniformly XDECREF the pointers without worrying about whether they * represent references to object from the previous iteration, the * ownership of which has already been taken by a container. */ py_desc_for_this_field = NULL; py_field_name = NULL; py_type = NULL; py_display_size = NULL; py_internal_size = NULL; py_precision = NULL; py_scale = NULL; } return py_descs_for_all_fields; fail: assert (PyErr_Occurred()); Py_XDECREF(py_descs_for_all_fields); Py_XDECREF(py_desc_for_this_field); Py_XDECREF(py_field_name); /* py_type should NOT be DECREFed. */ Py_XDECREF(py_display_size); Py_XDECREF(py_internal_size); Py_XDECREF(py_precision); Py_XDECREF(py_scale); return NULL; } /* XSQLDA2Description */ static PyObject *XSQLVAR2PyObject( Cursor *cur, const short sqlvar_index, XSQLVAR *sqlvar ) { PyObject *result = NULL; PyObject *converter = NULL; /* DTT translator */ const short scale = sqlvar->sqlscale; const short data_type = XSQLVAR_SQLTYPE_IGNORING_NULL_FLAG(sqlvar); const short data_subtype = sqlvar->sqlsubtype; const unsigned short dialect = Transaction_get_dialect(cur->trans); const boolean is_array = (boolean) (data_type == SQL_ARRAY); boolean is_nonstandard_blob; /* Array DTT is activated elsewhere--see kiconversion_array.c */ if (!is_array) { converter = cursor_get_out_converter(cur, sqlvar_index, data_type, data_subtype, scale, FALSE ); /* cursor_get_out_converter returns NULL on error; borrowed reference to * Py_None if there was no converter. 
*/ if (converter == NULL) { goto fail; } } /* 2007.02.10: */ /* The determination of is_nonstandard_blob needs to be made before we check * for SQL NULL below, so that if SQL NULL is found and we jump to 'succeed', * the 'succeed' clause won't mistakenly try to use a standard converter for * a nonstandard blob: */ is_nonstandard_blob = (data_type == SQL_BLOB && PyDict_Check(converter)); if ( XSQLVAR_IS_ALLOWED_TO_BE_NULL(sqlvar) && XSQLVAR_IS_NULL(sqlvar) ) { /* SQL NULL becomes Python None regardless of field type. */ Py_INCREF(Py_None); result = Py_None; /* Give converters a chance to act on this value: */ goto succeed; } /* For documentation of these data_type cases, see the IB6 API Guide * section entitled "SQL datatype macro constants". */ switch (data_type) { /* Character data: */ case SQL_TEXT: result = conv_out_char(sqlvar->sqldata, sqlvar->sqllen); break; case SQL_VARYING: result = conv_out_varchar(sqlvar->sqldata); break; /* Numeric data: */ case SQL_SHORT: case SQL_LONG: result = conv_out_short_long(sqlvar->sqldata, data_type, IS_FIXED_POINT__CONVENTIONAL(dialect, data_type, data_subtype, scale), scale ); break; #ifdef INTERBASE_6_OR_LATER case SQL_INT64: result = conv_out_int64(sqlvar->sqldata, IS_FIXED_POINT__CONVENTIONAL(dialect, data_type, data_subtype, scale), scale ); break; #endif /* INTERBASE_6_OR_LATER */ case SQL_FLOAT: result = conv_out_floating(*((float *) sqlvar->sqldata), dialect, scale); break; case SQL_DOUBLE: case SQL_D_FLOAT: result = conv_out_floating(*((double *) sqlvar->sqldata), dialect, scale); break; /* Date and time data: */ case SQL_TIMESTAMP: /* TIMESTAMP */ result = conv_out_timestamp(sqlvar->sqldata); break; #ifdef INTERBASE_6_OR_LATER case SQL_TYPE_DATE: /* DATE */ result = conv_out_date(sqlvar->sqldata); break; case SQL_TYPE_TIME: /* TIME */ result = conv_out_time(sqlvar->sqldata); break; #endif /* INTERBASE_6_OR_LATER */ case SQL_BOOLEAN: result = conv_out_boolean(sqlvar->sqldata); break; case SQL_BLOB: /* 2005.06.21: */ /* Special cases for blobs: * If treat_subtype_text_as_text is enabled for this field, that behavior * should override any other configured behavior, be it materialized or * streaming. * * Otherwise, if streaming is enabled for this field, apply the indicated * dynamic type translation (i.e., streaming) "prematurely", so as to avoid * materializing the entire blob in memory before exposing it to client * code via a BlobReader. */ { ISC_QUAD *blob_id = (ISC_QUAD *) sqlvar->sqldata; if (!is_nonstandard_blob) { /* Backward-compatible behavior (materialized, no auto-decoding): */ result = conv_out_blob_materialized(blob_id, cur->status_vector, *Transaction_get_db_handle_p(cur->trans), *Transaction_get_handle_p(cur->trans) ); } else { BlobMode mode; boolean treat_subtype_text_as_text; boolean treat_subtype_text_as_text__applies; if ( validate_nonstandard_blob_config_dict(converter, &mode, &treat_subtype_text_as_text ) != DTT_BLOB_CONFIG_VALID ) { goto fail; } /* No other modes supported at the moment; this assertion should flag areas * that need to be adjusted if other modes are added in the future. */ assert (mode == blob_mode_materialize || mode == blob_mode_stream); treat_subtype_text_as_text__applies = (boolean) (data_subtype == isc_blob_text && treat_subtype_text_as_text); if (treat_subtype_text_as_text__applies) { /* We've been directed to handle textual blobs as if they were VARCHAR, * and this is a textual blob, so use to materialized mode even if we * were in streaming mode. 
*/ mode = blob_mode_materialize; } if (mode == blob_mode_materialize) { result = conv_out_blob_materialized(blob_id, cur->status_vector, *Transaction_get_db_handle_p(cur->trans), *Transaction_get_handle_p(cur->trans) ); if (treat_subtype_text_as_text__applies) { /* Look up the character set ID of this blob to determine whether to * route it through the TEXT or TEXT_UNICODE dynamic type translator * for auto-decoding. */ PyObject *py_converter_override; PyObject *py_blob_charset_id; boolean is_unicode_charset; if (get_blob_converter_override_for_direction(FALSE, cur, sqlvar, &py_converter_override, &py_blob_charset_id, &is_unicode_charset ) != 0 ) { goto fail; } assert (py_converter_override != NULL); assert (py_blob_charset_id != NULL); if (py_converter_override == Py_None) { /* Return the raw result without any translation. */ } else { if (!is_unicode_charset) { /* Pass single string (result): */ PyObject *result_conv = PyObject_CallFunctionObjArgs( py_converter_override, result, NULL ); Py_DECREF(result); result = result_conv; } else { /* Pass 2-tuple of (result, py_blob_charset_id): */ PyObject *tuple_of_raw_string_and_charset_code = PyTuple_New(2); if (tuple_of_raw_string_and_charset_code == NULL) { goto fail; } /* We pass reference ownership over result to the tuple. */ PyTuple_SET_ITEM(tuple_of_raw_string_and_charset_code, 0, result); Py_INCREF(py_blob_charset_id); PyTuple_SET_ITEM(tuple_of_raw_string_and_charset_code, 1, py_blob_charset_id ); result = PyObject_CallFunctionObjArgs(py_converter_override, tuple_of_raw_string_and_charset_code, NULL ); Py_DECREF(tuple_of_raw_string_and_charset_code); /* Drop through to the end of the function, which will detect a * NULL result and indicate an error if necessary. */ } } assert (py_blob_charset_id != NULL); Py_DECREF(py_blob_charset_id); } } else { result = (PyObject *) BlobReader_create(cur->trans); if (result == NULL) { goto fail; } if (BlobReader_open((BlobReader *) result, blob_id) != 0) { goto fail; } assert (BlobReader_is_open((BlobReader *) result)); } } } break; case SQL_ARRAY: #ifdef ENABLE_DB_ARRAY_SUPPORT result = conv_out_array( cur, sqlvar_index, (ISC_QUAD *) sqlvar->sqldata, cur->status_vector, Transaction_get_db_handle_p(cur->trans), Transaction_get_handle_p(cur->trans), sqlvar->relname, sqlvar->relname_length, sqlvar->sqlname, sqlvar->sqlname_length ); #else raise_exception(InternalError, "This build of kinterbasdb has database" " array support disabled." ); goto fail; #endif /* ENABLE_DB_ARRAY_SUPPORT */ break; default: raise_exception( NotSupportedError, "Outgoing conversion of type not supported." " " KIDB_REPORT " " KIDB_HOME_PAGE ); return NULL; } if (result == NULL) { goto fail; } succeed: /* Nonstandard blobs and arrays are subject to special dynamic type * translation, which is not applied in the standard manner. */ assert (result != NULL); if (!(is_nonstandard_blob || is_array)) { assert (converter != NULL); /* Can't be NULL, but may be None. */ /* Replacing the PyObject pointer in result is *not* a refcount leak; see * the comments in dynamically_type_convert_output_obj_if_necessary. 
*/ result = dynamically_type_convert_output_obj_if_necessary( result, converter, data_type, data_subtype ); } return result; fail: Py_XDECREF(result); return NULL; } /* XSQLVAR2PyObject */ PyObject *XSQLDA2Tuple(Cursor *cur, XSQLDA *sqlda) { const short sqlvar_count = sqlda->sqld; short sqlvar_index; PyObject *var; PyObject *record = PyTuple_New(sqlvar_count); if (record == NULL) { return NULL; } for (sqlvar_index = 0; sqlvar_index < sqlvar_count; ++sqlvar_index) { var = XSQLVAR2PyObject(cur, sqlvar_index, sqlda->sqlvar + sqlvar_index); if (var == NULL) { /* XSQLVAR2PyObject will have set an exception. */ goto fail; } /* PyTuple_SET_ITEM steals our ref to var. */ PyTuple_SET_ITEM(record, sqlvar_index, var); } return record; fail: assert (PyErr_Occurred()); Py_XDECREF(record); return NULL; } /* XSQLDA2Tuple */ kinterbasdb-3.3.0/_kicore_create_drop_db.c0000644000175000001440000001055111130647414020003 0ustar pcisarusers/* KInterbasDB Python Package - Implementation of SQL Statement Execution, etc. * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. Cherkashin * 2001-2002 [janez] Janez Jere */ static PyObject *pyob_create_database(PyObject *self, PyObject *args) { CConnection *con = NULL; char *sql = NULL; Py_ssize_t sql_len = -1; short dialect = 0; if (!PyArg_ParseTuple(args, "s#|h", &sql, &sql_len, &dialect)) { goto fail; } if (!_check_statement_length(sql_len)) { goto fail; } /* A negative value for the dialect is not acceptable because the IB/FB API * requires an UNSIGNED SHORT. */ if (dialect < 0) { raise_exception(ProgrammingError, "connection dialect must be > 0"); goto fail; } con = Connection_create(); if (con == NULL) { goto fail; } assert (con->main_trans == NULL); /* conn->dialect is set to a default value in the Connection_create * function, so we only need to change it if we received a dialect argument * to this function. 
*/ if (dialect > 0) { con->dialect = (unsigned short) dialect; } assert (con->dialect > 0); { isc_tr_handle unused_trans_handle = NULL_TRANS_HANDLE; LEAVE_GIL_WITHOUT_AFFECTING_DB ENTER_GDAL_WITHOUT_LEAVING_PYTHON ENTER_GCDL_WITHOUT_LEAVING_PYTHON isc_dsql_execute_immediate( con->status_vector, &con->db_handle, &unused_trans_handle, /* Cast is safe because sql_len has already been constrained: */ (unsigned short) sql_len, sql, con->dialect, NULL ); LEAVE_GCDL_WITHOUT_ENTERING_PYTHON LEAVE_GDAL_WITHOUT_ENTERING_PYTHON ENTER_GIL_WITHOUT_AFFECTING_DB /* For CREATE DATABASE statements, isc_dsql_execute_immediate is not * supposed to touch the transaction handle: */ assert (unused_trans_handle == NULL_TRANS_HANDLE); } if (DB_API_ERROR(con->status_vector)) { raise_sql_exception(ProgrammingError, "pyob_create_database: ", con->status_vector ); goto fail; } con->state = CON_STATE_OPEN; return (PyObject *) con; fail: assert (PyErr_Occurred()); Py_XDECREF((PyObject *) con); return NULL; } /* pyob_create_database */ static PyObject *pyob_Connection_drop_database(PyObject *self, PyObject *args) { CConnection *con; if (!PyArg_ParseTuple(args, "O!", &ConnectionType, &con) ) { goto fail; } CONN_REQUIRE_OPEN(con); /* CONN_REQUIRE_OPEN should enforce non-null db_handle, but assert anyway: */ assert (con->db_handle != NULL_DB_HANDLE); /* Here, we first save the connection's db_handle, then ask Connection_close * to "close the connection, but don't actually detach." This preserves the * uniformity of the cleanup code between the normal closure paths and * drop_database. */ { isc_db_handle db_handle = con->db_handle; assert (con->state == CON_STATE_OPEN); assert (NOT_RUNNING_IN_CONNECTION_TIMEOUT_THREAD); if (Connection_close(con, TRUE, FALSE) != 0) { goto fail; } assert (con->state == CON_STATE_CLOSED); assert (con->db_handle == NULL_DB_HANDLE); /* We now restore the OPEN state and the db_handle to con; they'll be cleared * below, but only if the isc_drop_database call succeeds. */ con->state = CON_STATE_OPEN; con->db_handle = db_handle; } LEAVE_GIL_WITHOUT_AFFECTING_DB ENTER_GDAL_WITHOUT_LEAVING_PYTHON ENTER_GCDL_WITHOUT_LEAVING_PYTHON isc_drop_database(con->status_vector, &con->db_handle); LEAVE_GCDL_WITHOUT_ENTERING_PYTHON LEAVE_GDAL_WITHOUT_ENTERING_PYTHON ENTER_GIL_WITHOUT_AFFECTING_DB if (DB_API_ERROR(con->status_vector)) { raise_sql_exception(OperationalError, "pyob_Connection_drop_database: ", con->status_vector ); goto fail; } con->db_handle = NULL_DB_HANDLE; con->state = CON_STATE_CLOSED; RETURN_PY_NONE; fail: assert (PyErr_Occurred()); return NULL; } /* pyob_Connection_drop_database */ kinterbasdb-3.3.0/_kisupport_threadsafe_fifo_queue.h0000644000175000001440000000330311130647414022152 0ustar pcisarusers/* KInterbasDB Python Package - Header File for ThreadSafeFIFOQueue * * Version 3.3 * * The following contributors hold Copyright (C) over their respective * portions of code (see license.txt for details): * * [Original Author (maintained through version 2.0-0.3.1):] * 1998-2001 [alex] Alexander Kuznetsov * [Maintainers (after version 2.0-0.3.1):] * 2001-2002 [maz] Marek Isalski * 2002-2007 [dsr] David Rushby * [Contributors:] * 2001 [eac] Evgeny A. 
Cherkashin * 2001-2002 [janez] Janez Jere */ #ifndef _KISUPPORT_THREADSAFE_FIFO_QUEUE_H #define _KISUPPORT_THREADSAFE_FIFO_QUEUE_H #include "_kisupport.h" typedef void (*QueueNodeDelFunc)(void *); typedef struct _QueueNode { volatile void *payload; volatile QueueNodeDelFunc payload_del_func; volatile struct _QueueNode *next; } QueueNode; typedef struct { PlatformMutexType lock; /* There are so many subtle differences between pthread_cond_t objects and * Windows Event objects that it's less error-prone to ifdef the operations * inline than to try to build a uniform abstraction. * Clients of the queue aren't aware of what synch primitives it's using * under the hood anyway, so the platform-specificity doesn't leak into other * parts of the code base. */ #ifdef PLATFORM_WINDOWS HANDLE #else pthread_cond_t #endif not_empty; volatile boolean cancelled; volatile boolean closed; volatile QueueNode *head; volatile QueueNode *tail; } ThreadSafeFIFOQueue; #endif /* if not def _KISUPPORT_THREADSAFE_FIFO_QUEUE_H */ kinterbasdb-3.3.0/docs/0000755000175000001440000000000011133100174014124 5ustar pcisaruserskinterbasdb-3.3.0/docs/beyond-python-db-api.html0000644000175000001440000117617511133076420020773 0ustar pcisarusers Native Database Engine Features and Extensions Beyond the Python DB API — KInterbasDB v3.3.0 documentation

Native Database Engine Features and Extensions Beyond the Python DB API¶

Programmatic Database Creation and Deletion¶

The Firebird engine stores a database in a fairly straightforward manner: as a single file or, if desired, as a segmented group of files.

The engine supports dynamic database creation via the SQL statement CREATE DATABASE.

The engine also supports dropping (deleting) databases dynamically, but dropping is a more complicated operation than creating, for several reasons: an existing database may be in use by users other than the one who requests the deletion, it may have supporting objects such as temporary sort files, and it may even have dependent shadow databases. Although the database engine recognizes a DROP DATABASE SQL statement, support for that statement is limited to the isql command-line administration utility. However, the engine supports the deletion of databases via an API call, which KInterbasDB exposes to Python (see below).

KInterbasDB supports dynamic database creation and deletion via the module-level function kinterbasdb.create_database() and the method drop_database(). These are documented below, then demonstrated by a brief example.

kinterbasdb.create_database()¶

Creates a database according to the supplied CREATE DATABASE SQL statement. Returns an open connection to the newly created database.

Arguments:

Sql: string containing the CREATE DATABASE statement. Note that this statement may need to include a username and password.
Dialect: optional - the SQL dialect under which to execute the statement (defaults to 3).
Connection.drop_database()¶

Deletes the database to which the connection is attached.

This method performs the database deletion in a responsible fashion. Specifically, it:

  • raises an OperationalError instead of deleting the database if there are other active connections to the database
  • deletes supporting files and logs in addition to the primary database file(s)

This method has no arguments.

Example program:

import kinterbasdb

con = kinterbasdb.create_database(
      "create database '/temp/db.db' user 'sysdba' password 'pass'"
      )
con.drop_database()

Advanced Transaction Control¶

For the sake of simplicity, KInterbasDB lets the Python programmer ignore transaction management to the greatest extent allowed by the Python Database API Specification 2.0. The specification says, “if the database supports an auto-commit feature, this must be initially off”. At a minimum, therefore, it is necessary to call the commit method of the connection in order to persist any changes made to the database. Transactions left unresolved by the programmer will be rolled back when the connection is garbage collected.

Remember that because of ACID, every data manipulation operation in the Firebird database engine takes place in the context of a transaction, including operations that are conceptually “read-only”, such as a typical SELECT. The client programmer of KInterbasDB establishes a transaction implicitly by using any SQL execution method, such as execute_immediate(), Cursor.execute(), or Cursor.callproc().

Although KInterbasDB allows the programmer to pay little attention to transactions, it also exposes the full complement of the database engine’s advanced transaction control features: transaction parameters, retaining transactions, savepoints, and distributed transactions.

Explicit transaction start¶

In addition to the implicit transaction initiation required by Python Database API, KInterbasDB allows the programmer to start transactions explicitly via the Connection.begin method.

Connection.begin(tpb)¶

Starts a transaction explicitly. This is never required; a transaction will be started implicitly if necessary.

Tpb: Optional transaction parameter buffer (TPB) populated with kinterbasdb.isc_tpb_* constants. See the Firebird API guide for these constants’ meanings.

Transaction Parameters¶

The database engine offers the client programmer an optional facility called transaction parameter buffers (TPBs) for tweaking the operating characteristics of the transactions he initiates. These include characteristics such as whether the transaction has read and write access to tables, or read-only access, and whether or not other simultaneously active transactions can share table access with the transaction.

Connections have a default_tpb attribute that can be changed to set the default TPB for all transactions subsequently started on the connection. Alternatively, if the programmer only wants to set the TPB for a single transaction, he can start a transaction explicitly via the begin() method and pass a TPB for that single transaction.

For details about TPB construction, see the Firebird API documentation. In particular, the ibase.h supplied with Firebird contains all possible TPB elements – single bytes that the C API defines as constants whose names begin with isc_tpb_. KInterbasDB makes all of those TPB constants available (under the same names) as module-level constants in the form of single-character strings. A transaction parameter buffer is handled in C as a character array; KInterbasDB requires that TPBs be constructed as Python strings. Since the constants in the kinterbasdb.isc_tpb_* family are single-character Python strings, they can simply be concatenated to create a TPB.

Warning

This method requires good knowledge of the tpb_block structure and the proper order of its various parameters, as the Firebird engine will raise an error when a badly structured block is used. The definition of table reservation parameters is also awkward, as you’ll need to mix binary codes with table names passed as Pascal strings (characters preceded by the string length).
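
As a concrete illustration, here is a minimal sketch of a hand-built TPB that reserves a single table (assuming an open connection `con`; MY_TABLE is a hypothetical table). The byte layout (a lock code, then the table name as a Pascal string, then a sharing code) follows the Firebird API Guide:

import kinterbasdb

tableName = 'MY_TABLE'                       # hypothetical table
customTPB = (
      kinterbasdb.isc_tpb_write              # access mode
    + kinterbasdb.isc_tpb_concurrency        # isolation level
    + kinterbasdb.isc_tpb_wait               # lock resolution
    + kinterbasdb.isc_tpb_lock_write         # reserve a table for writing:
    + chr(len(tableName)) + tableName        #   name as a Pascal string
    + kinterbasdb.isc_tpb_protected          #   sharing mode
  )
con.begin(tpb=customTPB)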

The following program uses explicit transaction initiation and TPB construction to establish an unobtrusive transaction for read-only access to the database:

import kinterbasdb

con = kinterbasdb.connect(dsn='localhost:/temp/test.db', user='sysdba', password='pass')

# Construct a TPB by concatenating single-character strings (bytes)
# from the kinterbasdb.isc_tpb_* family.
customTPB = (
      kinterbasdb.isc_tpb_read
    + kinterbasdb.isc_tpb_read_committed
    + kinterbasdb.isc_tpb_rec_version
  )

# Explicitly start a transaction with the custom TPB:
con.begin(tpb=customTPB)

# Now read some data using cursors:
...

# Commit the transaction with the custom TPB.  Future transactions
# opened on con will not use a custom TPB unless it is explicitly
# passed to con.begin every time, as it was above, or
# con.default_tpb is changed to the custom TPB, as in:
#   con.default_tpb = customTPB
con.commit()

For convenient and safe construction of a custom tpb_block, KInterbasDB provides the special utility class TPB.

class kinterbasdb.TPB¶
access_mode¶
Required access mode. Default isc_tpb_write.
isolation_level¶
Required Transaction Isolation Level. Default isc_tpb_concurrency.
lock_resolution¶
Required lock resolution method. Default isc_tpb_wait.
lock_timeout¶
Required lock timeout. Default None.
table_reservation¶

Table reservation specification. Default None. Instead of changing the value of the table_reservation object itself, you must change its elements by manipulating it as though it were a dictionary that maps “TABLE_NAME”: (sharingMode, accessMode). For example:

tpbBuilder.table_reservation["MY_TABLE"] = \
  (kinterbasdb.isc_tpb_protected, kinterbasdb.isc_tpb_lock_write)
render()¶
Returns a valid transaction parameter block according to the current values of the member attributes.
import kinterbasdb

con = kinterbasdb.connect(dsn='localhost:/temp/test.db', user='sysdba', password='pass')

# Use TPB to construct valid transaction parameter block
# from the kinterbasdb.isc_tpb_* family.
customTPB = kinterbasdb.TPB()
customTPB.access_mode = kinterbasdb.isc_tpb_read
customTPB.isolation_level = (kinterbasdb.isc_tpb_read_committed
                             + kinterbasdb.isc_tpb_rec_version)

# Explicitly start a transaction with the custom TPB:
con.begin(tpb=customTPB.render())

# Now read some data using cursors:
...

# Commit the transaction with the custom TPB.  Future transactions
# opened on con will not use a custom TPB unless it is explicitly
# passed to con.begin every time, as it was above, or
# con.default_tpb is changed to the custom TPB, as in:
#   con.default_tpb = customTPB.render()
con.commit()

If you want to build only the table reservation part of the TPB (for example, to add it to various custom-built parameter blocks), you can use the class TableReservation instead of TPB.

class kinterbasdb.TableReservation¶

This is a dictionary-like class, where keys are table names and values must be tuples of access parameters, i.e. “TABLE_NAME”: (sharingMode, accessMode).

render()¶
Returns a properly formatted table reservation part of a transaction parameter block according to the current values.
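
For instance, a TableReservation can be rendered independently and spliced into a hand-built parameter block (a minimal sketch, assuming an open connection `con` and a hypothetical table MY_TABLE):

import kinterbasdb

reservation = kinterbasdb.TableReservation()
reservation["MY_TABLE"] = (kinterbasdb.isc_tpb_protected,
                           kinterbasdb.isc_tpb_lock_write)

customTPB = (
      kinterbasdb.isc_tpb_write
    + kinterbasdb.isc_tpb_concurrency
    + kinterbasdb.isc_tpb_wait
    + reservation.render()
  )
con.begin(tpb=customTPB)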

The Connection object also exposes two methods that return information about the current transaction:

class kinterbasdb.Connection¶
trans_info(request)¶

Pythonic wrapper around transaction_info() call.

Request: One or more information request codes (see transaction_info for details). Multiple codes must be passed as a tuple.

Returns the decoded response(s) for the specified request code(s). When multiple requests are passed, returns a dictionary where the key is the request code and the value is the response from the server, as the sketch below illustrates.

transaction_info(request, result_type)¶

A thin wrapper around the Firebird API isc_transaction_info call. This function returns information about the active transaction, and raises a ProgrammingError exception when no transaction is active.

Request:

One of the following constants:

  • isc_info_tra_id
  • isc_info_tra_oldest_interesting
  • isc_info_tra_oldest_snapshot
  • isc_info_tra_oldest_active
  • isc_info_tra_isolation
  • isc_info_tra_access
  • isc_info_tra_lock_timeout

See Firebird API Guide for details.

Result_type:

String code for result type:

  • ‘i’ for Integer
  • ‘s’ for String
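
A minimal usage sketch (assuming an open connection `con` with an active transaction):

import kinterbasdb

# A single request code yields a single decoded value:
traId = con.trans_info(kinterbasdb.isc_info_tra_id)

# Multiple request codes, passed as a tuple, yield a dictionary keyed by
# request code:
info = con.trans_info((kinterbasdb.isc_info_tra_id,
                       kinterbasdb.isc_info_tra_oldest_active))
print info[kinterbasdb.isc_info_tra_id]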

Retaining Operations¶

The commit and rollback methods of kinterbasdb.Connection accept an optional boolean parameter retaining (default False) to indicate whether to recycle the transactional context of the transaction being resolved by the method call.

If retaining is True, the infrastructural support for the transaction active at the time of the method call will be “retained” (efficiently and transparently recycled) after the database server has committed or rolled back the conceptual transaction.

In code that commits or rolls back frequently, “retaining” the transaction yields considerably better performance. However, retaining transactions must be used cautiously because they can interfere with the server’s ability to garbage collect old record versions. For details about this issue, read the “Garbage” section of this document by Ann Harrison.

For more information about retaining transactions, see Firebird documentation.
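
A minimal sketch of a retaining commit in a polling loop (assuming an open connection `con` and a hypothetical table MONITORED_TABLE):

cur = con.cursor()
for _ in range(10):
    cur.execute("select count(*) from MONITORED_TABLE")
    print cur.fetchone()[0]
    # Resolve the transaction, but recycle its transactional context so the
    # next iteration does not pay the cost of starting a new one:
    con.commit(retaining=True)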

Savepoints¶

Firebird 1.5 introduced support for transaction savepoints. Savepoints are named, intermediate control points within an open transaction that can later be rolled back to, without affecting the preceding work. Multiple savepoints can exist within a single unresolved transaction, providing “multi-level undo” functionality.

Although Firebird savepoints are fully supported from SQL alone via the SAVEPOINT ‘name’ and ROLLBACK TO ‘name’ statements, KInterbasDB also exposes savepoints at the Python API level for the sake of convenience.

Connection.savepoint(name)¶
Establishes a savepoint with the specified name. To roll back to a specific savepoint, call the rollback() method and provide a value (the name of the savepoint) for the optional savepoint parameter. If the savepoint parameter of rollback() is not specified, the active transaction is cancelled in its entirety, as required by the Python Database API Specification.

The following program demonstrates savepoint manipulation via the KInterbasDB API, rather than raw SQL.

import kinterbasdb

con = kinterbasdb.connect(dsn='localhost:/temp/test.db', user='sysdba', password='pass')
cur = con.cursor()

cur.execute("recreate table test_savepoints (a integer)")
con.commit()

print 'Before the first savepoint, the contents of the table are:'
cur.execute("select * from test_savepoints")
print ' ', cur.fetchall()

cur.execute("insert into test_savepoints values (?)", [1])
con.savepoint('A')
print 'After savepoint A, the contents of the table are:'
cur.execute("select * from test_savepoints")
print ' ', cur.fetchall()

cur.execute("insert into test_savepoints values (?)", [2])
con.savepoint('B')
print 'After savepoint B, the contents of the table are:'
cur.execute("select * from test_savepoints")
print ' ', cur.fetchall()

cur.execute("insert into test_savepoints values (?)", [3])
con.savepoint('C')
print 'After savepoint C, the contents of the table are:'
cur.execute("select * from test_savepoints")
print ' ', cur.fetchall()

con.rollback(savepoint='A')
print 'After rolling back to savepoint A, the contents of the table are:'
cur.execute("select * from test_savepoints")
print ' ', cur.fetchall()

con.rollback()
print 'After rolling back entirely, the contents of the table are:'
cur.execute("select * from test_savepoints")
print ' ', cur.fetchall()

The output of the example program is shown below.

Before the first savepoint, the contents of the table are:
  []
After savepoint A, the contents of the table are:
  [(1,)]
After savepoint B, the contents of the table are:
  [(1,), (2,)]
After savepoint C, the contents of the table are:
  [(1,), (2,), (3,)]
After rolling back to savepoint A, the contents of the table are:
  [(1,)]
After rolling back entirely, the contents of the table are:
  []

Using multiple transactions with the same connection¶

New in version 3.3.

Python Database API 2.0 was created with the assumption that a connection can support only one transaction. However, Firebird can support multiple independent transactions running simultaneously within a single connection / attachment to the database. This feature is important, as applications may require multiple transactions opened simultaneously to perform various tasks, which would otherwise require opening multiple connections and thus consuming more resources than necessary.

KInterbasDB surfaces this Firebird feature through the new class Transaction and extensions to the Connection and Cursor classes.

class kinterbasdb.Connection
trans(tpb=None)¶
Creates a new Transaction that operates within the context of this connection. Cursors can be created within that Transaction via its .cursor() method.
transactions¶

read-only property

List of non-close()d Transaction objects associated with this Connection. An element of this list may represent a resolved or unresolved physical transaction. Once a Transaction object has been created, it is only removed from the Connection’s tracker if the Transaction’s close() method is called (Transaction.__del__ triggers an implicit close() call if necessary), or (obviously) if the Connection itself is close()d. The initial implementation will not make any guarantees about the order of the Transactions in this list.

main_transaction¶

read-only property

Transaction object that represents the DB-API implicit transaction. The implementation guarantees that the same Transaction object will be reused across all DB-API transactions during the lifetime of the Connection.

prepare()¶
Manually triggers the first phase of a two-phase commit (2PC). Use of this method is optional; if preparation is not triggered manually, it will be performed implicitly by commit() in a 2PC. See also the Distributed Transactions section for details.
class kinterbasdb.Cursor¶
transaction¶

read-only property

Transaction with which this Cursor is associated. None if the Transaction has been close()d, or if the Cursor has been close()d.

class kinterbasdb.Transaction¶
__init__(connection, tpb=None)¶
The constructor requires an open Connection object and accepts an optional tpb specification.
connection¶

read-only property

Connection object on which this Transaction is based. When the Connection’s close() method is called, all Transactions that depend on the connection will also be implicitly close()d. If a Transaction has been close()d, its connection property will be None.

closed¶

read-only property

True if Transaction has been closed (explicitly or implicitly).

n_physical¶

read-only property (int)

Number of physical transactions that have been executed via this Transaction object during its lifetime.

resolution¶

read-only property (int)

Zero if this Transaction object is currently managing an open physical transaction. One if the physical transaction has been resolved normally. Note that this is an int property rather than a bool, and is named resolution rather than resolved, so that the non-zero values other than one can be assigned to convey specific information about the state of the transaction, in a future implementation (consider distributed transaction prepared state, limbo state, etc.).

cursors¶
List of non-close()d Cursor objects associated with this Transaction. When the Transaction’s close() method is called, whether explicitly or implicitly, it will implicitly close() each of its Cursors. The current implementation does not make any guarantees about the order of the Cursors in this list.
begin(tpb)¶
See Connection.begin() for details.
commit(retaining=False)¶
See kinterbasdb.Connection.commit() for details.
close()¶
Permanently closes the Transaction object and severs its associations with other objects. If the physical transaction is unresolved when this method is called, a rollback() will be performed first.
prepare()¶
See Connection.prepare() for details.
rollback(retaining=False)¶
See kinterbasdb.Connection.rollback() for details.
savepoint()¶
See Connection.savepoint() for details.
trans_info()¶
See Connection.trans_info() for details.
transaction_info()¶
See Connection.transaction_info() for details.
cursor()¶
Creates a new Cursor that will operate in the context of this Transaction. The association between a Cursor and its Transaction is set when the Cursor is created, and cannot be changed during the lifetime of that Cursor. See Connection.cursor() for more details.

If you don’t want multiple transactions, you can use the implicit transaction object associated with the Connection and control it via the transaction-management and cursor methods of the Connection.

Alternatively, you can directly access the implicit transaction exposed as main_transaction and control it via its transaction-management methods.

To use additional transactions, create a new Transaction object by calling the Connection.trans() method, as in the sketch below.
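
A minimal sketch of two independent transactions on one connection (assuming an open connection `con` and a hypothetical table t):

import kinterbasdb

# A read-only, read-committed transaction for polling:
readTxn = con.trans(tpb=kinterbasdb.isc_tpb_read
                        + kinterbasdb.isc_tpb_read_committed
                        + kinterbasdb.isc_tpb_rec_version)
# A default read-write transaction:
writeTxn = con.trans()

curWrite = writeTxn.cursor()
curWrite.execute("insert into t (a) values (?)", (1,))
writeTxn.commit()  # resolves only writeTxn; readTxn is unaffected

curRead = readTxn.cursor()
curRead.execute("select a from t")
print curRead.fetchall()
readTxn.commit()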

Distributed Transactions¶

Distributed transactions are transactions that span multiple databases. KInterbasDB provides this Firebird feature through the ConnectionGroup class.

class kinterbasdb.ConnectionGroup¶
__init__(connections=())¶
The constructor accepts an optional list of database connections. The connections cannot be in a closed state.
disband()¶
Forcefully removes all connections from the connection group. If a transaction is active, it is canceled (rolled back).
add(con)¶
Adds an active connection to the group. If the connection already belongs to this or any other ConnectionGroup, has an active transaction, or has a timeout defined, an exception is raised. The group also cannot accept new members while it has an unresolved transaction.
remove(con)¶
Removes the specified connection from the group. Raises an exception if the connection doesn’t belong to this group or if the group has an unresolved transaction.
clear()¶
Removes all connections from the group. Raises an exception if the group has an unresolved transaction.
members()¶
Returns a list of the connection objects that belong to this group.
count()¶
Returns the number of connection objects that belong to this group.
contains(con)¶
Returns True if the specified connection belongs to this group.
begin()¶
Starts a distributed transaction over the member connections.
commit(retaining=False)¶
Commits the distributed transaction over the member connections using 2PC.
prepare()¶
Manually triggers the first phase of a two-phase commit (2PC). Use of this method is optional; if preparation is not triggered manually, it will be performed implicitly by commit() in a 2PC.
rollback(retaining=False)¶
Rolls back the distributed transaction over the member connections.

Note

While a Connection belongs to a ConnectionGroup, any calls to the connection’s transactional methods ( begin, prepare, commit, rollback) will “bubble upward” to apply to the distributed transaction shared by the group as a whole.

Pitfalls and Limitations

  • Never add more than one connection to the same database to the same ConnectionGroup!
  • The current implementation works only with connection objects and their main transactions. Secondary transaction objects obtained from a connection cannot participate in a distributed transaction.

Example:

import kinterbasdb

# Establish multiple connections the usual way:
con1 = kinterbasdb.connect(dsn='weasel:/temp/test.db', user='sysdba', password='pass')
con2 = kinterbasdb.connect(dsn='coyote:/temp/test.db', user='sysdba', password='pass')

# Create a ConnectionGroup to associate multiple connections in such a
# way that they can participate in a distributed transaction.
# !!!
# NO TWO MEMBERS OF A SINGLE CONNECTIONGROUP SHOULD BE ATTACHED TO THE SAME DATABASE!
# !!!
group = kinterbasdb.ConnectionGroup( connections=(con1,con2) )

# Start a distributed transaction involving all of the members of the group
# (con1 and con2 in this case) with one of the following approaches:
#   - Call  group.begin()
#   - Call  con1.begin(); the operation will "bubble upward" and apply to the group.
#   - Call  con2.begin(); the operation will "bubble upward" and apply to the group.
#   - Just start executing some SQL statements on either con1 or con2.
#     A transaction will be started implicitly; it will be a distributed
#     transaction because con1 and con2 are members of a ConnectionGroup.
group.begin()

# Perform some database changes the usual way (via cursors on con1 and con2):
...

# Commit or roll back the distributed transaction by calling the commit
# or rollback method of the ConnectionGroup itself, or the commit or
# rollback method of any member connection (con1 or con2 in this case).
group.commit()

# Unless you want to perform another distributed transaction, disband the
# group so that member connections can operate independently again.
group.clear()

Prepared Statements¶

When you define a Python function, the interpreter initially parses the textual representation of the function and generates a binary equivalent called bytecode. The bytecode representation can then be executed directly by the Python interpreter any number of times and with a variety of parameters, but the human-oriented textual definition of the function never need be parsed again.

Database engines perform a similar series of steps when executing a SQL statement. Consider the following series of statements:

cur.execute("insert into the_table (a,b,c) values ('aardvark', 1, 0.1)")
...
cur.execute("insert into the_table (a,b,c) values ('zymurgy', 2147483647, 99999.999)")

If there are many statements in that series, wouldn’t it make sense to “define a function” to insert the provided “parameters” into the predetermined fields of the predetermined table, instead of forcing the database engine to parse each statement anew and figure out what database entities the elements of the statement refer to? In other words, why not take advantage of the fact that the form of the statement (“the function”) stays the same throughout, and only the values (“the parameters”) vary? Prepared statements deliver that performance benefit and other advantages as well.

The following code is semantically equivalent to the series of insert operations discussed previously, except that it uses a single SQL statement that contains Firebird’s parameter marker ( ?) in the slots where values are expected, then supplies those values as Python tuples instead of constructing a textual representation of each value and passing it to the database engine for parsing:

insertStatement = "insert into the_table (a,b,c) values (?,?,?)"
cur.execute(insertStatement, ('aardvark', 1, 0.1))
...
cur.execute(insertStatement, ('zymurgy', 2147483647, 99999.999))

Only the values change as each row is inserted; the statement remains the same. For many years, KInterbasDB has recognized situations similar to this one and automatically reused the same prepared statement in each Cursor.execute() call. In KInterbasDB 3.2, the scheme for automatically reusing prepared statements has become more sophisticated, and the API has been extended to offer the client programmer manual control over prepared statement creation and use.

The entry point for manual statement preparation is the Cursor.prep method.

Cursor.prep(sql)¶
Sql: string parameter that contains the SQL statement to be prepared. Returns a PreparedStatement instance.
class kinterbasdb.PreparedStatement¶

PreparedStatement has no public methods, but does have the following public read-only properties:

sql¶
A reference to the string that was passed to prep() to create this PreparedStatement.
statement_type¶

An integer code that can be matched against the statement type constants in the kinterbasdb.isc_info_sql_stmt_* series. The following statement type codes are currently available:

  • isc_info_sql_stmt_commit
  • isc_info_sql_stmt_ddl
  • isc_info_sql_stmt_delete
  • isc_info_sql_stmt_exec_procedure
  • isc_info_sql_stmt_get_segment
  • isc_info_sql_stmt_insert
  • isc_info_sql_stmt_put_segment
  • isc_info_sql_stmt_rollback
  • isc_info_sql_stmt_savepoint
  • isc_info_sql_stmt_select
  • isc_info_sql_stmt_select_for_upd
  • isc_info_sql_stmt_set_generator
  • isc_info_sql_stmt_start_trans
  • isc_info_sql_stmt_update
n_input_params¶
The number of input parameters the statement requires.
n_output_params¶
The number of output fields the statement produces.
plan¶
A string representation of the execution plan generated for this statement by the database engine’s optimizer. This property can be used, for example, to verify that a statement is using the expected index.
description¶
A Python DB API 2.0 description sequence (of the same format as Cursor.description) that describes the statement’s output parameters. Statements without output parameters have a description of None.

In addition to programmatically examining the characteristics of a SQL statement via the properties of PreparedStatement, the client programmer can submit a PreparedStatement to Cursor.execute() or Cursor.executemany() for execution. The code snippet below is semantically equivalent to both of the previous snippets in this section, but it explicitly prepares the INSERT statement in advance, then submits it to Cursor.executemany() for execution:

insertStatement = cur.prep("insert into the_table (a,b,c) values (?,?,?)")
inputRows = [
    ('aardvark', 1, 0.1),
    ...
    ('zymurgy', 2147483647, 99999.999)
  ]
cur.executemany(insertStatement, inputRows)

Example Program

The following program demonstrates the explicit use of PreparedStatements. It also benchmarks explicit PreparedStatement reuse against KInterbasDB’s automatic PreparedStatement reuse, and against an input strategy that prevents PreparedStatement reuse.

import time
import kinterbasdb

con = kinterbasdb.connect(dsn=r'localhost:D:\temp\test-20.firebird',
    user='sysdba', password='masterkey'
  )

cur = con.cursor()

# Create supporting database entities:
cur.execute("recreate table t (a int, b varchar(50))")
con.commit()
cur.execute("create unique index unique_t_a on t(a)")
con.commit()

# Explicitly prepare the insert statement:
psIns = cur.prep("insert into t (a,b) values (?,?)")
print 'psIns.sql: "%s"' % psIns.sql
print 'psIns.statement_type == kinterbasdb.isc_info_sql_stmt_insert:', (
    psIns.statement_type == kinterbasdb.isc_info_sql_stmt_insert
  )
print 'psIns.n_input_params: %d' % psIns.n_input_params
print 'psIns.n_output_params: %d' % psIns.n_output_params
print 'psIns.plan: %s' % psIns.plan

print

N = 10000
iStart = 0

# The client programmer uses a PreparedStatement explicitly:
startTime = time.time()
for i in xrange(iStart, iStart + N):
    cur.execute(psIns, (i, str(i)))
print (
    'With explicit prepared statement, performed'
    '\n  %0.2f insertions per second.' % (N / (time.time() - startTime))
  )
con.commit()

iStart += N

# KInterbasDB automatically uses a PreparedStatement "under the hood":
startTime = time.time()
for i in xrange(iStart, iStart + N):
    cur.execute("insert into t (a,b) values (?,?)", (i, str(i)))
print (
    'With implicit prepared statement, performed'
    '\n  %0.2f insertions per second.' % (N / (time.time() - startTime))
  )
con.commit()

iStart += N

# A new SQL string containing the inputs is submitted every time, so
# KInterbasDB is not able to implicitly reuse a PreparedStatement.  Also, in a
# more complicated scenario where the end user supplied the string input
# values, the program would risk SQL injection attacks:
startTime = time.time()
for i in xrange(iStart, iStart + N):
    cur.execute("insert into t (a,b) values (%d,'%s')" % (i, str(i)))
print (
    'When unable to reuse prepared statement, performed'
    '\n  %0.2f insertions per second.' % (N / (time.time() - startTime))
  )
con.commit()

# Prepare a SELECT statement and examine its properties.  The optimizer's plan
# should use the unique index that we created at the beginning of this program.
print
psSel = cur.prep("select * from t where a = ?")
print 'psSel.sql: "%s"' % psSel.sql
print 'psSel.statement_type == kinterbasdb.isc_info_sql_stmt_select:', (
    psSel.statement_type == kinterbasdb.isc_info_sql_stmt_select
  )
print 'psSel.n_input_params: %d' % psSel.n_input_params
print 'psSel.n_output_params: %d' % psSel.n_output_params
print 'psSel.plan: %s' % psSel.plan

# The current implementation does not allow PreparedStatements to be prepared
# on one Cursor and executed on another:
print
print 'Note that PreparedStatements are not transferrable from one cursor to another:'
cur2 = con.cursor()
cur2.execute(psSel)

Output:

psIns.sql: "insert into t (a,b) values (?,?)"
psIns.statement_type == kinterbasdb.isc_info_sql_stmt_insert: True
psIns.n_input_params: 2
psIns.n_output_params: 0
psIns.plan: None

With explicit prepared statement, performed
  9551.10 insertions per second.
With implicit prepared statement, performed
  9407.34 insertions per second.
When unable to reuse prepared statement, performed
  1882.53 insertions per second.

psSel.sql: "select * from t where a = ?"
psSel.statement_type == kinterbasdb.isc_info_sql_stmt_select: True
psSel.n_input_params: 1
psSel.n_output_params: 2
psSel.plan: PLAN (T INDEX (UNIQUE_T_A))

Note that PreparedStatements are not transferrable from one cursor to another:
Traceback (most recent call last):
  File "adv_prepared_statements__overall_example.py", line 86, in ?
    cur2.execute(psSel)
kinterbasdb.ProgrammingError: (0, 'A PreparedStatement can only be used with the
 Cursor that originally prepared it.')

As you can see, the version that prevents the reuse of prepared statements is about five times slower – for a trivial statement. In a real application, SQL statements are likely to be far more complicated, so the speed advantage of using prepared statements would only increase.

As the timings indicate, KInterbasDB does a good job of reusing prepared statements even if the client program is written in a style strictly compatible with the Python DB API 2.0 (which accepts only strings – not PreparedStatement objects – to the Cursor.execute() method). The performance loss in this case is less than one percent.

Named Cursors¶

To allow the Python programmer to perform scrolling UPDATE or DELETE via the “SELECT ... FOR UPDATE” syntax, KInterbasDB provides the read/write property Cursor.name.

Cursor.name¶
Name for the SQL cursor. This property can be ignored entirely if you don’t need to use it.

Example Program

import kinterbasdb

con = kinterbasdb.connect(dsn='localhost:/temp/test.db', user='sysdba', password='pass')
curScroll = con.cursor()
curUpdate = con.cursor()

curScroll.execute("select city from addresses for update")
curScroll.name = 'city_scroller'
update = "update addresses set city=? where current of " + curScroll.name

for (city,) in curScroll:
    city = ... # make some changes to city
    curUpdate.execute( update, (city,) )

con.commit()

Parameter Conversion¶

KInterbasDB converts bound parameters marked with a ? in SQL code in a standard way. However, the module also offers several extensions to standard parameter binding, intended to make client code more readable and more convenient to write.

Implicit Conversion of Input Parameters from Strings¶

The database engine treats most SQL data types in a weakly typed fashion: the engine may attempt to convert the raw value to a different type, as appropriate for the current context. For instance, the SQL expressions 123 (integer) and ‘123’ (string) are treated equivalently when the value is to be inserted into an integer field; the same applies when ‘123’ and 123 are to be inserted into a varchar field.

This weak typing model is quite unlike Python’s dynamic yet strong typing. Although weak typing is regarded with suspicion by most experienced Python programmers, the database engine is in certain situations so aggressive about its typing model that KInterbasDB must compromise in order to remain an elegant means of programming the database engine.

An example is the handling of “magic values” for date and time fields. The database engine interprets certain string values such as ‘yesterday’ and ‘now’ as having special meaning in a date/time context. If KInterbasDB did not accept strings as the values of parameters destined for storage in date/time fields, the resulting code would be awkward. Consider the difference between the two Python snippets below, which insert a row containing an integer and a timestamp into a table defined with the following DDL statement:

create table test_table (i int, t timestamp)

i = 1
t = 'now'
sqlWithMagicValues = "insert into test_table (i, t) values (?, '%s')" % t
cur.execute( sqlWithMagicValues, (i,) )

i = 1
t = 'now'
cur.execute( "insert into test_table (i, t) values (?, ?)", (i, t) )

If KInterbasDB did not support weak parameter typing, string parameters that the database engine is to interpret as “magic values” would have to be rolled into the SQL statement in a separate operation from the binding of the rest of the parameters, as in the first Python snippet above. Implicit conversion of parameter values from strings allows the consistency evident in the second snippet, which is both more readable and more general.

It should be noted that KInterbasDB does not perform the conversion from string itself. Instead, it passes that responsibility to the database engine by changing the parameter metadata structure dynamically at the last moment, then restoring the original state of the metadata structure after the database engine has performed the conversion.

A secondary benefit is that when one uses KInterbasDB to import large amounts of data from flat files into the database, the incoming values need not necessarily be converted to their proper Python types before being passed to the database engine. Eliminating this intermediate step may accelerate the import process considerably, although other factors such as the chosen connection protocol and the deactivation of indexes during the import are more consequential. For bulk import tasks, the database engine’s external tables also deserve consideration. External tables can be used to suck semi-structured data from flat files directly into the relational database without the intervention of an ad hoc conversion program.

Dynamic Type Translation¶

Dynamic type translators are conversion functions registered by the Python programmer to transparently convert database field values to and from their internal representation.

The client programmer can choose to ignore translators altogether, in which case KInterbasDB will manage them behind the scenes. Otherwise, the client programmer can use any of several standard type translators included with KInterbasDB, register custom translators, or set the translators to None to deal directly with the KInterbasDB-internal representation of the data type. When translators have been registered for a specific SQL data type, Python objects on their way into a database field of that type will be passed through the input translator before they are presented to the database engine; values on their way out of the database into Python will be passed through the corresponding output translator. Output and input translation for a given type is usually implemented by two different functions.

Specifics of the Dynamic Type Translation API¶

Translators are managed with the following methods of Connection and Cursor.

Connection.get_type_trans_in()¶
Retrieves the inbound type translation map.
Connection.set_type_trans_in(trans_dict)¶
Changes the inbound type translation map.
Connection.get_type_trans_out()¶
Retrieves the outbound type translation map.
Connection.set_type_trans_out(trans_dict)¶
Changes the outbound type translation map.
Cursor.get_type_trans_in()¶
Retrieves the inbound type translation map.
Cursor.set_type_trans_in(trans_dict)¶
Changes the inbound type translation map.
Cursor.get_type_trans_out()¶
Retrieves the outbound type translation map.
Cursor.set_type_trans_out(trans_dict)¶
Changes the outbound type translation map.

The set_type_trans_[in|out] methods accept a single argument: a mapping of type name to translator. The get_type_trans_[in|out] methods return a copy of the translation table.

Cursors inherit their Connection’s translation settings, but can override them without affecting the connection or other cursors (much as subclasses can override the methods of their base classes).

The following code snippet installs an input translator for fixed point types (NUMERIC / DECIMAL SQL types) into a connection:

con.set_type_trans_in( {'FIXED': fixed_input_translator_function} )

The following method call retrieves the type translation table for con:

con.get_type_trans_in()

The method call above would return a translation table (dictionary) such as this:

{
  'DATE': <function date_conv_in at 0x00920648>,
  'TIMESTAMP': <function timestamp_conv_in at 0x0093E090>,
  'FIXED': <function <lambda> at 0x00962DB0>,
  'TIME': <function time_conv_in at 0x009201B0>
}

Notice that although the sample code registered only one type translator, there are four listed in the mapping returned by the get_type_trans_in method. By default, KInterbasDB uses dynamic type translation to implement the conversion of DATE, TIME, TIMESTAMP, NUMERIC, and DECIMAL values. For the source code locations of KInterbasDB’s reference translators, see the table in the next section.

In the sample above, a translator is registered under the key ‘FIXED’, but Firebird has no SQL data type named FIXED. The following table lists the names of the database engine’s SQL data types in the left column, and the corresponding KInterbasDB-specific key under which client programmers can register translators in the right column.

Mapping of SQL Data Type Names to Translator Keys

SQL Type(s)                  Translator Key
CHAR / VARCHAR               'TEXT' for fields with charsets NONE, OCTETS, or ASCII;
                             'TEXT_UNICODE' for all other charsets
BLOB                         'BLOB'
SMALLINT / INTEGER / BIGINT  'INTEGER'
FLOAT / DOUBLE PRECISION     'FLOATING'
NUMERIC / DECIMAL            'FIXED'
DATE                         'DATE'
TIME                         'TIME'
TIMESTAMP                    'TIMESTAMP'

Consequences of the Dynamic Type Translation in KInterbasDB¶

Dynamic type translation has eliminated KInterbasDB’s dependency on mx.DateTime. Although KInterbasDB will continue to use mx.DateTime as its default date/time representation for the sake of backward compatibility, dynamic type translation allows users to conveniently deal with database date/time values in terms of the new standard library module datetime, or any other representation they care to write translators for.

Dynamic type translation also allows NUMERIC / DECIMAL values to be transparently represented as decimal.Decimal objects rather than scaled integers, which is much more convenient. For backward compatibility, NUMERIC / DECIMAL values are still represented by default as Python floats, and the older API based on the precision_mode attribute is still present. However, all of these representations are now implemented “under the hood” via dynamic type translation.

Reference implementations of all of the translators discussed above are provided with KInterbasDB, in these modules:

Reference Translators Included with KInterbasDB

SQL Type(s)                        Python Type(s)                    Reference Implementation In Module
NUMERIC / DECIMAL                  float (imprecise) (default)       kinterbasdb.typeconv_fixed_stdlib
                                   scaled int (precise)              kinterbasdb.typeconv_fixed_stdlib
                                   fixedpoint.FixedPoint (precise)   kinterbasdb.typeconv_fixed_fixedpoint
                                   decimal.Decimal (precise)         kinterbasdb.typeconv_fixed_decimal
DATE / TIME / TIMESTAMP            mx.DateTime (default)             kinterbasdb.typeconv_datetime_mx
                                   Python 2.3+ datetime              kinterbasdb.typeconv_datetime_stdlib
CHAR / VARCHAR (any character      unicode                           kinterbasdb.typeconv_text_unicode
set except NONE, OCTETS, ASCII)

Writing Custom Translators¶

Below is a table that specifies the required argument and return value signatures of input and output converters for the various translator keys. Python’s native types map perfectly to ‘TEXT’, ‘TEXT_UNICODE’, ‘BLOB’, ‘INTEGER’, and ‘FLOATING’ types, so in those cases the translator signatures are very simple. The signatures for ‘FIXED’, ‘DATE’, ‘TIME’, and ‘TIMESTAMP’ are not as simple because Python (before 2.4) lacks native types to represent these values with both precision and convenience. KInterbasDB handles ‘FIXED’ values internally as scaled integers; the date and time types as tuples.

KInterbasDB itself uses translators implemented according to the rules in the table below; the code for these reference translators can be found in the Python modules named kinterbasdb.typeconv_* (see the table in the previous section for details).

Signature Specifications for Translators¶

Translator Key ‘TEXT’
For CHAR / VARCHAR fields with character sets NONE, OCTETS, or ASCII
Input Translator Argument / Return Value Signature:

Args: a single Python str argument (or None)

Returns: a single Python string

Output Translator Signature:
Same signature as input translator, except that return value is not constrained.
Translator Key ‘TEXT_UNICODE’
For CHAR / VARCHAR fields with charsets other than NONE, OCTETS, or ASCII
Input Translator Argument / Return Value Signature:

Args: a single Python 2-tuple argument containing a Python unicode or str object (or None) in the first element; the database character set code in the second element (the tuple is of the form (val, dbCharacterSetCode)).

The database character set codes (which are integers) are defined in the RDB$CHARACTER_SETS system table. The module kinterbasdb.typeconv_text_unicode contains a dictionary named DB_TO_PYTHON_ENCODING_MAP that maps database character set codes to Python codec names.

For example, the database character set UNICODE_FSS has code 3; typeconv_text_unicode.DB_TO_PYTHON_ENCODING_MAP[3] is ‘utf_8’, the name of a Python codec that can be passed to the encode / decode methods of unicode / str.

Returns: a Python str object containing the encoded representation of the incoming value (typically computed via val.encode).

Output Translator Signature:

Args: a single Python 2-tuple argument containing a Python str object (or None) in the first element; the database character set code in the second element (the tuple is of the form (val, dbCharacterSetCode)). val contains the encoded representation of the Unicode string.

Returns: a Python unicode object containing the decoded representation of the outgoing value (typically computed via val.decode).
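Putting those two signatures together, here is a minimal sketch of a ‘TEXT_UNICODE’ translator pair (the function names are illustrative, not part of KInterbasDB’s API; the sketch assumes an existing connection con and leans on the DB_TO_PYTHON_ENCODING_MAP dictionary described above):

import kinterbasdb.typeconv_text_unicode as tc_tu

def unicode_conv_in((val, dbCharSetCode)):
    # Encode a unicode object into the raw bytes the engine expects;
    # str objects are assumed to be encoded already and pass through.
    if val is None or isinstance(val, str):
        return val
    pyEncoding = tc_tu.DB_TO_PYTHON_ENCODING_MAP[dbCharSetCode]
    return val.encode(pyEncoding)

def unicode_conv_out((val, dbCharSetCode)):
    # Decode the raw bytes delivered by the engine into a unicode object.
    if val is None:
        return None
    pyEncoding = tc_tu.DB_TO_PYTHON_ENCODING_MAP[dbCharSetCode]
    return val.decode(pyEncoding)

con.set_type_trans_in ({'TEXT_UNICODE': unicode_conv_in})
con.set_type_trans_out({'TEXT_UNICODE': unicode_conv_out})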

Translator Key ‘BLOB’

Input Translator Argument / Return Value Signature:
By default, same signature as that of ‘TEXT’. A special case was introduced in KInterbasDB 3.2 to allow for streaming blob handling.
Output Translator Signature:
Same signature as input translator, except that return value is not constrained.

Translator Key ‘INTEGER’

Input Translator Argument / Return Value Signature:

Args: a single Python int argument (or None)

Returns: a single Python int (or long, if the number is too large to fit in an int)

Output Translator Signature:
Same signature as input translator, except that return value is not constrained.

Translator Key ‘FLOATING’

Input Translator Argument / Return Value Signature:

Args: a single Python float argument (or None)

Returns: a single Python float

Output Translator Signature:
Same signature as input translator, except that return value is not constrained.

Translator Key ‘FIXED’

Input Translator Argument / Return Value Signature:

Args: a single Python 2-tuple argument containing a scaled Python integer in the first element and the scale factor in the second element (the tuple is of the form (val, scale)).

Returns: a single Python integer, scaled appropriately

Output Translator Signature:
Same signature as input translator, except that return value is not constrained.
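As an illustration, here is a rough sketch of a ‘FIXED’ translator pair built on decimal.Decimal, written against the (val, scale) signature above. The names are hypothetical (kinterbasdb.typeconv_fixed_decimal contains the real reference implementation), and the input side assumes the application supplies decimal.Decimal or already-scaled int values:

import decimal

def fixed_conv_in((val, scale)):
    # Return the scaled integer the engine stores internally; e.g. with
    # scale -2, Decimal('4.53') becomes 453. Already-scaled integers are
    # assumed to pass through untouched.
    if val is None or isinstance(val, (int, long)):
        return val
    return int(decimal.Decimal(val) * decimal.Decimal(10) ** -scale)

def fixed_conv_out((val, scale)):
    # val is a scaled integer; e.g. (453, -2) becomes Decimal('4.53').
    if val is None:
        return None
    return decimal.Decimal(val) * decimal.Decimal(10) ** scale

con.set_type_trans_in ({'FIXED': fixed_conv_in})
con.set_type_trans_out({'FIXED': fixed_conv_out})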

Translator Key ‘DATE’

Input Translator Argument / Return Value Signature:

Args: an instance of the chosen date type (such as Python 2.3+’s datetime.date) or None

Returns: a single Python 3-tuple of the form (year, month, day)

Output Translator Signature:

Args: a single Python 3-tuple of the form (year, month, day) (or None if the database field was NULL)

Return value is not constrained.

Translator Key ‘TIME’

Input Translator Argument / Return Value Signature:

Args: an instance of the chosen time type (such as Python 2.3+’s datetime.time) or None

Returns: a single Python 4-tuple of the form (hour, minute, second, microseconds)

Output Translator Signature:

Args: a single Python 4-tuple of the form (hour, minute, second, microseconds) (or None if the database field was NULL).

Return value is not constrained.

Translator Key ‘TIMESTAMP’

Input Translator Argument / Return Value Signature:

Args: an instance of the chosen timestamp type (such as Python 2.3+’s datetime.datetime) or None

Returns: a single Python 7-tuple of the form (year, month, day, hour, minute, second, microseconds)

Output Translator Signature:

Args: a single Python 7-tuple of the form (year, month, day, hour, minute, second, microseconds) (or None if the database field was NULL)

Return value is not constrained.

Example Programs¶

DATE/TIME/TIMESTAMP

import datetime # Python 2.3 standard library module

import kinterbasdb
import kinterbasdb.typeconv_datetime_stdlib as tc_dt

def connect(*args, **kwargs):
    """
      This wrapper around kinterbasdb.connect creates connections that use
    the datetime module (which entered the standard library in Python 2.3)
    for both input and output of DATE, TIME, and TIMESTAMP database fields.
      This wrapper simply registers kinterbasdb's official date/time
    translators for the datetime module, which reside in the
    kinterbasdb.typeconv_datetime_stdlib module.
      An equivalent set of translators for mx.DateTime (which kinterbasdb
    uses by default for backward compatibility) resides in the
    kinterbasdb.typeconv_datetime_mx module.
      Note that because cursors inherit their connection's dynamic type
    translation settings, cursors created upon connections returned by this
    function will also use the datetime module.
    """
    con = kinterbasdb.connect(*args, **kwargs)

    con.set_type_trans_in({
        'DATE':             tc_dt.date_conv_in,
        'TIME':             tc_dt.time_conv_in,
        'TIMESTAMP':        tc_dt.timestamp_conv_in,
        })

    con.set_type_trans_out({
        'DATE':             tc_dt.date_conv_out,
        'TIME':             tc_dt.time_conv_out,
        'TIMESTAMP':        tc_dt.timestamp_conv_out,
        })

    return con


def _test():
    con = connect(dsn='localhost:/temp/test.db', user='sysdba', password='pass')
    cur = con.cursor()

    # Retrieve the current timestamp of the database server.
    cur.execute("select current_timestamp from rdb$database")
    curStamp = cur.fetchone()[0]
    print 'The type of curStamp is', type(curStamp)
    print 'curStamp is', curStamp

    # Create a test table with a single TIMESTAMP column.
    con.execute_immediate("recreate table test_stamp (a timestamp)")
    con.commit()

    # Insert a timestamp into the database, then retrieve it.
    py23StandardLibTimestamp = datetime.datetime.now()
    cur.execute("insert into test_stamp values (?)", (py23StandardLibTimestamp,))
    cur.execute("select * from test_stamp")
    curStamp = cur.fetchone()[0]
    print 'The type of curStamp is', type(curStamp)
    print 'curStamp is', curStamp


if __name__ == '__main__':
    _test()

Sample output:

The type of curStamp is <type 'datetime.datetime'>
curStamp is 2003-05-20 03:55:42
The type of curStamp is <type 'datetime.datetime'>
curStamp is 2003-05-20 03:55:42

Deferred Loading of Dynamic Type Translators¶

KInterbasDB has existed since 1998, five years before the datetime module was available in the Python standard library. Therefore, KInterbasDB’s default representation for date and time values is the mx.DateTime module. This representation is recommended by the Python DB API 2.0 Specification, and was an entirely sensible choice during the many years before the advent of the standard library datetime module.

Now that the datetime module is available in the standard library, many KInterbasDB users prefer it to mx.DateTime. For the sake of backward-compatibility, it is necessary to continue to use mx.DateTime by default, but it’s both burdensome and wasteful to import mx.DateTime in programs that don’t use it. To address this situation, KInterbasDB’s type translation initialization code defers the choice of a default set of translators until the kinterbasdb.init() function is called. A client program can explicitly call kinterbasdb.init to forestall the import of mx.DateTime.

kinterbasdb.init(type_conv=200)¶

Changed in version 3.3.

Takes a keyword argument type_conv, which controls KInterbasDB’s initial choice of type translators. type_conv can be either an integer or an object that has all of the attributes named in kinterbasdb.BASELINE_TYPE_TRANSLATION_FACILITIES (an example of such an object is the module kinterbasdb.typeconv_backcompat). If type_conv is an integer, it will cause KInterbasDB to use one of the following predefined type translator configurations:

type_conv code Resulting translator configuration
0

Minimal type translators that represent date/time values as tuples and fixed point values as either floats or scaled integers, depending on the value of the deprecated Connection.precision_mode attribute.

Unicode values are not encoded or decoded automatically.

Implemented by the kinterbasdb.typeconv_naked module.

1

Backward-compatible type translators that represent date/time values via the mx.DateTime module and fixed point values as either floats or scaled integers, depending on the value of the deprecated Connection.precision_mode attribute.

Unicode values are not encoded or decoded automatically.

Implemented by the kinterbasdb.typeconv_backcompat module.

This configuration perfectly mimics the type translation behavior of KInterbasDB 3.0.

100

This translator configuration, which is intended for use with Python 2.4 and later, represents date/time values via the standard library module datetime and fixed point values via the third-party fixedpoint module.

Unicode values are encoded and decoded automatically.

Implemented by the kinterbasdb.typeconv_23plus module.

200 (the default)

This translator configuration represents date/time values via the standard library module datetime and fixed point values via the decimal module. The decimal module entered the standard library in Python 2.4, but can also be manually installed in Python 2.3.

Unicode values are encoded and decoded automatically.

Implemented by the kinterbasdb.typeconv_24plus module.

199

This translator configuration is exactly like 200, except that it represents fixed point values as float objects in order to avoid the substantial memory overhead of the decimal module.

It is fundamentally imprecise to represent fixed point values in floating point, so this convenience code is intended solely for users who wish to use datetime instead of mx.DateTime, but don’t care about fixed point values and don’t want to suffer the memory overhead of the decimal module.

Implemented by the kinterbasdb.typeconv_23plus_lowmem module.

300 (the ideal for Firebird 2.1 and later)

New in v3.3

This translator configuration is identical to 200, but textual blobs are handled in the same way as other textual types, so unicode encoding/decoding is performed automagically. When converting in the input direction, this doesn’t work with any Firebird version prior to 2.1, because the Firebird API doesn’t make the blob’s character set ID available.

These integer type conversion codes are defined solely for convenience. The same functionality is available via the object variant of type_conv, but setting it up is more laborious for typical translator configurations.
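For instance, because the bundled converter modules expose all of the attributes named in kinterbasdb.BASELINE_TYPE_TRANSLATION_FACILITIES, a module object can be passed directly; the following sketch should be equivalent to type_conv=1:

import kinterbasdb
import kinterbasdb.typeconv_backcompat

# Any object bearing all of the attributes named in
# kinterbasdb.BASELINE_TYPE_TRANSLATION_FACILITIES is acceptable here:
kinterbasdb.init(type_conv=kinterbasdb.typeconv_backcompat)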

Warning

The default type_conv value was changed from 1 to 200 in version 3.3!

Deferred Loading: Backward Compatibility Issues¶

The deferred type translator loading scheme introduced in KInterbasDB 3.1 goes to great lengths to maintain backward compatibility. If the client programmer does not call kinterbasdb.init(), KInterbasDB will implicitly initialize itself in a backward-compatible manner (type_conv=1) the first time one of its public functions is called or one of its public classes is instantiated.

The only known backward incompatibility is this: the DB API type comparison singleton DATETIME will not compare equal to any type until the kinterbasdb.init() function has been called (whether explicitly or implicitly). After kinterbasdb.init() has been called, DATETIME will compare equal to the date, time, and timestamp types that were loaded.

This issue should affect hardly any existing KInterbasDB-based programs.

Deferred Loading Example¶

import datetime, decimal, os.path, string, sys

import kinterbasdb
kinterbasdb.init(type_conv=200)
# This program never imports mx.DateTime:
assert 'mx' not in sys.modules

def test():
    dbFilename = r'D:\temp\test-deferred.firebird'
    prepareTestDatabase(dbFilename)

    # Connect with character set UNICODE_FSS, to match the default character
    # set of the test database.
    con = kinterbasdb.connect(dsn=dbFilename,
        user='sysdba', password='masterkey', charset='UNICODE_FSS'
      )
    cur = con.cursor()

    # Create a test table.
    cur.execute("""
        create table test (
          a numeric(18,2),
          b date,
          c time,
          d timestamp,
          e varchar(50), /* Defaults to character set UNICODE_FSS. */
          f varchar(50), /* Defaults to character set UNICODE_FSS. */
          g varchar(50) character set ASCII
        )
      """)
    con.commit()

    # Create an input value for each field in the test table.
    aIn = decimal.Decimal('4.53')

    # Notice that the DB API date/time constructors in kinterbasdb generate
    # datetime-based objects instead of mx-based objects because of our earlier
    # call to kinterbasdb.init(type_conv=200).
    bIn = kinterbasdb.Date(2004,1,4)
    assert isinstance(bIn, datetime.date)
    cIn = kinterbasdb.Time(16,27,59)
    assert isinstance(cIn, datetime.time)
    dIn = kinterbasdb.Timestamp(2004,1,4, 16,27,59)
    assert isinstance(dIn, datetime.datetime)

    eIn = u'A unicod\u2211 object stored in a Unicode field.'
    fIn = 'A str object stored in a Unicode field.'
    gIn = 'A str object stored in an ASCII field.'

    print '-' * 70
    inputValues = (aIn, bIn, cIn, dIn, eIn, fIn, gIn)
    reportValues('In', inputValues)
    cur.execute("insert into test values (?,?,?,?,?,?,?)", inputValues)
    print '-' * 70
    cur.execute("select a,b,c,d,e,f,g from test")
    (aOut, bOut, cOut, dOut, eOut, fOut, gOut) = outputValues = cur.fetchone()
    reportValues('Out', outputValues)
    print '-' * 70

    # Notice that all values made the journey to and from the database intact.
    assert inputValues == outputValues

def reportValues(direction, values):
    for (val, c) in zip(values, string.ascii_lowercase[:len(values)]):
        varName = c + direction
        print '%s has type %s, value\n  %s' % (varName, type(val), repr(val))

def prepareTestDatabase(dbFilename):
    # Delete the test database if an old copy is already present.
    if os.path.isfile(dbFilename):
        conOld = kinterbasdb.connect(dsn=dbFilename,
            user='sysdba', password='masterkey'
          )
        conOld.drop_database()
    # Create the test database afresh.
    kinterbasdb.create_database("""
          create database '%s'
          user 'sysdba' password 'masterkey'
          default character set UNICODE_FSS
        """ % dbFilename
      )

if __name__ == '__main__':
    test()

Program output:

----------------------------------------------------------------------
aIn has type <class 'decimal.Decimal'>, value
  Decimal("4.53")
bIn has type <type 'datetime.date'>, value
  datetime.date(2004, 1, 4)
cIn has type <type 'datetime.time'>, value
  datetime.time(16, 27, 59)
dIn has type <type 'datetime.datetime'>, value
  datetime.datetime(2004, 1, 4, 16, 27, 59)
eIn has type <type 'unicode'>, value
  u'A unicod\u2211 object stored in a Unicode field.'
fIn has type <type 'str'>, value
  'A str object stored in a Unicode field.'
gIn has type <type 'str'>, value
  'A str object stored in an ASCII field.'
----------------------------------------------------------------------
aOut has type <class 'decimal.Decimal'>, value
  Decimal("4.53")
bOut has type <type 'datetime.date'>, value
  datetime.date(2004, 1, 4)
cOut has type <type 'datetime.time'>, value
  datetime.time(16, 27, 59)
dOut has type <type 'datetime.datetime'>, value
  datetime.datetime(2004, 1, 4, 16, 27, 59)
eOut has type <type 'unicode'>, value
  u'A unicod\u2211 object stored in a Unicode field.'
fOut has type <type 'unicode'>, value
  u'A str object stored in a Unicode field.'
gOut has type <type 'unicode'>, value
  u'A str object stored in an ASCII field.'
----------------------------------------------------------------------

Notes about Unicode handling in the example above:

Upon input, the Python unicode object eIn was transparently encoded for storage in database field TEST.E (a VARCHAR field with character set UNICODE_FSS (that is, UTF-8)). Upon output, the UNICODE_FSS value in TEST.E was decoded transparently into the Python unicode object eOut.

TEST.F accepted a Python str object even though it’s a Unicode field. The output value fOut is a Python unicode object rather than a str.

Although TEST.G is an ASCII field, and the input value gIn is a str, the output value gOut is a unicode object. This is because the connection’s charset is UNICODE_FSS, and Firebird tries to convert every retrieved value to match that character set.

Positional Dynamic Type Translation¶

All forms of dynamic type translation discussed so far have used the type of the database field as the basis for selecting a translator. KInterbasDB 3.2 also allows the client programmer to control translator selection on the basis of a field’s position within a Cursor. Translator selection based on database field type is called “typal translation”, while selection based on position is called “positional translation”.

Positional translation can be enabled at the Cursor level by including zero-based integer keys in the dictionary passed to Cursor.set_type_trans_[in|out]. Consider the following example program:

import kinterbasdb

con = kinterbasdb.connect(dsn=r'D:\temp\test-20.firebird',
    user='sysdba', password='masterkey'
  )
cur = con.cursor()

cur.execute("recreate table test(a int, b int, c int, d int, e float)")
con.commit()

cur.execute("insert into test values (?,?,?,?,?)", (1, 2, 3, 4, 5.0))

cur.execute("select a,b,c,d,e from test")
print 'Before translator modifications, output row is:'
print ' ', cur.fetchone()

cur.set_type_trans_out({
    'INTEGER':  lambda i: i * 10,
    1:          lambda i: i * 100,
    3:          lambda i: i * 1000
  })

cur.execute("select a,b,c,d,e from test")
print 'After translator modifications, output row is:'
print ' ', cur.fetchone()

Program output:

Before translator modifications, output row is:
  (1, 2, 3, 4, 5.0)
After translator modifications, output row is:
  (10, 200, 30, 4000, 5.0)

The cur.set_type_trans_out call in the example program specifies that integer values retrieved by cur should be multiplied by 10, then overrides that setting for specific columns: the value in the second column (position 1) is multiplied by 100, while the value in the fourth column (position 3) is multiplied by 1000.

KInterbasDB uses a cascading method of translator selection, listed below in order from highest to lowest precedence (a short sketch follows the list):

  • Positional translation settings, which can only be activated at the Cursor level, take precedence over typal translation settings.
  • Cursor-level translation settings take precedence over Connection-level settings.
  • Connection-level translation settings take precedence over the module-level defaults.
  • The module-level defaults are established by the call to kinterbasdb.init(). If the client programmer does not call kinterbasdb.init() explicitly, KInterbasDB’s internals will do so implicitly.
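A minimal sketch of the cascade (the multiplier translators are arbitrary placeholders, and the connection parameters are illustrative):

import kinterbasdb
kinterbasdb.init(type_conv=200)        # establishes the module-level defaults

con = kinterbasdb.connect(dsn='localhost:/temp/test.db',
    user='sysdba', password='pass')
con.set_type_trans_out({'INTEGER': lambda i: i * 2})   # connection level

cur = con.cursor()                     # inherits the connection's setting
cur.set_type_trans_out({
    'INTEGER': lambda i: i * 3,        # cursor-level typal override
    0:         lambda i: i * 5,        # positional override beats typal
  })

# For rows fetched via cur: column 0 is multiplied by 5, and any other
# INTEGER column by 3. Other cursors on con still multiply INTEGER
# columns by 2.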

Database Arrays¶

KInterbasDB converts database arrays from Python sequences (except strings) on input, and to Python lists on output. On input, the Python sequence must be nested appropriately if the array field is multi-dimensional, and the incoming sequence must not fall short of its maximum possible length (it will not be “padded” implicitly; see below). On output, the lists will be nested if the database array has multiple dimensions.

Database arrays have no place in a purely relational data model, which requires that data values be atomized (that is, every value stored in the database must be reduced to elementary, non-decomposable parts). The Firebird implementation of database arrays, like that of most relational database engines that support this data type, is fraught with limitations.

Database arrays are of fixed size, with a predeclared number of dimensions (max. 16) and number of elements per dimension. Individual array elements cannot be set to NULL / None, so the mapping between Python lists (which have dynamic length and are therefore not normally “padded” with dummy values) and non-trivial database arrays is clumsy.

Stored procedures cannot have array parameters.

Finally, many interface libraries, GUIs, and even the isql command line utility do not support database arrays.

In general, it is preferable to avoid using database arrays unless you have a compelling reason.

Example Program

The following program inserts an array (nested Python list) into a single database field, then retrieves it.

import kinterbasdb

con = kinterbasdb.connect(dsn='localhost:/temp/test.db', user='sysdba', password='pass')
con.execute_immediate("recreate table array_table (a int[3,4])")
con.commit()

cur = con.cursor()

arrayIn = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
    [9,10,11,12]
  ]

print 'arrayIn:  %s' % arrayIn
cur.execute("insert into array_table values (?)", (arrayIn,))

cur.execute("select a from array_table")
arrayOut = cur.fetchone()[0]
print 'arrayOut: %s' % arrayOut

con.commit()

Output:

arrayIn:  [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
arrayOut: [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]

Blobs¶

KInterbasDB supports the insertion and retrieval of blobs either wholly in memory (“materialized mode”) or in chunks (“streaming mode”) to reduce memory usage when handling large blobs. The default handling mode is “materialized”; the “streaming” method is selectable via a special case of Dynamic Type Translation.

In materialized mode, input and output blobs are represented as Python str objects, with the result that the entirety of each blob’s contents is loaded into memory. Unfortunately, flaws in the database engine’s C API prevent automatic Unicode conversion from applying to textual blobs in the way it applies to Unicode CHAR and VARCHAR fields in any Firebird version prior to version 2.1.

Note

KInterbasDB 3.3 introduces new type_conv mode 300 that enables automatic type conversion for textual blobs when you’re working with Firebird 2.1 and newer.

In streaming mode, any Python “file-like” object is acceptable as input for a blob parameter. Obvious examples of such objects are instances of file or StringIO. Each output blob is represented by a kinterbasdb.BlobReader object.

class kinterbasdb.BlobReader¶

BlobReader is a “file-like” class, so it acts much like a file instance opened in rb mode.

BlobReader adds one method not found in the “file-like” interface:

chunks()¶

Takes a single integer parameter that specifies the number of bytes to retrieve in each chunk (the final chunk may be smaller).

For example, if the size of the blob is 50000000 bytes, BlobReader.chunks(2**20) will return 47 one-megabyte chunks, and a smaller final chunk of 716928 bytes.
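The chunk count and final-chunk size follow directly from integer division and remainder, as this quick check illustrates:

size, chunkSize = 50000000, 2 ** 20
print size // chunkSize, size % chunkSize   # prints: 47 716928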

Due to the combination of CPython’s deterministic finalization with careful programming in KInterbasDB’s internals, it is not strictly necessary to close BlobReader instances explicitly. A BlobReader object will be automatically closed by its __del__ method when it goes out of scope, or when its Connection closes, whichever comes first. However, it is always a better idea to close resources explicitly (via try...finally) than to rely on artifacts of the CPython implementation. (For the sake of clarity, the example program does not follow this practice.)
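For reference, a sketch of the explicit-close idiom recommended above (assuming cur has just fetched a row whose first column is a blob in streaming mode):

reader = cur.fetchone()[0]
try:
    contents = reader.read()   # or iterate over reader.chunks(...)
finally:
    reader.close()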

Example Program

The following program demonstrates blob storage and retrieval in both materialized and streaming modes.

import os.path
from cStringIO import StringIO

import kinterbasdb

con = kinterbasdb.connect(dsn=r'localhost:D:\temp\test-20.firebird',
    user='sysdba', password='masterkey'
  )

cur = con.cursor()

cur.execute("recreate table blob_test (a blob)")
con.commit()

# --- Materialized mode (str objects for both input and output) ---
# Insertion:
cur.execute("insert into blob_test values (?)", ('abcdef',))
cur.execute("insert into blob_test values (?)", ('ghijklmnop',))
# Retrieval:
cur.execute("select * from blob_test")
print 'Materialized retrieval (as str):'
print cur.fetchall()

cur.execute("delete from blob_test")

# --- Streaming mode (file-like objects for input; kinterbasdb.BlobReader
#     objects for output) ---
cur.set_type_trans_in ({'BLOB': {'mode': 'stream'}})
cur.set_type_trans_out({'BLOB': {'mode': 'stream'}})

# Insertion:
cur.execute("insert into blob_test values (?)", (StringIO('abcdef'),))
cur.execute("insert into blob_test values (?)", (StringIO('ghijklmnop'),))

f = file(os.path.abspath(__file__), 'rb')
cur.execute("insert into blob_test values (?)", (f,))
f.close()

# Retrieval using the "file-like" methods of BlobReader:
cur.execute("select * from blob_test")

readerA = cur.fetchone()[0]

print '\nStreaming retrieval (via kinterbasdb.BlobReader):'

# Python "file-like" interface:
print 'readerA.mode:    "%s"' % readerA.mode
print 'readerA.closed:   %s'  % readerA.closed
print 'readerA.tell():   %d'  % readerA.tell()
print 'readerA.read(2): "%s"' % readerA.read(2)
print 'readerA.tell():   %d'  % readerA.tell()
print 'readerA.read():  "%s"' % readerA.read()
print 'readerA.tell():   %d'  % readerA.tell()
print 'readerA.read():  "%s"' % readerA.read()
readerA.close()
print 'readerA.closed:   %s'  % readerA.closed

# The chunks method (not part of the Python "file-like" interface, but handy):
print '\nFor a blob with contents "ghijklmnop", iterating over'
print 'BlobReader.chunks(3) produces:'
readerB = cur.fetchone()[0]
for chunkNo, chunk in enumerate(readerB.chunks(3)):
    print 'Chunk %d is: "%s"' % (chunkNo, chunk)

Output:

Materialized retrieval (as str):
[('abcdef',), ('ghijklmnop',)]

Streaming retrieval (via kinterbasdb.BlobReader):
readerA.mode:    "rb"
readerA.closed:   False
readerA.tell():   0
readerA.read(2): "ab"
readerA.tell():   2
readerA.read():  "cdef"
readerA.tell():   6
readerA.read():  ""
readerA.closed:   True

For a blob with contents "ghijklmnop", iterating over
BlobReader.chunks(3) produces:
Chunk 0 is: "ghi"
Chunk 1 is: "jkl"
Chunk 2 is: "mno"
Chunk 3 is: "p"

Connection Timeouts¶

Connection timeouts allow the programmer to request that a connection be automatically closed after a specified period of inactivity. The simplest uses of connection timeouts are trivial, as demonstrated by the following snippet:

import kinterbasdb

con = kinterbasdb.connect(dsn=r'localhost:D:\temp\test.db',
    user='sysdba', password='masterkey',
    timeout={'period': 120.0} # time out after 120.0 seconds of inactivity
  )

...

The connection created in the example above is eligible to be automatically closed by KInterbasDB if it remains idle for at least 120.0 consecutive seconds. KInterbasDB does not guarantee that the connection will be closed immediately when the specified period has elapsed. On a busy system, there might be a considerable delay between the moment a connection becomes eligible for timeout and the moment KInterbasDB actually closes it. However, the thread that performs connection timeouts is programmed in such a way that on a lightly loaded system, it acts almost instantaneously to take advantage of a connection’s eligibility for timeout.

After a connection has timed out, KInterbasDB reacts to attempts to reactivate the severed connection in a manner dependent on the state of the connection when it timed out. Consider the following example program:

import time
import kinterbasdb

con = kinterbasdb.connect(dsn=r'localhost:D:\temp\test.db',
    user='sysdba', password='masterkey',
    timeout={'period': 3.0}
  )
cur = con.cursor()

cur.execute("recreate table test (a int, b char(1))")
con.commit()

cur.executemany("insert into test (a, b) values (?, ?)",
    [(1, 'A'), (2, 'B'), (3, 'C')]
  )
con.commit()

cur.execute("select * from test")
print 'BEFORE:', cur.fetchall()

cur.execute("update test set b = 'X' where a = 2")

time.sleep(6.0)

cur.execute("select * from test")
print 'AFTER: ', cur.fetchall()

So, should the example program print

BEFORE: [(1, 'A'), (2, 'B'), (3, 'C')]
AFTER:  [(1, 'A'), (2, 'X'), (3, 'C')]

or

BEFORE: [(1, 'A'), (2, 'B'), (3, 'C')]
AFTER:  [(1, 'A'), (2, 'B'), (3, 'C')]

or should it raise an exception? The answer is more complex than one might think.

First of all, we cannot guarantee much about the example program’s behavior because there is a race condition between the obvious thread that’s executing the example code (which we’ll call “UserThread” for the rest of this section) and the KInterbasDB-internal background thread that actually closes connections that have timed out (“TimeoutThread”). If the operating system were to suspend UserThread just after the kinterbasdb.connect() call for more than the specified timeout period of 3.0 seconds, the TimeoutThread might close the connection before UserThread had performed any preparatory operations on the database. Although such a scenario is extremely unlikely when more “realistic” timeout periods such as 1800.0 seconds (30 minutes) are used, it is important to consider. We’ll explore solutions to this race condition later.

The likely (but not guaranteed) behavior of the example program is that UserThread will complete all preparatory database operations including the cur.execute("update test set b = 'X' where a = 2") statement in the example program, then go to sleep for not less than 6.0 seconds. Not less than 3.0 seconds after UserThread executes the cur.execute("update test set b = 'X' where a = 2") statement, TimeoutThread is likely to close the connection because it has become eligible for timeout.

The crucial issue is how TimeoutThread should resolve the transaction that UserThread left open on con, and what should happen when UserThread reawakens and tries to execute the cur.execute("select * from test") statement, since the transaction that UserThread left open will no longer be active.

User-Supplied Connection Timeout Callbacks¶

In the context of a particular client program, it is not possible for KInterbasDB to know the best way for TimeoutThread to react when it encounters a connection that is eligible for timeout, but has an unresolved transaction. For this reason, KInterbasDB’s connection timeout system offers callbacks that the client programmer can use to guide the TimeoutThread’s actions, or to log information about connection timeout patterns.

The “Before Timeout” Callback¶

The client programmer can supply a “before timeout” callback that accepts a single dictionary parameter and returns an integer code to indicate how the TimeoutThread should proceed when it finds a connection eligible for timeout. Within the dictionary, KInterbasDB provides the following entries:

dsn:
  The dsn parameter that was passed to kinterbasdb.connect when the connection was created.
has_transaction:
  A boolean that indicates whether the connection has an unresolved transaction.
active_secs:
  A float that indicates how many seconds elapsed between the point when the connection attached to the server and the last client program activity on the connection.
idle_secs:
  A float that indicates how many seconds have elapsed since the last client program activity on the connection. This value will not be less than the specified timeout period, and is likely to be only a fraction of a second longer.

Based on those data, the user-supplied callback should return one of the following codes:

kinterbasdb.CT_VETO¶
Directs the TimeoutThread not to close the connection at the current time, and not to reconsider timing the connection out until at least another timeout period has passed. For example, if a connection was created with a timeout period of 120.0 seconds, and the user-supplied “before callback” returns CT_VETO, the TimeoutThread will not reconsider timing out that particular connection until at least another 120.0 seconds have elapsed.
kinterbasdb.CT_NONTRANSPARENT¶

(“Nontransparent rollback”)

Directs the TimeoutThread to roll back the connection’s unresolved transaction (if any), then close the connection. Any future attempt to use the connection will raise a kinterbasdb.ConnectionTimedOut exception.

kinterbasdb.CT_ROLLBACK¶

(“Transparent rollback”)

Directs the TimeoutThread to roll back the connection’s unresolved transaction (if any), then close the connection. Upon any future attempt to use the connection, KInterbasDB will attempt to transparently reconnect to the database and “resume where it left off” insofar as possible. Of course, network problems and the like could prevent KInterbasDB’s attempt at transparent resumption from succeeding. Also, highly state-dependent objects such as open result sets, BlobReader, and PreparedStatement cannot be used transparently across a connection timeout.

kinterbasdb.CT_COMMIT¶

(“Transparent commit”)

Directs the TimeoutThread to commit the connection’s unresolved transaction (if any), then close the connection. Upon any future attempt to use the connection, KInterbasDB will attempt to transparently reconnect to the database and “resume where it left off” insofar as possible.

If the user does not supply a “before timeout” callback, KInterbasDB considers the timeout transparent only if the connection does not have an unresolved transaction.

If the user-supplied “before timeout” callback returns anything other than one of the codes listed above, or if it raises an exception, the TimeoutThread will act as though CT_NONTRANSPARENT had been returned.

You might have noticed that the input dictionary to the “before timeout” callback does not include a reference to the Connection object itself. This is a deliberate design decision intended to steer the client programmer away from writing callbacks that take a long time to complete, or that manipulate the Connection instance directly. See the caveats section for more information.

The “After Timeout” Callback¶

The client programmer can supply an “after timeout” callback that accepts a single dictionary parameter. Within that dictionary, KInterbasDB currently provides the following entries:

dsn:
  The dsn parameter that was passed to kinterbasdb.connect() when the connection was created.
active_secs:
  A float that indicates how many seconds elapsed between the point when the connection attached to the server and the last client program activity on the connection.
idle_secs:
  A float that indicates how many seconds elapsed between the last client program activity on the connection and the moment the TimeoutThread closed the connection.

KInterbasDB only calls the “after timeout” callback after the connection has actually been closed by the TimeoutThread. If the “before timeout” callback returns CT_VETO to cancel the timeout attempt, the “after timeout” callback will not be called.

KInterbasDB discards the return value of the “after timeout” callback, and ignores any exceptions.

The same caveats that apply to the “before timeout” callback also apply to the “after timeout” callback.

User-Supplied Connection Timeout Callback Caveats¶

  • The user-supplied callbacks are executed by the TimeoutThread. They should be designed to avoid blocking the TimeoutThread any longer than absolutely necessary.
  • Manipulating the Connection object that is being timed out (or any of that connection’s subordinate objects such as Cursor, BlobReader, or PreparedStatement) from the timeout callbacks is strictly forbidden.

Examples¶

Example: `CT_VETO`

The following program registers a “before timeout” callback that unconditionally returns CT_VETO, which means that the TimeoutThread never times the connection out. Although an “after timeout” callback is also registered, it will never be called.

import time
import kinterbasdb

def callback_before(info):
    print
    print 'callback_before called; input parameter contained:'
    for key, value in info.items():
        print '  %s: %s' % (repr(key).ljust(20), repr(value))
    print
    # Unconditionally veto any timeout attempts:
    return kinterbasdb.CT_VETO

def callback_after(info):
    assert False, 'This will never be called.'

con = kinterbasdb.connect(dsn=r'localhost:D:\temp\test.db',
    user='sysdba', password='masterkey',
    timeout={
        'period': 3.0,
        'callback_before': callback_before,
        'callback_after':  callback_after,
      }
  )
cur = con.cursor()

cur.execute("recreate table test (a int, b char(1))")
con.commit()

cur.executemany("insert into test (a, b) values (?, ?)",
    [(1, 'A'), (2, 'B'), (3, 'C')]
  )
con.commit()

cur.execute("select * from test")
print 'BEFORE:', cur.fetchall()

cur.execute("update test set b = 'X' where a = 2")

time.sleep(6.0)

cur.execute("select * from test")
rows = cur.fetchall()
# The value of the second column of the second row of the table is still 'X',
# because the transaction that changed it from 'B' to 'X' remains active.
assert rows[1][1] == 'X'
print 'AFTER: ', rows

Sample output:

BEFORE: [(1, 'A'), (2, 'B'), (3, 'C')]

callback_before called; input parameter contained:
  'dsn'               : 'localhost:D:\\temp\\test.db'
  'idle_secs'         : 3.0
  'has_transaction'   : True

AFTER:  [(1, 'A'), (2, 'X'), (3, 'C')]

Example: Supporting Module `timeout_authorizer`

The example programs for CT_NONTRANSPARENT, CT_ROLLBACK, and CT_COMMIT rely on the TimeoutAuthorizer class from the module below to guarantee that the TimeoutThread will not time the connection out before the preparatory code has executed.

import threading
import kinterbasdb

class TimeoutAuthorizer(object):
    def __init__(self, opCodeWhenAuthorized):
        self.currentOpCode = kinterbasdb.CT_VETO
        self.opCodeWhenAuthorized = opCodeWhenAuthorized

        self.lock = threading.Lock()

    def authorize(self):
        self.lock.acquire()
        try:
            self.currentOpCode = self.opCodeWhenAuthorized
        finally:
            self.lock.release()

    def __call__(self, info):
        self.lock.acquire()
        try:
            return self.currentOpCode
        finally:
            self.lock.release()

Example: `CT_NONTRANSPARENT`

import threading, time
import kinterbasdb
import timeout_authorizer

authorizer = timeout_authorizer.TimeoutAuthorizer(kinterbasdb.CT_NONTRANSPARENT)
connectionTimedOut = threading.Event()

def callback_after(info):
    print
    print 'The connection was closed nontransparently.'
    print
    connectionTimedOut.set()

con = kinterbasdb.connect(dsn=r'localhost:D:\temp\test.db',
    user='sysdba', password='masterkey',
    timeout={
        'period': 3.0,
        'callback_before': authorizer,
        'callback_after':  callback_after,
      }
  )
cur = con.cursor()

cur.execute("recreate table test (a int, b char(1))")
con.commit()

cur.executemany("insert into test (a, b) values (?, ?)",
    [(1, 'A'), (2, 'B'), (3, 'C')]
  )
con.commit()

cur.execute("select * from test")
print 'BEFORE:', cur.fetchall()

cur.execute("update test set b = 'X' where a = 2")

authorizer.authorize()
connectionTimedOut.wait()

# This will raise a kinterbasdb.ConnectionTimedOut exception because the
# before callback returned kinterbasdb.CT_NONTRANSPARENT:
cur.execute("select * from test")

Sample output:

BEFORE: [(1, 'A'), (2, 'B'), (3, 'C')]

The connection was closed nontransparently.

Traceback (most recent call last):
  File "connection_timeouts_ct_nontransparent.py", line 42, in ?
    cur.execute("select * from test")
kinterbasdb.ConnectionTimedOut: (0, 'A transaction was still unresolved when
this connection timed out, so it cannot be transparently reactivated.')

Example: `CT_ROLLBACK`

import threading, time
import kinterbasdb
import timeout_authorizer

authorizer = timeout_authorizer.TimeoutAuthorizer(kinterbasdb.CT_ROLLBACK)
connectionTimedOut = threading.Event()

def callback_after(info):
    print
    print 'The unresolved transaction was rolled back; the connection has been'
    print ' closed transparently.'
    print
    connectionTimedOut.set()

con = kinterbasdb.connect(dsn=r'localhost:D:\temp\test.db',
    user='sysdba', password='masterkey',
    timeout={
        'period': 3.0,
        'callback_before': authorizer,
        'callback_after':  callback_after,
      }
  )
cur = con.cursor()

cur.execute("recreate table test (a int, b char(1))")
con.commit()

cur.executemany("insert into test (a, b) values (?, ?)",
    [(1, 'A'), (2, 'B'), (3, 'C')]
  )
con.commit()

cur.execute("select * from test")
print 'BEFORE:', cur.fetchall()

cur.execute("update test set b = 'X' where a = 2")

authorizer.authorize()
connectionTimedOut.wait()

# The value of the second column of the second row of the table will have
# reverted to 'B' when the transaction that changed it to 'X' was rolled back.
# The cur.execute call on the next line will transparently reactivate the
# connection, which was timed out transparently.
cur.execute("select * from test")
rows = cur.fetchall()
assert rows[1][1] == 'B'
print 'AFTER: ', rows

Sample output:

BEFORE: [(1, 'A'), (2, 'B'), (3, 'C')]

The unresolved transaction was rolled back; the connection has been
 closed transparently.

AFTER:  [(1, 'A'), (2, 'B'), (3, 'C')]

Example: `CT_COMMIT`

import threading, time
import kinterbasdb
import timeout_authorizer

authorizer = timeout_authorizer.TimeoutAuthorizer(kinterbasdb.CT_COMMIT)
connectionTimedOut = threading.Event()

def callback_after(info):
    print
    print 'The unresolved transaction was committed; the connection has been'
    print ' closed transparently.'
    print
    connectionTimedOut.set()

con = kinterbasdb.connect(dsn=r'localhost:D:\temp\test.db',
    user='sysdba', password='masterkey',
    timeout={
        'period': 3.0,
        'callback_before': authorizer,
        'callback_after':  callback_after,
      }
  )
cur = con.cursor()

cur.execute("recreate table test (a int, b char(1))")
con.commit()

cur.executemany("insert into test (a, b) values (?, ?)",
    [(1, 'A'), (2, 'B'), (3, 'C')]
  )
con.commit()

cur.execute("select * from test")
print 'BEFORE:', cur.fetchall()

cur.execute("update test set b = 'X' where a = 2")

authorizer.authorize()
connectionTimedOut.wait()

# The modification of the value of the second column of the second row of the
# table from 'B' to 'X' will have persisted, because the TimeoutThread
# committed the transaction before it timed the connection out.
# The cur.execute call on the next line will transparently reactivate the
# connection, which was timed out transparently.
cur.execute("select * from test")
rows = cur.fetchall()
assert rows[1][1] == 'X'
print 'AFTER: ', rows

Sample output:

BEFORE: [(1, 'A'), (2, 'B'), (3, 'C')]

The unresolved transaction was committed; the connection has been
 closed transparently.

AFTER:  [(1, 'A'), (2, 'X'), (3, 'C')]

Database Event Notification¶

What are database events?¶

The database engine features a distributed, interprocess communication mechanism based on messages called database events. A database event is a message passed from a trigger or stored procedure to an application to announce the occurrence of a specified condition or action, usually a database change such as an insertion, modification, or deletion of a record. The Firebird event mechanism enables applications to respond to actions and database changes made by other, concurrently running applications without the need for those applications to communicate directly with one another, and without incurring the expense of CPU time required for periodic polling to determine if an event has occurred.

Why use database events?¶

Anything that can be accomplished with database events can also be implemented using other techniques, so why bother with events? Since you’ve chosen to write database-centric programs in Python rather than assembly language, you probably already know the answer to this question, but let’s illustrate.

A typical application for database events is the handling of administrative messages. Suppose you have an administrative message database with a messages table, into which various applications insert timestamped status reports. It may be desirable to react to these messages in diverse ways, depending on the status they indicate: to ignore them, to initiate the update of dependent databases upon their arrival, to forward them by e-mail to a remote administrator, or even to set off an alarm so that on-site administrators will know a problem has occurred.

It is undesirable to tightly couple the program whose status is being reported (the message producer) to the program that handles the status reports (the message handler). There are obvious losses of flexibility in doing so. For example, the message producer may run on a separate machine from the administrative message database and may lack access rights to the downstream reporting facilities (e.g., network access to the SMTP server, in the case of forwarded e-mail notifications). Additionally, the actions required to handle status reports may themselves be time-consuming and error-prone, as in accessing a remote network to transmit e-mail.

In the absence of database event support, the message handler would probably be implemented via polling. Polling is simply the repetition of a check for a condition at a specified interval. In this case, the message handler would check in an infinite loop to see whether the most recent record in the messages table was more recent than the last message it had handled. If so, it would handle the fresh message(s); if not, it would go to sleep for a specified interval, then loop.

The polling-based implementation of the message handler is fundamentally flawed. Polling is a form of busy-wait; the check for new messages is performed at the specified interval, regardless of the actual activity level of the message producers. If the polling interval is lengthy, messages might not be handled within a reasonable time period after their arrival; if the polling interval is brief, the message handler program (and there may be many such programs) will waste a large amount of CPU time on unnecessary checks.

The database server is necessarily aware of the exact moment when a new message arrives. Why not let the message handler program request that the database server send it a notification when a new message arrives? The message handler can then efficiently sleep until the moment its services are needed. Under this event-based scheme, the message handler becomes aware of new messages at the instant they arrive, yet it does not waste CPU time checking in vain for new messages when there are none available.

How are events exposed to the server and the client process?¶

  1. Server Process (“An event just occurred!”)

    To notify any interested listeners that a specific event has occurred, issue the POST_EVENT statement from Stored Procedure or Trigger. The POST_EVENT statement has one parameter: the name of the event to post. In the preceding example of the administrative message database, POST_EVENT might be used from an after insert trigger on the messages table, like this:

    create trigger trig_messages_handle_insert
      for messages
        after insert
    as
    begin
      POST_EVENT 'new_message';
    end

    Note

    The physical notification of the client process does not occur until the transaction in which the POST_EVENT took place is actually committed. Therefore, multiple events may conceptually occur before the client process is physically informed of even one occurrence. Furthermore, the database engine makes no guarantee that clients will be informed of events in the same groupings in which they conceptually occurred. If, within a single transaction, an event named event_a is posted once and an event named event_b is posted once, the client may receive those posts in separate “batches”, despite the fact that they occurred in the same conceptual unit (a single transaction). This also applies to multiple occurrences of the same event within a single conceptual unit: the physical notifications may arrive at the client separately.

  2. Client Process (“Send me a message when an event occurs.”)

    Note

    If you don’t care about the gory details of event notification, skip to the section that describes KInterbasDB’s Python-level event handling API.

    The Firebird C client library offers two forms of event notification. The first form is synchronous notification, by way of the function isc_wait_for_event(). This form is admirably simple for a C programmer to use, but is inappropriate as a basis for KInterbasDB’s event support, chiefly because it’s not sophisticated enough to serve as the basis for a comfortable Python-level API. The other form of event notification offered by the database client library is asynchronous, by way of the functions isc_que_events() (note that the name of that function is misspelled), isc_cancel_events(), and others. The details are as nasty as they are numerous, but the essence of using asynchronous notification from C is as follows:

    1. Call isc_event_block() to create a formatted binary buffer that will tell the server which events the client wants to listen for.
    2. Call isc_que_events() (passing the buffer created in the previous step) to inform the server that the client is ready to receive event notifications, and provide a callback that will be asynchronously invoked when one or more of the registered events occurs.
    3. [The thread that called isc_que_events() to initiate event listening must now do something else.]
    4. When the callback is invoked (the database client library starts a thread dedicated to this purpose), it can use the isc_event_counts() function to determine how many times each of the registered events has occurred since the last call to isc_event_counts() (if any).
    5. [The callback thread should now “do its thing”, which may include communicating with the thread that called isc_que_events().]
    6. When the callback thread is finished handling an event notification, it must call isc_que_events() again in order to receive future notifications. Future notifications will invoke the callback again, effectively “looping” the callback thread back to Step 4.

How are events exposed to the Python programmer?¶

The KInterbasDB database event API comprises the following: the method Connection.event_conduit and the class EventConduit.

Connection.event_conduit()¶

Creates a conduit (an instance of EventConduit) through which database event notifications will flow into the Python program.

event_conduit is a method of Connection rather than a module-level function or a class constructor because the database engine deals with events in the context of a particular database (after all, POST_EVENT must be issued by a stored procedure or a trigger).

Arguments:

event_names:
  A sequence of string event names. The EventConduit.wait() method will block until the occurrence of at least one of the events named by the strings in event_names. KInterbasDB’s own event-related code is capable of operating with up to 2147483647 events per conduit. However, it has been observed that the Firebird client library experiences catastrophic problems (including memory corruption) on some platforms with anything beyond about 100 events per conduit. These limitations are dependent on both the Firebird version and the platform.
class kinterbasdb.EventConduit¶
__init__()¶
The EventConduit class is not designed to be instantiated directly by the Python programmer. Instead, use the Connection.event_conduit method to create EventConduit instances.
wait(timeout=None)¶

Blocks the calling thread until at least one of the events occurs, or the specified timeout (if any) expires.

If one or more event notifications has arrived since the last call to wait, this method will retrieve a notification from the head of the EventConduit‘s internal queue and return immediately.

The names of the relevant events were supplied to the Connection.event_conduit method during the creation of this EventConduit. In the code snippet below, the relevant events are named event_a and event_b:

conduit = connection.event_conduit( ('event_a', 'event_b') )
conduit.wait()

Arguments:

Timeout: Optional number of seconds (use a float to indicate fractions of seconds). If not even one of the relevant events has occurred after timeout seconds, this method will unblock and return None. The default timeout is infinite.
Returns:
None if the wait timed out, otherwise a dictionary that maps event_name -> event_occurrence_count.

In the code snippet above, if event_a occurred once and event_b did not occur at all, the return value from conduit.wait() would be the following dictionary:

{
 'event_a': 1,
 'event_b': 0
}
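
A timeout can be supplied to avoid blocking indefinitely. The following minimal sketch reuses the conduit created above:

result = conduit.wait(timeout=5.0)
if result is None:
    print 'No relevant event occurred within 5 seconds.'
else:
    print 'Received notification:', result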
close()¶

Cancels the standing request for this conduit to be notified of events.

After this method has been called, this EventConduit object is useless, and should be discarded. (The boolean property closed is True after an EventConduit has been closed.)

This method has no arguments.

flush()¶

This method allows the Python programmer to manually clear any event notifications that have accumulated in the conduit’s internal queue.

From the moment the conduit is created by the Connection.event_conduit() method, notifications of any events that occur will accumulate asynchronously within the conduit’s internal queue until the conduit is closed either explicitly (via the close method) or implicitly (via garbage collection). There are two ways to dispose of the accumulated notifications: call wait() to receive them one at a time ( wait() will block when the conduit’s internal queue is empty), or call this method to get rid of all accumulated notifications.

This method has no arguments.

Returns:
The number of event notifications that were flushed from the queue. The “number of event notifications” is not necessarily the same as the “number of event occurrences”, since a single notification can indicate multiple occurrences of a given event (see the return value of the wait method).
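
The following minimal sketch exercises flush (it assumes an established kinterbasdb connection named connection, as in the earlier wait snippet):

import time

conduit = connection.event_conduit( ('event_a', 'event_b') )
time.sleep(10.0)  # Notifications accumulate asynchronously during this period.
nFlushed = conduit.flush()
print 'Discarded %d accumulated notification(s).' % nFlushed
conduit.close()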

Example Program¶

The following code (a SQL table definition, a SQL trigger definition, and two Python programs) demonstrates KInterbasDB-based event notification.

The example is based on a database at ‘localhost:/temp/test.db’, which contains a simple table named test_table. test_table has an after insert trigger that posts several events. Note that the trigger posts test_event_a twice, test_event_b once, and test_event_c once.

The Python event handler program connects to the database and establishes an EventConduit in the context of that connection. As specified by the list of RELEVANT_EVENTS passed to event_conduit, the event conduit will concern itself only with events named test_event_a and test_event_b. Next, the program calls the conduit’s wait method without a timeout; it will wait indefinitely until at least one of the relevant events is posted in a transaction that is subsequently committed.

The Python event producer program simply connects to the database, inserts a row into test_table, and commits the transaction. Notice that except for the printed comment, no code in the producer makes any mention of events – the events are posted as an implicit consequence of the row’s insertion into test_table.

The insertion into test_table causes the trigger to conceptually post events, but those events are not physically sent to interested listeners until the transaction is committed. When the commit occurs, the handler program returns from the wait call and prints the notification that it received.

SQL table definition:

create table test_table (a integer)

SQL trigger definition:

create trigger trig_test_insert_event
  for test_table
    after insert
as
begin
  post_event 'test_event_a';
  post_event 'test_event_b';
  post_event 'test_event_c';

  post_event 'test_event_a';
end

Python event handler program:

import kinterbasdb

RELEVANT_EVENTS = ['test_event_a', 'test_event_b']

con = kinterbasdb.connect(dsn='localhost:/temp/test.db', user='sysdba', password='pass')
conduit = con.event_conduit(RELEVANT_EVENTS)

print 'HANDLER: About to wait for the occurrence of one of %s...\n' % RELEVANT_EVENTS
result = conduit.wait()
print 'HANDLER: An event notification has arrived:'
print result
conduit.close()

Python event producer program:

import kinterbasdb

con = kinterbasdb.connect(dsn='localhost:/temp/test.db', user='sysdba', password='pass')
cur = con.cursor()

cur.execute("insert into test_table values (1)")
print 'PRODUCER: Committing transaction that will cause event notification to be sent.'
con.commit()

Event producer output:

PRODUCER: Committing transaction that will cause event notification to be sent.

Event handler output (assuming that the handler was already started and waiting when the event producer program was executed):

HANDLER: About to wait for the occurrence of one of ['test_event_a', 'test_event_b']...

HANDLER: An event notification has arrived:
{'test_event_a': 2, 'test_event_b': 1}

Notice that there is no mention of test_event_c in the result dictionary received by the event handler program. Although test_event_c was posted by the after insert trigger, the event conduit in the handler program was created to listen only for test_event_a and test_event_b events.

Pitfalls and Limitations¶

  • Remember that if an EventConduit is left active (not yet closed or garbage collected), notifications for any registered events that actually occur will continue to accumulate in the EventConduit’s internal queue even if the Python programmer doesn’t call EventConduit.wait() to receive the notifications or EventConduit.flush() to clear the queue. The ill-informed may misinterpret this behavior as a memory leak in KInterbasDB; it is not.
  • NEVER use LOCAL-protocol connections in a multithreaded program that also uses event handling! The database client library implements the local protocol on some platforms in such a way that deadlocks may arise in bizarre places if you do this. This no-LOCAL prohibition is not limited to connections that are used as the basis for event conduits; it applies to all connections throughout the process. So why doesn’t KInterbasDB protect the Python programmer from this mistake? Because the event handling thread is started by the database client library, and it operates beyond the synchronization domain of KInterbasDB at times.

Note

The restrictions on the number of active EventConduits in a process, and on the number of event names that a single EventConduit can listen for, have been removed in KInterbasDB 3.2.

The database_info API¶

Firebird provides various information about the server and the connected database via the database_info API call. KInterbasDB surfaces this API through the following methods on the Connection object:

Connection.database_info(request, result_type)¶

This method is a very thin wrapper around the C function isc_database_info(). It does not attempt to interpret its results except with regard to whether they are a string or an integer.

For example, requesting isc_info_user_names with the call

con.database_info(kinterbasdb.isc_info_user_names, 's')

will return a binary string containing a raw succession of (length, name) pairs. A more convenient way to access the same functionality is via the db_info() method.

Arguments:

Request: One of the kinterbasdb.isc_info_* constants.
Result_type: Must be either 's' if you expect a string result, or 'i' if you expect an integer result.

Example Program

import kinterbasdb

con = kinterbasdb.connect(dsn='localhost:/temp/test.db', user='sysdba', password='pass')

# Retrieving an integer info item is quite simple.
bytesInUse = con.database_info(kinterbasdb.isc_info_current_memory, 'i')

print 'The server is currently using %d bytes of memory.' % bytesInUse

# Retrieving a string info item is somewhat more involved, because the
# information is returned in a raw binary buffer that must be parsed
# according to the rules defined in the Interbase® 6 API Guide section
# entitled "Requesting buffer items and result buffer values" (page 51).
#
# Often, the buffer contains a succession of length-string pairs
# (one byte telling the length of the string, followed by the string itself).
# Function kinterbasdb.raw_byte_to_int is provided to convert a raw
# byte to a Python integer (see examples below).
buf = con.database_info(kinterbasdb.isc_info_db_id, 's')

# Parse the filename from the buffer.
beginningOfFilename = 2
# The second byte in the buffer contains the size of the database filename
# in bytes.
lengthOfFilename = kinterbasdb.raw_byte_to_int(buf[1])
filename = buf[beginningOfFilename:beginningOfFilename + lengthOfFilename]

# Parse the host name from the buffer.
beginningOfHostName = (beginningOfFilename + lengthOfFilename) + 1
# The first byte after the end of the database filename contains the size
# of the host name in bytes.
lengthOfHostName = kinterbasdb.raw_byte_to_int(buf[beginningOfHostName - 1])
host = buf[beginningOfHostName:beginningOfHostName + lengthOfHostName]

print 'We are connected to the database at %s on host %s.' % (filename, host)

Sample output:

The server is currently using 8931328 bytes of memory.
We are connected to the database at C:\TEMP\TEST.DB on host WEASEL.

As you can see, extracting data with the database_info function is rather clumsy. In KInterbasDB 3.2, a higher-level means of accessing the same information is available: the db_info() method. Also, the Services API (accessible to Python programmers via the kinterbasdb.services module) provides high-level support for querying database statistics and performing maintenance.

Connection.db_info(request)¶

High-level convenience wrapper around the database_info() method that parses the output of database_info into Python-friendly objects instead of returning raw binary buffers in the case of complex result types. If an unrecognized isc_info_* code is requested, this method raises ValueError.

For example, requesting isc_info_user_names with the call

con.db_info(kinterbasdb.isc_info_user_names)

returns a dictionary that maps (username -> number of open connections). If SYSDBA has one open connection to the database to which con is connected, and TEST_USER_1 has three open connections to that same database, the return value would be {'SYSDBA': 1, 'TEST_USER_1': 3}.

Arguments:

Request:

must be either:

  • A single kinterbasdb.isc_info_* info request code. In this case, a single result is returned.
  • A sequence of such codes. In this case, a mapping of (info request code -> result) is returned.

Example Program

import os.path

import kinterbasdb

DB_FILENAME = r'D:\temp\test-20.firebird'
DSN = 'localhost:' + DB_FILENAME

###############################################################################
# Querying an isc_info_* item that has a complex result:
###############################################################################
# Establish three connections to the test database as TEST_USER_1, and one
# connection as SYSDBA.  Then use the Connection.db_info method to query the
# number of attachments by each user to the test database.
testUserCons = []
for i in range(3):
    tCon = kinterbasdb.connect(dsn=DSN, user='test_user_1', password='pass')
    testUserCons.append(tCon)

con = kinterbasdb.connect(dsn=DSN, user='sysdba', password='masterkey')

print 'Open connections to this database:'
print con.db_info(kinterbasdb.isc_info_user_names)

###############################################################################
# Querying multiple isc_info_* items at once:
###############################################################################
# Request multiple db_info items at once, specifically the page size of the
# database and the number of pages currently allocated.  Compare the size
# computed by that method with the size reported by the file system.
# The advantages of using db_info instead of the file system to compute
# database size are:
#   - db_info works seamlessly on connections to remote databases that reside
#     in file systems to which the client program lacks access.
#   - If the database is split across multiple files, db_info includes all of
#     them.
res = con.db_info(
    [kinterbasdb.isc_info_page_size, kinterbasdb.isc_info_allocation]
  )
pagesAllocated = res[kinterbasdb.isc_info_allocation]
pageSize = res[kinterbasdb.isc_info_page_size]
print '\ndb_info indicates database size is', pageSize * pagesAllocated, 'bytes'
print   'os.path.getsize indicates size is ', os.path.getsize(DB_FILENAME), 'bytes'

Sample output:

  Open connections to this database:
  {'SYSDBA': 1, 'TEST_USER_1': 3}

db_info indicates database size is 20684800 bytes
os.path.getsize indicates size is  20684800 bytes

Using Firebird Services API¶

Database server maintenance tasks such as user management, load monitoring, and database backup have traditionally been automated by scripting the command-line tools gbak, gfix, gsec, and gstat.

The API presented to the client programmer by these utilities is inelegant because they are, after all, command-line tools rather than native components of the client language. To address this problem, Firebird has a facility called the Services API, which exposes a uniform interface to the administrative functionality of the traditional command-line tools.

The native Services API, though consistent, is much lower-level than a Pythonic API. If the native version were exposed directly, accomplishing a given task would probably require more Python code than scripting the traditional command-line tools. For this reason, KInterbasDB presents its own abstraction over the native API via the kinterbasdb.services module.

Establishing Services API Connections¶

All Services API operations are performed in the context of a connection to a specific database server, represented by the kinterbasdb.services.Connection class.

kinterbasdb.services.connect(host='service_mgr', user='sysdba', password=None)¶

Establishes a connection to the database server's Services API and returns a kinterbasdb.services.Connection object.

Host: The network name of the computer on which the database server is running.
User: The name of the database user under whose authority the maintenance tasks are to be performed.
Password: User’s password.

Since maintenance operations are most often initiated by an administrative user on the same computer as the database server, host defaults to the local computer, and user defaults to SYSDBA.

The three calls to kinterbasdb.services.connect() in the following program are equivalent:

from kinterbasdb import services

con = services.connect(password='masterkey')
con = services.connect(user='sysdba', password='masterkey')
con = services.connect(host='localhost', user='sysdba', password='masterkey')
class kinterbasdb.services.Connection¶
close()¶
Explicitly terminates a Connection; if this is not invoked, the underlying connection will be closed implicitly when the Connection object is garbage collected.

Server Configuration and Activity Levels¶

Connection.getServiceManagerVersion()¶

To help client programs adapt to version changes, the service manager exposes its version number as an integer.

from kinterbasdb import services
con = services.connect(host='localhost', user='sysdba', password='masterkey')

print con.getServiceManagerVersion()

Output (on Firebird 1.5.0):

2

kinterbasdb.services is a thick wrapper around the Services API that can shield its users from changes in the underlying C API, so this method is unlikely to be useful to the typical Python client programmer.

Connection.getServerVersion()¶

Returns the server’s version string:

from kinterbasdb import services
con = services.connect(host='localhost', user='sysdba', password='masterkey')

print con.getServerVersion()

Output (on Firebird 1.5.0/Win32):

WI-V1.5.0.4290 Firebird 1.5

At first glance, this method appears to duplicate the functionality of the kinterbasdb.Connection.server_version property, but when working with Firebird, there is a difference. kinterbasdb.Connection.server_version is based on a C API call (isc_database_info()) that existed long before the introduction of the Services API. Some programs written before the advent of Firebird test the version number in the return value of isc_database_info(), and refuse to work if it indicates that the server is too old. Since the first stable version of Firebird was labeled 1.0, this pre-Firebird version testing scheme incorrectly concludes that (e.g.) Firebird 1.0 is older than Interbase 5.0.

Firebird addresses this problem by making isc_database_info() return a “pseudo-InterBase” version number, whereas the Services API returns the true Firebird version, as shown:

import kinterbasdb
con = kinterbasdb.connect(dsn='localhost:C:/temp/test.db', user='sysdba', password='masterkey')
print 'Interbase-compatible version string:', con.server_version

import kinterbasdb.services
svcCon = kinterbasdb.services.connect(host='localhost', user='sysdba', password='masterkey')
print 'Actual Firebird version string:     ', svcCon.getServerVersion()

Output (on Firebird 1.5.0/Win32):

Interbase-compatible version string: WI-V6.3.0.4290 Firebird 1.5
Actual Firebird version string:      WI-V1.5.0.4290 Firebird 1.5
Connection.getArchitecture()¶

Returns platform information for the server, including hardware architecture and operating system family.

from kinterbasdb import services
con = services.connect(host='localhost', user='sysdba', password='masterkey')

print con.getArchitecture()

Output (on Firebird 1.5.0/Windows 2000):

Firebird/x86/Windows NT

Unfortunately, the architecture string is almost useless because its format is irregular and sometimes outright idiotic, as with Firebird 1.5.0 running on x86 Linux:

Firebird/linux Intel

Magically, Linux becomes a hardware architecture, the ASCII store decides to hold a 31.92% off sale, and Intel grabs an unfilled niche in the operating system market.

Connection.getHomeDir()¶

Returns the equivalent of the RootDirectory setting from firebird.conf:

from kinterbasdb import services
con = services.connect(host='localhost', user='sysdba', password='masterkey')

print con.getHomeDir()

Output (on a particular Firebird 1.5.0/Windows 2000 installation):

C:\dev\db\firebird150\

Output (on a particular Firebird 1.5.0/Linux installation):

/opt/firebird/
Connection.getSecurityDatabasePath()¶

Returns the location of the server’s core security database, which contains user definitions and such. Interbase® and Firebird 1.0 named this database isc4.gdb; Firebird 1.5 renamed it to security.fdb, and Firebird 2.0 and later use security2.fdb.

from kinterbasdb import services
con = services.connect(host='localhost', user='sysdba', password='masterkey')

print con.getSecurityDatabasePath()

Output (on a particular Firebird 1.5.0/Windows 2000 installation):

C:\dev\db\firebird150\security.fdb

Output (on a particular Firebird 1.5.0/Linux installation):

/opt/firebird/security.fdb
Connection.getLockFileDir()¶

The database engine uses a lock file to coordinate interprocess communication; getLockFileDir() returns the directory in which that file resides:

from kinterbasdb import services
con = services.connect(host='localhost', user='sysdba', password='masterkey')

print con.getLockFileDir()

Output (on a particular Firebird 1.5.0/Windows 2000 installation):

C:\dev\db\firebird150\

Output (on a particular Firebird 1.5.0/Linux installation):

/opt/firebird/
Connection.getCapabilityMask()¶
The Services API offers “a bitmask representing the capabilities currently enabled on the server”, but the only available documentation for this bitmask suggests that it is “reserved for future implementation”. kinterbasdb exposes this bitmask as a Python int returned from the getCapabilityMask() method.
Connection.getMessageFileDir()¶

To support internationalized error messages/prompts, the database engine stores its messages in a file named interbase.msg (Interbase® and Firebird 1.0) or firebird.msg (Firebird 1.5 and later). The directory in which this file resides can be determined with the getMessageFileDir() method.

from kinterbasdb import services
con = services.connect(host='localhost', user='sysdba', password='masterkey')

print con.getMessageFileDir()

Output (on a particular Firebird 1.5.0/Windows 2000 installation):

C:\dev\db\firebird150\

Output (on a particular Firebird 1.5.0/Linux installation):

/opt/firebird/
Connection.getConnectionCount()¶

Returns the number of active connections to databases managed by the server. This count only includes database connections (such as open instances of kinterbasdb.Connection), not services manager connections (such as open instances of kinterbasdb.services.Connection).

import kinterbasdb, kinterbasdb.services
svcCon = kinterbasdb.services.connect(host='localhost', user='sysdba', password='masterkey')

print 'A:', svcCon.getConnectionCount()

con1 = kinterbasdb.connect(dsn='localhost:C:/temp/test.db', user='sysdba', password='masterkey')
print 'B:', svcCon.getConnectionCount()

con2 = kinterbasdb.connect(dsn='localhost:C:/temp/test.db', user='sysdba', password='masterkey')
print 'C:', svcCon.getConnectionCount()

con1.close()
print 'D:', svcCon.getConnectionCount()

con2.close()
print 'E:', svcCon.getConnectionCount()

On an otherwise inactive server, the example program generates the following output:

A: 0
B: 1
C: 2
D: 1
E: 0
Connection.getAttachedDatabaseNames()¶

Returns a list of the names of all databases to which the server is maintaining at least one connection. The database names are not guaranteed to be in any particular order.

import kinterbasdb, kinterbasdb.services
svcCon = kinterbasdb.services.connect(host='localhost', user='sysdba', password='masterkey')

print 'A:', svcCon.getAttachedDatabaseNames()

con1 = kinterbasdb.connect(dsn='localhost:C:/temp/test.db', user='sysdba', password='masterkey')
print 'B:', svcCon.getAttachedDatabaseNames()

con2 = kinterbasdb.connect(dsn='localhost:C:/temp/test2.db', user='sysdba', password='masterkey')
print 'C:', svcCon.getAttachedDatabaseNames()

con3 = kinterbasdb.connect(dsn='localhost:C:/temp/test2.db', user='sysdba', password='masterkey')
print 'D:', svcCon.getAttachedDatabaseNames()

con1.close()
print 'E:', svcCon.getAttachedDatabaseNames()

con2.close()
print 'F:', svcCon.getAttachedDatabaseNames()

con3.close()
print 'G:', svcCon.getAttachedDatabaseNames()

On an otherwise inactive server, the example program generates the following output:

A: []
B: ['C:\\TEMP\\TEST.DB']
C: ['C:\\TEMP\\TEST2.DB', 'C:\\TEMP\\TEST.DB']
D: ['C:\\TEMP\\TEST2.DB', 'C:\\TEMP\\TEST.DB']
E: ['C:\\TEMP\\TEST2.DB']
F: ['C:\\TEMP\\TEST2.DB']
G: []
Connection.getLog()¶

Returns the contents of the server’s log file (named interbase.log by Interbase® and Firebird 1.0; firebird.log by Firebird 1.5 and later):

from kinterbasdb import services
con = services.connect(host='localhost', user='sysdba', password='masterkey')

print con.getLog()

Output (on a particular Firebird 1.5.0/Windows 2000 installation):

WEASEL (Client) Thu Jun 03 12:01:35 2004
  INET/inet_error: send errno = 10054

WEASEL (Client) Sun Jun 06 19:21:17 2004
  INET/inet_error: connect errno = 10061

Database Statistics¶

Connection.getStatistics(database, showOnlyDatabaseLogPages=0...)¶

Returns a string containing a printout in the same format as the output of the gstat command-line utility. This method has one required parameter, the location of the database on which to compute statistics, and five optional boolean parameters for controlling the domain of the statistics.

Map of gstat parameters to getStatistics options

gstat command-line option    getStatistics boolean parameter
-header                      showOnlyDatabaseHeaderPages
-log                         showOnlyDatabaseLogPages
-data                        showUserDataPages
-index                       showUserIndexPages
-system                      showSystemTablesAndIndexes

The following program presents several getStatistics calls and their gstat command-line equivalents. In this context, output is considered “equivalent” even if there are some whitespace differences. When collecting textual output from the Services API, kinterbasdb terminates lines with \n regardless of the platform’s convention; gstat’s output is platform-sensitive in this respect.

from kinterbasdb import services
con = services.connect(user='sysdba', password='masterkey')

# Equivalent to 'gstat -u sysdba -p masterkey C:/temp/test.db':
print con.getStatistics('C:/temp/test.db')

# Equivalent to 'gstat -u sysdba -p masterkey -header C:/temp/test.db':
print con.getStatistics('C:/temp/test.db', showOnlyDatabaseHeaderPages=True)

# Equivalent to 'gstat -u sysdba -p masterkey -log C:/temp/test.db':
print con.getStatistics('C:/temp/test.db', showOnlyDatabaseLogPages=True)

# Equivalent to 'gstat -u sysdba -p masterkey -data -index -system C:/temp/test.db':
print con.getStatistics('C:/temp/test.db',
    showUserDataPages=True,
    showUserIndexPages=True,
    showSystemTablesAndIndexes=True
  )

The output of the example program is not shown here because it is quite long.

Backup and Restoration¶

KInterbasDB offers convenient programmatic control over database backup and restoration via the backup and restore methods.

At the time of this writing, released versions of Firebird/Interbase® do not implement incremental backup, so we can simplistically define backup as the process of generating and storing an archived replica of a live database, and restoration as the inverse. The backup/restoration process exposes numerous parameters, which are properly documented in the Firebird documentation for gbak. The KInterbasDB API to these parameters is presented with minimal documentation in the sample code below.

Connection.backup(sourceDatabase, destFilenames, destFileSizes=(), <options>)¶

Creates a backup file from database content.

Simple Form

The simplest form of backup creates a single backup file that contains everything in the database. Although the extension ‘.fbk’ is conventional, it is not required.

from kinterbasdb import services
con = services.connect(user='sysdba', password='masterkey')

backupLog = con.backup('C:/temp/test.db', 'C:/temp/test_backup.fbk')
print backupLog

In the example, backupLog is a string containing a gbak-style log of the backup process. It is too long to reproduce here.

Although the return value of the backup method is a freeform log string, backup will raise an exception if there is an error. For example:

from kinterbasdb import services
con = services.connect(user='sysdba', password='masterkey')

# Pass an invalid backup path to the engine:
backupLog = con.backup('C:/temp/test.db', 'BOGUS/PATH/test_backup.fbk')
print backupLog
Traceback (most recent call last):
  File "adv_services_backup_simplest_witherror.py", line 5, in ?
    backupLog = con.backup('C:/temp/test.db', 'BOGUS/PATH/test_backup.fbk')
  File "C:\code\projects\kinterbasdb\Kinterbasdb-3.0\build\lib.win32-2.3\kinterbasdb\services.py", line 269, in backup
    return self._actAndReturnTextualResults(request)
  File "C:\code\projects\kinterbasdb\Kinterbasdb-3.0\build\lib.win32-2.3\kinterbasdb\services.py", line 613, in _actAndReturnTextualResults
    self._act(requestBuffer)
  File "C:\code\projects\kinterbasdb\Kinterbasdb-3.0\build\lib.win32-2.3\kinterbasdb\services.py", line 610, in _act
    return _ksrv.action_thin(self._C_conn, requestBuffer.render())
kinterbasdb.OperationalError: (-902, '_kiservices could not perform the action: cannot open backup file BOGUS/PATH/test_backup.fbk. ')

Multifile Form

The database engine has built-in support for splitting the backup into multiple files, which is useful for circumventing operating system file size limits or spreading the backup across multiple discs.

KInterbasDB exposes this facility via the Connection.backup parameters destFilenames and destFileSizes. destFilenames (the second positional parameter of Connection.backup) can be either a string (as in the example above, when creating the backup as a single file) or a sequence of strings naming each constituent file of the backup. If destFilenames is a string-sequence with length N, destFileSizes must be a sequence of integer file sizes (in bytes) with length N-1. The database engine will constrain the size of each backup constituent file named in destFilenames[:-1] to the corresponding size specified in destFileSizes; any remaining backup data will be placed in the file named by destFilenames[-1].

Unfortunately, the database engine does not appear to expose any convenient means of calculating the total size of a database backup before its creation. The page size of the database and the number of pages in the database are available via database_info() calls: database_info(kinterbasdb.isc_info_page_size, 'i') and database_info(kinterbasdb.isc_info_db_size_in_pages, 'i'), respectively, but the size of the backup file is usually smaller than the size of the database.

There should be no harm in submitting too many constituent specifications; the engine will write an empty header record into the excess constituents. However, at the time of this writing, released versions of the database engine hang the backup task if more than 11 constituents are specified (that is, if len(destFilenames) > 11). KInterbasDB does not prevent the programmer from submitting more than 11 constituents, but it does issue a warning.

The following program directs the engine to split the backup of the database at C:/temp/test.db into C:/temp/back01.fbk, a file 4096 bytes in size, C:/temp/back02.fbk, a file 16384 bytes in size, and C:/temp/back03.fbk, a file containing the remainder of the backup data.

from kinterbasdb import services
con = services.connect(user='sysdba', password='masterkey')

con.backup('C:/temp/test.db',
   ('C:/temp/back01.fbk', 'C:/temp/back02.fbk', 'C:/temp/back03.fbk'),
    destFileSizes=(4096, 16384)
  )

Extended Options

In addition to the three parameters documented previously (positional sourceDatabase, positional destFilenames, and keyword destFileSizes), the Connection.backup method accepts six boolean parameters that control aspects of the backup process and the backup file output format. These options are well documented elsewhere, so only a table of equivalence between gbak options and the names of the boolean keyword parameters is presented here:

gbak option    Parameter Name                           Default Value
-T             transportable                            True
-M             metadataOnly                             False
-G             garbageCollect                           True
-L             ignoreLimboTransactions                  False
-IG            ignoreChecksums                          False
-CO            convertExternalTablesToInternalTables    True
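
For instance, a metadata-only backup with garbage collection disabled might be requested as follows (a minimal sketch; the keyword parameter names are those tabled above, and the file paths are hypothetical):

from kinterbasdb import services
con = services.connect(user='sysdba', password='masterkey')

# Back up only the metadata (no table rows), and skip garbage collection:
backupLog = con.backup('C:/temp/test.db', 'C:/temp/test_meta.fbk',
    metadataOnly=True,
    garbageCollect=False
  )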
Connection.restore(sourceFilenames, destFilenames, destFilePages=(), <options>)¶

Restores a database from a backup file.

Simplest Form

The simplest form of restore creates a single-file database, regardless of whether the backup data were split across multiple files.

from kinterbasdb import services
con = services.connect(user='sysdba', password='masterkey')

restoreLog = con.restore('C:/temp/test_backup.fbk', 'C:/temp/test_restored.db')
print restoreLog

In the example, restoreLog is a string containing a gbak-style log of the restoration process. It is too long to reproduce here.

Multifile Form

The database engine has built-in support for splitting the restored database into multiple files, which is useful for circumventing operating system file size limits or spreading the database across multiple discs.

KInterbasDB exposes this facility via the Connection.restore parameters destFilenames and destFilePages. destFilenames (the second positional argument of Connection.restore) can be either a string (as in the example above, when restoring to a single database file) or a sequence of strings naming each constituent file of the restored database. If destFilenames is a string-sequence with length N, destFilePages must be a sequence of integers with length N-1. The database engine will constrain the size of each database constituent file named in destFilenames[:-1] to the corresponding page count specified in destFilePages; any remaining database pages will be placed in the file named by destFilenames[-1].

The following program directs the engine to restore the backup file at C:/temp/test_backup.fbk into a database with three constituent files: C:/temp/test_restored01.db, C:/temp/test_restored02.db, and C:/temp/test_restored03.db. The engine is instructed to place fifty user data pages in the first file, seventy in the second, and the remainder in the third file. In practice, the first database constituent file will be larger than pageSize*destFilePages[0], because metadata pages must also be stored in the first constituent of a multifile database.

from kinterbasdb import services
con = services.connect(user='sysdba', password='masterkey')

con.restore('C:/temp/test_backup.fbk',
    ('C:/temp/test_restored01.db', 'C:/temp/test_restored02.db', 'C:/temp/test_restored03.db'),
    destFilePages=(50, 70),
    pageSize=1024,
    replace=True
  )

Extended Options

These options are well documented elsewhere, so only a table of equivalence between the gbak options and the names of the keyword parameters to Connection.restore is presented here:

gbak option    Parameter Name             Default Value
-P             pageSize                   [use server default]
-REP           replace                    False
-O             commitAfterEachTable       False
-K             doNotRestoreShadows        False
-I             deactivateIndexes          False
-N             doNotEnforceConstraints    False
-USE           useAllPageSpace            False
-MO            accessModeReadOnly         False
-BU            cacheBuffers               [use server default]
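
For instance, restoring over an existing database with a nondefault page size might look like this (a minimal sketch; the keyword parameter names are those tabled above, and the file paths are hypothetical):

from kinterbasdb import services
con = services.connect(user='sysdba', password='masterkey')

# Overwrite the existing database, restoring with 4096-byte pages:
restoreLog = con.restore('C:/temp/test_backup.fbk', 'C:/temp/test_restored.db',
    replace=True,
    pageSize=4096
  )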

Database Operating Modes, Sweeps, and Repair¶

Connection.sweep(database, markOutdatedRecordsAsFreeSpace=1)¶
Not yet documented.
Connection.setSweepInterval(database, n)¶
Not yet documented.
Connection.setDefaultPageBuffers(database, n)¶
Not yet documented.
Connection.setShouldReservePageSpace(database, shouldReserve)¶
Not yet documented.
Connection.setWriteMode(database, mode)¶
Not yet documented.
Connection.setAccessMode(database, mode)¶
Not yet documented.
Connection.setSQLDialect(database, dialect)¶
Not yet documented.
Connection.activateShadowFile(database)¶
Not yet documented.
Connection.shutdown(database, shutdownMethod, timeout)¶
Not yet documented.
Connection.bringOnline(database)¶
Not yet documented.
Connection.getLimboTransactionIDs(database)¶
Not yet documented.
Connection.commitLimboTransaction(database, transactionID)¶
Not yet documented.
Connection.rollbackLimboTransaction(database, transactionID)¶
Not yet documented.
Connection.repair(database, <options>)¶
Not yet documented.

User Maintenance¶

Connection.getUsers(username=None)¶
By default, lists all users.
Connection.addUser(user)¶
User: An instance of User with at least its username and password attributes specified as non-empty values.
Connection.modifyUser(user)¶

Changes user data.

User: An instance of User with at least its username and password attributes specified as non-empty values.
Connection.removeUser(user)¶
Accepts either an instance of services.User or a string username, and deletes the specified user.
Connection.userExists(user)¶
Returns a boolean that indicates whether the specified user exists.
class kinterbasdb.services.User¶
Not yet documented.
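
The following minimal sketch exercises the user maintenance methods documented above. It assumes that services.User can be instantiated without arguments and that its username and password attributes are writable, as the addUser documentation implies; the user name is hypothetical:

from kinterbasdb import services
con = services.connect(user='sysdba', password='masterkey')

newUser = services.User()           # assumed zero-argument constructor
newUser.username = 'test_user_2'    # hypothetical user name
newUser.password = 'pass'

if not con.userExists('test_user_2'):
    con.addUser(newUser)

# getUsers() returns a list of services.User instances:
print [u.username for u in con.getUsers()]

con.removeUser('test_user_2')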

kinterbasdb-3.3.0/docs/python-db-api-compliance.html0000644000175000001440000006243111132652265021617 0ustar pcisarusers Compliance to Python Database API 2.0 — KInterbasDB v3.3.0 documentation

Compliance to Python Database API 2.0¶

Incompatibilities¶

kinterbasdb.DATETIME¶
KInterbasDB’s deferred loading of dynamic type translators causes this singleton to behave in violation of the standard until the kinterbasdb.init() function has been called (whether explicitly or implicitly). For more information, see the documentation section about Deferred Loading of Dynamic Type Translators.

Unsupported Optional Features¶

Cursor.nextset()¶
This method is not implemented because the database engine does not support opening multiple result sets simultaneously with a single cursor.

Nominally Supported Optional Features¶

class kinterbasdb.Cursor¶
arraysize¶

As required by the spec, the value of this attribute is observed with respect to the fetchmany method. However, changing the value of this attribute does not make any difference in fetch efficiency because the database engine only supports fetching a single row at a time.
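
A minimal sketch (assuming a connection con and a populated table named languages):

cur = con.cursor()
cur.arraysize = 10
cur.execute("select * from languages")
rows = cur.fetchmany()  # honors cur.arraysize: returns at most 10 rows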

setinputsizes()¶

Although this method is present, it does nothing, as allowed by the spec.

setoutputsize()¶

Although this method is present, it does nothing, as allowed by the spec.

Extensions and Caveats¶

KInterbasDB offers a large feature set beyond the minimal requirements of the Python DB API. Most of these extensions are documented in the section of this document entitled Native Database Engine Features and Extensions Beyond the Python DB API.

This section attempts to document only those features that overlap with the DB API, or are too insignificant to warrant their own subsection elsewhere.

kinterbasdb.connect()¶

This function supports the following optional keyword arguments in addition to those required by the spec:

Role: For connecting to a database with a specific SQL role.

Example:

kinterbasdb.connect(dsn='host:/path/database.db', user='limited_user',
   password='pass', role='MORE_POWERFUL_ROLE')
Charset: For explicitly specifying the character set of the connection. See the Firebird Documentation for a list of available character sets, and the Unicode Fields and KInterbasDB section for information on handling extended character sets with KInterbasDB.

Example:

kinterbasdb.connect(dsn='host:/path/database.db', user='sysdba',
    password='pass', charset='UTF8')
Dialect: The SQL dialect is a feature for backward compatibility with Interbase® 5.5 or earlier. The default dialect is 3 (the most featureful dialect, and the default for Firebird). If you want to connect to legacy databases, you must explicitly set this argument’s value to 1. Dialect 2 is a transitional dialect that is normally used only during ports from IB < 6 to IB >= 6 or Firebird. See the Firebird documentation for more information about SQL Dialects.

Example:

kinterbasdb.connect(dsn='host:/path/database.db', user='sysdba',
   password='pass', dialect=1)
Timeout: (Optional) Dictionary with timeout and action specification. See the section about Connection Timeouts for details.
class kinterbasdb.Connection¶
charset¶
(read-only) The character set of the connection (set via the charset parameter of kinterbasdb.connect()). See Firebird Documentation for a list of available character sets, and Unicode Fields and KInterbasDB section for information on handling extended character sets with KInterbasDB.
dialect¶
This integer attribute indicates which SQL dialect the connection is using. You should not change a connection’s dialect; instead, discard the connection and establish a new one with the desired dialect. For more information, see the documentation of the dialect argument of the connect function.
server_version¶
(read-only) The version string of the database server to which this connection is connected. For example, a connection to Firebird 1.0 on Windows has the following server_version: WI-V6.2.794 Firebird 1.0
execute_immediate()¶

Executes a statement without caching its prepared form. The statement must not be of a type that returns a result set. In most cases (especially cases in which the same statement – perhaps a parameterized statement – is executed repeatedly), it is better to create a cursor using the connection’s cursor method, then execute the statement using one of the cursor’s execute methods.

Arguments:

Sql: String containing the SQL statement to execute.
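
A minimal sketch (the DSN and table name are hypothetical):

import kinterbasdb

con = kinterbasdb.connect(dsn='localhost:/temp/test.db', user='sysdba', password='pass')

# Execute a statement that returns no result set, without preparing a cursor:
con.execute_immediate("create table log_messages (msg varchar(80))")
con.commit()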
precision_mode¶
Although this attribute is present in KInterbasDB 3.1+ and works in a backward-compatible fashion, it is deprecated in favor of the more general dynamic type translation feature.
commit(retaining=False)¶
rollback(retaining=False)¶
The commit and rollback methods accept an optional boolean parameter retaining (default False) that indicates whether the transactional context of the transaction being resolved should be recycled. For details, see the Advanced Transaction Control: Retaining Operations section of this document. The rollback method accepts an optional string parameter savepoint that causes the transaction to roll back only as far as the designated savepoint, rather than rolling back entirely. For details, see the Advanced Transaction Control: Savepoints section of this document.
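
A minimal sketch of both features (it assumes a table named test_table and the Connection.savepoint method, which is documented in the Advanced Transaction Control: Savepoints section):

cur = con.cursor()
cur.execute("insert into test_table values (1)")
con.commit(retaining=True)      # resolve, but recycle the transactional context

con.savepoint('SP1')            # establish a savepoint named SP1
cur.execute("insert into test_table values (2)")
con.rollback(savepoint='SP1')   # undo only the work performed after SP1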
class kinterbasdb.Cursor
description¶

KInterbasDB makes absolutely no guarantees about description except those required by the Python Database API Specification 2.0 (that is, description is either None or a sequence of 7-element sequences). Therefore, client programmers should not rely on description being an instance of a particular class or type. KInterbasDB provides several named positional constants to be used as indices into a given element of description. The contents of all description elements are defined by the DB API spec; these constants are provided merely for convenience.

DESCRIPTION_NAME
DESCRIPTION_TYPE_CODE
DESCRIPTION_DISPLAY_SIZE
DESCRIPTION_INTERNAL_SIZE
DESCRIPTION_PRECISION
DESCRIPTION_SCALE
DESCRIPTION_NULL_OK

Here is an example of accessing the name of the first field in the description of cursor cur:

nameOfFirstField = cur.description[0][kinterbasdb.DESCRIPTION_NAME]

For more information, see the documentation of Cursor.description in the DB API Specification.

rowcount¶
Although KInterbasDB’s Cursors implement this attribute, the database engine’s own support for the determination of “rows affected”/“rows selected” is quirky. The database engine only supports the determination of rowcount for INSERT, UPDATE, DELETE, and SELECT statements. When stored procedures become involved, row count figures are usually not available to the client. Determining rowcount for SELECT statements is problematic: the rowcount is reported as zero until at least one row has been fetched from the result set, and the rowcount is misreported if the result set is larger than 1302 rows. The server apparently marshals result sets internally in batches of 1302, and will misreport the rowcount for result sets larger than 1302 rows until the 1303rd row is fetched, for result sets larger than 2604 rows until the 2605th row is fetched, and so on, in increments of 1302. As required by the Python DB API Spec, the rowcount attribute “is -1 in case no executeXX() has been performed on the cursor or the rowcount of the last operation is not determinable by the interface”.
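A minimal sketch of the fetch-dependent behavior described above (assuming a connection con and a populated table named languages):

cur = con.cursor()
cur.execute("select * from languages")
print cur.rowcount  # Reports 0: no row has been fetched yet.
cur.fetchone()
print cur.rowcount  # Now reports 1 (subject to the 1302-row caveat above).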
fetchone()¶
fetchmany()¶
fetchall()¶
KInterbasDB makes absolutely no guarantees about the return value of the fetchone / fetchmany / fetchall methods except that it is a sequence indexed by field position. KInterbasDB makes absolutely no guarantees about the return value of the fetchonemap / fetchmanymap / fetchallmap methods (documented below) except that it is a mapping of field name to field value. Therefore, client programmers should not rely on the return value being an instance of a particular class or type.
fetchonemap()¶
This method is just like the standard fetchone method of the DB API, except that it returns a mapping of field name to field value, rather than a sequence.
fetchmanymap()¶
This method is just like the standard fetchmany method of the DB API, except that it returns a sequence of mappings of field name to field value, rather than a sequence of sequences.
fetchallmap()¶
This method is just like the standard fetchall method of the DB API, except that it returns a sequence of mappings of field name to field value, rather than a sequence of sequences.
iter()¶
itermap()¶
These methods are equivalent to the fetchall and fetchallmap methods, respectively, except that they return iterators rather than materialized sequences. iter and itermap are exercised in the brief example below.
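
A minimal sketch (assuming a cursor cur over a populated table named languages with columns name and year_released):

cur.execute("select name, year_released from languages")
for row in cur.itermap():
    print '%(name)s was first released in %(year_released)d.' % row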
kinterbasdb-3.3.0/docs/index.html0000644000175000001440000003745411133077256016144 0ustar pcisarusers Welcome to KInterbasDB’s documentation! — KInterbasDB v3.3.0 documentation

Welcome to KInterbasDB’s documentation!¶

KInterbasDB is a Python extension package that implements Python Database API 2.0-compliant support for the open source relational database Firebird® and some versions of its proprietary cousin Interbase®. In addition to the minimal feature set of the standard Python DB API, KInterbasDB also exposes nearly the entire native client API of the database engine.

KInterbasDB is free – covered by a permissive BSD-style license that both commercial and noncommercial users should find agreeable.

This documentation set is not a tutorial on Python, SQL, or Firebird; rather, it is a topical presentation of KInterbasDB’s feature set, with example code to demonstrate basic usage patterns. For detailed information about Firebird features, see the Firebird documentation, and especially the excellent The Firebird Book written by Helen Borrie and published by APress.


kinterbasdb-3.3.0/docs/tutorial.html0000644000175000001440000006224411132652266016702 0ustar pcisarusers Quick-start Guide / Tutorial — KInterbasDB v3.3.0 documentation

Quick-start Guide / Tutorial¶

This brief tutorial aims to get the reader started by demonstrating elementary usage of KInterbasDB. It is not a comprehensive Python Database API tutorial, nor is it comprehensive in its coverage of anything else.

The numerous advanced features of KInterbasDB are covered in another section of this documentation, which is not in a tutorial format, though it is replete with examples.

Connecting to a Database¶

Example 1

A database connection is typically established with code such as this:

import kinterbasdb

# The server is named 'bison'; the database file is at '/temp/test.db'.
con = kinterbasdb.connect(dsn='bison:/temp/test.db', user='sysdba', password='pass')

# Or, equivalently:
con = kinterbasdb.connect(
    host='bison', database='/temp/test.db',
    user='sysdba', password='pass'
  )

Example 2

Suppose we want to connect to the database in SQL Dialect 1, specifying UTF-8 as the character set of the connection:

import kinterbasdb

con = kinterbasdb.connect(
    dsn='bison:/temp/test.db',
    user='sysdba', password='pass',
    dialect=1, # necessary for all dialect 1 databases
    charset='UTF8' # specify a character set for the connection
  )

Executing SQL Statements¶

For this section, suppose we have a table defined and populated by the following SQL code:

create table languages
(
  name               varchar(20),
  year_released      integer
);

insert into languages (name, year_released) values ('C',        1972);
insert into languages (name, year_released) values ('Python',   1991);

Example 1

This example shows the simplest way to print the entire contents of the languages table:

import kinterbasdb

con = kinterbasdb.connect(dsn='/temp/test.db', user='sysdba', password='masterkey')

# Create a Cursor object that operates in the context of Connection con:
cur = con.cursor()

# Execute the SELECT statement:
cur.execute("select * from languages order by year_released")

# Retrieve all rows as a sequence and print that sequence:
print cur.fetchall()

Sample output:

[('C', 1972), ('Python', 1991)]

Example 2

Here’s another trivial example that demonstrates various ways of fetching a single row at a time from a SELECT-cursor:

import kinterbasdb

con = kinterbasdb.connect(dsn='/temp/test.db', user='sysdba', password='masterkey')

cur = con.cursor()
SELECT = "select name, year_released from languages order by year_released"

# 1. Iterate over the rows available from the cursor, unpacking the
# resulting sequences to yield their elements (name, year_released):
cur.execute(SELECT)
for (name, year_released) in cur:
    print '%s has been publicly available since %d.' % (name, year_released)

# 2. Equivalently:
cur.execute(SELECT)
for row in cur:
    print '%s has been publicly available since %d.' % (row[0], row[1])

# 3. Using mapping-iteration rather than sequence-iteration:
cur.execute(SELECT)
for row in cur.itermap():
    print '%(name)s has been publicly available since %(year_released)d.' % row

Sample output:

C has been publicly available since 1972.
Python has been publicly available since 1991.
C has been publicly available since 1972.
Python has been publicly available since 1991.
C has been publicly available since 1972.
Python has been publicly available since 1991.

Example 3

The following program is a simplistic table printer (applied in this example to languages):

import kinterbasdb as k

TABLE_NAME = 'languages'
SELECT = 'select * from %s order by year_released' % TABLE_NAME

con = k.connect(dsn='/temp/test.db', user='sysdba', password='masterkey')

cur = con.cursor()
cur.execute(SELECT)

# Print a header.
for fieldDesc in cur.description:
    print fieldDesc[k.DESCRIPTION_NAME].ljust(fieldDesc[k.DESCRIPTION_DISPLAY_SIZE]) ,
print # Finish the header with a newline.
print '-' * 78

# For each row, print the value of each field left-justified within
# the maximum possible width of that field.
fieldIndices = range(len(cur.description))
for row in cur:
    for fieldIndex in fieldIndices:
        fieldValue = str(row[fieldIndex])
        fieldMaxWidth = cur.description[fieldIndex][k.DESCRIPTION_DISPLAY_SIZE]

        print fieldValue.ljust(fieldMaxWidth) ,

    print # Finish the row with a newline.

Sample output:

NAME                 YEAR_RELEASED
------------------------------------------------------------------------------
C                    1972
Python               1991

Example 4

Let’s insert more languages:

import kinterbasdb

con = kinterbasdb.connect(dsn='/temp/test.db', user='sysdba', password='masterkey')

cur = con.cursor()

newLanguages = [
    ('Lisp',  1958),
    ('Dylan', 1995),
  ]

cur.executemany("insert into languages (name, year_released) values (?, ?)",
    newLanguages
  )

# The changes will not be saved unless the transaction is committed explicitly:
con.commit()

Note the use of a parameterized SQL statement above. When dealing with repetitive statements, this is much faster and less error-prone than assembling each SQL statement manually. (You can read more about parameterized SQL statements in the section on Prepared Statements.)

After running Example 4, the table printer from Example 3 would print:

NAME                 YEAR_RELEASED
------------------------------------------------------------------------------
Lisp                 1958
C                    1972
Python               1991
Dylan                1995
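
Looking ahead to that section, here is a minimal sketch of explicit preparation via Cursor.prep (the extra language rows are invented for illustration):

import kinterbasdb

con = kinterbasdb.connect(dsn='/temp/test.db', user='sysdba', password='masterkey')
cur = con.cursor()

# Prepare the statement once, then execute it repeatedly with new parameters:
prepped = cur.prep("insert into languages (name, year_released) values (?, ?)")
cur.execute(prepped, ('Smalltalk', 1980))
cur.execute(prepped, ('Haskell', 1990))
con.commit()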

Calling Stored Procedures¶

Firebird supports stored procedures written in a proprietary procedural SQL language. Firebird stored procedures can have input parameters and/or output parameters. Some databases support input/output parameters, where the same parameter is used for both input and output; Firebird does not support this.

It is important to distinguish between procedures that return a result set and procedures that populate and return their output parameters exactly once. Conceptually, the latter “return their output parameters” like a Python function, whereas the former “yield result rows” like a Python generator.

Firebird’s server-side procedural SQL syntax makes no such distinction, but client-side SQL code (and C API code) must. A result set is retrieved from a stored procedure by SELECTing from the procedure, whereas output parameters are retrieved with an EXECUTE PROCEDURE statement.

To retrieve a result set from a stored procedure with KInterbasDB, use code such as this:

cur.execute("select output1, output2 from the_proc(?, ?)", (input1, input2))

# Ordinary fetch code here, such as:
for row in cur:
    ... # process row

con.commit() # If the procedure had any side effects, commit them.

To execute a stored procedure and access its output parameters, use code such as this:

cur.callproc("the_proc", (input1, input2))

# If there are output parameters, retrieve them as though they were the
# first row of a result set.  For example:
outputParams = cur.fetchone()

con.commit() # If the procedure had any side effects, commit them.

The latter approach is not very elegant; it would be preferable to access the procedure’s output parameters as the return value of Cursor.callproc(). However, the Python DB API specification requires the current behavior.

kinterbasdb-3.3.0/docs/Python-DB-API-2.0.html0000644000175000001440000013162111132652262017477 0ustar pcisarusers Python Database API Specification 2.0 — KInterbasDB v3.3.0 documentation

Python Database API Specification 2.0¶

KInterbasDB is the Python Database API 2.0 compliant driver for Firebird. The Reference / Usage Guide is therefore divided into three parts:

  • Python Database API 2.0 specification
  • KInterbasDB Compliance to Python DB 2.0 API specification.
  • KInterbasDB features beyond Python DB 2.0 API specification.

If you’re familiar with the Python DB API 2.0 specification, you may skip directly to the next topic.

Note

This is a local copy of the specification. The online source copy is available at http://www.python.org/topics/database/DatabaseAPI-2.0.html

Introduction¶

This API has been defined to encourage similarity between the Python modules that are used to access databases. By doing this, we hope to achieve a consistency leading to more easily understood modules, code that is generally more portable across databases, and a broader reach of database connectivity from Python.

The interface specification consists of several sections:

  • Module Interface
  • Connection Objects
  • Cursor Objects
  • Type Objects and Constructors
  • Implementation Hints
  • Major Changes from 1.0 to 2.0

Comments and questions about this specification may be directed to the SIG for Database Interfacing with Python.

For more information on database interfacing with Python and available packages see the Database Topics Guide on www.python.org.

This document describes the Python Database API Specification 2.0. The previous version, 1.0, is still available for reference. Package writers are encouraged to use this version of the specification as the basis for new interfaces.

Module Interface¶

Access to the database is made available through connection objects. The module must provide the following constructor for these:

connect(parameters...)¶
Constructor for creating a connection to the database. Returns a Connection Object . It takes a number of parameters which are database dependent. [1]

These module globals must be defined:

apilevel¶
String constant stating the supported DB API level. Currently only the strings ‘1.0’ and ‘2.0’ are allowed. If not given, a Database API 1.0 level interface should be assumed.
threadsafety¶

Integer constant stating the level of thread safety the interface supports. Possible values are:

  • 0 = Threads may not share the module.
  • 1 = Threads may share the module, but not connections.
  • 2 = Threads may share the module and connections.
  • 3 = Threads may share the module, connections and cursors.

Sharing in the above context means that two threads may use a resource without wrapping it using a mutex semaphore to implement resource locking.

Note that you cannot always make external resources thread safe by managing access using a mutex: the resource may rely on global variables or other external sources that are beyond your control.

paramstyle¶

String constant stating the type of parameter marker formatting expected by the interface. Possible values are [2]:

  • ‘qmark’ = Question mark style, e.g. ‘...WHERE name=?’
  • ‘numeric’ = Numeric, positional style, e.g. ‘...WHERE name=:1’
  • ‘named’ = Named style, e.g. ‘...WHERE name=:name’
  • ‘format’ = ANSI C printf format codes, e.g. ‘...WHERE name=%s’
  • ‘pyformat’ = Python extended format codes, e.g. ‘...WHERE name=%(name)s’
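
As a concrete illustration (not part of the original specification): KInterbasDB reports apilevel ‘2.0’, threadsafety 1, and the ‘qmark’ paramstyle, so a parameterized query against the tutorial’s languages table looks like this:

import kinterbasdb

print kinterbasdb.apilevel      # '2.0'
print kinterbasdb.threadsafety  # 1: threads may share the module, but not connections
print kinterbasdb.paramstyle    # 'qmark'

con = kinterbasdb.connect(dsn='/temp/test.db', user='sysdba', password='masterkey')
cur = con.cursor()
# 'qmark' style: each positional parameter is marked with '?':
cur.execute("select name from languages where year_released = ?", (1991,))
print cur.fetchall()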

The module should make all error information available through these exceptions or subclasses thereof:

exception Warning¶
Exception raised for important warnings like data truncations while inserting, etc. It must be a subclass of the Python StandardError (defined in the module exceptions).
exception Error¶
Exception that is the base class of all other error exceptions. You can use this to catch all errors with one single ‘except’ statement. Warnings are not considered errors and thus should not use this class as base. It must be a subclass of the Python StandardError (defined in the module exceptions).
exception InterfaceError¶
Exception raised for errors that are related to the database interface rather than the database itself. It must be a subclass of Error.
exception DatabaseError¶
Exception raised for errors that are related to the database. It must be a subclass of Error.
exception DataError¶
Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range, etc. It must be a subclass of DatabaseError.
exception OperationalError¶
Exception raised for errors that are related to the database’s operation and not necessarily under the control of the programmer, e.g. an unexpected disconnect occurs, the data source name is not found, a transaction could not be processed, a memory allocation error occurred during processing, etc. It must be a subclass of DatabaseError.
exception IntegrityError¶
Exception raised when the relational integrity of the database is affected, e.g. a foreign key check fails. It must be a subclass of DatabaseError.
exception InternalError¶
Exception raised when the database encounters an internal error, e.g. the cursor is not valid anymore, the transaction is out of sync, etc. It must be a subclass of DatabaseError.
exception ProgrammingError¶
Exception raised for programming errors, e.g. table not found or already exists, syntax error in the SQL statement, wrong number of parameters specified, etc. It must be a subclass of DatabaseError.
exception NotSupportedError¶
Exception raised in case a method or database API was used which is not supported by the database, e.g. requesting a .rollback() on a connection that does not support transaction or has transactions turned off. It must be a subclass of DatabaseError.

This is the exception inheritance layout:

  StandardError
  |__Warning
  |__Error
     |__InterfaceError
     |__DatabaseError
        |__DataError
        |__OperationalError
        |__IntegrityError
        |__InternalError
        |__ProgrammingError
        |__NotSupportedError

Note: The values of these exceptions are not defined. They should give the user a fairly good idea of what went wrong though.
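
As an illustration (not part of the original specification), a KInterbasDB client might exploit this hierarchy as follows; the missing table is invented for the example:

import kinterbasdb

con = kinterbasdb.connect(dsn='/temp/test.db', user='sysdba', password='masterkey')
cur = con.cursor()
try:
    cur.execute("select * from no_such_table")
except kinterbasdb.ProgrammingError, e:
    print 'Bad SQL or missing table:', e
except kinterbasdb.Error, e:
    # Error is the base of every non-Warning exception in the layout above,
    # so this single handler catches anything the previous clause missed.
    print 'Some other database error:', e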

Connection Objects¶

Connections Objects should respond to the following methods:

class Connection¶
close()¶
Close the connection now (rather than whenever __del__ is called). The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection. The same applies to all cursor objects trying to use the connection.
commit()¶
Commit any pending transaction to the database. Note that if the database supports an auto-commit feature, this must be initially off. An interface method may be provided to turn it back on. Database modules that do not support transactions should implement this method with void functionality.
rollback()¶
This method is optional since not all databases provide transaction support. [3] In case a database does provide transactions this method causes the database to roll back to the start of any pending transaction. Closing a connection without committing the changes first will cause an implicit rollback to be performed.
cursor()¶
Return a new Cursor Object using the connection. If the database does not provide a direct cursor concept, the module will have to emulate cursors using other means to the extent needed by this specification. [4]
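
As an illustration (not part of the original specification), the typical lifecycle of a KInterbasDB connection, sketched against the tutorial’s languages table:

import kinterbasdb

con = kinterbasdb.connect(dsn='/temp/test.db', user='sysdba', password='masterkey')
cur = con.cursor()
try:
    cur.execute("insert into languages (name, year_released) values (?, ?)",
        ('Fortran', 1957))
    con.commit()    # make the pending transaction permanent
except kinterbasdb.Error:
    con.rollback()  # explicitly undo the pending transaction on failure
    raise
con.close()         # close now, rather than whenever __del__ is called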

Cursor Objects¶

These objects represent a database cursor, which is used to manage the context of a fetch operation. Cursor Objects should respond to the following methods and attributes:

class Cursor¶
description¶
This read-only attribute is a sequence of 7-item sequences. Each of these sequences contains information describing one result column: (name, type_code, display_size, internal_size, precision, scale, null_ok). This attribute will be None for operations that do not return rows or if the cursor has not had an operation invoked via the executeXXX() method yet. The type_code can be interpreted by comparing it to the Type Objects specified in the section below.
rowcount¶
This read-only attribute specifies the number of rows that the last executeXXX() produced (for DQL statements like select) or affected (for DML statements like update or insert ). The attribute is -1 in case no executeXXX() has been performed on the cursor or the rowcount of the last operation is not determinable by the interface. [7]
callproc(procname[, parameters])¶
This method is optional since not all databases provide stored procedures. [3] Call a stored database procedure with the given name. The sequence of parameters must contain one entry for each argument that the procedure expects. The result of the call is returned as modified copy of the input sequence. Input parameters are left untouched, output and input/output parameters replaced with possibly new values. The procedure may also provide a result set as output. This must then be made available through the standard fetchXXX() methods.
close()¶
Close the cursor now (rather than whenever __del__ is called). The cursor will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the cursor.
execute(operation[, parameters])¶
Prepare and execute a database operation (query or command). Parameters may be provided as sequence or mapping and will be bound to variables in the operation. Variables are specified in a database-specific notation (see the module’s paramstyle attribute for details). [5] A reference to the operation will be retained by the cursor. If the same operation object is passed in again, then the cursor can optimize its behavior. This is most effective for algorithms where the same operation is used, but different parameters are bound to it (many times). For maximum efficiency when reusing an operation, it is best to use the setinputsizes() method to specify the parameter types and sizes ahead of time. It is legal for a parameter to not match the predefined information; the implementation should compensate, possibly with a loss of efficiency. The parameters may also be specified as list of tuples to e.g. insert multiple rows in a single operation, but this kind of usage is deprecated: executemany() should be used instead. Return values are not defined.
executemany(operation, seq_of_parameters)¶
Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters. Modules are free to implement this method using multiple calls to the execute() method or by using array operations to have the database process the sequence as a whole in one call. The same comments as for execute() also apply accordingly to this method. Return values are not defined.
fetchone()¶
Fetch the next row of a query result set, returning a single sequence, or None when no more data is available. [6] An Error (or subclass) exception is raised if the previous call to executeXXX() did not produce any result set or no call was issued yet.
fetchmany([size=cursor.arraysize])¶
Fetch the next set of rows of a query result, returning a sequence of sequences (e.g. a list of tuples). An empty sequence is returned when no more rows are available. The number of rows to fetch per call is specified by the parameter. If it is not given, the cursor’s arraysize determines the number of rows to be fetched. The method should try to fetch as many rows as indicated by the size parameter. If this is not possible due to the specified number of rows not being available, fewer rows may be returned. An Error (or subclass) exception is raised if the previous call to executeXXX() did not produce any result set or no call was issued yet. Note there are performance considerations involved with the size parameter. For optimal performance, it is usually best to use the arraysize attribute. If the size parameter is used, then it is best for it to retain the same value from one fetchmany() call to the next.
fetchall()¶
Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor’s arraysize attribute can affect the performance of this operation. An Error (or subclass) exception is raised if the previous call to executeXXX() did not produce any result set or no call was issued yet.
nextset()¶
This method is optional since not all databases support multiple result sets. [3] This method will make the cursor skip to the next available set, discarding any remaining rows from the current set. If there are no more sets, the method returns None. Otherwise, it returns a true value and subsequent calls to the fetch methods will return rows from the next result set. An Error (or subclass) exception is raised if the previous call to executeXXX() did not produce any result set or no call was issued yet.
setinputsizes(sizes)¶
This can be used before a call to executeXXX() to predefine memory areas for the operation’s parameters. sizes is specified as a sequence – one item for each input parameter. The item should be a Type Object that corresponds to the input that will be used, or it should be an integer specifying the maximum length of a string parameter. If the item is None, then no predefined memory area will be reserved for that column (this is useful to avoid predefined areas for large inputs). This method would be used before the executeXXX() method is invoked. Implementations are free to have this method do nothing and users are free to not use it.
setoutputsize(size[, column])¶
Set a column buffer size for fetches of large columns (e.g. LONGs, BLOBs, etc.). The column is specified as an index into the result sequence. Not specifying the column will set the default size for all large columns in the cursor. This method would be used before the executeXXX() method is invoked. Implementations are free to have this method do nothing and users are free to not use it.
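
As an illustration (not part of the original specification), the fetch methods described above, sketched against the tutorial’s languages table:

import kinterbasdb

con = kinterbasdb.connect(dsn='/temp/test.db', user='sysdba', password='masterkey')
cur = con.cursor()
cur.execute("select name, year_released from languages order by year_released")

print cur.description     # one 7-item sequence per result column

first = cur.fetchone()    # a single row, or None when no more data is available
batch = cur.fetchmany(2)  # a sequence of up to 2 further rows
rest = cur.fetchall()     # all remaining rows as a sequence of sequences
print first, batch, rest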

Type Objects and Constructors¶

Many databases need to have the input in a particular format for binding to an operation’s input parameters. For example, if an input is destined for a DATE column, then it must be bound to the database in a particular string format. Similar problems exist for “Row ID” columns or large binary items (e.g. blobs or RAW columns). This presents problems for Python since the parameters to the executeXXX() method are untyped. When the database module sees a Python string object, it doesn’t know if it should be bound as a simple CHAR column, as a raw BINARY item, or as a DATE. To overcome this problem, a module must provide the constructors defined below to create objects that can hold special values. When passed to the cursor methods, the module can then detect the proper type of the input parameter and bind it accordingly. A Cursor Object’s description attribute returns information about each of the result columns of a query. The type_code must compare equal to one of Type Objects defined below. Type Objects may be equal to more than one type code (e.g. DATETIME could be equal to the type codes for date, time and timestamp columns; see the Implementation Hints below for details). The module exports the following constructors and singletons:

Date(year, month, day)¶
This function constructs an object holding a date value.
Time(hour, minute, second)¶
This function constructs an object holding a time value.
Timestamp(year, month, day, hour, minute, second)¶
This function constructs an object holding a time stamp value.
DateFromTicks(ticks)¶
This function constructs an object holding a date value from the given ticks value (number of seconds since the epoch; see the documentation of the standard Python time module for details).
TimeFromTicks(ticks)¶
This function constructs an object holding a time value from the given ticks value (number of seconds since the epoch; see the documentation of the standard Python time module for details).
TimestampFromTicks(ticks)¶
This function constructs an object holding a time stamp value from the given ticks value (number of seconds since the epoch; see the documentation of the standard Python time module for details).
Binary(string)¶
This function constructs an object capable of holding a binary (long) string value.
STRING¶
This type object is used to describe columns in a database that are string-based (e.g. CHAR).
BINARY¶
This type object is used to describe (long) binary columns in a database (e.g. LONG, RAW, BLOBs).
NUMBER¶
This type object is used to describe numeric columns in a database.
DATETIME¶
This type object is used to describe date/time columns in a database.
ROWID¶
This type object is used to describe the “Row ID” column in a database.

SQL NULL values are represented by the Python None singleton on input and output. Note: Usage of Unix ticks for database interfacing can cause troubles because of the limited date range they cover.
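
As an illustration (not part of the original specification), binding date/time values with the constructors; the releases table and its columns are invented for the example:

import kinterbasdb

con = kinterbasdb.connect(dsn='/temp/test.db', user='sysdba', password='masterkey')
cur = con.cursor()
cur.execute("insert into releases (released_on, built_at) values (?, ?)",
    (kinterbasdb.Date(1991, 2, 20),
     kinterbasdb.Timestamp(1991, 2, 20, 12, 30, 0)))
con.commit()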

Implementation Hints¶

  • The preferred object types for the date/time objects are those defined in the mxDateTime package. It provides all necessary constructors and methods both at Python and C level.
  • The preferred object type for Binary objects are the buffer types available in standard Python starting with version 1.5.2. Please see the Python documentation for details. For information about the the C interface have a look at Include/bufferobject.h and Objects/bufferobject.c in the Python source distribution.
  • Here is a sample implementation of the Unix ticks based constructors for date/time delegating work to the generic constructors:
import time

def DateFromTicks(ticks):
    return apply(Date, time.localtime(ticks)[:3])

def TimeFromTicks(ticks):
    return apply(Time, time.localtime(ticks)[3:6])

def TimestampFromTicks(ticks):
    return apply(Timestamp, time.localtime(ticks)[:6])
  • This Python class allows implementing the above type objects even though the description type code field yields multiple values for one type object:

  class DBAPITypeObject:
      def __init__(self, *values):
          self.values = values
      def __cmp__(self, other):
          if other in self.values:
              return 0
          if other < self.values:
              return 1
          else:
              return -1

The resulting type object compares equal to all values passed to the constructor.
  • Here is a snippet of Python code that implements the exception hierarchy defined above:
  import exceptions

  class Error(exceptions.StandardError):
      pass

  class Warning(exceptions.StandardError):
      pass

  class InterfaceError(Error):
      pass

  class DatabaseError(Error):
      pass

  class InternalError(DatabaseError):
      pass

  class OperationalError(DatabaseError):
      pass

  class ProgrammingError(DatabaseError):
      pass

  class IntegrityError(DatabaseError):
      pass

  class DataError(DatabaseError):
      pass

  class NotSupportedError(DatabaseError):
      pass

In C you can use the `PyErr_NewException(fullname, base, NULL)` API to create the exception objects.

Major Changes from Version 1.0 to Version 2.0¶

The Python Database API 2.0 introduces a few major changes compared to the 1.0 version. Because some of these changes will cause existing DB API 1.0 based scripts to break, the major version number was adjusted to reflect this change. These are the most important changes from 1.0 to 2.0:

  • The need for a separate dbi module was dropped and the functionality merged into the module interface itself.
  • New constructors and Type Objects were added for date/time values, the RAW Type Object was renamed to BINARY. The resulting set should cover all basic data types commonly found in modern SQL databases.
  • New constants (apilevel, threadsafety, paramstyle) and methods (executemany, nextset) were added to provide better database bindings.
  • The semantics of .callproc() needed to call stored procedures are now clearly defined.
  • The definition of the .execute() return value changed. Previously, the return value was based on the SQL statement type (which was hard to implement right) – it is undefined now; use the more flexible .rowcount attribute instead. Modules are free to return the old style return values, but these are no longer mandated by the specification and should be considered database interface dependent.
  • Class based exceptions were incorporated into the specification. Module implementors are free to extend the exception layout defined in this specification by subclassing the defined exception classes.

Open Issues¶

Although the version 2.0 specification clarifies a lot of questions that were left open in the 1.0 version, there are still some remaining issues:

  • Define a useful return value for .nextset() for the case where a new result set is available.
  • Create a fixed point numeric type for use as loss-less monetary and decimal interchange format.

Footnotes¶

[1]As a guideline the connection constructor parameters should be implemented as keyword parameters for more intuitive use and follow this order of parameters: dsn = Data source name as string user = User name as string (optional) password = Password as string (optional) host = Hostname (optional) database = Database name (optional) E.g. a connect could look like this: connect(dsn=’myhost:MYDB’,user=’guido’,password=‘234$?’)
[2]Module implementors should prefer ‘numeric’, ‘named’ or ‘pyformat’ over the other formats because these offer more clarity and flexibility.
[3](1, 2, 3) If the database does not support the functionality required by the method, the interface should throw an exception in case the method is used. The preferred approach is to not implement the method and thus have Python generate an AttributeError in case the method is requested. This allows the programmer to check for database capabilities using the standard hasattr() function. For some dynamically configured interfaces it may not be appropriate to require dynamically making the method available. These interfaces should then raise a NotSupportedError to indicate the non-ability to perform the roll back when the method is invoked.
[4]A database interface may choose to support named cursors by allowing a string argument to the method. This feature is not part of the specification, since it complicates semantics of the .fetchXXX() methods.
[5]The module will use the __getitem__ method of the parameters object to map either positions (integers) or names (strings) to parameter values. This allows for both sequences and mappings to be used as input. The term “bound” refers to the process of binding an input value to a database execution buffer. In practical terms, this means that the input value is directly used as a value in the operation. The client should not be required to “escape” the value so that it can be used – the value should be equal to the actual database value.
[6]Note that the interface may implement row fetching using arrays and other optimizations. It is not guaranteed that a call to this method will only move the associated cursor forward by one row.
[7]The rowcount attribute may be coded in a way that updates its value dynamically. This can be useful for databases that return useable rowcount values only after the first call to a .fetchXXX() method.
kinterbasdb-3.3.0/docs/concurrency.html0000644000175000001440000004037011132652265017364 0ustar pcisarusers Concurrency — KInterbasDB v3.3.0 documentation

Concurrency¶

Overview¶

Note: This section will not be comprehensible unless you understand the basic characteristics of the Firebird server architectures. These are documented in the “Classic or Superserver?” section of the doc/Firebird-1.5-QuickStart.pdf file included with the Firebird distribution.

Versions of KInterbasDB prior to 3.2 imposed a global lock over all database client library calls. This lock, referred to as the Global Database API Lock (GDAL), must be active for multithreaded client programs to work correctly with versions of the Firebird client library that do not properly support concurrency. Many such versions are still in use, so the GDAL remains active by default in KInterbasDB 3.2. To determine whether the client library you’re using can correctly handle concurrent database calls, read this Overview of Firebird Client Library Thread-Safety.

Note that a single client library might have different thread-safety properties depending on which protocol the client program specifies via the parameters of kinterbasdb.connect(). For example, the Firebird 1.5 client library on Windows is thread-safe if the remote protocol is used, as in

kinterbasdb.connect(dsn=r'localhost:C:\temp\test.db', ...)

but is not thread-safe if the local protocol is used, as in

kinterbasdb.connect(dsn=r'C:\temp\test.db', ...)

Selecting and Activating a KInterbasDB Concurrency Level¶

KInterbasDB 3.2 supports three levels of concurrency:

  • Level 0:  No lock management whatsoever. If the C preprocessor symbol ENABLE_CONCURRENCY is not defined when KInterbasDB is compiled, no lock management at all is performed at runtime. In fact, the code to initialize and manage the locks is not even compiled in. Level 0 is intended only for compiling KInterbasDB on non-threaded builds of the Python interpreter. It would not be desirable for a client program running on a normal (threaded) build of the Python interpreter to use Level 0, so no overhead is invested in making it possible to transition to Level 0 at runtime. Since Level 0 is intended for use in Python interpreters that have no Global Interpreter Lock (GIL), the GIL is not manipulated.
  • Level 1:  Global Database API Lock (GDAL) is active (this is the default level). At Level 1, a global lock serializes all calls to the database client library. This lock, called the Global Database API Lock (GDAL), is to the database client library as the GIL is to the Python interpreter: a mechanism to guarantee that at most one thread is using the database client library at any time. Level 1 exists to support those versions of Firebird in which the client library is not thread-safe at the connection level (see the Overview of Firebird Client Library Thread-Safety for details). In environments where the author of KInterbasDB creates binaries and distributes them to client programmers, there is no way of knowing at compile time which Firebird client library configuration the KInterbasDB binaries will be used with. Level 1 protects client programmers who are not aware of the thread-safety properties of their version of the client library. For these reasons, Level 1 is the default, but Level 2 can be selected at runtime via the kinterbasdb.init() function (see next section). At Level 1, the Python GIL is released and reacquired around most database client library calls in order to avoid blocking the entire Python process for the duration of the call.
  • Level 2:  Global Database API Lock (GDAL) is not active, but connection and disconnection are serialized via the GCDL. At Level 2, calls to the database client library are not serialized, except for calls to the connection attachment and detachment functions, which are serialized by a lock called the Global Connection and Disconnection Lock (GCDL). This limited form of serialization is necessary because the Firebird client library makes no guarantees about the thread-safety of connection and disconnection. Since most client programs written with high concurrency in mind use a connection pool that minimizes the need to physically connect and disconnect, the GCDL is not a serious impediment to concurrency. Level 2, which can be activated at runtime by calling kinterbasdb.init(concurrency_level=2), is appropriate for client programmers who are aware of the thread-safety guarantees provided by their version of the Firebird client library, and have written the client program accordingly. For details about the thread-safety of various Firebird client library versions, see the Overview of Firebird Client Library Thread-Safety. At Level 2, the Python GIL is released and reacquired around most database client library calls, just as it is at Level 1.

Level 1 is the default, so if you don’t understand these subtleties, or are using a client library configuration that is not thread-safe, you do not need to take any action to achieve thread-safety.

Level 2 can greatly increase the throughput of a database-centric, multithreaded Python application, so you should use it if possible. Once you’ve determined that you’re using an appropriate connection protocol with a capable client library, you can activate Level 2 at runtime with the following call:

kinterbasdb.init(concurrency_level=2)

The kinterbasdb.init function can only be called once during the life of a process. If it has not been called explicitly, the function will be called implicitly when the client program tries to perform any database operation. Therefore, the recommended place to call kinterbasdb.init is at the top level of one of the main modules of your program. The importation infrastructure of the Python interpreter serializes all imports, so calling kinterbasdb.init at import time avoids the potential for multiple simultaneous calls, which could cause subtle problems.
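
For example, a minimal sketch (the module names are invented for illustration):

# main.py -- the application's top-level module
import kinterbasdb

# Runs exactly once, at import time, before any thread touches the database:
kinterbasdb.init(concurrency_level=2)

import app_db_layer  # hypothetical module; safe, because init() has already run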

Caveats¶

  • threadsafety versus concurrency_level Make sure not to confuse KInterbasDB’s concurrency_level with its threadsafety. threadsafety, a module-level property required by the Python DB API Specification 2.0, represents the highest level of granularity at which the DB API implementation remains thread-safe. KInterbasDB is always “thread-safe at the connection level” (DB API threadsafety 1), regardless of which concurrency_level is active. Think of threadsafety as the level of thread-safety that KInterbasDB guarantees, and concurrency_level as the degree to which KInterbasDB’s internals are able to exploit a client program’s potential for concurrency.

Tips on Achieving High Concurrency¶

  • Use the Classic server architecture, but the SuperServer client library. At the time of this writing (December 2005), the thread-centric Vulcan had not been released, so the multi-process Classic architecture was the only Firebird server architecture that could take advantage of multiple CPUs. This means that in most scenarios, Classic is far more concurrency-friendly than SuperServer. The Windows version of Firebird, whether Classic or SuperServer, offers a single client library, so the following advice is not relevant to Windows. The non-Windows versions of Firebird Classic include two client libraries:

    • fbclient (libfbclient.so) communicates with the server solely via the network protocol (possibly over an emulated network such as the local loopback). fbclient is thread-safe in recent versions of Firebird.
    • fbembed (libfbembed.so) uses an in-process Classic server to manipulate the database file directly. fbembed is not thread-safe in any version of Firebird; it should never be used with KInterbasDB concurrency level 2.

    At present, the best way to achieve a concurrency-friendly KInterbasDB/Firebird configuration is to use a version of KInterbasDB linked against a thread-safe fbclient, running at concurrency level 2, and communicating with a Classic server. On Linux, such a setup can be created by installing the Classic server, then compiling KInterbasDB with the database_lib_name option in setup.cfg set to fbclient (this is the default setting). A version of KInterbasDB that was linked against fbembed (by setting database_lib_name=fbembed) will not work in a multithreaded program if the concurrency level is higher than 1. On Windows, use a Classic server in combination with one of the standard KInterbasDB Windows binaries for Firebird 1.5 or later, and be sure to set KInterbasDB’s concurrency level to 2.

kinterbasdb-3.3.0/docs/links.html0000644000175000001440000002316111132652265016151 0ustar pcisarusers KInterbasDB Links — KInterbasDB v3.3.0 documentation kinterbasdb-3.3.0/docs/changelog.html0000644000175000001440000027556111133077256016777 0ustar pcisarusers KInterbasDB Changelog — KInterbasDB v3.3.0 documentation

KInterbasDB Changelog¶

Version 3.3¶

New Features¶

  • It is now possible to use multiple transactions simultaneously on a single kinterbasdb.Connection. See discussion: http://sourceforge.net/forum/forum.php?thread_id=1597658&forum_id=30917

  • If a unicode object is passed as the SQL statement to any of

    • kinterbasdb.create_database
    • Connection.execute_immediate
    • Cursor.prep
    • Cursor.execute

    KInterbasDB will attempt to encode it to the character set of the file system (in the case of kinterbasdb.create_database), or to the character set of the connection in the other cases. Previously, only unicode objects that could be encoded to ASCII were accepted.
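
    For example (a minimal sketch against the tutorial's languages table):

    import kinterbasdb

    con = kinterbasdb.connect(dsn='bison:/temp/test.db', user='sysdba',
        password='pass', charset='UTF8')
    cur = con.cursor()
    # The unicode statement is encoded to the connection's charset (UTF8 here):
    cur.execute(u"select name from languages where name = 'Python'")
    print cur.fetchall()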

  • Documentation was extended and completely redone using reStructured text and Sphinx (http://sphinx.pocoo.org)

Backward-incompatibilities¶

  • Default type conversion setting was changed to type_conv=200. Applications that don’t call kinterbasdb.init() and rely on Python older than 2.4 and/or mx.DateTime and/or explicit unicode conversion must call kinterbasdb.init() with type_conv=1 as the first thing after importing kinterbasdb. Applications that explicitly call kinterbasdb.init() don’t need to be changed at all.

    Details about the new default setting are described in the Deferred Loading of Dynamic Type Translators subsection of the Parameter Conversion section of the KInterbasDB documentation.
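
    For example, a legacy application would begin with:

    import kinterbasdb
    # Explicitly restore the pre-3.3 defaults (mx.DateTime handling, etc.):
    kinterbasdb.init(type_conv=1)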

Version 3.2.2¶

Bug Fixes¶

  • Fixed a bug that produced a malformed Transaction Parameter Block structure. It surfaced with Firebird 2.1, which is stricter about TPB correctness.
  • Fixed a bug in the Services API under Firebird 2.1.

Version 3.2.1¶

Bug Fixes¶

Version 3.2¶

Bug Fixes¶

  • At concurrency_level 1, it was possible for a deadlock to occur if KInterbasDB simultaneously raised an exception in one thread while executing a SQL statement in another. This problem did not affect concurrency_level 2.

    Thanks to Atsuo Ishimoto for reporting this bug.

  • The official implementation of the automagic TEXT_UNICODE type translator (in the kinterbasdb.typeconv_text_unicode module) was missing support for the new character sets introduced in Firebird 2.0 (namely, the corrected version of UTF8, plus KOI8-R, KOI8-U, and WIN1258).

    Thanks to Oleg Deribas for bringing this to my attention.

Version 3.2rc1¶

Bug Fixes¶

New Features¶

  • KInterbasDB now stores and retrieves the undocumented sub-second component of TIME and TIMESTAMP fields.

    Thanks to Petr Jakes and Helen Borrie for bringing the availability of the sub-second data to my attention.

  • Passing None to Cursor.execute (instead of a SQL string or a PreparedStatement) now executes the most recently prepared/executed statement, if any. This can enhance convenience because it frees the client programmer from the responsibility of separately tracking the most recent statement in order to execute it again.

    Thanks to Igor Youdytsky for suggesting this feature.
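
    A minimal sketch of the convenience this provides (assuming, as an illustration, that a fresh parameter set may accompany the None):

    import kinterbasdb

    con = kinterbasdb.connect(dsn='/temp/test.db', user='sysdba', password='masterkey')
    cur = con.cursor()
    cur.execute("insert into languages (name, year_released) values (?, ?)",
        ('Ada', 1983))
    # Passing None re-executes the most recently prepared/executed statement:
    cur.execute(None, ('Eiffel', 1986))
    con.commit()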

  • PreparedStatements now have a read-only ‘description’ property that contains a Python DB API 2.0 description sequence of the same format as Cursor.description.

    Thanks to Alexandr Zamaraev for suggesting this feature. For more info, see: http://kinterbasdb.sf.net/dist_docs/usage.html#PreparedStatement_description
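
    A minimal sketch, reusing the tutorial's languages table:

    import kinterbasdb

    con = kinterbasdb.connect(dsn='/temp/test.db', user='sysdba', password='masterkey')
    cur = con.cursor()
    ps = cur.prep("select name, year_released from languages")
    print ps.description  # same 7-item-per-column format as Cursor.description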

  • The following query and resolution methods for limbo transactions have been added to the kinterbasdb.services.Connection class: getLimboTransactionIDs, commitLimboTransaction, rollbackLimboTransaction.

Backward-incompatibilities¶

  • Dynamic Type Translators for TIME and TIMESTAMP fields must now accommodate an additional tuple element: an integer which represents microseconds. The official TIME and TIMESTAMP type translators in typeconv_datetime_naked.py, typeconv_datetime_stdlib.py, and typeconv_datetime_mx.py have been updated, and can be used as a guide.

Version 3.2b1¶

Bug Fixes¶

  • Previously, if KInterbasDB detected that the field to which a NULL value was bound was defined as NOT NULL, KInterbasDB itself immediately raised an exception. This caused problems for fields defined as NOT NULL but populated by BEFORE triggers, so KInterbasDB now submits the illegal NULL and allows the database engine to make the decision.
  • Fixed an obscure memory leak in Connection.drop_database.
  • Fixed a few more compatibility problems with Interbase 7.x.
  • kinterbasdb.Cursor can again be used as a base class for user-defined subclasses. This capability had been removed in KInterbasDB 3.2a1.

New Features¶

  • Connection timeouts: KInterbasDB connections can now be asked to time out after a specified period of inactivity. This feature is not supported by the Firebird C API, so it is implemented entirely at the KInterbasDB level. For more info, see: http://kinterbasdb.sf.net/dist_docs/usage.html#adv_ct
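
    The exact shape of the timeout parameter in the following sketch is an assumption; the usage.html#adv_ct page linked above is authoritative:

    import kinterbasdb

    # Assumed parameter shape: time out after 120 seconds of inactivity.
    con = kinterbasdb.connect(dsn='bison:/temp/test.db', user='sysdba',
        password='pass', timeout={'period': 120.0})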

  • Added a TPB class to assist with the construction of complex Transaction Parameter Buffers.

    This feature has not yet been documented. In the meantime, you can find example code in the test_transactions.py module of the KInterbasDB test suite: http://kinterbasdb.sf.net/test-suite/releases/
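
    In the spirit of that example code, a hedged sketch; the attribute and constant names below are assumptions drawn from the test suite, not documented API:

    import kinterbasdb

    con = kinterbasdb.connect(dsn='/temp/test.db', user='sysdba', password='masterkey')
    tpb = kinterbasdb.TPB()
    tpb.access_mode = kinterbasdb.isc_tpb_read      # assumed: read-only transaction
    tpb.lock_resolution = kinterbasdb.isc_tpb_wait  # assumed: wait on lock conflicts
    con.begin(tpb=tpb)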

  • Added methods Connection.transaction_info and Connection.trans_info. transaction_info is a thin wrapper around the C function isc_transaction_info, while trans_info is a Pythonic wrapper around transaction_info.

    This feature has not yet been documented. In the meantime, you can find example code in the test_services.py module of the KInterbasDB test suite: http://kinterbasdb.sf.net/test-suite/releases/

  • Exposed the Firebird header constant FB_API_VER as kinterbasdb.FB_API_VER. This integer represents the version of Firebird against which KInterbasDB was compiled, as follows:

    • Any version of Interbase, or Firebird 1.0.x: 10
    • Firebird 1.5.x: 15
    • Firebird 2.0.x: 20
  • KInterbasDB now raises a kinterbasdb.TransactionConflict exception (instead of the rather generic ProgrammingError) when it receives a server-side transaction conflict notification. This makes it easier for the client programmer to detect and resolve deadlocks.

    TransactionConflict is a subclass of kinterbasdb.OperationalError.

Backward-incompatibilities¶

  • Client programs that encounter transaction conflicts in routine operation, and which contain logic to deal with this type of exception specifically (on the basis of the payload of the ProgrammingError) should be updated to use:

    try:
      ...
    except kinterbasdb.TransactionConflict:
      ...
    

    instead.

    For more info, see the last item under “New Features” above.

Version 3.2a1¶

New Features¶

Bug Fixes¶

  • KInterbasDB should now compile and run out of the box with Interbase 7.x. DSR doesn’t have that version of Interbase, however, so KInterbasDB is not actually tested with it.

Backward-incompatibilities¶

  • KInterbasDB 3.2 has dropped support for Python versions earlier than 2.3, and (officially) supports only Firebird 1.0 and later. However, Interbase 7.x support has been considerably enhanced, so it could be said that Interbase is “unofficially” supported.

  • Most Python classes in KInterbasDB have become new-style, for symmetry with the new-style C classes added in 3.2. Notably, kinterbasdb.Cursor is now a new-style class written in pure C.

    Impact rating: Low (There is practically no reason for a client program to access the affected KInterbasDB classes in such a way that this change would matter.)

  • Previously, the “infinite timeout value” for EventConduit.wait was 0.0. The choice of that value was a terrible mistake, because attempting to specify an extremely short timeout with a value such as 0.000000000000001 in fact created an infinite timeout. The new “infinite timeout value” is -1.0.

    Impact rating: Low-Medium (The Usage Guide for KInterbasDB 3.1 specified that “The default timeout is infinite.”, but it did not guarantee a particular value. Client programs that use both events and event timeouts should be checked, however.)

Version 3.1.3¶

Bug Fixes¶

Version 3.1.2¶

Bug Fixes¶

  • Attempting to apply dynamic type translation settings dictionaries that had non-string keys caused a segfault under some circumstances.
  • kinterbasdb’s Services API infrastructure parsed integer results from the engine as recommended by the IB 6 API Guide, but this was inappropriate on non-Windows x86-64, and could cause invalid memory access.
  • Input handling of INTEGER ARRAY fields did not work correctly on non-Windows x86-64.
  • Overriding a connection’s dynamic type translation settings for a particular slot with the “naked” translator by passing None as the translator to Cursor.set_type_trans_[in|out] did not work.
  • The FIXED dynamic type translation slot was not handled properly on dialect 1 connections (dialect 1 databases store NUMERIC/DECIMAL values with precisions 10-18 internally as floating point).
  • Documentation bug: The “Using KInterbasDB with Embedded Firebird” section of the Usage Guide stated that the Services API did not work with the embedded server architecture. That was written when Firebird 1.5 was in alpha; the Services API does work with embedded Firebird 1.5.2.

Version 3.1.1¶

Bug Fixes¶

  • kinterbasdb.init(type_conv=100|200) didn’t work under Python 2.4. Late in the Python 2.4 development cycle, additional constraints were introduced in Python’s funcobject.c that defeated kinterbasdb’s attempts to manipulate the ‘func_code’ attribute of some functions during the execution of kinterbasdb.init.
  • C type ConnectionType’s destructor was called directly (rather than as a result of DECREF) if an error arose in kinterbasdb.connect or kinterbasdb.create_database. This triggered a refcount assertion in debug builds of Python.
  • Fixed a reference count leak in the C layer’s central exception-raising function.
  • Fixed some potential memory handling problems in exceptional situations in the event handling code.
  • A trivial problem prevented kinterbasdb 3.1 from compiling with the prereleases of Firebird 2.0.

New Features¶

  • In typeconv_text_unicode.py, enabled auto-translation of some Asian Unicode codecs that didn’t enter the Python standard library until Python 2.4.

Version 3.1¶

Bug Fixes¶

  • Fixed minor problems with the Connection.database_info method.

Version 3.1_pre9¶

Version 3.1_pre9 is being released instead of 3.1 final primarily to test Python 2.4 compatibility. Since the first beta of Python 2.4 has now been released, it is expected that these binaries will continue to work throughout 2.4’s lifespan (including maintenance releases - 2.4.x).

New Features¶

  • Python 2.4 support (that is, a few build script changes and the availability of official Windows binaries for Python 2.4).

Bug Fixes¶

Version 3.1_pre8¶

Version 3.1_pre8 is the recommended stable version of kinterbasdb.

New Features¶

  • kinterbasdb._RowMapping has a richer dict-like interface (now implements __len__, __getitem__, get, __contains__, keys, values, items, __iter__, iterkeys, itervalues, iteritems).

Bug Fixes¶

Backward-incompatibilities¶

  • The kinterbasdb.services.Connection.getEnvironmentMessage method has been renamed to getMessageFileDir.
  • The kinterbasdb.services.Connection.getLog method should not have accepted a database parameter; it no longer does.

DOCUMENTATION CHANGES¶

  • Documented about 66% of the Services API (kinterbasdb.services module) in the KInterbasDB Usage Guide.

KNOWN ISSUES¶

  • The third-party fixedpoint.py module contains an incompatibility with Python 2.1 that is exposed by a bugfix applied to the kinterbasdb.typeconv_fixed_fixedpoint module in 3.1_pre8.

    No attempt will be made to fix this problem (which is a fixedpoint bug, not a kinterbasdb bug); users should either upgrade to a newer version of Python or refrain from using fixedpoint.

Version 3.1_pre7¶

Version 3.1_pre7 should be considered a release candidate. It is thought to be ready for production use.

New Features¶

  • Introduced dynamic type translation slot TEXT_UNICODE, which applies to all CHAR/VARCHAR fields except those with character sets NONE, OCTETS, or ASCII. Used in combination with the official translators in the kinterbasdb.typeconv_text_unicode module, TEXT_UNICODE enables automatic encoding/decoding of Unicode values.

    This translator is not active by default except when kinterbasdb is initialized with kinterbasdb.init(type_conv=100); the backward compatibility implications are discussed in detail in the Backward-incompatibilities section below.

    Refs: docs/usage.html#faq_fep_unicode
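
    A minimal sketch of the automatic encoding/decoding this enables:

    import kinterbasdb
    kinterbasdb.init(type_conv=100)  # activates the TEXT_UNICODE slot

    con = kinterbasdb.connect(dsn='bison:/temp/test.db', user='sysdba',
        password='pass', charset='UTF8')
    cur = con.cursor()
    cur.execute("select name from languages")
    row = cur.fetchone()  # eligible CHAR/VARCHAR values arrive as unicode objects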

  • Added read-only .charset attribute to Connection.

  • On Windows, kinterbasdb now conforms to the client library loading scheme introduced in FB 1.5 RC7, so fbclient.dll need not be explicitly placed in a directory on the PATH if the registry settings are present.

Bug Fixes¶

Backward-incompatibilities¶

  • Programs that use BOTH of the following:

    • the TEXT dynamic type translation slot
    • unicode database fields

    will need to be updated to take the new TEXT_UNICODE slot into account. Since the TEXT slot is not particularly useful, this incompatibility is expected to affect very few existing programs.

    Refs: docs/usage.html#faq_fep_unicode

  • Convenience code 100 for the kinterbasdb.init function now activates the new TEXT_UNICODE translation slot, so unicode values are automatically encoded and decoded.

    Convenience code 1 remains the default, however, and it does not activate the TEXT_UNICODE slot. Programs that do BOTH of the following:

    • invoke kinterbasdb.init(type_conv=100)
    • use unicode database fields

    will need to be updated to take the new TEXT_UNICODE slot into account.

    Refs: docs/usage.html#adv_param_conv_dynamic_type_translation_tbl_convenience_codes

Version 3.1_pre6¶

Version 3.1_pre6 should be considered a release candidate. It is thought to be stable.

New Features¶

  • Added support for manual control over the phases of two-phase commit. The client programmer now has the option of triggering the first phase manually via Connection.prepare() or ConnectionGroup.prepare().

    This is useful when integrating with third-party transaction managers.

  • KInterbasDB can now be compiled “out of the box” with MinGW when building against Firebird 1.5 (but not Firebird 1.0 or Interbase).

    See docs/installation-source.html for instructions.

Bug Fixes¶

  • Connection.drop_database() now rolls back the connection’s active transaction (if any) before dropping the database.

    Previously, the database could be dropped with the transaction still active; when the connection was subsequently garbage collected, a rollback request was issued for the transaction (in a nonexistent database), resulting in memory corruption.

  • String values returned by input dynamic type translators were sometimes prematurely garbage collected before the database engine had read their contents.

  • SQL fields with dynamic definitions (such as expressions in a SELECT list) that involved fixed point data types (NUMERIC or DECIMAL) didn’t get passed through the FIXED dynamic type translator because the database engine does not flag dynamically defined fields properly.

    Though this is a bug in the database engine rather than KInterbasDB, a workaround was added to KInterbasDB.

    Thanks to Bert Hughes for reporting this bug.

  • The installation action of the setup script (‘setup.py install’) did not place the supporting files (documentation) in the proper directory on Linux.

    Thanks to Treeve Jelbert for reporting this bug.

Version 3.1_pre5¶

Version 3.1_pre5 should be considered a release candidate. It is thought to be stable.

New Features¶

  • Deferred loading of dynamic type translators:

    KInterbasDB’s choice of initial dynamic type translators for date/time and fixed point types is now deferred as late as possible, and the programmer has the option of controlling the choice via the type_conv parameter of the new kinterbasdb.init function.

    This feature is documented in the Usage Guide at: usage.html#adv_param_conv_dynamic_type_translation_deferred_loading

  • KInterbasDB’s setup script is now capable of compiling the source distribution “out of the box” with MinGW on Windows, but only with Firebird 1.5 or later (Borland C++ can be used with Firebird 1.0).

    This feature is documented in the installation guide for the source distribution at:

    installation-source.html#compiler_specific_compilation_notes

Bug Fixes¶

  • During blob insertion, not enough memory was allocated to hold the blob ID returned by the database engine, resulting in an overflow.
  • Implicit conversion of DATE/TIME/TIMESTAMP input parameters from strings to the appropriate internal types was accidentally disallowed in 3.1_pre4. This feature has been enabled again.
  • The Services API method kinterbasdb.services.Connection.restore was incapable of restoring a backup into a multi-file database because it sent the wrong cluster identifier for destination file page counts.

Backward-incompatibilities¶

  • Because of the new “Deferred loading of dynamic type translators” feature, the DB API type comparison singleton kinterbasdb.DATETIME will not compare equal to any type until the kinterbasdb.init function has been called (whether explicitly or implicitly).

    This issue, which is expected to affect little or no existing code, is documented in the Usage Guide at:

    usage.html#adv_param_conv_dynamic_type_translation_deferred_loading_backcompat
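
    A sketch of the consequence (connection parameters hypothetical):

    import kinterbasdb
    kinterbasdb.init(type_conv=1)  # before this call, kinterbasdb.DATETIME matches no type

    con = kinterbasdb.connect(dsn='/tmp/test.fdb', user='sysdba', password='masterkey')
    cur = con.cursor()
    cur.execute("select cast('now' as timestamp) from rdb$database")
    assert cur.description[0][1] == kinterbasdb.DATETIME  # the type_code slot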

  • The dynamic type translation module typeconv_preferred has been renamed to typeconv_23plus.

Version 3.1_pre4¶

Version 3.1_pre4 should be considered a late beta release. It is thought to be stable, and there are no plans to add new features before 3.1 final (only to fix bugs and finish updating the documentation).

Note that the KInterbasDB Usage Guide has been considerably updated, though it is not quite complete. When complete, it will document all of the numerous new features in kinterbasdb 3.1; it’s a “must read” even now.

The Usage Guide is distributed with KInterbasDB (kinterbasdb-installation-dir/docs/usage.html), and is available online at: http://cvs.sourceforge.net/cgi-bin/viewcvs.cgi/checkout/kinterbasdb/Kinterbasdb-3.0/docs/usage.html

New Features¶

  • DATABASE EVENT HANDLING has been reinstated and ported to POSIX, and timeout support has been added.

    This feature is thoroughly documented in the updated Usage Guide.

    Refs: http://sourceforge.net/tracker/index.php?func=detail&aid=637796&group_id=9913&atid=109913

  • DISTRIBUTED TRANSACTIONS are now supported via the kinterbasdb.ConnectionGroup class.

    Although the Usage Guide does not yet fully document this feature, it does contain an example program and a few hints: http://cvs.sourceforge.net/cgi-bin/viewcvs.cgi/checkout/kinterbasdb/Kinterbasdb-3.0/docs/usage.html#adv_trans_control_distributed
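
    A condensed sketch of the pattern (hosts, paths, and tables are hypothetical):

    import kinterbasdb

    conA = kinterbasdb.connect(dsn='hostA:/db/a.fdb', user='sysdba', password='masterkey')
    conB = kinterbasdb.connect(dsn='hostB:/db/b.fdb', user='sysdba', password='masterkey')

    group = kinterbasdb.ConnectionGroup()
    group.add(conA)
    group.add(conB)

    group.begin()
    conA.cursor().execute("insert into log (msg) values ('a')")
    conB.cursor().execute("insert into log (msg) values ('b')")
    group.commit()  # two-phase commit spanning both databases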

  • DYNAMIC TYPE TRANSLATION

    KInterbasDB 3.1_pre4 implements two-way “dynamic type translation”. This feature allows the client programmer to change the type conversion methods for specific SQL data types and achieve complete “type transparency”. For example, KInterbasDB 3.1_pre4 includes reference implementations of converters for both input and output of ‘mx.DateTime’ and Python 2.3 stdlib ‘datetime’ for TIME/DATE/TIMESTAMP fields.

    One consequence of two-way dynamic type translation support is that KInterbasDB 3.1_pre4 can be used with Python 2.3’s datetime module occupying the role formerly filled by mx.DateTime. For backward compatibility, mx.DateTime is still the default, and it will remain so.

    This feature is documented in the updated Usage Guide.
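
    For instance, given an established connection con, it can be switched to the stdlib datetime converters with something like the following sketch (the translation-slot keys shown here should be checked against the Usage Guide):

    from kinterbasdb import typeconv_datetime_stdlib as tc_dt

    con.set_type_trans_in({
        'DATE':      tc_dt.date_conv_in,
        'TIME':      tc_dt.time_conv_in,
        'TIMESTAMP': tc_dt.timestamp_conv_in,
    })
    con.set_type_trans_out({
        'DATE':      tc_dt.date_conv_out,
        'TIME':      tc_dt.time_conv_out,
        'TIMESTAMP': tc_dt.timestamp_conv_out,
    })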

  • Cursor.rowcount support has been added (insofar as the database engine supports it).

    This feature is documented in the updated Usage Guide.

    Refs: http://sourceforge.net/forum/forum.php?thread_id=866629&forum_id=30917
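
    For example, given an open cursor cur (the table is hypothetical):

    cur.execute("delete from invoice where paid = 1")
    print cur.rowcount  # rows affected where the engine reports it; otherwise -1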

  • SAVEPOINTs (a Firebird 1.5 feature) are exposed at the Python level via the Connection.savepoint(savepoint='name') method and the optional savepoint argument to the Connection.rollback method.

    This feature is documented in the updated Usage Guide.
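
    A sketch of the intended usage (Firebird 1.5+ only; the table is hypothetical):

    con.begin()
    cur = con.cursor()
    cur.execute("insert into t (n) values (1)")
    con.savepoint('sp1')
    cur.execute("insert into t (n) values (2)")
    con.rollback(savepoint='sp1')  # undoes only the second insert
    con.commit()                   # the first insert is committed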

  • New attributes suggested by the “Optional DB API Extensions” section of PEP 249:

    • Access to a cursor’s connection via the Cursor.connection attribute.
    • Access to kinterbasdb’s exception classes via connection attributes.

    Refs: http://www.python.org/peps/pep-0249.html
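
    Both extensions in a sketch:

    cur = con.cursor()
    assert cur.connection is con  # a cursor knows its connection

    try:
        cur.execute("select no_such_column from rdb$database")
    except con.ProgrammingError:  # exception classes exposed on the connection
        pass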

  • A cursor can now be reused after it has caused an exception.

Bug Fixes¶

  • Passing the wrong number of parameters for a parameterized SQL statement sometimes caused a crash instead of an exception with kinterbasdb 3.1_pre3. This would not have affected client programs that were written correctly, but it was still a bug.

  • The kinterbasdb.create_database function leaked memory if it encountered an error.

  • Additional Windows binaries are being released to avoid dynamic linking problems with Interbase 5.5 and Firebird 1.5-embedded.

    Refs: http://sourceforge.net/tracker/index.php?func=detail&aid=707644&group_id=9913&atid=109913 http://sourceforge.net/forum/forum.php?thread_id=855348&forum_id=30917

  • kinterbasdb now builds with less hassle on FreeBSD.

    Refs: http://sourceforge.net/tracker/index.php?func=detail&aid=720021&group_id=9913&atid=109913

  • Whenever a transactional context is needed, a transaction is now started implicitly if the Python programmer has not started one explicitly with Connection.begin. This behavior is implicitly required by the Python DB API specification.

    Refs: http://mail.python.org/pipermail/db-sig/2003-February/003158.html

  • The mapping objects returned from the Cursor.fetch*map() methods now accept “double-quoted” field names (lookup keys). If the field name is double-quoted, its original case will be preserved, instead of being normalized to upper case, when the result set is searched for a field with that name.

    For example, if a table were defined this way:

    create table tbl ("sTRanGelyCasEDfieldnAme" integer)

    and the statement:

    cur.execute("select * from tbl")
    

    were executed against it, the mapping objects returned by:

    cur.fetchonemap()
    

    would have rejected the lookup key ‘sTRanGelyCasEDfieldnAme’, converting it instead to ‘STRANGELYCASEDFIELDNAME’ and then failing to find the upper-cased field name.

    The solution available in 3.1_pre4 is to perform the lookup this way:

    cur.execute("select * from tbl")
    mapping = cur.fetchonemap()
    mapping['"sTRanGelyCasEDfieldnAme"']
             ^-----double-quoted-----^

    which will force the preservation of the field name’s case.

    An easy way to avoid problems such as this is to refrain from using quoted identifiers; in that case, the database engine will treat identifiers in a case-insensitive manner.

    Refs: http://sourceforge.net/tracker/index.php?func=detail&aid=720130&group_id=9913&atid=109913

INTERNAL CHANGES¶

  • kinterbasdb now implements its standard date/time and fixed point handling via the new, general-purpose dynamic type translation feature.

    This eliminates the C-compile-time dependency on mx.DateTime. Although mx.DateTime (for date/time types) and Connection.precision_mode (for precise fixed point types) still function as before, dynamic type translation allows other types to be transparently substituted (such as those in Python 2.3’s standard library datetime module for date/time types, or those in the fixedpoint module for precise fixed point types).

    For more information, see the Usage Guide.

Backward-incompatibilities¶

There are no outright incompatibilities, but there is one deprecation:

  • Although Connection.precision_mode continues to function as in earlier versions, it is deprecated in favor of dynamic type translation. The functionality that Connection.precision_mode delivers (precise I/O of fixed point values) is now implemented at the Python level via dynamic type translation, rather than at the C level.

    If you explicitly use both Connection.precision_mode and dynamic type translation, beware that changing the value of Connection.precision_mode will cause changes to the registered dynamic type converters under the hood.

    For more information, see the INTERNAL CHANGES section above, and the Usage Guide.

Version 3.1_pre3¶

Version 3.1_pre3 should be considered a beta release.

New Features¶

  • database array support

    Database arrays are mapped from Python sequences (except strings) on input; to Python lists on output. On output, the lists will be nested if the database array has multiple dimensions.

    I’m not impressed by the Interbase/Firebird implementation of database arrays. The database engine claims to support up to 16 dimensions, but actually malfunctions catastrophically* above 10.

    The arrays are of fixed size, with a predeclared number of dimensions and number of elements per dimension. Individual array elements cannot be set to NULL/None**, so the mapping between Python lists (which have dynamic length and are therefore not normally null-padded) and non-trivial database arrays is clumsy.

    Arrays cannot be passed as parameters to, or returned from, stored procedures.

    Finally, many interface libraries, GUIs, and even the isql command line utility do not support arrays. Refs:

    ** Interbase 6 API Guide page 153.
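
    Despite those caveats, basic usage is straightforward; a sketch, assuming an open cursor cur and a column declared as, e.g., arr integer [4]:

    cur.execute("insert into t (arr) values (?)", ([1, 2, 3, 4],))
    cur.execute("select arr from t")
    print cur.fetchone()[0]  # -> [1, 2, 3, 4]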

  • retaining commit/retaining rollback

    The commit() and rollback() methods of kinterbasdb.Connection now accept an optional boolean parameter ‘retaining’ (default False). If retaining is True, the infrastructural support for the transaction active at the time of the method call will be “retained” (efficiently and transparently recycled) after the database server has committed or rolled back the conceptual transaction.

    In code that commits or rolls back frequently, ‘retaining’ the transaction yields considerably better performance. ‘retaining’ will become the default at some point in the future if the switch can be made without serious backward compatibility issues.

    Refs: http://sourceforge.net/forum/forum.php?thread_id=799246&forum_id=30917 Interbase 6 API Guide page 74.
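
    For example:

    con.commit(retaining=True)    # commit, but recycle the transaction context
    con.rollback(retaining=True)  # likewise for rollback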

  • unicode strings can now be executed via:

    • kinterbasdb.Cursor.execute[many]()
    • kinterbasdb.Cursor.callproc()
    • kinterbasdb.Connection.execute_immediate()

    However, the encoding of the incoming unicode string is rather simplistic: it is performed via PyUnicode_AsASCIIString.

Bug Fixes¶

  • Addressed buffer overflow potential in:

    • kinterbasdb.create_database()
    • kinterbasdb.connect()
    • kinterbasdb.Connection.begin()
    • kinterbasdb.Connection.execute_immediate()
    • kinterbasdb.Cursor.execute() (and thence, executemany() and callproc())
  • Fixed reference count leaks in:

    • exception handling (_exception_functions.c)
    • field precision determination (_kiconversion_field_precision.c)
  • Fixed kinterbasdb.Connection.close() bug: The physical connection to the database server was not actually closed until the kinterbasdb.Connection instance was garbage collected.

  • Fixed a bug in the kinterbasdb.services.Connection.userExists() method. Usernames are now normalized to upper case.

  • Database version compatibility:

    • kinterbasdb compiles properly against Firebird 1.5.

    • kinterbasdb compiles against and ought to work with (but has not been tested with) Interbase 5.5, albeit with some lost functionality, namely:

      • field precision determination (the precision entry in cursor.description)
      • Services API support
      • retaining rollback
      • various data storage options, such as precise 64-bit integer storage of NUMERIC and DECIMAL values (IB 5.5 uses doubles instead, which is not really adequate) and more diverse date/time types.

    Refs: http://sourceforge.net/tracker/index.php?func=detail&aid=627816&group_id=9913&atid=109913 IB 6 Data Definition Guide page 65.

  • Improved DB API compliance:

    • Now, there need not be an active transaction before any execute(), commit(), or rollback() call; transaction establishment is implicit in these cases.
    • Cursors no longer need to be discarded after an exception; the same cursor can be reused. Of course if the cursor was in the process of fetching a result set, the remainder of the set will not be available after the exception.

INTERNAL CHANGES¶

  • Numerous modest optimizations, especially with regard to memory handling. Among these is a move to take advantage of Python 2.3’s specialized, Python-oriented memory manager.
  • MAJOR code refactoring and tidying.

Backward-incompatibilities¶

  • Invalid argument combinations to the connect() function now raise a ProgrammingError rather than an InterfaceError. Note that this refers to invalid combinations of arguments, not necessarily to invalid values for those arguments.

  • Non-keyword-argument forms of connect() are now deprecated; passing non-keyword arguments to connect() results in a DeprecationWarning being issued via the standard Python warning framework. This is a warning, not an incompatibility in the strict sense.

    Refs: http://www.python.org/doc/current/lib/module-warnings.html

  • Official support for database event handling has been deferred until 3.2. A Win32-only prototype will still be included with the kinterbasdb 3.1 source distribution (but not compiled by default).

    Refs: docs/usage.html#database_events_unsupported

Version 3.1_pre2¶

New Features¶

  • Global Interpreter Lock management

    Previously, kinterbasdb operated in a serial manner, with the sole exception of the event handling code, whose parallelism is “under the hood”. Aside from event handling, all kinterbasdb operations, including potentially long-running Firebird API calls, were serialized by the Python GIL.

    With the advent of kinterbasdb 3.1_pre2, kinterbasdb releases the GIL where appropriate: that is, when it is about to make a potentially long-running Firebird API call, and can do so without invoking the Python API or otherwise operating on Python structures.

    However, the Firebird client library itself is not threadsafe, so Firebird API calls must also be serialized. To that end, kinterbasdb maintains a process-wide thread lock around which all Firebird API calls are serialized.

    When kinterbasdb is about to make a potentially long-running Firebird API call, it follows these steps:

    1. Extract necessary parameter data from Python structures

    2. Release the Python GIL

    3. Acquire the kinterbasdb process-wide Firebird client thread lock

    4. Execute the Firebird API call

    5. Release the kinterbasdb process-wide Firebird client thread lock

    6. Acquire the Python GIL

    7. Modify Python structures to reflect the results of the Firebird API call

    The addition of GIL management should improve kinterbasdb’s maximum possible throughput for multi-threaded Python programs on multi-processor systems (one processor can run the Python interpreter while another executes a Firebird client library operation). GIL management may also yield greater “responsiveness” for multi-threaded Python programs running on single-processor systems.

    The addition of GIL management required fairly extensive internal changes, and therefore warranted a whole prerelease version virtually unto itself.

  • Cursor name support

    The read/write property Cursor.name allows the Python programmer to perform scrolling UPDATEs or DELETEs via the “SELECT ... FOR UPDATE” syntax. If you don’t know what this means, refer to the database SQL syntax documentation of the FOR UPDATE clause of the SELECT statement. The Cursor.name property can be ignored entirely if you don’t need to use it.

    Here’s an example code fragment:

    con = ... # establish a kinterbasdb.Connection
    curScroll = con.cursor()
    curUpdate = con.cursor()
    
    curScroll.execute('select city from customer for update')
    curScroll.name = 'city_scroller'
    update = 'update customer set city=? where current of ' + curScroll.name
    
    for (city,) in curScroll:
        city = ... # make some changes to city
        curUpdate.execute( update, (city,) )
    

Version 3.1_pre1¶

Version 3.1_pre1 should be considered an early alpha release.

New Features¶

This list of new features represents the state of kinterbasdb 3.1_pre1, which does not include some features slated for inclusion in the final release of kinterbasdb 3.1. For a discussion of the ultimate goals of version 3.1, see: http://sourceforge.net/forum/forum.php?thread_id=696302&forum_id=30917

Also, the documentation has not yet been updated to cover these new features, nor will it be for at least another month. In the meantime, those who need to use the new features must refer to the source code.

  • Cursor Iteration Support

    When used with Python 2.2 or later, kinterbasdb’s Cursors now support “natural” iteration. For example:

    # Index-based field lookup (based on Cursor.fetchone):
    cur = con.cursor()
    cur.execute("select col1, col2 from the_table")
    for row in cur:
        col1 = row[0]
    
    # Key-based field lookup (based on Cursor.fetchonemap):
    cur = con.cursor()
    cur.execute("select col1, col2 from the_table")
    for rowMap in cur.itermap():
        col1 = rowMap['col1']
    

    The iterator-based pattern supersedes the ugly fetch pattern of old (though of course the old pattern will still work):

    # Index-based field lookup (based on Cursor.fetchone):
    cur = con.cursor()
    cur.execute("select col1, col2 from the_table")
    while 1:
        row = cur.fetchone()
        if not row:
            break
    
        col1 = row[0]
    
  • Implicit Parameter Conversion

    Implicit parameter conversion allows any SQL datatype supported by kinterbasdb to be passed to the database engine as a Python string.

    This is especially useful for parameterized statements that involve date/time datatypes, because they can now accept server-computed “magic” values such as ‘now’ and ‘current_date’ more naturally. Implicit parameter conversion is also likely to yield a speedup for programs that load external data from flat files into the database, since the incoming values do not need to be converted from their original string representation into an acceptable Python type before being forwarded to the database.

    For a more thorough discussion of this new feature, see: http://sourceforge.net/tracker/index.php?func=detail&aid=531828&group_id=9913&atid=309913
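
    For example, given an open cursor cur (the table is hypothetical):

    # The engine parses the strings itself, so server-computed values work naturally:
    cur.execute("insert into event (happened) values (?)", ('now',))
    cur.execute("insert into event (happened) values (?)", ('2003-05-01',))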

  • Services API Support (see IB 6 API Guide Chapter 12)

    The database engine provides an API (the Services API) to facilitate programmatic invocation of the maintenance tasks available through the command-line tools gbak, gfix, etc.

    I’ve wrapped nearly the entire Services API in a thick Python API of my own design. My API design is only provisional; I seek feedback as to how it could be made more elegant. The Services API support is accessible via the kinterbasdb.services module.
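
    A sketch of the provisional API (host and credentials are hypothetical; the backup call's argument details should be checked against the module itself):

    from kinterbasdb import services

    svcCon = services.connect(host='localhost', user='sysdba', password='masterkey')
    print svcCon.getServerVersion()
    svcCon.backup('/db/employee.fdb', '/backups/employee.fbk')  # gbak-style backup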

  • Database Event Support (see IB 6 API Guide Chapter 11)

    The database engine allows client programs to register to be informed of the occurrence of database events, which can be raised with the POST_EVENT statement in stored procedures or triggers. kinterbasdb 3.1 supports a subset of this functionality (synchronous waiting only) via the Connection.wait(eventNames) method.

    The current implementation is only a rough prototype; though usable, it is not recommended for production environments.

    The current implementation suffers from a major limitation: only one thread per process is allowed to listen for event notification. This is so because the current implementation resorts to some roundabout trickery to circumvent the lack of database API support for synchronous event notification on Windows. Because the database API only starts one asynchronous event handler thread per process, I doubt that support for multiple event-listening threads in a single process will materialize.
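
    A usage sketch of the synchronous interface (the event name is hypothetical):

    # Blocks until a stored procedure or trigger executes
    # POST_EVENT 'order_created' and its transaction commits:
    result = con.wait(['order_created'])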

Bug Fixes¶

  • In the past, the opaque mapping object returned by the Cursor.fetch*map methods returned None when asked for a field not in its select list, rather than raising a KeyError. It now raises a KeyError in such a case. For example:

    cur = con.cursor()
    cur.execute("select col1, col2 from the_table")
    for rowMap in cur.itermap():
        x = rowMap['col3'] # Used to return None.  Now raises KeyError,
                           # because col3 was not SELECTed.
    

Backward-incompatibilities¶

  • Although kinterbasdb 3.1 is significantly different internally, there is only one known API incompatibility with version 3.0.2. It would only arise in code that relies on the erroneous behavior of the mapping-fetch bug mentioned above.
  • Python versions prior to 2.1 are no longer officially supported. Although kinterbasdb might still compile against Python 2.0 and earlier, I will not go out of my way to ensure that it does.

Version 3.0.2¶

Bug Fixes¶

Version 3.0.1¶

Bug Fixes¶

  • Adjusted input handling of NULL values. The new scheme raises an exception immediately when it detects that a Python None value has arrived for storage in a database field or parameter that disallows NULL values.

    The old scheme simply accepted the Python None value and then tried to execute the query, relying on the database API to detect the error. With certain data types, the database API would silently insert a bogus value rather than detecting the error.
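
    For example, given an open cursor cur and a column declared NOT NULL (the table is hypothetical):

    # Now raises an exception immediately, instead of letting the engine
    # silently store a bogus value:
    cur.execute("insert into t (non_null_col) values (?)", (None,))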

  • Scrutinized the datetime input/output facilities, found some incompatibilities with the DB API, and corrected them. These changes are backward-incompatible, but are warranted because the previous behavior was in defiance of the specification. See further notes about the nature of these changes in the backward-incompatibilities section.

  • Fixed a memory leak that affected the storage of Python string input parameters in BLOB fields.

  • Fixed a rollback-related bug that arose if a connection to a database was established, but a transaction was never started on that connection. In such a case, a spurious exception was raised when the connection was garbage collected.

    Normal code would not have invoked this bug, but it was still a bug.

Backward-incompatibilities¶

  • Datetime input/output has changed to better comply with the DB API (see datetime bugfix discussion above).

    Code that uses the mx.DateTime module directly (rather than the kinterbasdb DB API datetime constructors) should not be affected.

    For details, see the comments in the code block in __init__.py tagged with “DSR:2002.07.19”.

Version 3.0.1_pre3¶

Bug Fixes¶

  • Bug #572326 (which was not present in kinterbasdb 3.0 and never affected Python 2.2+) caused several numeric types to not be transferred from Python to the database engine when they were passed as query parameters.

    This was a serious bug; it caused even such fundamental operations as cursor.execute("insert into the_table values (?)", (1,)) to fail.

Version 3.0.1_pre2¶

Bug Fixes¶

  • CHAR output no longer has problems with multibyte character sets or with values shorter than the maximum declared length of the field.

    CHARs are no longer returned with their trailing blanks intact. The trailing blanks have been abandoned because they were in fact NULL characters, not spaces. kinterbasdb would fill in the spaces manually, except for the problems that approach causes with multibyte character sets.

  • Fixed a potential buffer overflow, but the fix only applies when compiled against Python 2.2 or later.

Backward-incompatibilities¶

  • See coverage of CHAR output changes in the ‘Bug Fixes’ section. In a nutshell: CHAR output values no longer have trailing NULL bytes.

Version 3.0.1_pre1¶

New Features¶

  • It is now possible to connect to a database under a specific role by using the ‘role’ keyword argument of the kinterbasdb.connect function.
  • The following methods now accept any sequence except a string for their ‘parameter’ argument, rather than demanding a tuple: Cursor.execute, Cursor.executemany and Cursor.callproc.
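
Both features in a sketch (the DSN, role, and table are hypothetical):

    con = kinterbasdb.connect(dsn='localhost:/db/app.fdb',
        user='sysdba', password='masterkey', role='AUDITOR')
    cur = con.cursor()
    cur.execute("insert into t (a, b) values (?, ?)", [1, 'x'])  # a list now works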

Bug Fixes¶

  • kinterbasdb supports IB 5.x again.

    Various identifiers specific to IB 6.x/Firebird had crept into unguarded areas of __init__.py and _kinterbasdb.c, but this has been changed so that kinterbasdb compiles gracefully with IB 5.x. See: http://sourceforge.net/tracker/index.php?func=detail&aid=553184&group_id=9913&atid=209913

  • The distutils setup script no longer raises a ValueError on Windows 2000 or XP.

  • The precision slot in Cursor.description was always zero. It now contains the correct value if that value can reasonably be determined.

    Note that the database engine records the precision of some fields as zero (e.g., FLOAT), and the slot will also be zero in cases where the database engine does not expose the precision of the field (e.g., dynamic fields such as “SELECT 33.5 FROM RDB$DATABASE”).

    Since the database API does not provide the field’s precision figure in the XSQLVAR structure, it is necessary to query the system tables. In order to minimize the performance hit, precision figures are cached per Connection; the determination of a given field’s precision figure in the context of a given Connection will require only dictionary lookups after it is determined the first time with a system table query.

    An unfortunate side effect of this caching is that if a field’s precision is altered after the figure has been cached by a Connection, cursors based on that Connection will still show the old precision figure. In practice, this situation will almost never arise. See: http://sourceforge.net/tracker/index.php?func=detail&aid=549982&group_id=9913&atid=109913
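
    For example, given an open cursor cur and a column declared as NUMERIC(18, 4) (hypothetical):

    cur.execute("select numeric_col from t")
    print cur.description[0][4]  # -> 18 (the precision slot)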

  • On Linux, attempting to fetch immediately after having executed a non-query statement resulted in a segfault. An exception is now raised instead. The problem did not afflict Windows, which always raised the exception. See: http://sourceforge.net/tracker/index.php?func=detail&aid=551098&group_id=9913&atid=109913

    • The message carried by this exception grew without bound on both Windows and Linux. It no longer does.
  • Under some circumstances, the fetched values of CHAR fields were incorrect. CHAR values now appear as expected (they are left-padded with spaces and always of length equal to their field’s designated maximum length).

  • Cursor.fetchmany raised an error if there were no remaining values to fetch. It now returns an empty sequence instead, as required by the DB API Specification.

  • Field domains are checked more strictly. It is now impossible to (for example) issue a statement that attempts to insert a 12-character string into a 10-character CHAR field without encountering an exception.

    This checking is not perfect, since it validates against the field’s internal storage type rather than the field’s declared type. For example, a NUMERIC(1,1), which is stored internally as a short, will erroneously accept the value 12.5 because 125 fits in a short.

  • When operating in imprecise mode (connection.precision_mode == 0), kinterbasdb 3.0 sometimes interpreted integer values as though it were operating in precise mode.

Version 3.0 versus 2.0-0.3.1¶

New Features¶

The new features are thoroughly documented in the KInterbasDB Usage Guide (usage.html); they need not be reiterated here. However, backward-incompatible changes have been documented in this changelog (see the Backward-incompatibilities section).

Bug Fixes¶

Many bugs have been fixed, including (but not limited to) the following, which were registered with the KInterbasDB bug tracker at SourceForge ( http://sourceforge.net/tracker/index.php?group_id=9913&atid=109913 ):

  • 433090 cannot connect to firebird server
  • 438130 cursor.callproc not adding param code
  • 468304 fetchmany return all record
  • 498086 ignores column aliases in select
  • 498403 fetching after a callproc hangs program
  • 498414 execute procedure message length error
  • 505950 inconsistent fetch* return types
  • 515974 Wrong decoding of FB isc_version
  • 517093 broken fixed-point handling in 3.0
  • 517840 C function normalize_double inf. loop
  • 517842 fetch bug - program hangs
  • 520793 poor DB API compliance (a big fix that entailed many changes)
  • 522230 error with blobs larger than (2^16) - 1
  • 522774 inconsistent fixed-point conv in 3.0-rc2
  • 523348 memory leak in Blob2PyObject
  • immediate execution facilities unreliable in 2.x

Backward-incompatibilities¶

As a result of the changes required for some of the bugfixes (especially #520793 - “poor DB API compliance”) and general reengineering, several areas of backward-incompatibility have arisen:

  • fetch* return types

    The standard fetch(one|many|all) methods now return just a sequence, not a combined sequence/mapping. If you want a mapping, use one of the fetch(one|many|all)map methods.

    Note the “‘absolutely no guarantees’ except...” caveats in the KInterbasDB Usage Guide regarding the return types of the Cursor.fetch* methods and the contents of the Cursor.description attribute.

    This is a significant backward-incompatibility, and was not undertaken without serious consideration (for evidence see http://sourceforge.net/forum/forum.php?thread_id=622782&forum_id=30919 ).
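
    In other words (table hypothetical; note that unquoted field names are normalized to upper case):

    cur.execute("select a, b from t")
    row = cur.fetchone()         # plain sequence: row[0], row[1]
    mapping = cur.fetchonemap()  # mapping: mapping['A'], mapping['B']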

  • Fixed point number handling

    Fixed point number handling has been remodelled. By default, fixed point numbers (NUMERIC/DECIMAL field values) are now represented (with a potential loss of precision) as Python floats.

    A Connection.precision_mode attribute has been added so that precise representation of fixed point values as scaled Python integers (as in KInterbasDB 2.x) can be used at will.

    For more information, see the KInterbasDB Usage Guide.
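
    For example (a sketch):

    con.precision_mode = 0  # default: NUMERIC/DECIMAL values arrive as Python floats
    con.precision_mode = 1  # precise: values arrive as scaled Python integers, as in 2.x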

  • Connection.dialect

    In KInterbasDB 2.x, the default connection dialect was 1 (the backward-compatibility dialect for use with Interbase 5.5 and earlier).

    KInterbasDB 3.0 is being released into quite a different climate. Interbase 6.0 was released nearly two years ago, and Firebird 1.0 has recently been released. Because it is expected that KInterbasDB 3.0 will be used most frequently with Interbase 6.0+ and Firebird, the default connection dialect is 3.

    Using KInterbasDB 3.0 with Interbase 5.5 and earlier is still possible, though untested by the developers of KInterbasDB 3.0. See the Connection.dialect documentation in the KInterbasDB Usage Guide for an explanation of how to initialize a connection with a dialect other than 3.
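
    For example (connection parameters hypothetical):

    con = kinterbasdb.connect(dsn='/db/legacy.gdb',
        user='sysdba', password='masterkey', dialect=1)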

  • Connection.server_version

    The Connection.server_version attribute is now a string rather than an integer. An integer simply was not expressive enough to represent the numerous Interbase variants that exist today (including Firebird, which does not fit neatly into the Interbase version progression).

    For more information, see the KInterbasDB Usage Guide.

  • kinterbasdb.execute_immediate

    The kinterbasdb.execute_immediate function has been removed. A similar function named kinterbasdb.create_database has been added. The primary differences between kinterbasdb.execute_immediate and kinterbasdb.create_database are:

    • kinterbasdb.create_database is not as general
    • kinterbasdb.create_database actually works

    The execute_immediate method of the Connection class has been retained.

    For more information, see the KInterbasDB Usage Guide.
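
    A sketch of the replacement function (the path and credentials are hypothetical):

    import kinterbasdb

    con = kinterbasdb.create_database(
        "create database 'localhost:/db/new.fdb' user 'sysdba' password 'masterkey'"
    )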

kinterbasdb-3.3.0/docs/_sources/0000755000175000001440000000000011133100174015746 5ustar pcisaruserskinterbasdb-3.3.0/docs/_sources/thread-safety-overview.txt0000644000175000001440000001564611132652266023134 0ustar pcisarusers#################################################
Overview of Firebird Client Library Thread-Safety
#################################################

The thread-safety properties of the standard Firebird client library vary according to the following:

+ Firebird version
+ operating system
+ Firebird server architecture (SuperServer, Classic, Embedded)
+ underlying connection protocol (embedded vs. local vs. IPC vs. remote)

Determining whether the client library you're using is thread-safe can be quite confusing. This document aims to reduce that confusion by defining what thread-safety means in the context of the Firebird client library, then presenting a table that specifies which client library configurations are thread-safe.

Note that this document deals only with the *standard* Firebird client library that underlies the Firebird C API. It has *no relevance* to the clean-slate reimplementations in access libraries such as Jaybird.

Definition of "Thread-Safety"
=============================

Currently, the highest level of concurrency supported by any version of the Firebird client library is *thread-safety at the connection level*. When we say that the Firebird client library is *thread-safe at the connection level*, we mean that it is safe to use a particular connection in only one thread at a time, although the same connection can be manipulated by different threads in a serial fashion, and different connections can be manipulated by different threads in parallel.

For example, in a multithreaded application server, it is safe for a particular connection to be leased from a connection pool by Thread A, used, and returned to the pool for later lease by Thread B. It is not safe for Thread A and Thread B to use the same connection at the same time.

Thread-Safety Table
===================

+------------+---------+-----------------+----------------+--------------------+
| FB Version | OS      | FB Architecture | Remote / Local | Thread-Safe?       |
+============+=========+=================+================+====================+
| 1.0.3      | Windows | SuperServer     | Local          | No                 |
|            |         |                 +----------------+--------------------+
|            |         |                 | Remote         | Yes                |
|            |         +-----------------+----------------+--------------------+
|            |         | Classic         | Local          | No such config.    |
|            |         |                 +----------------+--------------------+
|            |         |                 | Remote         | No such config.    |
|            |         +-----------------+----------------+--------------------+
|            |         | Embedded        | Local          | No such config.    |
|            |         |                 +----------------+--------------------+
|            |         |                 | Remote         | No such config.    |
|            +---------+-----------------+----------------+--------------------+
|            | Linux   | SuperServer     | Local          | No such config.    |
|            |         |                 +----------------+--------------------+
|            |         |                 | Remote         | No                 |
|            |         +-----------------+----------------+--------------------+
|            |         | Classic         | Local          | No                 |
|            |         |                 +----------------+--------------------+
|            |         |                 | Remote         | No                 |
+------------+---------+-----------------+----------------+--------------------+
| 1.5        | Windows | SuperServer     | Local          | No                 |
|            |         |                 +----------------+--------------------+
|            |         |                 | Remote         | Yes                |
|            |         +-----------------+----------------+--------------------+
|            |         | Classic         | Local          | No such config.    |
|            |         |                 +----------------+--------------------+
|            |         |                 | Remote         | Yes                |
|            |         +-----------------+----------------+--------------------+
|            |         | Embedded        | Local          | Yes                |
|            |         |                 +----------------+--------------------+
|            |         |                 | Remote         | No such config.    |
|            +---------+-----------------+----------------+--------------------+
|            | Linux   | SuperServer     | Local          | No such config.    |
|            |         |                 +----------------+--------------------+
|            |         |                 | Remote         | Yes                |
|            |         +-----------------+----------------+--------------------+
|            |         | Classic         | Local          | No                 |
|            |         |                 +----------------+--------------------+
|            |         |                 | Remote         | No                 |
+------------+---------+-----------------+----------------+--------------------+
| 2.0/2.1    | Windows | SuperServer     | Local          | Yes                |
|            |         |                 +----------------+--------------------+
|            |         |                 | Remote         | Yes                |
|            |         +-----------------+----------------+--------------------+
|            |         | Classic         | Local          | Yes                |
|            |         |                 +----------------+--------------------+
|            |         |                 | Remote         | Yes                |
|            |         +-----------------+----------------+--------------------+
|            |         | Embedded        | Local          | Yes                |
|            |         |                 +----------------+--------------------+
|            |         |                 | Remote         | No such config.    |
|            +---------+-----------------+----------------+--------------------+
|            | Linux   | SuperServer     | Local          | No such config.    |
|            |         |                 +----------------+--------------------+
|            |         |                 | Remote         | Yes                |
|            |         +-----------------+----------------+--------------------+
|            |         | Classic         | Local          | No                 |
|            |         |                 +----------------+--------------------+
|            |         |                 | Remote         | No                 |
+------------+---------+-----------------+----------------+--------------------+

This document was written by David Rushby, with assistance from Dmitry Yemanov. Errors are attributable to Rushby rather than Yemanov.

kinterbasdb-3.3.0/docs/_sources/concurrency.txt0000644000175000001440000002160311132652265021057 0ustar pcisarusers###########
Concurrency
###########

Overview
========

Note: This section will not be comprehensible unless you understand the basic characteristics of the Firebird server architectures. These are documented in the "Classic or Superserver?" section of the `doc/Firebird-1.5-QuickStart.pdf` file included with the Firebird distribution.

Versions of KInterbasDB prior to 3.2 imposed a global lock over all database client library calls. This lock, referred to as the Global Database API Lock (GDAL), must be active for multithreaded client programs to work correctly with versions of the Firebird client library that do not properly support concurrency. Many such versions are still in use, so the GDAL remains active by default in KInterbasDB 3.2. To determine whether the client library you're using can correctly handle concurrent database calls, read this `Overview of Firebird Client Library Thread-Safety`__.

Note that a single client library might have different thread-safety properties depending on which *protocol* the client program specifies via the parameters of :func:`kinterbasdb.connect()`. For example, the Firebird 1.5 client library on Windows is thread-safe if the remote protocol is used, as in

.. sourcecode:: python

    kinterbasdb.connect(dsn=r'localhost:C:\temp\test.db', ...)

but is *not* thread-safe if the local protocol is used, as in

.. sourcecode:: python

    kinterbasdb.connect(dsn=r'C:\temp\test.db', ...)
Selecting and Activating a KInterbasDB Concurrency Level
========================================================

KInterbasDB 3.2 supports three levels of concurrency:

+ **Level 0:** No lock management whatsoever

  If the C preprocessor symbol `ENABLE_CONCURRENCY` is not defined when KInterbasDB is compiled, no lock management at all is performed at runtime. In fact, the code to initialize and manage the locks is not even compiled in.

  Level 0 is intended only for compiling KInterbasDB on non-threaded builds of the Python interpreter. It would not be desirable for a client program running on a normal (threaded) build of the Python interpreter to use Level 0, so no overhead is invested in making it possible to transition to Level 0 at runtime.

  Since Level 0 is intended for use in Python interpreters that have no Global Interpreter Lock (GIL), the GIL is not manipulated.

+ **Level 1:** Global Database API Lock (GDAL) is active (this is the default level)

  At Level 1, a global lock serializes all calls to the database client library. This lock, called the Global Database API Lock (GDAL), is to the database client library as the GIL is to the Python interpreter: a mechanism to guarantee that at most one thread is using the database client library at any time.

  Level 1 exists to support those versions of Firebird in which the client library is not thread-safe at the connection level (see the `Overview of Firebird Client Library Thread-Safety`__ for details). In environments where the author of KInterbasDB creates binaries and distributes them to client programmers, there is no way of knowing at compile time which Firebird client library configuration the KInterbasDB binaries will be used with. Level 1 protects client programmers who are not aware of the thread-safety properties of their version of the client library. For these reasons, Level 1 is the default, but Level 2 can be selected at runtime via the :func:`kinterbasdb.init()` function (see next section).

  At Level 1, the Python GIL is released and reacquired around most database client library calls in order to avoid blocking the entire Python process for the duration of the call.

+ **Level 2:** Global Database API Lock (GDAL) is not active, but connection and disconnection are serialized via the GCDL

  At Level 2, calls to the database client library are not serialized, except for calls to the connection attachment and detachment functions, which are serialized by a lock called the Global Connection and Disconnection Lock (GCDL). This limited form of serialization is necessary because the Firebird client library makes no guarantees about the thread-safety of connection and disconnection. Since most client programs written with high concurrency in mind use a connection pool that minimizes the need to physically connect and disconnect, the GCDL is not a serious impediment to concurrency.

  Level 2, which can be activated at runtime by calling `kinterbasdb.init(concurrency_level=2)`, is appropriate for client programmers who are aware of the thread-safety guarantees provided by their version of the Firebird client library, and have written the client program accordingly. For details about the thread-safety of various Firebird client library versions, see the `Overview of Firebird Client Library Thread-Safety`__.

  At Level 2, the Python GIL is released and reacquired around most database client library calls, just as it is at Level 1.

Level 1 is the default, so if you don't understand these subtleties, or are using a client library configuration that is not thread-safe, you do not need to take any action to achieve thread-safety.

Level 2 can greatly increase the throughput of a database-centric, multithreaded Python application, so you should use it if possible. Once you've determined that you're using an appropriate connection protocol with a capable client library, you can activate Level 2 at runtime with the following call:

.. sourcecode:: python

    kinterbasdb.init(concurrency_level=2)

The `kinterbasdb.init` function can only be called once during the life of a process. If it has not been called explicitly, the function will be called implicitly when the client program tries to perform any database operation. Therefore, the recommended place to call `kinterbasdb.init` is at the top level of one of the main modules of your program. The importation infrastructure of the Python interpreter serializes all imports, so calling `kinterbasdb.init` at import time avoids the potential for multiple simultaneous calls, which could cause subtle problems.

Caveats
=======

+ `threadsafety` versus `concurrency_level`

  Make sure not to confuse KInterbasDB's `concurrency_level` with its `threadsafety`. `threadsafety`, a module-level property required by the Python DB API Specification 2.0, represents the highest level of granularity at which the DB API implementation remains thread-safe. KInterbasDB is always "`thread-safe at the connection level`__" (DB API `threadsafety 1`), regardless of which `concurrency_level` is active.

  Think of `threadsafety` as the level of thread-safety that KInterbasDB guarantees, and `concurrency_level` as the degree to which KInterbasDB's internals are able to exploit a client program's potential for concurrency.

Tips on Achieving High Concurrency
==================================

+ Use the Classic server architecture, but the SuperServer client library.

  At the time of this writing (December 2005), the thread-centric Vulcan had not been released, so the multi-process Classic architecture was the only Firebird server architecture that could take advantage of multiple CPUs. This means that in most scenarios, Classic is far more concurrency-friendly than SuperServer.

  The Windows version of Firebird--whether Classic or SuperServer--offers a single client library, so the following advice is not relevant to Windows. The non-Windows versions of Firebird Classic include two client libraries:

  + `fbclient` (`libfbclient.so`) communicates with the server solely via the network protocol (possibly over an emulated network such as the local loopback). `fbclient` `is thread-safe in recent versions`__ of Firebird.

  + `fbembed` (`libfbembed.so`) uses an in-process Classic server to manipulate the database file directly. `fbembed` is not thread-safe in any version of Firebird; it should never be used with KInterbasDB concurrency level 2.

  At present, the best way to achieve a concurrency-friendly KInterbasDB/Firebird configuration is to use a version of KInterbasDB linked against `a thread-safe fbclient`__, running at concurrency level 2, and communicating with a Classic server. On Linux, such a setup can be created by installing the Classic server, then compiling KInterbasDB with the `database_lib_name` option in :file:`setup.cfg` set to `fbclient` (this is the default setting).

  A version of KInterbasDB that was linked against `fbembed` (by setting `database_lib_name=fbembed`) will not work in a multithreaded program if the concurrency level is higher than 1.

  On Windows, use a Classic server in combination with one of the standard KInterbasDB Windows binaries for Firebird 1.5 or later, and be sure to set KInterbasDB's concurrency level to 2.

kinterbasdb-3.3.0/docs/_sources/Python-DB-API-2.0.txt0000644000175000001440000006356511132652262021177 0ustar pcisarusers#####################################
Python Database API Specification 2.0
#####################################

KInterbasDB is the Python Database API 2.0 compliant driver for Firebird. The `Reference / Usage Guide` is therefore divided into three parts:

* Python Database API 2.0 specification
* KInterbasDB Compliance to Python DB 2.0 API specification.
* KInterbasDB features beyond Python DB 2.0 API specification.

If you're familiar with the Python DB 2.0 API specification, you may skip directly to the next topic.

.. note::

   This is a local copy of the specification. The online source copy is available at `http://www.python.org/topics/database/DatabaseAPI-2.0.html`__

Introduction
============

This API has been defined to encourage similarity between the Python modules that are used to access databases. By doing this, we hope to achieve a consistency leading to more easily understood modules, code that is generally more portable across databases, and a broader reach of database connectivity from Python.

The interface specification consists of several sections:

+ Module Interface
+ Connection Objects
+ Cursor Objects
+ Type Objects and Constructors
+ Implementation Hints
+ Major Changes from 1.0 to 2.0

Comments and questions about this specification may be directed to the `SIG for Database Interfacing with Python`__.

For more information on database interfacing with Python and available packages see the `Database Topics Guide`__ on `www.python.org`__.

This document describes the Python Database API Specification 2.0. The previous `version 1.0`__ is still available as reference. Package writers are encouraged to use this version of the specification as basis for new interfaces.

Module Interface
================

Access to the database is made available through connection objects. The module must provide the following constructor for these:

.. function:: connect(parameters...)

   Constructor for creating a connection to the database. Returns a Connection Object. It takes a number of parameters which are database dependent. [#f1]_

These module globals must be defined:

.. data:: apilevel

   String constant stating the supported DB API level. Currently only the strings `'1.0'` and `'2.0'` are allowed. If not given, a `Database API 1.0`__ level interface should be assumed.

.. data:: threadsafety

   Integer constant stating the level of thread safety the interface supports. Possible values are:

   - `0` = Threads may not share the module.
   - `1` = Threads may share the module, but not connections.
   - `2` = Threads may share the module and connections.
   - `3` = Threads may share the module, connections and cursors.

   Sharing in the above context means that two threads may use a resource without wrapping it using a mutex semaphore to implement resource locking. Note that you cannot always make external resources thread safe by managing access using a mutex: the resource may rely on global variables or other external sources that are beyond your control.
data:: paramstyle String constant stating the type of parameter marker formatting expected by the interface. Possible values are [#f2]_: - `'qmark'` = Question mark style, e.g. '...WHERE name=?' - `'numeric'` = Numeric, positional style, e.g. '...WHERE name=:1' - `'named'` = Named style, e.g. '...WHERE name=:name' - `'format'` = ANSI C printf format codes, e.g. '...WHERE name=%s' - `'pyformat'` = Python extended format codes, e.g. '...WHERE name=%(name)s' The module should make all error information available through these exceptions or subclasses thereof: .. exception:: Warning Exception raised for important warnings like data truncations while inserting, etc. It must be a subclass of the Python StandardError (defined in the module exceptions). .. exception:: Error Exception that is the base class of all other error exceptions. You can use this to catch all errors with one single 'except' statement. Warnings are not considered errors and thus should not use this class as base. It must be a subclass of the Python StandardError (defined in the module exceptions). .. exception:: InterfaceError Exception raised for errors that are related to the database interface rather than the database itself. It must be a subclass of Error. .. exception:: DatabaseError Exception raised for errors that are related to the database. It must be a subclass of Error. .. exception:: DataError Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range, etc. It must be a subclass of DatabaseError. .. exception:: OperationalError Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer, e.g. an unexpected disconnect occurs, the data source name is not found, a transaction could not be processed, a memory allocation error occurred during processing, etc. It must be a subclass of DatabaseError. .. exception:: IntegrityError Exception raised when the relational integrity of the database is affected, e.g. a foreign key check fails. It must be a subclass of DatabaseError. .. exception:: InternalError Exception raised when the database encounters an internal error, e.g. the cursor is not valid anymore, the transaction is out of sync, etc. It must be a subclass of DatabaseError. .. exception:: ProgrammingError Exception raised for programming errors, e.g. table not found or already exists, syntax error in the SQL statement, wrong number of parameters specified, etc. It must be a subclass of DatabaseError. .. exception:: NotSupportedError Exception raised in case a method or database API was used which is not supported by the database, e.g. requesting a .rollback() on a connection that does not support transaction or has transactions turned off. It must be a subclass of DatabaseError. This is the exception inheritance layout: .. sourcecode:: python StandardError |__Warning |__Error |__InterfaceError |__DatabaseError |__DataError |__OperationalError |__IntegrityError |__InternalError |__ProgrammingError |__NotSupportedError Note: The values of these exceptions are not defined. They should give the user a fairly good idea of what went wrong though. Connection Objects ================== Connections Objects should respond to the following methods: .. class:: Connection .. method:: close() Close the connection now (rather than whenever __del__ is called). 
Connection Objects ================== Connection objects should respond to the following methods: .. class:: Connection .. method:: close() Close the connection now (rather than whenever __del__ is called). The connection will be unusable from this point forward; an `Error` (or subclass) exception will be raised if any operation is attempted with the connection. The same applies to all cursor objects trying to use the connection. .. method:: commit() Commit any pending transaction to the database. Note that if the database supports an auto-commit feature, this must be initially off. An interface method may be provided to turn it back on. Database modules that do not support transactions should implement this method with void functionality. .. method:: rollback() This method is optional since not all databases provide transaction support. [#f3]_ In case a database does provide transactions this method causes the database to roll back to the start of any pending transaction. Closing a connection without committing the changes first will cause an implicit rollback to be performed. .. method:: cursor() Return a new Cursor Object using the connection. If the database does not provide a direct cursor concept, the module will have to emulate cursors using other means to the extent needed by this specification. [#f4]_ Cursor Objects ============== These objects represent a database cursor, which is used to manage the context of a fetch operation. Cursor Objects should respond to the following methods and attributes: .. class:: Cursor .. attribute:: description This read-only attribute is a sequence of 7-item sequences. Each of these sequences contains information describing one result column: `(name, type_code, display_size, internal_size, precision, scale, null_ok)`. This attribute will be `None` for operations that do not return rows or if the cursor has not had an operation invoked via the `executeXXX()` method yet. The `type_code` can be interpreted by comparing it to the Type Objects specified in the section below. .. attribute:: rowcount This read-only attribute specifies the number of rows that the last `executeXXX()` produced (for DQL statements like select) or affected (for DML statements like update or insert). The attribute is -1 in case no `executeXXX()` has been performed on the cursor or the rowcount of the last operation is not determinable by the interface. [#f7]_ .. method:: callproc(procname[,parameters]) This method is optional since not all databases provide stored procedures. [#f3]_ Call a stored database procedure with the given name. The sequence of parameters must contain one entry for each argument that the procedure expects. The result of the call is returned as a modified copy of the input sequence. Input parameters are left untouched, output and input/output parameters replaced with possibly new values. The procedure may also provide a result set as output. This must then be made available through the standard `fetchXXX()` methods. .. method:: close() Close the cursor now (rather than whenever __del__ is called). The cursor will be unusable from this point forward; an `Error` (or subclass) exception will be raised if any operation is attempted with the cursor. .. method:: execute(operation[,parameters]) Prepare and execute a database operation (query or command). Parameters may be provided as sequence or mapping and will be bound to variables in the operation. Variables are specified in a database-specific notation (see the module's `paramstyle` attribute for details). [#f5]_ A reference to the operation will be retained by the cursor. If the same operation object is passed in again, then the cursor can optimize its behavior. This is most effective for algorithms where the same operation is used, but different parameters are bound to it (many times). For maximum efficiency when reusing an operation, it is best to use the setinputsizes() method to specify the parameter types and sizes ahead of time. It is legal for a parameter to not match the predefined information; the implementation should compensate, possibly with a loss of efficiency. The parameters may also be specified as list of tuples to e.g. insert multiple rows in a single operation, but this kind of usage is deprecated: `executemany()` should be used instead. Return values are not defined.
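For a concrete, non-normative example: KInterbasDB uses the `qmark` parameter style, so a parameterized execution looks like this (assuming `con` is an open connection; the `languages` table is the one used in the tutorial later in this documentation):

.. sourcecode:: python

    cur = con.cursor()
    # 'qmark' style: the value is bound to the ? marker by the driver,
    # never interpolated into the SQL text by the client program:
    cur.execute("select name from languages where year_released = ?", (1991,))
    for (name,) in cur:
        print name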
.. method:: executemany(operation,seq_of_parameters) Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence `seq_of_parameters`. Modules are free to implement this method using multiple calls to the `execute()` method or by using array operations to have the database process the sequence as a whole in one call. The same comments as for `execute()` also apply accordingly to this method. Return values are not defined. .. method:: fetchone() Fetch the next row of a query result set, returning a single sequence, or `None` when no more data is available. [#f6]_ An `Error` (or subclass) exception is raised if the previous call to `executeXXX()` did not produce any result set or no call was issued yet. .. method:: fetchmany([size=cursor.arraysize]) Fetch the next set of rows of a query result, returning a sequence of sequences (e.g. a list of tuples). An empty sequence is returned when no more rows are available. The number of rows to fetch per call is specified by the parameter. If it is not given, the cursor's `arraysize` determines the number of rows to be fetched. The method should try to fetch as many rows as indicated by the size parameter. If this is not possible due to the specified number of rows not being available, fewer rows may be returned. An `Error` (or subclass) exception is raised if the previous call to `executeXXX()` did not produce any result set or no call was issued yet. Note there are performance considerations involved with the size parameter. For optimal performance, it is usually best to use the arraysize attribute. If the size parameter is used, then it is best for it to retain the same value from one `fetchmany()` call to the next. .. method:: fetchall() Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's `arraysize` attribute can affect the performance of this operation. An `Error` (or subclass) exception is raised if the previous call to `executeXXX()` did not produce any result set or no call was issued yet. .. method:: nextset() This method is optional since not all databases support multiple result sets. [#f3]_ This method will make the cursor skip to the next available set, discarding any remaining rows from the current set. If there are no more sets, the method returns `None`. Otherwise, it returns a true value and subsequent calls to the fetch methods will return rows from the next result set. An `Error` (or subclass) exception is raised if the previous call to `executeXXX()` did not produce any result set or no call was issued yet. .. attribute:: arraysize This read/write attribute specifies the number of rows to fetch at a time with `fetchmany()`. It defaults to 1 meaning to fetch a single row at a time.
Implementations must observe this value with respect to the `fetchmany()` method, but are free to interact with the database a single row at a time. It may also be used in the implementation of `executemany()`. .. method:: setinputsizes(sizes) This can be used before a call to `executeXXX()` to predefine memory areas for the operation's parameters. `sizes` is specified as a sequence -- one item for each input parameter. The item should be a Type Object that corresponds to the input that will be used, or it should be an integer specifying the maximum length of a string parameter. If the item is `None`, then no predefined memory area will be reserved for that column (this is useful to avoid predefined areas for large inputs). This method would be used before the `executeXXX()` method is invoked. Implementations are free to have this method do nothing and users are free to not use it. .. method:: setoutputsize(size[,column]) Set a column buffer size for fetches of large columns (e.g. LONGs, BLOBs, etc.). The column is specified as an index into the result sequence. Not specifying the column will set the default size for all large columns in the cursor. This method would be used before the `executeXXX()` method is invoked. Implementations are free to have this method do nothing and users are free to not use it. Type Objects and Constructors ============================= Many databases need to have the input in a particular format for binding to an operation's input parameters. For example, if an input is destined for a DATE column, then it must be bound to the database in a particular string format. Similar problems exist for "Row ID" columns or large binary items (e.g. blobs or RAW columns). This presents problems for Python since the parameters to the `executeXXX()` method are untyped. When the database module sees a Python string object, it doesn't know if it should be bound as a simple CHAR column, as a raw BINARY item, or as a DATE. To overcome this problem, a module must provide the constructors defined below to create objects that can hold special values. When passed to the cursor methods, the module can then detect the proper type of the input parameter and bind it accordingly. A Cursor Object's `description` attribute returns information about each of the result columns of a query. The `type_code` must compare equal to one of Type Objects defined below. Type Objects may be equal to more than one type code (e.g. DATETIME could be equal to the type codes for date, time and timestamp columns; see the Implementation Hints below for details). The module exports the following constructors and singletons: .. function:: Date(year,month,day) This function constructs an object holding a date value. .. function:: Time(hour,minute,second) This function constructs an object holding a time value. .. function:: Timestamp(year,month,day,hour,minute,second) This function constructs an object holding a time stamp value. .. function:: DateFromTicks(ticks) This function constructs an object holding a date value from the given ticks value (number of seconds since the epoch; see the documentation of the standard Python time module for details). .. function:: TimeFromTicks(ticks) This function constructs an object holding a time value from the given ticks value (number of seconds since the epoch; see the documentation of the standard Python time module for details). .. 
function:: TimestampFromTicks(ticks) This function constructs an object holding a time stamp value from the given ticks value (number of seconds since the epoch; see the documentation of the standard Python time module for details). .. function:: Binary(string) This function constructs an object capable of holding a binary (long) string value. .. data:: STRING This type object is used to describe columns in a database that are string-based (e.g. CHAR). .. data:: BINARY This type object is used to describe (long) binary columns in a database (e.g. LONG, RAW, BLOBs). .. data:: NUMBER This type object is used to describe numeric columns in a database. .. data:: DATETIME This type object is used to describe date/time columns in a database. .. data:: ROWID This type object is used to describe the "Row ID" column in a database. SQL NULL values are represented by the Python `None` singleton on input and output. Note: Usage of Unix ticks for database interfacing can cause troubles because of the limited date range they cover. Implementation Hints ==================== + The preferred object types for the date/time objects are those defined in the `mxDateTime `__ package. It provides all necessary constructors and methods both at Python and C level. + The preferred object type for Binary objects are the buffer types available in standard Python starting with version 1.5.2. Please see the Python documentation for details. For information about the C interface have a look at Include/bufferobject.h and Objects/bufferobject.c in the Python source distribution. + Here is a sample implementation of the Unix ticks based constructors for date/time delegating work to the generic constructors: .. sourcecode:: python import time def DateFromTicks(ticks): return apply(Date,time.localtime(ticks)[:3]) def TimeFromTicks(ticks): return apply(Time,time.localtime(ticks)[3:6]) def TimestampFromTicks(ticks): return apply(Timestamp,time.localtime(ticks)[:6]) + This Python class allows implementing the above type objects even though the description type code field yields multiple values for one type object: .. sourcecode:: python class DBAPITypeObject: def __init__(self,*values): self.values = values def __cmp__(self,other): if other in self.values: return 0 if other < self.values: return 1 else: return -1 The resulting type object compares equal to all values passed to the constructor. + Here is a snippet of Python code that implements the exception hierarchy defined above: .. sourcecode:: python import exceptions class Error(exceptions.StandardError): pass class Warning(exceptions.StandardError): pass class InterfaceError(Error): pass class DatabaseError(Error): pass class InternalError(DatabaseError): pass class OperationalError(DatabaseError): pass class ProgrammingError(DatabaseError): pass class IntegrityError(DatabaseError): pass class DataError(DatabaseError): pass class NotSupportedError(DatabaseError): pass In C you can use the `PyErr_NewException(fullname, base, NULL)` API to create the exception objects. Major Changes from Version 1.0 to Version 2.0 ============================================= The Python Database API 2.0 introduces a few major changes compared to the 1.0 version. Because some of these changes will cause existing `DB API 1.0 `__ based scripts to break, the major version number was adjusted to reflect this change. These are the most important changes from 1.0 to 2.0: + The need for a separate dbi module was dropped and the functionality merged into the module interface itself.
+ New constructors and Type Objects were added for date/time values, the RAW Type Object was renamed to BINARY. The resulting set should cover all basic data types commonly found in modern SQL databases. + New constants (apilevel, threadlevel, paramstyle) and methods (executemany, nextset) were added to provide better database bindings. + The semantics of .callproc() needed to call stored procedures are now clearly defined. + The definition of the .execute() return value changed. Previously, the return value was based on the SQL statement type (which was hard to implement right) -- it is undefined now; use the more flexible .rowcount attribute instead. Modules are free to return the old style return values, but these are no longer mandated by the specification and should be considered database interface dependent. + Class based exceptions were incorporated into the specification. Module implementors are free to extend the exception layout defined in this specification by subclassing the defined exception classes. Open Issues =========== Although the version 2.0 specification clarifies a lot of questions that were left open in the 1.0 version, there are still some remaining issues: + Define a useful return value for .nextset() for the case where a new result set is available. + Create a fixed point numeric type for use as loss-less monetary and decimal interchange format. Footnotes ========= .. [#f1] As a guideline the connection constructor parameters should be implemented as keyword parameters for more intuitive use and follow this order of parameters: `dsn` = Data source name as string `user` = User name as string (optional) `password` = Password as string (optional) `host` = Hostname (optional) `database` = Database name (optional) E.g. a connect could look like this: `connect(dsn='myhost:MYDB',user='guido',password='234$?')` .. [#f2] Module implementors should prefer 'numeric', 'named' or 'pyformat' over the other formats because these offer more clarity and flexibility. .. [#f3] If the database does not support the functionality required by the method, the interface should throw an exception in case the method is used. The preferred approach is to not implement the method and thus have Python generate an `AttributeError` in case the method is requested. This allows the programmer to check for database capabilities using the standard `hasattr()` function. For some dynamically configured interfaces it may not be appropriate to require dynamically making the method available. These interfaces should then raise a `NotSupportedError` to indicate the non-ability to perform the roll back when the method is invoked. .. [#f4] A database interface may choose to support named cursors by allowing a string argument to the method. This feature is not part of the specification, since it complicates semantics of the `.fetchXXX()` methods. .. [#f5] The module will use the __getitem__ method of the parameters object to map either positions (integers) or names (strings) to parameter values. This allows for both sequences and mappings to be used as input. The term "bound" refers to the process of binding an input value to a database execution buffer. In practical terms, this means that the input value is directly used as a value in the operation. The client should not be required to "escape" the value so that it can be used -- the value should be equal to the actual database value. .. [#f6] Note that the interface may implement row fetching using arrays and other optimizations. 
It is not guaranteed that a call to this method will only move the associated cursor forward by one row. .. [#f7] The `rowcount` attribute may be coded in a way that updates its value dynamically. This can be useful for databases that return useable rowcount values only after the first call to a `.fetchXXX()` method. kinterbasdb-3.3.0/docs/_sources/license.txt0000644000175000001440000000370311132652265020150 0ustar pcisarusers################### KInterbasDB LICENSE ################### The following contributors hold Copyright (C) over their respective portions of code and documentation: [Author of original version; maintained through version 2.0:] 1998-2001 [alex] Alexander Kuznetsov [Author of ~90% of current code, most of current documentation; maintained through version 3.3:] 2002-2007 [dsr] David S. Rushby [Finishing touch to v3.3; Current maintainer:] 2008-2009 [paci] Pavel Cisar [Significant Contributors:] 2001-2002 [maz] Marek Isalski Marek made important first steps in removing the limitations of version 2.0 in preparation for version 3.0. 2001 [eac] Evgeny A. Cherkashin Evgeny wrote the first version of the distutils build script, which was included in a 2.x point release. 2001-2002 [janez] Janez Jere Janez contributed several bugfixes, including fixes for the date and time parameter conversion code in preparation for version 3.0. Permission to use, copy, modify, and distribute this software and its documentation for any purpose and without fee or royalty is hereby granted, provided that the above copyright notice appears in all copies and that both the copyright notice and this permission notice appear in supporting documentation or portions thereof, including modifications, that you make. The authors disclaim all warranties with regard to this software, including all implied warranties of merchantability and fitness. In no event shall any author be liable for any special, indirect or consequential damages or any damages whatsoever resulting from loss of use, data or profits, whether in an action of contract, negligence or other tortious action, arising out of or in connection with the use or performance of this software. kinterbasdb-3.3.0/docs/_sources/tutorial.txt0000644000175000001440000001773311132652266020402 0ustar pcisarusers ############################ Quick-start Guide / Tutorial ############################ This brief tutorial aims to get the reader started by demonstrating elementary usage of KInterbasDB. It is not a comprehensive Python Database API tutorial, nor is it comprehensive in its coverage of anything else. The numerous advanced features of KInterbasDB are covered in another section of this documentation, which is not in a tutorial format, though it is replete with examples. Connecting to a Database ======================== **Example 1** A database connection is typically established with code such as this: .. sourcecode:: python import kinterbasdb # The server is named 'bison'; the database file is at '/temp/test.db'. con = kinterbasdb.connect(dsn='bison:/temp/test.db', user='sysdba', password='pass') # Or, equivalently: con = kinterbasdb.connect( host='bison', database='/temp/test.db', user='sysdba', password='pass' ) **Example 2** Suppose we want to connect to the database in SQL Dialect 1 and specifying UTF-8 as the character set of the connection: .. 
sourcecode:: python import kinterbasdb con = kinterbasdb.connect( dsn='bison:/temp/test.db', user='sysdba', password='pass', dialect=1, # necessary for all dialect 1 databases charset='UTF8' # specify a character set for the connection ) Executing SQL Statements ======================== For this section, suppose we have a table defined and populated by the following SQL code: .. sourcecode:: sql create table languages ( name varchar(20), year_released integer ); insert into languages (name, year_released) values ('C', 1972); insert into languages (name, year_released) values ('Python', 1991); **Example 1** This example shows the *simplest* way to print the entire contents of the `languages` table: .. sourcecode:: python import kinterbasdb con = kinterbasdb.connect(dsn='/temp/test.db', user='sysdba', password='masterkey') # Create a Cursor object that operates in the context of Connection con: cur = con.cursor() # Execute the SELECT statement: cur.execute("select * from languages order by year_released") # Retrieve all rows as a sequence and print that sequence: print cur.fetchall() Sample output: .. sourcecode:: python [('C', 1972), ('Python', 1991)] **Example 2** Here's another trivial example that demonstrates various ways of fetching a single row at a time from a `SELECT`-cursor: .. sourcecode:: python import kinterbasdb con = kinterbasdb.connect(dsn='/temp/test.db', user='sysdba', password='masterkey') cur = con.cursor() SELECT = "select name, year_released from languages order by year_released" # 1. Iterate over the rows available from the cursor, unpacking the # resulting sequences to yield their elements (name, year_released): cur.execute(SELECT) for (name, year_released) in cur: print '%s has been publicly available since %d.' % (name, year_released) # 2. Equivalently: cur.execute(SELECT) for row in cur: print '%s has been publicly available since %d.' % (row[0], row[1]) # 3. Using mapping-iteration rather than sequence-iteration: cur.execute(SELECT) for row in cur.itermap(): print '%(name)s has been publicly available since %(year_released)d.' % row Sample output: .. sourcecode:: python C has been publicly available since 1972. Python has been publicly available since 1991. C has been publicly available since 1972. Python has been publicly available since 1991. C has been publicly available since 1972. Python has been publicly available since 1991. **Example 3** The following program is a simplistic table printer (applied in this example to `languages`): .. sourcecode:: python import kinterbasdb as k TABLE_NAME = 'languages' SELECT = 'select * from %s order by year_released' % TABLE_NAME con = k.connect(dsn='/temp/test.db', user='sysdba', password='masterkey') cur = con.cursor() cur.execute(SELECT) # Print a header. for fieldDesc in cur.description: print fieldDesc[k.DESCRIPTION_NAME].ljust(fieldDesc[k.DESCRIPTION_DISPLAY_SIZE]) , print # Finish the header with a newline. print '-' * 78 # For each row, print the value of each field left-justified within # the maximum possible width of that field. fieldIndices = range(len(cur.description)) for row in cur: for fieldIndex in fieldIndices: fieldValue = str(row[fieldIndex]) fieldMaxWidth = cur.description[fieldIndex][k.DESCRIPTION_DISPLAY_SIZE] print fieldValue.ljust(fieldMaxWidth) , print # Finish the row with a newline. Sample output: .. sourcecode:: python NAME YEAR_RELEASED ------------------------------------------------------------------------------ C 1972 Python 1991 **Example 4** Let's insert more languages: .. 
sourcecode:: python import kinterbasdb con = kinterbasdb.connect(dsn='/temp/test.db', user='sysdba', password='masterkey') cur = con.cursor() newLanguages = [ ('Lisp', 1958), ('Dylan', 1995), ] cur.executemany("insert into languages (name, year_released) values (?, ?)", newLanguages ) # The changes will not be saved unless the transaction is committed explicitly: con.commit() Note the use of a *parameterized* SQL statement above. When dealing with repetitive statements, this is much faster and less error-prone than assembling each SQL statement manually. (You can read more about parameterized SQL statements in the section on `Prepared Statements`.) After running Example 4, the table printer from Example 3 would print: .. sourcecode:: python NAME YEAR_RELEASED ------------------------------------------------------------------------------ Lisp 1958 C 1972 Python 1991 Dylan 1995 Calling Stored Procedures ========================= Firebird supports stored procedures written in a proprietary procedural SQL language. Firebird stored procedures can have *input* parameters and/or *output* parameters. Some databases support *input/output* parameters, where the same parameter is used for both input and output; Firebird does not support this. It is important to distinguish between procedures that *return a result set* and procedures that *populate and return their output parameters exactly once*. Conceptually, the latter "return their output parameters" like a Python function, whereas the former "yield result rows" like a Python generator. Firebird's *server-side* procedural SQL syntax makes no such distinction, but *client-side* SQL code (and C API code) must. A result set is retrieved from a stored procedure by `SELECT`ing from the procedure, whereas output parameters are retrieved with an `EXECUTE PROCEDURE` statement. To *retrieve a result set* from a stored procedure with KInterbasDB, use code such as this: .. sourcecode:: python cur.execute("select output1, output2 from the_proc(?, ?)", (input1, input2)) # Ordinary fetch code here, such as: for row in cur: ... # process row con.commit() # If the procedure had any side effects, commit them. To *execute* a stored procedure and *access its output parameters*, use code such as this: .. sourcecode:: python cur.callproc("the_proc", (input1, input2)) # If there are output parameters, retrieve them as though they were the # first row of a result set. For example: outputParams = cur.fetchone() con.commit() # If the procedure had any side effects, commit them. This latter is not very elegant; it would be preferable to access the procedure's output parameters as the return value of `Cursor.callproc()`. The Python DB API specification requires the current behavior, however. kinterbasdb-3.3.0/docs/_sources/index.txt0000644000175000001440000000353611133077256017643 0ustar pcisarusers.. KInterbasDB documentation master file, created by sphinx-quickstart on Wed Jan 7 12:29:48 2009. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. ####################################### Welcome to KInterbasDB's documentation! ####################################### KInterbasDB is a `Python `__ extension package that implements `Python Database API 2.0`-compliant support for the open source relational database `Firebird® `__ and some versions of its proprietary cousin `Interbase® `__. 
In addition to the minimal feature set of the standard Python DB API, KInterbasDB also exposes nearly the entire native client API of the database engine. KInterbasDB is free -- covered by a permissive BSD-style `license `__ that both commercial and noncommercial users should find agreeable. This documentation set is not a tutorial on Python, SQL, or Firebird; rather, it is a topical presentation of KInterbasDB's feature set, with example code to demonstrate basic usage patterns. For detailed information about Firebird features, see the `Firebird documentation `__, and especially the excellent `The Firebird Book `__ written by Helen Borrie and published by APress. .. module:: kinterbasdb :synopsis: Python Database API 2.0 Compliant driver for Firebird Documentation Contents: *********************** .. toctree:: :maxdepth: 2 installation tutorial Python-DB-API-2.0 python-db-api-compliance beyond-python-db-api concurrency thread-safety-overview links changelog license Indices and tables ****************** * :ref:`genindex` * :ref:`modindex` * :ref:`search` kinterbasdb-3.3.0/docs/_sources/beyond-python-db-api.txt0000644000175000001440000045341211133076420022457 0ustar pcisarusers####################################################################### Native Database Engine Features and Extensions Beyond the Python DB API ####################################################################### .. currentmodule:: kinterbasdb Programmatic Database Creation and Deletion =========================================== The Firebird engine stores a database in a fairly straightforward manner: as a single file or, if desired, as a segmented group of files. The engine supports dynamic database creation via the SQL statement `CREATE DATABASE`. The engine also supports dropping (deleting) databases dynamically, but dropping is a more complicated operation than creating, for several reasons: an existing database may be in use by users other than the one who requests the deletion, it may have supporting objects such as temporary sort files, and it may even have dependent shadow databases. Although the database engine recognizes a `DROP DATABASE` SQL statement, support for that statement is limited to the `isql` command-line administration utility. However, the engine supports the deletion of databases via an API call, which KInterbasDB exposes to Python (see below). KInterbasDB supports dynamic database creation and deletion via the module-level function :func:`kinterbasdb.create_database` and the method :meth:`~kinterbasdb.Connection.drop_database`. These are documented below, then demonstrated by a brief example. .. function:: create_database() Creates a database according to the supplied `CREATE DATABASE` SQL statement. Returns an open connection to the newly created database. Arguments: :sql: string containing the `CREATE DATABASE` statement. Note that this statement may need to include a username and password. :dialect: `optional` - the SQL dialect under which to execute the statement (defaults to `3`). .. method:: Connection.drop_database() Deletes the database to which the connection is attached. This method performs the database deletion in a responsible fashion. Specifically, it: + raises an `OperationalError` instead of deleting the database if there are other active connections to the database + deletes supporting files and logs in addition to the primary database file(s) This method has no arguments. Example program: .. 
sourcecode:: python import kinterbasdb con = kinterbasdb.create_database( "create database '/temp/db.db' user 'sysdba' password 'pass'" ) con.drop_database() Advanced Transaction Control ============================ For the sake of simplicity, KInterbasDB lets the Python programmer ignore transaction management to the greatest extent allowed by the Python Database API Specification 2.0. The specification says, "if the database supports an auto-commit feature, this must be initially off". At a minimum, therefore, it is necessary to call the `commit` method of the connection in order to persist any changes made to the database. Transactions left unresolved by the programmer will be `rollback`ed when the connection is garbage collected. Remember that because of `ACID `__, every data manipulation operation in the Firebird database engine takes place in the context of a transaction, including operations that are conceptually "read-only", such as a typical `SELECT`. The client programmer of KInterbasDB establishes a transaction implicitly by using any SQL execution method, such as :meth:`~Connection.execute_immediate()`, :meth:`Cursor.execute()`, or :meth:`Cursor.callproc()`. Although KInterbasDB allows the programmer to pay little attention to transactions, it also exposes the full complement of the database engine's advanced transaction control features: transaction parameters, retaining transactions, savepoints, and distributed transactions. Explicit transaction start -------------------------- In addition to the implicit transaction initiation required by Python Database API, KInterbasDB allows the programmer to start transactions explicitly via the `Connection.begin` method. .. method:: Connection.begin(tpb) Starts a transaction explicitly. This is never *required*; a transaction will be started implicitly if necessary. :tpb: Optional transaction parameter buffer (TPB) populated with `kinterbasdb.isc_tpb_*` constants. See the Firebird API guide for these constants' meanings. Transaction Parameters ---------------------- The database engine offers the client programmer an optional facility called *transaction parameter buffers* (TPBs) for tweaking the operating characteristics of the transactions he initiates. These include characteristics such as whether the transaction has read and write access to tables, or read-only access, and whether or not other simultaneously active transactions can share table access with the transaction. Connections have a :attr:`default_tpb` attribute that can be changed to set the default TPB for all transactions subsequently started on the connection. Alternatively, if the programmer only wants to set the TPB for a single transaction, he can start a transaction explicitly via the :meth:`~Connection.begin()` method and pass a TPB for that single transaction. For details about TPB construction, see the Firebird API documentation. In particular, the :file:`ibase.h` supplied with Firebird contains all possible TPB elements -- single bytes that the C API defines as constants whose names begin with `isc_tpb_`. KInterbasDB makes all of those TPB constants available (under the same names) as module-level constants in the form of single-character strings. A transaction parameter *buffer* is handled in C as a character array; KInterbasDB requires that TPBs be constructed as Python strings. Since the constants in the `kinterbasdb.isc_tpb_*` family are single-character Python strings, they can simply be concatenated to create a TPB. .. 
warning:: This method requires a good knowledge of the `tpb_block` structure and the proper order of its various parameters, as the Firebird engine will raise an error when a badly structured block is used. Also, the definition of `table reservation` parameters is awkward, as you'll need to mix binary codes with table names passed as Pascal strings (characters preceded by the string length). The following program uses explicit transaction initiation and TPB construction to establish an unobtrusive transaction for read-only access to the database: .. sourcecode:: python import kinterbasdb con = kinterbasdb.connect(dsn='localhost:/temp/test.db', user='sysdba', password='pass') # Construct a TPB by concatenating single-character strings (bytes) # from the kinterbasdb.isc_tpb_* family. customTPB = ( kinterbasdb.isc_tpb_read + kinterbasdb.isc_tpb_read_committed + kinterbasdb.isc_tpb_rec_version ) # Explicitly start a transaction with the custom TPB: con.begin(tpb=customTPB) # Now read some data using cursors: ... # Commit the transaction with the custom TPB. Future transactions # opened on con will not use a custom TPB unless it is explicitly # passed to con.begin every time, as it was above, or # con.default_tpb is changed to the custom TPB, as in: # con.default_tpb = customTPB con.commit() For convenient and safe construction of a custom `tpb_block`, KInterbasDB provides the special utility class `TPB`. .. class:: TPB .. attribute:: access_mode Required access mode. Default `isc_tpb_write`. .. attribute:: isolation_level Required Transaction Isolation Level. Default `isc_tpb_concurrency`. .. attribute:: lock_resolution Required lock resolution method. Default `isc_tpb_wait`. .. attribute:: lock_timeout Required lock timeout. Default `None`. .. attribute:: table_reservation Table reservation specification. Default `None`. Instead of changing the value of the table_reservation object itself, you must change its *elements* by manipulating it as though it were a dictionary that mapped "TABLE_NAME": (sharingMode, accessMode) For example: .. sourcecode:: python tpbBuilder.table_reservation["MY_TABLE"] = (kinterbasdb.isc_tpb_protected, kinterbasdb.isc_tpb_lock_write) .. method:: render() Returns a valid `transaction parameter block` according to the current values of the member attributes. .. sourcecode:: python import kinterbasdb con = kinterbasdb.connect(dsn='localhost:/temp/test.db', user='sysdba', password='pass') # Use TPB to construct a valid transaction parameter block # from the kinterbasdb.isc_tpb_* family. customTPB = kinterbasdb.TPB() customTPB.access_mode = kinterbasdb.isc_tpb_read customTPB.isolation_level = kinterbasdb.isc_tpb_read_committed + kinterbasdb.isc_tpb_rec_version # Explicitly start a transaction with the custom TPB: con.begin(tpb=customTPB.render()) # Now read some data using cursors: ... # Commit the transaction with the custom TPB. Future transactions # opened on con will not use a custom TPB unless it is explicitly # passed to con.begin every time, as it was above, or # con.default_tpb is changed to the custom TPB, as in: # con.default_tpb = customTPB.render() con.commit() If you want to build only the `table reservation` part of the `tpb` (for example to add to various custom-built parameter blocks), you can use the class `TableReservation` instead of `TPB`. .. class:: TableReservation This is a `dictionary-like` class, where keys are table names and values must be tuples of access parameters, i.e. "TABLE_NAME": (sharingMode, accessMode) .. method:: render() Returns a properly formatted table reservation part of the `transaction parameter block` according to the current values.
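A minimal sketch of how such a standalone reservation fragment might be combined with other TPB components, mirroring the concatenation style shown above. This assumes `TableReservation` can be instantiated and populated directly (verify against your KInterbasDB build); the table name `MY_TABLE` is purely illustrative and `con` is an open connection:

.. sourcecode:: python

    import kinterbasdb

    # Build only the table reservation fragment:
    reservation = kinterbasdb.TableReservation()
    reservation["MY_TABLE"] = (
        kinterbasdb.isc_tpb_protected, kinterbasdb.isc_tpb_lock_write
    )

    # Append the rendered fragment to a custom-built parameter block:
    customTPB = (
        kinterbasdb.isc_tpb_read
        + kinterbasdb.isc_tpb_read_committed + kinterbasdb.isc_tpb_rec_version
        + reservation.render()
    )
    con.begin(tpb=customTPB)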
The Connection object also exposes two methods that return information about the current transaction: .. class:: Connection .. method:: trans_info(request) Pythonic wrapper around the :meth:`~Connection.transaction_info()` call. :request: One or more information request codes (see transaction_info for details). Multiple codes must be passed as a tuple. Returns the decoded response(s) for the specified request code(s). When multiple requests are passed, returns a dictionary where the key is the request code and the value is the response from the server. .. method:: transaction_info(request, result_type) Thin wrapper around the Firebird API `isc_transaction_info` call. This function returns information about the active transaction. Raises a `ProgrammingError` exception when the transaction is not active. :request: One of the following constants: + isc_info_tra_id + isc_info_tra_oldest_interesting + isc_info_tra_oldest_snapshot + isc_info_tra_oldest_active + isc_info_tra_isolation + isc_info_tra_access + isc_info_tra_lock_timeout See the Firebird API Guide for details. :result_type: String code for the result type: + 'i' for Integer + 's' for String Retaining Operations -------------------- The `commit` and `rollback` methods of `kinterbasdb.Connection` accept an optional boolean parameter `retaining` (default `False`) to indicate whether to recycle the transactional context of the transaction being resolved by the method call. If `retaining` is `True`, the infrastructural support for the transaction active at the time of the method call will be "retained" (efficiently and transparently recycled) after the database server has committed or rolled back the conceptual transaction. In code that commits or rolls back frequently, "retaining" the transaction yields considerably better performance. However, retaining transactions must be used cautiously because they can interfere with the server's ability to garbage collect old record versions. For details about this issue, read the "Garbage" section of `this document `__ by Ann Harrison. For more information about retaining transactions, see the Firebird documentation.
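As a brief hedged sketch (the `log` table is illustrative, not part of KInterbasDB), a commit-intensive loop might recycle its transactional context like this:

.. sourcecode:: python

    import kinterbasdb

    con = kinterbasdb.connect(dsn='localhost:/temp/test.db',
        user='sysdba', password='pass')
    cur = con.cursor()
    for i in xrange(1000):
        cur.execute("insert into log (entry_no) values (?)", (i,))
        # Resolve the transaction but recycle its context; considerably
        # faster in commit-heavy code, at some cost to the server's
        # garbage collection of old record versions:
        con.commit(retaining=True)
    con.commit()  # a final ordinary commit releases the retained context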
Savepoints ---------- Firebird 1.5 introduced support for transaction savepoints. Savepoints are named, intermediate control points within an open transaction that can later be rolled back to, without affecting the preceding work. Multiple savepoints can exist within a single unresolved transaction, providing "multi-level undo" functionality. Although Firebird savepoints are fully supported from SQL alone via the `SAVEPOINT 'name'` and `ROLLBACK TO 'name'` statements, KInterbasDB also exposes savepoints at the Python API level for the sake of convenience. .. method:: Connection.savepoint(name) Establishes a savepoint with the specified `name`. To roll back to a specific savepoint, call the :meth:`~kinterbasdb.Connection.rollback()` method and provide a value (the name of the savepoint) for the optional `savepoint` parameter. If the `savepoint` parameter of :meth:`~kinterbasdb.Connection.rollback()` is not specified, the active transaction is cancelled in its entirety, as required by the Python Database API Specification. The following program demonstrates savepoint manipulation via the KInterbasDB API, rather than raw SQL. .. sourcecode:: python import kinterbasdb con = kinterbasdb.connect(dsn='localhost:/temp/test.db', user='sysdba', password='pass') cur = con.cursor() cur.execute("recreate table test_savepoints (a integer)") con.commit() print 'Before the first savepoint, the contents of the table are:' cur.execute("select * from test_savepoints") print ' ', cur.fetchall() cur.execute("insert into test_savepoints values (?)", [1]) con.savepoint('A') print 'After savepoint A, the contents of the table are:' cur.execute("select * from test_savepoints") print ' ', cur.fetchall() cur.execute("insert into test_savepoints values (?)", [2]) con.savepoint('B') print 'After savepoint B, the contents of the table are:' cur.execute("select * from test_savepoints") print ' ', cur.fetchall() cur.execute("insert into test_savepoints values (?)", [3]) con.savepoint('C') print 'After savepoint C, the contents of the table are:' cur.execute("select * from test_savepoints") print ' ', cur.fetchall() con.rollback(savepoint='A') print 'After rolling back to savepoint A, the contents of the table are:' cur.execute("select * from test_savepoints") print ' ', cur.fetchall() con.rollback() print 'After rolling back entirely, the contents of the table are:' cur.execute("select * from test_savepoints") print ' ', cur.fetchall() The output of the example program is shown below. .. sourcecode:: python Before the first savepoint, the contents of the table are: [] After savepoint A, the contents of the table are: [(1,)] After savepoint B, the contents of the table are: [(1,), (2,)] After savepoint C, the contents of the table are: [(1,), (2,), (3,)] After rolling back to savepoint A, the contents of the table are: [(1,)] After rolling back entirely, the contents of the table are: [] Using multiple transactions with the same connection ---------------------------------------------------- .. versionadded:: 3.3 Python Database API 2.0 was created with the assumption that a connection can support only one transaction per connection. However, Firebird can support multiple independent transactions running simultaneously within a single connection / attachment to the database. This feature is very important, as applications may require multiple transactions opened simultaneously to perform various tasks, which would otherwise require opening multiple connections and thus consuming more resources than necessary. KInterbasDB surfaces this Firebird feature through the new class :class:`Transaction` and extensions to the :class:`~kinterbasdb.Connection` and :class:`~kinterbasdb.Cursor` classes. .. class:: Connection .. method:: trans(tpb=None) Creates a new Transaction that operates within the context of this connection. Cursors can be created within that Transaction via its .cursor() method. .. attribute:: transactions `read-only property` List of non-close()d `Transaction` objects associated with this `Connection`. An element of this list may represent a resolved or unresolved physical transaction. Once a `Transaction` object has been created, it is only removed from the Connection's tracker if the Transaction's `close()` method is called (`Transaction.__del__` triggers an implicit close() call if necessary), or (obviously) if the Connection itself is close()d. The initial implementation will not make any guarantees about the order of the Transactions in this list. .. attribute:: main_transaction `read-only property` Transaction object that represents the DB-API implicit transaction.
The implementation guarantees that the same Transaction object will be reused across all DB-API transactions during the lifetime of the Connection. .. method:: prepare() Manually triggers the first phase of a two-phase commit (2PC). Use of this method is optional; if preparation is not triggered manually, it will be performed implicitly by commit() in a 2PC. See also the `Distributed Transactions`_ section for details. .. class:: Cursor .. attribute:: transaction `read-only property` Transaction with which this Cursor is associated. `None` if the Transaction has been close()d, or if the Cursor has been close()d. .. class:: Transaction .. method:: __init__(connection,tpb=None) The constructor requires an open :class:`~kinterbasdb.Connection` object and an optional `tpb` specification. .. attribute:: connection `read-only property` Connection object on which this Transaction is based. When the Connection's close() method is called, all Transactions that depend on the connection will also be implicitly close()d. If a Transaction has been close()d, its connection property will be None. .. attribute:: closed `read-only property` `True` if the Transaction has been closed (explicitly or implicitly). .. attribute:: n_physical `read-only property (int)` Number of physical transactions that have been executed via this Transaction object during its lifetime. .. attribute:: resolution `read-only property (int)` `Zero` if this Transaction object is currently managing an open physical transaction. `One` if the physical transaction has been resolved normally. Note that this is an int property rather than a bool, and is named `resolution` rather than `resolved`, so that non-zero values other than one can be assigned to convey specific information about the state of the transaction, in a future implementation (consider distributed transaction prepared state, limbo state, etc.). .. attribute:: cursors List of non-close()d Cursor objects associated with this Transaction. When the Transaction's close() method is called, whether explicitly or implicitly, it will implicitly close() each of its Cursors. The current implementation does not make any guarantees about the order of the Cursors in this list. .. method:: begin(tpb) See :meth:`Connection.begin()` for details. .. method:: commit(retaining=False) See :meth:`kinterbasdb.Connection.commit()` for details. .. method:: close() Permanently closes the Transaction object and severs its associations with other objects. If the physical transaction is unresolved when this method is called, a rollback() will be performed first. .. method:: prepare() See :meth:`Connection.prepare()` for details. .. method:: rollback(retaining=False) See :meth:`kinterbasdb.Connection.rollback()` for details. .. method:: savepoint() See :meth:`Connection.savepoint()` for details. .. method:: trans_info() See :meth:`Connection.trans_info()` for details. .. method:: transaction_info() See :meth:`Connection.transaction_info()` for details. .. method:: cursor() Creates a new Cursor that will operate in the context of this Transaction. The association between a Cursor and its Transaction is set when the Cursor is created, and cannot be changed during the lifetime of that Cursor. See :meth:`Connection.cursor()` for more details. If you don't want multiple transactions, you can use the implicit transaction object associated with the `Connection` and control it via the transaction-management and cursor methods of the :class:`Connection`. Alternatively, you can directly access the implicit transaction exposed as :attr:`~kinterbasdb.Connection.main_transaction` and control it via its transaction-management methods. To use additional transactions, create a new :class:`~kinterbasdb.Transaction` object by calling the :meth:`Connection.trans()` method.
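The following hedged sketch shows two independent transactions sharing one attachment (the table names are illustrative):

.. sourcecode:: python

    import kinterbasdb

    con = kinterbasdb.connect(dsn='localhost:/temp/test.db',
        user='sysdba', password='pass')

    # Work in the connection's implicit main transaction:
    curMain = con.cursor()
    curMain.execute("insert into work_items (id) values (?)", (1,))

    # A second, independent transaction over the same connection:
    auditTrans = con.trans()
    curAudit = auditTrans.cursor()
    curAudit.execute("insert into audit_log (msg) values (?)", ('item added',))

    # The two transactions are resolved independently of each other:
    auditTrans.commit()
    con.rollback()  # does not undo the work committed via auditTrans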
Distributed Transactions ------------------------ Distributed transactions are transactions that span multiple databases. KInterbasDB provides this Firebird feature through the `ConnectionGroup` class. .. class:: ConnectionGroup .. method:: __init__(connections=()) The constructor accepts an optional list of database connections. Connections cannot be in a closed state. .. method:: disband() Forcefully deletes all connections from the connection group. If a transaction is active, it is canceled (rolled back). .. method:: add(con) Adds an active connection to the group. If the connection already belongs to this or any other ConnectionGroup, has an active transaction, or has a timeout defined, an exception is raised. The group also cannot accept new members while it has an unresolved transaction. .. method:: remove(con) Removes the specified connection from the group. Raises an exception if the connection doesn't belong to this group or if the group has an unresolved transaction. .. method:: clear() Removes all connections from the group. Raises an exception if the group has an unresolved transaction. .. method:: members() Returns a list of the connection objects that belong to this group. .. method:: count() Returns the number of connection objects that belong to this group. .. method:: contains(con) Returns True if the specified connection belongs to this group. .. method:: begin() Starts a distributed transaction over the member connections. .. method:: commit(retaining=False) Commits the distributed transaction over the member connections using 2PC. .. method:: prepare() Manually triggers the first phase of a two-phase commit (2PC). Use of this method is optional; if preparation is not triggered manually, it will be performed implicitly by commit() in a 2PC. .. method:: rollback(retaining=False) Rolls back the distributed transaction over the member connections. .. note:: While a `Connection` belongs to a `ConnectionGroup`, any calls to the connection's transactional methods (`begin`, `prepare`, `commit`, `rollback`) will "bubble upward" to apply to the distributed transaction shared by the group as a whole. .. rubric:: Pitfalls and Limitations + Never add more than one connection to the same database to the same `ConnectionGroup`! + The current implementation works only with connection objects and their main transactions. Secondary transaction objects obtained from a connection cannot participate in a distributed transaction. **Example:** .. sourcecode:: python import kinterbasdb # Establish multiple connections the usual way: con1 = kinterbasdb.connect(dsn='weasel:/temp/test.db', user='sysdba', password='pass') con2 = kinterbasdb.connect(dsn='coyote:/temp/test.db', user='sysdba', password='pass') # Create a ConnectionGroup to associate multiple connections in such a # way that they can participate in a distributed transaction. # !!! # NO TWO MEMBERS OF A SINGLE CONNECTIONGROUP SHOULD BE ATTACHED TO THE SAME DATABASE! # !!! group = kinterbasdb.ConnectionGroup( connections=(con1,con2) ) # Start a distributed transaction involving all of the members of the group # (con1 and con2 in this case) with one of the following approaches: # - Call group.begin() # - Call con1.begin(); the operation will "bubble upward" and apply to the group.
# - Call con2.begin(); the operation will "bubble upward" and apply to the group. # - Just start executing some SQL statements on either con1 or con2. # A transaction will be started implicitly; it will be a distributed # transaction because con1 and con2 are members of a ConnectionGroup. group.begin() # Perform some database changes the usual way (via cursors on con1 and con2): ... # Commit or roll back the distributed transaction by calling the commit # or rollback method of the ConnectionGroup itself, or the commit or # rollback method of any member connection (con1 or con2 in this case). group.commit() # Unless you want to perform another distributed transaction, disband the # group so that member connections can operate independently again. group.clear() Prepared Statements =================== When you define a Python function, the interpreter initially parses the textual representation of the function and generates a binary equivalent called bytecode. The bytecode representation can then be executed directly by the Python interpreter any number of times and with a variety of parameters, but the human-oriented textual definition of the function never need be parsed again. Database engines perform a similar series of steps when executing a SQL statement. Consider the following series of statements: .. sourcecode:: python cur.execute("insert into the_table (a,b,c) values ('aardvark', 1, 0.1)") ... cur.execute("insert into the_table (a,b,c) values ('zymurgy', 2147483647, 99999.999)") If there are many statements in that series, wouldn't it make sense to "define a function" to insert the provided "parameters" into the predetermined fields of the predetermined table, instead of forcing the database engine to parse each statement anew and figure out what database entities the elements of the statement refer to? In other words, why not take advantage of the fact that the form of the statement ("the function") stays the same throughout, and only the values ("the parameters") vary? Prepared statements deliver that performance benefit and other advantages as well. The following code is semantically equivalent to the series of insert operations discussed previously, except that it uses a single SQL statement that contains Firebird's parameter marker ( `?`) in the slots where values are expected, then supplies those values as Python tuples instead of constructing a textual representation of each value and passing it to the database engine for parsing: .. sourcecode:: python insertStatement = "insert into the_table (a,b,c) values (?,?,?)" cur.execute(insertStatement, ('aardvark', 1, 0.1)) ... cur.execute(insertStatement, ('zymurgy', 2147483647, 99999.999)) Only the values change as each row is inserted; the statement remains the same. For many years, KInterbasDB has recognized situations similar to this one and automatically reused the same prepared statement in each :meth:`Cursor.execute` call. In KInterbasDB 3.2, the scheme for automatically reusing prepared statements has become more sophisticated, and the API has been extended to offer the client programmer manual control over prepared statement creation and use. The entry point for manual statement preparation is the `Cursor.prep` method. .. method:: Cursor.prep(sql) :sql: string parameter that contains the SQL statement to be prepared. Returns a :class:`PreparedStatement` instance. .. class:: PreparedStatement `PreparedStatement` has no public methods, but does have the following public read-only properties: .. 
attribute:: sql A reference to the string that was passed to :meth:`~Cursor.prep()` to create this `PreparedStatement`. .. attribute:: statement_type An integer code that can be matched against the statement type constants in the `kinterbasdb.isc_info_sql_stmt_*` series. The following statement type codes are currently available: + `isc_info_sql_stmt_commit` + `isc_info_sql_stmt_ddl` + `isc_info_sql_stmt_delete` + `isc_info_sql_stmt_exec_procedure` + `isc_info_sql_stmt_get_segment` + `isc_info_sql_stmt_insert` + `isc_info_sql_stmt_put_segment` + `isc_info_sql_stmt_rollback` + `isc_info_sql_stmt_savepoint` + `isc_info_sql_stmt_select` + `isc_info_sql_stmt_select_for_upd` + `isc_info_sql_stmt_set_generator` + `isc_info_sql_stmt_start_trans` + `isc_info_sql_stmt_update` .. attribute:: n_input_params The number of input parameters the statement requires. .. attribute:: n_output_params The number of output fields the statement produces. .. attribute:: plan A string representation of the execution plan generated for this statement by the database engine's optimizer. This property can be used, for example, to verify that a statement is using the expected index. .. attribute:: description A Python DB API 2.0 description sequence (of the same format as :attr:`Cursor.description`) that describes the statement's output parameters. Statements without output parameters have a `description` of `None`. In addition to programmatically examining the characteristics of a SQL statement via the properties of `PreparedStatement`, the client programmer can submit a `PreparedStatement` to :meth:`Cursor.execute` or :meth:`Cursor.executemany` for execution. The code snippet below is semantically equivalent to both of the previous snippets in this section, but it explicitly prepares the `INSERT` statement in advance, then submits it to :meth:`Cursor.executemany` for execution: .. sourcecode:: python insertStatement = cur.prep("insert into the_table (a,b,c) values (?,?,?)") inputRows = [ ('aardvark', 1, 0.1), ... ('zymurgy', 2147483647, 99999.999) ] cur.executemany(insertStatement, inputRows) **Example Program** The following program demonstrates the explicit use of PreparedStatements. It also benchmarks explicit `PreparedStatement` reuse against KInterbasDB's automatic `PreparedStatement` reuse, and against an input strategy that prevents `PreparedStatement` reuse. .. sourcecode:: python import time import kinterbasdb con = kinterbasdb.connect(dsn=r'localhost:D:\temp\test-20.firebird', user='sysdba', password='masterkey' ) cur = con.cursor() # Create supporting database entities: cur.execute("recreate table t (a int, b varchar(50))") con.commit() cur.execute("create unique index unique_t_a on t(a)") con.commit() # Explicitly prepare the insert statement: psIns = cur.prep("insert into t (a,b) values (?,?)") print 'psIns.sql: "%s"' % psIns.sql print 'psIns.statement_type == kinterbasdb.isc_info_sql_stmt_insert:', ( psIns.statement_type == kinterbasdb.isc_info_sql_stmt_insert ) print 'psIns.n_input_params: %d' % psIns.n_input_params print 'psIns.n_output_params: %d' % psIns.n_output_params print 'psIns.plan: %s' % psIns.plan print N = 10000 iStart = 0 # The client programmer uses a PreparedStatement explicitly: startTime = time.time() for i in xrange(iStart, iStart + N): cur.execute(psIns, (i, str(i))) print ( 'With explicit prepared statement, performed' '\n %0.2f insertions per second.' 
% (N / (time.time() - startTime))
      )
    con.commit()

    iStart += N
    # KInterbasDB automatically uses a PreparedStatement "under the hood":
    startTime = time.time()
    for i in xrange(iStart, iStart + N):
        cur.execute("insert into t (a,b) values (?,?)", (i, str(i)))
    print (
        'With implicit prepared statement, performed'
        '\n  %0.2f insertions per second.'
        % (N / (time.time() - startTime))
      )
    con.commit()

    iStart += N
    # A new SQL string containing the inputs is submitted every time, so
    # KInterbasDB is not able to implicitly reuse a PreparedStatement.  Also, in
    # a more complicated scenario where the end user supplied the string input
    # values, the program would risk SQL injection attacks:
    startTime = time.time()
    for i in xrange(iStart, iStart + N):
        cur.execute("insert into t (a,b) values (%d,'%s')" % (i, str(i)))
    print (
        'When unable to reuse prepared statement, performed'
        '\n  %0.2f insertions per second.'
        % (N / (time.time() - startTime))
      )
    con.commit()

    # Prepare a SELECT statement and examine its properties.  The optimizer's
    # plan should use the unique index that we created at the beginning of this
    # program.
    print
    psSel = cur.prep("select * from t where a = ?")
    print 'psSel.sql: "%s"' % psSel.sql
    print 'psSel.statement_type == kinterbasdb.isc_info_sql_stmt_select:', (
        psSel.statement_type == kinterbasdb.isc_info_sql_stmt_select
      )
    print 'psSel.n_input_params: %d' % psSel.n_input_params
    print 'psSel.n_output_params: %d' % psSel.n_output_params
    print 'psSel.plan: %s' % psSel.plan

    # The current implementation does not allow PreparedStatements to be
    # prepared on one Cursor and executed on another:
    print
    print 'Note that PreparedStatements are not transferrable from one cursor to another:'
    cur2 = con.cursor()
    cur2.execute(psSel)

Output:

.. sourcecode:: python

    psIns.sql: "insert into t (a,b) values (?,?)"
    psIns.statement_type == kinterbasdb.isc_info_sql_stmt_insert: True
    psIns.n_input_params: 2
    psIns.n_output_params: 0
    psIns.plan: None

    With explicit prepared statement, performed
      9551.10 insertions per second.
    With implicit prepared statement, performed
      9407.34 insertions per second.
    When unable to reuse prepared statement, performed
      1882.53 insertions per second.

    psSel.sql: "select * from t where a = ?"
    psSel.statement_type == kinterbasdb.isc_info_sql_stmt_select: True
    psSel.n_input_params: 1
    psSel.n_output_params: 2
    psSel.plan: PLAN (T INDEX (UNIQUE_T_A))

    Note that PreparedStatements are not transferrable from one cursor to another:
    Traceback (most recent call last):
      File "adv_prepared_statements__overall_example.py", line 86, in ?
        cur2.execute(psSel)
    kinterbasdb.ProgrammingError: (0, 'A PreparedStatement can only be used with the Cursor that originally prepared it.')

As you can see, the version that prevents the reuse of prepared statements is about five times slower -- *for a trivial statement*. In a real application, SQL statements are likely to be far more complicated, so the speed advantage of using prepared statements would only increase.

As the timings indicate, KInterbasDB does a good job of reusing prepared statements even if the client program is written in a style strictly compatible with the Python DB API 2.0 (which accepts only strings -- not :class:`PreparedStatement` objects -- to the :meth:`Cursor.execute()` method). In the run above, the performance loss from implicit rather than explicit reuse was less than two percent.

Named Cursors
=============

To allow the Python programmer to perform scrolling `UPDATE` or `DELETE` via the "`SELECT ... FOR UPDATE`" syntax, KInterbasDB provides the read/write property `Cursor.name`.
.. attribute:: Cursor.name

   Name for the SQL cursor. This property can be ignored entirely if you don't
   need to use it.

**Example Program**

.. sourcecode:: python

    import kinterbasdb

    con = kinterbasdb.connect(dsn='localhost:/temp/test.db',
        user='sysdba', password='pass'
      )

    curScroll = con.cursor()
    curUpdate = con.cursor()

    curScroll.execute("select city from addresses for update")
    curScroll.name = 'city_scroller'
    update = "update addresses set city=? where current of " + curScroll.name

    for (city,) in curScroll:
        city = ... # make some changes to city
        curUpdate.execute( update, (city,) )

    con.commit()

Parameter Conversion
====================

KInterbasDB converts bound parameters marked with a `?` in SQL code in a standard way. However, the module also offers several extensions to standard parameter binding, intended to make client code more readable and more convenient to write.

Implicit Conversion of Input Parameters from Strings
----------------------------------------------------

The database engine treats most SQL data types in a weakly typed fashion: the engine may attempt to convert the raw value to a different type, as appropriate for the current context. For instance, the SQL expressions `123` (integer) and `'123'` (string) are treated equivalently when the value is to be inserted into an `integer` field; the same applies when `'123'` and `123` are to be inserted into a `varchar` field.

This weak typing model is quite unlike Python's dynamic yet strong typing. Although weak typing is regarded with suspicion by most experienced Python programmers, the database engine is in certain situations so aggressive about its typing model that KInterbasDB must compromise in order to remain an elegant means of programming the database engine.

An example is the handling of "magic values" for date and time fields. The database engine interprets certain string values such as `'yesterday'` and `'now'` as having special meaning in a date/time context. If KInterbasDB did not accept strings as the values of parameters destined for storage in date/time fields, the resulting code would be awkward. Consider the difference between the two Python snippets below, which insert a row containing an integer and a timestamp into a table defined with the following DDL statement:

.. sourcecode:: python

    create table test_table (i int, t timestamp)

.. sourcecode:: python

    i = 1
    t = 'now'
    sqlWithMagicValues = "insert into test_table (i, t) values (?, '%s')" % t
    cur.execute( sqlWithMagicValues, (i,) )

.. sourcecode:: python

    i = 1
    t = 'now'
    cur.execute( "insert into test_table (i, t) values (?, ?)", (i, t) )

If KInterbasDB did not support weak parameter typing, string parameters that the database engine is to interpret as "magic values" would have to be rolled into the SQL statement in a separate operation from the binding of the rest of the parameters, as in the first Python snippet above. Implicit conversion of parameter values from strings allows the consistency evident in the second snippet, which is both more readable and more general.

It should be noted that KInterbasDB does not perform the conversion from string itself. Instead, it passes that responsibility to the database engine by changing the parameter metadata structure dynamically at the last moment, then restoring the original state of the metadata structure after the database engine has performed the conversion.
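The same accommodation extends beyond date/time fields: because the engine treats `123` and `'123'` equivalently, a string rendering of a number is also acceptable for an `integer` parameter. A minimal sketch, assuming the `test_table` defined above and an open cursor `cur`:

.. sourcecode:: python

    # Both rows are accepted: while binding the parameters, the engine
    # converts the string '2' to an integer and the string 'now' to the
    # current timestamp.
    cur.execute("insert into test_table (i, t) values (?, ?)", (2, 'now'))
    cur.execute("insert into test_table (i, t) values (?, ?)", ('2', 'now'))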
A secondary benefit is that when one uses KInterbasDB to import large amounts of data from flat files into the database, the incoming values need not necessarily be converted to their proper Python types before being passed to the database engine. Eliminating this intermediate step may accelerate the import process considerably, although other factors such as the chosen connection protocol and the deactivation of indexes during the import are more consequential. For bulk import tasks, the database engine's external tables also deserve consideration. External tables can be used to suck semi-structured data from flat files directly into the relational database without the intervention of an ad hoc conversion program.

Dynamic Type Translation
------------------------

Dynamic type translators are conversion functions registered by the Python programmer to transparently convert database field values to and from their internal representation.

The client programmer can choose to ignore translators altogether, in which case KInterbasDB will manage them behind the scenes. Otherwise, the client programmer can use any of several :ref:`standard type translators <included-translators>` included with KInterbasDB, register custom translators, or set the translators to `None` to deal directly with the KInterbasDB-internal representation of the data type.

When translators have been registered for a specific SQL data type, Python objects on their way into a database field of that type will be passed through the input translator before they are presented to the database engine; values on their way out of the database into Python will be passed through the corresponding output translator. Output and input translation for a given type is usually implemented by two different functions.

Specifics of the Dynamic Type Translation API
---------------------------------------------

Translators are managed with the following methods of :class:`~kinterbasdb.Connection` and :class:`~kinterbasdb.Cursor`.

.. method:: Connection.get_type_trans_in()

   Retrieves the inbound type translation map.

.. method:: Connection.set_type_trans_in(trans_dict)

   Changes the inbound type translation map.

.. method:: Cursor.get_type_trans_in()

   Retrieves the inbound type translation map.

.. method:: Cursor.set_type_trans_in(trans_dict)

   Changes the inbound type translation map.

The `set_type_trans_[in|out]` methods accept a single argument: a mapping of type name to translator. The `get_type_trans_[in|out]` methods return a copy of the translation table.

`Cursor`s inherit their `Connection`'s translation settings, but can override them without affecting the connection or other cursors (much as subclasses can override the methods of their base classes).

The following code snippet installs an input translator for fixed point types (`NUMERIC`/`DECIMAL` SQL types) into a connection:

.. sourcecode:: python

    con.set_type_trans_in( {'FIXED': fixed_input_translator_function} )

The following method call retrieves the type translation table for `con`:

.. sourcecode:: python

    con.get_type_trans_in()

The method call above would return a translation table (dictionary) such as this:

.. sourcecode:: python

    {
      'DATE':      <function date_conv_in at 0x00962C5C>,
      'TIMESTAMP': <function timestamp_conv_in at 0x00962D24>,
      'FIXED':     <function fixed_input_translator_function at 0x00962DB0>,
      'TIME':      <function time_conv_in at 0x00962CBC>
    }

Notice that although the sample code registered only one type translator, there are four listed in the mapping returned by the `get_type_trans_in` method. By default, KInterbasDB uses dynamic type translation to implement the conversion of `DATE`, `TIME`, `TIMESTAMP`, `NUMERIC`, and `DECIMAL` values.
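Because cursors inherit and can override their connection's settings, a translator can be scoped to a single cursor without disturbing the rest of the program. A minimal sketch, assuming an open connection `con` (the uppercasing and lowercasing translators are purely illustrative, not part of KInterbasDB):

.. sourcecode:: python

    def upper_out(s):
        # 'TEXT' output translator: receives a str, or None for NULL fields.
        if s is None:
            return None
        return s.upper()

    def lower_out(s):
        if s is None:
            return None
        return s.lower()

    # Connection-wide setting, inherited by every cursor created afterward:
    con.set_type_trans_out({'TEXT': upper_out})

    curA = con.cursor()  # uses upper_out, inherited from con
    curB = con.cursor()
    curB.set_type_trans_out({'TEXT': lower_out})  # overrides curB only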
For the source code locations of KInterbasDB's reference translators, see the :ref:`table <included-translators>` in the next section.

In the sample above, a translator is registered under the key `'FIXED'`, but Firebird has no SQL data type named `FIXED`. The following table lists the names of the database engine's SQL data types in the left column, and the corresponding KInterbasDB-specific key under which client programmers can register translators in the right column.

.. _table-mapping-to-keys:

**Mapping of SQL Data Type Names to Translator Keys**

========================= ===========================================
SQL Type(s)               Translator Key
========================= ===========================================
CHAR / VARCHAR            'TEXT' for fields with charsets
                          `NONE`, `OCTETS`, or `ASCII`

                          'TEXT_UNICODE' for all other charsets
BLOB                      'BLOB'
SMALLINT/INTEGER/BIGINT   'INTEGER'
FLOAT / DOUBLE PRECISION  'FLOATING'
NUMERIC / DECIMAL         'FIXED'
DATE                      'DATE'
TIME                      'TIME'
TIMESTAMP                 'TIMESTAMP'
========================= ===========================================

Consequences of the Dynamic Type Translation in KInterbasDB
-----------------------------------------------------------

Dynamic type translation has eliminated KInterbasDB's dependency on :class:`mx.DateTime`. Although KInterbasDB will continue to use :class:`mx.DateTime` as its default date/time representation for the sake of backward compatibility, dynamic type translation allows users to conveniently deal with database date/time values in terms of the new standard library module `datetime`, or any other representation they care to write translators for.

Dynamic type translation also allows `NUMERIC`/`DECIMAL` values to be transparently represented as :class:`decimal.Decimal` objects rather than scaled integers, which is much more convenient. For backward compatibility, `NUMERIC`/`DECIMAL` values are still represented by default as Python floats, and the older API based on the :attr:`~Connection.precision_mode` attribute is still present. However, all of these representations are now implemented "under the hood" via dynamic type translation.

Reference implementations of all of the translators discussed above are provided with KInterbasDB, in these modules:
.. _included-translators:

**Reference Translators Included with KInterbasDB**

+---------------------+-------------------------------------+---------------------------------------+
| SQL Type(s)         | Python Type(s)                      | Reference Implementation In Module    |
+=====================+=====================================+=======================================+
| NUMERIC/DECIMAL     | float *(imprecise)* (default)       | kinterbasdb.typeconv_fixed_stdlib     |
+                     +-------------------------------------+---------------------------------------+
|                     | scaled `int` *(precise)*            | kinterbasdb.typeconv_fixed_stdlib     |
+                     +-------------------------------------+---------------------------------------+
|                     | `fixedpoint.FixedPoint` *(precise)* | kinterbasdb.typeconv_fixed_fixedpoint |
+                     +-------------------------------------+---------------------------------------+
|                     | `decimal.Decimal` *(precise)*       | kinterbasdb.typeconv_fixed_decimal    |
+---------------------+-------------------------------------+---------------------------------------+
| DATE/TIME/TIMESTAMP | `mx.DateTime` (default)             | kinterbasdb.typeconv_datetime_mx      |
+                     +-------------------------------------+---------------------------------------+
|                     | Python 2.3+ `datetime`              | kinterbasdb.typeconv_datetime_stdlib  |
+---------------------+-------------------------------------+---------------------------------------+
| CHAR/VARCHAR (with  | `unicode`                           | kinterbasdb.typeconv_text_unicode     |
| any character set   |                                     |                                       |
| except NONE, OCTETS,|                                     |                                       |
| ASCII)              |                                     |                                       |
+---------------------+-------------------------------------+---------------------------------------+

.. module:: kinterbasdb.typeconv_fixed_stdlib
   :synopsis: Type conversion: Fixed/Standard Library
.. module:: kinterbasdb.typeconv_fixed_fixedpoint
   :synopsis: Type conversion: Fixed/fixedpoint module (3rd party)
.. module:: kinterbasdb.typeconv_fixed_decimal
   :synopsis: Type conversion: Fixed/Python 2.4+ Standard Library
.. module:: kinterbasdb.typeconv_datetime_mx
   :synopsis: Type conversion: DateTime/eGenix mx.DateTime
.. module:: kinterbasdb.typeconv_datetime_stdlib
   :synopsis: Type conversion: DateTime/Python 2.3+ Standard Library
.. module:: kinterbasdb.typeconv_text_unicode
   :synopsis: Type conversion: Text/Unicode
.. currentmodule:: kinterbasdb

Writing Custom Translators
--------------------------

Below is a table that specifies the required argument and return value signatures of input and output converters for the various translator keys. Python's native types map perfectly to `'TEXT'`, `'TEXT_UNICODE'`, `'BLOB'`, `'INTEGER'`, and `'FLOATING'` types, so in those cases the translator signatures are very simple. The signatures for `'FIXED'`, `'DATE'`, `'TIME'`, and `'TIMESTAMP'` are not as simple because Python (before 2.4) lacks native types to represent these values with both precision *and* convenience.

KInterbasDB handles `'FIXED'` values internally as scaled integers; the date and time types as tuples. KInterbasDB itself uses translators implemented according to the rules in the table below; the code for these reference translators can be found in the Python modules named `kinterbasdb.typeconv_*` (see the table in the previous section for details).
Signature Specifications for Translators
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

**Translator Key 'TEXT'**

For `CHAR` / `VARCHAR` fields with character sets `NONE`, `OCTETS`, or `ASCII`

*Input Translator Argument / Return Value Signature:*

Args: a single Python `str` argument (or `None`)

Returns: a single Python string

*Output Translator Signature:*

Same signature as input translator, except that return value is not constrained.

**Translator Key 'TEXT_UNICODE'**

For `CHAR` / `VARCHAR` fields with charsets **other** than `NONE`, `OCTETS`, or `ASCII`

*Input Translator Argument / Return Value Signature:*

Args: a single Python 2-tuple argument containing a Python `unicode` or `str` object (or `None`) in the first element; the database character set code in the second element (the tuple is of the form `(val, dbCharacterSetCode)`). The database character set codes (which are integers) are defined in the RDB$CHARACTER_SETS system table. The module `kinterbasdb.typeconv_text_unicode` contains a dictionary named `DB_TO_PYTHON_ENCODING_MAP` that maps database character set codes to Python codec names. For example, the database character set `UNICODE_FSS` has code `3`; `typeconv_text_unicode.DB_TO_PYTHON_ENCODING_MAP[3]` is `'utf_8'`, the name of a Python codec that can be passed to the `encode` / `decode` methods of `unicode` / `str`.

Returns: a Python `str` object containing the encoded representation of the incoming value (typically computed via `val.encode`).

*Output Translator Signature:*

Args: a single Python 2-tuple argument containing a Python `str` object (or `None`) in the first element; the database character set code in the second element (the tuple is of the form `(val, dbCharacterSetCode)`). `val` contains the encoded representation of the Unicode string.

Returns: a Python `unicode` object containing the decoded representation of the outgoing value (typically computed via `val.decode`).

**Translator Key 'BLOB'**

*Input Translator Argument / Return Value Signature:*

By default, same signature as that of 'TEXT'. A special case was introduced in KInterbasDB 3.2 to allow for :ref:`streaming blob handling <blob-conversion>`.

*Output Translator Signature:*

Same signature as input translator, except that return value is not constrained.

**Translator Key 'INTEGER'**

*Input Translator Argument / Return Value Signature:*

Args: a single Python `int` argument (or `None`)

Returns: a single Python `int` (or `long`, if the number is too large to fit in an `int`)

*Output Translator Signature:*

Same signature as input translator, except that return value is not constrained.

**Translator Key 'FLOATING'**

*Input Translator Argument / Return Value Signature:*

Args: a single Python `float` argument (or `None`)

Returns: a single Python `float`

*Output Translator Signature:*

Same signature as input translator, except that return value is not constrained.

**Translator Key 'FIXED'**

*Input Translator Argument / Return Value Signature:*

Args: a single Python 2-tuple argument containing a scaled Python integer in the first element and the scale factor in the second element (the tuple is of the form `(val, scale)`).

Returns: a single Python integer, scaled appropriately

*Output Translator Signature:*

Same signature as input translator, except that return value is not constrained.
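As a concrete illustration of the `'FIXED'` signatures just described, here is a hedged sketch of a custom output translator that renders the internal `(val, scale)` representation as a string with an explicit decimal point. The function name is illustrative, and two details are assumptions rather than documented facts: that `scale` arrives as a non-negative count of decimal digits, and that a `NULL` field arrives as a bare `None`:

.. sourcecode:: python

    def fixed_conv_out_string(arg):
        # Output translator for 'FIXED': arg is a (val, scale) tuple, where
        # val is the scaled integer; e.g. a NUMERIC(18,2) value of 4.53 is
        # assumed to arrive as (453, 2).  NULLs are assumed to arrive as None.
        if arg is None:
            return None
        val, scale = arg
        if scale == 0:
            return str(val)
        sign = ''
        if val < 0:
            sign = '-'
            val = -val
        digits = str(val).zfill(scale + 1)  # ensure at least one integer digit
        return sign + digits[:-scale] + '.' + digits[-scale:]

    cur.set_type_trans_out({'FIXED': fixed_conv_out_string})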
**Translator Key 'DATE'**

*Input Translator Argument / Return Value Signature:*

Args: an instance of the chosen date type (such as Python 2.3+'s `datetime.date`) or `None`

Returns: a single Python 3-tuple of the form `(year, month, day)`

*Output Translator Signature:*

Args: a single Python 3-tuple of the form `(year, month, day)` (or `None` if the database field was `NULL`)

Return value is not constrained.

**Translator Key 'TIME'**

*Input Translator Argument / Return Value Signature:*

Args: an instance of the chosen time type (such as Python 2.3+'s `datetime.time`) or `None`

Returns: a single Python 4-tuple of the form `(hour, minute, second, microseconds)`

*Output Translator Signature:*

Args: a single Python 4-tuple of the form `(hour, minute, second, microseconds)` (or `None` if the database field was `NULL`).

Return value is not constrained.

**Translator Key 'TIMESTAMP'**

*Input Translator Argument / Return Value Signature:*

Args: an instance of the chosen datetime type (such as Python 2.3+'s `datetime.datetime`) or `None`

Returns: a single Python 7-tuple of the form `(year, month, day, hour, minute, second, microseconds)`

*Output Translator Signature:*

Args: a single Python 7-tuple of the form `(year, month, day, hour, minute, second, microseconds)` (or `None` if the database field was `NULL`).

Return value is not constrained.

Example Programs
----------------

DATE/TIME/TIMESTAMP

.. sourcecode:: python

    import datetime # Python 2.3 standard library module
    import kinterbasdb
    import kinterbasdb.typeconv_datetime_stdlib as tc_dt

    def connect(*args, **kwargs):
        """
          This wrapper around kinterbasdb.connect creates connections that use
        the datetime module (which entered the standard library in Python 2.3)
        for both input and output of DATE, TIME, and TIMESTAMP database fields.
          This wrapper simply registers kinterbasdb's official date/time
        translators for the datetime module, which reside in the
        kinterbasdb.typeconv_datetime_stdlib module.
          An equivalent set of translators for mx.DateTime (which kinterbasdb
        uses by default for backward compatibility) resides in the
        kinterbasdb.typeconv_datetime_mx module.
          Note that because cursors inherit their connection's dynamic type
        translation settings, cursors created upon connections returned by
        this function will also use the datetime module.
        """
        con = kinterbasdb.connect(*args, **kwargs)

        con.set_type_trans_in({
            'DATE':      tc_dt.date_conv_in,
            'TIME':      tc_dt.time_conv_in,
            'TIMESTAMP': tc_dt.timestamp_conv_in,
          })
        con.set_type_trans_out({
            'DATE':      tc_dt.date_conv_out,
            'TIME':      tc_dt.time_conv_out,
            'TIMESTAMP': tc_dt.timestamp_conv_out,
          })

        return con

    def _test():
        con = connect(dsn='localhost:/temp/test.db', user='sysdba', password='pass')
        cur = con.cursor()

        # Retrieve the current timestamp of the database server.
        cur.execute("select current_timestamp from rdb$database")
        curStamp = cur.fetchone()[0]

        print 'The type of curStamp is', type(curStamp)
        print 'curStamp is', curStamp

        # Create a test table with a single TIMESTAMP column.
        con.execute_immediate("recreate table test_stamp (a timestamp)")
        con.commit()

        # Insert a timestamp into the database, then retrieve it.
        py23StandardLibTimestamp = datetime.datetime.now()
        cur.execute("insert into test_stamp values (?)", (py23StandardLibTimestamp,))

        cur.execute("select * from test_stamp")
        stamp = cur.fetchone()[0]

        print 'The type of stamp is', type(stamp)
        print 'stamp is', stamp

    if __name__ == '__main__':
        _test()

Sample output:
.. sourcecode:: python

    The type of curStamp is <type 'datetime.datetime'>
    curStamp is 2003-05-20 03:55:42
    The type of stamp is <type 'datetime.datetime'>
    stamp is 2003-05-20 03:55:42

Deferred Loading of Dynamic Type Translators
--------------------------------------------

KInterbasDB has existed since 1998, five years before the `datetime` module was available in the Python standard library. Therefore, KInterbasDB's default representation for date and time values is the `mx.DateTime` module. This representation is recommended by the Python DB API 2.0 Specification, and was an entirely sensible choice during the many years before the advent of the standard library `datetime` module.

Now that the `datetime` module is available in the standard library, many KInterbasDB users prefer it to `mx.DateTime`. For the sake of backward-compatibility, it is necessary to continue to use `mx.DateTime` by default, but it's both burdensome and wasteful to import `mx.DateTime` in programs that don't use it. To address this situation, KInterbasDB's type translation initialization code defers the choice of a default set of translators until the :func:`kinterbasdb.init()` function is called. A client program can explicitly call `kinterbasdb.init` to forestall the import of `mx.DateTime`.

.. function:: init(type_conv=200)

   .. versionchanged:: 3.3
      Takes a keyword argument `type_conv`, which controls KInterbasDB's
      initial choice of type translators. `type_conv` can be either an integer
      or an object that has all of the attributes named in
      :data:`kinterbasdb.BASELINE_TYPE_TRANSLATION_FACILITIES` (an example of
      such an object is the module :mod:`kinterbasdb.typeconv_backcompat`).
      If `type_conv` is an integer, it will cause KInterbasDB to use one of
      the following predefined type translator configurations:

.. _typeconv-values:

+--------------------+----------------------------------------------------------------+
| `type_conv` code   | Resulting translator configuration                             |
+====================+================================================================+
| 0                  | Minimal type translators that represent date/time values as   |
|                    | tuples and fixed point values as either floats or scaled      |
|                    | integers, depending on the value of the deprecated            |
|                    | `Connection.precision_mode` attribute.                        |
|                    |                                                                |
|                    | Unicode values are **not** encoded or decoded automatically.  |
|                    |                                                                |
|                    | Implemented by the :mod:`kinterbasdb.typeconv_naked` module.  |
+--------------------+----------------------------------------------------------------+
| 1                  | Backward-compatible type translators that represent date/time |
|                    | values via the `mx.DateTime` module and fixed point values as |
|                    | either floats or scaled integers, depending on the value of   |
|                    | the deprecated `Connection.precision_mode` attribute.         |
|                    |                                                                |
|                    | Unicode values are **not** encoded or decoded automatically.  |
|                    |                                                                |
|                    | Implemented by the :mod:`kinterbasdb.typeconv_backcompat`     |
|                    | module. This configuration perfectly mimics the type          |
|                    | translation behavior of KInterbasDB 3.0.                      |
+--------------------+----------------------------------------------------------------+
| 100                | This translator configuration, which is intended for use with |
|                    | Python 2.3 and later, represents date/time values via the     |
|                    | standard library module `datetime` and fixed point values via |
|                    | the third-party `fixedpoint` module.                          |
|                    |                                                                |
|                    | Unicode values **are** encoded and decoded automatically.     |
|                    |                                                                |
|                    | Implemented by the :mod:`kinterbasdb.typeconv_23plus` module. |
+--------------------+----------------------------------------------------------------+
| 200 (the default)  | This translator configuration represents date/time values via |
|                    | the standard library module `datetime` and fixed point values |
|                    | via the `decimal` module. The `decimal` module entered the    |
|                    | standard library in Python 2.4, but can also be manually      |
|                    | installed in Python 2.3.                                      |
|                    |                                                                |
|                    | Unicode values **are** encoded and decoded automatically.     |
|                    |                                                                |
|                    | Implemented by the :mod:`kinterbasdb.typeconv_24plus` module. |
+--------------------+----------------------------------------------------------------+
| 199                | This translator configuration is exactly like `200`, except   |
|                    | that it represents fixed point values as `float` objects in   |
|                    | order to avoid the substantial memory overhead of the         |
|                    | `decimal` module.                                             |
|                    |                                                                |
|                    | *It is fundamentally imprecise to represent fixed point       |
|                    | values in floating point*, so this convenience code is        |
|                    | intended *solely* for users who wish to use `datetime`        |
|                    | instead of `mx.DateTime`, but don't care about fixed point    |
|                    | values and don't want to suffer the memory overhead of the    |
|                    | `decimal` module.                                             |
|                    |                                                                |
|                    | Implemented by the :mod:`kinterbasdb.typeconv_23plus_lowmem`  |
|                    | module.                                                       |
+--------------------+----------------------------------------------------------------+
| 300 (the ideal for | This translator configuration is identical to 200, but        |
| Firebird 2.1 and   | textual blobs are handled in the same way as other textual    |
| later)             | types, so unicode encoding/decoding is performed              |
|                    | automagically. When converting in the input direction, this   |
| New in v3.3        | doesn't work with any Firebird version prior to 2.1, because  |
|                    | the Firebird API doesn't make the blob's character set ID     |
|                    | available.                                                    |
+--------------------+----------------------------------------------------------------+

These integer type conversion codes are defined *solely* for convenience. The same functionality is available via the object variant of `type_conv`, but setting it up is more laborious for typical translator configurations.

.. warning:: The default `type_conv` value was changed from 1 to 200 in version 3.3!

.. module:: kinterbasdb.typeconv_naked
   :synopsis: Type conversion: Minimal
.. module:: kinterbasdb.typeconv_backcompat
   :synopsis: Type conversion: Backward-compatible
.. module:: kinterbasdb.typeconv_23plus
   :synopsis: Type conversion: Progressive
.. module:: kinterbasdb.typeconv_23plus_lowmem
   :synopsis: Type conversion: Even More Progressive
.. module:: kinterbasdb.typeconv_24plus
   :synopsis: Type conversion: Even More Progressive
.. currentmodule:: kinterbasdb

Deferred Loading: Backward Compatibility Issues
-----------------------------------------------

The deferred type translator loading scheme introduced in KInterbasDB 3.1 goes to great lengths to maintain backward compatibility. If the client programmer does not call :func:`kinterbasdb.init()`, KInterbasDB will implicitly initialize itself in a backward-compatible manner (`type_conv=1`) the first time one of its public functions is called or one of its public classes is instantiated.

The only known backward incompatibility is this: the DB API type comparison singleton :data:`~kinterbasdb.DATETIME` will not compare equal to any type until the `kinterbasdb.init()` function has been called (whether explicitly or implicitly). After `kinterbasdb.init()` has been called, `DATETIME` will compare equal to the date, time, and timestamp types that were loaded.
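The practical effect on the comparison singleton can be sketched as follows (a minimal illustration, assuming `type_conv=200` so that the `datetime`-based translators are loaded; the assertions merely restate the behavior described above):

.. sourcecode:: python

    import datetime
    import kinterbasdb

    # Merely importing kinterbasdb does not trigger initialization, so the
    # DB API comparison singleton does not yet compare equal to any type:
    assert kinterbasdb.DATETIME != datetime.datetime

    kinterbasdb.init(type_conv=200)

    # After initialization, DATETIME compares equal to the date, time, and
    # timestamp types that were loaded:
    assert kinterbasdb.DATETIME == datetime.datetime
    assert kinterbasdb.DATETIME == datetime.date
    assert kinterbasdb.DATETIME == datetime.time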
This issue should affect hardly any existing KInterbasDB-based programs.

Deferred Loading Example
------------------------

.. sourcecode:: python

    import datetime, decimal, os.path, string, sys

    import kinterbasdb
    kinterbasdb.init(type_conv=200)

    # This program never imports mx.DateTime:
    assert 'mx' not in sys.modules

    def test():
        dbFilename = r'D:\temp\test-deferred.firebird'
        prepareTestDatabase(dbFilename)

        # Connect with character set UNICODE_FSS, to match the default
        # character set of the test database.
        con = kinterbasdb.connect(dsn=dbFilename,
            user='sysdba', password='masterkey', charset='UNICODE_FSS'
          )
        cur = con.cursor()

        # Create a test table.
        cur.execute("""
            create table test (
              a numeric(18,2),
              b date,
              c time,
              d timestamp,
              e varchar(50), /* Defaults to character set UNICODE_FSS. */
              f varchar(50), /* Defaults to character set UNICODE_FSS. */
              g varchar(50) character set ASCII
            )
          """)
        con.commit()

        # Create an input value for each field in the test table.
        aIn = decimal.Decimal('4.53')
        # Notice that the DB API date/time constructors in kinterbasdb
        # generate datetime-based objects instead of mx-based objects because
        # of our earlier call to kinterbasdb.init(type_conv=200).
        bIn = kinterbasdb.Date(2004,1,4)
        assert isinstance(bIn, datetime.date)
        cIn = kinterbasdb.Time(16,27,59)
        assert isinstance(cIn, datetime.time)
        dIn = kinterbasdb.Timestamp(2004,1,4, 16,27,59)
        assert isinstance(dIn, datetime.datetime)
        eIn = u'A unicod\u2211 object stored in a Unicode field.'
        fIn = 'A str object stored in a Unicode field.'
        gIn = 'A str object stored in an ASCII field.'

        print '-' * 70
        inputValues = (aIn, bIn, cIn, dIn, eIn, fIn, gIn)
        reportValues('In', inputValues)

        cur.execute("insert into test values (?,?,?,?,?,?,?)", inputValues)

        print '-' * 70
        cur.execute("select a,b,c,d,e,f,g from test")
        (aOut, bOut, cOut, dOut, eOut, fOut, gOut) = outputValues = cur.fetchone()
        reportValues('Out', outputValues)

        print '-' * 70

        # Notice that all values made the journey to and from the database
        # intact.
        assert inputValues == outputValues

    def reportValues(direction, values):
        for (val, c) in zip(values, string.ascii_lowercase[:len(values)]):
            varName = c + direction
            print '%s has type %s, value\n  %s' % (varName, type(val), repr(val))

    def prepareTestDatabase(dbFilename):
        # Delete the test database if an old copy is already present.
        if os.path.isfile(dbFilename):
            conOld = kinterbasdb.connect(dsn=dbFilename,
                user='sysdba', password='masterkey'
              )
            conOld.drop_database()

        # Create the test database afresh.
        kinterbasdb.create_database("""
            create database '%s' user 'sysdba' password 'masterkey'
            default character set UNICODE_FSS
          """ % dbFilename
          )

    if __name__ == '__main__':
        test()

Program output:

.. sourcecode:: python

    ----------------------------------------------------------------------
    aIn has type <class 'decimal.Decimal'>, value
      Decimal("4.53")
    bIn has type <type 'datetime.date'>, value
      datetime.date(2004, 1, 4)
    cIn has type <type 'datetime.time'>, value
      datetime.time(16, 27, 59)
    dIn has type <type 'datetime.datetime'>, value
      datetime.datetime(2004, 1, 4, 16, 27, 59)
    eIn has type <type 'unicode'>, value
      u'A unicod\u2211 object stored in a Unicode field.'
    fIn has type <type 'str'>, value
      'A str object stored in a Unicode field.'
    gIn has type <type 'str'>, value
      'A str object stored in an ASCII field.'
    ----------------------------------------------------------------------
    aOut has type <class 'decimal.Decimal'>, value
      Decimal("4.53")
    bOut has type <type 'datetime.date'>, value
      datetime.date(2004, 1, 4)
    cOut has type <type 'datetime.time'>, value
      datetime.time(16, 27, 59)
    dOut has type <type 'datetime.datetime'>, value
      datetime.datetime(2004, 1, 4, 16, 27, 59)
    eOut has type <type 'unicode'>, value
      u'A unicod\u2211 object stored in a Unicode field.'
    fOut has type <type 'unicode'>, value
      u'A str object stored in a Unicode field.'
    gOut has type <type 'unicode'>, value
      u'A str object stored in an ASCII field.'
    ----------------------------------------------------------------------

*Notes about Unicode handling in the example above:*

Upon input, the Python `unicode` object `eIn` was transparently encoded for storage in database field `TEST.E` (a `VARCHAR` field with character set `UNICODE_FSS` (that is, UTF-8)). Upon output, the `UNICODE_FSS` value in `TEST.E` was decoded transparently into the Python `unicode` object `eOut`.

`TEST.F` accepted a Python `str` object even though it's a Unicode field. The output value `fOut` is a Python `unicode` object rather than a `str`.

Although `TEST.G` is an `ASCII` field, and the input value `gIn` is a `str`, the output value `gOut` is a `unicode` object. This is because the connection's charset is `UNICODE_FSS`, and Firebird tries to convert every retrieved value to match that character set.

Positional Dynamic Type Translation
-----------------------------------

All forms of dynamic type translation discussed so far have used the *type* of the database field as the basis for selecting a translator. KInterbasDB 3.2 also allows the client programmer to control translator selection on the basis of a field's *position* within a `Cursor`. Translator selection based on database field type is called "*typal* translation", while selection based on position is called "*positional* translation".

Positional translation can be enabled at the `Cursor` level by including zero-based integer keys in the dictionary passed to `Cursor.set_type_trans_[in|out]`. Consider the following example program:

.. sourcecode:: python

    import kinterbasdb

    con = kinterbasdb.connect(dsn=r'D:\temp\test-20.firebird',
        user='sysdba', password='masterkey'
      )
    cur = con.cursor()

    cur.execute("recreate table test(a int, b int, c int, d int, e float)")
    con.commit()

    cur.execute("insert into test values (?,?,?,?,?)", (1, 2, 3, 4, 5.0))

    cur.execute("select a,b,c,d,e from test")
    print 'Before translator modifications, output row is:'
    print ' ', cur.fetchone()

    cur.set_type_trans_out({
        'INTEGER': lambda i: i * 10,
        1:         lambda i: i * 100,
        3:         lambda i: i * 1000
      })

    cur.execute("select a,b,c,d,e from test")
    print 'After translator modifications, output row is:'
    print ' ', cur.fetchone()

Program output:

.. sourcecode:: python

    Before translator modifications, output row is:
       (1, 2, 3, 4, 5.0)
    After translator modifications, output row is:
       (10, 200, 30, 4000, 5.0)

The `cur.set_type_trans_out` call in the example program specifies that integer values retrieved by `cur` should be multiplied by `10`, then overrides that setting for specific columns: the value in the second column (position `1`) is multiplied by `100`, while the value in the fourth column (position `3`) is multiplied by `1000`.

KInterbasDB uses a cascading method of translator selection, listed below in order from highest to lowest precedence:

+ *Positional* translation settings, which can only be activated at the `Cursor` level, take precedence over *typal* translation settings.
+ `Cursor`-level translation settings take precedence over `Connection`-level settings.
+ `Connection`-level translation settings take precedence over the module-level defaults.
+ The module-level defaults are established by the call to :func:`kinterbasdb.init()`. If the client programmer does not call :func:`kinterbasdb.init()` explicitly, KInterbasDB's internals will do so implicitly.
Database Arrays
---------------

KInterbasDB converts database arrays *from* Python sequences (except strings) on input; *to* Python lists on output. On input, the Python sequence must be nested appropriately if the array field is multi-dimensional, and the incoming sequence must not fall short of its maximum possible length (it will not be "padded" implicitly--see below). On output, the lists will be nested if the database array has multiple dimensions.

Database arrays have no place in a purely relational data model, which requires that data values be *atomized* (that is, every value stored in the database must be reduced to elementary, non-decomposable parts). The Firebird implementation of database arrays, like that of most relational database engines that support this data type, is fraught with limitations.

Database arrays are of fixed size, with a predeclared number of dimensions (max. 16) and number of elements per dimension. Individual array elements cannot be set to `NULL` / `None`, so the mapping between Python lists (which have dynamic length and are therefore *not* normally "padded" with dummy values) and non-trivial database arrays is clumsy.

Stored procedures cannot have array parameters.

Finally, many interface libraries, GUIs, and even the isql command line utility do not support database arrays.

In general, it is preferable to avoid using database arrays unless you have a compelling reason.

**Example Program**

The following program inserts an array (nested Python list) into a single database field, then retrieves it.

.. sourcecode:: python

    import kinterbasdb

    con = kinterbasdb.connect(dsn='localhost:/temp/test.db', user='sysdba', password='pass')

    con.execute_immediate("recreate table array_table (a int[3,4])")
    con.commit()

    cur = con.cursor()

    arrayIn = [
        [1, 2, 3, 4],
        [5, 6, 7, 8],
        [9,10,11,12]
      ]

    print 'arrayIn:  %s' % arrayIn
    cur.execute("insert into array_table values (?)", (arrayIn,))

    cur.execute("select a from array_table")
    arrayOut = cur.fetchone()[0]
    print 'arrayOut: %s' % arrayOut

    con.commit()

Output:

.. sourcecode:: python

    arrayIn:  [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
    arrayOut: [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]

.. _blob-conversion:

Blobs
-----

KInterbasDB supports the insertion and retrieval of blobs either wholly in memory ("materialized mode") or in chunks ("streaming mode") to reduce memory usage when handling large blobs. The default handling mode is "materialized"; the "streaming" method is selectable via a special case of Dynamic Type Translation.

In **materialized** mode, input and output blobs are represented as Python `str` objects, with the result that the entirety of each blob's contents is loaded into memory. Unfortunately, flaws in the database engine's C API prevent automatic Unicode conversion from applying to textual blobs in the way it applies to Unicode `CHAR` and `VARCHAR` fields in any Firebird version prior to version 2.1.

.. note:: KInterbasDB 3.3 introduces new :ref:`type_conv mode 300 <typeconv-values>` that enables automatic type conversion for textual blobs when you're working with Firebird 2.1 and newer.

In **streaming** mode, any Python "file-like" object is acceptable as input for a blob parameter. Obvious examples of such objects are instances of :class:`file` or :class:`StringIO`. Each output blob is represented by a :class:`kinterbasdb.BlobReader` object.

.. class:: BlobReader

   BlobReader is a "file-like" class, so it acts much like a `file` instance opened in `rb` mode.
`BlobReader` adds one method not found in the "file-like" interface: .. method:: chunks() Takes a single integer parameter that specifies the number of bytes to retrieve in each chunk (the final chunk may be smaller). For example, if the size of the blob is `50000000` bytes, `BlobReader.chunks(2**20)` will return `47` one-megabyte chunks, and a smaller final chunk of `716928` bytes. Due to the combination of CPython's deterministic finalization with careful programming in KInterbasDB's internals, it is not strictly necessary to close `BlobReader` instances explicitly. A `BlobReader` object will be automatically closed by its `__del__` method when it goes out of scope, or when its `Connection` closes, whichever comes first. However, it is always a better idea to close resources explicitly (via `try...finally`) than to rely on artifacts of the CPython implementation. (For the sake of clarity, the example program does not follow this practice.) **Example Program** The following program demonstrates blob storage and retrieval in both *materialized* and *streaming* modes. .. sourcecode:: python import os.path from cStringIO import StringIO import kinterbasdb con = kinterbasdb.connect(dsn=r'localhost:D:\temp\test-20.firebird', user='sysdba', password='masterkey' ) cur = con.cursor() cur.execute("recreate table blob_test (a blob)") con.commit() # --- Materialized mode (str objects for both input and output) --- # Insertion: cur.execute("insert into blob_test values (?)", ('abcdef',)) cur.execute("insert into blob_test values (?)", ('ghijklmnop',)) # Retrieval: cur.execute("select * from blob_test") print 'Materialized retrieval (as str):' print cur.fetchall() cur.execute("delete from blob_test") # --- Streaming mode (file-like objects for input; kinterbasdb.BlobReader # objects for output) --- cur.set_type_trans_in ({'BLOB': {'mode': 'stream'}}) cur.set_type_trans_out({'BLOB': {'mode': 'stream'}}) # Insertion: cur.execute("insert into blob_test values (?)", (StringIO('abcdef'),)) cur.execute("insert into blob_test values (?)", (StringIO('ghijklmnop'),)) f = file(os.path.abspath(__file__), 'rb') cur.execute("insert into blob_test values (?)", (f,)) f.close() # Retrieval using the "file-like" methods of BlobReader: cur.execute("select * from blob_test") readerA = cur.fetchone()[0] print '\nStreaming retrieval (via kinterbasdb.BlobReader):' # Python "file-like" interface: print 'readerA.mode: "%s"' % readerA.mode print 'readerA.closed: %s' % readerA.closed print 'readerA.tell(): %d' % readerA.tell() print 'readerA.read(2): "%s"' % readerA.read(2) print 'readerA.tell(): %d' % readerA.tell() print 'readerA.read(): "%s"' % readerA.read() print 'readerA.tell(): %d' % readerA.tell() print 'readerA.read(): "%s"' % readerA.read() readerA.close() print 'readerA.closed: %s' % readerA.closed # The chunks method (not part of the Python "file-like" interface, but handy): print '\nFor a blob with contents "ghijklmnop", iterating over' print 'BlobReader.chunks(3) produces:' readerB = cur.fetchone()[0] for chunkNo, chunk in enumerate(readerB.chunks(3)): print 'Chunk %d is: "%s"' % (chunkNo, chunk) Output: .. 
sourcecode:: python Materialized retrieval (as str): [('abcdef',), ('ghijklmnop',)] Streaming retrieval (via kinterbasdb.BlobReader): readerA.mode: "rb" readerA.closed: False readerA.tell(): 0 readerA.read(2): "ab" readerA.tell(): 2 readerA.read(): "cdef" readerA.tell(): 6 readerA.read(): "" readerA.closed: True For a blob with contents "ghijklmnop", iterating over BlobReader.chunks(3) produces: Chunk 0 is: "ghi" Chunk 1 is: "jkl" Chunk 2 is: "mno" Chunk 3 is: "p" .. _connection-timeout: Connection Timeouts =================== Connection timeouts allow the programmer to request that a connection be automatically closed after a specified period of inactivity. The simplest uses of connection timeouts are trivial, as demonstrated by the following snippet: .. sourcecode:: python import kinterbasdb con = kinterbasdb.connect(dsn=r'localhost:D:\temp\test.db', user='sysdba', password='masterkey', timeout={'period': 120.0} # time out after 120.0 seconds of inactivity ) ... The connection created in the example above is *eligible* to be automatically closed by KInterbasDB if it remains idle for at least 120.0 consecutive seconds. KInterbasDB does not guarantee that the connection will be closed immediately when the specified period has elapsed. On a busy system, there might be a considerable delay between the moment a connection becomes eligible for timeout and the moment KInterbasDB actually closes it. However, the thread that performs connection timeouts is programmed in such a way that on a lightly loaded system, it acts almost instantaneously to take advantage of a connection's eligibility for timeout. After a connection has timed out, KInterbasDB reacts to attempts to reactivate the severed connection in a manner dependent on the state of the connection when it timed out. Consider the following example program: .. sourcecode:: python import time import kinterbasdb con = kinterbasdb.connect(dsn=r'localhost:D:\temp\test.db', user='sysdba', password='masterkey', timeout={'period': 3.0} ) cur = con.cursor() cur.execute("recreate table test (a int, b char(1))") con.commit() cur.executemany("insert into test (a, b) values (?, ?)", [(1, 'A'), (2, 'B'), (3, 'C')] ) con.commit() cur.execute("select * from test") print 'BEFORE:', cur.fetchall() cur.execute("update test set b = 'X' where a = 2") time.sleep(6.0) cur.execute("select * from test") print 'AFTER: ', cur.fetchall() So, should the example program print .. sourcecode:: python BEFORE: [(1, 'A'), (2, 'B'), (3, 'C')] AFTER: [(1, 'A'), (2, 'X'), (3, 'C')] or .. sourcecode:: python BEFORE: [(1, 'A'), (2, 'B'), (3, 'C')] AFTER: [(1, 'A'), (2, 'B'), (3, 'C')] or should it raise an exception? The answer is more complex than one might think. First of all, we cannot guarantee much about the example program's behavior because there is a race condition between the obvious thread that's executing the example code (which we'll call "UserThread" for the rest of this section) and the KInterbasDB-internal background thread that actually closes connections that have timed out ("TimeoutThread"). If the operating system were to suspend UserThread just after the :func:`kinterbasdb.connect()` call for more than the specified timeout period of 3.0 seconds, the TimeoutThread might close the connection before UserThread had performed any preparatory operations on the database. Although such a scenario is extremely unlikely when more "realistic" timeout periods such as 1800.0 seconds (30 minutes) are used, it is important to consider. 
We'll explore solutions to this race condition later.

The *likely* (but not guaranteed) behavior of the example program is that UserThread will complete all preparatory database operations including the `cur.execute("update test set b = 'X' where a = 2")` statement in the example program, then go to sleep for not less than 6.0 seconds.

Not less than 3.0 seconds after UserThread executes the `cur.execute("update test set b = 'X' where a = 2")` statement, TimeoutThread is likely to close the connection because it has become eligible for timeout.

The crucial issue is how TimeoutThread should resolve the transaction that UserThread left open on `con`, and what should happen when UserThread reawakens and tries to execute the `cur.execute("select * from test")` statement, since the transaction that UserThread left open will no longer be active.

User-Supplied Connection Timeout Callbacks
------------------------------------------

In the context of a particular client program, it is not possible for KInterbasDB to know the best way for TimeoutThread to react when it encounters a connection that is eligible for timeout, but has an unresolved transaction. For this reason, KInterbasDB's connection timeout system offers callbacks that the client programmer can use to guide the TimeoutThread's actions, or to log information about connection timeout patterns.

The "Before Timeout" Callback
-----------------------------

The client programmer can supply a "before timeout" callback that accepts a single dictionary parameter and returns an integer code to indicate how the TimeoutThread should proceed when it finds a connection eligible for timeout. Within the dictionary, KInterbasDB provides the following entries:

:dsn: The `dsn` parameter that was passed to `kinterbasdb.connect` when the connection was created.

:has_transaction: A boolean that indicates whether the connection has an unresolved transaction.

:active_secs: A `float` that indicates how many seconds elapsed between the point when the connection attached to the server and the last client program activity on the connection.

:idle_secs: A `float` that indicates how many seconds have elapsed since the last client program activity on the connection. This value will not be less than the specified timeout period, and is likely to be only a fraction of a second longer.

Based on those data, the user-supplied callback should return one of the following codes:

.. data:: CT_VETO

   Directs the TimeoutThread not to close the connection at the current time, and not to reconsider timing the connection out until at least another timeout period has passed. For example, if a connection was created with a timeout period of 120.0 seconds, and the user-supplied "before callback" returns `CT_VETO`, the TimeoutThread will not reconsider timing out that particular connection until at least another 120.0 seconds have elapsed.

.. data:: CT_NONTRANSPARENT

   ("Nontransparent rollback")

   Directs the TimeoutThread to roll back the connection's unresolved transaction (if any), then close the connection. Any future attempt to use the connection will raise a :exc:`kinterbasdb.ConnectionTimedOut` exception.

.. data:: CT_ROLLBACK

   ("Transparent rollback")

   Directs the TimeoutThread to roll back the connection's unresolved transaction (if any), then close the connection. Upon any future attempt to use the connection, KInterbasDB will *attempt* to transparently reconnect to the database and "resume where it left off" insofar as possible.
Of course, network problems and the like could prevent KInterbasDB's *attempt* at transparent resumption from succeeding. Also, highly state-dependent objects such as open result sets, :class:`BlobReader`, and :class:`PreparedStatement` cannot be used transparently across a connection timeout. .. data:: CT_COMMIT ("Transparent commit") Directs the TimeoutThread to commit the connection's unresolved transaction (if any), then close the connection. Upon any future attempt to use the connection, KInterbasDB will *attempt* to transparently reconnect to the database and "resume where it left off" insofar as possible. If the user does not supply a "before timeout" callback, KInterbasDB considers the timeout transparent only if the connection does not have an unresolved transaction. If the user-supplied "before timeout" callback returns anything other than one of the codes listed above, or if it raises an exception, the TimeoutThread will act as though :data:`CT_NONTRANSPARENT` had been returned. You might have noticed that the input dictionary to the "before timeout" callback does *not* include a reference to the :class:`~kinterbasdb.Connection` object itself. This is a deliberate design decision intended to steer the client programmer away from writing callbacks that take a long time to complete, or that manipulate the :class:`~kinterbasdb.Connection` instance directly. See the caveats section for more information. The "After Timeout" Callback ---------------------------- The client programmer can supply an "after timeout" callback that accepts a single dictionary parameter. Within that dictionary, KInterbasDB currently provides the following entries: :dsn: The `dsn` parameter that was passed to :func:`kinterbasdb.connect()` when the connection was created. :active_secs: A `float` that indicates how many seconds elapsed between the point when the connection attached to the server and the last client program activity on the connection. :idle_secs: A `float` that indicates how many seconds elapsed between the last client program activity on the connection and the moment the TimeoutThread closed the connection. KInterbasDB only calls the "after timeout" callback after the connection has actually been closed by the TimeoutThread. If the "before timeout" callback returns :data:`CT_VETO` to cancel the timeout attempt, the "after timeout" callback will not be called. KInterbasDB discards the return value of the "after timeout" callback, and ignores any exceptions. The same caveats that apply to the "before timeout" callback also apply to the "after timeout" callback. User-Supplied Connection Timeout Callback Caveats ------------------------------------------------- + The user-supplied callbacks are executed by the TimeoutThread. They should be designed to avoid blocking the TimeoutThread any longer than absolutely necessary. + Manipulating the :class:`Connection` object that is being timed out (or any of that connection's subordinate objects such as :class:`Cursor`, :class:`BlobReader`, or :class:`PreparedStatement`) from the timeout callbacks is strictly forbidden. Examples -------- **Example: `CT_VETO`** The following program registers a "before timeout" callback that unconditionally returns :data:`CT_VETO`, which means that the TimeoutThread never times the connection out. Although an "after timeout" callback is also registered, it will never be called. .. 
sourcecode:: python import time import kinterbasdb def callback_before(info): print print 'callback_before called; input parameter contained:' for key, value in info.items(): print ' %s: %s' % (repr(key).ljust(20), repr(value)) print # Unconditionally veto any timeout attempts: return kinterbasdb.CT_VETO def callback_after(info): assert False, 'This will never be called.' con = kinterbasdb.connect(dsn=r'localhost:D:\temp\test.db', user='sysdba', password='masterkey', timeout={ 'period': 3.0, 'callback_before': callback_before, 'callback_after': callback_after, } ) cur = con.cursor() cur.execute("recreate table test (a int, b char(1))") con.commit() cur.executemany("insert into test (a, b) values (?, ?)", [(1, 'A'), (2, 'B'), (3, 'C')] ) con.commit() cur.execute("select * from test") print 'BEFORE:', cur.fetchall() cur.execute("update test set b = 'X' where a = 2") time.sleep(6.0) cur.execute("select * from test") rows = cur.fetchall() # The value of the second column of the second row of the table is still 'X', # because the transaction that changed it from 'B' to 'X' remains active. assert rows[1][1] == 'X' print 'AFTER: ', rows Sample output: .. sourcecode:: python BEFORE: [(1, 'A'), (2, 'B'), (3, 'C')] callback_before called; input parameter contained: 'dsn' : 'localhost:D:\\temp\\test.db' 'idle_secs' : 3.0 'has_transaction' : True AFTER: [(1, 'A'), (2, 'X'), (3, 'C')] **Example: Supporting Module `timeout_authorizer`** The example programs for :data:`CT_NONTRANSPARENT`, :data:`CT_ROLLBACK`, and :data:`CT_COMMIT` rely on the `TimeoutAuthorizer` class from the module below to guarantee that the TimeoutThread will not time the connection out before the preparatory code has executed. .. sourcecode:: python import threading import kinterbasdb class TimeoutAuthorizer(object): def __init__(self, opCodeWhenAuthorized): self.currentOpCode = kinterbasdb.CT_VETO self.opCodeWhenAuthorized = opCodeWhenAuthorized self.lock = threading.Lock() def authorize(self): self.lock.acquire() try: self.currentOpCode = self.opCodeWhenAuthorized finally: self.lock.release() def __call__(self, info): self.lock.acquire() try: return self.currentOpCode finally: self.lock.release() **Example: `CT_NONTRANSPARENT`** .. sourcecode:: python import threading, time import kinterbasdb import timeout_authorizer authorizer = timeout_authorizer.TimeoutAuthorizer(kinterbasdb.CT_NONTRANSPARENT) connectionTimedOut = threading.Event() def callback_after(info): print print 'The connection was closed nontransparently.' print connectionTimedOut.set() con = kinterbasdb.connect(dsn=r'localhost:D:\temp\test.db', user='sysdba', password='masterkey', timeout={ 'period': 3.0, 'callback_before': authorizer, 'callback_after': callback_after, } ) cur = con.cursor() cur.execute("recreate table test (a int, b char(1))") con.commit() cur.executemany("insert into test (a, b) values (?, ?)", [(1, 'A'), (2, 'B'), (3, 'C')] ) con.commit() cur.execute("select * from test") print 'BEFORE:', cur.fetchall() cur.execute("update test set b = 'X' where a = 2") authorizer.authorize() connectionTimedOut.wait() # This will raise a kinterbasdb.ConnectionTimedOut exception because the # before callback returned kinterbasdb.CT_NONTRANSPARENT: cur.execute("select * from test") Sample output: .. sourcecode:: python BEFORE: [(1, 'A'), (2, 'B'), (3, 'C')] The connection was closed nontransparently. Traceback (most recent call last): File "connection_timeouts_ct_nontransparent.py", line 42, in ? 
cur.execute("select * from test") kinterbasdb.ConnectionTimedOut: (0, 'A transaction was still unresolved when this connection timed out, so it cannot be transparently reactivated.') **Example: `CT_ROLLBACK`** .. sourcecode:: python import threading, time import kinterbasdb import timeout_authorizer authorizer = timeout_authorizer.TimeoutAuthorizer(kinterbasdb.CT_ROLLBACK) connectionTimedOut = threading.Event() def callback_after(info): print print 'The unresolved transaction was rolled back; the connection has been' print ' closed transparently.' print connectionTimedOut.set() con = kinterbasdb.connect(dsn=r'localhost:D:\temp\test.db', user='sysdba', password='masterkey', timeout={ 'period': 3.0, 'callback_before': authorizer, 'callback_after': callback_after, } ) cur = con.cursor() cur.execute("recreate table test (a int, b char(1))") con.commit() cur.executemany("insert into test (a, b) values (?, ?)", [(1, 'A'), (2, 'B'), (3, 'C')] ) con.commit() cur.execute("select * from test") print 'BEFORE:', cur.fetchall() cur.execute("update test set b = 'X' where a = 2") authorizer.authorize() connectionTimedOut.wait() # The value of the second column of the second row of the table will have # reverted to 'B' when the transaction that changed it to 'X' was rolled back. # The cur.execute call on the next line will transparently reactivate the # connection, which was timed out transparently. cur.execute("select * from test") rows = cur.fetchall() assert rows[1][1] == 'B' print 'AFTER: ', rows Sample output: .. sourcecode:: python BEFORE: [(1, 'A'), (2, 'B'), (3, 'C')] The unresolved transaction was rolled back; the connection has been closed transparently. AFTER: [(1, 'A'), (2, 'B'), (3, 'C')] **Example: `CT_COMMIT`** .. sourcecode:: python import threading, time import kinterbasdb import timeout_authorizer authorizer = timeout_authorizer.TimeoutAuthorizer(kinterbasdb.CT_COMMIT) connectionTimedOut = threading.Event() def callback_after(info): print print 'The unresolved transaction was committed; the connection has been' print ' closed transparently.' print connectionTimedOut.set() con = kinterbasdb.connect(dsn=r'localhost:D:\temp\test.db', user='sysdba', password='masterkey', timeout={ 'period': 3.0, 'callback_before': authorizer, 'callback_after': callback_after, } ) cur = con.cursor() cur.execute("recreate table test (a int, b char(1))") con.commit() cur.executemany("insert into test (a, b) values (?, ?)", [(1, 'A'), (2, 'B'), (3, 'C')] ) con.commit() cur.execute("select * from test") print 'BEFORE:', cur.fetchall() cur.execute("update test set b = 'X' where a = 2") authorizer.authorize() connectionTimedOut.wait() # The modification of the value of the second column of the second row of the # table from 'B' to 'X' will have persisted, because the TimeoutThread # committed the transaction before it timed the connection out. # The cur.execute call on the next line will transparently reactivate the # connection, which was timed out transparently. cur.execute("select * from test") rows = cur.fetchall() assert rows[1][1] == 'X' print 'AFTER: ', rows Sample output: .. sourcecode:: python BEFORE: [(1, 'A'), (2, 'B'), (3, 'C')] The unresolved transaction was committed; the connection has been closed transparently. AFTER: [(1, 'A'), (2, 'X'), (3, 'C')] Database Event Notification =========================== What are database events? ------------------------- The database engine features a distributed, interprocess communication mechanism based on messages called *database events*. 
A database event is a message passed from a trigger or stored procedure to an application to announce the occurrence of a specified condition or action, usually a database change such as an insertion, modification, or deletion of a record. The Firebird event mechanism enables applications to respond to actions and database changes made by other, concurrently running applications without the need for those applications to communicate directly with one another, and without incurring the expense of CPU time required for periodic polling to determine if an event has occurred. Why use database events? ------------------------ Anything that can be accomplished with database events can also be implemented using other techniques, so why bother with events? Since you've chosen to write database-centric programs in Python rather than assembly language, you probably already know the answer to this question, but let's illustrate. A typical application for database events is the handling of administrative messages. Suppose you have an administrative message database with a `messages` table, into which various applications insert timestamped status reports. It may be desirable to react to these messages in diverse ways, depending on the status they indicate: to ignore them, to initiate the update of dependent databases upon their arrival, to forward them by e-mail to a remote administrator, or even to set off an alarm so that on-site administrators will know a problem has occurred. It is undesirable to tightly couple the program whose status is being reported (the *message producer*) to the program that handles the status reports (the *message handler*). There are obvious losses of flexibility in doing so. For example, the message producer may run on a separate machine from the administrative message database and may lack access rights to the downstream reporting facilities (e.g., network access to the SMTP server, in the case of forwarded e-mail notifications). Additionally, the actions required to handle status reports may themselves be time-consuming and error-prone, as in accessing a remote network to transmit e-mail. In the absence of database event support, the message handler would probably be implemented via *polling*. Polling is simply the repetition of a check for a condition at a specified interval. In this case, the message handler would check in an infinite loop to see whether the most recent record in the `messages` table was more recent than the last message it had handled. If so, it would handle the fresh message(s); if not, it would go to sleep for a specified interval, then loop. The *polling-based* implementation of the message handler is fundamentally flawed. Polling is a form of `busy-wait `__; the check for new messages is performed at the specified interval, regardless of the actual activity level of the message producers. If the polling interval is lengthy, messages might not be handled within a reasonable time period after their arrival; if the polling interval is brief, the message handler program (and there may be many such programs) will waste a large amount of CPU time on unnecessary checks. The database server is necessarily aware of the exact moment when a new message arrives. Why not let the message handler program request that the database server send it a notification when a new message arrives? The message handler can then efficiently sleep until the moment its services are needed. 
Under this *event-based* scheme, the message handler becomes aware of new messages at the instant they arrive, yet it does not waste CPU time checking in vain for new messages when there are none available.

How are events exposed to the server and the client process?
-------------------------------------------------------------

#. Server Process ("An event just occurred!")

   To notify any interested listeners that a specific event has occurred, issue the `POST_EVENT` statement from a stored procedure or trigger. The `POST_EVENT` statement has one parameter: the name of the event to post. In the preceding example of the administrative message database, `POST_EVENT` might be used from an `after insert` trigger on the `messages` table, like this:

   .. sourcecode:: sql

      create trigger trig_messages_handle_insert for messages
          after insert
      as
      begin
          POST_EVENT 'new_message';
      end

   .. note:: The physical notification of the client process does not occur until the transaction in which the `POST_EVENT` took place is actually committed. Therefore, multiple events may *conceptually* occur before the client process is *physically* informed of even one occurrence. Furthermore, the database engine makes no guarantee that clients will be informed of events in the same groupings in which they conceptually occurred. If, within a single transaction, an event named `event_a` is posted once and an event named `event_b` is posted once, the client may receive those posts in separate "batches", despite the fact that they occurred in the same conceptual unit (a single transaction). This also applies to multiple occurrences of *the same* event within a single conceptual unit: the physical notifications may arrive at the client separately.

#. Client Process ("Send me a message when an event occurs.")

   .. note:: If you don't care about the gory details of event notification, skip to the section that describes KInterbasDB's Python-level event handling API.

   The Firebird C client library offers two forms of event notification. The first form is *synchronous* notification, by way of the function :cfunc:`isc_wait_for_event()`. This form is admirably simple for a C programmer to use, but is inappropriate as a basis for KInterbasDB's event support, chiefly because it's not sophisticated enough to serve as the basis for a comfortable Python-level API. The other form of event notification offered by the database client library is *asynchronous*, by way of the functions :cfunc:`isc_que_events()` (note that the name of that function is misspelled), :cfunc:`isc_cancel_events()`, and others. The details are as nasty as they are numerous, but the essence of using asynchronous notification from C is as follows:

   #. Call :cfunc:`isc_event_block()` to create a formatted binary buffer that will tell the server which events the client wants to listen for.
   #. Call :cfunc:`isc_que_events()` (passing the buffer created in the previous step) to inform the server that the client is ready to receive event notifications, and provide a callback that will be asynchronously invoked when one or more of the registered events occurs.
   #. [The thread that called :cfunc:`isc_que_events()` to initiate event listening must now do something else.]
   #. When the callback is invoked (the database client library starts a thread dedicated to this purpose), it can use the :cfunc:`isc_event_counts()` function to determine how many times each of the registered events has occurred since the last call to :cfunc:`isc_event_counts()` (if any).
   #.
[The callback thread should now "do its thing", which may include communicating with the thread that called :cfunc:`isc_que_events()`.]
   #. When the callback thread is finished handling an event notification, it must call :cfunc:`isc_que_events()` again in order to receive future notifications. Future notifications will invoke the callback again, effectively "looping" the callback thread back to Step 4.

How are events exposed to the Python programmer?
-------------------------------------------------

The KInterbasDB database event API consists of two components: the method `Connection.event_conduit` and the class :class:`EventConduit`.

.. method:: Connection.event_conduit

   Creates a conduit (an instance of :class:`~kinterbasdb.EventConduit`) through which database event notifications will flow into the Python program. `event_conduit` is a method of `Connection` rather than a module-level function or a class constructor because the database engine deals with events in the context of a particular database (after all, `POST_EVENT` must be issued by a stored procedure or a trigger).

   Arguments:

   :event_names: A sequence of string event names. The :meth:`EventConduit.wait()` method will block until the occurrence of at least one of the events named by the strings in `event_names`.

   KInterbasDB's own event-related code is capable of operating with up to 2147483647 events per conduit. However, it has been observed that the Firebird client library experiences catastrophic problems (including memory corruption) on some platforms with anything beyond about 100 events per conduit. These limitations are dependent on both the Firebird version and the platform.

.. class:: EventConduit

   .. method:: __init__()

      The `EventConduit` class is not designed to be instantiated directly by the Python programmer. Instead, use the `Connection.event_conduit` method to create `EventConduit` instances.

   .. method:: wait(timeout=None)

      Blocks the calling thread until at least one of the events occurs, or the specified `timeout` (if any) expires. If one or more event notifications have arrived since the last call to `wait`, this method will retrieve a notification from the head of the `EventConduit`'s internal queue and return immediately. The names of the relevant events were supplied to the `Connection.event_conduit` method during the creation of this `EventConduit`. In the code snippet below, the relevant events are named `event_a` and `event_b`:

      .. sourcecode:: python

         conduit = connection.event_conduit( ('event_a', 'event_b') )
         conduit.wait()

      Arguments:

      :timeout: *optional* number of seconds (use a `float` to indicate fractions of seconds). If not even one of the relevant events has occurred after `timeout` seconds, this method will unblock and return `None`. The default `timeout` is infinite.

      Returns: `None` if the wait timed out, otherwise a dictionary that maps `event_name -> event_occurrence_count`. In the code snippet above, if `event_a` occurred once and `event_b` did not occur at all, the return value from `conduit.wait()` would be the following dictionary:

      .. sourcecode:: python

         { 'event_a': 1, 'event_b': 0 }

   .. method:: close()

      Cancels the standing request for this conduit to be notified of events. After this method has been called, this `EventConduit` object is useless, and should be discarded. (The boolean property `closed` is `True` after an `EventConduit` has been closed.) This method has no arguments.

   ..
method:: flush() This method allows the Python programmer to manually clear any event notifications that have accumulated in the conduit's internal queue. From the moment the conduit is created by the :meth:`Connection.event_conduit()` method, notifications of any events that occur will accumulate asynchronously within the conduit's internal queue until the conduit is closed either explicitly (via the `close` method) or implicitly (via garbage collection). There are two ways to dispose of the accumulated notifications: call `wait()` to receive them one at a time ( `wait()` will block when the conduit's internal queue is empty), or call this method to get rid of all accumulated notifications. This method has no arguments. Returns: The number of event notifications that were flushed from the queue. The "number of event *notifications*" is not necessarily the same as the "number of event *occurrences*", since a single notification can indicate multiple occurrences of a given event (see the return value of the `wait` method). Example Program --------------- The following code (a SQL table definition, a SQL trigger definition, and two Python programs) demonstrates KInterbasDB-based event notification. The example is based on a database at `'localhost:/temp/test.db'`, which contains a simple table named `test_table`.  `test_table` has an `after insert` trigger that posts several events. Note that the trigger posts `test_event_a` twice, `test_event_b` once, and `test_event_c` once. The Python event *handler* program connects to the database and establishes an `EventConduit` in the context of that connection. As specified by the list of `RELEVANT_EVENTS` passed to `event_conduit`, the event conduit will concern itself only with events named `test_event_a` and `test_event_b`. Next, the program calls the conduit's `wait` method without a timeout; it will wait infinitely until *at least one* of the relevant events is posted in a transaction that is subsequently committed. The Python event *producer* program simply connects to the database, inserts a row into `test_table`, and commits the transaction. Notice that except for the printed comment, no code in the producer makes any mention of events -- the events are posted as an implicit consequence of the row's insertion into `test_table`. The insertion into `test_table` causes the trigger to *conceptually* post events, but those events are not *physically* sent to interested listeners until the transaction is committed. When the commit occurs, the handler program returns from the `wait` call and prints the notification that it received. SQL table definition: .. sourcecode:: sql create table test_table (a integer) SQL trigger definition: .. sourcecode:: sql create trigger trig_test_insert_event for test_table after insert as begin post_event 'test_event_a'; post_event 'test_event_b'; post_event 'test_event_c'; post_event 'test_event_a'; end Python event *handler* program: .. sourcecode:: python import kinterbasdb RELEVANT_EVENTS = ['test_event_a', 'test_event_b'] con = kinterbasdb.connect(dsn='localhost:/temp/test.db', user='sysdba', password='pass') conduit = con.event_conduit(RELEVANT_EVENTS) print 'HANDLER: About to wait for the occurrence of one of %s...\n' % RELEVANT_EVENTS result = conduit.wait() print 'HANDLER: An event notification has arrived:' print result conduit.close() Python event *producer* program: .. 
sourcecode:: python

   import kinterbasdb

   con = kinterbasdb.connect(dsn='localhost:/temp/test.db',
       user='sysdba', password='pass'
     )

   cur = con.cursor()
   cur.execute("insert into test_table values (1)")
   print 'PRODUCER: Committing transaction that will cause event notification to be sent.'
   con.commit()

Event producer output:

.. sourcecode:: python

   PRODUCER: Committing transaction that will cause event notification to be sent.

Event handler output (assuming that the handler was already started and waiting when the event producer program was executed):

.. sourcecode:: python

   HANDLER: About to wait for the occurrence of one of ['test_event_a', 'test_event_b']...
   HANDLER: An event notification has arrived:
   {'test_event_a': 2, 'test_event_b': 1}

Notice that there is no mention of `test_event_c` in the result dictionary received by the event handler program. Although `test_event_c` was posted by the `after insert` trigger, the event conduit in the handler program was created to listen only for `test_event_a` and `test_event_b` events.

Pitfalls and Limitations
------------------------

+ Remember that if an `EventConduit` is left active (not yet closed or garbage collected), notifications for any registered events that actually occur will continue to accumulate in the `EventConduit`'s internal queue, even if the Python programmer doesn't call :meth:`EventConduit.wait()` to receive the notifications or :meth:`EventConduit.flush()` to clear the queue. The ill-informed may misinterpret this behavior as a memory leak in KInterbasDB; it is not.

+ NEVER use LOCAL-protocol connections in a multithreaded program that also uses event handling! The database client library implements the local protocol on some platforms in such a way that deadlocks may arise in bizarre places if you do this. *This no-LOCAL prohibition is not limited to connections that are used as the basis for event conduits; it applies to all connections throughout the process.* So why doesn't KInterbasDB protect the Python programmer from this mistake? Because the event handling thread is started by the database client library, and it operates beyond the synchronization domain of KInterbasDB at times.

.. note:: The restrictions on the number of active `EventConduit`s in a process, and on the number of event names that a single `EventConduit` can listen for, have been removed in KInterbasDB 3.2.

The `database_info` API
=======================

Firebird provides various information about the server and the connected database via the `database_info` API call. KInterbasDB surfaces this API through the following methods on the Connection object:

.. method:: Connection.database_info(request, result_type)

   This method is a *very thin* wrapper around the function :cfunc:`isc_database_info()`. It does *not* attempt to interpret its results except with regard to whether they are a string or an integer. For example, requesting :cdata:`isc_info_user_names` with the call

   .. sourcecode:: python

      con.database_info(kinterbasdb.isc_info_user_names, 's')

   will return a binary string containing a *raw* succession of length-name pairs. A more convenient way to access the same functionality is via the :meth:`~Connection.db_info()` method.

   Arguments:

   :request: One of the `kinterbasdb.isc_info_*` constants.
   :result_type: Must be either `'s'` if you expect a string result, or `'i'` if you expect an integer result.

**Example Program**
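Before the full program, here is a condensed sketch of the calling convention. This sketch is not part of the original example; the DSN and credentials are placeholders, and it uses only the `isc_info_*` constants that appear elsewhere in this document:

.. sourcecode:: python

   import kinterbasdb

   con = kinterbasdb.connect(dsn='localhost:/temp/test.db',
       user='sysdba', password='pass')

   # 'i' requests an integer result; the value is ready to use as-is:
   pageSize = con.database_info(kinterbasdb.isc_info_page_size, 'i')
   print 'Database page size: %d bytes' % pageSize

   # 's' requests a string result; the returned buffer is *raw* binary
   # data that the caller must parse manually:
   rawBuf = con.database_info(kinterbasdb.isc_info_db_id, 's')
   print 'Raw isc_info_db_id buffer is %d bytes long.' % len(rawBuf)

The complete program, including the manual parsing that string results require, follows:

..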
sourcecode:: python

   import kinterbasdb

   con = kinterbasdb.connect(dsn='localhost:/temp/test.db',
       user='sysdba', password='pass'
     )

   # Retrieving an integer info item is quite simple.
   bytesInUse = con.database_info(kinterbasdb.isc_info_current_memory, 'i')

   print 'The server is currently using %d bytes of memory.' % bytesInUse

   # Retrieving a string info item is somewhat more involved, because the
   # information is returned in a raw binary buffer that must be parsed
   # according to the rules defined in the Interbase® 6 API Guide section
   # entitled "Requesting buffer items and result buffer values" (page 51).
   #
   # Often, the buffer contains a succession of length-string pairs
   # (one byte telling the length of s, followed by s itself).
   # Function kinterbasdb.raw_byte_to_int is provided to convert a raw
   # byte to a Python integer (see examples below).
   buf = con.database_info(kinterbasdb.isc_info_db_id, 's')

   # Parse the filename from the buffer.
   beginningOfFilename = 2
   # The second byte in the buffer contains the size of the database filename
   # in bytes.
   lengthOfFilename = kinterbasdb.raw_byte_to_int(buf[1])
   filename = buf[beginningOfFilename:beginningOfFilename + lengthOfFilename]

   # Parse the host name from the buffer.
   beginningOfHostName = (beginningOfFilename + lengthOfFilename) + 1
   # The first byte after the end of the database filename contains the size
   # of the host name in bytes.
   lengthOfHostName = kinterbasdb.raw_byte_to_int(buf[beginningOfHostName - 1])
   host = buf[beginningOfHostName:beginningOfHostName + lengthOfHostName]

   print 'We are connected to the database at %s on host %s.' % (filename, host)

Sample output:

.. sourcecode:: python

   The server is currently using 8931328 bytes of memory.
   We are connected to the database at C:\TEMP\TEST.DB on host WEASEL.

As you can see, extracting data with the `database_info` function is rather clumsy. In KInterbasDB 3.2, a higher-level means of accessing the same information is available: the :meth:`~Connection.db_info()` method. Also, the Services API (accessible to Python programmers via the :mod:`kinterbasdb.services` module) provides high-level support for querying database statistics and performing maintenance.

.. method:: Connection.db_info(request)

   High-level convenience wrapper around the :meth:`~Connection.database_info()` method that parses the output of `database_info` into Python-friendly objects instead of returning raw binary buffers in the case of complex result types. If an unrecognized `isc_info_*` code is requested, this method raises `ValueError`. For example, requesting :cdata:`isc_info_user_names` with the call

   .. sourcecode:: python

      con.db_info(kinterbasdb.isc_info_user_names)

   returns a dictionary that maps (username -> number of open connections). If `SYSDBA` has one open connection to the database to which `con` is connected, and `TEST_USER_1` has three open connections to that same database, the return value would be `{'SYSDBA': 1, 'TEST_USER_1': 3}`.

   Arguments:

   :request: must be either:

      + A single `kinterbasdb.isc_info_*` info request code. In this case, a single result is returned.
      + A sequence of such codes. In this case, a mapping of (info request code -> result) is returned.

**Example Program**
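For orientation, here is a condensed sketch of both request forms (the connection parameters are again placeholders, not part of the original example); note how the raw-buffer parsing from the previous example disappears:

.. sourcecode:: python

   import kinterbasdb

   con = kinterbasdb.connect(dsn='localhost:/temp/test.db',
       user='sysdba', password='pass')

   # A single request code yields a single, fully parsed result:
   print 'Page size:', con.db_info(kinterbasdb.isc_info_page_size)

   # A sequence of request codes yields a mapping of (code -> result):
   res = con.db_info(
       [kinterbasdb.isc_info_page_size, kinterbasdb.isc_info_allocation]
     )
   print 'Pages allocated:', res[kinterbasdb.isc_info_allocation]

The complete example program below exercises both forms against a live server:

..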
sourcecode:: python import os.path import kinterbasdb DB_FILENAME = r'D:\temp\test-20.firebird' DSN = 'localhost:' + DB_FILENAME ############################################################################### # Querying an isc_info_* item that has a complex result: ############################################################################### # Establish three connections to the test database as TEST_USER_1, and one # connection as SYSDBA. Then use the Connection.db_info method to query the # number of attachments by each user to the test database. testUserCons = [] for i in range(3): tCon = kinterbasdb.connect(dsn=DSN, user='test_user_1', password='pass') testUserCons.append(tCon) con = kinterbasdb.connect(dsn=DSN, user='sysdba', password='masterkey') print 'Open connections to this database:' print con.db_info(kinterbasdb.isc_info_user_names) ############################################################################### # Querying multiple isc_info_* items at once: ############################################################################### # Request multiple db_info items at once, specifically the page size of the # database and the number of pages currently allocated. Compare the size # computed by that method with the size reported by the file system. # The advantages of using db_info instead of the file system to compute # database size are: # - db_info works seamlessly on connections to remote databases that reside # in file systems to which the client program lacks access. # - If the database is split across multiple files, db_info includes all of # them. res = con.db_info( [kinterbasdb.isc_info_page_size, kinterbasdb.isc_info_allocation] ) pagesAllocated = res[kinterbasdb.isc_info_allocation] pageSize = res[kinterbasdb.isc_info_page_size] print '\ndb_info indicates database size is', pageSize * pagesAllocated, 'bytes' print 'os.path.getsize indicates size is ', os.path.getsize(DB_FILENAME), 'bytes' Sample output: .. sourcecode:: python Open connections to this database: {'SYSDBA': 1, 'TEST_USER_1': 3} db_info indicates database size is 20684800 bytes os.path.getsize indicates size is 20684800 bytes Using Firebird Services API =========================== .. module:: kinterbasdb.services :synopsis: Access to Firebird Services API Database server maintenance tasks such as user management, load monitoring, and database backup have traditionally been automated by scripting the command-line tools :command:`gbak`, :command:`gfix`, :command:`gsec`, and :command:`gstat`. The API presented to the client programmer by these utilities is inelegant because they are, after all, command-line tools rather than native components of the client language. To address this problem, Firebird has a facility called the Services API, which exposes a uniform interface to the administrative functionality of the traditional command-line tools. The native Services API, though consistent, is much lower-level than a Pythonic API. If the native version were exposed directly, accomplishing a given task would probably require more Python code than scripting the traditional command-line tools. For this reason, KInterbasDB presents its own abstraction over the native API via the :mod:`kinterbasdb.services` module. Establishing Services API Connections ------------------------------------- All Services API operations are performed in the context of a connection to a specific database server, represented by the :class:`kinterbasdb.services.Connection` class. .. 
function:: connect(host='service_mgr', user='sysdba', password=None)

   Establishes a connection to the database server's service manager and returns a :class:`kinterbasdb.services.Connection` object.

   :host: The network name of the computer on which the database server is running.
   :user: The name of the database user under whose authority the maintenance tasks are to be performed.
   :password: The user's password.

   Since maintenance operations are most often initiated by an administrative user on the same computer as the database server, `host` defaults to the local computer, and `user` defaults to `SYSDBA`. The three calls to `kinterbasdb.services.connect()` in the following program are equivalent:

   .. sourcecode:: python

      from kinterbasdb import services

      con = services.connect(password='masterkey')
      con = services.connect(user='sysdba', password='masterkey')
      con = services.connect(host='localhost', user='sysdba', password='masterkey')

.. class:: Connection

   .. method:: close()

      Explicitly terminates a :class:`~kinterbasdb.services.Connection`; if this is not invoked, the underlying connection will be closed implicitly when the `Connection` object is garbage collected.

Server Configuration and Activity Levels
----------------------------------------

.. method:: Connection.getServiceManagerVersion()

   To help client programs adapt to version changes, the service manager exposes its version number as an integer.

   .. sourcecode:: python

      from kinterbasdb import services
      con = services.connect(host='localhost', user='sysdba', password='masterkey')

      print con.getServiceManagerVersion()

   Output (on Firebird 1.5.0):

   .. sourcecode:: python

      2

   `kinterbasdb.services` is a thick wrapper of the Services API that can shield its users from changes in the underlying C API, so this method is unlikely to be useful to the typical Python client programmer.

.. method:: Connection.getServerVersion()

   Returns the server's version string:

   .. sourcecode:: python

      from kinterbasdb import services
      con = services.connect(host='localhost', user='sysdba', password='masterkey')

      print con.getServerVersion()

   Output (on Firebird 1.5.0/Win32):

   .. sourcecode:: python

      WI-V1.5.0.4290 Firebird 1.5

   At first glance, this method appears to duplicate the functionality of the :attr:`kinterbasdb.Connection.server_version` property, but when working with Firebird, there is a difference. :attr:`kinterbasdb.Connection.server_version` is based on a C API call (:cfunc:`isc_database_info()`) that existed long before the introduction of the Services API. Some programs written before the advent of Firebird test the version number in the return value of :cfunc:`isc_database_info()`, and refuse to work if it indicates that the server is too old. Since the first stable version of Firebird was labeled `1.0`, this pre-Firebird version testing scheme incorrectly concludes that (e.g.) Firebird 1.0 is older than Interbase 5.0. Firebird addresses this problem by making :cfunc:`isc_database_info()` return a "pseudo-InterBase" version number, whereas the Services API returns the true Firebird version, as shown:

   .. sourcecode:: python

      import kinterbasdb
      con = kinterbasdb.connect(dsn='localhost:C:/temp/test.db',
          user='sysdba', password='masterkey')
      print 'Interbase-compatible version string:', con.server_version

      import kinterbasdb.services
      svcCon = kinterbasdb.services.connect(host='localhost',
          user='sysdba', password='masterkey')
      print 'Actual Firebird version string:     ', svcCon.getServerVersion()

   Output (on Firebird 1.5.0/Win32):

   ..
sourcecode:: python

   Interbase-compatible version string: WI-V6.3.0.4290 Firebird 1.5
   Actual Firebird version string:      WI-V1.5.0.4290 Firebird 1.5

.. method:: Connection.getArchitecture()

   Returns platform information for the server, including hardware architecture and operating system family.

   .. sourcecode:: python

      from kinterbasdb import services
      con = services.connect(host='localhost', user='sysdba', password='masterkey')

      print con.getArchitecture()

   Output (on Firebird 1.5.0/Windows 2000):

   .. sourcecode:: python

      Firebird/x86/Windows NT

   Unfortunately, the architecture string is almost useless because its format is irregular and sometimes outright idiotic, as with Firebird 1.5.0 running on x86 Linux:

   .. sourcecode:: python

      Firebird/linux Intel

   Magically, Linux becomes a hardware architecture, the ASCII store decides to hold a 31.92% off sale, and Intel grabs an unfilled niche in the operating system market.

.. method:: Connection.getHomeDir()

   Returns the equivalent of the `RootDirectory` setting from :file:`firebird.conf`:

   .. sourcecode:: python

      from kinterbasdb import services
      con = services.connect(host='localhost', user='sysdba', password='masterkey')

      print con.getHomeDir()

   Output (on a particular Firebird 1.5.0/Windows 2000 installation):

   .. sourcecode:: python

      C:\dev\db\firebird150\

   Output (on a particular Firebird 1.5.0/Linux installation):

   .. sourcecode:: python

      /opt/firebird/

.. method:: Connection.getSecurityDatabasePath()

   Returns the location of the server's core security database, which contains user definitions and such. Interbase® and Firebird 1.0 named this database :file:`isc4.gdb`; Firebird 1.5 renamed it to :file:`security.fdb`, and Firebird 2.0 and later use :file:`security2.fdb`.

   .. sourcecode:: python

      from kinterbasdb import services
      con = services.connect(host='localhost', user='sysdba', password='masterkey')

      print con.getSecurityDatabasePath()

   Output (on a particular Firebird 1.5.0/Windows 2000 installation):

   .. sourcecode:: python

      C:\dev\db\firebird150\security.fdb

   Output (on a particular Firebird 1.5.0/Linux installation):

   .. sourcecode:: python

      /opt/firebird/security.fdb

.. method:: Connection.getLockFileDir()

   The database engine `uses a lock file `__ to coordinate interprocess communication; `getLockFileDir()` returns the directory in which that file resides:

   .. sourcecode:: python

      from kinterbasdb import services
      con = services.connect(host='localhost', user='sysdba', password='masterkey')

      print con.getLockFileDir()

   Output (on a particular Firebird 1.5.0/Windows 2000 installation):

   .. sourcecode:: python

      C:\dev\db\firebird150\

   Output (on a particular Firebird 1.5.0/Linux installation):

   .. sourcecode:: python

      /opt/firebird/

.. method:: Connection.getCapabilityMask()

   The Services API offers "a bitmask representing the capabilities currently enabled on the server", but the only available documentation for this bitmask suggests that it is "reserved for future implementation". kinterbasdb exposes this bitmask as a Python `int` returned from the `getCapabilityMask()` method.

.. method:: Connection.getMessageFileDir()

   To support internationalized error messages/prompts, the database engine stores its messages in a file named :file:`interbase.msg` (Interbase® and Firebird 1.0) or :file:`firebird.msg` (Firebird 1.5 and later). The directory in which this file resides can be determined with the `getMessageFileDir()` method.

   ..
sourcecode:: python from kinterbasdb import services con = services.connect(host='localhost', user='sysdba', password='masterkey') print con.getMessageFileDir() Output (on a particular Firebird 1.5.0/Windows 2000 installation): .. sourcecode:: python C:\dev\db\firebird150\ Output (on a particular Firebird 1.5.0/Linux installation): .. sourcecode:: python /opt/firebird/ .. method:: Connection.getConnectionCount() Returns the number of active connections to databases managed by the server. This count only includes *database* connections (such as open instances of :class:`kinterbasdb.Connection`), not *services manager* connections (such as open instances of :class:`kinterbasdb.services.Connection`). .. sourcecode:: python import kinterbasdb, kinterbasdb.services svcCon = kinterbasdb.services.connect(host='localhost', user='sysdba', password='masterkey') print 'A:', svcCon.getConnectionCount() con1 = kinterbasdb.connect(dsn='localhost:C:/temp/test.db', user='sysdba', password='masterkey') print 'B:', svcCon.getConnectionCount() con2 = kinterbasdb.connect(dsn='localhost:C:/temp/test.db', user='sysdba', password='masterkey') print 'C:', svcCon.getConnectionCount() con1.close() print 'D:', svcCon.getConnectionCount() con2.close() print 'E:', svcCon.getConnectionCount() On an otherwise inactive server, the example program generates the following output: .. sourcecode:: python A: 0 B: 1 C: 2 D: 1 E: 0 .. method:: Connection.getAttachedDatabaseNames() Returns a list of the names of all databases to which the server is maintaining at least one connection. The database names are not guaranteed to be in any particular order. .. sourcecode:: python import kinterbasdb, kinterbasdb.services svcCon = kinterbasdb.services.connect(host='localhost', user='sysdba', password='masterkey') print 'A:', svcCon.getAttachedDatabaseNames() con1 = kinterbasdb.connect(dsn='localhost:C:/temp/test.db', user='sysdba', password='masterkey') print 'B:', svcCon.getAttachedDatabaseNames() con2 = kinterbasdb.connect(dsn='localhost:C:/temp/test2.db', user='sysdba', password='masterkey') print 'C:', svcCon.getAttachedDatabaseNames() con3 = kinterbasdb.connect(dsn='localhost:C:/temp/test2.db', user='sysdba', password='masterkey') print 'D:', svcCon.getAttachedDatabaseNames() con1.close() print 'E:', svcCon.getAttachedDatabaseNames() con2.close() print 'F:', svcCon.getAttachedDatabaseNames() con3.close() print 'G:', svcCon.getAttachedDatabaseNames() On an otherwise inactive server, the example program generates the following output: .. sourcecode:: python A: [] B: ['C:\\TEMP\\TEST.DB'] C: ['C:\\TEMP\\TEST2.DB', 'C:\\TEMP\\TEST.DB'] D: ['C:\\TEMP\\TEST2.DB', 'C:\\TEMP\\TEST.DB'] E: ['C:\\TEMP\\TEST2.DB'] F: ['C:\\TEMP\\TEST2.DB'] G: [] .. method:: Connection.getLog() Returns the contents of the server's log file (named :file:`interbase.log` by Interbase® and Firebird 1.0; :file:`firebird.log` by Firebird 1.5 and later): .. sourcecode:: python from kinterbasdb import services con = services.connect(host='localhost', user='sysdba', password='masterkey') print con.getLog() Output (on a particular Firebird 1.5.0/Windows 2000 installation): .. sourcecode:: python WEASEL (Client) Thu Jun 03 12:01:35 2004 INET/inet_error: send errno = 10054 WEASEL (Client) Sun Jun 06 19:21:17 2004 INET/inet_error: connect errno = 10061 Database Statistics ------------------- .. method:: Connection.getStatistics( database, showOnlyDatabaseLogPages=0...) 
Returns a string containing a printout in the same format as the output of the :command:`gstat` command-line utility. This method has one required parameter, the location of the database on which to compute statistics, and five optional boolean parameters for controlling the domain of the statistics.

**Map of gstat parameters to getStatistics options**

=========================== ====================================
`gstat` command-line option `getStatistics` boolean parameter
=========================== ====================================
-header                     showOnlyDatabaseHeaderPages
-log                        showOnlyDatabaseLogPages
-data                       showUserDataPages
-index                      showUserIndexPages
-system                     showSystemTablesAndIndexes
=========================== ====================================

The following program presents several `getStatistics` calls and their `gstat`-command-line equivalents. In this context, output is considered "equivalent" even if there are some whitespace differences. When collecting textual output from the Services API, kinterbasdb terminates lines with `\n` regardless of the platform's convention; `gstat` is platform-sensitive.

.. sourcecode:: python

   from kinterbasdb import services
   con = services.connect(user='sysdba', password='masterkey')

   # Equivalent to 'gstat -u sysdba -p masterkey C:/temp/test.db':
   print con.getStatistics('C:/temp/test.db')

   # Equivalent to 'gstat -u sysdba -p masterkey -header C:/temp/test.db':
   print con.getStatistics('C:/temp/test.db', showOnlyDatabaseHeaderPages=True)

   # Equivalent to 'gstat -u sysdba -p masterkey -log C:/temp/test.db':
   print con.getStatistics('C:/temp/test.db', showOnlyDatabaseLogPages=True)

   # Equivalent to
   # 'gstat -u sysdba -p masterkey -data -index -system C:/temp/test.db':
   print con.getStatistics('C:/temp/test.db',
       showUserDataPages=True,
       showUserIndexPages=True,
       showSystemTablesAndIndexes=True
     )

The output of the example program is not shown here because it is quite long.

Backup and Restoration
----------------------

KInterbasDB offers convenient programmatic control over database backup and restoration via the `backup` and `restore` methods. At the time of this writing, released versions of Firebird/Interbase® do not implement incremental backup, so we can simplistically define *backup* as the process of generating and storing an archived replica of a live database, and *restoration* as the inverse. The backup/restoration process exposes numerous parameters, which are properly documented in the Firebird documentation for :command:`gbak`. The KInterbasDB API to these parameters is presented with minimal documentation in the sample code below.

.. method:: Connection.backup(sourceDatabase, destFilenames, destFileSizes=(), ...)

   Creates a backup file from database content.

   **Simple Form**

   The simplest form of `backup` creates a single backup file that contains everything in the database. Although the extension `'.fbk'` is conventional, it is not required.

   .. sourcecode:: python

      from kinterbasdb import services
      con = services.connect(user='sysdba', password='masterkey')

      backupLog = con.backup('C:/temp/test.db', 'C:/temp/test_backup.fbk')
      print backupLog

   In the example, `backupLog` is a string containing a `gbak`-style log of the backup process. It is too long to reproduce here.

   Although the return value of the `backup` method is a freeform log string, `backup` will raise an exception if there is an error. For example:

   ..
sourcecode:: python

   from kinterbasdb import services
   con = services.connect(user='sysdba', password='masterkey')

   # Pass an invalid backup path to the engine:
   backupLog = con.backup('C:/temp/test.db', 'BOGUS/PATH/test_backup.fbk')
   print backupLog

.. sourcecode:: python

   Traceback (most recent call last):
     File "adv_services_backup_simplest_witherror.py", line 5, in ?
       backupLog = con.backup('C:/temp/test.db', 'BOGUS/PATH/test_backup.fbk')
     File "C:\code\projects\kinterbasdb\Kinterbasdb-3.0\build\lib.win32-2.3\kinterbasdb\services.py", line 269, in backup
       return self._actAndReturnTextualResults(request)
     File "C:\code\projects\kinterbasdb\Kinterbasdb-3.0\build\lib.win32-2.3\kinterbasdb\services.py", line 613, in _actAndReturnTextualResults
       self._act(requestBuffer)
     File "C:\code\projects\kinterbasdb\Kinterbasdb-3.0\build\lib.win32-2.3\kinterbasdb\services.py", line 610, in _act
       return _ksrv.action_thin(self._C_conn, requestBuffer.render())
   kinterbasdb.OperationalError: (-902, '_kiservices could not perform the action: cannot open backup file BOGUS/PATH/test_backup.fbk. ')

**Multifile Form**

The database engine has built-in support for splitting the backup into multiple files, which is useful for circumventing operating system file size limits or spreading the backup across multiple discs. KInterbasDB exposes this facility via the `Connection.backup` parameters `destFilenames` and `destFileSizes`.

`destFilenames` (the second positional parameter of `Connection.backup`) can be either a string (as in the example above, when creating the backup as a single file) or a sequence of strings naming each constituent file of the backup. If `destFilenames` is a string-sequence with length `N`, `destFileSizes` must be a sequence of integer file sizes (in bytes) with length `N-1`. The database engine will constrain the size of each backup constituent file named in `destFilenames[:-1]` to the corresponding size specified in `destFileSizes`; any remaining backup data will be placed in the file named by `destFilenames[-1]`.

Unfortunately, the database engine does not appear to expose any convenient means of calculating the total size of a database backup before its creation. The page size of the database and the number of pages in the database are available via :meth:`~kinterbasdb.Connection.database_info()` calls: `database_info(kinterbasdb.isc_info_page_size, 'i')` and `database_info(kinterbasdb.isc_info_db_size_in_pages, 'i')`, respectively, but the size of the backup file is usually smaller than the size of the database.

There *should* be no harm in submitting too many constituent specifications; the engine will write an empty header record into the excess constituents. However, at the time of this writing, released versions of the database engine hang the backup task if more than 11 constituents are specified (that is, if `len(destFilenames) > 11`). KInterbasDB does not prevent the programmer from submitting more than 11 constituents, but it does issue a warning.

The following program directs the engine to split the backup of the database at :file:`C:/temp/test.db` into :file:`C:/temp/back01.fbk`, a file 4096 bytes in size, :file:`C:/temp/back02.fbk`, a file 16384 bytes in size, and :file:`C:/temp/back03.fbk`, a file containing the remainder of the backup data.

..
sourcecode:: python

   from kinterbasdb import services
   con = services.connect(user='sysdba', password='masterkey')

   con.backup('C:/temp/test.db',
       ('C:/temp/back01.fbk', 'C:/temp/back02.fbk', 'C:/temp/back03.fbk'),
       destFileSizes=(4096, 16384)
     )

**Extended Options**

In addition to the three parameters documented previously (positional `sourceDatabase`, positional `destFilenames`, and keyword `destFileSizes`), the `Connection.backup` method accepts six boolean parameters that control aspects of the backup process and the backup file output format. These options are well documented elsewhere, so this document presents only a table of equivalence between :command:`gbak` options and the names of the boolean keyword parameters:

============== ===================================== =================
`gbak` option  Parameter Name                        Default Value
============== ===================================== =================
-T             transportable                         True
-M             metadataOnly                          False
-G             garbageCollect                        True
-L             ignoreLimboTransactions               False
-IG            ignoreChecksums                       False
-CO            convertExternalTablesToInternalTables True
============== ===================================== =================

.. method:: Connection.restore(sourceFilenames, destFilenames, destFilePages=(), ...)

   Restores a database from a backup file.

   **Simplest Form**

   The simplest form of `restore` creates a single-file database, regardless of whether the backup data were split across multiple files.

   .. sourcecode:: python

      from kinterbasdb import services
      con = services.connect(user='sysdba', password='masterkey')

      restoreLog = con.restore('C:/temp/test_backup.fbk', 'C:/temp/test_restored.db')
      print restoreLog

   In the example, `restoreLog` is a string containing a `gbak`-style log of the restoration process. It is too long to reproduce here.

   **Multifile Form**

   The database engine has built-in support for splitting the restored database into multiple files, which is useful for circumventing operating system file size limits or spreading the database across multiple discs. KInterbasDB exposes this facility via the `Connection.restore` parameters `destFilenames` and `destFilePages`.

   `destFilenames` (the second positional argument of `Connection.restore`) can be either a string (as in the example above, when restoring to a single database file) or a sequence of strings naming each constituent file of the restored database. If `destFilenames` is a string-sequence with length `N`, `destFilePages` must be a sequence of integers with length `N-1`. The database engine will constrain the size of each database constituent file named in `destFilenames[:-1]` to the corresponding page count specified in `destFilePages`; any remaining database pages will be placed in the file named by `destFilenames[-1]`.

   The following program directs the engine to restore the backup file at :file:`C:/temp/test_backup.fbk` into a database with three constituent files: :file:`C:/temp/test_restored01.db`, :file:`C:/temp/test_restored02.db`, and :file:`C:/temp/test_restored03.db`. The engine is instructed to place fifty user data pages in the first file, seventy in the second, and the remainder in the third file. In practice, the first database constituent file will be larger than `pageSize*destFilePages[0]`, because metadata pages must also be stored in the first constituent of a multifile database.

   ..
sourcecode:: python

   from kinterbasdb import services
   con = services.connect(user='sysdba', password='masterkey')

   con.restore('C:/temp/test_backup.fbk',
       ('C:/temp/test_restored01.db',
        'C:/temp/test_restored02.db',
        'C:/temp/test_restored03.db'),
       destFilePages=(50, 70),
       pageSize=1024, replace=True
     )

**Extended Options**

These options are well documented elsewhere, so this document presents only a table of equivalence between the :command:`gbak` options and the names of the keyword parameters to `Connection.restore`:

============== ===================================== ====================
`gbak` option  Parameter Name                        Default Value
============== ===================================== ====================
-P             pageSize                              [use server default]
-REP           replace                               False
-O             commitAfterEachTable                  False
-K             doNotRestoreShadows                   False
-I             deactivateIndexes                     False
-N             doNotEnforceConstraints               False
-USE           useAllPageSpace                       False
-MO            accessModeReadOnly                    False
-BU            cacheBuffers                          [use server default]
============== ===================================== ====================

Database Operating Modes, Sweeps, and Repair
--------------------------------------------

.. method:: Connection.sweep(database, markOutdatedRecordsAsFreeSpace=1)

   Not yet documented.

.. method:: Connection.setSweepInterval(database, n)

   Not yet documented.

.. method:: Connection.setDefaultPageBuffers(database, n)

   Not yet documented.

.. method:: Connection.setShouldReservePageSpace(database, shouldReserve)

   Not yet documented.

.. method:: Connection.setWriteMode(database, mode)

   Not yet documented.

.. method:: Connection.setAccessMode(database, mode)

   Not yet documented.

.. method:: Connection.setSQLDialect(database, dialect)

   Not yet documented.

.. method:: Connection.activateShadowFile(database)

   Not yet documented.

.. method:: Connection.shutdown(database, shutdownMethod, timeout)

   Not yet documented.

.. method:: Connection.bringOnline(database)

   Not yet documented.

.. method:: Connection.getLimboTransactionIDs(database)

   Not yet documented.

.. method:: Connection.commitLimboTransaction(database, transactionID)

   Not yet documented.

.. method:: Connection.rollbackLimboTransaction(database, transactionID)

   Not yet documented.

.. method:: Connection.repair(database, ...)

   Not yet documented.

User Maintenance
----------------

.. method:: Connection.getUsers(username=None)

   By default, lists all users.

.. method:: Connection.addUser(user)

   :user: An instance of :class:`User` with *at least* its username and password attributes specified as non-empty values.

.. method:: Connection.modifyUser(user)

   Changes user data.

   :user: An instance of :class:`User` with *at least* its username and password attributes specified as non-empty values.

.. method:: Connection.removeUser(user)

   Accepts either an instance of services.User or a string username, and deletes the specified user.

.. method:: Connection.userExists(user)

   Returns a boolean that indicates whether the specified user exists.

.. class:: User

   Not yet documented.
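Since the :class:`User` class is not yet documented here, the following is only a sketch of the user-maintenance calls, based solely on the method signatures and the `username`/`password` attributes mentioned above. The direct construction of a :class:`User` instance and the attributes of the objects returned by `getUsers()` are assumptions, not documented behavior:

.. sourcecode:: python

   from kinterbasdb import services

   con = services.connect(host='localhost', user='sysdba',
       password='masterkey')

   # Assumption: a User object can be created directly and configured via
   # the username and password attributes mentioned in the addUser docs.
   user = services.User()
   user.username = 'test_user_1'
   user.password = 'pass'

   if not con.userExists('test_user_1'):
       con.addUser(user)

   # Assumption: getUsers() returns a sequence of User instances.
   for u in con.getUsers():
       print u.username

   # removeUser accepts either a services.User instance or a plain
   # username string (per the documentation above).
   con.removeUser('test_user_1')

Treat this sketch as illustrative only until these methods are formally documented.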
kinterbasdb-3.3.0/docs/_sources/links.txt0000644000175000001440000000501311132652265017642 0ustar pcisarusers

KInterbasDB Links
=================

KInterbasDB
^^^^^^^^^^^

+ `Home Page `__

Python + Databases
^^^^^^^^^^^^^^^^^^

+ `Python `__
+ `distutils Home Page `__
+ `distutils Installation Instructions for Generic Packages `__
+ `Python Database Topic Guide `__
+ `Python DB-API Spec 2.0 `__
+ `PEP 249 - Augmented Python DB-API Spec 2.0 `__
+ `Google's Python Database Module Directory `__
+ `Vaults of Parnassus - Database section `__

Zope Database Adapters for Interbase®/Firebird
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

+ `kinterbasdbDA `__ (based on kinterbasdb)
+ `ZKInterbasdbDA `__ (based on kinterbasdb)
+ `ZFireBirdDA `__ (based on kinterbasdb)
+ `gvibDA `__ (based on gvib)

Interbase® and Firebird
^^^^^^^^^^^^^^^^^^^^^^^

+ `Firebird `__
+ `Interbase® `__
+ `IBPhoenix `__
+ `IBPhoenix Contributed Downloads `__
+ `Google's Interbase® Directory `__
+ `IB O Console - nice, free Win32 GUI for Firebird `__

Help
^^^^

+ `Brief Python DB API Tutorial `__
+ `SQL Tutorials `__
+ `An Especially Good SQL Tutorial `__
+ `KInterbasDB User forum `__ (on SourceForge)
+ `Firebird-Python list `__ (Yahoo Group)
+ `Firebird-Support list `__ (Yahoo Group)
+ `Python Database SIG mailing list `__

kinterbasdb-3.3.0/docs/_sources/changelog.txt0000644000175000001440000016731611133077256020466 0ustar pcisarusers

#####################
KInterbasDB Changelog
#####################

Version 3.3
===========

New Features
------------

- It is now possible to use multiple transactions simultaneously on a single kinterbasdb.Connection. See discussion: http://sourceforge.net/forum/forum.php?thread_id=1597658&forum_id=30917

- If a unicode object is passed as the SQL statement to any of

  + kinterbasdb.create_database
  + Connection.execute_immediate
  + Cursor.prep
  + Cursor.execute

  KInterbasDB will attempt to encode it to the character set of the file system (in the case of kinterbasdb.create_database), or to the character set of the connection in the other cases. Previously, only unicode objects that could be encoded to ASCII were accepted.

- Documentation was extended and completely redone using reStructuredText and Sphinx (http://sphinx.pocoo.org)

Backward-incompatibilities
--------------------------

- The default type conversion setting was changed to `type_conv=200`. Applications that don't call :func:`kinterbasdb.init()` and rely on Python older than 2.4 and/or mx.DateTime and/or explicit unicode conversion must call :func:`kinterbasdb.init()` with `type_conv=1` as the first thing after importing `kinterbasdb`. Applications that explicitly call :func:`kinterbasdb.init()` don't need to be changed at all. Details about the new default setting are described in the `Deferred Loading of Dynamic Type Translators` subsection of the `Parameter Conversion` section of the KInterbasDB documentation.

Version 3.2.2
=============

Bug Fixes
---------

- Fixed a bug with a wrong Transaction Parameter Block structure. It surfaced with Firebird 2.1, which is more strict about TPB correctness.

- Fixed a bug with Services under Firebird 2.1.

Version 3.2.1
=============

Bug Fixes
---------

- Inadequate compatibility with Interbase 7's boolean data type. All official Win32 binaries are compiled and linked against Firebird, not Interbase. Typically, Interbase users either compile their own KInterbasDB binaries or use the official Firebird 1.0-oriented binaries. Previously, the latter were not fully compatible with Interbase 7's boolean type.
`Thanks to rmacdurmon for reporting this problem.`

- Conditionalized a C reference to some constants not included in the Interbase C API. SF bug 1631461: http://sourceforge.net/tracker/index.php?func=detail&aid=1631461&group_id=9913&atid=109913

- The "Signature Specifications for Input and Output Translators" section of the Usage Guide specified the Dynamic Type Translator signature of the TIMESTAMP type incorrectly. SF bug 1554643: http://sourceforge.net/tracker/index.php?func=detail&aid=1554643&group_id=9913&atid=109913

- Removed an unnecessary reference that prevented KInterbasDB from compiling on some primitive Unixy operating systems.

- An assertion was triggered when the BLOB DTT {'mode': 'materialized'} was set explicitly. This did not actually affect 3.2.0-final binaries, since they were delivered with assertions off, but it is now fixed. SF bug 1652413: http://sourceforge.net/tracker/index.php?func=detail&aid=1652413&group_id=9913&atid=109913

- KInterbasDB had various problems when any of the following were installed at a path that contained non-ASCII characters:

  - Python
  - KInterbasDB
  - Firebird

  It now works properly. See discussion: http://sourceforge.net/forum/forum.php?thread_id=1695175&forum_id=30917

- As a side effect of that change, SF bug 1676482 was fixed: http://sourceforge.net/tracker/index.php?func=detail&aid=1676482&group_id=9913&atid=109913

Version 3.2
===========

Bug Fixes
---------

- At concurrency_level 1, it was possible for a deadlock to occur if KInterbasDB simultaneously raised an exception in one thread while executing a SQL statement in another. This problem did not affect concurrency_level 2. `Thanks to Atsuo Ishimoto for reporting this bug.`

- The official implementation of the automagic TEXT_UNICODE type translator (in the kinterbasdb.typeconv_text_unicode module) was missing support for the new character sets introduced in Firebird 2.0 (namely, the corrected version of UTF8, plus KOI8-R, KOI8-U, and WIN1258). `Thanks to Oleg Deribas for bringing this to my attention.`

Version 3.2rc1
==============

Bug Fixes
---------

- KInterbasDB's "Implicit Conversion of Input Parameters from Strings" now accepts not only str objects, but also unicode objects, as long as they're convertible to ASCII. `Thanks to Ronald Lew for reporting this bug.` For general info about the "Implicit Conversion of Input Parameters from Strings" feature, see: http://kinterbasdb.sf.net/dist_docs/usage.html#adv_param_conv_implicit_from_string

New Features
------------

- KInterbasDB now stores and retrieves the undocumented sub-second component of TIME and TIMESTAMP fields. `Thanks to Petr Jakes and Helen Borrie for bringing the availability of the sub-second data to my attention.`

- Passing None to Cursor.execute (instead of a SQL string or a PreparedStatement) now executes the most recently prepared/executed statement, if any. This can enhance convenience because it frees the client programmer from the responsibility of separately tracking the most recent statement in order to execute it again. `Thanks to Igor Youdytsky for suggesting this feature.`

- PreparedStatements now have a read-only 'description' property that contains a Python DB API 2.0 description sequence of the same format as Cursor.description.
  `Thanks to Alexandr Zamaraev for suggesting this feature.`

  For more info, see:
  http://kinterbasdb.sf.net/dist_docs/usage.html#PreparedStatement_description
- The following query and resolution methods for limbo transactions have
  been added to the kinterbasdb.services.Connection class:
  getLimboTransactionIDs, commitLimboTransaction, rollbackLimboTransaction.

Backward-incompatibilities
--------------------------

- Dynamic Type Translators for TIME and TIMESTAMP fields must now
  accommodate an additional tuple element: an integer that represents
  microseconds. The official TIME and TIMESTAMP type translators in
  typeconv_datetime_naked.py, typeconv_datetime_stdlib.py, and
  typeconv_datetime_mx.py have been updated, and can be used as a guide.

Version 3.2b1
=============

Bug Fixes
---------

- Previously, if KInterbasDB detected that the field to which a NULL value
  was bound was defined as NOT NULL, KInterbasDB itself immediately raised
  an exception. This caused problems for fields defined as NOT NULL but
  populated by BEFORE triggers, so KInterbasDB now submits the illegal NULL
  and allows the database engine to make the decision.
- Fixed an obscure memory leak in Connection.drop_database.
- Fixed a few more compatibility problems with Interbase 7.x.
- kinterbasdb.Cursor can again be used as a base class for user-defined
  subclasses. This capability had been removed in KInterbasDB 3.2a1.

New Features
------------

- Connection timeouts: KInterbasDB connections can now be asked to time out
  after a specified period of inactivity. This feature is not supported by
  the Firebird C API, so it is implemented entirely at the KInterbasDB
  level. For more info, see:
  http://kinterbasdb.sf.net/dist_docs/usage.html#adv_ct
- Added a TPB class to assist with the construction of complex Transaction
  Parameter Buffers. This feature has not yet been documented. In the
  meantime, you can find example code in the test_transactions.py module of
  the KInterbasDB test suite:
  http://kinterbasdb.sf.net/test-suite/releases/
- Added methods Connection.transaction_info and Connection.trans_info.
  transaction_info is a thin wrapper around the C function
  isc_transaction_info, while trans_info is a Pythonic wrapper around
  transaction_info. This feature has not yet been documented. In the
  meantime, you can find example code in the test_services.py module of the
  KInterbasDB test suite:
  http://kinterbasdb.sf.net/test-suite/releases/
- Exposed the Firebird header constant FB_API_VER as kinterbasdb.FB_API_VER.
  This integer represents the version of Firebird against which KInterbasDB
  was compiled, as follows:

  - Any version of Interbase, or Firebird 1.0.x: 10
  - Firebird 1.5.x: 15
  - Firebird 2.0.x: 20
- KInterbasDB now raises a kinterbasdb.TransactionConflict exception
  (instead of the rather generic ProgrammingError) when it receives a
  server-side transaction conflict notification. This makes it easier for
  the client programmer to detect and resolve deadlocks.
  TransactionConflict is a subclass of kinterbasdb.OperationalError.

Backward-incompatibilities
--------------------------

- Client programs that encounter transaction conflicts in routine
  operation, *and* which contain logic to deal with this type of exception
  specifically (on the basis of the payload of the ProgrammingError) should
  be updated to use::

      try:
          ...
      except kinterbasdb.TransactionConflict:
          ...

  instead. For more info, see the last item under "New Features" above.

Version 3.2a1
=============

New Features
------------

- Better concurrency support.
This requires a capable database client library, and must be explicitly activated by specifying keyword argument concurrency_level to function kinterbasdb.init. For more info, see: http://kinterbasdb.sf.net/dist_docs/usage.html#special_issue_concurrency - Streaming blob support (blobs can now be handled without fully materializing them in memory). For more info, see: http://kinterbasdb.sf.net/dist_docs/usage.html#adv_param_conv_blobs - Support for manual creation and manipulation of prepared statements. KInterbasDB has always used prepared statements under the hood, but now they're directly manipulable by the client programmer. The implicit prepared statement cache has also become more sophisticated. For more info, see: http://kinterbasdb.sf.net/dist_docs/usage.html#adv_prepared_statements - Database event system entirely rewritten. Client programs can now create any number of EventConduits per process, and a given conduit can listen for more than 15 events (the limit is in the hundreds, but the specific limit depends on the database server version and operating system rather than on KInterbasDB). For more info, see: http://kinterbasdb.sf.net/dist_docs/usage.html#adv_event - Added method Connection.db_info, a Pythonic wrapper around Connection.database_info. Thanks to Pavel Cisar for implementing this. For more info, see: http://kinterbasdb.sf.net/dist_docs/usage.html#adv_prog_maint_db_info - Positional Dynamic Type Translation: It's now possible to specify DTT settings at the Cursor-column level in addition to the previously available levels. For more info, see: http://kinterbasdb.sf.net/dist_docs/usage.html#adv_param_conv_dynamic_type_translation_positional - Added official support for the Python 2.4+ standard library decimal module, including a new kinterbasdb.init(type_conv=200) convenience code. For more info, see: http://kinterbasdb.sf.net/dist_docs/usage.html#adv_param_conv_dynamic_type_translation_deferred_loading - KInterbasDB now detects the "PSQL Stack Trace" generated by FB 2.0+, and includes a nicely formatted rendition of the stack trace in the exception message. For more info, see: Firebird 2.0 Release Notes, section "PSQL Stack Trace" Bug Fixes --------- - KInterbasDB should now compile and run out of the box with Interbase 7.x. DSR doesn't have that version of Interbase, however, so KInterbasDB is not actually tested with it. Backward-incompatibilities -------------------------- - KInterbasDB 3.2 has dropped support for Python versions earlier than 2.3, and (officially) supports only Firebird 1.0 and later. However, Interbase 7.x support has been considerably enhanced, so it could be said that Interbase is "unofficially" supported. - Most Python classes in KInterbasDB have become new-style, for symmetry with the new-style C classes added in 3.2. Notably, kinterbasdb.Cursor is now a new-style class written in pure C. Impact rating: Low (There is practically no reason for a client program to access the affected KInterbasDB classes in such a way that this change would matter.) - Previously, the "infinite timeout value" for EventConduit.wait was 0.0. The choice of that value was a terrible mistake, because attempting to specify an extremely short timeout with a value such as 0.000000000000001 in fact created an infinite timeout. The new "infinite timeout value" is -1.0. Impact rating: Low-Medium (The Usage Guide for KInterbasDB 3.1 specified that "The default timeout is infinite.", but it did not guarantee a particular value. 
Client programs that use both events and event timeouts should be checked,
however.)

Version 3.1.3
=============

Bug Fixes
---------

- Fixed a leak of the weak reference objects used by a Connection to track
  its Cursors. These objects were collected when the Connection closed, but
  never earlier.

  `Thanks to Robby Dermody for reporting this bug.`

  Refs: http://sf.net/forum/forum.php?thread_id=1380653&forum_id=30917
- The database engine's Services API makes no provision for Unicode
  handling. kinterbasdb should have gracefully rejected Python unicode
  objects submitted to the Services API, but instead, it choked.

  `Thanks to Garrett Smith for reporting this bug.`

  Refs: http://sf.net/forum/forum.php?thread_id=1366918&forum_id=30917

Version 3.1.2
=============

Bug Fixes
---------

- Attempting to apply dynamic type translation settings dictionaries that
  had non-string keys caused a segfault under some circumstances.
- kinterbasdb's Services API infrastructure parsed integer results from the
  engine as recommended by the IB 6 API Guide, but this was inappropriate
  on non-Windows x86-64, and could cause invalid memory access.
- Input handling of INTEGER ARRAY fields did not work correctly on
  non-Windows x86-64.
- Overriding a connection's dynamic type translation settings for a
  particular slot with the "naked" translator by passing None as the
  translator to Cursor.set_type_trans_[in|out] did not work.
- The FIXED dynamic type translation slot was not handled properly on
  dialect 1 connections (dialect 1 databases store NUMERIC/DECIMAL values
  with precisions 10-18 internally as floating point).
- Documentation bug: The "Using KInterbasDB with Embedded Firebird" section
  of the Usage Guide stated that the Services API did not work with the
  embedded server architecture. That was written when Firebird 1.5 was in
  alpha; the Services API *does* work with embedded Firebird 1.5.2.

Version 3.1.1
=============

Bug Fixes
---------

- kinterbasdb.init(type_conv=100|200) didn't work under Python 2.4. Late in
  the Python 2.4 development cycle, additional constraints were introduced
  in Python's funcobject.c that defeated kinterbasdb's attempts to
  manipulate the 'func_code' attribute of some functions during the
  execution of kinterbasdb.init.
- C type ConnectionType's destructor was called directly (rather than as a
  result of DECREF) if an error arose in kinterbasdb.connect or
  kinterbasdb.create_database. This triggered a refcount assertion in debug
  builds of Python.
- Fixed a reference count leak in the C layer's central exception-raising
  function.
- Fixed some potential memory handling problems in exceptional situations
  in the event handling code.
- A trivial problem prevented kinterbasdb 3.1 from compiling with the
  prereleases of Firebird 2.0.

New Features
------------

- In typeconv_text_unicode.py, enabled auto-translation of some Asian
  Unicode codecs that didn't enter the Python standard library until
  Python 2.4.

Version 3.1
===========

Bug Fixes
---------

- Fixed minor problems with the Connection.database_info method.

Version 3.1_pre9
================

Version 3.1_pre9 is being released instead of 3.1 final primarily to test
Python 2.4 compatibility. Since the first beta of Python 2.4 has now been
released, it is expected that these binaries will continue to work
throughout 2.4's lifespan (including maintenance releases - 2.4.x).

New Features
------------

- Python 2.4 support (that is, a few build script changes and the
  availability of official Windows binaries for Python 2.4).
Bug Fixes --------- - kinterbasdb sometimes caused an exception to be raised during the Python interpreter's shutdown process. Refs: http://sourceforge.net/tracker/index.php?func=detail&aid=1011513&group_id=9913&atid=109913 - Fixed a potential concurrency problem regarding memory allocation in kinterbasdb's event handling code. Version 3.1_pre8 ================ Version 3.1_pre8 is the recommended stable version of kinterbasdb. New Features ------------ - kinterbasdb._RowMapping has a richer dict-like interface (now implements __len__, __getitem__, get, __contains__, keys, values, items, __iter__, iterkeys, itervalues, iteritems). Bug Fixes --------- - The kinterbasdb.typeconv_fixed_fixedpoint.fixed_conv_out_precise dynamic type translator was unable to convert some NUMERIC/DECIMAL database values. Refs: http://sourceforge.net/tracker/index.php?func=detail&aid=949669&group_id=9913&atid=109913 - The kinterbasdb.typeconv_text_unicode.unicode_conv_[in|out] dynamic type translators did not work with non-default collations. Refs: http://sourceforge.net/tracker/index.php?func=detail&aid=876564&group_id=9913&atid=109913 - The kinterbasdb.services.Connection.getLog method should not have accepted a database parameter; it no longer does. - The kinterbasdb.services.Connection.backup method now returns a gbak-style log string (as the kinterbasdb.services.Connection.restore method has done all along). - Applied Mac OS X compatibility patch to setup.py. Refs: http://sourceforge.net/tracker/index.php?func=detail&aid=909886&group_id=9913&atid=309913 - Ported to the AMD64 architecture. Tested with a prerelease version of Firebird 1.5.1/AMD64 on Fedora Core 1/AMD64. Refs: http://firebird.sourceforge.net/download/prerelease/1.5.1 http://firebird.sourceforge.net/download/prerelease/1.5.1/FirebirdSS-1.5.1.4424-public3.amd64.rpm Backward-incompatibilities -------------------------- - The kinterbasdb.services.Connection.getEnvironmentMessage method has been renamed to getMessageFileDir. - The kinterbasdb.services.Connection.getLog method should not have accepted a database parameter; it no longer does. DOCUMENTATION CHANGES --------------------- - Documented about 66% of the Services API (kinterbasdb.services module) in the KInterbasDB Usage Guide. KNOWN ISSUES ------------ - The third-party fixedpoint.py module contains an incompatibility with Python 2.1 that is exposed by a bugfix applied to the kinterbasdb.typeconv_fixed_fixedpoint module in 3.1_pre8. No attempt will be made to fix this problem (which is a fixedpoint bug, not a kinterbasdb bug); users should either upgrade to a newer version of Python or refrain from using fixedpoint. Version 3.1_pre7 ================ Version 3.1_pre7 should be considered a release candidate. It is thought to be ready for production use. New Features ------------ - Introduced dynamic type translation slot TEXT_UNICODE, which applies to all CHAR/VARCHAR fields except those with character sets NONE, OCTETS, or ASCII. Used in combination with the official translators in the kinterbasdb.typeconv_text_unicode module, TEXT_UNICODE enables automatic encoding/decoding of Unicode values. This translator is not active by default except when kinterbasdb is initialized with kinterbasdb.init(type_conv=100); the backward compatibility implications are discussed in detail in the Backward-incompatibilities section below. Refs: docs/usage.html#faq_fep_unicode - Added read-only .charset attribute to Connection. 
- On Windows, kinterbasdb now conforms to the client library loading scheme introduced in FB 1.5 RC7, so fbclient.dll need not be explicitly placed in a directory on the PATH if the registry settings are present. Bug Fixes --------- - The type slot in cursor.description is now updated dynamically to reflect dynamic type translation settings. Refs: http://sourceforge.net/tracker/index.php?func=detail&aid=814276&group_id=9913&atid=109913 - Added logic to prevent inappropriate calls to isc_dsql_free_statement, which were observed to cause a segfault in a heavily multithreaded environment. - Added special case to field precision determination code to accommodate the database client library's irregular handling of RDB$DATABASE.RDB$DB_KEY Refs: http://sourceforge.net/tracker/index.php?func=detail&aid=818609&group_id=9913&atid=109913 Backward-incompatibilities -------------------------- - Programs that use BOTH of the following: - the TEXT dynamic type translation slot - unicode database fields will need to be updated to take the new TEXT_UNICODE slot into account. Since the TEXT slot is not particularly useful, this incompatibility is expected to affect very few existing programs. Refs: docs/usage.html#faq_fep_unicode - Convenience code 100 for the kinterbasdb.init function now activates the new TEXT_UNICODE translation slot, so unicode values are automatically encoded and decoded. Convenience code 1 remains the default, however, and it does not activate the TEXT_UNICODE slot. Programs that do BOTH of the following: - invoke kinterbasdb.init(type_conv=100) - use unicode database fields will need to be updated to take the new TEXT_UNICODE slot into account. Refs: docs/usage.html#adv_param_conv_dynamic_type_translation_tbl_convenience_codes Version 3.1_pre6 ================ Version 3.1_pre6 should be considered a release candidate. It is thought to be stable. New Features ------------ - Added support for manual control over the phases of two-phase commit. The client programmer now has the option of triggering the first phase manually via Connection.prepare() or ConnectionGroup.prepare(). This is useful when integrating with third-party transaction managers. - KInterbasDB can now be compiled "out of the box" with MinGW when building against Firebird 1.5 (but not Firebird 1.0 or Interbase). See docs/installation-source.html for instructions. Bug Fixes --------- - Connection.drop_database() now rolls back the connection's active transaction (if any) before dropping the database. Previously, the database could be dropped with the transaction still active; when the connection was subsequently garbage collected, a rollback request was issued for the transaction (in a nonexistent database), resulting in memory corruption. - String values returned by input dynamic type translators were sometimes prematurely garbage collected before the database engine had read their contents. - SQL fields with dynamic definitions (such as expressions in a SELECT list) that involved fixed point data types (NUMERIC or DECIMAL) didn't get passed through the FIXED dynamic type translator because the database engine does not flag dynamically defined fields properly. Though this is a bug in the database engine rather than KInterbasDB, a workaround was added to KInterbasDB. `Thanks to Bert Hughes for reporting this bug.` - The installation action of the setup script ('setup.py install') did not place the supporting files (documentation) in the proper directory on Linux. 
`Thanks to Treeve Jelbert for reporting this bug.` Version 3.1_pre5 ================ Version 3.1_pre5 should be considered a release candidate. It is thought to be stable. New Features ------------ - Deferred loading of dynamic type translators: KInterbasDB's choice of initial dynamic type translators for date/time and fixed point types is now deferred as late as possible, and the programmer has the *option* of controlling the choice via the type_conv parameter of the new kinterbasdb.init function. This feature is documented in the Usage Guide at: usage.html#adv_param_conv_dynamic_type_translation_deferred_loading - KInterbasDB's setup script is now capable of compiling the source distribution "out of the box" with MinGW on Windows, but only with Firebird 1.5 or later (Borland C++ can be used with Firebird 1.0). This feature is documented in the installation guide for the source distribution at: installation-source.html#compiler_specific_compilation_notes Bug Fixes --------- - During blob insertion, not enough memory was allocated to hold the blob ID returned by the database engine, resulting in an overflow. - Implicit conversion of DATE/TIME/TIMESTAMP input parameters from strings to the appropriate internal types was accidentally disallowed in 3.1_pre4. This feature has been enabled again. - The Services API method kinterbasdb.services.Connection.restore was incapable of restoring a backup into a multi-file database because it sent the wrong cluster identifier for destination file page counts. Backward-incompatibilities -------------------------- - Because of the new "Deferred loading of dynamic type translators" feature, the DB API type comparison singleton kinterbasdb.DATETIME will not compare equal to *any* type until the kinterbasdb.init function has been called (whether explicitly or implicitly). This issue--which is expected to affect little or no existing code--is documented in the Usage Guide at: usage.html#adv_param_conv_dynamic_type_translation_deferred_loading_backcompat - The dynamic type translation module typeconv_preferred has been renamed to typeconv_23plus. Version 3.1_pre4 ================ Version 3.1_pre4 should be considered a late beta release. It is thought to be stable, and there are no plans to add new features before 3.1 final (only to fix bugs and finish updating the documentation). Note that the KInterbasDB Usage Guide has been considerably updated, though it is not quite complete. When complete, it will document all of the numerous new features in kinterbasdb 3.1; it's a "must read" even now. The Usage Guide is distributed with KInterbasDB (kinterbasdb-installation-dir/docs/usage.html), and is available online at: http://cvs.sourceforge.net/cgi-bin/viewcvs.cgi/*checkout*/kinterbasdb/Kinterbasdb-3.0/docs/usage.html New Features ------------ - DATABASE EVENT HANDLING has been reinstated, ported to POSIX, and timeout support has been added. This feature is thoroughly documented in the updated Usage Guide. Refs: http://sourceforge.net/tracker/index.php?func=detail&aid=637796&group_id=9913&atid=109913 - DISTRIBUTED TRANSACTIONS are now supported via the kinterbasdb.ConnectionGroup class. Although the Usage Guide does not yet fully document this feature, it does contain an example program and a few hints: http://cvs.sourceforge.net/cgi-bin/viewcvs.cgi/*checkout*/kinterbasdb/Kinterbasdb-3.0/docs/usage.html#adv_trans_control_distributed - DYNAMIC TYPE TRANSLATION KInterbasDB 3.1_pre4 implements two-way "dynamic type translation". 
This feature allows the client programmer to change the type conversion
methods for specific SQL data types and achieve complete "type
transparency". For example, KInterbasDB 3.1_pre4 includes reference
implementations of converters for both input and output of 'mx.DateTime'
and Python 2.3 stdlib 'datetime' for TIME/DATE/TIMESTAMP fields.

One consequence of two-way dynamic type translation support is that
KInterbasDB 3.1_pre4 can be used with Python 2.3's datetime module
occupying the role formerly played by mx.DateTime. For backward
compatibility, mx.DateTime is still the default, and it will remain so.

This feature is documented in the updated Usage Guide.

- Cursor.rowcount support has been added (insofar as the database engine
  supports it). This feature is documented in the updated Usage Guide.

  Refs: http://sourceforge.net/forum/forum.php?thread_id=866629&forum_id=30917
- SAVEPOINTs (a Firebird 1.5 feature) are exposed at the Python level via
  the Connection.savepoint(savepoint='name') method and the optional
  `savepoint` argument to the Connection.rollback method. This feature is
  documented in the updated Usage Guide.
- New attributes suggested by the "Optional DB API Extensions" section of
  PEP 249:

  - Access to a cursor's connection via the Cursor.connection attribute.
  - Access to kinterbasdb's exception classes via connection attributes.

  Refs: http://www.python.org/peps/pep-0249.html
- A cursor can now be reused after it has caused an exception.

Bug Fixes
---------

- Passing the wrong number of parameters for a parameterized SQL statement
  sometimes caused a crash instead of an exception with kinterbasdb
  3.1_pre3. This would not have affected client programs that were written
  correctly, but it was still a bug.
- The kinterbasdb.create_database function leaked memory if it encountered
  an error.
- Additional Windows binaries are being released to avoid dynamic linking
  problems with Interbase 5.5 and Firebird 1.5-embedded. Refs:
  http://sourceforge.net/tracker/index.php?func=detail&aid=707644&group_id=9913&atid=109913
  http://sourceforge.net/forum/forum.php?thread_id=855348&forum_id=30917
- kinterbasdb now builds with less hassle on FreeBSD. Refs:
  http://sourceforge.net/tracker/index.php?func=detail&aid=720021&group_id=9913&atid=109913
- Whenever a transactional context is needed, a transaction is now started
  implicitly if the Python programmer has not started one explicitly with
  Connection.begin. This implicit behavior is required by the Python DB API
  specification. Refs:
  http://mail.python.org/pipermail/db-sig/2003-February/003158.html
- The mapping objects returned from the Cursor.fetch*map() methods now
  accept "double-quoted" field names (lookup keys). If the field name is
  double-quoted, its original case will be preserved--instead of being
  normalized to upper case--when the result set is searched for a field
  with that name. For example, if a table were defined this way::

      create table tbl ("sTRanGelyCasEDfieldnAme" integer)

  and the statement::

      cur.execute("select * from tbl")

  were executed against it, the mapping objects returned by::

      cur.fetchonemap()

  would have rejected the lookup key 'sTRanGelyCasEDfieldnAme', converting
  it instead to 'STRANGELYCASEDFIELDNAME' and then failing to find the
  upper-cased field name. The solution available in 3.1_pre4 is to perform
  the lookup this way::

      cur.execute("select * from tbl")
      mapping = cur.fetchonemap()
      mapping['"sTRanGelyCasEDfieldnAme"']
              ^-----double-quoted-----^

  which will force the preservation of the field name's case.
An easy way to avoid problems such as this is to refrain from using quoted identifiers; in that case, the database engine will treat identifiers in a case-insensitive manner. Refs: http://sourceforge.net/tracker/index.php?func=detail&aid=720130&group_id=9913&atid=109913 INTERNAL CHANGES ---------------- - kinterbasdb now implements its standard date/time and fixed point handling via the new, general-purpose dynamic type translation feature. This eliminates the C-compile-time dependency on mx.DateTime. Although mx.DateTime (for date/time types) and Connection.precision_mode (for precise fixed point types) still function as before, dynamic type translation allows other types to be transparently substituted (such as those in Python 2.3's standard library datetime module for date/time types, or those in the fixedpoint module for precise fixed point types). For more information, see the Usage Guide. Backward-incompatibilities -------------------------- There are no outright incompatibilities, but there is one deprecation: - Although Connection.precision_mode continues to function as in earlier versions, it is deprecated in favor of dynamic type translation. The functionality that Connection.precision_mode delivers (precise I/O of fixed point values) is now implemented at the Python level via dynamic type translation, rather than at the C level. If you explicitly use both Connection.precision_mode *and* dynamic type translation, beware that changing the value of `Connection.precision_mode` will cause changes to the registered dynamic type converters under the hood. For more information, see the INTERNAL CHANGES section above, and the Usage Guide. Version 3.1_pre3 ================ Version 3.1_pre3 should be considered a beta release. New Features ------------ - database array support Database arrays are mapped from Python sequences (except strings) on input; to Python lists on output. On output, the lists will be nested if the database array has multiple dimensions. I'm not impressed by the Interbase/Firebird implementation of database arrays. The database engine claims to support up to 16 dimensions, but actually malfunctions catastrophically* above 10. The arrays are of fixed size, with a predeclared number of dimensions and number of elements per dimension. Individual array elements cannot be set to NULL/None**, so the mapping between Python lists (which have dynamic length and are therefore not normally null-padded) and non-trivial database arrays is clumsy. Arrays cannot be passed as parameters to, or returned from, stored procedures. Finally, many interface libraries, GUIs, and even the isql command line utility do not support arrays. Refs: * http://sourceforge.net/tracker/?func=detail&aid=659610&group_id=9028&atid=109028 ** Interbase 6 API Guide page 153. - retaining commit/retaining rollback The commit() and rollback() methods of kinterbasdb.Connection now accept an optional boolean parameter 'retaining' (default False). If retaining is True, the infrastructural support for the transaction active at the time of the method call will be "retained" (efficiently and transparently recycled) after the database server has committed or rolled back the conceptual transaction. In code that commits or rolls back frequently, 'retaining' the transaction yields considerably better performance. 'retaining' will become the default at some point in the future if the switch can be made without serious backward compatibility issues. 
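To make the new `retaining` option concrete, here is a minimal, hedged
sketch (it assumes an already-established kinterbasdb.Connection named
`con`; the table and column names are hypothetical):

.. sourcecode:: python

    cur = con.cursor()
    cur.execute("update customer set city = ? where id = ?", ('Praha', 1))
    # Commit the work, but 'retain' (transparently recycle) the
    # transaction's infrastructural support for the next transaction --
    # cheaper than a full commit when committing frequently:
    con.commit(retaining=True)
    # The same keyword is accepted by rollback():
    # con.rollback(retaining=True)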
Refs: http://sourceforge.net/forum/forum.php?thread_id=799246&forum_id=30917 Interbase 6 API Guide page 74. - unicode strings can now be executed via: - kinterbasdb.Cursor.execute[many]() - kinterbasdb.Cursor.callproc() - kinterbasdb.Connection.execute_immediate() However, the encoding of the incoming unicode string is rather simplistic--via PyUnicode_AsASCIIString. Bug Fixes --------- - Addressed buffer overflow potential in: - kinterbasdb.create_database() - kinterbasdb.connect() - kinterbasdb.Connection.begin() - kinterbasdb.Connection.execute_immediate() - kinterbasdb.Cursor.execute() (and thence, executemany() and callproc()) - Fixed reference count leaks in: - exception handling (_exception_functions.c) - field precision determination (_kiconversion_field_precision.c) - Fixed kinterbasdb.Connection.close() bug: The physical connection to the database server was not actually closed until the kinterbasdb.Connection instance was garbage collected. - Fixed a bug in the kinterbasdb.services.Connection.userExists() method. Usernames are now normalized to upper case. - Database version compatibility: - kinterbasdb compiles properly against Firebird 1.5. - kinterbasdb compiles against and ought to work with (but has not been tested with) Interbase 5.5, albeit with some lost functionality, namely: - field precision determination (the precision entry in cursor.description) - Services API support - retaining rollback - various data storage options, such as precise 64-bit integer storage of NUMERIC and DECIMAL values (IB 5.5 uses doubles instead, which is not really adequate) and more diverse date/time types. Refs: http://sourceforge.net/tracker/index.php?func=detail&aid=627816&group_id=9913&atid=109913 IB 6 Data Definition Guide page 65. - Improved DB API compliance: - Now, there need not be an active transaction before any execute(), commit(), or rollback() call; transaction establishment is implicit in these cases. - Cursors no longer need to be discarded after an exception; the same cursor can be reused. Of course if the cursor was in the process of fetching a result set, the remainder of the set will not be available after the exception. INTERNAL CHANGES ---------------- - Numerous modest optimizations, especially with regard to memory handling. Among these is a move to take advantage of Python 2.3's specialized, Python-oriented memory manager. - MAJOR code refactoring and tidying. Backward-incompatibilities -------------------------- - Invalid argument combinations to the connect() function now raise a ProgrammingError rather than an InterfaceError. Note that this refers to invalid *combinations* of arguments, not necessarily to invalid *values* for those arguments. - Non-keyword-argument forms of connect() are now deprecated; passing non-keyword arguments to connect() results in a DeprecationWarning being issued via the standard Python warning framework. This is a warning, not an incompatibility in the strict sense. Refs: http://www.python.org/doc/current/lib/module-warnings.html - Official support for database event handling has been deferred until 3.2. A Win32-only prototype will still be included with the kinterbasdb 3.1 source distribution (but not compiled by default). Refs: docs/usage.html#database_events_unsupported Version 3.1_pre2 ================ New Features ------------ - Global Interpreter Lock management Previously, kinterbasdb operated in a serial manner, with the sole exception of the event handling code, whose parallelism is "under the hood". 
Aside from event handling, all kinterbasdb operations, including
potentially long-running Firebird API calls, were serialized by the Python
GIL. With the advent of kinterbasdb 3.1_pre2, kinterbasdb releases the GIL
where appropriate--that is, when it is about to make a potentially
long-running Firebird API call, and can do so without invoking the Python
API, or otherwise operating on Python structures.

However, the Firebird client library itself is not threadsafe, so Firebird
API calls must also be serialized. To that end, kinterbasdb maintains a
process-wide thread lock around which all Firebird API calls are
serialized. When kinterbasdb is about to make a potentially long-running
Firebird API call, it follows these steps:

1. Extract necessary parameter data from Python structures
2. Release the Python GIL
3. Acquire the kinterbasdb process-wide Firebird client thread lock
4. Execute the Firebird API call
5. Release the kinterbasdb process-wide Firebird client thread lock
6. Acquire the Python GIL
7. Modify Python structures to reflect the results of the Firebird API call

The addition of GIL management should improve kinterbasdb's maximum
possible throughput for multi-threaded Python programs on multi-processor
systems (one processor can run the Python interpreter while another
executes a Firebird client library operation). GIL management may also
yield greater "responsiveness" for multi-threaded Python programs running
on single-processor systems. The addition of GIL management required
fairly extensive internal changes, and therefore warranted a whole
prerelease version virtually unto itself.

- Cursor name support

  The read/write property Cursor.name allows the Python programmer to
  perform scrolling UPDATEs or DELETEs via the "SELECT ... FOR UPDATE"
  syntax. If you don't know what this means, refer to the database SQL
  syntax documentation of the FOR UPDATE clause of the SELECT statement.
  The Cursor.name property can be ignored entirely if you don't need to
  use it. Here's an example code fragment:

  .. sourcecode:: python

      con = ...  # establish a kinterbasdb.Connection
      curScroll = con.cursor()
      curUpdate = con.cursor()

      curScroll.execute('select city from customer for update')
      curScroll.name = 'city_scroller'
      update = 'update customer set city=? where current of ' + curScroll.name

      for (city,) in curScroll:
          city = ...  # make some changes to city
          curUpdate.execute( update, (city,) )

Version 3.1_pre1
================

Version 3.1_pre1 should be considered an early alpha release.

New Features
------------

This list of new features represents the state of kinterbasdb 3.1_pre1,
which does not include some features slated for inclusion in the final
release of kinterbasdb 3.1. For a discussion of the ultimate goals of
version 3.1, see:
http://sourceforge.net/forum/forum.php?thread_id=696302&forum_id=30917

Also, the documentation has not yet been updated to cover these new
features, nor will it be for at least another month. In the meantime, those
who need to use the new features must refer to the source code.

- Cursor Iteration Support

  When used with Python 2.2 or later, kinterbasdb's Cursors now support
  "natural" iteration. For example:
  .. sourcecode:: python

      # Index-based field lookup (based on Cursor.fetchone):
      cur = con.cursor()
      cur.execute("select col1, col2 from the_table")
      for row in cur:
          col1 = row[0]

      # Key-based field lookup (based on Cursor.fetchonemap):
      cur = con.cursor()
      cur.execute("select col1, col2 from the_table")
      for rowMap in cur.itermap():
          col1 = rowMap['col1']

  The iterator-based pattern supersedes the ugly fetch pattern of old
  (though of course the old pattern will still work):

  .. sourcecode:: python

      # Index-based field lookup (based on Cursor.fetchone):
      cur = con.cursor()
      cur.execute("select col1, col2 from the_table")
      while 1:
          row = cur.fetchone()
          if not row:
              break
          col1 = row[0]

- Implicit Parameter Conversion

  Implicit parameter conversion allows any SQL datatype supported by
  kinterbasdb to be passed to the database engine as a Python string. This
  is especially useful for parameterized statements that involve date/time
  datatypes, because they can now accept server-computed "magic" values
  such as 'now' and 'current_date' more naturally (see the sketch after
  the Bug Fixes list below). Implicit parameter conversion is also likely
  to yield a speedup for programs that load external data from flat files
  into the database, since the incoming values do not need to be converted
  from their original string representation into an acceptable Python type
  before being forwarded to the database. For a more thorough discussion of
  this new feature, see:
  http://sourceforge.net/tracker/index.php?func=detail&aid=531828&group_id=9913&atid=309913

- Services API Support (see IB 6 API Guide Chapter 12)

  The database engine provides an API (the Services API) to facilitate
  programmatic invocation of the maintenance tasks available through the
  command-line tools gbak, gfix, etc. I've wrapped nearly the entire
  Services API in a thick Python API of my own design. My API design is
  only provisional; I seek feedback as to how it could be made more
  elegant. The Services API support is accessible via the
  kinterbasdb.services module.

- Database Event Support (see IB 6 API Guide Chapter 11)

  The database engine allows client programs to register to be informed of
  the occurrence of database events, which can be raised with the
  POST_EVENT statement in stored procedures or triggers. kinterbasdb 3.1
  supports a subset of this functionality (synchronous waiting only) via
  the Connection.wait(eventNames) method.

  The current implementation is only a rough prototype; though usable, it
  is not recommended for production environments. The current
  implementation suffers from a major limitation: only one thread per
  process is allowed to listen for event notification. This is so because
  the current implementation resorts to some roundabout trickery to
  circumvent the lack of database API support for synchronous event
  notification on Windows. Because the database API only starts one
  asynchronous event handler thread per process, I doubt that support for
  multiple event-listening threads in a single process will materialize.

Bug Fixes
---------

- In the past, the opaque mapping object returned by the Cursor.fetch*map
  methods returned None when asked for a field not in its select list,
  rather than raising a KeyError. It now raises a KeyError in such a case.
  For example:

  .. sourcecode:: python

      cur = con.cursor()
      cur.execute("select col1, col2 from the_table")
      for rowMap in cur.itermap():
          x = rowMap['col3']  # Used to return None. Now raises KeyError,
                              # because col3 was not SELECTed.
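As promised above, a minimal sketch of implicit parameter conversion (it
assumes an open cursor `cur`; the column name is hypothetical, while
'the_table' reuses the table name from the examples above):

.. sourcecode:: python

    # The TIMESTAMP parameter is supplied as a plain string; the
    # server-computed "magic" value 'now' is forwarded to the engine
    # instead of being converted into a Python date/time object first:
    cur.execute("insert into the_table (created_at) values (?)", ('now',))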
Backward-incompatibilities
--------------------------

- Although kinterbasdb 3.1 is significantly different internally, there is
  only one known API incompatibility with version 3.0.2. It would only
  arise in code that relies on the erroneous behavior of the mapping-fetch
  bug mentioned above.
- Python versions prior to 2.1 are no longer officially supported. Although
  kinterbasdb might still compile against Python 2.0 and earlier, I will
  not go out of my way to ensure that it does.

Version 3.0.2
=============

Bug Fixes
---------

- Fixed a CHAR-handling bug that caused CHAR values inserted into the
  database to lack their trailing spaces. Instead, the values were
  null-terminated. This left CHAR values inserted by kinterbasdb
  incompatible with standard tools, which expect trailing spaces. For more
  information, see
  http://sourceforge.net/tracker/index.php?func=detail&aid=594908&group_id=9913&atid=109913

Version 3.0.1
=============

Bug Fixes
---------

- Adjusted input handling of NULL values. The new scheme raises an
  exception immediately when it detects that a Python None value has
  arrived for storage in a database field or parameter that disallows NULL
  values. The old scheme simply accepted the Python None value and then
  tried to execute the query, relying on the database API to detect the
  error. With certain data types, the database API would silently insert a
  bogus value rather than detecting the error.
- Scrutinized the datetime input/output facilities, found some
  incompatibilities with the DB API, and corrected them. These changes are
  backward-incompatible, but are warranted because the previous behavior
  was in defiance of the specification. See further notes about the nature
  of these changes in the backward-incompatibilities section.
- Fixed a memory leak that affected the storage of Python string input
  parameters in BLOB fields.
- Fixed a rollback-related bug that arose if a connection to a database was
  established, but a transaction was never started on that connection. In
  such a case, a spurious exception was raised when the connection was
  garbage collected. Normal code would not have invoked this bug, but it
  was still a bug.

Backward-incompatibilities
--------------------------

- Datetime input/output has changed to better comply with the DB API (see
  the datetime bugfix discussion above). Code that uses the mx.DateTime
  module directly (rather than the kinterbasdb DB API datetime
  constructors) should not be affected. For details, see the comments in
  the code block in __init__.py tagged with "DSR:2002.07.19".

Version 3.0.1_pre3
==================

Bug Fixes
---------

- Bug #572326 (which was not present in kinterbasdb 3.0 and never affected
  Python 2.2+) caused several numeric types to not be transferred from
  Python to the database engine when they were passed as query parameters.
  This was a serious bug; it caused even such fundamental operations as::

      cursor.execute("insert into the_table values (?)", (1,))

  to not work correctly.

Version 3.0.1_pre2
==================

Bug Fixes
---------

- CHAR output now doesn't have such problems with multibyte character sets
  and values shorter than the maximum declared length of the field. CHARs
  are no longer returned with their trailing blanks intact. The trailing
  blanks have been abandoned because they were in fact NULL characters, not
  spaces. kinterbasdb would fill in the spaces manually, except for the
  problems that approach causes with multibyte character sets.
- Fixed a potential buffer overflow, but the fix only applies when compiled
  against Python 2.2 or later.

Backward-incompatibilities
--------------------------

- See coverage of CHAR output changes in the 'Bug Fixes' section. In a
  nutshell: CHAR output values no longer have trailing NULL bytes.

Version 3.0.1_pre1
==================

New Features
------------

- It is now possible to connect to a database under a specific role by
  using the 'role' keyword argument of the kinterbasdb.connect function.
- The following methods now accept any sequence except a string for their
  'parameter' argument, rather than demanding a tuple: Cursor.execute,
  Cursor.executemany and Cursor.callproc.

Bug Fixes
---------

- kinterbasdb supports IB 5.x again. Various identifiers specific to IB
  6.x/Firebird had crept into unguarded areas of __init__.py and
  _kinterbasdb.c, but this has been changed so that kinterbasdb compiles
  gracefully with IB 5.x. See:
  http://sourceforge.net/tracker/index.php?func=detail&aid=553184&group_id=9913&atid=209913
- The distutils setup script no longer raises a ValueError on Windows 2000
  or XP.
- The precision slot in Cursor.description was always zero. It now contains
  the correct value if that value can reasonably be determined. Note that
  the database engine records the precision of some fields as zero (e.g.,
  FLOAT), and the slot will also be zero in cases where the database engine
  does not expose the precision of the field (e.g., dynamic fields such as
  "SELECT 33.5 FROM RDB$DATABASE").

  Since the database API does not provide the field's precision figure in
  the XSQLVAR structure, it is necessary to query the system tables. In
  order to minimize the performance hit, precision figures are cached per
  Connection; the determination of a given field's precision figure in the
  context of a given Connection will require only dictionary lookups after
  it is determined the first time with a system table query. An unfortunate
  side effect of this caching is that if a field's precision is altered
  after the figure has been cached by a Connection, cursors based on that
  Connection will still show the old precision figure. In practice, this
  situation will almost never arise. See:
  http://sourceforge.net/tracker/index.php?func=detail&aid=549982&group_id=9913&atid=109913
- On Linux, attempting to fetch immediately after having executed a
  non-query statement resulted in a segfault. An exception is now raised
  instead. The problem did not afflict Windows, which always raised the
  exception. See:
  http://sourceforge.net/tracker/index.php?func=detail&aid=551098&group_id=9913&atid=109913
- The message carried by this exception grew without bound on both Windows
  and Linux. It no longer does.
- Under some circumstances, the fetched values of CHAR fields were
  incorrect. CHAR values now appear as expected (they are right-padded with
  spaces and always of length equal to their field's designated maximum
  length).
- Cursor.fetchmany raised an error if there were no remaining values to
  fetch. It now returns an empty sequence instead, as required by the DB
  API Specification.
- Field domains are checked more strictly. It is now impossible to (for
  example) issue a statement that attempts to insert a 12-character string
  into a 10-character CHAR field without encountering an exception. This
  checking is not perfect, since it validates against the field's internal
  storage type rather than the field's declared type.
For example, a NUMERIC(1,1), which is stored internally as a short, will erroneously accept the value 12.5 because 125 fits in a short. - When operating in imprecise mode (connection.precision_mode == 0), kinterbasdb 3.0 sometimes interpreted integer values as though it were operating in precise mode. Version 3.0 versus 2.0-0.3.1 ============================ New Features ------------ The new features are thoroughly documented in the KInterbasDB Usage Guide (usage.html); they need not be reiterated here. However, backward-incompatible changes *have* been documented in this changelog (see the Backward-incompatibilities section). Bug Fixes --------- Many bugs have been fixed, including (but not limited to) the following, which were registered with the KInterbasDB bug tracker at SourceForge ( http://sourceforge.net/tracker/index.php?group_id=9913&atid=109913 ): - 433090 cannot connect to firebird server - 438130 cursor.callproc not adding param code - 468304 fetchmany return all record - 498086 ignores column aliases in select - 498403 fetching after a callproc hangs program - 498414 execute procedure message length error - 505950 inconsistent fetch* return types - 515974 Wrong decoding of FB isc_version - 517093 broken fixed-point handling in 3.0 - 517840 C function normalize_double inf. loop - 517842 fetch bug - program hangs - 520793 poor DB API compliance ^ a *BIG* fix that entailed many changes - 522230 error with blobs larger than (2^16) - 1 - 522774 inconsistent fixed-point conv in 3.0-rc2 - 523348 memory leak in Blob2PyObject - immediate execution facilities unreliable in 2.x Backward-incompatibilities -------------------------- As a result of the changes required for some of the bugfixes (especially #520793 - "poor DB API compliance") and general reengineering, several areas of backward-incompatibility have arisen: - fetch* return types The standard fetch(one|many|all) methods now return just a sequence, not a combined sequence/mapping. If you want a mapping, use one of the fetch(one|many|all)map methods. Note the "'absolutely no guarantees' except..." caveats in the KInterbasDB Usage Guide regarding the return types of the Cursor.fetch* methods and the contents of the Cursor.description attribute. This is a significant backward-incompatibility, and was not undertaken without serious consideration (for evidence see http://sourceforge.net/forum/forum.php?thread_id=622782&forum_id=30919 ). - Fixed point number handling Fixed point number handling has been remodelled. By default, fixed point numbers (NUMERIC/DECIMAL field values) are now represented (with a potential loss of precision) as Python floats. A Connection.precision_mode attribute has been added so that precise representation of fixed point values as scaled Python integers (as in KInterbasDB 2.x) can be used at will. For more information, see the KInterbasDB Usage Guide. - Connection.dialect In KInterbasDB 2.x, the default connection dialect was 1 (the backward-compatibility dialect for use with Interbase 5.5 and earlier). KInterbasDB 3.0 is being released into quite a different climate. Interbase 6.0 was released nearly two years ago, and Firebird 1.0 has recently been released. Because it is expected that KInterbasDB 3.0 will be used most frequently with Interbase 6.0+ and Firebird, the default connection dialect is 3. Using KInterbasDB 3.0 with Interbase 5.5 and earlier is still possible, though untested by the developers of KInterbasDB 3.0. 
See the Connection.dialect documentation in the KInterbasDB Usage Guide for an explanation of how to initialize a connection with a dialect other than 3. - Connection.server_version The Connection.server_version attribute is now a string rather than an integer. An integer simply was not expressive enough to represent the numerous Interbase variants that exist today (including Firebird, which does not fit neatly into the Interbase version progression). For more information, see the KInterbasDB Usage Guide. - kinterbasdb.execute_immediate The kinterbasdb.execute_immediate function has been removed. A similar function named kinterbasdb.create_database has been added. The primary differences between kinterbasdb.execute_immediate and kinterbasdb.create_database are: - kinterbasdb.create_database is not as general - kinterbasdb.create_database actually works The execute_immediate method of the Connection class has been retained. For more information, see the KInterbasDB Usage Guide. kinterbasdb-3.3.0/docs/_sources/installation.txt0000644000175000001440000004306311132652265021232 0ustar pcisarusers ############################## KInterbasDB Installation Guide ############################## Dependencies ************ KInterbasDB requires a valid combination of the dependencies in the list below. Detailed instructions on how to install each dependency are beyond the scope of this document; consult the dependency distributor for installation instructions. Satisfying the dependencies is not difficult! For mainstream operating systems -- including Windows and Linux -- easily installable binary distributions are available for *all* of KInterbasDB's dependencies (see the download links below). #. Operating System - one of: + Win32 (NT 4, 2000, XP, 2003, ...) + Win64 (Should work fine, but no binary distributions are available.) + Linux (Known to work fine on both x86 and x86-64.) + Other Unix or Unix-like operating system #. `Firebird `__ 2.0 or later - client or server installation [`download here `__] If you want to use KInterbasDB 3.3 with Firebird 1.5 and older or InterBase, you will need to do `installation from source distribution`_ instead. #. `Python `__ [`download here `__] 2.4 or later #. `eGenix.com mx Extensions for Python `__, version 2.0.1 or later [`download here `__] By default, KInterbasDB uses the `DateTime` module of the eGenix.com `mx` Extensions to represent date and time values, as recommended by the *Python Database API Specification*. *However, it is not strictly necessary to use the `mx.DateTime` module to handle dates and times, especially when you're using Python 2.5 and newer.* See `this FAQ `__. Installation from binary distribution ************************************* Note: If a binary distribution of KInterbasDB (e.g., a Windows executable installer) is not available for your platform, Python or Firebird version, you will need to do `installation from source distribution`_ instead. Windows ======= Binary distributions of KInterbasDB for Windows come in the form of a conventional executable installer or MSI package. Just invoke the installer and follow the wizard prompts. Because KInterbasDB is compatible with numerous versions of Python, you must choose a binary distribution that matches your Python version. There are currently Windows binary distributions of KInterbasDB compiled for use with Firebird 2.x for each of Python 2.4, 2.5 and 2.6. 
Linux and Other Unix Variants
=============================

Currently, Linux users must typically install from the `source
distribution`_, as only Mandriva Linux offers a pre-built KInterbasDB
package. The source distribution will *probably* also install (and
function) on most other POSIX-compliant Unix variants, as long as all of
the dependencies_ are also installed and functional.

Because the KInterbasDB source distribution supports the standard Python
package installation facility (`distutils `__), installing the source
distribution on a typical Linux system is downright easy.

.. _`source distribution`:

Installation from source distribution
*************************************

Shortcut for the Experienced and Impatient::

    (decompress KInterbasDB into *temp_dir*)
    cd *temp_dir*
    python setup.py build
    python setup.py install
    python -c "import kinterbasdb"
    (delete *temp_dir*)

Then hit the `Usage Guide `__.

Compile KInterbasDB
===================

You will need a C compiler: Visual C++ or MinGW to compile KInterbasDB on
Windows, and GCC to compile it on Linux/POSIX.

Once you have successfully installed the dependencies, you may proceed with
the installation of KInterbasDB itself. Beginning with version 3.0,
KInterbasDB has full support for the `distutils `__, the standard facility
for Python package distribution and installation. Full instructions for
using the distutils are available `here `__, but you can skip them unless
you have an otherwise insoluble problem.

Open a command prompt, change to the directory where you decompressed the
kinterbasdb source distribution, and type::

    python setup.py build

The installation script, :file:`setup.py`, will attempt to automatically
detect the information needed by the C compiler; then it will invoke the
distutils to perform the actual compilation. If you installed automatic
distributions of the dependencies that place themselves in standard
locations (on UNIX-style operating systems) or record their locations in
the system registry (on Windows), the compilation should proceed without
incident.

On Windows, compilers other than Microsoft Visual C++ usually require some
library conversion to work with Python or Firebird. With Firebird 1.5 and
MinGW or Firebird 1.0 and Borland C++, :file:`setup.py` will perform this
conversion automatically. If the automatic conversion fails, ensure that
your compiler is installed properly (especially that its :file:`bin`
directory is in your :file:`PATH`). For more information, see the
compiler-specific notes in this document, as well as the Python standard
library documentation on "Installing Python Modules".

If :file:`setup.py` raises no errors and its output concludes with
something like "Creating library...", then you are ready to proceed to the
next step. If you receive an error message, examine its contents and then
consult the following table:

+----------------------+----------------------------------------------------------------------+ | Error Message Header | Explanation | +======================+======================================================================+ | **LIBRARY | The setup script was unable to automatically find one or more files | | AUTODETECTION ERROR**| needed for the compilation process, such as a library needed by the | | | C compiler. | | | | | | Using a text editor, you will need to manually specify the relevant | | | paths in the `manual_config` section of the setup configuration file,| | | :file:`setup.cfg` (in the root directory of the KInterbasDB source | | | distribution).
Uncomment the item in question and provide a value | | | appropriate to your system. Save the newly modified | | | :file:`setup.cfg`, then repeat the compilation step. | | | | | | If manually specifying the library paths fails to solve the problem: | | | - Your C compiler or linker may not be properly configured. | | | - You may have a corrupt or incomplete installation of one or more | | | KInterbasDB dependencies. | | +----------------------------------------------------------------------+ | | *Note for non-Windows platforms:* | | | If the compiler indicates that it cannot find the include file | | | :file:`Python.h`, this probably means that you have the user-oriented| | | Python package installed, but not the developer-oriented package that| | | would enable you to compile C extensions. | | | | | | For example, RedHat-derived distributions such as Fedora split the | | | core Python distribution into :file:`python-{x.y.z}`, :file:`python- | | | devel-{x.y.z}` and :file:`python-docs-{x.y.z}` packages. | | | You'll need to install the :file:`python-devel-{x.y.z}` package | | | in order to compile KInterbasDB. | | | | | | The use of C extensions to Python is quite common, so Python | | | repackagers such as Linux distributions should include the files | | | necessary to compile C extensions in their basic Python package. The | | | Python core developers have noticed these repackaging mistakes and | | | complained about them, but apparently without effect. | +----------------------+----------------------------------------------------------------------+ | **COMPILER | The setup script could not function because of the current | | CONFIGURATION ERROR**| configuration of your compiler. The error message should provide | | | details about what went wrong, and perhaps a suggestion of how to fix| | | the problem. | | | | | | If you are not using the standard compiler for your platform, consult| | | the compiler-specific notes. | +----------------------+----------------------------------------------------------------------+ | **LIBRARY CONVERSION | The setup script's attempt to convert libraries intended for use with| | ERROR** | Microsoft Visual C++ into a format compatible with your compiler was | | | not successful. | | | | | | Consult the compiler-specific notes in this document, as well as the | | | Python standard library documentation on "Installing Python Modules".| +----------------------+----------------------------------------------------------------------+ | **PYTHON SYSTEM | Your Python installation is outdated, lacks some crucial modules, or | | ERROR** | is otherwise inadequate. The error message will indicate what your | | | options are, which may include installing a more recent Python | | | version, compiling additional C extension modules for your current | | | Python version, or editing :file:`setup.cfg` to manually specify | | | library paths, thus relieving :file:`setup.py` of the burden | | | of detecting them. | +----------------------+----------------------------------------------------------------------+ | **KINTERBASDB | The setup script cannot find a file that was supposed to be included | | DISTRIBUTION ERROR** | with the KInterbasDB source distribution. Try downloading the | | | KInterbasDB source distribution again and decompressing it into a | | | fresh temporary directory, then repeat the compilation step. 
| +----------------------+----------------------------------------------------------------------+ | **LIBRARY MANUAL | One of the library paths specified in :file:`setup.cfg` is not valid.| | SPECIFICATION ERROR**| Verify the location of the library, then edit :file:`setup.cfg` to | | | reflect the correct path. | | | | | | If you had no particular reason to manually specify the library path | | | in the first place, try commenting out that entry in | | | :file:`setup.cfg`, then repeat the compilation step and let the setup| | | script attempt to automatically detect the location of the library. | +----------------------+----------------------------------------------------------------------+ If the problem persists after you have followed the advice in the error message itself and in the table above, visit the `KInterbasDB support list `__ and report your problem. Compiler-Specific Notes ----------------------- + Microsoft Windows + Microsoft Visual C++ 6.0 #. The Visual C++ command-line utilities must be available on your system path, and their required environment variables must be initialized to meaningful values. If, when you installed Visual C++, you did *not* allow it to register the paths needed for command-line compilation, you will need to run the :file:`vcvars32.bat` batch file from the :file:`bin` subdirectory of your Visual C++ installation. By default, this directory is :file:`C:\\Program Files\\Microsoft Visual Studio\\VC98\\bin` #. Use the * same * command prompt window to run the following command in the temporary directory into which you decompressed KInterbasDB: :command:`python setup.py build` + `MinGW `__ (Windows port of GCC) Note that KInterbasDB supports MinGW only with Firebird 1.5 or later, not Firebird 1.0 or Interbase®. With earlier versions of the database, use Microsoft Visual C++. #. Make sure that the :file:`bin` subdirectory of the directory where you installed MinGW is in your PATH. KInterbasDB requires numerous MinGW sub-packages, so it's easiest to install the monolithic distribution of MinGW, rather than piecing together individual sub-packages. The monolithic distribution is an executable installer; installation is trivial. If you do decide to install individual MinGW sub-packages, you must install at least the following: + binutils + gcc-core + mingw-runtime + mingw-utils + w32api KInterbasDB's setup script will automatically perform all of the required preparatory steps for compiling an extension with MinGW on your Python installation. #. In the temporary directory into which you decompressed KInterbasDB, run the command: :command:`python setup.py build --compiler=mingw32` Install KInterbasDB =================== During this step, the setup script moves the KInterbasDB package (including the newly compiled C extensions) to the standard package directory of your Python installation so that Python will be able to :command:`import kinterbasdb` and :command:`import kinterbasdb.services` In addition to the Python code and shared library files actually used by the Python interpreter, the setup script typically installs some supporting files, such as documentation. Depending on your system configuration, these supporting files may be placed in the same directory or a different directory from the files used by the Python interpreter. Run the following command: :command:`python setup.py install` The setup script will install KInterbasDB, listing each file it installs. 
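If the standard :file:`site-packages` directory is not writable by your user (a situation discussed just below), the distutils can also install into an alternate location. A minimal sketch using the standard distutils ``--home`` scheme; the paths are illustrative, not prescribed::

  python setup.py install --home=$HOME/python
  export PYTHONPATH=$HOME/python/lib/python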
Errors during this step are rare because compilation (the finicky part of this process) has already taken place; installation is really just a matter of copying files. However, there will be file system permission errors if the Python installation directory is not writable by the user running the setup script. If you encounter such an error, try one of the following:

- Log in as a user who has the required file system permissions and
  repeat the installation step.
- Manually copy the directory
  :file:`build/lib.{platform}-{pyver}/kinterbasdb` (which contains the
  Python modules and compiled library files created during the
  compilation step) to a directory in your PYTHONPATH. This approach
  will not install the supporting files, but they are for the benefit
  of the programmer rather than the Python interpreter anyway.

Test your KInterbasDB installation
**********************************

KInterbasDB has an extensive test suite, but it is not really intended for routine public use. To verify that KInterbasDB is installed properly, switch to a directory *other than the temporary directory into which you decompressed the source distribution* (to avoid conflict between the copy of kinterbasdb in that directory and the copy placed under the standard Python `site-packages` directory), then verify the importability of your KInterbasDB installation by issuing the following command::

  python -c "import kinterbasdb as k; print k.__version__"

If the import attempt does not encounter any errors and the version number is what you expected, you are finished. Next, consider reading the KInterbasDB Usage Guide.

You should not encounter any errors at this stage, since you have already completed the compilation and installation steps successfully. If you do, please report them to the `KInterbasDB support list `__.
kinterbasdb-3.3.0/docs/_sources/python-db-api-compliance.txt0000644000175000001440000002373711132652265023312 0ustar pcisarusers#####################################
Compliance to Python Database API 2.0
#####################################

.. currentmodule:: kinterbasdb

Incompatibilities
=================

.. data:: DATETIME

   KInterbasDB's deferred loading of dynamic type translators causes
   this singleton to behave in violation of the standard until the
   :func:`kinterbasdb.init()` function has been called (whether
   explicitly or implicitly). For more information, see the
   documentation section about `Deferred Loading of Dynamic Type
   Translators`.

Unsupported Optional Features
=============================

.. method:: Cursor.nextset()

   This method is not implemented because the database engine does not
   support opening multiple result sets simultaneously with a single
   cursor.

Nominally Supported Optional Features
=====================================

.. class:: Cursor

   .. attribute:: arraysize

      As required by the spec, the value of this attribute is observed
      with respect to the `fetchmany` method. However, changing the
      value of this attribute does not make any difference in fetch
      efficiency, because the database engine only supports fetching a
      single row at a time.

   .. method:: setinputsizes()

      Although this method is present, it does nothing, as allowed by
      the spec.

   .. method:: setoutputsize()

      Although this method is present, it does nothing, as allowed by
      the spec.

Extensions and Caveats
======================

KInterbasDB offers a large feature set beyond the minimal requirements of the Python DB API.
Most of these extensions are documented in the section of this document entitled `Native Database Engine Features and Extensions Beyond the Python DB API`. This section attempts to document only those features that overlap with the DB API, or are too insignificant to warrant their own subsection elsewhere.

.. function:: connect()

   This function supports the following optional keyword arguments in
   addition to those required by the spec:

   :role: For connecting to a database with a specific SQL role.

      *Example:*

      .. sourcecode:: python

         kinterbasdb.connect(dsn='host:/path/database.db',
             user='limited_user', password='pass',
             role='MORE_POWERFUL_ROLE')

   :charset: For explicitly specifying the character set of the
      connection. See the Firebird Documentation for a list of available
      character sets, and the `Unicode Fields and KInterbasDB` section
      for information on handling extended character sets with
      KInterbasDB.

      *Example:*

      .. sourcecode:: python

         kinterbasdb.connect(dsn='host:/path/database.db',
             user='sysdba', password='pass', charset='UTF8')

   :dialect: The SQL dialect is a feature for backward compatibility
      with Interbase® 5.5 or earlier. The default dialect is `3` (the
      most featureful dialect, the default for Firebird). If you want to
      connect to `legacy` databases, you must explicitly set this
      argument's value to `1`. Dialect `2` is a transitional dialect
      that is normally used only during ports from IB < 6 to IB >= 6 or
      Firebird. See the Firebird documentation for more information
      about SQL dialects.

      *Example:*

      .. sourcecode:: python

         kinterbasdb.connect(dsn='host:/path/database.db',
             user='sysdba', password='pass', dialect=1)

   :timeout: (`Optional`) Dictionary with timeout and action
      specification. See the section about `Connection Timeouts `_ for
      details.

.. class:: Connection

   .. attribute:: charset

      *(read-only)* The character set of the connection (set via the
      `charset` parameter of :func:`kinterbasdb.connect()`). See the
      Firebird Documentation for a list of available character sets, and
      the `Unicode Fields and KInterbasDB` section for information on
      handling extended character sets with KInterbasDB.

   .. attribute:: dialect

      This integer attribute indicates which SQL dialect the connection
      is using. You should not change a connection's dialect; instead,
      discard the connection and establish a new one with the desired
      dialect. For more information, see the documentation of the
      `dialect` argument of the `connect` function.

   .. attribute:: server_version

      *(read-only)* The version string of the database server to which
      this connection is connected. For example, a connection to
      Firebird 1.0 on Windows has the following `server_version`:
      `WI-V6.2.794 Firebird 1.0`

   .. method:: execute_immediate(sql)

      Executes a statement without caching its prepared form. The
      statement must *not* be of a type that returns a result set. In
      most cases (especially cases in which the same statement --
      perhaps a parameterized statement -- is executed repeatedly), it
      is better to create a cursor using the connection's `cursor`
      method, then execute the statement using one of the cursor's
      execute methods.

      Arguments:

      :sql: String containing the SQL statement to execute.
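      *Example (a hedged sketch -- the table name is hypothetical):*

      .. sourcecode:: python

         # Statement types that return no result set may bypass the
         # cursor machinery entirely:
         con.execute_immediate("DELETE FROM stale_rows")
         con.commit()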
   .. attribute:: precision_mode

      Although this attribute is present in KInterbasDB 3.1+ and works
      in a backward-compatible fashion, it is deprecated in favor of the
      more general dynamic type translation feature.

   .. method:: commit(retaining=False)
   .. method:: rollback(retaining=False)

      The `commit` and `rollback` methods accept an optional boolean
      parameter `retaining` (default `False`) that indicates whether the
      transactional context of the transaction being resolved should be
      recycled. For details, see the Advanced Transaction Control:
      Retaining Operations section of this document.

      The `rollback` method also accepts an optional string parameter
      `savepoint` that causes the transaction to roll back only as far
      as the designated savepoint, rather than rolling back entirely.
      For details, see the Advanced Transaction Control: Savepoints
      section of this document.

.. class:: Cursor

   .. attribute:: description

      KInterbasDB makes absolutely no guarantees about `description`
      except those required by the Python Database API Specification 2.0
      (that is, `description` is either `None` or a sequence of
      7-element sequences). Therefore, client programmers should *not*
      rely on `description` being an instance of a particular class or
      type. KInterbasDB provides several named positional constants to
      be used as indices into a given element of `description`. The
      contents of all `description` elements are defined by the DB API
      spec; these constants are provided merely for convenience.

      .. sourcecode:: python

         DESCRIPTION_NAME
         DESCRIPTION_TYPE_CODE
         DESCRIPTION_DISPLAY_SIZE
         DESCRIPTION_INTERNAL_SIZE
         DESCRIPTION_PRECISION
         DESCRIPTION_SCALE
         DESCRIPTION_NULL_OK

      Here is an example of accessing the *name* of the first field in
      the `description` of cursor `cur`:

      .. sourcecode:: python

         nameOfFirstField = cur.description[0][kinterbasdb.DESCRIPTION_NAME]

      For more information, see the documentation of Cursor.description
      in the `DB API Specification `__.

   .. attribute:: rowcount

      Although KInterbasDB's `Cursor` objects implement this attribute,
      the database engine's own support for the determination of "rows
      affected"/"rows selected" is quirky. The database engine only
      supports the determination of rowcount for `INSERT`, `UPDATE`,
      `DELETE`, and `SELECT` statements. When stored procedures become
      involved, row count figures are usually not available to the
      client.

      Determining rowcount for `SELECT` statements is problematic: the
      rowcount is reported as zero until at least one row has been
      fetched from the result set, and the rowcount is misreported if
      the result set is larger than 1302 rows. The server apparently
      marshals result sets internally in batches of 1302, and will
      misreport the rowcount for result sets larger than 1302 rows until
      the 1303rd row is fetched, for result sets larger than 2604 rows
      until the 2605th row is fetched, and so on, in increments of 1302.

      As required by the Python DB API Spec, the rowcount attribute "is
      -1 in case no executeXX() has been performed on the cursor or the
      rowcount of the last operation is not determinable by the
      interface".
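      *Example of the fetch-dependent behavior described above (a hedged
      sketch; the table name is hypothetical):*

      .. sourcecode:: python

         cur.execute("SELECT * FROM the_table")
         print cur.rowcount  # reported as 0: no row has been fetched yet
         cur.fetchone()
         print cur.rowcount  # now >= 1, subject to the 1302-row caveat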
   .. method:: fetchone()
   .. method:: fetchmany()
   .. method:: fetchall()

      KInterbasDB makes absolutely no guarantees about the return value
      of the `fetchone` / `fetchmany` / `fetchall` methods except that
      it is a sequence indexed by field position. KInterbasDB makes
      absolutely no guarantees about the return value of the
      `fetchonemap` / `fetchmanymap` / `fetchallmap` methods (documented
      below) except that it is a mapping of field name to field value.
      Therefore, client programmers should *not* rely on the return
      value being an instance of a particular class or type.

   .. method:: fetchonemap()

      This method is just like the standard `fetchone` method of the DB
      API, except that it returns a mapping of field name to field
      value, rather than a sequence.

   .. method:: fetchmanymap()

      This method is just like the standard `fetchmany` method of the DB
      API, except that it returns a sequence of mappings of field name
      to field value, rather than a sequence of sequences.

   .. method:: fetchallmap()

      This method is just like the standard `fetchall` method of the DB
      API, except that it returns a sequence of mappings of field name
      to field value, rather than a sequence of sequences.

   .. method:: iter()
   .. method:: itermap()

      These methods are equivalent to the `fetchall` and `fetchallmap`
      methods, respectively, except that they return iterators rather
      than materialized sequences. `iter` and `itermap` are exercised in
      this example.
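      A minimal sketch of `itermap` (the table and column names are
      hypothetical; the mapping keys follow the field names reported in
      the cursor's `description`):

      .. sourcecode:: python

         cur.execute("SELECT name, year_released FROM languages")
         for row in cur.itermap():
             # The row mapping supports dict-style string formatting.
             print '%(name)s was released in %(year_released)d.' % row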
kinterbasdb-3.3.0/docs/searchindex.js0000644000175000001440000010720411133077256016775 0ustar pcisarusers[Sphinx-generated search index omitted.]
kinterbasdb-3.3.0/docs/thread-safety-overview.html0000644000175000001440000002211711132652266021436 0ustar pcisarusers Overview of Firebird Client Library Thread-Safety — KInterbasDB v3.3.0 documentation

Overview of Firebird Client Library Thread-Safety¶

The thread-safety properties of the standard Firebird client library vary according to the following:

  • Firebird version
  • operating system
  • Firebird server architecture (SuperServer, Classic, Embedded)
  • underlying connection protocol (embedded vs. local vs. IPC vs. remote)

Determining whether the client library you’re using is thread-safe can be quite confusing. This document aims to reduce that confusion by defining what thread-safety means in the context of the Firebird client library, then presenting a table that specifies which client library configurations are thread-safe.

Note that this document deals only with the standard Firebird client library that underlies the Firebird C API. It has no relevance to the clean-slate reimplementations in access libraries such as Jaybird.

Definition of “Thread-Safety”¶

Currently, the highest level of concurrency supported by any version of the Firebird client library is thread-safety at the connection level.

When we say that the Firebird client library is thread-safe at the connection level, we mean that it is safe to use a particular connection in only one thread at a time, although the same connection can be manipulated by different threads in a serial fashion, and different connections can be manipulated by different threads in parallel.

For example, in a multithreaded application server, it is safe for a particular connection to be leased from a connection pool by Thread A, used, and returned to the pool for later lease by Thread B. It is not safe for Thread A and Thread B to use the same connection at the same time.
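For instance, client code can enforce this rule with an ordinary mutex. A minimal sketch in Python (the DSN and credentials are hypothetical):

import threading
import kinterbasdb

con = kinterbasdb.connect(dsn='host:/path/database.db',
                          user='sysdba', password='masterkey')
con_lock = threading.Lock()

def run_query(sql):
    # Connection-level thread-safety: only one thread may use the
    # connection at a time, so serialize all access with a mutex.
    con_lock.acquire()
    try:
        cur = con.cursor()
        cur.execute(sql)
        return cur.fetchall()
    finally:
        con_lock.release()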

Thread-Safety Table¶

FB Version  OS       FB Architecture  Remote / Local  Thread-Safe?
1.0.3       Windows  SuperServer      Local           No
1.0.3       Windows  SuperServer      Remote          Yes
1.0.3       Windows  Classic          Local           No such config.
1.0.3       Windows  Classic          Remote          No such config.
1.0.3       Windows  Embedded         Local           No such config.
1.0.3       Windows  Embedded         Remote          No such config.
1.0.3       Linux    SuperServer      Local           No such config.
1.0.3       Linux    SuperServer      Remote          No
1.0.3       Linux    Classic          Local           No
1.0.3       Linux    Classic          Remote          No
1.5         Windows  SuperServer      Local           No
1.5         Windows  SuperServer      Remote          Yes
1.5         Windows  Classic          Local           No such config.
1.5         Windows  Classic          Remote          Yes
1.5         Windows  Embedded         Local           Yes
1.5         Windows  Embedded         Remote          No such config.
1.5         Linux    SuperServer      Local           No such config.
1.5         Linux    SuperServer      Remote          Yes
1.5         Linux    Classic          Local           No
1.5         Linux    Classic          Remote          No
2.0/2.1     Windows  SuperServer      Local           Yes
2.0/2.1     Windows  SuperServer      Remote          Yes
2.0/2.1     Windows  Classic          Local           Yes
2.0/2.1     Windows  Classic          Remote          Yes
2.0/2.1     Windows  Embedded         Local           Yes
2.0/2.1     Windows  Embedded         Remote          No such config.
2.0/2.1     Linux    SuperServer      Local           No such config.
2.0/2.1     Linux    SuperServer      Remote          Yes
2.0/2.1     Linux    Classic          Local           No
2.0/2.1     Linux    Classic          Remote          No

This document was written by David Rushby, with assistance from Dmitry Yemanov. Errors are attributable to Rushby rather than Yemanov.

kinterbasdb-3.3.0/docs/license.html0000644000175000001440000001476511132652265016465 0ustar pcisarusers KInterbasDB LICENSE — KInterbasDB v3.3.0 documentation

KInterbasDB LICENSE¶

The following contributors hold Copyright (C) over their respective portions of code and documentation:

[Author of original version; maintained through version 2.0:]

1998-2001 [alex] Alexander Kuznetsov <alexan@users.sourceforge.net>

[Author of ~90% of current code, most of current documentation; maintained through version 3.3:]

2002-2007 [dsr] David S. Rushby <woodsplitter@rocketmail.com>

[Finishing touch to v3.3; Current maintainer:]

2008-2009 [paci] Pavel Cisar <pcisar@ibphoenix.cz>

[Significant Contributors:]

2001-2002 [maz] Marek Isalski <kinterbasdb@maz.nu>

Marek made important first steps in removing the limitations of version 2.0 in preparation for version 3.0.

2001 [eac] Evgeny A. Cherkashin <eugeneai@icc.ru>

Evgeny wrote the first version of the distutils build script, which was included in a 2.x point release.

2001-2002 [janez] Janez Jere <janez.jere@void.si>

Janez contributed several bugfixes, including fixes for the date and time parameter conversion code in preparation for version 3.0.

Permission to use, copy, modify, and distribute this software and its documentation for any purpose and without fee or royalty is hereby granted, provided that the above copyright notice appears in all copies and that both the copyright notice and this permission notice appear in supporting documentation or portions thereof, including modifications, that you make.

The authors disclaim all warranties with regard to this software, including all implied warranties of merchantability and fitness. In no event shall any author be liable for any special, indirect or consequential damages or any damages whatsoever resulting from loss of use, data or profits, whether in an action of contract, negligence or other tortious action, arising out of or in connection with the use or performance of this software.


kinterbasdb-3.3.0/docs/installation.html0000644000175000001440000006041711132652265017537 0ustar pcisarusers KInterbasDB Installation Guide — KInterbasDB v3.3.0 documentation

KInterbasDB Installation Guide¶

Dependencies¶

KInterbasDB requires a valid combination of the dependencies in the list below.

Detailed instructions on how to install each dependency are beyond the scope of this document; consult the dependency distributor for installation instructions.

Satisfying the dependencies is not difficult! For mainstream operating systems – including Windows and Linux – easily installable binary distributions are available for all of KInterbasDB’s dependencies (see the download links below).

  1. Operating System - one of:

    • Win32 (NT 4, 2000, XP, 2003, ...)
    • Win64 (Should work fine, but no binary distributions are available.)
    • Linux (Known to work fine on both x86 and x86-64.)
    • Other Unix or Unix-like operating system
  2. Firebird 2.0 or later - client or server installation [download here]. If you want to use KInterbasDB 3.3 with Firebird 1.5 or older, or with InterBase, you will need to install from the source distribution instead.

  3. Python 2.4 or later [download here]

  4. eGenix.com mx Extensions for Python, version 2.0.1 or later [download here]. By default, KInterbasDB uses the DateTime module of the eGenix.com mx Extensions to represent date and time values, as recommended by the Python Database API Specification. However, it is not strictly necessary to use the `mx.DateTime` module to handle dates and times, especially if you're using Python 2.5 or newer; the standard library datetime module can serve instead. See this FAQ, and the sketch just below this list.
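If you prefer the standard library datetime module over mx.DateTime, the following minimal sketch shows one way to opt in at startup. It assumes the type_conv=200 convention described in the KInterbasDB FAQ (which selects the datetime/decimal-based converters); init() must be called before the first connect():

import kinterbasdb
# Assumption: type_conv=200 selects the stdlib datetime/decimal
# converters, per the KInterbasDB FAQ. Call init() before connect().
kinterbasdb.init(type_conv=200)

import datetime
# DATE/TIME/TIMESTAMP parameters can now be passed as datetime
# objects, e.g. datetime.date(2009, 1, 15) for a DATE column.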

Installation from binary distribution

Note: If a binary distribution of KInterbasDB (e.g., a Windows executable installer) is not available for your platform, Python version, or Firebird version, you will need to install from the source distribution instead.

Windows

Binary distributions of KInterbasDB for Windows come in the form of a conventional executable installer or MSI package. Just invoke the installer and follow the wizard prompts.

Because KInterbasDB is compatible with numerous versions of Python, you must choose a binary distribution that matches your Python version. There are currently Windows binary distributions of KInterbasDB compiled for use with Firebird 2.x for each of Python 2.4, 2.5 and 2.6.

Linux and Other Unix Variants

Currently, most Linux users must install from the source distribution, since only Mandriva Linux offers a pre-built KInterbasDB package.

The source distribution will probably also install (and function) on most other POSIX-compliant Unix variants, as long as all of the dependencies are also installed and functional.

Because the KInterbasDB source distribution supports the standard Python package installation facility (distutils), installing the source distribution on a typical Linux system is downright easy.

Installation from source distribution

Shortcut for the Experienced and Impatient:

(decompress KInterbasDB into *temp_dir*)
cd *temp_dir*
python setup.py build
python setup.py install
python -c "import kinterbasdb"
(delete *temp_dir*)

Then hit the Usage Guide.

Compile KInterbasDB

You will need a C compiler: Microsoft Visual C++ or MinGW to compile KInterbasDB on Windows, and GCC to compile it on Linux/POSIX.

Once you have successfully installed the dependencies, you may proceed with the installation of KInterbasDB itself.

Beginning with version 3.0, KInterbasDB has full support for the distutils, the standard facility for Python package distribution and installation. Full instructions for using the distutils are available here, but you can skip them unless you have an otherwise insoluble problem.

Open a command prompt, change to the directory where you decompressed the kinterbasdb source distribution, and type:

python setup.py build

The installation script, setup.py, will attempt to automatically detect the information needed by the C compiler; then it will invoke the distutils to perform the actual compilation. If you installed automatic distributions of the dependencies that place themselves in standard locations (on UNIX-style operating systems) or record their locations in the system registry (on Windows), the compilation should proceed without incident.

On Windows, compilers other than Microsoft Visual C++ usually require some library conversion to work with Python or Firebird. With Firebird 1.5 and MinGW, or Firebird 1.0 and Borland C++, setup.py will perform this conversion automatically. If the automatic conversion fails, ensure that your compiler is installed properly (especially that its bin directory is in your PATH). For more information, see the compiler-specific notes in this document, as well as the Python standard library documentation on “Installing Python Modules”.

If setup.py raises no errors and its output concludes with something like “Creating library...”, then you are ready to proceed to the next step.

If you receive an error message, examine its contents and then consult the entries below. Each entry begins with the error message header, followed by an explanation:

LIBRARY AUTODETECTION ERROR

The setup script was unable to automatically find one or more files needed for the compilation process, such as a library needed by the C compiler.

Using a text editor, you will need to manually specify the relevant paths in the manual_config section of the setup configuration file, setup.cfg (in the root directory of the KInterbasDB source distribution). Uncomment the item in question and provide a value appropriate to your system. Save the newly modified setup.cfg, then repeat the compilation step.
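For illustration only, an edited manual_config section might look something like the sketch below. The option names and paths are hypothetical; the real option names appear, commented out, in the setup.cfg shipped with the source distribution:

[manual_config]
# Hypothetical example; uncomment and adjust only the entries your
# build needs, using the real option names from setup.cfg itself:
# database_include_dir = /opt/firebird/include
# database_lib_dir = /opt/firebird/lib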

If manually specifying the library paths fails to solve the problem:
  • Your C compiler or linker may not be properly configured.
  • You may have a corrupt or incomplete installation of one or more KInterbasDB dependencies.

Note for non-Windows platforms: If the compiler indicates that it cannot find the include file Python.h, this probably means that you have the user-oriented Python package installed, but not the developer-oriented package that would enable you to compile C extensions.

For example, RedHat-derived distributions such as Fedora split the core Python distribution into python-x.y.z, python-devel-x.y.z and python-docs-x.y.z packages. You’ll need to install the python-devel-x.y.z package in order to compile KInterbasDB.

The use of C extensions to Python is quite common, so Python repackagers such as Linux distributions should include the files necessary to compile C extensions in their basic Python package. The Python core developers have noticed these repackaging mistakes and complained about them, but apparently without effect.

COMPILER CONFIGURATION ERROR

The setup script could not function because of the current configuration of your compiler. The error message should provide details about what went wrong, and perhaps a suggestion of how to fix the problem.

If you are not using the standard compiler for your platform, consult the compiler-specific notes.

LIBRARY CONVERSION ERROR

The setup script’s attempt to convert libraries intended for use with Microsoft Visual C++ into a format compatible with your compiler was not successful.

Consult the compiler-specific notes in this document, as well as the Python standard library documentation on “Installing Python Modules”.

PYTHON SYSTEM ERROR

Your Python installation is outdated, lacks some crucial modules, or is otherwise inadequate. The error message will indicate what your options are, which may include installing a more recent Python version, compiling additional C extension modules for your current Python version, or editing setup.cfg to manually specify library paths, thus relieving setup.py of the burden of detecting them.

KINTERBASDB DISTRIBUTION ERROR

The setup script cannot find a file that was supposed to be included with the KInterbasDB source distribution. Try downloading the KInterbasDB source distribution again and decompressing it into a fresh temporary directory, then repeat the compilation step.
LIBRARY MANUAL SPECIFICATION ERROR

One of the library paths specified in setup.cfg is not valid. Verify the location of the library, then edit setup.cfg to reflect the correct path.

If you had no particular reason to manually specify the library path in the first place, try commenting out that entry in setup.cfg, then repeat the compilation step and let the setup script attempt to automatically detect the location of the library.

If the problem persists after you have followed the advice in the error message itself and in the table above, visit the KInterbasDB support list and report your problem.

Compiler-Specific Notes

  • Microsoft Windows

    • Microsoft Visual C++ 6.0

      1. The Visual C++ command-line utilities must be available on your system path, and their required environment variables must be initialized to meaningful values. If, when you installed Visual C++, you did not allow it to register the paths needed for command-line compilation, you will need to run the vcvars32.bat batch file from the bin subdirectory of your Visual C++ installation. By default, this directory is C:\Program Files\Microsoft Visual Studio\VC98\bin
      2. Use the *same* command prompt window to run the following command in the temporary directory into which you decompressed KInterbasDB: python setup.py build
    • MinGW (Windows port of GCC)

      Note that KInterbasDB supports MinGW only with Firebird 1.5 or later, not Firebird 1.0 or Interbase®. With earlier versions of the database, use Microsoft Visual C++.

      1. Make sure that the bin subdirectory of the directory where you installed MinGW is in your PATH. KInterbasDB requires numerous MinGW sub-packages, so it’s easiest to install the monolithic distribution of MinGW, rather than piecing together individual sub-packages. The monolithic distribution is an executable installer; installation is trivial. If you do decide to install individual MinGW sub-packages, you must install at least the following:

        • binutils
        • gcc-core
        • mingw-runtime
        • mingw-utils
        • w32api

        KInterbasDB’s setup script will automatically perform all of the required preparatory steps for compiling an extension with MinGW on your Python installation.

      2. In the temporary directory into which you decompressed KInterbasDB, run the command: python setup.py build --compiler=mingw32

Install KInterbasDB

During this step, the setup script moves the KInterbasDB package (including the newly compiled C extensions) to the standard package directory of your Python installation, so that Python will be able to import kinterbasdb and import kinterbasdb.services.

In addition to the Python code and shared library files actually used by the Python interpreter, the setup script typically installs some supporting files, such as documentation. Depending on your system configuration, these supporting files may be placed in the same directory or a different directory from the files used by the Python interpreter.

Run the following command: python setup.py install

The setup script will install KInterbasDB, listing each file it installs.

Errors during this step are rare because compilation (the finicky part of this process) has already taken place; installation is really just a matter of copying files. However, there will be file system permission errors if the Python installation directory is not writable by the user running the setup script. If you encounter such an error, try one of the following:

  • Log in as a user who has the required file system permissions and repeat the installation step.
  • Manually copy the directory build/lib.platform-pyver/kinterbasdb (which contains the Python modules and compiled library files created during the compilation step) to a directory in your PYTHONPATH; a sketch of this approach follows the list. This approach will not install the supporting files, but they are for the benefit of the programmer rather than the Python interpreter anyway.
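A sketch of the manual-copy approach on a Unix-like system. The build directory name and the destination path are illustrative; adjust them to your platform and Python version:

(cd to the temporary directory where you compiled KInterbasDB)
cp -r build/lib.linux-i686-2.5/kinterbasdb $HOME/python-libs/kinterbasdb
export PYTHONPATH=$HOME/python-libs:$PYTHONPATH
python -c "import kinterbasdb"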

Test your KInterbasDB installation

KInterbasDB has an extensive test suite, but it is not really intended for routine public use.

To verify that KInterbasDB is installed properly, switch to a directory other than the temporary directory into which you decompressed the source distribution (to avoid conflict between the copy of kinterbasdb in that directory and the copy placed under the standard Python site-packages directory), then verify the importability of your KInterbasDB installation by issuing the following command:

python -c "import kinterbasdb as k; print k.__version__"

If the import attempt does not encounter any errors and the version number is what you expected, you are finished. Next, consider reading the KInterbasDB Usage Guide.

You should not encounter any errors at this stage since you have already completed the compilation and installation steps successfully. If you do, please report them to the KInterbasDB support list.
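Beyond the bare import check, a minimal DB API 2.0 smoke test can confirm that the Firebird client library is reachable. The database path, user, and password below are placeholders; substitute a real database of your own:

import kinterbasdb
# Placeholder connection parameters; point these at a real database:
con = kinterbasdb.connect(dsn='localhost:/var/db/test.fdb',
                          user='sysdba', password='masterkey')
cur = con.cursor()
cur.execute("select rdb$relation_name from rdb$relations")
for (name,) in cur.fetchmany(5):
    print name.strip()
con.close()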

kinterbasdb-3.3.0/docs/_static/navigation.png [binary PNG data omitted]
kinterbasdb-3.3.0/docs/_static/rightsidebar.css [Sphinx theme stylesheet omitted]
kinterbasdb-3.3.0/docs/_static/stickysidebar.css [Sphinx theme stylesheet omitted]
kinterbasdb-3.3.0/docs/_static/plus.png [binary PNG data omitted]
kinterbasdb-3.3.0/docs/_static/contents.png [binary PNG data omitted]
kinterbasdb-3.3.0/docs/_static/default.css [Sphinx theme stylesheet omitted]
kinterbasdb-3.3.0/docs/_static/minus.png [binary PNG data omitted]
kinterbasdb-3.3.0/docs/_static/ [Sphinx search JavaScript, truncated; tar header garbled, content omitted]