PyGreSQL-5.1/0000755000175100077410000000000013470245543012726 5ustar darcypyg00000000000000PyGreSQL-5.1/pginternal.c0000644000175100077410000012515613466770070015252 0ustar darcypyg00000000000000/*
 * $Id: pginternal.c 985 2019-04-22 22:07:43Z cito $
 *
 * PyGreSQL - a Python interface for the PostgreSQL database.
 *
 * Internal functions - this file is part a of the C extension module.
 *
 * Copyright (c) 2019 by the PyGreSQL Development Team
 *
 * Please see the LICENSE.TXT file for specific restrictions.
 *
 */

/* PyGreSQL internal types.
   These tags classify PostgreSQL result columns for the casting helpers
   below.  PYGRES_ARRAY is a flag OR-ed onto a base type tag, so tests use
   `type & PYGRES_ARRAY` / `type & PYGRES_TEXT` rather than equality. */

/* Simple types */

#define PYGRES_INT 1
#define PYGRES_LONG 2
#define PYGRES_FLOAT 3
#define PYGRES_DECIMAL 4
#define PYGRES_MONEY 5
#define PYGRES_BOOL 6
/* Text based types */
#define PYGRES_TEXT 8
#define PYGRES_BYTEA 9
#define PYGRES_JSON 10
#define PYGRES_OTHER 11
/* Array types */
#define PYGRES_ARRAY 16

/* Shared functions for encoding and decoding strings */

/* Decode a sized byte string to a Python unicode object.
   The three most common client encodings get the fast dedicated CPython
   decoders; anything else goes through the generic codec lookup using the
   libpq encoding name.  Returns a new reference or NULL with an error set. */
static PyObject *
get_decoded_string(const char *str, Py_ssize_t size, int encoding)
{
    if (encoding == pg_encoding_utf8)
        return PyUnicode_DecodeUTF8(str, size, "strict");
    if (encoding == pg_encoding_latin1)
        return PyUnicode_DecodeLatin1(str, size, "strict");
    if (encoding == pg_encoding_ascii)
        return PyUnicode_DecodeASCII(str, size, "strict");
    /* encoding name should be properly translated to Python here */
    return PyUnicode_Decode(str, size,
        pg_encoding_to_char(encoding), "strict");
}

/* Encode a Python unicode object to bytes in the given client encoding.
   Mirror of get_decoded_string(); returns a new reference or NULL. */
static PyObject *
get_encoded_string(PyObject *unicode_obj, int encoding)
{
    if (encoding == pg_encoding_utf8)
        return PyUnicode_AsUTF8String(unicode_obj);
    if (encoding == pg_encoding_latin1)
        return PyUnicode_AsLatin1String(unicode_obj);
    if (encoding == pg_encoding_ascii)
        return PyUnicode_AsASCIIString(unicode_obj);
    /* encoding name should be properly translated to Python here */
    return PyUnicode_AsEncodedString(unicode_obj,
        pg_encoding_to_char(encoding), "strict");
}

/* Helper functions */

/* Get PyGreSQL internal types for a PostgreSQL type.
*/
/* Map a PostgreSQL type OID to a PYGRES_* tag.
   Several mappings depend on module-level configuration globals:
   decimal_point (money parsing), bytea_escaped, jsondecode and
   array_as_text; when the relevant feature is disabled the value is
   left as text.  Array OIDs get the base tag OR-ed with PYGRES_ARRAY
   unless array_as_text is set.  Unknown OIDs map to PYGRES_OTHER. */
static int
get_type(Oid pgtype)
{
    int t;

    switch (pgtype) {
        /* simple types */

        case INT2OID:
        case INT4OID:
        case CIDOID:
        case OIDOID:
        case XIDOID:
            t = PYGRES_INT;
            break;

        case INT8OID:
            t = PYGRES_LONG;
            break;

        case FLOAT4OID:
        case FLOAT8OID:
            t = PYGRES_FLOAT;
            break;

        case NUMERICOID:
            t = PYGRES_DECIMAL;
            break;

        case CASHOID:
            /* money can only be parsed when the locale's decimal point
               is known; otherwise fall back to plain text */
            t = decimal_point ? PYGRES_MONEY : PYGRES_TEXT;
            break;

        case BOOLOID:
            t = PYGRES_BOOL;
            break;

        case BYTEAOID:
            t = bytea_escaped ? PYGRES_TEXT : PYGRES_BYTEA;
            break;

        case JSONOID:
        case JSONBOID:
            t = jsondecode ? PYGRES_JSON : PYGRES_TEXT;
            break;

        case BPCHAROID:
        case CHAROID:
        case TEXTOID:
        case VARCHAROID:
        case NAMEOID:
        case REGTYPEOID:
            t = PYGRES_TEXT;
            break;

        /* array types */

        case INT2ARRAYOID:
        case INT4ARRAYOID:
        case CIDARRAYOID:
        case OIDARRAYOID:
        case XIDARRAYOID:
            t = array_as_text ? PYGRES_TEXT : (PYGRES_INT | PYGRES_ARRAY);
            break;

        case INT8ARRAYOID:
            t = array_as_text ? PYGRES_TEXT : (PYGRES_LONG | PYGRES_ARRAY);
            break;

        case FLOAT4ARRAYOID:
        case FLOAT8ARRAYOID:
            t = array_as_text ? PYGRES_TEXT : (PYGRES_FLOAT | PYGRES_ARRAY);
            break;

        case NUMERICARRAYOID:
            t = array_as_text ? PYGRES_TEXT : (PYGRES_DECIMAL | PYGRES_ARRAY);
            break;

        case MONEYARRAYOID:
            t = array_as_text ? PYGRES_TEXT :
                ((decimal_point ? PYGRES_MONEY : PYGRES_TEXT) | PYGRES_ARRAY);
            break;

        case BOOLARRAYOID:
            t = array_as_text ? PYGRES_TEXT : (PYGRES_BOOL | PYGRES_ARRAY);
            break;

        case BYTEAARRAYOID:
            t = array_as_text ? PYGRES_TEXT :
                ((bytea_escaped ? PYGRES_TEXT : PYGRES_BYTEA) | PYGRES_ARRAY);
            break;

        case JSONARRAYOID:
        case JSONBARRAYOID:
            t = array_as_text ? PYGRES_TEXT :
                ((jsondecode ? PYGRES_JSON : PYGRES_TEXT) | PYGRES_ARRAY);
            break;

        case BPCHARARRAYOID:
        case CHARARRAYOID:
        case TEXTARRAYOID:
        case VARCHARARRAYOID:
        case NAMEARRAYOID:
        case REGTYPEARRAYOID:
            t = array_as_text ? PYGRES_TEXT : (PYGRES_TEXT | PYGRES_ARRAY);
            break;

        default:
            t = PYGRES_OTHER;
    }

    return t;
}

/* Get PyGreSQL column types for all result columns.
*/ static int * get_col_types(PGresult *result, int nfields) { int *types, *t, j; if (!(types = PyMem_Malloc(sizeof(int) * nfields))) { return (int*) PyErr_NoMemory(); } for (j = 0, t = types; j < nfields; ++j) { *t++ = get_type(PQftype(result, j)); } return types; } /* Cast a bytea encoded text based type to a Python object. This assumes the text is null-terminated character string. */ static PyObject * cast_bytea_text(char *s) { PyObject *obj; char *tmp_str; size_t str_len; /* this function should not be called when bytea_escaped is set */ tmp_str = (char *) PQunescapeBytea((unsigned char*) s, &str_len); obj = PyBytes_FromStringAndSize(tmp_str, str_len); if (tmp_str) { PQfreemem(tmp_str); } return obj; } /* Cast a text based type to a Python object. This needs the character string, size and encoding. */ static PyObject * cast_sized_text(char *s, Py_ssize_t size, int encoding, int type) { PyObject *obj, *tmp_obj; char *tmp_str; size_t str_len; switch (type) { /* this must be the PyGreSQL internal type */ case PYGRES_BYTEA: /* this type should not be passed when bytea_escaped is set */ /* we need to add a null byte */ tmp_str = (char *) PyMem_Malloc(size + 1); if (!tmp_str) { return PyErr_NoMemory(); } memcpy(tmp_str, s, size); s = tmp_str; *(s + size) = '\0'; tmp_str = (char *) PQunescapeBytea((unsigned char*) s, &str_len); PyMem_Free(s); if (!tmp_str) return PyErr_NoMemory(); obj = PyBytes_FromStringAndSize(tmp_str, str_len); if (tmp_str) { PQfreemem(tmp_str); } break; case PYGRES_JSON: /* this type should only be passed when jsondecode is set */ obj = get_decoded_string(s, size, encoding); if (obj && jsondecode) { /* was able to decode */ tmp_obj = Py_BuildValue("(O)", obj); obj = PyObject_CallObject(jsondecode, tmp_obj); Py_DECREF(tmp_obj); } break; default: /* PYGRES_TEXT */ #if IS_PY3 obj = get_decoded_string(s, size, encoding); if (!obj) /* cannot decode */ #endif obj = PyBytes_FromStringAndSize(s, size); } return obj; } /* Cast an arbitrary type to a Python 
object using a callback function. This needs the character string, size, encoding, the Postgres type and the external typecast function to be called. */ static PyObject * cast_other(char *s, Py_ssize_t size, int encoding, Oid pgtype, PyObject *cast_hook) { PyObject *obj; obj = cast_sized_text(s, size, encoding, PYGRES_TEXT); if (cast_hook) { PyObject *tmp_obj = obj; obj = PyObject_CallFunction(cast_hook, "(OI)", obj, pgtype); Py_DECREF(tmp_obj); } return obj; } /* Cast a simple type to a Python object. This needs a character string representation with a given size. */ static PyObject * cast_sized_simple(char *s, Py_ssize_t size, int type) { PyObject *obj, *tmp_obj; char buf[64], *t; int i, j, n; switch (type) { /* this must be the PyGreSQL internal type */ case PYGRES_INT: n = sizeof(buf) / sizeof(buf[0]) - 1; if ((int) size < n) { n = (int) size; } for (i = 0, t = buf; i < n; ++i) { *t++ = *s++; } *t = '\0'; obj = PyInt_FromString(buf, NULL, 10); break; case PYGRES_LONG: n = sizeof(buf) / sizeof(buf[0]) - 1; if ((int) size < n) { n = (int) size; } for (i = 0, t = buf; i < n; ++i) { *t++ = *s++; } *t = '\0'; obj = PyLong_FromString(buf, NULL, 10); break; case PYGRES_FLOAT: tmp_obj = PyStr_FromStringAndSize(s, size); obj = PyFloat_FromString(tmp_obj); Py_DECREF(tmp_obj); break; case PYGRES_MONEY: /* this type should only be passed when decimal_point is set */ n = sizeof(buf) / sizeof(buf[0]) - 1; for (i = 0, j = 0; i < size && j < n; ++i, ++s) { if (*s >= '0' && *s <= '9') { buf[j++] = *s; } else if (*s == decimal_point) { buf[j++] = '.'; } else if (*s == '(' || *s == '-') { buf[j++] = '-'; } } if (decimal) { buf[j] = '\0'; obj = PyObject_CallFunction(decimal, "(s)", buf); } else { tmp_obj = PyStr_FromString(buf); obj = PyFloat_FromString(tmp_obj); Py_DECREF(tmp_obj); } break; case PYGRES_DECIMAL: tmp_obj = PyStr_FromStringAndSize(s, size); obj = decimal ? 
PyObject_CallFunctionObjArgs( decimal, tmp_obj, NULL) : PyFloat_FromString(tmp_obj); Py_DECREF(tmp_obj); break; case PYGRES_BOOL: /* convert to bool only if bool_as_text is not set */ if (bool_as_text) { obj = PyStr_FromString(*s == 't' ? "t" : "f"); } else { obj = *s == 't' ? Py_True : Py_False; Py_INCREF(obj); } break; default: /* other types should never be passed, use cast_sized_text */ obj = PyStr_FromStringAndSize(s, size); } return obj; } /* Cast a simple type to a Python object. This needs a null-terminated character string representation. */ static PyObject * cast_unsized_simple(char *s, int type) { PyObject *obj, *tmp_obj; char buf[64]; int j, n; switch (type) { /* this must be the PyGreSQL internal type */ case PYGRES_INT: obj = PyInt_FromString(s, NULL, 10); break; case PYGRES_LONG: obj = PyLong_FromString(s, NULL, 10); break; case PYGRES_FLOAT: tmp_obj = PyStr_FromString(s); obj = PyFloat_FromString(tmp_obj); Py_DECREF(tmp_obj); break; case PYGRES_MONEY: /* this type should only be passed when decimal_point is set */ n = sizeof(buf) / sizeof(buf[0]) - 1; for (j = 0; *s && j < n; ++s) { if (*s >= '0' && *s <= '9') { buf[j++] = *s; } else if (*s == decimal_point) { buf[j++] = '.'; } else if (*s == '(' || *s == '-') { buf[j++] = '-'; } } buf[j] = '\0'; s = buf; /* FALLTHROUGH */ /* no break here */ case PYGRES_DECIMAL: if (decimal) { obj = PyObject_CallFunction(decimal, "(s)", s); } else { tmp_obj = PyStr_FromString(s); obj = PyFloat_FromString(tmp_obj); Py_DECREF(tmp_obj); } break; case PYGRES_BOOL: /* convert to bool only if bool_as_text is not set */ if (bool_as_text) { obj = PyStr_FromString(*s == 't' ? "t" : "f"); } else { obj = *s == 't' ? Py_True : Py_False; Py_INCREF(obj); } break; default: /* other types should never be passed, use cast_sized_text */ obj = PyStr_FromString(s); } return obj; } /* Quick case insensitive check if given sized string is null. 
*/
#define STR_IS_NULL(s, n) (n == 4 && \
    (s[0] == 'n' || s[0] == 'N') && \
    (s[1] == 'u' || s[1] == 'U') && \
    (s[2] == 'l' || s[2] == 'L') && \
    (s[3] == 'l' || s[3] == 'L'))

/* Cast string s with size and encoding to a Python list,
   using the input and output syntax for arrays.
   Use internal type or cast function to cast elements.
   The parameter delim specifies the delimiter for the elements,
   since some types do not use the default delimiter of a comma.

   A non-zero `type` selects internal casting of the base type; a zero
   `type` with a `cast` callable selects external casting.  The parser
   keeps a stack of partially-built sublists (one per nesting level) and
   validates optional leading dimension ranges like "[1:3]=". */
static PyObject *
cast_array(char *s, Py_ssize_t size, int encoding,
     int type, PyObject *cast, char delim)
{
    PyObject *result, *stack[MAX_ARRAY_DEPTH];
    char *end = s + size, *t;
    int depth, ranges = 0, level = 0;

    if (type) {
        type &= ~PYGRES_ARRAY; /* get the base type */
        if (!type) type = PYGRES_TEXT;
    }
    if (!delim) {
        delim = ',';
    }
    else if (delim == '{' || delim =='}' || delim=='\\') {
        PyErr_SetString(PyExc_ValueError, "Invalid array delimiter");
        return NULL;
    }

    /* strip blanks at the beginning */
    while (s != end && *s == ' ') ++s;
    /* NOTE(review): the *s dereference below relies on the libpq value
       being null-terminated at `end` — confirm callers guarantee this */
    if (*s == '[') { /* dimension ranges */
        int valid;

        for (valid = 0; !valid;) {
            if (s == end || *s++ != '[') break;
            while (s != end && *s == ' ') ++s;
            if (s != end && (*s == '+' || *s == '-')) ++s;
            if (s == end || *s < '0' || *s > '9') break;
            while (s != end && *s >= '0' && *s <= '9') ++s;
            if (s == end || *s++ != ':') break;
            if (s != end && (*s == '+' || *s == '-')) ++s;
            if (s == end || *s < '0' || *s > '9') break;
            while (s != end && *s >= '0' && *s <= '9') ++s;
            if (s == end || *s++ != ']') break;
            while (s != end && *s == ' ') ++s;
            ++ranges;
            if (s != end && *s == '=') {
                do ++s; while (s != end && *s == ' ');
                valid = 1;
            }
        }
        if (!valid) {
            PyErr_SetString(PyExc_ValueError, "Invalid array dimensions");
            return NULL;
        }
    }

    /* count the nesting depth from the run of leading braces */
    for (t = s, depth = 0; t != end && (*t == '{' || *t == ' '); ++t) {
        if (*t == '{') ++depth;
    }
    if (!depth) {
        PyErr_SetString(PyExc_ValueError,
            "Array must start with a left brace");
        return NULL;
    }
    if (ranges && depth != ranges) {
        PyErr_SetString(PyExc_ValueError,
            "Array dimensions do not match content");
        return NULL;
    }
    if (depth > MAX_ARRAY_DEPTH) {
        PyErr_SetString(PyExc_ValueError, "Array is too deeply nested");
        return NULL;
    }
    depth--; /* next level of parsing */
    result = PyList_New(0);
    if (!result) return NULL;

    do ++s; while (s != end && *s == ' ');

    /* everything is set up, start parsing the array */
    while (s != end) {
        if (*s == '}') {
            /* close the current sublist and pop the parent from the stack */
            PyObject *subresult;

            if (!level) break; /* top level array ended */

            do ++s; while (s != end && *s == ' ');
            if (s == end) break; /* error */
            if (*s == delim) {
                do ++s; while (s != end && *s == ' ');
                if (s == end) break; /* error */
                if (*s != '{') {
                    PyErr_SetString(PyExc_ValueError,
                        "Subarray expected but not found");
                    Py_DECREF(result);
                    return NULL;
                }
            }
            else if (*s != '}') break; /* error */

            subresult = result;
            result = stack[--level];
            if (PyList_Append(result, subresult)) {
                Py_DECREF(result);
                return NULL;
            }
        }
        else if (level == depth) { /* we expect elements at this level */
            PyObject *element;
            char *estr;
            Py_ssize_t esize;
            int escaped = 0;

            if (*s == '{') {
                PyErr_SetString(PyExc_ValueError,
                    "Subarray found where not expected");
                Py_DECREF(result);
                return NULL;
            }
            if (*s == '"') { /* quoted element */
                estr = ++s;
                while (s != end && *s != '"') {
                    if (*s == '\\') {
                        ++s;
                        if (s == end) break;
                        escaped = 1;
                    }
                    ++s;
                }
                esize = s - estr;
                /* NOTE(review): if the closing quote is missing, s == end
                   here and the `do ++s` below steps past end — confirm
                   the trailing '\0' makes the later checks bail out */
                do ++s; while (s != end && *s == ' ');
            }
            else { /* unquoted element */
                estr = s;
                /* can contain blanks inside */
                while (s != end && *s != '"' &&
                    *s != '{' && *s != '}' && *s != delim)
                {
                    if (*s == '\\') {
                        ++s;
                        if (s == end) break;
                        escaped = 1;
                    }
                    ++s;
                }
                /* strip trailing blanks of the element */
                t = s;
                while (t > estr && *(t - 1) == ' ') --t;
                if (!(esize = t - estr)) {
                    s = end; break; /* error */
                }
                if (STR_IS_NULL(estr, esize)) /* NULL gives None */
                    estr = NULL;
            }
            if (s == end) break; /* error */
            if (estr) {
                if (escaped) {
                    char *r;
                    Py_ssize_t i;

                    /* create unescaped string */
                    t = estr;
                    estr = (char *) PyMem_Malloc(esize);
                    if (!estr) {
                        Py_DECREF(result);
                        return PyErr_NoMemory();
                    }
                    for (i = 0, r = estr; i < esize; ++i) {
                        if (*t == '\\') ++t, ++i;
                        *r++ = *t++;
                    }
                    esize = r - estr;
                }
                if (type) { /* internal casting of base type */
                    if (type & PYGRES_TEXT)
                        element = cast_sized_text(
                            estr, esize, encoding, type);
                    else
                        element = cast_sized_simple(estr, esize, type);
                }
                else { /* external casting of base type */
#if IS_PY3
                    element = encoding == pg_encoding_ascii ? NULL :
                        get_decoded_string(estr, esize, encoding);
                    if (!element) /* no decoding necessary or possible */
#endif
                    element = PyBytes_FromStringAndSize(estr, esize);
                    if (element && cast) {
                        PyObject *tmp = element;
                        element = PyObject_CallFunctionObjArgs(
                            cast, element, NULL);
                        Py_DECREF(tmp);
                    }
                }
                if (escaped) PyMem_Free(estr);
                if (!element) {
                    Py_DECREF(result);
                    return NULL;
                }
            }
            else {
                Py_INCREF(Py_None);
                element = Py_None;
            }
            if (PyList_Append(result, element)) {
                Py_DECREF(element);
                Py_DECREF(result);
                return NULL;
            }
            Py_DECREF(element);
            if (*s == delim) {
                do ++s; while (s != end && *s == ' ');
                if (s == end) break; /* error */
            }
            else if (*s != '}') break; /* error */
        }
        else { /* we expect arrays at this level */
            if (*s != '{') {
                PyErr_SetString(PyExc_ValueError,
                    "Subarray must start with a left brace");
                Py_DECREF(result);
                return NULL;
            }
            do ++s; while (s != end && *s == ' ');
            if (s == end) break; /* error */
            stack[level++] = result;
            /* NOTE(review): on allocation failure here the lists already
               pushed on the stack are leaked — consider unwinding them */
            if (!(result = PyList_New(0))) return NULL;
        }
    }
    if (s == end || *s != '}') {
        PyErr_SetString(PyExc_ValueError,
            "Unexpected end of array");
        Py_DECREF(result);
        return NULL;
    }
    do ++s; while (s != end && *s == ' ');
    if (s != end) {
        PyErr_SetString(PyExc_ValueError,
            "Unexpected characters after end of array");
        Py_DECREF(result);
        return NULL;
    }
    return result;
}

/* Cast string s with size and encoding to a Python tuple,
   using the input and output syntax for composite types.
   Use array of internal types or cast function or sequence of
   cast functions to cast elements.
   The parameter len is the record size.
   The parameter delim can specify a delimiter for the elements,
   although composite types always use a comma as delimiter.
*/
static PyObject *
cast_record(char *s, Py_ssize_t size, int encoding,
     int *type, PyObject *cast, Py_ssize_t len, char delim)
{
    PyObject *result, *ret;
    char *end = s + size, *t;
    Py_ssize_t i;

    if (!delim) {
        delim = ',';
    }
    else if (delim == '(' || delim ==')' || delim=='\\') {
        PyErr_SetString(PyExc_ValueError, "Invalid record delimiter");
        return NULL;
    }

    /* strip blanks at the beginning */
    while (s != end && *s == ' ') ++s;
    if (s == end || *s != '(') {
        PyErr_SetString(PyExc_ValueError,
            "Record must start with a left parenthesis");
        return NULL;
    }
    result = PyList_New(0);
    if (!result) return NULL;
    i = 0;
    /* everything is set up, start parsing the record */
    while (++s != end) {
        PyObject *element;

        if (*s == ')' || *s == delim) {
            /* empty field means SQL NULL */
            Py_INCREF(Py_None);
            element = Py_None;
        }
        else {
            char *estr;
            Py_ssize_t esize;
            int quoted = 0, escaped = 0;

            estr = s;
            quoted = *s == '"';
            if (quoted) ++s;
            esize = 0;
            /* scan the raw field; esize counts the unescaped length,
               doubled quotes and backslash escapes are skipped */
            while (s != end) {
                if (!quoted && (*s == ')' || *s == delim))
                    break;
                if (*s == '"') {
                    ++s;
                    if (s == end) break;
                    if (!(quoted && *s == '"')) {
                        quoted = !quoted;
                        continue;
                    }
                }
                if (*s == '\\') {
                    ++s;
                    if (s == end) break;
                }
                ++s, ++esize;
            }
            if (s == end) break; /* error */
            if (estr + esize != s) { /* field needs unescaping */
                char *r;

                escaped = 1;
                /* create unescaped string */
                t = estr;
                estr = (char *) PyMem_Malloc(esize);
                if (!estr) {
                    Py_DECREF(result);
                    return PyErr_NoMemory();
                }
                quoted = 0;
                r = estr;
                while (t != s) {
                    if (*t == '"') {
                        ++t;
                        if (!(quoted && *t == '"')) {
                            quoted = !quoted;
                            continue;
                        }
                    }
                    if (*t == '\\') ++t;
                    *r++ = *t++;
                }
            }
            if (type) { /* internal casting of element type */
                int etype = type[i];

                if (etype & PYGRES_ARRAY)
                    element = cast_array(
                        estr, esize, encoding, etype, NULL, 0);
                else if (etype & PYGRES_TEXT)
                    element = cast_sized_text(estr, esize, encoding, etype);
                else
                    element = cast_sized_simple(estr, esize, etype);
            }
            else { /* external casting of base type */
#if IS_PY3
                element = encoding == pg_encoding_ascii ? NULL :
                    get_decoded_string(estr, esize, encoding);
                if (!element) /* no decoding necessary or possible */
#endif
                element = PyBytes_FromStringAndSize(estr, esize);
                if (element && cast) {
                    if (len) {
                        /* a sequence of casts, one per column */
                        PyObject *ecast = PySequence_GetItem(cast, i);

                        if (ecast) {
                            if (ecast != Py_None) {
                                PyObject *tmp = element;
                                element = PyObject_CallFunctionObjArgs(
                                    ecast, element, NULL);
                                Py_DECREF(tmp);
                            }
                            /* fix: PySequence_GetItem returns a new
                               reference which was previously leaked */
                            Py_DECREF(ecast);
                        }
                        else {
                            Py_DECREF(element);
                            element = NULL;
                        }
                    }
                    else {
                        /* a single cast for all columns */
                        PyObject *tmp = element;
                        element = PyObject_CallFunctionObjArgs(
                            cast, element, NULL);
                        Py_DECREF(tmp);
                    }
                }
            }
            if (escaped) PyMem_Free(estr);
            if (!element) {
                Py_DECREF(result);
                return NULL;
            }
        }
        if (PyList_Append(result, element)) {
            Py_DECREF(element);
            Py_DECREF(result);
            return NULL;
        }
        Py_DECREF(element);
        if (len) ++i;
        if (*s != delim) break; /* no next record */
        if (len && i >= len) {
            PyErr_SetString(PyExc_ValueError, "Too many columns");
            Py_DECREF(result);
            return NULL;
        }
    }
    if (s == end || *s != ')') {
        PyErr_SetString(PyExc_ValueError, "Unexpected end of record");
        Py_DECREF(result);
        return NULL;
    }
    do ++s; while (s != end && *s == ' ');
    if (s != end) {
        PyErr_SetString(PyExc_ValueError,
            "Unexpected characters after end of record");
        Py_DECREF(result);
        return NULL;
    }
    if (len && i < len) {
        PyErr_SetString(PyExc_ValueError, "Too few columns");
        Py_DECREF(result);
        return NULL;
    }

    ret = PyList_AsTuple(result);
    Py_DECREF(result);
    return ret;
}

/* Cast string s with size and encoding to a Python dictionary,
   using the input and output syntax for hstore values.
*/ static PyObject * cast_hstore(char *s, Py_ssize_t size, int encoding) { PyObject *result; char *end = s + size; result = PyDict_New(); /* everything is set up, start parsing the record */ while (s != end) { char *key, *val; PyObject *key_obj, *val_obj; Py_ssize_t key_esc = 0, val_esc = 0, size; int quoted; while (s != end && *s == ' ') ++s; if (s == end) break; quoted = *s == '"'; if (quoted) { key = ++s; while (s != end) { if (*s == '"') break; if (*s == '\\') { if (++s == end) break; ++key_esc; } ++s; } if (s == end) { PyErr_SetString(PyExc_ValueError, "Unterminated quote"); Py_DECREF(result); return NULL; } } else { key = s; while (s != end) { if (*s == '=' || *s == ' ') break; if (*s == '\\') { if (++s == end) break; ++key_esc; } ++s; } if (s == key) { PyErr_SetString(PyExc_ValueError, "Missing key"); Py_DECREF(result); return NULL; } } size = s - key - key_esc; if (key_esc) { char *r = key, *t; key = (char *) PyMem_Malloc(size); if (!key) { Py_DECREF(result); return PyErr_NoMemory(); } t = key; while (r != s) { if (*r == '\\') { ++r; if (r == s) break; } *t++ = *r++; } } key_obj = cast_sized_text(key, size, encoding, PYGRES_TEXT); if (key_esc) PyMem_Free(key); if (!key_obj) { Py_DECREF(result); return NULL; } if (quoted) ++s; while (s != end && *s == ' ') ++s; if (s == end || *s++ != '=' || s == end || *s++ != '>') { PyErr_SetString(PyExc_ValueError, "Invalid characters after key"); Py_DECREF(key_obj); Py_DECREF(result); return NULL; } while (s != end && *s == ' ') ++s; quoted = *s == '"'; if (quoted) { val = ++s; while (s != end) { if (*s == '"') break; if (*s == '\\') { if (++s == end) break; ++val_esc; } ++s; } if (s == end) { PyErr_SetString(PyExc_ValueError, "Unterminated quote"); Py_DECREF(result); return NULL; } } else { val = s; while (s != end) { if (*s == ',' || *s == ' ') break; if (*s == '\\') { if (++s == end) break; ++val_esc; } ++s; } if (s == val) { PyErr_SetString(PyExc_ValueError, "Missing value"); Py_DECREF(key_obj); Py_DECREF(result); 
return NULL; } if (STR_IS_NULL(val, s - val)) val = NULL; } if (val) { size = s - val - val_esc; if (val_esc) { char *r = val, *t; val = (char *) PyMem_Malloc(size); if (!val) { Py_DECREF(key_obj); Py_DECREF(result); return PyErr_NoMemory(); } t = val; while (r != s) { if (*r == '\\') { ++r; if (r == s) break; } *t++ = *r++; } } val_obj = cast_sized_text(val, size, encoding, PYGRES_TEXT); if (val_esc) PyMem_Free(val); if (!val_obj) { Py_DECREF(key_obj); Py_DECREF(result); return NULL; } } else { Py_INCREF(Py_None); val_obj = Py_None; } if (quoted) ++s; while (s != end && *s == ' ') ++s; if (s != end) { if (*s++ != ',') { PyErr_SetString(PyExc_ValueError, "Invalid characters after val"); Py_DECREF(key_obj); Py_DECREF(val_obj); Py_DECREF(result); return NULL; } while (s != end && *s == ' ') ++s; if (s == end) { PyErr_SetString(PyExc_ValueError, "Missing entry"); Py_DECREF(key_obj); Py_DECREF(val_obj); Py_DECREF(result); return NULL; } } PyDict_SetItem(result, key_obj, val_obj); Py_DECREF(key_obj); Py_DECREF(val_obj); } return result; } /* Get appropriate error type from sqlstate. 
*/ static PyObject * get_error_type(const char *sqlstate) { switch (sqlstate[0]) { case '0': switch (sqlstate[1]) { case 'A': return NotSupportedError; } break; case '2': switch (sqlstate[1]) { case '0': case '1': return ProgrammingError; case '2': return DataError; case '3': return IntegrityError; case '4': case '5': return InternalError; case '6': case '7': case '8': return OperationalError; case 'B': case 'D': case 'F': return InternalError; } break; case '3': switch (sqlstate[1]) { case '4': return OperationalError; case '8': case '9': case 'B': return InternalError; case 'D': case 'F': return ProgrammingError; } break; case '4': switch (sqlstate[1]) { case '0': return OperationalError; case '2': case '4': return ProgrammingError; } break; case '5': case 'H': return OperationalError; case 'F': case 'P': case 'X': return InternalError; } return DatabaseError; } /* Set database error message and sqlstate attribute. */ static void set_error_msg_and_state(PyObject *type, const char *msg, int encoding, const char *sqlstate) { PyObject *err_obj, *msg_obj, *sql_obj = NULL; #if IS_PY3 if (encoding == -1) /* unknown */ msg_obj = PyUnicode_DecodeLocale(msg, NULL); else msg_obj = get_decoded_string(msg, strlen(msg), encoding); if (!msg_obj) /* cannot decode */ #endif msg_obj = PyBytes_FromString(msg); if (sqlstate) { sql_obj = PyStr_FromStringAndSize(sqlstate, 5); } else { Py_INCREF(Py_None); sql_obj = Py_None; } err_obj = PyObject_CallFunctionObjArgs(type, msg_obj, NULL); if (err_obj) { Py_DECREF(msg_obj); PyObject_SetAttrString(err_obj, "sqlstate", sql_obj); Py_DECREF(sql_obj); PyErr_SetObject(type, err_obj); Py_DECREF(err_obj); } else { PyErr_SetString(type, msg); } } /* Set given database error message. */ static void set_error_msg(PyObject *type, const char *msg) { set_error_msg_and_state(type, msg, pg_encoding_ascii, NULL); } /* Set database error from connection and/or result. 
*/
/* Raise a database error: prefer the connection's current error message
   (in its client encoding), and refine the exception type from the
   result's SQLSTATE when one is available. */
static void
set_error(PyObject *type, const char * msg, PGconn *cnx, PGresult *result)
{
    char *sqlstate = NULL;
    int encoding = pg_encoding_ascii;

    if (cnx) {
        char *err_msg = PQerrorMessage(cnx);
        if (err_msg) {
            msg = err_msg;
            encoding = PQclientEncoding(cnx);
        }
    }
    if (result) {
        sqlstate = PQresultErrorField(result, PG_DIAG_SQLSTATE);
        if (sqlstate) type = get_error_type(sqlstate);
    }

    set_error_msg_and_state(type, msg, encoding, sqlstate);
}

#ifdef SSL_INFO

/* Get SSL attributes and values as a dictionary.
   Missing attribute values map to None. */
static PyObject *
get_ssl_attributes(PGconn *cnx)
{
    PyObject *attr_dict = NULL;
    const char * const *s;

    if (!(attr_dict = PyDict_New())) {
        return NULL;
    }

    for (s = PQsslAttributeNames(cnx); *s; ++s) {
        const char *val = PQsslAttribute(cnx, *s);

        if (val) {
            PyObject * val_obj = PyStr_FromString(val);

            PyDict_SetItemString(attr_dict, *s, val_obj);
            Py_DECREF(val_obj);
        }
        else {
            PyDict_SetItemString(attr_dict, *s, Py_None);
        }
    }

    return attr_dict;
}

#endif /* SSL_INFO */

/* Format result (mostly useful for debugging).
   Renders the result set as an aligned ASCII table with a header,
   separator line, body and "(N rows)" footer.
   Note: This is similar to the Postgres function PQprint().
   PQprint() is not used because handing over a stream from Python to
   PostgreSQL can be problematic if they use different libs for streams
   and because using PQprint() and tp_print is not recommended any more. */
static PyObject *
format_result(const PGresult *res)
{
    const int n = PQnfields(res);

    if (n > 0) {
        char * const aligns = (char *) PyMem_Malloc(n * sizeof(char));
        int * const sizes = (int *) PyMem_Malloc(n * sizeof(int));

        if (aligns && sizes) {
            const int m = PQntuples(res);
            int i, j;
            size_t size;
            char *buffer;

            /* calculate sizes and alignments */
            for (j = 0; j < n; ++j) {
                const char * const s = PQfname(res, j);
                const int format = PQfformat(res, j);

                sizes[j] = s ? (int) strlen(s) : 0;
                if (format) {
                    /* binary format: no alignment, placeholder printed */
                    aligns[j] = '\0';
                    /* NOTE(review): the empty "" literals in this function
                       look like a placeholder string (e.g. "<binary>") was
                       lost in extraction — verify against the released
                       pginternal.c before relying on this rendering */
                    if (m && sizes[j] < 8)
                        /* "" must fit */
                        sizes[j] = 8;
                }
                else {
                    const Oid ftype = PQftype(res, j);

                    switch (ftype) {
                        case INT2OID:
                        case INT4OID:
                        case INT8OID:
                        case FLOAT4OID:
                        case FLOAT8OID:
                        case NUMERICOID:
                        case OIDOID:
                        case XIDOID:
                        case CIDOID:
                        case CASHOID:
                            aligns[j] = 'r';
                            break;
                        default:
                            aligns[j] = 'l';
                    }
                }
            }
            for (i = 0; i < m; ++i) {
                for (j = 0; j < n; ++j) {
                    if (aligns[j]) {
                        const int k = PQgetlength(res, i, j);

                        if (sizes[j] < k)
                            /* value must fit */
                            sizes[j] = k;
                    }
                }
            }
            size = 0;
            /* size of one row */
            for (j = 0; j < n; ++j) size += sizes[j] + 1;
            /* times number of rows incl. heading */
            size *= (m + 2);
            /* plus size of footer */
            size += 40;
            /* is the buffer size that needs to be allocated */
            buffer = (char *) PyMem_Malloc(size);
            if (buffer) {
                char *p = buffer;
                PyObject *result;

                /* create the header */
                for (j = 0; j < n; ++j) {
                    const char * const s = PQfname(res, j);
                    const int k = sizes[j];
                    const int h = (k - (int) strlen(s)) / 2;

                    /* center the column name within its width */
                    sprintf(p, "%*s", h, "");
                    sprintf(p + h, "%-*s", k - h, s);
                    p += k;
                    if (j + 1 < n)
                        *p++ = '|';
                }
                *p++ = '\n';
                for (j = 0; j < n; ++j) {
                    int k = sizes[j];

                    while (k--)
                        *p++ = '-';
                    if (j + 1 < n)
                        *p++ = '+';
                }
                *p++ = '\n';
                /* create the body */
                for (i = 0; i < m; ++i) {
                    for (j = 0; j < n; ++j) {
                        const char align = aligns[j];
                        const int k = sizes[j];

                        if (align) {
                            sprintf(p, align == 'r' ?
                                "%*s" : "%-*s", k,
                                PQgetvalue(res, i, j));
                        }
                        else {
                            sprintf(p, "%-*s", k,
                                PQgetisnull(res, i, j) ?
                                "" : "");
                        }
                        p += k;
                        if (j + 1 < n)
                            *p++ = '|';
                    }
                    *p++ = '\n';
                }
                /* free memory */
                PyMem_Free(aligns);
                PyMem_Free(sizes);
                /* create the footer */
                sprintf(p, "(%d row%s)", m, m == 1 ? "" : "s");
                /* return the result */
                result = PyStr_FromString(buffer);
                PyMem_Free(buffer);
                return result;
            }
            else {
                PyMem_Free(aligns);
                PyMem_Free(sizes);
                return PyErr_NoMemory();
            }
        }
        else {
            /* PyMem_Free(NULL) is a no-op, so partial allocation is safe */
            PyMem_Free(aligns);
            PyMem_Free(sizes);
            return PyErr_NoMemory();
        }
    }
    else
        return PyStr_FromString("(nothing selected)");
}

/* Internal function converting a Postgres datestyles to date formats.
   Only the first letter of the style and the part after the comma
   (MDY vs. DMY) are inspected; ISO is the default. */
static const char *
date_style_to_format(const char *s)
{
    static const char *formats[] = {
        "%Y-%m-%d",  /* 0 = ISO */
        "%m-%d-%Y",  /* 1 = Postgres, MDY */
        "%d-%m-%Y",  /* 2 = Postgres, DMY */
        "%m/%d/%Y",  /* 3 = SQL, MDY */
        "%d/%m/%Y",  /* 4 = SQL, DMY */
        "%d.%m.%Y"   /* 5 = German */
    };

    switch (s ? *s : 'I') {
        case 'P': /* Postgres */
            s = strchr(s + 1, ',');
            if (s) do ++s; while (*s && *s == ' ');
            return formats[s && *s == 'D' ? 2 : 1];
        case 'S': /* SQL */
            s = strchr(s + 1, ',');
            if (s) do ++s; while (*s && *s == ' ');
            return formats[s && *s == 'D' ? 4 : 3];
        case 'G': /* German */
            return formats[5];
        default: /* ISO */
            return formats[0]; /* ISO is the default */
    }
}

/* Internal function converting a date format to a Postgres datestyle.
   Classifies by the first field specifier (%Y/%m/%d) and the separator
   character following it.  Assumes s, when non-NULL, has at least three
   characters, which holds for the formats produced above. */
static const char *
date_format_to_style(const char *s)
{
    static const char *datestyle[] = {
        "ISO, YMD",       /* 0 = %Y-%m-%d */
        "Postgres, MDY",  /* 1 = %m-%d-%Y */
        "Postgres, DMY",  /* 2 = %d-%m-%Y */
        "SQL, MDY",       /* 3 = %m/%d/%Y */
        "SQL, DMY",       /* 4 = %d/%m/%Y */
        "German, DMY"     /* 5 = %d.%m.%Y */
    };

    switch (s ? s[1] : 'Y') {
        case 'm':
            switch (s[2]) {
                case '/':
                    return datestyle[3]; /* SQL, MDY */
                default:
                    return datestyle[1]; /* Postgres, MDY */
            }
        case 'd':
            switch (s[2]) {
                case '/':
                    return datestyle[4]; /* SQL, DMY */
                case '.':
                    return datestyle[5]; /* German */
                default:
                    return datestyle[2]; /* Postgres, DMY */
            }
        default:
            return datestyle[0]; /* ISO */
    }
}

/* Internal wrapper for the notice receiver callback.
*/ static void notice_receiver(void *arg, const PGresult *res) { PyGILState_STATE gstate = PyGILState_Ensure(); connObject *self = (connObject*) arg; PyObject *func = self->notice_receiver; if (func) { noticeObject *notice = PyObject_NEW(noticeObject, ¬iceType); PyObject *ret; if (notice) { notice->pgcnx = arg; notice->res = res; } else { Py_INCREF(Py_None); notice = (noticeObject *)(void *) Py_None; } ret = PyObject_CallFunction(func, "(O)", notice); Py_XDECREF(ret); } PyGILState_Release(gstate); } PyGreSQL-5.1/pgquery.c0000644000175100077410000005401313466770070014574 0ustar darcypyg00000000000000/* * $Id: pgquery.c 985 2019-04-22 22:07:43Z cito $ * * PyGreSQL - a Python interface for the PostgreSQL database. * * The query object - this file is part a of the C extension module. * * Copyright (c) 2019 by the PyGreSQL Development Team * * Please see the LICENSE.TXT file for specific restrictions. * */ /* Deallocate the query object. */ static void query_dealloc(queryObject *self) { Py_XDECREF(self->pgcnx); if (self->col_types) { PyMem_Free(self->col_types); } if (self->result) { PQclear(self->result); } PyObject_Del(self); } /* Return query as string in human readable form. */ static PyObject * query_str(queryObject *self) { return format_result(self->result); } /* Return length of a query object. */ static Py_ssize_t query_len(PyObject *self) { PyObject *tmp; Py_ssize_t len; tmp = PyLong_FromLong(((queryObject*) self)->max_row); len = PyLong_AsSsize_t(tmp); Py_DECREF(tmp); return len; } /* Return the value in the given column of the current row. 
*/ static PyObject * _query_value_in_column(queryObject *self, int column) { char *s; int type; if (PQgetisnull(self->result, self->current_row, column)) { Py_INCREF(Py_None); return Py_None; } /* get the string representation of the value */ /* note: this is always null-terminated text format */ s = PQgetvalue(self->result, self->current_row, column); /* get the PyGreSQL type of the column */ type = self->col_types[column]; /* cast the string representation into a Python object */ if (type & PYGRES_ARRAY) return cast_array(s, PQgetlength(self->result, self->current_row, column), self->encoding, type, NULL, 0); if (type == PYGRES_BYTEA) return cast_bytea_text(s); if (type == PYGRES_OTHER) return cast_other(s, PQgetlength(self->result, self->current_row, column), self->encoding, PQftype(self->result, column), self->pgcnx->cast_hook); if (type & PYGRES_TEXT) return cast_sized_text(s, PQgetlength(self->result, self->current_row, column), self->encoding, type); return cast_unsized_simple(s, type); } /* Return the current row as a tuple. */ static PyObject * _query_row_as_tuple(queryObject *self) { PyObject *row_tuple = NULL; int j; if (!(row_tuple = PyTuple_New(self->num_fields))) { return NULL; } for (j = 0; j < self->num_fields; ++j) { PyObject *val = _query_value_in_column(self, j); if (!val) { Py_DECREF(row_tuple); return NULL; } PyTuple_SET_ITEM(row_tuple, j, val); } return row_tuple; } /* Return given item from a query object. */ static PyObject * query_getitem(PyObject *self, Py_ssize_t i) { queryObject *q = (queryObject *) self; PyObject *tmp; long row; tmp = PyLong_FromSize_t(i); row = PyLong_AsLong(tmp); Py_DECREF(tmp); if (row < 0 || row >= q->max_row) { PyErr_SetNone(PyExc_IndexError); return NULL; } q->current_row = row; return _query_row_as_tuple(q); } /* __iter__() method of the queryObject: Returns the default iterator yielding rows as tuples. 
*/ static PyObject* query_iter(queryObject *self) { self->current_row = 0; Py_INCREF(self); return (PyObject*) self; } /* __next__() method of the queryObject: Returns the current current row as a tuple and moves to the next one. */ static PyObject * query_next(queryObject *self, PyObject *noargs) { PyObject *row_tuple = NULL; if (self->current_row >= self->max_row) { PyErr_SetNone(PyExc_StopIteration); return NULL; } row_tuple = _query_row_as_tuple(self); if (row_tuple) ++self->current_row; return row_tuple; } /* Get number of rows. */ static char query_ntuples__doc__[] = "ntuples() -- return number of tuples returned by query"; static PyObject * query_ntuples(queryObject *self, PyObject *noargs) { return PyInt_FromLong(self->max_row); } /* List field names from query result. */ static char query_listfields__doc__[] = "listfields() -- List field names from result"; static PyObject * query_listfields(queryObject *self, PyObject *noargs) { int i; char *name; PyObject *fieldstuple, *str; /* builds tuple */ fieldstuple = PyTuple_New(self->num_fields); if (fieldstuple) { for (i = 0; i < self->num_fields; ++i) { name = PQfname(self->result, i); str = PyStr_FromString(name); PyTuple_SET_ITEM(fieldstuple, i, str); } } return fieldstuple; } /* Get field name from number in last result. */ static char query_fieldname__doc__[] = "fieldname(num) -- return name of field from result from its position"; static PyObject * query_fieldname(queryObject *self, PyObject *args) { int i; char *name; /* gets args */ if (!PyArg_ParseTuple(args, "i", &i)) { PyErr_SetString(PyExc_TypeError, "Method fieldname() takes an integer as argument"); return NULL; } /* checks number validity */ if (i >= self->num_fields) { PyErr_SetString(PyExc_ValueError, "Invalid field number"); return NULL; } /* gets fields name and builds object */ name = PQfname(self->result, i); return PyStr_FromString(name); } /* Get field number from name in last result. 
*/
static char query_fieldnum__doc__[] =
"fieldnum(name) -- return position in query for field from its name";

static PyObject *
query_fieldnum(queryObject *self, PyObject *args)
{
    int num;
    char *name;

    /* gets args */
    if (!PyArg_ParseTuple(args, "s", &name)) {
        PyErr_SetString(PyExc_TypeError,
                        "Method fieldnum() takes a string as argument");
        return NULL;
    }

    /* gets field number (PQfnumber returns -1 for an unknown name) */
    if ((num = PQfnumber(self->result, name)) == -1) {
        PyErr_SetString(PyExc_ValueError, "Unknown field");
        return NULL;
    }

    return PyInt_FromLong(num);
}

/* Retrieve one row from the result as a tuple. */
static char query_one__doc__[] =
"one() -- Get one row from the result of a query\n\n"
"Only one row from the result is returned as a tuple of fields.\n"
"This method can be called multiple times to return more rows.\n"
"It returns None if the result does not contain one more row.\n";

static PyObject *
query_one(queryObject *self, PyObject *noargs)
{
    PyObject *row_tuple;

    /* exhausted result: return None rather than raising */
    if (self->current_row >= self->max_row) {
        Py_INCREF(Py_None);
        return Py_None;
    }
    row_tuple = _query_row_as_tuple(self);
    /* only advance the cursor when the row could be built */
    if (row_tuple) ++self->current_row;
    return row_tuple;
}

/* Retrieve the single row from the result as a tuple.
*/
static char query_single__doc__[] =
"single() -- Get the result of a query as single row\n\n"
"The single row from the query result is returned as a tuple of fields.\n"
"This method returns the same single row when called multiple times.\n"
"It raises an InvalidResultError if the result doesn't have exactly one row,\n"
"which will be of type NoResultError or MultipleResultsError specifically.\n";

static PyObject *
query_single(queryObject *self, PyObject *noargs)
{
    PyObject *row_tuple;

    /* exactly one row is required; report which way it failed */
    if (self->max_row != 1) {
        if (self->max_row)
            set_error_msg(MultipleResultsError, "Multiple results found");
        else
            set_error_msg(NoResultError, "No result found");
        return NULL;
    }
    /* always rewind so repeated calls return the same row */
    self->current_row = 0;
    row_tuple = _query_row_as_tuple(self);
    if (row_tuple) ++self->current_row;
    return row_tuple;
}

/* Retrieve the last query result as a list of tuples. */
static char query_getresult__doc__[] =
"getresult() -- Get the result of a query\n\n"
"The result is returned as a list of rows, each one a tuple of fields\n"
"in the order returned by the server.\n";

static PyObject *
query_getresult(queryObject *self, PyObject *noargs)
{
    PyObject *result_list;
    int i;

    if (!(result_list = PyList_New(self->max_row))) {
        return NULL;
    }

    /* reuse the iterator step to fill the list row by row */
    for (i = self->current_row = 0; i < self->max_row; ++i) {
        PyObject *row_tuple = query_next(self, noargs);

        if (!row_tuple) {
            Py_DECREF(result_list);
            return NULL;
        }
        /* PyList_SET_ITEM steals the reference to row_tuple */
        PyList_SET_ITEM(result_list, i, row_tuple);
    }

    return result_list;
}

/* Return the current row as a dict. */
static PyObject *
_query_row_as_dict(queryObject *self)
{
    PyObject *row_dict = NULL;
    int j;

    if (!(row_dict = PyDict_New())) {
        return NULL;
    }

    for (j = 0; j < self->num_fields; ++j) {
        PyObject *val = _query_value_in_column(self, j);

        if (!val) {
            Py_DECREF(row_dict);
            return NULL;
        }
        /* NOTE(review): PyDict_SetItemString return value is ignored;
           a failure here would be silently dropped -- confirm acceptable */
        PyDict_SetItemString(row_dict, PQfname(self->result, j), val);
        Py_DECREF(val);
    }

    return row_dict;
}

/* Return the current row as a dict and move to the next one.
*/
static PyObject *
query_next_dict(queryObject *self, PyObject *noargs)
{
    PyObject *row_dict = NULL;

    /* exhausted: signal end of iteration */
    if (self->current_row >= self->max_row) {
        PyErr_SetNone(PyExc_StopIteration);
        return NULL;
    }
    row_dict = _query_row_as_dict(self);
    /* only advance the cursor when the row could be built */
    if (row_dict) ++self->current_row;
    return row_dict;
}

/* Retrieve one row from the result as a dictionary. */
static char query_onedict__doc__[] =
"onedict() -- Get one row from the result of a query\n\n"
"Only one row from the result is returned as a dictionary with\n"
"the field names used as the keys.\n"
"This method can be called multiple times to return more rows.\n"
"It returns None if the result does not contain one more row.\n";

static PyObject *
query_onedict(queryObject *self, PyObject *noargs)
{
    PyObject *row_dict;

    /* exhausted result: return None rather than raising */
    if (self->current_row >= self->max_row) {
        Py_INCREF(Py_None);
        return Py_None;
    }
    row_dict = _query_row_as_dict(self);
    if (row_dict) ++self->current_row;
    return row_dict;
}

/* Retrieve the single row from the result as a dictionary. */
static char query_singledict__doc__[] =
"singledict() -- Get the result of a query as single row\n\n"
"The single row from the query result is returned as a dictionary with\n"
"the field names used as the keys.\n"
"This method returns the same single row when called multiple times.\n"
"It raises an InvalidResultError if the result doesn't have exactly one row,\n"
"which will be of type NoResultError or MultipleResultsError specifically.\n";

static PyObject *
query_singledict(queryObject *self, PyObject *noargs)
{
    PyObject *row_dict;

    /* exactly one row is required; report which way it failed */
    if (self->max_row != 1) {
        if (self->max_row)
            set_error_msg(MultipleResultsError, "Multiple results found");
        else
            set_error_msg(NoResultError, "No result found");
        return NULL;
    }
    /* always rewind so repeated calls return the same row */
    self->current_row = 0;
    row_dict = _query_row_as_dict(self);
    if (row_dict) ++self->current_row;
    return row_dict;
}

/* Retrieve the last query result as a list of dictionaries.
*/
static char query_dictresult__doc__[] =
"dictresult() -- Get the result of a query\n\n"
"The result is returned as a list of rows, each one a dictionary with\n"
"the field names used as the keys.\n";

static PyObject *
query_dictresult(queryObject *self, PyObject *noargs)
{
    PyObject *result_list;
    int i;

    if (!(result_list = PyList_New(self->max_row))) {
        return NULL;
    }

    /* reuse the dict iterator step to fill the list row by row */
    for (i = self->current_row = 0; i < self->max_row; ++i) {
        PyObject *row_dict = query_next_dict(self, noargs);

        if (!row_dict) {
            Py_DECREF(result_list);
            return NULL;
        }
        /* PyList_SET_ITEM steals the reference to row_dict */
        PyList_SET_ITEM(result_list, i, row_dict);
    }

    return result_list;
}

/* Retrieve last result as iterator of dictionaries. */
/* NOTE(review): the docstring below has a doubled "a a"; it is a
   runtime string and therefore left unchanged here */
static char query_dictiter__doc__[] =
"dictiter() -- Get the result of a query\n\n"
"The result is returned as an iterator of rows, each one a a dictionary\n"
"with the field names used as the keys.\n";

static PyObject *
query_dictiter(queryObject *self, PyObject *noargs)
{
    /* without the Python helper, fall back to building a full list */
    if (!dictiter) {
        return query_dictresult(self, noargs);
    }

    return PyObject_CallFunction(dictiter, "(O)", self);
}

/* Retrieve one row from the result as a named tuple. */
static char query_onenamed__doc__[] =
"onenamed() -- Get one row from the result of a query\n\n"
"Only one row from the result is returned as a named tuple of fields.\n"
"This method can be called multiple times to return more rows.\n"
"It returns None if the result does not contain one more row.\n";

static PyObject *
query_onenamed(queryObject *self, PyObject *noargs)
{
    /* without the Python helper, fall back to a plain tuple */
    if (!namednext) {
        return query_one(self, noargs);
    }

    /* exhausted result: return None rather than raising */
    if (self->current_row >= self->max_row) {
        Py_INCREF(Py_None);
        return Py_None;
    }

    return PyObject_CallFunction(namednext, "(O)", self);
}

/* Retrieve the single row from the result as a named tuple.
*/
static char query_singlenamed__doc__[] =
"singlenamed() -- Get the result of a query as single row\n\n"
"The single row from the query result is returned as named tuple of fields.\n"
"This method returns the same single row when called multiple times.\n"
"It raises an InvalidResultError if the result doesn't have exactly one row,\n"
"which will be of type NoResultError or MultipleResultsError specifically.\n";

static PyObject *
query_singlenamed(queryObject *self, PyObject *noargs)
{
    /* without the Python helper, fall back to a plain tuple */
    if (!namednext) {
        return query_single(self, noargs);
    }

    /* exactly one row is required; report which way it failed */
    if (self->max_row != 1) {
        if (self->max_row)
            set_error_msg(MultipleResultsError, "Multiple results found");
        else
            set_error_msg(NoResultError, "No result found");
        return NULL;
    }
    /* always rewind so repeated calls return the same row */
    self->current_row = 0;

    return PyObject_CallFunction(namednext, "(O)", self);
}

/* Retrieve last result as list of named tuples. */
static char query_namedresult__doc__[] =
"namedresult() -- Get the result of a query\n\n"
"The result is returned as a list of rows, each one a named tuple of fields\n"
"in the order returned by the server.\n";

static PyObject *
query_namedresult(queryObject *self, PyObject *noargs)
{
    PyObject *res, *res_list;

    /* without the Python helper, fall back to plain tuples */
    if (!namediter) {
        return query_getresult(self, noargs);
    }

    res = PyObject_CallFunction(namediter, "(O)", self);
    if (!res) return NULL;
    /* the helper may already return a list; otherwise materialize it */
    if (PyList_Check(res)) return res;
    res_list = PySequence_List(res);
    Py_DECREF(res);
    return res_list;
}

/* Retrieve last result as iterator of named tuples.
*/ static char query_namediter__doc__[] = "namediter() -- Get the result of a query\n\n" "The result is returned as an iterator of rows, each one a named tuple\n" "of fields in the order returned by the server.\n"; static PyObject * query_namediter(queryObject *self, PyObject *noargs) { PyObject *res, *res_iter; if (!namediter) { return query_iter(self); } res = PyObject_CallFunction(namediter, "(O)", self); if (!res) return NULL; if (!PyList_Check(res)) return res; res_iter = (Py_TYPE(res)->tp_iter)((PyObject *) self); Py_DECREF(res); return res_iter; } /* Retrieve the last query result as a list of scalar values. */ static char query_scalarresult__doc__[] = "scalarresult() -- Get query result as scalars\n\n" "The result is returned as a list of scalar values where the values\n" "are the first fields of the rows in the order returned by the server.\n"; static PyObject * query_scalarresult(queryObject *self, PyObject *noargs) { PyObject *result_list; if (!self->num_fields) { set_error_msg(ProgrammingError, "No fields in result"); return NULL; } if (!(result_list = PyList_New(self->max_row))) { return NULL; } for (self->current_row = 0; self->current_row < self->max_row; ++self->current_row) { PyObject *value = _query_value_in_column(self, 0); if (!value) { Py_DECREF(result_list); return NULL; } PyList_SET_ITEM(result_list, self->current_row, value); } return result_list; } /* Retrieve the last query result as iterator of scalar values. 
*/ static char query_scalariter__doc__[] = "scalariter() -- Get query result as scalars\n\n" "The result is returned as an iterator of scalar values where the values\n" "are the first fields of the rows in the order returned by the server.\n"; static PyObject * query_scalariter(queryObject *self, PyObject *noargs) { if (!scalariter) { return query_scalarresult(self, noargs); } if (!self->num_fields) { set_error_msg(ProgrammingError, "No fields in result"); return NULL; } return PyObject_CallFunction(scalariter, "(O)", self); } /* Retrieve one result as scalar value. */ static char query_onescalar__doc__[] = "onescalar() -- Get one scalar value from the result of a query\n\n" "Returns the first field of the next row from the result as a scalar value.\n" "This method can be called multiple times to return more rows as scalars.\n" "It returns None if the result does not contain one more row.\n"; static PyObject * query_onescalar(queryObject *self, PyObject *noargs) { PyObject *value; if (!self->num_fields) { set_error_msg(ProgrammingError, "No fields in result"); return NULL; } if (self->current_row >= self->max_row) { Py_INCREF(Py_None); return Py_None; } value = _query_value_in_column(self, 0); if (value) ++self->current_row; return value; } /* Retrieves the single row from the result as a tuple. 
*/ static char query_singlescalar__doc__[] = "singlescalar() -- Get scalar value from single result of a query\n\n" "Returns the first field of the next row from the result as a scalar value.\n" "This method returns the same single row when called multiple times.\n" "It raises an InvalidResultError if the result doesn't have exactly one row,\n" "which will be of type NoResultError or MultipleResultsError specifically.\n"; static PyObject * query_singlescalar(queryObject *self, PyObject *noargs) { PyObject *value; if (!self->num_fields) { set_error_msg(ProgrammingError, "No fields in result"); return NULL; } if (self->max_row != 1) { if (self->max_row) set_error_msg(MultipleResultsError, "Multiple results found"); else set_error_msg(NoResultError, "No result found"); return NULL; } self->current_row = 0; value = _query_value_in_column(self, 0); if (value) ++self->current_row; return value; } /* Query sequence protocol methods */ static PySequenceMethods query_sequence_methods = { (lenfunc) query_len, /* sq_length */ 0, /* sq_concat */ 0, /* sq_repeat */ (ssizeargfunc) query_getitem, /* sq_item */ 0, /* sq_ass_item */ 0, /* sq_contains */ 0, /* sq_inplace_concat */ 0, /* sq_inplace_repeat */ }; /* Query object methods */ static struct PyMethodDef query_methods[] = { {"getresult", (PyCFunction) query_getresult, METH_NOARGS, query_getresult__doc__}, {"dictresult", (PyCFunction) query_dictresult, METH_NOARGS, query_dictresult__doc__}, {"dictiter", (PyCFunction) query_dictiter, METH_NOARGS, query_dictiter__doc__}, {"namedresult", (PyCFunction) query_namedresult, METH_NOARGS, query_namedresult__doc__}, {"namediter", (PyCFunction) query_namediter, METH_NOARGS, query_namediter__doc__}, {"one", (PyCFunction) query_one, METH_NOARGS, query_one__doc__}, {"single", (PyCFunction) query_single, METH_NOARGS, query_single__doc__}, {"onedict", (PyCFunction) query_onedict, METH_NOARGS, query_onedict__doc__}, {"singledict", (PyCFunction) query_singledict, METH_NOARGS, 
query_singledict__doc__}, {"onenamed", (PyCFunction) query_onenamed, METH_NOARGS, query_onenamed__doc__}, {"singlenamed", (PyCFunction) query_singlenamed, METH_NOARGS, query_singlenamed__doc__}, {"scalarresult", (PyCFunction) query_scalarresult, METH_NOARGS, query_scalarresult__doc__}, {"scalariter", (PyCFunction) query_scalariter, METH_NOARGS, query_scalariter__doc__}, {"onescalar", (PyCFunction) query_onescalar, METH_NOARGS, query_onescalar__doc__}, {"singlescalar", (PyCFunction) query_singlescalar, METH_NOARGS, query_singlescalar__doc__}, {"fieldname", (PyCFunction) query_fieldname, METH_VARARGS, query_fieldname__doc__}, {"fieldnum", (PyCFunction) query_fieldnum, METH_VARARGS, query_fieldnum__doc__}, {"listfields", (PyCFunction) query_listfields, METH_NOARGS, query_listfields__doc__}, {"ntuples", (PyCFunction) query_ntuples, METH_NOARGS, query_ntuples__doc__}, {NULL, NULL} }; static char query__doc__[] = "PyGreSQL query object"; /* Query type definition */ static PyTypeObject queryType = { PyVarObject_HEAD_INIT(NULL, 0) "pg.Query", /* tp_name */ sizeof(queryObject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ (destructor) query_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ 0, /* tp_as_number */ &query_sequence_methods, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ (reprfunc) query_str, /* tp_str */ PyObject_GenericGetAttr, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT |Py_TPFLAGS_HAVE_ITER, /* tp_flags */ query__doc__, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ (getiterfunc) query_iter, /* tp_iter */ (iternextfunc) query_next, /* tp_iternext */ query_methods, /* tp_methods */ }; PyGreSQL-5.1/README.rst0000644000175100077410000000153313466770070014422 0ustar darcypyg00000000000000PyGreSQL - Python interface for PostgreSQL 
==========================================

PyGreSQL is a Python module that interfaces to a PostgreSQL database.
It embeds the PostgreSQL query library to allow easy use of the
powerful PostgreSQL features from a Python script.

PyGreSQL is developed and tested on a NetBSD system, but it should also
run on most other platforms where PostgreSQL and Python are running.
It is based on the PyGres95 code written by Pascal Andre.
D'Arcy (darcy@druid.net) renamed it to PyGreSQL starting with
version 2.0 and serves as the "BDFL" of PyGreSQL.

Installation
------------

The simplest way to install PyGreSQL is to type::

    $ pip install PyGreSQL

For other ways of installing PyGreSQL and requirements,
see the documentation.

Documentation
-------------

The documentation is available at http://www.pygresql.org/.
PyGreSQL-5.1/pgtypes.h0000644000175100077410000001071613466770070014602 0ustar darcypyg00000000000000/* pgtypes - PostgreSQL type definitions

 These are the standard PostgreSQL 11.1 built-in types,
 extracted from src/backend/catalog/pg_type_d.h, because that
 header file is sometimes not available or needs other header
 files to get properly included.

 You can also query pg_type to get this information.
*/ #ifndef PG_TYPE_D_H #define BOOLOID 16 #define BYTEAOID 17 #define CHAROID 18 #define NAMEOID 19 #define INT8OID 20 #define INT2OID 21 #define INT2VECTOROID 22 #define INT4OID 23 #define REGPROCOID 24 #define TEXTOID 25 #define OIDOID 26 #define TIDOID 27 #define XIDOID 28 #define CIDOID 29 #define OIDVECTOROID 30 #define JSONOID 114 #define XMLOID 142 #define XMLARRAYOID 143 #define JSONARRAYOID 199 #define PGNODETREEOID 194 #define PGNDISTINCTOID 3361 #define PGDEPENDENCIESOID 3402 #define PGDDLCOMMANDOID 32 #define SMGROID 210 #define POINTOID 600 #define LSEGOID 601 #define PATHOID 602 #define BOXOID 603 #define POLYGONOID 604 #define LINEOID 628 #define LINEARRAYOID 629 #define FLOAT4OID 700 #define FLOAT8OID 701 #define ABSTIMEOID 702 #define RELTIMEOID 703 #define TINTERVALOID 704 #define UNKNOWNOID 705 #define CIRCLEOID 718 #define CIRCLEARRAYOID 719 #define CASHOID 790 #define MONEYARRAYOID 791 #define MACADDROID 829 #define INETOID 869 #define CIDROID 650 #define MACADDR8OID 774 #define BOOLARRAYOID 1000 #define BYTEAARRAYOID 1001 #define CHARARRAYOID 1002 #define NAMEARRAYOID 1003 #define INT2ARRAYOID 1005 #define INT2VECTORARRAYOID 1006 #define INT4ARRAYOID 1007 #define REGPROCARRAYOID 1008 #define TEXTARRAYOID 1009 #define OIDARRAYOID 1028 #define TIDARRAYOID 1010 #define XIDARRAYOID 1011 #define CIDARRAYOID 1012 #define OIDVECTORARRAYOID 1013 #define BPCHARARRAYOID 1014 #define VARCHARARRAYOID 1015 #define INT8ARRAYOID 1016 #define POINTARRAYOID 1017 #define LSEGARRAYOID 1018 #define PATHARRAYOID 1019 #define BOXARRAYOID 1020 #define FLOAT4ARRAYOID 1021 #define FLOAT8ARRAYOID 1022 #define ABSTIMEARRAYOID 1023 #define RELTIMEARRAYOID 1024 #define TINTERVALARRAYOID 1025 #define POLYGONARRAYOID 1027 #define ACLITEMOID 1033 #define ACLITEMARRAYOID 1034 #define MACADDRARRAYOID 1040 #define MACADDR8ARRAYOID 775 #define INETARRAYOID 1041 #define CIDRARRAYOID 651 #define CSTRINGARRAYOID 1263 #define BPCHAROID 1042 #define VARCHAROID 1043 #define DATEOID 
1082 #define TIMEOID 1083 #define TIMESTAMPOID 1114 #define TIMESTAMPARRAYOID 1115 #define DATEARRAYOID 1182 #define TIMEARRAYOID 1183 #define TIMESTAMPTZOID 1184 #define TIMESTAMPTZARRAYOID 1185 #define INTERVALOID 1186 #define INTERVALARRAYOID 1187 #define NUMERICARRAYOID 1231 #define TIMETZOID 1266 #define TIMETZARRAYOID 1270 #define BITOID 1560 #define BITARRAYOID 1561 #define VARBITOID 1562 #define VARBITARRAYOID 1563 #define NUMERICOID 1700 #define REFCURSOROID 1790 #define REFCURSORARRAYOID 2201 #define REGPROCEDUREOID 2202 #define REGOPEROID 2203 #define REGOPERATOROID 2204 #define REGCLASSOID 2205 #define REGTYPEOID 2206 #define REGROLEOID 4096 #define REGNAMESPACEOID 4089 #define REGPROCEDUREARRAYOID 2207 #define REGOPERARRAYOID 2208 #define REGOPERATORARRAYOID 2209 #define REGCLASSARRAYOID 2210 #define REGTYPEARRAYOID 2211 #define REGROLEARRAYOID 4097 #define REGNAMESPACEARRAYOID 4090 #define UUIDOID 2950 #define UUIDARRAYOID 2951 #define LSNOID 3220 #define PG_LSNARRAYOID 3221 #define TSVECTOROID 3614 #define GTSVECTOROID 3642 #define TSQUERYOID 3615 #define REGCONFIGOID 3734 #define REGDICTIONARYOID 3769 #define TSVECTORARRAYOID 3643 #define GTSVECTORARRAYOID 3644 #define TSQUERYARRAYOID 3645 #define REGCONFIGARRAYOID 3735 #define REGDICTIONARYARRAYOID 3770 #define JSONBOID 3802 #define JSONBARRAYOID 3807 #define TXID_SNAPSHOTOID 2970 #define TXID_SNAPSHOTARRAYOID 2949 #define INT4RANGEOID 3904 #define INT4RANGEARRAYOID 3905 #define NUMRANGEOID 3906 #define NUMRANGEARRAYOID 3907 #define TSRANGEOID 3908 #define TSRANGEARRAYOID 3909 #define TSTZRANGEOID 3910 #define TSTZRANGEARRAYOID 3911 #define DATERANGEOID 3912 #define DATERANGEARRAYOID 3913 #define INT8RANGEOID 3926 #define INT8RANGEARRAYOID 3927 #define RECORDOID 2249 #define RECORDARRAYOID 2287 #define CSTRINGOID 2275 #define ANYOID 2276 #define ANYARRAYOID 2277 #define VOIDOID 2278 #define TRIGGEROID 2279 #define EVTTRIGGEROID 3838 #define LANGUAGE_HANDLEROID 2280 #define INTERNALOID 2281 #define 
OPAQUEOID 2282 #define ANYELEMENTOID 2283 #define ANYNONARRAYOID 2776 #define ANYENUMOID 3500 #define FDW_HANDLEROID 3115 #define INDEX_AM_HANDLEROID 325 #define TSM_HANDLEROID 3310 #define ANYRANGEOID 3831 #endif /* PG_TYPE_D_H */ PyGreSQL-5.1/pglarge.c0000644000175100077410000003137413466770070014526 0ustar darcypyg00000000000000/* * $Id: pglarge.c 985 2019-04-22 22:07:43Z cito $ * * PyGreSQL - a Python interface for the PostgreSQL database. * * Large object support - this file is part a of the C extension module. * * Copyright (c) 2019 by the PyGreSQL Development Team * * Please see the LICENSE.TXT file for specific restrictions. * */ /* Deallocate large object. */ static void large_dealloc(largeObject *self) { if (self->lo_fd >= 0 && self->pgcnx->valid) lo_close(self->pgcnx->cnx, self->lo_fd); Py_XDECREF(self->pgcnx); PyObject_Del(self); } /* Return large object as string in human readable form. */ static PyObject * large_str(largeObject *self) { char str[80]; sprintf(str, self->lo_fd >= 0 ? "Opened large object, oid %ld" : "Closed large object, oid %ld", (long) self->lo_oid); return PyStr_FromString(str); } /* Check validity of large object. */ static int _check_lo_obj(largeObject *self, int level) { if (!_check_cnx_obj(self->pgcnx)) return 0; if (!self->lo_oid) { set_error_msg(IntegrityError, "Object is not valid (null oid)"); return 0; } if (level & CHECK_OPEN) { if (self->lo_fd < 0) { PyErr_SetString(PyExc_IOError, "Object is not opened"); return 0; } } if (level & CHECK_CLOSE) { if (self->lo_fd >= 0) { PyErr_SetString(PyExc_IOError, "Object is already opened"); return 0; } } return 1; } /* Get large object attributes. 
*/ static PyObject * large_getattr(largeObject *self, PyObject *nameobj) { const char *name = PyStr_AsString(nameobj); /* list postgreSQL large object fields */ /* associated pg connection object */ if (!strcmp(name, "pgcnx")) { if (_check_lo_obj(self, 0)) { Py_INCREF(self->pgcnx); return (PyObject *) (self->pgcnx); } PyErr_Clear(); Py_INCREF(Py_None); return Py_None; } /* large object oid */ if (!strcmp(name, "oid")) { if (_check_lo_obj(self, 0)) return PyInt_FromLong(self->lo_oid); PyErr_Clear(); Py_INCREF(Py_None); return Py_None; } /* error (status) message */ if (!strcmp(name, "error")) return PyStr_FromString(PQerrorMessage(self->pgcnx->cnx)); /* seeks name in methods (fallback) */ return PyObject_GenericGetAttr((PyObject *) self, nameobj); } /* Get the list of large object attributes. */ static PyObject * large_dir(largeObject *self, PyObject *noargs) { PyObject *attrs; attrs = PyObject_Dir(PyObject_Type((PyObject *) self)); PyObject_CallMethod( attrs, "extend", "[sss]", "oid", "pgcnx", "error"); return attrs; } /* Open large object. */ static char large_open__doc__[] = "open(mode) -- open access to large object with specified mode\n\n" "The mode must be one of INV_READ, INV_WRITE (module level constants).\n"; static PyObject * large_open(largeObject *self, PyObject *args) { int mode, fd; /* gets arguments */ if (!PyArg_ParseTuple(args, "i", &mode)) { PyErr_SetString(PyExc_TypeError, "The open() method takes an integer argument"); return NULL; } /* check validity */ if (!_check_lo_obj(self, CHECK_CLOSE)) { return NULL; } /* opens large object */ if ((fd = lo_open(self->pgcnx->cnx, self->lo_oid, mode)) == -1) { PyErr_SetString(PyExc_IOError, "Can't open large object"); return NULL; } self->lo_fd = fd; /* no error : returns Py_None */ Py_INCREF(Py_None); return Py_None; } /* Close large object. 
*/ static char large_close__doc__[] = "close() -- close access to large object data"; static PyObject * large_close(largeObject *self, PyObject *noargs) { /* checks validity */ if (!_check_lo_obj(self, CHECK_OPEN)) { return NULL; } /* closes large object */ if (lo_close(self->pgcnx->cnx, self->lo_fd)) { PyErr_SetString(PyExc_IOError, "Error while closing large object fd"); return NULL; } self->lo_fd = -1; /* no error : returns Py_None */ Py_INCREF(Py_None); return Py_None; } /* Read from large object. */ static char large_read__doc__[] = "read(size) -- read from large object to sized string\n\n" "Object must be opened in read mode before calling this method.\n"; static PyObject * large_read(largeObject *self, PyObject *args) { int size; PyObject *buffer; /* gets arguments */ if (!PyArg_ParseTuple(args, "i", &size)) { PyErr_SetString(PyExc_TypeError, "Method read() takes an integer argument"); return NULL; } if (size <= 0) { PyErr_SetString(PyExc_ValueError, "Method read() takes a positive integer as argument"); return NULL; } /* checks validity */ if (!_check_lo_obj(self, CHECK_OPEN)) { return NULL; } /* allocate buffer and runs read */ buffer = PyBytes_FromStringAndSize((char *) NULL, size); if ((size = lo_read(self->pgcnx->cnx, self->lo_fd, PyBytes_AS_STRING((PyBytesObject *) (buffer)), size)) == -1) { PyErr_SetString(PyExc_IOError, "Error while reading"); Py_XDECREF(buffer); return NULL; } /* resize buffer and returns it */ _PyBytes_Resize(&buffer, size); return buffer; } /* Write to large object. 
*/ static char large_write__doc__[] = "write(string) -- write sized string to large object\n\n" "Object must be opened in read mode before calling this method.\n"; static PyObject * large_write(largeObject *self, PyObject *args) { char *buffer; int size, bufsize; /* gets arguments */ if (!PyArg_ParseTuple(args, "s#", &buffer, &bufsize)) { PyErr_SetString(PyExc_TypeError, "Method write() expects a sized string as argument"); return NULL; } /* checks validity */ if (!_check_lo_obj(self, CHECK_OPEN)) { return NULL; } /* sends query */ if ((size = lo_write(self->pgcnx->cnx, self->lo_fd, buffer, bufsize)) != bufsize) { PyErr_SetString(PyExc_IOError, "Buffer truncated during write"); return NULL; } /* no error : returns Py_None */ Py_INCREF(Py_None); return Py_None; } /* Go to position in large object. */ static char large_seek__doc__[] = "seek(offset, whence) -- move to specified position\n\n" "Object must be opened before calling this method. The whence option\n" "can be SEEK_SET, SEEK_CUR or SEEK_END (module level constants).\n"; static PyObject * large_seek(largeObject *self, PyObject *args) { /* offset and whence are initialized to keep compiler happy */ int ret, offset = 0, whence = 0; /* gets arguments */ if (!PyArg_ParseTuple(args, "ii", &offset, &whence)) { PyErr_SetString(PyExc_TypeError, "Method lseek() expects two integer arguments"); return NULL; } /* checks validity */ if (!_check_lo_obj(self, CHECK_OPEN)) { return NULL; } /* sends query */ if ((ret = lo_lseek( self->pgcnx->cnx, self->lo_fd, offset, whence)) == -1) { PyErr_SetString(PyExc_IOError, "Error while moving cursor"); return NULL; } /* returns position */ return PyInt_FromLong(ret); } /* Get large object size. 
*/
static char large_size__doc__[] =
"size() -- return large object size\n\n"
"The object must be opened before calling this method.\n";

static PyObject *
large_size(largeObject *self, PyObject *noargs)
{
    int start, end;

    /* checks validity */
    if (!_check_lo_obj(self, CHECK_OPEN)) {
        return NULL;
    }

    /* gets current position, so it can be restored afterwards */
    if ((start = lo_tell(self->pgcnx->cnx, self->lo_fd)) == -1) {
        PyErr_SetString(PyExc_IOError,
                        "Error while getting current position");
        return NULL;
    }

    /* gets end position by seeking to the end of the object */
    if ((end = lo_lseek(self->pgcnx->cnx, self->lo_fd, 0, SEEK_END)) == -1) {
        PyErr_SetString(PyExc_IOError,
                        "Error while getting end position");
        return NULL;
    }

    /* move back to start position */
    if ((start = lo_lseek(
        self->pgcnx->cnx, self->lo_fd, start, SEEK_SET)) == -1)
    {
        PyErr_SetString(PyExc_IOError,
                        "Error while moving back to first position");
        return NULL;
    }

    /* returns size (the end offset equals the object size) */
    return PyInt_FromLong(end);
}

/* Get large object cursor position. */
static char large_tell__doc__[] =
"tell() -- give current position in large object\n\n"
"The object must be opened before calling this method.\n";

static PyObject *
large_tell(largeObject *self, PyObject *noargs)
{
    int start;

    /* checks validity */
    if (!_check_lo_obj(self, CHECK_OPEN)) {
        return NULL;
    }

    /* gets current position */
    if ((start = lo_tell(self->pgcnx->cnx, self->lo_fd)) == -1) {
        PyErr_SetString(PyExc_IOError, "Error while getting position");
        return NULL;
    }

    /* returns current offset within the large object */
    return PyInt_FromLong(start);
}

/* Export large object as unix file.
*/ static char large_export__doc__[] = "export(filename) -- export large object data to specified file\n\n" "The object must be closed when calling this method.\n"; static PyObject * large_export(largeObject *self, PyObject *args) { char *name; /* checks validity */ if (!_check_lo_obj(self, CHECK_CLOSE)) { return NULL; } /* gets arguments */ if (!PyArg_ParseTuple(args, "s", &name)) { PyErr_SetString(PyExc_TypeError, "The method export() takes a filename as argument"); return NULL; } /* runs command */ if (lo_export(self->pgcnx->cnx, self->lo_oid, name) != 1) { PyErr_SetString(PyExc_IOError, "Error while exporting large object"); return NULL; } Py_INCREF(Py_None); return Py_None; } /* Delete a large object. */ static char large_unlink__doc__[] = "unlink() -- destroy large object\n\n" "The object must be closed when calling this method.\n"; static PyObject * large_unlink(largeObject *self, PyObject *noargs) { /* checks validity */ if (!_check_lo_obj(self, CHECK_CLOSE)) { return NULL; } /* deletes the object, invalidate it on success */ if (lo_unlink(self->pgcnx->cnx, self->lo_oid) != 1) { PyErr_SetString(PyExc_IOError, "Error while unlinking large object"); return NULL; } self->lo_oid = 0; Py_INCREF(Py_None); return Py_None; } /* Large object methods */ static struct PyMethodDef large_methods[] = { {"__dir__", (PyCFunction) large_dir, METH_NOARGS, NULL}, {"open", (PyCFunction) large_open, METH_VARARGS, large_open__doc__}, {"close", (PyCFunction) large_close, METH_NOARGS, large_close__doc__}, {"read", (PyCFunction) large_read, METH_VARARGS, large_read__doc__}, {"write", (PyCFunction) large_write, METH_VARARGS, large_write__doc__}, {"seek", (PyCFunction) large_seek, METH_VARARGS, large_seek__doc__}, {"size", (PyCFunction) large_size, METH_NOARGS, large_size__doc__}, {"tell", (PyCFunction) large_tell, METH_NOARGS, large_tell__doc__}, {"export",(PyCFunction) large_export, METH_VARARGS, large_export__doc__}, {"unlink",(PyCFunction) large_unlink, METH_NOARGS, 
large_unlink__doc__}, {NULL, NULL} }; static char large__doc__[] = "PostgreSQL large object"; /* Large object type definition */ static PyTypeObject largeType = { PyVarObject_HEAD_INIT(NULL, 0) "pg.LargeObject", /* tp_name */ sizeof(largeObject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ (destructor) large_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ (reprfunc) large_str, /* tp_str */ (getattrofunc) large_getattr, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ large__doc__, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ large_methods, /* tp_methods */ }; PyGreSQL-5.1/PyGreSQL.egg-info/0000755000175100077410000000000013470245541016024 5ustar darcypyg00000000000000PyGreSQL-5.1/PyGreSQL.egg-info/top_level.txt0000644000175100077410000000001413470245541020551 0ustar darcypyg00000000000000_pg pg pgdb PyGreSQL-5.1/PyGreSQL.egg-info/SOURCES.txt0000644000175100077410000001450213470245541017712 0ustar darcypyg00000000000000LICENSE.txt MANIFEST.in README.rst pg.py pgconn.c pgdb.py pginternal.c pglarge.c pgmodule.c pgnotice.c pgquery.c pgsource.c pgtypes.h py3c.h setup.cfg setup.py PyGreSQL.egg-info/PKG-INFO PyGreSQL.egg-info/SOURCES.txt PyGreSQL.egg-info/dependency_links.txt PyGreSQL.egg-info/not-zip-safe PyGreSQL.egg-info/top_level.txt docs/Makefile docs/about.rst docs/about.txt docs/announce.rst docs/conf.py docs/copyright.rst docs/make.bat docs/requirements.txt docs/start.txt docs/toc.txt docs/_build/html/about.html docs/_build/html/announce.html docs/_build/html/copyright.html docs/_build/html/genindex.html docs/_build/html/index.html docs/_build/html/py-modindex.html docs/_build/html/search.html docs/_build/html/searchindex.js 
docs/_build/html/_sources/about.rst.txt docs/_build/html/_sources/announce.rst.txt docs/_build/html/_sources/copyright.rst.txt docs/_build/html/_sources/index.rst.txt docs/_build/html/_sources/community/index.rst.txt docs/_build/html/_sources/contents/changelog.rst.txt docs/_build/html/_sources/contents/examples.rst.txt docs/_build/html/_sources/contents/general.rst.txt docs/_build/html/_sources/contents/index.rst.txt docs/_build/html/_sources/contents/install.rst.txt docs/_build/html/_sources/contents/tutorial.rst.txt docs/_build/html/_sources/contents/pg/adaptation.rst.txt docs/_build/html/_sources/contents/pg/connection.rst.txt docs/_build/html/_sources/contents/pg/db_types.rst.txt docs/_build/html/_sources/contents/pg/db_wrapper.rst.txt docs/_build/html/_sources/contents/pg/index.rst.txt docs/_build/html/_sources/contents/pg/introduction.rst.txt docs/_build/html/_sources/contents/pg/large_objects.rst.txt docs/_build/html/_sources/contents/pg/module.rst.txt docs/_build/html/_sources/contents/pg/notification.rst.txt docs/_build/html/_sources/contents/pg/query.rst.txt docs/_build/html/_sources/contents/pgdb/adaptation.rst.txt docs/_build/html/_sources/contents/pgdb/connection.rst.txt docs/_build/html/_sources/contents/pgdb/cursor.rst.txt docs/_build/html/_sources/contents/pgdb/index.rst.txt docs/_build/html/_sources/contents/pgdb/introduction.rst.txt docs/_build/html/_sources/contents/pgdb/module.rst.txt docs/_build/html/_sources/contents/pgdb/typecache.rst.txt docs/_build/html/_sources/contents/pgdb/types.rst.txt docs/_build/html/_sources/contents/postgres/advanced.rst.txt docs/_build/html/_sources/contents/postgres/basic.rst.txt docs/_build/html/_sources/contents/postgres/func.rst.txt docs/_build/html/_sources/contents/postgres/index.rst.txt docs/_build/html/_sources/contents/postgres/syscat.rst.txt docs/_build/html/_sources/download/index.rst.txt docs/_build/html/_static/ajax-loader.gif docs/_build/html/_static/basic.css docs/_build/html/_static/classic.css 
docs/_build/html/_static/comment-bright.png docs/_build/html/_static/comment-close.png docs/_build/html/_static/comment.png docs/_build/html/_static/default.css docs/_build/html/_static/doctools.js docs/_build/html/_static/down-pressed.png docs/_build/html/_static/down.png docs/_build/html/_static/favicon.ico docs/_build/html/_static/file.png docs/_build/html/_static/jquery-3.1.0.js docs/_build/html/_static/jquery.js docs/_build/html/_static/minus.png docs/_build/html/_static/plus.png docs/_build/html/_static/pygments.css docs/_build/html/_static/pygresql.css docs/_build/html/_static/pygresql.png docs/_build/html/_static/searchtools.js docs/_build/html/_static/sidebar.js docs/_build/html/_static/underscore-1.3.1.js docs/_build/html/_static/underscore.js docs/_build/html/_static/up-pressed.png docs/_build/html/_static/up.png docs/_build/html/_static/websupport.js docs/_build/html/community/index.html docs/_build/html/contents/changelog.html docs/_build/html/contents/examples.html docs/_build/html/contents/general.html docs/_build/html/contents/index.html docs/_build/html/contents/install.html docs/_build/html/contents/tutorial.html docs/_build/html/contents/pg/adaptation.html docs/_build/html/contents/pg/connection.html docs/_build/html/contents/pg/db_types.html docs/_build/html/contents/pg/db_wrapper.html docs/_build/html/contents/pg/index.html docs/_build/html/contents/pg/introduction.html docs/_build/html/contents/pg/large_objects.html docs/_build/html/contents/pg/module.html docs/_build/html/contents/pg/notification.html docs/_build/html/contents/pg/query.html docs/_build/html/contents/pgdb/adaptation.html docs/_build/html/contents/pgdb/connection.html docs/_build/html/contents/pgdb/cursor.html docs/_build/html/contents/pgdb/index.html docs/_build/html/contents/pgdb/introduction.html docs/_build/html/contents/pgdb/module.html docs/_build/html/contents/pgdb/typecache.html docs/_build/html/contents/pgdb/types.html docs/_build/html/contents/postgres/advanced.html 
docs/_build/html/contents/postgres/basic.html docs/_build/html/contents/postgres/func.html docs/_build/html/contents/postgres/index.html docs/_build/html/contents/postgres/syscat.html docs/_build/html/download/index.html docs/_static/favicon.ico docs/_static/pygresql.css_t docs/_static/pygresql.png docs/_templates/layout.html docs/community/bugtracker.rst docs/community/homes.rst docs/community/index.rst docs/community/mailinglist.rst docs/community/source.rst docs/community/support.rst docs/contents/changelog.rst docs/contents/examples.rst docs/contents/general.rst docs/contents/index.rst docs/contents/install.rst docs/contents/tutorial.rst docs/contents/pg/adaptation.rst docs/contents/pg/connection.rst docs/contents/pg/db_types.rst docs/contents/pg/db_wrapper.rst docs/contents/pg/index.rst docs/contents/pg/introduction.rst docs/contents/pg/large_objects.rst docs/contents/pg/module.rst docs/contents/pg/notification.rst docs/contents/pg/query.rst docs/contents/pgdb/adaptation.rst docs/contents/pgdb/connection.rst docs/contents/pgdb/cursor.rst docs/contents/pgdb/index.rst docs/contents/pgdb/introduction.rst docs/contents/pgdb/module.rst docs/contents/pgdb/typecache.rst docs/contents/pgdb/types.rst docs/contents/postgres/advanced.rst docs/contents/postgres/basic.rst docs/contents/postgres/func.rst docs/contents/postgres/index.rst docs/contents/postgres/syscat.rst docs/download/download.rst docs/download/files.rst docs/download/index.rst tests/__init__.py tests/dbapi20.py tests/test_classic.py tests/test_classic_connection.py tests/test_classic_dbwrapper.py tests/test_classic_functions.py tests/test_classic_largeobj.py tests/test_classic_notification.py tests/test_dbapi20.py tests/test_dbapi20_copy.py tests/test_tutorial.pyPyGreSQL-5.1/PyGreSQL.egg-info/dependency_links.txt0000644000175100077410000000000113470245541022072 0ustar darcypyg00000000000000 PyGreSQL-5.1/PyGreSQL.egg-info/not-zip-safe0000644000175100077410000000000113470245541020252 0ustar 
darcypyg00000000000000 PyGreSQL-5.1/PyGreSQL.egg-info/PKG-INFO0000644000175100077410000000265013470245541017124 0ustar darcypyg00000000000000Metadata-Version: 1.1 Name: PyGreSQL Version: 5.1 Summary: Python PostgreSQL Interfaces Home-page: http://www.pygresql.org Author: D'Arcy J. M. Cain Author-email: darcy@PyGreSQL.org License: PostgreSQL Download-URL: http://www.pygresql.org/download/ Description: PyGreSQL is an open-source Python module that interfaces to a PostgreSQL database. It embeds the PostgreSQL query library to allow easy use of the powerful PostgreSQL features from a Python script. Keywords: pygresql postgresql database api dbapi Platform: any Classifier: Development Status :: 6 - Mature Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: PostgreSQL License Classifier: Operating System :: OS Independent Classifier: Programming Language :: C Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: SQL Classifier: Topic :: Database Classifier: Topic :: Database :: Front-Ends Classifier: Topic :: Software Development :: Libraries :: Python Modules PyGreSQL-5.1/PKG-INFO0000644000175100077410000000265013470245543014026 0ustar darcypyg00000000000000Metadata-Version: 1.1 Name: PyGreSQL Version: 5.1 Summary: Python PostgreSQL Interfaces Home-page: http://www.pygresql.org Author: D'Arcy J. M. 
Cain Author-email: darcy@PyGreSQL.org License: PostgreSQL Download-URL: http://www.pygresql.org/download/ Description: PyGreSQL is an open-source Python module that interfaces to a PostgreSQL database. It embeds the PostgreSQL query library to allow easy use of the powerful PostgreSQL features from a Python script. Keywords: pygresql postgresql database api dbapi Platform: any Classifier: Development Status :: 6 - Mature Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: PostgreSQL License Classifier: Operating System :: OS Independent Classifier: Programming Language :: C Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: SQL Classifier: Topic :: Database Classifier: Topic :: Database :: Front-Ends Classifier: Topic :: Software Development :: Libraries :: Python Modules PyGreSQL-5.1/setup.py0000755000175100077410000002057713466770070014461 0ustar darcypyg00000000000000#!/usr/bin/python # # $Id: setup.py 991 2019-04-24 19:46:20Z cito $ # # PyGreSQL - a Python interface for the PostgreSQL database. # # Copyright (c) 2019 by the PyGreSQL Development Team # # Please see the LICENSE.TXT file for specific restrictions. """Setup script for PyGreSQL version 5.1 PyGreSQL is an open-source Python module that interfaces to a PostgreSQL database. It embeds the PostgreSQL query library to allow easy use of the powerful PostgreSQL features from a Python script. Authors and history: * PyGreSQL written 1997 by D'Arcy J.M. 
Cain * based on code written 1995 by Pascal Andre * setup script created 2000 by Mark Alexander * improved 2000 by Jeremy Hylton * improved 2001 by Gerhard Haering * improved 2006 to 2018 by Christoph Zwerschke Prerequisites to be installed: * Python including devel package (header files and distutils) * PostgreSQL libs and devel packages (header file of the libpq client) * PostgreSQL pg_config tool (usually included in the devel package) (the Windows installer has it as part of the database server feature) PyGreSQL currently supports Python versions 2.6, 2.7 and 3.3 to 3.7, and PostgreSQL versions 9.0 to 9.6 and 10 or 11. Use as follows: python setup.py build # to build the module python setup.py install # to install it See docs.python.org/doc/install/ for more information on using distutils to install Python programs. """ version = '5.1' import sys if (not (2, 6) <= sys.version_info[:2] < (3, 0) and not (3, 3) <= sys.version_info[:2] < (4, 0)): raise Exception("Sorry, PyGreSQL %s" " does not support this Python version" % version) import os import platform import re import warnings try: from setuptools import setup except ImportError: from distutils.core import setup from distutils.extension import Extension from distutils.command.build_ext import build_ext from distutils.ccompiler import get_default_compiler from distutils.sysconfig import get_python_inc, get_python_lib # For historical reasons, PyGreSQL does not install itself as a single # "pygresql" package, but as two top-level modules "pg", providing the # classic interface, and "pgdb" for the modern DB-API 2.0 interface. # These two top-level Python modules share the same C extension "_pg". 
py_modules = ['pg', 'pgdb'] c_sources = ['pgmodule.c'] def pg_config(s): """Retrieve information about installed version of PostgreSQL.""" f = os.popen('pg_config --%s' % s) d = f.readline().strip() if f.close() is not None: raise Exception("pg_config tool is not available.") if not d: raise Exception("Could not get %s information." % s) return d def pg_version(): """Return the PostgreSQL version as a tuple of integers.""" match = re.search(r'(\d+)\.(\d+)', pg_config('version')) if match: return tuple(map(int, match.groups())) return (9, 0) pg_version = pg_version() libraries = ['pq'] # Make sure that the Python header files are searched before # those of PostgreSQL, because PostgreSQL can have its own Python.h include_dirs = [get_python_inc(), pg_config('includedir')] library_dirs = [get_python_lib(), pg_config('libdir')] define_macros = [('PYGRESQL_VERSION', version)] undef_macros = [] extra_compile_args = ['-O2', '-funsigned-char', '-Wall', '-Werror'] class build_pg_ext(build_ext): """Customized build_ext command for PyGreSQL.""" description = "build the PyGreSQL C extension" user_options = build_ext.user_options + [ ('direct-access', None, "enable direct access functions"), ('large-objects', None, "enable large object support"), ('default-vars', None, "enable default variables use"), ('escaping-funcs', None, "enable string escaping functions"), ('ssl-info', None, "use new ssl info functions")] boolean_options = build_ext.boolean_options + [ 'direct-access', 'large-objects', 'default-vars', 'escaping-funcs', 'ssl-info'] def get_compiler(self): """Return the C compiler used for building the extension.""" return self.compiler or get_default_compiler() def initialize_options(self): build_ext.initialize_options(self) self.direct_access = True self.large_objects = True self.default_vars = True self.escaping_funcs = pg_version >= (9, 0) self.ssl_info = pg_version >= (9, 5) if pg_version < (9, 0): warnings.warn("PygreSQL does not support this PostgreSQL version.") def 
finalize_options(self): """Set final values for all build_pg options.""" build_ext.finalize_options(self) if self.direct_access: define_macros.append(('DIRECT_ACCESS', None)) if self.large_objects: define_macros.append(('LARGE_OBJECTS', None)) if self.default_vars: define_macros.append(('DEFAULT_VARS', None)) if self.escaping_funcs and pg_version >= (9, 0): define_macros.append(('ESCAPING_FUNCS', None)) if self.ssl_info and pg_version >= (9, 5): define_macros.append(('SSL_INFO', None)) if sys.platform == 'win32': bits = platform.architecture()[0] if bits == '64bit': # we need to find libpq64 for path in os.environ['PATH'].split(os.pathsep) + [ r'C:\Program Files\PostgreSQL\libpq64']: library_dir = os.path.join(path, 'lib') if not os.path.isdir(library_dir): continue lib = os.path.join(library_dir, 'libpqdll.') if not (os.path.exists(lib + 'lib') or os.path.exists(lib + 'a')): continue include_dir = os.path.join(path, 'include') if not os.path.isdir(include_dir): continue if library_dir not in library_dirs: library_dirs.insert(1, library_dir) if include_dir not in include_dirs: include_dirs.insert(1, include_dir) libraries[0] += 'dll' # libpqdll instead of libpq break compiler = self.get_compiler() if compiler == 'mingw32': # MinGW if bits == '64bit': # needs MinGW-w64 define_macros.append(('MS_WIN64', None)) elif compiler == 'msvc': # Microsoft Visual C++ libraries[0] = 'lib' + libraries[0] extra_compile_args[1:] = ['-J', '-W3', '-WX', '-Dinline=__inline'] # needed for MSVC 9 setup( name="PyGreSQL", version=version, description="Python PostgreSQL Interfaces", long_description=__doc__.split('\n\n', 2)[1], # first passage keywords="pygresql postgresql database api dbapi", author="D'Arcy J. M. 
Cain", author_email="darcy@PyGreSQL.org", url="http://www.pygresql.org", download_url="http://www.pygresql.org/download/", platforms=["any"], license="PostgreSQL", py_modules=py_modules, ext_modules=[Extension('_pg', c_sources, include_dirs=include_dirs, library_dirs=library_dirs, define_macros=define_macros, undef_macros=undef_macros, libraries=libraries, extra_compile_args=extra_compile_args)], zip_safe=False, cmdclass=dict(build_ext=build_pg_ext), test_suite='tests.discover', classifiers=[ "Development Status :: 6 - Mature", "Intended Audience :: Developers", "License :: OSI Approved :: PostgreSQL License", "Operating System :: OS Independent", "Programming Language :: C", 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', "Programming Language :: SQL", "Topic :: Database", "Topic :: Database :: Front-Ends", "Topic :: Software Development :: Libraries :: Python Modules"] ) PyGreSQL-5.1/pgsource.c0000644000175100077410000005701413466770070014733 0ustar darcypyg00000000000000/* * $Id: pgsource.c 985 2019-04-22 22:07:43Z cito $ * * PyGreSQL - a Python interface for the PostgreSQL database. * * The source object - this file is part a of the C extension module. * * Copyright (c) 2019 by the PyGreSQL Development Team * * Please see the LICENSE.TXT file for specific restrictions. * */ /* Deallocate source object. */ static void source_dealloc(sourceObject *self) { if (self->result) PQclear(self->result); Py_XDECREF(self->pgcnx); PyObject_Del(self); } /* Return source object as string in human readable form. 
*/ static PyObject * source_str(sourceObject *self) { switch (self->result_type) { case RESULT_DQL: return format_result(self->result); case RESULT_DDL: case RESULT_DML: return PyStr_FromString(PQcmdStatus(self->result)); case RESULT_EMPTY: default: return PyStr_FromString("(empty PostgreSQL source object)"); } } /* Check source object validity. */ static int _check_source_obj(sourceObject *self, int level) { if (!self->valid) { set_error_msg(OperationalError, "Object has been closed"); return 0; } if ((level & CHECK_RESULT) && !self->result) { set_error_msg(DatabaseError, "No result"); return 0; } if ((level & CHECK_DQL) && self->result_type != RESULT_DQL) { set_error_msg(DatabaseError, "Last query did not return tuples"); return 0; } if ((level & CHECK_CNX) && !_check_cnx_obj(self->pgcnx)) { return 0; } return 1; } /* Get source object attributes. */ static PyObject * source_getattr(sourceObject *self, PyObject *nameobj) { const char *name = PyStr_AsString(nameobj); /* pg connection object */ if (!strcmp(name, "pgcnx")) { if (_check_source_obj(self, 0)) { Py_INCREF(self->pgcnx); return (PyObject *) (self->pgcnx); } Py_INCREF(Py_None); return Py_None; } /* arraysize */ if (!strcmp(name, "arraysize")) return PyInt_FromLong(self->arraysize); /* resulttype */ if (!strcmp(name, "resulttype")) return PyInt_FromLong(self->result_type); /* ntuples */ if (!strcmp(name, "ntuples")) return PyInt_FromLong(self->max_row); /* nfields */ if (!strcmp(name, "nfields")) return PyInt_FromLong(self->num_fields); /* seeks name in methods (fallback) */ return PyObject_GenericGetAttr((PyObject *) self, nameobj); } /* Set source object attributes. 
*/ static int source_setattr(sourceObject *self, char *name, PyObject *v) { /* arraysize */ if (!strcmp(name, "arraysize")) { if (!PyInt_Check(v)) { PyErr_SetString(PyExc_TypeError, "arraysize must be integer"); return -1; } self->arraysize = PyInt_AsLong(v); return 0; } /* unknown attribute */ PyErr_SetString(PyExc_TypeError, "Not a writable attribute"); return -1; } /* Close object. */ static char source_close__doc__[] = "close() -- close query object without deleting it\n\n" "All instances of the query object can no longer be used after this call.\n"; static PyObject * source_close(sourceObject *self, PyObject *noargs) { /* frees result if necessary and invalidates object */ if (self->result) { PQclear(self->result); self->result_type = RESULT_EMPTY; self->result = NULL; } self->valid = 0; /* return None */ Py_INCREF(Py_None); return Py_None; } /* Database query. */ static char source_execute__doc__[] = "execute(sql) -- execute a SQL statement (string)\n\n" "On success, this call returns the number of affected rows, or None\n" "for DQL (SELECT, ...) statements. 
The fetch (fetch(), fetchone()\n" "and fetchall()) methods can be used to get result rows.\n"; static PyObject * source_execute(sourceObject *self, PyObject *sql) { PyObject *tmp_obj = NULL; /* auxiliary string object */ char *query; int encoding; /* checks validity */ if (!_check_source_obj(self, CHECK_CNX)) { return NULL; } encoding = PQclientEncoding(self->pgcnx->cnx); if (PyBytes_Check(sql)) { query = PyBytes_AsString(sql); } else if (PyUnicode_Check(sql)) { tmp_obj = get_encoded_string(sql, encoding); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ query = PyBytes_AsString(tmp_obj); } else { PyErr_SetString(PyExc_TypeError, "Method execute() expects a string as argument"); return NULL; } /* frees previous result */ if (self->result) { PQclear(self->result); self->result = NULL; } self->max_row = 0; self->current_row = 0; self->num_fields = 0; self->encoding = encoding; /* gets result */ Py_BEGIN_ALLOW_THREADS self->result = PQexec(self->pgcnx->cnx, query); Py_END_ALLOW_THREADS /* we don't need the auxiliary string any more */ Py_XDECREF(tmp_obj); /* checks result validity */ if (!self->result) { PyErr_SetString(PyExc_ValueError, PQerrorMessage(self->pgcnx->cnx)); return NULL; } /* this may have changed the datestyle, so we reset the date format in order to force fetching it newly when next time requested */ self->pgcnx->date_format = date_format; /* this is normally NULL */ /* checks result status */ switch (PQresultStatus(self->result)) { /* query succeeded */ case PGRES_TUPLES_OK: /* DQL: returns None (DB-SIG compliant) */ self->result_type = RESULT_DQL; self->max_row = PQntuples(self->result); self->num_fields = PQnfields(self->result); Py_INCREF(Py_None); return Py_None; case PGRES_COMMAND_OK: /* other requests */ case PGRES_COPY_OUT: case PGRES_COPY_IN: { long num_rows; char *tmp; tmp = PQcmdTuples(self->result); if (tmp[0]) { self->result_type = RESULT_DML; num_rows = atol(tmp); } else { self->result_type = RESULT_DDL; num_rows = -1; } 
return PyInt_FromLong(num_rows); } /* query failed */ case PGRES_EMPTY_QUERY: PyErr_SetString(PyExc_ValueError, "Empty query"); break; case PGRES_BAD_RESPONSE: case PGRES_FATAL_ERROR: case PGRES_NONFATAL_ERROR: set_error(ProgrammingError, "Cannot execute command", self->pgcnx->cnx, self->result); break; default: set_error_msg(InternalError, "Internal error: unknown result status"); } /* frees result and returns error */ PQclear(self->result); self->result = NULL; self->result_type = RESULT_EMPTY; return NULL; } /* Get oid status for last query (valid for INSERTs, 0 for other). */ static char source_oidstatus__doc__[] = "oidstatus() -- return oid of last inserted row (if available)"; static PyObject * source_oidstatus(sourceObject *self, PyObject *noargs) { Oid oid; /* checks validity */ if (!_check_source_obj(self, CHECK_RESULT)) { return NULL; } /* retrieves oid status */ if ((oid = PQoidValue(self->result)) == InvalidOid) { Py_INCREF(Py_None); return Py_None; } return PyInt_FromLong(oid); } /* Fetch rows from last result. 
*/ static char source_fetch__doc__[] = "fetch(num) -- return the next num rows from the last result in a list\n\n" "If num parameter is omitted arraysize attribute value is used.\n" "If size equals -1, all rows are fetched.\n"; static PyObject * source_fetch(sourceObject *self, PyObject *args) { PyObject *res_list; int i, k; long size; #if IS_PY3 int encoding; #endif /* checks validity */ if (!_check_source_obj(self, CHECK_RESULT | CHECK_DQL | CHECK_CNX)) { return NULL; } /* checks args */ size = self->arraysize; if (!PyArg_ParseTuple(args, "|l", &size)) { PyErr_SetString(PyExc_TypeError, "fetch(num), with num (integer, optional)"); return NULL; } /* seeks last line */ /* limit size to be within the amount of data we actually have */ if (size == -1 || (self->max_row - self->current_row) < size) { size = self->max_row - self->current_row; } /* allocate list for result */ if (!(res_list = PyList_New(0))) return NULL; #if IS_PY3 encoding = self->encoding; #endif /* builds result */ for (i = 0, k = self->current_row; i < size; ++i, ++k) { PyObject *rowtuple; int j; if (!(rowtuple = PyTuple_New(self->num_fields))) { Py_DECREF(res_list); return NULL; } for (j = 0; j < self->num_fields; ++j) { PyObject *str; if (PQgetisnull(self->result, k, j)) { Py_INCREF(Py_None); str = Py_None; } else { char *s = PQgetvalue(self->result, k, j); Py_ssize_t size = PQgetlength(self->result, k, j); #if IS_PY3 if (PQfformat(self->result, j) == 0) { /* textual format */ str = get_decoded_string(s, size, encoding); if (!str) /* cannot decode */ str = PyBytes_FromStringAndSize(s, size); } else #endif str = PyBytes_FromStringAndSize(s, size); } PyTuple_SET_ITEM(rowtuple, j, str); } if (PyList_Append(res_list, rowtuple)) { Py_DECREF(rowtuple); Py_DECREF(res_list); return NULL; } Py_DECREF(rowtuple); } self->current_row = k; return res_list; } /* Change current row (internal wrapper for all "move" methods). 
*/ static PyObject * _source_move(sourceObject *self, int move) { /* checks validity */ if (!_check_source_obj(self, CHECK_RESULT | CHECK_DQL)) { return NULL; } /* changes the current row */ switch (move) { case QUERY_MOVEFIRST: self->current_row = 0; break; case QUERY_MOVELAST: self->current_row = self->max_row - 1; break; case QUERY_MOVENEXT: if (self->current_row != self->max_row) ++self->current_row; break; case QUERY_MOVEPREV: if (self->current_row > 0) self->current_row--; break; } Py_INCREF(Py_None); return Py_None; } /* Move to first result row. */ static char source_movefirst__doc__[] = "movefirst() -- move to first result row"; static PyObject * source_movefirst(sourceObject *self, PyObject *noargs) { return _source_move(self, QUERY_MOVEFIRST); } /* Move to last result row. */ static char source_movelast__doc__[] = "movelast() -- move to last valid result row"; static PyObject * source_movelast(sourceObject *self, PyObject *noargs) { return _source_move(self, QUERY_MOVELAST); } /* Move to next result row. */ static char source_movenext__doc__[] = "movenext() -- move to next result row"; static PyObject * source_movenext(sourceObject *self, PyObject *noargs) { return _source_move(self, QUERY_MOVENEXT); } /* Move to previous result row. */ static char source_moveprev__doc__[] = "moveprev() -- move to previous result row"; static PyObject * source_moveprev(sourceObject *self, PyObject *noargs) { return _source_move(self, QUERY_MOVEPREV); } /* Put copy data. 
*/ static char source_putdata__doc__[] = "putdata(buffer) -- send data to server during copy from stdin"; static PyObject * source_putdata(sourceObject *self, PyObject *buffer) { PyObject *tmp_obj = NULL; /* an auxiliary object */ char *buf; /* the buffer as encoded string */ Py_ssize_t nbytes; /* length of string */ char *errormsg = NULL; /* error message */ int res; /* direct result of the operation */ PyObject *ret; /* return value */ /* checks validity */ if (!_check_source_obj(self, CHECK_CNX)) { return NULL; } /* make sure that the connection object is valid */ if (!self->pgcnx->cnx) { return NULL; } if (buffer == Py_None) { /* pass None for terminating the operation */ buf = errormsg = NULL; } else if (PyBytes_Check(buffer)) { /* or pass a byte string */ PyBytes_AsStringAndSize(buffer, &buf, &nbytes); } else if (PyUnicode_Check(buffer)) { /* or pass a unicode string */ tmp_obj = get_encoded_string( buffer, PQclientEncoding(self->pgcnx->cnx)); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &buf, &nbytes); } else if (PyErr_GivenExceptionMatches(buffer, PyExc_BaseException)) { /* or pass a Python exception for sending an error message */ tmp_obj = PyObject_Str(buffer); if (PyUnicode_Check(tmp_obj)) { PyObject *obj = tmp_obj; tmp_obj = get_encoded_string( obj, PQclientEncoding(self->pgcnx->cnx)); Py_DECREF(obj); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ } errormsg = PyBytes_AsString(tmp_obj); buf = NULL; } else { PyErr_SetString(PyExc_TypeError, "Method putdata() expects a buffer, None" " or an exception as argument"); return NULL; } /* checks validity */ if (!_check_source_obj(self, CHECK_CNX | CHECK_RESULT) || PQresultStatus(self->result) != PGRES_COPY_IN) { PyErr_SetString(PyExc_IOError, "Connection is invalid or not in copy_in state"); Py_XDECREF(tmp_obj); return NULL; } if (buf) { res = nbytes ? 
PQputCopyData(self->pgcnx->cnx, buf, (int) nbytes) : 1; } else { res = PQputCopyEnd(self->pgcnx->cnx, errormsg); } Py_XDECREF(tmp_obj); if (res != 1) { PyErr_SetString(PyExc_IOError, PQerrorMessage(self->pgcnx->cnx)); return NULL; } if (buf) { /* buffer has been sent */ ret = Py_None; Py_INCREF(ret); } else { /* copy is done */ PGresult *result; /* final result of the operation */ Py_BEGIN_ALLOW_THREADS; result = PQgetResult(self->pgcnx->cnx); Py_END_ALLOW_THREADS; if (PQresultStatus(result) == PGRES_COMMAND_OK) { char *tmp; long num_rows; tmp = PQcmdTuples(result); num_rows = tmp[0] ? atol(tmp) : -1; ret = PyInt_FromLong(num_rows); } else { if (!errormsg) errormsg = PQerrorMessage(self->pgcnx->cnx); PyErr_SetString(PyExc_IOError, errormsg); ret = NULL; } PQclear(self->result); self->result = NULL; self->result_type = RESULT_EMPTY; } return ret; /* None or number of rows */ } /* Get copy data. */ static char source_getdata__doc__[] = "getdata(decode) -- receive data to server during copy to stdout"; static PyObject * source_getdata(sourceObject *self, PyObject *args) { int *decode = 0; /* decode flag */ char *buffer; /* the copied buffer as encoded byte string */ Py_ssize_t nbytes; /* length of the byte string */ PyObject *ret; /* return value */ /* checks validity */ if (!_check_source_obj(self, CHECK_CNX)) { return NULL; } /* make sure that the connection object is valid */ if (!self->pgcnx->cnx) { return NULL; } if (!PyArg_ParseTuple(args, "|i", &decode)) { return NULL; } /* checks validity */ if (!_check_source_obj(self, CHECK_CNX | CHECK_RESULT) || PQresultStatus(self->result) != PGRES_COPY_OUT) { PyErr_SetString(PyExc_IOError, "Connection is invalid or not in copy_out state"); return NULL; } nbytes = PQgetCopyData(self->pgcnx->cnx, &buffer, 0); if (!nbytes || nbytes < -1) { /* an error occurred */ PyErr_SetString(PyExc_IOError, PQerrorMessage(self->pgcnx->cnx)); return NULL; } if (nbytes == -1) { /* copy is done */ PGresult *result; /* final result of the 
operation */ Py_BEGIN_ALLOW_THREADS; result = PQgetResult(self->pgcnx->cnx); Py_END_ALLOW_THREADS; if (PQresultStatus(result) == PGRES_COMMAND_OK) { char *tmp; long num_rows; tmp = PQcmdTuples(result); num_rows = tmp[0] ? atol(tmp) : -1; ret = PyInt_FromLong(num_rows); } else { PyErr_SetString(PyExc_IOError, PQerrorMessage(self->pgcnx->cnx)); ret = NULL; } PQclear(self->result); self->result = NULL; self->result_type = RESULT_EMPTY; } else { /* a row has been returned */ ret = decode ? get_decoded_string( buffer, nbytes, PQclientEncoding(self->pgcnx->cnx)) : PyBytes_FromStringAndSize(buffer, nbytes); PQfreemem(buffer); } return ret; /* buffer or number of rows */ } /* Find field number from string/integer (internal use only). */ static int _source_fieldindex(sourceObject *self, PyObject *param, const char *usage) { int num; /* checks validity */ if (!_check_source_obj(self, CHECK_RESULT | CHECK_DQL)) return -1; /* gets field number */ if (PyStr_Check(param)) { num = PQfnumber(self->result, PyBytes_AsString(param)); } else if (PyInt_Check(param)) { num = (int) PyInt_AsLong(param); } else { PyErr_SetString(PyExc_TypeError, usage); return -1; } /* checks field validity */ if (num < 0 || num >= self->num_fields) { PyErr_SetString(PyExc_ValueError, "Unknown field"); return -1; } return num; } /* Build field information from position (internal use only). */ static PyObject * _source_buildinfo(sourceObject *self, int num) { PyObject *result; /* allocates tuple */ result = PyTuple_New(5); if (!result) { return NULL; } /* affects field information */ PyTuple_SET_ITEM(result, 0, PyInt_FromLong(num)); PyTuple_SET_ITEM(result, 1, PyStr_FromString(PQfname(self->result, num))); PyTuple_SET_ITEM(result, 2, PyInt_FromLong(PQftype(self->result, num))); PyTuple_SET_ITEM(result, 3, PyInt_FromLong(PQfsize(self->result, num))); PyTuple_SET_ITEM(result, 4, PyInt_FromLong(PQfmod(self->result, num))); return result; } /* Lists fields info. 
*/ static char source_listinfo__doc__[] = "listinfo() -- get information for all fields (position, name, type oid)"; static PyObject * source_listInfo(sourceObject *self, PyObject *noargs) { PyObject *result, *info; int i; /* checks validity */ if (!_check_source_obj(self, CHECK_RESULT | CHECK_DQL)) { return NULL; } /* builds result */ if (!(result = PyTuple_New(self->num_fields))) { return NULL; } for (i = 0; i < self->num_fields; ++i) { info = _source_buildinfo(self, i); if (!info) { Py_DECREF(result); return NULL; } PyTuple_SET_ITEM(result, i, info); } /* returns result */ return result; }; /* List fields information for last result. */ static char source_fieldinfo__doc__[] = "fieldinfo(desc) -- get specified field info (position, name, type oid)"; static PyObject * source_fieldinfo(sourceObject *self, PyObject *desc) { int num; /* checks args and validity */ if ((num = _source_fieldindex( self, desc, "Method fieldinfo() needs a string or integer as argument")) == -1) { return NULL; } /* returns result */ return _source_buildinfo(self, num); }; /* Retrieve field value. */ static char source_field__doc__[] = "field(desc) -- return specified field value"; static PyObject * source_field(sourceObject *self, PyObject *desc) { int num; /* checks args and validity */ if ((num = _source_fieldindex( self, desc, "Method field() needs a string or integer as argument")) == -1) { return NULL; } return PyStr_FromString( PQgetvalue(self->result, self->current_row, num)); } /* Get the list of source object attributes. 
*/
/* __dir__ handler: extend the default attribute list with the
   source object's dynamic attributes. */
static PyObject *
source_dir(connObject *self, PyObject *noargs)
{
    PyObject *attrs;

    /* NOTE(review): PyObject_Type() returns a new reference that is never
       released, and the list returned by the "extend" call is also leaked;
       both should be Py_DECREF'd — confirm and fix separately. */
    attrs = PyObject_Dir(PyObject_Type((PyObject *) self));
    PyObject_CallMethod(
        attrs, "extend", "[sssss]",
        "pgcnx", "arraysize", "resulttype", "ntuples", "nfields");

    return attrs;
}

/* Source object methods */
static PyMethodDef source_methods[] = {
    {"__dir__", (PyCFunction) source_dir, METH_NOARGS, NULL},
    {"close", (PyCFunction) source_close,
        METH_NOARGS, source_close__doc__},
    {"execute", (PyCFunction) source_execute,
        METH_O, source_execute__doc__},
    {"oidstatus", (PyCFunction) source_oidstatus,
        METH_NOARGS, source_oidstatus__doc__},
    {"fetch", (PyCFunction) source_fetch,
        METH_VARARGS, source_fetch__doc__},
    {"movefirst", (PyCFunction) source_movefirst,
        METH_NOARGS, source_movefirst__doc__},
    {"movelast", (PyCFunction) source_movelast,
        METH_NOARGS, source_movelast__doc__},
    {"movenext", (PyCFunction) source_movenext,
        METH_NOARGS, source_movenext__doc__},
    {"moveprev", (PyCFunction) source_moveprev,
        METH_NOARGS, source_moveprev__doc__},
    {"putdata", (PyCFunction) source_putdata,
        METH_O, source_putdata__doc__},
    {"getdata", (PyCFunction) source_getdata,
        METH_VARARGS, source_getdata__doc__},
    {"field", (PyCFunction) source_field,
        METH_O, source_field__doc__},
    {"fieldinfo", (PyCFunction) source_fieldinfo,
        METH_O, source_fieldinfo__doc__},
    {"listinfo", (PyCFunction) source_listInfo,
        METH_NOARGS, source_listinfo__doc__},
    {NULL, NULL}
};

static char source__doc__[] = "PyGreSQL source object";

/* Source type definition */
static PyTypeObject sourceType = {
    PyVarObject_HEAD_INIT(NULL, 0)
    "pgdb.Source",                  /* tp_name */
    sizeof(sourceObject),           /* tp_basicsize */
    0,                              /* tp_itemsize */
    /* methods */
    (destructor) source_dealloc,    /* tp_dealloc */
    0,                              /* tp_print */
    0,                              /* tp_getattr */
    (setattrfunc) source_setattr,   /* tp_setattr */
    0,                              /* tp_compare */
    0,                              /* tp_repr */
    0,                              /* tp_as_number */
    0,                              /* tp_as_sequence */
    0,                              /* tp_as_mapping */
    0,                              /* tp_hash */
    0,                              /* tp_call */
    (reprfunc) source_str,          /* tp_str */
    (getattrofunc) source_getattr,  /* tp_getattro */
    0,                              /* tp_setattro */
    0,                              /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,             /* tp_flags */
    source__doc__,                  /* tp_doc */
    0,                              /* tp_traverse */
    0,                              /* tp_clear */
    0,                              /* tp_richcompare */
    0,                              /* tp_weaklistoffset */
    0,                              /* tp_iter */
    0,                              /* tp_iternext */
    source_methods,                 /* tp_methods */
};
PyGreSQL-5.1/pgconn.c0000644000175100077410000013054513466770070014371 0ustar darcypyg00000000000000/*
 * $Id: pgconn.c 985 2019-04-22 22:07:43Z cito $
 *
 * PyGreSQL - a Python interface for the PostgreSQL database.
 *
 * The connection object - this file is part a of the C extension module.
 *
 * Copyright (c) 2019 by the PyGreSQL Development Team
 *
 * Please see the LICENSE.TXT file for specific restrictions.
 *
 */

/* Deallocate connection object: close the libpq connection (releasing
   the GIL while doing so) and drop the callback references. */
static void
conn_dealloc(connObject *self)
{
    if (self->cnx) {
        Py_BEGIN_ALLOW_THREADS
        PQfinish(self->cnx);
        Py_END_ALLOW_THREADS
    }
    Py_XDECREF(self->cast_hook);
    Py_XDECREF(self->notice_receiver);
    PyObject_Del(self);
}

/* Get connection attributes. */
static PyObject *
conn_getattr(connObject *self, PyObject *nameobj)
{
    const char *name = PyStr_AsString(nameobj);

    /*
     * Although we could check individually, there are only a few
     * attributes that don't require a live connection and unless someone
     * has an urgent need, this will have to do.
     */

    /* first exception - close which returns a different error */
    if (strcmp(name, "close") && !self->cnx) {
        PyErr_SetString(PyExc_TypeError, "Connection is not valid");
        return NULL;
    }

    /* list PostgreSQL connection fields */

    /* postmaster host */
    if (!strcmp(name, "host")) {
        char *r = PQhost(self->cnx);
        if (!r || r[0] == '/') /* Pg >= 9.6 can return a Unix socket path */
            r = "localhost";
        return PyStr_FromString(r);
    }

    /* postmaster port */
    if (!strcmp(name, "port"))
        return PyInt_FromLong(atol(PQport(self->cnx)));

    /* selected database */
    if (!strcmp(name, "db"))
        return PyStr_FromString(PQdb(self->cnx));

    /* selected options */
    if (!strcmp(name, "options"))
        return PyStr_FromString(PQoptions(self->cnx));

    /* error (status) message */
    if (!strcmp(name, "error"))
        return PyStr_FromString(PQerrorMessage(self->cnx));

    /* connection status : 1 - OK, 0 - BAD */
    if (!strcmp(name, "status"))
        return PyInt_FromLong(PQstatus(self->cnx) == CONNECTION_OK ? 1 : 0);

    /* provided user name */
    if (!strcmp(name, "user"))
        return PyStr_FromString(PQuser(self->cnx));

    /* protocol version */
    if (!strcmp(name, "protocol_version"))
        return PyInt_FromLong(PQprotocolVersion(self->cnx));

    /* backend version */
    if (!strcmp(name, "server_version"))
        return PyInt_FromLong(PQserverVersion(self->cnx));

    /* descriptor number of connection socket */
    if (!strcmp(name, "socket")) {
        return PyInt_FromLong(PQsocket(self->cnx));
    }

    /* PID of backend process */
    if (!strcmp(name, "backend_pid")) {
        return PyInt_FromLong(PQbackendPID(self->cnx));
    }

    /* whether the connection uses SSL */
    if (!strcmp(name, "ssl_in_use")) {
#ifdef SSL_INFO
        if (PQsslInUse(self->cnx)) {
            Py_INCREF(Py_True); return Py_True;
        }
        else {
            Py_INCREF(Py_False); return Py_False;
        }
#else
        set_error_msg(NotSupportedError, "SSL info functions not supported");
        return NULL;
#endif
    }

    /* SSL attributes */
    if (!strcmp(name, "ssl_attributes")) {
#ifdef SSL_INFO
        return get_ssl_attributes(self->cnx);
#else
        set_error_msg(NotSupportedError, "SSL info functions not supported");
        return NULL;
#endif
    }

    return PyObject_GenericGetAttr((PyObject *) self, nameobj);
}

/* Check connection validity.
   Returns 1 when usable, 0 (with OperationalError set) otherwise. */
static int
_check_cnx_obj(connObject *self)
{
    if (!self || !self->valid || !self->cnx) {
        set_error_msg(OperationalError, "Connection has been closed");
        return 0;
    }
    return 1;
}

/* Create source object. */
static char conn_source__doc__[] =
"source() -- create a new source object for this connection";

static PyObject *
conn_source(connObject *self, PyObject *noargs)
{
    sourceObject *source_obj;

    /* checks validity */
    if (!_check_cnx_obj(self)) {
        return NULL;
    }

    /* allocates new source object */
    if (!(source_obj = PyObject_NEW(sourceObject, &sourceType))) {
        return NULL;
    }

    /* initializes internal parameters */
    Py_XINCREF(self);
    source_obj->pgcnx = self;
    source_obj->result = NULL;
    source_obj->valid = 1;
    source_obj->arraysize = PG_ARRAYSIZE;

    return (PyObject *) source_obj;
}

/* Base method for execution of both unprepared and prepared queries */
static PyObject *
_conn_query(connObject *self, PyObject *args, int prepared)
{
    PyObject *query_str_obj, *param_obj = NULL;
    PGresult* result;
    queryObject* query_obj;
    char *query;
    int encoding, status, nparms = 0;

    if (!self->cnx) {
        PyErr_SetString(PyExc_TypeError, "Connection is not valid");
        return NULL;
    }

    /* get query args */
    if (!PyArg_ParseTuple(args, "O|O", &query_str_obj, &param_obj)) {
        return NULL;
    }

    encoding = PQclientEncoding(self->cnx);

    if (PyBytes_Check(query_str_obj)) {
        query = PyBytes_AsString(query_str_obj);
        query_str_obj = NULL;
    }
    else if (PyUnicode_Check(query_str_obj)) {
        query_str_obj = get_encoded_string(query_str_obj, encoding);
        if (!query_str_obj) return NULL; /* pass the UnicodeEncodeError */
        query = PyBytes_AsString(query_str_obj);
    }
    else {
        PyErr_SetString(PyExc_TypeError,
                        "Method query() expects a string as first argument");
        return NULL;
    }

    /* If param_obj is passed, ensure it's a non-empty tuple. We want to treat
     * an empty tuple the same as no argument since we'll get that when the
     * caller passes no arguments to db.query(), and historic behaviour was
     * to call PQexec() in that case, which can execute multiple commands. */
    if (param_obj) {
        param_obj = PySequence_Fast(
            param_obj, "Method query() expects a sequence as second argument");
        if (!param_obj) {
            Py_XDECREF(query_str_obj);
            return NULL;
        }
        nparms = (int) PySequence_Fast_GET_SIZE(param_obj);

        /* if there's a single argument and it's a list or tuple, it
         * contains the positional arguments. */
        if (nparms == 1) {
            PyObject *first_obj = PySequence_Fast_GET_ITEM(param_obj, 0);
            if (PyList_Check(first_obj) || PyTuple_Check(first_obj)) {
                Py_DECREF(param_obj);
                param_obj = PySequence_Fast(first_obj, NULL);
                nparms = (int) PySequence_Fast_GET_SIZE(param_obj);
            }
        }
    }

    /* gets result */
    if (nparms) {
        /* prepare arguments */
        PyObject **str, **s;
        const char **parms, **p;
        register int i;

        str = (PyObject **) PyMem_Malloc(nparms * sizeof(*str));
        parms = (const char **) PyMem_Malloc(nparms * sizeof(*parms));
        if (!str || !parms) {
            PyMem_Free((void *) parms);
            PyMem_Free(str);
            Py_XDECREF(query_str_obj);
            Py_XDECREF(param_obj);
            return PyErr_NoMemory();
        }

        /* convert optional args to a list of strings -- this allows
         * the caller to pass whatever they like, and prevents us
         * from having to map types to OIDs */
        for (i = 0, s = str, p = parms; i < nparms; ++i, ++p) {
            PyObject *obj = PySequence_Fast_GET_ITEM(param_obj, i);

            if (obj == Py_None) {
                *p = NULL;
            }
            else if (PyBytes_Check(obj)) {
                *p = PyBytes_AsString(obj);
            }
            else if (PyUnicode_Check(obj)) {
                PyObject *str_obj = get_encoded_string(obj, encoding);
                if (!str_obj) {
                    PyMem_Free((void *) parms);
                    while (s != str) { s--; Py_DECREF(*s); }
                    PyMem_Free(str);
                    Py_XDECREF(query_str_obj);
                    Py_XDECREF(param_obj);
                    /* pass the UnicodeEncodeError */
                    return NULL;
                }
                *s++ = str_obj;
                *p = PyBytes_AsString(str_obj);
            }
            else {
                PyObject *str_obj = PyObject_Str(obj);
                if (!str_obj) {
                    PyMem_Free((void *) parms);
                    while (s != str) { s--; Py_DECREF(*s); }
                    PyMem_Free(str);
                    Py_XDECREF(query_str_obj);
                    Py_XDECREF(param_obj);
                    PyErr_SetString(
                        PyExc_TypeError,
                        "Query parameter has no string representation");
                    return NULL;
                }
                *s++ = str_obj;
                *p = PyStr_AsString(str_obj);
            }
        }

        Py_BEGIN_ALLOW_THREADS
        result = prepared ?
            PQexecPrepared(self->cnx, query, nparms,
                           parms, NULL, NULL, 0) :
            PQexecParams(self->cnx, query, nparms,
                         NULL, parms, NULL, NULL, 0);
        Py_END_ALLOW_THREADS

        PyMem_Free((void *) parms);
        while (s != str) { s--; Py_DECREF(*s); }
        PyMem_Free(str);
    }
    else {
        Py_BEGIN_ALLOW_THREADS
        result = prepared ?
            PQexecPrepared(self->cnx, query, 0, NULL, NULL, NULL, 0) :
            PQexec(self->cnx, query);
        Py_END_ALLOW_THREADS
    }

    /* we don't need the query and its params any more */
    Py_XDECREF(query_str_obj);
    Py_XDECREF(param_obj);

    /* checks result validity */
    if (!result) {
        PyErr_SetString(PyExc_ValueError, PQerrorMessage(self->cnx));
        return NULL;
    }

    /* this may have changed the datestyle, so we reset the date format
       in order to force fetching it newly when next time requested */
    self->date_format = date_format; /* this is normally NULL */

    /* checks result status */
    if ((status = PQresultStatus(result)) != PGRES_TUPLES_OK) {
        switch (status) {
            case PGRES_EMPTY_QUERY:
                PyErr_SetString(PyExc_ValueError, "Empty query");
                break;
            case PGRES_BAD_RESPONSE:
            case PGRES_FATAL_ERROR:
            case PGRES_NONFATAL_ERROR:
                set_error(ProgrammingError, "Cannot execute query",
                          self->cnx, result);
                break;
            case PGRES_COMMAND_OK:
                { /* INSERT, UPDATE, DELETE */
                    Oid oid = PQoidValue(result);

                    if (oid == InvalidOid) { /* not a single insert */
                        char *ret = PQcmdTuples(result);

                        if (ret[0]) { /* return number of rows affected */
                            PyObject *obj = PyStr_FromString(ret);
                            PQclear(result);
                            return obj;
                        }
                        PQclear(result);
                        Py_INCREF(Py_None);
                        return Py_None;
                    }
                    /* for a single insert, return the oid */
                    PQclear(result);
                    return PyInt_FromLong(oid);
                }
            case PGRES_COPY_OUT: /* no data will be received */
            case PGRES_COPY_IN:
                PQclear(result);
                Py_INCREF(Py_None);
                return Py_None;
            default:
                set_error_msg(InternalError, "Unknown result status");
        }

        PQclear(result);
        return NULL; /* error detected on query */
    }

    if (!(query_obj = PyObject_NEW(queryObject, &queryType)))
        return PyErr_NoMemory();

    /* stores result and returns object */
    Py_XINCREF(self);
    query_obj->pgcnx = self;
    query_obj->result = result;
    query_obj->encoding = encoding;
    query_obj->current_row = 0;
    query_obj->max_row = PQntuples(result);
    query_obj->num_fields = PQnfields(result);
    query_obj->col_types = get_col_types(result, query_obj->num_fields);
    if (!query_obj->col_types) {
        Py_DECREF(query_obj);
        /* NOTE(review): if query_dealloc already releases pgcnx, this
           extra decref of self would over-release the connection —
           confirm against query_dealloc before changing */
        Py_DECREF(self);
        return NULL;
    }

    return (PyObject *) query_obj;
}

/* Database query */
static char conn_query__doc__[] =
"query(sql, [arg]) -- create a new query object for this connection\n\n"
"You must pass the SQL (string) request and you can optionally pass\n"
"a tuple with positional parameters.\n";

static PyObject *
conn_query(connObject *self, PyObject *args)
{
    return _conn_query(self, args, 0);
}

/* Execute prepared statement. */
static char conn_query_prepared__doc__[] =
"query_prepared(name, [arg]) -- execute a prepared statement\n\n"
"You must pass the name (string) of the prepared statement and you can\n"
"optionally pass a tuple with positional parameters.\n";

static PyObject *
conn_query_prepared(connObject *self, PyObject *args)
{
    return _conn_query(self, args, 1);
}

/* Create prepared statement.
*/
static char conn_prepare__doc__[] =
"prepare(name, sql) -- create a prepared statement\n\n"
"You must pass the name (string) of the prepared statement and the\n"
"SQL (string) request for later execution.\n";

/* Create a named server-side prepared statement with PQprepare.
   Returns None on success; raises ProgrammingError on failure. */
static PyObject *
conn_prepare(connObject *self, PyObject *args)
{
    char *name, *query;
    int name_length, query_length;
    PGresult *result;

    if (!self->cnx) {
        PyErr_SetString(PyExc_TypeError, "Connection is not valid");
        return NULL;
    }

    /* reads args */
    if (!PyArg_ParseTuple(args, "s#s#",
        &name, &name_length, &query, &query_length))
    {
        PyErr_SetString(PyExc_TypeError,
                        "Method prepare() takes two string arguments");
        return NULL;
    }

    /* create prepared statement; release the GIL for the server round trip */
    Py_BEGIN_ALLOW_THREADS
    result = PQprepare(self->cnx, name, query, 0, NULL);
    Py_END_ALLOW_THREADS
    if (result && PQresultStatus(result) == PGRES_COMMAND_OK) {
        PQclear(result);
        Py_INCREF(Py_None);
        return Py_None; /* success */
    }
    set_error(ProgrammingError, "Cannot create prepared statement",
              self->cnx, result);
    if (result) PQclear(result);
    return NULL; /* error */
}

/* Describe prepared statement.
*/ static char conn_describe_prepared__doc__[] = "describe_prepared(name) -- describe a prepared statement\n\n" "You must pass the name (string) of the prepared statement.\n"; static PyObject * conn_describe_prepared(connObject *self, PyObject *args) { char *name; int name_length; PGresult *result; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* reads args */ if (!PyArg_ParseTuple(args, "s#", &name, &name_length)) { PyErr_SetString(PyExc_TypeError, "Method prepare() takes a string argument"); return NULL; } /* describe prepared statement */ Py_BEGIN_ALLOW_THREADS result = PQdescribePrepared(self->cnx, name); Py_END_ALLOW_THREADS if (result && PQresultStatus(result) == PGRES_COMMAND_OK) { queryObject *query_obj = PyObject_NEW(queryObject, &queryType); if (!query_obj) return PyErr_NoMemory(); Py_XINCREF(self); query_obj->pgcnx = self; query_obj->result = result; query_obj->encoding = PQclientEncoding(self->cnx); query_obj->current_row = 0; query_obj->max_row = PQntuples(result); query_obj->num_fields = PQnfields(result); query_obj->col_types = get_col_types(result, query_obj->num_fields); return (PyObject *) query_obj; } set_error(ProgrammingError, "Cannot describe prepared statement", self->cnx, result); if (result) PQclear(result); return NULL; /* error */ } #ifdef DIRECT_ACCESS static char conn_putline__doc__[] = "putline(line) -- send a line directly to the backend"; /* Direct access function: putline. 
*/ static PyObject * conn_putline(connObject *self, PyObject *args) { char *line; int line_length; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* reads args */ if (!PyArg_ParseTuple(args, "s#", &line, &line_length)) { PyErr_SetString(PyExc_TypeError, "Method putline() takes a string argument"); return NULL; } /* sends line to backend */ if (PQputline(self->cnx, line)) { PyErr_SetString(PyExc_IOError, PQerrorMessage(self->cnx)); return NULL; } Py_INCREF(Py_None); return Py_None; } /* Direct access function: getline. */ static char conn_getline__doc__[] = "getline() -- get a line directly from the backend"; static PyObject * conn_getline(connObject *self, PyObject *noargs) { char line[MAX_BUFFER_SIZE]; PyObject *str = NULL; /* GCC */ if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* gets line */ switch (PQgetline(self->cnx, line, MAX_BUFFER_SIZE)) { case 0: str = PyStr_FromString(line); break; case 1: PyErr_SetString(PyExc_MemoryError, "Buffer overflow"); str = NULL; break; case EOF: Py_INCREF(Py_None); str = Py_None; break; } return str; } /* Direct access function: end copy. 
*/ static char conn_endcopy__doc__[] = "endcopy() -- synchronize client and server"; static PyObject * conn_endcopy(connObject *self, PyObject *noargs) { if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* ends direct copy */ if (PQendcopy(self->cnx)) { PyErr_SetString(PyExc_IOError, PQerrorMessage(self->cnx)); return NULL; } Py_INCREF(Py_None); return Py_None; } #endif /* DIRECT_ACCESS */ /* Insert table */ static char conn_inserttable__doc__[] = "inserttable(table, data) -- insert list into table\n\n" "The fields in the list must be in the same order as in the table.\n"; static PyObject * conn_inserttable(connObject *self, PyObject *args) { PGresult *result; char *table, *buffer, *bufpt; int encoding; size_t bufsiz; PyObject *list, *sublist, *item; PyObject *(*getitem) (PyObject *, Py_ssize_t); PyObject *(*getsubitem) (PyObject *, Py_ssize_t); Py_ssize_t i, j, m, n; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* gets arguments */ if (!PyArg_ParseTuple(args, "sO:filter", &table, &list)) { PyErr_SetString( PyExc_TypeError, "Method inserttable() expects a string and a list as arguments"); return NULL; } /* checks list type */ if (PyList_Check(list)) { m = PyList_Size(list); getitem = PyList_GetItem; } else if (PyTuple_Check(list)) { m = PyTuple_Size(list); getitem = PyTuple_GetItem; } else { PyErr_SetString( PyExc_TypeError, "Method inserttable() expects a list or a tuple" " as second argument"); return NULL; } /* allocate buffer */ if (!(buffer = PyMem_Malloc(MAX_BUFFER_SIZE))) return PyErr_NoMemory(); /* starts query */ sprintf(buffer, "copy %s from stdin", table); Py_BEGIN_ALLOW_THREADS result = PQexec(self->cnx, buffer); Py_END_ALLOW_THREADS if (!result) { PyMem_Free(buffer); PyErr_SetString(PyExc_ValueError, PQerrorMessage(self->cnx)); return NULL; } encoding = PQclientEncoding(self->cnx); PQclear(result); n = 0; /* not strictly necessary but avoids warning */ /* feed 
table */ for (i = 0; i < m; ++i) { sublist = getitem(list, i); if (PyTuple_Check(sublist)) { j = PyTuple_Size(sublist); getsubitem = PyTuple_GetItem; } else if (PyList_Check(sublist)) { j = PyList_Size(sublist); getsubitem = PyList_GetItem; } else { PyErr_SetString( PyExc_TypeError, "The second argument must contain a tuple or a list"); return NULL; } if (i) { if (j != n) { PyMem_Free(buffer); PyErr_SetString( PyExc_TypeError, "Arrays contained in second arg must have same size"); return NULL; } } else { n = j; /* never used before this assignment */ } /* builds insert line */ bufpt = buffer; bufsiz = MAX_BUFFER_SIZE - 1; for (j = 0; j < n; ++j) { if (j) { *bufpt++ = '\t'; --bufsiz; } item = getsubitem(sublist, j); /* convert item to string and append to buffer */ if (item == Py_None) { if (bufsiz > 2) { *bufpt++ = '\\'; *bufpt++ = 'N'; bufsiz -= 2; } else bufsiz = 0; } else if (PyBytes_Check(item)) { const char* t = PyBytes_AsString(item); while (*t && bufsiz) { if (*t == '\\' || *t == '\t' || *t == '\n') { *bufpt++ = '\\'; --bufsiz; if (!bufsiz) break; } *bufpt++ = *t++; --bufsiz; } } else if (PyUnicode_Check(item)) { PyObject *s = get_encoded_string(item, encoding); if (!s) { PyMem_Free(buffer); return NULL; /* pass the UnicodeEncodeError */ } else { const char* t = PyBytes_AsString(s); while (*t && bufsiz) { if (*t == '\\' || *t == '\t' || *t == '\n') { *bufpt++ = '\\'; --bufsiz; if (!bufsiz) break; } *bufpt++ = *t++; --bufsiz; } Py_DECREF(s); } } else if (PyInt_Check(item) || PyLong_Check(item)) { PyObject* s = PyObject_Str(item); const char* t = PyStr_AsString(s); while (*t && bufsiz) { *bufpt++ = *t++; --bufsiz; } Py_DECREF(s); } else { PyObject* s = PyObject_Repr(item); const char* t = PyStr_AsString(s); while (*t && bufsiz) { if (*t == '\\' || *t == '\t' || *t == '\n') { *bufpt++ = '\\'; --bufsiz; if (!bufsiz) break; } *bufpt++ = *t++; --bufsiz; } Py_DECREF(s); } if (bufsiz <= 0) { PyMem_Free(buffer); return PyErr_NoMemory(); } } *bufpt++ = '\n'; *bufpt = 
'\0'; /* sends data */ if (PQputline(self->cnx, buffer)) { PyErr_SetString(PyExc_IOError, PQerrorMessage(self->cnx)); PQendcopy(self->cnx); PyMem_Free(buffer); return NULL; } } /* ends query */ if (PQputline(self->cnx, "\\.\n")) { PyErr_SetString(PyExc_IOError, PQerrorMessage(self->cnx)); PQendcopy(self->cnx); PyMem_Free(buffer); return NULL; } if (PQendcopy(self->cnx)) { PyErr_SetString(PyExc_IOError, PQerrorMessage(self->cnx)); PyMem_Free(buffer); return NULL; } PyMem_Free(buffer); /* no error : returns nothing */ Py_INCREF(Py_None); return Py_None; } /* Get transaction state. */ static char conn_transaction__doc__[] = "transaction() -- return the current transaction status"; static PyObject * conn_transaction(connObject *self, PyObject *noargs) { if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } return PyInt_FromLong(PQtransactionStatus(self->cnx)); } /* Get parameter setting. */ static char conn_parameter__doc__[] = "parameter(name) -- look up a current parameter setting"; static PyObject * conn_parameter(connObject *self, PyObject *args) { const char *name; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* get query args */ if (!PyArg_ParseTuple(args, "s", &name)) { PyErr_SetString(PyExc_TypeError, "Method parameter() takes a string as argument"); return NULL; } name = PQparameterStatus(self->cnx, name); if (name) return PyStr_FromString(name); /* unknown parameter, return None */ Py_INCREF(Py_None); return Py_None; } /* Get current date format. 
*/ static char conn_date_format__doc__[] = "date_format() -- return the current date format"; static PyObject * conn_date_format(connObject *self, PyObject *noargs) { const char *fmt; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* check if the date format is cached in the connection */ fmt = self->date_format; if (!fmt) { fmt = date_style_to_format(PQparameterStatus(self->cnx, "DateStyle")); self->date_format = fmt; /* cache the result */ } return PyStr_FromString(fmt); } #ifdef ESCAPING_FUNCS /* Escape literal */ static char conn_escape_literal__doc__[] = "escape_literal(str) -- escape a literal constant for use within SQL"; static PyObject * conn_escape_literal(connObject *self, PyObject *string) { PyObject *tmp_obj = NULL, /* auxiliary string object */ *to_obj; /* string object to return */ char *from, /* our string argument as encoded string */ *to; /* the result as encoded string */ Py_ssize_t from_length; /* length of string */ size_t to_length; /* length of result */ int encoding = -1; /* client encoding */ if (PyBytes_Check(string)) { PyBytes_AsStringAndSize(string, &from, &from_length); } else if (PyUnicode_Check(string)) { encoding = PQclientEncoding(self->cnx); tmp_obj = get_encoded_string(string, encoding); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &from, &from_length); } else { PyErr_SetString( PyExc_TypeError, "Method escape_literal() expects a string as argument"); return NULL; } to = PQescapeLiteral(self->cnx, from, (size_t) from_length); to_length = strlen(to); Py_XDECREF(tmp_obj); if (encoding == -1) to_obj = PyBytes_FromStringAndSize(to, to_length); else to_obj = get_decoded_string(to, to_length, encoding); if (to) PQfreemem(to); return to_obj; } /* Escape identifier */ static char conn_escape_identifier__doc__[] = "escape_identifier(str) -- escape an identifier for use within SQL"; static PyObject * conn_escape_identifier(connObject *self, 
PyObject *string) { PyObject *tmp_obj = NULL, /* auxiliary string object */ *to_obj; /* string object to return */ char *from, /* our string argument as encoded string */ *to; /* the result as encoded string */ Py_ssize_t from_length; /* length of string */ size_t to_length; /* length of result */ int encoding = -1; /* client encoding */ if (PyBytes_Check(string)) { PyBytes_AsStringAndSize(string, &from, &from_length); } else if (PyUnicode_Check(string)) { encoding = PQclientEncoding(self->cnx); tmp_obj = get_encoded_string(string, encoding); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &from, &from_length); } else { PyErr_SetString( PyExc_TypeError, "Method escape_identifier() expects a string as argument"); return NULL; } to = PQescapeIdentifier(self->cnx, from, (size_t) from_length); to_length = strlen(to); Py_XDECREF(tmp_obj); if (encoding == -1) to_obj = PyBytes_FromStringAndSize(to, to_length); else to_obj = get_decoded_string(to, to_length, encoding); if (to) PQfreemem(to); return to_obj; } #endif /* ESCAPING_FUNCS */ /* Escape string */ static char conn_escape_string__doc__[] = "escape_string(str) -- escape a string for use within SQL"; static PyObject * conn_escape_string(connObject *self, PyObject *string) { PyObject *tmp_obj = NULL, /* auxiliary string object */ *to_obj; /* string object to return */ char *from, /* our string argument as encoded string */ *to; /* the result as encoded string */ Py_ssize_t from_length; /* length of string */ size_t to_length; /* length of result */ int encoding = -1; /* client encoding */ if (PyBytes_Check(string)) { PyBytes_AsStringAndSize(string, &from, &from_length); } else if (PyUnicode_Check(string)) { encoding = PQclientEncoding(self->cnx); tmp_obj = get_encoded_string(string, encoding); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &from, &from_length); } else { PyErr_SetString( PyExc_TypeError, "Method escape_string() 
expects a string as argument"); return NULL; } to_length = 2*from_length + 1; if ((Py_ssize_t) to_length < from_length) { /* overflow */ to_length = from_length; from_length = (from_length - 1)/2; } to = (char *) PyMem_Malloc(to_length); to_length = PQescapeStringConn(self->cnx, to, from, (size_t) from_length, NULL); Py_XDECREF(tmp_obj); if (encoding == -1) to_obj = PyBytes_FromStringAndSize(to, to_length); else to_obj = get_decoded_string(to, to_length, encoding); PyMem_Free(to); return to_obj; } /* Escape bytea */ static char conn_escape_bytea__doc__[] = "escape_bytea(data) -- escape binary data for use within SQL as type bytea"; static PyObject * conn_escape_bytea(connObject *self, PyObject *data) { PyObject *tmp_obj = NULL, /* auxiliary string object */ *to_obj; /* string object to return */ char *from, /* our string argument as encoded string */ *to; /* the result as encoded string */ Py_ssize_t from_length; /* length of string */ size_t to_length; /* length of result */ int encoding = -1; /* client encoding */ if (PyBytes_Check(data)) { PyBytes_AsStringAndSize(data, &from, &from_length); } else if (PyUnicode_Check(data)) { encoding = PQclientEncoding(self->cnx); tmp_obj = get_encoded_string(data, encoding); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &from, &from_length); } else { PyErr_SetString( PyExc_TypeError, "Method escape_bytea() expects a string as argument"); return NULL; } to = (char *) PQescapeByteaConn(self->cnx, (unsigned char *) from, (size_t) from_length, &to_length); Py_XDECREF(tmp_obj); if (encoding == -1) to_obj = PyBytes_FromStringAndSize(to, to_length - 1); else to_obj = get_decoded_string(to, to_length - 1, encoding); if (to) PQfreemem(to); return to_obj; } #ifdef LARGE_OBJECTS /* Constructor for large objects (internal use only) */ static largeObject * large_new(connObject *pgcnx, Oid oid) { largeObject *large_obj; if (!(large_obj = PyObject_NEW(largeObject, &largeType))) { return NULL; } 
Py_XINCREF(pgcnx); large_obj->pgcnx = pgcnx; large_obj->lo_fd = -1; large_obj->lo_oid = oid; return large_obj; } /* Create large object. */ static char conn_locreate__doc__[] = "locreate(mode) -- create a new large object in the database"; static PyObject * conn_locreate(connObject *self, PyObject *args) { int mode; Oid lo_oid; /* checks validity */ if (!_check_cnx_obj(self)) { return NULL; } /* gets arguments */ if (!PyArg_ParseTuple(args, "i", &mode)) { PyErr_SetString(PyExc_TypeError, "Method locreate() takes an integer argument"); return NULL; } /* creates large object */ lo_oid = lo_creat(self->cnx, mode); if (lo_oid == 0) { set_error_msg(OperationalError, "Can't create large object"); return NULL; } return (PyObject *) large_new(self, lo_oid); } /* Init from already known oid. */ static char conn_getlo__doc__[] = "getlo(oid) -- create a large object instance for the specified oid"; static PyObject * conn_getlo(connObject *self, PyObject *args) { int oid; Oid lo_oid; /* checks validity */ if (!_check_cnx_obj(self)) { return NULL; } /* gets arguments */ if (!PyArg_ParseTuple(args, "i", &oid)) { PyErr_SetString(PyExc_TypeError, "Method getlo() takes an integer argument"); return NULL; } lo_oid = (Oid) oid; if (lo_oid == 0) { PyErr_SetString(PyExc_ValueError, "The object oid can't be null"); return NULL; } /* creates object */ return (PyObject *) large_new(self, lo_oid); } /* Import unix file. 
 */
static char conn_loimport__doc__[] =
"loimport(name) -- create a new large object from specified file";

/* Import a client-side file into a new large object.
   The file is read by libpq on the client, not by the server. */
static PyObject *
conn_loimport(connObject *self, PyObject *args)
{
    char *name;
    Oid lo_oid;

    /* checks validity */
    if (!_check_cnx_obj(self)) {
        return NULL;
    }

    /* gets arguments */
    if (!PyArg_ParseTuple(args, "s", &name)) {
        PyErr_SetString(PyExc_TypeError,
            "Method loimport() takes a string argument");
        return NULL;
    }

    /* imports file and checks result */
    lo_oid = lo_import(self->cnx, name);
    if (lo_oid == 0) {
        set_error_msg(OperationalError, "Can't create large object");
        return NULL;
    }

    return (PyObject *) large_new(self, lo_oid);
}

#endif /* LARGE_OBJECTS */

/* Reset connection. */
static char conn_reset__doc__[] =
"reset() -- reset connection with current parameters\n\n"
"All derived queries and large objects derived from this connection\n"
"will not be usable after this call.\n";

/* Re-establish the connection with the same parameters (blocking call). */
static PyObject *
conn_reset(connObject *self, PyObject *noargs)
{
    if (!self->cnx) {
        PyErr_SetString(PyExc_TypeError, "Connection is not valid");
        return NULL;
    }

    /* resets the connection */
    PQreset(self->cnx);
    Py_INCREF(Py_None);
    return Py_None;
}

/* Cancel current command. */
static char conn_cancel__doc__[] =
"cancel() -- abandon processing of the current command";

/* Ask the server to abandon the current command; returns the int status
   reported by PQrequestCancel (nonzero means the request was dispatched,
   not that the command was actually cancelled). */
static PyObject *
conn_cancel(connObject *self, PyObject *noargs)
{
    if (!self->cnx) {
        PyErr_SetString(PyExc_TypeError, "Connection is not valid");
        return NULL;
    }

    /* request that the server abandon processing of the current command */
    return PyInt_FromLong((long) PQrequestCancel(self->cnx));
}

/* Get connection socket.
*/ static char conn_fileno__doc__[] = "fileno() -- return database connection socket file handle"; static PyObject * conn_fileno(connObject *self, PyObject *noargs) { if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } #ifdef NO_PQSOCKET return PyInt_FromLong((long) self->cnx->sock); #else return PyInt_FromLong((long) PQsocket(self->cnx)); #endif } /* Set external typecast callback function. */ static char conn_set_cast_hook__doc__[] = "set_cast_hook(func) -- set a fallback typecast function"; static PyObject * conn_set_cast_hook(connObject *self, PyObject *func) { PyObject *ret = NULL; if (func == Py_None) { Py_XDECREF(self->cast_hook); self->cast_hook = NULL; Py_INCREF(Py_None); ret = Py_None; } else if (PyCallable_Check(func)) { Py_XINCREF(func); Py_XDECREF(self->cast_hook); self->cast_hook = func; Py_INCREF(Py_None); ret = Py_None; } else { PyErr_SetString(PyExc_TypeError, "Method set_cast_hook() expects" " a callable or None as argument"); } return ret; } /* Get notice receiver callback function. */ static char conn_get_cast_hook__doc__[] = "get_cast_hook() -- get the fallback typecast function"; static PyObject * conn_get_cast_hook(connObject *self, PyObject *noargs) { PyObject *ret = self->cast_hook;; if (!ret) ret = Py_None; Py_INCREF(ret); return ret; } /* Set notice receiver callback function. 
 */
static char conn_set_notice_receiver__doc__[] =
"set_notice_receiver(func) -- set the current notice receiver";

/* Store a callable (or None to clear) as notice receiver and register the
   C-level trampoline `notice_receiver` with libpq. */
static PyObject *
conn_set_notice_receiver(connObject *self, PyObject *func)
{
    PyObject *ret = NULL;

    if (func == Py_None) {
        Py_XDECREF(self->notice_receiver);
        self->notice_receiver = NULL;
        Py_INCREF(Py_None); ret = Py_None;
    }
    else if (PyCallable_Check(func)) {
        /* incref the new callback before dropping the old one */
        Py_XINCREF(func); Py_XDECREF(self->notice_receiver);
        self->notice_receiver = func;
        /* NOTE(review): self->cnx is not validated here; PQsetNoticeReceiver
           is documented to ignore a NULL connection — confirm */
        PQsetNoticeReceiver(self->cnx, notice_receiver, self);
        Py_INCREF(Py_None); ret = Py_None;
    }
    else {
        PyErr_SetString(PyExc_TypeError,
            "Method set_notice_receiver() expects"
            " a callable or None as argument");
    }

    return ret;
}

/* Get notice receiver callback function. */
static char conn_get_notice_receiver__doc__[] =
"get_notice_receiver() -- get the current notice receiver";

/* Return the stored notice receiver, or None if none is set. */
static PyObject *
conn_get_notice_receiver(connObject *self, PyObject *noargs)
{
    PyObject *ret = self->notice_receiver;

    if (!ret) ret = Py_None;
    Py_INCREF(ret);

    return ret;
}

/* Close without deleting. */
static char conn_close__doc__[] =
"close() -- close connection\n\n"
"All instances of the connection object and derived objects\n"
"(queries and large objects) can no longer be used after this call.\n";

/* Close the libpq connection; the Python object itself stays alive with
   self->cnx set to NULL so later calls can raise a clean error. */
static PyObject *
conn_close(connObject *self, PyObject *noargs)
{
    /* connection object cannot already be closed */
    if (!self->cnx) {
        set_error_msg(InternalError, "Connection already closed");
        return NULL;
    }

    /* release the GIL while libpq tears down the connection */
    Py_BEGIN_ALLOW_THREADS
    PQfinish(self->cnx);
    Py_END_ALLOW_THREADS

    self->cnx = NULL;
    Py_INCREF(Py_None);
    return Py_None;
}

/* Get asynchronous notify.
*/ static char conn_get_notify__doc__[] = "getnotify() -- get database notify for this connection"; static PyObject * conn_get_notify(connObject *self, PyObject *noargs) { PGnotify *notify; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* checks for NOTIFY messages */ PQconsumeInput(self->cnx); if (!(notify = PQnotifies(self->cnx))) { Py_INCREF(Py_None); return Py_None; } else { PyObject *notify_result, *tmp; if (!(tmp = PyStr_FromString(notify->relname))) { return NULL; } if (!(notify_result = PyTuple_New(3))) { return NULL; } PyTuple_SET_ITEM(notify_result, 0, tmp); if (!(tmp = PyInt_FromLong(notify->be_pid))) { Py_DECREF(notify_result); return NULL; } PyTuple_SET_ITEM(notify_result, 1, tmp); /* extra exists even in old versions that did not support it */ if (!(tmp = PyStr_FromString(notify->extra))) { Py_DECREF(notify_result); return NULL; } PyTuple_SET_ITEM(notify_result, 2, tmp); PQfreemem(notify); return notify_result; } } /* Get the list of connection attributes. 
*/ static PyObject * conn_dir(connObject *self, PyObject *noargs) { PyObject *attrs; attrs = PyObject_Dir(PyObject_Type((PyObject *) self)); PyObject_CallMethod( attrs, "extend", "[sssssssssssss]", "host", "port", "db", "options", "error", "status", "user", "protocol_version", "server_version", "socket", "backend_pid", "ssl_in_use", "ssl_attributes"); return attrs; } /* Connection object methods */ static struct PyMethodDef conn_methods[] = { {"__dir__", (PyCFunction) conn_dir, METH_NOARGS, NULL}, {"source", (PyCFunction) conn_source, METH_NOARGS, conn_source__doc__}, {"query", (PyCFunction) conn_query, METH_VARARGS, conn_query__doc__}, {"query_prepared", (PyCFunction) conn_query_prepared, METH_VARARGS, conn_query_prepared__doc__}, {"prepare", (PyCFunction) conn_prepare, METH_VARARGS, conn_prepare__doc__}, {"describe_prepared", (PyCFunction) conn_describe_prepared, METH_VARARGS, conn_describe_prepared__doc__}, {"reset", (PyCFunction) conn_reset, METH_NOARGS, conn_reset__doc__}, {"cancel", (PyCFunction) conn_cancel, METH_NOARGS, conn_cancel__doc__}, {"close", (PyCFunction) conn_close, METH_NOARGS, conn_close__doc__}, {"fileno", (PyCFunction) conn_fileno, METH_NOARGS, conn_fileno__doc__}, {"get_cast_hook", (PyCFunction) conn_get_cast_hook, METH_NOARGS, conn_get_cast_hook__doc__}, {"set_cast_hook", (PyCFunction) conn_set_cast_hook, METH_O, conn_set_cast_hook__doc__}, {"get_notice_receiver", (PyCFunction) conn_get_notice_receiver, METH_NOARGS, conn_get_notice_receiver__doc__}, {"set_notice_receiver", (PyCFunction) conn_set_notice_receiver, METH_O, conn_set_notice_receiver__doc__}, {"getnotify", (PyCFunction) conn_get_notify, METH_NOARGS, conn_get_notify__doc__}, {"inserttable", (PyCFunction) conn_inserttable, METH_VARARGS, conn_inserttable__doc__}, {"transaction", (PyCFunction) conn_transaction, METH_NOARGS, conn_transaction__doc__}, {"parameter", (PyCFunction) conn_parameter, METH_VARARGS, conn_parameter__doc__}, {"date_format", (PyCFunction) conn_date_format, 
METH_NOARGS, conn_date_format__doc__}, #ifdef ESCAPING_FUNCS {"escape_literal", (PyCFunction) conn_escape_literal, METH_O, conn_escape_literal__doc__}, {"escape_identifier", (PyCFunction) conn_escape_identifier, METH_O, conn_escape_identifier__doc__}, #endif /* ESCAPING_FUNCS */ {"escape_string", (PyCFunction) conn_escape_string, METH_O, conn_escape_string__doc__}, {"escape_bytea", (PyCFunction) conn_escape_bytea, METH_O, conn_escape_bytea__doc__}, #ifdef DIRECT_ACCESS {"putline", (PyCFunction) conn_putline, METH_VARARGS, conn_putline__doc__}, {"getline", (PyCFunction) conn_getline, METH_NOARGS, conn_getline__doc__}, {"endcopy", (PyCFunction) conn_endcopy, METH_NOARGS, conn_endcopy__doc__}, #endif /* DIRECT_ACCESS */ #ifdef LARGE_OBJECTS {"locreate", (PyCFunction) conn_locreate, METH_VARARGS, conn_locreate__doc__}, {"getlo", (PyCFunction) conn_getlo, METH_VARARGS, conn_getlo__doc__}, {"loimport", (PyCFunction) conn_loimport, METH_VARARGS, conn_loimport__doc__}, #endif /* LARGE_OBJECTS */ {NULL, NULL} /* sentinel */ }; static char conn__doc__[] = "PostgreSQL connection object"; /* Connection type definition */ static PyTypeObject connType = { PyVarObject_HEAD_INIT(NULL, 0) "pg.Connection", /* tp_name */ sizeof(connObject), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor) conn_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_reserved */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ (getattrofunc) conn_getattr, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ conn__doc__, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ conn_methods, /* tp_methods */ }; PyGreSQL-5.1/py3c.h0000644000175100077410000000771513466770070013772 0ustar darcypyg00000000000000/* Copyright (c) 2015, Red Hat, Inc. 
and/or its affiliates * Licensed under the MIT license; see py3c.h */ #ifndef _PY3C_COMPAT_H_ #define _PY3C_COMPAT_H_ #include #if PY_MAJOR_VERSION >= 3 /***** Python 3 *****/ #define IS_PY3 1 /* Strings */ #define PyStr_Type PyUnicode_Type #define PyStr_Check PyUnicode_Check #define PyStr_CheckExact PyUnicode_CheckExact #define PyStr_FromString PyUnicode_FromString #define PyStr_FromStringAndSize PyUnicode_FromStringAndSize #define PyStr_FromFormat PyUnicode_FromFormat #define PyStr_FromFormatV PyUnicode_FromFormatV #define PyStr_AsString PyUnicode_AsUTF8 #define PyStr_Concat PyUnicode_Concat #define PyStr_Format PyUnicode_Format #define PyStr_InternInPlace PyUnicode_InternInPlace #define PyStr_InternFromString PyUnicode_InternFromString #define PyStr_Decode PyUnicode_Decode #define PyStr_AsUTF8String PyUnicode_AsUTF8String // returns PyBytes #define PyStr_AsUTF8 PyUnicode_AsUTF8 #define PyStr_AsUTF8AndSize PyUnicode_AsUTF8AndSize /* Ints */ #define PyInt_Type PyLong_Type #define PyInt_Check PyLong_Check #define PyInt_CheckExact PyLong_CheckExact #define PyInt_FromString PyLong_FromString #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyInt_AsSsize_t PyLong_AsSsize_t /* Module init */ #define MODULE_INIT_FUNC(name) \ PyMODINIT_FUNC PyInit_ ## name(void); \ PyMODINIT_FUNC PyInit_ ## name(void) /* Other */ #define Py_TPFLAGS_HAVE_ITER 0 // not needed in Python 3 #else /***** Python 2 *****/ #define IS_PY3 0 /* Strings */ #define PyStr_Type PyString_Type #define PyStr_Check PyString_Check #define PyStr_CheckExact PyString_CheckExact #define PyStr_FromString PyString_FromString #define PyStr_FromStringAndSize PyString_FromStringAndSize #define PyStr_FromFormat PyString_FromFormat #define PyStr_FromFormatV PyString_FromFormatV #define 
PyStr_AsString PyString_AsString #define PyStr_Format PyString_Format #define PyStr_InternInPlace PyString_InternInPlace #define PyStr_InternFromString PyString_InternFromString #define PyStr_Decode PyString_Decode static inline PyObject *PyStr_Concat(PyObject *left, PyObject *right) { PyObject *str = left; Py_INCREF(left); // reference to old left will be stolen PyString_Concat(&str, right); if (str) { return str; } else { return NULL; } } #define PyStr_AsUTF8String(str) (Py_INCREF(str), (str)) #define PyStr_AsUTF8 PyString_AsString #define PyStr_AsUTF8AndSize(pystr, sizeptr) \ ((*sizeptr=PyString_Size(pystr)), PyString_AsString(pystr)) #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_FromFormatV PyString_FromFormatV #define PyBytes_Size PyString_Size #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_AsString PyString_AsString #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #define _PyBytes_Resize _PyString_Resize /* Floats */ #define PyFloat_FromString(str) PyFloat_FromString(str, NULL) /* Module init */ #define PyModuleDef_HEAD_INIT 0 typedef struct PyModuleDef { int m_base; const char* m_name; const char* m_doc; Py_ssize_t m_size; PyMethodDef *m_methods; } PyModuleDef; #define PyModule_Create(def) \ Py_InitModule3((def)->m_name, (def)->m_methods, (def)->m_doc) #define MODULE_INIT_FUNC(name) \ static PyObject *PyInit_ ## name(void); \ void init ## name(void); \ void init ## name(void) { PyInit_ ## name(); } \ static PyObject *PyInit_ ## name(void) #endif #endif PyGreSQL-5.1/setup.cfg0000644000175100077410000000024513470245543014550 0ustar darcypyg00000000000000[build_ext] 
direct_access = 1 large_objects = 1 default_vars = 1 escaping_funcs = 1 [metadata] description-file = README.rst [egg_info] tag_build = tag_date = 0 PyGreSQL-5.1/tests/0000755000175100077410000000000013470245543014070 5ustar darcypyg00000000000000PyGreSQL-5.1/tests/test_classic_dbwrapper.py0000755000175100077410000056423513466770070021215 0ustar darcypyg00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- """Test the classic PyGreSQL interface. Sub-tests for the DB wrapper object. Contributed by Christoph Zwerschke. These tests need a database to test against. """ try: import unittest2 as unittest # for Python < 2.7 except ImportError: import unittest import os import sys import gc import json import tempfile import pg # the module under test from decimal import Decimal from datetime import date, time, datetime, timedelta from uuid import UUID from time import strftime from operator import itemgetter # We need a database to test against. If LOCAL_PyGreSQL.py exists we will # get our information from that. Otherwise we use the defaults. # The current user must have create schema privilege on the database. 
dbname = 'unittest' dbhost = None dbport = 5432 debug = False # let DB wrapper print debugging output try: from .LOCAL_PyGreSQL import * except (ImportError, ValueError): try: from LOCAL_PyGreSQL import * except ImportError: pass try: # noinspection PyUnresolvedReferences long except NameError: # Python >= 3.0 long = int try: # noinspection PyUnresolvedReferences unicode except NameError: # Python >= 3.0 unicode = str try: from collections import OrderedDict except ImportError: # Python 2.6 or 3.0 OrderedDict = dict if str is bytes: # noinspection PyUnresolvedReferences from StringIO import StringIO else: from io import StringIO windows = os.name == 'nt' # There is a known a bug in libpq under Windows which can cause # the interface to crash when calling PQhost(): do_not_ask_for_host = windows do_not_ask_for_host_reason = 'libpq issue on Windows' def DB(): """Create a DB wrapper object connecting to the test database.""" db = pg.DB(dbname, dbhost, dbport) if debug: db.debug = debug db.query("set client_min_messages=warning") return db class TestAttrDict(unittest.TestCase): """Test the simple ordered dictionary for attribute names.""" cls = pg.AttrDict base = OrderedDict def testInit(self): a = self.cls() self.assertIsInstance(a, self.base) self.assertEqual(a, self.base()) items = [('id', 'int'), ('name', 'text')] a = self.cls(items) self.assertIsInstance(a, self.base) self.assertEqual(a, self.base(items)) iteritems = iter(items) a = self.cls(iteritems) self.assertIsInstance(a, self.base) self.assertEqual(a, self.base(items)) def testIter(self): a = self.cls() self.assertEqual(list(a), []) keys = ['id', 'name', 'age'] items = [(key, None) for key in keys] a = self.cls(items) self.assertEqual(list(a), keys) def testKeys(self): a = self.cls() self.assertEqual(list(a.keys()), []) keys = ['id', 'name', 'age'] items = [(key, None) for key in keys] a = self.cls(items) self.assertEqual(list(a.keys()), keys) def testValues(self): a = self.cls() 
self.assertEqual(list(a.values()), []) items = [('id', 'int'), ('name', 'text')] values = [item[1] for item in items] a = self.cls(items) self.assertEqual(list(a.values()), values) def testItems(self): a = self.cls() self.assertEqual(list(a.items()), []) items = [('id', 'int'), ('name', 'text')] a = self.cls(items) self.assertEqual(list(a.items()), items) def testGet(self): a = self.cls([('id', 1)]) try: self.assertEqual(a['id'], 1) except KeyError: self.fail('AttrDict should be readable') def testSet(self): a = self.cls() try: a['id'] = 1 except TypeError: pass else: self.fail('AttrDict should be read-only') def testDel(self): a = self.cls([('id', 1)]) try: del a['id'] except TypeError: pass else: self.fail('AttrDict should be read-only') def testWriteMethods(self): a = self.cls([('id', 1)]) self.assertEqual(a['id'], 1) for method in 'clear', 'update', 'pop', 'setdefault', 'popitem': method = getattr(a, method) self.assertRaises(TypeError, method, a) class TestDBClassInit(unittest.TestCase): """Test proper handling of errors when creating DB instances.""" def testBadParams(self): self.assertRaises(TypeError, pg.DB, invalid=True) def testDeleteDb(self): db = DB() del db.db self.assertRaises(pg.InternalError, db.close) del db class TestDBClassBasic(unittest.TestCase): """Test existence of the DB class wrapped pg connection methods.""" def setUp(self): self.db = DB() def tearDown(self): try: self.db.close() except pg.InternalError: pass def testAllDBAttributes(self): attributes = [ 'abort', 'adapter', 'backend_pid', 'begin', 'cancel', 'clear', 'close', 'commit', 'date_format', 'db', 'dbname', 'dbtypes', 'debug', 'decode_json', 'delete', 'delete_prepared', 'describe_prepared', 'encode_json', 'end', 'endcopy', 'error', 'escape_bytea', 'escape_identifier', 'escape_literal', 'escape_string', 'fileno', 'get', 'get_as_dict', 'get_as_list', 'get_attnames', 'get_cast_hook', 'get_databases', 'get_notice_receiver', 'get_parameter', 'get_relations', 'get_tables', 'getline', 
'getlo', 'getnotify', 'has_table_privilege', 'host', 'insert', 'inserttable', 'locreate', 'loimport', 'notification_handler', 'options', 'parameter', 'pkey', 'port', 'prepare', 'protocol_version', 'putline', 'query', 'query_formatted', 'query_prepared', 'release', 'reopen', 'reset', 'rollback', 'savepoint', 'server_version', 'set_cast_hook', 'set_notice_receiver', 'set_parameter', 'socket', 'source', 'ssl_attributes', 'ssl_in_use', 'start', 'status', 'transaction', 'truncate', 'unescape_bytea', 'update', 'upsert', 'use_regtypes', 'user', ] # __dir__ is not called in Python 2.6 for old-style classes db_attributes = dir(self.db) if hasattr( self.db.__class__, '__class__') else self.db.__dir__() db_attributes = [a for a in db_attributes if not a.startswith('_')] self.assertEqual(attributes, db_attributes) def testAttributeDb(self): self.assertEqual(self.db.db.db, dbname) def testAttributeDbname(self): self.assertEqual(self.db.dbname, dbname) def testAttributeError(self): error = self.db.error self.assertTrue(not error or 'krb5_' in error) self.assertEqual(self.db.error, self.db.db.error) @unittest.skipIf(do_not_ask_for_host, do_not_ask_for_host_reason) def testAttributeHost(self): if dbhost and not dbhost.startswith('/'): host = dbhost else: host = 'localhost' self.assertIsInstance(self.db.host, str) self.assertEqual(self.db.host, host) self.assertEqual(self.db.db.host, host) def testAttributeOptions(self): no_options = '' options = self.db.options self.assertEqual(options, no_options) self.assertEqual(options, self.db.db.options) def testAttributePort(self): def_port = 5432 port = self.db.port self.assertIsInstance(port, int) self.assertEqual(port, dbport or def_port) self.assertEqual(port, self.db.db.port) def testAttributeProtocolVersion(self): protocol_version = self.db.protocol_version self.assertIsInstance(protocol_version, int) self.assertTrue(2 <= protocol_version < 4) self.assertEqual(protocol_version, self.db.db.protocol_version) def 
testAttributeServerVersion(self): server_version = self.db.server_version self.assertIsInstance(server_version, int) self.assertTrue(90000 <= server_version < 120000) self.assertEqual(server_version, self.db.db.server_version) def testAttributeSocket(self): socket = self.db.socket self.assertIsInstance(socket, int) self.assertGreaterEqual(socket, 0) def testAttributeBackendPid(self): backend_pid = self.db.backend_pid self.assertIsInstance(backend_pid, int) self.assertGreaterEqual(backend_pid, 1) def testAttributeSslInUse(self): ssl_in_use = self.db.ssl_in_use self.assertIsInstance(ssl_in_use, bool) self.assertFalse(ssl_in_use) def testAttributeSslAttributes(self): ssl_attributes = self.db.ssl_attributes self.assertIsInstance(ssl_attributes, dict) self.assertEqual(ssl_attributes, { 'cipher': None, 'compression': None, 'key_bits': None, 'library': None, 'protocol': None}) def testAttributeStatus(self): status_ok = 1 status = self.db.status self.assertIsInstance(status, int) self.assertEqual(status, status_ok) self.assertEqual(status, self.db.db.status) def testAttributeUser(self): no_user = 'Deprecated facility' user = self.db.user self.assertTrue(user) self.assertIsInstance(user, str) self.assertNotEqual(user, no_user) self.assertEqual(user, self.db.db.user) def testMethodEscapeLiteral(self): self.assertEqual(self.db.escape_literal(''), "''") def testMethodEscapeIdentifier(self): self.assertEqual(self.db.escape_identifier(''), '""') def testMethodEscapeString(self): self.assertEqual(self.db.escape_string(''), '') def testMethodEscapeBytea(self): self.assertEqual(self.db.escape_bytea('').replace( '\\x', '').replace('\\', ''), '') def testMethodUnescapeBytea(self): self.assertEqual(self.db.unescape_bytea(''), b'') def testMethodDecodeJson(self): self.assertEqual(self.db.decode_json('{}'), {}) def testMethodEncodeJson(self): self.assertEqual(self.db.encode_json({}), '{}') def testMethodQuery(self): query = self.db.query query("select 1+1") query("select 1+$1+$2", 2, 3) 
query("select 1+$1+$2", (2, 3)) query("select 1+$1+$2", [2, 3]) query("select 1+$1", 1) def testMethodQueryEmpty(self): self.assertRaises(ValueError, self.db.query, '') def testMethodQueryDataError(self): try: self.db.query("select 1/0") except pg.DataError as error: self.assertEqual(error.sqlstate, '22012') def testMethodEndcopy(self): try: self.db.endcopy() except IOError: pass def testMethodClose(self): self.db.close() try: self.db.reset() except pg.Error: pass else: self.fail('Reset should give an error for a closed connection') self.assertIsNone(self.db.db) self.assertRaises(pg.InternalError, self.db.close) self.assertRaises(pg.InternalError, self.db.query, 'select 1') self.assertRaises(pg.InternalError, getattr, self.db, 'status') self.assertRaises(pg.InternalError, getattr, self.db, 'error') self.assertRaises(pg.InternalError, getattr, self.db, 'absent') def testMethodReset(self): con = self.db.db self.db.reset() self.assertIs(self.db.db, con) self.db.query("select 1+1") self.db.close() self.assertRaises(pg.InternalError, self.db.reset) def testMethodReopen(self): con = self.db.db self.db.reopen() self.assertIsNot(self.db.db, con) con = self.db.db self.db.query("select 1+1") self.db.close() self.db.reopen() self.assertIsNot(self.db.db, con) self.db.query("select 1+1") self.db.close() def testExistingConnection(self): db = pg.DB(self.db.db) self.assertIsNotNone(db.db) self.assertEqual(self.db.db, db.db) db.close() self.assertIsNone(db.db) self.assertIsNotNone(self.db.db) db.reopen() self.assertIsNotNone(db.db) self.assertEqual(self.db.db, db.db) db.close() self.assertIsNone(db.db) db = pg.DB(self.db) self.assertEqual(self.db.db, db.db) db = pg.DB(db=self.db.db) self.assertEqual(self.db.db, db.db) def testExistingDbApi2Connection(self): class DBApi2Con: def __init__(self, cnx): self._cnx = cnx def close(self): self._cnx.close() db2 = DBApi2Con(self.db.db) db = pg.DB(db2) self.assertEqual(self.db.db, db.db) db.close() self.assertIsNone(db.db) db.reopen() 
self.assertIsNotNone(db.db) self.assertEqual(self.db.db, db.db) db.close() self.assertIsNone(db.db) db2.close() class TestDBClass(unittest.TestCase): """Test the methods of the DB class wrapped pg connection.""" maxDiff = 80 * 20 cls_set_up = False regtypes = None @classmethod def setUpClass(cls): db = DB() db.query("drop table if exists test cascade") db.query("create table test (" "i2 smallint, i4 integer, i8 bigint," " d numeric, f4 real, f8 double precision, m money," " v4 varchar(4), c4 char(4), t text)") db.query("create or replace view test_view as" " select i4, v4 from test") db.close() cls.cls_set_up = True @classmethod def tearDownClass(cls): db = DB() db.query("drop table test cascade") db.close() def setUp(self): self.assertTrue(self.cls_set_up) self.db = DB() if self.regtypes is None: self.regtypes = self.db.use_regtypes() else: self.db.use_regtypes(self.regtypes) query = self.db.query query('set client_encoding=utf8') query("set lc_monetary='C'") query("set datestyle='ISO,YMD'") query('set standard_conforming_strings=on') try: query('set bytea_output=hex') except pg.ProgrammingError: if self.db.server_version >= 90000: raise # ignore for older server versions def tearDown(self): self.doCleanups() self.db.close() def createTable(self, table, definition, temporary=True, oids=None, values=None): query = self.db.query if '"' not in table or '.' 
in table: table = '"%s"' % table if not temporary: q = 'drop table if exists %s cascade' % table query(q) self.addCleanup(query, q) temporary = 'temporary table' if temporary else 'table' as_query = definition.startswith(('as ', 'AS ')) if not as_query and not definition.startswith('('): definition = '(%s)' % definition with_oids = 'with oids' if oids else 'without oids' q = ['create', temporary, table] if as_query: q.extend([with_oids, definition]) else: q.extend([definition, with_oids]) q = ' '.join(q) query(q) if values: for params in values: if not isinstance(params, (list, tuple)): params = [params] values = ', '.join('$%d' % (n + 1) for n in range(len(params))) q = "insert into %s values (%s)" % (table, values) query(q, params) def testClassName(self): self.assertEqual(self.db.__class__.__name__, 'DB') def testModuleName(self): self.assertEqual(self.db.__module__, 'pg') self.assertEqual(self.db.__class__.__module__, 'pg') def testEscapeLiteral(self): f = self.db.escape_literal r = f(b"plain") self.assertIsInstance(r, bytes) self.assertEqual(r, b"'plain'") r = f(u"plain") self.assertIsInstance(r, unicode) self.assertEqual(r, u"'plain'") r = f(u"that's käse".encode('utf-8')) self.assertIsInstance(r, bytes) self.assertEqual(r, u"'that''s käse'".encode('utf-8')) r = f(u"that's käse") self.assertIsInstance(r, unicode) self.assertEqual(r, u"'that''s käse'") self.assertEqual(f(r"It's fine to have a \ inside."), r" E'It''s fine to have a \\ inside.'") self.assertEqual(f('No "quotes" must be escaped.'), "'No \"quotes\" must be escaped.'") def testEscapeIdentifier(self): f = self.db.escape_identifier r = f(b"plain") self.assertIsInstance(r, bytes) self.assertEqual(r, b'"plain"') r = f(u"plain") self.assertIsInstance(r, unicode) self.assertEqual(r, u'"plain"') r = f(u"that's käse".encode('utf-8')) self.assertIsInstance(r, bytes) self.assertEqual(r, u'"that\'s käse"'.encode('utf-8')) r = f(u"that's käse") self.assertIsInstance(r, unicode) self.assertEqual(r, u'"that\'s 
käse"') self.assertEqual(f(r"It's fine to have a \ inside."), '"It\'s fine to have a \\ inside."') self.assertEqual(f('All "quotes" must be escaped.'), '"All ""quotes"" must be escaped."') def testEscapeString(self): f = self.db.escape_string r = f(b"plain") self.assertIsInstance(r, bytes) self.assertEqual(r, b"plain") r = f(u"plain") self.assertIsInstance(r, unicode) self.assertEqual(r, u"plain") r = f(u"that's käse".encode('utf-8')) self.assertIsInstance(r, bytes) self.assertEqual(r, u"that''s käse".encode('utf-8')) r = f(u"that's käse") self.assertIsInstance(r, unicode) self.assertEqual(r, u"that''s käse") self.assertEqual(f(r"It's fine to have a \ inside."), r"It''s fine to have a \ inside.") def testEscapeBytea(self): f = self.db.escape_bytea # note that escape_byte always returns hex output since Pg 9.0, # regardless of the bytea_output setting r = f(b'plain') self.assertIsInstance(r, bytes) self.assertEqual(r, b'\\x706c61696e') r = f(u'plain') self.assertIsInstance(r, unicode) self.assertEqual(r, u'\\x706c61696e') r = f(u"das is' käse".encode('utf-8')) self.assertIsInstance(r, bytes) self.assertEqual(r, b'\\x64617320697327206bc3a47365') r = f(u"das is' käse") self.assertIsInstance(r, unicode) self.assertEqual(r, u'\\x64617320697327206bc3a47365') self.assertEqual(f(b'O\x00ps\xff!'), b'\\x4f007073ff21') def testUnescapeBytea(self): f = self.db.unescape_bytea r = f(b'plain') self.assertIsInstance(r, bytes) self.assertEqual(r, b'plain') r = f(u'plain') self.assertIsInstance(r, bytes) self.assertEqual(r, b'plain') r = f(b"das is' k\\303\\244se") self.assertIsInstance(r, bytes) self.assertEqual(r, u"das is' käse".encode('utf8')) r = f(u"das is' k\\303\\244se") self.assertIsInstance(r, bytes) self.assertEqual(r, u"das is' käse".encode('utf8')) self.assertEqual(f(r'O\\000ps\\377!'), b'O\\000ps\\377!') self.assertEqual(f(r'\\x706c61696e'), b'\\x706c61696e') self.assertEqual(f(r'\\x746861742773206be47365'), b'\\x746861742773206be47365') 
self.assertEqual(f(r'\\x4f007073ff21'), b'\\x4f007073ff21') def testDecodeJson(self): f = self.db.decode_json self.assertIsNone(f('null')) data = { "id": 1, "name": "Foo", "price": 1234.5, "new": True, "note": None, "tags": ["Bar", "Eek"], "stock": {"warehouse": 300, "retail": 20}} text = json.dumps(data) r = f(text) self.assertIsInstance(r, dict) self.assertEqual(r, data) self.assertIsInstance(r['id'], int) self.assertIsInstance(r['name'], unicode) self.assertIsInstance(r['price'], float) self.assertIsInstance(r['new'], bool) self.assertIsInstance(r['tags'], list) self.assertIsInstance(r['stock'], dict) def testEncodeJson(self): f = self.db.encode_json self.assertEqual(f(None), 'null') data = { "id": 1, "name": "Foo", "price": 1234.5, "new": True, "note": None, "tags": ["Bar", "Eek"], "stock": {"warehouse": 300, "retail": 20}} text = json.dumps(data) r = f(data) self.assertIsInstance(r, str) self.assertEqual(r, text) def testGetParameter(self): f = self.db.get_parameter self.assertRaises(TypeError, f) self.assertRaises(TypeError, f, None) self.assertRaises(TypeError, f, 42) self.assertRaises(TypeError, f, '') self.assertRaises(TypeError, f, []) self.assertRaises(TypeError, f, ['']) self.assertRaises(pg.ProgrammingError, f, 'this_does_not_exist') r = f('standard_conforming_strings') self.assertEqual(r, 'on') r = f('lc_monetary') self.assertEqual(r, 'C') r = f('datestyle') self.assertEqual(r, 'ISO, YMD') r = f('bytea_output') self.assertEqual(r, 'hex') r = f(['bytea_output', 'lc_monetary']) self.assertIsInstance(r, list) self.assertEqual(r, ['hex', 'C']) r = f(('standard_conforming_strings', 'datestyle', 'bytea_output')) self.assertEqual(r, ['on', 'ISO, YMD', 'hex']) r = f(set(['bytea_output', 'lc_monetary'])) self.assertIsInstance(r, dict) self.assertEqual(r, {'bytea_output': 'hex', 'lc_monetary': 'C'}) r = f(set(['Bytea_Output', ' LC_Monetary '])) self.assertIsInstance(r, dict) self.assertEqual(r, {'Bytea_Output': 'hex', ' LC_Monetary ': 'C'}) s = 
dict.fromkeys(('bytea_output', 'lc_monetary')) r = f(s) self.assertIs(r, s) self.assertEqual(r, {'bytea_output': 'hex', 'lc_monetary': 'C'}) s = dict.fromkeys(('Bytea_Output', ' LC_Monetary ')) r = f(s) self.assertIs(r, s) self.assertEqual(r, {'Bytea_Output': 'hex', ' LC_Monetary ': 'C'}) def testGetParameterServerVersion(self): r = self.db.get_parameter('server_version_num') self.assertIsInstance(r, str) s = self.db.server_version self.assertIsInstance(s, int) self.assertEqual(r, str(s)) def testGetParameterAll(self): f = self.db.get_parameter r = f('all') self.assertIsInstance(r, dict) self.assertEqual(r['standard_conforming_strings'], 'on') self.assertEqual(r['lc_monetary'], 'C') self.assertEqual(r['DateStyle'], 'ISO, YMD') self.assertEqual(r['bytea_output'], 'hex') def testSetParameter(self): f = self.db.set_parameter g = self.db.get_parameter self.assertRaises(TypeError, f) self.assertRaises(TypeError, f, None) self.assertRaises(TypeError, f, 42) self.assertRaises(TypeError, f, '') self.assertRaises(TypeError, f, []) self.assertRaises(TypeError, f, ['']) self.assertRaises(ValueError, f, 'all', 'invalid') self.assertRaises(ValueError, f, { 'invalid1': 'value1', 'invalid2': 'value2'}, 'value') self.assertRaises(pg.ProgrammingError, f, 'this_does_not_exist') f('standard_conforming_strings', 'off') self.assertEqual(g('standard_conforming_strings'), 'off') f('datestyle', 'ISO, DMY') self.assertEqual(g('datestyle'), 'ISO, DMY') f(['standard_conforming_strings', 'datestyle'], ['on', 'ISO, DMY']) self.assertEqual(g('standard_conforming_strings'), 'on') self.assertEqual(g('datestyle'), 'ISO, DMY') f(['default_with_oids', 'standard_conforming_strings'], 'off') self.assertEqual(g('default_with_oids'), 'off') self.assertEqual(g('standard_conforming_strings'), 'off') f(('standard_conforming_strings', 'datestyle'), ('on', 'ISO, YMD')) self.assertEqual(g('standard_conforming_strings'), 'on') self.assertEqual(g('datestyle'), 'ISO, YMD') f(('default_with_oids', 
'standard_conforming_strings'), 'off') self.assertEqual(g('default_with_oids'), 'off') self.assertEqual(g('standard_conforming_strings'), 'off') f(set(['default_with_oids', 'standard_conforming_strings']), 'on') self.assertEqual(g('default_with_oids'), 'on') self.assertEqual(g('standard_conforming_strings'), 'on') self.assertRaises(ValueError, f, set(['default_with_oids', 'standard_conforming_strings']), ['off', 'on']) f(set(['default_with_oids', 'standard_conforming_strings']), ['off', 'off']) self.assertEqual(g('default_with_oids'), 'off') self.assertEqual(g('standard_conforming_strings'), 'off') f({'standard_conforming_strings': 'on', 'datestyle': 'ISO, YMD'}) self.assertEqual(g('standard_conforming_strings'), 'on') self.assertEqual(g('datestyle'), 'ISO, YMD') def testResetParameter(self): db = DB() f = db.set_parameter g = db.get_parameter r = g('default_with_oids') self.assertIn(r, ('on', 'off')) dwi, not_dwi = r, 'off' if r == 'on' else 'on' r = g('standard_conforming_strings') self.assertIn(r, ('on', 'off')) scs, not_scs = r, 'off' if r == 'on' else 'on' f('default_with_oids', not_dwi) f('standard_conforming_strings', not_scs) self.assertEqual(g('default_with_oids'), not_dwi) self.assertEqual(g('standard_conforming_strings'), not_scs) f('default_with_oids') f('standard_conforming_strings', None) self.assertEqual(g('default_with_oids'), dwi) self.assertEqual(g('standard_conforming_strings'), scs) f('default_with_oids', not_dwi) f('standard_conforming_strings', not_scs) self.assertEqual(g('default_with_oids'), not_dwi) self.assertEqual(g('standard_conforming_strings'), not_scs) f(['default_with_oids', 'standard_conforming_strings'], None) self.assertEqual(g('default_with_oids'), dwi) self.assertEqual(g('standard_conforming_strings'), scs) f('default_with_oids', not_dwi) f('standard_conforming_strings', not_scs) self.assertEqual(g('default_with_oids'), not_dwi) self.assertEqual(g('standard_conforming_strings'), not_scs) f(('default_with_oids', 
'standard_conforming_strings')) self.assertEqual(g('default_with_oids'), dwi) self.assertEqual(g('standard_conforming_strings'), scs) f('default_with_oids', not_dwi) f('standard_conforming_strings', not_scs) self.assertEqual(g('default_with_oids'), not_dwi) self.assertEqual(g('standard_conforming_strings'), not_scs) f(set(['default_with_oids', 'standard_conforming_strings'])) self.assertEqual(g('default_with_oids'), dwi) self.assertEqual(g('standard_conforming_strings'), scs) db.close() def testResetParameterAll(self): db = DB() f = db.set_parameter self.assertRaises(ValueError, f, 'all', 0) self.assertRaises(ValueError, f, 'all', 'off') g = db.get_parameter r = g('default_with_oids') self.assertIn(r, ('on', 'off')) dwi, not_dwi = r, 'off' if r == 'on' else 'on' r = g('standard_conforming_strings') self.assertIn(r, ('on', 'off')) scs, not_scs = r, 'off' if r == 'on' else 'on' f('default_with_oids', not_dwi) f('standard_conforming_strings', not_scs) self.assertEqual(g('default_with_oids'), not_dwi) self.assertEqual(g('standard_conforming_strings'), not_scs) f('all') self.assertEqual(g('default_with_oids'), dwi) self.assertEqual(g('standard_conforming_strings'), scs) db.close() def testSetParameterLocal(self): f = self.db.set_parameter g = self.db.get_parameter self.assertEqual(g('standard_conforming_strings'), 'on') self.db.begin() f('standard_conforming_strings', 'off', local=True) self.assertEqual(g('standard_conforming_strings'), 'off') self.db.end() self.assertEqual(g('standard_conforming_strings'), 'on') def testSetParameterSession(self): f = self.db.set_parameter g = self.db.get_parameter self.assertEqual(g('standard_conforming_strings'), 'on') self.db.begin() f('standard_conforming_strings', 'off', local=False) self.assertEqual(g('standard_conforming_strings'), 'off') self.db.end() self.assertEqual(g('standard_conforming_strings'), 'off') def testReset(self): db = DB() default_datestyle = db.get_parameter('datestyle') changed_datestyle = 'ISO, DMY' if 
changed_datestyle == default_datestyle: changed_datestyle == 'ISO, YMD' self.db.set_parameter('datestyle', changed_datestyle) r = self.db.get_parameter('datestyle') self.assertEqual(r, changed_datestyle) con = self.db.db q = con.query("show datestyle") self.db.reset() r = q.getresult()[0][0] self.assertEqual(r, changed_datestyle) q = con.query("show datestyle") r = q.getresult()[0][0] self.assertEqual(r, default_datestyle) r = self.db.get_parameter('datestyle') self.assertEqual(r, default_datestyle) db.close() def testReopen(self): db = DB() default_datestyle = db.get_parameter('datestyle') changed_datestyle = 'ISO, DMY' if changed_datestyle == default_datestyle: changed_datestyle == 'ISO, YMD' self.db.set_parameter('datestyle', changed_datestyle) r = self.db.get_parameter('datestyle') self.assertEqual(r, changed_datestyle) con = self.db.db q = con.query("show datestyle") self.db.reopen() r = q.getresult()[0][0] self.assertEqual(r, changed_datestyle) self.assertRaises(TypeError, getattr, con, 'query') r = self.db.get_parameter('datestyle') self.assertEqual(r, default_datestyle) db.close() def testCreateTable(self): table = 'test hello world' values = [(2, "World!"), (1, "Hello")] self.createTable(table, "n smallint, t varchar", temporary=True, oids=True, values=values) r = self.db.query('select t from "%s" order by n' % table).getresult() r = ', '.join(row[0] for row in r) self.assertEqual(r, "Hello, World!") r = self.db.query('select oid from "%s" limit 1' % table).getresult() self.assertIsInstance(r[0][0], int) def testQuery(self): query = self.db.query table = 'test_table' self.createTable(table, "n integer", oids=True) q = "insert into test_table values (1)" r = query(q) self.assertIsInstance(r, int) q = "insert into test_table select 2" r = query(q) self.assertIsInstance(r, int) oid = r q = "select oid from test_table where n=2" r = query(q).getresult() self.assertEqual(len(r), 1) r = r[0] self.assertEqual(len(r), 1) r = r[0] self.assertEqual(r, oid) q = 
"insert into test_table select 3 union select 4 union select 5" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '3') q = "update test_table set n=4 where n<5" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '4') q = "delete from test_table" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '5') def testMultipleQueries(self): self.assertEqual(self.db.query( "create temporary table test_multi (n integer);" "insert into test_multi values (4711);" "select n from test_multi").getresult()[0][0], 4711) def testQueryWithParams(self): query = self.db.query self.createTable('test_table', 'n1 integer, n2 integer', oids=True) q = "insert into test_table values ($1, $2)" r = query(q, (1, 2)) self.assertIsInstance(r, int) r = query(q, [3, 4]) self.assertIsInstance(r, int) r = query(q, [5, 6]) self.assertIsInstance(r, int) q = "select * from test_table order by 1, 2" self.assertEqual(query(q).getresult(), [(1, 2), (3, 4), (5, 6)]) q = "select * from test_table where n1=$1 and n2=$2" self.assertEqual(query(q, 3, 4).getresult(), [(3, 4)]) q = "update test_table set n2=$2 where n1=$1" r = query(q, 3, 7) self.assertEqual(r, '1') q = "select * from test_table order by 1, 2" self.assertEqual(query(q).getresult(), [(1, 2), (3, 7), (5, 6)]) q = "delete from test_table where n2!=$1" r = query(q, 4) self.assertEqual(r, '3') def testEmptyQuery(self): self.assertRaises(ValueError, self.db.query, '') def testQueryDataError(self): try: self.db.query("select 1/0") except pg.DataError as error: self.assertEqual(error.sqlstate, '22012') def testQueryFormatted(self): f = self.db.query_formatted t = True if pg.get_bool() else 't' # test with tuple q = f("select %s::int, %s::real, %s::text, %s::bool", (3, 2.5, 'hello', True)) r = q.getresult()[0] self.assertEqual(r, (3, 2.5, 'hello', t)) # test with tuple, inline q = f("select %s, %s, %s, %s", (3, 2.5, 'hello', True), inline=True) r = q.getresult()[0] if isinstance(r[1], Decimal): # Python 2.6 cannot 
compare float and Decimal r = list(r) r[1] = float(r[1]) r = tuple(r) self.assertEqual(r, (3, 2.5, 'hello', t)) # test with dict q = f("select %(a)s::int, %(b)s::real, %(c)s::text, %(d)s::bool", dict(a=3, b=2.5, c='hello', d=True)) r = q.getresult()[0] self.assertEqual(r, (3, 2.5, 'hello', t)) # test with dict, inline q = f("select %(a)s, %(b)s, %(c)s, %(d)s", dict(a=3, b=2.5, c='hello', d=True), inline=True) r = q.getresult()[0] if isinstance(r[1], Decimal): # Python 2.6 cannot compare float and Decimal r = list(r) r[1] = float(r[1]) r = tuple(r) self.assertEqual(r, (3, 2.5, 'hello', t)) # test with dict and extra values q = f("select %(a)s||%(b)s||%(c)s||%(d)s||'epsilon'", dict(a='alpha', b='beta', c='gamma', d='delta', e='extra')) r = q.getresult()[0][0] self.assertEqual(r, 'alphabetagammadeltaepsilon') def testQueryFormattedWithAny(self): f = self.db.query_formatted q = "select 2 = any(%s)" r = f(q, [[1, 3]]).getresult()[0][0] self.assertEqual(r, False if pg.get_bool() else 'f') r = f(q, [[1, 2, 3]]).getresult()[0][0] self.assertEqual(r, True if pg.get_bool() else 't') r = f(q, [[]]).getresult()[0][0] self.assertEqual(r, False if pg.get_bool() else 'f') r = f(q, [[None]]).getresult()[0][0] self.assertIsNone(r) def testQueryFormattedWithoutParams(self): f = self.db.query_formatted q = "select 42" r = f(q).getresult()[0][0] self.assertEqual(r, 42) r = f(q, None).getresult()[0][0] self.assertEqual(r, 42) r = f(q, []).getresult()[0][0] self.assertEqual(r, 42) r = f(q, {}).getresult()[0][0] self.assertEqual(r, 42) def testPrepare(self): p = self.db.prepare self.assertIsNone(p('my query', "select 'hello'")) self.assertIsNone(p('my other query', "select 'world'")) self.assertRaises(pg.ProgrammingError, p, 'my query', "select 'hello, too'") def testPrepareUnnamed(self): p = self.db.prepare self.assertIsNone(p('', "select null")) self.assertIsNone(p(None, "select null")) def testQueryPreparedWithoutParams(self): f = self.db.query_prepared 
self.assertRaises(pg.OperationalError, f, 'q') p = self.db.prepare p('q1', "select 17") p('q2', "select 42") r = f('q1').getresult()[0][0] self.assertEqual(r, 17) r = f('q2').getresult()[0][0] self.assertEqual(r, 42) def testQueryPreparedWithParams(self): p = self.db.prepare p('sum', "select 1 + $1 + $2 + $3") p('cat', "select initcap($1) || ', ' || $2 || '!'") f = self.db.query_prepared r = f('sum', 2, 3, 5).getresult()[0][0] self.assertEqual(r, 11) r = f('cat', 'hello', 'world').getresult()[0][0] self.assertEqual(r, 'Hello, world!') def testQueryPreparedUnnamedWithOutParams(self): f = self.db.query_prepared self.assertRaises(pg.OperationalError, f, None) self.assertRaises(pg.OperationalError, f, '') p = self.db.prepare # make sure all types are known so that we will not # generate other anonymous queries in the background p('', "select 'empty'::varchar") r = f(None).getresult()[0][0] self.assertEqual(r, 'empty') r = f('').getresult()[0][0] self.assertEqual(r, 'empty') p(None, "select 'none'::varchar") r = f(None).getresult()[0][0] self.assertEqual(r, 'none') r = f('').getresult()[0][0] self.assertEqual(r, 'none') def testQueryPreparedUnnamedWithParams(self): p = self.db.prepare p('', "select 1 + $1 + $2") f = self.db.query_prepared r = f('', 2, 3).getresult()[0][0] self.assertEqual(r, 6) r = f(None, 2, 3).getresult()[0][0] self.assertEqual(r, 6) p(None, "select 2 + $1 + $2") f = self.db.query_prepared r = f('', 3, 4).getresult()[0][0] self.assertEqual(r, 9) r = f(None, 3, 4).getresult()[0][0] self.assertEqual(r, 9) def testDescribePrepared(self): self.db.prepare('count', "select 1 as first, 2 as second") f = self.db.describe_prepared r = f('count').listfields() self.assertEqual(r, ('first', 'second')) def testDescribePreparedUnnamed(self): self.db.prepare('', "select null as anon") f = self.db.describe_prepared r = f().listfields() self.assertEqual(r, ('anon',)) r = f(None).listfields() self.assertEqual(r, ('anon',)) r = f('').listfields() self.assertEqual(r, 
('anon',)) def testDeletePrepared(self): f = self.db.delete_prepared f() e = pg.OperationalError self.assertRaises(e, f, 'myquery') p = self.db.prepare p('q1', "select 1") p('q2', "select 2") f('q1') f('q2') self.assertRaises(e, f, 'q1') self.assertRaises(e, f, 'q2') p('q1', "select 1") p('q2', "select 2") f() self.assertRaises(e, f, 'q1') self.assertRaises(e, f, 'q2') def testPkey(self): query = self.db.query pkey = self.db.pkey self.assertRaises(KeyError, pkey, 'test') for t in ('pkeytest', 'primary key test'): self.createTable('%s0' % t, 'a smallint') self.createTable('%s1' % t, 'b smallint primary key') self.createTable('%s2' % t, 'c smallint, d smallint primary key') self.createTable('%s3' % t, 'e smallint, f smallint, g smallint, h smallint, i smallint,' ' primary key (f, h)') self.createTable('%s4' % t, 'e smallint, f smallint, g smallint, h smallint, i smallint,' ' primary key (h, f)') self.createTable('%s5' % t, 'more_than_one_letter varchar primary key') self.createTable('%s6' % t, '"with space" date primary key') self.createTable('%s7' % t, 'a_very_long_column_name varchar, "with space" date, "42" int,' ' primary key (a_very_long_column_name, "with space", "42")') self.assertRaises(KeyError, pkey, '%s0' % t) self.assertEqual(pkey('%s1' % t), 'b') self.assertEqual(pkey('%s1' % t, True), ('b',)) self.assertEqual(pkey('%s1' % t, composite=False), 'b') self.assertEqual(pkey('%s1' % t, composite=True), ('b',)) self.assertEqual(pkey('%s2' % t), 'd') self.assertEqual(pkey('%s2' % t, composite=True), ('d',)) r = pkey('%s3' % t) self.assertIsInstance(r, tuple) self.assertEqual(r, ('f', 'h')) r = pkey('%s3' % t, composite=False) self.assertIsInstance(r, tuple) self.assertEqual(r, ('f', 'h')) r = pkey('%s4' % t) self.assertIsInstance(r, tuple) self.assertEqual(r, ('h', 'f')) self.assertEqual(pkey('%s5' % t), 'more_than_one_letter') self.assertEqual(pkey('%s6' % t), 'with space') r = pkey('%s7' % t) self.assertIsInstance(r, tuple) self.assertEqual(r, ( 
'a_very_long_column_name', 'with space', '42')) # a newly added primary key will be detected query('alter table "%s0" add primary key (a)' % t) self.assertEqual(pkey('%s0' % t), 'a') # a changed primary key will not be detected, # indicating that the internal cache is operating query('alter table "%s1" rename column b to x' % t) self.assertEqual(pkey('%s1' % t), 'b') # we get the changed primary key when the cache is flushed self.assertEqual(pkey('%s1' % t, flush=True), 'x') def testGetDatabases(self): databases = self.db.get_databases() self.assertIn('template0', databases) self.assertIn('template1', databases) self.assertNotIn('not existing database', databases) self.assertIn('postgres', databases) self.assertIn(dbname, databases) def testGetTables(self): get_tables = self.db.get_tables tables = ('A very Special Name', 'A_MiXeD_quoted_NaMe', 'Hello, Test World!', 'Zoro', 'a1', 'a2', 'a321', 'averyveryveryveryveryveryveryreallyreallylongtablename', 'b0', 'b3', 'x', 'xXx', 'xx', 'y', 'z') for t in tables: self.db.query('drop table if exists "%s" cascade' % t) before_tables = get_tables() self.assertIsInstance(before_tables, list) for t in before_tables: t = t.split('.', 1) self.assertGreaterEqual(len(t), 2) if len(t) > 2: self.assertTrue(t[1].startswith('"')) t = t[0] self.assertNotEqual(t, 'information_schema') self.assertFalse(t.startswith('pg_')) for t in tables: self.createTable(t, 'as select 0', temporary=False) current_tables = get_tables() new_tables = [t for t in current_tables if t not in before_tables] expected_new_tables = ['public.%s' % ( '"%s"' % t if ' ' in t or t != t.lower() else t) for t in tables] self.assertEqual(new_tables, expected_new_tables) self.doCleanups() after_tables = get_tables() self.assertEqual(after_tables, before_tables) def testGetSystemTables(self): get_tables = self.db.get_tables result = get_tables() self.assertNotIn('pg_catalog.pg_class', result) self.assertNotIn('information_schema.tables', result) result = 
get_tables(system=False) self.assertNotIn('pg_catalog.pg_class', result) self.assertNotIn('information_schema.tables', result) result = get_tables(system=True) self.assertIn('pg_catalog.pg_class', result) self.assertNotIn('information_schema.tables', result) def testGetRelations(self): get_relations = self.db.get_relations result = get_relations() self.assertIn('public.test', result) self.assertIn('public.test_view', result) result = get_relations('rv') self.assertIn('public.test', result) self.assertIn('public.test_view', result) result = get_relations('r') self.assertIn('public.test', result) self.assertNotIn('public.test_view', result) result = get_relations('v') self.assertNotIn('public.test', result) self.assertIn('public.test_view', result) result = get_relations('cisSt') self.assertNotIn('public.test', result) self.assertNotIn('public.test_view', result) def testGetSystemRelations(self): get_relations = self.db.get_relations result = get_relations() self.assertNotIn('pg_catalog.pg_class', result) self.assertNotIn('information_schema.tables', result) result = get_relations(system=False) self.assertNotIn('pg_catalog.pg_class', result) self.assertNotIn('information_schema.tables', result) result = get_relations(system=True) self.assertIn('pg_catalog.pg_class', result) self.assertIn('information_schema.tables', result) def testGetAttnames(self): get_attnames = self.db.get_attnames self.assertRaises(pg.ProgrammingError, self.db.get_attnames, 'does_not_exist') self.assertRaises(pg.ProgrammingError, self.db.get_attnames, 'has.too.many.dots') r = get_attnames('test') self.assertIsInstance(r, dict) if self.regtypes: self.assertEqual(r, dict( i2='smallint', i4='integer', i8='bigint', d='numeric', f4='real', f8='double precision', m='money', v4='character varying', c4='character', t='text')) else: self.assertEqual(r, dict( i2='int', i4='int', i8='int', d='num', f4='float', f8='float', m='money', v4='text', c4='text', t='text')) self.createTable('test_table', 'n int, 
alpha smallint, beta bool,' ' gamma char(5), tau text, v varchar(3)') r = get_attnames('test_table') self.assertIsInstance(r, dict) if self.regtypes: self.assertEqual(r, dict( n='integer', alpha='smallint', beta='boolean', gamma='character', tau='text', v='character varying')) else: self.assertEqual(r, dict( n='int', alpha='int', beta='bool', gamma='text', tau='text', v='text')) def testGetAttnamesWithQuotes(self): get_attnames = self.db.get_attnames table = 'test table for get_attnames()' self.createTable(table, '"Prime!" smallint, "much space" integer, "Questions?" text') r = get_attnames(table) self.assertIsInstance(r, dict) if self.regtypes: self.assertEqual(r, { 'Prime!': 'smallint', 'much space': 'integer', 'Questions?': 'text'}) else: self.assertEqual(r, { 'Prime!': 'int', 'much space': 'int', 'Questions?': 'text'}) table = 'yet another test table for get_attnames()' self.createTable(table, 'a smallint, b integer, c bigint,' ' e numeric, f real, f2 double precision, m money,' ' x smallint, y smallint, z smallint,' ' Normal_NaMe smallint, "Special Name" smallint,' ' t text, u char(2), v varchar(2),' ' primary key (y, u)', oids=True) r = get_attnames(table) self.assertIsInstance(r, dict) if self.regtypes: self.assertEqual(r, { 'a': 'smallint', 'b': 'integer', 'c': 'bigint', 'e': 'numeric', 'f': 'real', 'f2': 'double precision', 'm': 'money', 'normal_name': 'smallint', 'Special Name': 'smallint', 'u': 'character', 't': 'text', 'v': 'character varying', 'y': 'smallint', 'x': 'smallint', 'z': 'smallint', 'oid': 'oid'}) else: self.assertEqual(r, {'a': 'int', 'b': 'int', 'c': 'int', 'e': 'num', 'f': 'float', 'f2': 'float', 'm': 'money', 'normal_name': 'int', 'Special Name': 'int', 'u': 'text', 't': 'text', 'v': 'text', 'y': 'int', 'x': 'int', 'z': 'int', 'oid': 'int'}) def testGetAttnamesWithRegtypes(self): get_attnames = self.db.get_attnames self.createTable('test_table', 'n int, alpha smallint, beta bool,' ' gamma char(5), tau text, v varchar(3)') use_regtypes = 
self.db.use_regtypes regtypes = use_regtypes() self.assertEqual(regtypes, self.regtypes) use_regtypes(True) try: r = get_attnames("test_table") self.assertIsInstance(r, dict) finally: use_regtypes(regtypes) self.assertEqual(r, dict( n='integer', alpha='smallint', beta='boolean', gamma='character', tau='text', v='character varying')) def testGetAttnamesWithoutRegtypes(self): get_attnames = self.db.get_attnames self.createTable('test_table', 'n int, alpha smallint, beta bool,' ' gamma char(5), tau text, v varchar(3)') use_regtypes = self.db.use_regtypes regtypes = use_regtypes() self.assertEqual(regtypes, self.regtypes) use_regtypes(False) try: r = get_attnames("test_table") self.assertIsInstance(r, dict) finally: use_regtypes(regtypes) self.assertEqual(r, dict( n='int', alpha='int', beta='bool', gamma='text', tau='text', v='text')) def testGetAttnamesIsCached(self): get_attnames = self.db.get_attnames int_type = 'integer' if self.regtypes else 'int' text_type = 'text' query = self.db.query self.createTable('test_table', 'col int') r = get_attnames("test_table") self.assertIsInstance(r, dict) self.assertEqual(r, dict(col=int_type)) query("alter table test_table alter column col type text") query("alter table test_table add column col2 int") r = get_attnames("test_table") self.assertEqual(r, dict(col=int_type)) r = get_attnames("test_table", flush=True) self.assertEqual(r, dict(col=text_type, col2=int_type)) query("alter table test_table drop column col2") r = get_attnames("test_table") self.assertEqual(r, dict(col=text_type, col2=int_type)) r = get_attnames("test_table", flush=True) self.assertEqual(r, dict(col=text_type)) query("alter table test_table drop column col") r = get_attnames("test_table") self.assertEqual(r, dict(col=text_type)) r = get_attnames("test_table", flush=True) self.assertEqual(r, dict()) def testGetAttnamesIsOrdered(self): get_attnames = self.db.get_attnames r = get_attnames('test', flush=True) self.assertIsInstance(r, OrderedDict) if 
self.regtypes: self.assertEqual(r, OrderedDict([ ('i2', 'smallint'), ('i4', 'integer'), ('i8', 'bigint'), ('d', 'numeric'), ('f4', 'real'), ('f8', 'double precision'), ('m', 'money'), ('v4', 'character varying'), ('c4', 'character'), ('t', 'text')])) else: self.assertEqual(r, OrderedDict([ ('i2', 'int'), ('i4', 'int'), ('i8', 'int'), ('d', 'num'), ('f4', 'float'), ('f8', 'float'), ('m', 'money'), ('v4', 'text'), ('c4', 'text'), ('t', 'text')])) if OrderedDict is not dict: r = ' '.join(list(r.keys())) self.assertEqual(r, 'i2 i4 i8 d f4 f8 m v4 c4 t') table = 'test table for get_attnames' self.createTable(table, 'n int, alpha smallint, v varchar(3),' ' gamma char(5), tau text, beta bool') r = get_attnames(table) self.assertIsInstance(r, OrderedDict) if self.regtypes: self.assertEqual(r, OrderedDict([ ('n', 'integer'), ('alpha', 'smallint'), ('v', 'character varying'), ('gamma', 'character'), ('tau', 'text'), ('beta', 'boolean')])) else: self.assertEqual(r, OrderedDict([ ('n', 'int'), ('alpha', 'int'), ('v', 'text'), ('gamma', 'text'), ('tau', 'text'), ('beta', 'bool')])) if OrderedDict is not dict: r = ' '.join(list(r.keys())) self.assertEqual(r, 'n alpha v gamma tau beta') else: self.skipTest('OrderedDict is not supported') def testGetAttnamesIsAttrDict(self): AttrDict = pg.AttrDict get_attnames = self.db.get_attnames r = get_attnames('test', flush=True) self.assertIsInstance(r, AttrDict) if self.regtypes: self.assertEqual(r, AttrDict([ ('i2', 'smallint'), ('i4', 'integer'), ('i8', 'bigint'), ('d', 'numeric'), ('f4', 'real'), ('f8', 'double precision'), ('m', 'money'), ('v4', 'character varying'), ('c4', 'character'), ('t', 'text')])) else: self.assertEqual(r, AttrDict([ ('i2', 'int'), ('i4', 'int'), ('i8', 'int'), ('d', 'num'), ('f4', 'float'), ('f8', 'float'), ('m', 'money'), ('v4', 'text'), ('c4', 'text'), ('t', 'text')])) r = ' '.join(list(r.keys())) self.assertEqual(r, 'i2 i4 i8 d f4 f8 m v4 c4 t') table = 'test table for get_attnames' self.createTable(table, 
'n int, alpha smallint, v varchar(3),' ' gamma char(5), tau text, beta bool') r = get_attnames(table) self.assertIsInstance(r, AttrDict) if self.regtypes: self.assertEqual(r, AttrDict([ ('n', 'integer'), ('alpha', 'smallint'), ('v', 'character varying'), ('gamma', 'character'), ('tau', 'text'), ('beta', 'boolean')])) else: self.assertEqual(r, AttrDict([ ('n', 'int'), ('alpha', 'int'), ('v', 'text'), ('gamma', 'text'), ('tau', 'text'), ('beta', 'bool')])) r = ' '.join(list(r.keys())) self.assertEqual(r, 'n alpha v gamma tau beta') def testHasTablePrivilege(self): can = self.db.has_table_privilege self.assertEqual(can('test'), True) self.assertEqual(can('test', 'select'), True) self.assertEqual(can('test', 'SeLeCt'), True) self.assertEqual(can('test', 'SELECT'), True) self.assertEqual(can('test', 'insert'), True) self.assertEqual(can('test', 'update'), True) self.assertEqual(can('test', 'delete'), True) self.assertRaises(pg.DataError, can, 'test', 'foobar') self.assertRaises(pg.ProgrammingError, can, 'table_does_not_exist') r = self.db.query('select rolsuper FROM pg_roles' ' where rolname=current_user').getresult()[0][0] if not pg.get_bool(): r = r == 't' if r: self.skipTest('must not be superuser') self.assertEqual(can('pg_views', 'select'), True) self.assertEqual(can('pg_views', 'delete'), False) def testGet(self): get = self.db.get query = self.db.query table = 'get_test_table' self.assertRaises(TypeError, get) self.assertRaises(TypeError, get, table) self.createTable(table, 'n integer, t text', values=enumerate('xyz', start=1)) self.assertRaises(pg.ProgrammingError, get, table, 2) r = get(table, 2, 'n') self.assertIsInstance(r, dict) self.assertEqual(r, dict(n=2, t='y')) r = get(table, 1, 'n') self.assertEqual(r, dict(n=1, t='x')) r = get(table, (3,), ('n',)) self.assertEqual(r, dict(n=3, t='z')) r = get(table, 'y', 't') self.assertEqual(r, dict(n=2, t='y')) self.assertRaises(pg.DatabaseError, get, table, 4) self.assertRaises(pg.DatabaseError, get, table, 4, 'n') 
self.assertRaises(pg.DatabaseError, get, table, 'y') self.assertRaises(pg.DatabaseError, get, table, 2, 't') s = dict(n=3) self.assertRaises(pg.ProgrammingError, get, table, s) r = get(table, s, 'n') self.assertIs(r, s) self.assertEqual(r, dict(n=3, t='z')) s.update(t='x') r = get(table, s, 't') self.assertIs(r, s) self.assertEqual(s, dict(n=1, t='x')) r = get(table, s, ('n', 't')) self.assertIs(r, s) self.assertEqual(r, dict(n=1, t='x')) query('alter table "%s" alter n set not null' % table) query('alter table "%s" add primary key (n)' % table) r = get(table, 2) self.assertIsInstance(r, dict) self.assertEqual(r, dict(n=2, t='y')) self.assertEqual(get(table, 1)['t'], 'x') self.assertEqual(get(table, 3)['t'], 'z') self.assertEqual(get(table + '*', 2)['t'], 'y') self.assertEqual(get(table + ' *', 2)['t'], 'y') self.assertRaises(KeyError, get, table, (2, 2)) s = dict(n=3) r = get(table, s) self.assertIs(r, s) self.assertEqual(r, dict(n=3, t='z')) s.update(n=1) self.assertEqual(get(table, s)['t'], 'x') s.update(n=2) self.assertEqual(get(table, r)['t'], 'y') s.pop('n') self.assertRaises(KeyError, get, table, s) def testGetWithOid(self): get = self.db.get query = self.db.query table = 'get_with_oid_test_table' self.createTable(table, 'n integer, t text', oids=True, values=enumerate('xyz', start=1)) self.assertRaises(pg.ProgrammingError, get, table, 2) self.assertRaises(KeyError, get, table, {}, 'oid') r = get(table, 2, 'n') qoid = 'oid(%s)' % table self.assertIn(qoid, r) oid = r[qoid] self.assertIsInstance(oid, int) result = {'t': 'y', 'n': 2, qoid: oid} self.assertEqual(r, result) r = get(table, oid, 'oid') self.assertEqual(r, result) r = get(table, dict(oid=oid)) self.assertEqual(r, result) r = get(table, dict(oid=oid), 'oid') self.assertEqual(r, result) r = get(table, {qoid: oid}) self.assertEqual(r, result) r = get(table, {qoid: oid}, 'oid') self.assertEqual(r, result) self.assertEqual(get(table + '*', 2, 'n'), r) self.assertEqual(get(table + ' *', 2, 'n'), r) 
self.assertEqual(get(table, oid, 'oid')['t'], 'y') self.assertEqual(get(table, 1, 'n')['t'], 'x') self.assertEqual(get(table, 3, 'n')['t'], 'z') self.assertEqual(get(table, 2, 'n')['t'], 'y') self.assertRaises(pg.DatabaseError, get, table, 4, 'n') r['n'] = 3 self.assertEqual(get(table, r, 'n')['t'], 'z') self.assertEqual(get(table, 1, 'n')['t'], 'x') self.assertEqual(get(table, r, 'oid')['t'], 'z') query('alter table "%s" alter n set not null' % table) query('alter table "%s" add primary key (n)' % table) self.assertEqual(get(table, 3)['t'], 'z') self.assertEqual(get(table, 1)['t'], 'x') self.assertEqual(get(table, 2)['t'], 'y') r['n'] = 1 self.assertEqual(get(table, r)['t'], 'x') r['n'] = 3 self.assertEqual(get(table, r)['t'], 'z') r['n'] = 2 self.assertEqual(get(table, r)['t'], 'y') r = get(table, oid, 'oid') self.assertEqual(r, result) r = get(table, dict(oid=oid)) self.assertEqual(r, result) r = get(table, dict(oid=oid), 'oid') self.assertEqual(r, result) r = get(table, {qoid: oid}) self.assertEqual(r, result) r = get(table, {qoid: oid}, 'oid') self.assertEqual(r, result) r = get(table, dict(oid=oid, n=1)) self.assertEqual(r['n'], 1) self.assertNotEqual(r[qoid], oid) r = get(table, dict(oid=oid, t='z'), 't') self.assertEqual(r['n'], 3) self.assertNotEqual(r[qoid], oid) def testGetWithCompositeKey(self): get = self.db.get query = self.db.query table = 'get_test_table_1' self.createTable(table, 'n integer primary key, t text', values=enumerate('abc', start=1)) self.assertEqual(get(table, 2)['t'], 'b') self.assertEqual(get(table, 1, 'n')['t'], 'a') self.assertEqual(get(table, 2, ('n',))['t'], 'b') self.assertEqual(get(table, 3, ['n'])['t'], 'c') self.assertEqual(get(table, (2,), ('n',))['t'], 'b') self.assertEqual(get(table, 'b', 't')['n'], 2) self.assertEqual(get(table, ('a',), ('t',))['n'], 1) self.assertEqual(get(table, ['c'], ['t'])['n'], 3) table = 'get_test_table_2' self.createTable(table, 'n integer, m integer, t text, primary key (n, m)', values=[(n + 1, m 
+ 1, chr(ord('a') + 2 * n + m)) for n in range(3) for m in range(2)]) self.assertRaises(KeyError, get, table, 2) self.assertEqual(get(table, (1, 1))['t'], 'a') self.assertEqual(get(table, (1, 2))['t'], 'b') self.assertEqual(get(table, (2, 1))['t'], 'c') self.assertEqual(get(table, (1, 2), ('n', 'm'))['t'], 'b') self.assertEqual(get(table, (1, 2), ('m', 'n'))['t'], 'c') self.assertEqual(get(table, (3, 1), ('n', 'm'))['t'], 'e') self.assertEqual(get(table, (1, 3), ('m', 'n'))['t'], 'e') self.assertEqual(get(table, dict(n=2, m=2))['t'], 'd') self.assertEqual(get(table, dict(n=1, m=2), ('n', 'm'))['t'], 'b') self.assertEqual(get(table, dict(n=2, m=1), ['n', 'm'])['t'], 'c') self.assertEqual(get(table, dict(n=3, m=2), ('m', 'n'))['t'], 'f') def testGetWithQuotedNames(self): get = self.db.get query = self.db.query table = 'test table for get()' self.createTable(table, '"Prime!" smallint primary key,' ' "much space" integer, "Questions?" text', values=[(17, 1001, 'No!')]) r = get(table, 17) self.assertIsInstance(r, dict) self.assertEqual(r['Prime!'], 17) self.assertEqual(r['much space'], 1001) self.assertEqual(r['Questions?'], 'No!') def testGetFromView(self): self.db.query('delete from test where i4=14') self.db.query('insert into test (i4, v4) values(' "14, 'abc4')") r = self.db.get('test_view', 14, 'i4') self.assertIn('v4', r) self.assertEqual(r['v4'], 'abc4') def testGetLittleBobbyTables(self): get = self.db.get query = self.db.query self.createTable('test_students', 'firstname varchar primary key, nickname varchar, grade char(2)', values=[("D'Arcy", 'Darcey', 'A+'), ('Sheldon', 'Moonpie', 'A+'), ('Robert', 'Little Bobby Tables', 'D-')]) r = get('test_students', 'Sheldon') self.assertEqual(r, dict( firstname="Sheldon", nickname='Moonpie', grade='A+')) r = get('test_students', 'Robert') self.assertEqual(r, dict( firstname="Robert", nickname='Little Bobby Tables', grade='D-')) r = get('test_students', "D'Arcy") self.assertEqual(r, dict( firstname="D'Arcy", 
nickname='Darcey', grade='A+')) try: get('test_students', "D' Arcy") except pg.DatabaseError as error: self.assertEqual(str(error), 'No such record in test_students\nwhere "firstname" = $1\n' 'with $1="D\' Arcy"') try: get('test_students', "Robert'); TRUNCATE TABLE test_students;--") except pg.DatabaseError as error: self.assertEqual(str(error), 'No such record in test_students\nwhere "firstname" = $1\n' 'with $1="Robert\'); TRUNCATE TABLE test_students;--"') q = "select * from test_students order by 1 limit 4" r = query(q).getresult() self.assertEqual(len(r), 3) self.assertEqual(r[1][2], 'D-') def testInsert(self): insert = self.db.insert query = self.db.query bool_on = pg.get_bool() decimal = pg.get_decimal() table = 'insert_test_table' self.createTable(table, 'i2 smallint, i4 integer, i8 bigint,' ' d numeric, f4 real, f8 double precision, m money,' ' v4 varchar(4), c4 char(4), t text,' ' b boolean, ts timestamp', oids=True) oid_table = 'oid(%s)' % table tests = [dict(i2=None, i4=None, i8=None), (dict(i2='', i4='', i8=''), dict(i2=None, i4=None, i8=None)), (dict(i2=0, i4=0, i8=0), dict(i2=0, i4=0, i8=0)), dict(i2=42, i4=123456, i8=9876543210), dict(i2=2 ** 15 - 1, i4=int(2 ** 31 - 1), i8=long(2 ** 63 - 1)), dict(d=None), (dict(d=''), dict(d=None)), dict(d=Decimal(0)), (dict(d=0), dict(d=Decimal(0))), dict(f4=None, f8=None), dict(f4=0, f8=0), (dict(f4='', f8=''), dict(f4=None, f8=None)), (dict(d=1234.5, f4=1234.5, f8=1234.5), dict(d=Decimal('1234.5'))), dict(d=Decimal('123.456789'), f4=12.375, f8=123.4921875), dict(d=Decimal('123456789.9876543212345678987654321')), dict(m=None), (dict(m=''), dict(m=None)), dict(m=Decimal('-1234.56')), (dict(m=('-1234.56')), dict(m=Decimal('-1234.56'))), dict(m=Decimal('1234.56')), dict(m=Decimal('123456')), (dict(m='1234.56'), dict(m=Decimal('1234.56'))), (dict(m=1234.5), dict(m=Decimal('1234.5'))), (dict(m=-1234.5), dict(m=Decimal('-1234.5'))), (dict(m=123456), dict(m=Decimal('123456'))), (dict(m='1234567.89'), 
dict(m=Decimal('1234567.89'))), dict(b=None), (dict(b=''), dict(b=None)), dict(b='f'), dict(b='t'), (dict(b=0), dict(b='f')), (dict(b=1), dict(b='t')), (dict(b=False), dict(b='f')), (dict(b=True), dict(b='t')), (dict(b='0'), dict(b='f')), (dict(b='1'), dict(b='t')), (dict(b='n'), dict(b='f')), (dict(b='y'), dict(b='t')), (dict(b='no'), dict(b='f')), (dict(b='yes'), dict(b='t')), (dict(b='off'), dict(b='f')), (dict(b='on'), dict(b='t')), dict(v4=None, c4=None, t=None), (dict(v4='', c4='', t=''), dict(c4=' ' * 4)), dict(v4='1234', c4='1234', t='1234' * 10), dict(v4='abcd', c4='abcd', t='abcdefg'), (dict(v4='abc', c4='abc', t='abc'), dict(c4='abc ')), dict(ts=None), (dict(ts=''), dict(ts=None)), (dict(ts=0), dict(ts=None)), (dict(ts=False), dict(ts=None)), dict(ts='2012-12-21 00:00:00'), (dict(ts='2012-12-21'), dict(ts='2012-12-21 00:00:00')), dict(ts='2012-12-21 12:21:12'), dict(ts='2013-01-05 12:13:14'), dict(ts='current_timestamp')] for test in tests: if isinstance(test, dict): data = test change = {} else: data, change = test expect = data.copy() expect.update(change) if bool_on: b = expect.get('b') if b is not None: expect['b'] = b == 't' if decimal is not Decimal: d = expect.get('d') if d is not None: expect['d'] = decimal(d) m = expect.get('m') if m is not None: expect['m'] = decimal(m) self.assertEqual(insert(table, data), data) self.assertIn(oid_table, data) oid = data[oid_table] self.assertIsInstance(oid, int) data = dict(item for item in data.items() if item[0] in expect) ts = expect.get('ts') if ts: if ts == 'current_timestamp': ts = data['ts'] self.assertIsInstance(ts, datetime) self.assertEqual(ts.strftime('%Y-%m-%d'), strftime('%Y-%m-%d')) else: ts = datetime.strptime(ts, '%Y-%m-%d %H:%M:%S') expect['ts'] = ts self.assertEqual(data, expect) data = query( 'select oid,* from "%s"' % table).dictresult()[0] self.assertEqual(data['oid'], oid) data = dict(item for item in data.items() if item[0] in expect) self.assertEqual(data, expect) query('delete from 
"%s"' % table) def testInsertWithOid(self): insert = self.db.insert query = self.db.query self.createTable('test_table', 'n int', oids=True) self.assertRaises(pg.ProgrammingError, insert, 'test_table', m=1) r = insert('test_table', n=1) self.assertIsInstance(r, dict) self.assertEqual(r['n'], 1) self.assertNotIn('oid', r) qoid = 'oid(test_table)' self.assertIn(qoid, r) oid = r[qoid] self.assertEqual(sorted(r.keys()), ['n', qoid]) r = insert('test_table', n=2, oid=oid) self.assertIsInstance(r, dict) self.assertEqual(r['n'], 2) self.assertIn(qoid, r) self.assertNotEqual(r[qoid], oid) self.assertNotIn('oid', r) r = insert('test_table', None, n=3) self.assertIsInstance(r, dict) self.assertEqual(r['n'], 3) s = r r = insert('test_table', r) self.assertIs(r, s) self.assertEqual(r['n'], 3) r = insert('test_table *', r) self.assertIs(r, s) self.assertEqual(r['n'], 3) r = insert('test_table', r, n=4) self.assertIs(r, s) self.assertEqual(r['n'], 4) self.assertNotIn('oid', r) self.assertIn(qoid, r) oid = r[qoid] r = insert('test_table', r, n=5, oid=oid) self.assertIs(r, s) self.assertEqual(r['n'], 5) self.assertIn(qoid, r) self.assertNotEqual(r[qoid], oid) self.assertNotIn('oid', r) r['oid'] = oid = r[qoid] r = insert('test_table', r, n=6) self.assertIs(r, s) self.assertEqual(r['n'], 6) self.assertIn(qoid, r) self.assertNotEqual(r[qoid], oid) self.assertNotIn('oid', r) q = 'select n from test_table order by 1 limit 9' r = ' '.join(str(row[0]) for row in query(q).getresult()) self.assertEqual(r, '1 2 3 3 3 4 5 6') query("truncate test_table") query("alter table test_table add unique (n)") r = insert('test_table', dict(n=7)) self.assertIsInstance(r, dict) self.assertEqual(r['n'], 7) self.assertRaises(pg.IntegrityError, insert, 'test_table', r) r['n'] = 6 self.assertRaises(pg.IntegrityError, insert, 'test_table', r, n=7) self.assertIsInstance(r, dict) self.assertEqual(r['n'], 7) r['n'] = 6 r = insert('test_table', r) self.assertIsInstance(r, dict) self.assertEqual(r['n'], 6) r = ' 
'.join(str(row[0]) for row in query(q).getresult()) self.assertEqual(r, '6 7') def testInsertWithQuotedNames(self): insert = self.db.insert query = self.db.query table = 'test table for insert()' self.createTable(table, '"Prime!" smallint primary key,' ' "much space" integer, "Questions?" text') r = {'Prime!': 11, 'much space': 2002, 'Questions?': 'What?'} r = insert(table, r) self.assertIsInstance(r, dict) self.assertEqual(r['Prime!'], 11) self.assertEqual(r['much space'], 2002) self.assertEqual(r['Questions?'], 'What?') r = query('select * from "%s" limit 2' % table).dictresult() self.assertEqual(len(r), 1) r = r[0] self.assertEqual(r['Prime!'], 11) self.assertEqual(r['much space'], 2002) self.assertEqual(r['Questions?'], 'What?') def testInsertIntoView(self): insert = self.db.insert query = self.db.query query("truncate test") q = 'select * from test_view order by i4 limit 3' r = query(q).getresult() self.assertEqual(r, []) r = dict(i4=1234, v4='abcd') insert('test', r) self.assertIsNone(r['i2']) self.assertEqual(r['i4'], 1234) self.assertIsNone(r['i8']) self.assertEqual(r['v4'], 'abcd') self.assertIsNone(r['c4']) r = query(q).getresult() self.assertEqual(r, [(1234, 'abcd')]) r = dict(i4=5678, v4='efgh') try: insert('test_view', r) except (pg.OperationalError, pg.NotSupportedError) as error: if self.db.server_version < 90300: # must setup rules in older PostgreSQL versions self.skipTest('database cannot insert into view') self.fail(str(error)) self.assertNotIn('i2', r) self.assertEqual(r['i4'], 5678) self.assertNotIn('i8', r) self.assertEqual(r['v4'], 'efgh') self.assertNotIn('c4', r) r = query(q).getresult() self.assertEqual(r, [(1234, 'abcd'), (5678, 'efgh')]) def testUpdate(self): update = self.db.update query = self.db.query self.assertRaises(pg.ProgrammingError, update, 'test', i2=2, i4=4, i8=8) table = 'update_test_table' self.createTable(table, 'n integer, t text', oids=True, values=enumerate('xyz', start=1)) self.assertRaises(pg.ProgrammingError, 
self.db.get, table, 2) r = self.db.get(table, 2, 'n') r['t'] = 'u' s = update(table, r) self.assertEqual(s, r) q = 'select t from "%s" where n=2' % table r = query(q).getresult()[0][0] self.assertEqual(r, 'u') def testUpdateWithOid(self): update = self.db.update get = self.db.get query = self.db.query self.createTable('test_table', 'n int', oids=True, values=[1]) s = get('test_table', 1, 'n') self.assertIsInstance(s, dict) self.assertEqual(s['n'], 1) s['n'] = 2 r = update('test_table', s) self.assertIs(r, s) self.assertEqual(r['n'], 2) qoid = 'oid(test_table)' self.assertIn(qoid, r) self.assertNotIn('oid', r) self.assertEqual(sorted(r.keys()), ['n', qoid]) r['n'] = 3 oid = r.pop(qoid) r = update('test_table', r, oid=oid) self.assertIs(r, s) self.assertEqual(r['n'], 3) r.pop(qoid) self.assertRaises(pg.ProgrammingError, update, 'test_table', r) s = get('test_table', 3, 'n') self.assertIsInstance(s, dict) self.assertEqual(s['n'], 3) s.pop('n') r = update('test_table', s) oid = r.pop(qoid) self.assertEqual(r, {}) q = "select n from test_table limit 2" r = query(q).getresult() self.assertEqual(r, [(3,)]) query("insert into test_table values (1)") self.assertRaises(pg.ProgrammingError, update, 'test_table', dict(oid=oid, n=4)) r = update('test_table', dict(n=4), oid=oid) self.assertEqual(r['n'], 4) r = update('test_table *', dict(n=5), oid=oid) self.assertEqual(r['n'], 5) query("alter table test_table add column m int") query("alter table test_table add primary key (n)") self.assertIn('m', self.db.get_attnames('test_table', flush=True)) self.assertEqual('n', self.db.pkey('test_table', flush=True)) s = dict(n=1, m=4) r = update('test_table', s) self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['m'], 4) s = dict(m=7) r = update('test_table', s, n=5) self.assertIs(r, s) self.assertEqual(r['n'], 5) self.assertEqual(r['m'], 7) q = "select n, m from test_table order by 1 limit 3" r = query(q).getresult() self.assertEqual(r, [(1, 4), (5, 7)]) s = dict(m=9, 
oid=oid) self.assertRaises(KeyError, update, 'test_table', s) r = update('test_table', s, oid=oid) self.assertIs(r, s) self.assertEqual(r['n'], 5) self.assertEqual(r['m'], 9) s = dict(n=1, m=3, oid=oid) r = update('test_table', s) self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['m'], 3) r = query(q).getresult() self.assertEqual(r, [(1, 3), (5, 9)]) s.update(n=4, m=7) r = update('test_table', s, oid=oid) self.assertIs(r, s) self.assertEqual(r['n'], 4) self.assertEqual(r['m'], 7) r = query(q).getresult() self.assertEqual(r, [(1, 3), (4, 7)]) def testUpdateWithoutOid(self): update = self.db.update query = self.db.query self.assertRaises(pg.ProgrammingError, update, 'test', i2=2, i4=4, i8=8) table = 'update_test_table' self.createTable(table, 'n integer primary key, t text', oids=False, values=enumerate('xyz', start=1)) r = self.db.get(table, 2) r['t'] = 'u' s = update(table, r) self.assertEqual(s, r) q = 'select t from "%s" where n=2' % table r = query(q).getresult()[0][0] self.assertEqual(r, 'u') def testUpdateWithCompositeKey(self): update = self.db.update query = self.db.query table = 'update_test_table_1' self.createTable(table, 'n integer primary key, t text', values=enumerate('abc', start=1)) self.assertRaises(KeyError, update, table, dict(t='b')) s = dict(n=2, t='d') r = update(table, s) self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertEqual(r['t'], 'd') q = 'select t from "%s" where n=2' % table r = query(q).getresult()[0][0] self.assertEqual(r, 'd') s.update(dict(n=4, t='e')) r = update(table, s) self.assertEqual(r['n'], 4) self.assertEqual(r['t'], 'e') q = 'select t from "%s" where n=2' % table r = query(q).getresult()[0][0] self.assertEqual(r, 'd') q = 'select t from "%s" where n=4' % table r = query(q).getresult() self.assertEqual(len(r), 0) query('drop table "%s"' % table) table = 'update_test_table_2' self.createTable(table, 'n integer, m integer, t text, primary key (n, m)', values=[(n + 1, m + 1, chr(ord('a') + 2 * n + m)) 
for n in range(3) for m in range(2)]) self.assertRaises(KeyError, update, table, dict(n=2, t='b')) self.assertEqual(update(table, dict(n=2, m=2, t='x'))['t'], 'x') q = 'select t from "%s" where n=2 order by m' % table r = [r[0] for r in query(q).getresult()] self.assertEqual(r, ['c', 'x']) def testUpdateWithQuotedNames(self): update = self.db.update query = self.db.query table = 'test table for update()' self.createTable(table, '"Prime!" smallint primary key,' ' "much space" integer, "Questions?" text', values=[(13, 3003, 'Why!')]) r = {'Prime!': 13, 'much space': 7007, 'Questions?': 'When?'} r = update(table, r) self.assertIsInstance(r, dict) self.assertEqual(r['Prime!'], 13) self.assertEqual(r['much space'], 7007) self.assertEqual(r['Questions?'], 'When?') r = query('select * from "%s" limit 2' % table).dictresult() self.assertEqual(len(r), 1) r = r[0] self.assertEqual(r['Prime!'], 13) self.assertEqual(r['much space'], 7007) self.assertEqual(r['Questions?'], 'When?') def testUpsert(self): upsert = self.db.upsert query = self.db.query self.assertRaises(pg.ProgrammingError, upsert, 'test', i2=2, i4=4, i8=8) table = 'upsert_test_table' self.createTable(table, 'n integer primary key, t text', oids=True) s = dict(n=1, t='x') try: r = upsert(table, s) except pg.ProgrammingError as error: if self.db.server_version < 90500: self.skipTest('database does not support upsert') self.fail(str(error)) self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['t'], 'x') s.update(n=2, t='y') r = upsert(table, s, **dict.fromkeys(s)) self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertEqual(r['t'], 'y') q = 'select n, t from "%s" order by n limit 3' % table r = query(q).getresult() self.assertEqual(r, [(1, 'x'), (2, 'y')]) s.update(t='z') r = upsert(table, s) self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertEqual(r['t'], 'z') r = query(q).getresult() self.assertEqual(r, [(1, 'x'), (2, 'z')]) s.update(t='n') r = upsert(table, s, t=False) self.assertIs(r, s) 
self.assertEqual(r['n'], 2) self.assertEqual(r['t'], 'z') r = query(q).getresult() self.assertEqual(r, [(1, 'x'), (2, 'z')]) s.update(t='y') r = upsert(table, s, t=True) self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertEqual(r['t'], 'y') r = query(q).getresult() self.assertEqual(r, [(1, 'x'), (2, 'y')]) s.update(t='n') r = upsert(table, s, t="included.t || '2'") self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertEqual(r['t'], 'y2') r = query(q).getresult() self.assertEqual(r, [(1, 'x'), (2, 'y2')]) s.update(t='y') r = upsert(table, s, t="excluded.t || '3'") self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertEqual(r['t'], 'y3') r = query(q).getresult() self.assertEqual(r, [(1, 'x'), (2, 'y3')]) s.update(n=1, t='2') r = upsert(table, s, t="included.t || excluded.t") self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['t'], 'x2') r = query(q).getresult() self.assertEqual(r, [(1, 'x2'), (2, 'y3')]) # not existing columns and oid parameter should be ignored s = dict(m=3, u='z') r = upsert(table, s, oid='invalid') self.assertIs(r, s) def testUpsertWithOid(self): upsert = self.db.upsert get = self.db.get query = self.db.query self.createTable('test_table', 'n int', oids=True, values=[1]) self.assertRaises(pg.ProgrammingError, upsert, 'test_table', dict(n=2)) r = get('test_table', 1, 'n') self.assertIsInstance(r, dict) self.assertEqual(r['n'], 1) qoid = 'oid(test_table)' self.assertIn(qoid, r) self.assertNotIn('oid', r) oid = r[qoid] self.assertRaises(pg.ProgrammingError, upsert, 'test_table', dict(n=2, oid=oid)) query("alter table test_table add column m int") query("alter table test_table add primary key (n)") self.assertIn('m', self.db.get_attnames('test_table', flush=True)) self.assertEqual('n', self.db.pkey('test_table', flush=True)) s = dict(n=2) try: r = upsert('test_table', s) except pg.ProgrammingError as error: if self.db.server_version < 90500: self.skipTest('database does not support upsert') self.fail(str(error)) 
self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertIsNone(r['m']) q = query("select n, m from test_table order by n limit 3") self.assertEqual(q.getresult(), [(1, None), (2, None)]) r['oid'] = oid r = upsert('test_table', r) self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertIsNone(r['m']) self.assertIn(qoid, r) self.assertNotIn('oid', r) self.assertNotEqual(r[qoid], oid) r['m'] = 7 r = upsert('test_table', r) self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertEqual(r['m'], 7) r.update(n=1, m=3) r = upsert('test_table', r) self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['m'], 3) q = query("select n, m from test_table order by n limit 3") self.assertEqual(q.getresult(), [(1, 3), (2, 7)]) r = upsert('test_table', r, oid='invalid') self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['m'], 3) r['m'] = 5 r = upsert('test_table', r, m=False) self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['m'], 3) r['m'] = 5 r = upsert('test_table', r, m=True) self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['m'], 5) r.update(n=2, m=1) r = upsert('test_table', r, m='included.m') self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertEqual(r['m'], 7) r['m'] = 9 r = upsert('test_table', r, m='excluded.m') self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertEqual(r['m'], 9) r['m'] = 8 r = upsert('test_table *', r, m='included.m + 1') self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertEqual(r['m'], 10) q = query("select n, m from test_table order by n limit 3") self.assertEqual(q.getresult(), [(1, 5), (2, 10)]) def testUpsertWithCompositeKey(self): upsert = self.db.upsert query = self.db.query table = 'upsert_test_table_2' self.createTable(table, 'n integer, m integer, t text, primary key (n, m)') s = dict(n=1, m=2, t='x') try: r = upsert(table, s) except pg.ProgrammingError as error: if self.db.server_version < 90500: self.skipTest('database does not support upsert') 
self.fail(str(error)) self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['m'], 2) self.assertEqual(r['t'], 'x') s.update(m=3, t='y') r = upsert(table, s, **dict.fromkeys(s)) self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['m'], 3) self.assertEqual(r['t'], 'y') q = 'select n, m, t from "%s" order by n, m limit 3' % table r = query(q).getresult() self.assertEqual(r, [(1, 2, 'x'), (1, 3, 'y')]) s.update(t='z') r = upsert(table, s) self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['m'], 3) self.assertEqual(r['t'], 'z') r = query(q).getresult() self.assertEqual(r, [(1, 2, 'x'), (1, 3, 'z')]) s.update(t='n') r = upsert(table, s, t=False) self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['m'], 3) self.assertEqual(r['t'], 'z') r = query(q).getresult() self.assertEqual(r, [(1, 2, 'x'), (1, 3, 'z')]) s.update(t='n') r = upsert(table, s, t=True) self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['m'], 3) self.assertEqual(r['t'], 'n') r = query(q).getresult() self.assertEqual(r, [(1, 2, 'x'), (1, 3, 'n')]) s.update(n=2, t='y') r = upsert(table, s, t="'z'") self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertEqual(r['m'], 3) self.assertEqual(r['t'], 'y') r = query(q).getresult() self.assertEqual(r, [(1, 2, 'x'), (1, 3, 'n'), (2, 3, 'y')]) s.update(n=1, t='m') r = upsert(table, s, t='included.t || excluded.t') self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['m'], 3) self.assertEqual(r['t'], 'nm') r = query(q).getresult() self.assertEqual(r, [(1, 2, 'x'), (1, 3, 'nm'), (2, 3, 'y')]) def testUpsertWithQuotedNames(self): upsert = self.db.upsert query = self.db.query table = 'test table for upsert()' self.createTable(table, '"Prime!" smallint primary key,' ' "much space" integer, "Questions?" 
text') s = {'Prime!': 31, 'much space': 9009, 'Questions?': 'Yes.'} try: r = upsert(table, s) except pg.ProgrammingError as error: if self.db.server_version < 90500: self.skipTest('database does not support upsert') self.fail(str(error)) self.assertIs(r, s) self.assertEqual(r['Prime!'], 31) self.assertEqual(r['much space'], 9009) self.assertEqual(r['Questions?'], 'Yes.') q = 'select * from "%s" limit 2' % table r = query(q).getresult() self.assertEqual(r, [(31, 9009, 'Yes.')]) s.update({'Questions?': 'No.'}) r = upsert(table, s) self.assertIs(r, s) self.assertEqual(r['Prime!'], 31) self.assertEqual(r['much space'], 9009) self.assertEqual(r['Questions?'], 'No.') r = query(q).getresult() self.assertEqual(r, [(31, 9009, 'No.')]) def testClear(self): clear = self.db.clear f = False if pg.get_bool() else 'f' r = clear('test') result = dict( i2=0, i4=0, i8=0, d=0, f4=0, f8=0, m=0, v4='', c4='', t='') self.assertEqual(r, result) table = 'clear_test_table' self.createTable(table, 'n integer, f float, b boolean, d date, t text', oids=True) r = clear(table) result = dict(n=0, f=0, b=f, d='', t='') self.assertEqual(r, result) r['a'] = r['f'] = r['n'] = 1 r['d'] = r['t'] = 'x' r['b'] = 't' r['oid'] = long(1) r = clear(table, r) result = dict(a=1, n=0, f=0, b=f, d='', t='', oid=long(1)) self.assertEqual(r, result) def testClearWithQuotedNames(self): clear = self.db.clear table = 'test table for clear()' self.createTable(table, '"Prime!" smallint primary key,' ' "much space" integer, "Questions?" 
text') r = clear(table) self.assertIsInstance(r, dict) self.assertEqual(r['Prime!'], 0) self.assertEqual(r['much space'], 0) self.assertEqual(r['Questions?'], '') def testDelete(self): delete = self.db.delete query = self.db.query self.assertRaises(pg.ProgrammingError, delete, 'test', dict(i2=2, i4=4, i8=8)) table = 'delete_test_table' self.createTable(table, 'n integer, t text', oids=True, values=enumerate('xyz', start=1)) self.assertRaises(pg.ProgrammingError, self.db.get, table, 2) r = self.db.get(table, 1, 'n') s = delete(table, r) self.assertEqual(s, 1) r = self.db.get(table, 3, 'n') s = delete(table, r) self.assertEqual(s, 1) s = delete(table, r) self.assertEqual(s, 0) r = query('select * from "%s"' % table).dictresult() self.assertEqual(len(r), 1) r = r[0] result = {'n': 2, 't': 'y'} self.assertEqual(r, result) r = self.db.get(table, 2, 'n') s = delete(table, r) self.assertEqual(s, 1) s = delete(table, r) self.assertEqual(s, 0) self.assertRaises(pg.DatabaseError, self.db.get, table, 2, 'n') # not existing columns and oid parameter should be ignored r.update(m=3, u='z', oid='invalid') s = delete(table, r) self.assertEqual(s, 0) def testDeleteWithOid(self): delete = self.db.delete get = self.db.get query = self.db.query self.createTable('test_table', 'n int', oids=True, values=range(1, 7)) r = dict(n=3) self.assertRaises(pg.ProgrammingError, delete, 'test_table', r) s = get('test_table', 1, 'n') qoid = 'oid(test_table)' self.assertIn(qoid, s) r = delete('test_table', s) self.assertEqual(r, 1) r = delete('test_table', s) self.assertEqual(r, 0) q = "select min(n),count(n) from test_table" self.assertEqual(query(q).getresult()[0], (2, 5)) oid = get('test_table', 2, 'n')[qoid] s = dict(oid=oid, n=2) self.assertRaises(pg.ProgrammingError, delete, 'test_table', s) r = delete('test_table', None, oid=oid) self.assertEqual(r, 1) r = delete('test_table', None, oid=oid) self.assertEqual(r, 0) self.assertEqual(query(q).getresult()[0], (3, 4)) s = dict(oid=oid, n=2) oid = 
get('test_table', 3, 'n')[qoid] self.assertRaises(pg.ProgrammingError, delete, 'test_table', s) r = delete('test_table', s, oid=oid) self.assertEqual(r, 1) r = delete('test_table', s, oid=oid) self.assertEqual(r, 0) self.assertEqual(query(q).getresult()[0], (4, 3)) s = get('test_table', 4, 'n') r = delete('test_table *', s) self.assertEqual(r, 1) r = delete('test_table *', s) self.assertEqual(r, 0) self.assertEqual(query(q).getresult()[0], (5, 2)) oid = get('test_table', 5, 'n')[qoid] s = {qoid: oid, 'm': 4} r = delete('test_table', s, m=6) self.assertEqual(r, 1) r = delete('test_table *', s) self.assertEqual(r, 0) self.assertEqual(query(q).getresult()[0], (6, 1)) query("alter table test_table add column m int") query("alter table test_table add primary key (n)") self.assertIn('m', self.db.get_attnames('test_table', flush=True)) self.assertEqual('n', self.db.pkey('test_table', flush=True)) for i in range(5): query("insert into test_table values (%d, %d)" % (i + 1, i + 2)) s = dict(m=2) self.assertRaises(KeyError, delete, 'test_table', s) s = dict(m=2, oid=oid) self.assertRaises(KeyError, delete, 'test_table', s) r = delete('test_table', dict(m=2), oid=oid) self.assertEqual(r, 0) oid = get('test_table', 1, 'n')[qoid] s = dict(oid=oid) self.assertRaises(KeyError, delete, 'test_table', s) r = delete('test_table', s, oid=oid) self.assertEqual(r, 1) r = delete('test_table', s, oid=oid) self.assertEqual(r, 0) self.assertEqual(query(q).getresult()[0], (2, 5)) s = get('test_table', 2, 'n') del s['n'] r = delete('test_table', s) self.assertEqual(r, 1) r = delete('test_table', s) self.assertEqual(r, 0) self.assertEqual(query(q).getresult()[0], (3, 4)) r = delete('test_table', n=3) self.assertEqual(r, 1) r = delete('test_table', n=3) self.assertEqual(r, 0) self.assertEqual(query(q).getresult()[0], (4, 3)) r = delete('test_table', None, n=4) self.assertEqual(r, 1) r = delete('test_table', None, n=4) self.assertEqual(r, 0) self.assertEqual(query(q).getresult()[0], (5, 2)) s = 
dict(n=6) r = delete('test_table', s, n=5) self.assertEqual(r, 1) r = delete('test_table', s, n=5) self.assertEqual(r, 0) s = get('test_table', 6, 'n') self.assertEqual(s['n'], 6) s['n'] = 7 r = delete('test_table', s) self.assertEqual(r, 1) self.assertEqual(query(q).getresult()[0], (None, 0)) def testDeleteWithCompositeKey(self): query = self.db.query table = 'delete_test_table_1' self.createTable(table, 'n integer primary key, t text', values=enumerate('abc', start=1)) self.assertRaises(KeyError, self.db.delete, table, dict(t='b')) self.assertEqual(self.db.delete(table, dict(n=2)), 1) r = query('select t from "%s" where n=2' % table).getresult() self.assertEqual(r, []) self.assertEqual(self.db.delete(table, dict(n=2)), 0) r = query('select t from "%s" where n=3' % table).getresult()[0][0] self.assertEqual(r, 'c') table = 'delete_test_table_2' self.createTable(table, 'n integer, m integer, t text, primary key (n, m)', values=[(n + 1, m + 1, chr(ord('a') + 2 * n + m)) for n in range(3) for m in range(2)]) self.assertRaises(KeyError, self.db.delete, table, dict(n=2, t='b')) self.assertEqual(self.db.delete(table, dict(n=2, m=2)), 1) r = [r[0] for r in query('select t from "%s" where n=2' ' order by m' % table).getresult()] self.assertEqual(r, ['c']) self.assertEqual(self.db.delete(table, dict(n=2, m=2)), 0) r = [r[0] for r in query('select t from "%s" where n=3' ' order by m' % table).getresult()] self.assertEqual(r, ['e', 'f']) self.assertEqual(self.db.delete(table, dict(n=3, m=1)), 1) r = [r[0] for r in query('select t from "%s" where n=3' ' order by m' % table).getresult()] self.assertEqual(r, ['f']) def testDeleteWithQuotedNames(self): delete = self.db.delete query = self.db.query table = 'test table for delete()' self.createTable(table, '"Prime!" smallint primary key,' ' "much space" integer, "Questions?" 
text', values=[(19, 5005, 'Yes!')]) r = {'Prime!': 17} r = delete(table, r) self.assertEqual(r, 0) r = query('select count(*) from "%s"' % table).getresult() self.assertEqual(r[0][0], 1) r = {'Prime!': 19} r = delete(table, r) self.assertEqual(r, 1) r = query('select count(*) from "%s"' % table).getresult() self.assertEqual(r[0][0], 0) def testDeleteReferenced(self): delete = self.db.delete query = self.db.query self.createTable('test_parent', 'n smallint primary key', values=range(3)) self.createTable('test_child', 'n smallint primary key references test_parent', values=range(3)) q = ("select (select count(*) from test_parent)," " (select count(*) from test_child)") self.assertEqual(query(q).getresult()[0], (3, 3)) self.assertRaises(pg.IntegrityError, delete, 'test_parent', None, n=2) self.assertRaises(pg.IntegrityError, delete, 'test_parent *', None, n=2) r = delete('test_child', None, n=2) self.assertEqual(r, 1) self.assertEqual(query(q).getresult()[0], (3, 2)) r = delete('test_parent', None, n=2) self.assertEqual(r, 1) self.assertEqual(query(q).getresult()[0], (2, 2)) self.assertRaises(pg.IntegrityError, delete, 'test_parent', dict(n=0)) self.assertRaises(pg.IntegrityError, delete, 'test_parent *', dict(n=0)) r = delete('test_child', dict(n=0)) self.assertEqual(r, 1) self.assertEqual(query(q).getresult()[0], (2, 1)) r = delete('test_child', dict(n=0)) self.assertEqual(r, 0) r = delete('test_parent', dict(n=0)) self.assertEqual(r, 1) self.assertEqual(query(q).getresult()[0], (1, 1)) r = delete('test_parent', None, n=0) self.assertEqual(r, 0) q = "select n from test_parent natural join test_child limit 2" self.assertEqual(query(q).getresult(), [(1,)]) def testTempCrud(self): table = 'test_temp_table' self.createTable(table, "n int primary key, t varchar", temporary=True) self.db.insert(table, dict(n=1, t='one')) self.db.insert(table, dict(n=2, t='too')) self.db.insert(table, dict(n=3, t='three')) r = self.db.get(table, 2) self.assertEqual(r['t'], 'too') 
self.db.update(table, dict(n=2, t='two')) r = self.db.get(table, 2) self.assertEqual(r['t'], 'two') self.db.delete(table, r) r = self.db.query('select n, t from %s order by 1' % table).getresult() self.assertEqual(r, [(1, 'one'), (3, 'three')]) def testTruncate(self): truncate = self.db.truncate self.assertRaises(TypeError, truncate, None) self.assertRaises(TypeError, truncate, 42) self.assertRaises(TypeError, truncate, dict(test_table=None)) query = self.db.query self.createTable('test_table', 'n smallint', temporary=False, values=[1] * 3) q = "select count(*) from test_table" r = query(q).getresult()[0][0] self.assertEqual(r, 3) truncate('test_table') r = query(q).getresult()[0][0] self.assertEqual(r, 0) for i in range(3): query("insert into test_table values (1)") r = query(q).getresult()[0][0] self.assertEqual(r, 3) truncate('public.test_table') r = query(q).getresult()[0][0] self.assertEqual(r, 0) self.createTable('test_table_2', 'n smallint', temporary=True) for t in (list, tuple, set): for i in range(3): query("insert into test_table values (1)") query("insert into test_table_2 values (2)") q = ("select (select count(*) from test_table)," " (select count(*) from test_table_2)") r = query(q).getresult()[0] self.assertEqual(r, (3, 3)) truncate(t(['test_table', 'test_table_2'])) r = query(q).getresult()[0] self.assertEqual(r, (0, 0)) def testTruncateRestart(self): truncate = self.db.truncate self.assertRaises(TypeError, truncate, 'test_table', restart='invalid') query = self.db.query self.createTable('test_table', 'n serial, t text') for n in range(3): query("insert into test_table (t) values ('test')") q = "select count(n), min(n), max(n) from test_table" r = query(q).getresult()[0] self.assertEqual(r, (3, 1, 3)) truncate('test_table') r = query(q).getresult()[0] self.assertEqual(r, (0, None, None)) for n in range(3): query("insert into test_table (t) values ('test')") r = query(q).getresult()[0] self.assertEqual(r, (3, 4, 6)) truncate('test_table', 
restart=True) r = query(q).getresult()[0] self.assertEqual(r, (0, None, None)) for n in range(3): query("insert into test_table (t) values ('test')") r = query(q).getresult()[0] self.assertEqual(r, (3, 1, 3)) def testTruncateCascade(self): truncate = self.db.truncate self.assertRaises(TypeError, truncate, 'test_table', cascade='invalid') query = self.db.query self.createTable('test_parent', 'n smallint primary key', values=range(3)) self.createTable('test_child', 'n smallint primary key references test_parent (n)', values=range(3)) q = ("select (select count(*) from test_parent)," " (select count(*) from test_child)") r = query(q).getresult()[0] self.assertEqual(r, (3, 3)) self.assertRaises(pg.NotSupportedError, truncate, 'test_parent') truncate(['test_parent', 'test_child']) r = query(q).getresult()[0] self.assertEqual(r, (0, 0)) for n in range(3): query("insert into test_parent (n) values (%d)" % n) query("insert into test_child (n) values (%d)" % n) r = query(q).getresult()[0] self.assertEqual(r, (3, 3)) truncate('test_parent', cascade=True) r = query(q).getresult()[0] self.assertEqual(r, (0, 0)) for n in range(3): query("insert into test_parent (n) values (%d)" % n) query("insert into test_child (n) values (%d)" % n) r = query(q).getresult()[0] self.assertEqual(r, (3, 3)) truncate('test_child') r = query(q).getresult()[0] self.assertEqual(r, (3, 0)) self.assertRaises(pg.NotSupportedError, truncate, 'test_parent') truncate('test_parent', cascade=True) r = query(q).getresult()[0] self.assertEqual(r, (0, 0)) def testTruncateOnly(self): truncate = self.db.truncate self.assertRaises(TypeError, truncate, 'test_table', only='invalid') query = self.db.query self.createTable('test_parent', 'n smallint') self.createTable('test_child', 'm smallint) inherits (test_parent') for n in range(3): query("insert into test_parent (n) values (1)") query("insert into test_child (n, m) values (2, 3)") q = ("select (select count(*) from test_parent)," " (select count(*) from 
test_child)") r = query(q).getresult()[0] self.assertEqual(r, (6, 3)) truncate('test_parent') r = query(q).getresult()[0] self.assertEqual(r, (0, 0)) for n in range(3): query("insert into test_parent (n) values (1)") query("insert into test_child (n, m) values (2, 3)") r = query(q).getresult()[0] self.assertEqual(r, (6, 3)) truncate('test_parent*') r = query(q).getresult()[0] self.assertEqual(r, (0, 0)) for n in range(3): query("insert into test_parent (n) values (1)") query("insert into test_child (n, m) values (2, 3)") r = query(q).getresult()[0] self.assertEqual(r, (6, 3)) truncate('test_parent', only=True) r = query(q).getresult()[0] self.assertEqual(r, (3, 3)) truncate('test_parent', only=False) r = query(q).getresult()[0] self.assertEqual(r, (0, 0)) self.assertRaises(ValueError, truncate, 'test_parent*', only=True) truncate('test_parent*', only=False) self.createTable('test_parent_2', 'n smallint') self.createTable('test_child_2', 'm smallint) inherits (test_parent_2') for t in '', '_2': for n in range(3): query("insert into test_parent%s (n) values (1)" % t) query("insert into test_child%s (n, m) values (2, 3)" % t) q = ("select (select count(*) from test_parent)," " (select count(*) from test_child)," " (select count(*) from test_parent_2)," " (select count(*) from test_child_2)") r = query(q).getresult()[0] self.assertEqual(r, (6, 3, 6, 3)) truncate(['test_parent', 'test_parent_2'], only=[False, True]) r = query(q).getresult()[0] self.assertEqual(r, (0, 0, 3, 3)) truncate(['test_parent', 'test_parent_2'], only=False) r = query(q).getresult()[0] self.assertEqual(r, (0, 0, 0, 0)) self.assertRaises(ValueError, truncate, ['test_parent*', 'test_child'], only=[True, False]) truncate(['test_parent*', 'test_child'], only=[False, True]) def testTruncateQuoted(self): truncate = self.db.truncate query = self.db.query table = "test table for truncate()" self.createTable(table, 'n smallint', temporary=False, values=[1] * 3) q = 'select count(*) from "%s"' % table r = 
query(q).getresult()[0][0] self.assertEqual(r, 3) truncate(table) r = query(q).getresult()[0][0] self.assertEqual(r, 0) for i in range(3): query('insert into "%s" values (1)' % table) r = query(q).getresult()[0][0] self.assertEqual(r, 3) truncate('public."%s"' % table) r = query(q).getresult()[0][0] self.assertEqual(r, 0) def testGetAsList(self): get_as_list = self.db.get_as_list self.assertRaises(TypeError, get_as_list) self.assertRaises(TypeError, get_as_list, None) query = self.db.query table = 'test_aslist' r = query('select 1 as colname').namedresult()[0] self.assertIsInstance(r, tuple) named = hasattr(r, 'colname') names = [(1, 'Homer'), (2, 'Marge'), (3, 'Bart'), (4, 'Lisa'), (5, 'Maggie')] self.createTable(table, 'id smallint primary key, name varchar', values=names) r = get_as_list(table) self.assertIsInstance(r, list) self.assertEqual(r, names) for t, n in zip(r, names): self.assertIsInstance(t, tuple) self.assertEqual(t, n) if named: self.assertEqual(t.id, n[0]) self.assertEqual(t.name, n[1]) self.assertEqual(t._asdict(), dict(id=n[0], name=n[1])) r = get_as_list(table, what='name') self.assertIsInstance(r, list) expected = sorted((row[1],) for row in names) self.assertEqual(r, expected) r = get_as_list(table, what='name, id') self.assertIsInstance(r, list) expected = sorted(tuple(reversed(row)) for row in names) self.assertEqual(r, expected) r = get_as_list(table, what=['name', 'id']) self.assertIsInstance(r, list) self.assertEqual(r, expected) r = get_as_list(table, where="name like 'Ba%'") self.assertIsInstance(r, list) self.assertEqual(r, names[2:3]) r = get_as_list(table, what='name', where="name like 'Ma%'") self.assertIsInstance(r, list) self.assertEqual(r, [('Maggie',), ('Marge',)]) r = get_as_list(table, what='name', where=["name like 'Ma%'", "name like '%r%'"]) self.assertIsInstance(r, list) self.assertEqual(r, [('Marge',)]) r = get_as_list(table, what='name', order='id') self.assertIsInstance(r, list) expected = [(row[1],) for row in names] 
self.assertEqual(r, expected) r = get_as_list(table, what=['name'], order=['id']) self.assertIsInstance(r, list) self.assertEqual(r, expected) r = get_as_list(table, what=['id', 'name'], order=['id', 'name']) self.assertIsInstance(r, list) self.assertEqual(r, names) r = get_as_list(table, what='id * 2 as num', order='id desc') self.assertIsInstance(r, list) expected = [(n,) for n in range(10, 0, -2)] self.assertEqual(r, expected) r = get_as_list(table, limit=2) self.assertIsInstance(r, list) self.assertEqual(r, names[:2]) r = get_as_list(table, offset=3) self.assertIsInstance(r, list) self.assertEqual(r, names[3:]) r = get_as_list(table, limit=1, offset=2) self.assertIsInstance(r, list) self.assertEqual(r, names[2:3]) r = get_as_list(table, scalar=True) self.assertIsInstance(r, list) self.assertEqual(r, list(range(1, 6))) r = get_as_list(table, what='name', scalar=True) self.assertIsInstance(r, list) expected = sorted(row[1] for row in names) self.assertEqual(r, expected) r = get_as_list(table, what='name', limit=1, scalar=True) self.assertIsInstance(r, list) self.assertEqual(r, expected[:1]) query('alter table "%s" drop constraint "%s_pkey"' % (table, table)) self.assertRaises(KeyError, self.db.pkey, table, flush=True) names.insert(1, (1, 'Snowball')) query('insert into "%s" values ($1, $2)' % table, (1, 'Snowball')) r = get_as_list(table) self.assertIsInstance(r, list) self.assertEqual(r, names) r = get_as_list(table, what='name', where='id=1', scalar=True) self.assertIsInstance(r, list) self.assertEqual(r, ['Homer', 'Snowball']) # test with unordered query r = get_as_list(table, order=False) self.assertIsInstance(r, list) self.assertEqual(set(r), set(names)) # test with arbitrary from clause from_table = '(select lower(name) as n2 from "%s") as t2' % table r = get_as_list(from_table) self.assertIsInstance(r, list) r = set(row[0] for row in r) expected = set(row[1].lower() for row in names) self.assertEqual(r, expected) r = get_as_list(from_table, order='n2', 
scalar=True) self.assertIsInstance(r, list) self.assertEqual(r, sorted(expected)) r = get_as_list(from_table, order='n2', limit=1) self.assertIsInstance(r, list) self.assertEqual(len(r), 1) t = r[0] self.assertIsInstance(t, tuple) if named: self.assertEqual(t.n2, 'bart') self.assertEqual(t._asdict(), dict(n2='bart')) else: self.assertEqual(t, ('bart',)) def testGetAsDict(self): get_as_dict = self.db.get_as_dict self.assertRaises(TypeError, get_as_dict) self.assertRaises(TypeError, get_as_dict, None) # the test table has no primary key self.assertRaises(pg.ProgrammingError, get_as_dict, 'test') query = self.db.query table = 'test_asdict' r = query('select 1 as colname').namedresult()[0] self.assertIsInstance(r, tuple) named = hasattr(r, 'colname') colors = [(1, '#7cb9e8', 'Aero'), (2, '#b5a642', 'Brass'), (3, '#b2ffff', 'Celeste'), (4, '#c19a6b', 'Desert')] self.createTable(table, 'id smallint primary key, rgb char(7), name varchar', values=colors) # keyname must be string, list or tuple self.assertRaises(KeyError, get_as_dict, table, 3) self.assertRaises(KeyError, get_as_dict, table, dict(id=None)) # missing keyname in row self.assertRaises(KeyError, get_as_dict, table, keyname='rgb', what='name') r = get_as_dict(table) self.assertIsInstance(r, OrderedDict) expected = OrderedDict((row[0], row[1:]) for row in colors) self.assertEqual(r, expected) for key in r: self.assertIsInstance(key, int) self.assertIn(key, expected) row = r[key] self.assertIsInstance(row, tuple) t = expected[key] self.assertEqual(row, t) if named: self.assertEqual(row.rgb, t[0]) self.assertEqual(row.name, t[1]) self.assertEqual(row._asdict(), dict(rgb=t[0], name=t[1])) if OrderedDict is not dict: # Python > 2.6 self.assertEqual(r.keys(), expected.keys()) r = get_as_dict(table, keyname='rgb') self.assertIsInstance(r, OrderedDict) expected = OrderedDict((row[1], (row[0], row[2])) for row in sorted(colors, key=itemgetter(1))) self.assertEqual(r, expected) for key in r: self.assertIsInstance(key, 
str) self.assertIn(key, expected) row = r[key] self.assertIsInstance(row, tuple) t = expected[key] self.assertEqual(row, t) if named: self.assertEqual(row.id, t[0]) self.assertEqual(row.name, t[1]) self.assertEqual(row._asdict(), dict(id=t[0], name=t[1])) if OrderedDict is not dict: # Python > 2.6 self.assertEqual(r.keys(), expected.keys()) r = get_as_dict(table, keyname=['id', 'rgb']) self.assertIsInstance(r, OrderedDict) expected = OrderedDict((row[:2], row[2:]) for row in colors) self.assertEqual(r, expected) for key in r: self.assertIsInstance(key, tuple) self.assertIsInstance(key[0], int) self.assertIsInstance(key[1], str) if named: self.assertEqual(key, (key.id, key.rgb)) self.assertEqual(key._fields, ('id', 'rgb')) row = r[key] self.assertIsInstance(row, tuple) self.assertIsInstance(row[0], str) t = expected[key] self.assertEqual(row, t) if named: self.assertEqual(row.name, t[0]) self.assertEqual(row._asdict(), dict(name=t[0])) if OrderedDict is not dict: # Python > 2.6 self.assertEqual(r.keys(), expected.keys()) r = get_as_dict(table, keyname=['id', 'rgb'], scalar=True) self.assertIsInstance(r, OrderedDict) expected = OrderedDict((row[:2], row[2]) for row in colors) self.assertEqual(r, expected) for key in r: self.assertIsInstance(key, tuple) row = r[key] self.assertIsInstance(row, str) t = expected[key] self.assertEqual(row, t) if OrderedDict is not dict: # Python > 2.6 self.assertEqual(r.keys(), expected.keys()) r = get_as_dict(table, keyname='rgb', what=['rgb', 'name'], scalar=True) self.assertIsInstance(r, OrderedDict) expected = OrderedDict((row[1], row[2]) for row in sorted(colors, key=itemgetter(1))) self.assertEqual(r, expected) for key in r: self.assertIsInstance(key, str) row = r[key] self.assertIsInstance(row, str) t = expected[key] self.assertEqual(row, t) if OrderedDict is not dict: # Python > 2.6 self.assertEqual(r.keys(), expected.keys()) r = get_as_dict(table, what='id, name', where="rgb like '#b%'", scalar=True) self.assertIsInstance(r, 
OrderedDict) expected = OrderedDict((row[0], row[2]) for row in colors[1:3]) self.assertEqual(r, expected) for key in r: self.assertIsInstance(key, int) row = r[key] self.assertIsInstance(row, str) t = expected[key] self.assertEqual(row, t) if OrderedDict is not dict: # Python > 2.6 self.assertEqual(r.keys(), expected.keys()) expected = r r = get_as_dict(table, what=['name', 'id'], where=['id > 1', 'id < 4', "rgb like '#b%'", "name not like 'A%'", "name not like '%t'"], scalar=True) self.assertEqual(r, expected) r = get_as_dict(table, what='name, id', limit=2, offset=1, scalar=True) self.assertEqual(r, expected) r = get_as_dict(table, keyname=('id',), what=('name', 'id'), where=('id > 1', 'id < 4'), order=('id',), scalar=True) self.assertEqual(r, expected) r = get_as_dict(table, limit=1) self.assertEqual(len(r), 1) self.assertEqual(r[1][1], 'Aero') r = get_as_dict(table, offset=3) self.assertEqual(len(r), 1) self.assertEqual(r[4][1], 'Desert') r = get_as_dict(table, order='id desc') expected = OrderedDict((row[0], row[1:]) for row in reversed(colors)) self.assertEqual(r, expected) r = get_as_dict(table, where='id > 5') self.assertIsInstance(r, OrderedDict) self.assertEqual(len(r), 0) # test with unordered query expected = dict((row[0], row[1:]) for row in colors) r = get_as_dict(table, order=False) self.assertIsInstance(r, dict) self.assertEqual(r, expected) if dict is not OrderedDict: # Python > 2.6 self.assertNotIsInstance(self, OrderedDict) # test with arbitrary from clause from_table = '(select id, lower(name) as n2 from "%s") as t2' % table # primary key must be passed explicitly in this case self.assertRaises(pg.ProgrammingError, get_as_dict, from_table) r = get_as_dict(from_table, 'id') self.assertIsInstance(r, OrderedDict) expected = OrderedDict((row[0], (row[2].lower(),)) for row in colors) self.assertEqual(r, expected) # test without a primary key query('alter table "%s" drop constraint "%s_pkey"' % (table, table)) self.assertRaises(KeyError, 
self.db.pkey, table, flush=True) self.assertRaises(pg.ProgrammingError, get_as_dict, table) r = get_as_dict(table, keyname='id') expected = OrderedDict((row[0], row[1:]) for row in colors) self.assertIsInstance(r, dict) self.assertEqual(r, expected) r = (1, '#007fff', 'Azure') query('insert into "%s" values ($1, $2, $3)' % table, r) # the last entry will win expected[1] = r[1:] r = get_as_dict(table, keyname='id') self.assertEqual(r, expected) def testTransaction(self): query = self.db.query self.createTable('test_table', 'n integer', temporary=False) self.db.begin() query("insert into test_table values (1)") query("insert into test_table values (2)") self.db.commit() self.db.begin() query("insert into test_table values (3)") query("insert into test_table values (4)") self.db.rollback() self.db.begin() query("insert into test_table values (5)") self.db.savepoint('before6') query("insert into test_table values (6)") self.db.rollback('before6') query("insert into test_table values (7)") self.db.commit() self.db.begin() self.db.savepoint('before8') query("insert into test_table values (8)") self.db.release('before8') self.assertRaises(pg.InternalError, self.db.rollback, 'before8') self.db.commit() self.db.start() query("insert into test_table values (9)") self.db.end() r = [r[0] for r in query( "select * from test_table order by 1").getresult()] self.assertEqual(r, [1, 2, 5, 7, 9]) self.db.begin(mode='read only') self.assertRaises(pg.InternalError, query, "insert into test_table values (0)") self.db.rollback() self.db.start(mode='Read Only') self.assertRaises(pg.InternalError, query, "insert into test_table values (0)") self.db.abort() def testTransactionAliases(self): self.assertEqual(self.db.begin, self.db.start) self.assertEqual(self.db.commit, self.db.end) self.assertEqual(self.db.rollback, self.db.abort) def testContextManager(self): query = self.db.query self.createTable('test_table', 'n integer check(n>0)') with self.db: query("insert into test_table values 
(1)") query("insert into test_table values (2)") try: with self.db: query("insert into test_table values (3)") query("insert into test_table values (4)") raise ValueError('test transaction should rollback') except ValueError as error: self.assertEqual(str(error), 'test transaction should rollback') with self.db: query("insert into test_table values (5)") try: with self.db: query("insert into test_table values (6)") query("insert into test_table values (-1)") except pg.IntegrityError as error: self.assertTrue('check' in str(error)) with self.db: query("insert into test_table values (7)") r = [r[0] for r in query( "select * from test_table order by 1").getresult()] self.assertEqual(r, [1, 2, 5, 7]) def testBytea(self): query = self.db.query self.createTable('bytea_test', 'n smallint primary key, data bytea') s = b"It's all \\ kinds \x00 of\r nasty \xff stuff!\n" r = self.db.escape_bytea(s) query('insert into bytea_test values(3, $1)', (r,)) r = query('select * from bytea_test where n=3').getresult() self.assertEqual(len(r), 1) r = r[0] self.assertEqual(len(r), 2) self.assertEqual(r[0], 3) r = r[1] if pg.get_bytea_escaped(): self.assertNotEqual(r, s) r = pg.unescape_bytea(r) self.assertIsInstance(r, bytes) self.assertEqual(r, s) def testInsertUpdateGetBytea(self): query = self.db.query unescape = pg.unescape_bytea if pg.get_bytea_escaped() else None self.createTable('bytea_test', 'n smallint primary key, data bytea') # insert null value r = self.db.insert('bytea_test', n=0, data=None) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 0) self.assertIn('data', r) self.assertIsNone(r['data']) s = b'None' r = self.db.update('bytea_test', n=0, data=s) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 0) self.assertIn('data', r) r = r['data'] if unescape: self.assertNotEqual(r, s) r = unescape(r) self.assertIsInstance(r, bytes) self.assertEqual(r, s) r = self.db.update('bytea_test', n=0, data=None) 
self.assertIsNone(r['data']) # insert as bytes s = b"It's all \\ kinds \x00 of\r nasty \xff stuff!\n" r = self.db.insert('bytea_test', n=5, data=s) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 5) self.assertIn('data', r) r = r['data'] if unescape: self.assertNotEqual(r, s) r = unescape(r) self.assertIsInstance(r, bytes) self.assertEqual(r, s) # update as bytes s += b"and now even more \x00 nasty \t stuff!\f" r = self.db.update('bytea_test', n=5, data=s) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 5) self.assertIn('data', r) r = r['data'] if unescape: self.assertNotEqual(r, s) r = unescape(r) self.assertIsInstance(r, bytes) self.assertEqual(r, s) r = query('select * from bytea_test where n=5').getresult() self.assertEqual(len(r), 1) r = r[0] self.assertEqual(len(r), 2) self.assertEqual(r[0], 5) r = r[1] if unescape: self.assertNotEqual(r, s) r = unescape(r) self.assertIsInstance(r, bytes) self.assertEqual(r, s) r = self.db.get('bytea_test', dict(n=5)) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 5) self.assertIn('data', r) r = r['data'] if unescape: self.assertNotEqual(r, s) r = pg.unescape_bytea(r) self.assertIsInstance(r, bytes) self.assertEqual(r, s) def testUpsertBytea(self): self.createTable('bytea_test', 'n smallint primary key, data bytea') s = b"It's all \\ kinds \x00 of\r nasty \xff stuff!\n" r = dict(n=7, data=s) try: r = self.db.upsert('bytea_test', r) except pg.ProgrammingError as error: if self.db.server_version < 90500: self.skipTest('database does not support upsert') self.fail(str(error)) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 7) self.assertIn('data', r) if pg.get_bytea_escaped(): self.assertNotEqual(r['data'], s) r['data'] = pg.unescape_bytea(r['data']) self.assertIsInstance(r['data'], bytes) self.assertEqual(r['data'], s) r['data'] = None r = self.db.upsert('bytea_test', r) self.assertIsInstance(r, dict) 
self.assertIn('n', r) self.assertEqual(r['n'], 7) self.assertIn('data', r) self.assertIsNone(r['data']) def testInsertGetJson(self): try: self.createTable('json_test', 'n smallint primary key, data json') except pg.ProgrammingError as error: if self.db.server_version < 90200: self.skipTest('database does not support json') self.fail(str(error)) jsondecode = pg.get_jsondecode() # insert null value r = self.db.insert('json_test', n=0, data=None) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 0) self.assertIn('data', r) self.assertIsNone(r['data']) r = self.db.get('json_test', 0) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 0) self.assertIn('data', r) self.assertIsNone(r['data']) # insert JSON object data = { "id": 1, "name": "Foo", "price": 1234.5, "new": True, "note": None, "tags": ["Bar", "Eek"], "stock": {"warehouse": 300, "retail": 20}} r = self.db.insert('json_test', n=1, data=data) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 1) self.assertIn('data', r) r = r['data'] if jsondecode is None: self.assertIsInstance(r, str) r = json.loads(r) self.assertIsInstance(r, dict) self.assertEqual(r, data) self.assertIsInstance(r['id'], int) self.assertIsInstance(r['name'], unicode) self.assertIsInstance(r['price'], float) self.assertIsInstance(r['new'], bool) self.assertIsInstance(r['tags'], list) self.assertIsInstance(r['stock'], dict) r = self.db.get('json_test', 1) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 1) self.assertIn('data', r) r = r['data'] if jsondecode is None: self.assertIsInstance(r, str) r = json.loads(r) self.assertIsInstance(r, dict) self.assertEqual(r, data) self.assertIsInstance(r['id'], int) self.assertIsInstance(r['name'], unicode) self.assertIsInstance(r['price'], float) self.assertIsInstance(r['new'], bool) self.assertIsInstance(r['tags'], list) self.assertIsInstance(r['stock'], dict) # insert JSON object as text 
self.db.insert('json_test', n=2, data=json.dumps(data)) q = "select data from json_test where n in (1, 2) order by n" r = self.db.query(q).getresult() self.assertEqual(len(r), 2) self.assertIsInstance(r[0][0], str if jsondecode is None else dict) self.assertEqual(r[0][0], r[1][0]) def testInsertGetJsonb(self): try: self.createTable('jsonb_test', 'n smallint primary key, data jsonb') except pg.ProgrammingError as error: if self.db.server_version < 90400: self.skipTest('database does not support jsonb') self.fail(str(error)) jsondecode = pg.get_jsondecode() # insert null value r = self.db.insert('jsonb_test', n=0, data=None) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 0) self.assertIn('data', r) self.assertIsNone(r['data']) r = self.db.get('jsonb_test', 0) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 0) self.assertIn('data', r) self.assertIsNone(r['data']) # insert JSON object data = { "id": 1, "name": "Foo", "price": 1234.5, "new": True, "note": None, "tags": ["Bar", "Eek"], "stock": {"warehouse": 300, "retail": 20}} r = self.db.insert('jsonb_test', n=1, data=data) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 1) self.assertIn('data', r) r = r['data'] if jsondecode is None: self.assertIsInstance(r, str) r = json.loads(r) self.assertIsInstance(r, dict) self.assertEqual(r, data) self.assertIsInstance(r['id'], int) self.assertIsInstance(r['name'], unicode) self.assertIsInstance(r['price'], float) self.assertIsInstance(r['new'], bool) self.assertIsInstance(r['tags'], list) self.assertIsInstance(r['stock'], dict) r = self.db.get('jsonb_test', 1) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 1) self.assertIn('data', r) r = r['data'] if jsondecode is None: self.assertIsInstance(r, str) r = json.loads(r) self.assertIsInstance(r, dict) self.assertEqual(r, data) self.assertIsInstance(r['id'], int) self.assertIsInstance(r['name'], unicode) 
self.assertIsInstance(r['price'], float) self.assertIsInstance(r['new'], bool) self.assertIsInstance(r['tags'], list) self.assertIsInstance(r['stock'], dict) def testArray(self): returns_arrays = pg.get_array() self.createTable('arraytest', 'id smallint, i2 smallint[], i4 integer[], i8 bigint[],' ' d numeric[], f4 real[], f8 double precision[], m money[],' ' b bool[], v4 varchar(4)[], c4 char(4)[], t text[]') r = self.db.get_attnames('arraytest') if self.regtypes: self.assertEqual(r, dict( id='smallint', i2='smallint[]', i4='integer[]', i8='bigint[]', d='numeric[]', f4='real[]', f8='double precision[]', m='money[]', b='boolean[]', v4='character varying[]', c4='character[]', t='text[]')) else: self.assertEqual(r, dict( id='int', i2='int[]', i4='int[]', i8='int[]', d='num[]', f4='float[]', f8='float[]', m='money[]', b='bool[]', v4='text[]', c4='text[]', t='text[]')) decimal = pg.get_decimal() if decimal is Decimal: long_decimal = decimal('123456789.123456789') odd_money = decimal('1234567891234567.89') else: long_decimal = decimal('12345671234.5') odd_money = decimal('1234567123.25') t, f = (True, False) if pg.get_bool() else ('t', 'f') data = dict(id=42, i2=[42, 1234, None, 0, -1], i4=[42, 123456789, None, 0, 1, -1], i8=[long(42), long(123456789123456789), None, long(0), long(1), long(-1)], d=[decimal(42), long_decimal, None, decimal(0), decimal(1), decimal(-1), -long_decimal], f4=[42.0, 1234.5, None, 0.0, 1.0, -1.0, float('inf'), float('-inf')], f8=[42.0, 12345671234.5, None, 0.0, 1.0, -1.0, float('inf'), float('-inf')], m=[decimal('42.00'), odd_money, None, decimal('0.00'), decimal('1.00'), decimal('-1.00'), -odd_money], b=[t, f, t, None, f, t, None, None, t], v4=['abc', '"Hi"', '', None], c4=['abc ', '"Hi"', ' ', None], t=['abc', 'Hello, World!', '"Hello, World!"', '', None]) r = data.copy() self.db.insert('arraytest', r) if returns_arrays: self.assertEqual(r, data) else: self.assertEqual(r['i4'], '{42,123456789,NULL,0,1,-1}') self.db.insert('arraytest', r) r = 
self.db.get('arraytest', 42, 'id') if returns_arrays: self.assertEqual(r, data) else: self.assertEqual(r['i4'], '{42,123456789,NULL,0,1,-1}') r = self.db.query('select * from arraytest limit 1').dictresult()[0] if returns_arrays: self.assertEqual(r, data) else: self.assertEqual(r['i4'], '{42,123456789,NULL,0,1,-1}') def testArrayLiteral(self): insert = self.db.insert returns_arrays = pg.get_array() self.createTable('arraytest', 'i int[], t text[]', oids=True) r = dict(i=[1, 2, 3], t=['a', 'b', 'c']) insert('arraytest', r) if returns_arrays: self.assertEqual(r['i'], [1, 2, 3]) self.assertEqual(r['t'], ['a', 'b', 'c']) else: self.assertEqual(r['i'], '{1,2,3}') self.assertEqual(r['t'], '{a,b,c}') r = dict(i='{1,2,3}', t='{a,b,c}') self.db.insert('arraytest', r) if returns_arrays: self.assertEqual(r['i'], [1, 2, 3]) self.assertEqual(r['t'], ['a', 'b', 'c']) else: self.assertEqual(r['i'], '{1,2,3}') self.assertEqual(r['t'], '{a,b,c}') L = pg.Literal r = dict(i=L("ARRAY[1, 2, 3]"), t=L("ARRAY['a', 'b', 'c']")) self.db.insert('arraytest', r) if returns_arrays: self.assertEqual(r['i'], [1, 2, 3]) self.assertEqual(r['t'], ['a', 'b', 'c']) else: self.assertEqual(r['i'], '{1,2,3}') self.assertEqual(r['t'], '{a,b,c}') r = dict(i="1, 2, 3", t="'a', 'b', 'c'") self.assertRaises(pg.DataError, self.db.insert, 'arraytest', r) def testArrayOfIds(self): array_on = pg.get_array() self.createTable('arraytest', 'c cid[], o oid[], x xid[]', oids=True) r = self.db.get_attnames('arraytest') if self.regtypes: self.assertEqual(r, dict( oid='oid', c='cid[]', o='oid[]', x='xid[]')) else: self.assertEqual(r, dict( oid='int', c='int[]', o='int[]', x='int[]')) data = dict(c=[11, 12, 13], o=[21, 22, 23], x=[31, 32, 33]) r = data.copy() self.db.insert('arraytest', r) qoid = 'oid(arraytest)' oid = r.pop(qoid) if array_on: self.assertEqual(r, data) else: self.assertEqual(r['o'], '{21,22,23}') r = {qoid: oid} self.db.get('arraytest', r) self.assertEqual(oid, r.pop(qoid)) if array_on: 
self.assertEqual(r, data) else: self.assertEqual(r['o'], '{21,22,23}') def testArrayOfText(self): array_on = pg.get_array() self.createTable('arraytest', 'data text[]', oids=True) r = self.db.get_attnames('arraytest') self.assertEqual(r['data'], 'text[]') data = ['Hello, World!', '', None, '{a,b,c}', '"Hi!"', 'null', 'NULL', 'Null', 'nulL', "It's all \\ kinds of\r nasty stuff!\n"] r = dict(data=data) self.db.insert('arraytest', r) if not array_on: r['data'] = pg.cast_array(r['data']) self.assertEqual(r['data'], data) self.assertIsInstance(r['data'][1], str) self.assertIsNone(r['data'][2]) r['data'] = None self.db.get('arraytest', r) if not array_on: r['data'] = pg.cast_array(r['data']) self.assertEqual(r['data'], data) self.assertIsInstance(r['data'][1], str) self.assertIsNone(r['data'][2]) def testArrayOfBytea(self): array_on = pg.get_array() bytea_escaped = pg.get_bytea_escaped() self.createTable('arraytest', 'data bytea[]', oids=True) r = self.db.get_attnames('arraytest') self.assertEqual(r['data'], 'bytea[]') data = [b'Hello, World!', b'', None, b'{a,b,c}', b'"Hi!"', b"It's all \\ kinds \x00 of\r nasty \xff stuff!\n"] r = dict(data=data) self.db.insert('arraytest', r) if array_on: self.assertIsInstance(r['data'], list) if array_on and not bytea_escaped: self.assertEqual(r['data'], data) self.assertIsInstance(r['data'][1], bytes) self.assertIsNone(r['data'][2]) else: self.assertNotEqual(r['data'], data) r['data'] = None self.db.get('arraytest', r) if array_on: self.assertIsInstance(r['data'], list) if array_on and not bytea_escaped: self.assertEqual(r['data'], data) self.assertIsInstance(r['data'][1], bytes) self.assertIsNone(r['data'][2]) else: self.assertNotEqual(r['data'], data) def testArrayOfJson(self): try: self.createTable('arraytest', 'data json[]', oids=True) except pg.ProgrammingError as error: if self.db.server_version < 90200: self.skipTest('database does not support json') self.fail(str(error)) r = self.db.get_attnames('arraytest') 
self.assertEqual(r['data'], 'json[]') data = [dict(id=815, name='John Doe'), dict(id=816, name='Jane Roe')] array_on = pg.get_array() jsondecode = pg.get_jsondecode() r = dict(data=data) self.db.insert('arraytest', r) if not array_on: r['data'] = pg.cast_array(r['data'], jsondecode) if jsondecode is None: r['data'] = [json.loads(d) for d in r['data']] self.assertEqual(r['data'], data) r['data'] = None self.db.get('arraytest', r) if not array_on: r['data'] = pg.cast_array(r['data'], jsondecode) if jsondecode is None: r['data'] = [json.loads(d) for d in r['data']] self.assertEqual(r['data'], data) r = dict(data=[json.dumps(d) for d in data]) self.db.insert('arraytest', r) if not array_on: r['data'] = pg.cast_array(r['data'], jsondecode) if jsondecode is None: r['data'] = [json.loads(d) for d in r['data']] self.assertEqual(r['data'], data) r['data'] = None self.db.get('arraytest', r) # insert empty json values r = dict(data=['', None]) self.db.insert('arraytest', r) r = r['data'] if array_on: self.assertIsInstance(r, list) self.assertEqual(len(r), 2) self.assertIsNone(r[0]) self.assertIsNone(r[1]) else: self.assertEqual(r, '{NULL,NULL}') def testArrayOfJsonb(self): try: self.createTable('arraytest', 'data jsonb[]', oids=True) except pg.ProgrammingError as error: if self.db.server_version < 90400: self.skipTest('database does not support jsonb') self.fail(str(error)) r = self.db.get_attnames('arraytest') self.assertEqual(r['data'], 'jsonb[]' if self.regtypes else 'json[]') data = [dict(id=815, name='John Doe'), dict(id=816, name='Jane Roe')] array_on = pg.get_array() jsondecode = pg.get_jsondecode() r = dict(data=data) self.db.insert('arraytest', r) if not array_on: r['data'] = pg.cast_array(r['data'], jsondecode) if jsondecode is None: r['data'] = [json.loads(d) for d in r['data']] self.assertEqual(r['data'], data) r['data'] = None self.db.get('arraytest', r) if not array_on: r['data'] = pg.cast_array(r['data'], jsondecode) if jsondecode is None: r['data'] = 
[json.loads(d) for d in r['data']] self.assertEqual(r['data'], data) r = dict(data=[json.dumps(d) for d in data]) self.db.insert('arraytest', r) if not array_on: r['data'] = pg.cast_array(r['data'], jsondecode) if jsondecode is None: r['data'] = [json.loads(d) for d in r['data']] self.assertEqual(r['data'], data) r['data'] = None self.db.get('arraytest', r) # insert empty json values r = dict(data=['', None]) self.db.insert('arraytest', r) r = r['data'] if array_on: self.assertIsInstance(r, list) self.assertEqual(len(r), 2) self.assertIsNone(r[0]) self.assertIsNone(r[1]) else: self.assertEqual(r, '{NULL,NULL}') def testDeepArray(self): array_on = pg.get_array() self.createTable('arraytest', 'data text[][][]', oids=True) r = self.db.get_attnames('arraytest') self.assertEqual(r['data'], 'text[]') data = [[['Hello, World!', '{a,b,c}', 'back\\slash']]] r = dict(data=data) self.db.insert('arraytest', r) if array_on: self.assertEqual(r['data'], data) else: self.assertTrue(r['data'].startswith('{{{"Hello,')) r['data'] = None self.db.get('arraytest', r) if array_on: self.assertEqual(r['data'], data) else: self.assertTrue(r['data'].startswith('{{{"Hello,')) def testInsertUpdateGetRecord(self): query = self.db.query query('create type test_person_type as' ' (name varchar, age smallint, married bool,' ' weight real, salary money)') self.addCleanup(query, 'drop type test_person_type') self.createTable('test_person', 'person test_person_type', temporary=False, oids=True) attnames = self.db.get_attnames('test_person') self.assertEqual(len(attnames), 2) self.assertIn('oid', attnames) self.assertIn('person', attnames) person_typ = attnames['person'] if self.regtypes: self.assertEqual(person_typ, 'test_person_type') else: self.assertEqual(person_typ, 'record') if self.regtypes: self.assertEqual(person_typ.attnames, dict(name='character varying', age='smallint', married='boolean', weight='real', salary='money')) else: self.assertEqual(person_typ.attnames, dict(name='text', 
age='int', married='bool', weight='float', salary='money')) decimal = pg.get_decimal() if pg.get_bool(): bool_class = bool t, f = True, False else: bool_class = str t, f = 't', 'f' person = ('John Doe', 61, t, 99.5, decimal('93456.75')) r = self.db.insert('test_person', None, person=person) p = r['person'] self.assertIsInstance(p, tuple) self.assertEqual(p, person) self.assertEqual(p.name, 'John Doe') self.assertIsInstance(p.name, str) self.assertIsInstance(p.age, int) self.assertIsInstance(p.married, bool_class) self.assertIsInstance(p.weight, float) self.assertIsInstance(p.salary, decimal) person = ('Jane Roe', 59, f, 64.5, decimal('96543.25')) r['person'] = person self.db.update('test_person', r) p = r['person'] self.assertIsInstance(p, tuple) self.assertEqual(p, person) self.assertEqual(p.name, 'Jane Roe') self.assertIsInstance(p.name, str) self.assertIsInstance(p.age, int) self.assertIsInstance(p.married, bool_class) self.assertIsInstance(p.weight, float) self.assertIsInstance(p.salary, decimal) r['person'] = None self.db.get('test_person', r) p = r['person'] self.assertIsInstance(p, tuple) self.assertEqual(p, person) self.assertEqual(p.name, 'Jane Roe') self.assertIsInstance(p.name, str) self.assertIsInstance(p.age, int) self.assertIsInstance(p.married, bool_class) self.assertIsInstance(p.weight, float) self.assertIsInstance(p.salary, decimal) person = (None,) * 5 r = self.db.insert('test_person', None, person=person) p = r['person'] self.assertIsInstance(p, tuple) self.assertIsNone(p.name) self.assertIsNone(p.age) self.assertIsNone(p.married) self.assertIsNone(p.weight) self.assertIsNone(p.salary) r['person'] = None self.db.get('test_person', r) p = r['person'] self.assertIsInstance(p, tuple) self.assertIsNone(p.name) self.assertIsNone(p.age) self.assertIsNone(p.married) self.assertIsNone(p.weight) self.assertIsNone(p.salary) r = self.db.insert('test_person', None, person=None) self.assertIsNone(r['person']) r['person'] = None self.db.get('test_person', r) 
self.assertIsNone(r['person']) def testRecordInsertBytea(self): query = self.db.query query('create type test_person_type as' ' (name text, picture bytea)') self.addCleanup(query, 'drop type test_person_type') self.createTable('test_person', 'person test_person_type', temporary=False, oids=True) person_typ = self.db.get_attnames('test_person')['person'] self.assertEqual(person_typ.attnames, dict(name='text', picture='bytea')) person = ('John Doe', b'O\x00ps\xff!') r = self.db.insert('test_person', None, person=person) p = r['person'] self.assertIsInstance(p, tuple) self.assertEqual(p, person) self.assertEqual(p.name, 'John Doe') self.assertIsInstance(p.name, str) self.assertEqual(p.picture, person[1]) self.assertIsInstance(p.picture, bytes) def testRecordInsertJson(self): query = self.db.query try: query('create type test_person_type as' ' (name text, data json)') except pg.ProgrammingError as error: if self.db.server_version < 90200: self.skipTest('database does not support json') self.fail(str(error)) self.addCleanup(query, 'drop type test_person_type') self.createTable('test_person', 'person test_person_type', temporary=False, oids=True) person_typ = self.db.get_attnames('test_person')['person'] self.assertEqual(person_typ.attnames, dict(name='text', data='json')) person = ('John Doe', dict(age=61, married=True, weight=99.5)) r = self.db.insert('test_person', None, person=person) p = r['person'] self.assertIsInstance(p, tuple) if pg.get_jsondecode() is None: p = p._replace(data=json.loads(p.data)) self.assertEqual(p, person) self.assertEqual(p.name, 'John Doe') self.assertIsInstance(p.name, str) self.assertEqual(p.data, person[1]) self.assertIsInstance(p.data, dict) def testRecordLiteral(self): query = self.db.query query('create type test_person_type as' ' (name varchar, age smallint)') self.addCleanup(query, 'drop type test_person_type') self.createTable('test_person', 'person test_person_type', temporary=False, oids=True) person_typ = 
self.db.get_attnames('test_person')['person'] if self.regtypes: self.assertEqual(person_typ, 'test_person_type') else: self.assertEqual(person_typ, 'record') if self.regtypes: self.assertEqual(person_typ.attnames, dict(name='character varying', age='smallint')) else: self.assertEqual(person_typ.attnames, dict(name='text', age='int')) person = pg.Literal("('John Doe', 61)") r = self.db.insert('test_person', None, person=person) p = r['person'] self.assertIsInstance(p, tuple) self.assertEqual(p.name, 'John Doe') self.assertIsInstance(p.name, str) self.assertEqual(p.age, 61) self.assertIsInstance(p.age, int) def testDate(self): query = self.db.query for datestyle in ('ISO', 'Postgres, MDY', 'Postgres, DMY', 'SQL, MDY', 'SQL, DMY', 'German'): self.db.set_parameter('datestyle', datestyle) d = date(2016, 3, 14) q = "select $1::date" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, date) self.assertEqual(r, d) q = "select '10000-08-01'::date, '0099-01-08 BC'::date" r = query(q).getresult()[0] self.assertIsInstance(r[0], date) self.assertIsInstance(r[1], date) self.assertEqual(r[0], date.max) self.assertEqual(r[1], date.min) q = "select 'infinity'::date, '-infinity'::date" r = query(q).getresult()[0] self.assertIsInstance(r[0], date) self.assertIsInstance(r[1], date) self.assertEqual(r[0], date.max) self.assertEqual(r[1], date.min) def testTime(self): query = self.db.query d = time(15, 9, 26) q = "select $1::time" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, time) self.assertEqual(r, d) d = time(15, 9, 26, 535897) q = "select $1::time" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, time) self.assertEqual(r, d) def testTimetz(self): query = self.db.query timezones = dict(CET=1, EET=2, EST=-5, UTC=0) for timezone in sorted(timezones): tz = '%+03d00' % timezones[timezone] try: tzinfo = datetime.strptime(tz, '%z').tzinfo except ValueError: # Python < 3.2 tzinfo = pg._get_timezone(tz) self.db.set_parameter('timezone', timezone) d = 
time(15, 9, 26, tzinfo=tzinfo) q = "select $1::timetz" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, time) self.assertEqual(r, d) d = time(15, 9, 26, 535897, tzinfo) q = "select $1::timetz" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, time) self.assertEqual(r, d) def testTimestamp(self): query = self.db.query for datestyle in ('ISO', 'Postgres, MDY', 'Postgres, DMY', 'SQL, MDY', 'SQL, DMY', 'German'): self.db.set_parameter('datestyle', datestyle) d = datetime(2016, 3, 14) q = "select $1::timestamp" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, datetime) self.assertEqual(r, d) d = datetime(2016, 3, 14, 15, 9, 26) q = "select $1::timestamp" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, datetime) self.assertEqual(r, d) d = datetime(2016, 3, 14, 15, 9, 26, 535897) q = "select $1::timestamp" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, datetime) self.assertEqual(r, d) q = ("select '10000-08-01 AD'::timestamp," " '0099-01-08 BC'::timestamp") r = query(q).getresult()[0] self.assertIsInstance(r[0], datetime) self.assertIsInstance(r[1], datetime) self.assertEqual(r[0], datetime.max) self.assertEqual(r[1], datetime.min) q = "select 'infinity'::timestamp, '-infinity'::timestamp" r = query(q).getresult()[0] self.assertIsInstance(r[0], datetime) self.assertIsInstance(r[1], datetime) self.assertEqual(r[0], datetime.max) self.assertEqual(r[1], datetime.min) def testTimestamptz(self): query = self.db.query timezones = dict(CET=1, EET=2, EST=-5, UTC=0) for timezone in sorted(timezones): tz = '%+03d00' % timezones[timezone] try: tzinfo = datetime.strptime(tz, '%z').tzinfo except ValueError: # Python < 3.2 tzinfo = pg._get_timezone(tz) self.db.set_parameter('timezone', timezone) for datestyle in ('ISO', 'Postgres, MDY', 'Postgres, DMY', 'SQL, MDY', 'SQL, DMY', 'German'): self.db.set_parameter('datestyle', datestyle) d = datetime(2016, 3, 14, tzinfo=tzinfo) q = "select $1::timestamptz" r = query(q, 
(d,)).getresult()[0][0] self.assertIsInstance(r, datetime) self.assertEqual(r, d) d = datetime(2016, 3, 14, 15, 9, 26, tzinfo=tzinfo) q = "select $1::timestamptz" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, datetime) self.assertEqual(r, d) d = datetime(2016, 3, 14, 15, 9, 26, 535897, tzinfo) q = "select $1::timestamptz" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, datetime) self.assertEqual(r, d) q = ("select '10000-08-01 AD'::timestamptz," " '0099-01-08 BC'::timestamptz") r = query(q).getresult()[0] self.assertIsInstance(r[0], datetime) self.assertIsInstance(r[1], datetime) self.assertEqual(r[0], datetime.max) self.assertEqual(r[1], datetime.min) q = "select 'infinity'::timestamptz, '-infinity'::timestamptz" r = query(q).getresult()[0] self.assertIsInstance(r[0], datetime) self.assertIsInstance(r[1], datetime) self.assertEqual(r[0], datetime.max) self.assertEqual(r[1], datetime.min) def testInterval(self): query = self.db.query for intervalstyle in ( 'sql_standard', 'postgres', 'postgres_verbose', 'iso_8601'): self.db.set_parameter('intervalstyle', intervalstyle) d = timedelta(3) q = "select $1::interval" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, timedelta) self.assertEqual(r, d) d = timedelta(-30) r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, timedelta) self.assertEqual(r, d) d = timedelta(hours=3, minutes=31, seconds=42, microseconds=5678) q = "select $1::interval" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, timedelta) self.assertEqual(r, d) def testDateAndTimeArrays(self): dt = (date(2016, 3, 14), time(15, 9, 26)) q = "select ARRAY[$1::date], ARRAY[$2::time]" r = self.db.query(q, dt).getresult()[0] self.assertIsInstance(r[0], list) self.assertEqual(r[0][0], dt[0]) self.assertIsInstance(r[1], list) self.assertEqual(r[1][0], dt[1]) def testHstore(self): try: self.db.query("select 'k=>v'::hstore") except pg.DatabaseError: try: self.db.query("create extension hstore") except 
pg.DatabaseError: self.skipTest("hstore extension not enabled") d = {'k': 'v', 'foo': 'bar', 'baz': 'whatever', '1a': 'anything at all', '2=b': 'value = 2', '3>c': 'value > 3', '4"c': 'value " 4', "5'c": "value ' 5", 'hello, world': '"hi!"', 'None': None, 'NULL': 'NULL', 'empty': ''} q = "select $1::hstore" r = self.db.query(q, (pg.Hstore(d),)).getresult()[0][0] self.assertIsInstance(r, dict) self.assertEqual(r, d) def testUuid(self): d = UUID('{12345678-1234-5678-1234-567812345678}') q = 'select $1::uuid' r = self.db.query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, UUID) self.assertEqual(r, d) def testDbTypesInfo(self): dbtypes = self.db.dbtypes self.assertIsInstance(dbtypes, dict) self.assertNotIn('numeric', dbtypes) typ = dbtypes['numeric'] self.assertIn('numeric', dbtypes) self.assertEqual(typ, 'numeric' if self.regtypes else 'num') self.assertEqual(typ.oid, 1700) self.assertEqual(typ.pgtype, 'numeric') self.assertEqual(typ.regtype, 'numeric') self.assertEqual(typ.simple, 'num') self.assertEqual(typ.typtype, 'b') self.assertEqual(typ.category, 'N') self.assertEqual(typ.delim, ',') self.assertEqual(typ.relid, 0) self.assertIs(dbtypes[1700], typ) self.assertNotIn('pg_type', dbtypes) typ = dbtypes['pg_type'] self.assertIn('pg_type', dbtypes) self.assertEqual(typ, 'pg_type' if self.regtypes else 'record') self.assertIsInstance(typ.oid, int) self.assertEqual(typ.pgtype, 'pg_type') self.assertEqual(typ.regtype, 'pg_type') self.assertEqual(typ.simple, 'record') self.assertEqual(typ.typtype, 'c') self.assertEqual(typ.category, 'C') self.assertEqual(typ.delim, ',') self.assertNotEqual(typ.relid, 0) attnames = typ.attnames self.assertIsInstance(attnames, dict) self.assertIs(attnames, dbtypes.get_attnames('pg_type')) self.assertEqual(list(attnames)[0], 'typname') typname = attnames['typname'] self.assertEqual(typname, 'name' if self.regtypes else 'text') self.assertEqual(typname.typtype, 'b') # base self.assertEqual(typname.category, 'S') # string 
self.assertEqual(list(attnames)[3], 'typlen') typlen = attnames['typlen'] self.assertEqual(typlen, 'smallint' if self.regtypes else 'int') self.assertEqual(typlen.typtype, 'b') # base self.assertEqual(typlen.category, 'N') # numeric def testDbTypesTypecast(self): dbtypes = self.db.dbtypes self.assertIsInstance(dbtypes, dict) self.assertNotIn('int4', dbtypes) self.assertIs(dbtypes.get_typecast('int4'), int) dbtypes.set_typecast('int4', float) self.assertIs(dbtypes.get_typecast('int4'), float) dbtypes.reset_typecast('int4') self.assertIs(dbtypes.get_typecast('int4'), int) dbtypes.set_typecast('int4', float) self.assertIs(dbtypes.get_typecast('int4'), float) dbtypes.reset_typecast() self.assertIs(dbtypes.get_typecast('int4'), int) self.assertNotIn('circle', dbtypes) self.assertIsNone(dbtypes.get_typecast('circle')) squared_circle = lambda v: 'Squared Circle: %s' % v dbtypes.set_typecast('circle', squared_circle) self.assertIs(dbtypes.get_typecast('circle'), squared_circle) r = self.db.query("select '0,0,1'::circle").getresult()[0][0] self.assertIn('circle', dbtypes) self.assertEqual(r, 'Squared Circle: <(0,0),1>') self.assertEqual(dbtypes.typecast('Impossible', 'circle'), 'Squared Circle: Impossible') dbtypes.reset_typecast('circle') self.assertIsNone(dbtypes.get_typecast('circle')) def testGetSetTypeCast(self): get_typecast = pg.get_typecast set_typecast = pg.set_typecast dbtypes = self.db.dbtypes self.assertIsInstance(dbtypes, dict) self.assertNotIn('int4', dbtypes) self.assertNotIn('real', dbtypes) self.assertNotIn('bool', dbtypes) self.assertIs(get_typecast('int4'), int) self.assertIs(get_typecast('float4'), float) self.assertIs(get_typecast('bool'), pg.cast_bool) cast_circle = get_typecast('circle') self.addCleanup(set_typecast, 'circle', cast_circle) squared_circle = lambda v: 'Squared Circle: %s' % v self.assertNotIn('circle', dbtypes) set_typecast('circle', squared_circle) self.assertNotIn('circle', dbtypes) self.assertIs(get_typecast('circle'), 
squared_circle) r = self.db.query("select '0,0,1'::circle").getresult()[0][0] self.assertIn('circle', dbtypes) self.assertEqual(r, 'Squared Circle: <(0,0),1>') set_typecast('circle', cast_circle) self.assertIs(get_typecast('circle'), cast_circle) def testNotificationHandler(self): # the notification handler itself is tested separately f = self.db.notification_handler callback = lambda arg_dict: None handler = f('test', callback) self.assertIsInstance(handler, pg.NotificationHandler) self.assertIs(handler.db, self.db) self.assertEqual(handler.event, 'test') self.assertEqual(handler.stop_event, 'stop_test') self.assertIs(handler.callback, callback) self.assertIsInstance(handler.arg_dict, dict) self.assertEqual(handler.arg_dict, {}) self.assertIsNone(handler.timeout) self.assertFalse(handler.listening) handler.close() self.assertIsNone(handler.db) self.db.reopen() self.assertIsNone(handler.db) handler = f('test2', callback, timeout=2) self.assertIsInstance(handler, pg.NotificationHandler) self.assertIs(handler.db, self.db) self.assertEqual(handler.event, 'test2') self.assertEqual(handler.stop_event, 'stop_test2') self.assertIs(handler.callback, callback) self.assertIsInstance(handler.arg_dict, dict) self.assertEqual(handler.arg_dict, {}) self.assertEqual(handler.timeout, 2) self.assertFalse(handler.listening) handler.close() self.assertIsNone(handler.db) self.db.reopen() self.assertIsNone(handler.db) arg_dict = {'testing': 3} handler = f('test3', callback, arg_dict=arg_dict) self.assertIsInstance(handler, pg.NotificationHandler) self.assertIs(handler.db, self.db) self.assertEqual(handler.event, 'test3') self.assertEqual(handler.stop_event, 'stop_test3') self.assertIs(handler.callback, callback) self.assertIs(handler.arg_dict, arg_dict) self.assertEqual(arg_dict['testing'], 3) self.assertIsNone(handler.timeout) self.assertFalse(handler.listening) handler.close() self.assertIsNone(handler.db) self.db.reopen() self.assertIsNone(handler.db) handler = f('test4', callback, 
stop_event='stop4') self.assertIsInstance(handler, pg.NotificationHandler) self.assertIs(handler.db, self.db) self.assertEqual(handler.event, 'test4') self.assertEqual(handler.stop_event, 'stop4') self.assertIs(handler.callback, callback) self.assertIsInstance(handler.arg_dict, dict) self.assertEqual(handler.arg_dict, {}) self.assertIsNone(handler.timeout) self.assertFalse(handler.listening) handler.close() self.assertIsNone(handler.db) self.db.reopen() self.assertIsNone(handler.db) arg_dict = {'testing': 5} handler = f('test5', callback, arg_dict, 1.5, 'stop5') self.assertIsInstance(handler, pg.NotificationHandler) self.assertIs(handler.db, self.db) self.assertEqual(handler.event, 'test5') self.assertEqual(handler.stop_event, 'stop5') self.assertIs(handler.callback, callback) self.assertIs(handler.arg_dict, arg_dict) self.assertEqual(arg_dict['testing'], 5) self.assertEqual(handler.timeout, 1.5) self.assertFalse(handler.listening) handler.close() self.assertIsNone(handler.db) self.db.reopen() self.assertIsNone(handler.db) class TestDBClassNonStdOpts(TestDBClass): """Test the methods of the DB class with non-standard global options.""" @classmethod def setUpClass(cls): cls.saved_options = {} cls.set_option('decimal', float) not_bool = not pg.get_bool() cls.set_option('bool', not_bool) not_array = not pg.get_array() cls.set_option('array', not_array) not_bytea_escaped = not pg.get_bytea_escaped() cls.set_option('bytea_escaped', not_bytea_escaped) cls.set_option('jsondecode', None) db = DB() cls.regtypes = not db.use_regtypes() db.close() super(TestDBClassNonStdOpts, cls).setUpClass() @classmethod def tearDownClass(cls): super(TestDBClassNonStdOpts, cls).tearDownClass() cls.reset_option('jsondecode') cls.reset_option('bool') cls.reset_option('array') cls.reset_option('bytea_escaped') cls.reset_option('decimal') @classmethod def set_option(cls, option, value): cls.saved_options[option] = getattr(pg, 'get_' + option)() return getattr(pg, 'set_' + option)(value) 
@classmethod def reset_option(cls, option): return getattr(pg, 'set_' + option)(cls.saved_options[option]) class TestDBClassAdapter(unittest.TestCase): """Test the adapter object associated with the DB class.""" def setUp(self): self.db = DB() self.adapter = self.db.adapter def tearDown(self): try: self.db.close() except pg.InternalError: pass def testGuessSimpleType(self): f = self.adapter.guess_simple_type self.assertEqual(f(pg.Bytea(b'test')), 'bytea') self.assertEqual(f('string'), 'text') self.assertEqual(f(b'string'), 'text') self.assertEqual(f(True), 'bool') self.assertEqual(f(3), 'int') self.assertEqual(f(2.75), 'float') self.assertEqual(f(Decimal('4.25')), 'num') self.assertEqual(f(date(2016, 1, 30)), 'date') self.assertEqual(f([1, 2, 3]), 'int[]') self.assertEqual(f([[[123]]]), 'int[]') self.assertEqual(f(['a', 'b', 'c']), 'text[]') self.assertEqual(f([[['abc']]]), 'text[]') self.assertEqual(f([False, True]), 'bool[]') self.assertEqual(f([[[False]]]), 'bool[]') r = f(('string', True, 3, 2.75, [1], [False])) self.assertEqual(r, 'record') self.assertEqual(list(r.attnames.values()), ['text', 'bool', 'int', 'float', 'int[]', 'bool[]']) def testAdaptQueryTypedList(self): format_query = self.adapter.format_query self.assertRaises(TypeError, format_query, '%s,%s', (1, 2), ('int2',)) self.assertRaises(TypeError, format_query, '%s,%s', (1,), ('int2', 'int2')) values = (3, 7.5, 'hello', True) types = ('int4', 'float4', 'text', 'bool') sql, params = format_query("select %s,%s,%s,%s", values, types) self.assertEqual(sql, 'select $1,$2,$3,$4') self.assertEqual(params, [3, 7.5, 'hello', 't']) types = ('bool', 'bool', 'bool', 'bool') sql, params = format_query("select %s,%s,%s,%s", values, types) self.assertEqual(sql, 'select $1,$2,$3,$4') self.assertEqual(params, ['t', 't', 'f', 't']) values = ('2016-01-30', 'current_date') types = ('date', 'date') sql, params = format_query("values(%s,%s)", values, types) self.assertEqual(sql, 'values($1,current_date)') 
self.assertEqual(params, ['2016-01-30']) values = ([1, 2, 3], ['a', 'b', 'c']) types = ('_int4', '_text') sql, params = format_query("%s::int4[],%s::text[]", values, types) self.assertEqual(sql, '$1::int4[],$2::text[]') self.assertEqual(params, ['{1,2,3}', '{a,b,c}']) types = ('_bool', '_bool') sql, params = format_query("%s::bool[],%s::bool[]", values, types) self.assertEqual(sql, '$1::bool[],$2::bool[]') self.assertEqual(params, ['{t,t,t}', '{f,f,f}']) values = [(3, 7.5, 'hello', True, [123], ['abc'])] t = self.adapter.simple_type typ = t('record') typ._get_attnames = lambda _self: pg.AttrDict([ ('i', t('int')), ('f', t('float')), ('t', t('text')), ('b', t('bool')), ('i3', t('int[]')), ('t3', t('text[]'))]) types = [typ] sql, params = format_query('select %s', values, types) self.assertEqual(sql, 'select $1') self.assertEqual(params, ['(3,7.5,hello,t,{123},{abc})']) def testAdaptQueryTypedDict(self): format_query = self.adapter.format_query self.assertRaises(TypeError, format_query, '%s,%s', dict(i1=1, i2=2), dict(i1='int2')) values = dict(i=3, f=7.5, t='hello', b=True) types = dict(i='int4', f='float4', t='text', b='bool') sql, params = format_query( "select %(i)s,%(f)s,%(t)s,%(b)s", values, types) self.assertEqual(sql, 'select $3,$2,$4,$1') self.assertEqual(params, ['t', 7.5, 3, 'hello']) types = dict(i='bool', f='bool', t='bool', b='bool') sql, params = format_query( "select %(i)s,%(f)s,%(t)s,%(b)s", values, types) self.assertEqual(sql, 'select $3,$2,$4,$1') self.assertEqual(params, ['t', 't', 't', 'f']) values = dict(d1='2016-01-30', d2='current_date') types = dict(d1='date', d2='date') sql, params = format_query("values(%(d1)s,%(d2)s)", values, types) self.assertEqual(sql, 'values($1,current_date)') self.assertEqual(params, ['2016-01-30']) values = dict(i=[1, 2, 3], t=['a', 'b', 'c']) types = dict(i='_int4', t='_text') sql, params = format_query( "%(i)s::int4[],%(t)s::text[]", values, types) self.assertEqual(sql, '$1::int4[],$2::text[]') 
self.assertEqual(params, ['{1,2,3}', '{a,b,c}']) types = dict(i='_bool', t='_bool') sql, params = format_query( "%(i)s::bool[],%(t)s::bool[]", values, types) self.assertEqual(sql, '$1::bool[],$2::bool[]') self.assertEqual(params, ['{t,t,t}', '{f,f,f}']) values = dict(record=(3, 7.5, 'hello', True, [123], ['abc'])) t = self.adapter.simple_type typ = t('record') typ._get_attnames = lambda _self: pg.AttrDict([ ('i', t('int')), ('f', t('float')), ('t', t('text')), ('b', t('bool')), ('i3', t('int[]')), ('t3', t('text[]'))]) types = dict(record=typ) sql, params = format_query('select %(record)s', values, types) self.assertEqual(sql, 'select $1') self.assertEqual(params, ['(3,7.5,hello,t,{123},{abc})']) def testAdaptQueryUntypedList(self): format_query = self.adapter.format_query values = (3, 7.5, 'hello', True) sql, params = format_query("select %s,%s,%s,%s", values) self.assertEqual(sql, 'select $1,$2,$3,$4') self.assertEqual(params, [3, 7.5, 'hello', 't']) values = [date(2016, 1, 30), 'current_date'] sql, params = format_query("values(%s,%s)", values) self.assertEqual(sql, 'values($1,$2)') self.assertEqual(params, values) values = ([1, 2, 3], ['a', 'b', 'c'], [True, False, True]) sql, params = format_query("%s,%s,%s", values) self.assertEqual(sql, "$1,$2,$3") self.assertEqual(params, ['{1,2,3}', '{a,b,c}', '{t,f,t}']) values = ([[1, 2], [3, 4]], [['a', 'b'], ['c', 'd']], [[True, False], [False, True]]) sql, params = format_query("%s,%s,%s", values) self.assertEqual(sql, "$1,$2,$3") self.assertEqual(params, [ '{{1,2},{3,4}}', '{{a,b},{c,d}}', '{{t,f},{f,t}}']) values = [(3, 7.5, 'hello', True, [123], ['abc'])] sql, params = format_query('select %s', values) self.assertEqual(sql, 'select $1') self.assertEqual(params, ['(3,7.5,hello,t,{123},{abc})']) def testAdaptQueryUntypedDict(self): format_query = self.adapter.format_query values = dict(i=3, f=7.5, t='hello', b=True) sql, params = format_query( "select %(i)s,%(f)s,%(t)s,%(b)s", values) self.assertEqual(sql, 'select 
$3,$2,$4,$1') self.assertEqual(params, ['t', 7.5, 3, 'hello']) values = dict(d1='2016-01-30', d2='current_date') sql, params = format_query("values(%(d1)s,%(d2)s)", values) self.assertEqual(sql, 'values($1,$2)') self.assertEqual(params, [values['d1'], values['d2']]) values = dict(i=[1, 2, 3], t=['a', 'b', 'c'], b=[True, False, True]) sql, params = format_query("%(i)s,%(t)s,%(b)s", values) self.assertEqual(sql, "$2,$3,$1") self.assertEqual(params, ['{t,f,t}', '{1,2,3}', '{a,b,c}']) values = dict(i=[[1, 2], [3, 4]], t=[['a', 'b'], ['c', 'd']], b=[[True, False], [False, True]]) sql, params = format_query("%(i)s,%(t)s,%(b)s", values) self.assertEqual(sql, "$2,$3,$1") self.assertEqual(params, [ '{{t,f},{f,t}}', '{{1,2},{3,4}}', '{{a,b},{c,d}}']) values = dict(record=(3, 7.5, 'hello', True, [123], ['abc'])) sql, params = format_query('select %(record)s', values) self.assertEqual(sql, 'select $1') self.assertEqual(params, ['(3,7.5,hello,t,{123},{abc})']) def testAdaptQueryInlineList(self): format_query = self.adapter.format_query values = (3, 7.5, 'hello', True) sql, params = format_query("select %s,%s,%s,%s", values, inline=True) self.assertEqual(sql, "select 3,7.5,'hello',true") self.assertEqual(params, []) values = [date(2016, 1, 30), 'current_date'] sql, params = format_query("values(%s,%s)", values, inline=True) self.assertEqual(sql, "values('2016-01-30','current_date')") self.assertEqual(params, []) values = ([1, 2, 3], ['a', 'b', 'c'], [True, False, True]) sql, params = format_query("%s,%s,%s", values, inline=True) self.assertEqual(sql, "ARRAY[1,2,3],ARRAY['a','b','c'],ARRAY[true,false,true]") self.assertEqual(params, []) values = ([[1, 2], [3, 4]], [['a', 'b'], ['c', 'd']], [[True, False], [False, True]]) sql, params = format_query("%s,%s,%s", values, inline=True) self.assertEqual(sql, "ARRAY[[1,2],[3,4]],ARRAY[['a','b'],['c','d']]," "ARRAY[[true,false],[false,true]]") self.assertEqual(params, []) values = [(3, 7.5, 'hello', True, [123], ['abc'])] sql, params = 
format_query('select %s', values, inline=True) self.assertEqual(sql, "select (3,7.5,'hello',true,ARRAY[123],ARRAY['abc'])") self.assertEqual(params, []) def testAdaptQueryInlineDict(self): format_query = self.adapter.format_query values = dict(i=3, f=7.5, t='hello', b=True) sql, params = format_query( "select %(i)s,%(f)s,%(t)s,%(b)s", values, inline=True) self.assertEqual(sql, "select 3,7.5,'hello',true") self.assertEqual(params, []) values = dict(d1='2016-01-30', d2='current_date') sql, params = format_query( "values(%(d1)s,%(d2)s)", values, inline=True) self.assertEqual(sql, "values('2016-01-30','current_date')") self.assertEqual(params, []) values = dict(i=[1, 2, 3], t=['a', 'b', 'c'], b=[True, False, True]) sql, params = format_query("%(i)s,%(t)s,%(b)s", values, inline=True) self.assertEqual(sql, "ARRAY[1,2,3],ARRAY['a','b','c'],ARRAY[true,false,true]") self.assertEqual(params, []) values = dict(i=[[1, 2], [3, 4]], t=[['a', 'b'], ['c', 'd']], b=[[True, False], [False, True]]) sql, params = format_query("%(i)s,%(t)s,%(b)s", values, inline=True) self.assertEqual(sql, "ARRAY[[1,2],[3,4]],ARRAY[['a','b'],['c','d']]," "ARRAY[[true,false],[false,true]]") self.assertEqual(params, []) values = dict(record=(3, 7.5, 'hello', True, [123], ['abc'])) sql, params = format_query('select %(record)s', values, inline=True) self.assertEqual(sql, "select (3,7.5,'hello',true,ARRAY[123],ARRAY['abc'])") self.assertEqual(params, []) def testAdaptQueryWithPgRepr(self): format_query = self.adapter.format_query self.assertRaises(TypeError, format_query, '%s', object(), inline=True) class TestObject: def __pg_repr__(self): return "'adapted'" sql, params = format_query('select %s', [TestObject()], inline=True) self.assertEqual(sql, "select 'adapted'") self.assertEqual(params, []) sql, params = format_query('select %s', [[TestObject()]], inline=True) self.assertEqual(sql, "select ARRAY['adapted']") self.assertEqual(params, []) class TestSchemas(unittest.TestCase): """Test correct handling 
of schemas (namespaces).""" cls_set_up = False @classmethod def setUpClass(cls): db = DB() query = db.query for num_schema in range(5): if num_schema: schema = "s%d" % num_schema query("drop schema if exists %s cascade" % (schema,)) try: query("create schema %s" % (schema,)) except pg.ProgrammingError: raise RuntimeError("The test user cannot create schemas.\n" "Grant create on database %s to the user" " for running these tests." % dbname) else: schema = "public" query("drop table if exists %s.t" % (schema,)) query("drop table if exists %s.t%d" % (schema, num_schema)) query("create table %s.t with oids as select 1 as n, %d as d" % (schema, num_schema)) query("create table %s.t%d with oids as select 1 as n, %d as d" % (schema, num_schema, num_schema)) db.close() cls.cls_set_up = True @classmethod def tearDownClass(cls): db = DB() query = db.query for num_schema in range(5): if num_schema: schema = "s%d" % num_schema query("drop schema %s cascade" % (schema,)) else: schema = "public" query("drop table %s.t" % (schema,)) query("drop table %s.t%d" % (schema, num_schema)) db.close() def setUp(self): self.assertTrue(self.cls_set_up) self.db = DB() def tearDown(self): self.doCleanups() self.db.close() def testGetTables(self): tables = self.db.get_tables() for num_schema in range(5): if num_schema: schema = "s" + str(num_schema) else: schema = "public" for t in (schema + ".t", schema + ".t" + str(num_schema)): self.assertIn(t, tables) def testGetAttnames(self): get_attnames = self.db.get_attnames query = self.db.query result = {'oid': 'int', 'd': 'int', 'n': 'int'} r = get_attnames("t") self.assertEqual(r, result) r = get_attnames("s4.t4") self.assertEqual(r, result) query("drop table if exists s3.t3m") self.addCleanup(query, "drop table s3.t3m") query("create table s3.t3m with oids as select 1 as m") result_m = {'oid': 'int', 'm': 'int'} r = get_attnames("s3.t3m") self.assertEqual(r, result_m) query("set search_path to s1,s3") r = get_attnames("t3") self.assertEqual(r, 
result) r = get_attnames("t3m") self.assertEqual(r, result_m) def testGet(self): get = self.db.get query = self.db.query PrgError = pg.ProgrammingError self.assertEqual(get("t", 1, 'n')['d'], 0) self.assertEqual(get("t0", 1, 'n')['d'], 0) self.assertEqual(get("public.t", 1, 'n')['d'], 0) self.assertEqual(get("public.t0", 1, 'n')['d'], 0) self.assertRaises(PrgError, get, "public.t1", 1, 'n') self.assertEqual(get("s1.t1", 1, 'n')['d'], 1) self.assertEqual(get("s3.t", 1, 'n')['d'], 3) query("set search_path to s2,s4") self.assertRaises(PrgError, get, "t1", 1, 'n') self.assertEqual(get("t4", 1, 'n')['d'], 4) self.assertRaises(PrgError, get, "t3", 1, 'n') self.assertEqual(get("t", 1, 'n')['d'], 2) self.assertEqual(get("s3.t3", 1, 'n')['d'], 3) query("set search_path to s1,s3") self.assertRaises(PrgError, get, "t2", 1, 'n') self.assertEqual(get("t3", 1, 'n')['d'], 3) self.assertRaises(PrgError, get, "t4", 1, 'n') self.assertEqual(get("t", 1, 'n')['d'], 1) self.assertEqual(get("s4.t4", 1, 'n')['d'], 4) def testMunging(self): get = self.db.get query = self.db.query r = get("t", 1, 'n') self.assertIn('oid(t)', r) query("set search_path to s2") r = get("t2", 1, 'n') self.assertIn('oid(t2)', r) query("set search_path to s3") r = get("t", 1, 'n') self.assertIn('oid(t)', r) class TestDebug(unittest.TestCase): """Test the debug attribute of the DB class.""" def setUp(self): self.db = DB() self.query = self.db.query self.debug = self.db.debug self.output = StringIO() self.stdout, sys.stdout = sys.stdout, self.output def tearDown(self): sys.stdout = self.stdout self.output.close() self.db.debug = debug self.db.close() def get_output(self): return self.output.getvalue() def send_queries(self): self.db.query("select 1") self.db.query("select 2") def testDebugDefault(self): if debug: self.assertEqual(self.db.debug, debug) else: self.assertIsNone(self.db.debug) def testDebugIsFalse(self): self.db.debug = False self.send_queries() self.assertEqual(self.get_output(), "") def 
testDebugIsTrue(self): self.db.debug = True self.send_queries() self.assertEqual(self.get_output(), "select 1\nselect 2\n") def testDebugIsString(self): self.db.debug = "Test with string: %s." self.send_queries() self.assertEqual(self.get_output(), "Test with string: select 1.\nTest with string: select 2.\n") def testDebugIsFileLike(self): with tempfile.TemporaryFile('w+') as debug_file: self.db.debug = debug_file self.send_queries() debug_file.seek(0) output = debug_file.read() self.assertEqual(output, "select 1\nselect 2\n") self.assertEqual(self.get_output(), "") def testDebugIsCallable(self): output = [] self.db.debug = output.append self.db.query("select 1") self.db.query("select 2") self.assertEqual(output, ["select 1", "select 2"]) self.assertEqual(self.get_output(), "") def testDebugMultipleArgs(self): output = [] self.db.debug = output.append args = ['Error', 42, {1: 'a', 2: 'b'}, [3, 5, 7]] self.db._do_debug(*args) self.assertEqual(output, ['\n'.join(str(arg) for arg in args)]) self.assertEqual(self.get_output(), "") class TestMemoryLeaks(unittest.TestCase): """Test that the DB class does not leak memory.""" def getLeaks(self, fut): ids = set() objs = [] add_ids = ids.update gc.collect() objs[:] = gc.get_objects() add_ids(id(obj) for obj in objs) fut() gc.collect() objs[:] = gc.get_objects() objs[:] = [obj for obj in objs if id(obj) not in ids] if objs and sys.version_info[:3] in ((3, 5, 0), (3, 5, 1)): # workaround for Python issue 26811 objs[:] = [obj for obj in objs if repr(obj) != '(,)'] self.assertEqual(len(objs), 0) def testLeaksWithClose(self): def fut(): db = DB() db.query("select $1::int as r", 42).dictresult() db.close() self.getLeaks(fut) def testLeaksWithoutClose(self): def fut(): db = DB() db.query("select $1::int as r", 42).dictresult() self.getLeaks(fut) if __name__ == '__main__': unittest.main() PyGreSQL-5.1/tests/__init__.py0000644000175100077410000000105513466770070016205 0ustar darcypyg00000000000000"""PyGreSQL test suite. 
You can specify your local database settings in LOCAL_PyGreSQL.py. """ try: import unittest2 as unittest # for Python < 2.7 except ImportError: import unittest if not (hasattr(unittest, 'skip') and hasattr(unittest.TestCase, 'setUpClass') and hasattr(unittest.TestCase, 'skipTest') and hasattr(unittest.TestCase, 'assertIn')): raise ImportError('Please install a newer version of unittest') def discover(): loader = unittest.TestLoader() suite = loader.discover('.') return suitePyGreSQL-5.1/tests/dbapi20.py0000644000175100077410000007560113466770070015677 0ustar darcypyg00000000000000#!/usr/bin/python ''' Python DB API 2.0 driver compliance unit test suite. This software is Public Domain and may be used without restrictions. "Now we have booze and barflies entering the discussion, plus rumours of DBAs on drugs... and I won't tell you what flashes through my mind each time I read the subject line with 'Anal Compliance' in it. All around this is turning out to be a thoroughly unwholesome unit test." -- Ian Bicking ''' __rcs_id__ = '$Id: dbapi20.py 969 2019-04-19 13:35:23Z cito $' __version__ = '$Revision: 1.5 $'[11:-2] __author__ = 'Stuart Bishop ' try: import unittest2 as unittest # for Python < 2.7 except ImportError: import unittest import time # $Log: not supported by cvs2svn $ # Revision 1.4 2008/11/01 18:37:55 cito # Updated the dbapi20 test module. Exposed the exceptions as attributes of the connection. # # Revision 1.10 2003/10/09 03:14:14 zenzen # Add test for DB API 2.0 optional extension, where database exceptions # are exposed as attributes on the Connection object. # # Revision 1.9 2003/08/13 01:16:36 zenzen # Minor tweak from Stefan Fleiter # # Revision 1.8 2003/04/10 00:13:25 zenzen # Changes, as per suggestions by M.-A. 
Lemburg # - Add a table prefix, to ensure namespace collisions can always be avoided # # Revision 1.7 2003/02/26 23:33:37 zenzen # Break out DDL into helper functions, as per request by David Rushby # # Revision 1.6 2003/02/21 03:04:33 zenzen # Stuff from Henrik Ekelund: # added test_None # added test_nextset & hooks # # Revision 1.5 2003/02/17 22:08:43 zenzen # Implement suggestions and code from Henrik Eklund - test that cursor.arraysize # defaults to 1 & generic cursor.callproc test added # # Revision 1.4 2003/02/15 00:16:33 zenzen # Changes, as per suggestions and bug reports by M.-A. Lemburg, # Matthew T. Kromer, Federico Di Gregorio and Daniel Dittmar # - Class renamed # - Now a subclass of TestCase, to avoid requiring the driver stub # to use multiple inheritance # - Reversed the polarity of buggy test in test_description # - Test exception heirarchy correctly # - self.populate is now self._populate(), so if a driver stub # overrides self.ddl1 this change propogates # - VARCHAR columns now have a width, which will hopefully make the # DDL even more portible (this will be reversed if it causes more problems) # - cursor.rowcount being checked after various execute and fetchXXX methods # - Check for fetchall and fetchmany returning empty lists after results # are exhausted (already checking for empty lists if select retrieved # nothing # - Fix bugs in test_setoutputsize_basic and test_setinputsizes # class DatabaseAPI20Test(unittest.TestCase): ''' Test a database self.driver for DB API 2.0 compatibility. This implementation tests Gadfly, but the TestCase is structured so that other self.drivers can subclass this test case to ensure compiliance with the DB-API. It is expected that this TestCase may be expanded in the future if ambiguities or edge conditions are discovered. The 'Optional Extensions' are not yet being tested. self.drivers should subclass this test, overriding setUp, tearDown, self.driver, connect_args and connect_kw_args. 
Class specification should be as follows: import dbapi20 class mytest(dbapi20.DatabaseAPI20Test): [...] Don't 'import DatabaseAPI20Test from dbapi20', or you will confuse the unit tester - just 'import dbapi20'. ''' # The self.driver module. This should be the module where the 'connect' # method is to be found driver = None connect_args = () # List of arguments to pass to connect connect_kw_args = {} # Keyword arguments for connect table_prefix = 'dbapi20test_' # If you need to specify a prefix for tables ddl1 = 'create table %sbooze (name varchar(20))' % table_prefix ddl2 = 'create table %sbarflys (name varchar(20))' % table_prefix xddl1 = 'drop table %sbooze' % table_prefix xddl2 = 'drop table %sbarflys' % table_prefix lowerfunc = 'lower' # Name of stored procedure to convert string->lowercase # Some drivers may need to override these helpers, for example adding # a 'commit' after the execute. def executeDDL1(self,cursor): cursor.execute(self.ddl1) def executeDDL2(self,cursor): cursor.execute(self.ddl2) def setUp(self): """self.drivers should override this method to perform required setup if any is necessary, such as creating the database. """ pass def tearDown(self): """self.drivers should override this method to perform required cleanup if any is necessary, such as deleting the test database. The default drops the tables that may be created. """ con = self._connect() try: cur = con.cursor() for ddl in (self.xddl1,self.xddl2): try: cur.execute(ddl) con.commit() except self.driver.Error: # Assume table didn't exist. Other tests will check if # execute is busted. 
pass finally: con.close() def _connect(self): try: return self.driver.connect( *self.connect_args,**self.connect_kw_args ) except AttributeError: self.fail("No connect method found in self.driver module") def test_connect(self): con = self._connect() con.close() def test_apilevel(self): try: # Must exist apilevel = self.driver.apilevel # Must equal 2.0 self.assertEqual(apilevel,'2.0') except AttributeError: self.fail("Driver doesn't define apilevel") def test_threadsafety(self): try: # Must exist threadsafety = self.driver.threadsafety # Must be a valid value self.assertTrue(threadsafety in (0,1,2,3)) except AttributeError: self.fail("Driver doesn't define threadsafety") def test_paramstyle(self): try: # Must exist paramstyle = self.driver.paramstyle # Must be a valid value self.assertTrue(paramstyle in ( 'qmark','numeric','named','format','pyformat' )) except AttributeError: self.fail("Driver doesn't define paramstyle") def test_Exceptions(self): """Make sure required exceptions exist, and are in the defined hierarchy. 
""" self.assertTrue(issubclass(self.driver.Warning,Exception)) self.assertTrue(issubclass(self.driver.Error,Exception)) self.assertTrue( issubclass(self.driver.InterfaceError,self.driver.Error) ) self.assertTrue( issubclass(self.driver.DatabaseError,self.driver.Error) ) self.assertTrue( issubclass(self.driver.OperationalError,self.driver.Error) ) self.assertTrue( issubclass(self.driver.IntegrityError,self.driver.Error) ) self.assertTrue( issubclass(self.driver.InternalError,self.driver.Error) ) self.assertTrue( issubclass(self.driver.ProgrammingError,self.driver.Error) ) self.assertTrue( issubclass(self.driver.NotSupportedError,self.driver.Error) ) def test_ExceptionsAsConnectionAttributes(self): """Optional extension Test for the optional DB API 2.0 extension, where the exceptions are exposed as attributes on the Connection object I figure this optional extension will be implemented by any driver author who is using this test suite, so it is enabled by default. """ con = self._connect() drv = self.driver self.assertTrue(con.Warning is drv.Warning) self.assertTrue(con.Error is drv.Error) self.assertTrue(con.InterfaceError is drv.InterfaceError) self.assertTrue(con.DatabaseError is drv.DatabaseError) self.assertTrue(con.OperationalError is drv.OperationalError) self.assertTrue(con.IntegrityError is drv.IntegrityError) self.assertTrue(con.InternalError is drv.InternalError) self.assertTrue(con.ProgrammingError is drv.ProgrammingError) self.assertTrue(con.NotSupportedError is drv.NotSupportedError) def test_commit(self): con = self._connect() try: # Commit must work, even if it doesn't do anything con.commit() finally: con.close() def test_rollback(self): con = self._connect() # If rollback is defined, it should either work or throw # the documented exception if hasattr(con,'rollback'): try: con.rollback() except self.driver.NotSupportedError: pass def test_cursor(self): con = self._connect() try: cur = con.cursor() finally: con.close() def 
test_cursor_isolation(self): con = self._connect() try: # Make sure cursors created from the same connection have # the documented transaction isolation level cur1 = con.cursor() cur2 = con.cursor() self.executeDDL1(cur1) cur1.execute("insert into %sbooze values ('Victoria Bitter')" % ( self.table_prefix )) cur2.execute("select name from %sbooze" % self.table_prefix) booze = cur2.fetchall() self.assertEqual(len(booze),1) self.assertEqual(len(booze[0]),1) self.assertEqual(booze[0][0],'Victoria Bitter') finally: con.close() def test_description(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) self.assertEqual(cur.description,None, 'cursor.description should be none after executing a ' 'statement that can return no rows (such as DDL)' ) cur.execute('select name from %sbooze' % self.table_prefix) self.assertEqual(len(cur.description),1, 'cursor.description describes too many columns' ) self.assertEqual(len(cur.description[0]),7, 'cursor.description[x] tuples must have 7 elements' ) self.assertEqual(cur.description[0][0].lower(),'name', 'cursor.description[x][0] must return column name' ) self.assertEqual(cur.description[0][1],self.driver.STRING, 'cursor.description[x][1] must return column type. Got %r' % cur.description[0][1] ) # Make sure self.description gets reset self.executeDDL2(cur) self.assertEqual(cur.description,None, 'cursor.description not being set to None when executing ' 'no-result statements (eg. 
DDL)' ) finally: con.close() def test_rowcount(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) self.assertEqual(cur.rowcount,-1, 'cursor.rowcount should be -1 after executing no-result ' 'statements' ) cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( self.table_prefix )) self.assertTrue(cur.rowcount in (-1,1), 'cursor.rowcount should == number or rows inserted, or ' 'set to -1 after executing an insert statement' ) cur.execute("select name from %sbooze" % self.table_prefix) self.assertTrue(cur.rowcount in (-1,1), 'cursor.rowcount should == number of rows returned, or ' 'set to -1 after executing a select statement' ) self.executeDDL2(cur) self.assertEqual(cur.rowcount,-1, 'cursor.rowcount not being reset to -1 after executing ' 'no-result statements' ) finally: con.close() lower_func = 'lower' def test_callproc(self): con = self._connect() try: cur = con.cursor() if self.lower_func and hasattr(cur,'callproc'): r = cur.callproc(self.lower_func,('FOO',)) self.assertEqual(len(r),1) self.assertEqual(r[0],'FOO') r = cur.fetchall() self.assertEqual(len(r),1,'callproc produced no result set') self.assertEqual(len(r[0]),1, 'callproc produced invalid result set' ) self.assertEqual(r[0][0],'foo', 'callproc produced invalid results' ) finally: con.close() def test_close(self): con = self._connect() try: cur = con.cursor() finally: con.close() # cursor.execute should raise an Error if called after connection # closed self.assertRaises(self.driver.Error,self.executeDDL1,cur) # connection.commit should raise an Error if called after connection' # closed.' 
self.assertRaises(self.driver.Error,con.commit) # connection.close should raise an Error if called more than once self.assertRaises(self.driver.Error,con.close) def test_execute(self): con = self._connect() try: cur = con.cursor() self._paraminsert(cur) finally: con.close() def _paraminsert(self,cur): self.executeDDL1(cur) cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( self.table_prefix )) self.assertTrue(cur.rowcount in (-1,1)) if self.driver.paramstyle == 'qmark': cur.execute( 'insert into %sbooze values (?)' % self.table_prefix, ("Cooper's",) ) elif self.driver.paramstyle == 'numeric': cur.execute( 'insert into %sbooze values (:1)' % self.table_prefix, ("Cooper's",) ) elif self.driver.paramstyle == 'named': cur.execute( 'insert into %sbooze values (:beer)' % self.table_prefix, {'beer':"Cooper's"} ) elif self.driver.paramstyle == 'format': cur.execute( 'insert into %sbooze values (%%s)' % self.table_prefix, ("Cooper's",) ) elif self.driver.paramstyle == 'pyformat': cur.execute( 'insert into %sbooze values (%%(beer)s)' % self.table_prefix, {'beer':"Cooper's"} ) else: self.fail('Invalid paramstyle') self.assertTrue(cur.rowcount in (-1,1)) cur.execute('select name from %sbooze' % self.table_prefix) res = cur.fetchall() self.assertEqual(len(res),2,'cursor.fetchall returned too few rows') beers = [res[0][0],res[1][0]] beers.sort() self.assertEqual(beers[0],"Cooper's", 'cursor.fetchall retrieved incorrect data, or data inserted ' 'incorrectly' ) self.assertEqual(beers[1],"Victoria Bitter", 'cursor.fetchall retrieved incorrect data, or data inserted ' 'incorrectly' ) def test_executemany(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) largs = [ ("Cooper's",) , ("Boag's",) ] margs = [ {'beer': "Cooper's"}, {'beer': "Boag's"} ] if self.driver.paramstyle == 'qmark': cur.executemany( 'insert into %sbooze values (?)' % self.table_prefix, largs ) elif self.driver.paramstyle == 'numeric': cur.executemany( 'insert into %sbooze values 
(:1)' % self.table_prefix, largs ) elif self.driver.paramstyle == 'named': cur.executemany( 'insert into %sbooze values (:beer)' % self.table_prefix, margs ) elif self.driver.paramstyle == 'format': cur.executemany( 'insert into %sbooze values (%%s)' % self.table_prefix, largs ) elif self.driver.paramstyle == 'pyformat': cur.executemany( 'insert into %sbooze values (%%(beer)s)' % ( self.table_prefix ), margs ) else: self.fail('Unknown paramstyle') self.assertTrue(cur.rowcount in (-1,2), 'insert using cursor.executemany set cursor.rowcount to ' 'incorrect value %r' % cur.rowcount ) cur.execute('select name from %sbooze' % self.table_prefix) res = cur.fetchall() self.assertEqual(len(res),2, 'cursor.fetchall retrieved incorrect number of rows' ) beers = [res[0][0],res[1][0]] beers.sort() self.assertEqual(beers[0],"Boag's",'incorrect data retrieved') self.assertEqual(beers[1],"Cooper's",'incorrect data retrieved') finally: con.close() def test_fetchone(self): con = self._connect() try: cur = con.cursor() # cursor.fetchone should raise an Error if called before # executing a select-type query self.assertRaises(self.driver.Error,cur.fetchone) # cursor.fetchone should raise an Error if called after # executing a query that cannnot return rows self.executeDDL1(cur) self.assertRaises(self.driver.Error,cur.fetchone) cur.execute('select name from %sbooze' % self.table_prefix) self.assertEqual(cur.fetchone(),None, 'cursor.fetchone should return None if a query retrieves ' 'no rows' ) self.assertTrue(cur.rowcount in (-1,0)) # cursor.fetchone should raise an Error if called after # executing a query that cannnot return rows cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( self.table_prefix )) self.assertRaises(self.driver.Error,cur.fetchone) cur.execute('select name from %sbooze' % self.table_prefix) r = cur.fetchone() self.assertEqual(len(r),1, 'cursor.fetchone should have retrieved a single row' ) self.assertEqual(r[0],'Victoria Bitter', 'cursor.fetchone 
retrieved incorrect data' ) self.assertEqual(cur.fetchone(),None, 'cursor.fetchone should return None if no more rows available' ) self.assertTrue(cur.rowcount in (-1,1)) finally: con.close() samples = [ 'Carlton Cold', 'Carlton Draft', 'Mountain Goat', 'Redback', 'Victoria Bitter', 'XXXX' ] def _populate(self): """Return a list of sql commands to setup the DB for the fetch tests. """ populate = [ "insert into %sbooze values ('%s')" % (self.table_prefix,s) for s in self.samples ] return populate def test_fetchmany(self): con = self._connect() try: cur = con.cursor() # cursor.fetchmany should raise an Error if called without #issuing a query self.assertRaises(self.driver.Error,cur.fetchmany,4) self.executeDDL1(cur) for sql in self._populate(): cur.execute(sql) cur.execute('select name from %sbooze' % self.table_prefix) r = cur.fetchmany() self.assertEqual(len(r),1, 'cursor.fetchmany retrieved incorrect number of rows, ' 'default of arraysize is one.' ) cur.arraysize=10 r = cur.fetchmany(3) # Should get 3 rows self.assertEqual(len(r),3, 'cursor.fetchmany retrieved incorrect number of rows' ) r = cur.fetchmany(4) # Should get 2 more self.assertEqual(len(r),2, 'cursor.fetchmany retrieved incorrect number of rows' ) r = cur.fetchmany(4) # Should be an empty sequence self.assertEqual(len(r),0, 'cursor.fetchmany should return an empty sequence after ' 'results are exhausted' ) self.assertTrue(cur.rowcount in (-1,6)) # Same as above, using cursor.arraysize cur.arraysize=4 cur.execute('select name from %sbooze' % self.table_prefix) r = cur.fetchmany() # Should get 4 rows self.assertEqual(len(r),4, 'cursor.arraysize not being honoured by fetchmany' ) r = cur.fetchmany() # Should get 2 more self.assertEqual(len(r),2) r = cur.fetchmany() # Should be an empty sequence self.assertEqual(len(r),0) self.assertTrue(cur.rowcount in (-1,6)) cur.arraysize=6 cur.execute('select name from %sbooze' % self.table_prefix) rows = cur.fetchmany() # Should get all rows 
self.assertTrue(cur.rowcount in (-1,6)) self.assertEqual(len(rows),6) self.assertEqual(len(rows),6) rows = [r[0] for r in rows] rows.sort() # Make sure we get the right data back out for i in range(0,6): self.assertEqual(rows[i],self.samples[i], 'incorrect data retrieved by cursor.fetchmany' ) rows = cur.fetchmany() # Should return an empty list self.assertEqual(len(rows),0, 'cursor.fetchmany should return an empty sequence if ' 'called after the whole result set has been fetched' ) self.assertTrue(cur.rowcount in (-1,6)) self.executeDDL2(cur) cur.execute('select name from %sbarflys' % self.table_prefix) r = cur.fetchmany() # Should get empty sequence self.assertEqual(len(r),0, 'cursor.fetchmany should return an empty sequence if ' 'query retrieved no rows' ) self.assertTrue(cur.rowcount in (-1,0)) finally: con.close() def test_fetchall(self): con = self._connect() try: cur = con.cursor() # cursor.fetchall should raise an Error if called # without executing a query that may return rows (such # as a select) self.assertRaises(self.driver.Error, cur.fetchall) self.executeDDL1(cur) for sql in self._populate(): cur.execute(sql) # cursor.fetchall should raise an Error if called # after executing a a statement that cannot return rows self.assertRaises(self.driver.Error,cur.fetchall) cur.execute('select name from %sbooze' % self.table_prefix) rows = cur.fetchall() self.assertTrue(cur.rowcount in (-1,len(self.samples))) self.assertEqual(len(rows),len(self.samples), 'cursor.fetchall did not retrieve all rows' ) rows = [r[0] for r in rows] rows.sort() for i in range(0,len(self.samples)): self.assertEqual(rows[i],self.samples[i], 'cursor.fetchall retrieved incorrect rows' ) rows = cur.fetchall() self.assertEqual( len(rows),0, 'cursor.fetchall should return an empty list if called ' 'after the whole result set has been fetched' ) self.assertTrue(cur.rowcount in (-1,len(self.samples))) self.executeDDL2(cur) cur.execute('select name from %sbarflys' % self.table_prefix) rows = 
cur.fetchall() self.assertTrue(cur.rowcount in (-1,0)) self.assertEqual(len(rows),0, 'cursor.fetchall should return an empty list if ' 'a select query returns no rows' ) finally: con.close() def test_mixedfetch(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) for sql in self._populate(): cur.execute(sql) cur.execute('select name from %sbooze' % self.table_prefix) rows1 = cur.fetchone() rows23 = cur.fetchmany(2) rows4 = cur.fetchone() rows56 = cur.fetchall() self.assertTrue(cur.rowcount in (-1,6)) self.assertEqual(len(rows23),2, 'fetchmany returned incorrect number of rows' ) self.assertEqual(len(rows56),2, 'fetchall returned incorrect number of rows' ) rows = [rows1[0]] rows.extend([rows23[0][0],rows23[1][0]]) rows.append(rows4[0]) rows.extend([rows56[0][0],rows56[1][0]]) rows.sort() for i in range(0,len(self.samples)): self.assertEqual(rows[i],self.samples[i], 'incorrect data retrieved or inserted' ) finally: con.close() def help_nextset_setUp(self, cur): """Should create a procedure called deleteme that returns two result sets, first the number of rows in booze then "name from booze" """ if False: sql = """ create procedure deleteme as begin select count(*) from booze select name from booze end """ cur.execute(sql) else: raise NotImplementedError('Helper not implemented') def help_nextset_tearDown(self, cur): """If cleaning up is needed after nextSetTest""" if False: cur.execute("drop procedure deleteme") else: raise NotImplementedError('Helper not implemented') def test_nextset(self): con = self._connect() try: cur = con.cursor() if not hasattr(cur,'nextset'): return try: self.executeDDL1(cur) sql=self._populate() for sql in self._populate(): cur.execute(sql) self.help_nextset_setUp(cur) cur.callproc('deleteme') numberofrows=cur.fetchone() assert numberofrows[0]== len(self.samples) assert cur.nextset() names=cur.fetchall() assert len(names) == len(self.samples) s=cur.nextset() assert s == None,'No more return sets, should return None' 
finally: self.help_nextset_tearDown(cur) finally: con.close() def test_arraysize(self): """Not much here - rest of the tests for this are in test_fetchmany""" con = self._connect() try: cur = con.cursor() self.assertTrue(hasattr(cur,'arraysize'), 'cursor.arraysize must be defined' ) finally: con.close() def test_setinputsizes(self): con = self._connect() try: cur = con.cursor() cur.setinputsizes( (25,) ) self._paraminsert(cur) # Make sure cursor still works finally: con.close() def test_setoutputsize_basic(self): """Basic test is to make sure setoutputsize doesn't blow up""" con = self._connect() try: cur = con.cursor() cur.setoutputsize(1000) cur.setoutputsize(2000,0) self._paraminsert(cur) # Make sure the cursor still works finally: con.close() def test_setoutputsize(self): """Real test for setoutputsize is driver dependant""" raise NotImplementedError('Driver needs to override this test') def test_None(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) cur.execute('insert into %sbooze values (NULL)' % self.table_prefix) cur.execute('select name from %sbooze' % self.table_prefix) r = cur.fetchall() self.assertEqual(len(r),1) self.assertEqual(len(r[0]),1) self.assertEqual(r[0][0],None,'NULL value not returned as None') finally: con.close() def test_Date(self): d1 = self.driver.Date(2002,12,25) d2 = self.driver.DateFromTicks(time.mktime((2002,12,25,0,0,0,0,0,0))) # Can we assume this? API doesn't specify, but it seems implied # self.assertEqual(str(d1),str(d2)) def test_Time(self): t1 = self.driver.Time(13,45,30) t2 = self.driver.TimeFromTicks(time.mktime((2001,1,1,13,45,30,0,0,0))) # Can we assume this? API doesn't specify, but it seems implied # self.assertEqual(str(t1),str(t2)) def test_Timestamp(self): t1 = self.driver.Timestamp(2002,12,25,13,45,30) t2 = self.driver.TimestampFromTicks( time.mktime((2002,12,25,13,45,30,0,0,0)) ) # Can we assume this? 
API doesn't specify, but it seems implied # self.assertEqual(str(t1),str(t2)) def test_Binary(self): b = self.driver.Binary(b'Something') b = self.driver.Binary(b'') def test_STRING(self): self.assertTrue(hasattr(self.driver,'STRING'), 'module.STRING must be defined' ) def test_BINARY(self): self.assertTrue(hasattr(self.driver,'BINARY'), 'module.BINARY must be defined.' ) def test_NUMBER(self): self.assertTrue(hasattr(self.driver,'NUMBER'), 'module.NUMBER must be defined.' ) def test_DATETIME(self): self.assertTrue(hasattr(self.driver,'DATETIME'), 'module.DATETIME must be defined.' ) def test_ROWID(self): self.assertTrue(hasattr(self.driver,'ROWID'), 'module.ROWID must be defined.' ) PyGreSQL-5.1/tests/test_dbapi20_copy.py0000644000175100077410000004743213466770070017771 0ustar darcypyg00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- """Test the modern PyGreSQL interface. Sub-tests for the copy methods. Contributed by Christoph Zwerschke. These tests need a database to test against. """ try: import unittest2 as unittest # for Python < 2.7 except ImportError: import unittest try: from collections.abc import Iterable except ImportError: # Python < 3.3 from collections import Iterable import pgdb # the module under test # We need a database to test against. If LOCAL_PyGreSQL.py exists we will # get our information from that. Otherwise we use the defaults. # The current user must have create schema privilege on the database. 
dbname = 'unittest' dbhost = None dbport = 5432 try: from .LOCAL_PyGreSQL import * except (ImportError, ValueError): try: from LOCAL_PyGreSQL import * except ImportError: pass try: # noinspection PyUnresolvedReferences unicode except NameError: # Python >= 3.0 unicode = str class InputStream: def __init__(self, data): if isinstance(data, unicode): data = data.encode('utf-8') self.data = data or b'' self.sizes = [] def __str__(self): data = self.data if str is unicode: # Python >= 3.0 data = data.decode('utf-8') return data def __len__(self): return len(self.data) def read(self, size=None): if size is None: output, data = self.data, b'' else: output, data = self.data[:size], self.data[size:] self.data = data self.sizes.append(size) return output class OutputStream: def __init__(self): self.data = b'' self.sizes = [] def __str__(self): data = self.data if str is unicode: # Python >= 3.0 data = data.decode('utf-8') return data def __len__(self): return len(self.data) def write(self, data): if isinstance(data, unicode): data = data.encode('utf-8') self.data += data self.sizes.append(len(data)) class TestStreams(unittest.TestCase): def test_input(self): stream = InputStream('Hello, Wörld!') self.assertIsInstance(stream.data, bytes) self.assertEqual(stream.data, b'Hello, W\xc3\xb6rld!') self.assertIsInstance(str(stream), str) self.assertEqual(str(stream), 'Hello, Wörld!') self.assertEqual(len(stream), 14) self.assertEqual(stream.read(3), b'Hel') self.assertEqual(stream.read(2), b'lo') self.assertEqual(stream.read(1), b',') self.assertEqual(stream.read(1), b' ') self.assertEqual(stream.read(), b'W\xc3\xb6rld!') self.assertEqual(stream.read(), b'') self.assertEqual(len(stream), 0) self.assertEqual(stream.sizes, [3, 2, 1, 1, None, None]) def test_output(self): stream = OutputStream() self.assertEqual(len(stream), 0) for chunk in 'Hel', 'lo', ',', ' ', 'Wörld!': stream.write(chunk) self.assertIsInstance(stream.data, bytes) self.assertEqual(stream.data, b'Hello, 
W\xc3\xb6rld!') self.assertIsInstance(str(stream), str) self.assertEqual(str(stream), 'Hello, Wörld!') self.assertEqual(len(stream), 14) self.assertEqual(stream.sizes, [3, 2, 1, 1, 7]) class TestCopy(unittest.TestCase): cls_set_up = False @staticmethod def connect(): return pgdb.connect(database=dbname, host='%s:%d' % (dbhost or '', dbport or -1)) @classmethod def setUpClass(cls): con = cls.connect() cur = con.cursor() cur.execute("set client_min_messages=warning") cur.execute("drop table if exists copytest cascade") cur.execute("create table copytest (" "id smallint primary key, name varchar(64))") cur.close() con.commit() cur = con.cursor() try: cur.execute("set client_encoding=utf8") cur.execute("select 'Plácido and José'").fetchone() except (pgdb.DataError, pgdb.NotSupportedError): cls.data[1] = (1941, 'Plaacido Domingo') cls.data[2] = (1946, 'Josee Carreras') cls.can_encode = False cur.close() con.close() cls.cls_set_up = True @classmethod def tearDownClass(cls): con = cls.connect() cur = con.cursor() cur.execute("set client_min_messages=warning") cur.execute("drop table if exists copytest cascade") con.commit() con.close() def setUp(self): self.assertTrue(self.cls_set_up) self.con = self.connect() self.cursor = self.con.cursor() self.cursor.execute("set client_encoding=utf8") def tearDown(self): try: self.cursor.close() except Exception: pass try: self.con.rollback() except Exception: pass try: self.con.close() except Exception: pass data = [(1935, 'Luciano Pavarotti'), (1941, 'Plácido Domingo'), (1946, 'José Carreras')] can_encode = True @property def data_text(self): return ''.join('%d\t%s\n' % row for row in self.data) @property def data_csv(self): return ''.join('%d,%s\n' % row for row in self.data) def truncate_table(self): self.cursor.execute("truncate table copytest") @property def table_data(self): self.cursor.execute("select * from copytest") return self.cursor.fetchall() def check_table(self): self.assertEqual(self.table_data, self.data) def 
check_rowcount(self, number=len(data)): self.assertEqual(self.cursor.rowcount, number) class TestCopyFrom(TestCopy): """Test the copy_from method.""" def tearDown(self): super(TestCopyFrom, self).tearDown() self.setUp() self.truncate_table() super(TestCopyFrom, self).tearDown() def copy_from(self, stream, **options): return self.cursor.copy_from(stream, 'copytest', **options) @property def data_file(self): return InputStream(self.data_text) def test_bad_params(self): call = self.cursor.copy_from call('0\t', 'copytest'), self.cursor call('1\t', 'copytest', format='text', sep='\t', null='', columns=['id', 'name']) self.assertRaises(TypeError, call) self.assertRaises(TypeError, call, None) self.assertRaises(TypeError, call, None, None) self.assertRaises(TypeError, call, '0\t') self.assertRaises(TypeError, call, '0\t', None) self.assertRaises(TypeError, call, '0\t', 42) self.assertRaises(TypeError, call, '0\t', ['copytest']) self.assertRaises(TypeError, call, '0\t', 'copytest', format=42) self.assertRaises(ValueError, call, '0\t', 'copytest', format='bad') self.assertRaises(TypeError, call, '0\t', 'copytest', sep=42) self.assertRaises(ValueError, call, '0\t', 'copytest', sep='bad') self.assertRaises(TypeError, call, '0\t', 'copytest', null=42) self.assertRaises(ValueError, call, '0\t', 'copytest', size='bad') self.assertRaises(TypeError, call, '0\t', 'copytest', columns=42) self.assertRaises(ValueError, call, b'', 'copytest', format='binary', sep=',') def test_input_string(self): ret = self.copy_from('42\tHello, world!') self.assertIs(ret, self.cursor) self.assertEqual(self.table_data, [(42, 'Hello, world!')]) self.check_rowcount(1) def test_input_string_with_newline(self): self.copy_from('42\tHello, world!\n') self.assertEqual(self.table_data, [(42, 'Hello, world!')]) self.check_rowcount(1) def test_input_string_multiple_rows(self): ret = self.copy_from(self.data_text) self.assertIs(ret, self.cursor) self.check_table() self.check_rowcount() if str is unicode: # Python 
>= 3.0 def test_input_bytes(self): self.copy_from(b'42\tHello, world!') self.assertEqual(self.table_data, [(42, 'Hello, world!')]) self.truncate_table() self.copy_from(self.data_text.encode('utf-8')) self.check_table() else: # Python < 3.0 def test_input_unicode(self): if not self.can_encode: self.skipTest('database does not support utf8') self.copy_from(u'43\tWürstel, Käse!') self.assertEqual(self.table_data, [(43, 'Würstel, Käse!')]) self.truncate_table() self.copy_from(self.data_text.decode('utf-8')) self.check_table() def test_input_iterable(self): self.copy_from(self.data_text.splitlines()) self.check_table() self.check_rowcount() def test_input_iterable_invalid(self): self.assertRaises(IOError, self.copy_from, [None]) def test_input_iterable_with_newlines(self): self.copy_from('%s\n' % row for row in self.data_text.splitlines()) self.check_table() if str is unicode: # Python >= 3.0 def test_input_iterable_bytes(self): self.copy_from(row.encode('utf-8') for row in self.data_text.splitlines()) self.check_table() def test_sep(self): stream = ('%d-%s' % row for row in self.data) self.copy_from(stream, sep='-') self.check_table() def test_null(self): self.copy_from('0\t\\N') self.assertEqual(self.table_data, [(0, None)]) self.assertIsNone(self.table_data[0][1]) self.truncate_table() self.copy_from('1\tNix') self.assertEqual(self.table_data, [(1, 'Nix')]) self.assertIsNotNone(self.table_data[0][1]) self.truncate_table() self.copy_from('2\tNix', null='Nix') self.assertEqual(self.table_data, [(2, None)]) self.assertIsNone(self.table_data[0][1]) self.truncate_table() self.copy_from('3\t') self.assertEqual(self.table_data, [(3, '')]) self.assertIsNotNone(self.table_data[0][1]) self.truncate_table() self.copy_from('4\t', null='') self.assertEqual(self.table_data, [(4, None)]) self.assertIsNone(self.table_data[0][1]) def test_columns(self): self.copy_from('1', columns='id') self.copy_from('2', columns=['id']) self.copy_from('3\tThree') self.copy_from('4\tFour', 
columns='id, name') self.copy_from('5\tFive', columns=['id', 'name']) self.assertEqual(self.table_data, [ (1, None), (2, None), (3, 'Three'), (4, 'Four'), (5, 'Five')]) self.check_rowcount(5) self.assertRaises(pgdb.ProgrammingError, self.copy_from, '6\t42', columns=['id', 'age']) self.check_rowcount(-1) def test_csv(self): self.copy_from(self.data_csv, format='csv') self.check_table() def test_csv_with_sep(self): stream = ('%d;"%s"\n' % row for row in self.data) self.copy_from(stream, format='csv', sep=';') self.check_table() self.check_rowcount() def test_binary(self): self.assertRaises(IOError, self.copy_from, b'NOPGCOPY\n', format='binary') self.check_rowcount(-1) def test_binary_with_sep(self): self.assertRaises(ValueError, self.copy_from, '', format='binary', sep='\t') def test_binary_with_unicode(self): self.assertRaises(ValueError, self.copy_from, u'', format='binary') def test_query(self): self.assertRaises(ValueError, self.cursor.copy_from, '', "select null") def test_file(self): stream = self.data_file ret = self.copy_from(stream) self.assertIs(ret, self.cursor) self.check_table() self.assertEqual(len(stream), 0) self.assertEqual(stream.sizes, [8192]) self.check_rowcount() def test_size_positive(self): stream = self.data_file size = 7 num_chunks = (len(stream) + size - 1) // size self.copy_from(stream, size=size) self.check_table() self.assertEqual(len(stream), 0) self.assertEqual(stream.sizes, [size] * num_chunks) self.check_rowcount() def test_size_negative(self): stream = self.data_file self.copy_from(stream, size=-1) self.check_table() self.assertEqual(len(stream), 0) self.assertEqual(stream.sizes, [None]) self.check_rowcount() def test_size_invalid(self): self.assertRaises(TypeError, self.copy_from, self.data_file, size='invalid') class TestCopyTo(TestCopy): """Test the copy_to method.""" @classmethod def setUpClass(cls): super(TestCopyTo, cls).setUpClass() con = cls.connect() cur = con.cursor() cur.execute("set client_encoding=utf8") 
cur.execute("insert into copytest values (%d, %s)", cls.data) cur.close() con.commit() con.close() def copy_to(self, stream=None, **options): return self.cursor.copy_to(stream, 'copytest', **options) @property def data_file(self): return OutputStream() def test_bad_params(self): call = self.cursor.copy_to call(None, 'copytest') call(None, 'copytest', format='text', sep='\t', null='', columns=['id', 'name']) self.assertRaises(TypeError, call) self.assertRaises(TypeError, call, None) self.assertRaises(TypeError, call, None, 42) self.assertRaises(TypeError, call, None, ['copytest']) self.assertRaises(TypeError, call, 'bad', 'copytest') self.assertRaises(TypeError, call, None, 'copytest', format=42) self.assertRaises(ValueError, call, None, 'copytest', format='bad') self.assertRaises(TypeError, call, None, 'copytest', sep=42) self.assertRaises(ValueError, call, None, 'copytest', sep='bad') self.assertRaises(TypeError, call, None, 'copytest', null=42) self.assertRaises(TypeError, call, None, 'copytest', decode='bad') self.assertRaises(TypeError, call, None, 'copytest', columns=42) def test_generator(self): ret = self.copy_to() self.assertIsInstance(ret, Iterable) rows = list(ret) self.assertEqual(len(rows), 3) rows = ''.join(rows) self.assertIsInstance(rows, str) self.assertEqual(rows, self.data_text) self.check_rowcount() if str is unicode: # Python >= 3.0 def test_generator_bytes(self): ret = self.copy_to(decode=False) self.assertIsInstance(ret, Iterable) rows = list(ret) self.assertEqual(len(rows), 3) rows = b''.join(rows) self.assertIsInstance(rows, bytes) self.assertEqual(rows, self.data_text.encode('utf-8')) else: # Python < 3.0 def test_generator_unicode(self): ret = self.copy_to(decode=True) self.assertIsInstance(ret, Iterable) rows = list(ret) self.assertEqual(len(rows), 3) rows = ''.join(rows) self.assertIsInstance(rows, unicode) self.assertEqual(rows, self.data_text.decode('utf-8')) def test_rowcount_increment(self): ret = self.copy_to() 
self.assertIsInstance(ret, Iterable) for n, row in enumerate(ret): self.check_rowcount(n + 1) def test_decode(self): ret_raw = b''.join(self.copy_to(decode=False)) ret_decoded = ''.join(self.copy_to(decode=True)) self.assertIsInstance(ret_raw, bytes) self.assertIsInstance(ret_decoded, unicode) self.assertEqual(ret_decoded, ret_raw.decode('utf-8')) self.check_rowcount() def test_sep(self): ret = list(self.copy_to(sep='-')) self.assertEqual(ret, ['%d-%s\n' % row for row in self.data]) def test_null(self): data = ['%d\t%s\n' % row for row in self.data] self.cursor.execute('insert into copytest values(4, null)') try: ret = list(self.copy_to()) self.assertEqual(ret, data + ['4\t\\N\n']) ret = list(self.copy_to(null='Nix')) self.assertEqual(ret, data + ['4\tNix\n']) ret = list(self.copy_to(null='')) self.assertEqual(ret, data + ['4\t\n']) finally: self.cursor.execute('delete from copytest where id=4') def test_columns(self): data_id = ''.join('%d\n' % row[0] for row in self.data) data_name = ''.join('%s\n' % row[1] for row in self.data) ret = ''.join(self.copy_to(columns='id')) self.assertEqual(ret, data_id) ret = ''.join(self.copy_to(columns=['id'])) self.assertEqual(ret, data_id) ret = ''.join(self.copy_to(columns='name')) self.assertEqual(ret, data_name) ret = ''.join(self.copy_to(columns=['name'])) self.assertEqual(ret, data_name) ret = ''.join(self.copy_to(columns='id, name')) self.assertEqual(ret, self.data_text) ret = ''.join(self.copy_to(columns=['id', 'name'])) self.assertEqual(ret, self.data_text) self.assertRaises(pgdb.ProgrammingError, self.copy_to, columns=['id', 'age']) def test_csv(self): ret = self.copy_to(format='csv') self.assertIsInstance(ret, Iterable) rows = list(ret) self.assertEqual(len(rows), 3) rows = ''.join(rows) self.assertIsInstance(rows, str) self.assertEqual(rows, self.data_csv) self.check_rowcount(3) def test_csv_with_sep(self): rows = ''.join(self.copy_to(format='csv', sep=';')) self.assertEqual(rows, self.data_csv.replace(',', ';')) def 
test_binary(self): ret = self.copy_to(format='binary') self.assertIsInstance(ret, Iterable) for row in ret: self.assertTrue(row.startswith(b'PGCOPY\n\377\r\n\0')) break self.check_rowcount(1) def test_binary_with_sep(self): self.assertRaises(ValueError, self.copy_to, format='binary', sep='\t') def test_binary_with_unicode(self): self.assertRaises(ValueError, self.copy_to, format='binary', decode=True) def test_query(self): self.assertRaises(ValueError, self.cursor.copy_to, None, "select name from copytest", columns='noname') ret = self.cursor.copy_to(None, "select name||'!' from copytest where id=1941") self.assertIsInstance(ret, Iterable) rows = list(ret) self.assertEqual(len(rows), 1) self.assertIsInstance(rows[0], str) self.assertEqual(rows[0], '%s!\n' % self.data[1][1]) self.check_rowcount(1) def test_file(self): stream = self.data_file ret = self.copy_to(stream) self.assertIs(ret, self.cursor) self.assertEqual(str(stream), self.data_text) data = self.data_text if str is unicode: # Python >= 3.0 data = data.encode('utf-8') sizes = [len(row) + 1 for row in data.splitlines()] self.assertEqual(stream.sizes, sizes) self.check_rowcount() class TestBinary(TestCopy): """Test the copy_from and copy_to methods with binary data.""" def test_round_trip(self): # fill table from textual data self.cursor.copy_from(self.data_text, 'copytest', format='text') self.check_table() self.check_rowcount() # get data back in binary format ret = self.cursor.copy_to(None, 'copytest', format='binary') self.assertIsInstance(ret, Iterable) data_binary = b''.join(ret) self.assertTrue(data_binary.startswith(b'PGCOPY\n\377\r\n\0')) self.check_rowcount() self.truncate_table() # fill table from binary data self.cursor.copy_from(data_binary, 'copytest', format='binary') self.check_table() self.check_rowcount() if __name__ == '__main__': unittest.main() PyGreSQL-5.1/tests/test_classic.py0000755000175100077410000002707113466770070017137 0ustar darcypyg00000000000000#!/usr/bin/python # -*- coding: 
utf-8 -*- from __future__ import print_function try: import unittest2 as unittest # for Python < 2.7 except ImportError: import unittest import sys from functools import partial from time import sleep from threading import Thread from pg import * # We need a database to test against. If LOCAL_PyGreSQL.py exists we will # get our information from that. Otherwise we use the defaults. dbname = 'unittest' dbhost = None dbport = 5432 try: from .LOCAL_PyGreSQL import * except (ImportError, ValueError): try: from LOCAL_PyGreSQL import * except ImportError: pass def opendb(): db = DB(dbname, dbhost, dbport) db.query("SET DATESTYLE TO 'ISO'") db.query("SET TIME ZONE 'EST5EDT'") db.query("SET DEFAULT_WITH_OIDS=FALSE") db.query("SET CLIENT_MIN_MESSAGES=WARNING") db.query("SET STANDARD_CONFORMING_STRINGS=FALSE") return db db = opendb() for q in ( "DROP TABLE _test1._test_schema", "DROP TABLE _test2._test_schema", "DROP SCHEMA _test1", "DROP SCHEMA _test2", ): try: db.query(q) except Exception: pass db.close() class UtilityTest(unittest.TestCase): def setUp(self): """Setup test tables or empty them if they already exist.""" db = opendb() for t in ('_test1', '_test2'): try: db.query("CREATE SCHEMA " + t) except Error: pass try: db.query("CREATE TABLE %s._test_schema " "(%s int PRIMARY KEY)" % (t, t)) except Error: db.query("DELETE FROM %s._test_schema" % t) try: db.query("CREATE TABLE _test_schema " "(_test int PRIMARY KEY, _i interval, dvar int DEFAULT 999)") except Error: db.query("DELETE FROM _test_schema") try: db.query("CREATE VIEW _test_vschema AS " "SELECT _test, 'abc'::text AS _test2 FROM _test_schema") except Error: pass def test_invalidname(self): """Make sure that invalid table names are caught""" db = opendb() self.assertRaises(NotSupportedError, db.get_attnames, 'x.y.z') def test_schema(self): """Does it differentiate the same table name in different schemas""" db = opendb() # see if they differentiate the table names properly self.assertEqual( 
db.get_attnames('_test_schema'), {'_test': 'int', '_i': 'date', 'dvar': 'int'} ) self.assertEqual( db.get_attnames('public._test_schema'), {'_test': 'int', '_i': 'date', 'dvar': 'int'} ) self.assertEqual( db.get_attnames('_test1._test_schema'), {'_test1': 'int'} ) self.assertEqual( db.get_attnames('_test2._test_schema'), {'_test2': 'int'} ) def test_pkey(self): db = opendb() self.assertEqual(db.pkey('_test_schema'), '_test') self.assertEqual(db.pkey('public._test_schema'), '_test') self.assertEqual(db.pkey('_test1._test_schema'), '_test1') self.assertEqual(db.pkey('_test2._test_schema'), '_test2') self.assertRaises(KeyError, db.pkey, '_test_vschema') def test_get(self): db = opendb() db.query("INSERT INTO _test_schema VALUES (1234)") db.get('_test_schema', 1234) db.get('_test_schema', 1234, keyname='_test') self.assertRaises(ProgrammingError, db.get, '_test_vschema', 1234) db.get('_test_vschema', 1234, keyname='_test') def test_params(self): db = opendb() db.query("INSERT INTO _test_schema VALUES ($1, $2, $3)", 12, None, 34) d = db.get('_test_schema', 12) self.assertEqual(d['dvar'], 34) def test_insert(self): db = opendb() d = dict(_test=1234) db.insert('_test_schema', d) self.assertEqual(d['dvar'], 999) db.insert('_test_schema', _test=1235) self.assertEqual(d['dvar'], 999) def test_context_manager(self): db = opendb() t = '_test_schema' d = dict(_test=1235) with db: db.insert(t, d) d['_test'] += 1 db.insert(t, d) try: with db: d['_test'] += 1 db.insert(t, d) db.insert(t, d) except IntegrityError: pass with db: d['_test'] += 1 db.insert(t, d) d['_test'] += 1 db.insert(t, d) self.assertTrue(db.get(t, 1235)) self.assertTrue(db.get(t, 1236)) self.assertRaises(DatabaseError, db.get, t, 1237) self.assertTrue(db.get(t, 1238)) self.assertTrue(db.get(t, 1239)) def test_sqlstate(self): db = opendb() db.query("INSERT INTO _test_schema VALUES (1234)") try: db.query("INSERT INTO _test_schema VALUES (1234)") except DatabaseError as error: self.assertTrue(isinstance(error, 
IntegrityError)) # the SQLSTATE error code for unique violation is 23505 self.assertEqual(error.sqlstate, '23505') def test_mixed_case(self): db = opendb() try: db.query('CREATE TABLE _test_mc ("_Test" int PRIMARY KEY)') except Error: db.query("DELETE FROM _test_mc") d = dict(_Test=1234) db.insert('_test_mc', d) def test_update(self): db = opendb() db.query("INSERT INTO _test_schema VALUES (1234)") r = db.get('_test_schema', 1234) r['dvar'] = 123 db.update('_test_schema', r) r = db.get('_test_schema', 1234) self.assertEqual(r['dvar'], 123) r = db.get('_test_schema', 1234) self.assertIn('dvar', r) db.update('_test_schema', _test=1234, dvar=456) r = db.get('_test_schema', 1234) self.assertEqual(r['dvar'], 456) r = db.get('_test_schema', 1234) db.update('_test_schema', r, dvar=456) r = db.get('_test_schema', 1234) self.assertEqual(r['dvar'], 456) def notify_callback(self, arg_dict): if arg_dict: arg_dict['called'] = True else: self.notify_timeout = True def test_notify(self, options=None): if not options: options = {} run_as_method = options.get('run_as_method') call_notify = options.get('call_notify') two_payloads = options.get('two_payloads') db = opendb() # Get function under test, can be standalone or DB method. fut = db.notification_handler if run_as_method else partial( NotificationHandler, db) arg_dict = dict(event=None, called=False) self.notify_timeout = False # Listen for 'event_1'. target = fut('event_1', self.notify_callback, arg_dict, 5) thread = Thread(None, target) thread.start() try: # Wait until the thread has started. for n in range(500): if target.listening: break sleep(0.01) self.assertTrue(target.listening) self.assertTrue(thread.is_alive()) # Open another connection for sending notifications. db2 = opendb() # Generate notification from the other connection. 
if two_payloads: db2.begin() if call_notify: if two_payloads: target.notify(db2, payload='payload 0') target.notify(db2, payload='payload 1') else: if two_payloads: db2.query("notify event_1, 'payload 0'") db2.query("notify event_1, 'payload 1'") if two_payloads: db2.commit() # Wait until the notification has been caught. for n in range(500): if arg_dict['called'] or self.notify_timeout: break sleep(0.01) # Check that callback has been invoked. self.assertTrue(arg_dict['called']) self.assertEqual(arg_dict['event'], 'event_1') self.assertEqual(arg_dict['extra'], 'payload 1') self.assertTrue(isinstance(arg_dict['pid'], int)) self.assertFalse(self.notify_timeout) arg_dict['called'] = False self.assertTrue(thread.is_alive()) # Generate stop notification. if call_notify: target.notify(db2, stop=True, payload='payload 2') else: db2.query("notify stop_event_1, 'payload 2'") db2.close() # Wait until the notification has been caught. for n in range(500): if arg_dict['called'] or self.notify_timeout: break sleep(0.01) # Check that callback has been invoked. self.assertTrue(arg_dict['called']) self.assertEqual(arg_dict['event'], 'stop_event_1') self.assertEqual(arg_dict['extra'], 'payload 2') self.assertTrue(isinstance(arg_dict['pid'], int)) self.assertFalse(self.notify_timeout) thread.join(5) self.assertFalse(thread.is_alive()) self.assertFalse(target.listening) target.close() except Exception: target.close() if thread.is_alive(): thread.join(5) def test_notify_other_options(self): for run_as_method in False, True: for call_notify in False, True: for two_payloads in False, True: options = dict( run_as_method=run_as_method, call_notify=call_notify, two_payloads=two_payloads) if any(options.values()): self.test_notify(options) def test_notify_timeout(self): for run_as_method in False, True: db = opendb() # Get function under test, can be standalone or DB method. 
fut = db.notification_handler if run_as_method else partial( NotificationHandler, db) arg_dict = dict(event=None, called=False) self.notify_timeout = False # Listen for 'event_1' with timeout of 10ms. target = fut('event_1', self.notify_callback, arg_dict, 0.01) thread = Thread(None, target) thread.start() # Sleep 20ms, long enough to time out. sleep(0.02) # Verify that we've indeed timed out. self.assertFalse(arg_dict.get('called')) self.assertTrue(self.notify_timeout) self.assertFalse(thread.is_alive()) self.assertFalse(target.listening) target.close() if __name__ == '__main__': if len(sys.argv) == 2 and sys.argv[1] == '-l': print('\n'.join(unittest.getTestCaseNames(UtilityTest, 'test_'))) sys.exit(0) test_list = [name for name in sys.argv[1:] if not name.startswith('-')] if not test_list: test_list = unittest.getTestCaseNames(UtilityTest, 'test_') suite = unittest.TestSuite() for test_name in test_list: try: suite.addTest(UtilityTest(test_name)) except Exception: print("\n ERROR: %s.\n" % sys.exc_value) sys.exit(1) verbosity = '-v' in sys.argv[1:] and 2 or 1 failfast = '-l' in sys.argv[1:] runner = unittest.TextTestRunner(verbosity=verbosity, failfast=failfast) rc = runner.run(suite) sys.exit(1 if rc.errors or rc.failures else 0) PyGreSQL-5.1/tests/test_classic_notification.py0000755000175100077410000003504213466770070021702 0ustar darcypyg00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- """Test the classic PyGreSQL interface. Sub-tests for the notification handler object. Contributed by Christoph Zwerschke. These tests need a database to test against. """ try: import unittest2 as unittest # for Python < 2.7 except ImportError: import unittest import warnings from time import sleep from threading import Thread import pg # the module under test # We need a database to test against. If LOCAL_PyGreSQL.py exists we will # get our information from that. Otherwise we use the defaults. # The current user must have create schema privilege on the database. 
dbname = 'unittest' dbhost = None dbport = 5432 debug = False # let DB wrapper print debugging output try: from .LOCAL_PyGreSQL import * except (ImportError, ValueError): try: from LOCAL_PyGreSQL import * except ImportError: pass def DB(): """Create a DB wrapper object connecting to the test database.""" db = pg.DB(dbname, dbhost, dbport) if debug: db.debug = debug return db class TestPyNotifyAlias(unittest.TestCase): """Test alternative ways of creating a NotificationHandler.""" def callback(self): self.fail('Callback should not be called in this test') def testPgNotify(self): db = DB() arg_dict = {} args = ('test_event', self.callback, arg_dict) kwargs = dict(timeout=2, stop_event='test_stop') with warnings.catch_warnings(record=True) as warn_msgs: warnings.simplefilter("always") handler1 = pg.pgnotify(db, *args, **kwargs) assert len(warn_msgs) == 1 warn_msg = warn_msgs[0] assert issubclass(warn_msg.category, DeprecationWarning) assert 'deprecated' in str(warn_msg.message) self.assertIsInstance(handler1, pg.NotificationHandler) handler2 = db.notification_handler(*args, **kwargs) self.assertIsInstance(handler2, pg.NotificationHandler) self.assertIs(handler1.db, handler2.db) self.assertEqual(handler1.event, handler2.event) self.assertIs(handler1.callback, handler2.callback) self.assertIs(handler1.arg_dict, handler2.arg_dict) self.assertEqual(handler1.timeout, handler2.timeout) self.assertEqual(handler1.stop_event, handler2.stop_event) class TestSyncNotification(unittest.TestCase): """Test notification handler running in the same thread.""" def setUp(self): self.db = DB() self.timeout = None self.called = True self.payloads = [] def tearDown(self): if self.db: self.db.close() def callback(self, arg_dict): if arg_dict is None: self.timeout = True else: self.timeout = False self.payloads.append(arg_dict.get('extra')) def get_handler(self, event=None, arg_dict=None, stop_event=None): if not event: event = 'test_async_notification' if not stop_event: stop_event = 
'stop_async_notification' callback = self.callback handler = self.db.notification_handler( event, callback, arg_dict, 0, stop_event) self.assertEqual(handler.event, event) self.assertEqual(handler.stop_event, stop_event or 'stop_%s' % event) self.assertIs(handler.callback, callback) if arg_dict is None: self.assertEqual(handler.arg_dict, {}) else: self.assertIs(handler.arg_dict, arg_dict) self.assertEqual(handler.timeout, 0) self.assertFalse(handler.listening) return handler def testCloseHandler(self): handler = self.get_handler() self.assertIs(handler.db, self.db) handler.close() self.assertRaises(pg.InternalError, self.db.close) self.db = None self.assertIs(handler.db, None) def testDeleteHandler(self): handler = self.get_handler('test_del') self.assertIs(handler.db, self.db) handler.listen() self.db.query('notify test_del') self.db.query('notify test_del') del handler self.db.query('notify test_del') n = 0 while self.db.getnotify() and n < 4: n += 1 self.assertEqual(n, 2) def testNotify(self): handler = self.get_handler() handler.listen() self.assertRaises(TypeError, handler.notify, invalid=True) handler.notify(payload='baz') handler.notify(stop=True, payload='buz') handler.unlisten() self.db.close() self.db = None def testNotifyWithArgsAndPayload(self): arg_dict = {'foo': 'bar'} handler = self.get_handler(arg_dict=arg_dict) self.assertEqual(handler.timeout, 0) handler.listen() handler.notify(payload='baz') handler.notify(payload='biz') handler() self.assertIsNotNone(self.timeout) self.assertFalse(self.timeout) self.assertEqual(self.payloads, ['baz', 'biz']) self.assertEqual(arg_dict['foo'], 'bar') self.assertEqual(arg_dict['event'], handler.event) self.assertIsInstance(arg_dict['pid'], int) self.assertEqual(arg_dict['extra'], 'biz') self.assertTrue(handler.listening) del self.payloads[:] handler.notify(stop=True, payload='buz') handler() self.assertIsNotNone(self.timeout) self.assertFalse(self.timeout) self.assertEqual(self.payloads, ['buz']) 
self.assertEqual(arg_dict['foo'], 'bar') self.assertEqual(arg_dict['event'], handler.stop_event) self.assertIsInstance(arg_dict['pid'], int) self.assertEqual(arg_dict['extra'], 'buz') self.assertFalse(handler.listening) handler.unlisten() def testNotifyWrongEvent(self): handler = self.get_handler('good_event') self.assertEqual(handler.timeout, 0) handler.listen() handler.notify(payload="note 1") self.db.query("notify bad_event, 'note 2'") handler.notify(payload="note 3") handler() self.assertIsNotNone(self.timeout) self.assertFalse(self.timeout) self.assertEqual(self.payloads, ['note 1', 'note 3']) self.assertTrue(handler.listening) del self.payloads[:] self.db.query('listen bad_event') handler.notify(payload="note 4") self.db.query("notify bad_event, 'note 5'") handler.notify(payload="note 6") try: handler() except pg.DatabaseError as error: self.assertEqual(str(error), 'Listening for "good_event" and "stop_good_event",' ' but notified of "bad_event"') self.assertIsNotNone(self.timeout) self.assertFalse(self.timeout) self.assertEqual(self.payloads, ['note 4']) self.assertFalse(handler.listening) class TestAsyncNotification(unittest.TestCase): """Test notification handler running in a separate thread.""" def setUp(self): self.db = DB() def tearDown(self): self.doCleanups() if self.db: self.db.close() def callback(self, arg_dict): if arg_dict is None: self.timeout = True elif arg_dict is self.arg_dict: arg_dict = arg_dict.copy() pid = arg_dict.get('pid') if isinstance(pid, int): arg_dict['pid'] = 1 self.received.append(arg_dict) else: self.received.append(dict(error=arg_dict)) def start_handler(self, event=None, arg_dict=None, timeout=5, stop_event=None): db = DB() if not event: event = 'test_async_notification' if not stop_event: stop_event = 'stop_async_notification' callback = self.callback handler = db.notification_handler( event, callback, arg_dict, timeout, stop_event) self.handler = handler self.assertIsInstance(handler, pg.NotificationHandler) 
self.assertEqual(handler.event, event) self.assertEqual(handler.stop_event, stop_event or 'stop_%s' % event) self.event = handler.event self.assertIs(handler.callback, callback) if arg_dict is None: self.assertEqual(handler.arg_dict, {}) else: self.assertIsInstance(handler.arg_dict, dict) self.arg_dict = handler.arg_dict self.assertEqual(handler.timeout, timeout) self.assertFalse(handler.listening) thread = Thread(target=handler, name='test_notification_thread') self.thread = thread thread.start() self.stopped = timeout == 0 self.addCleanup(self.stop_handler) for n in range(500): if handler.listening: break sleep(0.01) self.assertTrue(handler.listening) if not self.stopped: self.assertTrue(thread.is_alive()) self.timeout = False self.received = [] self.sent = [] def stop_handler(self): handler = self.handler thread = self.thread if not self.stopped and self.handler.listening: self.notify_handler(stop=True) handler.close() self.db = None if thread.is_alive(): thread.join(5) self.assertFalse(handler.listening) self.assertFalse(thread.is_alive()) def notify_handler(self, stop=False, payload=None): event = self.event if stop: event = self.handler.stop_event self.stopped = True arg_dict = self.arg_dict.copy() arg_dict.update(event=event, pid=1, extra=payload or '') self.handler.notify(db=self.db, stop=stop, payload=payload) self.sent.append(arg_dict) def notify_query(self, stop=False, payload=None): event = self.event if stop: event = self.handler.stop_event self.stopped = True q = 'notify "%s"' % event if payload: q += ", '%s'" % payload arg_dict = self.arg_dict.copy() arg_dict.update(event=event, pid=1, extra=payload or '') self.db.query(q) self.sent.append(arg_dict) def wait(self): for n in range(500): if self.timeout: return False if len(self.received) >= len(self.sent): return True sleep(0.01) def receive(self, stop=False): if not self.sent: stop = True if stop: self.notify_handler(stop=True, payload='stop') self.assertTrue(self.wait()) 
self.assertFalse(self.timeout) self.assertEqual(self.received, self.sent) self.received = [] self.sent = [] self.assertEqual(self.handler.listening, not self.stopped) def testNotifyHandlerEmpty(self): self.start_handler() self.notify_handler(stop=True) self.assertEqual(len(self.sent), 1) self.receive() def testNotifyQueryEmpty(self): self.start_handler() self.notify_query(stop=True) self.assertEqual(len(self.sent), 1) self.receive() def testNotifyHandlerOnce(self): self.start_handler() self.notify_handler() self.assertEqual(len(self.sent), 1) self.receive() self.receive(stop=True) def testNotifyQueryOnce(self): self.start_handler() self.notify_query() self.receive() self.notify_query(stop=True) self.receive() def testNotifyWithArgs(self): arg_dict = {'test': 42, 'more': 43, 'less': 41} self.start_handler('test_args', arg_dict) self.notify_query() self.receive(stop=True) def testNotifySeveralTimes(self): arg_dict = {'test': 1} self.start_handler(arg_dict=arg_dict) for count in range(3): self.notify_query() self.receive() arg_dict['test'] += 1 for count in range(2): self.notify_handler() self.receive() arg_dict['test'] += 1 for count in range(3): self.notify_query() self.receive(stop=True) def testNotifyOnceWithPayload(self): self.start_handler() self.notify_query(payload='test_payload') self.receive(stop=True) def testNotifyWithArgsAndPayload(self): self.start_handler(arg_dict={'foo': 'bar'}) self.notify_query(payload='baz') self.receive(stop=True) def testNotifyQuotedNames(self): self.start_handler('Hello, World!') self.notify_query(payload='How do you do?') self.receive(stop=True) def testNotifyWithFivePayloads(self): self.start_handler('gimme_5', {'test': 'Gimme 5'}) for count in range(5): self.notify_query(payload="Round %d" % count) self.assertEqual(len(self.sent), 5) self.receive(stop=True) def testReceiveImmediately(self): self.start_handler('immediate', {'test': 'immediate'}) for count in range(3): self.notify_query(payload="Round %d" % count) self.receive() 
self.receive(stop=True) def testNotifyDistinctInTransaction(self): self.start_handler('test_transaction', {'transaction': True}) self.db.begin() for count in range(3): self.notify_query(payload='Round %d' % count) self.db.commit() self.receive(stop=True) def testNotifySameInTransaction(self): self.start_handler('test_transaction', {'transaction': True}) self.db.begin() for count in range(3): self.notify_query() self.db.commit() # these same notifications may be delivered as one, # so we must not wait for all three to appear self.sent = self.sent[:1] self.receive(stop=True) def testNotifyNoTimeout(self): self.start_handler(timeout=None) self.assertIsNone(self.handler.timeout) self.assertTrue(self.handler.listening) sleep(0.02) self.assertFalse(self.timeout) self.receive(stop=True) def testNotifyZeroTimeout(self): self.start_handler(timeout=0) self.assertEqual(self.handler.timeout, 0) self.assertTrue(self.handler.listening) self.assertFalse(self.timeout) def testNotifyWithoutTimeout(self): self.start_handler(timeout=1) self.assertEqual(self.handler.timeout, 1) sleep(0.02) self.assertFalse(self.timeout) self.receive(stop=True) def testNotifyWithTimeout(self): self.start_handler(timeout=0.01) sleep(0.02) self.assertTrue(self.timeout) if __name__ == '__main__': unittest.main() PyGreSQL-5.1/tests/test_dbapi20.py0000755000175100077410000015245713466770070016746 0ustar darcypyg00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- # $Id: test_dbapi20.py 995 2019-04-25 14:10:20Z cito $ try: import unittest2 as unittest # for Python < 2.7 except ImportError: import unittest import pgdb try: from . import dbapi20 except (ImportError, ValueError, SystemError): import dbapi20 # We need a database to test against. # If LOCAL_PyGreSQL.py exists we will get our information from that. # Otherwise we use the defaults. 
dbname = 'dbapi20_test' dbhost = '' dbport = 5432 try: from .LOCAL_PyGreSQL import * except (ImportError, ValueError): try: from LOCAL_PyGreSQL import * except ImportError: pass import gc import sys from datetime import date, time, datetime, timedelta from uuid import UUID as Uuid try: # noinspection PyUnresolvedReferences long except NameError: # Python >= 3.0 long = int try: from collections import OrderedDict except ImportError: # Python 2.6 or 3.0 OrderedDict = None class PgBitString: """Test object with a PostgreSQL representation as Bit String.""" def __init__(self, value): self.value = value def __pg_repr__(self): return "B'{0:b}'".format(self.value) class test_PyGreSQL(dbapi20.DatabaseAPI20Test): driver = pgdb connect_args = () connect_kw_args = {'database': dbname, 'host': '%s:%d' % (dbhost or '', dbport or -1)} lower_func = 'lower' # For stored procedure test def setUp(self): # Call superclass setUp in case this does something in the future dbapi20.DatabaseAPI20Test.setUp(self) try: con = self._connect() con.close() except pgdb.Error: # try to create a missing database import pg try: # first try to log in as superuser db = pg.DB('postgres', dbhost or None, dbport or -1, user='postgres') except Exception: # then try to log in as current user db = pg.DB('postgres', dbhost or None, dbport or -1) db.query('create database ' + dbname) def tearDown(self): dbapi20.DatabaseAPI20Test.tearDown(self) def test_version(self): v = pgdb.version self.assertIsInstance(v, str) self.assertIn('.', v) self.assertEqual(pgdb.__version__, v) def test_connect_kwargs(self): application_name = 'PyGreSQL DB API 2.0 Test' self.connect_kw_args['application_name'] = application_name con = self._connect() cur = con.cursor() cur.execute("select application_name from pg_stat_activity" " where application_name = %s", (application_name,)) self.assertEqual(cur.fetchone(), (application_name,)) def test_percent_sign(self): con = self._connect() cur = con.cursor() cur.execute("select %s, 'a %% 
sign'", ('a % sign',)) self.assertEqual(cur.fetchone(), ('a % sign', 'a % sign')) cur.execute("select 'a % sign'") self.assertEqual(cur.fetchone(), ('a % sign',)) cur.execute("select 'a %% sign'") self.assertEqual(cur.fetchone(), ('a % sign',)) def test_callproc_no_params(self): con = self._connect() cur = con.cursor() # note that now() does not change within a transaction cur.execute('select now()') now = cur.fetchone()[0] res = cur.callproc('now') self.assertIsNone(res) res = cur.fetchone()[0] self.assertEqual(res, now) def test_callproc_bad_params(self): con = self._connect() cur = con.cursor() self.assertRaises(TypeError, cur.callproc, 'lower', 42) self.assertRaises(pgdb.ProgrammingError, cur.callproc, 'lower', (42,)) def test_callproc_one_param(self): con = self._connect() cur = con.cursor() params = (42.4382,) res = cur.callproc("round", params) self.assertIs(res, params) res = cur.fetchone()[0] self.assertEqual(res, 42) def test_callproc_two_params(self): con = self._connect() cur = con.cursor() params = (9, 4) res = cur.callproc("div", params) self.assertIs(res, params) res = cur.fetchone()[0] self.assertEqual(res, 2) def test_cursor_type(self): class TestCursor(pgdb.Cursor): pass con = self._connect() self.assertIs(con.cursor_type, pgdb.Cursor) cur = con.cursor() self.assertIsInstance(cur, pgdb.Cursor) self.assertNotIsInstance(cur, TestCursor) con.cursor_type = TestCursor cur = con.cursor() self.assertIsInstance(cur, TestCursor) cur = con.cursor() self.assertIsInstance(cur, TestCursor) con = self._connect() self.assertIs(con.cursor_type, pgdb.Cursor) cur = con.cursor() self.assertIsInstance(cur, pgdb.Cursor) self.assertNotIsInstance(cur, TestCursor) def test_row_factory(self): class TestCursor(pgdb.Cursor): def row_factory(self, row): return dict(('column %s' % desc[0], value) for desc, value in zip(self.description, row)) con = self._connect() con.cursor_type = TestCursor cur = con.cursor() self.assertIsInstance(cur, TestCursor) res = cur.execute("select 
1 as a, 2 as b") self.assertIs(res, cur, 'execute() should return cursor') res = cur.fetchone() self.assertIsInstance(res, dict) self.assertEqual(res, {'column a': 1, 'column b': 2}) cur.execute("select 1 as a, 2 as b union select 3, 4 order by 1") res = cur.fetchall() self.assertIsInstance(res, list) self.assertEqual(len(res), 2) self.assertIsInstance(res[0], dict) self.assertEqual(res[0], {'column a': 1, 'column b': 2}) self.assertIsInstance(res[1], dict) self.assertEqual(res[1], {'column a': 3, 'column b': 4}) def test_build_row_factory(self): class TestCursor(pgdb.Cursor): def build_row_factory(self): keys = [desc[0] for desc in self.description] return lambda row: dict((key, value) for key, value in zip(keys, row)) con = self._connect() con.cursor_type = TestCursor cur = con.cursor() self.assertIsInstance(cur, TestCursor) cur.execute("select 1 as a, 2 as b") res = cur.fetchone() self.assertIsInstance(res, dict) self.assertEqual(res, {'a': 1, 'b': 2}) cur.execute("select 1 as a, 2 as b union select 3, 4 order by 1") res = cur.fetchall() self.assertIsInstance(res, list) self.assertEqual(len(res), 2) self.assertIsInstance(res[0], dict) self.assertEqual(res[0], {'a': 1, 'b': 2}) self.assertIsInstance(res[1], dict) self.assertEqual(res[1], {'a': 3, 'b': 4}) def test_cursor_with_named_columns(self): con = self._connect() cur = con.cursor() res = cur.execute("select 1 as abc, 2 as de, 3 as f") self.assertIs(res, cur, 'execute() should return cursor') res = cur.fetchone() self.assertIsInstance(res, tuple) self.assertEqual(res, (1, 2, 3)) self.assertEqual(res._fields, ('abc', 'de', 'f')) self.assertEqual(res.abc, 1) self.assertEqual(res.de, 2) self.assertEqual(res.f, 3) cur.execute("select 1 as one, 2 as two union select 3, 4 order by 1") res = cur.fetchall() self.assertIsInstance(res, list) self.assertEqual(len(res), 2) self.assertIsInstance(res[0], tuple) self.assertEqual(res[0], (1, 2)) self.assertEqual(res[0]._fields, ('one', 'two')) self.assertIsInstance(res[1], 
tuple) self.assertEqual(res[1], (3, 4)) self.assertEqual(res[1]._fields, ('one', 'two')) def test_cursor_with_unnamed_columns(self): con = self._connect() cur = con.cursor() cur.execute("select 1, 2, 3") res = cur.fetchone() self.assertIsInstance(res, tuple) self.assertEqual(res, (1, 2, 3)) old_py = OrderedDict is None # Python 2.6 or 3.0 # old Python versions cannot rename tuple fields with underscore if old_py: self.assertEqual(res._fields, ('column_0', 'column_1', 'column_2')) else: self.assertEqual(res._fields, ('_0', '_1', '_2')) cur.execute("select 1 as one, 2, 3 as three") res = cur.fetchone() self.assertIsInstance(res, tuple) self.assertEqual(res, (1, 2, 3)) if old_py: # cannot auto rename with underscore self.assertEqual(res._fields, ('one', 'column_1', 'three')) else: self.assertEqual(res._fields, ('one', '_1', 'three')) def test_cursor_with_badly_named_columns(self): con = self._connect() cur = con.cursor() cur.execute("select 1 as abc, 2 as def") res = cur.fetchone() self.assertIsInstance(res, tuple) self.assertEqual(res, (1, 2)) old_py = OrderedDict is None # Python 2.6 or 3.0 if old_py: self.assertEqual(res._fields, ('abc', 'column_1')) else: self.assertEqual(res._fields, ('abc', '_1')) cur.execute('select 1 as snake_case, 2 as "CamelCase",' ' 3 as "kebap-case", 4 as "_bad", 5 as "0bad", 6 as "bad$"') res = cur.fetchone() self.assertIsInstance(res, tuple) self.assertEqual(res, (1, 2, 3, 4, 5, 6)) # old Python versions cannot rename tuple fields with underscore self.assertEqual(res._fields[:2], ('snake_case', 'CamelCase')) fields = ('_2', '_3', '_4', '_5') if old_py: fields = tuple('column' + field for field in fields) self.assertEqual(res._fields[2:], fields) def test_colnames(self): con = self._connect() cur = con.cursor() cur.execute("select 1, 2, 3") names = cur.colnames self.assertIsInstance(names, list) self.assertEqual(names, ['?column?', '?column?', '?column?']) cur.execute("select 1 as a, 2 as bc, 3 as def, 4 as g") names = cur.colnames 
self.assertIsInstance(names, list) self.assertEqual(names, ['a', 'bc', 'def', 'g']) def test_coltypes(self): con = self._connect() cur = con.cursor() cur.execute("select 1::int2, 2::int4, 3::int8") types = cur.coltypes self.assertIsInstance(types, list) self.assertEqual(types, ['int2', 'int4', 'int8']) def test_description_fields(self): con = self._connect() cur = con.cursor() cur.execute("select 123456789::int8 col0," " 123456.789::numeric(41, 13) as col1," " 'foobar'::char(39) as col2") desc = cur.description self.assertIsInstance(desc, list) self.assertEqual(len(desc), 3) cols = [('int8', 8, None), ('numeric', 41, 13), ('bpchar', 39, None)] for i in range(3): c, d = cols[i], desc[i] self.assertIsInstance(d, tuple) self.assertEqual(len(d), 7) self.assertIsInstance(d.name, str) self.assertEqual(d.name, 'col%d' % i) self.assertIsInstance(d.type_code, str) self.assertEqual(d.type_code, c[0]) self.assertIsNone(d.display_size) self.assertIsInstance(d.internal_size, int) self.assertEqual(d.internal_size, c[1]) if c[2] is not None: self.assertIsInstance(d.precision, int) self.assertEqual(d.precision, c[1]) self.assertIsInstance(d.scale, int) self.assertEqual(d.scale, c[2]) else: self.assertIsNone(d.precision) self.assertIsNone(d.scale) self.assertIsNone(d.null_ok) def test_type_cache_info(self): con = self._connect() try: cur = con.cursor() type_cache = con.type_cache self.assertNotIn('numeric', type_cache) type_info = type_cache['numeric'] self.assertIn('numeric', type_cache) self.assertEqual(type_info, 'numeric') self.assertEqual(type_info.oid, 1700) self.assertEqual(type_info.len, -1) self.assertEqual(type_info.type, 'b') # base self.assertEqual(type_info.category, 'N') # numeric self.assertEqual(type_info.delim, ',') self.assertEqual(type_info.relid, 0) self.assertIs(con.type_cache[1700], type_info) self.assertNotIn('pg_type', type_cache) type_info = type_cache['pg_type'] self.assertIn('pg_type', type_cache) self.assertEqual(type_info.type, 'c') # composite 
self.assertEqual(type_info.category, 'C') # composite cols = type_cache.get_fields('pg_type') self.assertEqual(cols[0].name, 'typname') typname = type_cache[cols[0].type] self.assertEqual(typname, 'name') self.assertEqual(typname.type, 'b') # base self.assertEqual(typname.category, 'S') # string self.assertEqual(cols[3].name, 'typlen') typlen = type_cache[cols[3].type] self.assertEqual(typlen, 'int2') self.assertEqual(typlen.type, 'b') # base self.assertEqual(typlen.category, 'N') # numeric cur.close() cur = con.cursor() type_cache = con.type_cache self.assertIn('numeric', type_cache) cur.close() finally: con.close() con = self._connect() try: cur = con.cursor() type_cache = con.type_cache self.assertNotIn('pg_type', type_cache) self.assertEqual(type_cache.get('pg_type'), type_info) self.assertIn('pg_type', type_cache) self.assertIsNone(type_cache.get( self.table_prefix + '_surely_does_not_exist')) cur.close() finally: con.close() def test_type_cache_typecast(self): con = self._connect() try: cur = con.cursor() type_cache = con.type_cache self.assertIs(type_cache.get_typecast('int4'), int) cast_int = lambda v: 'int(%s)' % v type_cache.set_typecast('int4', cast_int) query = 'select 2::int2, 4::int4, 8::int8' cur.execute(query) i2, i4, i8 = cur.fetchone() self.assertEqual(i2, 2) self.assertEqual(i4, 'int(4)') self.assertEqual(i8, 8) self.assertEqual(type_cache.typecast(42, 'int4'), 'int(42)') type_cache.set_typecast(['int2', 'int8'], cast_int) cur.execute(query) i2, i4, i8 = cur.fetchone() self.assertEqual(i2, 'int(2)') self.assertEqual(i4, 'int(4)') self.assertEqual(i8, 'int(8)') type_cache.reset_typecast('int4') cur.execute(query) i2, i4, i8 = cur.fetchone() self.assertEqual(i2, 'int(2)') self.assertEqual(i4, 4) self.assertEqual(i8, 'int(8)') type_cache.reset_typecast(['int2', 'int8']) cur.execute(query) i2, i4, i8 = cur.fetchone() self.assertEqual(i2, 2) self.assertEqual(i4, 4) self.assertEqual(i8, 8) type_cache.set_typecast(['int2', 'int8'], cast_int) 
cur.execute(query) i2, i4, i8 = cur.fetchone() self.assertEqual(i2, 'int(2)') self.assertEqual(i4, 4) self.assertEqual(i8, 'int(8)') type_cache.reset_typecast() cur.execute(query) i2, i4, i8 = cur.fetchone() self.assertEqual(i2, 2) self.assertEqual(i4, 4) self.assertEqual(i8, 8) cur.close() finally: con.close() def test_cursor_iteration(self): con = self._connect() cur = con.cursor() cur.execute("select 1 union select 2 union select 3 order by 1") self.assertEqual([r[0] for r in cur], [1, 2, 3]) def test_cursor_invalidation(self): con = self._connect() cur = con.cursor() cur.execute("select 1 union select 2") self.assertEqual(cur.fetchone(), (1,)) self.assertFalse(con.closed) con.close() self.assertTrue(con.closed) self.assertRaises(pgdb.OperationalError, cur.fetchone) def test_fetch_2_rows(self): Decimal = pgdb.decimal_type() values = ('test', pgdb.Binary(b'\xff\x52\xb2'), True, 5, 6, 5.7, Decimal('234.234234'), Decimal('75.45'), pgdb.Date(2011, 7, 17), pgdb.Time(15, 47, 42), pgdb.Timestamp(2008, 10, 20, 15, 25, 35), pgdb.Interval(15, 31, 5), 7897234) table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() cur.execute("set datestyle to iso") cur.execute("create table %s (" "stringtest varchar," "binarytest bytea," "booltest bool," "integertest int4," "longtest int8," "floattest float8," "numerictest numeric," "moneytest money," "datetest date," "timetest time," "datetimetest timestamp," "intervaltest interval," "rowidtest oid)" % table) cur.execute("set standard_conforming_strings to on") for s in ('numeric', 'monetary', 'time'): cur.execute("set lc_%s to 'C'" % s) for _i in range(2): cur.execute("insert into %s values (" "%%s,%%s,%%s,%%s,%%s,%%s,%%s," "'%%s'::money,%%s,%%s,%%s,%%s,%%s)" % table, values) cur.execute("select * from %s" % table) rows = cur.fetchall() self.assertEqual(len(rows), 2) row0 = rows[0] self.assertEqual(row0, values) self.assertEqual(row0, rows[1]) self.assertIsInstance(row0[0], str) self.assertIsInstance(row0[1], 
bytes) self.assertIsInstance(row0[2], bool) self.assertIsInstance(row0[3], int) self.assertIsInstance(row0[4], long) self.assertIsInstance(row0[5], float) self.assertIsInstance(row0[6], Decimal) self.assertIsInstance(row0[7], Decimal) self.assertIsInstance(row0[8], date) self.assertIsInstance(row0[9], time) self.assertIsInstance(row0[10], datetime) self.assertIsInstance(row0[11], timedelta) finally: con.close() def test_integrity_error(self): table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() cur.execute("set client_min_messages = warning") cur.execute("create table %s (i int primary key)" % table) cur.execute("insert into %s values (1)" % table) cur.execute("insert into %s values (2)" % table) self.assertRaises(pgdb.IntegrityError, cur.execute, "insert into %s values (1)" % table) finally: con.close() def test_update_rowcount(self): table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() cur.execute("create table %s (i int)" % table) cur.execute("insert into %s values (1)" % table) cur.execute("update %s set i=2 where i=2 returning i" % table) self.assertEqual(cur.rowcount, 0) cur.execute("update %s set i=2 where i=1 returning i" % table) self.assertEqual(cur.rowcount, 1) cur.close() # keep rowcount even if cursor is closed (needed by SQLAlchemy) self.assertEqual(cur.rowcount, 1) finally: con.close() def test_sqlstate(self): con = self._connect() cur = con.cursor() try: cur.execute("select 1/0") except pgdb.DatabaseError as error: self.assertTrue(isinstance(error, pgdb.DataError)) # the SQLSTATE error code for division by zero is 22012 self.assertEqual(error.sqlstate, '22012') def test_float(self): nan, inf = float('nan'), float('inf') from math import isnan, isinf self.assertTrue(isnan(nan) and not isinf(nan)) self.assertTrue(isinf(inf) and not isnan(inf)) values = [0, 1, 0.03125, -42.53125, nan, inf, -inf, 'nan', 'inf', '-inf', 'NaN', 'Infinity', '-Infinity'] table = self.table_prefix + 'booze' con = 
self._connect() try: cur = con.cursor() cur.execute( "create table %s (n smallint, floattest float)" % table) params = enumerate(values) cur.executemany("insert into %s values (%%d,%%s)" % table, params) cur.execute("select floattest from %s order by n" % table) rows = cur.fetchall() self.assertEqual(cur.description[0].type_code, pgdb.FLOAT) self.assertNotEqual(cur.description[0].type_code, pgdb.ARRAY) self.assertNotEqual(cur.description[0].type_code, pgdb.RECORD) finally: con.close() self.assertEqual(len(rows), len(values)) rows = [row[0] for row in rows] for inval, outval in zip(values, rows): if inval in ('inf', 'Infinity'): inval = inf elif inval in ('-inf', '-Infinity'): inval = -inf elif inval in ('nan', 'NaN'): inval = nan if isinf(inval): self.assertTrue(isinf(outval)) if inval < 0: self.assertTrue(outval < 0) else: self.assertTrue(outval > 0) elif isnan(inval): self.assertTrue(isnan(outval)) else: self.assertEqual(inval, outval) def test_datetime(self): dt = datetime(2011, 7, 17, 15, 47, 42, 317509) table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() cur.execute("set timezone = UTC") cur.execute("create table %s (" "d date, t time, ts timestamp," "tz timetz, tsz timestamptz)" % table) for n in range(3): values = [dt.date(), dt.time(), dt, dt.time(), dt] values[3] = values[3].replace(tzinfo=pgdb.timezone.utc) values[4] = values[4].replace(tzinfo=pgdb.timezone.utc) if n == 0: # input as objects params = values if n == 1: # input as text params = [v.isoformat() for v in values] # as text elif n == 2: # input using type helpers d = (dt.year, dt.month, dt.day) t = (dt.hour, dt.minute, dt.second, dt.microsecond) z = (pgdb.timezone.utc,) params = [pgdb.Date(*d), pgdb.Time(*t), pgdb.Timestamp(*(d + t)), pgdb.Time(*(t + z)), pgdb.Timestamp(*(d + t + z))] for datestyle in ('iso', 'postgres, mdy', 'postgres, dmy', 'sql, mdy', 'sql, dmy', 'german'): cur.execute("set datestyle to %s" % datestyle) if n != 1: cur.execute("select 
%s,%s,%s,%s,%s", params) row = cur.fetchone() self.assertEqual(row, tuple(values)) cur.execute("insert into %s" " values (%%s,%%s,%%s,%%s,%%s)" % table, params) cur.execute("select * from %s" % table) d = cur.description for i in range(5): self.assertEqual(d[i].type_code, pgdb.DATETIME) self.assertNotEqual(d[i].type_code, pgdb.STRING) self.assertNotEqual(d[i].type_code, pgdb.ARRAY) self.assertNotEqual(d[i].type_code, pgdb.RECORD) self.assertEqual(d[0].type_code, pgdb.DATE) self.assertEqual(d[1].type_code, pgdb.TIME) self.assertEqual(d[2].type_code, pgdb.TIMESTAMP) self.assertEqual(d[3].type_code, pgdb.TIME) self.assertEqual(d[4].type_code, pgdb.TIMESTAMP) row = cur.fetchone() self.assertEqual(row, tuple(values)) cur.execute("delete from %s" % table) finally: con.close() def test_interval(self): td = datetime(2011, 7, 17, 15, 47, 42, 317509) - datetime(1970, 1, 1) table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() cur.execute("create table %s (i interval)" % table) for n in range(3): if n == 0: # input as objects param = td if n == 1: # input as text param = '%d days %d seconds %d microseconds ' % ( td.days, td.seconds, td.microseconds) elif n == 2: # input using type helpers param = pgdb.Interval( td.days, 0, 0, td.seconds, td.microseconds) for intervalstyle in ('sql_standard ', 'postgres', 'postgres_verbose', 'iso_8601'): cur.execute("set intervalstyle to %s" % intervalstyle) cur.execute("insert into %s" " values (%%s)" % table, [param]) cur.execute("select * from %s" % table) tc = cur.description[0].type_code self.assertEqual(tc, pgdb.DATETIME) self.assertNotEqual(tc, pgdb.STRING) self.assertNotEqual(tc, pgdb.ARRAY) self.assertNotEqual(tc, pgdb.RECORD) self.assertEqual(tc, pgdb.INTERVAL) row = cur.fetchone() self.assertEqual(row, (td,)) cur.execute("delete from %s" % table) finally: con.close() def test_hstore(self): con = self._connect() try: cur = con.cursor() cur.execute("select 'k=>v'::hstore") except pgdb.DatabaseError: try: 
cur.execute("create extension hstore") except pgdb.DatabaseError: self.skipTest("hstore extension not enabled") finally: con.close() d = {'k': 'v', 'foo': 'bar', 'baz': 'whatever', 'back\\': '\\slash', '1a': 'anything at all', '2=b': 'value = 2', '3>c': 'value > 3', '4"c': 'value " 4', "5'c": "value ' 5", 'hello, world': '"hi!"', 'None': None, 'NULL': 'NULL', 'empty': ''} con = self._connect() try: cur = con.cursor() cur.execute("select %s::hstore", (pgdb.Hstore(d),)) result = cur.fetchone()[0] finally: con.close() self.assertIsInstance(result, dict) self.assertEqual(result, d) def test_uuid(self): self.assertIs(Uuid, pgdb.Uuid) d = Uuid('{12345678-1234-5678-1234-567812345678}') con = self._connect() try: cur = con.cursor() cur.execute("select %s::uuid", (d,)) result = cur.fetchone()[0] finally: con.close() self.assertIsInstance(result, Uuid) self.assertEqual(result, d) def test_insert_array(self): values = [(None, None), ([], []), ([None], [[None], ['null']]), ([1, 2, 3], [['a', 'b'], ['c', 'd']]), ([20000, 25000, 25000, 30000], [['breakfast', 'consulting'], ['meeting', 'lunch']]), ([0, 1, -1], [['Hello, World!', '"Hi!"'], ['{x,y}', ' x y ']])] table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() cur.execute("create table %s" " (n smallint, i int[], t text[][])" % table) params = [(n, v[0], v[1]) for n, v in enumerate(values)] # Note that we must explicit casts because we are inserting # empty arrays. Otherwise this is not necessary. 
cur.executemany("insert into %s values" " (%%d,%%s::int[],%%s::text[][])" % table, params) cur.execute("select i, t from %s order by n" % table) d = cur.description self.assertEqual(d[0].type_code, pgdb.ARRAY) self.assertNotEqual(d[0].type_code, pgdb.RECORD) self.assertEqual(d[0].type_code, pgdb.NUMBER) self.assertEqual(d[0].type_code, pgdb.INTEGER) self.assertEqual(d[1].type_code, pgdb.ARRAY) self.assertNotEqual(d[1].type_code, pgdb.RECORD) self.assertEqual(d[1].type_code, pgdb.STRING) rows = cur.fetchall() finally: con.close() self.assertEqual(rows, values) def test_select_array(self): values = ([1, 2, 3, None], ['a', 'b', 'c', None]) con = self._connect() try: cur = con.cursor() cur.execute("select %s::int[], %s::text[]", values) row = cur.fetchone() finally: con.close() self.assertEqual(row, values) def test_unicode_list_and_tuple(self): value = (u'Käse', u'Würstchen') con = self._connect() try: cur = con.cursor() try: cur.execute("select %s, %s", value) except pgdb.DatabaseError: self.skipTest('database does not support latin-1') row = cur.fetchone() cur.execute("select %s, %s", (list(value), tuple(value))) as_list, as_tuple = cur.fetchone() finally: con.close() self.assertEqual(as_list, list(row)) self.assertEqual(as_tuple, tuple(row)) def test_insert_record(self): values = [('John', 61), ('Jane', 63), ('Fred', None), ('Wilma', None), (None, 42), (None, None)] table = self.table_prefix + 'booze' record = self.table_prefix + 'munch' con = self._connect() try: cur = con.cursor() cur.execute("create type %s as (name varchar, age int)" % record) cur.execute("create table %s (n smallint, r %s)" % (table, record)) params = enumerate(values) cur.executemany("insert into %s values (%%d,%%s)" % table, params) cur.execute("select r from %s order by n" % table) type_code = cur.description[0].type_code self.assertEqual(type_code, record) self.assertEqual(type_code, pgdb.RECORD) self.assertNotEqual(type_code, pgdb.ARRAY) columns = con.type_cache.get_fields(type_code) 
self.assertEqual(columns[0].name, 'name') self.assertEqual(columns[1].name, 'age') self.assertEqual(con.type_cache[columns[0].type], 'varchar') self.assertEqual(con.type_cache[columns[1].type], 'int4') rows = cur.fetchall() finally: cur.execute('drop table %s' % table) cur.execute('drop type %s' % record) con.close() self.assertEqual(len(rows), len(values)) rows = [row[0] for row in rows] self.assertEqual(rows, values) self.assertEqual(rows[0].name, 'John') self.assertEqual(rows[0].age, 61) def test_select_record(self): value = (1, 25000, 2.5, 'hello', 'Hello World!', 'Hello, World!', '(test)', '(x,y)', ' x y ', 'null', None) con = self._connect() try: cur = con.cursor() cur.execute("select %s as test_record", [value]) self.assertEqual(cur.description[0].name, 'test_record') self.assertEqual(cur.description[0].type_code, 'record') row = cur.fetchone()[0] finally: con.close() # Note that the element types get lost since we created an # untyped record (an anonymous composite type). For the same # reason this is also a normal tuple, not a named tuple. 
text_row = tuple(None if v is None else str(v) for v in value) self.assertEqual(row, text_row) def test_custom_type(self): values = [3, 5, 65] values = list(map(PgBitString, values)) table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() params = enumerate(values) # params have __pg_repr__ method cur.execute( 'create table "%s" (n smallint, b bit varying(7))' % table) cur.executemany("insert into %s values (%%s,%%s)" % table, params) cur.execute("select * from %s" % table) rows = cur.fetchall() finally: con.close() self.assertEqual(len(rows), len(values)) con = self._connect() try: cur = con.cursor() params = (1, object()) # an object that cannot be handled self.assertRaises(pgdb.InterfaceError, cur.execute, "insert into %s values (%%s,%%s)" % table, params) finally: con.close() def test_set_decimal_type(self): decimal_type = pgdb.decimal_type() self.assertTrue(decimal_type is not None and callable(decimal_type)) con = self._connect() try: cur = con.cursor() # change decimal type globally to int int_type = lambda v: int(float(v)) self.assertTrue(pgdb.decimal_type(int_type) is int_type) cur.execute('select 4.25') self.assertEqual(cur.description[0].type_code, pgdb.NUMBER) value = cur.fetchone()[0] self.assertTrue(isinstance(value, int)) self.assertEqual(value, 4) # change decimal type again to float self.assertTrue(pgdb.decimal_type(float) is float) cur.execute('select 4.25') self.assertEqual(cur.description[0].type_code, pgdb.NUMBER) value = cur.fetchone()[0] # the connection still uses the old setting self.assertTrue(isinstance(value, int)) # bust the cache for type functions for the connection con.type_cache.reset_typecast() cur.execute('select 4.25') self.assertEqual(cur.description[0].type_code, pgdb.NUMBER) value = cur.fetchone()[0] # now the connection uses the new setting self.assertTrue(isinstance(value, float)) self.assertEqual(value, 4.25) finally: con.close() pgdb.decimal_type(decimal_type) self.assertTrue(pgdb.decimal_type() 
is decimal_type) def test_global_typecast(self): try: query = 'select 2::int2, 4::int4, 8::int8' self.assertIs(pgdb.get_typecast('int4'), int) cast_int = lambda v: 'int(%s)' % v pgdb.set_typecast('int4', cast_int) con = self._connect() try: i2, i4, i8 = con.cursor().execute(query).fetchone() finally: con.close() self.assertEqual(i2, 2) self.assertEqual(i4, 'int(4)') self.assertEqual(i8, 8) pgdb.set_typecast(['int2', 'int8'], cast_int) con = self._connect() try: i2, i4, i8 = con.cursor().execute(query).fetchone() finally: con.close() self.assertEqual(i2, 'int(2)') self.assertEqual(i4, 'int(4)') self.assertEqual(i8, 'int(8)') pgdb.reset_typecast('int4') con = self._connect() try: i2, i4, i8 = con.cursor().execute(query).fetchone() finally: con.close() self.assertEqual(i2, 'int(2)') self.assertEqual(i4, 4) self.assertEqual(i8, 'int(8)') pgdb.reset_typecast(['int2', 'int8']) con = self._connect() try: i2, i4, i8 = con.cursor().execute(query).fetchone() finally: con.close() self.assertEqual(i2, 2) self.assertEqual(i4, 4) self.assertEqual(i8, 8) pgdb.set_typecast(['int2', 'int8'], cast_int) con = self._connect() try: i2, i4, i8 = con.cursor().execute(query).fetchone() finally: con.close() self.assertEqual(i2, 'int(2)') self.assertEqual(i4, 4) self.assertEqual(i8, 'int(8)') finally: pgdb.reset_typecast() con = self._connect() try: i2, i4, i8 = con.cursor().execute(query).fetchone() finally: con.close() self.assertEqual(i2, 2) self.assertEqual(i4, 4) self.assertEqual(i8, 8) def test_set_typecast_for_arrays(self): query = 'select ARRAY[1,2,3]' try: con = self._connect() try: r = con.cursor().execute(query).fetchone()[0] finally: con.close() self.assertIsInstance(r, list) self.assertEqual(r, [1, 2, 3]) pgdb.set_typecast('anyarray', lambda v, basecast: v) con = self._connect() try: r = con.cursor().execute(query).fetchone()[0] finally: con.close() self.assertIsInstance(r, str) self.assertEqual(r, '{1,2,3}') finally: pgdb.reset_typecast() con = self._connect() try: r = 
con.cursor().execute(query).fetchone()[0] finally: con.close() self.assertIsInstance(r, list) self.assertEqual(r, [1, 2, 3]) def test_unicode_with_utf8(self): table = self.table_prefix + 'booze' input = u"He wes Leovenaðes sone — liðe him be Drihten" con = self._connect() try: cur = con.cursor() cur.execute("create table %s (t text)" % table) try: cur.execute("set client_encoding=utf8") cur.execute(u"select '%s'" % input) except Exception: self.skipTest("database does not support utf8") output1 = cur.fetchone()[0] cur.execute("insert into %s values (%%s)" % table, (input,)) cur.execute("select * from %s" % table) output2 = cur.fetchone()[0] cur.execute("select t = '%s' from %s" % (input, table)) output3 = cur.fetchone()[0] cur.execute("select t = %%s from %s" % table, (input,)) output4 = cur.fetchone()[0] finally: con.close() if str is bytes: # Python < 3.0 input = input.encode('utf8') self.assertIsInstance(output1, str) self.assertEqual(output1, input) self.assertIsInstance(output2, str) self.assertEqual(output2, input) self.assertIsInstance(output3, bool) self.assertTrue(output3) self.assertIsInstance(output4, bool) self.assertTrue(output4) def test_unicode_with_latin1(self): table = self.table_prefix + 'booze' input = u"Ehrt den König seine Würde, ehret uns der Hände Fleiß." 
con = self._connect() try: cur = con.cursor() cur.execute("create table %s (t text)" % table) try: cur.execute("set client_encoding=latin1") cur.execute(u"select '%s'" % input) except Exception: self.skipTest("database does not support latin1") output1 = cur.fetchone()[0] cur.execute("insert into %s values (%%s)" % table, (input,)) cur.execute("select * from %s" % table) output2 = cur.fetchone()[0] cur.execute("select t = '%s' from %s" % (input, table)) output3 = cur.fetchone()[0] cur.execute("select t = %%s from %s" % table, (input,)) output4 = cur.fetchone()[0] finally: con.close() if str is bytes: # Python < 3.0 input = input.encode('latin1') self.assertIsInstance(output1, str) self.assertEqual(output1, input) self.assertIsInstance(output2, str) self.assertEqual(output2, input) self.assertIsInstance(output3, bool) self.assertTrue(output3) self.assertIsInstance(output4, bool) self.assertTrue(output4) def test_bool(self): values = [False, True, None, 't', 'f', 'true', 'false'] table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() cur.execute( "create table %s (n smallint, booltest bool)" % table) params = enumerate(values) cur.executemany("insert into %s values (%%s,%%s)" % table, params) cur.execute("select booltest from %s order by n" % table) rows = cur.fetchall() self.assertEqual(cur.description[0].type_code, pgdb.BOOL) finally: con.close() rows = [row[0] for row in rows] values[3] = values[5] = True values[4] = values[6] = False self.assertEqual(rows, values) def test_literal(self): con = self._connect() try: cur = con.cursor() value = "lower('Hello')" cur.execute("select %s, %s", (value, pgdb.Literal(value))) row = cur.fetchone() finally: con.close() self.assertEqual(row, (value, 'hello')) def test_json(self): inval = {"employees": [{"firstName": "John", "lastName": "Doe", "age": 61}]} table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() try: cur.execute("create table %s (jsontest json)" % table) 
except pgdb.ProgrammingError: self.skipTest('database does not support json') params = (pgdb.Json(inval),) cur.execute("insert into %s values (%%s)" % table, params) cur.execute("select jsontest from %s" % table) outval = cur.fetchone()[0] self.assertEqual(cur.description[0].type_code, pgdb.JSON) finally: con.close() self.assertEqual(inval, outval) def test_jsonb(self): inval = {"employees": [{"firstName": "John", "lastName": "Doe", "age": 61}]} table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() try: cur.execute("create table %s (jsonbtest jsonb)" % table) except pgdb.ProgrammingError: self.skipTest('database does not support jsonb') params = (pgdb.Json(inval),) cur.execute("insert into %s values (%%s)" % table, params) cur.execute("select jsonbtest from %s" % table) outval = cur.fetchone()[0] self.assertEqual(cur.description[0].type_code, pgdb.JSON) finally: con.close() self.assertEqual(inval, outval) def test_execute_edge_cases(self): con = self._connect() try: cur = con.cursor() sql = 'invalid' # should be ignored with empty parameter list cur.executemany(sql, []) sql = 'select %d + 1' cur.execute(sql, [(1,), (2,)]) # deprecated use of execute() self.assertEqual(cur.fetchone()[0], 3) sql = 'select 1/0' # cannot be executed self.assertRaises(pgdb.DataError, cur.execute, sql) cur.close() con.rollback() if pgdb.shortcutmethods: res = con.execute('select %d', (1,)).fetchone() self.assertEqual(res, (1,)) res = con.executemany('select %d', [(1,), (2,)]).fetchone() self.assertEqual(res, (2,)) finally: con.close() sql = 'select 1' # cannot be executed after connection is closed self.assertRaises(pgdb.OperationalError, cur.execute, sql) def test_fetchmany_with_keep(self): con = self._connect() try: cur = con.cursor() self.assertEqual(cur.arraysize, 1) cur.execute('select * from generate_series(1, 25)') self.assertEqual(len(cur.fetchmany()), 1) self.assertEqual(len(cur.fetchmany()), 1) self.assertEqual(cur.arraysize, 1) cur.arraysize = 3 
self.assertEqual(len(cur.fetchmany()), 3) self.assertEqual(len(cur.fetchmany()), 3) self.assertEqual(cur.arraysize, 3) self.assertEqual(len(cur.fetchmany(size=2)), 2) self.assertEqual(cur.arraysize, 3) self.assertEqual(len(cur.fetchmany()), 3) self.assertEqual(len(cur.fetchmany()), 3) self.assertEqual(len(cur.fetchmany(size=2, keep=True)), 2) self.assertEqual(cur.arraysize, 2) self.assertEqual(len(cur.fetchmany()), 2) self.assertEqual(len(cur.fetchmany()), 2) self.assertEqual(len(cur.fetchmany(25)), 3) finally: con.close() def test_nextset(self): con = self._connect() cur = con.cursor() self.assertRaises(con.NotSupportedError, cur.nextset) def test_setoutputsize(self): pass # not supported def test_connection_errors(self): con = self._connect() self.assertEqual(con.Error, pgdb.Error) self.assertEqual(con.Warning, pgdb.Warning) self.assertEqual(con.InterfaceError, pgdb.InterfaceError) self.assertEqual(con.DatabaseError, pgdb.DatabaseError) self.assertEqual(con.InternalError, pgdb.InternalError) self.assertEqual(con.OperationalError, pgdb.OperationalError) self.assertEqual(con.ProgrammingError, pgdb.ProgrammingError) self.assertEqual(con.IntegrityError, pgdb.IntegrityError) self.assertEqual(con.DataError, pgdb.DataError) self.assertEqual(con.NotSupportedError, pgdb.NotSupportedError) def test_transaction(self): table = self.table_prefix + 'booze' con1 = self._connect() cur1 = con1.cursor() self.executeDDL1(cur1) con1.commit() con2 = self._connect() cur2 = con2.cursor() cur2.execute("select name from %s" % table) self.assertIsNone(cur2.fetchone()) cur1.execute("insert into %s values('Schlafly')" % table) cur2.execute("select name from %s" % table) self.assertIsNone(cur2.fetchone()) con1.commit() cur2.execute("select name from %s" % table) self.assertEqual(cur2.fetchone(), ('Schlafly',)) con2.close() con1.close() def test_autocommit(self): table = self.table_prefix + 'booze' con1 = self._connect() con1.autocommit = True cur1 = con1.cursor() self.executeDDL1(cur1) con2 
= self._connect() cur2 = con2.cursor() cur2.execute("select name from %s" % table) self.assertIsNone(cur2.fetchone()) cur1.execute("insert into %s values('Shmaltz Pastrami')" % table) cur2.execute("select name from %s" % table) self.assertEqual(cur2.fetchone(), ('Shmaltz Pastrami',)) con2.close() con1.close() def test_connection_as_contextmanager(self): table = self.table_prefix + 'booze' for autocommit in False, True: con = self._connect() con.autocommit = autocommit try: cur = con.cursor() if autocommit: cur.execute("truncate %s" % table) else: cur.execute( "create table %s (n smallint check(n!=4))" % table) with con: cur.execute("insert into %s values (1)" % table) cur.execute("insert into %s values (2)" % table) try: with con: cur.execute("insert into %s values (3)" % table) cur.execute("insert into %s values (4)" % table) except con.IntegrityError as error: self.assertTrue('check' in str(error).lower()) with con: cur.execute("insert into %s values (5)" % table) cur.execute("insert into %s values (6)" % table) try: with con: cur.execute("insert into %s values (7)" % table) cur.execute("insert into %s values (8)" % table) raise ValueError('transaction should rollback') except ValueError as error: self.assertEqual(str(error), 'transaction should rollback') with con: cur.execute("insert into %s values (9)" % table) cur.execute("select * from %s order by 1" % table) rows = cur.fetchall() rows = [row[0] for row in rows] finally: con.close() self.assertEqual(rows, [1, 2, 5, 6, 9]) def test_cursor_connection(self): con = self._connect() cur = con.cursor() self.assertEqual(cur.connection, con) cur.close() def test_cursor_as_contextmanager(self): con = self._connect() with con.cursor() as cur: self.assertEqual(cur.connection, con) def test_pgdb_type(self): self.assertEqual(pgdb.STRING, pgdb.STRING) self.assertNotEqual(pgdb.STRING, pgdb.INTEGER) self.assertNotEqual(pgdb.STRING, pgdb.BOOL) self.assertNotEqual(pgdb.BOOL, pgdb.INTEGER) self.assertEqual(pgdb.INTEGER, 
pgdb.INTEGER) self.assertNotEqual(pgdb.INTEGER, pgdb.NUMBER) self.assertEqual('char', pgdb.STRING) self.assertEqual('varchar', pgdb.STRING) self.assertEqual('text', pgdb.STRING) self.assertNotEqual('numeric', pgdb.STRING) self.assertEqual('numeric', pgdb.NUMERIC) self.assertEqual('numeric', pgdb.NUMBER) self.assertEqual('int4', pgdb.NUMBER) self.assertNotEqual('int4', pgdb.NUMERIC) self.assertEqual('int2', pgdb.SMALLINT) self.assertNotEqual('int4', pgdb.SMALLINT) self.assertEqual('int2', pgdb.INTEGER) self.assertEqual('int4', pgdb.INTEGER) self.assertEqual('int8', pgdb.INTEGER) self.assertNotEqual('int4', pgdb.LONG) self.assertEqual('int8', pgdb.LONG) self.assertTrue('char' in pgdb.STRING) self.assertTrue(pgdb.NUMERIC <= pgdb.NUMBER) self.assertTrue(pgdb.NUMBER >= pgdb.INTEGER) self.assertTrue(pgdb.TIME <= pgdb.DATETIME) self.assertTrue(pgdb.DATETIME >= pgdb.DATE) self.assertEqual(pgdb.ARRAY, pgdb.ARRAY) self.assertNotEqual(pgdb.ARRAY, pgdb.STRING) self.assertEqual('_char', pgdb.ARRAY) self.assertNotEqual('char', pgdb.ARRAY) self.assertEqual(pgdb.RECORD, pgdb.RECORD) self.assertNotEqual(pgdb.RECORD, pgdb.STRING) self.assertNotEqual(pgdb.RECORD, pgdb.ARRAY) self.assertEqual('record', pgdb.RECORD) self.assertNotEqual('_record', pgdb.RECORD) def test_no_close(self): data = ('hello', 'world') con = self._connect() cur = con.cursor() cur.build_row_factory = lambda: tuple cur.execute("select %s, %s", data) row = cur.fetchone() self.assertEqual(row, data) def test_set_row_factory_size(self): try: from functools import lru_cache except ImportError: # Python < 3.2 lru_cache = None queries = ['select 1 as a, 2 as b, 3 as c', 'select 123 as abc'] con = self._connect() cur = con.cursor() for maxsize in (None, 0, 1, 2, 3, 10, 1024): pgdb.set_row_factory_size(maxsize) for i in range(3): for q in queries: cur.execute(q) r = cur.fetchone() if q.endswith('abc'): self.assertEqual(r, (123,)) self.assertEqual(r._fields, ('abc',)) else: self.assertEqual(r, (1, 2, 3)) 
self.assertEqual(r._fields, ('a', 'b', 'c')) if lru_cache: info = pgdb._row_factory.cache_info() self.assertEqual(info.maxsize, maxsize) self.assertEqual(info.hits + info.misses, 6) self.assertEqual(info.hits, 0 if maxsize is not None and maxsize < 2 else 4) def test_memory_leaks(self): ids = set() objs = [] add_ids = ids.update gc.collect() objs[:] = gc.get_objects() add_ids(id(obj) for obj in objs) self.test_no_close() gc.collect() objs[:] = gc.get_objects() objs[:] = [obj for obj in objs if id(obj) not in ids] if objs and sys.version_info[:3] in ((3, 5, 0), (3, 5, 1)): # workaround for Python issue 26811 objs[:] = [obj for obj in objs if repr(obj) != '(,)'] self.assertEqual(len(objs), 0) if __name__ == '__main__': unittest.main() PyGreSQL-5.1/tests/test_tutorial.py0000644000175100077410000001511413466770070017351 0ustar darcypyg00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- from __future__ import print_function try: import unittest2 as unittest # for Python < 2.7 except ImportError: import unittest from pg import DB from pgdb import connect # We need a database to test against. If LOCAL_PyGreSQL.py exists we will # get our information from that. Otherwise we use the defaults. 
dbname = 'unittest' dbhost = None dbport = 5432 try: from .LOCAL_PyGreSQL import * except (ImportError, ValueError): try: from LOCAL_PyGreSQL import * except ImportError: pass class TestClassicTutorial(unittest.TestCase): """Test the First Steps Tutorial for the classic interface.""" def setUp(self): """Setup test tables or empty them if they already exist.""" db = DB(dbname=dbname, host=dbhost, port=dbport) db.query("set datestyle to 'iso'") db.query("set default_with_oids=false") db.query("set standard_conforming_strings=false") db.query("set client_min_messages=warning") db.query("drop table if exists fruits cascade") db.query("create table fruits(id serial primary key, name varchar)") self.db = db def tearDown(self): db = self.db db.query("drop table fruits") db.close() def test_all_steps(self): db = self.db r = db.get_tables() self.assertIsInstance(r, list) self.assertIn('public.fruits', r) r = db.get_attnames('fruits') self.assertIsInstance(r, dict) self.assertEqual(r, {'id': 'int', 'name': 'text'}) r = db.has_table_privilege('fruits', 'insert') self.assertTrue(r) r = db.insert('fruits', name='apple') self.assertIsInstance(r, dict) self.assertEqual(r, {'name': 'apple', 'id': 1}) banana = r = db.insert('fruits', name='banana') self.assertIsInstance(r, dict) self.assertEqual(r, {'name': 'banana', 'id': 2}) more_fruits = 'cherimaya durian eggfruit fig grapefruit'.split() data = list(enumerate(more_fruits, start=3)) db.inserttable('fruits', data) q = db.query('select * from fruits') r = str(q).splitlines() self.assertEqual(r[0], 'id| name ') self.assertEqual(r[1], '--+----------') self.assertEqual(r[2], ' 1|apple ') self.assertEqual(r[8], ' 7|grapefruit') self.assertEqual(r[9], '(7 rows)') q = db.query('select * from fruits') r = q.getresult() self.assertIsInstance(r, list) self.assertIsInstance(r[0], tuple) self.assertEqual(r[0], (1, 'apple')) self.assertEqual(r[6], (7, 'grapefruit')) r = q.dictresult() self.assertIsInstance(r, list) self.assertIsInstance(r[0], 
dict) self.assertEqual(r[0], {'id': 1, 'name': 'apple'}) self.assertEqual(r[6], {'id': 7, 'name': 'grapefruit'}) rows = r = q.namedresult() self.assertIsInstance(r, list) self.assertIsInstance(r[0], tuple) self.assertEqual(rows[3].name, 'durian') r = db.update('fruits', banana, name=banana['name'].capitalize()) self.assertIsInstance(r, dict) self.assertEqual(r, {'id': 2, 'name': 'Banana'}) q = db.query('select * from fruits where id between 1 and 3') r = str(q).splitlines() self.assertEqual(r[0], 'id| name ') self.assertEqual(r[1], '--+---------') self.assertEqual(r[2], ' 1|apple ') self.assertEqual(r[3], ' 2|Banana ') self.assertEqual(r[4], ' 3|cherimaya') self.assertEqual(r[5], '(3 rows)') r = db.query('update fruits set name=initcap(name)') self.assertIsInstance(r, str) self.assertEqual(r, '7') r = db.delete('fruits', banana) self.assertIsInstance(r, int) self.assertEqual(r, 1) r = db.delete('fruits', banana) self.assertIsInstance(r, int) self.assertEqual(r, 0) r = db.insert('fruits', banana) self.assertIsInstance(r, dict) self.assertEqual(r, {'id': 2, 'name': 'Banana'}) apple = r = db.get('fruits', 1) self.assertIsInstance(r, dict) self.assertEqual(r, {'name': 'Apple', 'id': 1}) r = db.insert('fruits', apple, id=8) self.assertIsInstance(r, dict) self.assertEqual(r, {'id': 8, 'name': 'Apple'}) r = db.delete('fruits', id=8) self.assertIsInstance(r, int) self.assertEqual(r, 1) class TestDbApi20Tutorial(unittest.TestCase): """Test the First Steps Tutorial for the DB-API 2.0 interface.""" def setUp(self): """Setup test tables or empty them if they already exist.""" database = dbname host = '%s:%d' % (dbhost or '', dbport or -1) con = connect(database=database, host=host) cur = con.cursor() cur.execute("set datestyle to 'iso'") cur.execute("set default_with_oids=false") cur.execute("set standard_conforming_strings=false") cur.execute("set client_min_messages=warning") cur.execute("drop table if exists fruits cascade") cur.execute("create table fruits(id serial 
primary key, name varchar)") cur.close() self.con = con def tearDown(self): con = self.con cur = con.cursor() cur.execute("drop table fruits") cur.close() con.close() def test_all_steps(self): con = self.con cursor = con.cursor() cursor.execute("insert into fruits (name) values ('apple')") cursor.execute("insert into fruits (name) values (%s)", ('banana',)) more_fruits = 'cherimaya durian eggfruit fig grapefruit'.split() parameters = [(name,) for name in more_fruits] cursor.executemany("insert into fruits (name) values (%s)", parameters) con.commit() cursor.execute('select * from fruits where id=1') r = cursor.fetchone() self.assertIsInstance(r, tuple) self.assertEqual(len(r), 2) r = str(r) self.assertEqual(r, "Row(id=1, name='apple')") cursor.execute('select * from fruits') r = cursor.fetchall() self.assertIsInstance(r, list) self.assertEqual(len(r), 7) self.assertEqual(str(r[0]), "Row(id=1, name='apple')") self.assertEqual(str(r[6]), "Row(id=7, name='grapefruit')") cursor.execute('select * from fruits') r = cursor.fetchmany(2) self.assertIsInstance(r, list) self.assertEqual(len(r), 2) self.assertEqual(str(r[0]), "Row(id=1, name='apple')") self.assertEqual(str(r[1]), "Row(id=2, name='banana')") if __name__ == '__main__': unittest.main() PyGreSQL-5.1/tests/test_classic_connection.py0000755000175100077410000025505713466770070021365 0ustar darcypyg00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- """Test the classic PyGreSQL interface. Sub-tests for the low-level connection object. Contributed by Christoph Zwerschke. These tests need a database to test against. """ try: import unittest2 as unittest # for Python < 2.7 except ImportError: import unittest import threading import time import os from collections import namedtuple, Iterable from decimal import Decimal import pg # the module under test # We need a database to test against. If LOCAL_PyGreSQL.py exists we will # get our information from that. Otherwise we use the defaults. 
# These tests should be run with various PostgreSQL versions and databases # created with different encodings and locales. Particularly, make sure the # tests are running against databases created with both SQL_ASCII and UTF8. dbname = 'unittest' dbhost = None dbport = 5432 try: from .LOCAL_PyGreSQL import * except (ImportError, ValueError): try: from LOCAL_PyGreSQL import * except ImportError: pass try: # noinspection PyUnresolvedReferences long except NameError: # Python >= 3.0 long = int try: # noinspection PyUnresolvedReferences unicode except NameError: # Python >= 3.0 unicode = str unicode_strings = str is not bytes windows = os.name == 'nt' # There is a known a bug in libpq under Windows which can cause # the interface to crash when calling PQhost(): do_not_ask_for_host = windows do_not_ask_for_host_reason = 'libpq issue on Windows' def connect(): """Create a basic pg connection to the test database.""" connection = pg.connect(dbname, dbhost, dbport) connection.query("set client_min_messages=warning") return connection class TestCanConnect(unittest.TestCase): """Test whether a basic connection to PostgreSQL is possible.""" def testCanConnect(self): try: connection = connect() except pg.Error as error: self.fail('Cannot connect to database %s:\n%s' % (dbname, error)) try: connection.close() except pg.Error: self.fail('Cannot close the database connection') class TestConnectObject(unittest.TestCase): """Test existence of basic pg connection methods.""" def setUp(self): self.connection = connect() def tearDown(self): try: self.connection.close() except pg.InternalError: pass def is_method(self, attribute): """Check if given attribute on the connection is a method.""" if do_not_ask_for_host and attribute == 'host': return False return callable(getattr(self.connection, attribute)) def testClassName(self): self.assertEqual(self.connection.__class__.__name__, 'Connection') def testModuleName(self): self.assertEqual(self.connection.__class__.__module__, 'pg') def 
testStr(self): r = str(self.connection) self.assertTrue(r.startswith(' 5: break r = self.connection.cancel() # cancel the running query thread.join() # wait for the thread to end t2 = time.time() self.assertIsInstance(r, int) self.assertEqual(r, 1) # return code should be 1 self.assertLessEqual(t2 - t1, 3) # time should be under 3 seconds self.assertTrue(errors) def testMethodFileNo(self): r = self.connection.fileno() self.assertIsInstance(r, int) self.assertGreaterEqual(r, 0) def testMethodTransaction(self): transaction = self.connection.transaction self.assertRaises(TypeError, transaction, None) self.assertEqual(transaction(), pg.TRANS_IDLE) self.connection.query('begin') self.assertEqual(transaction(), pg.TRANS_INTRANS) self.connection.query('rollback') self.assertEqual(transaction(), pg.TRANS_IDLE) def testMethodParameter(self): parameter = self.connection.parameter query = self.connection.query self.assertRaises(TypeError, parameter) r = parameter('this server setting does not exist') self.assertIsNone(r) s = query('show server_version').getresult()[0][0] self.assertIsNotNone(s) r = parameter('server_version') self.assertEqual(r, s) s = query('show server_encoding').getresult()[0][0] self.assertIsNotNone(s) r = parameter('server_encoding') self.assertEqual(r, s) s = query('show client_encoding').getresult()[0][0] self.assertIsNotNone(s) r = parameter('client_encoding') self.assertEqual(r, s) s = query('show server_encoding').getresult()[0][0] self.assertIsNotNone(s) r = parameter('server_encoding') self.assertEqual(r, s) class TestSimpleQueries(unittest.TestCase): """Test simple queries via a basic pg connection.""" def setUp(self): self.c = connect() def tearDown(self): self.doCleanups() self.c.close() def testClassName(self): r = self.c.query("select 1") self.assertEqual(r.__class__.__name__, 'Query') def testModuleName(self): r = self.c.query("select 1") self.assertEqual(r.__class__.__module__, 'pg') def testStr(self): q = ("select 1 as a, 'hello' as h, 'w' 
as world" " union select 2, 'xyz', 'uvw'") r = self.c.query(q) self.assertEqual(str(r), 'a| h |world\n' '-+-----+-----\n' '1|hello|w \n' '2|xyz |uvw \n' '(2 rows)') def testRepr(self): r = repr(self.c.query("select 1")) self.assertTrue(r.startswith('= 90000: raise # ignore for older server versions db.close() cls.cls_set_up = True def testEscapeString(self): self.assertTrue(self.cls_set_up) f = pg.escape_string r = f(b'plain') self.assertIsInstance(r, bytes) self.assertEqual(r, b'plain') r = f(u'plain') self.assertIsInstance(r, unicode) self.assertEqual(r, u'plain') r = f(u"das is' käse".encode('utf-8')) self.assertIsInstance(r, bytes) self.assertEqual(r, u"das is'' käse".encode('utf-8')) r = f(u"that's cheesy") self.assertIsInstance(r, unicode) self.assertEqual(r, u"that''s cheesy") r = f(r"It's bad to have a \ inside.") self.assertEqual(r, r"It''s bad to have a \\ inside.") def testEscapeBytea(self): self.assertTrue(self.cls_set_up) f = pg.escape_bytea r = f(b'plain') self.assertIsInstance(r, bytes) self.assertEqual(r, b'plain') r = f(u'plain') self.assertIsInstance(r, unicode) self.assertEqual(r, u'plain') r = f(u"das is' käse".encode('utf-8')) self.assertIsInstance(r, bytes) self.assertEqual(r, b"das is'' k\\\\303\\\\244se") r = f(u"that's cheesy") self.assertIsInstance(r, unicode) self.assertEqual(r, u"that''s cheesy") r = f(b'O\x00ps\xff!') self.assertEqual(r, b'O\\\\000ps\\\\377!') if __name__ == '__main__': unittest.main() PyGreSQL-5.1/tests/test_classic_largeobj.py0000755000175100077410000003505013466770070021000 0ustar darcypyg00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- """Test the classic PyGreSQL interface. Sub-tests for large object support. Contributed by Christoph Zwerschke. These tests need a database to test against. """ try: import unittest2 as unittest # for Python < 2.7 except ImportError: import unittest import tempfile import os import pg # the module under test # We need a database to test against. 
If LOCAL_PyGreSQL.py exists we will # get our information from that. Otherwise we use the defaults. dbname = 'unittest' dbhost = None dbport = 5432 try: from .LOCAL_PyGreSQL import * except (ImportError, ValueError): try: from LOCAL_PyGreSQL import * except ImportError: pass windows = os.name == 'nt' def connect(): """Create a basic pg connection to the test database.""" connection = pg.connect(dbname, dbhost, dbport) connection.query("set client_min_messages=warning") return connection class TestModuleConstants(unittest.TestCase): """Test the existence of the documented module constants.""" def testLargeObjectIntConstants(self): names = 'INV_READ INV_WRITE SEEK_SET SEEK_CUR SEEK_END'.split() for name in names: try: value = getattr(pg, name) except AttributeError: self.fail('Module constant %s is missing' % name) self.assertIsInstance(value, int) class TestCreatingLargeObjects(unittest.TestCase): """Test creating large objects using a connection.""" def setUp(self): self.c = connect() self.c.query('begin') def tearDown(self): self.c.query('rollback') self.c.close() def assertIsLargeObject(self, obj): self.assertIsNotNone(obj) self.assertTrue(hasattr(obj, 'open')) self.assertTrue(hasattr(obj, 'close')) self.assertTrue(hasattr(obj, 'oid')) self.assertTrue(hasattr(obj, 'pgcnx')) self.assertTrue(hasattr(obj, 'error')) self.assertIsInstance(obj.oid, int) self.assertNotEqual(obj.oid, 0) self.assertIs(obj.pgcnx, self.c) self.assertIsInstance(obj.error, str) self.assertFalse(obj.error) def testLoCreate(self): large_object = self.c.locreate(pg.INV_READ | pg.INV_WRITE) try: self.assertIsLargeObject(large_object) finally: del large_object def testGetLo(self): large_object = self.c.locreate(pg.INV_READ | pg.INV_WRITE) try: self.assertIsLargeObject(large_object) oid = large_object.oid finally: del large_object data = b'some data to be shared' large_object = self.c.getlo(oid) try: self.assertIsLargeObject(large_object) self.assertEqual(large_object.oid, oid) 
large_object.open(pg.INV_WRITE) large_object.write(data) large_object.close() finally: del large_object large_object = self.c.getlo(oid) try: self.assertIsLargeObject(large_object) self.assertEqual(large_object.oid, oid) large_object.open(pg.INV_READ) r = large_object.read(80) large_object.close() large_object.unlink() finally: del large_object self.assertIsInstance(r, bytes) self.assertEqual(r, data) def testLoImport(self): if windows: # NamedTemporaryFiles don't work well here fname = 'temp_test_pg_largeobj_import.txt' f = open(fname, 'wb') else: f = tempfile.NamedTemporaryFile() fname = f.name data = b'some data to be imported' f.write(data) if windows: f.close() f = open(fname, 'rb') else: f.flush() f.seek(0) large_object = self.c.loimport(f.name) try: f.close() if windows: os.remove(fname) self.assertIsLargeObject(large_object) large_object.open(pg.INV_READ) large_object.seek(0, pg.SEEK_SET) r = large_object.size() self.assertIsInstance(r, int) self.assertEqual(r, len(data)) r = large_object.read(80) self.assertIsInstance(r, bytes) self.assertEqual(r, data) large_object.close() large_object.unlink() finally: del large_object class TestLargeObjects(unittest.TestCase): """Test the large object methods.""" def setUp(self): self.pgcnx = connect() self.pgcnx.query('begin') self.obj = self.pgcnx.locreate(pg.INV_READ | pg.INV_WRITE) def tearDown(self): if self.obj.oid: try: self.obj.close() except (SystemError, IOError): pass try: self.obj.unlink() except (SystemError, IOError): pass del self.obj try: self.pgcnx.query('rollback') except SystemError: pass self.pgcnx.close() def testClassName(self): self.assertEqual(self.obj.__class__.__name__, 'LargeObject') def testModuleName(self): self.assertEqual(self.obj.__class__.__module__, 'pg') def testOid(self): self.assertIsInstance(self.obj.oid, int) self.assertNotEqual(self.obj.oid, 0) def testPgcn(self): self.assertIs(self.obj.pgcnx, self.pgcnx) def testError(self): self.assertIsInstance(self.obj.error, str) 
self.assertEqual(self.obj.error, '') def testStr(self): self.obj.open(pg.INV_WRITE) data = b'some object to be printed' self.obj.write(data) oid = self.obj.oid r = str(self.obj) self.assertEqual(r, 'Opened large object, oid %d' % oid) self.obj.close() r = str(self.obj) self.assertEqual(r, 'Closed large object, oid %d' % oid) def testRepr(self): r = repr(self.obj) self.assertTrue(r.startswith('= 3.0 long = int try: # noinspection PyUnresolvedReferences unicode except NameError: # Python >= 3.0 unicode = str class TestHasConnect(unittest.TestCase): """Test existence of basic pg module functions.""" def testhasPgError(self): self.assertTrue(issubclass(pg.Error, Exception)) def testhasPgWarning(self): self.assertTrue(issubclass(pg.Warning, Exception)) def testhasPgInterfaceError(self): self.assertTrue(issubclass(pg.InterfaceError, pg.Error)) def testhasPgDatabaseError(self): self.assertTrue(issubclass(pg.DatabaseError, pg.Error)) def testhasPgInternalError(self): self.assertTrue(issubclass(pg.InternalError, pg.DatabaseError)) def testhasPgOperationalError(self): self.assertTrue(issubclass(pg.OperationalError, pg.DatabaseError)) def testhasPgProgrammingError(self): self.assertTrue(issubclass(pg.ProgrammingError, pg.DatabaseError)) def testhasPgIntegrityError(self): self.assertTrue(issubclass(pg.IntegrityError, pg.DatabaseError)) def testhasPgDataError(self): self.assertTrue(issubclass(pg.DataError, pg.DatabaseError)) def testhasPgNotSupportedError(self): self.assertTrue(issubclass(pg.NotSupportedError, pg.DatabaseError)) def testhasPgInvalidResultError(self): self.assertTrue(issubclass(pg.InvalidResultError, pg.DataError)) def testhasPgNoResultError(self): self.assertTrue(issubclass(pg.NoResultError, pg.InvalidResultError)) def testhasPgMultipleResultsError(self): self.assertTrue( issubclass(pg.MultipleResultsError, pg.InvalidResultError)) def testhasConnect(self): self.assertTrue(callable(pg.connect)) def testhasEscapeString(self): 
self.assertTrue(callable(pg.escape_string)) def testhasEscapeBytea(self): self.assertTrue(callable(pg.escape_bytea)) def testhasUnescapeBytea(self): self.assertTrue(callable(pg.unescape_bytea)) def testDefHost(self): d0 = pg.get_defhost() d1 = 'pgtesthost' pg.set_defhost(d1) self.assertEqual(pg.get_defhost(), d1) pg.set_defhost(d0) self.assertEqual(pg.get_defhost(), d0) def testDefPort(self): d0 = pg.get_defport() d1 = 1234 pg.set_defport(d1) self.assertEqual(pg.get_defport(), d1) if d0 is None: d0 = -1 pg.set_defport(d0) if d0 == -1: d0 = None self.assertEqual(pg.get_defport(), d0) def testDefOpt(self): d0 = pg.get_defopt() d1 = '-h pgtesthost -p 1234' pg.set_defopt(d1) self.assertEqual(pg.get_defopt(), d1) pg.set_defopt(d0) self.assertEqual(pg.get_defopt(), d0) def testDefBase(self): d0 = pg.get_defbase() d1 = 'pgtestdb' pg.set_defbase(d1) self.assertEqual(pg.get_defbase(), d1) pg.set_defbase(d0) self.assertEqual(pg.get_defbase(), d0) class TestParseArray(unittest.TestCase): """Test the array parser.""" test_strings = [ ('', str, ValueError), ('{}', None, []), ('{}', str, []), (' { } ', None, []), ('{', str, ValueError), ('{{}', str, ValueError), ('{}{', str, ValueError), ('[]', str, ValueError), ('()', str, ValueError), ('{[]}', str, ['[]']), ('{hello}', int, ValueError), ('{42}', int, [42]), ('{ 42 }', int, [42]), ('{42', int, ValueError), ('{ 42 ', int, ValueError), ('{hello}', str, ['hello']), ('{ hello }', str, ['hello']), ('{hi} ', str, ['hi']), ('{hi} ?', str, ValueError), ('{null}', str, [None]), (' { NULL } ', str, [None]), (' { NULL } ', str, [None]), (' { not null } ', str, ['not null']), (' { not NULL } ', str, ['not NULL']), (' {"null"} ', str, ['null']), (' {"NULL"} ', str, ['NULL']), ('{Hi!}', str, ['Hi!']), ('{"Hi!"}', str, ['Hi!']), ('{" Hi! "}', str, [' Hi! 
']), ('{a"}', str, ValueError), ('{"b}', str, ValueError), ('{a"b}', str, ValueError), (r'{a\"b}', str, ['a"b']), (r'{a\,b}', str, ['a,b']), (r'{a\bc}', str, ['abc']), (r'{"a\bc"}', str, ['abc']), (r'{\a\b\c}', str, ['abc']), (r'{"\a\b\c"}', str, ['abc']), (r'{"a"b"}', str, ValueError), (r'{"a""b"}', str, ValueError), (r'{"a\"b"}', str, ['a"b']), ('{"{}"}', str, ['{}']), (r'{\{\}}', str, ['{}']), ('{"{a,b,c}"}', str, ['{a,b,c}']), ("{'abc'}", str, ["'abc'"]), ('{"abc"}', str, ['abc']), (r'{\"abc\"}', str, ['"abc"']), (r"{\'abc\'}", str, ["'abc'"]), (r"{abc,d,efg}", str, ['abc', 'd', 'efg']), ('{Hello World!}', str, ['Hello World!']), ('{Hello, World!}', str, ['Hello', 'World!']), ('{Hello,\ World!}', str, ['Hello', ' World!']), ('{Hello\, World!}', str, ['Hello, World!']), ('{"Hello World!"}', str, ['Hello World!']), ('{this, should, be, null}', str, ['this', 'should', 'be', None]), ('{This, should, be, NULL}', str, ['This', 'should', 'be', None]), ('{3, 2, 1, null}', int, [3, 2, 1, None]), ('{3, 2, 1, NULL}', int, [3, 2, 1, None]), ('{3,17,51}', int, [3, 17, 51]), (' { 3 , 17 , 51 } ', int, [3, 17, 51]), ('{3,17,51}', str, ['3', '17', '51']), (' { 3 , 17 , 51 } ', str, ['3', '17', '51']), ('{1,"2",abc,"def"}', str, ['1', '2', 'abc', 'def']), ('{{}}', int, [[]]), ('{{},{}}', int, [[], []]), ('{ {} , {} , {} }', int, [[], [], []]), ('{ {} , {} , {} , }', int, ValueError), ('{{{1,2,3},{4,5,6}}}', int, [[[1, 2, 3], [4, 5, 6]]]), ('{{1,2,3},{4,5,6},{7,8,9}}', int, [[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ('{20000, 25000, 25000, 25000}', int, [20000, 25000, 25000, 25000]), ('{{{17,18,19},{14,15,16},{11,12,13}},' '{{27,28,29},{24,25,26},{21,22,23}},' '{{37,38,39},{34,35,36},{31,32,33}}}', int, [[[17, 18, 19], [14, 15, 16], [11, 12, 13]], [[27, 28, 29], [24, 25, 26], [21, 22, 23]], [[37, 38, 39], [34, 35, 36], [31, 32, 33]]]), ('{{"breakfast", "consulting"}, {"meeting", "lunch"}}', str, [['breakfast', 'consulting'], ['meeting', 'lunch']]), ('[1:3]={1,2,3}', int, [1, 2, 3]), 
('[-1:1]={1,2,3}', int, [1, 2, 3]), ('[-1:+1]={1,2,3}', int, [1, 2, 3]), ('[-3:-1]={1,2,3}', int, [1, 2, 3]), ('[+1:+3]={1,2,3}', int, [1, 2, 3]), ('[0:2]={1,2,3}', int, [1, 2, 3]), ('[7:9]={1,2,3}', int, [1, 2, 3]), ('[]={1,2,3}', int, ValueError), ('[1:]={1,2,3}', int, ValueError), ('[:3]={1,2,3}', int, ValueError), ('[1:1][-2:-1][3:5]={{{1,2,3},{4,5,6}}}', int, [[[1, 2, 3], [4, 5, 6]]]), (' [1:1] [-2:-1] [3:5] = { { { 1 , 2 , 3 }, {4 , 5 , 6 } } }', int, [[[1, 2, 3], [4, 5, 6]]]), ('[1:1][3:5]={{1,2,3},{4,5,6}}', int, [[1, 2, 3], [4, 5, 6]]), ('[3:5]={{1,2,3},{4,5,6}}', int, ValueError), ('[1:1][-2:-1][3:5]={{1,2,3},{4,5,6}}', int, ValueError)] def testParserParams(self): f = pg.cast_array self.assertRaises(TypeError, f) self.assertRaises(TypeError, f, None) self.assertRaises(TypeError, f, '{}', 1) self.assertRaises(TypeError, f, '{}', b',',) self.assertRaises(TypeError, f, '{}', None, None) self.assertRaises(TypeError, f, '{}', None, 1) self.assertRaises(TypeError, f, '{}', None, b'') self.assertRaises(ValueError, f, '{}', None, b'\\') self.assertRaises(ValueError, f, '{}', None, b'{') self.assertRaises(ValueError, f, '{}', None, b'}') self.assertRaises(TypeError, f, '{}', None, b',;') self.assertEqual(f('{}'), []) self.assertEqual(f('{}', None), []) self.assertEqual(f('{}', None, b';'), []) self.assertEqual(f('{}', str), []) self.assertEqual(f('{}', str, b';'), []) def testParserSimple(self): r = pg.cast_array('{a,b,c}') self.assertIsInstance(r, list) self.assertEqual(len(r), 3) self.assertEqual(r, ['a', 'b', 'c']) def testParserNested(self): f = pg.cast_array r = f('{{a,b,c}}') self.assertIsInstance(r, list) self.assertEqual(len(r), 1) r = r[0] self.assertIsInstance(r, list) self.assertEqual(len(r), 3) self.assertEqual(r, ['a', 'b', 'c']) self.assertRaises(ValueError, f, '{a,{b,c}}') r = f('{{a,b},{c,d}}') self.assertIsInstance(r, list) self.assertEqual(len(r), 2) r = r[1] self.assertIsInstance(r, list) self.assertEqual(len(r), 2) self.assertEqual(r, ['c', 
'd']) r = f('{{a},{b},{c}}') self.assertIsInstance(r, list) self.assertEqual(len(r), 3) r = r[1] self.assertIsInstance(r, list) self.assertEqual(len(r), 1) self.assertEqual(r[0], 'b') r = f('{{{{{{{abc}}}}}}}') for i in range(7): self.assertIsInstance(r, list) self.assertEqual(len(r), 1) r = r[0] self.assertEqual(r, 'abc') def testParserTooDeeplyNested(self): f = pg.cast_array for n in 3, 5, 9, 12, 16, 32, 64, 256: r = '%sa,b,c%s' % ('{' * n, '}' * n) if n > 16: # hard coded maximum depth self.assertRaises(ValueError, f, r) else: r = f(r) for i in range(n - 1): self.assertIsInstance(r, list) self.assertEqual(len(r), 1) r = r[0] self.assertEqual(len(r), 3) self.assertEqual(r, ['a', 'b', 'c']) def testParserCast(self): f = pg.cast_array self.assertEqual(f('{1}'), ['1']) self.assertEqual(f('{1}', None), ['1']) self.assertEqual(f('{1}', int), [1]) self.assertEqual(f('{1}', str), ['1']) self.assertEqual(f('{a}'), ['a']) self.assertEqual(f('{a}', None), ['a']) self.assertRaises(ValueError, f, '{a}', int) self.assertEqual(f('{a}', str), ['a']) cast = lambda s: '%s is ok' % s self.assertEqual(f('{a}', cast), ['a is ok']) def testParserDelim(self): f = pg.cast_array self.assertEqual(f('{1,2}'), ['1', '2']) self.assertEqual(f('{1,2}', delim=b','), ['1', '2']) self.assertEqual(f('{1;2}'), ['1;2']) self.assertEqual(f('{1;2}', delim=b';'), ['1', '2']) self.assertEqual(f('{1,2}', delim=b';'), ['1,2']) def testParserWithData(self): f = pg.cast_array for string, cast, expected in self.test_strings: if expected is ValueError: self.assertRaises(ValueError, f, string, cast) else: self.assertEqual(f(string, cast), expected) def testParserWithoutCast(self): f = pg.cast_array for string, cast, expected in self.test_strings: if cast is not str: continue if expected is ValueError: self.assertRaises(ValueError, f, string) else: self.assertEqual(f(string), expected) def testParserWithDifferentDelimiter(self): f = pg.cast_array def replace_comma(value): if isinstance(value, str): return 
value.replace(',', ';') elif isinstance(value, list): return [replace_comma(v) for v in value] else: return value for string, cast, expected in self.test_strings: string = replace_comma(string) if expected is ValueError: self.assertRaises(ValueError, f, string, cast) else: expected = replace_comma(expected) self.assertEqual(f(string, cast, b';'), expected) class TestParseRecord(unittest.TestCase): """Test the record parser.""" test_strings = [ ('', None, ValueError), ('', str, ValueError), ('(', None, ValueError), ('(', str, ValueError), ('()', None, (None,)), ('()', str, (None,)), ('()', int, (None,)), ('(,)', str, (None, None)), ('( , )', str, (' ', ' ')), ('(")', None, ValueError), ('("")', None, ('',)), ('("")', str, ('',)), ('("")', int, ValueError), ('("" )', None, (' ',)), ('("" )', str, (' ',)), ('("" )', int, ValueError), (' () ', None, (None,)), (' ( ) ', None, (' ',)), ('(', str, ValueError), ('(()', str, ('(',)), ('(())', str, ValueError), ('()(', str, ValueError), ('()()', str, ValueError), ('[]', str, ValueError), ('{}', str, ValueError), ('([])', str, ('[]',)), ('(hello)', int, ValueError), ('(42)', int, (42,)), ('( 42 )', int, (42,)), ('( 42)', int, (42,)), ('(42)', str, ('42',)), ('( 42 )', str, (' 42 ',)), ('( 42)', str, (' 42',)), ('(42', int, ValueError), ('( 42 ', int, ValueError), ('(hello)', str, ('hello',)), ('( hello )', str, (' hello ',)), ('(hello))', str, ValueError), (' (hello) ', str, ('hello',)), (' (hello) )', str, ValueError), ('(hello)?', str, ValueError), ('(null)', str, ('null',)), ('(null)', int, ValueError), (' ( NULL ) ', str, (' NULL ',)), (' ( NULL ) ', str, (' NULL ',)), (' ( null null ) ', str, (' null null ',)), (' ("null") ', str, ('null',)), (' ("NULL") ', str, ('NULL',)), ('(Hi!)', str, ('Hi!',)), ('("Hi!")', str, ('Hi!',)), ("('Hi!')", str, ("'Hi!'",)), ('(" Hi! ")', str, (' Hi! ',)), ('("Hi!" )', str, ('Hi! ',)), ('( "Hi!")', str, (' Hi!',)), ('( "Hi!" )', str, (' Hi! ',)), ('( ""Hi!"" )', str, (' Hi! 
',)), ('( """Hi!""" )', str, (' "Hi!" ',)), ('(a")', str, ValueError), ('("b)', str, ValueError), ('("a" "b)', str, ValueError), ('("a" "b")', str, ('a b',)), ('( "a" "b" "c" )', str, (' a b c ',)), ('( "a" "b" "c" )', str, (' a b c ',)), ('( "a,b" "c,d" )', str, (' a,b c,d ',)), ('( "(a,b,c)" d, e, "f,g")', str, (' (a,b,c) d', ' e', ' f,g')), ('(a",b,c",d,"e,f")', str, ('a,b,c', 'd', 'e,f')), ('( """a,b""", ""c,d"", "e,f", "g", ""h"", """i""")', str, (' "a,b"', ' c', 'd', ' e,f', ' g', ' h', ' "i"')), ('(a",b)",c"),(d,e)",f,g)', str, ('a,b)', 'c),(d,e)', 'f', 'g')), ('(a"b)', str, ValueError), (r'(a\"b)', str, ('a"b',)), ('(a""b)', str, ('ab',)), ('("a""b")', str, ('a"b',)), (r'(a\,b)', str, ('a,b',)), (r'(a\bc)', str, ('abc',)), (r'("a\bc")', str, ('abc',)), (r'(\a\b\c)', str, ('abc',)), (r'("\a\b\c")', str, ('abc',)), ('("()")', str, ('()',)), (r'(\,)', str, (',',)), (r'(\(\))', str, ('()',)), (r'(\)\()', str, (')(',)), ('("(a,b,c)")', str, ('(a,b,c)',)), ("('abc')", str, ("'abc'",)), ('("abc")', str, ('abc',)), (r'(\"abc\")', str, ('"abc"',)), (r"(\'abc\')", str, ("'abc'",)), ('(Hello World!)', str, ('Hello World!',)), ('(Hello, World!)', str, ('Hello', ' World!',)), ('(Hello,\ World!)', str, ('Hello', ' World!',)), ('(Hello\, World!)', str, ('Hello, World!',)), ('("Hello World!")', str, ('Hello World!',)), ("(this,shouldn't,be,null)", str, ('this', "shouldn't", 'be', 'null')), ('(null,should,be,)', str, ('null', 'should', 'be', None)), ('(abcABC0123!?+-*/=&%$\\\\\'\\"{[]}"""":;\\,,)', str, ('abcABC0123!?+-*/=&%$\\\'"{[]}":;,', None)), ('(3, 2, 1,)', int, (3, 2, 1, None)), ('(3, 2, 1, )', int, ValueError), ('(, 1, 2, 3)', int, (None, 1, 2, 3)), ('( , 1, 2, 3)', int, ValueError), ('(,1,,2,,3,)', int, (None, 1, None, 2, None, 3, None)), ('(3,17,51)', int, (3, 17, 51)), (' ( 3 , 17 , 51 ) ', int, (3, 17, 51)), ('(3,17,51)', str, ('3', '17', '51')), (' ( 3 , 17 , 51 ) ', str, (' 3 ', ' 17 ', ' 51 ')), ('(1,"2",abc,"def")', str, ('1', '2', 'abc', 'def')), ('(())', 
str, ValueError), ('()))', str, ValueError), ('()()', str, ValueError), ('((()', str, ('((',)), ('(())', int, ValueError), ('((),())', str, ValueError), ('("()","()")', str, ('()', '()')), ('( " () , () , () " )', str, (' () , () , () ',)), ('(20000, 25000, 25000, 25000)', int, (20000, 25000, 25000, 25000)), ('("breakfast","consulting","meeting","lunch")', str, ('breakfast', 'consulting', 'meeting', 'lunch')), ('("breakfast","consulting","meeting","lunch")', (str, str, str), ValueError), ('("breakfast","consulting","meeting","lunch")', (str, str, str, str), ('breakfast', 'consulting', 'meeting', 'lunch')), ('("breakfast","consulting","meeting","lunch")', (str, str, str, str, str), ValueError), ('("fuzzy dice",42,1.9375)', None, ('fuzzy dice', '42', '1.9375')), ('("fuzzy dice",42,1.9375)', str, ('fuzzy dice', '42', '1.9375')), ('("fuzzy dice",42,1.9375)', int, ValueError), ('("fuzzy dice",42,1.9375)', (str, int, float), ('fuzzy dice', 42, 1.9375)), ('("fuzzy dice",42,1.9375)', (str, int), ValueError), ('("fuzzy dice",42,1.9375)', (str, int, float, str), ValueError), ('("fuzzy dice",42,)', (str, int, float), ('fuzzy dice', 42, None)), ('("fuzzy dice",42,)', (str, int), ValueError), ('("",42,)', (str, int, float), ('', 42, None)), ('("fuzzy dice","",1.9375)', (str, int, float), ValueError), ('(fuzzy dice,"42","1.9375")', (str, int, float), ('fuzzy dice', 42, 1.9375))] def testParserParams(self): f = pg.cast_record self.assertRaises(TypeError, f) self.assertRaises(TypeError, f, None) self.assertRaises(TypeError, f, '()', 1) self.assertRaises(TypeError, f, '()', b',',) self.assertRaises(TypeError, f, '()', None, None) self.assertRaises(TypeError, f, '()', None, 1) self.assertRaises(TypeError, f, '()', None, b'') self.assertRaises(ValueError, f, '()', None, b'\\') self.assertRaises(ValueError, f, '()', None, b'(') self.assertRaises(ValueError, f, '()', None, b')') self.assertRaises(TypeError, f, '{}', None, b',;') self.assertEqual(f('()'), (None,)) 
self.assertEqual(f('()', None), (None,)) self.assertEqual(f('()', None, b';'), (None,)) self.assertEqual(f('()', str), (None,)) self.assertEqual(f('()', str, b';'), (None,)) def testParserSimple(self): r = pg.cast_record('(a,b,c)') self.assertIsInstance(r, tuple) self.assertEqual(len(r), 3) self.assertEqual(r, ('a', 'b', 'c')) def testParserNested(self): f = pg.cast_record self.assertRaises(ValueError, f, '((a,b,c))') self.assertRaises(ValueError, f, '((a,b),(c,d))') self.assertRaises(ValueError, f, '((a),(b),(c))') self.assertRaises(ValueError, f, '(((((((abc)))))))') def testParserManyElements(self): f = pg.cast_record for n in 3, 5, 9, 12, 16, 32, 64, 256: r = '(%s)' % ','.join(map(str, range(n))) r = f(r, int) self.assertEqual(r, tuple(range(n))) def testParserCastUniform(self): f = pg.cast_record self.assertEqual(f('(1)'), ('1',)) self.assertEqual(f('(1)', None), ('1',)) self.assertEqual(f('(1)', int), (1,)) self.assertEqual(f('(1)', str), ('1',)) self.assertEqual(f('(a)'), ('a',)) self.assertEqual(f('(a)', None), ('a',)) self.assertRaises(ValueError, f, '(a)', int) self.assertEqual(f('(a)', str), ('a',)) cast = lambda s: '%s is ok' % s self.assertEqual(f('(a)', cast), ('a is ok',)) def testParserCastNonUniform(self): f = pg.cast_record self.assertEqual(f('(1)', []), ('1',)) self.assertEqual(f('(1)', [None]), ('1',)) self.assertEqual(f('(1)', [str]), ('1',)) self.assertEqual(f('(1)', [int]), (1,)) self.assertRaises(ValueError, f, '(1)', [None, None]) self.assertRaises(ValueError, f, '(1)', [str, str]) self.assertRaises(ValueError, f, '(1)', [int, int]) self.assertEqual(f('(a)', [None]), ('a',)) self.assertEqual(f('(a)', [str]), ('a',)) self.assertRaises(ValueError, f, '(a)', [int]) self.assertEqual(f('(1,a)', [int, str]), (1, 'a')) self.assertRaises(ValueError, f, '(1,a)', [str, int]) self.assertEqual(f('(a,1)', [str, int]), ('a', 1)) self.assertRaises(ValueError, f, '(a,1)', [int, str]) self.assertEqual(f('(1,a,2,b,3,c)', [int, str, int, str, int, str]), (1, 
'a', 2, 'b', 3, 'c')) self.assertEqual(f('(1,a,2,b,3,c)', (int, str, int, str, int, str)), (1, 'a', 2, 'b', 3, 'c')) cast1 = lambda s: '%s is ok' % s self.assertEqual(f('(a)', [cast1]), ('a is ok',)) cast2 = lambda s: 'and %s is ok, too' % s self.assertEqual(f('(a,b)', [cast1, cast2]), ('a is ok', 'and b is ok, too')) self.assertRaises(ValueError, f, '(a)', [cast1, cast2]) self.assertRaises(ValueError, f, '(a,b,c)', [cast1, cast2]) self.assertEqual(f('(1,2,3,4,5,6)', [int, float, str, None, cast1, cast2]), (1, 2.0, '3', '4', '5 is ok', 'and 6 is ok, too')) def testParserDelim(self): f = pg.cast_record self.assertEqual(f('(1,2)'), ('1', '2')) self.assertEqual(f('(1,2)', delim=b','), ('1', '2')) self.assertEqual(f('(1;2)'), ('1;2',)) self.assertEqual(f('(1;2)', delim=b';'), ('1', '2')) self.assertEqual(f('(1,2)', delim=b';'), ('1,2',)) def testParserWithData(self): f = pg.cast_record for string, cast, expected in self.test_strings: if expected is ValueError: self.assertRaises(ValueError, f, string, cast) else: self.assertEqual(f(string, cast), expected) def testParserWithoutCast(self): f = pg.cast_record for string, cast, expected in self.test_strings: if cast is not str: continue if expected is ValueError: self.assertRaises(ValueError, f, string) else: self.assertEqual(f(string), expected) def testParserWithDifferentDelimiter(self): f = pg.cast_record def replace_comma(value): if isinstance(value, str): return value.replace(';', '@').replace( ',', ';').replace('@', ',') elif isinstance(value, tuple): return tuple(replace_comma(v) for v in value) else: return value for string, cast, expected in self.test_strings: string = replace_comma(string) if expected is ValueError: self.assertRaises(ValueError, f, string, cast) else: expected = replace_comma(expected) self.assertEqual(f(string, cast, b';'), expected) class TestParseHStore(unittest.TestCase): """Test the hstore parser.""" test_strings = [ ('', {}), ('=>', ValueError), ('""=>', ValueError), ('=>""', ValueError), 
('""=>""', {'': ''}), ('NULL=>NULL', {'NULL': None}), ('null=>null', {'null': None}), ('NULL=>"NULL"', {'NULL': 'NULL'}), ('null=>"null"', {'null': 'null'}), ('k', ValueError), ('k,', ValueError), ('k=', ValueError), ('k=>', ValueError), ('k=>v', {'k': 'v'}), ('k=>v,', ValueError), (' k => v ', {'k': 'v'}), (' k => v ', {'k': 'v'}), ('" k " => " v "', {' k ': ' v '}), ('"k=>v', ValueError), ('k=>"v', ValueError), ('"1-a" => "anything at all"', {'1-a': 'anything at all'}), ('k => v, foo => bar, baz => whatever,' ' "1-a" => "anything at all"', {'k': 'v', 'foo': 'bar', 'baz': 'whatever', '1-a': 'anything at all'}), ('"Hello, World!"=>"Hi!"', {'Hello, World!': 'Hi!'}), ('"Hi!"=>"Hello, World!"', {'Hi!': 'Hello, World!'}), ('"k=>v"=>k\=\>v', {'k=>v': 'k=>v'}), ('k\=\>v=>"k=>v"', {'k=>v': 'k=>v'}), ('a\\,b=>a,b=>a', {'a,b': 'a', 'b': 'a'})] def testParser(self): f = pg.cast_hstore self.assertRaises(TypeError, f) self.assertRaises(TypeError, f, None) self.assertRaises(TypeError, f, 42) self.assertRaises(TypeError, f, '', None) for string, expected in self.test_strings: if expected is ValueError: self.assertRaises(ValueError, f, string) else: self.assertEqual(f(string), expected) class TestCastInterval(unittest.TestCase): """Test the interval typecast function.""" intervals = [ ((0, 0, 0, 1, 0, 0, 0), ('1:00:00', '01:00:00', '@ 1 hour', 'PT1H')), ((0, 0, 0, -1, 0, 0, 0), ('-1:00:00', '-01:00:00', '@ -1 hour', 'PT-1H')), ((0, 0, 0, 1, 0, 0, 0), ('0-0 0 1:00:00', '0 years 0 mons 0 days 01:00:00', '@ 0 years 0 mons 0 days 1 hour', 'P0Y0M0DT1H')), ((0, 0, 0, -1, 0, 0, 0), ('-0-0 -1:00:00', '0 years 0 mons 0 days -01:00:00', '@ 0 years 0 mons 0 days -1 hour', 'P0Y0M0DT-1H')), ((0, 0, 1, 0, 0, 0, 0), ('1 0:00:00', '1 day', '@ 1 day', 'P1D')), ((0, 0, -1, 0, 0, 0, 0), ('-1 0:00:00', '-1 day', '@ -1 day', 'P-1D')), ((0, 1, 0, 0, 0, 0, 0), ('0-1', '1 mon', '@ 1 mon', 'P1M')), ((1, 0, 0, 0, 0, 0, 0), ('1-0', '1 year', '@ 1 year', 'P1Y')), ((0, 0, 0, 2, 0, 0, 0), ('2:00:00', 
'02:00:00', '@ 2 hours', 'PT2H')), ((0, 0, 2, 0, 0, 0, 0), ('2 0:00:00', '2 days', '@ 2 days', 'P2D')), ((0, 2, 0, 0, 0, 0, 0), ('0-2', '2 mons', '@ 2 mons', 'P2M')), ((2, 0, 0, 0, 0, 0, 0), ('2-0', '2 years', '@ 2 years', 'P2Y')), ((0, 0, 0, -3, 0, 0, 0), ('-3:00:00', '-03:00:00', '@ 3 hours ago', 'PT-3H')), ((0, 0, -3, 0, 0, 0, 0), ('-3 0:00:00', '-3 days', '@ 3 days ago', 'P-3D')), ((0, -3, 0, 0, 0, 0, 0), ('-0-3', '-3 mons', '@ 3 mons ago', 'P-3M')), ((-3, 0, 0, 0, 0, 0, 0), ('-3-0', '-3 years', '@ 3 years ago', 'P-3Y')), ((0, 0, 0, 0, 1, 0, 0), ('0:01:00', '00:01:00', '@ 1 min', 'PT1M')), ((0, 0, 0, 0, 0, 1, 0), ('0:00:01', '00:00:01', '@ 1 sec', 'PT1S')), ((0, 0, 0, 0, 0, 0, 1), ('0:00:00.000001', '00:00:00.000001', '@ 0.000001 secs', 'PT0.000001S')), ((0, 0, 0, 0, 2, 0, 0), ('0:02:00', '00:02:00', '@ 2 mins', 'PT2M')), ((0, 0, 0, 0, 0, 2, 0), ('0:00:02', '00:00:02', '@ 2 secs', 'PT2S')), ((0, 0, 0, 0, 0, 0, 2), ('0:00:00.000002', '00:00:00.000002', '@ 0.000002 secs', 'PT0.000002S')), ((0, 0, 0, 0, -3, 0, 0), ('-0:03:00', '-00:03:00', '@ 3 mins ago', 'PT-3M')), ((0, 0, 0, 0, 0, -3, 0), ('-0:00:03', '-00:00:03', '@ 3 secs ago', 'PT-3S')), ((0, 0, 0, 0, 0, 0, -3), ('-0:00:00.000003', '-00:00:00.000003', '@ 0.000003 secs ago', 'PT-0.000003S')), ((1, 2, 0, 0, 0, 0, 0), ('1-2', '1 year 2 mons', '@ 1 year 2 mons', 'P1Y2M')), ((0, 0, 3, 4, 5, 6, 0), ('3 4:05:06', '3 days 04:05:06', '@ 3 days 4 hours 5 mins 6 secs', 'P3DT4H5M6S')), ((1, 2, 3, 4, 5, 6, 0), ('+1-2 +3 +4:05:06', '1 year 2 mons 3 days 04:05:06', '@ 1 year 2 mons 3 days 4 hours 5 mins 6 secs', 'P1Y2M3DT4H5M6S')), ((1, 2, 3, -4, -5, -6, 0), ('+1-2 +3 -4:05:06', '1 year 2 mons 3 days -04:05:06', '@ 1 year 2 mons 3 days -4 hours -5 mins -6 secs', 'P1Y2M3DT-4H-5M-6S')), ((1, 2, 3, -4, 5, 6, 0), ('+1-2 +3 -3:54:54', '1 year 2 mons 3 days -03:54:54', '@ 1 year 2 mons 3 days -3 hours -54 mins -54 secs', 'P1Y2M3DT-3H-54M-54S')), ((-1, -2, 3, -4, -5, -6, 0), ('-1-2 +3 -4:05:06', '-1 years -2 mons +3 days 
-04:05:06', '@ 1 year 2 mons -3 days 4 hours 5 mins 6 secs ago', 'P-1Y-2M3DT-4H-5M-6S')), ((1, 2, -3, 4, 5, 6, 0), ('+1-2 -3 +4:05:06', '1 year 2 mons -3 days +04:05:06', '@ 1 year 2 mons -3 days 4 hours 5 mins 6 secs', 'P1Y2M-3DT4H5M6S')), ((0, 0, 0, 1, 30, 0, 0), ('1:30:00', '01:30:00', '@ 1 hour 30 mins', 'PT1H30M')), ((0, 0, 0, 3, 15, 45, 123456), ('3:15:45.123456', '03:15:45.123456', '@ 3 hours 15 mins 45.123456 secs', 'PT3H15M45.123456S')), ((0, 0, 0, 3, 15, -5, 123), ('3:14:55.000123', '03:14:55.000123', '@ 3 hours 14 mins 55.000123 secs', 'PT3H14M55.000123S')), ((0, 0, 0, 3, -5, 15, -12345), ('2:55:14.987655', '02:55:14.987655', '@ 2 hours 55 mins 14.987655 secs', 'PT2H55M14.987655S')), ((0, 0, 0, 2, -1, 0, 0), ('1:59:00', '01:59:00', '@ 1 hour 59 mins', 'PT1H59M')), ((0, 0, 0, -1, 2, 0, 0), ('-0:58:00', '-00:58:00', '@ 58 mins ago', 'PT-58M')), ((1, 11, 0, 0, 0, 0, 0), ('1-11', '1 year 11 mons', '@ 1 year 11 mons', 'P1Y11M')), ((0, -10, 0, 0, 0, 0, 0), ('-0-10', '-10 mons', '@ 10 mons ago', 'P-10M')), ((0, 0, 2, -1, 0, 0, 0), ('+0-0 +2 -1:00:00', '2 days -01:00:00', '@ 2 days -1 hours', 'P2DT-1H')), ((0, 0, -1, 2, 0, 0, 0), ('+0-0 -1 +2:00:00', '-1 days +02:00:00', '@ 1 day -2 hours ago', 'P-1DT2H')), ((0, 0, 1, 0, 0, 0, 1), ('1 0:00:00.000001', '1 day 00:00:00.000001', '@ 1 day 0.000001 secs', 'P1DT0.000001S')), ((0, 0, 1, 0, 0, 1, 0), ('1 0:00:01', '1 day 00:00:01', '@ 1 day 1 sec', 'P1DT1S')), ((0, 0, 1, 0, 1, 0, 0), ('1 0:01:00', '1 day 00:01:00', '@ 1 day 1 min', 'P1DT1M')), ((0, 0, 0, 0, 1, 0, -1), ('0:00:59.999999', '00:00:59.999999', '@ 59.999999 secs', 'PT59.999999S')), ((0, 0, 0, 0, -1, 0, 1), ('-0:00:59.999999', '-00:00:59.999999', '@ 59.999999 secs ago', 'PT-59.999999S')), ((0, 0, 0, 0, -1, 1, 1), ('-0:00:58.999999', '-00:00:58.999999', '@ 58.999999 secs ago', 'PT-58.999999S')), ((0, 0, 42, 0, 0, 0, 0), ('42 0:00:00', '42 days', '@ 42 days', 'P42D')), ((0, 0, -7, 0, 0, 0, 0), ('-7 0:00:00', '-7 days', '@ 7 days ago', 'P-7D')), ((1, 1, 1, 1, 1, 
0, 0), ('+1-1 +1 +1:01:00', '1 year 1 mon 1 day 01:01:00', '@ 1 year 1 mon 1 day 1 hour 1 min', 'P1Y1M1DT1H1M')), ((0, -11, -1, -1, 1, 0, 0), ('-0-11 -1 -0:59:00', '-11 mons -1 days -00:59:00', '@ 11 mons 1 day 59 mins ago', 'P-11M-1DT-59M')), ((-1, -1, -1, -1, -1, 0, 0), ('-1-1 -1 -1:01:00', '-1 years -1 mons -1 days -01:01:00', '@ 1 year 1 mon 1 day 1 hour 1 min ago', 'P-1Y-1M-1DT-1H-1M')), ((-1, 0, -3, 1, 0, 0, 0), ('-1-0 -3 +1:00:00', '-1 years -3 days +01:00:00', '@ 1 year 3 days -1 hours ago', 'P-1Y-3DT1H')), ((1, 0, 0, 0, 0, 0, 1), ('+1-0 +0 +0:00:00.000001', '1 year 00:00:00.000001', '@ 1 year 0.000001 secs', 'P1YT0.000001S')), ((1, 0, 0, 0, 0, 0, -1), ('+1-0 +0 -0:00:00.000001', '1 year -00:00:00.000001', '@ 1 year -0.000001 secs', 'P1YT-0.000001S')), ((1, 2, 3, 4, 5, 6, 7), ('+1-2 +3 +4:05:06.000007', '1 year 2 mons 3 days 04:05:06.000007', '@ 1 year 2 mons 3 days 4 hours 5 mins 6.000007 secs', 'P1Y2M3DT4H5M6.000007S')), ((0, 10, 3, -4, 5, -6, 7), ('+0-10 +3 -3:55:05.999993', '10 mons 3 days -03:55:05.999993', '@ 10 mons 3 days -3 hours -55 mins -5.999993 secs', 'P10M3DT-3H-55M-5.999993S')), ((0, -10, -3, 4, -5, 6, -7), ('-0-10 -3 +3:55:05.999993', '-10 mons -3 days +03:55:05.999993', '@ 10 mons 3 days -3 hours -55 mins -5.999993 secs ago', 'P-10M-3DT3H55M5.999993S'))] def testCastInterval(self): for result, values in self.intervals: f = pg.cast_interval years, mons, days, hours, mins, secs, usecs = result days += 365 * years + 30 * mons interval = timedelta(days=days, hours=hours, minutes=mins, seconds=secs, microseconds=usecs) for value in values: self.assertEqual(f(value), interval) class TestEscapeFunctions(unittest.TestCase): """Test pg escape and unescape functions. The libpq interface memorizes some parameters of the last opened connection that influence the result of these functions. Therefore we cannot do rigid tests of these functions here. We leave this for the test module that runs with a database. 
""" def testEscapeString(self): f = pg.escape_string r = f(b'plain') self.assertIsInstance(r, bytes) self.assertEqual(r, b'plain') r = f(u'plain') self.assertIsInstance(r, unicode) self.assertEqual(r, u'plain') r = f("that's cheese") self.assertIsInstance(r, str) self.assertEqual(r, "that''s cheese") def testEscapeBytea(self): f = pg.escape_bytea r = f(b'plain') self.assertIsInstance(r, bytes) self.assertEqual(r, b'plain') r = f(u'plain') self.assertIsInstance(r, unicode) self.assertEqual(r, u'plain') r = f("that's cheese") self.assertIsInstance(r, str) self.assertEqual(r, "that''s cheese") def testUnescapeBytea(self): f = pg.unescape_bytea r = f(b'plain') self.assertIsInstance(r, bytes) self.assertEqual(r, b'plain') r = f(u'plain') self.assertIsInstance(r, bytes) self.assertEqual(r, b'plain') r = f(b"das is' k\\303\\244se") self.assertIsInstance(r, bytes) self.assertEqual(r, u"das is' käse".encode('utf-8')) r = f(u"das is' k\\303\\244se") self.assertIsInstance(r, bytes) self.assertEqual(r, u"das is' käse".encode('utf-8')) r = f(b'O\\000ps\\377!') self.assertEqual(r, b'O\x00ps\xff!') r = f(u'O\\000ps\\377!') self.assertEqual(r, b'O\x00ps\xff!') class TestConfigFunctions(unittest.TestCase): """Test the functions for changing default settings. The effect of most of these cannot be tested here, because that needs a database connection. So we merely test their existence here. 
""" def testGetDatestyle(self): self.assertIsNone(pg.get_datestyle()) def testGetDatestyle(self): datestyle = pg.get_datestyle() try: pg.set_datestyle('ISO, YMD') self.assertEqual(pg.get_datestyle(), 'ISO, YMD') pg.set_datestyle('Postgres, MDY') self.assertEqual(pg.get_datestyle(), 'Postgres, MDY') pg.set_datestyle('Postgres, DMY') self.assertEqual(pg.get_datestyle(), 'Postgres, DMY') pg.set_datestyle('SQL, MDY') self.assertEqual(pg.get_datestyle(), 'SQL, MDY') pg.set_datestyle('SQL, DMY') self.assertEqual(pg.get_datestyle(), 'SQL, DMY') pg.set_datestyle('German, DMY') self.assertEqual(pg.get_datestyle(), 'German, DMY') pg.set_datestyle(None) self.assertIsNone(pg.get_datestyle()) finally: pg.set_datestyle(datestyle) def testGetDecimalPoint(self): r = pg.get_decimal_point() self.assertIsInstance(r, str) self.assertEqual(r, '.') def testSetDecimalPoint(self): point = pg.get_decimal_point() try: pg.set_decimal_point('*') r = pg.get_decimal_point() self.assertIsInstance(r, str) self.assertEqual(r, '*') finally: pg.set_decimal_point(point) r = pg.get_decimal_point() self.assertIsInstance(r, str) self.assertEqual(r, point) def testGetDecimal(self): r = pg.get_decimal() self.assertIs(r, pg.Decimal) def testSetDecimal(self): decimal_class = pg.Decimal try: pg.set_decimal(int) r = pg.get_decimal() self.assertIs(r, int) finally: pg.set_decimal(decimal_class) r = pg.get_decimal() self.assertIs(r, decimal_class) def testGetBool(self): r = pg.get_bool() self.assertIsInstance(r, bool) self.assertIs(r, True) def testSetBool(self): use_bool = pg.get_bool() try: pg.set_bool(False) r = pg.get_bool() pg.set_bool(use_bool) self.assertIsInstance(r, bool) self.assertIs(r, False) pg.set_bool(True) r = pg.get_bool() self.assertIsInstance(r, bool) self.assertIs(r, True) finally: pg.set_bool(use_bool) r = pg.get_bool() self.assertIsInstance(r, bool) self.assertIs(r, use_bool) def testGetByteaEscaped(self): r = pg.get_bytea_escaped() self.assertIsInstance(r, bool) self.assertIs(r, False) def 
testSetByteaEscaped(self): bytea_escaped = pg.get_bytea_escaped() try: pg.set_bytea_escaped(True) r = pg.get_bytea_escaped() pg.set_bytea_escaped(bytea_escaped) self.assertIsInstance(r, bool) self.assertIs(r, True) pg.set_bytea_escaped(False) r = pg.get_bytea_escaped() self.assertIsInstance(r, bool) self.assertIs(r, False) finally: pg.set_bytea_escaped(bytea_escaped) r = pg.get_bytea_escaped() self.assertIsInstance(r, bool) self.assertIs(r, bytea_escaped) def testGetJsondecode(self): r = pg.get_jsondecode() self.assertTrue(callable(r)) self.assertIs(r, json.loads) def testSetJsondecode(self): jsondecode = pg.get_jsondecode() try: pg.set_jsondecode(None) r = pg.get_jsondecode() self.assertIsNone(r) pg.set_jsondecode(str) r = pg.get_jsondecode() self.assertIs(r, str) self.assertRaises(TypeError, pg.set_jsondecode, 'invalid') finally: pg.set_jsondecode(jsondecode) r = pg.get_jsondecode() self.assertIs(r, jsondecode) class TestModuleConstants(unittest.TestCase): """Test the existence of the documented module constants.""" def testVersion(self): v = pg.version self.assertIsInstance(v, str) # make sure the version conforms to PEP440 re_version = r"""^ (\d[\.\d]*(?<= \d)) ((?:[abc]|rc)\d+)? (?:(\.post\d+))? (?:(\.dev\d+))? (?:(\+(?![.])[a-zA-Z0-9\.]*[a-zA-Z0-9]))? $""" match = re.match(re_version, v, re.X) self.assertIsNotNone(match) self.assertEqual(pg.__version__, v) if __name__ == '__main__': unittest.main() PyGreSQL-5.1/pg.py0000644000175100077410000030275413466770070013724 0ustar darcypyg00000000000000#!/usr/bin/python # # $Id: pg.py 989 2019-04-24 15:49:20Z cito $ # # PyGreSQL - a Python interface for the PostgreSQL database. # # This file contains the classic pg module. # # Copyright (c) 2019 by the PyGreSQL Development Team # # The notification handler is based on pgnotify which is # Copyright (c) 2001 Ng Pheng Siong. All rights reserved. # # Please see the LICENSE.TXT file for specific restrictions. """PyGreSQL classic interface. 
This pg module implements some basic database management stuff. It includes the _pg module and builds on it, providing the higher level wrapper class named DB with additional functionality. This is known as the "classic" ("old style") PyGreSQL interface. For a DB-API 2 compliant interface use the newer pgdb module. """ from __future__ import print_function, division from _pg import * __version__ = version import select import warnings import weakref from datetime import date, time, datetime, timedelta, tzinfo from decimal import Decimal from math import isnan, isinf from collections import namedtuple from keyword import iskeyword from operator import itemgetter from functools import partial from re import compile as regex from json import loads as jsondecode, dumps as jsonencode from uuid import UUID try: # noinspection PyUnresolvedReferences long except NameError: # Python >= 3.0 long = int try: # noinspection PyUnresolvedReferences basestring except NameError: # Python >= 3.0 basestring = (str, bytes) try: from functools import lru_cache except ImportError: # Python < 3.2 from functools import update_wrapper try: from _thread import RLock except ImportError: class RLock: # for builds without threads def __enter__(self): pass def __exit__(self, exctype, excinst, exctb): pass def lru_cache(maxsize=128): """Simplified functools.lru_cache decorator for one argument.""" def decorator(function): sentinel = object() cache = {} get = cache.get lock = RLock() root = [] root_full = [root, False] root[:] = [root, root, None, None] if maxsize == 0: def wrapper(arg): res = function(arg) return res elif maxsize is None: def wrapper(arg): res = get(arg, sentinel) if res is not sentinel: return res res = function(arg) cache[arg] = res return res else: def wrapper(arg): with lock: link = get(arg) if link is not None: root = root_full[0] prev, next, _arg, res = link prev[1] = next next[0] = prev last = root[0] last[1] = root[0] = link link[0] = last link[1] = root return res res = 
function(arg) with lock: root, full = root_full if arg in cache: pass elif full: oldroot = root oldroot[2] = arg oldroot[3] = res root = root_full[0] = oldroot[1] oldarg = root[2] oldres = root[3] # keep reference root[2] = root[3] = None del cache[oldarg] cache[arg] = oldroot else: last = root[0] link = [last, root, arg, res] last[1] = root[0] = cache[arg] = link if len(cache) >= maxsize: root_full[1] = True return res wrapper.__wrapped__ = function return update_wrapper(wrapper, function) return decorator # Auxiliary classes and functions that are independent from a DB connection: try: from collections import OrderedDict except ImportError: # Python 2.6 or 3.0 OrderedDict = dict class AttrDict(dict): """Simple read-only ordered dictionary for storing attribute names.""" def __init__(self, *args, **kw): if len(args) > 1 or kw: raise TypeError items = args[0] if args else [] if isinstance(items, dict): raise TypeError items = list(items) self._keys = [item[0] for item in items] dict.__init__(self, items) self._read_only = True error = self._read_only_error self.clear = self.update = error self.pop = self.setdefault = self.popitem = error def __setitem__(self, key, value): if self._read_only: self._read_only_error() dict.__setitem__(self, key, value) def __delitem__(self, key): if self._read_only: self._read_only_error() dict.__delitem__(self, key) def __iter__(self): return iter(self._keys) def keys(self): return list(self._keys) def values(self): return [self[key] for key in self] def items(self): return [(key, self[key]) for key in self] def iterkeys(self): return self.__iter__() def itervalues(self): return iter(self.values()) def iteritems(self): return iter(self.items()) @staticmethod def _read_only_error(*args, **kw): raise TypeError('This object is read-only') else: class AttrDict(OrderedDict): """Simple read-only ordered dictionary for storing attribute names.""" def __init__(self, *args, **kw): self._read_only = False OrderedDict.__init__(self, *args, 
**kw) self._read_only = True error = self._read_only_error self.clear = self.update = error self.pop = self.setdefault = self.popitem = error def __setitem__(self, key, value): if self._read_only: self._read_only_error() OrderedDict.__setitem__(self, key, value) def __delitem__(self, key): if self._read_only: self._read_only_error() OrderedDict.__delitem__(self, key) @staticmethod def _read_only_error(*args, **kw): raise TypeError('This object is read-only') try: from inspect import signature except ImportError: # Python < 3.3 from inspect import getargspec def get_args(func): return getargspec(func).args else: def get_args(func): return list(signature(func).parameters) try: from datetime import timezone except ImportError: # Python < 3.2 class timezone(tzinfo): """Simple timezone implementation.""" def __init__(self, offset, name=None): self.offset = offset if not name: minutes = self.offset.days * 1440 + self.offset.seconds // 60 if minutes < 0: hours, minutes = divmod(-minutes, 60) hours = -hours else: hours, minutes = divmod(minutes, 60) name = 'UTC%+03d:%02d' % (hours, minutes) self.name = name def utcoffset(self, dt): return self.offset def tzname(self, dt): return self.name def dst(self, dt): return None timezone.utc = timezone(timedelta(0), 'UTC') _has_timezone = False else: _has_timezone = True # time zones used in Postgres timestamptz output _timezones = dict(CET='+0100', EET='+0200', EST='-0500', GMT='+0000', HST='-1000', MET='+0100', MST='-0700', UCT='+0000', UTC='+0000', WET='+0000') def _timezone_as_offset(tz): if tz.startswith(('+', '-')): if len(tz) < 5: return tz + '00' return tz.replace(':', '') return _timezones.get(tz, '+0000') def _get_timezone(tz): tz = _timezone_as_offset(tz) minutes = 60 * int(tz[1:3]) + int(tz[3:5]) if tz[0] == '-': minutes = -minutes return timezone(timedelta(minutes=minutes), tz) def _oid_key(table): """Build oid key from a table name.""" return 'oid(%s)' % table class _SimpleTypes(dict): """Dictionary mapping pg_type 
names to simple type names.""" _types = {'bool': 'bool', 'bytea': 'bytea', 'date': 'date interval time timetz timestamp timestamptz' ' abstime reltime', # these are very old 'float': 'float4 float8', 'int': 'cid int2 int4 int8 oid xid', 'hstore': 'hstore', 'json': 'json jsonb', 'uuid': 'uuid', 'num': 'numeric', 'money': 'money', 'text': 'bpchar char name text varchar'} def __init__(self): for typ, keys in self._types.items(): for key in keys.split(): self[key] = typ self['_%s' % key] = '%s[]' % typ # this could be a static method in Python > 2.6 def __missing__(self, key): return 'text' _simpletypes = _SimpleTypes() def _quote_if_unqualified(param, name): """Quote parameter representing a qualified name. Puts a quote_ident() call around the give parameter unless the name contains a dot, in which case the name is ambiguous (could be a qualified name or just a name with a dot in it) and must be quoted manually by the caller. """ if isinstance(name, basestring) and '.' not in name: return 'quote_ident(%s)' % (param,) return param class _ParameterList(list): """Helper class for building typed parameter lists.""" def add(self, value, typ=None): """Typecast value with known database type and build parameter list. If this is a literal value, it will be returned as is. Otherwise, a placeholder will be returned and the parameter list will be augmented. 
""" value = self.adapt(value, typ) if isinstance(value, Literal): return value self.append(value) return '$%d' % len(self) class Bytea(bytes): """Wrapper class for marking Bytea values.""" class Hstore(dict): """Wrapper class for marking hstore values.""" _re_quote = regex('^[Nn][Uu][Ll][Ll]$|[ ,=>]') @classmethod def _quote(cls, s): if s is None: return 'NULL' if not s: return '""' s = s.replace('"', '\\"') if cls._re_quote.search(s): s = '"%s"' % s return s def __str__(self): q = self._quote return ','.join('%s=>%s' % (q(k), q(v)) for k, v in self.items()) class Json: """Wrapper class for marking Json values.""" def __init__(self, obj): self.obj = obj class Literal(str): """Wrapper class for marking literal SQL values.""" class Adapter: """Class providing methods for adapting parameters to the database.""" _bool_true_values = frozenset('t true 1 y yes on'.split()) _date_literals = frozenset('current_date current_time' ' current_timestamp localtime localtimestamp'.split()) _re_array_quote = regex(r'[{},"\\\s]|^[Nn][Uu][Ll][Ll]$') _re_record_quote = regex(r'[(,"\\]') _re_array_escape = _re_record_escape = regex(r'(["\\])') def __init__(self, db): self.db = weakref.proxy(db) @classmethod def _adapt_bool(cls, v): """Adapt a boolean parameter.""" if isinstance(v, basestring): if not v: return None v = v.lower() in cls._bool_true_values return 't' if v else 'f' @classmethod def _adapt_date(cls, v): """Adapt a date parameter.""" if not v: return None if isinstance(v, basestring) and v.lower() in cls._date_literals: return Literal(v) return v @staticmethod def _adapt_num(v): """Adapt a numeric parameter.""" if not v and v != 0: return None return v _adapt_int = _adapt_float = _adapt_money = _adapt_num def _adapt_bytea(self, v): """Adapt a bytea parameter.""" return self.db.escape_bytea(v) def _adapt_json(self, v): """Adapt a json parameter.""" if not v: return None if isinstance(v, basestring): return v return self.db.encode_json(v) @classmethod def 
_adapt_text_array(cls, v): """Adapt a text type array parameter.""" if isinstance(v, list): adapt = cls._adapt_text_array return '{%s}' % ','.join(adapt(v) for v in v) if v is None: return 'null' if not v: return '""' v = str(v) if cls._re_array_quote.search(v): v = '"%s"' % cls._re_array_escape.sub(r'\\\1', v) return v _adapt_date_array = _adapt_text_array @classmethod def _adapt_bool_array(cls, v): """Adapt a boolean array parameter.""" if isinstance(v, list): adapt = cls._adapt_bool_array return '{%s}' % ','.join(adapt(v) for v in v) if v is None: return 'null' if isinstance(v, basestring): if not v: return 'null' v = v.lower() in cls._bool_true_values return 't' if v else 'f' @classmethod def _adapt_num_array(cls, v): """Adapt a numeric array parameter.""" if isinstance(v, list): adapt = cls._adapt_num_array return '{%s}' % ','.join(adapt(v) for v in v) if not v and v != 0: return 'null' return str(v) _adapt_int_array = _adapt_float_array = _adapt_money_array = \ _adapt_num_array def _adapt_bytea_array(self, v): """Adapt a bytea array parameter.""" if isinstance(v, list): return b'{' + b','.join( self._adapt_bytea_array(v) for v in v) + b'}' if v is None: return b'null' return self.db.escape_bytea(v).replace(b'\\', b'\\\\') def _adapt_json_array(self, v): """Adapt a json array parameter.""" if isinstance(v, list): adapt = self._adapt_json_array return '{%s}' % ','.join(adapt(v) for v in v) if not v: return 'null' if not isinstance(v, basestring): v = self.db.encode_json(v) if self._re_array_quote.search(v): v = '"%s"' % self._re_array_escape.sub(r'\\\1', v) return v def _adapt_record(self, v, typ): """Adapt a record parameter with given type.""" typ = self.get_attnames(typ).values() if len(typ) != len(v): raise TypeError('Record parameter %s has wrong size' % v) adapt = self.adapt value = [] for v, t in zip(v, typ): v = adapt(v, t) if v is None: v = '' elif not v: v = '""' else: if isinstance(v, bytes): if str is not bytes: v = v.decode('ascii') else: v = 
str(v) if self._re_record_quote.search(v): v = '"%s"' % self._re_record_escape.sub(r'\\\1', v) value.append(v) return '(%s)' % ','.join(value) def adapt(self, value, typ=None): """Adapt a value with known database type.""" if value is not None and not isinstance(value, Literal): if typ: simple = self.get_simple_name(typ) else: typ = simple = self.guess_simple_type(value) or 'text' pg_str = getattr(value, '__pg_str__', None) if pg_str: value = pg_str(typ) if simple == 'text': pass elif simple == 'record': if isinstance(value, tuple): value = self._adapt_record(value, typ) elif simple.endswith('[]'): if isinstance(value, list): adapt = getattr(self, '_adapt_%s_array' % simple[:-2]) value = adapt(value) else: adapt = getattr(self, '_adapt_%s' % simple) value = adapt(value) return value @staticmethod def simple_type(name): """Create a simple database type with given attribute names.""" typ = DbType(name) typ.simple = name return typ @staticmethod def get_simple_name(typ): """Get the simple name of a database type.""" if isinstance(typ, DbType): return typ.simple return _simpletypes[typ] @staticmethod def get_attnames(typ): """Get the attribute names of a composite database type.""" if isinstance(typ, DbType): return typ.attnames return {} _frequent_simple_types = { Bytea: 'bytea', str: 'text', bytes: 'text', bool: 'bool', int: 'int', long: 'int', float: 'float', Decimal: 'num', date: 'date', time: 'date', datetime: 'date', timedelta: 'date' } @classmethod def guess_simple_type(cls, value): """Try to guess which database type the given value has.""" # optimize for most frequent types simple_type = cls._frequent_simple_types.get(type(value)) if simple_type: return simple_type if isinstance(value, Bytea): return 'bytea' if isinstance(value, basestring): return 'text' if isinstance(value, bool): return 'bool' if isinstance(value, (int, long)): return 'int' if isinstance(value, float): return 'float' if isinstance(value, Decimal): return 'num' if isinstance(value, (date, 
time, datetime, timedelta)): return 'date' if isinstance(value, list): return '%s[]' % (cls.guess_simple_base_type(value) or 'text',) if isinstance(value, tuple): simple_type = cls.simple_type guess = cls.guess_simple_type def get_attnames(self): return AttrDict((str(n + 1), simple_type(guess(v))) for n, v in enumerate(value)) typ = simple_type('record') typ._get_attnames = get_attnames return typ @classmethod def guess_simple_base_type(cls, value): """Try to guess the base type of a given array.""" for v in value: if isinstance(v, list): typ = cls.guess_simple_base_type(v) else: typ = cls.guess_simple_type(v) if typ: return typ def adapt_inline(self, value, nested=False): """Adapt a value that is put into the SQL and needs to be quoted.""" if value is None: return 'NULL' if isinstance(value, Literal): return value if isinstance(value, Bytea): value = self.db.escape_bytea(value) if bytes is not str: # Python >= 3.0 value = value.decode('ascii') elif isinstance(value, Json): if value.encode: return value.encode() value = self.db.encode_json(value) elif isinstance(value, (datetime, date, time, timedelta)): value = str(value) if isinstance(value, basestring): value = self.db.escape_string(value) return "'%s'" % value if isinstance(value, bool): return 'true' if value else 'false' if isinstance(value, float): if isinf(value): return "'-Infinity'" if value < 0 else "'Infinity'" if isnan(value): return "'NaN'" return value if isinstance(value, (int, long, Decimal)): return value if isinstance(value, list): q = self.adapt_inline s = '[%s]' if nested else 'ARRAY[%s]' return s % ','.join(str(q(v, nested=True)) for v in value) if isinstance(value, tuple): q = self.adapt_inline return '(%s)' % ','.join(str(q(v)) for v in value) pg_repr = getattr(value, '__pg_repr__', None) if not pg_repr: raise InterfaceError( 'Do not know how to adapt type %s' % type(value)) value = pg_repr() if isinstance(value, (tuple, list)): value = self.adapt_inline(value) return value def 
parameter_list(self): """Return a parameter list for parameters with known database types. The list has an add(value, typ) method that will build up the list and return either the literal value or a placeholder. """ params = _ParameterList() params.adapt = self.adapt return params def format_query(self, command, values=None, types=None, inline=False): """Format a database query using the given values and types.""" if not values: return command, [] if inline and types: raise ValueError('Typed parameters must be sent separately') params = self.parameter_list() if isinstance(values, (list, tuple)): if inline: adapt = self.adapt_inline literals = [adapt(value) for value in values] else: add = params.add if types: if (not isinstance(types, (list, tuple)) or len(types) != len(values)): raise TypeError('The values and types do not match') literals = [add(value, typ) for value, typ in zip(values, types)] else: literals = [add(value) for value in values] command %= tuple(literals) elif isinstance(values, dict): # we want to allow extra keys in the dictionary, # so we first must find the values actually used in the command used_values = {} literals = dict.fromkeys(values, '') for key in values: del literals[key] try: command % literals except KeyError: used_values[key] = values[key] literals[key] = '' values = used_values if inline: adapt = self.adapt_inline literals = dict((key, adapt(value)) for key, value in values.items()) else: add = params.add if types: if not isinstance(types, dict): raise TypeError('The values and types do not match') literals = dict((key, add(values[key], types.get(key))) for key in sorted(values)) else: literals = dict((key, add(values[key])) for key in sorted(values)) command %= literals else: raise TypeError('The values must be passed as tuple, list or dict') return command, params def cast_bool(value): """Cast a boolean value.""" if not get_bool(): return value return value[0] == 't' def cast_json(value): """Cast a JSON value.""" cast = 
get_jsondecode() if not cast: return value return cast(value) def cast_num(value): """Cast a numeric value.""" return (get_decimal() or float)(value) def cast_money(value): """Cast a money value.""" point = get_decimal_point() if not point: return value if point != '.': value = value.replace(point, '.') value = value.replace('(', '-') value = ''.join(c for c in value if c.isdigit() or c in '.-') return (get_decimal() or float)(value) def cast_int2vector(value): """Cast an int2vector value.""" return [int(v) for v in value.split()] def cast_date(value, connection): """Cast a date value.""" # The output format depends on the server setting DateStyle. The default # setting ISO and the setting for German are actually unambiguous. The # order of days and months in the other two settings is however ambiguous, # so at least here we need to consult the setting to properly parse values. if value == '-infinity': return date.min if value == 'infinity': return date.max value = value.split() if value[-1] == 'BC': return date.min value = value[0] if len(value) > 10: return date.max fmt = connection.date_format() return datetime.strptime(value, fmt).date() def cast_time(value): """Cast a time value.""" fmt = '%H:%M:%S.%f' if len(value) > 8 else '%H:%M:%S' return datetime.strptime(value, fmt).time() _re_timezone = regex('(.*)([+-].*)') def cast_timetz(value): """Cast a timetz value.""" tz = _re_timezone.match(value) if tz: value, tz = tz.groups() else: tz = '+0000' fmt = '%H:%M:%S.%f' if len(value) > 8 else '%H:%M:%S' if _has_timezone: value += _timezone_as_offset(tz) fmt += '%z' return datetime.strptime(value, fmt).timetz() return datetime.strptime(value, fmt).timetz().replace( tzinfo=_get_timezone(tz)) def cast_timestamp(value, connection): """Cast a timestamp value.""" if value == '-infinity': return datetime.min if value == 'infinity': return datetime.max value = value.split() if value[-1] == 'BC': return datetime.min fmt = connection.date_format() if fmt.endswith('-%Y') and 
len(value) > 2: value = value[1:5] if len(value[3]) > 4: return datetime.max fmt = ['%d %b' if fmt.startswith('%d') else '%b %d', '%H:%M:%S.%f' if len(value[2]) > 8 else '%H:%M:%S', '%Y'] else: if len(value[0]) > 10: return datetime.max fmt = [fmt, '%H:%M:%S.%f' if len(value[1]) > 8 else '%H:%M:%S'] return datetime.strptime(' '.join(value), ' '.join(fmt)) def cast_timestamptz(value, connection): """Cast a timestamptz value.""" if value == '-infinity': return datetime.min if value == 'infinity': return datetime.max value = value.split() if value[-1] == 'BC': return datetime.min fmt = connection.date_format() if fmt.endswith('-%Y') and len(value) > 2: value = value[1:] if len(value[3]) > 4: return datetime.max fmt = ['%d %b' if fmt.startswith('%d') else '%b %d', '%H:%M:%S.%f' if len(value[2]) > 8 else '%H:%M:%S', '%Y'] value, tz = value[:-1], value[-1] else: if fmt.startswith('%Y-'): tz = _re_timezone.match(value[1]) if tz: value[1], tz = tz.groups() else: tz = '+0000' else: value, tz = value[:-1], value[-1] if len(value[0]) > 10: return datetime.max fmt = [fmt, '%H:%M:%S.%f' if len(value[1]) > 8 else '%H:%M:%S'] if _has_timezone: value.append(_timezone_as_offset(tz)) fmt.append('%z') return datetime.strptime(' '.join(value), ' '.join(fmt)) return datetime.strptime(' '.join(value), ' '.join(fmt)).replace( tzinfo=_get_timezone(tz)) _re_interval_sql_standard = regex( '(?:([+-])?([0-9]+)-([0-9]+) ?)?' '(?:([+-]?[0-9]+)(?!:) ?)?' '(?:([+-])?([0-9]+):([0-9]+):([0-9]+)(?:\\.([0-9]+))?)?') _re_interval_postgres = regex( '(?:([+-]?[0-9]+) ?years? ?)?' '(?:([+-]?[0-9]+) ?mons? ?)?' '(?:([+-]?[0-9]+) ?days? ?)?' '(?:([+-])?([0-9]+):([0-9]+):([0-9]+)(?:\\.([0-9]+))?)?') _re_interval_postgres_verbose = regex( '@ ?(?:([+-]?[0-9]+) ?years? ?)?' '(?:([+-]?[0-9]+) ?mons? ?)?' '(?:([+-]?[0-9]+) ?days? ?)?' '(?:([+-]?[0-9]+) ?hours? ?)?' '(?:([+-]?[0-9]+) ?mins? ?)?' '(?:([+-])?([0-9]+)(?:\\.([0-9]+))? ?secs?)? ?(ago)?') _re_interval_iso_8601 = regex( 'P(?:([+-]?[0-9]+)Y)?' 
'(?:([+-]?[0-9]+)M)?' '(?:([+-]?[0-9]+)D)?' '(?:T(?:([+-]?[0-9]+)H)?' '(?:([+-]?[0-9]+)M)?' '(?:([+-])?([0-9]+)(?:\\.([0-9]+))?S)?)?') def cast_interval(value): """Cast an interval value.""" # The output format depends on the server setting IntervalStyle, but it's # not necessary to consult this setting to parse it. It's faster to just # check all possible formats, and there is no ambiguity here. m = _re_interval_iso_8601.match(value) if m: m = [d or '0' for d in m.groups()] secs_ago = m.pop(5) == '-' m = [int(d) for d in m] years, mons, days, hours, mins, secs, usecs = m if secs_ago: secs = -secs usecs = -usecs else: m = _re_interval_postgres_verbose.match(value) if m: m, ago = [d or '0' for d in m.groups()[:8]], m.group(9) secs_ago = m.pop(5) == '-' m = [-int(d) for d in m] if ago else [int(d) for d in m] years, mons, days, hours, mins, secs, usecs = m if secs_ago: secs = - secs usecs = -usecs else: m = _re_interval_postgres.match(value) if m and any(m.groups()): m = [d or '0' for d in m.groups()] hours_ago = m.pop(3) == '-' m = [int(d) for d in m] years, mons, days, hours, mins, secs, usecs = m if hours_ago: hours = -hours mins = -mins secs = -secs usecs = -usecs else: m = _re_interval_sql_standard.match(value) if m and any(m.groups()): m = [d or '0' for d in m.groups()] years_ago = m.pop(0) == '-' hours_ago = m.pop(3) == '-' m = [int(d) for d in m] years, mons, days, hours, mins, secs, usecs = m if years_ago: years = -years mons = -mons if hours_ago: hours = -hours mins = -mins secs = -secs usecs = -usecs else: raise ValueError('Cannot parse interval: %s' % value) days += 365 * years + 30 * mons return timedelta(days=days, hours=hours, minutes=mins, seconds=secs, microseconds=usecs) class Typecasts(dict): """Dictionary mapping database types to typecast functions. The cast functions get passed the string representation of a value in the database which they need to convert to a Python object. 
The passed string will never be None since NULL values are already handled before the cast function is called. Note that the basic types are already handled by the C extension. They only need to be handled here as record or array components. """ # the default cast functions # (str functions are ignored but have been added for faster access) defaults = {'char': str, 'bpchar': str, 'name': str, 'text': str, 'varchar': str, 'bool': cast_bool, 'bytea': unescape_bytea, 'int2': int, 'int4': int, 'serial': int, 'int8': long, 'oid': int, 'hstore': cast_hstore, 'json': cast_json, 'jsonb': cast_json, 'float4': float, 'float8': float, 'numeric': cast_num, 'money': cast_money, 'date': cast_date, 'interval': cast_interval, 'time': cast_time, 'timetz': cast_timetz, 'timestamp': cast_timestamp, 'timestamptz': cast_timestamptz, 'int2vector': cast_int2vector, 'uuid': UUID, 'anyarray': cast_array, 'record': cast_record} connection = None # will be set in a connection specific instance def __missing__(self, typ): """Create a cast function if it is not cached. Note that this class never raises a KeyError, but returns None when no special cast function exists. 
""" if not isinstance(typ, str): raise TypeError('Invalid type: %s' % typ) cast = self.defaults.get(typ) if cast: # store default for faster access cast = self._add_connection(cast) self[typ] = cast elif typ.startswith('_'): base_cast = self[typ[1:]] cast = self.create_array_cast(base_cast) if base_cast: self[typ] = cast else: attnames = self.get_attnames(typ) if attnames: casts = [self[v.pgtype] for v in attnames.values()] cast = self.create_record_cast(typ, attnames, casts) self[typ] = cast return cast @staticmethod def _needs_connection(func): """Check if a typecast function needs a connection argument.""" try: args = get_args(func) except (TypeError, ValueError): return False else: return 'connection' in args[1:] def _add_connection(self, cast): """Add a connection argument to the typecast function if necessary.""" if not self.connection or not self._needs_connection(cast): return cast return partial(cast, connection=self.connection) def get(self, typ, default=None): """Get the typecast function for the given database type.""" return self[typ] or default def set(self, typ, cast): """Set a typecast function for the specified database type(s).""" if isinstance(typ, basestring): typ = [typ] if cast is None: for t in typ: self.pop(t, None) self.pop('_%s' % t, None) else: if not callable(cast): raise TypeError("Cast parameter must be callable") for t in typ: self[t] = self._add_connection(cast) self.pop('_%s' % t, None) def reset(self, typ=None): """Reset the typecasts for the specified type(s) to their defaults. When no type is specified, all typecasts will be reset. 
""" if typ is None: self.clear() else: if isinstance(typ, basestring): typ = [typ] for t in typ: self.pop(t, None) @classmethod def get_default(cls, typ): """Get the default typecast function for the given database type.""" return cls.defaults.get(typ) @classmethod def set_default(cls, typ, cast): """Set a default typecast function for the given database type(s).""" if isinstance(typ, basestring): typ = [typ] defaults = cls.defaults if cast is None: for t in typ: defaults.pop(t, None) defaults.pop('_%s' % t, None) else: if not callable(cast): raise TypeError("Cast parameter must be callable") for t in typ: defaults[t] = cast defaults.pop('_%s' % t, None) def get_attnames(self, typ): """Return the fields for the given record type. This method will be replaced with the get_attnames() method of DbTypes. """ return {} def dateformat(self): """Return the current date format. This method will be replaced with the dateformat() method of DbTypes. """ return '%Y-%m-%d' def create_array_cast(self, basecast): """Create an array typecast for the given base cast.""" cast_array = self['anyarray'] def cast(v): return cast_array(v, basecast) return cast def create_record_cast(self, name, fields, casts): """Create a named record typecast for the given fields and casts.""" cast_record = self['record'] record = namedtuple(name, fields) def cast(v): return record(*cast_record(v, casts)) return cast def get_typecast(typ): """Get the global typecast function for the given database type(s).""" return Typecasts.get_default(typ) def set_typecast(typ, cast): """Set a global typecast function for the given database type(s). Note that connections cache cast functions. To be sure a global change is picked up by a running connection, call db.db_types.reset_typecast(). """ Typecasts.set_default(typ, cast) class DbType(str): """Class augmenting the simple type name with additional info. 
The following additional information is provided: oid: the PostgreSQL type OID pgtype: the internal PostgreSQL data type name regtype: the registered PostgreSQL data type name simple: the more coarse-grained PyGreSQL type name typtype: b = base type, c = composite type etc. category: A = Array, b = Boolean, C = Composite etc. delim: delimiter for array types relid: corresponding table for composite types attnames: attributes for composite types """ @property def attnames(self): """Get names and types of the fields of a composite type.""" return self._get_attnames(self) class DbTypes(dict): """Cache for PostgreSQL data types. This cache maps type OIDs and names to DbType objects containing information on the associated database type. """ _num_types = frozenset('int float num money' ' int2 int4 int8 float4 float8 numeric money'.split()) def __init__(self, db): """Initialize type cache for connection.""" super(DbTypes, self).__init__() self._db = weakref.proxy(db) self._regtypes = False self._typecasts = Typecasts() self._typecasts.get_attnames = self.get_attnames self._typecasts.connection = self._db if db.server_version < 80400: # older remote databases (not officially supported) self._query_pg_type = ( "SELECT oid, typname, typname::text::regtype," " typtype, null as typcategory, typdelim, typrelid" " FROM pg_type WHERE oid=%s::regtype") else: self._query_pg_type = ( "SELECT oid, typname, typname::regtype," " typtype, typcategory, typdelim, typrelid" " FROM pg_type WHERE oid=%s::regtype") def add(self, oid, pgtype, regtype, typtype, category, delim, relid): """Create a PostgreSQL type name with additional info.""" if oid in self: return self[oid] simple = 'record' if relid else _simpletypes[pgtype] typ = DbType(regtype if self._regtypes else simple) typ.oid = oid typ.simple = simple typ.pgtype = pgtype typ.regtype = regtype typ.typtype = typtype typ.category = category typ.delim = delim typ.relid = relid typ._get_attnames = self.get_attnames return typ def 
__missing__(self, key): """Get the type info from the database if it is not cached.""" try: q = self._query_pg_type % (_quote_if_unqualified('$1', key),) res = self._db.query(q, (key,)).getresult() except ProgrammingError: res = None if not res: raise KeyError('Type %s could not be found' % key) res = res[0] typ = self.add(*res) self[typ.oid] = self[typ.pgtype] = typ return typ def get(self, key, default=None): """Get the type even if it is not cached.""" try: return self[key] except KeyError: return default def get_attnames(self, typ): """Get names and types of the fields of a composite type.""" if not isinstance(typ, DbType): typ = self.get(typ) if not typ: return None if not typ.relid: return None return self._db.get_attnames(typ.relid, with_oid=False) def get_typecast(self, typ): """Get the typecast function for the given database type.""" return self._typecasts.get(typ) def set_typecast(self, typ, cast): """Set a typecast function for the specified database type(s).""" self._typecasts.set(typ, cast) def reset_typecast(self, typ=None): """Reset the typecast function for the specified database type(s).""" self._typecasts.reset(typ) def typecast(self, value, typ): """Cast the given value according to the given database type.""" if value is None: # for NULL values, no typecast is necessary return None if not isinstance(typ, DbType): typ = self.get(typ) if typ: typ = typ.pgtype cast = self.get_typecast(typ) if typ else None if not cast or cast is str: # no typecast is necessary return value return cast(value) _re_fieldname = regex('^[A-Za-z][_a-zA-Z0-9]*$') # The result rows for database operations are returned as named tuples # by default. Since creating namedtuple classes is a somewhat expensive # operation, we cache up to 1024 of these classes by default. 
@lru_cache(maxsize=1024)
def _row_factory(names):
    """Get a namedtuple factory for row results with the given names."""
    try:
        try:
            return namedtuple('Row', names, rename=True)._make
        except TypeError:  # Python 2.6 and 3.0 do not support rename
            # sanitize invalid or keyword field names by hand
            names = [v if _re_fieldname.match(v) and not iskeyword(v)
                     else 'column_%d' % (n,)
                     for n, v in enumerate(names)]
            return namedtuple('Row', names)._make
    except ValueError:  # there is still a problem with the field names
        # last resort: fall back to fully generic column names
        names = ['column_%d' % (n,) for n in range(len(names))]
        return namedtuple('Row', names)._make


def set_row_factory_size(maxsize):
    """Change the size of the namedtuple factory cache.

    If maxsize is set to None, the cache can grow without bound.
    """
    global _row_factory
    # rebuild the lru_cache wrapper around the undecorated function
    _row_factory = lru_cache(maxsize)(_row_factory.__wrapped__)


# Helper functions used by the query object

def _dictiter(q):
    """Get query result as an iterator of dictionaries."""
    fields = q.listfields()
    for r in q:
        yield dict(zip(fields, r))


def _namediter(q):
    """Get query result as an iterator of named tuples."""
    row = _row_factory(q.listfields())
    for r in q:
        yield row(r)


def _namednext(q):
    """Get next row from query result as a named tuple."""
    return _row_factory(q.listfields())(next(q))


def _scalariter(q):
    """Get query result as an iterator of scalar values (first column)."""
    for r in q:
        yield r[0]


class _MemoryQuery:
    """Class that embodies a given query result."""

    def __init__(self, result, fields):
        """Create query from given result rows and field names."""
        self.result = result
        self.fields = tuple(fields)

    def listfields(self):
        """Return the stored field names of this query."""
        return self.fields

    def getresult(self):
        """Return the stored result of this query."""
        return self.result

    def __iter__(self):
        return iter(self.result)


def _db_error(msg, cls=DatabaseError):
    """Return DatabaseError with empty sqlstate attribute."""
    error = cls(msg)
    error.sqlstate = None
    return error


def _int_error(msg):
    """Return InternalError."""
    return _db_error(msg, InternalError)


def 
_prg_error(msg): """Return ProgrammingError.""" return _db_error(msg, ProgrammingError) # Initialize the C module set_decimal(Decimal) set_jsondecode(jsondecode) set_query_helpers(_dictiter, _namediter, _namednext, _scalariter) # The notification handler class NotificationHandler(object): """A PostgreSQL client-side asynchronous notification handler.""" def __init__(self, db, event, callback=None, arg_dict=None, timeout=None, stop_event=None): """Initialize the notification handler. You must pass a PyGreSQL database connection, the name of an event (notification channel) to listen for and a callback function. You can also specify a dictionary arg_dict that will be passed as the single argument to the callback function, and a timeout value in seconds (a floating point number denotes fractions of seconds). If it is absent or None, the callers will never time out. If the timeout is reached, the callback function will be called with a single argument that is None. If you set the timeout to zero, the handler will poll notifications synchronously and return. You can specify the name of the event that will be used to signal the handler to stop listening as stop_event. By default, it will be the event name prefixed with 'stop_'. 
""" self.db = db self.event = event self.stop_event = stop_event or 'stop_%s' % event self.listening = False self.callback = callback if arg_dict is None: arg_dict = {} self.arg_dict = arg_dict self.timeout = timeout def __del__(self): self.unlisten() def close(self): """Stop listening and close the connection.""" if self.db: self.unlisten() self.db.close() self.db = None def listen(self): """Start listening for the event and the stop event.""" if not self.listening: self.db.query('listen "%s"' % self.event) self.db.query('listen "%s"' % self.stop_event) self.listening = True def unlisten(self): """Stop listening for the event and the stop event.""" if self.listening: self.db.query('unlisten "%s"' % self.event) self.db.query('unlisten "%s"' % self.stop_event) self.listening = False def notify(self, db=None, stop=False, payload=None): """Generate a notification. Optionally, you can pass a payload with the notification. If you set the stop flag, a stop notification will be sent that will cause the handler to stop listening. Note: If the notification handler is running in another thread, you must pass a different database connection since PyGreSQL database connections are not thread-safe. """ if self.listening: if not db: db = self.db q = 'notify "%s"' % (self.stop_event if stop else self.event) if payload: q += ", '%s'" % payload return db.query(q) def __call__(self): """Invoke the notification handler. The handler is a loop that listens for notifications on the event and stop event channels. When either of these notifications are received, its associated 'pid', 'event' and 'extra' (the payload passed with the notification) are inserted into its arg_dict dictionary and the callback is invoked with this dictionary as a single argument. When the handler receives a stop event, it stops listening to both events and return. In the special case that the timeout of the handler has been set to zero, the handler will poll all events synchronously and return. 
        It will keep listening until it receives a stop event.

        Note: If you run this loop in another thread, don't use the same
        database connection for database operations in the main thread.
        """
        self.listen()
        poll = self.timeout == 0
        if not poll:
            rlist = [self.db.fileno()]
        while self.listening:
            # wait until a notification arrives or the timeout expires,
            # unless we are polling synchronously (timeout == 0)
            if poll or select.select(rlist, [], [], self.timeout)[0]:
                while self.listening:
                    notice = self.db.getnotify()
                    if not notice:  # no more messages
                        break
                    event, pid, extra = notice
                    if event not in (self.event, self.stop_event):
                        self.unlisten()
                        raise _db_error(
                            'Listening for "%s" and "%s", but notified of "%s"'
                            % (self.event, self.stop_event, event))
                    if event == self.stop_event:
                        self.unlisten()
                    self.arg_dict.update(pid=pid, event=event, extra=extra)
                    self.callback(self.arg_dict)
                if poll:
                    break
            else:  # we timed out
                self.unlisten()
                self.callback(None)


def pgnotify(*args, **kw):
    """Same as NotificationHandler, under the traditional name."""
    warnings.warn("pgnotify is deprecated, use NotificationHandler instead",
                  DeprecationWarning, stacklevel=2)
    return NotificationHandler(*args, **kw)


# The actual PostgreSQL database connection interface:

class DB:
    """Wrapper class for the _pg connection type."""

    db = None  # invalid fallback for underlying connection

    def __init__(self, *args, **kw):
        """Create a new connection

        You can pass either the connection parameters or an existing
        _pg or pgdb connection. This allows you to use the methods
        of the classic pg interface with a DB-API 2 pgdb connection.
""" if not args and len(kw) == 1: db = kw.get('db') elif not kw and len(args) == 1: db = args[0] else: db = None if db: if isinstance(db, DB): db = db.db else: try: db = db._cnx except AttributeError: pass if not db or not hasattr(db, 'db') or not hasattr(db, 'query'): db = connect(*args, **kw) self._db_args = args, kw self._closeable = True else: self._db_args = db self._closeable = False self.db = db self.dbname = db.db self._regtypes = False self._attnames = {} self._pkeys = {} self._privileges = {} self.adapter = Adapter(self) self.dbtypes = DbTypes(self) if db.server_version < 80400: # support older remote data bases self._query_attnames = ( "SELECT a.attname, t.oid, t.typname, t.typname::text::regtype," " t.typtype, null as typcategory, t.typdelim, t.typrelid" " FROM pg_attribute a" " JOIN pg_type t ON t.oid = a.atttypid" " WHERE a.attrelid = %s::regclass AND %s" " AND NOT a.attisdropped ORDER BY a.attnum") else: self._query_attnames = ( "SELECT a.attname, t.oid, t.typname, t.typname::regtype," " t.typtype, t.typcategory, t.typdelim, t.typrelid" " FROM pg_attribute a" " JOIN pg_type t ON t.oid = a.atttypid" " WHERE a.attrelid = %s::regclass AND %s" " AND NOT a.attisdropped ORDER BY a.attnum") db.set_cast_hook(self.dbtypes.typecast) self.debug = None # For debugging scripts, this can be set # * to a string format specification (e.g. in CGI set to "%s
"), # * to a file object to write debug statements or # * to a callable object which takes a string argument # * to any other true value to just print debug statements def __getattr__(self, name): # All undefined members are same as in underlying connection: if self.db: return getattr(self.db, name) else: raise _int_error('Connection is not valid') def __dir__(self): # Custom dir function including the attributes of the connection: attrs = set(self.__class__.__dict__) attrs.update(self.__dict__) attrs.update(dir(self.db)) return sorted(attrs) # Context manager methods def __enter__(self): """Enter the runtime context. This will start a transaction.""" self.begin() return self def __exit__(self, et, ev, tb): """Exit the runtime context. This will end the transaction.""" if et is None and ev is None and tb is None: self.commit() else: self.rollback() def __del__(self): try: db = self.db except AttributeError: db = None if db: try: db.set_cast_hook(None) except TypeError: pass # probably already closed if self._closeable: try: db.close() except InternalError: pass # probably already closed # Auxiliary methods def _do_debug(self, *args): """Print a debug message""" if self.debug: s = '\n'.join(str(arg) for arg in args) if isinstance(self.debug, basestring): print(self.debug % s) elif hasattr(self.debug, 'write'): self.debug.write(s + '\n') elif callable(self.debug): self.debug(s) else: print(s) def _escape_qualified_name(self, s): """Escape a qualified name. Escapes the name for use as an SQL identifier, unless the name contains a dot, in which case the name is ambiguous (could be a qualified name or just a name with a dot in it) and must be quoted manually by the caller. """ if '.' 
not in s:
            s = self.escape_identifier(s)
        return s

    @staticmethod
    def _make_bool(d):
        """Get boolean value corresponding to d."""
        # honor the global get_bool() setting: return a Python bool
        # or the PostgreSQL literal 't'/'f'
        return bool(d) if get_bool() else ('t' if d else 'f')

    def _list_params(self, params):
        """Create a human readable parameter list."""
        return ', '.join('$%d=%r' % (n, v)
                         for n, v in enumerate(params, 1))

    # Public methods

    # escape_string and escape_bytea exist as methods,
    # so we define unescape_bytea as a method as well
    unescape_bytea = staticmethod(unescape_bytea)

    def decode_json(self, s):
        """Decode a JSON string coming from the database."""
        return (get_jsondecode() or jsondecode)(s)

    def encode_json(self, d):
        """Encode a JSON string for use within SQL."""
        return jsonencode(d)

    def close(self):
        """Close the database connection."""
        # Wraps shared library function so we can track state.
        db = self.db
        if db:
            try:
                db.set_cast_hook(None)
            except TypeError:
                pass  # probably already closed
            if self._closeable:
                db.close()
            self.db = None
        else:
            raise _int_error('Connection already closed')

    def reset(self):
        """Reset connection with current parameters.

        All derived queries and large objects derived from this connection
        will not be usable after this call.
        """
        if self.db:
            self.db.reset()
        else:
            raise _int_error('Connection already closed')

    def reopen(self):
        """Reopen connection to the database.

        Used in case we need another connection to the same database.
        Note that we can still reopen a database that we have closed.
        """
        # There is no such shared library function.
        if self._closeable:
            db = connect(*self._db_args[0], **self._db_args[1])
            # detach the cast hook from the old connection before closing it
            if self.db:
                self.db.set_cast_hook(None)
                self.db.close()
            db.set_cast_hook(self.dbtypes.typecast)
            self.db = db
        else:
            # connection was passed in from outside; restore it as-is
            self.db = self._db_args

    def begin(self, mode=None):
        """Begin a transaction."""
        qstr = 'BEGIN'
        if mode:
            qstr += ' ' + mode
        return self.query(qstr)

    start = begin

    def commit(self):
        """Commit the current transaction."""
        return self.query('COMMIT')

    end = commit

    def rollback(self, name=None):
        """Roll back the current transaction."""
        qstr = 'ROLLBACK'
        if name:
            qstr += ' TO ' + name
        return self.query(qstr)

    abort = rollback

    def savepoint(self, name):
        """Define a new savepoint within the current transaction."""
        return self.query('SAVEPOINT ' + name)

    def release(self, name):
        """Destroy a previously defined savepoint."""
        return self.query('RELEASE ' + name)

    def get_parameter(self, parameter):
        """Get the value of a run-time parameter.

        If the parameter is a string, the return value will also be a string
        that is the current setting of the run-time parameter with that name.

        You can get several parameters at once by passing a list, set or dict.
        When passing a list of parameter names, the return value will be a
        corresponding list of parameter settings.  When passing a set of
        parameter names, a new dict will be returned, mapping these parameter
        names to their settings.  Finally, if you pass a dict as parameter,
        its values will be set to the current parameter settings
        corresponding to its keys.

        By passing the special name 'all' as the parameter, you can get
        a dict of all existing configuration parameters.
""" if isinstance(parameter, basestring): parameter = [parameter] values = None elif isinstance(parameter, (list, tuple)): values = [] elif isinstance(parameter, (set, frozenset)): values = {} elif isinstance(parameter, dict): values = parameter else: raise TypeError( 'The parameter must be a string, list, set or dict') if not parameter: raise TypeError('No parameter has been specified') params = {} if isinstance(values, dict) else [] for key in parameter: param = key.strip().lower() if isinstance( key, basestring) else None if not param: raise TypeError('Invalid parameter') if param == 'all': q = 'SHOW ALL' values = self.db.query(q).getresult() values = dict(value[:2] for value in values) break if isinstance(values, dict): params[param] = key else: params.append(param) else: for param in params: q = 'SHOW %s' % (param,) value = self.db.query(q).getresult()[0][0] if values is None: values = value elif isinstance(values, list): values.append(value) else: values[params[param]] = value return values def set_parameter(self, parameter, value=None, local=False): """Set the value of a run-time parameter. If the parameter and the value are strings, the run-time parameter will be set to that value. If no value or None is passed as a value, then the run-time parameter will be restored to its default value. You can set several parameters at once by passing a list of parameter names, together with a single value that all parameters should be set to or with a corresponding list of values. You can also pass the parameters as a set if you only provide a single value. Finally, you can pass a dict with parameter names as keys. In this case, you should not pass a value, since the values for the parameters will be taken from the dict. By passing the special name 'all' as the parameter, you can reset all existing settable run-time parameters to their default values. If you set local to True, then the command takes effect for only the current transaction. 
After commit() or rollback(), the session-level setting takes effect again. Setting local to True will appear to have no effect if it is executed outside a transaction, since the transaction will end immediately. """ if isinstance(parameter, basestring): parameter = {parameter: value} elif isinstance(parameter, (list, tuple)): if isinstance(value, (list, tuple)): parameter = dict(zip(parameter, value)) else: parameter = dict.fromkeys(parameter, value) elif isinstance(parameter, (set, frozenset)): if isinstance(value, (list, tuple, set, frozenset)): value = set(value) if len(value) == 1: value = value.pop() if not(value is None or isinstance(value, basestring)): raise ValueError('A single value must be specified' ' when parameter is a set') parameter = dict.fromkeys(parameter, value) elif isinstance(parameter, dict): if value is not None: raise ValueError('A value must not be specified' ' when parameter is a dictionary') else: raise TypeError( 'The parameter must be a string, list, set or dict') if not parameter: raise TypeError('No parameter has been specified') params = {} for key, value in parameter.items(): param = key.strip().lower() if isinstance( key, basestring) else None if not param: raise TypeError('Invalid parameter') if param == 'all': if value is not None: raise ValueError('A value must ot be specified' " when parameter is 'all'") params = {'all': None} break params[param] = value local = ' LOCAL' if local else '' for param, value in params.items(): if value is None: q = 'RESET%s %s' % (local, param) else: q = 'SET%s %s TO %s' % (local, param, value) self._do_debug(q) self.db.query(q) def query(self, command, *args): """Execute a SQL command string. This method simply sends a SQL query to the database. If the query is an insert statement that inserted exactly one row into a table that has OIDs, the return value is the OID of the newly inserted row. 
        If the query is an update or delete statement, or an insert statement
        that did not insert exactly one row in a table with OIDs, then the
        number of rows affected is returned as a string.

        If it is a statement that returns rows as a result (usually a select
        statement, but maybe also an "insert/update ... returning" statement),
        this method returns a Query object that can be accessed via
        getresult() or dictresult() or simply printed.
        Otherwise, it returns `None`.

        The query can contain numbered parameters of the form $1 in place
        of any data constant.  Arguments given after the query string will
        be substituted for the corresponding numbered parameter.  Parameter
        values can also be given as a single list or tuple argument.
        """
        # Wraps shared library function for debugging.
        if not self.db:
            raise _int_error('Connection is not valid')
        if args:
            self._do_debug(command, args)
            return self.db.query(command, args)
        self._do_debug(command)
        return self.db.query(command)

    def query_formatted(self, command,
                        parameters=None, types=None, inline=False):
        """Execute a formatted SQL command string.

        Similar to query, but using Python format placeholders of the form
        %s or %(names)s instead of PostgreSQL placeholders of the form $1.
        The parameters must be passed as a tuple, list or dict.  You can
        also pass a corresponding tuple, list or dict of database types in
        order to format the parameters properly in case there is ambiguity.

        If you set inline to True, the parameters will be sent to the
        database embedded in the SQL command, otherwise they will be
        sent separately.
        """
        return self.query(*self.adapter.format_query(
            command, parameters, types, inline))

    def query_prepared(self, name, *args):
        """Execute a prepared SQL statement.

        This works like the query() method, except that instead of passing
        the SQL command, you pass the name of a prepared statement.  If you
        pass an empty name, the unnamed statement will be executed.
""" if not self.db: raise _int_error('Connection is not valid') if name is None: name = '' if args: self._do_debug('EXECUTE', name, args) return self.db.query_prepared(name, args) self._do_debug('EXECUTE', name) return self.db.query_prepared(name) def prepare(self, name, command): """Create a prepared SQL statement. This creates a prepared statement for the given command with the the given name for later execution with the query_prepared() method. The name can be empty to create an unnamed statement, in which case any pre-existing unnamed statement is automatically replaced; otherwise it is an error if the statement name is already defined in the current database session. We recommend always using named queries, since unnamed queries have a limited lifetime and can be automatically replaced or destroyed by various operations. """ if not self.db: raise _int_error('Connection is not valid') if name is None: name = '' self._do_debug('prepare', name, command) return self.db.prepare(name, command) def describe_prepared(self, name=None): """Describe a prepared SQL statement. This method returns a Query object describing the result columns of the prepared statement with the given name. If you omit the name, the unnamed statement will be described if you created one before. """ if name is None: name = '' return self.db.describe_prepared(name) def delete_prepared(self, name=None): """Delete a prepared SQL statement This deallocates a previously prepared SQL statement with the given name, or deallocates all prepared statements if you do not specify a name. Note that prepared statements are also deallocated automatically when the current session ends. """ q = "DEALLOCATE %s" % (name or 'ALL',) self._do_debug(q) return self.db.query(q) def pkey(self, table, composite=False, flush=False): """Get or set the primary key of a table. Single primary keys are returned as strings unless you set the composite flag. Composite primary keys are always represented as tuples. 
        Note that this raises a KeyError if the table does not have
        a primary key.

        If flush is set then the internal cache for primary keys will
        be flushed.  This may be necessary after the database schema or
        the search path has been changed.
        """
        pkeys = self._pkeys
        if flush:
            pkeys.clear()
            self._do_debug('The pkey cache has been flushed')
        try:  # cache lookup
            pkey = pkeys[table]
        except KeyError:  # cache miss, check the database
            q = ("SELECT a.attname, a.attnum, i.indkey FROM pg_index i"
                 " JOIN pg_attribute a ON a.attrelid = i.indrelid"
                 " AND a.attnum = ANY(i.indkey)"
                 " AND NOT a.attisdropped"
                 " WHERE i.indrelid=%s::regclass"
                 " AND i.indisprimary ORDER BY a.attnum") % (
                    _quote_if_unqualified('$1', table),)
            pkey = self.db.query(q, (table,)).getresult()
            if not pkey:
                raise KeyError('Table %s has no primary key' % table)
            # we want to use the order defined in the primary key index here,
            # not the order as defined by the columns in the table
            if len(pkey) > 1:
                indkey = pkey[0][2]
                pkey = sorted(pkey, key=lambda row: indkey.index(row[1]))
                pkey = tuple(row[0] for row in pkey)
            else:
                pkey = pkey[0][0]
            pkeys[table] = pkey  # cache it
        if composite and not isinstance(pkey, tuple):
            pkey = (pkey,)
        return pkey

    def get_databases(self):
        """Get list of databases in the system."""
        return [s[0] for s in
                self.db.query('SELECT datname FROM pg_database').getresult()]

    def get_relations(self, kinds=None, system=False):
        """Get list of relations in connected database of specified kinds.

        If kinds is None or empty, all kinds of relations are returned.
        Otherwise kinds can be a string or sequence of type letters
        specifying which kind of relations you want to list.

        Set the system flag if you want to get the system relations as well.
""" where = [] if kinds: where.append("r.relkind IN (%s)" % ','.join("'%s'" % k for k in kinds)) if not system: where.append("s.nspname NOT SIMILAR" " TO 'pg/_%|information/_schema' ESCAPE '/'") where = " WHERE %s" % ' AND '.join(where) if where else '' q = ("SELECT quote_ident(s.nspname)||'.'||quote_ident(r.relname)" " FROM pg_class r" " JOIN pg_namespace s ON s.oid = r.relnamespace%s" " ORDER BY s.nspname, r.relname") % where return [r[0] for r in self.db.query(q).getresult()] def get_tables(self, system=False): """Return list of tables in connected database. Set the system flag if you want to get the system tables as well. """ return self.get_relations('r', system) def get_attnames(self, table, with_oid=True, flush=False): """Given the name of a table, dig out the set of attribute names. Returns a read-only dictionary of attribute names (the names are the keys, the values are the names of the attributes' types) with the column names in the proper order if you iterate over it. If flush is set, then the internal cache for attribute names will be flushed. This may be necessary after the database schema or the search path has been changed. By default, only a limited number of simple types will be returned. You can get the registered types after calling use_regtypes(True). 
""" attnames = self._attnames if flush: attnames.clear() self._do_debug('The attnames cache has been flushed') try: # cache lookup names = attnames[table] except KeyError: # cache miss, check the database q = "a.attnum > 0" if with_oid: q = "(%s OR a.attname = 'oid')" % q q = self._query_attnames % (_quote_if_unqualified('$1', table), q) names = self.db.query(q, (table,)).getresult() types = self.dbtypes names = ((name[0], types.add(*name[1:])) for name in names) names = AttrDict(names) attnames[table] = names # cache it return names def use_regtypes(self, regtypes=None): """Use registered type names instead of simplified type names.""" if regtypes is None: return self.dbtypes._regtypes else: regtypes = bool(regtypes) if regtypes != self.dbtypes._regtypes: self.dbtypes._regtypes = regtypes self._attnames.clear() self.dbtypes.clear() return regtypes def has_table_privilege(self, table, privilege='select', flush=False): """Check whether current user has specified table privilege. If flush is set, then the internal cache for table privileges will be flushed. This may be necessary after privileges have been changed. """ privileges = self._privileges if flush: privileges.clear() self._do_debug('The privileges cache has been flushed') privilege = privilege.lower() try: # ask cache ret = privileges[table, privilege] except KeyError: # cache miss, ask the database q = "SELECT has_table_privilege(%s, $2)" % ( _quote_if_unqualified('$1', table),) q = self.db.query(q, (table, privilege)) ret = q.getresult()[0][0] == self._make_bool(True) privileges[table, privilege] = ret # cache it return ret def get(self, table, row, keyname=None): """Get a row from a database table or view. This method is the basic mechanism to get a single row. It assumes that the keyname specifies a unique row. It must be the name of a single column or a tuple of column names. If the keyname is not specified, then the primary key for the table is used. 
If row is a dictionary, then the value for the key is taken from it. Otherwise, the row must be a single value or a tuple of values corresponding to the passed keyname or primary key. The fetched row from the table will be returned as a new dictionary or used to replace the existing values when row was passed as a dictionary. The OID is also put into the dictionary if the table has one, but in order to allow the caller to work with multiple tables, it is munged as "oid(table)" using the actual name of the table. """ if table.endswith('*'): # hint for descendant tables can be ignored table = table[:-1].rstrip() attnames = self.get_attnames(table) qoid = _oid_key(table) if 'oid' in attnames else None if keyname and isinstance(keyname, basestring): keyname = (keyname,) if qoid and isinstance(row, dict) and qoid in row and 'oid' not in row: row['oid'] = row[qoid] if not keyname: try: # if keyname is not specified, try using the primary key keyname = self.pkey(table, True) except KeyError: # the table has no primary key # try using the oid instead if qoid and isinstance(row, dict) and 'oid' in row: keyname = ('oid',) else: raise _prg_error('Table %s has no primary key' % table) else: # the table has a primary key # check whether all key columns have values if isinstance(row, dict) and not set(keyname).issubset(row): # try using the oid instead if qoid and 'oid' in row: keyname = ('oid',) else: raise KeyError( 'Missing value in row for specified keyname') if not isinstance(row, dict): if not isinstance(row, (tuple, list)): row = [row] if len(keyname) != len(row): raise KeyError( 'Differing number of items in keyname and row') row = dict(zip(keyname, row)) params = self.adapter.parameter_list() adapt = params.add col = self.escape_identifier what = 'oid, *' if qoid else '*' where = ' AND '.join('%s = %s' % ( col(k), adapt(row[k], attnames[k])) for k in keyname) if 'oid' in row: if qoid: row[qoid] = row['oid'] del row['oid'] q = 'SELECT %s FROM %s WHERE %s LIMIT 1' % ( 
what, self._escape_qualified_name(table), where) self._do_debug(q, params) q = self.db.query(q, params) res = q.dictresult() if not res: raise _db_error('No such record in %s\nwhere %s\nwith %s' % ( table, where, self._list_params(params))) for n, value in res[0].items(): if qoid and n == 'oid': n = qoid row[n] = value return row def insert(self, table, row=None, **kw): """Insert a row into a database table. This method inserts a row into a table. The name of the table must be passed as the first parameter. The other parameters are used for providing the data of the row that shall be inserted into the table. If a dictionary is supplied as the second parameter, it starts with that. Otherwise it uses a blank dictionary. Either way the dictionary is updated from the keywords. The dictionary is then reloaded with the values actually inserted in order to pick up values modified by rules, triggers, etc. """ if table.endswith('*'): # hint for descendant tables can be ignored table = table[:-1].rstrip() if row is None: row = {} row.update(kw) if 'oid' in row: del row['oid'] # do not insert oid attnames = self.get_attnames(table) qoid = _oid_key(table) if 'oid' in attnames else None params = self.adapter.parameter_list() adapt = params.add col = self.escape_identifier names, values = [], [] for n in attnames: if n in row: names.append(col(n)) values.append(adapt(row[n], attnames[n])) if not names: raise _prg_error('No column found that can be inserted') names, values = ', '.join(names), ', '.join(values) ret = 'oid, *' if qoid else '*' q = 'INSERT INTO %s (%s) VALUES (%s) RETURNING %s' % ( self._escape_qualified_name(table), names, values, ret) self._do_debug(q, params) q = self.db.query(q, params) res = q.dictresult() if res: # this should always be true for n, value in res[0].items(): if qoid and n == 'oid': n = qoid row[n] = value return row def update(self, table, row=None, **kw): """Update an existing row in a database table. 
Similar to insert, but updates an existing row. The update is based on the primary key of the table or the OID value as munged by get() or passed as keyword. The OID will take precedence if provided, so that it is possible to update the primary key itself. The dictionary is then modified to reflect any changes caused by the update due to triggers, rules, default values, etc. """ if table.endswith('*'): table = table[:-1].rstrip() # need parent table name attnames = self.get_attnames(table) qoid = _oid_key(table) if 'oid' in attnames else None if row is None: row = {} elif 'oid' in row: del row['oid'] # only accept oid key from named args for safety row.update(kw) if qoid and qoid in row and 'oid' not in row: row['oid'] = row[qoid] if qoid and 'oid' in row: # try using the oid keyname = ('oid',) else: # try using the primary key try: keyname = self.pkey(table, True) except KeyError: # the table has no primary key raise _prg_error('Table %s has no primary key' % table) # check whether all key columns have values if not set(keyname).issubset(row): raise KeyError('Missing value for primary key in row') params = self.adapter.parameter_list() adapt = params.add col = self.escape_identifier where = ' AND '.join('%s = %s' % ( col(k), adapt(row[k], attnames[k])) for k in keyname) if 'oid' in row: if qoid: row[qoid] = row['oid'] del row['oid'] values = [] keyname = set(keyname) for n in attnames: if n in row and n not in keyname: values.append('%s = %s' % (col(n), adapt(row[n], attnames[n]))) if not values: return row values = ', '.join(values) ret = 'oid, *' if qoid else '*' q = 'UPDATE %s SET %s WHERE %s RETURNING %s' % ( self._escape_qualified_name(table), values, where, ret) self._do_debug(q, params) q = self.db.query(q, params) res = q.dictresult() if res: # may be empty when row does not exist for n, value in res[0].items(): if qoid and n == 'oid': n = qoid row[n] = value return row def upsert(self, table, row=None, **kw): """Insert a row into a database table with 
conflict resolution This method inserts a row into a table, but instead of raising a ProgrammingError exception in case a row with the same primary key already exists, an update will be executed instead. This will be performed as a single atomic operation on the database, so race conditions can be avoided. Like the insert method, the first parameter is the name of the table and the second parameter can be used to pass the values to be inserted as a dictionary. Unlike the insert und update statement, keyword parameters are not used to modify the dictionary, but to specify which columns shall be updated in case of a conflict, and in which way: A value of False or None means the column shall not be updated, a value of True means the column shall be updated with the value that has been proposed for insertion, i.e. has been passed as value in the dictionary. Columns that are not specified by keywords but appear as keys in the dictionary are also updated like in the case keywords had been passed with the value True. So if in the case of a conflict you want to update every column that has been passed in the dictionary row, you would call upsert(table, row). If you don't want to do anything in case of a conflict, i.e. leave the existing row as it is, call upsert(table, row, **dict.fromkeys(row)). If you need more fine-grained control of what gets updated, you can also pass strings in the keyword parameters. These strings will be used as SQL expressions for the update columns. In these expressions you can refer to the value that already exists in the table by prefixing the column name with "included.", and to the value that has been proposed for insertion by prefixing the column name with the "excluded." The dictionary is modified in any case to reflect the values in the database after the operation has completed. Note: The method uses the PostgreSQL "upsert" feature which is only available since PostgreSQL 9.5. 
""" if table.endswith('*'): # hint for descendant tables can be ignored table = table[:-1].rstrip() if row is None: row = {} if 'oid' in row: del row['oid'] # do not insert oid if 'oid' in kw: del kw['oid'] # do not update oid attnames = self.get_attnames(table) qoid = _oid_key(table) if 'oid' in attnames else None params = self.adapter.parameter_list() adapt = params.add col = self.escape_identifier names, values, updates = [], [], [] for n in attnames: if n in row: names.append(col(n)) values.append(adapt(row[n], attnames[n])) names, values = ', '.join(names), ', '.join(values) try: keyname = self.pkey(table, True) except KeyError: raise _prg_error('Table %s has no primary key' % table) target = ', '.join(col(k) for k in keyname) update = [] keyname = set(keyname) keyname.add('oid') for n in attnames: if n not in keyname: value = kw.get(n, True) if value: if not isinstance(value, basestring): value = 'excluded.%s' % col(n) update.append('%s = %s' % (col(n), value)) if not values: return row do = 'update set %s' % ', '.join(update) if update else 'nothing' ret = 'oid, *' if qoid else '*' q = ('INSERT INTO %s AS included (%s) VALUES (%s)' ' ON CONFLICT (%s) DO %s RETURNING %s') % ( self._escape_qualified_name(table), names, values, target, do, ret) self._do_debug(q, params) try: q = self.db.query(q, params) except ProgrammingError: if self.server_version < 90500: raise _prg_error( 'Upsert operation is not supported by PostgreSQL version') raise # re-raise original error res = q.dictresult() if res: # may be empty with "do nothing" for n, value in res[0].items(): if qoid and n == 'oid': n = qoid row[n] = value else: self.get(table, row) return row def clear(self, table, row=None): """Clear all the attributes to values determined by the types. Numeric types are set to 0, Booleans are set to false, and everything else is set to the empty string. 
If the row argument is present, it is used as the row dictionary and any entries matching attribute names are cleared with everything else left unchanged. """ # At some point we will need a way to get defaults from a table. if row is None: row = {} # empty if argument is not present attnames = self.get_attnames(table) for n, t in attnames.items(): if n == 'oid': continue t = t.simple if t in DbTypes._num_types: row[n] = 0 elif t == 'bool': row[n] = self._make_bool(False) else: row[n] = '' return row def delete(self, table, row=None, **kw): """Delete an existing row in a database table. This method deletes the row from a table. It deletes based on the primary key of the table or the OID value as munged by get() or passed as keyword. The OID will take precedence if provided. The return value is the number of deleted rows (i.e. 0 if the row did not exist and 1 if the row was deleted). Note that if the row cannot be deleted because e.g. it is still referenced by another table, this method raises a ProgrammingError. 
""" if table.endswith('*'): # hint for descendant tables can be ignored table = table[:-1].rstrip() attnames = self.get_attnames(table) qoid = _oid_key(table) if 'oid' in attnames else None if row is None: row = {} elif 'oid' in row: del row['oid'] # only accept oid key from named args for safety row.update(kw) if qoid and qoid in row and 'oid' not in row: row['oid'] = row[qoid] if qoid and 'oid' in row: # try using the oid keyname = ('oid',) else: # try using the primary key try: keyname = self.pkey(table, True) except KeyError: # the table has no primary key raise _prg_error('Table %s has no primary key' % table) # check whether all key columns have values if not set(keyname).issubset(row): raise KeyError('Missing value for primary key in row') params = self.adapter.parameter_list() adapt = params.add col = self.escape_identifier where = ' AND '.join('%s = %s' % ( col(k), adapt(row[k], attnames[k])) for k in keyname) if 'oid' in row: if qoid: row[qoid] = row['oid'] del row['oid'] q = 'DELETE FROM %s WHERE %s' % ( self._escape_qualified_name(table), where) self._do_debug(q, params) res = self.db.query(q, params) return int(res) def truncate(self, table, restart=False, cascade=False, only=False): """Empty a table or set of tables. This method quickly removes all rows from the given table or set of tables. It has the same effect as an unqualified DELETE on each table, but since it does not actually scan the tables it is faster. Furthermore, it reclaims disk space immediately, rather than requiring a subsequent VACUUM operation. This is most useful on large tables. If restart is set to True, sequences owned by columns of the truncated table(s) are automatically restarted. If cascade is set to True, it also truncates all tables that have foreign-key references to any of the named tables. If the parameter only is not set to True, all the descendant tables (if any) will also be truncated. 
Optionally, a '*' can be specified after the table name to explicitly indicate that descendant tables are included. """ if isinstance(table, basestring): only = {table: only} table = [table] elif isinstance(table, (list, tuple)): if isinstance(only, (list, tuple)): only = dict(zip(table, only)) else: only = dict.fromkeys(table, only) elif isinstance(table, (set, frozenset)): only = dict.fromkeys(table, only) else: raise TypeError('The table must be a string, list or set') if not (restart is None or isinstance(restart, (bool, int))): raise TypeError('Invalid type for the restart option') if not (cascade is None or isinstance(cascade, (bool, int))): raise TypeError('Invalid type for the cascade option') tables = [] for t in table: u = only.get(t) if not (u is None or isinstance(u, (bool, int))): raise TypeError('Invalid type for the only option') if t.endswith('*'): if u: raise ValueError( 'Contradictory table name and only options') t = t[:-1].rstrip() t = self._escape_qualified_name(t) if u: t = 'ONLY %s' % t tables.append(t) q = ['TRUNCATE', ', '.join(tables)] if restart: q.append('RESTART IDENTITY') if cascade: q.append('CASCADE') q = ' '.join(q) self._do_debug(q) return self.db.query(q) def get_as_list(self, table, what=None, where=None, order=None, limit=None, offset=None, scalar=False): """Get a table as a list. This gets a convenient representation of the table as a list of named tuples in Python. You only need to pass the name of the table (or any other SQL expression returning rows). Note that by default this will return the full content of the table which can be huge and overflow your memory. However, you can control the amount of data returned using the other optional parameters. The parameter 'what' can restrict the query to only return a subset of the table columns. It can be a string, list or a tuple. The parameter 'where' can restrict the query to only return a subset of the table rows. 
It can be a string, list or a tuple of SQL expressions that all need to be fulfilled. The parameter 'order' specifies the ordering of the rows. It can also be a other string, list or a tuple. If no ordering is specified, the result will be ordered by the primary key(s) or all columns if no primary key exists. You can set 'order' to False if you don't care about the ordering. The parameters 'limit' and 'offset' can be integers specifying the maximum number of rows returned and a number of rows skipped over. If you set the 'scalar' option to True, then instead of the named tuples you will get the first items of these tuples. This is useful if the result has only one column anyway. """ if not table: raise TypeError('The table name is missing') if what: if isinstance(what, (list, tuple)): what = ', '.join(map(str, what)) if order is None: order = what else: what = '*' q = ['SELECT', what, 'FROM', table] if where: if isinstance(where, (list, tuple)): where = ' AND '.join(map(str, where)) q.extend(['WHERE', where]) if order is None: try: order = self.pkey(table, True) except (KeyError, ProgrammingError): try: order = list(self.get_attnames(table)) except (KeyError, ProgrammingError): pass if order: if isinstance(order, (list, tuple)): order = ', '.join(map(str, order)) q.extend(['ORDER BY', order]) if limit: q.append('LIMIT %d' % limit) if offset: q.append('OFFSET %d' % offset) q = ' '.join(q) self._do_debug(q) q = self.db.query(q) res = q.namedresult() if res and scalar: res = [row[0] for row in res] return res def get_as_dict(self, table, keyname=None, what=None, where=None, order=None, limit=None, offset=None, scalar=False): """Get a table as a dictionary. This method is similar to get_as_list(), but returns the table as a Python dict instead of a Python list, which can be even more convenient. The primary key column(s) of the table will be used as the keys of the dictionary, while the other column(s) will be the corresponding values. 
The keys will be named tuples if the table has a composite primary key. The rows will be also named tuples unless the 'scalar' option has been set to True. With the optional parameter 'keyname' you can specify an alternative set of columns to be used as the keys of the dictionary. It must be set as a string, list or a tuple. If the Python version supports it, the dictionary will be an OrderedDict using the order specified with the 'order' parameter or the key column(s) if not specified. You can set 'order' to False if you don't care about the ordering. In this case the returned dictionary will be an ordinary one. """ if not table: raise TypeError('The table name is missing') if not keyname: try: keyname = self.pkey(table, True) except (KeyError, ProgrammingError): raise _prg_error('Table %s has no primary key' % table) if isinstance(keyname, basestring): keyname = [keyname] elif not isinstance(keyname, (list, tuple)): raise KeyError('The keyname must be a string, list or tuple') if what: if isinstance(what, (list, tuple)): what = ', '.join(map(str, what)) if order is None: order = what else: what = '*' q = ['SELECT', what, 'FROM', table] if where: if isinstance(where, (list, tuple)): where = ' AND '.join(map(str, where)) q.extend(['WHERE', where]) if order is None: order = keyname if order: if isinstance(order, (list, tuple)): order = ', '.join(map(str, order)) q.extend(['ORDER BY', order]) if limit: q.append('LIMIT %d' % limit) if offset: q.append('OFFSET %d' % offset) q = ' '.join(q) self._do_debug(q) q = self.db.query(q) res = q.getresult() cls = OrderedDict if order else dict if not res: return cls() keyset = set(keyname) fields = q.listfields() if not keyset.issubset(fields): raise KeyError('Missing keyname in row') keyind, rowind = [], [] for i, f in enumerate(fields): (keyind if f in keyset else rowind).append(i) keytuple = len(keyind) > 1 getkey = itemgetter(*keyind) keys = map(getkey, res) if scalar: rowind = rowind[:1] rowtuple = False else: rowtuple = 
len(rowind) > 1 if scalar or rowtuple: getrow = itemgetter(*rowind) else: rowind = rowind[0] getrow = lambda row: (row[rowind],) rowtuple = True rows = map(getrow, res) if keytuple or rowtuple: if keytuple: keys = _namediter(_MemoryQuery(keys, keyname)) if rowtuple: fields = [f for f in fields if f not in keyset] rows = _namediter(_MemoryQuery(rows, fields)) return cls(zip(keys, rows)) def notification_handler(self, event, callback, arg_dict=None, timeout=None, stop_event=None): """Get notification handler that will run the given callback.""" return NotificationHandler(self, event, callback, arg_dict, timeout, stop_event) # if run as script, print some information if __name__ == '__main__': print('PyGreSQL version' + version) print('') print(__doc__) PyGreSQL-5.1/pgdb.py0000644000175100077410000017145413466770070014233 0ustar darcypyg00000000000000#!/usr/bin/python # # $Id: pgdb.py 995 2019-04-25 14:10:20Z cito $ # # PyGreSQL - a Python interface for the PostgreSQL database. # # This file contains the DB-API 2 compatible pgdb module. # # Copyright (c) 2019 by the PyGreSQL Development Team # # Please see the LICENSE.TXT file for specific restrictions. """pgdb - DB-API 2.0 compliant module for PygreSQL. (c) 1999, Pascal Andre . See package documentation for further information on copyright. Inline documentation is sparse. See DB-API 2.0 specification for usage information: http://www.python.org/peps/pep-0249.html Basic usage: pgdb.connect(connect_string) # open a connection # connect_string = 'host:database:user:password:opt' # All parts are optional. You may also pass host through # password as keyword arguments. To pass a port, # pass it in the host keyword parameter: connection = pgdb.connect(host='localhost:5432') cursor = connection.cursor() # open a cursor cursor.execute(query[, params]) # Execute a query, binding params (a dictionary) if they are # passed. The binding syntax is the same as the % operator # for dictionaries, and no quoting is done. 
cursor.executemany(query, list of params) # Execute a query many times, binding each param dictionary # from the list. cursor.fetchone() # fetch one row, [value, value, ...] cursor.fetchall() # fetch all rows, [[value, value, ...], ...] cursor.fetchmany([size]) # returns size or cursor.arraysize number of rows, # [[value, value, ...], ...] from result set. # Default cursor.arraysize is 1. cursor.description # returns information about the columns # [(column_name, type_name, display_size, # internal_size, precision, scale, null_ok), ...] # Note that display_size, precision, scale and null_ok # are not implemented. cursor.rowcount # number of rows available in the result set # Available after a call to execute. connection.commit() # commit transaction connection.rollback() # or rollback transaction cursor.close() # close the cursor connection.close() # close the connection """ from __future__ import print_function, division from _pg import * __version__ = version from datetime import date, time, datetime, timedelta, tzinfo from time import localtime from decimal import Decimal from uuid import UUID as Uuid from math import isnan, isinf try: from collections.abc import Iterable except ImportError: # Python < 3.3 from collections import Iterable from collections import namedtuple from keyword import iskeyword from functools import partial from re import compile as regex from json import loads as jsondecode, dumps as jsonencode try: # noinspection PyUnresolvedReferences long except NameError: # Python >= 3.0 long = int try: # noinspection PyUnresolvedReferences unicode except NameError: # Python >= 3.0 unicode = str try: # noinspection PyUnresolvedReferences basestring except NameError: # Python >= 3.0 basestring = (str, bytes) try: from functools import lru_cache except ImportError: # Python < 3.2 from functools import update_wrapper try: from _thread import RLock except ImportError: class RLock: # for builds without threads def __enter__(self): pass def __exit__(self, 
exctype, excinst, exctb): pass def lru_cache(maxsize=128): """Simplified functools.lru_cache decorator for one argument.""" def decorator(function): sentinel = object() cache = {} get = cache.get lock = RLock() root = [] root_full = [root, False] root[:] = [root, root, None, None] if maxsize == 0: def wrapper(arg): res = function(arg) return res elif maxsize is None: def wrapper(arg): res = get(arg, sentinel) if res is not sentinel: return res res = function(arg) cache[arg] = res return res else: def wrapper(arg): with lock: link = get(arg) if link is not None: root = root_full[0] prev, next, _arg, res = link prev[1] = next next[0] = prev last = root[0] last[1] = root[0] = link link[0] = last link[1] = root return res res = function(arg) with lock: root, full = root_full if arg in cache: pass elif full: oldroot = root oldroot[2] = arg oldroot[3] = res root = root_full[0] = oldroot[1] oldarg = root[2] oldres = root[3] # keep reference root[2] = root[3] = None del cache[oldarg] cache[arg] = oldroot else: last = root[0] link = [last, root, arg, res] last[1] = root[0] = cache[arg] = link if len(cache) >= maxsize: root_full[1] = True return res wrapper.__wrapped__ = function return update_wrapper(wrapper, function) return decorator ### Module Constants # compliant with DB API 2.0 apilevel = '2.0' # module may be shared, but not connections threadsafety = 1 # this module use extended python format codes paramstyle = 'pyformat' # shortcut methods have been excluded from DB API 2 and # are not recommended by the DB SIG, but they can be handy shortcutmethods = 1 ### Internal Type Handling try: from inspect import signature except ImportError: # Python < 3.3 from inspect import getargspec def get_args(func): return getargspec(func).args else: def get_args(func): return list(signature(func).parameters) try: from datetime import timezone except ImportError: # Python < 3.2 class timezone(tzinfo): """Simple timezone implementation.""" def __init__(self, offset, name=None): 
self.offset = offset if not name: minutes = self.offset.days * 1440 + self.offset.seconds // 60 if minutes < 0: hours, minutes = divmod(-minutes, 60) hours = -hours else: hours, minutes = divmod(minutes, 60) name = 'UTC%+03d:%02d' % (hours, minutes) self.name = name def utcoffset(self, dt): return self.offset def tzname(self, dt): return self.name def dst(self, dt): return None timezone.utc = timezone(timedelta(0), 'UTC') _has_timezone = False else: _has_timezone = True # time zones used in Postgres timestamptz output _timezones = dict(CET='+0100', EET='+0200', EST='-0500', GMT='+0000', HST='-1000', MET='+0100', MST='-0700', UCT='+0000', UTC='+0000', WET='+0000') def _timezone_as_offset(tz): if tz.startswith(('+', '-')): if len(tz) < 5: return tz + '00' return tz.replace(':', '') return _timezones.get(tz, '+0000') def _get_timezone(tz): tz = _timezone_as_offset(tz) minutes = 60 * int(tz[1:3]) + int(tz[3:5]) if tz[0] == '-': minutes = -minutes return timezone(timedelta(minutes=minutes), tz) def decimal_type(decimal_type=None): """Get or set global type to be used for decimal values. Note that connections cache cast functions. To be sure a global change is picked up by a running connection, call con.type_cache.reset_typecast(). """ global Decimal if decimal_type is not None: Decimal = decimal_type set_typecast('numeric', decimal_type) return Decimal def cast_bool(value): """Cast boolean value in database format to bool.""" if value: return value[0] in ('t', 'T') def cast_money(value): """Cast money value in database format to Decimal.""" if value: value = value.replace('(', '-') return Decimal(''.join(c for c in value if c.isdigit() or c in '.-')) def cast_int2vector(value): """Cast an int2vector value.""" return [int(v) for v in value.split()] def cast_date(value, connection): """Cast a date value.""" # The output format depends on the server setting DateStyle. The default # setting ISO and the setting for German are actually unambiguous. 
The # order of days and months in the other two settings is however ambiguous, # so at least here we need to consult the setting to properly parse values. if value == '-infinity': return date.min if value == 'infinity': return date.max value = value.split() if value[-1] == 'BC': return date.min value = value[0] if len(value) > 10: return date.max fmt = connection.date_format() return datetime.strptime(value, fmt).date() def cast_time(value): """Cast a time value.""" fmt = '%H:%M:%S.%f' if len(value) > 8 else '%H:%M:%S' return datetime.strptime(value, fmt).time() _re_timezone = regex('(.*)([+-].*)') def cast_timetz(value): """Cast a timetz value.""" tz = _re_timezone.match(value) if tz: value, tz = tz.groups() else: tz = '+0000' fmt = '%H:%M:%S.%f' if len(value) > 8 else '%H:%M:%S' if _has_timezone: value += _timezone_as_offset(tz) fmt += '%z' return datetime.strptime(value, fmt).timetz() return datetime.strptime(value, fmt).timetz().replace( tzinfo=_get_timezone(tz)) def cast_timestamp(value, connection): """Cast a timestamp value.""" if value == '-infinity': return datetime.min if value == 'infinity': return datetime.max value = value.split() if value[-1] == 'BC': return datetime.min fmt = connection.date_format() if fmt.endswith('-%Y') and len(value) > 2: value = value[1:5] if len(value[3]) > 4: return datetime.max fmt = ['%d %b' if fmt.startswith('%d') else '%b %d', '%H:%M:%S.%f' if len(value[2]) > 8 else '%H:%M:%S', '%Y'] else: if len(value[0]) > 10: return datetime.max fmt = [fmt, '%H:%M:%S.%f' if len(value[1]) > 8 else '%H:%M:%S'] return datetime.strptime(' '.join(value), ' '.join(fmt)) def cast_timestamptz(value, connection): """Cast a timestamptz value.""" if value == '-infinity': return datetime.min if value == 'infinity': return datetime.max value = value.split() if value[-1] == 'BC': return datetime.min fmt = connection.date_format() if fmt.endswith('-%Y') and len(value) > 2: value = value[1:] if len(value[3]) > 4: return datetime.max fmt = ['%d %b' if 
fmt.startswith('%d') else '%b %d', '%H:%M:%S.%f' if len(value[2]) > 8 else '%H:%M:%S', '%Y'] value, tz = value[:-1], value[-1] else: if fmt.startswith('%Y-'): tz = _re_timezone.match(value[1]) if tz: value[1], tz = tz.groups() else: tz = '+0000' else: value, tz = value[:-1], value[-1] if len(value[0]) > 10: return datetime.max fmt = [fmt, '%H:%M:%S.%f' if len(value[1]) > 8 else '%H:%M:%S'] if _has_timezone: value.append(_timezone_as_offset(tz)) fmt.append('%z') return datetime.strptime(' '.join(value), ' '.join(fmt)) return datetime.strptime(' '.join(value), ' '.join(fmt)).replace( tzinfo=_get_timezone(tz)) _re_interval_sql_standard = regex( '(?:([+-])?([0-9]+)-([0-9]+) ?)?' '(?:([+-]?[0-9]+)(?!:) ?)?' '(?:([+-])?([0-9]+):([0-9]+):([0-9]+)(?:\\.([0-9]+))?)?') _re_interval_postgres = regex( '(?:([+-]?[0-9]+) ?years? ?)?' '(?:([+-]?[0-9]+) ?mons? ?)?' '(?:([+-]?[0-9]+) ?days? ?)?' '(?:([+-])?([0-9]+):([0-9]+):([0-9]+)(?:\\.([0-9]+))?)?') _re_interval_postgres_verbose = regex( '@ ?(?:([+-]?[0-9]+) ?years? ?)?' '(?:([+-]?[0-9]+) ?mons? ?)?' '(?:([+-]?[0-9]+) ?days? ?)?' '(?:([+-]?[0-9]+) ?hours? ?)?' '(?:([+-]?[0-9]+) ?mins? ?)?' '(?:([+-])?([0-9]+)(?:\\.([0-9]+))? ?secs?)? ?(ago)?') _re_interval_iso_8601 = regex( 'P(?:([+-]?[0-9]+)Y)?' '(?:([+-]?[0-9]+)M)?' '(?:([+-]?[0-9]+)D)?' '(?:T(?:([+-]?[0-9]+)H)?' '(?:([+-]?[0-9]+)M)?' '(?:([+-])?([0-9]+)(?:\\.([0-9]+))?S)?)?') def cast_interval(value): """Cast an interval value.""" # The output format depends on the server setting IntervalStyle, but it's # not necessary to consult this setting to parse it. It's faster to just # check all possible formats, and there is no ambiguity here. 
m = _re_interval_iso_8601.match(value) if m: m = [d or '0' for d in m.groups()] secs_ago = m.pop(5) == '-' m = [int(d) for d in m] years, mons, days, hours, mins, secs, usecs = m if secs_ago: secs = -secs usecs = -usecs else: m = _re_interval_postgres_verbose.match(value) if m: m, ago = [d or '0' for d in m.groups()[:8]], m.group(9) secs_ago = m.pop(5) == '-' m = [-int(d) for d in m] if ago else [int(d) for d in m] years, mons, days, hours, mins, secs, usecs = m if secs_ago: secs = - secs usecs = -usecs else: m = _re_interval_postgres.match(value) if m and any(m.groups()): m = [d or '0' for d in m.groups()] hours_ago = m.pop(3) == '-' m = [int(d) for d in m] years, mons, days, hours, mins, secs, usecs = m if hours_ago: hours = -hours mins = -mins secs = -secs usecs = -usecs else: m = _re_interval_sql_standard.match(value) if m and any(m.groups()): m = [d or '0' for d in m.groups()] years_ago = m.pop(0) == '-' hours_ago = m.pop(3) == '-' m = [int(d) for d in m] years, mons, days, hours, mins, secs, usecs = m if years_ago: years = -years mons = -mons if hours_ago: hours = -hours mins = -mins secs = -secs usecs = -usecs else: raise ValueError('Cannot parse interval: %s' % value) days += 365 * years + 30 * mons return timedelta(days=days, hours=hours, minutes=mins, seconds=secs, microseconds=usecs) class Typecasts(dict): """Dictionary mapping database types to typecast functions. The cast functions get passed the string representation of a value in the database which they need to convert to a Python object. The passed string will never be None since NULL values are already handled before the cast function is called. 
""" # the default cast functions # (str functions are ignored but have been added for faster access) defaults = {'char': str, 'bpchar': str, 'name': str, 'text': str, 'varchar': str, 'bool': cast_bool, 'bytea': unescape_bytea, 'int2': int, 'int4': int, 'serial': int, 'int8': long, 'oid': int, 'hstore': cast_hstore, 'json': jsondecode, 'jsonb': jsondecode, 'float4': float, 'float8': float, 'numeric': Decimal, 'money': cast_money, 'date': cast_date, 'interval': cast_interval, 'time': cast_time, 'timetz': cast_timetz, 'timestamp': cast_timestamp, 'timestamptz': cast_timestamptz, 'int2vector': cast_int2vector, 'uuid': Uuid, 'anyarray': cast_array, 'record': cast_record} connection = None # will be set in local connection specific instances def __missing__(self, typ): """Create a cast function if it is not cached. Note that this class never raises a KeyError, but returns None when no special cast function exists. """ if not isinstance(typ, str): raise TypeError('Invalid type: %s' % typ) cast = self.defaults.get(typ) if cast: # store default for faster access cast = self._add_connection(cast) self[typ] = cast elif typ.startswith('_'): # create array cast base_cast = self[typ[1:]] cast = self.create_array_cast(base_cast) if base_cast: # store only if base type exists self[typ] = cast return cast @staticmethod def _needs_connection(func): """Check if a typecast function needs a connection argument.""" try: args = get_args(func) except (TypeError, ValueError): return False else: return 'connection' in args[1:] def _add_connection(self, cast): """Add a connection argument to the typecast function if necessary.""" if not self.connection or not self._needs_connection(cast): return cast return partial(cast, connection=self.connection) def get(self, typ, default=None): """Get the typecast function for the given database type.""" return self[typ] or default def set(self, typ, cast): """Set a typecast function for the specified database type(s).""" if isinstance(typ, basestring): 
typ = [typ] if cast is None: for t in typ: self.pop(t, None) self.pop('_%s' % t, None) else: if not callable(cast): raise TypeError("Cast parameter must be callable") for t in typ: self[t] = self._add_connection(cast) self.pop('_%s' % t, None) def reset(self, typ=None): """Reset the typecasts for the specified type(s) to their defaults. When no type is specified, all typecasts will be reset. """ defaults = self.defaults if typ is None: self.clear() self.update(defaults) else: if isinstance(typ, basestring): typ = [typ] for t in typ: cast = defaults.get(t) if cast: self[t] = self._add_connection(cast) t = '_%s' % t cast = defaults.get(t) if cast: self[t] = self._add_connection(cast) else: self.pop(t, None) else: self.pop(t, None) self.pop('_%s' % t, None) def create_array_cast(self, basecast): """Create an array typecast for the given base cast.""" cast_array = self['anyarray'] def cast(v): return cast_array(v, basecast) return cast def create_record_cast(self, name, fields, casts): """Create a named record typecast for the given fields and casts.""" cast_record = self['record'] record = namedtuple(name, fields) def cast(v): return record(*cast_record(v, casts)) return cast _typecasts = Typecasts() # this is the global typecast dictionary def get_typecast(typ): """Get the global typecast function for the given database type(s).""" return _typecasts.get(typ) def set_typecast(typ, cast): """Set a global typecast function for the given database type(s). Note that connections cache cast functions. To be sure a global change is picked up by a running connection, call con.type_cache.reset_typecast(). """ _typecasts.set(typ, cast) def reset_typecast(typ=None): """Reset the global typecasts for the given type(s) to their default. When no type is specified, all typecasts will be reset. Note that connections cache cast functions. To be sure a global change is picked up by a running connection, call con.type_cache.reset_typecast(). 
""" _typecasts.reset(typ) class LocalTypecasts(Typecasts): """Map typecasts, including local composite types, to cast functions.""" defaults = _typecasts connection = None # will be set in a connection specific instance def __missing__(self, typ): """Create a cast function if it is not cached.""" if typ.startswith('_'): base_cast = self[typ[1:]] cast = self.create_array_cast(base_cast) if base_cast: self[typ] = cast else: cast = self.defaults.get(typ) if cast: cast = self._add_connection(cast) self[typ] = cast else: fields = self.get_fields(typ) if fields: casts = [self[field.type] for field in fields] fields = [field.name for field in fields] cast = self.create_record_cast(typ, fields, casts) self[typ] = cast return cast def get_fields(self, typ): """Return the fields for the given record type. This method will be replaced with a method that looks up the fields using the type cache of the connection. """ return [] class TypeCode(str): """Class representing the type_code used by the DB-API 2.0. TypeCode objects are strings equal to the PostgreSQL type name, but carry some additional information. """ @classmethod def create(cls, oid, name, len, type, category, delim, relid): """Create a type code for a PostgreSQL data type.""" self = cls(name) self.oid = oid self.len = len self.type = type self.category = category self.delim = delim self.relid = relid return self FieldInfo = namedtuple('FieldInfo', ['name', 'type']) class TypeCache(dict): """Cache for database types. This cache maps type OIDs and names to TypeCode strings containing important information on the associated database type. 
""" def __init__(self, cnx): """Initialize type cache for connection.""" super(TypeCache, self).__init__() self._escape_string = cnx.escape_string self._src = cnx.source() self._typecasts = LocalTypecasts() self._typecasts.get_fields = self.get_fields self._typecasts.connection = cnx if cnx.server_version < 80400: # older remote databases (not officially supported) self._query_pg_type = ("SELECT oid, typname," " typlen, typtype, null as typcategory, typdelim, typrelid" " FROM pg_type WHERE oid=%s") else: self._query_pg_type = ("SELECT oid, typname," " typlen, typtype, typcategory, typdelim, typrelid" " FROM pg_type WHERE oid=%s") def __missing__(self, key): """Get the type info from the database if it is not cached.""" if isinstance(key, int): oid = key else: if '.' not in key and '"' not in key: key = '"%s"' % (key,) oid = "'%s'::regtype" % (self._escape_string(key),) try: self._src.execute(self._query_pg_type % (oid,)) except ProgrammingError: res = None else: res = self._src.fetch(1) if not res: raise KeyError('Type %s could not be found' % (key,)) res = res[0] type_code = TypeCode.create(int(res[0]), res[1], int(res[2]), res[3], res[4], res[5], int(res[6])) self[type_code.oid] = self[str(type_code)] = type_code return type_code def get(self, key, default=None): """Get the type even if it is not cached.""" try: return self[key] except KeyError: return default def get_fields(self, typ): """Get the names and types of the fields of composite types.""" if not isinstance(typ, TypeCode): typ = self.get(typ) if not typ: return None if not typ.relid: return None # this type is not composite self._src.execute("SELECT attname, atttypid" " FROM pg_attribute WHERE attrelid=%s AND attnum>0" " AND NOT attisdropped ORDER BY attnum" % (typ.relid,)) return [FieldInfo(name, self.get(int(oid))) for name, oid in self._src.fetch(-1)] def get_typecast(self, typ): """Get the typecast function for the given database type.""" return self._typecasts.get(typ) def set_typecast(self, typ, 
cast): """Set a typecast function for the specified database type(s).""" self._typecasts.set(typ, cast) def reset_typecast(self, typ=None): """Reset the typecast function for the specified database type(s).""" self._typecasts.reset(typ) def typecast(self, value, typ): """Cast the given value according to the given database type.""" if value is None: # for NULL values, no typecast is necessary return None cast = self.get_typecast(typ) if not cast or cast is str: # no typecast is necessary return value return cast(value) class _quotedict(dict): """Dictionary with auto quoting of its items. The quote attribute must be set to the desired quote function. """ def __getitem__(self, key): return self.quote(super(_quotedict, self).__getitem__(key)) ### Error Messages def _db_error(msg, cls=DatabaseError): """Return DatabaseError with empty sqlstate attribute.""" error = cls(msg) error.sqlstate = None return error def _op_error(msg): """Return OperationalError.""" return _db_error(msg, OperationalError) ### Row Tuples _re_fieldname = regex('^[A-Za-z][_a-zA-Z0-9]*$') # The result rows for database operations are returned as named tuples # by default. Since creating namedtuple classes is a somewhat expensive # operation, we cache up to 1024 of these classes by default. @lru_cache(maxsize=1024) def _row_factory(names): """Get a namedtuple factory for row results with the given names.""" try: try: return namedtuple('Row', names, rename=True)._make except TypeError: # Python 2.6 and 3.0 do not support rename names = [v if _re_fieldname.match(v) and not iskeyword(v) else 'column_%d' % (n,) for n, v in enumerate(names)] return namedtuple('Row', names)._make except ValueError: # there is still a problem with the field names names = ['column_%d' % (n,) for n in range(len(names))] return namedtuple('Row', names)._make def set_row_factory_size(maxsize): """Change the size of the namedtuple factory cache. If maxsize is set to None, the cache can grow without bound. 
""" global _row_factory _row_factory = lru_cache(maxsize)(_row_factory.__wrapped__) ### Cursor Object class Cursor(object): """Cursor object.""" def __init__(self, dbcnx): """Create a cursor object for the database connection.""" self.connection = self._dbcnx = dbcnx self._cnx = dbcnx._cnx self.type_cache = dbcnx.type_cache self._src = self._cnx.source() # the official attribute for describing the result columns self._description = None if self.row_factory is Cursor.row_factory: # the row factory needs to be determined dynamically self.row_factory = None else: self.build_row_factory = None self.rowcount = -1 self.arraysize = 1 self.lastrowid = None def __iter__(self): """Make cursor compatible to the iteration protocol.""" return self def __enter__(self): """Enter the runtime context for the cursor object.""" return self def __exit__(self, et, ev, tb): """Exit the runtime context for the cursor object.""" self.close() def _quote(self, value): """Quote value depending on its type.""" if value is None: return 'NULL' if isinstance(value, (Hstore, Json)): value = str(value) if isinstance(value, basestring): if isinstance(value, Binary): value = self._cnx.escape_bytea(value) if bytes is not str: # Python >= 3.0 value = value.decode('ascii') else: value = self._cnx.escape_string(value) return "'%s'" % (value,) if isinstance(value, float): if isinf(value): return "'-Infinity'" if value < 0 else "'Infinity'" if isnan(value): return "'NaN'" return value if isinstance(value, (int, long, Decimal, Literal)): return value if isinstance(value, datetime): if value.tzinfo: return "'%s'::timestamptz" % (value,) return "'%s'::timestamp" % (value,) if isinstance(value, date): return "'%s'::date" % (value,) if isinstance(value, time): if value.tzinfo: return "'%s'::timetz" % (value,) return "'%s'::time" % value if isinstance(value, timedelta): return "'%s'::interval" % (value,) if isinstance(value, Uuid): return "'%s'::uuid" % (value,) if isinstance(value, list): # Quote value as an 
ARRAY constructor. This is better than using # an array literal because it carries the information that this is # an array and not a string. One issue with this syntax is that # you need to add an explicit typecast when passing empty arrays. # The ARRAY keyword is actually only necessary at the top level. if not value: # exception for empty array return "'{}'" q = self._quote try: return 'ARRAY[%s]' % (','.join(str(q(v)) for v in value),) except UnicodeEncodeError: # Python 2 with non-ascii values return u'ARRAY[%s]' % (','.join(unicode(q(v)) for v in value),) if isinstance(value, tuple): # Quote as a ROW constructor. This is better than using a record # literal because it carries the information that this is a record # and not a string. We don't use the keyword ROW in order to make # this usable with the IN syntax as well. It is only necessary # when the records has a single column which is not really useful. q = self._quote try: return '(%s)' % (','.join(str(q(v)) for v in value),) except UnicodeEncodeError: # Python 2 with non-ascii values return u'(%s)' % (','.join(unicode(q(v)) for v in value),) try: value = value.__pg_repr__() except AttributeError: raise InterfaceError( 'Do not know how to adapt type %s' % (type(value),)) if isinstance(value, (tuple, list)): value = self._quote(value) return value def _quoteparams(self, string, parameters): """Quote parameters. This function works for both mappings and sequences. The function should be used even when there are no parameters, so that we have a consistent behavior regarding percent signs. 
""" if not parameters: try: return string % () # unescape literal quotes if possible except (TypeError, ValueError): return string # silently accept unescaped quotes if isinstance(parameters, dict): parameters = _quotedict(parameters) parameters.quote = self._quote else: parameters = tuple(map(self._quote, parameters)) return string % parameters def _make_description(self, info): """Make the description tuple for the given field info.""" name, typ, size, mod = info[1:] type_code = self.type_cache[typ] if mod > 0: mod -= 4 if type_code == 'numeric': precision, scale = mod >> 16, mod & 0xffff size = precision else: if not size: size = type_code.size if size == -1: size = mod precision = scale = None return CursorDescription(name, type_code, None, size, precision, scale, None) @property def description(self): """Read-only attribute describing the result columns.""" descr = self._description if self._description is True: make = self._make_description descr = [make(info) for info in self._src.listinfo()] self._description = descr return descr @property def colnames(self): """Unofficial convenience method for getting the column names.""" return [d[0] for d in self.description] @property def coltypes(self): """Unofficial convenience method for getting the column types.""" return [d[1] for d in self.description] def close(self): """Close the cursor object.""" self._src.close() def execute(self, operation, parameters=None): """Prepare and execute a database operation (query or command).""" # The parameters may also be specified as list of tuples to e.g. # insert multiple rows in a single operation, but this kind of # usage is deprecated. We make several plausibility checks because # tuples can also be passed with the meaning of ROW constructors. 
if (parameters and isinstance(parameters, list) and len(parameters) > 1 and all(isinstance(p, tuple) for p in parameters) and all(len(p) == len(parameters[0]) for p in parameters[1:])): return self.executemany(operation, parameters) else: # not a list of tuples return self.executemany(operation, [parameters]) def executemany(self, operation, seq_of_parameters): """Prepare operation and execute it against a parameter sequence.""" if not seq_of_parameters: # don't do anything without parameters return self._description = None self.rowcount = -1 # first try to execute all queries rowcount = 0 sql = "BEGIN" try: if not self._dbcnx._tnx and not self._dbcnx.autocommit: try: self._src.execute(sql) except DatabaseError: raise # database provides error message except Exception: raise _op_error("Can't start transaction") else: self._dbcnx._tnx = True for parameters in seq_of_parameters: sql = operation sql = self._quoteparams(sql, parameters) rows = self._src.execute(sql) if rows: # true if not DML rowcount += rows else: self.rowcount = -1 except DatabaseError: raise # database provides error message except Error as err: raise _db_error( "Error in '%s': '%s' " % (sql, err), InterfaceError) except Exception as err: raise _op_error("Internal error in '%s': %s" % (sql, err)) # then initialize result raw count and description if self._src.resulttype == RESULT_DQL: self._description = True # fetch on demand self.rowcount = self._src.ntuples self.lastrowid = None if self.build_row_factory: self.row_factory = self.build_row_factory() else: self.rowcount = rowcount self.lastrowid = self._src.oidstatus() # return the cursor object, so you can write statements such as # "cursor.execute(...).fetchall()" or "for row in cursor.execute(...)" return self def fetchone(self): """Fetch the next row of a query result set.""" res = self.fetchmany(1, False) try: return res[0] except IndexError: return None def fetchall(self): """Fetch all (remaining) rows of a query result.""" return 
self.fetchmany(-1, False) def fetchmany(self, size=None, keep=False): """Fetch the next set of rows of a query result. The number of rows to fetch per call is specified by the size parameter. If it is not given, the cursor's arraysize determines the number of rows to be fetched. If you set the keep parameter to true, this is kept as new arraysize. """ if size is None: size = self.arraysize if keep: self.arraysize = size try: result = self._src.fetch(size) except DatabaseError: raise except Error as err: raise _db_error(str(err)) typecast = self.type_cache.typecast return [self.row_factory([typecast(value, typ) for typ, value in zip(self.coltypes, row)]) for row in result] def callproc(self, procname, parameters=None): """Call a stored database procedure with the given name. The sequence of parameters must contain one entry for each input argument that the procedure expects. The result of the call is the same as this input sequence; replacement of output and input/output parameters in the return value is currently not supported. The procedure may also provide a result set as output. These can be requested through the standard fetch methods of the cursor. """ n = parameters and len(parameters) or 0 query = 'select * from "%s"(%s)' % (procname, ','.join(n * ['%s'])) self.execute(query, parameters) return parameters def copy_from(self, stream, table, format=None, sep=None, null=None, size=None, columns=None): """Copy data from an input stream to the specified table. The input stream can be a file-like object with a read() method or it can also be an iterable returning a row or multiple rows of input on each iteration. The format must be text, csv or binary. The sep option sets the column separator (delimiter) used in the non binary formats. The null option sets the textual representation of NULL in the input. The size option sets the size of the buffer used when reading data from file-like objects. The copy operation can be restricted to a subset of columns. 
If no columns are specified, all of them will be copied. """ binary_format = format == 'binary' try: read = stream.read except AttributeError: if size: raise ValueError("Size must only be set for file-like objects") if binary_format: input_type = bytes type_name = 'byte strings' else: input_type = basestring type_name = 'strings' if isinstance(stream, basestring): if not isinstance(stream, input_type): raise ValueError("The input must be %s" % (type_name,)) if not binary_format: if isinstance(stream, str): if not stream.endswith('\n'): stream += '\n' else: if not stream.endswith(b'\n'): stream += b'\n' def chunks(): yield stream elif isinstance(stream, Iterable): def chunks(): for chunk in stream: if not isinstance(chunk, input_type): raise ValueError( "Input stream must consist of %s" % (type_name,)) if isinstance(chunk, str): if not chunk.endswith('\n'): chunk += '\n' else: if not chunk.endswith(b'\n'): chunk += b'\n' yield chunk else: raise TypeError("Need an input stream to copy from") else: if size is None: size = 8192 elif not isinstance(size, int): raise TypeError("The size option must be an integer") if size > 0: def chunks(): while True: buffer = read(size) yield buffer if not buffer or len(buffer) < size: break else: def chunks(): yield read() if not table or not isinstance(table, basestring): raise TypeError("Need a table to copy to") if table.lower().startswith('select'): raise ValueError("Must specify a table, not a query") else: table = '"%s"' % (table,) operation = ['copy %s' % (table,)] options = [] params = [] if format is not None: if not isinstance(format, basestring): raise TypeError("The frmat option must be be a string") if format not in ('text', 'csv', 'binary'): raise ValueError("Invalid format") options.append('format %s' % (format,)) if sep is not None: if not isinstance(sep, basestring): raise TypeError("The sep option must be a string") if format == 'binary': raise ValueError( "The sep option is not allowed with binary format") if 
len(sep) != 1: raise ValueError( "The sep option must be a single one-byte character") options.append('delimiter %s') params.append(sep) if null is not None: if not isinstance(null, basestring): raise TypeError("The null option must be a string") options.append('null %s') params.append(null) if columns: if not isinstance(columns, basestring): columns = ','.join('"%s"' % (col,) for col in columns) operation.append('(%s)' % (columns,)) operation.append("from stdin") if options: operation.append('(%s)' % (','.join(options),)) operation = ' '.join(operation) putdata = self._src.putdata self.execute(operation, params) try: for chunk in chunks(): putdata(chunk) except BaseException as error: self.rowcount = -1 # the following call will re-raise the error putdata(error) else: self.rowcount = putdata(None) # return the cursor object, so you can chain operations return self def copy_to(self, stream, table, format=None, sep=None, null=None, decode=None, columns=None): """Copy data from the specified table to an output stream. The output stream can be a file-like object with a write() method or it can also be None, in which case the method will return a generator yielding a row on each iteration. Output will be returned as byte strings unless you set decode to true. Note that you can also use a select query instead of the table name. The format must be text, csv or binary. The sep option sets the column separator (delimiter) used in the non binary formats. The null option sets the textual representation of NULL in the output. The copy operation can be restricted to a subset of columns. If no columns are specified, all of them will be copied. 
""" binary_format = format == 'binary' if stream is not None: try: write = stream.write except AttributeError: raise TypeError("Need an output stream to copy to") if not table or not isinstance(table, basestring): raise TypeError("Need a table to copy to") if table.lower().startswith('select'): if columns: raise ValueError("Columns must be specified in the query") table = '(%s)' % (table,) else: table = '"%s"' % (table,) operation = ['copy %s' % (table,)] options = [] params = [] if format is not None: if not isinstance(format, basestring): raise TypeError("The format option must be a string") if format not in ('text', 'csv', 'binary'): raise ValueError("Invalid format") options.append('format %s' % (format,)) if sep is not None: if not isinstance(sep, basestring): raise TypeError("The sep option must be a string") if binary_format: raise ValueError( "The sep option is not allowed with binary format") if len(sep) != 1: raise ValueError( "The sep option must be a single one-byte character") options.append('delimiter %s') params.append(sep) if null is not None: if not isinstance(null, basestring): raise TypeError("The null option must be a string") options.append('null %s') params.append(null) if decode is None: if format == 'binary': decode = False else: decode = str is unicode else: if not isinstance(decode, (int, bool)): raise TypeError("The decode option must be a boolean") if decode and binary_format: raise ValueError( "The decode option is not allowed with binary format") if columns: if not isinstance(columns, basestring): columns = ','.join('"%s"' % (col,) for col in columns) operation.append('(%s)' % (columns,)) operation.append("to stdout") if options: operation.append('(%s)' % (','.join(options),)) operation = ' '.join(operation) getdata = self._src.getdata self.execute(operation, params) def copy(): self.rowcount = 0 while True: row = getdata(decode) if isinstance(row, int): if self.rowcount != row: self.rowcount = row break self.rowcount += 1 yield row if 
stream is None: # no input stream, return the generator return copy() # write the rows to the file-like input stream for row in copy(): write(row) # return the cursor object, so you can chain operations return self def __next__(self): """Return the next row (support for the iteration protocol).""" res = self.fetchone() if res is None: raise StopIteration return res # Note that since Python 2.6 the iterator protocol uses __next()__ # instead of next(), we keep it only for backward compatibility of pgdb. next = __next__ @staticmethod def nextset(): """Not supported.""" raise NotSupportedError("The nextset() method is not supported") @staticmethod def setinputsizes(sizes): """Not supported.""" pass # unsupported, but silently passed @staticmethod def setoutputsize(size, column=0): """Not supported.""" pass # unsupported, but silently passed @staticmethod def row_factory(row): """Process rows before they are returned. You can overwrite this statically with a custom row factory, or you can build a row factory dynamically with build_row_factory(). For example, you can create a Cursor class that returns rows as Python dictionaries like this: class DictCursor(pgdb.Cursor): def row_factory(self, row): return {desc[0]: value for desc, value in zip(self.description, row)} cur = DictCursor(con) # get one DictCursor instance or con.cursor_type = DictCursor # always use DictCursor instances """ raise NotImplementedError def build_row_factory(self): """Build a row factory based on the current description. This implementation builds a row factory for creating named tuples. You can overwrite this method if you want to dynamically create different row factories whenever the column description changes. 
""" names = self.colnames if names: return _row_factory(tuple(names)) CursorDescription = namedtuple('CursorDescription', ['name', 'type_code', 'display_size', 'internal_size', 'precision', 'scale', 'null_ok']) ### Connection Objects class Connection(object): """Connection object.""" # expose the exceptions as attributes on the connection object Error = Error Warning = Warning InterfaceError = InterfaceError DatabaseError = DatabaseError InternalError = InternalError OperationalError = OperationalError ProgrammingError = ProgrammingError IntegrityError = IntegrityError DataError = DataError NotSupportedError = NotSupportedError def __init__(self, cnx): """Create a database connection object.""" self._cnx = cnx # connection self._tnx = False # transaction state self.type_cache = TypeCache(cnx) self.cursor_type = Cursor self.autocommit = False try: self._cnx.source() except Exception: raise _op_error("Invalid connection") def __enter__(self): """Enter the runtime context for the connection object. The runtime context can be used for running transactions. This also starts a transaction in autocommit mode. """ if self.autocommit: try: self._cnx.source().execute("BEGIN") except DatabaseError: raise # database provides error message except Exception: raise _op_error("Can't start transaction") else: self._tnx = True return self def __exit__(self, et, ev, tb): """Exit the runtime context for the connection object. This does not close the connection, but it ends a transaction. 
""" if et is None and ev is None and tb is None: self.commit() else: self.rollback() def close(self): """Close the connection object.""" if self._cnx: if self._tnx: try: self.rollback() except DatabaseError: pass self._cnx.close() self._cnx = None else: raise _op_error("Connection has been closed") @property def closed(self): """Check whether the connection has been closed or is broken.""" try: return not self._cnx or self._cnx.status != 1 except TypeError: return True def commit(self): """Commit any pending transaction to the database.""" if self._cnx: if self._tnx: self._tnx = False try: self._cnx.source().execute("COMMIT") except DatabaseError: raise # database provides error message except Exception: raise _op_error("Can't commit transaction") else: raise _op_error("Connection has been closed") def rollback(self): """Roll back to the start of any pending transaction.""" if self._cnx: if self._tnx: self._tnx = False try: self._cnx.source().execute("ROLLBACK") except DatabaseError: raise # database provides error message except Exception: raise _op_error("Can't rollback transaction") else: raise _op_error("Connection has been closed") def cursor(self): """Return a new cursor object using the connection.""" if self._cnx: try: return self.cursor_type(self) except Exception: raise _op_error("Invalid connection") else: raise _op_error("Connection has been closed") if shortcutmethods: # otherwise do not implement and document this def execute(self, operation, params=None): """Shortcut method to run an operation on an implicit cursor.""" cursor = self.cursor() cursor.execute(operation, params) return cursor def executemany(self, operation, param_seq): """Shortcut method to run an operation against a sequence.""" cursor = self.cursor() cursor.executemany(operation, param_seq) return cursor ### Module Interface _connect = connect def connect(dsn=None, user=None, password=None, host=None, database=None, **kwargs): """Connect to a database.""" # first get params from DSN 
dbport = -1 dbhost = "" dbname = "" dbuser = "" dbpasswd = "" dbopt = "" try: params = dsn.split(":") dbhost = params[0] dbname = params[1] dbuser = params[2] dbpasswd = params[3] dbopt = params[4] except (AttributeError, IndexError, TypeError): pass # override if necessary if user is not None: dbuser = user if password is not None: dbpasswd = password if database is not None: dbname = database if host is not None: try: params = host.split(":") dbhost = params[0] dbport = int(params[1]) except (AttributeError, IndexError, TypeError, ValueError): pass # empty host is localhost if dbhost == "": dbhost = None if dbuser == "": dbuser = None # pass keyword arguments as connection info string if kwargs: kwargs = list(kwargs.items()) if '=' in dbname: dbname = [dbname] else: kwargs.insert(0, ('dbname', dbname)) dbname = [] for kw, value in kwargs: value = str(value) if not value or ' ' in value: value = "'%s'" % (value.replace( "'", "\\'").replace('\\', '\\\\'),) dbname.append('%s=%s' % (kw, value)) dbname = ' '.join(dbname) # open the connection cnx = _connect(dbname, dbhost, dbport, dbopt, dbuser, dbpasswd) return Connection(cnx) ### Types Handling class Type(frozenset): """Type class for a couple of PostgreSQL data types. PostgreSQL is object-oriented: types are dynamic. We must thus use type names as internal type codes. 
""" def __new__(cls, values): if isinstance(values, basestring): values = values.split() return super(Type, cls).__new__(cls, values) def __eq__(self, other): if isinstance(other, basestring): if other.startswith('_'): other = other[1:] return other in self else: return super(Type, self).__eq__(other) def __ne__(self, other): if isinstance(other, basestring): if other.startswith('_'): other = other[1:] return other not in self else: return super(Type, self).__ne__(other) class ArrayType: """Type class for PostgreSQL array types.""" def __eq__(self, other): if isinstance(other, basestring): return other.startswith('_') else: return isinstance(other, ArrayType) def __ne__(self, other): if isinstance(other, basestring): return not other.startswith('_') else: return not isinstance(other, ArrayType) class RecordType: """Type class for PostgreSQL record types.""" def __eq__(self, other): if isinstance(other, TypeCode): return other.type == 'c' elif isinstance(other, basestring): return other == 'record' else: return isinstance(other, RecordType) def __ne__(self, other): if isinstance(other, TypeCode): return other.type != 'c' elif isinstance(other, basestring): return other != 'record' else: return not isinstance(other, RecordType) # Mandatory type objects defined by DB-API 2 specs: STRING = Type('char bpchar name text varchar') BINARY = Type('bytea') NUMBER = Type('int2 int4 serial int8 float4 float8 numeric money') DATETIME = Type('date time timetz timestamp timestamptz interval' ' abstime reltime') # these are very old ROWID = Type('oid') # Additional type objects (more specific): BOOL = Type('bool') SMALLINT = Type('int2') INTEGER = Type('int2 int4 int8 serial') LONG = Type('int8') FLOAT = Type('float4 float8') NUMERIC = Type('numeric') MONEY = Type('money') DATE = Type('date') TIME = Type('time timetz') TIMESTAMP = Type('timestamp timestamptz') INTERVAL = Type('interval') UUID = Type('uuid') HSTORE = Type('hstore') JSON = Type('json jsonb') # Type object for arrays 
(also equate to their base types): ARRAY = ArrayType() # Type object for records (encompassing all composite types): RECORD = RecordType() # Mandatory type helpers defined by DB-API 2 specs: def Date(year, month, day): """Construct an object holding a date value.""" return date(year, month, day) def Time(hour, minute=0, second=0, microsecond=0, tzinfo=None): """Construct an object holding a time value.""" return time(hour, minute, second, microsecond, tzinfo) def Timestamp(year, month, day, hour=0, minute=0, second=0, microsecond=0, tzinfo=None): """Construct an object holding a time stamp value.""" return datetime(year, month, day, hour, minute, second, microsecond, tzinfo) def DateFromTicks(ticks): """Construct an object holding a date value from the given ticks value.""" return Date(*localtime(ticks)[:3]) def TimeFromTicks(ticks): """Construct an object holding a time value from the given ticks value.""" return Time(*localtime(ticks)[3:6]) def TimestampFromTicks(ticks): """Construct an object holding a time stamp from the given ticks value.""" return Timestamp(*localtime(ticks)[:6]) class Binary(bytes): """Construct an object capable of holding a binary (long) string value.""" # Additional type helpers for PyGreSQL: def Interval(days, hours=0, minutes=0, seconds=0, microseconds=0): """Construct an object holding a time inverval value.""" return timedelta(days, hours=hours, minutes=minutes, seconds=seconds, microseconds=microseconds) Uuid = Uuid # Construct an object holding a UUID value class Hstore(dict): """Wrapper class for marking hstore values.""" _re_quote = regex('^[Nn][Uu][Ll][Ll]$|[ ,=>]') _re_escape = regex(r'(["\\])') @classmethod def _quote(cls, s): if s is None: return 'NULL' if not s: return '""' quote = cls._re_quote.search(s) s = cls._re_escape.sub(r'\\\1', s) if quote: s = '"%s"' % (s,) return s def __str__(self): q = self._quote return ','.join('%s=>%s' % (q(k), q(v)) for k, v in self.items()) class Json: """Construct a wrapper for holding an 
object serializable to JSON.""" def __init__(self, obj, encode=None): self.obj = obj self.encode = encode or jsonencode def __str__(self): obj = self.obj if isinstance(obj, basestring): return obj return self.encode(obj) class Literal: """Construct a wrapper for holding a literal SQL string.""" def __init__(self, sql): self.sql = sql def __str__(self): return self.sql __pg_repr__ = __str__ # If run as script, print some information: if __name__ == '__main__': print('PyGreSQL version', version) print('') print(__doc__) PyGreSQL-5.1/LICENSE.txt0000644000175100077410000000257213466770070014562 0ustar darcypyg00000000000000Written by D'Arcy J.M. Cain (darcy@PyGreSQL.org) Based heavily on code written by Pascal Andre (andre@chimay.via.ecp.fr) Copyright (c) 1995, Pascal Andre Further modifications copyright (c) 1997-2008 by D'Arcy J.M. Cain Further modifications copyright (c) 2009-2019 by the PyGreSQL Development Team PyGreSQL is released under the PostgreSQL License, a liberal Open Source license, similar to the BSD or MIT licenses: Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies. In this license the term "AUTHORS" refers to anyone who has contributed code to PyGreSQL. IN NO EVENT SHALL THE AUTHORS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF AUTHORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE AUTHORS SPECIFICALLY DISCLAIM ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE AUTHORS HAVE NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. PyGreSQL-5.1/docs/0000755000175100077410000000000013470245541013654 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/Makefile0000644000175100077410000001637113466770070015331 0ustar darcypyg00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) endif # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " applehelp to make an Apple Help Book" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to run coverage check of the documentation (if enabled)" clean: rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 
pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PyGreSQL.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyGreSQL.qhc" applehelp: $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp @echo @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." @echo "N.B. You won't be able to view it unless you put it in" \ "~/Library/Documentation/Help or install it in your application" \ "bundle." devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/PyGreSQL" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PyGreSQL" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." 
$(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." coverage: $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage @echo "Testing of coverage in the sources finished, look at the " \ "results in $(BUILDDIR)/coverage/python.txt." 
xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." PyGreSQL-5.1/docs/announce.rst0000644000175100077410000000142413466776511016230 0ustar darcypyg00000000000000====================== PyGreSQL Announcements ====================== ------------------------------- Release of PyGreSQL version 5.1 ------------------------------- Release 5.1 of PyGreSQL. It is available at: http://pygresql.org/files/PyGreSQL-5.1.tar.gz. If you are running NetBSD, look in the packages directory under databases. There is also a package in the FreeBSD ports collection. Please refer to `changelog.txt `_ for things that have changed in this version. This version has been built and unit tested on: - NetBSD - FreeBSD - openSUSE - Ubuntu - Windows 7 and 10 with both MinGW and Visual Studio - PostgreSQL 9.0 to 9.6 and 10 or 11 (32 and 64bit) - Python 2.6, 2.7 and 3.3 to 3.7 (32 and 64bit) | D'Arcy J.M. 
Cain | darcy@PyGreSQL.org PyGreSQL-5.1/docs/_static/0000755000175100077410000000000013470245541015302 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/_static/pygresql.css_t0000644000175100077410000000331413466770070020213 0ustar darcypyg00000000000000{% macro experimental(keyword, value) %} {% if value %} -moz-{{keyword}}: {{value}}; -webkit-{{keyword}}: {{value}}; -o-{{keyword}}: {{value}}; -ms-{{keyword}}: {{value}}; {{keyword}}: {{value}}; {% endif %} {% endmacro %} {% macro border_radius(value) -%} {{experimental("border-radius", value)}} {% endmacro %} {% macro box_shadow(value) -%} {{experimental("box-shadow", value)}} {% endmacro %} .pageheader.related { text-align: left; padding: 10px 15px; border: 1px solid #eeeeee; margin-bottom: 10px; {{border_radius("1em 1em 1em 1em")}} {% if theme_borderless_decor | tobool %} border-top: 0; border-bottom: 0; {% endif %} } .pageheader.related .logo { font-size: 36px; font-style: italic; letter-spacing: 5px; margin-right: 2em; } .pageheader.related .logo { font-size: 36px; font-style: italic; letter-spacing: 5px; margin-right: 2em; } .pageheader.related .logo a, .pageheader.related .logo a:hover { background: transparent; color: {{ theme_relbarlinkcolor }}; border: none; text-decoration: none; text-shadow: none; {{box_shadow("none")}} } .pageheader.related ul { float: right; margin: 2px 1em; } .pageheader.related li { float: left; margin: 0 0 0 10px; } .pageheader.related li a { padding: 8px 12px; } .norelbar .subtitle { font-size: 14px; line-height: 18px; font-weight: bold; letter-spacing: 4px; text-align: right; padding: 0 1em; margin-top: -9px; } .relbar-top .related.norelbar { height: 22px; border-bottom: 14px solid #eeeeee; } .relbar-bottom .related.norelbar { height: 22px; border-top: 14px solid #eeeeee; } PyGreSQL-5.1/docs/_static/pygresql.png0000644000175100077410000002276713466770070017701 0ustar darcypyg00000000000000‰PNG  IHDR€€L\öœtEXtCreation Time06/05/04M¾æqtIMEÚ6C…/- pHYs ð ðB¬4˜gAMA± 
üa%dIDATxÚí}`Õ™ÿ›¶½J«Þ,ÉE.’l„mlÜ0lBïýB3$r˜ËåB¨GHÀt|@þဠ&p´¿i6`¶ÜåŠlõ^VÚ:;3oîÍÌîh¶jW;²œ; ã™Ñ›öýÞûú{‹ñ<NÑø>Þ/ðN0Ît €q¦SŒ3`œéãLäx¿ÀIJÈ6ç8ÎO3.ï ËÛ;àâ!Ôi5v«ÉbÒgX’Äq,ýý/@trWx…aIpÌåñÿýËû»ºÜn_¿7à¡4á»8†ønÖQ™F]n†iÆ„œ™SŠÊ ³m#E£{CìD:bèY†uº¼]½ƒ]®ö§×K‹½ Óë4ùÙ6‡Íd6hí¡—‘$úÞ”»ÇÁþAwCkÏž#-ǺœÇ»]N¯ßí4T¶UŸo59L“Šs&eåeÙ z-ÇÂn¯ÿµÿÞzçÿûÖ¬' µb ½yxÂ~zpPoÑÿ|vÙåçœ6cR±NKt 'ô9][꿹¹~cc/¤Y}‰ëqŒÂ0ĈÇóÈ£ 1Ã5ä™Ùæye9 ª&L+/ÈqXµ•ô=Î_î~âó}M^Ǭn!€ˆx@CÞÉA'Àð”ãe³'^°°zRI®†ŠCž_ø`ý7G*lZKÄ(ÈóNvzLK=vÞiןf¦ÍtR€î}¬µ{Ýk›ÞØÓÂkˆizJG&%8Ñ;1w³ð8Í"fZâò)¹«Î¨˜3½´ '#B”4wôÞ÷ü‡oíow©Iˆø.塼Ïp°'Àõx\O­[<ýš•ó s2•÷F ¿ßwtþïÿ+ŸV‘S¨}[€syÙ–WÞ{õ2«Ù0žx|ô¯×¿ÿ춆IV­™¥ˆ”?ÌÏÁî_,öø]«dÑIJܟßúòWïî(³j,áˆ@ô"ò~¦ßË\Z]üðÍ+J s”0 º½ç¯Ý°¥m`º‘J†Y¨´ûY' ß¹}ÙªE§'ÖÕc@c[Ï”û_Ï"±<™þÐ4û˜ËfMxjÍj™ûN—ç¬~mÏ€§Æ¤‰xD2Ý?掋ᚆè›j'>vû…v«Q¾ ©–Gÿ㓵ì˜nÕÂð›ÇÔìè$jt`>wRΛ¿¹©´ Àáã3î{}š]G©a¨!ò±ÒR?üñVYRw÷ –Üýj±˜cI†QD°{i¶Ó˾}Û²ÕKf+ Ÿ×­þãûSí1¸ ƒ¨zÀ;üѧnDÆÒ  ³×Y|ÇKÓ,šä¤}R´{ÀßòÌ-yY6é©ÇüÛ_(×áÚ8‘‡¨ÿ:ý—TmX{…A§•/ÿô›½ËùÛÔŒØ=Z ƒ¼”ÙQg`×ã×VO)sXξû¿6’ªùØu.ú¯W/¸|ù\é²³Ö<ÏÑŒ‰"â½|ò ‘nàùî×GÇ¿±8Ï!ßí‹ïë?ôÎT».1ÊCä‡ðïyüºÊÉE—¨ŠØøEÝî>·I=î£î3É¢¿øœÓå3½úq³ËoŽÏ}µÝ=KC”¾äægön’Ï/š;ýokVtú£G_¼WBuª];óþ×Áù'ߨG®|mËif­ŠŒÙ;H¿yçJ2ägÖÿØúЦú £ u“òkÓ&äTd誵aûþùäêsf?qñ¼î@òo€tI¹™*ûÕ«h‡Wñ]7m«Gþ¡ŠlAÝ¿*Ó8sJ‰tÈrܪ'ß«ÌÐKŸ0ñ®Â¦›sßë;—OÞsåÒËf!7"ùû ·›óÐJ Š Gó±÷¾¯4jTüò½.ú‰«Dð%·ì8|tÀ+[V'f„ž¦dêk~ýzS{tY÷/ÿó½ga ý Ð¤ùðÛÊ®£}N×Ö6§†P)è»4:Í‚š éYâw¿öe•eØ 9Áå÷‰Mé]/¹}Ò£^[ÿð5Géä¿Ù&Zz=þÀðmÕz¿}G[ùôÜÝê¤Ù‡O×j‚†ÿÁcmûzÝJÇâDމÐÓs)ì¢ÙÀA(™ZVððª9 >6Ù;`X§Ë×ÖÕ/ŸQÔ7ïn(Ó©Üîð0«VïÀ“o=Ù¬¦|K‰d°íþYcï+·Èºçª%EJ‚hä>!S…éê”O¨€×8ØÚ«¢íÏñ|©]_œ´¾ÝßëûÚLi0™AÑ; + ±yšYsë«_ÊÊ@§¡>¿w5Dx' ì¤:åØá£Ëš;û:ý”z2¡/ÀÝ8w"I9^w bü˜Jœ! 
ƵE &Xµý ª­ž´ Ôáeáˆw–¬gB‘½Q€ƒ íšQQ&·øØ%s¦È‡ï}]?ÉB¢C-õ;ƒa¯kw~¶m¿tYDÏÞyÁñ¡@‚ %BvƒQKåfÙå3êÐÖïV‘û¼!™bÁwG;3)œüAÃø¹)™†Kj&Ì›VT”—)‰)—Ç¿¿¡ýÍ-õŸ4õN2jÍTŒ’›1ù.ŸŒnYïšt¹þ' N›’•a¡H2dEzãçͯ¼ãö|=qgy‡fùi¥yÊ¿ªÃ8½VH–ª&…X˜“a‘v!ä?kîíòÏʱ¼¾ºvÑœ©™VADJ¤YSK®ZYÛÜÑû‹ç>|·¡»&éˆlL`ä„úó÷ÛÏ=wAµlÇd®D92!”¬ˆ,ÎËT€ÉùöM»ÔA€¬:ÜRS4ÃNw˜_X{ñäÒ¼˜5÷gÿ~ôµÛ¤Ä=êe/]öòg>>ͬ¸O’‚¨ËÃÜ|á™òá†÷¶¼òÕãO\g y'—¯˜;啞Ùr° lX'¹½~?^ËÒ°¶ª<â}Ô)¨êòÜ÷¶¥C]ÈD‘ÊW§ì;›¶ßðæVÂJ£æ´ ½—…3‹O®¹HøHJ %|ûÏ\ôÈ}þ𵸨*æÏœ„só©Øšò>2:§d³3¬Òù!ÏMmz÷¡ ÌÆa uöŽ~7 °Ú’lY95uôI.K ™‰a¸†(ÎÍŒ8­Zü²jb~W*ù¹¸¼ȇôÉÛk»þé«ÿúM¥‘BzUJøh lScoDrÕn5Uf™Žµƒ”F½vÙ„ š‹ Ãâ‡-¥PòÏ©(ûõŽúã0Owß_6iì”Àò˜Ÿ{ÿ?÷·šplJÑ0O;ú4Bidäý…{²ðª:mdD=¥ÿ0`Ú})||Lða¥¶\'˜"XÃȽÒã¥ýÞ@MŽQÙ}¢Àû׿ÿøH: }áñ¶îõß5Ì›V<±8G:³´j£ŸîÎ tù¨r¶ºÁAQ†Y><ÞÚ£Ñà+×¾±jRn±Ãò×=MÖ©½CtUy¾Ür_Sw¾x´ó£}ú¬ªè‘¼8ßžoô¢ ``PT—˜ô´±Ť6K°´ÒÔbÈÉ‚ê!b¥93µÄ»{š¾¹û…‡¯\›aù¢îȽï.1i6ïkDÒYj3{zqçu’[”*a%º¢¿…P¯²èöµõ×µôeS8AŠ/ÈòE¹R3d loè’½ñH)Ä@ÉaN€@èy0]€÷¡Š®?:¢ˆ¥¦Å{æu ñÚ´ÓÂ<'d€¥}ÔÁqöòÐ[äèH†a—?ó yyj¦IËAøßG:Y’"Ê™pPFc Þ™7âX‡Ó+ŸÏ˲3¡Ð¿Ä á·È © †ã~pú¦jðhÀ@pF ¹Ñï0’ú tþÐMˆú>'"JÝ"È ñºZIô’ÍŽ&"ȳ!ÐÀZ‹£D)CŠ¡&CŸ/†_2ìp|¡”·Åd 4¤ÌáÄAiåÄåï:a¨ÜazY`x9!ÑZš‘0œ7] ®:àcÖœ»L:!L;x0½úЋÁwy+ü k'Ñ´ zÝP‚AJŒ\˜?ŒûÁظNCU˜u\¬ðdb (ÿ¡Ç4­t˜ã°a Á"“p@²ËB /ì^2fX€;{NEÌŽbzûïÓ _d4ŒÁw0<Ðÿʳ°TŠdÐðwðÂ#Rj$ÖÕ?ü6›ä0±ÉU“‡ÛBHÍq=ýÁÔ¹`P•“-JBÂJ«%eT ) –ddÚÌ Å ç%Àô 0ˆ½[bD\ Ä‚b›¶+ÛB¨â탕ÊI²ÛC5è&Ó 3}\ŒxC¼ÇÊûF=ùÅöÃòù›—Ï:ìc#íK€1—-f=ð —½÷ÂÚxóÝâhî:‘û|û >÷%FñH¡;/¢w1êxAR¬¾²àX{·S>ÌÏ09㸉‡B‰Ž\ÿÿ÷Èj`aÍÄl.‚0ƒ,Ç…Â¥±^ Ð…§O÷Âqèyp®Êå%þŽ8dVTñ?úUÐÃJî–¤e2`…™—ABÂH& Z¯ëêJ³ «ùŠê"w¸M%˜ ”k#´*â=Ñ}ø«ŸL6´ Åu|ÿÄÍ ßƒŒy mUQ?`ÒvÇpL.‡fXr) )d¹=ÀdÔ'¾8ARÓ®'ÿöùÎP3pÏEgþè D7ó†Œ.­–"Z^1HÐÉ&}ÓOç%úÖçúßœðl¨ïËÝ\æx”-%Y†£™„)Ê0£¡dÈêÐ,Ï¥V¤Ì ðF¼6ž8*ÒQk>Ø)<ª+JÊ2Œn08ä‘P^eÔ(®ÇŸžg-/ÊNðôX¸·‹Ò† à;ˆò‚ÀP|ßMSõCi $òJôÙ´ðÒ”|k¤•ùQty’ÙêhÐcYšùaß1éõ‰?]µpŸ‹»Œ@F—¬óñ‰YfF!ñêÝô#WŸ…'Ì#Eý ±žåOTgæ;…܆^}:ÓàcF ê559VÙgépT\ë!ÈÛMÓ‡<¾€…H!àÃt³öîW7ÉR~IítJÖÇÍÞØ1ºTMÈñ†t>‚ÔPgΚœø‰Q/7¸Y?Aw7¶ ß*Æ„ØrFA/Æ^y9¾<Ë,s¡¥£à)ppˆƒ%ùŠðd¯ËJâ }b4V×åÚ{¸Y:¯ÑïÞ°hïÐð 0â€>ù°0ÓÔRÔ‡|Ìs«g š95¼D¿—‹äþð2VMGM¶ž­;àƒ°0Ç&¶öåS©ÄÌXJ""UTw¼GŠŽÅô#Â0ͪ½çÅOd{té3²L:yR†‘Àº‡£FÈÛ „”Ëò—.3âS¢¨WÈŸä1ÞÐyÝ]=Jc´Ÿã 
³‡hësRѨ·ç‡f³ì¶~9baBÒøæVç΃ҡ†"ß»så¾!ZZ„Y«-^ÙYqØÍ@ĦÑ˲ÍSzôA3¬º[žûHOÕVM\\”áE &¤Ï†-ùÖƒîÚóæ%sÿp\Ûƒæîˆ„E–&Ò@ŽN™‘!ï6·f]òÒì°Ÿ¹aq¥ìôoÙuT¯‹]Ì“ Hìêqo®;d޽x÷…‡ý1=i vÿÇÏ?Íby¥Ž(èf1ôÓÄ@÷ß_cUhÚB` ¡´"¢)…™Iµ,Íž3wš´Ä«Ÿï-×ÅÕ)aPmÕ®Xÿ‰\ ]V”½æÌI½4’óÀaÔÊcÖë£ÑþËߤHè§@ p"#øRˆ}ќꣀĿoè’¥ª0½?¹7’ —Lˤ°H}N÷—­‰³CaË$,Ãrùh›|fíµËZ<¬›…síòõí=Nàa^ºü eú>Ü„t”’›`TRðŽcD ìDòÁ>nì—“*Åy™”žJF¢ï¿tÌÍw¿ÜeÓ&›ñN†FÍ?½¹­Ð-"¤ŸþÙì¶>_íô¹Íîc]¤žºtÙìä¿W€ y`Xø31a³Gy^šQcp¦^'!Í¢:x¼]:Dvô¿¯œy|$Ï.ùÓ²ÍU¡Õ0èsç;'èS\=l¤‘§'y}“|xýùóµ~4FƒË? -ýÌÖ†÷n_¦×¦0™P7q‹æ n†ø;ü À@öŠ‘*\}†÷˜;ü«È4ÙH½ðÁr§¿fåõ¶ n=Ý@éRT†©yG9Ðûç[åÐSú|Ú_JQ¤ Õ0 þ´uù¿}(Meª» êæÏŸo»ùìÈõDà”=ß™†™ŸîÎôàÿù½ïã&O¥Eµe£_ ¾v»áº…Wž;W=Ú^G›,÷“Ã`Vg­³Ï´¥&"Ñ¥;œþ§–×,ùãéDO‘ïÉÓ¡îŠóþÌRsÒdZ,ÃÀ>wà¶ÓK×ݼҔ^÷áJ½:>Š@dû¨­¨ÿýYæ–Tò”ˆ“;\ôµÆ5‹·cœGèÈI”¶ÈVæÃ·ÂàÄÂáàƒS³Øó¯ý8ýœ EóJjó±÷ž1ùÉ»V§Ïýp̳‚‡ªcÙ[4õ& âþ^æ–iÚuÖap(Eë‹’[šÈÆM¿d¸FcÇ#ŽçY‚X{à ¥“ìòøä@iFô_”-ÄGaÀ§Œàíøß/2'EŸÕégÏ^±€)Y_MÎê‹ÖÏzëûùf“ÚXê×-2w6=M¨g_ß1/ÓøêÝä)Öª‘44Y€n•ðg^Hs1ÐgóÁ¹ ´ù'qðÁù˜²YpN¿;òÑÕ¥K6øN·ëãiEšƒ-,Óùëß$^.Ý?ü)âªGŠy"¾‹›Xþ0Ya×h¬&˸öÓÁ»VÈ&Ô>ÏÞ“^iäè&8ÛC¿ à£_·9«,‚×M`ÂLÔàH—ó‡Ö~„CÈý~²€¶r’ãË5+gV”D‡ZÃ0M®=áá,ÀbÁ‚û fÒw—n»vêO>odsb­.‡:Ñ!—¿ùA¯?z P`~ #Àò—<é(!1i ŸÐf ÇR{øÎGM߉U{A7;`a«{g¡1‡Â¥™y ÏAÞ":ªù¶Kj'Ÿ;FQnf¼EìÃa(„ ›oÙ ¢k‡wÃПâb†qƒë/;”ùh©…ÅõáZÑß÷õ{öÞKäév…œ­8pc ì îßðÒ¤¦AºPOò!óé·Ëí&ꨤ0 Oð Ä(•ÖQÀ†Ë&­fCÛ·iêlêrözÆYf»ÙP”mÍuØ2¬ÈJÒŒˆz8Öy@˜å9%[ ä¾b °d03À›a*úƒ}‚^'¬ÿ,þòüž^ïÖ»ˆiö¯ÃFˆgèó¸¾ÑYyé ºvw Ð@I§÷¯›¢{ðÂz ú¤çò<)Ô-¨ÄÿlaQøáÕO&d%¿’DL ÷× Ð…l¡C@I©YÄî¶úô:~GŸ¯ÝÏÖ Ñû¾í÷àsó¶(쮈kå!1“‹-ÿ®yÁ5/×–?„yi üF„ðÛ3üŽ^ïoæ^¾éCi}ah¡JžaµÃ”jÄ?1EÉbÛ<à9 ß/ô¨QŒ€X[Ù>߻㮎•›S“s±E“›MØ¡n㉘úè’ƒ™_Ö}z˜ý¶“† PaÒÌ²ë‘æè pÇÝ+ùÝ=ú¹¶‰³h<òx9rž6y9~ZžM­eºâàX:Þ¥Œ-…@,î $,yÌ®gvö—³³qYšÇÅñÓîÌ|ûò‹ãô ñmþÐ Mh°R#yq¥æŠùÜô¼£$è‹0–0q0©Â3Kó2Tä~,H+Ћ3#Y¡ç©2\…A…÷H¤œY¼ùÌɆ7 M˜3 Tz4¸›ÀÜâlù@îꌀ.–d¦Ÿ„ Ê:¸ ³‚Õ#`Ü„·áp~P‹ iñŽaŒ d—à,©Šcan†9ýÛ()–>ÉX(hc Z»)†Ò ÛæÚ‘ì^ÅX¡µÔÆŒ™KFû]ꉰÑc0Ö—º±h«IFB^³Æâp0~R>½°]ÊéÏ‘0Ã1u @7Ã5¤†J9­4*pÈþ©°•ƒ<‘¶9ˆÂDaÂ1ࣸ]?z¹'¶Á@(•±<_eÕQj§ûãûE7C)Àô! 
¹Æ hFÇüˆâfŠ‘· f2BÚR…Q€ü´,Ž©œjŽ;ŒSÀ´!f…s3ƒ¨3 Ng¡¤.1œIc:ƒ=Kä~T—å©þ« ñÔdƒÜK„5® ­ ”¼¸ç T1‹~Š¢Y<8Å38Æâ„ # …ᔫ“å_ ŠoÚ<€…ú¡½ Åw6ü©ÍT11ÚË(†a†"Æs˜*µ_œëHÿ6”D½ã`›šžƒ;„IdœGÌ€GÔLÅ~Õ£«±’c(ÇÀGÈÒ„¶¼ —›¦Ìu¨ì$€ÐÊÊœôm®}‚ßï9¨ÈS‚HFf(#2JÈýá;Fb LÝ¡ÃmÉ:Ûà,i+N^È…F½ e£@"B²Ïþ?ŽÄ Æ;ߟ|°釖RšpIç ÌZƒNýšß±­ßÂÄtW÷DéŒgsô˜r}Qùš‹º™‰þáàC3™AÒššµD‹'4ÈÀ9å¹êf$Ró‡bSÓ“ÂÔW‰5Jù&ýA”þPÐpý!ç MãI*ùƒñLžßÑž­,œ6!g,Ø3ö¸öº-ªÖ(ž-F²ˆB†?H:e ØÒ,è…Ð Òˆ"¸IEiå~ãÑØ€lVaö=²eÃQ o` ÃQ뇶ÁÚ“ð­ŒG̪!l€ùV¢ ä¥ñ ˜°x¥u,¸3ö:à…í¢MÃÛ\ Ò(Ñ…<°˜uÖä–H•N€Ž›S7l3šÜ/Îp¥³°¸‡å.™”C¦ñKr èDYA‘ÁI€à¢ $…äð&x²±9>fO îÛô.<•åw#舗¹òœ™cʱ×áSe­&ø õbê\\Á¼¡ ',Kœd…¤\]LDû¢‚¢Ž!8ºyK$Àc.|þPöÍjÅõB5÷Ê`îÓ :^ƒÛÄ ûL°.¹j»3û7tïRlٺƖr,¡ÎE¿|IíXøÀ%EÑÊ*žŽ•Aî#BgŠïY«nS¡ „Y¬mêyé³&ÖkõXJ¿ÿÄ€^§¹jeíØ±ç„è%_„P‚ ä])ø¢)ç* /Ç%–(ß RÆ@[·Ü€í‰¿ÞA¡fí>æ ¿~Ä…÷Ò¡±€rùä.ÈŸÌåq¾›™ç‰su@L>¦ƒÁœ¼m,1ì tu·Ÿ­ëõ^VSÚ÷ÂmÙjסDÐØë#(Í%ùƒ7Í*_â‘®DQ j»8úúï;ç[›aþÏ7z ,Z‡– †#•§!ßNsNKêÈT\ÞÜâ<õÓ/ãiúµ´£¬?!ú;az[5 0Î}ë¼/VVW¿±ÕôÕ¾Ï+8Y¨•Ý +É4-¨šP[Y:!ß¡KeÁŸ4iäß|M—z?ǬSŒ a€Ðm>¨~ Ä+/ø4? 8çð„ýt&*ǹŠÇŒ½Ìä#=¥C F§¥ÊŠsó³lÔØ¸Z‰iìG€ý Ðl¬Âä'$5{>ÙçÅnßý¾ÐÂáa>HmÄC8†ù³tÍY3ª@ÆXyXIÒØ+a$R²–‹5^R!-¬t¼!$8£©õ%à= _Ç(¿ˆ®@± Vê_Ø£{׺b|¹NZp¨ZuÁ¼.ôºì¿t0 ÝŽ­ÝoÖ-2NZ9„c32‘0l¢1“V˜0ër¡ür¼iìu€D[Áñ§Ý)ü&œPe‰ ¡¤Ÿ‘IŠ‹sÛ¡WüÍŽ@(u£œr  d>i9òª˜íÅ-i†Ó@Þ½‚u0Þt¢@äÜ Ž=]ÁŸ¦ÁÄ`bASüp²e”Ê61fâ!2 =0Îù÷ƒtò3êÑ ˆ4#)ß(ôw^T³€OÞt ÂF³8!¸hÌY€ìOîƒ "Î+È"ç6ÀŠå=@úMJ‹› EîGo¥ŽÄF¨ìÓæŒÕÀò“ñåxp$B*¡ã-à=&Àq™0ŒDº&¿î X0Jèõ2xÇ¥@íÚæôiœ(ÐZ^ƒ;ClhÝðP"eùƒ‹|G[R°sL9€}9°-»žd4®HÄ3 o ú í—dò‹¿¡šªäb–P@iX£J9µP—z=%òš%3€e°/ †Ob: P’Hý›…òSß1¡ QÒ¯iÄßrUÔ¶ {†4[ÁŠ%kJ“-„²Ó€e®`ÝþƒÐI€’ÐÈ@nÚP`2!çÙßR@ìõ0U H˜ªEÂtÊ÷dè$àÿýCöšÿMt €q¦SŒ3`œéãLÿÃßùä<IEND®B`‚PyGreSQL-5.1/docs/_static/favicon.ico0000644000175100077410000002215613466770070017436 0ustar darcypyg00000000000000h6 ¨ž@@(F( 5Çÿ6Çÿ9Êÿ:Ëÿ;Êÿ<Íÿ=Îÿ>Ïÿ@ÑÿAÌÿAÑÿAÒÿBÒÿDÔÿEÕÿFÖÿHØÿIØÿIÙÿJÚÿKÚÿL×ÿMÜÿOÒÿOÝÿQàÿRàÿUäÿVÐÿVÔÿVäÿXÐÿZçÿ]áÿtáöN‚N„S…Q…ßÿ†S‡S‡])ˆWˆ[ˆ^*ˆâÿŠWŠ`,Šäÿ‹`(‹a-Œ]Œb.Œ¾ÃŒàÿc/e$Žd0d/e1b f2‘g2‘g3‘g4“l>•j1ž|U§Šl²éÿÒŸêåàñûÿýüüþýýþþþÿÿÿMMMMMMF MMMMMMMMMMMM MMMMMMMMMI @@=MMMMMMM7'.1@@@MMMMM @@@MMMM 69@@@MMMM @A @@@MLLM )@@J@@@@@KMMM 
@@4@@M@>>(+MM@@?@@M>:5%M#3MMMD@@@/><80-H-0MMMM@@@M><80,*-$MMMMMMMMMMMMBEMMMMMMMMMMMMMMMMMM( @4Åÿ4Æÿ5Åÿ5Çÿ6Çÿ6Èÿ7Èÿ7Éÿ8Éÿ9Êÿ9Ëÿ:Ëÿ:Ìÿ;Êÿ;Ìÿ<Íÿ=Çÿ=Îÿ>Îÿ>Ïÿ?Èÿ?Ìÿ?Ïÿ?Ðÿ@Ðÿ@ÑÿAÌÿAÑÿAÒÿBÒÿBÓÿCÓÿDÔÿEÕÿFÖÿGÖÿG×ÿH×ÿHØÿIØÿIÙÿJ×ÿJØÿJÚÿKÙÿKÚÿKÛÿLÖÿL×ÿLÚÿLÛÿLÜÿMÜÿOÒÿOÛÿOÝÿOÞÿPÑÿPÞÿQßÿQàÿRÓÿRàÿSÏÿSâÿTâÿUáÿUäÿVÐÿVÔÿVäÿWÑÿXÐÿYÑÿYÒÿZÒÿZçÿ\Óÿ]Õÿ]áÿ^Öÿ^×ÿ`×ÿ`çþbÙÿcÛÿeÜÿgÞÿlæÿnÞùnâùnèÿtáövëÿ€M€ÑâN‚N‚Oƒßÿƒèø„S„Üÿ…Q…ßÿ†S†S†X†áÿ‡S‡S‡T‡])‡ÖàˆTˆTˆUˆUˆWˆWˆ[ˆ^*ˆ^+ˆâÿˆãÿ‰V‰X‰Z‰[‰_+ŠWŠYŠ[Š\Š`,Ša.Šäÿ‹\‹`(‹a-Œ]Œ]Œ^Œ^Œb.Œ¾ÃŒàÿ```"c/d e$ßÿŽ` Žd0`c(c*d/e1b f2‘f1‘f2‘g2‘g3‘g4“i*“l>“‹q•j1•äÿž|U¤ƒa§‡g§Šl²éÿ¹¡‰»¤Ž¼íÿÁ®›Áðÿ®šÅ³£Ê»¬Í½­ÐÁ´ÒŸÓǾÙÎÅÝÒÊçùÿêäßêåàïêæðîìñûÿöóñöýÿ÷þÿøø÷ù÷öûúùüûúüüûýüüýýüþýýþþýþþþÿþþÿÿÿÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔ±?E¬ÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔ /ÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔDÔ  "'ÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔ™ÔÔÀ "&6¦®ÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔHÅÔ5 "&+¦¦¦¦¡ÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔJ !#&+_¦¦¦¦¦ÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔ’chl{|ˆ%'-4ª¦¦¦¦¦ÔÔÔÔÔÔÔÔÔÔÔfIKMNPRTUVW*-48—¦¦¦¦¦ÆÔÔÔÔÔÔÔÔÔ !$'-48<„¦¦¦¦¦²ÔÔÔÔÔÔÔÔ !$(-48<@t¦¦¦¦¦tÔÔÔÔÔÔÔÔ !$(.‘r˜@C}¦¦¦¦¦ŽÔÔÔÔÔÔÔ´  !¦¦248>Z¦S…¦¦¦¦¦¤Ô·ºÒÔÔÔG  "¦¦§:>AFqL“¦¦¦¦¦¦ÔsÒÔÒÔÔ9 ")¦¦¦¦§X[]dw¦¦¦¦¦¦¦”ÔÔÔÔÔÔ= "%m¦¦¦¦¿Î¦¦Ô¦¦¦¦¦¦¦¦ÐÔÔÔÔÔÔQ "&,¦¦¦¦¦Ô¦¦¦Ïœ¦¦¦¦¢¢ÔÔ»ÔÔÔÔ¶ "&+Y¦¦¦¦ŒÔ¦¦¦‰Ô¦¦¢¢¢¢ÔjÒvÔÔÔÔÔ "&+3u¦¦¦¦šÔ¦¦¦¦Ô¦¢¢¢¢oÔ ÔµÔÔÔÔ0&-47¦¦¦¦¦¥Ô¦¦¦¦Ô¢¢¢  Ôi›ÔŸ ÔÔÔÔÇ148;¦¦¦¦¦¥Ô¦¦¦¦Ô¢¢  ›Ô––Ô–›ÑÔÔÔÔÈOB\¦¦¦¦¦¥Ô¦¦¼•Ô¢ ››€ÔÔŠ–gÔÔÔÔÔÔÔ¯¦¦¦¦¦£ÔʃÁü  ›–ÔÄÉÏbÔÔÔÔÔÔÔ¥¦¦¦¦¦¦Ô«ÔÔ¢ ›–‹eÔÔÔ`†‹ÔÔÔÔÔÔÔ¥¦¦¦¦¦¦Ô¦¦¢¢  ›‹†yyÔ‹ËÔÔÔÔÔÔ­¦¦¦¦¦¦Í‚¦¢¢ ›–†yp¾y†½ÔÔÔÔÔÔЦ¦¦¦¦¦~Ô¦¢¢ ›–†y^Ôpy†ÒÔÔÔÔÔÔÔ¦¦¦¦¦¦Ô¨¢¢ ›–†xÔppyaÔÔÔÔÔÔÔÔÔn¦¦¦¦¦žÔ³¢  –kÓÔzyyy¹ÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÐÔÔÔ¸©‡°ÌÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔÔ(@€3Åÿ4Æÿ6Èÿ8Æÿ9Êÿ:Ìÿ=Çÿ=Éÿ=Íÿ?ÐÿAÌÿAÑÿEÑÿEÕÿHÓÿHÖÿIÙÿJÌÿLÖÿLÚÿMÜÿNÑÿPÞÿQÑÿRÎÿRÕÿRÛÿRàÿTÍÿTÚÿTâÿUÝÿVÐÿVÕÿVäÿYÑÿYÔÿYÝþYæÿ[èÿ]Ñÿ]Õÿ]Üÿ]áÿ^èÿ`×ÿ`çþbÙÿcÓÿdÛÿfÝÿgÚùhåülæÿmÖÿnÞùnâùnãÿnèÿoØÿpÞÿpêÿtÜòtáövëÿxãõyàòzâÿL€M€ÑâÕå‚F‚ÝÿƒCƒPƒTƒèø„M„Ûÿ„Ýÿ…R…W†U†V†X†Y†áÿ‡Y‡])‡ÖàˆUˆ[ˆ^*ˆâÿ‰Y‰[‰[‰\‰] ‰^%‰_,‰äÿŠZŠ[ Š[Š\Š\Š]Š`Š`Š`,‹]‹]‹`)‹ºÀ‹ÉÐŒZŒ[Œ\Œ\Œ]Œ]Œ^Œ^Œ^Œ`Œb.Œ¾ÃŒàÿ]```!a%b)d e$j0·»ßÿŽd*Žd0ad-yRb 
e-f2‘g4‘h3‘tK’g)’~\’—†“i*“l>“mA“‹q”j/”j0”j4”n>”Šn”u”z”—ˆ”¯¬”°¨•pD•äÿ—o;™¡”™¦™™áÿštHœ­¢zQž|V }U¢‚^¢‚a¤ƒa¦†b§‡f§Šl©íÿªŒkªŒmªŽq­q±•y²éÿ´›‚¶ž†·¡‹¹¡‰¹¢Œº¤Ž»ñÿ¼íÿ½§‘½¨“½¨”Á­šÁ¯žÁðÿų£Æ´¤È¶¥È¸©Ê»¬Ì¼­Í¾°ÐÁ´ÑĶÒŸÓǾÔÇ»ÔȽ×ÌÂÙÍÃÙÎÄÛÑÈÜÒÉÞÔËÞÕÍàÖÎáÙÓãÛÔåÞ×æÞØçàÙçùÿêäßêåàìçâíèãíéåñíêñîìñûÿòüÿôñîõòðöôñöüÿøöôùø÷úùøúþÿüûúüüûþþþÿÿÿþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþÀ0  !)ªöþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþO <úþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþO <þþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþ úþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþ( öþI *þþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþöþþþþ; ºþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþŒIþþþþé š•”¶æþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþ6 þþþþÈ  [•••••„¾þþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþ  ñþþþ m••••••••’þþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþ# !$  %•••••••••÷þþþþþþþþþþþþþþþþþþþþþþþþþþþþþþ# F••••••••••¸þþþþþþþþþþþþþþþþþþþþþþþþþþþþþþ$ §••••••••••[þþþþþþþþþþþþþþþþþþþþþþþþþþþþþþIIPPWW^^^^ffCž••••••••••_þþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþò—••••••••••ýþþþþþþþþþþþþþþþþþþþþöO ###$())))---//11122*ˆ••••••••••”õþþþþþþþþþþþþþþþþþþþ ƒ•••••••••••ãþþþþþþþþþþþþþþþþþþ j•••••••••••Äþþþþþþþþþþþþþþþþþ® [••••••••••• þþþþþþþþþþþþþþþþþ [•••••••••••[þþþþþþþþþþþþþþþþÈ [•••••••••••[þþþþþþþþþþþþþþþþ €‡[[‰t""[•••••••••••{þþþþþþþþþþþþþþþþ Š|T>™•Q4""_•••••••••••ýþþþþþþþþþþþþþþÈ n•••Q8{•_.&j•••••••••••”ùþêÌÂÔìýþþþþþþþ6 ‹•••••lB•A&_•••••••••••”õþg••p_Hãþþþþþ  z••••••‘""Z¨&,„••••••••••••Ðþô[Úýþþþýðþþþþ ¦••••••••¥""&&&'‡•••••••••••••|ýþþþþþþþþþþþþþ z•••••••••­55::@=M°[•••••••••••••••„Ñþþþþþþþþþþþþ s•••••••••‡÷þþ»_[þþ_••••••••••••••••[þþþþþþþþþþþþþ Q•••••••••àþü_•••íþ·•••••••••••••••‡ýþþôþþþþþþþþþþ! 
3”••••••••[þþ[••••xþü“•••••••••••••”·þþþÉþþþþþþþþþþ- ¤•••••••••âþÊ•••••“üþƒ•••••••••””””}þþJþüÖþþþþþþþþþf [••••••••ýþ[••••••³þð•••••••””””””Ìþð}þþNþþþþþþþþþÎ 7”••••••••zþþ…••••••jþþl••••”””””””[þþQýþ[Æþþþþþþþþþ ¬•••••••••gþþ†•••••••õþx••”””””””””áþÞ”ýþ[kþþþþþþþþþ! [•••••••••…þþ_•••••••×þ¾•””””””””ŽSþþSŽqþþgŽÌþþþþþþþþ< †•••••••••”üþ[•••••••ÂþÌ”””””””ŽŽŽÝþÖŽŽSþý†ŽSþþþþþþþþöG••••••••••”÷þ[•••••••¿þÑ”””””ŽŽŽŽkþþQŽŽQþùŽŽŽèþþþþþþþþ*£••••••••••”ôþv•••••••ËþË””””ŽŽŽŽŽ¡þðŽŽŽ_þŽ[þþþþþþþþö[••••••••••”óþx•••••••àþ±”””ŽŽŽŽŽŽëþ¯‚þ펎rýþþþþþþþþÇ{••••••••••”ôþg•••••••õþg””ŽŽŽŽŽŽrýþKQþõŽÒþþþþþþþþþö9+?”••••••••••”ùþQ•••}ØÆ…þþ[”ŽŽŽŽŽŽaþþTbKþýrQþþþþþþþþþþþþþþþÞ•••••••••••”üþu”•”øþý¼þýŽŽŽŽŽŽcþþQäù½XþþTdýþþþþþþþþþþþþþþ¸•••••••••••”ûþþø«_½êÁîþØ”ŽŽŽŽŽîþÍðøøRüþKoçþþþþþþþþþþþþþþ–•••••••••••”÷þýþþþýüþþýkŽŽŽŽŽoKþþýâÕÜýþEooo±þþþþþþþþþþþþþþ”••••••••••••ëþ ïþþþþë_”ŽŽŽŽoooQüþþþþþþEooooKþþþþþþþþþþþþþþ•••••••••••••ÐþÆ••…_Sh”””ŽŽŽŽooooeYDŽ´~þþReoooTþþþþþþþþþþþþþþ”••••••••••••˜þí••••””””ŽŽŽŽŽoooee]]]]Òþßeeeoodùþþþþþþþþþþþþþ”••••••••••••_þþ†•••””””ŽŽŽŽoooee]]]]EþþD]eeoooèþþþþþþþþþþþþþ²••••••••••••”üþ_•••””””ŽŽŽŽoooee]]YYëþÞ]]]eeooÙþþþþþþþþþþþþþÛ•••••••••••••Åþä•••””””ŽŽŽŽoooe]]YY©þþLY]]eeooçþþþþþþþþþþþþþý••••••••••••_þþ_••””””ŽŽŽŽoooe]]YEþþSYY]]eeo\ýþþþþþþþþþþþþþþv•••••••••••••âþå••””””ŽŽŽŽoooe]]DþþÅYYY]]eeogþþþþþþþþþþþþþþþü‡••••••••••••_þþ›•””””ŽŽŽŽoooe]_þþÛYYYY]]eeEþþþþþþþþþþþþþþþþþç“••••••••••••Ÿþþ‚””””ŽŽŽŽoooVÒþþÔYYYY]]]eSþþþþþþþþþþþþþþþþþþþ÷Q•••••••••••¾þþƆ””ŽŽŽŽU²ýþþ¢]]]]]]]RÓþþþþþþþþþþþþþþþþþþþþþþð³[_yy_Qw¿ëýþþþþýËSi‡r`K“åþþþóEe]]]]YEÃþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþýýýþþþþþþϵœoo¹ßûþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþþPyGreSQL-5.1/docs/start.txt0000644000175100077410000000035713466770070015564 0ustar darcypyg00000000000000.. 
PyGreSQL index page without toc (for use with cloud theme) Welcome to PyGreSQL =================== .. toctree:: :hidden: copyright announce download/index contents/index community/index .. include:: about.txtPyGreSQL-5.1/docs/_templates/0000755000175100077410000000000013470245541016011 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/_templates/layout.html0000644000175100077410000000307013466770070020221 0ustar darcypyg00000000000000{%- extends "cloud/layout.html" %} {% set css_files = css_files + ["_static/pygresql.css"] %} {# This layout adds a page header above the standard layout. It also removes the relbars from all pages that are not part of the core documentation in the contents/ directory, adapting the navigation bar (breadcrumb) appropriately. #} {% set is_content = pagename.startswith(('contents/', 'genindex', 'modindex', 'py-', 'search')) %} {% if is_content %} {% set master_doc = 'contents/index' %} {% set parents = parents[1:] %} {% endif %} {% block header %} {% endblock %} {% block relbar1 -%} {%- if is_content -%} {{ super() }} {% else %}
{%- endif -%} {%- endblock %} {% block relbar2 -%} {%- if is_content -%} {{ super() }} {%- else -%}
{%- endif -%} {%- endblock %} {% block content -%} {%- if is_content -%} {{ super() }} {%- else -%}
{{ super() }}
{%- endif -%} {%- endblock %} PyGreSQL-5.1/docs/community/0000755000175100077410000000000013470245541015700 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/community/index.rst0000644000175100077410000000073513466770070017553 0ustar darcypyg00000000000000PyGreSQL Development and Support ================================ PyGreSQL is an open-source project created by a group of volunteers. The project and the development infrastructure are currently maintained by D'Arcy J.M. Cain. We would be glad to welcome more contributors so that PyGreSQL can be further developed, modernized and improved. .. include:: mailinglist.rst .. include:: source.rst .. include:: bugtracker.rst .. include:: support.rst .. include:: homes.rst PyGreSQL-5.1/docs/community/support.rst0000644000175100077410000000106413466770070020154 0ustar darcypyg00000000000000Support ------- **Python**: see http://www.python.org/community/ **PostgreSQL**: see http://www.postgresql.org/support/ **PyGreSQL**: Join `the PyGreSQL mailing list `_ if you need help regarding PyGreSQL. Please also send context diffs there, if you would like to proposes changes. Please note that messages to individual developers will generally not be answered directly. All questions, comments and code changes must be submitted to the mailing list for peer review and archiving purposes. PyGreSQL-5.1/docs/community/bugtracker.rst0000644000175100077410000000140013466770070020563 0ustar darcypyg00000000000000Bug Tracker ----------- We are using `Trac `_ as an issue tracker. Track tickets are usually entered after discussion on the mailing list, but you may also request an account for the issue tracker and add or process tickets if you want to get more involved into the development of the project. 
You can use the following links to get an overview: * `PyGreSQL Issues Tracker `_ * `Timeline with all changes `_ * `Roadmap of the project `_ * `Lists of active tickets `_ * `PyGreSQL Trac browser `_PyGreSQL-5.1/docs/community/homes.rst0000644000175100077410000000024013466770070017546 0ustar darcypyg00000000000000Project home sites ------------------ **Python**: http://www.python.org **PostgreSQL**: http://www.postgresql.org **PyGreSQL**: http://www.pygresql.orgPyGreSQL-5.1/docs/community/source.rst0000644000175100077410000000066113466770070017742 0ustar darcypyg00000000000000Access to the source repository ------------------------------- We are using a central `Subversion `_ source code repository for PyGreSQL. The current trunk of the repository can be checked out with the command:: svn co svn://svn.pygresql.org/pygresql/trunk You can also browse through the repository using the `PyGreSQL Trac browser `_. PyGreSQL-5.1/docs/community/mailinglist.rst0000644000175100077410000000056113466770070020755 0ustar darcypyg00000000000000Mailing list ------------ You can join `the mailing list `_ to discuss future development of the PyGreSQL interface or if you have questions or problems with PyGreSQL that are not covered in the :doc:`documentation <../contents/index>`. This is usually a low volume list except when there are new features being added. 
PyGreSQL-5.1/docs/_build/0000755000175100077410000000000013470245541015112 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/_build/html/0000755000175100077410000000000013470245541016056 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/_build/html/searchindex.js0000644000175100077410000013301413470245537020720 0ustar darcypyg00000000000000Search.setIndex({docnames:["about","announce","community/index","contents/changelog","contents/examples","contents/general","contents/index","contents/install","contents/pg/adaptation","contents/pg/connection","contents/pg/db_types","contents/pg/db_wrapper","contents/pg/index","contents/pg/introduction","contents/pg/large_objects","contents/pg/module","contents/pg/notification","contents/pg/query","contents/pgdb/adaptation","contents/pgdb/connection","contents/pgdb/cursor","contents/pgdb/index","contents/pgdb/introduction","contents/pgdb/module","contents/pgdb/typecache","contents/pgdb/types","contents/postgres/advanced","contents/postgres/basic","contents/postgres/func","contents/postgres/index","contents/postgres/syscat","contents/tutorial","copyright","download/index","index"],envversion:53,filenames:["about.rst","announce.rst","community/index.rst","contents/changelog.rst","contents/examples.rst","contents/general.rst","contents/index.rst","contents/install.rst","contents/pg/adaptation.rst","contents/pg/connection.rst","contents/pg/db_types.rst","contents/pg/db_wrapper.rst","contents/pg/index.rst","contents/pg/introduction.rst","contents/pg/large_objects.rst","contents/pg/module.rst","contents/pg/notification.rst","contents/pg/query.rst","contents/pgdb/adaptation.rst","contents/pgdb/connection.rst","contents/pgdb/cursor.rst","contents/pgdb/index.rst","contents/pgdb/introduction.rst","contents/pgdb/module.rst","contents/pgdb/typecache.rst","contents/pgdb/types.rst","contents/postgres/advanced.rst","contents/postgres/basic.rst","contents/postgres/func.rst","contents/postgres/index.rst","contents/postgres/syscat.rst","contents/tutorial
.rst","copyright.rst","download/index.rst","index.rst"],objects:{"":{pg:[12,0,0,"-"],pgdb:[21,0,0,"-"]},"pg.Connection":{backend_pid:[9,3,1,""],cancel:[9,4,1,""],close:[9,4,1,""],date_format:[9,4,1,""],db:[9,3,1,""],describe_prepared:[9,4,1,""],endcopy:[9,4,1,""],error:[9,3,1,""],fileno:[9,4,1,""],get_cast_hook:[9,4,1,""],get_notice_receiver:[9,4,1,""],getline:[9,4,1,""],getlo:[9,4,1,""],getnotify:[9,4,1,""],host:[9,3,1,""],inserttable:[9,4,1,""],locreate:[9,4,1,""],loimport:[9,4,1,""],options:[9,3,1,""],parameter:[9,4,1,""],port:[9,3,1,""],prepare:[9,4,1,""],protocol_version:[9,3,1,""],putline:[9,4,1,""],query:[9,4,1,""],query_prepared:[9,4,1,""],reset:[9,4,1,""],server_version:[9,3,1,""],set_cast_hook:[9,4,1,""],set_notice_receiver:[9,4,1,""],socket:[9,3,1,""],ssl_attributes:[9,3,1,""],ssl_in_use:[9,3,1,""],status:[9,3,1,""],transaction:[9,4,1,""],user:[9,3,1,""]},"pg.DB":{"delete":[11,4,1,""],abort:[11,4,1,""],adapter:[11,3,1,""],begin:[11,4,1,""],clear:[11,4,1,""],commit:[11,4,1,""],db:[11,3,1,""],dbname:[11,3,1,""],dbtypes:[11,3,1,""],decode_json:[11,4,1,""],delete_prepared:[11,4,1,""],describe_prepared:[11,4,1,""],encode_json:[11,4,1,""],end:[11,4,1,""],escape_bytea:[11,4,1,""],escape_identifier:[11,4,1,""],escape_literal:[11,4,1,""],escape_string:[11,4,1,""],get:[11,4,1,""],get_as_dict:[11,4,1,""],get_as_list:[11,4,1,""],get_attnames:[11,4,1,""],get_databases:[11,4,1,""],get_parameter:[11,4,1,""],get_relations:[11,4,1,""],get_tables:[11,4,1,""],has_table_privilege:[11,4,1,""],insert:[11,4,1,""],notification_handler:[11,2,1,""],pkey:[11,4,1,""],prepare:[11,4,1,""],query:[11,4,1,""],query_formatted:[11,4,1,""],query_prepared:[11,4,1,""],release:[11,4,1,""],rollback:[11,4,1,""],savepoint:[11,4,1,""],set_parameter:[11,4,1,""],start:[11,4,1,""],truncate:[11,4,1,""],unescape_bytea:[11,4,1,""],update:[11,4,1,""],upsert:[11,4,1,""],use_regtypes:[11,4,1,""]},"pg.DbTypes":{get_attnames:[10,4,1,""],get_typecast:[10,4,1,""],reset_typecast:[10,4,1,""],set_typecast:[10,4,1
,""],typecast:[10,4,1,""]},"pg.LargeObject":{"export":[14,4,1,""],close:[14,4,1,""],error:[14,3,1,""],oid:[14,3,1,""],open:[14,4,1,""],pgcnx:[14,3,1,""],read:[14,4,1,""],seek:[14,4,1,""],size:[14,4,1,""],tell:[14,4,1,""],unlink:[14,4,1,""],write:[14,4,1,""]},"pg.Notice":{detail:[9,3,1,""],hint:[9,3,1,""],message:[9,3,1,""],pgcnx:[9,3,1,""],primary:[9,3,1,""],severity:[9,3,1,""]},"pg.NotificationHandler":{close:[16,4,1,""],listen:[16,4,1,""],notify:[16,4,1,""],unlisten:[16,4,1,""]},"pg.Query":{dictiter:[17,4,1,""],dictresult:[17,4,1,""],fieldname:[17,4,1,""],fieldnum:[17,4,1,""],getresult:[17,4,1,""],listfields:[17,4,1,""],namediter:[17,4,1,""],namedresult:[17,4,1,""],ntuples:[17,4,1,""],one:[17,4,1,""],onedict:[17,4,1,""],onenamed:[17,4,1,""],onescalar:[17,4,1,""],scalariter:[17,4,1,""],scalarresult:[17,4,1,""],single:[17,4,1,""],singledict:[17,4,1,""],singlenamed:[17,4,1,""],singlescalar:[17,4,1,""]},"pgdb.Connection":{autocommit:[19,3,1,""],close:[19,4,1,""],closed:[19,3,1,""],commit:[19,4,1,""],cursor:[19,4,1,""],cursor_type:[19,3,1,""],rollback:[19,4,1,""],type_cache:[19,3,1,""]},"pgdb.Cursor":{arraysize:[20,3,1,""],build_row_factory:[20,4,1,""],close:[20,4,1,""],colnames:[20,3,1,""],coltypes:[20,3,1,""],copy_from:[20,4,1,""],copy_to:[20,4,1,""],description:[20,3,1,""],execute:[20,4,1,""],executemany:[20,4,1,""],fetchall:[20,4,1,""],fetchmany:[20,4,1,""],fetchone:[20,4,1,""],row_factory:[20,4,1,""],rowcount:[20,3,1,""]},"pgdb.TypeCache":{get_fields:[24,4,1,""],get_typecast:[24,4,1,""],reset_typecast:[24,4,1,""],set_typecast:[24,4,1,""],typecast:[24,4,1,""]},pg:{Bytea:[15,1,1,""],Connection:[9,2,1,""],DB:[11,2,1,""],DbTypes:[10,2,1,""],HStore:[15,1,1,""],INV_READ:[15,5,1,""],INV_WRITE:[15,5,1,""],Json:[15,1,1,""],LargeObject:[14,2,1,""],Literal:[15,1,1,""],NotificationHandler:[16,2,1,""],Query:[17,2,1,""],SEEK_CUR:[15,5,1,""],SEEK_END:[15,5,1,""],SEEK_SET:[15,5,1,""],TRANS_ACTIVE:[15,5,1,""],TRANS_IDLE:[15,5,1,""],TRANS_INERROR:[15,5,1,""],TRANS_INTRANS:[15,5,1,"
"],TRANS_UNKNOWN:[15,5,1,""],__version__:[15,5,1,""],cast_array:[15,1,1,""],cast_record:[15,1,1,""],connect:[15,1,1,""],escape_bytea:[15,1,1,""],escape_string:[15,1,1,""],get_array:[15,1,1,""],get_bool:[15,1,1,""],get_bytea_escaped:[15,1,1,""],get_datestyle:[15,1,1,""],get_decimal:[15,1,1,""],get_decimal_point:[15,1,1,""],get_defbase:[15,1,1,""],get_defhost:[15,1,1,""],get_defopt:[15,1,1,""],get_defpasswd:[15,1,1,""],get_defport:[15,1,1,""],get_defuser:[15,1,1,""],get_jsondecode:[15,1,1,""],get_typecast:[15,4,1,""],set_array:[15,1,1,""],set_bool:[15,1,1,""],set_bytea_escaped:[15,1,1,""],set_datestyle:[15,1,1,""],set_decimal:[15,1,1,""],set_decimal_point:[15,1,1,""],set_defbase:[15,1,1,""],set_defhost:[15,1,1,""],set_defopt:[15,1,1,""],set_defpasswd:[15,1,1,""],set_defport:[15,1,1,""],set_defuser:[15,1,1,""],set_jsondecode:[15,1,1,""],set_typecast:[15,4,1,""],unescape_bytea:[15,1,1,""],version:[15,5,1,""]},pgdb:{Binary:[25,1,1,""],Connection:[19,2,1,""],Cursor:[20,2,1,""],DataError:[23,6,1,""],DatabaseError:[23,6,1,""],Date:[25,1,1,""],DateFromTicks:[25,1,1,""],Error:[23,6,1,""],Hstore:[25,1,1,""],IntegrityError:[23,6,1,""],InterfaceError:[23,6,1,""],Interval:[25,1,1,""],Json:[25,1,1,""],Literal:[25,1,1,""],NotSupportedError:[23,6,1,""],OperationalError:[23,6,1,""],ProgrammingError:[23,6,1,""],Time:[25,1,1,""],TimeFromTicks:[25,1,1,""],Timestamp:[25,1,1,""],TimestampFromTicks:[25,1,1,""],Type:[25,2,1,""],TypeCache:[24,2,1,""],Uuid:[25,1,1,""],Warning:[23,6,1,""],apilevel:[23,5,1,""],connect:[23,1,1,""],get_typecast:[23,4,1,""],paramstyle:[23,5,1,""],reset_typecast:[23,4,1,""],set_typecast:[23,4,1,""],threadsafety:[23,5,1,""]}},objnames:{"0":["py","module","Python module"],"1":["py","function","Python function"],"2":["py","class","Python class"],"3":["py","attribute","Python attribute"],"4":["py","method","Python method"],"5":["py","data","Python data"],"6":["py","exception","Python 
exception"]},objtypes:{"0":"py:module","1":"py:function","2":"py:class","3":"py:attribute","4":"py:method","5":"py:data","6":"py:exception"},terms:{"":[0,6,7,8,10,11,14,15,18,20,23,24,25,26,27,28,31],"0x80000000":3,"24e":26,"583e":26,"64bit":1,"694e":26,"913e":26,"boolean":[3,10,11,12,24,25],"break":[3,8,11],"byte":[3,8,11,14,15,18,20,25],"case":[3,8,9,11,15,16,18,19,20,23,24,25,27],"catch":23,"char":[8,18,25],"class":[0,3,8,9,10,12,13,14,15,16,17,18,19,20,23,24,25,27,31],"default":[3,7,8,10,11,12,16,18,19,20,23,24,25,31],"export":[3,12,25],"final":[3,7,8,11,18,27,31],"float":[3,8,11,15,16,18,25],"function":[3,8,10,11,12,13,14,16,17,18,19,20,21,24,25,29,31],"import":[3,7,8,11,12,15,18,23,26,27,28,30,31],"int":[3,8,9,10,11,14,15,16,17,18,20,23,24,25,27,31],"long":[3,8,9,18,20,25],"new":[0,2,3,6,7,8,9,10,11,14,15,16,17,20,21,23,24,25,27,28,31,34],"null":[3,15,18,20,25],"public":[3,27,31],"return":[3,8,9,10,12,14,16,18,20,21,23,24,25,27,28,30,31],"switch":[3,5,23],"true":[3,7,8,9,11,15,18,19,20,25,31],"try":[7,8,18,19,27,28,31],"while":[3,8,11,18,23],AND:[8,18,27,30,32],Added:3,And:[27,30],BUT:32,But:[8,18,27],FOR:32,For:[0,3,7,8,10,11,15,18,20,23,24,25,26,28,30,31,33],INTO:[7,8,18,26,27,28],ITS:32,Las:26,NOT:[30,32],ONE:28,One:8,SUCH:32,THE:32,That:8,The:[0,2,3,4,5,7,8,13,14,15,17,18,22,23,25,27,28,29,30,31,33,34],Then:31,There:[0,1,4,8,11,27],These:[7,9,10,11,14,15,19,20,23,24],USE:32,Use:3,Used:25,Using:[8,18,31],With:[0,8,11,18],__doc__:3,__init__:[8,11,18],__pg_repr__:[8,18],__pg_str__:8,__str__:[8,18],__version__:[3,15],_asdict:20,_make:20,_pg:[3,7,33],_quot:3,abandon:12,abl:[3,14,15],abort:11,about:[3,7,8,9,10,11,14,15,17,18,20,24,25,30,34],abov:[7,10,11,15,27,32],absent:16,accept:3,access:[3,7,9,11,13,15,17,27,31,34],accomplish:18,accord:[9,10,24],accordingli:[8,18,25],account:[2,3,15],accur:3,achiev:[3,8,18,27],activ:[2,15],actual:[3,11,18,30],adam:3,adapt:[3,11,12,15,21],add:[2,3,7,8,9,11,15,18,23,30,31],add_em:28,added:[2,3,7,9,11],adding:[8,18],addit:[3,10,
15,23,24,27,31],addition:25,adjac:15,adjust:11,advanc:[3,29,31],advis:32,affect:[3,9,14,15,20,23,27],affix:3,after:[2,3,8,11,27],again:[3,8,11,18,20,27,31],against:20,age:28,aggfnoid:30,aggreg:[3,29],ago:3,agreement:32,alet:3,algorithm:20,alias:27,all:[0,2,3,7,8,9,10,11,13,14,15,16,18,19,21,23,24,26,27,30,31,32,33],alloc:[9,15,23],allow:[0,3,9,11,14,15,17,18,23,27],alreadi:[3,7,8,9,10,11,14,15,23,24,26,27,28,30],also:[0,1,2,3,7,8,9,10,11,14,15,16,17,18,19,20,23,24,25,27,28,31,33],alter:[8,15],altern:[9,20],although:11,altitud:26,alum:3,alwai:[3,8,9,10,11,15,16,17,18,19,20,23,24,25],ambigu:[11,18],amd64:33,amnam:30,amop:30,amopfamili:30,amopopr:30,amount:[3,11,31],andi:28,andr:[0,3,32],andrew:22,ani:[3,9,11,14,16,17,19,20,27,28,32],announc:[33,34],anonym:3,anoth:[5,8,10,11,14,15,16,18,23,24,27,31],ansi:3,answer:[2,28],anyarrai:[3,23],anyon:32,anyth:[11,31],anywai:[3,11,17],api:[3,5,6,9,11,13,19,20,22,23,24,25,33],apilevel:23,appear:[11,32],appl:31,appli:[19,30],applic:[0,4,8,20],application_nam:[3,9],appropri:[3,7],arbitrari:[3,9],archiv:2,arci:[0,1,2,8,32],arg1:11,arg2:11,arg:[3,9,11,30],arg_dict:[11,16],argument:[3,8,9,10,11,15,16,17,18,20,23,24,28,31],aris:32,around:[18,27],arrai:[3,8,10,11,12,17,18,23,24,25,29,30],array_low:[8,18],arrays:21,assign:20,associ:[10,14,16,20,24],assort:3,assum:[8,11,12,18,26,28,30],asynchron:16,atom:11,attack:[8,15,18],attempt:[19,20],attisdrop:30,attnam:[3,10,30],attnum:30,attrelid:30,attribut:[3,10,12,16,21,23,24,25,26,29,31],atttypid:30,atttypmod:30,augment:3,author:32,authorit:[5,22],autocommit:[3,19,31],automat:[3,7,8,9,11,15,17,18,19,31],auxiliari:12,avail:[0,1,3,7,8,11,15,16,18,20,33],avoid:[3,7,11,18],awai:3,back:[3,8,11,18,21,31],backend:9,backend_pid:[3,9],backslash:[3,11,15],backward:3,bad:[9,11,14,15,17,18],banana:31,bang:3,bar:[11,28],base:[0,3,10,11,15,16,20,23,24,25,26,29,32],basi:32,basic:[3,8,10,11,13,15,18,26,28,29,30,31],batch:31,bdfl:0,beamnet:3,becaus:[8,11,14,16,18],becom:[0,3,8,19],been:[1,3,5,7,8,9,10,11,12,16,
18,19,20,22,27,29,31,32,33],befor:[3,8,11,14,20,28,31],begin:[3,12],beginn:5,behavior:[3,8,11,18,19,20],behind:8,being:[2,9,10,11,15,18,20,27],below:[3,7,9,11,19,25,27],berkelei:3,besid:[10,15],best:[0,11,20],beta:33,better:[3,8,18],between:[3,8,15,18,27,31],bigfoot:3,bigint:25,bill:[26,28],bin:3,binari:[3,11,12,20,25,30],binary_op:30,bind:[8,25],blank:3,block:[9,11,19],bob:15,bojnourdi:3,bond:18,bookkeep:30,bool:[3,8,11,12,16,18,20,25],boot:7,both:[1,3,5,8,16,18,20,31],bottom:4,bound:[3,8,10,18,20,24,25],bouska:3,box:[8,18],bpchar:[8,18],brace:[15,26],breakfast:26,brian:3,brit:3,broken:3,brows:2,browser:2,broytmann:4,bsd:0,buffer:[9,14,20],bug:[3,34],build:[3,8,11,12,17,18,20,33],build_row_factori:20,builder:20,built:[0,1,3,8,9,18,23,27],bump:3,bunch:31,bytea:[3,8,12,18,25],bytes_l:25,cach:[3,9,12,15,18,21,23],cain:[0,1,2,32],calcul:[8,18],call:[0,3,7,8,9,10,11,14,15,16,17,18,19,21,23,24,31],callabl:[9,15,20],callback:[9,11,16],caller:[3,11,16],callproc:[3,21],can:[0,2,3,4,5,7,8,9,10,11,14,15,16,17,18,19,20,23,24,25,26,27,28,29,30,31,33],cancel:12,candi:28,cannot:[11,15,18,19,20],capabl:[23,25],capit:[26,31],care:[8,11],carefulli:18,carol:26,carri:[3,10,24],cascad:[11,28],cast:[3,8,10,11,15,18,23,24],cast_arrai:[3,12],cast_circl:[8,18],cast_hstor:3,cast_item:[8,18],cast_json:18,cast_record:[3,15],cast_tupl:[8,18],caster:3,catalog:[11,29],categori:[10,24],caus:[3,11,15,16,18,19],central:2,certain:[3,9,10,11,15,23,24,27],cfg:33,cgi:33,chain:20,champion:3,chang:[1,2,3,6,8,9,11,14,15,18,19,20,23,27,30,31,34],changelog:[1,33],channel:16,chapter:[3,29,33],charact:[8,11,15,20],charli:3,cheap:9,check:[2,3,8,9,12,14,15,18,23,33],cherimaya:31,chifungfan:3,chimai:[0,32],choos:[12,13,22,31],chri:3,circl:[8,18],citi:[26,27],clair:28,clarifi:3,clash:3,class_nam:30,classic:[3,5,6,8,11,13,22,26,27,29,30,33],classifi:3,claus:[3,8,11,27],clean:3,clean_emp:28,cleanli:3,cleans:8,cleanup:3,clear:[0,3,8,12,25,27],client:[3,5,7,12,16,20],client_encod:9,client_min_messag:9,close:[3,11,12,
16,21,31],cls:15,clue:8,cnri:3,coars:[3,10],code:[0,2,3,4,7,9,10,15,18,19,20,23,24,25,32,33],col:11,collect:[1,3,4,8,18,33],colnam:[3,20],coltyp:[3,20],column:[3,8,11,17,18,21,25,27,28,30,31],com:3,combin:0,come:[3,7,9,15,16,23],comma:15,command:[2,3,7,8,12,15,16,18,19,20,23,27,28,30,31,33],comment:2,commerci:0,commit:[3,12,20,21,31],common:[15,18,23],commun:2,compar:[0,8,25],compat:[3,33],compil:3,complain:18,complement:3,complet:[9,11,23],complianc:3,compliant:[0,5,6,11,13,20,22,23,31,33],complic:27,compos:15,composit:[3,8,10,11,14,15,18,24,29],compromis:27,comput:[5,7],con1:15,con2:15,con3:15,con4:15,con:[9,15,18,19,20,23,25,31],concaten:8,concept:[5,8,18],concern:[3,14],condit:[11,20,27],configur:[3,8,11,33],conflict:12,conform:[11,19],confus:[3,27],connect:[3,5,7,8,10,12,13,14,16,17,18,20,21,22,24,26,28,29,30,31],connect_timeout:15,connection_handl:16,consequenti:32,consid:[3,8,23],consider:20,consist:[3,5],consol:9,constant:[3,9,11,12,14,21],constraint:[3,11],construct:[15,25],constructor:[3,18,21],consult:26,contain:[3,7,8,9,11,15,17,18,20,23,25],content:[11,14,34],context:[2,3,8,11,15,19,20],continu:[8,10,24],contribut:[3,32],contributor:2,control:[3,11,21,24],conveni:[3,5,11,13,27,31],convers:[3,8,11,12,15,18],convert:[3,8,11,15,17,18,20,23],copi:[3,7,9,20,31,32],copy_from:[3,20,31],copy_to:[3,20,31],copyright:[0,34],correct:[3,17],correctli:[3,8,15,18],correspond:[3,8,9,10,11,15,18,27],could:[3,8,9,18,23,25],count:[8,18],cours:[8,18,27,31],cover:[2,3,13,22],creat:[2,3,8,12,14,16,18,20,25,26,29,30,31,33],creation:[3,9,14],csua:3,csv:20,cur:[18,20,31],currenc:15,current:[0,2,3,7,8,10,11,12,14,15,18,19,20,23,24,27,31,34],current_timestamp:3,cursor:[3,14,18,21,24,25,31],cursor_typ:[19,20],custom:[3,8,12,18,19,20],customiz:3,cuteri:3,cvsweb:33,dai:[25,27],damag:32,danger:8,darci:[0,1,32],dat:15,data:[0,3,9,10,12,14,20,21,23,25,26,29,31],databas:[0,1,3,4,5,7,8,12,13,14,16,17,18,19,21,22,23,25,26,28,29,30,31,33],databaseerror:[3,20,23],dataerror:23,datastr:[11,15
],date:[3,8,12,18,25,27],date_format:[12,15],datebas:16,datefromtick:25,datestyl:[9,11,15,27],datetim:[3,8,9,18,25],db_ride:11,dbapi:3,dbname:[11,15,27,31],dbtype:[8,11,12,15],dbutil:5,deactiv:[3,15],deal:15,dealloc:[11,14],debug:3,decim:[3,8,12,18],decod:[3,12,18,20],decode_json:12,def:[8,11,18,20],defbas:15,defhost:15,defin:[9,11,14,15,17,23,25,26,29],definit:[9,14,15,17,33],defopt:15,defpasswd:15,defport:15,defus:15,degre:27,delet:[3,9,12,14,16,19,20,28,29,31],delete_prepar:12,delim:[10,15,24],delimit:[10,15,20,24],deliv:20,demo:4,demonstr:[27,29],denot:16,depend:[3,5,11,20],deprec:[3,17,20],dept:28,derefer:14,dereferenc:14,deriv:[9,15],descend:[11,26],describ:[12,15,19,20,23,25],describe_prepar:12,descript:[3,9,11,15,19,21,24,25],descriptor:9,deseri:[11,15],design:15,desir:19,destroi:11,desynchron:9,detail:[0,3,7,8,9,10,11,18,19,21,24,31],detect:25,determin:[8,11,20],devel:7,develop:[0,3,5,22,34],dice:[8,18],dict:[3,8,9,12,15,16,17,18,20,23,25,27,31],dictcursor:20,dictionari:[3,8,9,10,12,15,16,18,19,24,25,27,31],dictit:[3,9,12],dictresult:[3,9,11,12,27,31],did:[8,9,11,20],didn:18,diff:2,differ:[8,11,15,16,18,20,23,27,31],dig:11,dildog:3,dimension:[3,15],direct:[3,7,9,32],directli:[2,3,7,8,9,11,15,16,17,18,31],directori:[1,7,33],disabl:[3,9,15],discard:[11,31],discern:31,disclaim:32,disconnect:23,discov:27,discuss:2,disk:[11,30],displai:17,display_s:20,distinct:27,distribut:[0,4,32,34],distutil:3,divis:23,dll:7,dml:20,dno_def_var:[7,15],dno_direct:[7,9],dno_larg:[7,9],dno_pqsocket:7,doc:33,docstr:3,document:[2,3,7,10,13,22,24,29,32,33,34],doe:[3,8,9,11,15,17,18,25,27,31],doesn:[3,7,9],doing:[9,27,31],don:[3,7,8,9,11,15,16,18,19,27],done:[7,8,18,20,27],doubl:[3,8,9],double_salari:28,download:[7,34],dql:20,dream:28,driver:[3,5],drop:[3,27,28,30,31],druid:[0,3,32],dsn:23,due:[11,20,23],dump:[11,14,25,31],duplic:[9,11,14,15,17,27,31],dure:[9,11,15,23],durian:31,dust:3,dynam:[0,20],dyson:3,each:[3,9,11,20,25,27,30],earlier:[3,15],eas:[8,18],easi:[0,7],easier:[5,18],ea
sili:[0,8,15,18,20,31],ebeon:3,ecp:[0,3,32],edu:3,eevolut:3,effect:[3,8,9,11,15,18,19,20],effici:[3,17,20],eggfruit:31,ein:[7,11],either:[4,8,10,11,13,16,22,26,31],element:[3,8,15,18,20,31],els:[11,13,22,31],emb:[0,9,14],embed:[8,11],emc:3,emp:28,emphas:18,employe:[9,11,15,26,28],empti:[3,9,12,17,20,27],enabl:[3,9,10,11],encapsul:[3,11],encod:[12,23,25],encode_json:11,end:[11,19,31],endcopi:12,enhanc:[3,32],enough:[8,18],ensur:9,enter:[2,27],enterpris:0,entri:[3,11,20],enumer:31,env:3,environ:[5,7,14,15],equal:[3,10,18,24,25],equival:[8,11,18,28],error:[3,8,9,11,14,15,17,18,19,20,21,27],escap:[3,8,9,12,18],escape_byt:11,escape_bytea:[11,12],escape_identifi:11,escape_liter:12,escape_str:[3,8,11,12],especi:[3,15],essenti:[10,20,24],establish:11,etc:[3,8,9,10,11,28],evalu:3,even:[0,3,8,11,15,18,20,31,32],event:[11,16,32],ever:9,everi:[3,9,11,15,18,19,28],everyth:11,exact:[8,14,18],exactli:[3,9,14,15,17,27],exampl:[6,7,8,9,11,13,15,18,23,25,29,31],except:[0,2,3,8,9,11,15,19,20,23],exclud:11,exe:33,execut:[3,7,8,12,17,18,19,21,25,27,31],executemani:[3,21,25,31],exist:[3,5,9,11,23,27],expand:3,expect:[18,20],expens:3,explain:[3,8,11,18,26,27,28,30],explan:[8,18],explicitli:[9,11,19,27],exploit:[8,18],expos:3,express:[11,27,28],extend:[20,23],extens:[0,3,5,7,8,9,10,15,31,33],extern:9,extra:[3,9,16,26],extract:28,facto:0,factori:[3,20],fail:[9,23],fallback:12,fals:[3,8,9,11,15,19,20,25],famili:29,far:[4,27],fast:[3,12],faster:[11,31],favicon:3,featur:[0,2,3,5,7,11,18,27,29,31],fed:15,fee:32,feet:26,fetch:[3,11,17,18,21,31],fetchal:[3,18,21,31],fetchmani:[21,31],fetchon:[3,18,21,31],few:[3,4,11,15],fewer:20,field:[3,9,10,11,12,15,20,24,25,26,31],fieldnam:[9,11,12],fieldnum:[9,11,12],fig:31,file:[1,3,4,7,12,15,20,34],fileno:[3,12],fill:27,filonenko:3,find:[3,5,7,8,17,18,26,27,33],fine:[10,11],first:[3,6,7,8,9,11,14,17,18,19,26,27],fit:32,fix:[3,12],flag:[3,11,15,16],flavor:31,float4:[8,18],float8:[8,18,27],follow:[2,3,7,8,9,10,11,13,15,16,17,18,19,20,22,23,24,25,26,27,28,30,3
1,32],foo:[11,28],foo_bar_t:11,forbid:9,forc:3,foreign:[11,23],forget:15,form:[5,8,9,11,15,20],format:[3,8,12,18,20,23,25,27,33],format_typ:30,forward:[19,20],found:[3,20,23],four:15,fpic:7,fraction:16,framework:3,francisco:[26,27],frederick:3,free:3,freebsd:[1,33],freed:3,freeli:0,from:[0,3,5,8,12,14,15,17,18,19,20,22,23,25,26,27,28,29,30,31,33],fromkei:11,frontend:9,frozenset:3,fruit:31,ftp:33,fulfil:11,full:[0,3,7,9,11,17,24,31],fulli:3,func:[3,9,15],further:[0,2,3,32],furthermor:11,futur:[2,15,23,34],fuzzi:[8,18],garbag:3,garfield:15,gate:3,gener:[2,6,9,14,15,16,18,20,31],geometr:[8,18],gerhard:3,get:[2,3,8,10,12,18,19,20,21,24,27,31],get_arrai:15,get_as_dict:[3,8,11,31],get_as_list:[3,12],get_attnam:[3,10,12,31],get_bool:15,get_bytea_escap:15,get_cast_hook:9,get_databas:12,get_datestyl:15,get_decim:15,get_decimal_point:15,get_defbas:15,get_defhost:15,get_defopt:15,get_defpasswd:15,get_defport:15,get_defus:15,get_field:24,get_jsondecod:15,get_notice_receiv:[3,9],get_paramet:[3,9,11,15],get_regtyp:11,get_rel:[3,12],get_tabl:[3,12,27,31],get_typecast:[3,8,9,10,15,18,23,24],getattnam:3,getlin:12,getlo:[12,14],getnotifi:[3,12],getresult:[3,8,9,11,12,15,27,31],gif:15,gil:3,ginger:28,give:[3,8,9,11,15,18,27],given:[10,11,12,15,17,20,23,24,25,30],glad:2,glanc:8,global:[3,8,10,15,21,24],gmake:33,good:[7,18,27],got:3,grab:8,grain:[3,10,11],grant:32,grapefruit:31,greatli:3,greet:18,group:[2,27],guess:11,guid:14,guido:23,had:[3,11,15,18,20],hal:18,hand:[26,30],handl:[3,9,12,13,15,24],handler:[3,12],happen:[3,8,10,15],hardcod:17,harri:3,has:[0,1,3,4,5,7,8,9,11,12,16,17,18,19,20,22,23,27,28,31,32,33],has_table_privileg:[12,31],hash:3,have:[1,2,3,4,5,7,8,9,10,11,15,17,18,20,25,26,27,28,29,30,31,32,33],haven:7,haystack:18,hayward:27,header:7,heavi:15,heavili:32,hello:[8,18],help:2,helper:[3,11,12,25],here:[4,8,9,11,15,18,24,27,28],herebi:32,hereund:32,hex:25,hide:[9,14],hierarchi:[3,7],high:[0,5],high_pai:28,higher:[3,5,8,11,26],highest:17,highli:0,hilton:3,hint:[3,9,15,18],hi
stori:[0,6],hold:[8,15,18,25],home:34,homepag:7,hood:27,host:[3,5,9,12,14,23,27,31],hostnam:23,hour:25,how:[3,8,9,18,23,26,27,28,30,31],howev:[3,8,11,18,25,28,31],hstore:[3,8,15,18,25],html:33,http:[1,2,33],huge:11,human:[8,9,18],i386:33,ico:3,idea:33,ident:27,identifi:[12,20,27],idl:9,ignor:[8,11,18,20],ignore_thi:28,imagin:8,img:15,immedi:[11,19,20],implement:[0,3,9,14,19,20],impli:32,implicit:19,implicitli:16,improv:[2,3],incident:32,includ:[3,7,9,11,15,18,26,30,31,32],includedir:7,incompat:3,increas:3,index:[3,6,7,8,11,17,18,31,33],index_nam:30,indexrelid:30,indic:[11,20,27,28,29,34],indirect:32,indirectli:5,individu:[2,11,15,23,31],indkei:30,indrelid:30,inf:3,infinit:3,inform:[3,6,8,9,10,11,14,15,17,18,19,20,22,24,25,27,30,34],information_schema:30,infrastructur:2,ing:[9,14],inher:8,inherit:29,inhomogen:15,init:3,initcap:31,initi:[12,14,15],inject:[3,8,9,15,18],inlin:[3,8,11],inop:11,input:[3,8,9,11,15,20,25],insensit:27,insert:[3,7,8,12,15,16,18,20,23,25,26,28,29,30,31],insertt:[3,12,27,31],insid:[8,19],instal:[3,5,6,34],instanc:[3,8,11,13,14,15,16,18,20,23,25,26,27,28,31],instanti:12,instead:[3,8,9,11,15,16,17,18,20,27,31],instruct:[3,7],int2:[8,18],int2vector:[8,18],int4:[8,18,26,28],int8:[3,8,18,26],integ:[3,8,9,18,23,25,31],integer_datetim:9,integr:23,integrityerror:[3,23],intend:[8,15,18],intens:27,intent:25,interact:0,interest:[11,14,27],interfac:[0,2,3,5,6,7,9,13,14,20,22,23,26,27,29,30],interfaceerror:[18,23],intern:[3,8,9,11,12,15,17,21,30],internal_s:20,internalerror:[9,11,15],interpret:[0,8,11,15,18],interrog:9,intersect:[8,18],interv:[3,8,18,25],intervalstyl:9,introduct:[12,21,29],introspect:20,intuit:3,inv_arch:9,inv_read:[9,14,15],inv_writ:[9,14,15],invalid:[3,9,11,14,15,17,19,20],invalid_oid:9,invalidresulterror:17,inventory_item:[8,18],inventoryitem:[8,18],invers:3,invok:[12,20],involv:[2,9,11,15,20],ioerror:[14,20],is_superus:9,isinst:3,isn:28,iso:11,isol:20,issu:[2,3,8,20],item:[8,11,18,20],iter:[3,9,11,17,20,31],its:[3,8,9,11,14,15,16,17,18,
20,26,27,28,31,32],itself:[3,9,11,16,23,27],jacob:3,jame:18,jani:25,jarkko:3,java:0,jeremi:3,jerom:3,john:25,johnni:25,johnston:3,join:[2,29],josh:3,journal:22,json:[3,8,12,18,25],json_data:18,jsonb:[3,8,11,18,25],jsondata:25,just:[3,7,8,9,11,16,27,28],justin:3,kavou:3,keep:[3,8,14,16,20],kei:[3,8,12,17,18,20,23,27,31],kept:20,keyerror:[3,11],keynam:11,keyword:[3,7,11,15,18,23,27,31],kid:[20,25],kind:[11,20],know:[9,18,20],known:9,kuchl:22,kwarg:23,l0pht:3,lambda:[3,8,18,23],languag:[0,28,29],lannam:30,larg:[3,7,11,12,13,15,31],largeobject:[9,12,13,15],larger:3,last:[3,8,11,12,14,18,20],lastfoot:3,later:[3,9,11,14],latest:[3,33],latter:[3,20],layer:33,layout:3,lcrypt:7,ld_library_path:5,leak:3,least:[8,18],leav:[9,11],left:[11,27,30],left_opr:30,left_unari:30,len:[3,17,24,31],less:3,let:[3,8,18,26,27,28,31],letter:11,level:[0,3,5,8,9,10,11,15,20,23,24,27,31],liabl:32,lib:7,libdir:7,liber:0,libpq:[3,5,7,9,15],librari:[0,3,5,7,11,18],licens:[0,3,32],lifetim:11,like:[2,3,5,7,8,9,11,12,15,18,20,22,23,27,31],limit:[3,11,31,32],line:[3,7,12,15,23],liner:3,link:[2,4],linux:[7,22,33],list:[3,4,7,8,10,12,18,20,23,24,25,27,28,29,31,33,34],listen:[9,11,16],listfield:[9,11,12],liter:[3,8,9,11,15,18,25],littl:3,live:7,load:[11,15,18],local:[5,7,8,11,15,18,27,31],localhost:[3,7],locat:[18,26,27],lock:[14,23],locreat:[12,15],log:8,login:[8,27,31],loimport:12,longer:[3,7,27],look:[1,8,9,10,11,15,18,20,23,24,27,33],loop:16,lost:[8,9,11,18,32],lot:3,low:[2,5,10],lower:[3,8,18,27,31],lpq:7,lunch:26,mac:0,made:[3,8],madison:26,magic:[8,18],mai:[2,5,7,8,9,10,11,13,14,15,17,18,20,22,23,24,27,28,31],mail:[3,34],main:[3,16],mainli:9,maintain:[2,3],mainten:32,major:3,make:[3,7,8,9,18,25,27,28,30,31,33],makefil:7,malici:[8,18],manag:[0,3,11,19,20],mandatori:3,mani:[0,3,9,11,14,15,17,21,27,31],manipul:3,manual:[8,9,15,19,29],map:[8,10,11,18,20,24,31],mariposa:26,mark:12,match:[7,11],matter:[8,18],matthew:3,max:27,maxim:14,maximum:11,mayb:9,mcphee:3,mdy:27,mean:[3,5,11,15,20,31],meaning:15,mec
han:[3,8,11,18],meet:26,mejia:3,member:[3,11],memori:[3,12,17,23,27],memoryerror:[9,17],mention:3,merchant:32,mess:30,messag:[2,3,9,14,20],metadata:30,method:[3,5,7,8,9,10,11,12,14,15,18,19,21,23,24,25,27,31],mfc:0,michael:3,microsecond:25,might:[11,18],mikhail:3,mind:8,mingw:1,minor:3,minut:25,miscellan:3,misinterpret:11,miss:[3,11,18],mistak:18,mit:3,mode:[3,9,11,14,15,19],modern:2,modif:[0,14,32],modifi:[11,14,15,32],modul:[0,3,5,6,7,8,9,10,11,12,13,14,16,21,22,25,27,31,33],monei:[3,8,15,18,25],monetari:12,month:25,more:[0,2,3,7,8,9,10,11,14,15,17,18,19,20,23,25,26,27,28,31],more_fruit:31,most:[0,3,7,11,20,27],mostli:3,motif:0,motorcycl:4,move:[3,7,14],mspo:3,msvc:3,much:[8,29,31],multi:[3,5,14],multipl:[9,11,17,20,27,29,31],multipleresultserror:17,mung:[3,11],must:[2,3,5,7,8,9,10,11,15,16,18,20,23,24,25,27],mwa:3,mxdatetim:3,mydb:23,myhost:[15,23],name:[3,7,8,9,10,12,14,16,18,20,23,24,25,26,27,28,30,31],namedit:[3,9,12],namedresult:[3,9,12,27,31],namedtupl:[3,8,18,20],namespac:3,nan:3,natur:[18,31],necessari:[3,10,15,23,24,31],necessarili:23,need:[0,2,3,7,8,9,11,14,15,16,18,19,20,24,25,27,31],needl:18,neg:[3,14,28],nest:15,net:[0,3,32],netbsd:[0,1,7,33],never:[8,16,18],new_emp:28,newer:[5,13,22,31],newli:[9,31],newlin:9,next:[8,17,18,19,21,27],ngp:3,niall:3,nice:[3,27],nicer:3,no_snprintf:3,nobodi:28,non:[3,20],none:[3,9,10,11,14,15,16,17,18,19,20,23,24,25,27,28],noresulterror:17,normal:[8,9,11,15,16,17,18,19,27,30],notabl:3,notat:28,note:[2,3,5,7,8,9,10,11,15,17,18,19,20,23,24,26,27,28,31],noth:31,notic:[0,3,8,12,18,28,34],notif:[3,9,12],notifi:[12,16],notification_handl:12,notificationhandl:[3,11,16],notsupportederror:23,nov:27,now:[3,7,8,18,19,20,26,27,31],nowadai:3,ntupl:[3,12],null_ok:20,num:17,number:[3,9,11,12,15,16,21,23,25,27,31],numer:[3,8,11,12,18,20,23,25],numericoid:3,obj:[11,15,25],object:[0,3,7,8,10,11,12,13,16,18,21,23,24,27,31],oblig:32,obtain:[9,11,14],obviou:25,obvious:18,occur:[3,15,20,23],off:[3,23,27],offici:3,offset:[11,14],often:0,oid:[3,
8,10,11,12,14,18,24,25,30,31],old:3,older:[0,3,7,11,13,34],oleg:4,omit:[20,27,31],on_hand:[8,18],onc:[3,11,27,31],one:[3,8,9,10,11,12,15,18,20,23,24,25,27,28,30,31],onedict:[3,12],onenam:[3,12],ones:15,onescalar:[3,12,31],ongo:3,onli:[3,4,5,7,8,9,10,11,13,14,15,16,17,18,20,22,26,27,31],ontario:4,open:[0,2,3,8,11,12,21],opensus:[1,33],oper:[3,7,8,11,15,16,18,19,21,23,25,29,31],operand:30,operationalerror:[9,11,23],opf:30,opfmethod:30,opfnam:30,oprkind:30,oprleft:30,oprnam:30,oprresult:30,oprright:30,opt:[15,23],optim:20,option:[3,5,7,9,11,12,16,20,23,25,27],order:[3,9,10,11,15,17,23,27,28,30,31],ordereddict:[11,20,31],ordinari:[3,11,20,31],org:[1,2,32,33],orient:0,origin:27,orm:5,other:[0,3,8,9,11,15,18,20,23,26,29,31],otherwis:[9,11,15,18,20],our:[8,18,27,33],out:[2,3,8,11,16,18,23,27,28,32],output:[3,15,20,25,27,31],outsid:11,over:[3,11,17,20],overflow:[9,11],overhead:[3,11],overlap:[8,18],overlook:[8,18],overpaid:28,overrid:[11,15],overridden:3,overview:2,overwrit:20,own:[8,11,18,19,27],packag:[1,3,7,33],page:[4,6,13],pai:26,pair:[11,24],pami:3,paragraph:32,param:[8,18],paramet:[3,10,12,14,15,16,17,20,21,23,24,25,27,31],parameter:11,paramstyl:23,paramt:8,parent:11,parenthes:15,pars:[3,15,24,30],parse_int:18,parser:[3,11,12],part:[3,4,5,7,11,13,21,22,23,24,25,31],parti:32,particip:33,particular:[3,11,15,23,25,32],particularli:[3,17,18],pascal:[0,32],pass:[3,8,9,10,11,15,16,18,20,23,24,25,27,31],passwd:[8,15,23,27,31],password:[3,5,8,12,23,31],past:33,path:[5,7],patrick:3,pay_by_extra_quart:26,pay_by_quart:26,payload:[3,9,16],peer:2,peifeng:3,pend:19,pep:[5,22],per:20,percent:3,perform:[3,8,11,15,19,20,31],perhap:8,perl:0,perman:31,permiss:[3,32],peter:3,pg_aggreg:30,pg_am:30,pg_amop:30,pg_attribut:30,pg_authid:30,pg_catalog:30,pg_class:30,pg_config:7,pg_databas:30,pg_index:30,pg_languag:30,pg_oper:30,pg_opfamili:30,pg_proc:30,pg_toast:30,pg_type:[10,24,30],pgcnx:[9,14],pgdb:[3,5,7,11,13,18,19,20,22,23,24,25,31,33],pgdbtypecach:3,pgext:3,pginc:7,pglarge_writ:3,pglib
:7,pgmodul:[3,7,33],pgnotifi:[3,16],pgqueryobject:3,pgserver:[27,31],pgsql:7,pgtype:[10,33],pheng:[3,16],phone:[9,11,15],pick:[11,15,23,33],pictur:15,pid:[9,16],ping:7,pkei:[3,12],pkgsrc:33,place:[7,8],placehold:[8,11],platform:[0,5,7],pleas:[1,2,8,9,15,18,31,33],plu:[26,31],plug:9,point:[3,4,8,15,16,18,19,20,27],poll:16,ponder:18,pong:7,popul:[8,18,26,28],port:[1,3,5,9,12,23,27,31,33],posgresql:15,posit:[3,8,9,11,14,15,17,20],possibl:[3,7,8,11,18,20,28,32],post1:3,postgr:[3,7,30],postgresql:[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,18,19,20,21,22,24,25,26,27,28,30,31,33],power:0,pprint:27,pqconsumeinput:3,pqescapebyteaconn:3,pqescapeidentifi:3,pqescapeliter:3,pqescapestringconn:3,pqfreemem:3,prcp:27,pre:[7,9,11,33],preced:[11,23],precipit:27,precis:[3,15,20],precompil:7,predic:27,prefer:[9,11],prefix:[11,16],preload:3,prepar:[3,12,20],present:[11,26],preserv:11,pretti:3,prevent:[11,15],previou:[8,12,15,18,20],previous:[9,11,14],price:[8,18],primari:[3,8,9,12,18,27,31],primer:[4,6],print:[3,9,11,26,27,28,30,31],printra:3,privat:3,privileg:[3,12],proargtyp:30,probabl:27,problem:[2,3,7,8,9,14,23],procedur:21,process:[2,3,8,11,12,20,23,27],procnam:20,produc:[16,20],profit:32,program:[0,3,6,14,15,22,23,30,31],programm:[0,14,23],programmingerror:[3,8,9,11,18,23,27],progress:9,project:[28,34],prolang:30,prompt:15,pronam:30,pronarg:30,proper:[3,11,15,17,25],properli:[3,8,11,18],properti:[11,15],propos:[2,3,11],prorettyp:30,protect:23,protocol:[3,9,19,20],protocol_vers:[3,9],prototyp:3,provid:[3,5,8,9,10,11,13,14,15,17,18,20,22,23,24,25,31,32],pryzbi:3,psinc:7,pub:33,pure:7,purpos:[2,3,32],put:[8,11,15,18],putlin:12,py2:33,py3:33,py3c:33,pyarg_parsetupleandkeyword:3,pyd:7,pyformat:23,pygres95:[0,3],pygresql:[3,4,7,8,9,10,11,13,15,16,17,18,22,23,24,25,26,27,29,30,32],pyinc:7,pyos_snprintf:3,pypi:33,python:[0,1,2,3,5,9,10,11,12,14,15,17,20,21,22,23,24,25,27,31,33],pythonpath:7,qiu:3,qualif:28,qualifi:3,quarter:26,queri:[0,3,7,8,12,13,15,18,21,25,26,27,28,30,31],query_format:[3,8,12
,15],query_prepar:12,question:2,quickli:[0,3,9,12,15,27,31],quirk:15,quit:18,quot:[3,8,11,15,18],race:11,raini:27,rais:[3,9,11,14,15,17,19,20,21],rang:[3,23,27],rank:17,rare:8,rather:[3,11,19,20,23],raw:[8,11,15,18,23],reach:16,read:[7,8,9,12,15,18,19,20,27,33],readabl:[8,9,18],readi:[3,7],readm:[3,33],real:[8,9],realli:9,reason:[3,8,18],rebuild:7,receiv:[3,12,15,16],reclaim:11,recommend:[3,5,11],record:[3,8,12,18,25,27],recreat:30,recurr:14,recurs:3,redefinit:3,reduc:3,redund:3,refer:[1,9,11,14,15,20,26,31,32],referenc:11,reflect:11,reformat:3,regard:[2,21,27],regist:[3,8,10,12,18],regnamespac:30,regoper:30,regress:3,regtyp:[10,11],regular:30,rel:15,relat:[0,3,9,12,23,27],releas:[3,12,33,34],reli:3,relid:[10,24],relkind:30,relnam:[9,30],relnamespac:30,reload:11,remain:20,remark:[0,12,21],rememb:[8,9,14,18],remot:7,remov:[3,7,11,29,31],renam:[0,3,17,20],reopen:[3,11,15,18,23],reorder:3,replac:[3,9,11,20],report:[3,9],repositori:34,repres:[3,11,15,20,23,25],represent:[8,11,15,18,20,31],request:[2,3,8,9,11,13,14,15,18,20,31],requir:[3,7,8,11,18,19],reset:[10,11,12,15,18,23,24],reset_typecast:[8,10,15,18,21,24],resolut:12,resort:[0,8,18],respect:3,respond:[19,20],rest:[7,33],restart:11,reston:3,restor:[3,11],restrict:[11,20,27],result:[3,8,9,11,12,13,15,18,21,25,27,28,30,31],retain:20,retri:3,retriev:[12,26,29,31],return_typ:30,reus:[3,9,14],revers:[11,15],revert:3,review:2,reviv:3,rewrit:3,rewrot:3,rgb:4,richard:3,ride:[4,11],right:[8,15,30],right_opr:30,right_unari:30,risk:15,rlawrenc:3,roadmap:2,roll:[3,11,21],rollback:[3,12,21,31],rolnam:30,root:7,row:[3,7,8,9,12,17,18,21,26,27,28,30,31],row_factori:[3,20],rowcount:[3,21],rowid:25,rpm:[7,33],rsplit:18,rule:11,run:[0,1,3,7,8,10,12,15,16,18,19,23,24,33],sacramento:26,safe:[3,5,16,23,31],sal_emp:26,salari:28,salarresult:3,sam:28,same:[3,5,8,11,14,15,16,17,18,19,20,27,28,31],san:[26,27],sanit:11,satisfi:27,save:12,savepoint:[3,12],saw:[18,27],scalabl:0,scalar:[3,8,11,12,31],scalarit:[3,12],scalarresult:[3,12],scale:[3,
20],scan:11,scene:8,schedul:26,schema:[3,30],scheme:0,schuller:3,scott:[27,31],script:[0,33],search:[6,7],search_path:3,search_term:33,second:[11,16,18,25],secondari:9,section:[8,18,19,26,27,30,33],secur:[3,15],see:[0,2,3,7,8,10,11,13,14,19,24,27,31,33],seek:[12,15],seek_cur:[14,15],seek_end:[14,15],seek_set:[14,15],seem:[7,8],seen:8,select:[3,7,8,9,11,15,18,20,25,26,27,28,30,31],self:[8,11,18,20],semicolon:[9,11],send:[2,3,4,8,9,12,14,27,31],sens:[8,18],sensit:27,sent:[3,9,11,16,18],sep:20,separ:[3,8,9,11,18,20,27],seq_of_paramet:20,sequenc:[11,17,20,31],serial:[8,11,18,25,31],serializ:[15,25],serv:[0,5,11,29],server:[7,12,14,17,20,23],server_encod:9,server_vers:[3,9],servic:33,session:[9,11,15],session_author:9,set:[3,5,7,8,10,12,14,15,16,18,19,21,24,27,31],set_arrai:[3,12],set_bool:[3,8,12],set_bytea_escap:[3,12],set_cast_hook:12,set_datestyl:12,set_decim:[3,8,12],set_decimal_point:12,set_defbas:12,set_defhost:12,set_defopt:12,set_defpasswd:12,set_defport:12,set_defus:12,set_jsondecod:[3,8,11,12],set_namedresult:3,set_notice_receiv:[3,12],set_paramet:[3,12,15],set_query_help:3,set_row_factory_s:3,set_typecast:[3,8,9,10,12,18,23,24],setof:28,settabl:11,setup:[7,9,15,33],sever:[9,11,15,23,27,30,31],shall:[10,11,15,20,23,24,25,27,32],share:[5,7,14],sharedinstal:7,sharpen:3,shoe:28,shortcut:11,should:[3,7,8,11,14,15,18,20,23,27,30],show:[4,9,26,30,31],side:[16,20],sig:[5,9,22,33],sign:3,signal:16,signatur:20,signific:[9,14],silent:15,similar:[3,8,11,21,31],simon:3,simpl:[3,4,8,9,10,11,13,14,25,27,28,30],simpler:[3,8],simplest:27,simpli:[3,7,8,9,15,18,20,31],simplic:3,simplif:3,simplifi:[3,11],sinc:[3,8,9,11,14,15,16,17,18,27],singl:[3,8,11,12,15,16,20,23,27,31],singledict:[3,12],singlenam:[3,12],singlescalar:[3,12],singleton:25,siong:[3,16],site:[7,34],situat:26,size:[3,12,20,24],skip:11,slight:18,slightli:[3,17],small:[3,31],smaller:11,smallint:[3,25],smart:3,smooth:3,snprintf:3,socket:[3,12],softwar:[0,32,33],solut:[0,8,18],some:[3,4,5,7,8,9,11,13,14,15,18,19,23,25
,26,27,29,30,31],someth:[7,8,18],sometim:[10,15,18,19,23,24],somewhat:3,soon:8,sophist:28,sort:[11,27],sourc:[0,4,15,23,33,34],space:11,special:[11,15,16,25,32],specif:[3,5,8,11,15,17,18,22,23,25,32],specifi:[3,9,10,11,15,16,19,20,23,24,27,31],speed:3,speedup:3,sphinx:[3,33],split:[8,18,31],sporled:3,sql:[0,3,5,8,12,18,19,23,25,27,29,30],sqlalchemi:[3,5],sqlstate:[3,9,20,23],sqrt:[8,18],ssl:9,ssl_attribut:[3,9],ssl_in_us:[3,9],stamp:25,stand:8,standard:[3,5,11,15,18,21,23,24,25,31],standard_conforming_str:9,starship:3,start:[0,3,4,5,8,11,14,16,17,18,19,27,28,31],startup:9,state:[3,12,15,23,26,31],statement:[3,8,12,18,19,20,23,27,29,31],statu:9,step:6,still:[0,3,8,11,15,18,19,20],stop:[11,16],stop_:16,stop_ev:[11,16],storag:5,store:[4,8,11,15,21,30],str:[3,8,9,10,11,14,15,16,17,18,20,23,24],straight:3,stream:[3,20],string:[3,8,10,12,14,17,18,20,23,24,25,27,31],strlen:3,strptime:9,strtol:3,structur:3,studio:1,style:[3,8,12,27],stylist:3,subarrai:26,subclass:[3,11,19,20],subdirectori:7,submit:[2,3],subscript:3,subsequ:[3,11],subset:[11,20,27],substitut:9,subtl:[15,18],subvers:2,success:15,suggest:[3,9],suit:33,suitabl:[8,15,18,23],sum:28,supplement:9,suppli:[3,9,11,15],supplier_id:[8,18],support:[0,3,5,7,9,10,11,12,15,17,20,21,23,28,30,32,33,34],suppos:27,sure:[3,7,15,23,27,28],svn:2,symlink:7,sympi:[8,18],synchron:[12,16],syntax:[0,3,8,9,15,18,23,27],syntaxerror:15,system:[0,3,7,8,9,12,18,29,31],tabl:[3,8,10,12,15,17,18,20,23,24,25,26,28,29,30,31,34],tag:[9,15],take:[3,5,8,9,10,11,15,23,24,28],taken:[3,11,29],tar:[1,33],tarbal:7,target:[27,28],tbryan:3,tcl:0,team:[0,32],tediou:[8,18],tell:[12,31],temp:3,temp_avg:27,temp_hi:27,temp_lo:27,temperatur:27,temporari:[3,11,27],tempt:8,temptab:27,terekhov:3,term:32,termin:7,test:[0,1,3,7,20,33],testdb:[15,27,31],text:[3,8,11,12,18,20,25,26,28,31,33],textual:20,than:[0,3,8,11,14,15,17,18,19,20,23,25,28,31],thank:3,thei:[3,8,9,10,11,15,20,24,25,27,29,30,31],them:[0,3,8,9,11,15,18,19,20,23,31],themselv:[8,18],therefor:[5,8,15,18
,23],thi:[0,1,2,3,5,7,8,9,10,11,14,15,16,17,18,19,20,21,24,26,27,29,30,31,32],thilo:[3,7],thing:[1,8,18,27,31],think:[8,18],third:[26,27],those:27,though:[3,14,18],thought:31,thread:[3,5,14,16,23],threadsafeti:23,three:[7,13],through:[2,3,8,9,11,15,20,33],thu:[9,14,23],tick:25,ticket:2,tiger:[27,31],time:[3,8,12,16,17,18,21,25,27,31],timedelta:[8,18],timefromtick:25,timelin:2,timeout:[11,16],timestamp:[3,8,9,18,25],timestampfromtick:25,timestamptz:[8,18],timetz:[8,18],timezon:9,toast:11,todai:3,togeth:[11,16],toi:28,toni:3,too:[8,9,11,14,15,17],tool:7,toolkit:5,top:7,torppa:3,tpye:20,trac:2,track:2,tracker:34,trail:9,train:26,trans_act:[9,15],trans_idl:[9,15],trans_inerror:[9,15],trans_intran:[9,15],trans_unknown:[9,15],transact:[3,12,15,19,20,23],transform:[8,20],treat:[3,18],tree:3,tri:[3,8,9,18,20],trigger:[9,11],triplet:9,trove:3,truncat:[3,12,23],trunk:2,tty:3,tuhnu:3,tupl:[3,8,9,11,12,15,18,20,27,28,31],turn:[3,15],tutori:[3,15,22,31],two:[3,5,7,8,15,18,26,27,28,32],txt:1,typ:[8,10,15,23,24],type:[0,3,7,9,12,13,14,17,19,20,21,23,26,27,29,33],type_cach:[18,19,23,24],type_cod:[3,20,24,25],typecach:[18,19,21,23],typecast:[3,10,11,12,19,21,24],typeerror:[9,11,14,15,17,20],typelem:30,typnam:30,typown:30,typrelid:30,typtyp:10,tzinfo:25,ubuntu:1,ugli:3,unari:30,unchang:11,uncom:7,und:11,under:[0,1,3,11,23,27],underli:[3,9],underscor:17,understand:[8,18,24],undocu:3,unescap:[3,12],unescape_bytea:[3,12],unexpect:23,unic:3,uninstal:7,uniqu:11,unit:[1,3,33],unix:[3,7,14],unknown:[3,9,17,27],unless:[9,10,11,19,20,24],unlik:11,unlink:[3,12],unlisten:16,unnam:[9,11],unnecessari:3,unpack:7,unprocess:15,unqualifi:11,unsupport:9,until:[11,16],untrustworthi:15,unus:[3,19,20],unwant:15,updat:[3,8,9,12,15,20,29,30,31,32],upper:11,upsert:[3,12],uri:15,url:[4,5],usabl:0,usag:[12,20],use:[0,2,3,4,5,7,8,9,10,11,12,13,16,17,18,19,20,22,23,24,26,27,29,31,32],use_regtyp:[3,10,12],used:[3,7,8,11,12,14,16,17,18,19,20,23,24,27,28,31],useful:[3,8,9,11,14,15,17,18,22],user:[3,8,9,11,12,23,26
,27,29,31],user_t:8,usernam:5,uses:[3,4,8,9,11,15,18,23,25,31],using:[2,3,4,5,6,7,8,9,10,11,14,15,16,17,18,19,20,23,24,25,26,27,29,31],usr:[3,7],usual:[2,3,7,8,9,15,18,20],util:[3,5],uuid:[3,8,18,25],vacuum:[11,19],val:11,valid:[3,9,14,17,20],valu:[3,7,8,9,10,12,14,16,18,20,23,24,25,26,27,28,30,31],valueerror:[9,11,14,15,17,20],varchar:[8,18,25,27,28,31],variabl:[3,5,7,11,14,15,20],variat:18,variou:[0,3,11,19,33],vega:26,veri:[0,3,9,14,15,20,27],verifi:[8,18,31],version:[0,5,7,8,9,10,11,15,16,17,18,19,20,23,24,25,34],via:[0,3,8,18,20,32],vies:11,view:[3,4,12],violat:[3,11],visibl:20,visual:1,volum:2,volunt:2,vulner:[15,18],wai:[3,5,7,8,9,11,14,18,19,27,30,31,33],wait:[11,16],want:[2,3,7,8,9,11,13,14,15,16,18,19,20,23,27,30,31],warn:[3,8,9,11,14,23],warranti:32,weather:27,web:14,welcom:2,well:[0,3,11,23],were:[3,8,15,18,29],what:[6,8,9,11,18,27],whatev:14,when:[2,3,7,8,9,11,15,16,17,18,19,20,23,24,25,27],whenc:14,whenev:[9,19,20],where:[0,3,7,8,9,11,15,18,20,23,26,27,28,30,31],wherein:15,whether:[3,9,11,12,16,20],which:[3,5,7,8,9,10,11,13,15,17,18,19,20,23,24,25,27,30,31],who:32,whole:[9,31],whose:11,win32:[3,33],win:33,window:[0,1,3,7],within:[11,12],without:[3,8,9,15,16,18,19,31,32],won:[3,14,15,19,27],work:[3,7,8,9,11,18],world:8,worri:[3,9],wors:[8,18],would:[2,3,7,8,9,11,18,27],wrap:[3,11,15,25],wrapper:[3,5,7,9,12,13,15,25,27,31,33],write:[3,11,12,18,19,20,27],written:[0,4,9,14,16,22,32],wrong:[9,11,20,23],www:[2,33],x11:0,yahoo:3,year:[0,25],yet:[9,20],yield:[3,9,18,20,31],you:[0,1,2,3,4,5,7,8,9,10,11,13,14,15,16,17,18,19,20,22,23,24,25,26,27,28,30,31,33],your:[5,7,8,11,15,18,19,27,30,31],zero:[23,26],zip:20,zwei:7},titles:["About PyGreSQL","PyGreSQL Announcements","PyGreSQL Development and Support","ChangeLog","Examples","General PyGreSQL programming information","The PyGreSQL documentation","Installation","Remarks on Adaptation and Typecasting","Connection \u2013 The connection object","DbTypes \u2013 The internal cache for database types","The DB wrapper 
class","pg \u2014 The Classic PyGreSQL Interface","Introduction","LargeObject \u2013 Large Objects","Module functions and constants","The Notification Handler","Query methods","Remarks on Adaptation and Typecasting","Connection \u2013 The connection object","Cursor \u2013 The cursor object","pgdb \u2014 The DB-API Compliant Interface","Introduction","Module functions and constants","TypeCache \u2013 The internal cache for database types","Type \u2013 Type objects and constructors","Examples for advanced features","Basic examples","Examples for using SQL functions","A PostgreSQL Primer","Examples for using the system catalogs","First Steps with PyGreSQL","Copyright notice","Download information","Welcome to PyGreSQL"],titleterms:{"boolean":15,"class":11,"default":15,"export":14,"function":[9,15,23,28,30],"import":9,"new":[19,33],"return":[11,15,17,19],The:[6,9,10,11,12,16,19,20,21,24],abandon:9,about:0,access:2,adapt:[8,18],advanc:26,aggreg:[27,30],all:20,alon:7,announc:1,api:[21,31],arrai:[15,26],arrays:20,assum:15,attribut:[9,11,14,19,20,30],auxiliari:16,back:19,base:[28,30],basic:27,been:15,begin:11,binari:[7,15],bool:15,bug:2,build:[7,9],built:7,bytea:[11,15],cach:[10,24],call:20,callproc:20,cancel:9,cast_arrai:15,catalog:30,chang:33,changelog:3,check:11,choos:11,classic:[12,31],clear:11,client:9,close:[9,14,19,20],column:20,command:[9,11],commit:[11,19],compil:7,compliant:21,composit:28,conflict:11,connect:[9,11,15,19,23,27],constant:[15,23],constructor:25,content:[6,12,21,29],control:23,convers:17,copyright:32,creat:[9,11,27,28],current:[9,33],cursor:[19,20],custom:[9,15],data:[8,11,15,18,27],databas:[9,10,11,15,20,24,27],date:[9,15],date_format:9,dbtype:10,decim:15,decod:[11,15],decode_json:11,defin:30,delet:[11,27],delete_prepar:11,describ:[9,11],describe_prepar:[9,11],descript:20,detail:20,develop:[2,33],dict:11,dictionari:[11,17],dictit:17,dictresult:17,distribut:[7,33],distutil:7,document:6,download:33,empti:11,encod:11,endcopi:9,error:23,escap:[11,15],esc
ape_bytea:15,escape_liter:11,escape_str:15,exampl:[4,26,27,28,30],execut:[9,11,20],executemani:20,fallback:9,famili:30,fast:15,featur:26,fetch:20,fetchal:20,fetchmani:20,fetchon:20,field:17,fieldnam:17,fieldnum:17,file:[9,14,33],fileno:9,first:31,fix:15,format:[9,11,15],from:[7,9,11],futur:33,gener:[5,7],get:[9,11,14,15,17,23],get_as_list:11,get_attnam:11,get_databas:11,get_rel:11,get_tabl:11,getlin:9,getlo:9,getnotifi:9,getresult:17,given:9,global:23,handl:[11,14],handler:[11,16],has:15,has_table_privileg:11,helper:15,home:[2,33],host:15,identifi:11,indic:[6,30],inform:[5,33],inherit:26,initi:11,insert:[9,11,27],insertt:9,instal:[7,33],instanti:16,interfac:[12,21,31],intern:[10,24],interpret:7,introduct:[13,22],invok:16,join:27,json:[11,15],kei:11,languag:30,larg:[9,14],largeobject:14,last:9,like:14,line:9,list:[2,9,11,15,17,30],listfield:17,locreat:9,loimport:9,mail:2,mani:20,manual:7,mark:15,memori:11,method:[16,17,20],modul:[15,23],monetari:15,multipl:28,name:[11,15,17],namedit:17,namedresult:17,next:20,notic:[9,32],notif:[11,16],notifi:9,notification_handl:11,ntupl:17,number:[17,20],numer:15,object:[9,14,15,17,19,20,25],oid:9,older:33,one:17,onedict:17,onenam:17,onescalar:17,open:[14,15,23],oper:[20,30],option:15,other:27,paramet:[8,9,11,18],parser:15,part:[19,20],password:15,pgdb:21,pip:7,pkei:11,port:15,postgresql:[15,23,29],prepar:[9,11],previou:17,primari:11,primer:29,privileg:11,procedur:20,process:9,program:5,project:[2,33],putlin:9,pygresql:[0,1,2,5,6,12,31,33,34],python:[7,8,18],queri:[9,11,17,20],query_format:11,query_prepar:[9,11],quickli:11,rais:23,read:[11,14],receiv:9,record:15,regard:20,regist:11,relat:11,releas:[1,11],remark:[8,18],remov:[27,28],repositori:2,reset:9,reset_typecast:23,resolut:11,result:[17,20],retriev:[11,15,27],roll:19,rollback:[11,19],row:[11,20],rowcount:20,run:11,save:14,savepoint:11,scalar:17,scalarit:17,scalarresult:17,seek:14,send:16,server:[9,15],set:[9,11,20,23],set_arrai:15,set_bool:15,set_bytea_escap:15,set_cast_hook:9,
set_datestyl:15,set_decim:15,set_decimal_point:15,set_defbas:15,set_defhost:15,set_defopt:15,set_defpasswd:15,set_defport:15,set_defus:15,set_jsondecod:15,set_notice_receiv:9,set_paramet:11,set_typecast:15,similar:20,singl:17,singledict:17,singlenam:17,singlescalar:17,site:[2,33],size:14,socket:9,sourc:[2,7],sql:[9,11,15,28],stand:7,standard:[19,20],state:9,statement:[9,11,28],step:31,store:20,string:[9,11,15],style:15,support:[2,8,18],synchron:9,system:[11,30],tabl:[6,9,11,27],tell:14,text:15,thi:[23,28],time:[11,20],tracker:2,transact:[9,11],truncat:11,tupl:17,type:[8,10,11,15,18,24,25,28,30],typecach:24,typecast:[8,9,15,18,23],unescap:[11,15],unescape_bytea:[11,15],unlink:14,updat:[11,27],upsert:11,usag:11,use:15,use_regtyp:11,used:[9,15],user:[15,30],using:[28,30],valu:[11,15,17],version:[1,3,33],vesion:3,view:11,welcom:34,were:28,whether:15,within:15,wrapper:11,write:[9,14]}})PyGreSQL-5.1/docs/_build/html/download/0000755000175100077410000000000013470245541017665 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/_build/html/download/index.html0000644000175100077410000003054013470245537021671 0ustar darcypyg00000000000000 Download information — PyGreSQL 5.1

Download information¶

Older PyGreSQL versions¶

You can look for older PyGreSQL versions at http://www.pygresql.org/files/.

News, Changes and Future Development¶

See the PyGreSQL Announcements for current news.

For a list of all changes in the current version 5.1 and in past versions, have a look at the ChangeLog.

The section on PyGreSQL Development and Support lists ideas for future developments and ways to participate.

Installation¶

Please read the chapter on Installation in our documentation.

Distribution files¶

pgmodule.c the C Python module (_pg)
pgtypes.h PostgreSQL type definitions
py3c.h Python 2/3 compatibility layer for the C extension
pg.py the "classic" PyGreSQL module
pgdb.py a DB-SIG DB-API 2.0 compliant API wrapper for PyGreSQL
setup.py

the Python setup script

To install PyGreSQL, you can run "python setup.py install".

setup.cfg the Python setup configuration
docs/

documentation directory

The documentation has been created with Sphinx. All text files are in ReST format; an HTML version of the documentation can be created with the command "make html" or "gmake html".

tests/ a suite of unit tests for PyGreSQL

Project home sites¶

Python:
http://www.python.org
PostgreSQL:
http://www.postgresql.org
PyGreSQL:
http://www.pygresql.org
PyGreSQL-5.1/docs/_build/html/_sources/0000755000175100077410000000000013470245541017700 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/_build/html/_sources/about.rst.txt0000644000175100077410000000006513466770070022370 0ustar darcypyg00000000000000About PyGreSQL ============== .. include:: about.txtPyGreSQL-5.1/docs/_build/html/_sources/contents/0000755000175100077410000000000013470245541021535 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/_build/html/_sources/contents/pgdb/0000755000175100077410000000000013470245541022451 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/_build/html/_sources/contents/pgdb/connection.rst.txt0000644000175100077410000000741513466770070026174 0ustar darcypyg00000000000000Connection -- The connection object =================================== .. py:currentmodule:: pgdb .. class:: Connection These connection objects respond to the following methods. Note that ``pgdb.Connection`` objects also implement the context manager protocol, i.e. you can use them in a ``with`` statement. When the ``with`` block ends, the current transaction will be automatically committed or rolled back if there was an exception, and you won't need to do this manually. close -- close the connection ----------------------------- .. method:: Connection.close() Close the connection now (rather than whenever it is deleted) :rtype: None The connection will be unusable from this point forward; an :exc:`Error` (or subclass) exception will be raised if any operation is attempted with the connection. The same applies to all cursor objects trying to use the connection. Note that closing a connection without committing the changes first will cause an implicit rollback to be performed. commit -- commit the connection ------------------------------- .. method:: Connection.commit() Commit any pending transaction to the database :rtype: None Note that connections always use a transaction, unless you set the :attr:`Connection.autocommit` attribute described below. 
rollback -- roll back the connection ------------------------------------ .. method:: Connection.rollback() Roll back any pending transaction to the database :rtype: None This method causes the database to roll back to the start of any pending transaction. Closing a connection without committing the changes first will cause an implicit rollback to be performed. cursor -- return a new cursor object ------------------------------------ .. method:: Connection.cursor() Return a new cursor object using the connection :returns: a cursor object :rtype: :class:`Cursor` This method returns a new :class:`Cursor` object that can be used to operate on the database in the way described in the next section. Attributes that are not part of the standard -------------------------------------------- .. note:: The following attributes are not part of the DB-API 2 standard. .. attribute:: Connection.closed This is *True* if the connection has been closed or has become invalid .. attribute:: Connection.cursor_type The default cursor type used by the connection If you want to use your own custom subclass of the :class:`Cursor` class with the connection, set this attribute to your custom cursor class. You will then get your custom cursor whenever you call :meth:`Connection.cursor`. .. versionadded:: 5.0 .. attribute:: Connection.type_cache A dictionary with the various type codes for the PostgreSQL types This can be used for getting more information on the PostgreSQL database types or changing the typecast functions used for the connection. See the description of the :class:`TypeCache` class for details. .. versionadded:: 5.0 .. attribute:: Connection.autocommit A read/write attribute to get/set the autocommit mode Normally, all DB-API 2 SQL commands are run inside a transaction. Sometimes this behavior is not desired; there are also some SQL commands such as VACUUM which cannot be run inside a transaction. 
By setting this attribute to ``True`` you can change this behavior so that no transactions will be started for that connection. In this case every executed SQL command has immediate effect on the database and you don't need to call :meth:`Connection.commit` explicitly. In this mode, you can still use ``with con:`` blocks to run parts of the code using the connection ``con`` inside a transaction. By default, this attribute is set to ``False`` which conforms to the behavior specified by the DB-API 2 standard (manual commit required). .. versionadded:: 5.1 PyGreSQL-5.1/docs/_build/html/_sources/contents/pgdb/introduction.rst.txt0000644000175100077410000000137113466770070026551 0ustar darcypyg00000000000000Introduction ============ You may either choose to use the "classic" PyGreSQL interface provided by the :mod:`pg` module or else the newer DB-API 2.0 compliant interface provided by the :mod:`pgdb` module. The following part of the documentation covers only the newer :mod:`pgdb` API. **DB-API 2.0** (Python Database API Specification v2.0) is a specification for connecting to databases (not only PostgreSQL) from Python that has been developed by the Python DB-SIG in 1999. The authoritative programming information for the DB-API is :pep:`0249`. .. seealso:: A useful tutorial-like `introduction to the DB-API `_ has been written by Andrew M. Kuchling for the LINUX Journal in 1998. PyGreSQL-5.1/docs/_build/html/_sources/contents/pgdb/index.rst.txt0000644000175100077410000000043513466770070025137 0ustar darcypyg00000000000000---------------------------------------------- :mod:`pgdb` --- The DB-API Compliant Interface ---------------------------------------------- .. module:: pgdb Contents ======== .. 
toctree:: introduction module connection cursor types typecache adaptation PyGreSQL-5.1/docs/_build/html/_sources/contents/pgdb/cursor.rst.txt0000644000175100077410000003670113466770070025352 0ustar darcypyg00000000000000Cursor -- The cursor object =========================== .. py:currentmodule:: pgdb .. class:: Cursor These objects represent a database cursor, which is used to manage the context of a fetch operation. Cursors created from the same connection are not isolated, i.e., any changes done to the database by a cursor are immediately visible by the other cursors. Cursors created from different connections can or can not be isolated, depending on the level of transaction isolation. The default PostgreSQL transaction isolation level is "read committed". Cursor objects respond to the following methods and attributes. Note that ``Cursor`` objects also implement both the iterator and the context manager protocol, i.e. you can iterate over them and you can use them in a ``with`` statement. description -- details regarding the result columns --------------------------------------------------- .. attribute:: Cursor.description This read-only attribute is a sequence of 7-item named tuples. Each of these named tuples contains information describing one result column: - *name* - *type_code* - *display_size* - *internal_size* - *precision* - *scale* - *null_ok* The values for *precision* and *scale* are only set for numeric types. The values for *display_size* and *null_ok* are always ``None``. This attribute will be ``None`` for operations that do not return rows or if the cursor has not had an operation invoked via the :meth:`Cursor.execute` or :meth:`Cursor.executemany` method yet. .. versionchanged:: 5.0 Before version 5.0, this attribute was an ordinary tuple. rowcount -- number of rows of the result ---------------------------------------- .. 
attribute:: Cursor.rowcount This read-only attribute specifies the number of rows that the last :meth:`Cursor.execute` or :meth:`Cursor.executemany` call produced (for DQL statements like SELECT) or affected (for DML statements like UPDATE or INSERT). It is also set by the :meth:`Cursor.copy_from` and :meth:`Cursor.copy_to` methods. The attribute is -1 in case no such method call has been performed on the cursor or the rowcount of the last operation cannot be determined by the interface. close -- close the cursor ------------------------- .. method:: Cursor.close() Close the cursor now (rather than whenever it is deleted) :rtype: None The cursor will be unusable from this point forward; an :exc:`Error` (or subclass) exception will be raised if any operation is attempted with the cursor. execute -- execute a database operation --------------------------------------- .. method:: Cursor.execute(operation, [parameters]) Prepare and execute a database operation (query or command) :param str operation: the database operation :param parameters: a sequence or mapping of parameters :returns: the cursor, so you can chain commands Parameters may be provided as sequence or mapping and will be bound to variables in the operation. Variables are specified using Python extended format codes, e.g. ``" ... WHERE name=%(name)s"``. A reference to the operation will be retained by the cursor. If the same operation object is passed in again, then the cursor can optimize its behavior. This is most effective for algorithms where the same operation is used, but different parameters are bound to it (many times). The parameters may also be specified as list of tuples to e.g. insert multiple rows in a single operation, but this kind of usage is deprecated: :meth:`Cursor.executemany` should be used instead. 
Note that in case this method raises a :exc:`DatabaseError`, you can get information about the error condition that has occurred by introspecting its :attr:`DatabaseError.sqlstate` attribute, which will be the ``SQLSTATE`` error code associated with the error. Applications that need to know which error condition has occurred should usually test the error code, rather than looking at the textual error message. executemany -- execute many similar database operations ------------------------------------------------------- .. method:: Cursor.executemany(operation, [seq_of_parameters]) Prepare and execute many similar database operations (queries or commands) :param str operation: the database operation :param seq_of_parameters: a sequence or mapping of parameter tuples or mappings :returns: the cursor, so you can chain commands Prepare a database operation (query or command) and then execute it against all parameter tuples or mappings found in the sequence *seq_of_parameters*. Parameters are bound to the query using Python extended format codes, e.g. ``" ... WHERE name=%(name)s"``. callproc -- Call a stored procedure ----------------------------------- .. method:: Cursor.callproc(self, procname, [parameters]): Call a stored database procedure with the given name :param str procname: the name of the database function :param parameters: a sequence of parameters (can be empty or omitted) This method calls a stored procedure (function) in the PostgreSQL database. The sequence of parameters must contain one entry for each input argument that the function expects. The result of the call is the same as this input sequence; replacement of output and input/output parameters in the return value is currently not supported. The function may also provide a result set as output. These can be requested through the standard fetch methods of the cursor. .. versionadded:: 5.0 fetchone -- fetch next row of the query result ---------------------------------------------- .. 
method:: Cursor.fetchone() Fetch the next row of a query result set :returns: the next row of the query result set :rtype: named tuple or None Fetch the next row of a query result set, returning a single named tuple, or ``None`` when no more data is available. The field names of the named tuple are the same as the column names of the database query as long as they are valid Python identifiers. An :exc:`Error` (or subclass) exception is raised if the previous call to :meth:`Cursor.execute` or :meth:`Cursor.executemany` did not produce any result set or no call was issued yet. .. versionchanged:: 5.0 Before version 5.0, this method returned ordinary tuples. fetchmany -- fetch next set of rows of the query result ------------------------------------------------------- .. method:: Cursor.fetchmany([size=None], [keep=False]) Fetch the next set of rows of a query result :param size: the number of rows to be fetched :type size: int or None :param keep: if set to true, will keep the passed arraysize :tpye keep: bool :returns: the next set of rows of the query result :rtype: list of named tuples Fetch the next set of rows of a query result, returning a list of named tuples. An empty sequence is returned when no more rows are available. The field names of the named tuple are the same as the column names of the database query as long as they are valid Python identifiers. The number of rows to fetch per call is specified by the *size* parameter. If it is not given, the cursor's :attr:`arraysize` determines the number of rows to be fetched. If you set the *keep* parameter to True, this is kept as new :attr:`arraysize`. The method tries to fetch as many rows as indicated by the *size* parameter. If this is not possible due to the specified number of rows not being available, fewer rows may be returned. 
An :exc:`Error` (or subclass) exception is raised if the previous call to :meth:`Cursor.execute` or :meth:`Cursor.executemany` did not produce any result set or no call was issued yet. Note there are performance considerations involved with the *size* parameter. For optimal performance, it is usually best to use the :attr:`arraysize` attribute. If the *size* parameter is used, then it is best for it to retain the same value from one :meth:`Cursor.fetchmany` call to the next. .. versionchanged:: 5.0 Before version 5.0, this method returned ordinary tuples. fetchall -- fetch all rows of the query result ---------------------------------------------- .. method:: Cursor.fetchall() Fetch all (remaining) rows of a query result :returns: the set of all rows of the query result :rtype: list of named tuples Fetch all (remaining) rows of a query result, returning them as list of named tuples. The field names of the named tuple are the same as the column names of the database query as long as they are valid as field names for named tuples, otherwise they are given positional names. Note that the cursor's :attr:`arraysize` attribute can affect the performance of this operation. .. versionchanged:: 5.0 Before version 5.0, this method returned ordinary tuples. arraysize - the number of rows to fetch at a time ------------------------------------------------- .. attribute:: Cursor.arraysize The number of rows to fetch at a time This read/write attribute specifies the number of rows to fetch at a time with :meth:`Cursor.fetchmany`. It defaults to 1, meaning to fetch a single row at a time. Methods and attributes that are not part of the standard -------------------------------------------------------- .. note:: The following methods and attributes are not part of the DB-API 2 standard. .. 
method:: Cursor.copy_from(stream, table, [format], [sep], [null], [size], [columns]) Copy data from an input stream to the specified table :param stream: the input stream (must be a file-like object, a string or an iterable returning strings) :param str table: the name of a database table :param str format: the format of the data in the input stream, can be ``'text'`` (the default), ``'csv'``, or ``'binary'`` :param str sep: a single character separator (the default is ``'\t'`` for text and ``','`` for csv) :param str null: the textual representation of the ``NULL`` value, can also be an empty string (the default is ``'\\N'``) :param int size: the size of the buffer when reading file-like objects :param list column: an optional list of column names :returns: the cursor, so you can chain commands :raises TypeError: parameters with wrong types :raises ValueError: invalid parameters :raises IOError: error when executing the copy operation This method can be used to copy data from an input stream on the client side to a database table on the server side using the ``COPY FROM`` command. The input stream can be provided in form of a file-like object (which must have a ``read()`` method), a string, or an iterable returning one row or multiple rows of input data on each iteration. The format must be text, csv or binary. The sep option sets the column separator (delimiter) used in the non binary formats. The null option sets the textual representation of ``NULL`` in the input. The size option sets the size of the buffer used when reading data from file-like objects. The copy operation can be restricted to a subset of columns. If no columns are specified, all of them will be copied. .. versionadded:: 5.0 .. 
method:: Cursor.copy_to(stream, table, [format], [sep], [null], [decode], [columns]) Copy data from the specified table to an output stream :param stream: the output stream (must be a file-like object or ``None``) :param str table: the name of a database table or a ``SELECT`` query :param str format: the format of the data in the input stream, can be ``'text'`` (the default), ``'csv'``, or ``'binary'`` :param str sep: a single character separator (the default is ``'\t'`` for text and ``','`` for csv) :param str null: the textual representation of the ``NULL`` value, can also be an empty string (the default is ``'\\N'``) :param bool decode: whether decoded strings shall be returned for non-binary formats (the default is True in Python 3) :param list column: an optional list of column names :returns: a generator if stream is set to ``None``, otherwise the cursor :raises TypeError: parameters with wrong types :raises ValueError: invalid parameters :raises IOError: error when executing the copy operation This method can be used to copy data from a database table on the server side to an output stream on the client side using the ``COPY TO`` command. The output stream can be provided in form of a file-like object (which must have a ``write()`` method). Alternatively, if ``None`` is passed as the output stream, the method will return a generator yielding one row of output data on each iteration. Output will be returned as byte strings unless you set decode to true. Note that you can also use a ``SELECT`` query instead of the table name. The format must be text, csv or binary. The sep option sets the column separator (delimiter) used in the non binary formats. The null option sets the textual representation of ``NULL`` in the output. The copy operation can be restricted to a subset of columns. If no columns are specified, all of them will be copied. .. versionadded:: 5.0 .. 
method:: Cursor.row_factory(row) Process rows before they are returned :param list row: the currently processed row of the result set :returns: the transformed row that the fetch methods shall return This method is used for processing result rows before returning them through one of the fetch methods. By default, rows are returned as named tuples. You can overwrite this method with a custom row factory if you want to return the rows as different kids of objects. This same row factory will then be used for all result sets. If you overwrite this method, the method :meth:`Cursor.build_row_factory` for creating row factories dynamically will be ignored. Note that named tuples are very efficient and can be easily converted to dicts (even OrderedDicts) by calling ``row._asdict()``. If you still want to return rows as dicts, you can create a custom cursor class like this:: class DictCursor(pgdb.Cursor): def row_factory(self, row): return {key: value for key, value in zip(self.colnames, row)} cur = DictCursor(con) # get one DictCursor instance or con.cursor_type = DictCursor # always use DictCursor instances .. versionadded:: 4.0 .. method:: Cursor.build_row_factory() Build a row factory based on the current description :returns: callable with the signature of :meth:`Cursor.row_factory` This method returns row factories for creating named tuples. It is called whenever a new result set is created, and :attr:`Cursor.row_factory` is then assigned the return value of this method. You can overwrite this method with a custom row factory builder if you want to use different row factories for different result sets. Otherwise, you can also simply overwrite the :meth:`Cursor.row_factory` method. This method will then be ignored. The default implementation that delivers rows as named tuples essentially looks like this:: def build_row_factory(self): return namedtuple('Row', self.colnames, rename=True)._make .. versionadded:: 5.0 .. 
attribute:: Cursor.colnames The list of columns names of the current result set The values in this list are the same values as the *name* elements in the :attr:`Cursor.description` attribute. Always use the latter if you want to remain standard compliant. .. versionadded:: 5.0 .. attribute:: Cursor.coltypes The list of columns types of the current result set The values in this list are the same values as the *type_code* elements in the :attr:`Cursor.description` attribute. Always use the latter if you want to remain standard compliant. .. versionadded:: 5.0 PyGreSQL-5.1/docs/_build/html/_sources/contents/pgdb/adaptation.rst.txt0000644000175100077410000003531613466770070026162 0ustar darcypyg00000000000000Remarks on Adaptation and Typecasting ===================================== .. py:currentmodule:: pgdb Both PostgreSQL and Python have the concept of data types, but there are of course differences between the two type systems. Therefore PyGreSQL needs to adapt Python objects to the representation required by PostgreSQL when passing values as query parameters, and it needs to typecast the representation of PostgreSQL data types returned by database queries to Python objects. Here are some explanations about how this works in detail in case you want to better understand or change the default behavior of PyGreSQL. Supported data types -------------------- The following automatic data type conversions are supported by PyGreSQL out of the box. If you need other automatic type conversions or want to change the default conversions, you can achieve this by using the methods explained in the next two sections. 
================================== ================== PostgreSQL Python ================================== ================== char, bpchar, name, text, varchar str bool bool bytea bytes int2, int4, int8, oid, serial int [#int8]_ int2vector list of int float4, float8 float numeric, money Decimal date datetime.date time, timetz datetime.time timestamp, timestamptz datetime.datetime interval datetime.timedelta hstore dict json, jsonb list or dict uuid uuid.UUID array list [#array]_ record tuple ================================== ================== .. note:: Elements of arrays and records will also be converted accordingly. .. [#int8] int8 is converted to long in Python 2 .. [#array] The first element of the array will always be the first element of the Python list, no matter what the lower bound of the PostgreSQL array is. The information about the start index of the array (which is usually 1 in PostgreSQL, but can also be different from 1) is ignored and gets lost in the conversion to the Python list. If you need that information, you can request it separately with the `array_lower()` function provided by PostgreSQL. Adaptation of parameters ------------------------ PyGreSQL knows how to adapt the common Python types to get a suitable representation of their values for PostgreSQL when you pass parameters to a query. For example:: >>> con = pgdb.connect(...) >>> cur = con.cursor() >>> parameters = (144, 3.75, 'hello', None) >>> tuple(cur.execute('SELECT %s, %s, %s, %s', parameters).fetchone() (144, Decimal('3.75'), 'hello', None) This is the result we can expect, so obviously PyGreSQL has adapted the parameters and sent the following query to PostgreSQL: .. 
code-block:: sql SELECT 144, 3.75, 'hello', NULL Note the subtle, but important detail that even though the SQL string passed to :meth:`cur.execute` contains conversion specifications normally used in Python with the ``%`` operator for formatting strings, we didn't use the ``%`` operator to format the parameters, but passed them as the second argument to :meth:`cur.execute`. I.e. we **didn't** write the following:: >>> tuple(cur.execute('SELECT %s, %s, %s, %s' % parameters).fetchone() If we had done this, PostgreSQL would have complained because the parameters were not adapted. Particularly, there would be no quotes around the value ``'hello'``, so PostgreSQL would have interpreted this as a database column, which would have caused a :exc:`ProgrammingError`. Also, the Python value ``None`` would have been included in the SQL command literally, instead of being converted to the SQL keyword ``NULL``, which would have been another reason for PostgreSQL to complain about our bad query: .. code-block:: sql SELECT 144, 3.75, hello, None Even worse, building queries with the use of the ``%`` operator makes us vulnerable to so called "SQL injection" exploits, where an attacker inserts malicious SQL statements into our queries that we never intended to be executed. We could avoid this by carefully quoting and escaping the parameters, but this would be tedious and if we overlook something, our code will still be vulnerable. So please don't do this. This cannot be emphasized enough, because it is such a subtle difference and using the ``%`` operator looks so natural: .. warning:: Remember to **never** insert parameters directly into your queries using the ``%`` operator. Always pass the parameters separately. The good thing is that by letting PyGreSQL do the work for you, you can treat all your parameters equally and don't need to ponder where you need to put quotes or need to escape strings. You can and should also always use the general ``%s`` specification instead of e.g. 
using ``%d`` for integers. Actually, to avoid mistakes and make it easier to insert parameters at more than one location, you can and should use named specifications, like this:: >>> params = dict(greeting='Hello', name='HAL') >>> sql = """SELECT %(greeting)s || ', ' || %(name)s ... || '. Do you read me, ' || %(name)s || '?'""" >>> cur.execute(sql, params).fetchone()[0] 'Hello, HAL. Do you read me, HAL?' PyGreSQL does not only adapt the basic types like ``int``, ``float``, ``bool`` and ``str``, but also tries to make sense of Python lists and tuples. Lists are adapted as PostgreSQL arrays:: >>> params = dict(array=[[1, 2],[3, 4]]) >>> cur.execute("SELECT %(array)s", params).fetchone()[0] [[1, 2], [3, 4]] Note that the query gives the value back as Python lists again. This is achieved by the typecasting mechanism explained in the next section. The query that was actually executed was this: .. code-block:: sql SELECT ARRAY[[1,2],[3,4]] Again, if we had inserted the list using the ``%`` operator without adaptation, the ``ARRAY`` keyword would have been missing in the query. Tuples are adapted as PostgreSQL composite types:: >>> params = dict(record=('Bond', 'James')) >>> cur.execute("SELECT %(record)s", params).fetchone()[0] ('Bond', 'James') You can also use this feature with the ``IN`` syntax of SQL:: >>> params = dict(what='needle', where=('needle', 'haystack')) >>> cur.execute("SELECT %(what)s IN %(where)s", params).fetchone()[0] True Sometimes a Python type can be ambiguous. For instance, you might want to insert a Python list not into an array column, but into a JSON column. Or you want to interpret a string as a date and insert it into a DATE column. In this case you can give PyGreSQL a hint by using :ref:`type_constructors`:: >>> cur.execute("CREATE TABLE json_data (data json, created date)") >>> params = dict( ... 
data=pgdb.Json([1, 2, 3]), created=pgdb.Date(2016, 1, 29)) >>> sql = ("INSERT INTO json_data VALUES (%(data)s, %(created)s)") >>> cur.execute(sql, params) >>> cur.execute("SELECT * FROM json_data").fetchone() Row(data=[1, 2, 3], created='2016-01-29') Let's think of another example where we create a table with a composite type in PostgreSQL: .. code-block:: sql CREATE TABLE on_hand ( item inventory_item, count integer) We assume the composite type ``inventory_item`` has been created like this: .. code-block:: sql CREATE TYPE inventory_item AS ( name text, supplier_id integer, price numeric) In Python we can use a named tuple as an equivalent to this PostgreSQL type:: >>> from collections import namedtuple >>> inventory_item = namedtuple( ... 'inventory_item', ['name', 'supplier_id', 'price']) Using the automatic adaptation of Python tuples, an item can now be inserted into the database and then read back as follows:: >>> cur.execute("INSERT INTO on_hand VALUES (%(item)s, %(count)s)", ... dict(item=inventory_item('fuzzy dice', 42, 1.99), count=1000)) >>> cur.execute("SELECT * FROM on_hand").fetchone() Row(item=inventory_item(name='fuzzy dice', supplier_id=42, price=Decimal('1.99')), count=1000) However, we may not want to use named tuples, but custom Python classes to hold our values, like this one:: >>> class InventoryItem: ... ... def __init__(self, name, supplier_id, price): ... self.name = name ... self.supplier_id = supplier_id ... self.price = price ... ... def __str__(self): ... return '%s (from %s, at $%s)' % ( ... self.name, self.supplier_id, self.price) But when we try to insert an instance of this class in the same way, we will get an error:: >>> cur.execute("INSERT INTO on_hand VALUES (%(item)s, %(count)s)", ... dict(item=InventoryItem('fuzzy dice', 42, 1.99), count=1000)) InterfaceError: Do not know how to adapt type While PyGreSQL knows how to adapt tuples, it does not know what to make out of our custom class. 
To simply convert the object to a string using the ``str`` function is not a solution, since this yields a human readable string that is not useful for PostgreSQL. However, it is possible to make such custom classes adapt themselves to PostgreSQL by adding a "magic" method with the name ``__pg_repr__``, like this:: >>> class InventoryItem: ... ... ... ... ... def __str__(self): ... return '%s (from %s, at $%s)' % ( ... self.name, self.supplier_id, self.price) ... ... def __pg_repr__(self): ... return (self.name, self.supplier_id, self.price) Now you can insert class instances the same way as you insert named tuples. Note that PyGreSQL adapts the result of ``__pg_repr__`` again if it is a tuple or a list. Otherwise, it must be a properly escaped string. Typecasting to Python --------------------- As you noticed, PyGreSQL automatically converted the PostgreSQL data to suitable Python objects when returning values via one of the "fetch" methods of a cursor. This is done by the use of built-in typecast functions. If you want to use different typecast functions or add your own if no built-in typecast function is available, then this is possible using the :func:`set_typecast` function. With the :func:`get_typecast` function you can check which function is currently set, and :func:`reset_typecast` allows you to reset the typecast function to its default. If no typecast function is set, then PyGreSQL will return the raw strings from the database. For instance, you will find that PyGreSQL uses the normal ``int`` function to cast PostgreSQL ``int4`` type values to Python:: >>> pgdb.get_typecast('int4') int You can change this to return float values instead:: >>> pgdb.set_typecast('int4', float) >>> con = pgdb.connect(...) 
>>> cur = con.cursor() >>> cur.execute('select 42::int4').fetchone()[0] 42.0 Note that the connections cache the typecast functions, so you may need to reopen the database connection, or reset the cache of the connection to make this effective, using the following command:: >>> con.type_cache.reset_typecast() The :class:`TypeCache` of the connection can also be used to change typecast functions locally for one database connection only. As a more useful example, we can create a typecast function that casts items of the composite type used as example in the previous section to instances of the corresponding Python class:: >>> con.type_cache.reset_typecast() >>> cast_tuple = con.type_cache.get_typecast('inventory_item') >>> cast_item = lambda value: InventoryItem(*cast_tuple(value)) >>> con.type_cache.set_typecast('inventory_item', cast_item) >>> str(cur.execute("SELECT * FROM on_hand").fetchone()[0]) 'fuzzy dice (from 42, at $1.99)' As you saw in the last section, PyGreSQL also has a typecast function for JSON, which is the default JSON decoder from the standard library. Let's assume we want to use a slight variation of that decoder in which every integer in JSON is converted to a float in Python. This can be accomplished as follows:: >>> from json import loads >>> cast_json = lambda v: loads(v, parse_int=float) >>> pgdb.set_typecast('json', cast_json) >>> cur.execute("SELECT data FROM json_data").fetchone()[0] [1.0, 2.0, 3.0] Note again that you may need to run ``con.type_cache.reset_typecast()`` to make this effective. Also note that the two types ``json`` and ``jsonb`` have their own typecast functions, so if you use ``jsonb`` instead of ``json``, you need to use this type name when setting the typecast function:: >>> pgdb.set_typecast('jsonb', cast_json) As one last example, let us try to typecast the geometric data type ``circle`` of PostgreSQL into a `SymPy <https://www.sympy.org>`_ ``Circle`` object.
Let's assume we have created and populated a table with two circles, like so: .. code-block:: sql CREATE TABLE circle ( name varchar(8) primary key, circle circle); INSERT INTO circle VALUES ('C1', '<(2, 3), 3>'); INSERT INTO circle VALUES ('C2', '<(1, -1), 4>'); With PostgreSQL we can easily calculate that these two circles overlap:: >>> con.cursor().execute("""SELECT c1.circle && c2.circle ... FROM circle c1, circle c2 ... WHERE c1.name = 'C1' AND c2.name = 'C2'""").fetchone()[0] True However, calculating the intersection points between the two circles using the ``#`` operator does not work (at least not as of PostgreSQL version 9.5). So let's resort to SymPy to find out. To ease importing circles from PostgreSQL to SymPy, we create and register the following typecast function:: >>> from sympy import Point, Circle >>> >>> def cast_circle(s): ... p, r = s[1:-1].rsplit(',', 1) ... p = p[1:-1].split(',') ... return Circle(Point(float(p[0]), float(p[1])), float(r)) ... >>> pgdb.set_typecast('circle', cast_circle) Now we can import the circles in the table into Python quite easily:: >>> circle = {c.name: c.circle for c in con.cursor().execute( ... "SELECT * FROM circle").fetchall()} The result is a dictionary mapping circle names to SymPy ``Circle`` objects. We can verify that the circles have been imported correctly: >>> circle {'C1': Circle(Point(2, 3), 3.0), 'C2': Circle(Point(1, -1), 4.0)} Finally we can find the exact intersection points with SymPy: >>> circle['C1'].intersection(circle['C2']) [Point(29/17 + 64564173230121*sqrt(17)/100000000000000, -80705216537651*sqrt(17)/500000000000000 + 31/17), Point(-64564173230121*sqrt(17)/100000000000000 + 29/17, 80705216537651*sqrt(17)/500000000000000 + 31/17)] PyGreSQL-5.1/docs/_build/html/_sources/contents/pgdb/types.rst.txt0000644000175100077410000001311513466770070025173 0ustar darcypyg00000000000000Type -- Type objects and constructors ===================================== .. py:currentmodule:: pgdb ..
_type_constructors: Type constructors ----------------- For binding to an operation's input parameters, PostgreSQL needs to have the input in a particular format. However, from the parameters to the :meth:`Cursor.execute` and :meth:`Cursor.executemany` methods it is not always obvious as which PostgreSQL data types they shall be bound. For instance, a Python string could be bound as a simple ``char`` value, or also as a ``date`` or a ``time``. Or a list could be bound as an ``array`` or a ``json`` object. To make the intention clear in such cases, you can wrap the parameters in type helper objects. PyGreSQL provides the constructors defined below to create such objects that can hold special values. When passed to the cursor methods, PyGreSQL can then detect the proper type of the input parameter and bind it accordingly. The :mod:`pgdb` module exports the following type constructors as part of the DB-API 2 standard: .. function:: Date(year, month, day) Construct an object holding a date value .. function:: Time(hour, [minute], [second], [microsecond], [tzinfo]) Construct an object holding a time value .. function:: Timestamp(year, month, day, [hour], [minute], [second], [microsecond], [tzinfo]) Construct an object holding a time stamp value .. function:: DateFromTicks(ticks) Construct an object holding a date value from the given *ticks* value .. function:: TimeFromTicks(ticks) Construct an object holding a time value from the given *ticks* value .. function:: TimestampFromTicks(ticks) Construct an object holding a time stamp from the given *ticks* value .. function:: Binary(bytes) Construct an object capable of holding a (long) binary string value Additionally, PyGreSQL provides the following constructors for PostgreSQL specific data types: .. function:: Interval(days, hours=0, minutes=0, seconds=0, microseconds=0) Construct an object holding a time interval value .. versionadded:: 5.0 ..
function:: Uuid([hex], [bytes], [bytes_le], [fields], [int], [version]) Construct an object holding a UUID value .. versionadded:: 5.0 .. function:: Hstore(dict) Construct a wrapper for holding an hstore dictionary .. versionadded:: 5.0 .. function:: Json(obj, [encode]) Construct a wrapper for holding an object serializable to JSON You can pass an optional serialization function as a parameter. By default, PyGreSQL uses :func:`json.dumps` to serialize it. .. function:: Literal(sql) Construct a wrapper for holding a literal SQL string .. versionadded:: 5.0 Example for using a type constructor:: >>> cursor.execute("create table jsondata (data jsonb)") >>> data = {'id': 1, 'name': 'John Doe', 'kids': ['Johnnie', 'Janie']} >>> cursor.execute("insert into jsondata values (%s)", [Json(data)]) .. note:: SQL ``NULL`` values are always represented by the Python *None* singleton on input and output. .. _type_objects: Type objects ------------ .. class:: Type The :attr:`Cursor.description` attribute returns information about each of the result columns of a query. The *type_code* must compare equal to one of the :class:`Type` objects defined below. Type objects can be equal to more than one type code (e.g. :class:`DATETIME` is equal to the type codes for ``date``, ``time`` and ``timestamp`` columns). The pgdb module exports the following :class:`Type` objects as part of the DB-API 2 standard: .. object:: STRING Used to describe columns that are string-based (e.g. ``char``, ``varchar``, ``text``) .. object:: BINARY Used to describe (long) binary columns (``bytea``) .. object:: NUMBER Used to describe numeric columns (e.g. ``int``, ``float``, ``numeric``, ``money``) .. object:: DATETIME Used to describe date/time columns (e.g. ``date``, ``time``, ``timestamp``, ``interval``) .. object:: ROWID Used to describe the ``oid`` column of PostgreSQL database tables .. note:: The following more specific type objects are not part of the DB-API 2 standard. .. 
object:: BOOL Used to describe ``boolean`` columns .. object:: SMALLINT Used to describe ``smallint`` columns .. object:: INTEGER Used to describe ``integer`` columns .. object:: LONG Used to describe ``bigint`` columns .. object:: FLOAT Used to describe ``float`` columns .. object:: NUMERIC Used to describe ``numeric`` columns .. object:: MONEY Used to describe ``money`` columns .. object:: DATE Used to describe ``date`` columns .. object:: TIME Used to describe ``time`` columns .. object:: TIMESTAMP Used to describe ``timestamp`` columns .. object:: INTERVAL Used to describe date and time ``interval`` columns .. object:: UUID Used to describe ``uuid`` columns .. object:: HSTORE Used to describe ``hstore`` columns .. versionadded:: 5.0 .. object:: JSON Used to describe ``json`` and ``jsonb`` columns .. versionadded:: 5.0 .. object:: ARRAY Used to describe columns containing PostgreSQL arrays .. versionadded:: 5.0 .. object:: RECORD Used to describe columns containing PostgreSQL records .. versionadded:: 5.0 Example for using some type objects:: >>> cursor = con.cursor() >>> cursor.execute("create table jsondata (created date, data jsonb)") >>> cursor.execute("select * from jsondata") >>> (created, data) = (d.type_code for d in cursor.description) >>> created == DATE True >>> created == DATETIME True >>> created == TIME False >>> data == JSON True >>> data == STRING False PyGreSQL-5.1/docs/_build/html/_sources/contents/pgdb/typecache.rst.txt0000644000175100077410000000665313466770070026005 0ustar darcypyg00000000000000TypeCache -- The internal cache for database types ================================================== .. py:currentmodule:: pgdb .. class:: TypeCache .. versionadded:: 5.0 The internal :class:`TypeCache` of PyGreSQL is not part of the DB-API 2 standard, but is documented here in case you need full control and understanding of the internal handling of database types. 
The TypeCache is essentially a dictionary mapping PostgreSQL internal type names and type OIDs to DB-API 2 "type codes" (which are also returned as the *type_code* field of the :attr:`Cursor.description` attribute). These type codes are strings which are equal to the PostgreSQL internal type name, but they are also carrying additional information about the associated PostgreSQL type in the following attributes: - *oid* -- the OID of the type - *len* -- the internal size - *type* -- ``'b'`` = base, ``'c'`` = composite, ... - *category* -- ``'A'`` = Array, ``'B'`` = Boolean, ... - *delim* -- delimiter to be used when parsing arrays - *relid* -- the table OID for composite types For details, see the PostgreSQL documentation on `pg_type <https://www.postgresql.org/docs/current/catalog-pg-type.html>`_. In addition to the dictionary methods, the :class:`TypeCache` provides the following methods: .. method:: TypeCache.get_fields(typ) Get the names and types of the fields of composite types :param typ: PostgreSQL type name or OID of a composite type :type typ: str or int :returns: a list of pairs of field names and types :rtype: list .. method:: TypeCache.get_typecast(typ) Get the cast function for the given database type :param str typ: PostgreSQL type name or type code :returns: the typecast function for the specified type :rtype: function or None .. method:: TypeCache.set_typecast(typ, cast) Set a typecast function for the given database type(s) :param typ: PostgreSQL type name or type code, or list of such :type typ: str or list :param cast: the typecast function to be set for the specified type(s) :type cast: function The typecast function must take one string object as argument and return a Python object into which the PostgreSQL type shall be casted. If the function takes another parameter named *connection*, then the current database connection will also be passed to the typecast function. This may sometimes be necessary to look up certain database settings. ..
method:: TypeCache.reset_typecast([typ]) Reset the typecasts for the specified (or all) type(s) to their defaults :param str typ: PostgreSQL type name or type code, or list of such, or None to reset all typecast functions :type typ: str, list or None .. method:: TypeCache.typecast(value, typ) Cast the given value according to the given database type :param str typ: PostgreSQL type name or type code :returns: the casted value .. note:: Note that the :class:`TypeCache` is always bound to a database connection. You can also get, set and reset typecast functions on a global level using the functions :func:`pgdb.get_typecast`, :func:`pgdb.set_typecast` and :func:`pgdb.reset_typecast`. If you do this, the current database connections will continue to use their already cached typecast functions unless you call the :meth:`TypeCache.reset_typecast` method on the :attr:`Connection.type_cache` objects of the running connections. PyGreSQL-5.1/docs/_build/html/_sources/contents/pgdb/module.rst.txt0000644000175100077410000001572313466770070025323 0ustar darcypyg00000000000000Module functions and constants ============================== .. py:currentmodule:: pgdb The :mod:`pgdb` module defines a :func:`connect` function that allows to connect to a database, some global constants describing the capabilities of the module as well as several exception classes. connect -- Open a PostgreSQL connection --------------------------------------- ..
function:: connect([dsn], [user], [password], [host], [database], [**kwargs]) Return a new connection to the database :param str dsn: data source name as string :param str user: the database user name :param str password: the database password :param str host: the hostname of the database :param database: the name of the database :param dict kwargs: other connection parameters :returns: a connection object :rtype: :class:`Connection` :raises pgdb.OperationalError: error connecting to the database This function takes parameters specifying how to connect to a PostgreSQL database and returns a :class:`Connection` object using these parameters. If specified, the *dsn* parameter must be a string with the format ``'host:base:user:passwd:opt'``. All of the parts specified in the *dsn* are optional. You can also specify the parameters individually using keyword arguments, which always take precedence. The *host* can also contain a port if specified in the format ``'host:port'``. In the *opt* part of the *dsn* you can pass command-line options to the server. You can pass additional connection parameters using the optional *kwargs* keyword arguments. Example:: con = connect(dsn='myhost:mydb', user='guido', password='234$') .. versionchanged:: 5.0.1 Support for additional parameters passed as *kwargs*. get/set/reset_typecast -- Control the global typecast functions --------------------------------------------------------------- PyGreSQL uses typecast functions to cast the raw data coming from the database to Python objects suitable for the particular database type. These functions take a single string argument that represents the data to be casted and must return the casted value. PyGreSQL provides built-in typecast functions for the common database types, but if you want to change these or add more typecast functions, you can set these up using the following functions. .. note:: The following functions are not part of the DB-API 2 standard. .. 
method:: get_typecast(typ) Get the global cast function for the given database type :param str typ: PostgreSQL type name or type code :returns: the typecast function for the specified type :rtype: function or None .. versionadded:: 5.0 .. method:: set_typecast(typ, cast) Set a global typecast function for the given database type(s) :param typ: PostgreSQL type name or type code, or list of such :type typ: str or list :param cast: the typecast function to be set for the specified type(s) :type cast: function The typecast function must take one string object as argument and return a Python object into which the PostgreSQL type shall be casted. If the function takes another parameter named *connection*, then the current database connection will also be passed to the typecast function. This may sometimes be necessary to look up certain database settings. .. versionadded:: 5.0 As of version 5.0.3 you can also use this method to change the typecasting of PostgreSQL array types. You must run ``set_typecast('anyarray', cast)`` in order to do this. The ``cast`` method must take a string value and a cast function for the base type and return the array converted to a Python object. For instance, run ``set_typecast('anyarray', lambda v, c: v)`` to switch off the casting of arrays completely, and always return them encoded as strings. .. method:: reset_typecast([typ]) Reset the typecasts for the specified (or all) type(s) to their defaults :param str typ: PostgreSQL type name or type code, or list of such, or None to reset all typecast functions :type typ: str, list or None .. versionadded:: 5.0 Note that database connections cache types and their cast functions using connection specific :class:`TypeCache` objects. You can also get, set and reset typecast functions on the connection level using the methods :meth:`TypeCache.get_typecast`, :meth:`TypeCache.set_typecast` and :meth:`TypeCache.reset_typecast` of the :attr:`Connection.type_cache`.
This will not affect other connections or future connections. In order to be sure a global change is picked up by a running connection, you must reopen it or call :meth:`TypeCache.reset_typecast` on the :attr:`Connection.type_cache`. Module constants ---------------- .. data:: apilevel The string constant ``'2.0'``, stating that the module is DB-API 2.0 level compliant. .. data:: threadsafety The integer constant 1, stating that the module itself is thread-safe, but the connections are not thread-safe, and therefore must be protected with a lock if you want to use them from different threads. .. data:: paramstyle The string constant ``pyformat``, stating that parameters should be passed using Python extended format codes, e.g. ``" ... WHERE name=%(name)s"``. Errors raised by this module ---------------------------- The errors that can be raised by the :mod:`pgdb` module are the following: .. exception:: Warning Exception raised for important warnings like data truncations while inserting. .. exception:: Error Exception that is the base class of all other error exceptions. You can use this to catch all errors with one single except statement. Warnings are not considered errors and thus do not use this class as base. .. exception:: InterfaceError Exception raised for errors that are related to the database interface rather than the database itself. .. exception:: DatabaseError Exception raised for errors that are related to the database. In PyGreSQL, this also has a :attr:`DatabaseError.sqlstate` attribute that contains the ``SQLSTATE`` error code of this error. .. exception:: DataError Exception raised for errors that are due to problems with the processed data like division by zero or numeric value out of range. .. exception:: OperationalError Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer, e.g. 
an unexpected disconnect occurs, the data source name is not found, a transaction could not be processed, or a memory allocation error occurred during processing. .. exception:: IntegrityError Exception raised when the relational integrity of the database is affected, e.g. a foreign key check fails. .. exception:: ProgrammingError Exception raised for programming errors, e.g. table not found or already exists, syntax error in the SQL statement or wrong number of parameters specified. .. exception:: NotSupportedError Exception raised in case a method or database API was used which is not supported by the database. PyGreSQL-5.1/docs/_build/html/_sources/contents/changelog.rst.txt0000644000175100077410000007434413470244250025044 0ustar darcypyg00000000000000ChangeLog ========= Version 5.1 (2019-05-17) ------------------------ - Changes to the classic PyGreSQL module (pg): - Support for prepared statements (following a suggestion and first implementation by Justin Pryzby on the mailing list). - DB wrapper objects based on existing connections can now be closed and reopened properly (but the underlying connection will not be affected). - The query object can now be used as an iterator similar to query.getresult() and will then yield the rows as tuples. Thanks to Justin Pryzby for the proposal and most of the implementation. - Deprecated query.ntuples() in the classic API, since len(query) can now be used and returns the same number. - The i-th row of the result can now be accessed as `query[i]`. - New method query.scalarresult() that gets only the first field of each row as a list of scalar values. - New methods query.one(), query.onenamed(), query.onedict() and query.onescalar() that fetch only one row from the result or None if there are no more rows, similar to the cursor.fetchone() method in DB-API 2. 
- New methods query.single(), query.singlenamed(), query.singledict() and query.singlescalar() that fetch only one row from the result, and raise an error if the result does not have exactly one row. - New methods query.dictiter(), query.namediter() and query.scalariter() returning the same values as query.dictresult(), query.namedresult() and query.scalarresult(), but as iterables instead of lists. This avoids creating a Python list of all results and can be slightly more efficient. - Removed pg.get/set_namedresult. You can configure the named tuples factory with the pg.set_row_factory_size() function and change the implementation with pg.set_query_helpers(), but this is not recommended and this function is not part of the official API. - Added new connection attributes `socket`, `backend_pid`, `ssl_in_use` and `ssl_attributes` (the latter need PostgreSQL >= 9.5 on the client). - Changes to the DB-API 2 module (pgdb): - Connections now have an `autocommit` attribute which is set to `False` by default but can be set to `True` to switch to autocommit mode where no transactions are started and calling commit() is not required. Note that this is not part of the DB-API 2 standard. Version 5.0.7 (2019-05-17) -------------------------- - This version officially supports the new PostgreSQL 11. - Fixed a bug in parsing array subscript ranges (reported by Justin Pryzby). - Fixed an issue when deleting a DB wrapper object with the underlying connection already closed (bug report by Jacob Champion). Version 5.0.6 (2018-07-29) -------------------------- - This version officially supports the new Python 3.7. - Correct trove classifier for the PostgreSQL License. Version 5.0.5 (2018-04-25) -------------------------- - This version officially supports the new PostgreSQL 10. - The memory for the string with the number of rows affected by a classic pg module query() was already freed (bug report and fix by Peifeng Qiu).
Version 5.0.4 (2017-07-23) -------------------------- - This version officially supports the new Python 3.6 and PostgreSQL 9.6. - query_formatted() can now be used without parameters. - The automatic renaming of columns that are invalid as field names of named tuples now works more accurately in Python 2.6 and 3.0. - Fixed error checks for unlink() and export() methods of large objects (bug report by Justin Pryzby). - Fixed a compilation issue under OS X (bug report by Josh Johnston). Version 5.0.3 (2016-12-10) -------------------------- - It is now possible to use a custom array cast function by changing the type caster for the 'anyarray' type. For instance, by calling set_typecast('anyarray', lambda v, c: v) you can have arrays returned as strings instead of lists. Note that in the pg module, you can also call set_array(False) in order to return arrays as strings. - The namedtuple classes used for the rows of query results are now cached and reused internally, since creating namedtuples classes in Python is a somewhat expensive operation. By default the cache has a size of 1024 entries, but this can be changed with the set_row_factory_size() function. In certain cases this change can notably improve the performance. - The namedresult() method in the classic API now also tries to rename columns that would result in invalid field names. Version 5.0.2 (2016-09-13) -------------------------- - Fixed an infinite recursion problem in the DB wrapper class of the classic module that could occur when the underlying connection could not be properly opened (bug report by Justin Pryzby). Version 5.0.1 (2016-08-18) -------------------------- - The update() and delete() methods of the DB wrapper now use the OID instead of the primary key if both are provided. This restores backward compatibility with PyGreSQL 4.x and allows updating the primary key itself if an OID exists. 
- The connect() function of the DB API 2.0 module now accepts additional keyword parameters such as "application_name" which will be passed on to PostgreSQL. - PyGreSQL now adapts some queries to be able to access older PostgreSQL 8.x databases (as suggested on the mailing list by Andres Mejia). However, these old versions of PostgreSQL are not officially supported and tested any more. - Fixed an issue with Postgres types that have an OID >= 0x80000000 (reported on the mailing list by Justin Pryzby). - Allow extra values that are not used in the command in the parameter dict passed to the query_formatted() method (as suggested by Justin Pryzby). - Improved handling of empty arrays in the classic module. - Unused classic connections were not properly garbage collected which could cause memory leaks (reported by Justin Pryzby). - Made C extension compatible with MSVC 9 again (this was needed to compile for Python 2 on Windows). Version 5.0 (2016-03-20) ------------------------ - This version now runs on both Python 2 and Python 3. - The supported versions are Python 2.6 to 2.7, and 3.3 to 3.5. - PostgreSQL is supported in all versions from 9.0 to 9.5. - Changes in the classic PyGreSQL module (pg): - The classic interface got two new methods get_as_list() and get_as_dict() returning a database table as a Python list or dict. The amount of data returned can be controlled with various parameters. - A method upsert() has been added to the DB wrapper class that utilizes the "upsert" feature that is new in PostgreSQL 9.5. The new method nicely complements the existing get/insert/update/delete() methods. - When using insert/update/upsert(), you can now pass PostgreSQL arrays as lists and PostgreSQL records as tuples in the classic module. - Conversely, when the query method returns a PostgreSQL array, it is passed to Python as a list. PostgreSQL records are converted to named tuples as well, but only if you use one of the get/insert/update/delete() methods. 
PyGreSQL uses a new fast built-in parser to achieve this. The automatic conversion of arrays to lists can be disabled with set_array(False). - The pkey() method of the classic interface now returns tuples instead of frozenset. The order of the tuples is like in the primary key index. - Like the DB-API 2 module, the classic module now also returns bool values from the database as Python bool objects instead of strings. You can still restore the old behavior by calling set_bool(False). - Like the DB-API 2 module, the classic module now also returns bytea data fetched from the database as byte strings, so you don't need to call unescape_bytea() any more. This has been made configurable though, and you can restore the old behavior by calling set_bytea_escaped(True). - A method set_jsondecode() has been added for changing or removing the function that automatically decodes JSON data coming from the database. By default, decoding JSON is now enabled and uses the decoder function in the standard library with its default parameters. - The table name that is affixed to the name of the OID column returned by the get() method of the classic interface will not automatically be fully qualified any more. This reduces overhead from the interface, but it means you must always write the table name in the same way when you call the methods using it and you are using tables with OIDs. Also, OIDs are now only used when access via primary key is not possible. Note that OIDs are considered deprecated anyway, and they are not created by default any more in PostgreSQL 8.1 and later. - The internal caching and automatic quoting of class names in the classic interface has been simplified and improved, it should now perform better and use less memory. Also, overhead for quoting values in the DB wrapper methods has been reduced and security has been improved by passing the values to libpq separately as parameters instead of inline. 
- It is now possible to use the registered type names instead of the more coarse-grained type names that are used by default in PyGreSQL, without breaking any of the mechanisms for quoting and typecasting, which rely on the type information. This is achieved while maintaining simplicity and backward compatibility by augmenting the type name string objects with all the necessary information under the cover. To switch registered type names on or off (this is the default), call the DB wrapper method use_regtypes(). - A new method query_formatted() has been added to the DB wrapper class that allows using the format specifications from Python. A flag "inline" can be set to specify whether parameters should be sent to the database separately or formatted into the SQL. - A new type helper Bytea() has been added. - Changes in the DB-API 2 module (pgdb): - The DB-API 2 module now always returns result rows as named tuples instead of simply lists as before. The documentation explains how you can restore the old behavior or use custom row objects instead. - The names of the various classes used by the classic and DB-API 2 modules have been renamed to become simpler, more intuitive and in line with the names used in the DB-API 2 documentation. Since the API provides only objects of these types through constructor functions, this should not cause any incompatibilities. - The DB-API 2 module now supports the callproc() cursor method. Note that output parameters are currently not replaced in the return value. - The DB-API 2 module now supports copy operations between data streams on the client and database tables via the COPY command of PostgreSQL. The cursor method copy_from() can be used to copy data from the database to the client, and the cursor method copy_to() can be used to copy data from the client to the database. - The 7-tuples returned by the description attribute of a pgdb cursor are now named tuples, i.e. their elements can be also accessed by name. 
The column names and types can now also be requested through the colnames and coltypes attributes, which are not part of DB-API 2 though. The type_code provided by the description attribute is still equal to the PostgreSQL internal type name, but now carries some more information in additional attributes. The size, precision and scale information that is part of the description is now properly set for numeric types. - If you pass a Python list as one of the parameters to a DB-API 2 cursor, it is now automatically bound using an ARRAY constructor. If you pass a Python tuple, it is bound using a ROW constructor. This is useful for passing records as well as making use of the IN syntax. - Inversely, when a fetch method of a DB-API 2 cursor returns a PostgreSQL array, it is passed to Python as a list, and when it returns a PostgreSQL composite type, it is passed to Python as a named tuple. PyGreSQL uses a new fast built-in parser to achieve this. Anonymous composite types are also supported, but yield only an ordinary tuple containing text strings. - New type helpers Interval() and Uuid() have been added. - The connection has a new attribute "closed" that can be used to check whether the connection is closed or broken. - SQL commands are always handled as if they include parameters, i.e. literal percent signs must always be doubled. This consistent behavior is necessary for using pgdb with wrappers like SQLAlchemy. - PyGreSQL 5.0 will be supported as a database driver by SQLAlchemy 1.1. - Changes concerning both modules: - PyGreSQL now tries to raise more specific and appropriate subclasses of DatabaseError than just ProgrammingError. Particularly, when database constraints are violated, it raises an IntegrityError now. - The modules now provide get_typecast() and set_typecast() methods allowing to control the typecasting on the global level. 
The connection objects have got type caches with the same methods which give control over the typecasting on the level of the current connection. See the documentation for details about the type cache and the typecast mechanisms provided by PyGreSQL. - Dates, times, timestamps and time intervals are now returned as the corresponding Python objects from the datetime module of the standard library. In earlier versions of PyGreSQL they had been returned as strings. You can restore the old behavior by deactivating the respective typecast functions, e.g. set_typecast('date', str). - PyGreSQL now supports the "uuid" data type, converting such columns automatically to and from Python uuid.UUID objects. - PyGreSQL now supports the "hstore" data type, converting such columns automatically to and from Python dictionaries. If you want to insert Python objects as hstore data using DB-API 2, you should wrap them in the new HStore() type constructor as a hint to PyGreSQL. - PyGreSQL now supports the "json" and "jsonb" data types, converting such columns automatically to and from Python objects. If you want to insert Python objects as JSON data using DB-API 2, you should wrap them in the new Json() type constructor as a hint to PyGreSQL. - A new type helper Literal() for inserting parameters literally as SQL has been added. This is useful for table names, for instance. - Fast parsers cast_array(), cast_record() and cast_hstore() for the input and output syntax for PostgreSQL arrays, composite types and the hstore type have been added to the C extension module. The array parser also allows using multi-dimensional arrays with PyGreSQL. - The tty parameter and attribute of database connections have been removed since they are not supported any more since PostgreSQL 7.4. Version 4.2.2 (2016-03-18) -------------------------- - The get_relations() and get_tables() methods now also return system views and tables if you set the optional "system" parameter to True. 
- Fixed a regression when using temporary tables with DB wrapper methods (thanks to Patrick TJ McPhee for reporting). Version 4.2.1 (2016-02-18) -------------------------- - Fixed a small bug when setting the notice receiver. - Some more minor fixes and re-packaging with proper permissions. Version 4.2 (2016-01-21) ------------------------ - The supported Python versions are 2.4 to 2.7. - PostgreSQL is supported in all versions from 8.3 to 9.5. - Set a better default for the user option "escaping-funcs". - Force build to compile with no errors. - New methods get_parameters() and set_parameters() in the classic interface which can be used to get or set run-time parameters. - New method truncate() in the classic interface that can be used to quickly empty a table or a set of tables. - Fix decimal point handling. - Add option to return boolean values as bool objects. - Add option to return money values as string. - get_tables() does not list information schema tables any more. - Fix notification handler (Thanks Patrick TJ McPhee). - Fix a small issue with large objects. - Minor improvements of the NotificationHandler. - Converted documentation to Sphinx and added many missing parts. - The tutorial files have become a chapter in the documentation. - Greatly improved unit testing, tests run with Python 2.4 to 2.7 again. Version 4.1.1 (2013-01-08) -------------------------- - Add NotificationHandler class and method. Replaces need for pgnotify. - Sharpen test for inserting current_timestamp. - Add more quote tests. False and 0 should evaluate to NULL. - More tests - Any number other than 0 is True. - Do not use positional parameters internally. This restores backward compatibility with version 4.0. - Add methods for changing the decimal point. Version 4.1 (2013-01-01) ------------------------ - Dropped support for Python below 2.5 and PostgreSQL below 8.3. - Added support for Python up to 2.7 and PostgreSQL up to 9.2. 
- Particularly, support PQescapeLiteral() and PQescapeIdentifier(). - The query method of the classic API now supports positional parameters. This is an effective way to pass arbitrary or unknown data without worrying about SQL injection or syntax errors (contribution by Patrick TJ McPhee). - The classic API now supports a method namedresult() in addition to getresult() and dictresult(), which returns the rows of the result as named tuples if these are supported (Python 2.6 or higher). - The classic API has got the new methods begin(), commit(), rollback(), savepoint() and release() for handling transactions. - Both classic and DBAPI 2 connections can now be used as context managers for encapsulating transactions. - The execute() and executemany() methods now return the cursor object, so you can now write statements like "for row in cursor.execute(...)" (as suggested by Adam Frederick). - Binary objects are now automatically escaped and unescaped. - Bug in money quoting fixed. Amounts of $0.00 handled correctly. - Proper handling of date and time objects as input. - Proper handling of floats with 'nan' or 'inf' values as input. - Fixed the set_decimal() function. - All DatabaseError instances now have a sqlstate attribute. - The getnotify() method can now also return payload strings (#15). - Better support for notice processing with the new methods set_notice_receiver() and get_notice_receiver() (as suggested by Michael Filonenko, see #37). - Open transactions are rolled back when pgdb connections are closed (as suggested by Peter Harris, see #46). - Connections and cursors can now be used with the "with" statement (as suggested by Peter Harris, see #46). - New method use_regtypes() that can be called to let getattnames() return registered type names instead of the simplified classic types (#44). Version 4.0 (2009-01-01) ------------------------ - Dropped support for Python below 2.3 and PostgreSQL below 7.4. 
- Improved performance of fetchall() for large result sets by speeding up the type casts (as suggested by Peter Schuller). - Exposed exceptions as attributes of the connection object. - Exposed connection as attribute of the cursor object. - Cursors now support the iteration protocol. - Added new method to get parameter settings. - Added customizable row_factory as suggested by Simon Pamies. - Separated between mandatory and additional type objects. - Added keyword args to insert, update and delete methods. - Added exception handling for direct copy. - Start transactions only when necessary, not after every commit(). - Release the GIL while making a connection (as suggested by Peter Schuller). - If available, use decimal.Decimal for numeric types. - Allow DB wrapper to be used with DB-API 2 connections (as suggested by Chris Hilton). - Made private attributes of DB wrapper accessible. - Dropped dependence on mx.DateTime module. - Support for PQescapeStringConn() and PQescapeByteaConn(); these are now also used by the internal _quote() functions. - Added 'int8' to INTEGER types. New SMALLINT type. - Added a way to find the number of rows affected by a query() with the classic pg module by returning it as a string. For single inserts, query() still returns the oid as an integer. The pgdb module already provides the "rowcount" cursor attribute for the same purpose. - Improved getnotify() by calling PQconsumeInput() instead of submitting an empty command. - Removed compatibility code for old OID munging style. - The insert() and update() methods now use the "returning" clause if possible to get all changed values, and they also check in advance whether a subsequent select is possible, so that ongoing transactions won't break if there is no select privilege. - Added "protocol_version" and "server_version" attributes. - Revived the "user" attribute. - The pg module now works correctly with composite primary keys; these are represented as frozensets. 
- Removed the undocumented and actually unnecessary "view" parameter from the get() method. - get() raises a nicer ProgrammingError instead of a KeyError if no primary key was found. - delete() now also works based on the primary key if no oid available and returns whether the row existed or not. Version 3.8.1 (2006-06-05) -------------------------- - Use string methods instead of deprecated string functions. - Only use SQL-standard way of escaping quotes. - Added the functions escape_string() and escape/unescape_bytea() (as suggested by Charlie Dyson and Kavous Bojnourdi a long time ago). - Reverted code in clear() method that set date to current. - Added code for backwards compatibility in OID munging code. - Reorder attnames tests so that "interval" is checked for before "int." - If caller supplies key dictionary, make sure that all has a namespace. Version 3.8 (2006-02-17) ------------------------ - Installed new favicon.ico from Matthew Sporleder - Replaced snprintf by PyOS_snprintf. 
- Removed NO_SNPRINTF switch which is not needed any longer - Clean up some variable names and namespace - Add get_relations() method to get any type of relation - Rewrite get_tables() to use get_relations() - Use new method in get_attnames method to get attributes of views as well - Add Binary type - Number of rows is now -1 after executing no-result statements - Fix some number handling - Non-simple types do not raise an error any more - Improvements to documentation framework - Take into account that nowadays not every table must have an oid column - Simplification and improvement of the inserttable() function - Fix up unit tests - The usual assortment of minor fixes and enhancements Version 3.7 (2005-09-07) ------------------------ Improvement of pgdb module: - Use Python standard `datetime` if `mxDateTime` is not available Major improvements and clean-up in classic pg module: - All members of the underlying connection directly available in `DB` - Fixes to quoting function - Add checks for valid database connection to methods - Improved namespace support, handle `search_path` correctly - Removed old dust and unnecessary imports, added docstrings - Internal sql statements as one-liners, smoothed out ugly code Version 3.6.2 (2005-02-23) -------------------------- - Further fixes to namespace handling Version 3.6.1 (2005-01-11) -------------------------- - Fixes to namespace handling Version 3.6 (2004-12-17) ------------------------ - Better DB-API 2.0 compliance - Exception hierarchy moved into C module and made available to both APIs - Fix error in update method that caused false exceptions - Moved to standard exception hierarchy in classic API - Added new method to get transaction state - Use proper Python constants where appropriate - Use Python versions of strtol, etc. Allows Win32 build. 
- Bug fixes and cleanups Version 3.5 (2004-08-29) ------------------------ Fixes and enhancements: - Add interval to list of data types - fix up method wrapping especially close() - retry pkeys once if table missing in case it was just added - wrap query method separately to handle debug better - use isinstance instead of type - fix free/PQfreemem issue - finally - miscellaneous cleanups and formatting Version 3.4 (2004-06-02) ------------------------ Some cleanups and fixes. This is the first version where PyGreSQL is moved back out of the PostgreSQL tree. A lot of the changes mentioned below were actually made while in the PostgreSQL tree since their last release. - Allow for larger integer returns - Return proper strings for true and false - Cleanup convenience method creation - Enhance debugging method - Add reopen method - Allow programs to preload field names for speedup - Move OID handling so that it returns long instead of int - Miscellaneous cleanups and formatting Version 3.3 (2001-12-03) ------------------------ A few cleanups. Mostly there was some confusion about the latest version and so I am bumping the number to keep it straight. - Added NUMERICOID to list of returned types. This fixes a bug when returning aggregates in the latest version of PostgreSQL. Version 3.2 (2001-06-20) ------------------------ Note that there are very few changes to PyGreSQL between 3.1 and 3.2. The main reason for the release is the move into the PostgreSQL development tree. Even the WIN32 changes are pretty minor. - Add Win32 support (gerhard@bigfoot.de) - Fix some DB-API quoting problems (niall.smart@ebeon.com) - Moved development into PostgreSQL development tree. Version 3.1 (2000-11-06) ------------------------ - Fix some quoting functions. In particular handle NULLs better. 
- Use a method to add primary key information rather than direct manipulation of the class structures - Break decimal out in `_quote` (in pg.py) and treat it as float - Treat timestamp like date for quoting purposes - Remove a redundant SELECT from the `get` method speeding it, and `insert` (since it calls `get`) up a little. - Add test for BOOL type in typecast method to `pgdbTypeCache` class (tv@beamnet.de) - Fix pgdb.py to send port as integer to lower level function (dildog@l0pht.com) - Change pg.py to speed up some operations - Allow updates on tables with no primary keys Version 3.0 (2000-05-30) ------------------------ - Remove strlen() call from pglarge_write() and get size from object (Richard@Bouska.cz) - Add a little more error checking to the quote function in the wrapper - Add extra checking in `_quote` function - Wrap query in pg.py for debugging - Add DB-API 2.0 support to pgmodule.c (andre@via.ecp.fr) - Add DB-API 2.0 wrapper pgdb.py (andre@via.ecp.fr) - Correct keyword clash (temp) in tutorial - Clean up layout of tutorial - Return NULL values as None (rlawrence@lastfoot.com) (WARNING: This will cause backwards compatibility issues) - Change None to NULL in insert and update - Change hash-bang lines to use /usr/bin/env - Clearing date should be blank (NULL) not TODAY - Quote backslashes in strings in `_quote` (brian@CSUA.Berkeley.EDU) - Expanded and clarified build instructions (tbryan@starship.python.net) - Make code thread safe (Jerome.Alet@unice.fr) - Add README.distutils (mwa@gate.net & jeremy@cnri.reston.va.us) - Many fixes and increased DB-API compliance by chifungfan@yahoo.com, tony@printra.net, jeremy@alum.mit.edu and others to get the final version ready to release. Version 2.4 (1999-06-15) ------------------------ - Insert returns None if the user doesn't have select permissions on the table. It can (and does) happen that one has insert but not select permissions on a table. 
- Added ntuples() method to query object (brit@druid.net) - Corrected a bug related to getresult() and the money type - Corrected a bug related to negative money amounts - Allow update based on primary key if munged oid not available and table has a primary key - Add many __doc__ strings (andre@via.ecp.fr) - Get method works with views if key specified Version 2.3 (1999-04-17) ------------------------ - connect.host returns "localhost" when connected to Unix socket (torppa@tuhnu.cutery.fi) - Use `PyArg_ParseTupleAndKeywords` in connect() (torppa@tuhnu.cutery.fi) - fixes and cleanups (torppa@tuhnu.cutery.fi) - Fixed memory leak in dictresult() (terekhov@emc.com) - Deprecated pgext.py - functionality now in pg.py - More cleanups to the tutorial - Added fileno() method - terekhov@emc.com (Mikhail Terekhov) - added money type to quoting function - Compiles cleanly with more warnings turned on - Returns PostgreSQL error message on error - Init accepts keywords (Jarkko Torppa) - Convenience functions can be overridden (Jarkko Torppa) - added close() method Version 2.2 (1998-12-21) ------------------------ - Added user and password support thanks to Ng Pheng Siong (ngps@post1.com) - Insert queries return the inserted oid - Add new `pg` wrapper (C module renamed to _pg) - Wrapped database connection in a class - Cleaned up some of the tutorial. (More work needed.) - Added `version` and `__version__`. Thanks to thilo@eevolute.com for the suggestion. 
Version 2.1 (1998-03-07) ------------------------ - return fields as proper Python objects for field type - Cleaned up pgext.py - Added dictresult method Version 2.0 (1997-12-23) ------------------------ - Updated code for PostgreSQL 6.2.1 and Python 1.5 - Reformatted code and converted to use full ANSI style prototypes - Changed name to PyGreSQL (from PyGres95) - Changed order of arguments to connect function - Created new type `pgqueryobject` and moved certain methods to it - Added a print function for pgqueryobject - Various code changes - mostly stylistic Version 1.0b (1995-11-04) ------------------------- - Keyword support for connect function moved from library file to C code and taken away from library - Rewrote documentation - Bug fix in connect function - Enhancements in large objects interface methods Version 1.0a (1995-10-30) ------------------------- A limited release. - Module adapted to standard Python syntax - Keyword support for connect function in library file - Rewrote default parameters interface (internal use of strings) - Fixed minor bugs in module interface - Redefinition of error messages Version 0.9b (1995-10-10) ------------------------- The first public release. - Large objects implementation - Many bug fixes, enhancements, ... Version 0.1a (1995-10-07) ------------------------- - Basic libpq functions (SQL access) PyGreSQL-5.1/docs/_build/html/_sources/contents/tutorial.rst.txt0000644000175100077410000002255413466770070024765 0ustar darcypyg00000000000000First Steps with PyGreSQL ========================= In this small tutorial we show you the basic operations you can perform with both flavors of the PyGreSQL interface. Please choose your flavor: .. contents:: :local: First Steps with the classic PyGreSQL Interface ----------------------------------------------- .. py:currentmodule:: pg Before doing anything else, it's necessary to create a database connection. 
To do this, simply import the :class:`DB` wrapper class and create an instance of it, passing the necessary connection parameters, like this:: >>> from pg import DB >>> db = DB(dbname='testdb', host='pgserver', port=5432, ... user='scott', passwd='tiger') You can omit one or even all parameters if you want to use their default values. PostgreSQL will use the name of the current operating system user as the login and the database name, and will try to connect to the local host on port 5432 if nothing else is specified. The `db` object has all methods of the lower-level :class:`Connection` class plus some more convenience methods provided by the :class:`DB` wrapper. You can now execute database queries using the :meth:`DB.query` method:: >>> db.query("create table fruits(id serial primary key, name varchar)") You can list all database tables with the :meth:`DB.get_tables` method:: >>> db.get_tables() ['public.fruits'] To get the attributes of the *fruits* table, use :meth:`DB.get_attnames`:: >>> db.get_attnames('fruits') {'id': 'int', 'name': 'text'} Verify that you can insert into the newly created *fruits* table: >>> db.has_table_privilege('fruits', 'insert') True You can insert a new row into the table using the :meth:`DB.insert` method, for example:: >>> db.insert('fruits', name='apple') {'name': 'apple', 'id': 1} Note how this method returns the full row as a dictionary including its *id* column that has been generated automatically by a database sequence. You can also pass a dictionary to the :meth:`DB.insert` method instead of or in addition to using keyword arguments. Let's add another row to the table: >>> banana = db.insert('fruits', name='banana') Or, you can add a whole bunch of fruits at the same time using the :meth:`Connection.inserttable` method. 
Note that this method uses the COPY command of PostgreSQL to insert all data in one batch operation, which is much faster than sending many individual INSERT commands:: >>> more_fruits = 'cherimaya durian eggfruit fig grapefruit'.split() >>> data = list(enumerate(more_fruits, start=3)) >>> db.inserttable('fruits', data) We can now query the database for all rows that have been inserted into the *fruits* table:: >>> print(db.query('select * from fruits')) id| name --+---------- 1|apple 2|banana 3|cherimaya 4|durian 5|eggfruit 6|fig 7|grapefruit (7 rows) Instead of simply printing the :class:`Query` instance that has been returned by this query, we can also request the data as list of tuples:: >>> q = db.query('select * from fruits') >>> q.getresult() ... [(1, 'apple'), ..., (7, 'grapefruit')] Instead of a list of tuples, we can also request a list of dicts:: >>> q.dictresult() [{'id': 1, 'name': 'apple'}, ..., {'id': 7, 'name': 'grapefruit'}] You can also return the rows as named tuples:: >>> rows = q.namedresult() >>> rows[3].name 'durian' In PyGreSQL 5.1 and newer, you can also use the :class:`Query` instance directly as an iterable that yields the rows as tuples, and there are also methods that return iterables for rows as dictionaries, named tuples or scalar values. Other methods like :meth:`Query.one` or :meth:`Query.onescalar` return only one row or only the first field of that row. You can get the number of rows with the :func:`len` function. Using the method :meth:`DB.get_as_dict`, you can easily import the whole table into a Python dictionary mapping the primary key *id* to the *name*:: >>> db.get_as_dict('fruits', scalar=True) OrderedDict([(1, 'apple'), (2, 'banana'), (3, 'cherimaya'), (4, 'durian'), (5, 'eggfruit'), (6, 'fig'), (7, 'grapefruit')]) To change a single row in the database, you can use the :meth:`DB.update` method. 
For instance, if you want to capitalize the name 'banana':: >>> db.update('fruits', banana, name=banana['name'].capitalize()) {'id': 2, 'name': 'Banana'} >>> print(db.query('select * from fruits where id between 1 and 3')) id| name --+--------- 1|apple 2|Banana 3|cherimaya (3 rows) Let's also capitalize the other names in the database:: >>> db.query('update fruits set name=initcap(name)') '7' The returned string `'7'` tells us the number of updated rows. It is returned as a string to discern it from an OID which will be returned as an integer, if a new row has been inserted into a table with an OID column. To delete a single row from the database, use the :meth:`DB.delete` method:: >>> db.delete('fruits', banana) 1 The returned integer value `1` tells us that one row has been deleted. If we try it again, the method returns the integer value `0`. Naturally, this method can only return 0 or 1:: >>> db.delete('fruits', banana) 0 Of course, we can insert the row back again:: >>> db.insert('fruits', banana) {'id': 2, 'name': 'Banana'} If we want to change a different row, we can get its current state with:: >>> apple = db.get('fruits', 1) >>> apple {'name': 'Apple', 'id': 1} We can duplicate the row like this:: >>> db.insert('fruits', apple, id=8) {'id': 8, 'name': 'Apple'} To remove the duplicated row, we can do:: >>> db.delete('fruits', id=8) 1 Finally, to remove the table from the database and close the connection:: >>> db.query("drop table fruits") >>> db.close() For more advanced features and details, see the reference: :doc:`pg/index` First Steps with the DB-API 2.0 Interface ----------------------------------------- .. py:currentmodule:: pgdb As with the classic interface, the first thing you need to do is to create a database connection. To do this, use the function :func:`pgdb.connect` in the :mod:`pgdb` module, passing the connection parameters:: >>> from pgdb import connect >>> con = connect(database='testdb', host='pgserver:5432', ... 
user='scott', password='tiger') As in the classic interface, you can omit parameters if they are the default values used by PostgreSQL. To do anything with the connection, you need to request a cursor object from it, which is thought of as the Python representation of a database cursor. The connection has a method that lets you get a cursor:: >>> cursor = con.cursor() The cursor has a method that lets you execute database queries:: >>> cursor.execute("create table fruits(" ... "id serial primary key, name varchar)") You can also use this method to insert data into the table:: >>> cursor.execute("insert into fruits (name) values ('apple')") You can pass parameters in a safe way:: >>> cursor.execute("insert into fruits (name) values (%s)", ('banana',)) To insert multiple rows at once, you can use the following method:: >>> more_fruits = 'cherimaya durian eggfruit fig grapefruit'.split() >>> parameters = [(name,) for name in more_fruits] >>> cursor.executemany("insert into fruits (name) values (%s)", parameters) The cursor also has a :meth:`Cursor.copy_from` method to quickly insert large amounts of data into the database, and a :meth:`Cursor.copy_to` method to quickly dump large amounts of data from the database, using the PostgreSQL COPY command. Note however, that these methods are an extension provided by PyGreSQL, they are not part of the DB-API 2 standard. Also note that the DB API 2.0 interface does not have an autocommit as you may be used to from PostgreSQL. So in order to make these inserts permanent, you need to commit them to the database:: >>> con.commit() If you end the program without calling the commit method of the connection, or if you call the rollback method of the connection, then the changes will be discarded. In a similar way, you can update or delete rows in the database, executing UPDATE or DELETE statements instead of INSERT statements. To fetch rows from the database, execute a SELECT statement first. 
Then you can use one of several fetch methods to retrieve the results. For instance, to request a single row:: >>> cursor.execute('select * from fruits where id=1') >>> cursor.fetchone() Row(id=1, name='apple') The result is a named tuple. This means you can access its elements either using an index number as for an ordinary tuple, or using the column name as for access to object attributes. To fetch all rows of the query, use this method instead:: >>> cursor.execute('select * from fruits') >>> cursor.fetchall() [Row(id=1, name='apple'), ..., Row(id=7, name='grapefruit')] The output is a list of named tuples. If you want to fetch only a limited number of rows from the query:: >>> cursor.execute('select * from fruits') >>> cursor.fetchmany(2) [Row(id=1, name='apple'), Row(id=2, name='banana')] Finally, to remove the table from the database and close the connection:: >>> cursor.execute("drop table fruits") >>> cursor.close() >>> con.close() For more advanced features and details, see the reference: :doc:`pgdb/index` PyGreSQL-5.1/docs/_build/html/_sources/contents/examples.rst.txt0000644000175100077410000000121213466770070024724 0ustar darcypyg00000000000000Examples ======== I am starting to collect examples of applications that use PyGreSQL. So far I only have a few but if you have an example for me, you can either send me the files or the URL for me to point to. The :doc:`postgres/index` that is part of the PyGreSQL distribution shows some examples of using PostgreSQL with PyGreSQL. Here is a `list of motorcycle rides in Ontario `_ that uses a PostgreSQL database to store the rides. There is a link at the bottom of the page to view the source code. 
Oleg Broytmann has written a simple example `RGB database demo `_ PyGreSQL-5.1/docs/_build/html/_sources/contents/pg/0000755000175100077410000000000013470245541022143 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/_build/html/_sources/contents/pg/query.rst.txt0000644000175100077410000003175013466770070024673 0ustar darcypyg00000000000000Query methods ============= .. py:currentmodule:: pg .. class:: Query The :class:`Query` object returned by :meth:`Connection.query` and :meth:`DB.query` can be used as an iterable returning rows as tuples. You can also directly access row tuples using their index, and get the number of rows with the :func:`len` function. The :class:`Query` class also provides the following methods for accessing the results of the query: getresult -- get query values as list of tuples ----------------------------------------------- .. method:: Query.getresult() Get query values as list of tuples :returns: result values as a list of tuples :rtype: list :raises TypeError: too many (any) parameters :raises MemoryError: internal memory error This method returns query results as a list of tuples. More information about this result may be accessed using :meth:`Query.listfields`, :meth:`Query.fieldname` and :meth:`Query.fieldnum` methods. Note that since PyGreSQL 5.0 this method will return the values of array type columns as Python lists. Since PyGreSQL 5.1 the :class:`Query` can be also used directly as an iterable sequence, i.e. you can iterate over the :class:`Query` object to get the same tuples as returned by :meth:`Query.getresult`. This is slightly more efficient than getting the full list of results, but note that the full result is always fetched from the server anyway when the query is executed. You can also call :func:`len` on a query to find the number of rows in the result, and access row tuples using their index directly on the :class:`Query` object. 
namedresult/namediter -- get query values as named tuples
----------------------------------------------------------
:returns: first fields from result as an iterable of scalar values :rtype: iterable
versionadded:: 5.1 one/onedict/onenamed/onescalar -- get one result of a query ----------------------------------------------------------- .. method:: Query.one() Get one row from the result of a query as a tuple :returns: next row from the query results as a tuple of fields :rtype: tuple or None :raises TypeError: too many (any) parameters :raises MemoryError: internal memory error Returns only one row from the result as a tuple of fields. This method can be called multiple times to return more rows. It returns None if the result does not contain one more row. .. versionadded:: 5.1 .. method:: Query.onedict() Get one row from the result of a query as a dictionary :returns: next row from the query results as a dictionary :rtype: dict or None :raises TypeError: too many (any) parameters :raises MemoryError: internal memory error Returns only one row from the result as a dictionary with the field names used as the keys. This method can be called multiple times to return more rows. It returns None if the result does not contain one more row. .. versionadded:: 5.1 .. method:: Query.onenamed() Get one row from the result of a query as named tuple :returns: next row from the query results as a named tuple :rtype: named tuple or None :raises TypeError: too many (any) parameters :raises MemoryError: internal memory error Returns only one row from the result as a named tuple with proper field names. Column names in the database that are not valid as field names for named tuples (particularly, names starting with an underscore) are automatically renamed to valid positional names. This method can be called multiple times to return more rows. It returns None if the result does not contain one more row. .. versionadded:: 5.1 .. 
method:: Query.onescalar() Get one row from the result of a query as scalar value :returns: next row from the query results as a scalar value :rtype: type of first field or None :raises TypeError: too many (any) parameters :raises MemoryError: internal memory error Returns the first field of the next row from the result as a scalar value. This method can be called multiple times to return more rows as scalars. It returns None if the result does not contain one more row. .. versionadded:: 5.1 single/singledict/singlenamed/singlescalar -- get single result of a query -------------------------------------------------------------------------- .. method:: Query.single() Get single row from the result of a query as a tuple :returns: single row from the query results as a tuple of fields :rtype: tuple :raises InvalidResultError: result does not have exactly one row :raises TypeError: too many (any) parameters :raises MemoryError: internal memory error Returns a single row from the result as a tuple of fields. This method returns the same single row when called multiple times. It raises an :exc:`pg.InvalidResultError` if the result does not have exactly one row. More specifically, this will be of type :exc:`pg.NoResultError` if it is empty and of type :exc:`pg.MultipleResultsError` if it has multiple rows. .. versionadded:: 5.1 .. method:: Query.singledict() Get single row from the result of a query as a dictionary :returns: single row from the query results as a dictionary :rtype: dict :raises InvalidResultError: result does not have exactly one row :raises TypeError: too many (any) parameters :raises MemoryError: internal memory error Returns a single row from the result as a dictionary with the field names used as the keys. This method returns the same single row when called multiple times. It raises an :exc:`pg.InvalidResultError` if the result does not have exactly one row. 
More specifically, this will be of type :exc:`pg.NoResultError` if it is empty and of type :exc:`pg.MultipleResultsError` if it has multiple rows. .. versionadded:: 5.1 .. method:: Query.singlenamed() Get single row from the result of a query as named tuple :returns: single row from the query results as a named tuple :rtype: named tuple :raises InvalidResultError: result does not have exactly one row :raises TypeError: too many (any) parameters :raises MemoryError: internal memory error Returns single row from the result as a named tuple with proper field names. Column names in the database that are not valid as field names for named tuples (particularly, names starting with an underscore) are automatically renamed to valid positional names. This method returns the same single row when called multiple times. It raises an :exc:`pg.InvalidResultError` if the result does not have exactly one row. More specifically, this will be of type :exc:`pg.NoResultError` if it is empty and of type :exc:`pg.MultipleResultsError` if it has multiple rows. .. versionadded:: 5.1 .. method:: Query.singlescalar() Get single row from the result of a query as scalar value :returns: single row from the query results as a scalar value :rtype: type of first field :raises InvalidResultError: result does not have exactly one row :raises TypeError: too many (any) parameters :raises MemoryError: internal memory error Returns the first field of a single row from the result as a scalar value. This method returns the same single row as scalar when called multiple times. It raises an :exc:`pg.InvalidResultError` if the result does not have exactly one row. More specifically, this will be of type :exc:`pg.NoResultError` if it is empty and of type :exc:`pg.MultipleResultsError` if it has multiple rows. .. versionadded:: 5.1 listfields -- list fields names of previous query result -------------------------------------------------------- .. 
This method allows you to find a field name from its rank number.
py:currentmodule:: pg Both PostgreSQL and Python have the concept of data types, but there are of course differences between the two type systems. Therefore PyGreSQL needs to adapt Python objects to the representation required by PostgreSQL when passing values as query parameters, and it needs to typecast the representation of PostgreSQL data types returned by database queries to Python objects. Here are some explanations about how this works in detail in case you want to better understand or change the default behavior of PyGreSQL. Supported data types -------------------- The following automatic data type conversions are supported by PyGreSQL out of the box. If you need other automatic type conversions or want to change the default conversions, you can achieve this by using the methods explained in the next two sections. ================================== ================== PostgreSQL Python ================================== ================== char, bpchar, name, text, varchar str bool bool bytea bytes int2, int4, int8, oid, serial int [#int8]_ int2vector list of int float4, float8 float numeric, money Decimal date datetime.date time, timetz datetime.time timestamp, timestamptz datetime.datetime interval datetime.timedelta hstore dict json, jsonb list or dict uuid uuid.UUID array list [#array]_ record tuple ================================== ================== .. note:: Elements of arrays and records will also be converted accordingly. .. [#int8] int8 is converted to long in Python 2 .. [#array] The first element of the array will always be the first element of the Python list, no matter what the lower bound of the PostgreSQL array is. The information about the start index of the array (which is usually 1 in PostgreSQL, but can also be different from 1) is ignored and gets lost in the conversion to the Python list. If you need that information, you can request it separately with the `array_lower()` function provided by PostgreSQL. 
Adaptation of parameters ------------------------ When you use the higher level methods of the classic :mod:`pg` module like :meth:`DB.insert()` or :meth:`DB.update()`, you don't need to care about adaptation of parameters, since all of this is happening automatically behind the scenes. You only need to consider this issue when creating SQL commands manually and sending them to the database using the :meth:`DB.query` method. Imagine you have created a user login form that stores the login name as *login* and the password as *passwd* and you now want to get the user data for that user. You may be tempted to execute a query like this:: >>> db = pg.DB(...) >>> sql = "SELECT * FROM user_table WHERE login = '%s' AND passwd = '%s'" >>> db.query(sql % (login, passwd)).getresult()[0] This seems to work at a first glance, but you will notice an error as soon as you try to use a login name containing a single quote. Even worse, this error can be exploited through so-called "SQL injection", where an attacker inserts malicious SQL statements into the query that you never intended to be executed. For instance, with a login name something like ``' OR ''='`` the attacker could easily log in and see the user data of another user in the database. One solution for this problem would be to cleanse your input of "dangerous" characters like the single quote, but this is tedious and it is likely that you overlook something or break the application e.g. for users with names like "D'Arcy". A better solution is to use the escaping functions provided by PostgreSQL which are available as methods on the :class:`DB` object:: >>> login = "D'Arcy" >>> db.escape_string(login) "D''Arcy" As you see, :meth:`DB.escape_string` has doubled the single quote which is the right thing to do in SQL. However, there are better ways of passing parameters to the query, without having to manually escape them. 
If you pass the parameters as positional arguments to :meth:`DB.query`, then PyGreSQL will send them to the database separately, without the need for quoting them inside the SQL command, and without the problems inherent with that process. In this case you must put placeholders of the form ``$1``, ``$2`` etc. in the SQL command in place of the parameters that should go there. For instance:: >>> sql = "SELECT * FROM user_table WHERE login = $1 AND passwd = $2" >>> db.query(sql, login, passwd).getresult()[0] That's much better. So please always keep the following warning in mind: .. warning:: Remember to **never** insert parameters directly into your queries using the ``%`` operator. Always pass the parameters separately. If you like the ``%`` format specifications of Python better than the placeholders used by PostgreSQL, there is still a way to use them, via the :meth:`DB.query_formatted` method:: >>> sql = "SELECT * FROM user_table WHERE login = %s AND passwd = %s" >>> db.query_formatted(sql, (login, passwd)).getresult()[0] Note that we need to pass the parameters not as positional arguments here, but as a single tuple. Also note again that we did not use the ``%`` operator of Python to format the SQL string, we just used the ``%s`` format specifications of Python and let PyGreSQL care about the formatting. Even better, you can also pass the parameters as a dictionary if you use the :meth:`DB.query_formatted` method:: >>> sql = """SELECT * FROM user_table ... 
WHERE login = %(login)s AND passwd = %(passwd)s""" >>> parameters = dict(login=login, passwd=passwd) >>> db.query_formatted(sql, parameters).getresult()[0] Here is another example:: >>> sql = "SELECT 'Hello, ' || %s || '!'" >>> db.query_formatted(sql, (login,)).getresult()[0] You would think that the following even simpler example should work, too: >>> sql = "SELECT %s" >>> db.query_formatted(sql, (login,)).getresult()[0] ProgrammingError: Could not determine data type of parameter $1 The issue here is that :meth:`DB.query_formatted` by default still uses PostgreSQL parameters, transforming the Python style ``%s`` placeholder into a ``$1`` placeholder, and sending the login name separately from the query. In the query we looked at before, the concatenation with other strings made it clear that it should be interpreted as a string. This simple query however does not give PostgreSQL a clue what data type the ``$1`` placeholder stands for. This is different when you are embedding the login name directly into the query instead of passing it as parameter to PostgreSQL. You can achieve this by setting the *inline* parameter of :meth:`DB.query_formatted`, like so:: >>> sql = "SELECT %s" >>> db.query_formatted(sql, (login,), inline=True).getresult()[0] Another way of making this query work while still sending the parameters separately is to simply cast the parameter values:: >>> sql = "SELECT %s::text" >>> db.query_formatted(sql, (login,), inline=False).getresult()[0] In real world examples you will rarely have to cast your parameters like that, since in an INSERT statement or a WHERE clause comparing the parameter to a table column the data type will be clear from the context. When binding the parameters to a query, PyGreSQL not only adapts the basic types like ``int``, ``float``, ``bool`` and ``str``, but also tries to make sense of Python lists and tuples. 
Tuples are adapted as PostgreSQL composite types. If you use inline parameters, they can also be used with the ``IN`` syntax.
def __init__(self, name, supplier_id, price): ... self.name = name ... self.supplier_id = supplier_id ... self.price = price ... ... def __str__(self): ... return '%s (from %s, at $%s)' % ( ... self.name, self.supplier_id, self.price) But when we try to insert an instance of this class in the same way, we will get an error. This is because PyGreSQL tries to pass the string representation of the object as a parameter to PostgreSQL, but this is just a human readable string and not useful for PostgreSQL to build a composite type. However, it is possible to make such custom classes adapt themselves to PostgreSQL by adding a "magic" method with the name ``__pg_str__``, like so:: >>> class InventoryItem: ... ... ... ... ... def __str__(self): ... return '%s (from %s, at $%s)' % ( ... self.name, self.supplier_id, self.price) ... ... def __pg_str__(self, typ): ... return (self.name, self.supplier_id, self.price) Now you can insert class instances the same way as you insert named tuples. You can even make these objects adapt to different types in different ways:: >>> class InventoryItem: ... ... ... ... ... def __pg_str__(self, typ): ... if typ == 'text': ... return str(self) ... return (self.name, self.supplier_id, self.price) ... >>> db.query("ALTER TABLE on_hand ADD COLUMN remark varchar") >>> item=InventoryItem('fuzzy dice', 42, 1.99) >>> row = dict(item=item, remark=item, count=1000) >>> db.insert('on_hand', row) {'count': 1000, 'item': inventory_item(name='fuzzy dice', supplier_id=42, price=Decimal('1.99')), 'remark': 'fuzzy dice (from 42, at $1.99)'} There is also another "magic" method ``__pg_repr__`` which does not take the *typ* parameter. That method is used instead of ``__pg_str__`` when passing parameters inline. You must be more careful when using ``__pg_repr__``, because it must return a properly escaped string that can be put literally inside the SQL. 
The only exception is when you return a tuple or list, because these will be adapted and properly escaped by PyGreSQL again. Typecasting to Python --------------------- As you noticed, PyGreSQL automatically converted the PostgreSQL data to suitable Python objects when returning values via the :meth:`DB.get()`, :meth:`Query.getresult()` and similar methods. This is done by the use of built-in typecast functions. If you want to use different typecast functions or add your own if no built-in typecast function is available, then this is possible using the :func:`set_typecast` function. With the :func:`get_typecast` function you can check which function is currently set. If no typecast function is set, then PyGreSQL will return the raw strings from the database. For instance, you will find that PyGreSQL uses the normal ``int`` function to cast PostgreSQL ``int4`` type values to Python:: >>> pg.get_typecast('int4') int In the classic PyGreSQL module, the typecasting for these basic types is always done internally by the C extension module for performance reasons. We can set a different typecast function for ``int4``, but it will not become effective, the C module continues to use its internal typecasting. However, we can add new typecast functions for the database types that are not supported by the C module. For example, we can create a typecast function that casts items of the composite PostgreSQL type used as example in the previous section to instances of the corresponding Python class. To do this, at first we get the default typecast function that PyGreSQL has created for the current :class:`DB` connection. This default function casts composite types to named tuples, as we have seen in the section before. 
We can grab it from the :attr:`DB.dbtypes` object as follows:: >>> cast_tuple = db.dbtypes.get_typecast('inventory_item') Now we can create a new typecast function that converts the tuple to an instance of our custom class:: >>> cast_item = lambda value: InventoryItem(*cast_tuple(value)) Finally, we set this typecast function, either globally with :func:`set_typecast`, or locally for the current connection like this:: >>> db.dbtypes.set_typecast('inventory_item', cast_item) Now we can get instances of our custom class directly from the database:: >>> item = db.query("SELECT * FROM on_hand").getresult()[0][0] >>> str(item) 'fuzzy dice (from 42, at $1.99)' Note that some of the typecast functions used by the C module are configurable with separate module level functions, such as :meth:`set_decimal`, :meth:`set_bool` or :meth:`set_jsondecode`. You need to use these instead of :meth:`set_typecast` if you want to change the behavior of the C module. Also note that after changing global typecast functions with :meth:`set_typecast`, you may need to run ``db.dbtypes.reset_typecast()`` to make these changes effective on connections that were already open. As one last example, let us try to typecast the geometric data type ``circle`` of PostgreSQL into a `SymPy <https://www.sympy.org>`_ ``Circle`` object. Let's assume we have created and populated a table with two circles, like so: .. code-block:: sql CREATE TABLE circle ( name varchar(8) primary key, circle circle); INSERT INTO circle VALUES ('C1', '<(2, 3), 3>'); INSERT INTO circle VALUES ('C2', '<(1, -1), 4>'); With PostgreSQL we can easily calculate that these two circles overlap:: >>> q = db.query("""SELECT c1.circle && c2.circle ... FROM circle c1, circle c2 ... WHERE c1.name = 'C1' AND c2.name = 'C2'""") >>> q.getresult()[0][0] True However, calculating the intersection points between the two circles using the ``#`` operator does not work (at least not as of PostgreSQL version 11). So let's resort to SymPy to find out. 
To ease importing circles from PostgreSQL to SymPy, we create and register the following typecast function:: >>> from sympy import Point, Circle >>> >>> def cast_circle(s): ... p, r = s[1:-1].split(',') ... p = p[1:-1].split(',') ... return Circle(Point(float(p[0]), float(p[1])), float(r)) ... >>> pg.set_typecast('circle', cast_circle) Now we can import the circles in the table into Python simply using:: >>> circle = db.get_as_dict('circle', scalar=True) The result is a dictionary mapping circle names to SymPy ``Circle`` objects. We can verify that the circles have been imported correctly: >>> circle['C1'] Circle(Point(2, 3), 3.0) >>> circle['C2'] Circle(Point(1, -1), 4.0) Finally we can find the exact intersection points with SymPy: >>> circle['C1'].intersection(circle['C2']) [Point(29/17 + 64564173230121*sqrt(17)/100000000000000, -80705216537651*sqrt(17)/500000000000000 + 31/17), Point(-64564173230121*sqrt(17)/100000000000000 + 29/17, 80705216537651*sqrt(17)/500000000000000 + 31/17)] PyGreSQL-5.1/docs/_build/html/_sources/contents/pg/introduction.rst.txt0000644000175100077410000000147513466770070026250 0ustar darcypyg00000000000000Introduction ============ You may either choose to use the "classic" PyGreSQL interface provided by the :mod:`pg` module or else the newer DB-API 2.0 compliant interface provided by the :mod:`pgdb` module. The following part of the documentation covers only the older :mod:`pg` API. The :mod:`pg` module handles three types of objects, - the :class:`Connection` instances, which handle the connection and all the requests to the database, - the :class:`LargeObject` instances, which handle all the accesses to PostgreSQL large objects, - the :class:`Query` instances that handle query results and it provides a convenient wrapper class :class:`DB` for the basic :class:`Connection` class. .. seealso:: If you want to see a simple example of the use of some of these functions, see the :doc:`../examples` page. 
PyGreSQL-5.1/docs/_build/html/_sources/contents/pg/notification.rst.txt0000644000175100077410000001070013466770070026204 0ustar darcypyg00000000000000The Notification Handler ======================== .. py:currentmodule:: pg PyGreSQL comes with a client-side asynchronous notification handler that was based on the ``pgnotify`` module written by Ng Pheng Siong. .. versionadded:: 4.1.1 Instantiating the notification handler -------------------------------------- .. class:: NotificationHandler(db, event, callback, [arg_dict], [timeout], [stop_event]) Create an instance of the notification handler :param int db: the database connection :type db: :class:`Connection` :param str event: the name of an event to listen for :param callback: a callback function :param dict arg_dict: an optional dictionary for passing arguments :param timeout: the time-out when waiting for notifications :type timeout: int, float or None :param str stop_event: an optional different name to be used as stop event You can also create an instance of the NotificationHandler using the :meth:`DB.connection_handler` method. In this case you don't need to pass a database connection because the :class:`DB` connection itself will be used as the database connection for the notification handler. You must always pass the name of an *event* (notification channel) to listen for and a *callback* function. You can also specify a dictionary *arg_dict* that will be passed as the single argument to the callback function, and a *timeout* value in seconds (a floating point number denotes fractions of seconds). If it is absent or *None*, the callers will never time out. If the time-out is reached, the callback function will be called with a single argument that is *None*. If you set the *timeout* to ``0``, the handler will poll notifications synchronously and return. You can specify the name of the event that will be used to signal the handler to stop listening as *stop_event*. 
By default, it will be the event name prefixed with ``'stop_'``. All of the parameters will be also available as attributes of the created notification handler object. Invoking the notification handler --------------------------------- To invoke the notification handler, just call the instance without passing any parameters. The handler is a loop that listens for notifications on the event and stop event channels. When either of these notifications are received, its associated *pid*, *event* and *extra* (the payload passed with the notification) are inserted into its *arg_dict* dictionary and the callback is invoked with this dictionary as a single argument. When the handler receives a stop event, it stops listening to both events and returns. In the special case that the timeout of the handler has been set to ``0``, the handler will poll all events synchronously and return. Otherwise, it will keep listening until it receives a stop event. .. warning:: If you run this loop in another thread, don't use the same database connection for database operations in the main thread. Sending notifications --------------------- You can send notifications by either running ``NOTIFY`` commands on the database directly, or using the following method: .. method:: NotificationHandler.notify([db], [stop], [payload]) Generate a notification :param int db: the database connection for sending the notification :type db: :class:`Connection` :param bool stop: whether to produce a normal event or a stop event :param str payload: an optional payload to be sent with the notification This method sends a notification event together with an optional *payload*. If you set the *stop* flag, a stop notification will be sent instead of a normal notification. This will cause the handler to stop listening. .. warning:: If the notification handler is running in another thread, you must pass a different database connection since PyGreSQL database connections are not thread-safe. 
Auxiliary methods ----------------- .. method:: NotificationHandler.listen() Start listening for the event and the stop event This method is called implicitly when the handler is invoked. .. method:: NotificationHandler.unlisten() Stop listening for the event and the stop event This method is called implicitly when the handler receives a stop event or when it is closed or deleted. .. method:: NotificationHandler.close() Stop listening and close the database connection You can call this method instead of :meth:`NotificationHandler.unlisten` if you want to close not only the handler, but also the database connection it was created with.PyGreSQL-5.1/docs/_build/html/_sources/contents/pg/db_wrapper.rst.txt0000644000175100077410000011344413466770070025654 0ustar darcypyg00000000000000The DB wrapper class ==================== .. py:currentmodule:: pg .. class:: DB The :class:`Connection` methods are wrapped in the class :class:`DB` which also adds convenient higher level methods for working with the database. It also serves as a context manager for the connection. The preferred way to use this module is as follows:: import pg with pg.DB(...) as db: # for parameters, see below for r in db.query( # just for example "SELECT foo, bar FROM foo_bar_table WHERE foo !~ bar" ).dictresult(): print('%(foo)s %(bar)s' % r) This class can be subclassed as in this example:: import pg class DB_ride(pg.DB): """Ride database wrapper This class encapsulates the database functions and the specific methods for the ride database.""" def __init__(self): """Open a database connection to the rides database""" pg.DB.__init__(self, dbname='ride') self.query("SET DATESTYLE TO 'ISO'") [Add or override methods here] The following describes the methods and variables of this class. Initialization -------------- The :class:`DB` class is initialized with the same arguments as the :func:`connect` function described above. It also initializes a few internal variables. 
The statement ``db = DB()`` will open the local database with the name of the user just like ``connect()`` does. You can also initialize the DB class with an existing :mod:`pg` or :mod:`pgdb` connection. Pass this connection as a single unnamed parameter, or as a single parameter named ``db``. This allows you to use all of the methods of the DB class with a DB-API 2 compliant connection. Note that the :meth:`Connection.close` and :meth:`Connection.reopen` methods are inoperative in this case. pkey -- return the primary key of a table ----------------------------------------- .. method:: DB.pkey(table) Return the primary key of a table :param str table: name of table :returns: Name of the field which is the primary key of the table :rtype: str :raises KeyError: the table does not have a primary key This method returns the primary key of a table. Single primary keys are returned as strings unless you set the composite flag. Composite primary keys are always represented as tuples. Note that this raises a KeyError if the table does not have a primary key. get_databases -- get list of databases in the system ---------------------------------------------------- .. method:: DB.get_databases() Get the list of databases in the system :returns: all databases in the system :rtype: list Although you can do this with a simple select, it is added here for convenience. get_relations -- get list of relations in connected database ------------------------------------------------------------ .. method:: DB.get_relations([kinds], [system]) Get the list of relations in connected database :param str kinds: a string or sequence of type letters :param bool system: whether system relations should be returned :returns: all relations of the given kinds in the database :rtype: list This method returns the list of relations in the connected database. Although you can do this with a simple select, it is added here for convenience. 
You can select which kinds of relations you are interested in by passing type letters in the `kinds` parameter. The type letters are ``r`` = ordinary table, ``i`` = index, ``S`` = sequence, ``v`` = view, ``c`` = composite type, ``s`` = special, ``t`` = TOAST table. If `kinds` is None or an empty string, all relations are returned (this is also the default). If `system` is set to `True`, then system tables and views (temporary tables, toast tables, catalog views and tables) will be returned as well, otherwise they will be ignored. get_tables -- get list of tables in connected database ------------------------------------------------------ .. method:: DB.get_tables([system]) Get the list of tables in connected database :param bool system: whether system tables should be returned :returns: all tables in connected database :rtype: list This is a shortcut for ``get_relations('r', system)`` that has been added for convenience. get_attnames -- get the attribute names of a table -------------------------------------------------- .. method:: DB.get_attnames(table) Get the attribute names of a table :param str table: name of table :returns: an ordered dictionary mapping attribute names to type names Given the name of a table, digs out the set of attribute names. Returns a read-only dictionary of attribute names (the names are the keys, the values are the names of the attributes' types) with the column names in the proper order if you iterate over it. By default, only a limited number of simple types will be returned. You can get the registered types instead, if enabled by calling the :meth:`DB.use_regtypes` method. has_table_privilege -- check table privilege -------------------------------------------- .. 
method:: DB.has_table_privilege(table, privilege) Check whether current user has specified table privilege :param str table: the name of the table :param str privilege: privilege to be checked -- default is 'select' :returns: whether current user has specified table privilege :rtype: bool Returns True if the current user has the specified privilege for the table. .. versionadded:: 4.0 get/set_parameter -- get or set run-time parameters ---------------------------------------------------- .. method:: DB.get_parameter(parameter) Get the value of run-time parameters :param parameter: the run-time parameter(s) to get :type param: str, tuple, list or dict :returns: the current value(s) of the run-time parameter(s) :rtype: str, list or dict :raises TypeError: Invalid parameter type(s) :raises pg.ProgrammingError: Invalid parameter name(s) If the parameter is a string, the return value will also be a string that is the current setting of the run-time parameter with that name. You can get several parameters at once by passing a list, set or dict. When passing a list of parameter names, the return value will be a corresponding list of parameter settings. When passing a set of parameter names, a new dict will be returned, mapping these parameter names to their settings. Finally, if you pass a dict as parameter, its values will be set to the current parameter settings corresponding to its keys. By passing the special name ``'all'`` as the parameter, you can get a dict of all existing configuration parameters. Note that you can request most of the important parameters also using :meth:`Connection.parameter()` which does not involve a database query, unlike :meth:`DB.get_parameter` and :meth:`DB.set_parameter`. .. versionadded:: 4.2 .. 
method:: DB.set_parameter(parameter, [value], [local]) Set the value of run-time parameters :param parameter: the run-time parameter(s) to set :type param: string, tuple, list or dict :param value: the value to set :type param: str or None :raises TypeError: Invalid parameter type(s) :raises ValueError: Invalid value argument(s) :raises pg.ProgrammingError: Invalid parameter name(s) or values If the parameter and the value are strings, the run-time parameter will be set to that value. If no value or *None* is passed as a value, then the run-time parameter will be restored to its default value. You can set several parameters at once by passing a list of parameter names, together with a single value that all parameters should be set to or with a corresponding list of values. You can also pass the parameters as a set if you only provide a single value. Finally, you can pass a dict with parameter names as keys. In this case, you should not pass a value, since the values for the parameters will be taken from the dict. By passing the special name ``'all'`` as the parameter, you can reset all existing settable run-time parameters to their default values. If you set *local* to `True`, then the command takes effect for only the current transaction. After :meth:`DB.commit` or :meth:`DB.rollback`, the session-level setting takes effect again. Setting *local* to `True` will appear to have no effect if it is executed outside a transaction, since the transaction will end immediately. .. versionadded:: 4.2 begin/commit/rollback/savepoint/release -- transaction handling --------------------------------------------------------------- .. method:: DB.begin([mode]) Begin a transaction :param str mode: an optional transaction mode such as 'READ ONLY' This initiates a transaction block, that is, all following queries will be executed in a single transaction until :meth:`DB.commit` or :meth:`DB.rollback` is called. .. versionadded:: 4.1 .. 
method:: DB.start() This is the same as the :meth:`DB.begin` method. .. method:: DB.commit() Commit a transaction This commits the current transaction. .. method:: DB.end() This is the same as the :meth:`DB.commit` method. .. versionadded:: 4.1 .. method:: DB.rollback([name]) Roll back a transaction :param str name: optionally, roll back to the specified savepoint This rolls back the current transaction, discarding all its changes. .. method:: DB.abort() This is the same as the :meth:`DB.rollback` method. .. versionadded:: 4.2 .. method:: DB.savepoint(name) Define a new savepoint :param str name: the name to give to the new savepoint This establishes a new savepoint within the current transaction. .. versionadded:: 4.1 .. method:: DB.release(name) Destroy a savepoint :param str name: the name of the savepoint to destroy This destroys a savepoint previously defined in the current transaction. .. versionadded:: 4.1 get -- get a row from a database table or view ---------------------------------------------- .. method:: DB.get(table, row, [keyname]) Get a row from a database table or view :param str table: name of table or view :param row: either a dictionary or the value to be looked up :param str keyname: name of field to use as key (optional) :returns: A dictionary - the keys are the attribute names, the values are the row values. :raises pg.ProgrammingError: table has no primary key or missing privilege :raises KeyError: missing key value for the row This method is the basic mechanism to get a single row. It assumes that the *keyname* specifies a unique row. It must be the name of a single column or a tuple of column names. If *keyname* is not specified, then the primary key for the table is used. If *row* is a dictionary, then the value for the key is taken from it. Otherwise, the row must be a single value or a tuple of values corresponding to the passed *keyname* or primary key. 
The fetched row from the table will be returned as a new dictionary or used to replace the existing values if the row was passed as a dictionary. The OID is also put into the dictionary if the table has one, but in order to allow the caller to work with multiple tables, it is munged as ``oid(table)`` using the actual name of the table. Note that since PyGreSQL 5.0 this will return the value of an array type column as a Python list by default. insert -- insert a row into a database table -------------------------------------------- .. method:: DB.insert(table, [row], [col=val, ...]) Insert a row into a database table :param str table: name of table :param dict row: optional dictionary of values :param col: optional keyword arguments for updating the dictionary :returns: the inserted values in the database :rtype: dict :raises pg.ProgrammingError: missing privilege or conflict This method inserts a row into a table. If the optional dictionary is not supplied then the required values must be included as keyword/value pairs. If a dictionary is supplied then any keywords provided will be added to or replace the entry in the dictionary. The dictionary is then reloaded with the values actually inserted in order to pick up values modified by rules, triggers, etc. Note that since PyGreSQL 5.0 it is possible to insert a value for an array type column by passing it as a Python list. update -- update a row in a database table ------------------------------------------ .. method:: DB.update(table, [row], [col=val, ...]) Update a row in a database table :param str table: name of table :param dict row: optional dictionary of values :param col: optional keyword arguments for updating the dictionary :returns: the new row in the database :rtype: dict :raises pg.ProgrammingError: table has no primary key or missing privilege :raises KeyError: missing key value for the row Similar to insert, but updates an existing row. 
The update is based on the primary key of the table or the OID value as munged by :meth:`DB.get` or passed as keyword. The OID will take precedence if provided, so that it is possible to update the primary key itself. The dictionary is then modified to reflect any changes caused by the update due to triggers, rules, default values, etc. Like insert, the dictionary is optional and updates will be performed on the fields in the keywords. There must be an OID or primary key either specified using the ``'oid'`` keyword or in the dictionary, in which case the OID must be munged. upsert -- insert a row with conflict resolution ----------------------------------------------- .. method:: DB.upsert(table, [row], [col=val, ...]) Insert a row into a database table with conflict resolution :param str table: name of table :param dict row: optional dictionary of values :param col: optional keyword arguments for specifying the update :returns: the new row in the database :rtype: dict :raises pg.ProgrammingError: table has no primary key or missing privilege This method inserts a row into a table, but instead of raising a ProgrammingError exception in case of violating a constraint or unique index, an update will be executed instead. This will be performed as a single atomic operation on the database, so race conditions can be avoided. Like the insert method, the first parameter is the name of the table and the second parameter can be used to pass the values to be inserted as a dictionary. Unlike the insert and update statement, keyword parameters are not used to modify the dictionary, but to specify which columns shall be updated in case of a conflict, and in which way: A value of `False` or `None` means the column shall not be updated, a value of `True` means the column shall be updated with the value that has been proposed for insertion, i.e. has been passed as value in the dictionary. 
Columns that are not specified by keywords but appear as keys in the dictionary are also updated like in the case keywords had been passed with the value `True`. So if in the case of a conflict you want to update every column that has been passed in the dictionary `d` , you would call ``upsert(table, d)``. If you don't want to do anything in case of a conflict, i.e. leave the existing row as it is, call ``upsert(table, d, **dict.fromkeys(d))``. If you need more fine-grained control of what gets updated, you can also pass strings in the keyword parameters. These strings will be used as SQL expressions for the update columns. In these expressions you can refer to the value that already exists in the table by writing the table prefix ``included.`` before the column name, and you can refer to the value that has been proposed for insertion by writing ``excluded.`` as table prefix. The dictionary is modified in any case to reflect the values in the database after the operation has completed. .. note:: The method uses the PostgreSQL "upsert" feature which is only available since PostgreSQL 9.5. With older PostgreSQL versions, you will get a ProgrammingError if you use this method. .. versionadded:: 5.0 query -- execute a SQL command string ------------------------------------- .. method:: DB.query(command, [arg1, [arg2, ...]]) Execute a SQL command string :param str command: SQL command :param arg*: optional positional arguments :returns: result values :rtype: :class:`Query`, None :raises TypeError: bad argument type, or too many arguments :raises TypeError: invalid connection :raises ValueError: empty SQL query or lost connection :raises pg.ProgrammingError: error in query :raises pg.InternalError: error during query processing Similar to the :class:`Connection` function with the same name, except that positional arguments can be passed either as a single list or tuple, or as individual positional arguments. 
These arguments will then be used as parameter values of parameterized queries. Example:: name = input("Name? ") phone = input("Phone? ") rows = db.query("update employees set phone=$2 where name=$1", name, phone).getresult()[0][0] # or rows = db.query("update employees set phone=$2 where name=$1", (name, phone)).getresult()[0][0] query_formatted -- execute a formatted SQL command string --------------------------------------------------------- .. method:: DB.query_formatted(command, [parameters], [types], [inline]) Execute a formatted SQL command string :param str command: SQL command :param parameters: the values of the parameters for the SQL command :type parameters: tuple, list or dict :param types: optionally, the types of the parameters :type types: tuple, list or dict :param bool inline: whether the parameters should be passed in the SQL :rtype: :class:`Query`, None :raises TypeError: bad argument type, or too many arguments :raises TypeError: invalid connection :raises ValueError: empty SQL query or lost connection :raises pg.ProgrammingError: error in query :raises pg.InternalError: error during query processing Similar to :meth:`DB.query`, but using Python format placeholders of the form ``%s`` or ``%(names)s`` instead of PostgreSQL placeholders of the form ``$1``. The parameters must be passed as a tuple, list or dict. You can also pass a corresponding tuple, list or dict of database types in order to format the parameters properly in case there is ambiguity. If you set *inline* to True, the parameters will be sent to the database embedded in the SQL command, otherwise they will be sent separately. If you set *inline* to True or don't pass any parameters, the command string can also include multiple SQL commands (separated by semicolons). You will only get the return value for the last command in this case. Note that the adaption and conversion of the parameters causes a certain performance overhead. 
Depending on the type of values, the overhead can be smaller for *inline* queries or if you pass the types of the parameters, so that they don't need to be guessed from the values. For best performance, we recommend using a raw :meth:`DB.query` or :meth:`DB.query_prepared` if you are executing many of the same operations with different parameters. Example:: name = input("Name? ") phone = input("Phone? ") rows = db.query_formatted( "update employees set phone=%s where name=%s", (phone, name)).getresult()[0][0] # or rows = db.query_formatted( "update employees set phone=%(phone)s where name=%(name)s", dict(name=name, phone=phone)).getresult()[0][0] query_prepared -- execute a prepared statement ---------------------------------------------- .. method:: DB.query_prepared(name, [arg1, [arg2, ...]]) Execute a prepared statement :param str name: name of the prepared statement :param arg*: optional positional arguments :returns: result values :rtype: :class:`Query`, None :raises TypeError: bad argument type, or too many arguments :raises TypeError: invalid connection :raises ValueError: empty SQL query or lost connection :raises pg.ProgrammingError: error in query :raises pg.InternalError: error during query processing :raises pg.OperationalError: prepared statement does not exist This method works like the :meth:`DB.query` method, except that instead of passing the SQL command, you pass the name of a prepared statement created previously using the :meth:`DB.prepare` method. Passing an empty string or *None* as the name will execute the unnamed statement (see warning about the limited lifetime of the unnamed statement in :meth:`DB.prepare`). The functionality of this method is equivalent to that of the SQL ``EXECUTE`` command. Note that calling EXECUTE would require parameters to be sent inline, and be properly sanitized (escaped, quoted). .. versionadded:: 5.1 prepare -- create a prepared statement -------------------------------------- .. 
method:: DB.prepare(name, command) Create a prepared statement :param str command: SQL command :param str name: name of the prepared statement :rtype: None :raises TypeError: bad argument types, or wrong number of arguments :raises TypeError: invalid connection :raises pg.ProgrammingError: error in query or duplicate query This method creates a prepared statement with the specified name for later execution of the given command with the :meth:`DB.query_prepared` method. If the name is empty or *None*, the unnamed prepared statement is used, in which case any pre-existing unnamed statement is replaced. Otherwise, if a prepared statement with the specified name is already defined in the current database session, a :exc:`pg.ProgrammingError` is raised. The SQL command may optionally contain positional parameters of the form ``$1``, ``$2``, etc instead of literal data. The corresponding values must then be passed to the :meth:`Connection.query_prepared` method as positional arguments. The functionality of this method is equivalent to that of the SQL ``PREPARE`` command. Example:: db.prepare('change phone', "update employees set phone=$2 where ein=$1") while True: ein = input("Employee ID? ") if not ein: break phone = input("Phone? ") db.query_prepared('change phone', ein, phone) .. note:: We recommend always using named queries, since unnamed queries have a limited lifetime and can be automatically replaced or destroyed by various operations on the database. .. versionadded:: 5.1 describe_prepared -- describe a prepared statement -------------------------------------------------- .. 
method:: DB.describe_prepared([name]) Describe a prepared statement :param str name: name of the prepared statement :rtype: :class:`Query` :raises TypeError: bad argument type, or too many arguments :raises TypeError: invalid connection :raises pg.OperationalError: prepared statement does not exist This method returns a :class:`Query` object describing the prepared statement with the given name. You can also pass an empty name in order to describe the unnamed statement. Information on the fields of the corresponding query can be obtained through the :meth:`Query.listfields`, :meth:`Query.fieldname` and :meth:`Query.fieldnum` methods. .. versionadded:: 5.1 delete_prepared -- delete a prepared statement ---------------------------------------------- .. method:: DB.delete_prepared([name]) Delete a prepared statement :param str name: name of the prepared statement :rtype: None :raises TypeError: bad argument type, or too many arguments :raises TypeError: invalid connection :raises pg.OperationalError: prepared statement does not exist This method deallocates a previously prepared SQL statement with the given name, or deallocates all prepared statements if you do not specify a name. Note that prepared statements are always deallocated automatically when the current session ends. .. versionadded:: 5.1 clear -- clear row values in memory ----------------------------------- .. method:: DB.clear(table, [row]) Clear row values in memory :param str table: name of table :param dict row: optional dictionary of values :returns: an empty row :rtype: dict This method clears all the attributes to values determined by the types. Numeric types are set to 0, Booleans are set to *False*, and everything else is set to the empty string. If the row argument is present, it is used as the row dictionary and any entries matching attribute names are cleared with everything else left unchanged. If the dictionary is not supplied a new one is created. 
delete -- delete a row from a database table -------------------------------------------- .. method:: DB.delete(table, [row], [col=val, ...]) Delete a row from a database table :param str table: name of table :param dict d: optional dictionary of values :param col: optional keyword arguments for updating the dictionary :rtype: None :raises pg.ProgrammingError: table has no primary key, row is still referenced or missing privilege :raises KeyError: missing key value for the row This method deletes the row from a table. It deletes based on the primary key of the table or the OID value as munged by :meth:`DB.get` or passed as keyword. The OID will take precedence if provided. The return value is the number of deleted rows (i.e. 0 if the row did not exist and 1 if the row was deleted). Note that if the row cannot be deleted because e.g. it is still referenced by another table, this method will raise a ProgrammingError. truncate -- quickly empty database tables ----------------------------------------- .. method:: DB.truncate(table, [restart], [cascade], [only]) Empty a table or set of tables :param table: the name of the table(s) :type table: str, list or set :param bool restart: whether table sequences should be restarted :param bool cascade: whether referenced tables should also be truncated :param only: whether only parent tables should be truncated :type only: bool or list This method quickly removes all rows from the given table or set of tables. It has the same effect as an unqualified DELETE on each table, but since it does not actually scan the tables it is faster. Furthermore, it reclaims disk space immediately, rather than requiring a subsequent VACUUM operation. This is most useful on large tables. If *restart* is set to `True`, sequences owned by columns of the truncated table(s) are automatically restarted. If *cascade* is set to `True`, it also truncates all tables that have foreign-key references to any of the named tables. 
If the parameter *only* is not set to `True`, all the descendant tables (if any) will also be truncated. Optionally, a ``*`` can be specified after the table name to explicitly indicate that descendant tables are included. If the parameter *table* is a list, the parameter *only* can also be a list of corresponding boolean values. .. versionadded:: 4.2 get_as_list/dict -- read a table as a list or dictionary -------------------------------------------------------- .. method:: DB.get_as_list(table, [what], [where], [order], [limit], [offset], [scalar]) Get a table as a list :param str table: the name of the table (the FROM clause) :param what: column(s) to be returned (the SELECT clause) :type what: str, list, tuple or None :param where: conditions(s) to be fulfilled (the WHERE clause) :type where: str, list, tuple or None :param order: column(s) to sort by (the ORDER BY clause) :type order: str, list, tuple, False or None :param int limit: maximum number of rows returned (the LIMIT clause) :param int offset: number of rows to be skipped (the OFFSET clause) :param bool scalar: whether only the first column shall be returned :returns: the content of the table as a list :rtype: list :raises TypeError: the table name has not been specified This gets a convenient representation of the table as a list of named tuples in Python. You only need to pass the name of the table (or any other SQL expression returning rows). Note that by default this will return the full content of the table which can be huge and overflow your memory. However, you can control the amount of data returned using the other optional parameters. The parameter *what* can restrict the query to only return a subset of the table columns. The parameter *where* can restrict the query to only return a subset of the table rows. The specified SQL expressions all need to be fulfilled for a row to get into the result. The parameter *order* specifies the ordering of the rows. 
If no ordering is specified, the result will be ordered by the primary key(s) or all columns if no primary key exists. You can set *order* to *False* if you don't care about the ordering. The parameters *limit* and *offset* specify the maximum number of rows returned and a number of rows skipped over. If you set the *scalar* option to *True*, then instead of the named tuples you will get the first items of these tuples. This is useful if the result has only one column anyway. .. versionadded:: 5.0 .. method:: DB.get_as_dict(table, [keyname], [what], [where], [order], [limit], [offset], [scalar]) Get a table as a dictionary :param str table: the name of the table (the FROM clause) :param keyname: column(s) to be used as key(s) of the dictionary :type keyname: str, list, tuple or None :param what: column(s) to be returned (the SELECT clause) :type what: str, list, tuple or None :param where: conditions(s) to be fulfilled (the WHERE clause) :type where: str, list, tuple or None :param order: column(s) to sort by (the ORDER BY clause) :type order: str, list, tuple, False or None :param int limit: maximum number of rows returned (the LIMIT clause) :param int offset: number of rows to be skipped (the OFFSET clause) :param bool scalar: whether only the first column shall be returned :returns: the content of the table as a list :rtype: dict or OrderedDict :raises TypeError: the table name has not been specified :raises KeyError: keyname(s) are invalid or not part of the result :raises pg.ProgrammingError: no keyname(s) and table has no primary key This method is similar to :meth:`DB.get_as_list`, but returns the table as a Python dict instead of a Python list, which can be even more convenient. The primary key column(s) of the table will be used as the keys of the dictionary, while the other column(s) will be the corresponding values. The keys will be named tuples if the table has a composite primary key. 
The rows will be also named tuples unless the *scalar* option has been set to *True*. With the optional parameter *keyname* you can specify a different set of columns to be used as the keys of the dictionary. If the Python version supports it, the dictionary will be an *OrderedDict* using the order specified with the *order* parameter or the key column(s) if not specified. You can set *order* to *False* if you don't care about the ordering. In this case the returned dictionary will be an ordinary one. .. versionadded:: 5.0 escape_literal/identifier/string/bytea -- escape for SQL -------------------------------------------------------- The following methods escape text or binary strings so that they can be inserted directly into an SQL command. Except for :meth:`DB.escape_byte`, you don't need to call these methods for the strings passed as parameters to :meth:`DB.query`. You also don't need to call any of these methods when storing data using :meth:`DB.insert` and similar. .. method:: DB.escape_literal(string) Escape a string for use within SQL as a literal constant :param str string: the string that is to be escaped :returns: the escaped string :rtype: str This method escapes a string for use within an SQL command. This is useful when inserting data values as literal constants in SQL commands. Certain characters (such as quotes and backslashes) must be escaped to prevent them from being interpreted specially by the SQL parser. .. versionadded:: 4.1 .. method:: DB.escape_identifier(string) Escape a string for use within SQL as an identifier :param str string: the string that is to be escaped :returns: the escaped string :rtype: str This method escapes a string for use as an SQL identifier, such as a table, column, or function name. This is useful when a user-supplied identifier might contain special characters that would otherwise be misinterpreted by the SQL parser, or when the identifier might contain upper case characters whose case should be preserved. .. 
versionadded:: 4.1 .. method:: DB.escape_string(string) Escape a string for use within SQL :param str string: the string that is to be escaped :returns: the escaped string :rtype: str Similar to the module function :func:`pg.escape_string` with the same name, but the behavior of this method is adjusted depending on the connection properties (such as character encoding). .. method:: DB.escape_bytea(datastring) Escape binary data for use within SQL as type ``bytea`` :param str datastring: string containing the binary data that is to be escaped :returns: the escaped string :rtype: str Similar to the module function :func:`pg.escape_bytea` with the same name, but the behavior of this method is adjusted depending on the connection properties (in particular, whether standard-conforming strings are enabled). unescape_bytea -- unescape data retrieved from the database ----------------------------------------------------------- .. method:: DB.unescape_bytea(string) Unescape ``bytea`` data that has been retrieved as text :param datastring: the ``bytea`` data string that has been retrieved as text :returns: byte string containing the binary data :rtype: bytes Converts an escaped string representation of binary data stored as ``bytea`` into the raw byte string representing the binary data -- this is the reverse of :meth:`DB.escape_bytea`. Since the :class:`Query` results will already return unescaped byte strings, you normally don't have to use this method. encode/decode_json -- encode and decode JSON data ------------------------------------------------- The following methods can be used to encode end decode data in `JSON `_ format. .. 
method:: DB.encode_json(obj) Encode a Python object for use within SQL as type ``json`` or ``jsonb`` :param obj: Python object that shall be encoded to JSON format :type obj: dict, list or None :returns: string representation of the Python object in JSON format :rtype: str This method serializes a Python object into a JSON formatted string that can be used within SQL. You don't need to use this method on the data stored with :meth:`DB.insert` and similar, only if you store the data directly as part of an SQL command or parameter with :meth:`DB.query`. This is the same as the :func:`json.dumps` function from the standard library. .. versionadded:: 5.0 .. method:: DB.decode_json(string) Decode ``json`` or ``jsonb`` data that has been retrieved as text :param string: JSON formatted string shall be decoded into a Python object :type string: str :returns: Python object representing the JSON formatted string :rtype: dict, list or None This method deserializes a JSON formatted string retrieved as text from the database to a Python object. You normally don't need to use this method as JSON data is automatically decoded by PyGreSQL. If you don't want the data to be decoded, then you can cast ``json`` or ``jsonb`` columns to ``text`` in PostgreSQL or you can set the decoding function to *None* or a different function using :func:`pg.set_jsondecode`. By default this is the same as the :func:`json.loads` function from the standard library. .. versionadded:: 5.0 use_regtypes -- choose usage of registered type names ----------------------------------------------------- .. method:: DB.use_regtypes([regtypes]) Determine whether registered type names shall be used :param bool regtypes: if passed, set whether registered type names shall be used :returns: whether registered type names are used The :meth:`DB.get_attnames` method can return either simplified "classic" type names (the default) or more fine-grained "registered" type names. 
Which kind of type names is used can be changed by calling :meth:`DB.use_regtypes`.
versionadded:: 5.0 PyGreSQL-5.1/docs/_build/html/_sources/contents/pg/large_objects.rst.txt0000644000175100077410000001435413466770070026332 0ustar darcypyg00000000000000LargeObject -- Large Objects ============================ .. py:currentmodule:: pg .. class:: LargeObject Objects that are instances of the class :class:`LargeObject` are used to handle all the requests concerning a PostgreSQL large object. These objects embed and hide all the "recurrent" variables (object OID and connection), exactly in the same way :class:`Connection` instances do, thus only keeping significant parameters in function calls. The :class:`LargeObject` instance keeps a reference to the :class:`Connection` object used for its creation, sending requests though with its parameters. Any modification but dereferencing the :class:`Connection` object will thus affect the :class:`LargeObject` instance. Dereferencing the initial :class:`Connection` object is not a problem since Python won't deallocate it before the :class:`LargeObject` instance dereferences it. All functions return a generic error message on call error, whatever the exact error was. The :attr:`error` attribute of the object allows to get the exact error message. See also the PostgreSQL programmer's guide for more information about the large object interface. open -- open a large object --------------------------- .. method:: LargeObject.open(mode) Open a large object :param int mode: open mode definition :rtype: None :raises TypeError: invalid connection, bad parameter type, or too many parameters :raises IOError: already opened object, or open error This method opens a large object for reading/writing, in the same way than the Unix open() function. The mode value can be obtained by OR-ing the constants defined in the :mod:`pg` module (:const:`INV_READ`, :const:`INV_WRITE`). close -- close a large object ----------------------------- .. 
method:: LargeObject.close() Close a large object :rtype: None :raises TypeError: invalid connection :raises TypeError: too many parameters :raises IOError: object is not opened, or close error This method closes a previously opened large object, in the same way than the Unix close() function. read, write, tell, seek, unlink -- file-like large object handling ------------------------------------------------------------------ .. method:: LargeObject.read(size) Read data from large object :param int size: maximal size of the buffer to be read :returns: the read buffer :rtype: bytes :raises TypeError: invalid connection, invalid object, bad parameter type, or too many parameters :raises ValueError: if `size` is negative :raises IOError: object is not opened, or read error This function allows to read data from a large object, starting at current position. .. method:: LargeObject.write(string) Read data to large object :param bytes string: string buffer to be written :rtype: None :raises TypeError: invalid connection, bad parameter type, or too many parameters :raises IOError: object is not opened, or write error This function allows to write data to a large object, starting at current position. .. method:: LargeObject.seek(offset, whence) Change current position in large object :param int offset: position offset :param int whence: positional parameter :returns: new position in object :rtype: int :raises TypeError: invalid connection or invalid object, bad parameter type, or too many parameters :raises IOError: object is not opened, or seek error This method allows to move the position cursor in the large object. The valid values for the whence parameter are defined as constants in the :mod:`pg` module (:const:`SEEK_SET`, :const:`SEEK_CUR`, :const:`SEEK_END`). .. 
method:: LargeObject.tell() Return current position in large object :returns: current position in large object :rtype: int :raises TypeError: invalid connection or invalid object :raises TypeError: too many parameters :raises IOError: object is not opened, or seek error This method allows to get the current position in the large object. .. method:: LargeObject.unlink() Delete large object :rtype: None :raises TypeError: invalid connection or invalid object :raises TypeError: too many parameters :raises IOError: object is not closed, or unlink error This methods unlinks (deletes) the PostgreSQL large object. size -- get the large object size --------------------------------- .. method:: LargeObject.size() Return the large object size :returns: the large object size :rtype: int :raises TypeError: invalid connection or invalid object :raises TypeError: too many parameters :raises IOError: object is not opened, or seek/tell error This (composite) method allows to get the size of a large object. It was implemented because this function is very useful for a web interfaced database. Currently, the large object needs to be opened first. export -- save a large object to a file --------------------------------------- .. method:: LargeObject.export(name) Export a large object to a file :param str name: file to be created :rtype: None :raises TypeError: invalid connection or invalid object, bad parameter type, or too many parameters :raises IOError: object is not closed, or export error This methods allows to dump the content of a large object in a very simple way. The exported file is created on the host of the program, not the server host. Object attributes ----------------- :class:`LargeObject` objects define a read-only set of attributes that allow to get some information about it. These attributes are: .. attribute:: LargeObject.oid the OID associated with the large object (int) .. 
attribute:: LargeObject.pgcnx the :class:`Connection` object associated with the large object .. attribute:: LargeObject.error the last warning/error message of the connection (str) .. warning:: In multi-threaded environments, :attr:`LargeObject.error` may be modified by another thread using the same :class:`Connection`. Remember these object are shared, not duplicated. You should provide some locking to be able if you want to check this. The :attr:`LargeObject.oid` attribute is very interesting, because it allows you to reuse the OID later, creating the :class:`LargeObject` object with a :meth:`Connection.getlo` method call. PyGreSQL-5.1/docs/_build/html/_sources/contents/pg/index.rst.txt0000644000175100077410000000047313466770070024633 0ustar darcypyg00000000000000-------------------------------------------- :mod:`pg` --- The Classic PyGreSQL Interface -------------------------------------------- .. module:: pg Contents ======== .. toctree:: introduction module connection db_wrapper query large_objects notification db_types adaptation PyGreSQL-5.1/docs/_build/html/_sources/contents/pg/module.rst.txt0000644000175100077410000006706313466770070025021 0ustar darcypyg00000000000000Module functions and constants ============================== .. py:currentmodule:: pg The :mod:`pg` module defines a few functions that allow to connect to a database and to define "default variables" that override the environment variables used by PostgreSQL. These "default variables" were designed to allow you to handle general connection parameters without heavy code in your programs. You can prompt the user for a value, put it in the default variable, and forget it, without having to modify your environment. The support for default variables can be disabled by setting the ``-DNO_DEF_VAR`` option in the Python setup file. Methods relative to this are specified by the tag [DV]. 
All variables are set to ``None`` at module initialization, specifying that standard environment variables should be used. connect -- Open a PostgreSQL connection --------------------------------------- .. function:: connect([dbname], [host], [port], [opt], [user], [passwd]) Open a :mod:`pg` connection :param dbname: name of connected database (*None* = :data:`defbase`) :type str: str or None :param host: name of the server host (*None* = :data:`defhost`) :type host: str or None :param port: port used by the database server (-1 = :data:`defport`) :type port: int :param opt: connection options (*None* = :data:`defopt`) :type opt: str or None :param user: PostgreSQL user (*None* = :data:`defuser`) :type user: str or None :param passwd: password for user (*None* = :data:`defpasswd`) :type passwd: str or None :returns: If successful, the :class:`Connection` handling the connection :rtype: :class:`Connection` :raises TypeError: bad argument type, or too many arguments :raises SyntaxError: duplicate argument definition :raises pg.InternalError: some error occurred during pg connection definition :raises Exception: (all exceptions relative to object allocation) This function opens a connection to a specified database on a given PostgreSQL server. You can use keywords here, as described in the Python tutorial. The names of the keywords are the name of the parameters given in the syntax line. The ``opt`` parameter can be used to pass command-line options to the server. For a precise description of the parameters, please refer to the PostgreSQL user manual. If you want to add additional parameters not specified here, you must pass a connection string or a connection URI instead of the ``dbname`` (as in ``con3`` and ``con4`` in the following example). 
Example:: import pg con1 = pg.connect('testdb', 'myhost', 5432, None, 'bob', None) con2 = pg.connect(dbname='testdb', host='myhost', user='bob') con3 = pg.connect('host=myhost user=bob dbname=testdb connect_timeout=10') con4 = pg.connect('postgresql://bob@myhost/testdb?connect_timeout=10') get/set_defhost -- default server host [DV] ------------------------------------------- .. function:: get_defhost(host) Get the default host :returns: the current default host specification :rtype: str or None :raises TypeError: too many arguments This method returns the current default host specification, or ``None`` if the environment variables should be used. Environment variables won't be looked up. .. function:: set_defhost(host) Set the default host :param host: the new default host specification :type host: str or None :returns: the previous default host specification :rtype: str or None :raises TypeError: bad argument type, or too many arguments This methods sets the default host value for new connections. If ``None`` is supplied as parameter, environment variables will be used in future connections. It returns the previous setting for default host. get/set_defport -- default server port [DV] ------------------------------------------- .. function:: get_defport() Get the default port :returns: the current default port specification :rtype: int :raises TypeError: too many arguments This method returns the current default port specification, or ``None`` if the environment variables should be used. Environment variables won't be looked up. .. function:: set_defport(port) Set the default port :param port: the new default port :type port: int :returns: previous default port specification :rtype: int or None This methods sets the default port value for new connections. If -1 is supplied as parameter, environment variables will be used in future connections. It returns the previous setting for default port. 
get/set_defopt -- default connection options [DV] -------------------------------------------------- .. function:: get_defopt() Get the default connection options :returns: the current default options specification :rtype: str or None :raises TypeError: too many arguments This method returns the current default connection options specification, or ``None`` if the environment variables should be used. Environment variables won't be looked up. .. function:: set_defopt(options) Set the default connection options :param options: the new default connection options :type options: str or None :returns: previous default options specification :rtype: str or None :raises TypeError: bad argument type, or too many arguments This methods sets the default connection options value for new connections. If ``None`` is supplied as parameter, environment variables will be used in future connections. It returns the previous setting for default options. get/set_defbase -- default database name [DV] --------------------------------------------- .. function:: get_defbase() Get the default database name :returns: the current default database name specification :rtype: str or None :raises TypeError: too many arguments This method returns the current default database name specification, or ``None`` if the environment variables should be used. Environment variables won't be looked up. .. function:: set_defbase(base) Set the default database name :param base: the new default base name :type base: str or None :returns: the previous default database name specification :rtype: str or None :raises TypeError: bad argument type, or too many arguments This method sets the default database name value for new connections. If ``None`` is supplied as parameter, environment variables will be used in future connections. It returns the previous setting for default host. get/set_defuser -- default database user [DV] --------------------------------------------- .. 
function:: get_defuser() Get the default database user :returns: the current default database user specification :rtype: str or None :raises TypeError: too many arguments This method returns the current default database user specification, or ``None`` if the environment variables should be used. Environment variables won't be looked up. .. function:: set_defuser(user) Set the default database user :param user: the new default database user :type base: str or None :returns: the previous default database user specification :rtype: str or None :raises TypeError: bad argument type, or too many arguments This method sets the default database user name for new connections. If ``None`` is supplied as parameter, environment variables will be used in future connections. It returns the previous setting for default host. get/set_defpasswd -- default database password [DV] --------------------------------------------------- .. function:: get_defpasswd() Get the default database password :returns: the current default database password specification :rtype: str or None :raises TypeError: too many arguments This method returns the current default database password specification, or ``None`` if the environment variables should be used. Environment variables won't be looked up. .. function:: set_defpasswd(passwd) Set the default database password :param passwd: the new default database password :type base: str or None :returns: the previous default database password specification :rtype: str or None :raises TypeError: bad argument type, or too many arguments This method sets the default database password for new connections. If ``None`` is supplied as parameter, environment variables will be used in future connections. It returns the previous setting for default host. escape_string -- escape a string for use within SQL --------------------------------------------------- .. 
function:: escape_string(string) Escape a string for use within SQL :param str string: the string that is to be escaped :returns: the escaped string :rtype: str :raises TypeError: bad argument type, or too many arguments This function escapes a string for use within an SQL command. This is useful when inserting data values as literal constants in SQL commands. Certain characters (such as quotes and backslashes) must be escaped to prevent them from being interpreted specially by the SQL parser. :func:`escape_string` performs this operation. Note that there is also a :class:`Connection` method with the same name which takes connection properties into account. .. note:: It is especially important to do proper escaping when handling strings that were received from an untrustworthy source. Otherwise there is a security risk: you are vulnerable to "SQL injection" attacks wherein unwanted SQL commands are fed to your database. Example:: name = input("Name? ") phone = con.query("select phone from employees where name='%s'" % escape_string(name)).getresult() escape_bytea -- escape binary data for use within SQL ----------------------------------------------------- .. function:: escape_bytea(datastring) escape binary data for use within SQL as type ``bytea`` :param str datastring: string containing the binary data that is to be escaped :returns: the escaped string :rtype: str :raises TypeError: bad argument type, or too many arguments Escapes binary data for use within an SQL command with the type ``bytea``. As with :func:`escape_string`, this is only used when inserting data directly into an SQL command string. Note that there is also a :class:`Connection` method with the same name which takes connection properties into account. 
Example:: picture = open('garfield.gif', 'rb').read() con.query("update pictures set img='%s' where name='Garfield'" % escape_bytea(picture)) unescape_bytea -- unescape data that has been retrieved as text --------------------------------------------------------------- .. function:: unescape_bytea(string) Unescape ``bytea`` data that has been retrieved as text :param str string: the ``bytea`` data string that has been retrieved as text :returns: byte string containing the binary data :rtype: bytes :raises TypeError: bad argument type, or too many arguments Converts an escaped string representation of binary data stored as ``bytea`` into the raw byte string representing the binary data -- this is the reverse of :func:`escape_bytea`. Since the :class:`Query` results will already return unescaped byte strings, you normally don't have to use this method. Note that there is also a :class:`DB` method with the same name which does exactly the same. get/set_decimal -- decimal type to be used for numeric values ------------------------------------------------------------- .. function:: get_decimal() Get the decimal type to be used for numeric values :returns: the Python class used for PostgreSQL numeric values :rtype: class This function returns the Python class that is used by PyGreSQL to hold PostgreSQL numeric values. The default class is :class:`decimal.Decimal` if available, otherwise the :class:`float` type is used. .. function:: set_decimal(cls) Set a decimal type to be used for numeric values :param class cls: the Python class to be used for PostgreSQL numeric values This function can be used to specify the Python class that shall be used by PyGreSQL to hold PostgreSQL numeric values. The default class is :class:`decimal.Decimal` if available, otherwise the :class:`float` type is used. get/set_decimal_point -- decimal mark used for monetary values -------------------------------------------------------------- ..
function:: get_decimal_point() Get the decimal mark used for monetary values :returns: string with one character representing the decimal mark :rtype: str This function returns the decimal mark used by PyGreSQL to interpret PostgreSQL monetary values when converting them to decimal numbers. The default setting is ``'.'`` as a decimal point. This setting is not adapted automatically to the locale used by PostgreSQL, but you can use :func:`set_decimal_point()` to set a different decimal mark manually. A return value of ``None`` means monetary values are not interpreted as decimal numbers, but returned as strings including the formatting and currency. .. versionadded:: 4.1.1 .. function:: set_decimal_point(string) Specify which decimal mark is used for interpreting monetary values :param str string: string with one character representing the decimal mark This function can be used to specify the decimal mark used by PyGreSQL to interpret PostgreSQL monetary values. The default value is '.' as a decimal point. This value is not adapted automatically to the locale used by PostgreSQL, so if you are dealing with a database set to a locale that uses a ``','`` instead of ``'.'`` as the decimal point, then you need to call ``set_decimal_point(',')`` to have PyGreSQL interpret monetary values correctly. If you don't want money values to be converted to decimal numbers, then you can call ``set_decimal_point(None)``, which will cause PyGreSQL to return monetary values as strings including their formatting and currency. .. versionadded:: 4.1.1 get/set_bool -- whether boolean values are returned as bool objects ------------------------------------------------------------------- ..
function:: get_bool() Check whether boolean values are returned as bool objects :returns: whether or not bool objects will be returned :rtype: bool This function checks whether PyGreSQL returns PostgreSQL boolean values converted to Python bool objects, or as ``'f'`` and ``'t'`` strings which are the values used internally by PostgreSQL. By default, conversion to bool objects is activated, but you can disable this with the :func:`set_bool` function. .. versionadded:: 4.2 .. function:: set_bool(on) Set whether boolean values are returned as bool objects :param on: whether or not bool objects shall be returned This function can be used to specify whether PyGreSQL shall return PostgreSQL boolean values converted to Python bool objects, or as ``'f'`` and ``'t'`` strings which are the values used internally by PostgreSQL. By default, conversion to bool objects is activated, but you can disable this by calling ``set_bool(False)``. .. versionadded:: 4.2 .. versionchanged:: 5.0 Boolean values had been returned as string by default in earlier versions. get/set_array -- whether arrays are returned as list objects ------------------------------------------------------------ .. function:: get_array() Check whether arrays are returned as list objects :returns: whether or not list objects will be returned :rtype: bool This function checks whether PyGreSQL returns PostgreSQL arrays converted to Python list objects, or simply as text in the internal special output syntax of PostgreSQL. By default, conversion to list objects is activated, but you can disable this with the :func:`set_array` function. .. versionadded:: 5.0 .. function:: set_array(on) Set whether arrays are returned as list objects :param on: whether or not list objects shall be returned This function can be used to specify whether PyGreSQL shall return PostgreSQL arrays converted to Python list objects, or simply as text in the internal special output syntax of PostgreSQL.
By default, conversion to list objects is activated, but you can disable this by calling ``set_array(False)``. .. versionadded:: 5.0 .. versionchanged:: 5.0 Arrays had been always returned as text strings only in earlier versions. get/set_bytea_escaped -- whether bytea data is returned escaped --------------------------------------------------------------- .. function:: get_bytea_escaped() Check whether bytea values are returned as escaped strings :returns: whether or not bytea objects will be returned escaped :rtype: bool This function checks whether PyGreSQL returns PostgreSQL ``bytea`` values in escaped form or in unescaped form as byte strings. By default, bytea values will be returned unescaped as byte strings, but you can change this with the :func:`set_bytea_escaped` function. .. versionadded:: 5.0 .. function:: set_bytea_escaped(on) Set whether bytea values are returned as escaped strings :param on: whether or not bytea objects shall be returned escaped This function can be used to specify whether PyGreSQL shall return PostgreSQL ``bytea`` values in escaped form or in unescaped form as byte strings. By default, bytea values will be returned unescaped as byte strings, but you can change this by calling ``set_bytea_escaped(True)``. .. versionadded:: 5.0 .. versionchanged:: 5.0 Bytea data had been returned in escaped form by default in earlier versions. get/set_jsondecode -- decoding JSON format ------------------------------------------ .. function:: get_jsondecode() Get the function that deserializes JSON formatted strings This returns the function used by PyGreSQL to construct Python objects from JSON formatted strings. ..
function:: set_jsondecode(func) Set a function that will deserialize JSON formatted strings :param func: the function to be used for deserializing JSON strings You can use this if you do not want to deserialize JSON strings coming in from the database, or if you want to use a different function than the standard function :func:`json.loads` or if you want to use it with parameters different from the default ones. If you set this function to *None*, then the automatic deserialization of JSON strings will be deactivated. .. versionadded:: 5.0 .. versionchanged:: 5.0 JSON data had been always returned as text strings in earlier versions. get/set_datestyle -- assume a fixed date style ---------------------------------------------- .. function:: get_datestyle() Get the assumed date style for typecasting This returns the PostgreSQL date style that is silently assumed when typecasting dates or *None* if no fixed date style is assumed, in which case the date style is requested from the database when necessary (this is the default). Note that this method will *not* get the date style that is currently set in the session or in the database. You can get the current setting with the methods :meth:`DB.get_parameter` and :meth:`Connection.parameter`. You can also get the date format corresponding to the current date style by calling :meth:`Connection.date_format`. .. versionadded:: 5.0 .. function:: set_datestyle(datestyle) Set a fixed date style that shall be assumed when typecasting :param str datestyle: the date style that shall be assumed, or *None* if no fixed date style shall be assumed PyGreSQL is able to automatically pick up the right date style for typecasting date values from the database, even if you change it for the current session with a ``SET DateStyle`` command. This happens very effectively without an additional database request being involved. If you still want to have PyGreSQL always assume a fixed date style instead, then you can set one with this function.
Note that calling this function will *not* alter the date style of the database or the current session. You can do that by calling the method :meth:`DB.set_parameter` instead. .. versionadded:: 5.0 get/set_typecast -- custom typecasting -------------------------------------- PyGreSQL uses typecast functions to cast the raw data coming from the database to Python objects suitable for the particular database type. These functions take a single string argument that represents the data to be casted and must return the casted value. PyGreSQL provides through its C extension module basic typecast functions for the common database types, but if you want to add more typecast functions, you can set these using the following functions. .. method:: get_typecast(typ) Get the global cast function for the given database type :param str typ: PostgreSQL type name :returns: the typecast function for the specified type :rtype: function or None .. versionadded:: 5.0 .. method:: set_typecast(typ, cast) Set a global typecast function for the given database type(s) :param typ: PostgreSQL type name or list of type names :type typ: str or list :param cast: the typecast function to be set for the specified type(s) :type cast: callable or None The typecast function must take one string object as argument and return a Python object into which the PostgreSQL type shall be casted. If the function takes another parameter named *connection*, then the current database connection will also be passed to the typecast function. This may sometimes be necessary to look up certain database settings. .. versionadded:: 5.0 Note that database connections cache types and their cast functions using connection specific :class:`DbTypes` objects. You can also get, set and reset typecast functions on the connection level using the methods :meth:`DbTypes.get_typecast`, :meth:`DbTypes.set_typecast` and :meth:`DbTypes.reset_typecast` of the :attr:`DB.dbtypes` object.
This will not affect other connections or future connections. In order to be sure a global change is picked up by a running connection, you must reopen it or call :meth:`DbTypes.reset_typecast` on the :attr:`DB.dbtypes` object. Also note that the typecasting for all of the basic types happens already in the C extension module. The typecast functions that can be set with the above methods are only called for the types that are not already supported by the C extension module. cast_array/record -- fast parsers for arrays and records -------------------------------------------------------- PostgreSQL returns arrays and records (composite types) using a special output syntax with several quirks that cannot easily and quickly be parsed in Python. Therefore the C extension module provides two fast parsers that allow quickly turning these text representations into Python objects: Arrays will be converted to Python lists, and records to Python tuples. These fast parsers are used automatically by PyGreSQL in order to return arrays and records from database queries as lists and tuples, so you normally don't need to call them directly. You may only need them for typecasting arrays of data types that are not supported by default in PostgreSQL. ..
function:: cast_array(string, [cast], [delim]) Cast a string representing a PostgreSQL array to a Python list :param str string: the string with the text representation of the array :param cast: a typecast function for the elements of the array :type cast: callable or None :param delim: delimiter character between adjacent elements :type delim: byte string with a single character :returns: a list representing the PostgreSQL array in Python :rtype: list :raises TypeError: invalid argument types :raises ValueError: error in the syntax of the given array This function takes a *string* containing the text representation of a PostgreSQL array (which may look like ``'{{1,2},{3,4}}'`` for a two-dimensional array), a typecast function *cast* that is called for every element, and an optional delimiter character *delim* (usually a comma), and returns a Python list representing the array (which may be nested like ``[[1, 2], [3, 4]]`` in this example). The cast function must take a single argument which will be the text representation of the element and must output the corresponding Python object that shall be put into the list. If you don't pass a cast function or set it to *None*, then unprocessed text strings will be returned as elements of the array. If you don't pass a delimiter character, then a comma will be used by default. .. versionadded:: 5.0 ..
function:: cast_record(string, [cast], [delim]) Cast a string representing a PostgreSQL record to a Python tuple :param str string: the string with the text representation of the record :param cast: typecast function(s) for the elements of the record :type cast: callable, list or tuple of callables, or None :param delim: delimiter character between adjacent elements :type delim: byte string with a single character :returns: a tuple representing the PostgreSQL record in Python :rtype: tuple :raises TypeError: invalid argument types :raises ValueError: error in the syntax of the given record This function takes a *string* containing the text representation of a PostgreSQL record (which may look like ``'(1,a,2,b)'`` for a record composed of four fields), a typecast function *cast* that is called for every element, or a list or tuple of such functions corresponding to the individual fields of the record, and an optional delimiter character *delim* (usually a comma), and returns a Python tuple representing the record (which may be inhomogeneous like ``(1, 'a', 2, 'b')`` in this example). The cast function(s) must take a single argument which will be the text representation of the element and must output the corresponding Python object that shall be put into the tuple. If you don't pass cast function(s) or pass *None* instead, then unprocessed text strings will be returned as elements of the tuple. If you don't pass a delimiter character, then a comma will be used by default. .. versionadded:: 5.0 Note that besides using parentheses instead of braces, there are other subtle differences in escaping special characters and NULL values between the syntax used for arrays and the one used for composite types, which these functions take into account. Type helpers ------------ The module provides the following type helper functions.
You can wrap parameters with these functions when passing them to :meth:`DB.query` or :meth:`DB.query_formatted` in order to give PyGreSQL a hint about the type of the parameters, if it cannot be derived from the context. .. function:: Bytea(bytes) A wrapper for holding a bytea value .. versionadded:: 5.0 .. function:: HStore(dict) A wrapper for holding an hstore dictionary .. versionadded:: 5.0 .. function:: Json(obj) A wrapper for holding an object serializable to JSON .. versionadded:: 5.0 The following additional type helper is only meaningful when used with :meth:`DB.query_formatted`. It marks a parameter as text that shall be literally included into the SQL. This is useful for passing table names for instance. .. function:: Literal(sql) A wrapper for holding a literal SQL string .. versionadded:: 5.0 Module constants ---------------- Some constants are defined in the module dictionary. They are intended to be used as parameters for method calls. You should refer to the libpq description in the PostgreSQL user manual for more information about them. These constants are: .. data:: version .. data:: __version__ constants that give the current version .. data:: INV_READ .. data:: INV_WRITE large objects access modes, used by :meth:`Connection.locreate` and :meth:`LargeObject.open` .. data:: SEEK_SET .. data:: SEEK_CUR .. data:: SEEK_END positional flags, used by :meth:`LargeObject.seek` .. data:: TRANS_IDLE .. data:: TRANS_ACTIVE .. data:: TRANS_INTRANS .. data:: TRANS_INERROR .. data:: TRANS_UNKNOWN transaction states, used by :meth:`Connection.transaction` PyGreSQL-5.1/docs/_build/html/_sources/contents/pg/connection.rst.txt0000644000175100077410000004754313466770070025674 0ustar darcypyg00000000000000Connection -- The connection object =================================== .. py:currentmodule:: pg .. class:: Connection This object handles a connection to a PostgreSQL database.
It embeds and hides all the parameters that define this connection, thus just leaving really significant parameters in function calls. .. note:: Some methods give direct access to the connection socket. *Do not use them unless you really know what you are doing.* If you prefer disabling them, set the ``-DNO_DIRECT`` option in the Python setup file. These methods are specified by the tag [DA]. .. note:: Some other methods give access to large objects (refer to PostgreSQL user manual for more information about these). If you want to forbid access to these from the module, set the ``-DNO_LARGE`` option in the Python setup file. These methods are specified by the tag [LO]. query -- execute a SQL command string ------------------------------------- .. method:: Connection.query(command, [args]) Execute a SQL command string :param str command: SQL command :param args: optional parameter values :returns: result values :rtype: :class:`Query`, None :raises TypeError: bad argument type, or too many arguments :raises TypeError: invalid connection :raises ValueError: empty SQL query or lost connection :raises pg.ProgrammingError: error in query :raises pg.InternalError: error during query processing This method simply sends a SQL query to the database. If the query is an insert statement that inserted exactly one row into a table that has OIDs, the return value is the OID of the newly inserted row as an integer. If the query is an update or delete statement, or an insert statement that did not insert exactly one row, or on a table without OIDs, then the number of rows affected is returned as a string. If it is a statement that returns rows as a result (usually a select statement, but maybe also an ``"insert/update ... returning"`` statement), this method returns a :class:`Query`. Otherwise, it returns ``None``. You can use the :class:`Query` object as an iterator that yields all results as tuples, or call :meth:`Query.getresult` to get the result as a list of tuples. 
Alternatively, you can call :meth:`Query.dictresult` or :meth:`Query.dictiter` if you want to get the rows as dictionaries, or :meth:`Query.namedresult` or :meth:`Query.namediter` if you want to get the rows as named tuples. You can also simply print the :class:`Query` object to show the query results on the console. The SQL command may optionally contain positional parameters of the form ``$1``, ``$2``, etc instead of literal data, in which case the values must be supplied separately as a tuple. The values are substituted by the database in such a way that they don't need to be escaped, making this an effective way to pass arbitrary or unknown data without worrying about SQL injection or syntax errors. If you don't pass any parameters, the command string can also include multiple SQL commands (separated by semicolons). You will only get the return value for the last command in this case. When the database could not process the query, a :exc:`pg.ProgrammingError` or a :exc:`pg.InternalError` is raised. You can check the ``SQLSTATE`` error code of this error by reading its :attr:`sqlstate` attribute. Example:: name = input("Name? ") phone = con.query("select phone from employees where name=$1", (name,)).getresult() query_prepared -- execute a prepared statement ---------------------------------------------- .. 
method:: Connection.query_prepared(name, [args]) Execute a prepared statement :param str name: name of the prepared statement :param args: optional parameter values :returns: result values :rtype: :class:`Query`, None :raises TypeError: bad argument type, or too many arguments :raises TypeError: invalid connection :raises ValueError: empty SQL query or lost connection :raises pg.ProgrammingError: error in query :raises pg.InternalError: error during query processing :raises pg.OperationalError: prepared statement does not exist This method works exactly like :meth:`Connection.query` except that instead of passing the command itself, you pass the name of a prepared statement. An empty name corresponds to the unnamed statement. You must have previously created the corresponding named or unnamed statement with :meth:`Connection.prepare`, or an :exc:`pg.OperationalError` will be raised. .. versionadded:: 5.1 prepare -- create a prepared statement -------------------------------------- .. method:: Connection.prepare(name, command) Create a prepared statement :param str name: name of the prepared statement :param str command: SQL command :rtype: None :raises TypeError: bad argument types, or wrong number of arguments :raises TypeError: invalid connection :raises pg.ProgrammingError: error in query or duplicate query This method creates a prepared statement with the specified name for the given command for later execution with the :meth:`Connection.query_prepared` method. The name can be empty to create an unnamed statement, in which case any pre-existing unnamed statement is automatically replaced; otherwise a :exc:`pg.ProgrammingError` is raised if the statement name is already defined in the current database session. The SQL command may optionally contain positional parameters of the form ``$1``, ``$2``, etc instead of literal data. The corresponding values must then later be passed to the :meth:`Connection.query_prepared` method separately as a tuple. .. 
versionadded:: 5.1 describe_prepared -- describe a prepared statement -------------------------------------------------- .. method:: Connection.describe_prepared(name) Describe a prepared statement :param str name: name of the prepared statement :rtype: :class:`Query` :raises TypeError: bad argument type, or too many arguments :raises TypeError: invalid connection :raises pg.OperationalError: prepared statement does not exist This method returns a :class:`Query` object describing the prepared statement with the given name. You can also pass an empty name in order to describe the unnamed statement. Information on the fields of the corresponding query can be obtained through the :meth:`Query.listfields`, :meth:`Query.fieldname` and :meth:`Query.fieldnum` methods. .. versionadded:: 5.1 reset -- reset the connection ----------------------------- .. method:: Connection.reset() Reset the :mod:`pg` connection :rtype: None :raises TypeError: too many (any) arguments :raises TypeError: invalid connection This method resets the current database connection. cancel -- abandon processing of current SQL command --------------------------------------------------- .. method:: Connection.cancel() :rtype: None :raises TypeError: too many (any) arguments :raises TypeError: invalid connection This method requests that the server abandon processing of the current SQL command. close -- close the database connection -------------------------------------- .. method:: Connection.close() Close the :mod:`pg` connection :rtype: None :raises TypeError: too many (any) arguments This method closes the database connection. The connection will be closed in any case when the connection is deleted but this allows you to explicitly close it. It is mainly here to allow the DB-SIG API wrapper to implement a close function. transaction -- get the current transaction state ------------------------------------------------ .. 
method:: Connection.transaction() Get the current in-transaction status of the server :returns: the current in-transaction status :rtype: int :raises TypeError: too many (any) arguments :raises TypeError: invalid connection The status returned by this method can be :const:`TRANS_IDLE` (currently idle), :const:`TRANS_ACTIVE` (a command is in progress), :const:`TRANS_INTRANS` (idle, in a valid transaction block), or :const:`TRANS_INERROR` (idle, in a failed transaction block). :const:`TRANS_UNKNOWN` is reported if the connection is bad. The status :const:`TRANS_ACTIVE` is reported only when a query has been sent to the server and not yet completed. parameter -- get a current server parameter setting --------------------------------------------------- .. method:: Connection.parameter(name) Look up a current parameter setting of the server :param str name: the name of the parameter to look up :returns: the current setting of the specified parameter :rtype: str or None :raises TypeError: too many (any) arguments :raises TypeError: invalid connection Certain parameter values are reported by the server automatically at connection startup or whenever their values change. This method can be used to interrogate these settings. It returns the current value of a parameter if known, or *None* if the parameter is not known. You can use this method to check the settings of important parameters such as `server_version`, `server_encoding`, `client_encoding`, `application_name`, `is_superuser`, `session_authorization`, `DateStyle`, `IntervalStyle`, `TimeZone`, `integer_datetimes`, and `standard_conforming_strings`. Values that are not reported by this method can be requested using :meth:`DB.get_parameter`. .. versionadded:: 4.0 date_format -- get the currently used date format ------------------------------------------------- .. 
method:: Connection.date_format() Look up the date format currently being used by the database :returns: the current date format :rtype: str :raises TypeError: too many (any) arguments :raises TypeError: invalid connection This method returns the current date format used by the server. Note that it is cheap to call this method, since there is no database query involved and the setting is also cached internally. You will need the date format when you want to manually typecast dates and timestamps coming from the database instead of using the built-in typecast functions. The date format returned by this method can be directly used with date formatting functions such as :meth:`datetime.strptime`. It is derived from the current setting of the database parameter ``DateStyle``. .. versionadded:: 5.0 fileno -- get the socket used to connect to the database -------------------------------------------------------- .. method:: Connection.fileno() Get the socket used to connect to the database :returns: the socket id of the database connection :rtype: int :raises TypeError: too many (any) arguments :raises TypeError: invalid connection This method returns the underlying socket id used to connect to the database. This is useful for use in select calls, etc. getnotify -- get the last notify from the server ------------------------------------------------ .. method:: Connection.getnotify() Get the last notify from the server :returns: last notify from server :rtype: tuple, None :raises TypeError: too many parameters :raises TypeError: invalid connection This method tries to get a notify from the server (from the SQL statement NOTIFY). If the server returns no notify, the method returns None. Otherwise, it returns a tuple (triplet) *(relname, pid, extra)*, where *relname* is the name of the notify, *pid* is the process id of the connection that triggered the notify, and *extra* is a payload string that has been sent with the notification.
Remember to do a listen query first, otherwise :meth:`Connection.getnotify` will always return ``None``. .. versionchanged:: 4.1 Support for payload strings was added in version 4.1. inserttable -- insert a list into a table ----------------------------------------- .. method:: Connection.inserttable(table, values) Insert a Python list into a database table :param str table: the table name :param list values: list of rows values :rtype: None :raises TypeError: invalid connection, bad argument type, or too many arguments :raises MemoryError: insert buffer could not be allocated :raises ValueError: unsupported values This method allows to *quickly* insert large blocks of data in a table: It inserts the whole values list into the given table. Internally, it uses the COPY command of the PostgreSQL database. The list is a list of tuples/lists that define the values for each inserted row. The rows values may contain string, integer, long or double (real) values. .. warning:: This method doesn't type check the fields according to the table definition; it just looks whether or not it knows how to handle such types. get/set_cast_hook -- fallback typecast function ----------------------------------------------- .. method:: Connection.get_cast_hook() Get the function that handles all external typecasting :returns: the current external typecast function :rtype: callable, None :raises TypeError: too many (any) arguments This returns the callback function used by PyGreSQL to provide plug-in Python typecast functions for the connection. .. versionadded:: 5.0 .. method:: Connection.set_cast_hook(func) Set a function that will handle all external typecasting :param func: the function to be used as a callback :rtype: None :raises TypeError: the specified notice receiver is not callable This method allows setting a custom fallback function for providing Python typecast functions for the connection to supplement the C extension module.
If you set this function to *None*, then only the typecast functions implemented in the C extension module are enabled. You normally would not want to change this. Instead, you can use :func:`get_typecast` and :func:`set_typecast` to add or change the plug-in Python typecast functions. .. versionadded:: 5.0 get/set_notice_receiver -- custom notice receiver ------------------------------------------------- .. method:: Connection.get_notice_receiver() Get the current notice receiver :returns: the current notice receiver callable :rtype: callable, None :raises TypeError: too many (any) arguments This method gets the custom notice receiver callback function that has been set with :meth:`Connection.set_notice_receiver`, or ``None`` if no custom notice receiver has ever been set on the connection. .. versionadded:: 4.1 .. method:: Connection.set_notice_receiver(func) Set a custom notice receiver :param func: the custom notice receiver callback function :rtype: None :raises TypeError: the specified notice receiver is not callable This method allows setting a custom notice receiver callback function. When a notice or warning message is received from the server, or generated internally by libpq, and the message level is below the one set with ``client_min_messages``, the specified notice receiver function will be called. This function must take one parameter, the :class:`Notice` object, which provides the following read-only attributes: .. attribute:: Notice.pgcnx the connection .. attribute:: Notice.message the full message with a trailing newline .. attribute:: Notice.severity the level of the message, e.g. 'NOTICE' or 'WARNING' .. attribute:: Notice.primary the primary human-readable error message .. attribute:: Notice.detail an optional secondary error message .. attribute:: Notice.hint an optional suggestion what to do about the problem .. versionadded:: 4.1 putline -- write a line to the server socket [DA] ------------------------------------------------- .. 
method:: Connection.putline(line) Write a line to the server socket :param str line: line to be written :rtype: None :raises TypeError: invalid connection, bad parameter type, or too many parameters This method allows to directly write a string to the server socket. getline -- get a line from server socket [DA] --------------------------------------------- .. method:: Connection.getline() Get a line from server socket :returns: the line read :rtype: str :raises TypeError: invalid connection :raises TypeError: too many parameters :raises MemoryError: buffer overflow This method allows to directly read a string from the server socket. endcopy -- synchronize client and server [DA] --------------------------------------------- .. method:: Connection.endcopy() Synchronize client and server :rtype: None :raises TypeError: invalid connection :raises TypeError: too many parameters The use of direct access methods may desynchronize client and server. This method ensure that client and server will be synchronized. locreate -- create a large object in the database [LO] ------------------------------------------------------ .. method:: Connection.locreate(mode) Create a large object in the database :param int mode: large object create mode :returns: object handling the PostgreSQL large object :rtype: :class:`LargeObject` :raises TypeError: invalid connection, bad parameter type, or too many parameters :raises pg.OperationalError: creation error This method creates a large object in the database. The mode can be defined by OR-ing the constants defined in the :mod:`pg` module (:const:`INV_READ`, :const:`INV_WRITE` and :const:`INV_ARCHIVE`). Please refer to PostgreSQL user manual for a description of the mode values. getlo -- build a large object from given oid [LO] ------------------------------------------------- .. 
method:: Connection.getlo(oid) Build a large object from the given OID :param int oid: OID of the existing large object :returns: object handling the PostgreSQL large object :rtype: :class:`LargeObject` :raises TypeError: invalid connection, bad parameter type, or too many parameters :raises ValueError: bad OID value (0 is invalid_oid) This method allows reusing a previously created large object through the :class:`LargeObject` interface, provided the user has its OID. loimport -- import a file to a large object [LO] ------------------------------------------------ .. method:: Connection.loimport(name) Import a file to a large object :param str name: the name of the file to be imported :returns: object handling the PostgreSQL large object :rtype: :class:`LargeObject` :raises TypeError: invalid connection, bad argument type, or too many arguments :raises pg.OperationalError: error during file import This method allows creating large objects in a very simple way. You just give the name of a file containing the data to be used. Object attributes ----------------- Every :class:`Connection` defines a set of read-only attributes that describe the connection and its status. These attributes are: .. attribute:: Connection.host the host name of the server (str) .. attribute:: Connection.port the port of the server (int) .. attribute:: Connection.db the selected database (str) .. attribute:: Connection.options the connection options (str) .. attribute:: Connection.user user name on the database system (str) .. attribute:: Connection.protocol_version the frontend/backend protocol being used (int) .. versionadded:: 4.0 .. attribute:: Connection.server_version the backend version (int, e.g. 90305 for 9.3.5) .. versionadded:: 4.0 .. attribute:: Connection.status the status of the connection (int: 1 = OK, 0 = bad) .. attribute:: Connection.error the last warning/error message from the server (str) .. 
attribute:: Connection.socket the file descriptor number of the connection socket to the server (int) .. versionadded:: 5.1 .. attribute:: Connection.backend_pid the PID of the backend process handling this connection (int) .. versionadded:: 5.1 .. attribute:: Connection.ssl_in_use this is True if the connection uses SSL, False if not .. versionadded:: 5.1 (needs PostgreSQL >= 9.5) .. attribute:: Connection.ssl_attributes SSL-related information about the connection (dict) .. versionadded:: 5.1 (needs PostgreSQL >= 9.5) PyGreSQL-5.1/docs/_build/html/_sources/contents/pg/db_types.rst.txt0000644000175100077410000000741613466770070025341 0ustar darcypyg00000000000000DbTypes -- The internal cache for database types ================================================ .. py:currentmodule:: pg .. class:: DbTypes .. versionadded:: 5.0 The :class:`DbTypes` object is essentially a dictionary mapping PostgreSQL internal type names and type OIDs to PyGreSQL "type names" (which are also returned by :meth:`DB.get_attnames` as dictionary values). These type names are strings which are equal to either the simple PyGreSQL names or to the more fine-grained registered PostgreSQL type names if these have been enabled with :meth:`DB.use_regtypes`. Besides being strings, they carry additional information about the associated PostgreSQL type in the following attributes: - *oid* -- the PostgreSQL type OID - *pgtype* -- the internal PostgreSQL data type name - *regtype* -- the registered PostgreSQL data type name - *simple* -- the more coarse-grained PyGreSQL type name - *typtype* -- `b` = base type, `c` = composite type etc. - *category* -- `A` = Array, `b` =Boolean, `C` = Composite etc. - *delim* -- delimiter for array types - *relid* -- corresponding table for composite types - *attnames* -- attributes for composite types For details, see the PostgreSQL documentation on `pg_type `_. In addition to the dictionary methods, the :class:`DbTypes` class also provides the following methods: .. 
method:: DbTypes.get_attnames(typ) Get the names and types of the fields of composite types :param typ: PostgreSQL type name or OID of a composite type :type typ: str or int :returns: an ordered dictionary mapping field names to type names .. method:: DbTypes.get_typecast(typ) Get the cast function for the given database type :param str typ: PostgreSQL type name :returns: the typecast function for the specified type :rtype: function or None .. method:: DbTypes.set_typecast(typ, cast) Set a typecast function for the given database type(s) :param typ: PostgreSQL type name or list of type names :type typ: str or list :param cast: the typecast function to be set for the specified type(s) :type cast: callable The typecast function must take one string object as argument and return a Python object into which the PostgreSQL type shall be cast. If the function takes another parameter named *connection*, then the current database connection will also be passed to the typecast function. This may sometimes be necessary to look up certain database settings. .. method:: DbTypes.reset_typecast([typ]) Reset the typecasts for the specified (or all) type(s) to their defaults :param typ: PostgreSQL type name or list of type names, or None to reset all typecast functions :type typ: str, list or None .. method:: DbTypes.typecast(value, typ) Cast the given value according to the given database type :param str typ: PostgreSQL type name or type code :returns: the cast value .. note:: Note that the :class:`DbTypes` object is always bound to a database connection. You can also get, set and reset typecast functions on a global level using the functions :func:`pg.get_typecast` and :func:`pg.set_typecast`. If you do this, the current database connections will continue to use their already cached typecast functions unless you reset the typecast functions by calling the :meth:`DbTypes.reset_typecast` method on :attr:`DB.dbtypes` objects of the running connections. 
Also note that the typecasting for all of the basic types happens already in the C low-level extension module. The typecast functions that can be set with the above methods are only called for the types that are not already supported by the C extension. PyGreSQL-5.1/docs/_build/html/_sources/contents/index.rst.txt0000644000175100077410000000105313466770070024220 0ustar darcypyg00000000000000The PyGreSQL documentation ========================== Contents -------- .. toctree:: :maxdepth: 1 Installing PyGreSQL What's New and History of Changes General PyGreSQL Programming Information First Steps with PyGreSQL The Classic PyGreSQL Interface The DB-API Compliant Interface A PostgreSQL Primer Examples for using PyGreSQL Indices and tables ------------------ * :ref:`genindex` * :ref:`modindex` * :ref:`search` PyGreSQL-5.1/docs/_build/html/_sources/contents/general.rst.txt0000644000175100077410000000367713466770070024544 0ustar darcypyg00000000000000General PyGreSQL programming information ---------------------------------------- PyGreSQL consists of two parts: the "classic" PyGreSQL interface provided by the :mod:`pg` module and the newer DB-API 2.0 compliant interface provided by the :mod:`pgdb` module. If you use only the standard features of the DB-API 2.0 interface, it will be easier to switch from PostgreSQL to another database for which a DB-API 2.0 compliant interface exists. The "classic" interface may be easier to use for beginners, and it provides some higher-level and PostgreSQL specific convenience methods. .. seealso:: **DB-API 2.0** (Python Database API Specification v2.0) is a specification for connecting to databases (not only PostgreSQL) from Python that has been developed by the Python DB-SIG in 1999. The authoritative programming information for the DB-API is :pep:`0249`. Both Python modules utilize the same low-level C extension, which serves as a wrapper for the "libpq" library, the C API to PostgreSQL. 
This means you must have the libpq library installed as a shared library on your client computer, in a version that is supported by PyGreSQL. Depending on the client platform, you may have to set environment variables like `PATH` or `LD_LIBRARY_PATH` so that PyGreSQL can find the library. .. warning:: Note that PyGreSQL is not thread-safe on the connection level. Therefore we recommend using `DBUtils `_ for multi-threaded environments, which supports both PyGreSQL interfaces. Another option is using PyGreSQL indirectly as a database driver for the high-level `SQLAlchemy `_ SQL toolkit and ORM, which supports PyGreSQL starting with SQLAlchemy 1.1 and which provides a way to use PyGreSQL in a multi-threaded environment using the concept of "thread local storage". Database URLs for PyGreSQL take this form:: postgresql+pygresql://username:password@host:port/database PyGreSQL-5.1/docs/_build/html/_sources/contents/install.rst.txt0000644000175100077410000001520113466770070024557 0ustar darcypyg00000000000000Installation ============ General ------- You must first install Python and PostgreSQL on your system. If you want to access remote databases only, you don't need to install the full PostgreSQL server, but only the libpq C-interface library. If you are on Windows, make sure that the directory that contains libpq.dll is part of your ``PATH`` environment variable. The current version of PyGreSQL has been tested with Python versions 2.6, 2.7 and 3.3 to 3.7, and PostgreSQL versions 9.0 to 9.6 and 10 or 11. PyGreSQL will be installed as three modules, a shared library called _pg.so (on Linux) or a DLL called _pg.pyd (on Windows), and two pure Python wrapper modules called pg.py and pgdb.py. All three files will be installed directly into the Python site-packages directory. To uninstall PyGreSQL, simply remove these three files. Installing with Pip ------------------- This is the most easy way to install PyGreSQL if you have "pip" installed. 
Just run the following command in your terminal:: pip install PyGreSQL This will automatically try to find and download a distribution on the `Python Package Index `_ that matches your operating system and Python version and install it. Installing from a Binary Distribution ------------------------------------- If you don't want to use "pip", or "pip" doesn't find an appropriate distribution for your computer, you can also try to manually download and install a distribution. When you download the source distribution, you will need to compile the C extension, for which you need a C compiler installed. If you don't want to install a C compiler or avoid possible problems with the compilation, you can search for a pre-compiled binary distribution of PyGreSQL on the Python Package Index or the PyGreSQL homepage. You can currently download PyGreSQL as Linux RPM, NetBSD package and Windows installer. Make sure the required Python version of the binary package matches the Python version you have installed. Install the package as usual on your system. Note that the documentation is currently only included in the source package. Installing from Source ---------------------- If you want to install PyGreSQL from Source, or there is no binary package available for your platform, follow these instructions. Make sure the Python header files and PostgreSQL client and server header files are installed. These come usually with the "devel" packages on Unix systems and the installer executables on Windows systems. If you are using a precompiled PostgreSQL, you will also need the pg_config tool. This is usually also part of the "devel" package on Unix, and will be installed as part of the database server feature on Windows systems. Building and installing with Distutils ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can build and install PyGreSQL using `Distutils `_. Download and unpack the PyGreSQL source tarball if you haven't already done so. 
Type the following commands to build and install PyGreSQL:: python setup.py build python setup.py install Now you should be ready to use PyGreSQL. Compiling Manually ~~~~~~~~~~~~~~~~~~ The source file for compiling the C extension module is pgmodule.c. You have two options. You can compile PyGreSQL as a stand-alone module or you can build it into the Python interpreter. Stand-Alone ^^^^^^^^^^^ * In the directory containing ``pgmodule.c``, run the following command:: cc -fpic -shared -o _pg.so -I$PYINC -I$PGINC -I$PSINC -L$PGLIB -lpq pgmodule.c where you have to set:: PYINC = path to the Python include files (usually something like /usr/include/python) PGINC = path to the PostgreSQL client include files (something like /usr/include/pgsql or /usr/include/postgresql) PSINC = path to the PostgreSQL server include files (like /usr/include/pgsql/server or /usr/include/postgresql/server) PGLIB = path to the PostgreSQL object code libraries (usually /usr/lib) If you are not sure about the above paths, try something like:: PYINC=`find /usr -name Python.h` PGINC=`find /usr -name libpq-fe.h` PSINC=`find /usr -name postgres.h` PGLIB=`find /usr -name libpq.so` If you have the ``pg_config`` tool installed, you can set:: PGINC=`pg_config --includedir` PSINC=`pg_config --includedir-server` PGLIB=`pg_config --libdir` Some options may be added to this line:: -DNO_DEF_VAR no default variables support -DNO_DIRECT no direct access methods -DNO_LARGE no large object support -DNO_PQSOCKET if running an older PostgreSQL On some systems you may need to include ``-lcrypt`` in the list of libraries to make it compile. * Test the new module. Something like the following should work:: $ python >>> import _pg >>> db = _pg.connect('thilo','localhost') >>> db.query("INSERT INTO test VALUES ('ping','pong')") 18304 >>> db.query("SELECT * FROM test") eins|zwei ----+---- ping|pong (1 row) * Finally, move the ``_pg.so``, ``pg.py``, and ``pgdb.py`` to a directory in your ``PYTHONPATH``. 
A good place would be ``/usr/lib/python/site-packages`` if your Python modules are in ``/usr/lib/python``. Built-in to Python interpreter ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * Find the directory where your ``Setup`` file lives (usually in the ``Modules`` subdirectory) in the Python source hierarchy and copy or symlink the ``pgmodule.c`` file there. * Add the following line to your 'Setup' file:: _pg pgmodule.c -I$PGINC -I$PSINC -L$PGLIB -lpq where:: PGINC = path to the PostgreSQL client include files (see above) PSINC = path to the PostgreSQL server include files (see above) PGLIB = path to the PostgreSQL object code libraries (see above) Some options may be added to this line:: -DNO_DEF_VAR no default variables support -DNO_DIRECT no direct access methods -DNO_LARGE no large object support -DNO_PQSOCKET if running an older PostgreSQL (see above) On some systems you may need to include ``-lcrypt`` in the list of libraries to make it compile. * If you want a shared module, make sure that the ``shared`` keyword is uncommented and add the above line below it. You used to need to install your shared modules with ``make sharedinstall`` but this no longer seems to be true. * Copy ``pg.py`` to the lib directory where the rest of your modules are. For example, that's ``/usr/local/lib/Python`` on my system. * Rebuild Python from the root directory of the Python source hierarchy by running ``make -f Makefile.pre.in boot`` and ``make && make install``. * For more details read the documentation at the top of ``Makefile.pre.in``. PyGreSQL-5.1/docs/_build/html/_sources/contents/postgres/0000755000175100077410000000000013470245541023403 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/_build/html/_sources/contents/postgres/func.rst.txt0000644000175100077410000001154513466770070025721 0ustar darcypyg00000000000000Examples for using SQL functions ================================ .. 
py:currentmodule:: pg We assume that you have already created a connection to the PostgreSQL database, as explained in the :doc:`basic`:: >>> from pg import DB >>> db = DB() >>> query = db.query Creating SQL Functions on Base Types ------------------------------------ A **CREATE FUNCTION** statement lets you create a new function that can be used in expressions (in SELECT, INSERT, etc.). We will start with functions that return values of base types. Let's create a simple SQL function that takes no arguments and returns 1:: >>> query("""CREATE FUNCTION one() RETURNS int4 ... AS 'SELECT 1 as ONE' LANGUAGE SQL""") Functions can be used in any expressions (e.g. in the target list or qualifications):: >>> print(db.query("SELECT one() AS answer")) answer ------ 1 (1 row) Here's how you create a function that takes arguments. The following function returns the sum of its two arguments:: >>> query("""CREATE FUNCTION add_em(int4, int4) RETURNS int4 ... AS $$ SELECT $1 + $2 $$ LANGUAGE SQL""") >>> print(query("SELECT add_em(1, 2) AS answer")) answer ------ 3 (1 row) Creating SQL Functions on Composite Types ----------------------------------------- It is also possible to create functions that return values of composite types. Before we create more sophisticated functions, let's populate an EMP table:: >>> query("""CREATE TABLE EMP ( ... name text, ... salary int4, ... age int4, ... dept varchar(16))""") >>> emps = ["'Sam', 1200, 16, 'toy'", ... "'Claire', 5000, 32, 'shoe'", ... "'Andy', -1000, 2, 'candy'", ... "'Bill', 4200, 36, 'shoe'", ... "'Ginger', 4800, 30, 'candy'"] >>> for emp in emps: ... query("INSERT INTO EMP VALUES (%s)" % emp) Every INSERT statement will return a '1' indicating that it has inserted one row into the EMP table. The argument of a function can also be a tuple. For instance, *double_salary* takes a tuple of the EMP table:: >>> query("""CREATE FUNCTION double_salary(EMP) RETURNS int4 ... 
AS $$ SELECT $1.salary * 2 AS salary $$ LANGUAGE SQL""") >>> print(query("""SELECT name, double_salary(EMP) AS dream ... FROM EMP WHERE EMP.dept = 'toy'""")) name|dream ----+----- Sam | 2400 (1 row) The return value of a function can also be a tuple. However, make sure that the expressions in the target list are in the same order as the columns of EMP:: >>> query("""CREATE FUNCTION new_emp() RETURNS EMP AS $$ ... SELECT 'None'::text AS name, ... 1000 AS salary, ... 25 AS age, ... 'None'::varchar(16) AS dept ... $$ LANGUAGE SQL""") You can then extract a column out of the resulting tuple by using the "function notation" for projection columns (i.e. ``bar(foo)`` is equivalent to ``foo.bar``). Note that ``new_emp().name`` isn't supported:: >>> print(query("SELECT name(new_emp()) AS nobody")) nobody ------ None (1 row) Let's try one more function that returns tuples:: >>> query("""CREATE FUNCTION high_pay() RETURNS setof EMP ... AS 'SELECT * FROM EMP where salary > 1500' ... LANGUAGE SQL""") >>> query("SELECT name(high_pay()) AS overpaid") overpaid -------- Claire Bill Ginger (3 rows) Creating SQL Functions with multiple SQL statements --------------------------------------------------- You can also create functions that do more than just a SELECT. You may have noticed that Andy has a negative salary. We'll create a function that removes employees with negative salaries:: >>> query("SELECT * FROM EMP") name |salary|age|dept ------+------+---+----- Sam | 1200| 16|toy Claire| 5000| 32|shoe Andy | -1000| 2|candy Bill | 4200| 36|shoe Ginger| 4800| 30|candy (5 rows) >>> query("""CREATE FUNCTION clean_EMP () RETURNS int4 AS ... 'DELETE FROM EMP WHERE EMP.salary < 0; ... SELECT 1 AS ignore_this' ... 
LANGUAGE SQL""") >>> query("SELECT clean_EMP()") clean_emp --------- 1 (1 row) >>> query("SELECT * FROM EMP") name |salary|age|dept ------+------+---+----- Sam | 1200| 16|toy Claire| 5000| 32|shoe Bill | 4200| 36|shoe Ginger| 4800| 30|candy (4 rows) Remove functions that were created in this example -------------------------------------------------- We can remove the functions that we have created in this example and the table EMP, by using the DROP command:: query("DROP FUNCTION clean_EMP()") query("DROP FUNCTION high_pay()") query("DROP FUNCTION new_emp()") query("DROP FUNCTION add_em(int4, int4)") query("DROP FUNCTION one()") query("DROP TABLE EMP CASCADE") PyGreSQL-5.1/docs/_build/html/_sources/contents/postgres/advanced.rst.txt0000644000175100077410000001121513466770070026525 0ustar darcypyg00000000000000Examples for advanced features ============================== .. py:currentmodule:: pg In this section, we show how to use some advanced features of PostgreSQL using the classic PyGreSQL interface. We assume that you have already created a connection to the PostgreSQL database, as explained in the :doc:`basic`:: >>> from pg import DB >>> db = DB() >>> query = db.query Inheritance ----------- A table can inherit from zero or more tables. A query can reference either all rows of a table or all rows of a table plus all of its descendants. For example, the capitals table inherits from cities table (it inherits all data fields from cities):: >>> data = [('cities', [ ... "'San Francisco', 7.24E+5, 63", ... "'Las Vegas', 2.583E+5, 2174", ... "'Mariposa', 1200, 1953"]), ... ('capitals', [ ... "'Sacramento',3.694E+5,30,'CA'", ... "'Madison', 1.913E+5, 845, 'WI'"])] Now, let's populate the tables:: >>> data = ['cities', [ ... "'San Francisco', 7.24E+5, 63" ... "'Las Vegas', 2.583E+5, 2174" ... "'Mariposa', 1200, 1953"], ... 'capitals', [ ... "'Sacramento',3.694E+5,30,'CA'", ... "'Madison', 1.913E+5, 845, 'WI'"]] >>> for table, rows in data: ... for row in rows: ... 
query("INSERT INTO %s VALUES (%s)" % (table, row)) >>> print(query("SELECT * FROM cities")) name |population|altitude -------------+----------+-------- San Francisco| 724000| 63 Las Vegas | 258300| 2174 Mariposa | 1200| 1953 Sacramento | 369400| 30 Madison | 191300| 845 (5 rows) >>> print(query("SELECT * FROM capitals")) name |population|altitude|state ----------+----------+--------+----- Sacramento| 369400| 30|CA Madison | 191300| 845|WI (2 rows) You can find all cities, including capitals, that are located at an altitude of 500 feet or higher by:: >>> print(query("""SELECT c.name, c.altitude ... FROM cities ... WHERE altitude > 500""")) name |altitude ---------+-------- Las Vegas| 2174 Mariposa | 1953 Madison | 845 (3 rows) On the other hand, the following query references rows of the base table only, i.e. it finds all cities that are not state capitals and are situated at an altitude of 500 feet or higher:: >>> print(query("""SELECT name, altitude ... FROM ONLY cities ... WHERE altitude > 500""")) name |altitude ---------+-------- Las Vegas| 2174 Mariposa | 1953 (2 rows) Arrays ------ Attributes can be arrays of base types or user-defined types:: >>> query("""CREATE TABLE sal_emp ( ... name text, ... pay_by_quarter int4[], ... pay_by_extra_quarter int8[], ... schedule text[][])""") Insert instances with array attributes. Note the use of braces:: >>> query("""INSERT INTO sal_emp VALUES ( ... 'Bill', '{10000,10000,10000,10000}', ... '{9223372036854775800,9223372036854775800,9223372036854775800}', ... '{{"meeting", "lunch"}, {"training", "presentation"}}')""") >>> query("""INSERT INTO sal_emp VALUES ( ... 'Carol', '{20000,25000,25000,25000}', ... '{9223372036854775807,9223372036854775807,9223372036854775807}', ... '{{"breakfast", "consulting"}, {"meeting", "lunch"}}')""") Queries on array attributes:: >>> query("""SELECT name FROM sal_emp WHERE ... 
sal_emp.pay_by_quarter[1] != sal_emp.pay_by_quarter[2]""") name ----- Carol (1 row) Retrieve third quarter pay of all employees:: >>> query("SELECT sal_emp.pay_by_quarter[3] FROM sal_emp") pay_by_quarter -------------- 10000 25000 (2 rows) Retrieve third quarter extra pay of all employees:: >>> query("SELECT sal_emp.pay_by_extra_quarter[3] FROM sal_emp") pay_by_extra_quarter -------------------- 9223372036854775800 9223372036854775807 (2 rows) Retrieve first two quarters of extra quarter pay of all employees:: >>> query("SELECT sal_emp.pay_by_extra_quarter[1:2] FROM sal_emp") pay_by_extra_quarter ----------------------------------------- {9223372036854775800,9223372036854775800} {9223372036854775807,9223372036854775807} (2 rows) Select subarrays:: >>> query("""SELECT sal_emp.schedule[1:2][1:1] FROM sal_emp ... WHERE sal_emp.name = 'Bill'""") schedule ---------------------- {{meeting},{training}} (1 row) PyGreSQL-5.1/docs/_build/html/_sources/contents/postgres/index.rst.txt0000644000175100077410000000063513466770070026073 0ustar darcypyg00000000000000------------------- A PostgreSQL Primer ------------------- The examples in this chapter of the documentation have been taken from the PostgreSQL manual. They demonstrate some PostgreSQL features using the classic PyGreSQL interface. They can serve as an introduction to PostgreSQL, but not so much as examples for the use of PyGreSQL. Contents ======== .. toctree:: basic advanced func syscat PyGreSQL-5.1/docs/_build/html/_sources/contents/postgres/basic.rst.txt0000644000175100077410000003020013466770070026034 0ustar darcypyg00000000000000Basic examples ============== .. py:currentmodule:: pg In this section, we demonstrate how to use some of the very basic features of PostgreSQL using the classic PyGreSQL interface. 
Creating a connection to the database ------------------------------------- We start by creating a **connection** to the PostgreSQL database:: >>> from pg import DB >>> db = DB() If you pass no parameters when creating the :class:`DB` instance, then PyGreSQL will try to connect to the database on the local host that has the same name as the current user, and also use that name for login. You can also pass the database name, host, port and login information as parameters when creating the :class:`DB` instance:: >>> db = DB(dbname='testdb', host='pgserver', port=5432, ... user='scott', passwd='tiger') The :class:`DB` class of which ``db`` is an object is a wrapper around the lower level :class:`Connection` class of the :mod:`pg` module. The most important method of such connection objects is the ``query`` method that allows you to send SQL commands to the database. Creating tables --------------- The first thing you would want to do in an empty database is creating a table. To do this, you need to send a **CREATE TABLE** command to the database. PostgreSQL has its own set of built-in types that can be used for the table columns. Let us create two tables "weather" and "cities":: >>> db.query("""CREATE TABLE weather ( ... city varchar(80), ... temp_lo int, temp_hi int, ... prcp float8, ... date date)""") >>> db.query("""CREATE TABLE cities ( ... name varchar(80), ... location point)""") .. note:: Keywords are case-insensitive but identifiers are case-sensitive. You can get a list of all tables in the database with:: >>> db.get_tables() ['public.cities', 'public.weather'] Insert data ----------- Now we want to fill our tables with data. An **INSERT** statement is used to insert a new row into a table. There are several ways you can specify what columns the data should go to. Let us insert a row into each of these tables. 
The simplest case is when the list of values corresponds to the order of the columns specified in the CREATE TABLE command:: >>> db.query("""INSERT INTO weather ... VALUES ('San Francisco', 46, 50, 0.25, '11/27/1994')""") >>> db.query("""INSERT INTO cities ... VALUES ('San Francisco', '(-194.0, 53.0)')""") You can also specify the columns to which the values correspond. The columns can be specified in any order. You may also omit any number of columns, such as with unknown precipitation, below:: >>> db.query("""INSERT INTO weather (date, city, temp_hi, temp_lo) ... VALUES ('11/29/1994', 'Hayward', 54, 37)""") If you get errors regarding the format of the date values, your database is probably set to a different date style. In this case you must change the date style like this:: >>> db.query("set datestyle = MDY") Instead of explicitly writing the INSERT statement and sending it to the database with the :meth:`DB.query` method, you can also use the more convenient :meth:`DB.insert` method that does the same under the hood:: >>> db.insert('weather', ... date='11/29/1994', city='Hayward', temp_hi=54, temp_lo=37) And instead of using keyword parameters, you can also pass the values to the :meth:`DB.insert` method in a single Python dictionary. If you have a Python list with many rows that shall be used to fill a database table quickly, you can use the :meth:`DB.inserttable` method. Retrieving data --------------- After having entered some data into our tables, let's see how we can get the data out again. A **SELECT** statement is used for retrieving data. The basic syntax is: .. code-block:: psql SELECT columns FROM tables WHERE predicates A simple one would be the following query:: >>> q = db.query("SELECT * FROM weather") >>> print(q) city |temp_lo|temp_hi|prcp| date -------------+-------+-------+----+---------- San Francisco| 46| 50|0.25|1994-11-27 Hayward | 37| 54| |1994-11-29 (2 rows) You may also specify expressions in the target list. 
(The 'AS column' specifies the column name of the result. It is optional.) :: >>> print(db.query("""SELECT city, (temp_hi+temp_lo)/2 AS temp_avg, date ... FROM weather""")) city |temp_avg| date -------------+--------+---------- San Francisco| 48|1994-11-27 Hayward | 45|1994-11-29 (2 rows) If you want to retrieve rows that satisfy certain condition (i.e. a restriction), specify the condition in a WHERE clause. The following retrieves the weather of San Francisco on rainy days:: >>> print(db.query("""SELECT * FROM weather ... WHERE city = 'San Francisco' AND prcp > 0.0""")) city |temp_lo|temp_hi|prcp| date -------------+-------+-------+----+---------- San Francisco| 46| 50|0.25|1994-11-27 (1 row) Here is a more complicated one. Duplicates are removed when DISTINCT is specified. ORDER BY specifies the column to sort on. (Just to make sure the following won't confuse you, DISTINCT and ORDER BY can be used separately.) :: >>> print(db.query("SELECT DISTINCT city FROM weather ORDER BY city")) city ------------- Hayward San Francisco (2 rows) So far we have only printed the output of a SELECT query. The object that is returned by the query is an instance of the :class:`Query` class that can print itself in the nicely formatted way we saw above. But you can also retrieve the results as a list of tuples, by using the :meth:`Query.getresult` method:: >>> from pprint import pprint >>> q = db.query("SELECT * FROM weather") >>> pprint(q.getresult()) [('San Francisco', 46, 50, 0.25, '1994-11-27'), ('Hayward', 37, 54, None, '1994-11-29')] Here we used pprint to print out the returned list in a nicely formatted way. 
If you want to retrieve the results as a list of dictionaries instead of tuples, use the :meth:`Query.dictresult` method instead:: >>> pprint(q.dictresult()) [{'city': 'San Francisco', 'date': '1994-11-27', 'prcp': 0.25, 'temp_hi': 50, 'temp_lo': 46}, {'city': 'Hayward', 'date': '1994-11-29', 'prcp': None, 'temp_hi': 54, 'temp_lo': 37}] Finally, you can also retrieve the results as a list of named tuples, using the :meth:`Query.namedresult` method. This can be a good compromise between simple tuples and the more memory intensive dictionaries: >>> for row in q.namedresult(): ... print(row.city, row.date) ... San Francisco 1994-11-27 Hayward 1994-11-29 If you only want to retrieve a single row of data, you can use the more convenient :meth:`DB.get` method that does the same under the hood:: >>> d = dict(city='Hayward') >>> db.get('weather', d, 'city') >>> pprint(d) {'city': 'Hayward', 'date': '1994-11-29', 'prcp': None, 'temp_hi': 54, 'temp_lo': 37} As you see, the :meth:`DB.get` method returns a dictionary with the column names as keys. In the third parameter you can specify which column should be looked up in the WHERE statement of the SELECT statement that is executed by the :meth:`DB.get` method. You normally don't need it when the table was created with a primary key. Retrieving data into other tables --------------------------------- A SELECT ... INTO statement can be used to retrieve data into another table:: >>> db.query("""SELECT * INTO TEMPORARY TABLE temptab FROM weather ... WHERE city = 'San Francisco' and prcp > 0.0""") This fills a temporary table "temptab" with a subset of the data in the original "weather" table. 
It can be listed with:: >>> print(db.query("SELECT * from temptab")) city |temp_lo|temp_hi|prcp| date -------------+-------+-------+----+---------- San Francisco| 46| 50|0.25|1994-11-27 (1 row) Aggregates ---------- Let's try the following query:: >>> print(db.query("SELECT max(temp_lo) FROM weather")) max --- 46 (1 row) You can also use aggregates with the GROUP BY clause:: >>> print(db.query("SELECT city, max(temp_lo) FROM weather GROUP BY city")) city |max -------------+--- Hayward | 37 San Francisco| 46 (2 rows) Joining tables -------------- Queries can access multiple tables at once or access the same table in such a way that multiple instances of the table are being processed at the same time. Suppose we want to find all the records that are in the temperature range of other records. W1 and W2 are aliases for weather. We can use the following query to achieve that:: >>> print(db.query("""SELECT W1.city, W1.temp_lo, W1.temp_hi, ... W2.city, W2.temp_lo, W2.temp_hi FROM weather W1, weather W2 ... WHERE W1.temp_lo < W2.temp_lo and W1.temp_hi > W2.temp_hi""")) city |temp_lo|temp_hi| city |temp_lo|temp_hi -------+-------+-------+-------------+-------+------- Hayward| 37| 54|San Francisco| 46| 50 (1 row) Now let's join two different tables. The following joins the "weather" table and the "cities" table:: >>> print(db.query("""SELECT city, location, prcp, date ... FROM weather, cities ... WHERE name = city""")) city |location |prcp| date -------------+---------+----+---------- San Francisco|(-194,53)|0.25|1994-11-27 (1 row) Since the column names are all different, we don't have to specify the table name. If you want to be clear, you can do the following. They give identical results, of course:: >>> print(db.query("""SELECT w.city, c.location, w.prcp, w.date ... 
FROM weather w, cities c WHERE c.name = w.city""")) city |location |prcp| date -------------+---------+----+---------- San Francisco|(-194,53)|0.25|1994-11-27 (1 row) Updating data ------------- It you want to change the data that has already been inserted into a database table, you will need the **UPDATE** statement. Suppose you discover the temperature readings are all off by 2 degrees as of Nov 28, you may update the data as follow:: >>> db.query("""UPDATE weather ... SET temp_hi = temp_hi - 2, temp_lo = temp_lo - 2 ... WHERE date > '11/28/1994'""") '1' >>> print(db.query("SELECT * from weather")) city |temp_lo|temp_hi|prcp| date -------------+-------+-------+----+---------- San Francisco| 46| 50|0.25|1994-11-27 Hayward | 35| 52| |1994-11-29 (2 rows) Note that the UPDATE statement returned the string ``'1'``, indicating that exactly one row of data has been affected by the update. If you retrieved one row of data as a dictionary using the :meth:`DB.get` method, then you can also update that row with the :meth:`DB.update` method. Deleting data ------------- To delete rows from a table, a **DELETE** statement can be used. Suppose you are no longer interested in the weather of Hayward, you can do the following to delete those rows from the table:: >>> db.query("DELETE FROM weather WHERE city = 'Hayward'") '1' Again, you get the string ``'1'`` as return value, indicating that exactly one row of data has been deleted. You can also delete all the rows in a table by doing the following. This is different from DROP TABLE which removes the table itself in addition to the removing the rows, as explained in the next section. :: >>> db.query("DELETE FROM weather") '1' >>> print(db.query("SELECT * from weather")) city|temp_lo|temp_hi|prcp|date ----+-------+-------+----+---- (0 rows) Since only one row was left in the table, the DELETE query again returns the string ``'1'``. The SELECT query now gives an empty result. 
If you retrieved a row of data as a dictionary using the :meth:`DB.get` method, then you can also delete that row with the :meth:`DB.delete` method. Removing the tables ------------------- The **DROP TABLE** command is used to remove tables. After you have done this, you can no longer use those tables:: >>> db.query("DROP TABLE weather, cities") >>> db.query("select * from weather") pg.ProgrammingError: Error: Relation "weather" does not exist PyGreSQL-5.1/docs/_build/html/_sources/contents/postgres/syscat.rst.txt0000644000175100077410000001107613466770070026273 0ustar darcypyg00000000000000Examples for using the system catalogs ====================================== .. py:currentmodule:: pg The system catalogs are regular tables where PostgreSQL stores schema metadata, such as information about tables and columns, and internal bookkeeping information. You can drop and recreate the tables, add columns, insert and update values, and severely mess up your system that way. Normally, one should not change the system catalogs by hand: there are SQL commands to make all supported changes. For example, CREATE DATABASE inserts a row into the *pg_database* catalog — and actually creates the database on disk. It this section we want to show examples for how to parse some of the system catalogs, making queries with the classic PyGreSQL interface. 
We assume that you have already created a connection to the PostgreSQL database, as explained in the :doc:`basic`:: >>> from pg import DB >>> db = DB() >>> query = db.query Lists indices ------------- This query lists all simple indices in the database:: print(query("""SELECT bc.relname AS class_name, ic.relname AS index_name, a.attname FROM pg_class bc, pg_class ic, pg_index i, pg_attribute a WHERE i.indrelid = bc.oid AND i.indexrelid = ic.oid AND i.indkey[0] = a.attnum AND a.attrelid = bc.oid AND NOT a.attisdropped AND a.attnum>0 ORDER BY class_name, index_name, attname""")) List user defined attributes ---------------------------- This query lists all user-defined attributes and their types in user-defined tables:: print(query("""SELECT c.relname, a.attname, format_type(a.atttypid, a.atttypmod) FROM pg_class c, pg_attribute a WHERE c.relkind = 'r' AND c.relnamespace!=ALL(ARRAY[ 'pg_catalog','pg_toast', 'information_schema']::regnamespace[]) AND a.attnum > 0 AND a.attrelid = c.oid AND NOT a.attisdropped ORDER BY relname, attname""")) List user defined base types ---------------------------- This query lists all user defined base types:: print(query("""SELECT r.rolname, t.typname FROM pg_type t, pg_authid r WHERE r.oid = t.typowner AND t.typrelid = '0'::oid and t.typelem = '0'::oid AND r.rolname != 'postgres' ORDER BY rolname, typname""")) List operators -------------- This query lists all right-unary operators:: print(query("""SELECT o.oprname AS right_unary, lt.typname AS operand, result.typname AS return_type FROM pg_operator o, pg_type lt, pg_type result WHERE o.oprkind='r' and o.oprleft = lt.oid AND o.oprresult = result.oid ORDER BY operand""")) This query lists all left-unary operators:: print(query("""SELECT o.oprname AS left_unary, rt.typname AS operand, result.typname AS return_type FROM pg_operator o, pg_type rt, pg_type result WHERE o.oprkind='l' AND o.oprright = rt.oid AND o.oprresult = result.oid ORDER BY operand""")) And this one lists all of the 
binary operators:: print(query("""SELECT o.oprname AS binary_op, rt.typname AS right_opr, lt.typname AS left_opr, result.typname AS return_type FROM pg_operator o, pg_type rt, pg_type lt, pg_type result WHERE o.oprkind = 'b' AND o.oprright = rt.oid AND o.oprleft = lt.oid AND o.oprresult = result.oid""")) List functions of a language ---------------------------- Given a programming language, this query returns the name, args and return type from all functions of a language:: language = 'sql' print(query("""SELECT p.proname, p.pronargs, t.typname FROM pg_proc p, pg_language l, pg_type t WHERE p.prolang = l.oid AND p.prorettype = t.oid AND l.lanname = $1 ORDER BY proname""", (language,))) List aggregate functions ------------------------ This query lists all of the aggregate functions and the type to which they can be applied:: print(query("""SELECT p.proname, t.typname FROM pg_aggregate a, pg_proc p, pg_type t WHERE a.aggfnoid = p.oid and p.proargtypes[0] = t.oid ORDER BY proname, typname""")) List operator families ---------------------- The following query lists all defined operator families and all the operators included in each family:: print(query("""SELECT am.amname, opf.opfname, amop.amopopr::regoperator FROM pg_am am, pg_opfamily opf, pg_amop amop WHERE opf.opfmethod = am.oid AND amop.amopfamily = opf.oid ORDER BY amname, opfname, amopopr""")) PyGreSQL-5.1/docs/_build/html/_sources/download/0000755000175100077410000000000013470245541021507 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/_build/html/_sources/download/index.rst.txt0000644000175100077410000000111413466770070024170 0ustar darcypyg00000000000000Download information ==================== .. include:: download.rst News, Changes and Future Development ------------------------------------ See the :doc:`../announce` for current news. For a list of all changes in the current version |version| and in past versions, have a look at the :doc:`../contents/changelog`. 
The section on :doc:`../community/index` lists ideas for future developments and ways to participate. Installation ------------ Please read the chapter on :doc:`../contents/install` in our documentation. .. include:: files.rst .. include:: ../community/homes.rstPyGreSQL-5.1/docs/_build/html/_sources/copyright.rst.txt0000644000175100077410000000245513466770070023273 0ustar darcypyg00000000000000Copyright notice ================ Written by D'Arcy J.M. Cain (darcy@druid.net) Based heavily on code written by Pascal Andre (andre@chimay.via.ecp.fr) Copyright (c) 1995, Pascal Andre Further modifications copyright (c) 1997-2008 by D'Arcy J.M. Cain (darcy@PyGreSQL.org) Further modifications copyright (c) 2009-2019 by the PyGreSQL team. Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies. In this license the term "AUTHORS" refers to anyone who has contributed code to PyGreSQL. IN NO EVENT SHALL THE AUTHORS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF AUTHORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE AUTHORS SPECIFICALLY DISCLAIM ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE AUTHORS HAVE NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. PyGreSQL-5.1/docs/_build/html/_sources/index.rst.txt0000644000175100077410000000034513470245525022363 0ustar darcypyg00000000000000.. PyGreSQL index page with toc (for use without cloud theme) Welcome to PyGreSQL =================== .. 
toctree:: :maxdepth: 2 about copyright announce download/index contents/index community/indexPyGreSQL-5.1/docs/_build/html/_sources/community/0000755000175100077410000000000013470245541021724 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/_build/html/_sources/community/index.rst.txt0000644000175100077410000000073513466770070024415 0ustar darcypyg00000000000000PyGreSQL Development and Support ================================ PyGreSQL is an open-source project created by a group of volunteers. The project and the development infrastructure are currently maintained by D'Arcy J.M. Cain. We would be glad to welcome more contributors so that PyGreSQL can be further developed, modernized and improved. .. include:: mailinglist.rst .. include:: source.rst .. include:: bugtracker.rst .. include:: support.rst .. include:: homes.rst PyGreSQL-5.1/docs/_build/html/_sources/announce.rst.txt0000644000175100077410000000142413466776511023072 0ustar darcypyg00000000000000====================== PyGreSQL Announcements ====================== ------------------------------- Release of PyGreSQL version 5.1 ------------------------------- Release 5.1 of PyGreSQL. It is available at: http://pygresql.org/files/PyGreSQL-5.1.tar.gz. If you are running NetBSD, look in the packages directory under databases. There is also a package in the FreeBSD ports collection. Please refer to `changelog.txt `_ for things that have changed in this version. This version has been built and unit tested on: - NetBSD - FreeBSD - openSUSE - Ubuntu - Windows 7 and 10 with both MinGW and Visual Studio - PostgreSQL 9.0 to 9.6 and 10 or 11 (32 and 64bit) - Python 2.6, 2.7 and 3.3 to 3.7 (32 and 64bit) | D'Arcy J.M. 
Cain | darcy@PyGreSQL.org PyGreSQL-5.1/docs/_build/html/_static/0000755000175100077410000000000013470245541017504 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/_build/html/_static/file.png0000644000175100077410000000043613173077335021140 0ustar darcypyg00000000000000‰PNG  IHDRóÿaåIDATx­“ƒR…÷){…l× Û¶ÙfÛ=@®å œ:¿¹¾3ßú~箄þþþ¹òòrX$AðX-öD ~ñýý òóó€Ç‰(ŠP%¾€8<<9:: ãøøØP•êO&’$é Øl~‚X÷ìãûæ&ȽÖEWÀ^4µwQ}ÂÎö^ü˜Ô÷Í£¾ ‹¨iê©ïš0/H/é@F)éDzq+’ój”[žSU5¾€Ìèhš¦/ð¿oY– G&Lfs|£¡»»{Íêßøß3%¸U+S°é`AFÒIEND®B`‚PyGreSQL-5.1/docs/_build/html/_static/pygments.css0000644000175100077410000001045313470245537022074 0ustar darcypyg00000000000000.highlight .hll { background-color: #ffffcc } .highlight { background: #eeffcc; } .highlight .c { color: #408090; font-style: italic } /* Comment */ .highlight .err { border: 1px solid #FF0000 } /* Error */ .highlight .k { color: #007020; font-weight: bold } /* Keyword */ .highlight .o { color: #666666 } /* Operator */ .highlight .ch { color: #408090; font-style: italic } /* Comment.Hashbang */ .highlight .cm { color: #408090; font-style: italic } /* Comment.Multiline */ .highlight .cp { color: #007020 } /* Comment.Preproc */ .highlight .cpf { color: #408090; font-style: italic } /* Comment.PreprocFile */ .highlight .c1 { color: #408090; font-style: italic } /* Comment.Single */ .highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */ .highlight .gd { color: #A00000 } /* Generic.Deleted */ .highlight .ge { font-style: italic } /* Generic.Emph */ .highlight .gr { color: #FF0000 } /* Generic.Error */ .highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ .highlight .gi { color: #00A000 } /* Generic.Inserted */ .highlight .go { color: #333333 } /* Generic.Output */ .highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */ .highlight .gs { font-weight: bold } /* Generic.Strong */ .highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ .highlight .gt { color: #0044DD } /* 
Generic.Traceback */ .highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */ .highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */ .highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */ .highlight .kp { color: #007020 } /* Keyword.Pseudo */ .highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */ .highlight .kt { color: #902000 } /* Keyword.Type */ .highlight .m { color: #208050 } /* Literal.Number */ .highlight .s { color: #4070a0 } /* Literal.String */ .highlight .na { color: #4070a0 } /* Name.Attribute */ .highlight .nb { color: #007020 } /* Name.Builtin */ .highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */ .highlight .no { color: #60add5 } /* Name.Constant */ .highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */ .highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */ .highlight .ne { color: #007020 } /* Name.Exception */ .highlight .nf { color: #06287e } /* Name.Function */ .highlight .nl { color: #002070; font-weight: bold } /* Name.Label */ .highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */ .highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */ .highlight .nv { color: #bb60d5 } /* Name.Variable */ .highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */ .highlight .w { color: #bbbbbb } /* Text.Whitespace */ .highlight .mb { color: #208050 } /* Literal.Number.Bin */ .highlight .mf { color: #208050 } /* Literal.Number.Float */ .highlight .mh { color: #208050 } /* Literal.Number.Hex */ .highlight .mi { color: #208050 } /* Literal.Number.Integer */ .highlight .mo { color: #208050 } /* Literal.Number.Oct */ .highlight .sa { color: #4070a0 } /* Literal.String.Affix */ .highlight .sb { color: #4070a0 } /* Literal.String.Backtick */ .highlight .sc { color: #4070a0 } /* Literal.String.Char */ .highlight .dl { color: #4070a0 } /* Literal.String.Delimiter */ .highlight .sd { color: 
#4070a0; font-style: italic } /* Literal.String.Doc */ .highlight .s2 { color: #4070a0 } /* Literal.String.Double */ .highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */ .highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */ .highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */ .highlight .sx { color: #c65d09 } /* Literal.String.Other */ .highlight .sr { color: #235388 } /* Literal.String.Regex */ .highlight .s1 { color: #4070a0 } /* Literal.String.Single */ .highlight .ss { color: #517918 } /* Literal.String.Symbol */ .highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */ .highlight .fm { color: #06287e } /* Name.Function.Magic */ .highlight .vc { color: #bb60d5 } /* Name.Variable.Class */ .highlight .vg { color: #bb60d5 } /* Name.Variable.Global */ .highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */ .highlight .vm { color: #bb60d5 } /* Name.Variable.Magic */ .highlight .il { color: #208050 } /* Literal.Number.Integer.Long */PyGreSQL-5.1/docs/_build/html/_static/jquery.js0000644000175100077410000025051713173077335021377 0ustar darcypyg00000000000000/*! 
jQuery v3.1.0 | (c) jQuery Foundation | jquery.org/license */ !function(a,b){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){"use strict";var c=[],d=a.document,e=Object.getPrototypeOf,f=c.slice,g=c.concat,h=c.push,i=c.indexOf,j={},k=j.toString,l=j.hasOwnProperty,m=l.toString,n=m.call(Object),o={};function p(a,b){b=b||d;var c=b.createElement("script");c.text=a,b.head.appendChild(c).parentNode.removeChild(c)}var q="3.1.0",r=function(a,b){return new r.fn.init(a,b)},s=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,t=/^-ms-/,u=/-([a-z])/g,v=function(a,b){return b.toUpperCase()};r.fn=r.prototype={jquery:q,constructor:r,length:0,toArray:function(){return f.call(this)},get:function(a){return null!=a?a<0?this[a+this.length]:this[a]:f.call(this)},pushStack:function(a){var b=r.merge(this.constructor(),a);return b.prevObject=this,b},each:function(a){return r.each(this,a)},map:function(a){return this.pushStack(r.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(f.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(a<0?b:0);return this.pushStack(c>=0&&c0&&b-1 in a)}var x=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+1*new Date,v=a.document,w=0,x=0,y=ha(),z=ha(),A=ha(),B=function(a,b){return a===b&&(l=!0),0},C={}.hasOwnProperty,D=[],E=D.pop,F=D.push,G=D.push,H=D.slice,I=function(a,b){for(var c=0,d=a.length;c+~]|"+K+")"+K+"*"),S=new RegExp("="+K+"*([^\\]'\"]*?)"+K+"*\\]","g"),T=new RegExp(N),U=new RegExp("^"+L+"$"),V={ID:new RegExp("^#("+L+")"),CLASS:new RegExp("^\\.("+L+")"),TAG:new RegExp("^("+L+"|[*])"),ATTR:new RegExp("^"+M),PSEUDO:new RegExp("^"+N),CHILD:new 
RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+K+"*(even|odd|(([+-]|)(\\d*)n|)"+K+"*(?:([+-]|)"+K+"*(\\d+)|))"+K+"*\\)|)","i"),bool:new RegExp("^(?:"+J+")$","i"),needsContext:new RegExp("^"+K+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+K+"*((?:-\\d)?\\d*)"+K+"*\\)|)(?=[^-]|$)","i")},W=/^(?:input|select|textarea|button)$/i,X=/^h\d$/i,Y=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,$=/[+~]/,_=new RegExp("\\\\([\\da-f]{1,6}"+K+"?|("+K+")|.)","ig"),aa=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:d<0?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)},ba=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\x80-\uFFFF\w-]/g,ca=function(a,b){return b?"\0"===a?"\ufffd":a.slice(0,-1)+"\\"+a.charCodeAt(a.length-1).toString(16)+" ":"\\"+a},da=function(){m()},ea=ta(function(a){return a.disabled===!0},{dir:"parentNode",next:"legend"});try{G.apply(D=H.call(v.childNodes),v.childNodes),D[v.childNodes.length].nodeType}catch(fa){G={apply:D.length?function(a,b){F.apply(a,H.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function ga(a,b,d,e){var f,h,j,k,l,o,r,s=b&&b.ownerDocument,w=b?b.nodeType:9;if(d=d||[],"string"!=typeof a||!a||1!==w&&9!==w&&11!==w)return d;if(!e&&((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,p)){if(11!==w&&(l=Z.exec(a)))if(f=l[1]){if(9===w){if(!(j=b.getElementById(f)))return d;if(j.id===f)return d.push(j),d}else if(s&&(j=s.getElementById(f))&&t(b,j)&&j.id===f)return d.push(j),d}else{if(l[2])return G.apply(d,b.getElementsByTagName(a)),d;if((f=l[3])&&c.getElementsByClassName&&b.getElementsByClassName)return G.apply(d,b.getElementsByClassName(f)),d}if(c.qsa&&!A[a+" "]&&(!q||!q.test(a))){if(1!==w)s=b,r=a;else if("object"!==b.nodeName.toLowerCase()){(k=b.getAttribute("id"))?k=k.replace(ba,ca):b.setAttribute("id",k=u),o=g(a),h=o.length;while(h--)o[h]="#"+k+" "+sa(o[h]);r=o.join(","),s=$.test(a)&&qa(b.parentNode)||b}if(r)try{return 
G.apply(d,s.querySelectorAll(r)),d}catch(x){}finally{k===u&&b.removeAttribute("id")}}}return i(a.replace(P,"$1"),b,d,e)}function ha(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function ia(a){return a[u]=!0,a}function ja(a){var b=n.createElement("fieldset");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function ka(a,b){var c=a.split("|"),e=c.length;while(e--)d.attrHandle[c[e]]=b}function la(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&a.sourceIndex-b.sourceIndex;if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function ma(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function na(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function oa(a){return function(b){return"label"in b&&b.disabled===a||"form"in b&&b.disabled===a||"form"in b&&b.disabled===!1&&(b.isDisabled===a||b.isDisabled!==!a&&("label"in b||!ea(b))!==a)}}function pa(a){return ia(function(b){return b=+b,ia(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function qa(a){return a&&"undefined"!=typeof a.getElementsByTagName&&a}c=ga.support={},f=ga.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return!!b&&"HTML"!==b.nodeName},m=ga.setDocument=function(a){var b,e,g=a?a.ownerDocument||a:v;return g!==n&&9===g.nodeType&&g.documentElement?(n=g,o=n.documentElement,p=!f(n),v!==n&&(e=n.defaultView)&&e.top!==e&&(e.addEventListener?e.addEventListener("unload",da,!1):e.attachEvent&&e.attachEvent("onunload",da)),c.attributes=ja(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=ja(function(a){return a.appendChild(n.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=Y.test(n.getElementsByClassName),c.getById=ja(function(a){return 
o.appendChild(a).id=u,!n.getElementsByName||!n.getElementsByName(u).length}),c.getById?(d.find.ID=function(a,b){if("undefined"!=typeof b.getElementById&&p){var c=b.getElementById(a);return c?[c]:[]}},d.filter.ID=function(a){var b=a.replace(_,aa);return function(a){return a.getAttribute("id")===b}}):(delete d.find.ID,d.filter.ID=function(a){var b=a.replace(_,aa);return function(a){var c="undefined"!=typeof a.getAttributeNode&&a.getAttributeNode("id");return c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return"undefined"!=typeof b.getElementsByTagName?b.getElementsByTagName(a):c.qsa?b.querySelectorAll(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){if("undefined"!=typeof b.getElementsByClassName&&p)return b.getElementsByClassName(a)},r=[],q=[],(c.qsa=Y.test(n.querySelectorAll))&&(ja(function(a){o.appendChild(a).innerHTML="",a.querySelectorAll("[msallowcapture^='']").length&&q.push("[*^$]="+K+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||q.push("\\["+K+"*(?:value|"+J+")"),a.querySelectorAll("[id~="+u+"-]").length||q.push("~="),a.querySelectorAll(":checked").length||q.push(":checked"),a.querySelectorAll("a#"+u+"+*").length||q.push(".#.+[+~]")}),ja(function(a){a.innerHTML="";var b=n.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&q.push("name"+K+"*[*^$|!~]?="),2!==a.querySelectorAll(":enabled").length&&q.push(":enabled",":disabled"),o.appendChild(a).disabled=!0,2!==a.querySelectorAll(":disabled").length&&q.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),q.push(",.*:")})),(c.matchesSelector=Y.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&ja(function(a){c.disconnectedMatch=s.call(a,"*"),s.call(a,"[s!='']:x"),r.push("!=",N)}),q=q.length&&new 
RegExp(q.join("|")),r=r.length&&new RegExp(r.join("|")),b=Y.test(o.compareDocumentPosition),t=b||Y.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===n||a.ownerDocument===v&&t(v,a)?-1:b===n||b.ownerDocument===v&&t(v,b)?1:k?I(k,a)-I(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,e=a.parentNode,f=b.parentNode,g=[a],h=[b];if(!e||!f)return a===n?-1:b===n?1:e?-1:f?1:k?I(k,a)-I(k,b):0;if(e===f)return la(a,b);c=a;while(c=c.parentNode)g.unshift(c);c=b;while(c=c.parentNode)h.unshift(c);while(g[d]===h[d])d++;return d?la(g[d],h[d]):g[d]===v?-1:h[d]===v?1:0},n):n},ga.matches=function(a,b){return ga(a,null,null,b)},ga.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(S,"='$1']"),c.matchesSelector&&p&&!A[b+" "]&&(!r||!r.test(b))&&(!q||!q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return ga(b,n,null,[a]).length>0},ga.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},ga.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&C.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 0!==f?f:c.attributes||!p?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},ga.escape=function(a){return(a+"").replace(ba,ca)},ga.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},ga.uniqueSort=function(a){var 
b,d=[],e=0,f=0;if(l=!c.detectDuplicates,k=!c.sortStable&&a.slice(0),a.sort(B),l){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return k=null,a},e=ga.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=ga.selectors={cacheLength:50,createPseudo:ia,match:V,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(_,aa),a[3]=(a[3]||a[4]||a[5]||"").replace(_,aa),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||ga.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&ga.error(a[0]),a},PSEUDO:function(a){var b,c=!a[6]&&a[2];return V.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||"":c&&T.test(c)&&(b=g(c,!0))&&(b=c.indexOf(")",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(_,aa).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=y[a+" "];return b||(b=new RegExp("(^|"+K+")"+a+"("+K+"|$)"))&&y(a,function(a){return b.test("string"==typeof a.className&&a.className||"undefined"!=typeof a.getAttribute&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=ga.attr(d,a);return null==e?"!="===b:!b||(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e.replace(O," ")+" ").indexOf(c)>-1:"|="===b&&(e===c||e.slice(0,c.length+1)===c+"-"))}},CHILD:function(a,b,c,d,e){var 
f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h,t=!1;if(q){if(f){while(p){m=b;while(m=m[p])if(h?m.nodeName.toLowerCase()===r:1===m.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){m=q,l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),j=k[a]||[],n=j[0]===w&&j[1],t=n&&j[2],m=n&&q.childNodes[n];while(m=++n&&m&&m[p]||(t=n=0)||o.pop())if(1===m.nodeType&&++t&&m===b){k[a]=[w,n,t];break}}else if(s&&(m=b,l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),j=k[a]||[],n=j[0]===w&&j[1],t=n),t===!1)while(m=++n&&m&&m[p]||(t=n=0)||o.pop())if((h?m.nodeName.toLowerCase()===r:1===m.nodeType)&&++t&&(s&&(l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),k[a]=[w,t]),m===b))break;return t-=e,t===d||t%d===0&&t/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||ga.error("unsupported pseudo: "+a);return e[u]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?ia(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=I(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:ia(function(a){var b=[],c=[],d=h(a.replace(P,"$1"));return d[u]?ia(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),b[0]=null,!c.pop()}}),has:ia(function(a){return function(b){return ga(a,b).length>0}}),contains:ia(function(a){return a=a.replace(_,aa),function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:ia(function(a){return U.test(a||"")||ga.error("unsupported lang: "+a),a=a.replace(_,aa).toLowerCase(),function(b){var c;do if(c=p?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return 
c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===o},focus:function(a){return a===n.activeElement&&(!n.hasFocus||n.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:oa(!1),disabled:oa(!0),checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return X.test(a.nodeName)},input:function(a){return W.test(a.nodeName)},button:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:pa(function(){return[0]}),last:pa(function(a,b){return[b-1]}),eq:pa(function(a,b,c){return[c<0?c+b:c]}),even:pa(function(a,b){for(var c=0;c=0;)a.push(d);return a}),gt:pa(function(a,b,c){for(var d=c<0?c+b:c;++d1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function va(a,b,c){for(var d=0,e=b.length;d-1&&(f[j]=!(g[j]=l))}}else r=wa(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):G.apply(g,r)})}function ya(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.relative[" "],i=g?1:0,k=ta(function(a){return a===b},h,!0),l=ta(function(a){return I(b,a)>-1},h,!0),m=[function(a,c,d){var e=!g&&(d||c!==j)||((b=c).nodeType?k(a,c,d):l(a,c,d));return b=null,e}];i1&&ua(m),i>1&&sa(a.slice(0,i-1).concat({value:" "===a[i-2].type?"*":""})).replace(P,"$1"),c,i0,e=a.length>0,f=function(f,g,h,i,k){var 
l,o,q,r=0,s="0",t=f&&[],u=[],v=j,x=f||e&&d.find.TAG("*",k),y=w+=null==v?1:Math.random()||.1,z=x.length;for(k&&(j=g===n||g||k);s!==z&&null!=(l=x[s]);s++){if(e&&l){o=0,g||l.ownerDocument===n||(m(l),h=!p);while(q=a[o++])if(q(l,g||n,h)){i.push(l);break}k&&(w=y)}c&&((l=!q&&l)&&r--,f&&t.push(l))}if(r+=s,c&&s!==r){o=0;while(q=b[o++])q(t,u,g,h);if(f){if(r>0)while(s--)t[s]||u[s]||(u[s]=E.call(i));u=wa(u)}G.apply(i,u),k&&!f&&u.length>0&&r+b.length>1&&ga.uniqueSort(i)}return k&&(w=y,j=v),t};return c?ia(f):f}return h=ga.compile=function(a,b){var c,d=[],e=[],f=A[a+" "];if(!f){b||(b=g(a)),c=b.length;while(c--)f=ya(b[c]),f[u]?d.push(f):e.push(f);f=A(a,za(e,d)),f.selector=a}return f},i=ga.select=function(a,b,e,f){var i,j,k,l,m,n="function"==typeof a&&a,o=!f&&g(a=n.selector||a);if(e=e||[],1===o.length){if(j=o[0]=o[0].slice(0),j.length>2&&"ID"===(k=j[0]).type&&c.getById&&9===b.nodeType&&p&&d.relative[j[1].type]){if(b=(d.find.ID(k.matches[0].replace(_,aa),b)||[])[0],!b)return e;n&&(b=b.parentNode),a=a.slice(j.shift().value.length)}i=V.needsContext.test(a)?0:j.length;while(i--){if(k=j[i],d.relative[l=k.type])break;if((m=d.find[l])&&(f=m(k.matches[0].replace(_,aa),$.test(j[0].type)&&qa(b.parentNode)||b))){if(j.splice(i,1),a=f.length&&sa(j),!a)return G.apply(e,f),e;break}}}return(n||h(a,o))(f,b,!p,e,!b||$.test(a)&&qa(b.parentNode)||b),e},c.sortStable=u.split("").sort(B).join("")===u,c.detectDuplicates=!!l,m(),c.sortDetached=ja(function(a){return 1&a.compareDocumentPosition(n.createElement("fieldset"))}),ja(function(a){return a.innerHTML="","#"===a.firstChild.getAttribute("href")})||ka("type|href|height|width",function(a,b,c){if(!c)return a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),c.attributes&&ja(function(a){return a.innerHTML="",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||ka("value",function(a,b,c){if(!c&&"input"===a.nodeName.toLowerCase())return a.defaultValue}),ja(function(a){return 
null==a.getAttribute("disabled")})||ka(J,function(a,b,c){var d;if(!c)return a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null}),ga}(a);r.find=x,r.expr=x.selectors,r.expr[":"]=r.expr.pseudos,r.uniqueSort=r.unique=x.uniqueSort,r.text=x.getText,r.isXMLDoc=x.isXML,r.contains=x.contains,r.escapeSelector=x.escape;var y=function(a,b,c){var d=[],e=void 0!==c;while((a=a[b])&&9!==a.nodeType)if(1===a.nodeType){if(e&&r(a).is(c))break;d.push(a)}return d},z=function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c},A=r.expr.match.needsContext,B=/^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i,C=/^.[^:#\[\.,]*$/;function D(a,b,c){if(r.isFunction(b))return r.grep(a,function(a,d){return!!b.call(a,d,a)!==c});if(b.nodeType)return r.grep(a,function(a){return a===b!==c});if("string"==typeof b){if(C.test(b))return r.filter(b,a,c);b=r.filter(b,a)}return r.grep(a,function(a){return i.call(b,a)>-1!==c&&1===a.nodeType})}r.filter=function(a,b,c){var d=b[0];return c&&(a=":not("+a+")"),1===b.length&&1===d.nodeType?r.find.matchesSelector(d,a)?[d]:[]:r.find.matches(a,r.grep(b,function(a){return 1===a.nodeType}))},r.fn.extend({find:function(a){var b,c,d=this.length,e=this;if("string"!=typeof a)return this.pushStack(r(a).filter(function(){for(b=0;b1?r.uniqueSort(c):c},filter:function(a){return this.pushStack(D(this,a||[],!1))},not:function(a){return this.pushStack(D(this,a||[],!0))},is:function(a){return!!D(this,"string"==typeof a&&A.test(a)?r(a):a||[],!1).length}});var E,F=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/,G=r.fn.init=function(a,b,c){var e,f;if(!a)return this;if(c=c||E,"string"==typeof a){if(e="<"===a[0]&&">"===a[a.length-1]&&a.length>=3?[null,a,null]:F.exec(a),!e||!e[1]&&b)return!b||b.jquery?(b||c).find(a):this.constructor(b).find(a);if(e[1]){if(b=b instanceof r?b[0]:b,r.merge(this,r.parseHTML(e[1],b&&b.nodeType?b.ownerDocument||b:d,!0)),B.test(e[1])&&r.isPlainObject(b))for(e in 
b)r.isFunction(this[e])?this[e](b[e]):this.attr(e,b[e]);return this}return f=d.getElementById(e[2]),f&&(this[0]=f,this.length=1),this}return a.nodeType?(this[0]=a,this.length=1,this):r.isFunction(a)?void 0!==c.ready?c.ready(a):a(r):r.makeArray(a,this)};G.prototype=r.fn,E=r(d);var H=/^(?:parents|prev(?:Until|All))/,I={children:!0,contents:!0,next:!0,prev:!0};r.fn.extend({has:function(a){var b=r(a,this),c=b.length;return this.filter(function(){for(var a=0;a-1:1===c.nodeType&&r.find.matchesSelector(c,a))){f.push(c);break}return this.pushStack(f.length>1?r.uniqueSort(f):f)},index:function(a){return a?"string"==typeof a?i.call(r(a),this[0]):i.call(this,a.jquery?a[0]:a):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(r.uniqueSort(r.merge(this.get(),r(a,b))))},addBack:function(a){return this.add(null==a?this.prevObject:this.prevObject.filter(a))}});function J(a,b){while((a=a[b])&&1!==a.nodeType);return a}r.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return y(a,"parentNode")},parentsUntil:function(a,b,c){return y(a,"parentNode",c)},next:function(a){return J(a,"nextSibling")},prev:function(a){return J(a,"previousSibling")},nextAll:function(a){return y(a,"nextSibling")},prevAll:function(a){return y(a,"previousSibling")},nextUntil:function(a,b,c){return y(a,"nextSibling",c)},prevUntil:function(a,b,c){return y(a,"previousSibling",c)},siblings:function(a){return z((a.parentNode||{}).firstChild,a)},children:function(a){return z(a.firstChild)},contents:function(a){return a.contentDocument||r.merge([],a.childNodes)}},function(a,b){r.fn[a]=function(c,d){var e=r.map(this,b,c);return"Until"!==a.slice(-5)&&(d=c),d&&"string"==typeof d&&(e=r.filter(d,e)),this.length>1&&(I[a]||r.uniqueSort(e),H.test(a)&&e.reverse()),this.pushStack(e)}});var K=/\S+/g;function L(a){var b={};return r.each(a.match(K)||[],function(a,c){b[c]=!0}),b}r.Callbacks=function(a){a="string"==typeof 
a?L(a):r.extend({},a);var b,c,d,e,f=[],g=[],h=-1,i=function(){for(e=a.once,d=b=!0;g.length;h=-1){c=g.shift();while(++h-1)f.splice(c,1),c<=h&&h--}),this},has:function(a){return a?r.inArray(a,f)>-1:f.length>0},empty:function(){return f&&(f=[]),this},disable:function(){return e=g=[],f=c="",this},disabled:function(){return!f},lock:function(){return e=g=[],c||b||(f=c=""),this},locked:function(){return!!e},fireWith:function(a,c){return e||(c=c||[],c=[a,c.slice?c.slice():c],g.push(c),b||i()),this},fire:function(){return j.fireWith(this,arguments),this},fired:function(){return!!d}};return j};function M(a){return a}function N(a){throw a}function O(a,b,c){var d;try{a&&r.isFunction(d=a.promise)?d.call(a).done(b).fail(c):a&&r.isFunction(d=a.then)?d.call(a,b,c):b.call(void 0,a)}catch(a){c.call(void 0,a)}}r.extend({Deferred:function(b){var c=[["notify","progress",r.Callbacks("memory"),r.Callbacks("memory"),2],["resolve","done",r.Callbacks("once memory"),r.Callbacks("once memory"),0,"resolved"],["reject","fail",r.Callbacks("once memory"),r.Callbacks("once memory"),1,"rejected"]],d="pending",e={state:function(){return d},always:function(){return f.done(arguments).fail(arguments),this},"catch":function(a){return e.then(null,a)},pipe:function(){var a=arguments;return r.Deferred(function(b){r.each(c,function(c,d){var e=r.isFunction(a[d[4]])&&a[d[4]];f[d[1]](function(){var a=e&&e.apply(this,arguments);a&&r.isFunction(a.promise)?a.promise().progress(b.notify).done(b.resolve).fail(b.reject):b[d[0]+"With"](this,e?[a]:arguments)})}),a=null}).promise()},then:function(b,d,e){var f=0;function g(b,c,d,e){return function(){var h=this,i=arguments,j=function(){var a,j;if(!(b=f&&(d!==N&&(h=void 0,i=[a]),c.rejectWith(h,i))}};b?k():(r.Deferred.getStackHook&&(k.stackTrace=r.Deferred.getStackHook()),a.setTimeout(k))}}return 
r.Deferred(function(a){c[0][3].add(g(0,a,r.isFunction(e)?e:M,a.notifyWith)),c[1][3].add(g(0,a,r.isFunction(b)?b:M)),c[2][3].add(g(0,a,r.isFunction(d)?d:N))}).promise()},promise:function(a){return null!=a?r.extend(a,e):e}},f={};return r.each(c,function(a,b){var g=b[2],h=b[5];e[b[1]]=g.add,h&&g.add(function(){d=h},c[3-a][2].disable,c[0][2].lock),g.add(b[3].fire),f[b[0]]=function(){return f[b[0]+"With"](this===f?void 0:this,arguments),this},f[b[0]+"With"]=g.fireWith}),e.promise(f),b&&b.call(f,f),f},when:function(a){var b=arguments.length,c=b,d=Array(c),e=f.call(arguments),g=r.Deferred(),h=function(a){return function(c){d[a]=this,e[a]=arguments.length>1?f.call(arguments):c,--b||g.resolveWith(d,e)}};if(b<=1&&(O(a,g.done(h(c)).resolve,g.reject),"pending"===g.state()||r.isFunction(e[c]&&e[c].then)))return g.then();while(c--)O(e[c],h(c),g.reject);return g.promise()}});var P=/^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/;r.Deferred.exceptionHook=function(b,c){a.console&&a.console.warn&&b&&P.test(b.name)&&a.console.warn("jQuery.Deferred exception: "+b.message,b.stack,c)},r.readyException=function(b){a.setTimeout(function(){throw b})};var Q=r.Deferred();r.fn.ready=function(a){return Q.then(a)["catch"](function(a){r.readyException(a)}),this},r.extend({isReady:!1,readyWait:1,holdReady:function(a){a?r.readyWait++:r.ready(!0)},ready:function(a){(a===!0?--r.readyWait:r.isReady)||(r.isReady=!0,a!==!0&&--r.readyWait>0||Q.resolveWith(d,[r]))}}),r.ready.then=Q.then;function R(){d.removeEventListener("DOMContentLoaded",R),a.removeEventListener("load",R),r.ready()}"complete"===d.readyState||"loading"!==d.readyState&&!d.documentElement.doScroll?a.setTimeout(r.ready):(d.addEventListener("DOMContentLoaded",R),a.addEventListener("load",R));var S=function(a,b,c,d,e,f,g){var h=0,i=a.length,j=null==c;if("object"===r.type(c)){e=!0;for(h in c)S(a,b,h,c[h],!0,f,g)}else if(void 0!==d&&(e=!0, r.isFunction(d)||(g=!0),j&&(g?(b.call(a,d),b=null):(j=b,b=function(a,b,c){return 
j.call(r(a),c)})),b))for(;h1,null,!0)},removeData:function(a){return this.each(function(){W.remove(this,a)})}}),r.extend({queue:function(a,b,c){var d;if(a)return b=(b||"fx")+"queue",d=V.get(a,b),c&&(!d||r.isArray(c)?d=V.access(a,b,r.makeArray(c)):d.push(c)),d||[]},dequeue:function(a,b){b=b||"fx";var c=r.queue(a,b),d=c.length,e=c.shift(),f=r._queueHooks(a,b),g=function(){r.dequeue(a,b)};"inprogress"===e&&(e=c.shift(),d--),e&&("fx"===b&&c.unshift("inprogress"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+"queueHooks";return V.get(a,c)||V.access(a,c,{empty:r.Callbacks("once memory").add(function(){V.remove(a,[b+"queue",c])})})}}),r.fn.extend({queue:function(a,b){var c=2;return"string"!=typeof a&&(b=a,a="fx",c--),arguments.length\x20\t\r\n\f]+)/i,ja=/^$|\/(?:java|ecma)script/i,ka={option:[1,""],thead:[1,"","
"],col:[2,"","
"],tr:[2,"","
"],td:[3,"","
"],_default:[0,"",""]};ka.optgroup=ka.option,ka.tbody=ka.tfoot=ka.colgroup=ka.caption=ka.thead,ka.th=ka.td;function la(a,b){var c="undefined"!=typeof a.getElementsByTagName?a.getElementsByTagName(b||"*"):"undefined"!=typeof a.querySelectorAll?a.querySelectorAll(b||"*"):[];return void 0===b||b&&r.nodeName(a,b)?r.merge([a],c):c}function ma(a,b){for(var c=0,d=a.length;c-1)e&&e.push(f);else if(j=r.contains(f.ownerDocument,f),g=la(l.appendChild(f),"script"),j&&ma(g),c){k=0;while(f=g[k++])ja.test(f.type||"")&&c.push(f)}return l}!function(){var a=d.createDocumentFragment(),b=a.appendChild(d.createElement("div")),c=d.createElement("input");c.setAttribute("type","radio"),c.setAttribute("checked","checked"),c.setAttribute("name","t"),b.appendChild(c),o.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,b.innerHTML="",o.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue}();var pa=d.documentElement,qa=/^key/,ra=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,sa=/^([^.]*)(?:\.(.+)|)/;function ta(){return!0}function ua(){return!1}function va(){try{return d.activeElement}catch(a){}}function wa(a,b,c,d,e,f){var g,h;if("object"==typeof b){"string"!=typeof c&&(d=d||c,c=void 0);for(h in b)wa(a,h,c,d,b[h],f);return a}if(null==d&&null==e?(e=c,d=c=void 0):null==e&&("string"==typeof c?(e=d,d=void 0):(e=d,d=c,c=void 0)),e===!1)e=ua;else if(!e)return a;return 1===f&&(g=e,e=function(a){return r().off(a),g.apply(this,arguments)},e.guid=g.guid||(g.guid=r.guid++)),a.each(function(){r.event.add(this,b,e,d,c)})}r.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,n,o,p,q=V.get(a);if(q){c.handler&&(f=c,c=f.handler,e=f.selector),e&&r.find.matchesSelector(pa,e),c.guid||(c.guid=r.guid++),(i=q.events)||(i=q.events={}),(g=q.handle)||(g=q.handle=function(b){return"undefined"!=typeof r&&r.event.triggered!==b.type?r.event.dispatch.apply(a,arguments):void 
0}),b=(b||"").match(K)||[""],j=b.length;while(j--)h=sa.exec(b[j])||[],n=p=h[1],o=(h[2]||"").split(".").sort(),n&&(l=r.event.special[n]||{},n=(e?l.delegateType:l.bindType)||n,l=r.event.special[n]||{},k=r.extend({type:n,origType:p,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&r.expr.match.needsContext.test(e),namespace:o.join(".")},f),(m=i[n])||(m=i[n]=[],m.delegateCount=0,l.setup&&l.setup.call(a,d,o,g)!==!1||a.addEventListener&&a.addEventListener(n,g)),l.add&&(l.add.call(a,k),k.handler.guid||(k.handler.guid=c.guid)),e?m.splice(m.delegateCount++,0,k):m.push(k),r.event.global[n]=!0)}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,n,o,p,q=V.hasData(a)&&V.get(a);if(q&&(i=q.events)){b=(b||"").match(K)||[""],j=b.length;while(j--)if(h=sa.exec(b[j])||[],n=p=h[1],o=(h[2]||"").split(".").sort(),n){l=r.event.special[n]||{},n=(d?l.delegateType:l.bindType)||n,m=i[n]||[],h=h[2]&&new RegExp("(^|\\.)"+o.join("\\.(?:.*\\.|)")+"(\\.|$)"),g=f=m.length;while(f--)k=m[f],!e&&p!==k.origType||c&&c.guid!==k.guid||h&&!h.test(k.namespace)||d&&d!==k.selector&&("**"!==d||!k.selector)||(m.splice(f,1),k.selector&&m.delegateCount--,l.remove&&l.remove.call(a,k));g&&!m.length&&(l.teardown&&l.teardown.call(a,o,q.handle)!==!1||r.removeEvent(a,n,q.handle),delete i[n])}else for(n in i)r.event.remove(a,n+b[j],c,d,!0);r.isEmptyObject(i)&&V.remove(a,"handle events")}},dispatch:function(a){var b=r.event.fix(a),c,d,e,f,g,h,i=new Array(arguments.length),j=(V.get(this,"events")||{})[b.type]||[],k=r.event.special[b.type]||{};for(i[0]=b,c=1;c-1:r.find(e,this,null,[i]).length),d[e]&&d.push(f);d.length&&g.push({elem:i,handlers:d})}return h\x20\t\r\n\f]*)[^>]*)\/>/gi,ya=/\s*$/g;function Ca(a,b){return r.nodeName(a,"table")&&r.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a:a}function Da(a){return a.type=(null!==a.getAttribute("type"))+"/"+a.type,a}function Ea(a){var b=Aa.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function Fa(a,b){var 
c,d,e,f,g,h,i,j;if(1===b.nodeType){if(V.hasData(a)&&(f=V.access(a),g=V.set(b,f),j=f.events)){delete g.handle,g.events={};for(e in j)for(c=0,d=j[e].length;c1&&"string"==typeof q&&!o.checkClone&&za.test(q))return a.each(function(e){var f=a.eq(e);s&&(b[0]=q.call(this,e,f.html())),Ha(f,b,c,d)});if(m&&(e=oa(b,a[0].ownerDocument,!1,a,d),f=e.firstChild,1===e.childNodes.length&&(e=f),f||d)){for(h=r.map(la(e,"script"),Da),i=h.length;l")},clone:function(a,b,c){var d,e,f,g,h=a.cloneNode(!0),i=r.contains(a.ownerDocument,a);if(!(o.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||r.isXMLDoc(a)))for(g=la(h),f=la(a),d=0,e=f.length;d0&&ma(g,!i&&la(a,"script")),h},cleanData:function(a){for(var b,c,d,e=r.event.special,f=0;void 0!==(c=a[f]);f++)if(T(c)){if(b=c[V.expando]){if(b.events)for(d in b.events)e[d]?r.event.remove(c,d):r.removeEvent(c,d,b.handle);c[V.expando]=void 0}c[W.expando]&&(c[W.expando]=void 0)}}}),r.fn.extend({detach:function(a){return Ia(this,a,!0)},remove:function(a){return Ia(this,a)},text:function(a){return S(this,function(a){return void 0===a?r.text(this):this.empty().each(function(){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||(this.textContent=a)})},null,a,arguments.length)},append:function(){return Ha(this,arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=Ca(this,a);b.appendChild(a)}})},prepend:function(){return Ha(this,arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=Ca(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return Ha(this,arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return Ha(this,arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},empty:function(){for(var a,b=0;null!=(a=this[b]);b++)1===a.nodeType&&(r.cleanData(la(a,!1)),a.textContent="");return this},clone:function(a,b){return a=null!=a&&a,b=null==b?a:b,this.map(function(){return 
r.clone(this,a,b)})},html:function(a){return S(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a&&1===b.nodeType)return b.innerHTML;if("string"==typeof a&&!ya.test(a)&&!ka[(ia.exec(a)||["",""])[1].toLowerCase()]){a=r.htmlPrefilter(a);try{for(;c1)}});function Xa(a,b,c,d,e){return new Xa.prototype.init(a,b,c,d,e)}r.Tween=Xa,Xa.prototype={constructor:Xa,init:function(a,b,c,d,e,f){this.elem=a,this.prop=c,this.easing=e||r.easing._default,this.options=b,this.start=this.now=this.cur(),this.end=d,this.unit=f||(r.cssNumber[c]?"":"px")},cur:function(){var a=Xa.propHooks[this.prop];return a&&a.get?a.get(this):Xa.propHooks._default.get(this)},run:function(a){var b,c=Xa.propHooks[this.prop];return this.options.duration?this.pos=b=r.easing[this.easing](a,this.options.duration*a,0,1,this.options.duration):this.pos=b=a,this.now=(this.end-this.start)*b+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),c&&c.set?c.set(this):Xa.propHooks._default.set(this),this}},Xa.prototype.init.prototype=Xa.prototype,Xa.propHooks={_default:{get:function(a){var b;return 1!==a.elem.nodeType||null!=a.elem[a.prop]&&null==a.elem.style[a.prop]?a.elem[a.prop]:(b=r.css(a.elem,a.prop,""),b&&"auto"!==b?b:0)},set:function(a){r.fx.step[a.prop]?r.fx.step[a.prop](a):1!==a.elem.nodeType||null==a.elem.style[r.cssProps[a.prop]]&&!r.cssHooks[a.prop]?a.elem[a.prop]=a.now:r.style(a.elem,a.prop,a.now+a.unit)}}},Xa.propHooks.scrollTop=Xa.propHooks.scrollLeft={set:function(a){a.elem.nodeType&&a.elem.parentNode&&(a.elem[a.prop]=a.now)}},r.easing={linear:function(a){return a},swing:function(a){return.5-Math.cos(a*Math.PI)/2},_default:"swing"},r.fx=Xa.prototype.init,r.fx.step={};var Ya,Za,$a=/^(?:toggle|show|hide)$/,_a=/queueHooks$/;function ab(){Za&&(a.requestAnimationFrame(ab),r.fx.tick())}function bb(){return a.setTimeout(function(){Ya=void 0}),Ya=r.now()}function cb(a,b){var c,d=0,e={height:a};for(b=b?1:0;d<4;d+=2-b)c=aa[d],e["margin"+c]=e["padding"+c]=a;return 
b&&(e.opacity=e.width=a),e}function db(a,b,c){for(var d,e=(gb.tweeners[b]||[]).concat(gb.tweeners["*"]),f=0,g=e.length;f1)},removeAttr:function(a){return this.each(function(){r.removeAttr(this,a)})}}),r.extend({attr:function(a,b,c){var d,e,f=a.nodeType;if(3!==f&&8!==f&&2!==f)return"undefined"==typeof a.getAttribute?r.prop(a,b,c):(1===f&&r.isXMLDoc(a)||(e=r.attrHooks[b.toLowerCase()]||(r.expr.match.bool.test(b)?hb:void 0)),void 0!==c?null===c?void r.removeAttr(a,b):e&&"set"in e&&void 0!==(d=e.set(a,c,b))?d:(a.setAttribute(b,c+""),c):e&&"get"in e&&null!==(d=e.get(a,b))?d:(d=r.find.attr(a,b),null==d?void 0:d))},attrHooks:{type:{set:function(a,b){if(!o.radioValue&&"radio"===b&&r.nodeName(a,"input")){var c=a.value;return a.setAttribute("type",b),c&&(a.value=c),b}}}},removeAttr:function(a,b){var c,d=0,e=b&&b.match(K); if(e&&1===a.nodeType)while(c=e[d++])a.removeAttribute(c)}}),hb={set:function(a,b,c){return b===!1?r.removeAttr(a,c):a.setAttribute(c,c),c}},r.each(r.expr.match.bool.source.match(/\w+/g),function(a,b){var c=ib[b]||r.find.attr;ib[b]=function(a,b,d){var e,f,g=b.toLowerCase();return d||(f=ib[g],ib[g]=e,e=null!=c(a,b,d)?g:null,ib[g]=f),e}});var jb=/^(?:input|select|textarea|button)$/i,kb=/^(?:a|area)$/i;r.fn.extend({prop:function(a,b){return S(this,r.prop,a,b,arguments.length>1)},removeProp:function(a){return this.each(function(){delete this[r.propFix[a]||a]})}}),r.extend({prop:function(a,b,c){var d,e,f=a.nodeType;if(3!==f&&8!==f&&2!==f)return 1===f&&r.isXMLDoc(a)||(b=r.propFix[b]||b,e=r.propHooks[b]),void 0!==c?e&&"set"in e&&void 0!==(d=e.set(a,c,b))?d:a[b]=c:e&&"get"in e&&null!==(d=e.get(a,b))?d:a[b]},propHooks:{tabIndex:{get:function(a){var b=r.find.attr(a,"tabindex");return b?parseInt(b,10):jb.test(a.nodeName)||kb.test(a.nodeName)&&a.href?0:-1}}},propFix:{"for":"htmlFor","class":"className"}}),o.optSelected||(r.propHooks.selected={get:function(a){var b=a.parentNode;return b&&b.parentNode&&b.parentNode.selectedIndex,null},set:function(a){var 
b=a.parentNode;b&&(b.selectedIndex,b.parentNode&&b.parentNode.selectedIndex)}}),r.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){r.propFix[this.toLowerCase()]=this});var lb=/[\t\r\n\f]/g;function mb(a){return a.getAttribute&&a.getAttribute("class")||""}r.fn.extend({addClass:function(a){var b,c,d,e,f,g,h,i=0;if(r.isFunction(a))return this.each(function(b){r(this).addClass(a.call(this,b,mb(this)))});if("string"==typeof a&&a){b=a.match(K)||[];while(c=this[i++])if(e=mb(c),d=1===c.nodeType&&(" "+e+" ").replace(lb," ")){g=0;while(f=b[g++])d.indexOf(" "+f+" ")<0&&(d+=f+" ");h=r.trim(d),e!==h&&c.setAttribute("class",h)}}return this},removeClass:function(a){var b,c,d,e,f,g,h,i=0;if(r.isFunction(a))return this.each(function(b){r(this).removeClass(a.call(this,b,mb(this)))});if(!arguments.length)return this.attr("class","");if("string"==typeof a&&a){b=a.match(K)||[];while(c=this[i++])if(e=mb(c),d=1===c.nodeType&&(" "+e+" ").replace(lb," ")){g=0;while(f=b[g++])while(d.indexOf(" "+f+" ")>-1)d=d.replace(" "+f+" "," ");h=r.trim(d),e!==h&&c.setAttribute("class",h)}}return this},toggleClass:function(a,b){var c=typeof a;return"boolean"==typeof b&&"string"===c?b?this.addClass(a):this.removeClass(a):r.isFunction(a)?this.each(function(c){r(this).toggleClass(a.call(this,c,mb(this),b),b)}):this.each(function(){var b,d,e,f;if("string"===c){d=0,e=r(this),f=a.match(K)||[];while(b=f[d++])e.hasClass(b)?e.removeClass(b):e.addClass(b)}else void 0!==a&&"boolean"!==c||(b=mb(this),b&&V.set(this,"__className__",b),this.setAttribute&&this.setAttribute("class",b||a===!1?"":V.get(this,"__className__")||""))})},hasClass:function(a){var b,c,d=0;b=" "+a+" ";while(c=this[d++])if(1===c.nodeType&&(" "+mb(c)+" ").replace(lb," ").indexOf(b)>-1)return!0;return!1}});var nb=/\r/g,ob=/[\x20\t\r\n\f]+/g;r.fn.extend({val:function(a){var b,c,d,e=this[0];{if(arguments.length)return d=r.isFunction(a),this.each(function(c){var 
e;1===this.nodeType&&(e=d?a.call(this,c,r(this).val()):a,null==e?e="":"number"==typeof e?e+="":r.isArray(e)&&(e=r.map(e,function(a){return null==a?"":a+""})),b=r.valHooks[this.type]||r.valHooks[this.nodeName.toLowerCase()],b&&"set"in b&&void 0!==b.set(this,e,"value")||(this.value=e))});if(e)return b=r.valHooks[e.type]||r.valHooks[e.nodeName.toLowerCase()],b&&"get"in b&&void 0!==(c=b.get(e,"value"))?c:(c=e.value,"string"==typeof c?c.replace(nb,""):null==c?"":c)}}}),r.extend({valHooks:{option:{get:function(a){var b=r.find.attr(a,"value");return null!=b?b:r.trim(r.text(a)).replace(ob," ")}},select:{get:function(a){for(var b,c,d=a.options,e=a.selectedIndex,f="select-one"===a.type,g=f?null:[],h=f?e+1:d.length,i=e<0?h:f?e:0;i-1)&&(c=!0);return c||(a.selectedIndex=-1),f}}}}),r.each(["radio","checkbox"],function(){r.valHooks[this]={set:function(a,b){if(r.isArray(b))return a.checked=r.inArray(r(a).val(),b)>-1}},o.checkOn||(r.valHooks[this].get=function(a){return null===a.getAttribute("value")?"on":a.value})});var pb=/^(?:focusinfocus|focusoutblur)$/;r.extend(r.event,{trigger:function(b,c,e,f){var g,h,i,j,k,m,n,o=[e||d],p=l.call(b,"type")?b.type:b,q=l.call(b,"namespace")?b.namespace.split("."):[];if(h=i=e=e||d,3!==e.nodeType&&8!==e.nodeType&&!pb.test(p+r.event.triggered)&&(p.indexOf(".")>-1&&(q=p.split("."),p=q.shift(),q.sort()),k=p.indexOf(":")<0&&"on"+p,b=b[r.expando]?b:new r.Event(p,"object"==typeof b&&b),b.isTrigger=f?2:3,b.namespace=q.join("."),b.rnamespace=b.namespace?new RegExp("(^|\\.)"+q.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 
0,b.target||(b.target=e),c=null==c?[b]:r.makeArray(c,[b]),n=r.event.special[p]||{},f||!n.trigger||n.trigger.apply(e,c)!==!1)){if(!f&&!n.noBubble&&!r.isWindow(e)){for(j=n.delegateType||p,pb.test(j+p)||(h=h.parentNode);h;h=h.parentNode)o.push(h),i=h;i===(e.ownerDocument||d)&&o.push(i.defaultView||i.parentWindow||a)}g=0;while((h=o[g++])&&!b.isPropagationStopped())b.type=g>1?j:n.bindType||p,m=(V.get(h,"events")||{})[b.type]&&V.get(h,"handle"),m&&m.apply(h,c),m=k&&h[k],m&&m.apply&&T(h)&&(b.result=m.apply(h,c),b.result===!1&&b.preventDefault());return b.type=p,f||b.isDefaultPrevented()||n._default&&n._default.apply(o.pop(),c)!==!1||!T(e)||k&&r.isFunction(e[p])&&!r.isWindow(e)&&(i=e[k],i&&(e[k]=null),r.event.triggered=p,e[p](),r.event.triggered=void 0,i&&(e[k]=i)),b.result}},simulate:function(a,b,c){var d=r.extend(new r.Event,c,{type:a,isSimulated:!0});r.event.trigger(d,null,b)}}),r.fn.extend({trigger:function(a,b){return this.each(function(){r.event.trigger(a,b,this)})},triggerHandler:function(a,b){var c=this[0];if(c)return r.event.trigger(a,b,c,!0)}}),r.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(a,b){r.fn[b]=function(a,c){return arguments.length>0?this.on(b,null,a,c):this.trigger(b)}}),r.fn.extend({hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)}}),o.focusin="onfocusin"in a,o.focusin||r.each({focus:"focusin",blur:"focusout"},function(a,b){var c=function(a){r.event.simulate(b,a.target,r.event.fix(a))};r.event.special[b]={setup:function(){var d=this.ownerDocument||this,e=V.access(d,b);e||d.addEventListener(a,c,!0),V.access(d,b,(e||0)+1)},teardown:function(){var d=this.ownerDocument||this,e=V.access(d,b)-1;e?V.access(d,b,e):(d.removeEventListener(a,c,!0),V.remove(d,b))}}});var qb=a.location,rb=r.now(),sb=/\?/;r.parseXML=function(b){var c;if(!b||"string"!=typeof b)return null;try{c=(new 
a.DOMParser).parseFromString(b,"text/xml")}catch(d){c=void 0}return c&&!c.getElementsByTagName("parsererror").length||r.error("Invalid XML: "+b),c};var tb=/\[\]$/,ub=/\r?\n/g,vb=/^(?:submit|button|image|reset|file)$/i,wb=/^(?:input|select|textarea|keygen)/i;function xb(a,b,c,d){var e;if(r.isArray(b))r.each(b,function(b,e){c||tb.test(a)?d(a,e):xb(a+"["+("object"==typeof e&&null!=e?b:"")+"]",e,c,d)});else if(c||"object"!==r.type(b))d(a,b);else for(e in b)xb(a+"["+e+"]",b[e],c,d)}r.param=function(a,b){var c,d=[],e=function(a,b){var c=r.isFunction(b)?b():b;d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(null==c?"":c)};if(r.isArray(a)||a.jquery&&!r.isPlainObject(a))r.each(a,function(){e(this.name,this.value)});else for(c in a)xb(c,a[c],b,e);return d.join("&")},r.fn.extend({serialize:function(){return r.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var a=r.prop(this,"elements");return a?r.makeArray(a):this}).filter(function(){var a=this.type;return this.name&&!r(this).is(":disabled")&&wb.test(this.nodeName)&&!vb.test(a)&&(this.checked||!ha.test(a))}).map(function(a,b){var c=r(this).val();return null==c?null:r.isArray(c)?r.map(c,function(a){return{name:b.name,value:a.replace(ub,"\r\n")}}):{name:b.name,value:c.replace(ub,"\r\n")}}).get()}});var yb=/%20/g,zb=/#.*$/,Ab=/([?&])_=[^&]*/,Bb=/^(.*?):[ \t]*([^\r\n]*)$/gm,Cb=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,Db=/^(?:GET|HEAD)$/,Eb=/^\/\//,Fb={},Gb={},Hb="*/".concat("*"),Ib=d.createElement("a");Ib.href=qb.href;function Jb(a){return function(b,c){"string"!=typeof b&&(c=b,b="*");var d,e=0,f=b.toLowerCase().match(K)||[];if(r.isFunction(c))while(d=f[e++])"+"===d[0]?(d=d.slice(1)||"*",(a[d]=a[d]||[]).unshift(c)):(a[d]=a[d]||[]).push(c)}}function Kb(a,b,c,d){var e={},f=a===Gb;function g(h){var i;return e[h]=!0,r.each(a[h]||[],function(a,h){var j=h(b,c,d);return"string"!=typeof j||f||e[j]?f?!(i=j):void 0:(b.dataTypes.unshift(j),g(j),!1)}),i}return 
g(b.dataTypes[0])||!e["*"]&&g("*")}function Lb(a,b){var c,d,e=r.ajaxSettings.flatOptions||{};for(c in b)void 0!==b[c]&&((e[c]?a:d||(d={}))[c]=b[c]);return d&&r.extend(!0,a,d),a}function Mb(a,b,c){var d,e,f,g,h=a.contents,i=a.dataTypes;while("*"===i[0])i.shift(),void 0===d&&(d=a.mimeType||b.getResponseHeader("Content-Type"));if(d)for(e in h)if(h[e]&&h[e].test(d)){i.unshift(e);break}if(i[0]in c)f=i[0];else{for(e in c){if(!i[0]||a.converters[e+" "+i[0]]){f=e;break}g||(g=e)}f=f||g}if(f)return f!==i[0]&&i.unshift(f),c[f]}function Nb(a,b,c,d){var e,f,g,h,i,j={},k=a.dataTypes.slice();if(k[1])for(g in a.converters)j[g.toLowerCase()]=a.converters[g];f=k.shift();while(f)if(a.responseFields[f]&&(c[a.responseFields[f]]=b),!i&&d&&a.dataFilter&&(b=a.dataFilter(b,a.dataType)),i=f,f=k.shift())if("*"===f)f=i;else if("*"!==i&&i!==f){if(g=j[i+" "+f]||j["* "+f],!g)for(e in j)if(h=e.split(" "),h[1]===f&&(g=j[i+" "+h[0]]||j["* "+h[0]])){g===!0?g=j[e]:j[e]!==!0&&(f=h[0],k.unshift(h[1]));break}if(g!==!0)if(g&&a["throws"])b=g(b);else try{b=g(b)}catch(l){return{state:"parsererror",error:g?l:"No conversion from "+i+" to "+f}}}return{state:"success",data:b}}r.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:qb.href,type:"GET",isLocal:Cb.test(qb.protocol),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":Hb,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/\bxml\b/,html:/\bhtml/,json:/\bjson\b/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":JSON.parse,"text xml":r.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(a,b){return b?Lb(Lb(a,r.ajaxSettings),b):Lb(r.ajaxSettings,a)},ajaxPrefilter:Jb(Fb),ajaxTransport:Jb(Gb),ajax:function(b,c){"object"==typeof b&&(c=b,b=void 0),c=c||{};var 
e,f,g,h,i,j,k,l,m,n,o=r.ajaxSetup({},c),p=o.context||o,q=o.context&&(p.nodeType||p.jquery)?r(p):r.event,s=r.Deferred(),t=r.Callbacks("once memory"),u=o.statusCode||{},v={},w={},x="canceled",y={readyState:0,getResponseHeader:function(a){var b;if(k){if(!h){h={};while(b=Bb.exec(g))h[b[1].toLowerCase()]=b[2]}b=h[a.toLowerCase()]}return null==b?null:b},getAllResponseHeaders:function(){return k?g:null},setRequestHeader:function(a,b){return null==k&&(a=w[a.toLowerCase()]=w[a.toLowerCase()]||a,v[a]=b),this},overrideMimeType:function(a){return null==k&&(o.mimeType=a),this},statusCode:function(a){var b;if(a)if(k)y.always(a[y.status]);else for(b in a)u[b]=[u[b],a[b]];return this},abort:function(a){var b=a||x;return e&&e.abort(b),A(0,b),this}};if(s.promise(y),o.url=((b||o.url||qb.href)+"").replace(Eb,qb.protocol+"//"),o.type=c.method||c.type||o.method||o.type,o.dataTypes=(o.dataType||"*").toLowerCase().match(K)||[""],null==o.crossDomain){j=d.createElement("a");try{j.href=o.url,j.href=j.href,o.crossDomain=Ib.protocol+"//"+Ib.host!=j.protocol+"//"+j.host}catch(z){o.crossDomain=!0}}if(o.data&&o.processData&&"string"!=typeof o.data&&(o.data=r.param(o.data,o.traditional)),Kb(Fb,o,c,y),k)return y;l=r.event&&o.global,l&&0===r.active++&&r.event.trigger("ajaxStart"),o.type=o.type.toUpperCase(),o.hasContent=!Db.test(o.type),f=o.url.replace(zb,""),o.hasContent?o.data&&o.processData&&0===(o.contentType||"").indexOf("application/x-www-form-urlencoded")&&(o.data=o.data.replace(yb,"+")):(n=o.url.slice(f.length),o.data&&(f+=(sb.test(f)?"&":"?")+o.data,delete o.data),o.cache===!1&&(f=f.replace(Ab,""),n=(sb.test(f)?"&":"?")+"_="+rb++ 
+n),o.url=f+n),o.ifModified&&(r.lastModified[f]&&y.setRequestHeader("If-Modified-Since",r.lastModified[f]),r.etag[f]&&y.setRequestHeader("If-None-Match",r.etag[f])),(o.data&&o.hasContent&&o.contentType!==!1||c.contentType)&&y.setRequestHeader("Content-Type",o.contentType),y.setRequestHeader("Accept",o.dataTypes[0]&&o.accepts[o.dataTypes[0]]?o.accepts[o.dataTypes[0]]+("*"!==o.dataTypes[0]?", "+Hb+"; q=0.01":""):o.accepts["*"]);for(m in o.headers)y.setRequestHeader(m,o.headers[m]);if(o.beforeSend&&(o.beforeSend.call(p,y,o)===!1||k))return y.abort();if(x="abort",t.add(o.complete),y.done(o.success),y.fail(o.error),e=Kb(Gb,o,c,y)){if(y.readyState=1,l&&q.trigger("ajaxSend",[y,o]),k)return y;o.async&&o.timeout>0&&(i=a.setTimeout(function(){y.abort("timeout")},o.timeout));try{k=!1,e.send(v,A)}catch(z){if(k)throw z;A(-1,z)}}else A(-1,"No Transport");function A(b,c,d,h){var j,m,n,v,w,x=c;k||(k=!0,i&&a.clearTimeout(i),e=void 0,g=h||"",y.readyState=b>0?4:0,j=b>=200&&b<300||304===b,d&&(v=Mb(o,y,d)),v=Nb(o,v,y,j),j?(o.ifModified&&(w=y.getResponseHeader("Last-Modified"),w&&(r.lastModified[f]=w),w=y.getResponseHeader("etag"),w&&(r.etag[f]=w)),204===b||"HEAD"===o.type?x="nocontent":304===b?x="notmodified":(x=v.state,m=v.data,n=v.error,j=!n)):(n=x,!b&&x||(x="error",b<0&&(b=0))),y.status=b,y.statusText=(c||x)+"",j?s.resolveWith(p,[m,x,y]):s.rejectWith(p,[y,x,n]),y.statusCode(u),u=void 0,l&&q.trigger(j?"ajaxSuccess":"ajaxError",[y,o,j?m:n]),t.fireWith(p,[y,x]),l&&(q.trigger("ajaxComplete",[y,o]),--r.active||r.event.trigger("ajaxStop")))}return y},getJSON:function(a,b,c){return r.get(a,b,c,"json")},getScript:function(a,b){return r.get(a,void 0,b,"script")}}),r.each(["get","post"],function(a,b){r[b]=function(a,c,d,e){return r.isFunction(c)&&(e=e||d,d=c,c=void 0),r.ajax(r.extend({url:a,type:b,dataType:e,data:c,success:d},r.isPlainObject(a)&&a))}}),r._evalUrl=function(a){return 
r.ajax({url:a,type:"GET",dataType:"script",cache:!0,async:!1,global:!1,"throws":!0})},r.fn.extend({wrapAll:function(a){var b;return this[0]&&(r.isFunction(a)&&(a=a.call(this[0])),b=r(a,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstElementChild)a=a.firstElementChild;return a}).append(this)),this},wrapInner:function(a){return r.isFunction(a)?this.each(function(b){r(this).wrapInner(a.call(this,b))}):this.each(function(){var b=r(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=r.isFunction(a);return this.each(function(c){r(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(a){return this.parent(a).not("body").each(function(){r(this).replaceWith(this.childNodes)}),this}}),r.expr.pseudos.hidden=function(a){return!r.expr.pseudos.visible(a)},r.expr.pseudos.visible=function(a){return!!(a.offsetWidth||a.offsetHeight||a.getClientRects().length)},r.ajaxSettings.xhr=function(){try{return new a.XMLHttpRequest}catch(b){}};var Ob={0:200,1223:204},Pb=r.ajaxSettings.xhr();o.cors=!!Pb&&"withCredentials"in Pb,o.ajax=Pb=!!Pb,r.ajaxTransport(function(b){var c,d;if(o.cors||Pb&&!b.crossDomain)return{send:function(e,f){var g,h=b.xhr();if(h.open(b.type,b.url,b.async,b.username,b.password),b.xhrFields)for(g in b.xhrFields)h[g]=b.xhrFields[g];b.mimeType&&h.overrideMimeType&&h.overrideMimeType(b.mimeType),b.crossDomain||e["X-Requested-With"]||(e["X-Requested-With"]="XMLHttpRequest");for(g in e)h.setRequestHeader(g,e[g]);c=function(a){return function(){c&&(c=d=h.onload=h.onerror=h.onabort=h.onreadystatechange=null,"abort"===a?h.abort():"error"===a?"number"!=typeof h.status?f(0,"error"):f(h.status,h.statusText):f(Ob[h.status]||h.status,h.statusText,"text"!==(h.responseType||"text")||"string"!=typeof h.responseText?{binary:h.response}:{text:h.responseText},h.getAllResponseHeaders()))}},h.onload=c(),d=h.onerror=c("error"),void 
0!==h.onabort?h.onabort=d:h.onreadystatechange=function(){4===h.readyState&&a.setTimeout(function(){c&&d()})},c=c("abort");try{h.send(b.hasContent&&b.data||null)}catch(i){if(c)throw i}},abort:function(){c&&c()}}}),r.ajaxPrefilter(function(a){a.crossDomain&&(a.contents.script=!1)}),r.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/\b(?:java|ecma)script\b/},converters:{"text script":function(a){return r.globalEval(a),a}}}),r.ajaxPrefilter("script",function(a){void 0===a.cache&&(a.cache=!1),a.crossDomain&&(a.type="GET")}),r.ajaxTransport("script",function(a){if(a.crossDomain){var b,c;return{send:function(e,f){b=r("

Index

_ | A | B | C | D | E | F | G | H | I | J | L | M | N | O | P | Q | R | S | T | U | V | W

_

A

B

C

D

E

F

G

H

I

J

L

M

N

O

P

Q

R

S

T

U

V

W

PyGreSQL-5.1/docs/_build/html/py-modindex.html0000644000175100077410000001017513470245537021212 0ustar darcypyg00000000000000 Python Module Index — PyGreSQL 5.1

Python Module Index

p
 
p
pg
pgdb
PyGreSQL-5.1/docs/_build/html/about.html0000644000175100077410000001721413470245533020064 0ustar darcypyg00000000000000 About PyGreSQL — PyGreSQL 5.1

About PyGreSQL¶

PyGreSQL is an open-source Python module that interfaces to a PostgreSQL database. It embeds the PostgreSQL query library to allow easy use of the powerful PostgreSQL features from a Python script.

This software is copyright © 1995, Pascal Andre.
Further modifications are copyright © 1997-2008 by D’Arcy J.M. Cain.
Further modifications are copyright © 2009-2019 by the PyGreSQL team.
For licensing details, see the full Copyright notice.

PostgreSQL is a highly scalable, SQL compliant, open source object-relational database management system. With more than 20 years of development history, it is quickly becoming the de facto database for enterprise level open source solutions. Best of all, PostgreSQL’s source code is available under the most liberal open source license: the BSD license.

Python Python is an interpreted, interactive, object-oriented programming language. It is often compared to Tcl, Perl, Scheme or Java. Python combines remarkable power with very clear syntax. It has modules, classes, exceptions, very high level dynamic data types, and dynamic typing. There are interfaces to many system calls and libraries, as well as to various windowing systems (X11, Motif, Tk, Mac, MFC). New built-in modules are easily written in C or C++. Python is also usable as an extension language for applications that need a programmable interface. The Python implementation is copyrighted but freely usable and distributable, even for commercial use.

PyGreSQL is a Python module that interfaces to a PostgreSQL database. It embeds the PostgreSQL query library to allow easy use of the powerful PostgreSQL features from a Python script or application.

PyGreSQL is developed and tested on a NetBSD system, but it also runs on most other platforms where PostgreSQL and Python are running. It is based on the PyGres95 code written by Pascal Andre (andre@chimay.via.ecp.fr). D’Arcy (darcy@druid.net) renamed it to PyGreSQL starting with version 2.0 and serves as the “BDFL” of PyGreSQL.

The current version PyGreSQL 5.1 needs PostgreSQL 9.0 to 9.6 or 10 or 11, and Python 2.6, 2.7 or 3.3 to 3.7. If you need to support older PostgreSQL versions or older Python 2.x versions, you can resort to the PyGreSQL 4.x versions that still support them.

PyGreSQL-5.1/docs/_build/html/index.html0000644000175100077410000001530013470245537020057 0ustar darcypyg00000000000000 Welcome to PyGreSQL — PyGreSQL 5.1 PyGreSQL-5.1/docs/_build/html/community/0000755000175100077410000000000013470245541020102 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/_build/html/community/index.html0000644000175100077410000002320013470245533022075 0ustar darcypyg00000000000000 PyGreSQL Development and Support — PyGreSQL 5.1

PyGreSQL Development and Support¶

PyGreSQL is an open-source project created by a group of volunteers. The project and the development infrastructure are currently maintained by D’Arcy J.M. Cain. We would be glad to welcome more contributors so that PyGreSQL can be further developed, modernized and improved.

Mailing list¶

You can join the mailing list to discuss future development of the PyGreSQL interface or if you have questions or problems with PyGreSQL that are not covered in the documentation.

This is usually a low volume list except when there are new features being added.

Access to the source repository¶

We are using a central Subversion source code repository for PyGreSQL.

The current trunk of the repository can be checked out with the command:

svn co svn://svn.pygresql.org/pygresql/trunk

You can also browse through the repository using the PyGreSQL Trac browser.

Bug Tracker¶

We are using Trac as an issue tracker.

Track tickets are usually entered after discussion on the mailing list, but you may also request an account for the issue tracker and add or process tickets if you want to get more involved into the development of the project. You can use the following links to get an overview:

Support¶

Python:
see http://www.python.org/community/
PostgreSQL:
see http://www.postgresql.org/support/
PyGreSQL:

Join the PyGreSQL mailing list if you need help regarding PyGreSQL.

Please also send context diffs there, if you would like to propose changes.

Please note that messages to individual developers will generally not be answered directly. All questions, comments and code changes must be submitted to the mailing list for peer review and archiving purposes.

Project home sites¶

Python:
http://www.python.org
PostgreSQL:
http://www.postgresql.org
PyGreSQL:
http://www.pygresql.org
PyGreSQL-5.1/docs/_build/html/search.html0000644000175100077410000000771313470245537020226 0ustar darcypyg00000000000000 Search — PyGreSQL 5.1

Search

Please activate JavaScript to enable the search functionality.

From here you can search these documents. Enter your search words into the box below and click "search". Note that the search function will automatically search for all of the words. Pages containing fewer words won't appear in the result list.

PyGreSQL-5.1/docs/_build/html/contents/0000755000175100077410000000000013470245541017713 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/_build/html/contents/postgres/0000755000175100077410000000000013470245541021561 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/_build/html/contents/postgres/index.html0000644000175100077410000002234113470245536023564 0ustar darcypyg00000000000000 A PostgreSQL Primer — PyGreSQL 5.1 PyGreSQL-5.1/docs/_build/html/contents/postgres/func.html0000644000175100077410000004523113470245536023413 0ustar darcypyg00000000000000 Examples for using SQL functions — PyGreSQL 5.1

Examples for using SQL functions¶

We assume that you have already created a connection to the PostgreSQL database, as explained in the Basic examples:

>>> from pg import DB
>>> db = DB()
>>> query = db.query

Creating SQL Functions on Base Types¶

A CREATE FUNCTION statement lets you create a new function that can be used in expressions (in SELECT, INSERT, etc.). We will start with functions that return values of base types.

Let’s create a simple SQL function that takes no arguments and returns 1:

>>> query("""CREATE FUNCTION one() RETURNS int4
...     AS 'SELECT 1 as ONE' LANGUAGE SQL""")

Functions can be used in any expressions (eg. in the target list or qualifications):

>>> print(db.query("SELECT one() AS answer"))
answer
------
     1
(1 row)

Here’s how you create a function that takes arguments. The following function returns the sum of its two arguments:

>>> query("""CREATE FUNCTION add_em(int4, int4) RETURNS int4
...     AS $$ SELECT $1 + $2 $$ LANGUAGE SQL""")
>>> print(query("SELECT add_em(1, 2) AS answer"))
answer
------
     3
(1 row)

Creating SQL Functions on Composite Types¶

It is also possible to create functions that return values of composite types.

Before we create more sophisticated functions, let’s populate an EMP table:

>>> query("""CREATE TABLE EMP (
...     name   text,
...     salary int4,
...     age    int4,
...     dept   varchar(16))""")
>>> emps = ["'Sam', 1200, 16, 'toy'",
...     "'Claire', 5000, 32, 'shoe'",
...     "'Andy', -1000, 2, 'candy'",
...     "'Bill', 4200, 36, 'shoe'",
...     "'Ginger', 4800, 30, 'candy'"]
>>> for emp in emps:
...     query("INSERT INTO EMP VALUES (%s)" % emp)

Every INSERT statement will return a ‘1’ indicating that it has inserted one row into the EMP table.

The argument of a function can also be a tuple. For instance, double_salary takes a tuple of the EMP table:

>>> query("""CREATE FUNCTION double_salary(EMP) RETURNS int4
...     AS $$ SELECT $1.salary * 2 AS salary $$ LANGUAGE SQL""")
>>> print(query("""SELECT name, double_salary(EMP) AS dream
...     FROM EMP WHERE EMP.dept = 'toy'"""))
name|dream
----+-----
Sam | 2400
(1 row)

The return value of a function can also be a tuple. However, make sure that the expressions in the target list are in the same order as the columns of EMP:

>>> query("""CREATE FUNCTION new_emp() RETURNS EMP AS $$
...     SELECT 'None'::text AS name,
...         1000 AS salary,
...         25 AS age,
...         'None'::varchar(16) AS dept
...     $$ LANGUAGE SQL""")

You can then extract a column out of the resulting tuple by using the “function notation” for projection columns (i.e. bar(foo) is equivalent to foo.bar). Note that new_emp().name isn’t supported:

>>> print(query("SELECT name(new_emp()) AS nobody"))
nobody
------
None
(1 row)

Let’s try one more function that returns tuples:

>>> query("""CREATE FUNCTION high_pay() RETURNS setof EMP
...         AS 'SELECT * FROM EMP where salary > 1500'
...     LANGUAGE SQL""")
>>> query("SELECT name(high_pay()) AS overpaid")
overpaid
--------
Claire
Bill
Ginger
(3 rows)

Creating SQL Functions with multiple SQL statements¶

You can also create functions that do more than just a SELECT.

You may have noticed that Andy has a negative salary. We’ll create a function that removes employees with negative salaries:

>>> query("SELECT * FROM EMP")
 name |salary|age|dept
------+------+---+-----
Sam   |  1200| 16|toy
Claire|  5000| 32|shoe
Andy  | -1000|  2|candy
Bill  |  4200| 36|shoe
Ginger|  4800| 30|candy
(5 rows)
>>> query("""CREATE FUNCTION clean_EMP () RETURNS int4 AS
...         'DELETE FROM EMP WHERE EMP.salary < 0;
...          SELECT 1 AS ignore_this'
...     LANGUAGE SQL""")
>>> query("SELECT clean_EMP()")
clean_emp
---------
        1
(1 row)
>>> query("SELECT * FROM EMP")
 name |salary|age|dept
------+------+---+-----
Sam   |  1200| 16|toy
Claire|  5000| 32|shoe
Bill  |  4200| 36|shoe
Ginger|  4800| 30|candy
(4 rows)

Remove functions that were created in this example¶

We can remove the functions that we have created in this example and the table EMP, by using the DROP command:

query("DROP FUNCTION clean_EMP()")
query("DROP FUNCTION high_pay()")
query("DROP FUNCTION new_emp()")
query("DROP FUNCTION add_em(int4, int4)")
query("DROP FUNCTION one()")
query("DROP TABLE EMP CASCADE")
PyGreSQL-5.1/docs/_build/html/contents/postgres/syscat.html0000644000175100077410000003712113470245536023765 0ustar darcypyg00000000000000 Examples for using the system catalogs — PyGreSQL 5.1

Examples for using the system catalogs¶

The system catalogs are regular tables where PostgreSQL stores schema metadata, such as information about tables and columns, and internal bookkeeping information. You can drop and recreate the tables, add columns, insert and update values, and severely mess up your system that way. Normally, one should not change the system catalogs by hand: there are SQL commands to make all supported changes. For example, CREATE DATABASE inserts a row into the pg_database catalog — and actually creates the database on disk.

In this section we want to show examples for how to parse some of the system catalogs, making queries with the classic PyGreSQL interface.

We assume that you have already created a connection to the PostgreSQL database, as explained in the Basic examples:

>>> from pg import DB
>>> db = DB()
>>> query = db.query

Lists indices¶

This query lists all simple indices in the database:

print(query("""SELECT bc.relname AS class_name,
        ic.relname AS index_name, a.attname
    FROM pg_class bc, pg_class ic, pg_index i, pg_attribute a
    WHERE i.indrelid = bc.oid AND i.indexrelid = ic.oid
        AND i.indkey[0] = a.attnum AND a.attrelid = bc.oid
        AND NOT a.attisdropped AND a.attnum>0
    ORDER BY class_name, index_name, attname"""))

List user defined attributes¶

This query lists all user-defined attributes and their types in user-defined tables:

print(query("""SELECT c.relname, a.attname,
        format_type(a.atttypid, a.atttypmod)
    FROM pg_class c, pg_attribute a
    WHERE c.relkind = 'r' AND c.relnamespace!=ALL(ARRAY[
        'pg_catalog','pg_toast', 'information_schema']::regnamespace[])
        AND a.attnum > 0
        AND a.attrelid = c.oid
        AND NOT a.attisdropped
    ORDER BY relname, attname"""))

List user defined base types¶

This query lists all user defined base types:

print(query("""SELECT r.rolname, t.typname
    FROM pg_type t, pg_authid r
    WHERE r.oid = t.typowner
        AND t.typrelid = '0'::oid and t.typelem = '0'::oid
        AND r.rolname != 'postgres'
    ORDER BY rolname, typname"""))

List operators¶

This query lists all right-unary operators:

print(query("""SELECT o.oprname AS right_unary,
        lt.typname AS operand, result.typname AS return_type
    FROM pg_operator o, pg_type lt, pg_type result
    WHERE o.oprkind='r' and o.oprleft = lt.oid
        AND o.oprresult = result.oid
    ORDER BY operand"""))

This query lists all left-unary operators:

print(query("""SELECT o.oprname AS left_unary,
        rt.typname AS operand, result.typname AS return_type
    FROM pg_operator o, pg_type rt, pg_type result
    WHERE o.oprkind='l' AND o.oprright = rt.oid
        AND o.oprresult = result.oid
    ORDER BY operand"""))

And this one lists all of the binary operators:

print(query("""SELECT o.oprname AS binary_op,
        rt.typname AS right_opr, lt.typname AS left_opr,
        result.typname AS return_type
    FROM pg_operator o, pg_type rt, pg_type lt, pg_type result
    WHERE o.oprkind = 'b' AND o.oprright = rt.oid
        AND o.oprleft = lt.oid AND o.oprresult = result.oid"""))

List functions of a language¶

Given a programming language, this query returns the name, args and return type from all functions of a language:

language = 'sql'
print(query("""SELECT p.proname, p.pronargs, t.typname
    FROM pg_proc p, pg_language l, pg_type t
    WHERE p.prolang = l.oid AND p.prorettype = t.oid
        AND l.lanname = $1
    ORDER BY proname""", (language,)))

List aggregate functions¶

This query lists all of the aggregate functions and the type to which they can be applied:

print(query("""SELECT p.proname, t.typname
    FROM pg_aggregate a, pg_proc p, pg_type t
    WHERE a.aggfnoid = p.oid
        and p.proargtypes[0] = t.oid
    ORDER BY proname, typname"""))

List operator families¶

The following query lists all defined operator families and all the operators included in each family:

print(query("""SELECT am.amname, opf.opfname, amop.amopopr::regoperator
    FROM pg_am am, pg_opfamily opf, pg_amop amop
    WHERE opf.opfmethod = am.oid
        AND amop.amopfamily = opf.oid
    ORDER BY amname, opfname, amopopr"""))
PyGreSQL-5.1/docs/_build/html/contents/postgres/advanced.html0000644000175100077410000004324113470245536024224 0ustar darcypyg00000000000000 Examples for advanced features — PyGreSQL 5.1

Examples for advanced features¶

In this section, we show how to use some advanced features of PostgreSQL using the classic PyGreSQL interface.

We assume that you have already created a connection to the PostgreSQL database, as explained in the Basic examples:

>>> from pg import DB
>>> db = DB()
>>> query = db.query

Inheritance¶

A table can inherit from zero or more tables. A query can reference either all rows of a table or all rows of a table plus all of its descendants.

For example, the capitals table inherits from cities table (it inherits all data fields from cities):

>>> data = [('cities', [
...         "'San Francisco', 7.24E+5, 63",
...         "'Las Vegas', 2.583E+5, 2174",
...         "'Mariposa', 1200, 1953"]),
...     ('capitals', [
...         "'Sacramento',3.694E+5,30,'CA'",
...         "'Madison', 1.913E+5, 845, 'WI'"])]

Now, let’s populate the tables:

>>> data = [('cities', [
...         "'San Francisco', 7.24E+5, 63",
...         "'Las Vegas', 2.583E+5, 2174",
...         "'Mariposa', 1200, 1953"]),
...     ('capitals', [
...         "'Sacramento',3.694E+5,30,'CA'",
...         "'Madison', 1.913E+5, 845, 'WI'"])]
>>> for table, rows in data:
...     for row in rows:
...         query("INSERT INTO %s VALUES (%s)" % (table, row))
>>> print(query("SELECT * FROM cities"))
    name     |population|altitude
-------------+----------+--------
San Francisco|    724000|      63
Las Vegas    |    258300|    2174
Mariposa     |      1200|    1953
Sacramento   |    369400|      30
Madison      |    191300|     845
(5 rows)
>>> print(query("SELECT * FROM capitals"))
   name   |population|altitude|state
----------+----------+--------+-----
Sacramento|    369400|      30|CA
Madison   |    191300|     845|WI
(2 rows)

You can find all cities, including capitals, that are located at an altitude of 500 feet or higher by:

>>> print(query("""SELECT c.name, c.altitude
...     FROM cities c
...     WHERE altitude > 500"""))
  name   |altitude
---------+--------
Las Vegas|    2174
Mariposa |    1953
Madison  |     845
(3 rows)

On the other hand, the following query references rows of the base table only, i.e. it finds all cities that are not state capitals and are situated at an altitude of 500 feet or higher:

>>> print(query("""SELECT name, altitude
...     FROM ONLY cities
...     WHERE altitude > 500"""))
  name   |altitude
---------+--------
Las Vegas|    2174
Mariposa |    1953
(2 rows)

Arrays¶

Attributes can be arrays of base types or user-defined types:

>>> query("""CREATE TABLE sal_emp (
...        name                  text,
...        pay_by_quarter        int4[],
...        pay_by_extra_quarter  int8[],
...        schedule              text[][])""")

Insert instances with array attributes. Note the use of braces:

>>> query("""INSERT INTO sal_emp VALUES (
...     'Bill', '{10000,10000,10000,10000}',
...     '{9223372036854775800,9223372036854775800,9223372036854775800}',
...     '{{"meeting", "lunch"}, {"training", "presentation"}}')""")
>>> query("""INSERT INTO sal_emp VALUES (
...     'Carol', '{20000,25000,25000,25000}',
...      '{9223372036854775807,9223372036854775807,9223372036854775807}',
...      '{{"breakfast", "consulting"}, {"meeting", "lunch"}}')""")

Queries on array attributes:

>>> query("""SELECT name FROM sal_emp WHERE
...     sal_emp.pay_by_quarter[1] != sal_emp.pay_by_quarter[2]""")
name
-----
Carol
(1 row)

Retrieve third quarter pay of all employees:

>>> query("SELECT sal_emp.pay_by_quarter[3] FROM sal_emp")
pay_by_quarter
--------------
         10000
         25000
(2 rows)

Retrieve third quarter extra pay of all employees:

>>> query("SELECT sal_emp.pay_by_extra_quarter[3] FROM sal_emp")
pay_by_extra_quarter
--------------------
 9223372036854775800
 9223372036854775807
(2 rows)

Retrieve first two quarters of extra quarter pay of all employees:

>>> query("SELECT sal_emp.pay_by_extra_quarter[1:2] FROM sal_emp")
          pay_by_extra_quarter
-----------------------------------------
{9223372036854775800,9223372036854775800}
{9223372036854775807,9223372036854775807}
(2 rows)

Select subarrays:

>>> query("""SELECT sal_emp.schedule[1:2][1:1] FROM sal_emp
...     WHERE sal_emp.name = 'Bill'""")
       schedule
----------------------
{{meeting},{training}}
(1 row)
PyGreSQL-5.1/docs/_build/html/contents/postgres/basic.html0000644000175100077410000011216213470245536023537 0ustar darcypyg00000000000000 Basic examples — PyGreSQL 5.1

Basic examples¶

In this section, we demonstrate how to use some of the very basic features of PostgreSQL using the classic PyGreSQL interface.

Creating a connection to the database¶

We start by creating a connection to the PostgreSQL database:

>>> from pg import DB
>>> db = DB()

If you pass no parameters when creating the DB instance, then PyGreSQL will try to connect to the database on the local host that has the same name as the current user, and also use that name for login.

You can also pass the database name, host, port and login information as parameters when creating the DB instance:

>>> db = DB(dbname='testdb', host='pgserver', port=5432,
...     user='scott', passwd='tiger')

The DB class of which db is an object is a wrapper around the lower level Connection class of the pg module. The most important method of such connection objects is the query method that allows you to send SQL commands to the database.

Creating tables¶

The first thing you would want to do in an empty database is creating a table. To do this, you need to send a CREATE TABLE command to the database. PostgreSQL has its own set of built-in types that can be used for the table columns. Let us create two tables “weather” and “cities”:

>>> db.query("""CREATE TABLE weather (
...     city varchar(80),
...     temp_lo int, temp_hi int,
...     prcp float8,
...     date date)""")
>>> db.query("""CREATE TABLE cities (
...     name varchar(80),
...     location point)""")

Note

Keywords are case-insensitive but identifiers are case-sensitive.

You can get a list of all tables in the database with:

>>> db.get_tables()
['public.cities', 'public.weather']

Insert data¶

Now we want to fill our tables with data. An INSERT statement is used to insert a new row into a table. There are several ways you can specify what columns the data should go to.

Let us insert a row into each of these tables. The simplest case is when the list of values corresponds to the order of the columns specified in the CREATE TABLE command:

>>> db.query("""INSERT INTO weather
...     VALUES ('San Francisco', 46, 50, 0.25, '11/27/1994')""")
>>> db.query("""INSERT INTO cities
...     VALUES ('San Francisco', '(-194.0, 53.0)')""")

You can also specify the columns to which the values correspond. The columns can be specified in any order. You may also omit any number of columns, such as with unknown precipitation, below:

>>> db.query("""INSERT INTO weather (date, city, temp_hi, temp_lo)
...     VALUES ('11/29/1994', 'Hayward', 54, 37)""")

If you get errors regarding the format of the date values, your database is probably set to a different date style. In this case you must change the date style like this:

>>> db.query("set datestyle = MDY")

Instead of explicitly writing the INSERT statement and sending it to the database with the DB.query() method, you can also use the more convenient DB.insert() method that does the same under the hood:

>>> db.insert('weather',
...     date='11/29/1994', city='Hayward', temp_hi=54, temp_lo=37)

And instead of using keyword parameters, you can also pass the values to the DB.insert() method in a single Python dictionary.

If you have a Python list with many rows that shall be used to fill a database table quickly, you can use the DB.inserttable() method.

Retrieving data¶

After having entered some data into our tables, let’s see how we can get the data out again. A SELECT statement is used for retrieving data. The basic syntax is:

SELECT columns FROM tables WHERE predicates

A simple one would be the following query:

>>> q = db.query("SELECT * FROM weather")
>>> print(q)
    city     |temp_lo|temp_hi|prcp|   date
-------------+-------+-------+----+----------
San Francisco|     46|     50|0.25|1994-11-27
Hayward      |     37|     54|    |1994-11-29
(2 rows)

You may also specify expressions in the target list. (The ‘AS column’ specifies the column name of the result. It is optional.)

>>> print(db.query("""SELECT city, (temp_hi+temp_lo)/2 AS temp_avg, date
...     FROM weather"""))
    city     |temp_avg|   date
-------------+--------+----------
San Francisco|      48|1994-11-27
Hayward      |      45|1994-11-29
(2 rows)

If you want to retrieve rows that satisfy certain condition (i.e. a restriction), specify the condition in a WHERE clause. The following retrieves the weather of San Francisco on rainy days:

>>> print(db.query("""SELECT * FROM weather
...     WHERE city = 'San Francisco' AND prcp > 0.0"""))
    city     |temp_lo|temp_hi|prcp|   date
-------------+-------+-------+----+----------
San Francisco|     46|     50|0.25|1994-11-27
(1 row)

Here is a more complicated one. Duplicates are removed when DISTINCT is specified. ORDER BY specifies the column to sort on. (Just to make sure the following won’t confuse you, DISTINCT and ORDER BY can be used separately.)

>>> print(db.query("SELECT DISTINCT city FROM weather ORDER BY city"))
    city
-------------
Hayward
San Francisco
(2 rows)

So far we have only printed the output of a SELECT query. The object that is returned by the query is an instance of the Query class that can print itself in the nicely formatted way we saw above. But you can also retrieve the results as a list of tuples, by using the Query.getresult() method:

>>> from pprint import pprint
>>> q = db.query("SELECT * FROM weather")
>>> pprint(q.getresult())
[('San Francisco', 46, 50, 0.25, '1994-11-27'),
 ('Hayward', 37, 54, None, '1994-11-29')]

Here we used pprint to print out the returned list in a nicely formatted way.

If you want to retrieve the results as a list of dictionaries instead of tuples, use the Query.dictresult() method instead:

>>> pprint(q.dictresult())
[{'city': 'San Francisco',
  'date': '1994-11-27',
  'prcp': 0.25,
  'temp_hi': 50,
  'temp_lo': 46},
 {'city': 'Hayward',
  'date': '1994-11-29',
  'prcp': None,
  'temp_hi': 54,
  'temp_lo': 37}]

Finally, you can also retrieve the results as a list of named tuples, using the Query.namedresult() method. This can be a good compromise between simple tuples and the more memory intensive dictionaries:

>>> for row in q.namedresult():
...     print(row.city, row.date)
...
San Francisco 1994-11-27
Hayward 1994-11-29

If you only want to retrieve a single row of data, you can use the more convenient DB.get() method that does the same under the hood:

>>> d = dict(city='Hayward')
>>> db.get('weather', d, 'city')
>>> pprint(d)
{'city': 'Hayward',
 'date': '1994-11-29',
 'prcp': None,
 'temp_hi': 54,
 'temp_lo': 37}

As you see, the DB.get() method returns a dictionary with the column names as keys. In the third parameter you can specify which column should be looked up in the WHERE statement of the SELECT statement that is executed by the DB.get() method. You normally don’t need it when the table was created with a primary key.

Retrieving data into other tables¶

A SELECT … INTO statement can be used to retrieve data into another table:

>>> db.query("""SELECT * INTO TEMPORARY TABLE temptab FROM weather
...     WHERE city = 'San Francisco' and prcp > 0.0""")

This fills a temporary table “temptab” with a subset of the data in the original “weather” table. It can be listed with:

>>> print(db.query("SELECT * from temptab"))
    city     |temp_lo|temp_hi|prcp|   date
-------------+-------+-------+----+----------
San Francisco|     46|     50|0.25|1994-11-27
(1 row)

Aggregates¶

Let’s try the following query:

>>> print(db.query("SELECT max(temp_lo) FROM weather"))
max
---
 46
(1 row)

You can also use aggregates with the GROUP BY clause:

>>> print(db.query("SELECT city, max(temp_lo) FROM weather GROUP BY city"))
    city     |max
-------------+---
Hayward      | 37
San Francisco| 46
(2 rows)

Joining tables¶

Queries can access multiple tables at once or access the same table in such a way that multiple instances of the table are being processed at the same time.

Suppose we want to find all the records that are in the temperature range of other records. W1 and W2 are aliases for weather. We can use the following query to achieve that:

>>> print(db.query("""SELECT W1.city, W1.temp_lo, W1.temp_hi,
...     W2.city, W2.temp_lo, W2.temp_hi FROM weather W1, weather W2
...     WHERE W1.temp_lo < W2.temp_lo and W1.temp_hi > W2.temp_hi"""))
 city  |temp_lo|temp_hi|    city     |temp_lo|temp_hi
-------+-------+-------+-------------+-------+-------
Hayward|     37|     54|San Francisco|     46|     50
(1 row)

Now let’s join two different tables. The following joins the “weather” table and the “cities” table:

>>> print(db.query("""SELECT city, location, prcp, date
...     FROM weather, cities
...     WHERE name = city"""))
    city     |location |prcp|   date
-------------+---------+----+----------
San Francisco|(-194,53)|0.25|1994-11-27
(1 row)

Since the column names are all different, we don’t have to specify the table name. If you want to be clear, you can do the following. They give identical results, of course:

>>> print(db.query("""SELECT w.city, c.location, w.prcp, w.date
...     FROM weather w, cities c WHERE c.name = w.city"""))
    city     |location |prcp|   date
-------------+---------+----+----------
San Francisco|(-194,53)|0.25|1994-11-27
(1 row)

Updating data¶

If you want to change the data that has already been inserted into a database table, you will need the UPDATE statement.

Suppose you discover the temperature readings are all off by 2 degrees as of Nov 28, you may update the data as follows:

>>> db.query("""UPDATE weather
...     SET temp_hi = temp_hi - 2,  temp_lo = temp_lo - 2
...     WHERE date > '11/28/1994'""")
'1'
>>> print(db.query("SELECT * from weather"))
    city     |temp_lo|temp_hi|prcp|   date
-------------+-------+-------+----+----------
San Francisco|     46|     50|0.25|1994-11-27
Hayward      |     35|     52|    |1994-11-29
(2 rows)

Note that the UPDATE statement returned the string '1', indicating that exactly one row of data has been affected by the update.

If you retrieved one row of data as a dictionary using the DB.get() method, then you can also update that row with the DB.update() method.

Deleting data¶

To delete rows from a table, a DELETE statement can be used.

Suppose you are no longer interested in the weather of Hayward, you can do the following to delete those rows from the table:

>>> db.query("DELETE FROM weather WHERE city = 'Hayward'")
'1'

Again, you get the string '1' as return value, indicating that exactly one row of data has been deleted.

You can also delete all the rows in a table by doing the following. This is different from DROP TABLE which removes the table itself in addition to removing the rows, as explained in the next section.

>>> db.query("DELETE FROM weather")
'1'
>>> print(db.query("SELECT * from weather"))
city|temp_lo|temp_hi|prcp|date
----+-------+-------+----+----
(0 rows)

Since only one row was left in the table, the DELETE query again returns the string '1'. The SELECT query now gives an empty result.

If you retrieved a row of data as a dictionary using the DB.get() method, then you can also delete that row with the DB.delete() method.

Removing the tables¶

The DROP TABLE command is used to remove tables. After you have done this, you can no longer use those tables:

>>> db.query("DROP TABLE weather, cities")
>>> db.query("select * from weather")
pg.ProgrammingError: Error:  Relation "weather" does not exist
PyGreSQL-5.1/docs/_build/html/contents/general.html0000644000175100077410000002006513470245533022222 0ustar darcypyg00000000000000 General PyGreSQL programming information — PyGreSQL 5.1

General PyGreSQL programming information¶

PyGreSQL consists of two parts: the “classic” PyGreSQL interface provided by the pg module and the newer DB-API 2.0 compliant interface provided by the pgdb module.

If you use only the standard features of the DB-API 2.0 interface, it will be easier to switch from PostgreSQL to another database for which a DB-API 2.0 compliant interface exists.

The “classic” interface may be easier to use for beginners, and it provides some higher-level and PostgreSQL specific convenience methods.

See also

DB-API 2.0 (Python Database API Specification v2.0) is a specification for connecting to databases (not only PostgreSQL) from Python that has been developed by the Python DB-SIG in 1999. The authoritative programming information for the DB-API is PEP 0249.

Both Python modules utilize the same low-level C extension, which serves as a wrapper for the “libpq” library, the C API to PostgreSQL.

This means you must have the libpq library installed as a shared library on your client computer, in a version that is supported by PyGreSQL. Depending on the client platform, you may have to set environment variables like PATH or LD_LIBRARY_PATH so that PyGreSQL can find the library.

Warning

Note that PyGreSQL is not thread-safe on the connection level. Therefore we recommend using DBUtils for multi-threaded environments, which supports both PyGreSQL interfaces.

Another option is using PyGreSQL indirectly as a database driver for the high-level SQLAlchemy SQL toolkit and ORM, which supports PyGreSQL starting with SQLAlchemy 1.1 and which provides a way to use PyGreSQL in a multi-threaded environment using the concept of “thread local storage”. Database URLs for PyGreSQL take this form:

postgresql+pygresql://username:password@host:port/database
PyGreSQL-5.1/docs/_build/html/contents/index.html0000644000175100077410000001534613470245533021722 0ustar darcypyg00000000000000 The PyGreSQL documentation — PyGreSQL 5.1 PyGreSQL-5.1/docs/_build/html/contents/changelog.html0000644000175100077410000014326113470245533022540 0ustar darcypyg00000000000000 ChangeLog — PyGreSQL 5.1

ChangeLog¶

Version 5.1 (2019-05-17)¶

  • Changes to the classic PyGreSQL module (pg):
    • Support for prepared statements (following a suggestion and first implementation by Justin Pryzby on the mailing list).
    • DB wrapper objects based on existing connections can now be closed and reopened properly (but the underlying connection will not be affected).
    • The query object can now be used as an iterator similar to query.getresult() and will then yield the rows as tuples. Thanks to Justin Pryzby for the proposal and most of the implementation.
    • Deprecated query.ntuples() in the classic API, since len(query) can now be used and returns the same number.
    • The i-th row of the result can now be accessed as query[i].
    • New method query.scalarresult() that gets only the first field of each row as a list of scalar values.
    • New methods query.one(), query.onenamed(), query.onedict() and query.onescalar() that fetch only one row from the result or None if there are no more rows, similar to the cursor.fetchone() method in DB-API 2.
    • New methods query.single(), query.singlenamed(), query.singledict() and query.singlescalar() that fetch only one row from the result, and raise an error if the result does not have exactly one row.
    • New methods query.dictiter(), query.namediter() and query.scalariter() returning the same values as query.dictresult(), query.namedresult() and query.scalarresult(), but as iterables instead of lists. This avoids creating a Python list of all results and can be slightly more efficient.
    • Removed pg.get/set_namedresult. You can configure the named tuples factory with the pg.set_row_factory_size() function and change the implementation with pg.set_query_helpers(), but this is not recommended and this function is not part of the official API.
    • Added new connection attributes socket, backend_pid, ssl_in_use and ssl_attributes (the latter need PostgreSQL >= 9.5 on the client).
  • Changes to the DB-API 2 module (pgdb):
    • Connections now have an autocommit attribute which is set to False by default but can be set to True to switch to autocommit mode where no transactions are started and calling commit() is not required. Note that this is not part of the DB-API 2 standard.

Version 5.0.7 (2019-05-17)¶

  • This version officially supports the new PostgreSQL 11.
  • Fixed a bug in parsing array subscript ranges (reported by Justin Pryzby).
  • Fixed an issue when deleting a DB wrapper object with the underlying connection already closed (bug report by Jacob Champion).

Version 5.0.6 (2018-07-29)¶

  • This version officially supports the new Python 3.7.
  • Correct trove classifier for the PostgreSQL License.

Version 5.0.5 (2018-04-25)¶

  • This version officially supports the new PostgreSQL 10.
  • The memory for the string with the number of rows affected by a classic pg module query() was already freed (bug report and fix by Peifeng Qiu).

Version 5.0.4 (2017-07-23)¶

  • This version officially supports the new Python 3.6 and PostgreSQL 9.6.
  • query_formatted() can now be used without parameters.
  • The automatic renaming of columns that are invalid as field names of named tuples now works more accurately in Python 2.6 and 3.0.
  • Fixed error checks for unlink() and export() methods of large objects (bug report by Justin Pryzby).
  • Fixed a compilation issue under OS X (bug report by Josh Johnston).

Version 5.0.3 (2016-12-10)¶

  • It is now possible to use a custom array cast function by changing the type caster for the ‘anyarray’ type. For instance, by calling set_typecast(‘anyarray’, lambda v, c: v) you can have arrays returned as strings instead of lists. Note that in the pg module, you can also call set_array(False) in order to return arrays as strings.
  • The namedtuple classes used for the rows of query results are now cached and reused internally, since creating namedtuples classes in Python is a somewhat expensive operation. By default the cache has a size of 1024 entries, but this can be changed with the set_row_factory_size() function. In certain cases this change can notably improve the performance.
  • The namedresult() method in the classic API now also tries to rename columns that would result in invalid field names.

Version 5.0.2 (2016-09-13)¶

  • Fixed an infinite recursion problem in the DB wrapper class of the classic module that could occur when the underlying connection could not be properly opened (bug report by Justin Pryzby).

Version 5.0.1 (2016-08-18)¶

  • The update() and delete() methods of the DB wrapper now use the OID instead of the primary key if both are provided. This restores backward compatibility with PyGreSQL 4.x and allows updating the primary key itself if an OID exists.
  • The connect() function of the DB API 2.0 module now accepts additional keyword parameters such as “application_name” which will be passed on to PostgreSQL.
  • PyGreSQL now adapts some queries to be able to access older PostgreSQL 8.x databases (as suggested on the mailing list by Andres Mejia). However, these old versions of PostgreSQL are not officially supported and tested any more.
  • Fixed an issue with Postgres types that have an OID >= 0x80000000 (reported on the mailing list by Justin Pryzby).
  • Allow extra values that are not used in the command in the parameter dict passed to the query_formatted() method (as suggested by Justin Pryzby).
  • Improved handling of empty arrays in the classic module.
  • Unused classic connections were not properly garbage collected which could cause memory leaks (reported by Justin Pryzby).
  • Made C extension compatible with MSVC 9 again (this was needed to compile for Python 2 on Windows).

Version 5.0 (2016-03-20)¶

  • This version now runs on both Python 2 and Python 3.
  • The supported versions are Python 2.6 to 2.7, and 3.3 to 3.5.
  • PostgreSQL is supported in all versions from 9.0 to 9.5.
  • Changes in the classic PyGreSQL module (pg):
    • The classic interface got two new methods get_as_list() and get_as_dict() returning a database table as a Python list or dict. The amount of data returned can be controlled with various parameters.
    • A method upsert() has been added to the DB wrapper class that utilizes the “upsert” feature that is new in PostgreSQL 9.5. The new method nicely complements the existing get/insert/update/delete() methods.
    • When using insert/update/upsert(), you can now pass PostgreSQL arrays as lists and PostgreSQL records as tuples in the classic module.
    • Conversely, when the query method returns a PostgreSQL array, it is passed to Python as a list. PostgreSQL records are converted to named tuples as well, but only if you use one of the get/insert/update/delete() methods. PyGreSQL uses a new fast built-in parser to achieve this. The automatic conversion of arrays to lists can be disabled with set_array(False).
    • The pkey() method of the classic interface now returns tuples instead of frozenset. The order of the tuples is like in the primary key index.
    • Like the DB-API 2 module, the classic module now also returns bool values from the database as Python bool objects instead of strings. You can still restore the old behavior by calling set_bool(False).
    • Like the DB-API 2 module, the classic module now also returns bytea data fetched from the database as byte strings, so you don’t need to call unescape_bytea() any more. This has been made configurable though, and you can restore the old behavior by calling set_bytea_escaped(True).
    • A method set_jsondecode() has been added for changing or removing the function that automatically decodes JSON data coming from the database. By default, decoding JSON is now enabled and uses the decoder function in the standard library with its default parameters.
    • The table name that is affixed to the name of the OID column returned by the get() method of the classic interface will not automatically be fully qualified any more. This reduces overhead from the interface, but it means you must always write the table name in the same way when you call the methods using it and you are using tables with OIDs. Also, OIDs are now only used when access via primary key is not possible. Note that OIDs are considered deprecated anyway, and they are not created by default any more in PostgreSQL 8.1 and later.
    • The internal caching and automatic quoting of class names in the classic interface has been simplified and improved, it should now perform better and use less memory. Also, overhead for quoting values in the DB wrapper methods has been reduced and security has been improved by passing the values to libpq separately as parameters instead of inline.
    • It is now possible to use the registered type names instead of the more coarse-grained type names that are used by default in PyGreSQL, without breaking any of the mechanisms for quoting and typecasting, which rely on the type information. This is achieved while maintaining simplicity and backward compatibility by augmenting the type name string objects with all the necessary information under the cover. To switch registered type names on or off (this is the default), call the DB wrapper method use_regtypes().
    • A new method query_formatted() has been added to the DB wrapper class that allows using the format specifications from Python. A flag “inline” can be set to specify whether parameters should be sent to the database separately or formatted into the SQL.
    • A new type helper Bytea() has been added.
  • Changes in the DB-API 2 module (pgdb):
    • The DB-API 2 module now always returns result rows as named tuples instead of simply lists as before. The documentation explains how you can restore the old behavior or use custom row objects instead.
    • The names of the various classes used by the classic and DB-API 2 modules have been renamed to become simpler, more intuitive and in line with the names used in the DB-API 2 documentation. Since the API provides only objects of these types through constructor functions, this should not cause any incompatibilities.
    • The DB-API 2 module now supports the callproc() cursor method. Note that output parameters are currently not replaced in the return value.
    • The DB-API 2 module now supports copy operations between data streams on the client and database tables via the COPY command of PostgreSQL. The cursor method copy_from() can be used to copy data from the database to the client, and the cursor method copy_to() can be used to copy data from the client to the database.
    • The 7-tuples returned by the description attribute of a pgdb cursor are now named tuples, i.e. their elements can be also accessed by name. The column names and types can now also be requested through the colnames and coltypes attributes, which are not part of DB-API 2 though. The type_code provided by the description attribute is still equal to the PostgreSQL internal type name, but now carries some more information in additional attributes. The size, precision and scale information that is part of the description is now properly set for numeric types.
    • If you pass a Python list as one of the parameters to a DB-API 2 cursor, it is now automatically bound using an ARRAY constructor. If you pass a Python tuple, it is bound using a ROW constructor. This is useful for passing records as well as making use of the IN syntax.
    • Inversely, when a fetch method of a DB-API 2 cursor returns a PostgreSQL array, it is passed to Python as a list, and when it returns a PostgreSQL composite type, it is passed to Python as a named tuple. PyGreSQL uses a new fast built-in parser to achieve this. Anonymous composite types are also supported, but yield only an ordinary tuple containing text strings.
    • New type helpers Interval() and Uuid() have been added.
    • The connection has a new attribute “closed” that can be used to check whether the connection is closed or broken.
    • SQL commands are always handled as if they include parameters, i.e. literal percent signs must always be doubled. This consistent behavior is necessary for using pgdb with wrappers like SQLAlchemy.
    • PyGreSQL 5.0 will be supported as a database driver by SQLAlchemy 1.1.
  • Changes concerning both modules:
    • PyGreSQL now tries to raise more specific and appropriate subclasses of DatabaseError than just ProgrammingError. Particularly, when database constraints are violated, it raises an IntegrityError now.
    • The modules now provide get_typecast() and set_typecast() methods allowing to control the typecasting on the global level. The connection objects have got type caches with the same methods which give control over the typecasting on the level of the current connection. See the documentation on details about the type cache and the typecast mechanisms provided by PyGreSQL.
    • Dates, times, timestamps and time intervals are now returned as the corresponding Python objects from the datetime module of the standard library. In earlier versions of PyGreSQL they had been returned as strings. You can restore the old behavior by deactivating the respective typecast functions, e.g. set_typecast(‘date’, str).
    • PyGreSQL now supports the “uuid” data type, converting such columns automatically to and from Python uuid.UUID objects.
    • PyGreSQL now supports the “hstore” data type, converting such columns automatically to and from Python dictionaries. If you want to insert Python dictionaries as hstore data using DB-API 2, you should wrap them in the new HStore() type constructor as a hint to PyGreSQL.
    • PyGreSQL now supports the “json” and “jsonb” data types, converting such columns automatically to and from Python objects. If you want to insert Python objects as JSON data using DB-API 2, you should wrap them in the new Json() type constructor as a hint to PyGreSQL.
    • A new type helper Literal() for inserting parameters literally as SQL has been added. This is useful for table names, for instance.
    • Fast parsers cast_array(), cast_record() and cast_hstore for the input and output syntax for PostgreSQL arrays, composite types and the hstore type have been added to the C extension module. The array parser also allows using multi-dimensional arrays with PyGreSQL.
    • The tty parameter and attribute of database connections has been removed since it is not supported any more since PostgreSQL 7.4.

Version 4.2.2 (2016-03-18)¶

  • The get_relations() and get_tables() methods now also return system views and tables if you set the optional “system” parameter to True.
  • Fixed a regression when using temporary tables with DB wrapper methods (thanks to Patrick TJ McPhee for reporting).

Version 4.2.1 (2016-02-18)¶

  • Fixed a small bug when setting the notice receiver.
  • Some more minor fixes and re-packaging with proper permissions.

Version 4.2 (2016-01-21)¶

  • The supported Python versions are 2.4 to 2.7.
  • PostgreSQL is supported in all versions from 8.3 to 9.5.
  • Set a better default for the user option “escaping-funcs”.
  • Force build to compile with no errors.
  • New methods get_parameters() and set_parameters() in the classic interface which can be used to get or set run-time parameters.
  • New method truncate() in the classic interface that can be used to quickly empty a table or a set of tables.
  • Fix decimal point handling.
  • Add option to return boolean values as bool objects.
  • Add option to return money values as string.
  • get_tables() does not list information schema tables any more.
  • Fix notification handler (Thanks Patrick TJ McPhee).
  • Fix a small issue with large objects.
  • Minor improvements of the NotificationHandler.
  • Converted documentation to Sphinx and added many missing parts.
  • The tutorial files have become a chapter in the documentation.
  • Greatly improved unit testing, tests run with Python 2.4 to 2.7 again.

Version 4.1.1 (2013-01-08)¶

  • Add NotificationHandler class and method. Replaces need for pgnotify.
  • Sharpen test for inserting current_timestamp.
  • Add more quote tests. False and 0 should evaluate to NULL.
  • More tests - Any number other than 0 is True.
  • Do not use positional parameters internally. This restores backward compatibility with version 4.0.
  • Add methods for changing the decimal point.

Version 4.1 (2013-01-01)¶

  • Dropped support for Python below 2.5 and PostgreSQL below 8.3.
  • Added support for Python up to 2.7 and PostgreSQL up to 9.2.
  • Particularly, support PQescapeLiteral() and PQescapeIdentifier().
  • The query method of the classic API now supports positional parameters. This is an effective way to pass arbitrary or unknown data without worrying about SQL injection or syntax errors (contribution by Patrick TJ McPhee).
  • The classic API now supports a method namedresult() in addition to getresult() and dictresult(), which returns the rows of the result as named tuples if these are supported (Python 2.6 or higher).
  • The classic API has got the new methods begin(), commit(), rollback(), savepoint() and release() for handling transactions.
  • Both classic and DBAPI 2 connections can now be used as context managers for encapsulating transactions.
  • The execute() and executemany() methods now return the cursor object, so you can now write statements like “for row in cursor.execute(…)” (as suggested by Adam Frederick).
  • Binary objects are now automatically escaped and unescaped.
  • Bug in money quoting fixed. Amounts of $0.00 handled correctly.
  • Proper handling of date and time objects as input.
  • Proper handling of floats with ‘nan’ or ‘inf’ values as input.
  • Fixed the set_decimal() function.
  • All DatabaseError instances now have a sqlstate attribute.
  • The getnotify() method can now also return payload strings (#15).
  • Better support for notice processing with the new methods set_notice_receiver() and get_notice_receiver() (as suggested by Michael Filonenko, see #37).
  • Open transactions are rolled back when pgdb connections are closed (as suggested by Peter Harris, see #46).
  • Connections and cursors can now be used with the “with” statement (as suggested by Peter Harris, see #46).
  • New method use_regtypes() that can be called to let getattnames() return registered type names instead of the simplified classic types (#44).

Version 4.0 (2009-01-01)¶

  • Dropped support for Python below 2.3 and PostgreSQL below 7.4.
  • Improved performance of fetchall() for large result sets by speeding up the type casts (as suggested by Peter Schuller).
  • Exposed exceptions as attributes of the connection object.
  • Exposed connection as attribute of the cursor object.
  • Cursors now support the iteration protocol.
  • Added new method to get parameter settings.
  • Added customizable row_factory as suggested by Simon Pamies.
  • Separated between mandatory and additional type objects.
  • Added keyword args to insert, update and delete methods.
  • Added exception handling for direct copy.
  • Start transactions only when necessary, not after every commit().
  • Release the GIL while making a connection (as suggested by Peter Schuller).
  • If available, use decimal.Decimal for numeric types.
  • Allow DB wrapper to be used with DB-API 2 connections (as suggested by Chris Hilton).
  • Made private attributes of DB wrapper accessible.
  • Dropped dependence on mx.DateTime module.
  • Support for PQescapeStringConn() and PQescapeByteaConn(); these are now also used by the internal _quote() functions.
  • Added ‘int8’ to INTEGER types. New SMALLINT type.
  • Added a way to find the number of rows affected by a query() with the classic pg module by returning it as a string. For single inserts, query() still returns the oid as an integer. The pgdb module already provides the “rowcount” cursor attribute for the same purpose.
  • Improved getnotify() by calling PQconsumeInput() instead of submitting an empty command.
  • Removed compatibility code for old OID munging style.
  • The insert() and update() methods now use the “returning” clause if possible to get all changed values, and they also check in advance whether a subsequent select is possible, so that ongoing transactions won’t break if there is no select privilege.
  • Added “protocol_version” and “server_version” attributes.
  • Revived the “user” attribute.
  • The pg module now works correctly with composite primary keys; these are represented as frozensets.
  • Removed the undocumented and actually unnecessary “view†parameter from the get() method.
  • get() raises a nicer ProgrammingError instead of a KeyError if no primary key was found.
  • delete() now also works based on the primary key if no oid available and returns whether the row existed or not.

Version 3.8.1 (2006-06-05)¶

  • Use string methods instead of deprecated string functions.
  • Only use SQL-standard way of escaping quotes.
  • Added the functions escape_string() and escape/unescape_bytea() (as suggested by Charlie Dyson and Kavous Bojnourdi a long time ago).
  • Reverted code in clear() method that set date to current.
  • Added code for backwards compatibility in OID munging code.
  • Reorder attnames tests so that “interval” is checked for before “int.”
  • If caller supplies key dictionary, make sure that all has a namespace.

Version 3.8 (2006-02-17)¶

  • Installed new favicon.ico from Matthew Sporleder <mspo@mspo.com>
  • Replaced snprintf by PyOS_snprintf.
  • Removed NO_SNPRINTF switch which is not needed any longer
  • Clean up some variable names and namespace
  • Add get_relations() method to get any type of relation
  • Rewrite get_tables() to use get_relations()
  • Use new method in get_attnames method to get attributes of views as well
  • Add Binary type
  • Number of rows is now -1 after executing no-result statements
  • Fix some number handling
  • Non-simple types do not raise an error any more
  • Improvements to documentation framework
  • Take into account that nowadays not every table must have an oid column
  • Simplification and improvement of the inserttable() function
  • Fix up unit tests
  • The usual assortment of minor fixes and enhancements

Version 3.7 (2005-09-07)¶

Improvement of pgdb module:

  • Use Python standard datetime if mxDateTime is not available

Major improvements and clean-up in classic pg module:

  • All members of the underlying connection directly available in DB
  • Fixes to quoting function
  • Add checks for valid database connection to methods
  • Improved namespace support, handle search_path correctly
  • Removed old dust and unnecessary imports, added docstrings
  • Internal sql statements as one-liners, smoothed out ugly code

Version 3.6.2 (2005-02-23)¶

  • Further fixes to namespace handling

Version 3.6.1 (2005-01-11)¶

  • Fixes to namespace handling

Version 3.6 (2004-12-17)¶

  • Better DB-API 2.0 compliance
  • Exception hierarchy moved into C module and made available to both APIs
  • Fix error in update method that caused false exceptions
  • Moved to standard exception hierarchy in classic API
  • Added new method to get transaction state
  • Use proper Python constants where appropriate
  • Use Python versions of strtol, etc. Allows Win32 build.
  • Bug fixes and cleanups

Version 3.5 (2004-08-29)¶

Fixes and enhancements:

  • Add interval to list of data types
  • fix up method wrapping especially close()
  • retry pkeys once if table missing in case it was just added
  • wrap query method separately to handle debug better
  • use isinstance instead of type
  • fix free/PQfreemem issue - finally
  • miscellaneous cleanups and formatting

Version 3.4 (2004-06-02)¶

Some cleanups and fixes. This is the first version where PyGreSQL is moved back out of the PostgreSQL tree. A lot of the changes mentioned below were actually made while in the PostgreSQL tree since their last release.

  • Allow for larger integer returns
  • Return proper strings for true and false
  • Cleanup convenience method creation
  • Enhance debugging method
  • Add reopen method
  • Allow programs to preload field names for speedup
  • Move OID handling so that it returns long instead of int
  • Miscellaneous cleanups and formatting

Version 3.3 (2001-12-03)¶

A few cleanups. Mostly there was some confusion about the latest version and so I am bumping the number to keep it straight.

  • Added NUMERICOID to list of returned types. This fixes a bug when returning aggregates in the latest version of PostgreSQL.

Version 3.2 (2001-06-20)¶

Note that there are very few changes to PyGreSQL between 3.1 and 3.2. The main reason for the release is the move into the PostgreSQL development tree. Even the WIN32 changes are pretty minor.

Version 3.1 (2000-11-06)¶

  • Fix some quoting functions. In particular handle NULLs better.
  • Use a method to add primary key information rather than direct manipulation of the class structures
  • Break decimal out in _quote (in pg.py) and treat it as float
  • Treat timestamp like date for quoting purposes
  • Remove a redundant SELECT from the get method speeding it, and insert (since it calls get) up a little.
  • Add test for BOOL type in typecast method to pgdbTypeCache class (tv@beamnet.de)
  • Fix pgdb.py to send port as integer to lower level function (dildog@l0pht.com)
  • Change pg.py to speed up some operations
  • Allow updates on tables with no primary keys

Version 3.0 (2000-05-30)¶

Version 2.4 (1999-06-15)¶

  • Insert returns None if the user doesn’t have select permissions on the table. It can (and does) happen that one has insert but not select permissions on a table.
  • Added ntuples() method to query object (brit@druid.net)
  • Corrected a bug related to getresult() and the money type
  • Corrected a bug related to negative money amounts
  • Allow update based on primary key if munged oid not available and table has a primary key
  • Add many __doc__ strings (andre@via.ecp.fr)
  • Get method works with views if key specified

Version 2.3 (1999-04-17)¶

  • connect.host returns “localhost” when connected to Unix socket (torppa@tuhnu.cutery.fi)
  • Use PyArg_ParseTupleAndKeywords in connect() (torppa@tuhnu.cutery.fi)
  • fixes and cleanups (torppa@tuhnu.cutery.fi)
  • Fixed memory leak in dictresult() (terekhov@emc.com)
  • Deprecated pgext.py - functionality now in pg.py
  • More cleanups to the tutorial
  • Added fileno() method - terekhov@emc.com (Mikhail Terekhov)
  • added money type to quoting function
  • Compiles cleanly with more warnings turned on
  • Returns PostgreSQL error message on error
  • Init accepts keywords (Jarkko Torppa)
  • Convenience functions can be overridden (Jarkko Torppa)
  • added close() method

Version 2.2 (1998-12-21)¶

  • Added user and password support thanks to Ng Pheng Siong (ngps@post1.com)
  • Insert queries return the inserted oid
  • Add new pg wrapper (C module renamed to _pg)
  • Wrapped database connection in a class
  • Cleaned up some of the tutorial. (More work needed.)
  • Added version and __version__. Thanks to thilo@eevolute.com for the suggestion.

Version 2.1 (1998-03-07)¶

  • return fields as proper Python objects for field type
  • Cleaned up pgext.py
  • Added dictresult method

Version 2.0 (1997-12-23)¶

  • Updated code for PostgreSQL 6.2.1 and Python 1.5
  • Reformatted code and converted to use full ANSI style prototypes
  • Changed name to PyGreSQL (from PyGres95)
  • Changed order of arguments to connect function
  • Created new type pgqueryobject and moved certain methods to it
  • Added a print function for pgqueryobject
  • Various code changes - mostly stylistic

Version 1.0b (1995-11-04)¶

  • Keyword support for connect function moved from library file to C code and taken away from library
  • Rewrote documentation
  • Bug fix in connect function
  • Enhancements in large objects interface methods

Version 1.0a (1995-10-30)¶

A limited release.

  • Module adapted to standard Python syntax
  • Keyword support for connect function in library file
  • Rewrote default parameters interface (internal use of strings)
  • Fixed minor bugs in module interface
  • Redefinition of error messages

Version 0.9b (1995-10-10)¶

The first public release.

  • Large objects implementation
  • Many bug fixes, enhancements, …

Version 0.1a (1995-10-07)¶

  • Basic libpq functions (SQL access)
PyGreSQL-5.1/docs/_build/html/contents/pg/0000755000175100077410000000000013470245541020321 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/_build/html/contents/pg/adaptation.html0000644000175100077410000015053413470245534023345 0ustar darcypyg00000000000000 Remarks on Adaptation and Typecasting — PyGreSQL 5.1

Remarks on Adaptation and Typecasting¶

Both PostgreSQL and Python have the concept of data types, but there are of course differences between the two type systems. Therefore PyGreSQL needs to adapt Python objects to the representation required by PostgreSQL when passing values as query parameters, and it needs to typecast the representation of PostgreSQL data types returned by database queries to Python objects. Here are some explanations about how this works in detail in case you want to better understand or change the default behavior of PyGreSQL.

Supported data types¶

The following automatic data type conversions are supported by PyGreSQL out of the box. If you need other automatic type conversions or want to change the default conversions, you can achieve this by using the methods explained in the next two sections.

PostgreSQL Python
char, bpchar, name, text, varchar str
bool bool
bytea bytes
int2, int4, int8, oid, serial int [1]
int2vector list of int
float4, float8 float
numeric, money Decimal
date datetime.date
time, timetz datetime.time
timestamp, timestamptz datetime.datetime
interval datetime.timedelta
hstore dict
json, jsonb list or dict
uuid uuid.UUID
array list [2]
record tuple

Note

Elements of arrays and records will also be converted accordingly.

[1]int8 is converted to long in Python 2
[2]The first element of the array will always be the first element of the Python list, no matter what the lower bound of the PostgreSQL array is. The information about the start index of the array (which is usually 1 in PostgreSQL, but can also be different from 1) is ignored and gets lost in the conversion to the Python list. If you need that information, you can request it separately with the array_lower() function provided by PostgreSQL.

Adaptation of parameters¶

When you use the higher level methods of the classic pg module like DB.insert() or DB.update(), you don’t need to care about adaptation of parameters, since all of this is happening automatically behind the scenes. You only need to consider this issue when creating SQL commands manually and sending them to the database using the DB.query() method.

Imagine you have created a user login form that stores the login name as login and the password as passwd and you now want to get the user data for that user. You may be tempted to execute a query like this:

>>> db = pg.DB(...)
>>> sql = "SELECT * FROM user_table WHERE login = '%s' AND passwd = '%s'"
>>> db.query(sql % (login, passwd)).getresult()[0]

This seems to work at first glance, but you will notice an error as soon as you try to use a login name containing a single quote. Even worse, this error can be exploited through so-called “SQL injection”, where an attacker inserts malicious SQL statements into the query that you never intended to be executed. For instance, with a login name something like ' OR ''=' the attacker could easily log in and see the user data of another user in the database.

One solution for this problem would be to cleanse your input of “dangerous” characters like the single quote, but this is tedious and it is likely that you overlook something or break the application e.g. for users with names like “D’Arcy”. A better solution is to use the escaping functions provided by PostgreSQL which are available as methods on the DB object:

>>> login = "D'Arcy"
>>> db.escape_string(login)
"D''Arcy"

As you see, DB.escape_string() has doubled the single quote which is the right thing to do in SQL. However, there are better ways of passing parameters to the query, without having to manually escape them. If you pass the parameters as positional arguments to DB.query(), then PyGreSQL will send them to the database separately, without the need for quoting them inside the SQL command, and without the problems inherent with that process. In this case you must put placeholders of the form $1, $2 etc. in the SQL command in place of the parameters that should go there. For instance:

>>> sql = "SELECT * FROM user_table WHERE login = $1 AND passwd = $2"
>>> db.query(sql, login, passwd).getresult()[0]

That’s much better. So please always keep the following warning in mind:

Warning

Remember to never insert parameters directly into your queries using the % operator. Always pass the parameters separately.

If you like the % format specifications of Python better than the placeholders used by PostgreSQL, there is still a way to use them, via the DB.query_formatted() method:

>>> sql = "SELECT * FROM user_table WHERE login = %s AND passwd = %s"
>>> db.query_formatted(sql, (login, passwd)).getresult()[0]

Note that we need to pass the parameters not as positional arguments here, but as a single tuple. Also note again that we did not use the % operator of Python to format the SQL string, we just used the %s format specifications of Python and let PyGreSQL care about the formatting. Even better, you can also pass the parameters as a dictionary if you use the DB.query_formatted() method:

>>> sql = """SELECT * FROM user_table
...     WHERE login = %(login)s AND passwd = %(passwd)s"""
>>> parameters = dict(login=login, passwd=passwd)
>>> db.query_formatted(sql, parameters).getresult()[0]

Here is another example:

>>> sql = "SELECT 'Hello, ' || %s || '!'"
>>> db.query_formatted(sql, (login,)).getresult()[0]

You would think that the following even simpler example should work, too:

>>> sql = "SELECT %s"
>>> db.query_formatted(sql, (login,)).getresult()[0]
ProgrammingError: Could not determine data type of parameter $1

The issue here is that DB.query_formatted() by default still uses PostgreSQL parameters, transforming the Python style %s placeholder into a $1 placeholder, and sending the login name separately from the query. In the query we looked at before, the concatenation with other strings made it clear that it should be interpreted as a string. This simple query however does not give PostgreSQL a clue what data type the $1 placeholder stands for.

This is different when you are embedding the login name directly into the query instead of passing it as parameter to PostgreSQL. You can achieve this by setting the inline parameter of DB.query_formatted(), like so:

>>> sql = "SELECT %s"
>>> db.query_formatted(sql, (login,), inline=True).getresult()[0]

Another way of making this query work while still sending the parameters separately is to simply cast the parameter values:

>>> sql = "SELECT %s::text"
>>> db.query_formatted(sql, (login,), inline=False).getresult()[0]

In real world examples you will rarely have to cast your parameters like that, since in an INSERT statement or a WHERE clause comparing the parameter to a table column the data type will be clear from the context.

When binding the parameters to a query, PyGreSQL not only adapts the basic types like int, float, bool and str, but also tries to make sense of Python lists and tuples.

Lists are adapted as PostgreSQL arrays:

>>> params = dict(array=[[1, 2],[3, 4]])
>>> db.query_formatted("SELECT %(array)s::int[]", params).getresult()[0][0]
[[1, 2], [3, 4]]

Note that again we only need to cast the array parameter or use inline parameters because this simple query does not provide enough context. Also note that the query gives the value back as Python lists again. This is achieved by the typecasting mechanism explained in the next section.

Tuples are adapted as PostgreSQL composite types. If you use inline parameters, they can also be used with the IN syntax.

Let’s think of a more real world example again where we create a table with a composite type in PostgreSQL:

CREATE TABLE on_hand (
    item      inventory_item,
    count     integer)

We assume the composite type inventory_item has been created like this:

CREATE TYPE inventory_item AS (
    name            text,
    supplier_id     integer,
    price           numeric)

In Python we can use a named tuple as an equivalent to this PostgreSQL type:

>>> from collections import namedtuple
>>> inventory_item = namedtuple(
...     'inventory_item', ['name', 'supplier_id', 'price'])

Using the automatic adaptation of Python tuples, an item can now be inserted into the database and then read back as follows:

>>> db.query_formatted("INSERT INTO on_hand VALUES (%(item)s, %(count)s)",
...     dict(item=inventory_item('fuzzy dice', 42, 1.99), count=1000))
>>> db.query("SELECT * FROM on_hand").getresult()[0][0]
Row(item=inventory_item(name='fuzzy dice', supplier_id=42,
        price=Decimal('1.99')), count=1000)

The DB.insert() method provides a simpler way to achieve the same:

>>> row = dict(item=inventory_item('fuzzy dice', 42, 1.99), count=1000)
>>> db.insert('on_hand', row)
{'count': 1000,  'item': inventory_item(name='fuzzy dice',
        supplier_id=42, price=Decimal('1.99'))}

Perhaps we want to use custom Python classes instead of named tuples to hold our values:

>>> class InventoryItem:
...
...     def __init__(self, name, supplier_id, price):
...         self.name = name
...         self.supplier_id = supplier_id
...         self.price = price
...
...     def __str__(self):
...         return '%s (from %s, at $%s)' % (
...             self.name, self.supplier_id, self.price)

But when we try to insert an instance of this class in the same way, we will get an error. This is because PyGreSQL tries to pass the string representation of the object as a parameter to PostgreSQL, but this is just a human readable string and not useful for PostgreSQL to build a composite type. However, it is possible to make such custom classes adapt themselves to PostgreSQL by adding a “magic” method with the name __pg_str__, like so:

>>> class InventoryItem:
...
...     ...
...
...     def __str__(self):
...         return '%s (from %s, at $%s)' % (
...             self.name, self.supplier_id, self.price)
...
...     def __pg_str__(self, typ):
...         return (self.name, self.supplier_id, self.price)

Now you can insert class instances the same way as you insert named tuples. You can even make these objects adapt to different types in different ways:

>>> class InventoryItem:
...
...     ...
...
...     def __pg_str__(self, typ):
...         if typ == 'text':
...             return str(self)
...        return (self.name, self.supplier_id, self.price)
...
>>> db.query("ALTER TABLE on_hand ADD COLUMN remark varchar")
>>> item=InventoryItem('fuzzy dice', 42, 1.99)
>>> row = dict(item=item, remark=item, count=1000)
>>> db.insert('on_hand', row)
{'count': 1000, 'item': inventory_item(name='fuzzy dice',
    supplier_id=42, price=Decimal('1.99')),
    'remark': 'fuzzy dice (from 42, at $1.99)'}

There is also another “magic” method __pg_repr__ which does not take the typ parameter. That method is used instead of __pg_str__ when passing parameters inline. You must be more careful when using __pg_repr__, because it must return a properly escaped string that can be put literally inside the SQL. The only exception is when you return a tuple or list, because these will be adapted and properly escaped by PyGreSQL again.

Typecasting to Python¶

As you noticed, PyGreSQL automatically converted the PostgreSQL data to suitable Python objects when returning values via the DB.get(), Query.getresult() and similar methods. This is done by the use of built-in typecast functions.

If you want to use different typecast functions or add your own if no built-in typecast function is available, then this is possible using the set_typecast() function. With the get_typecast() function you can check which function is currently set. If no typecast function is set, then PyGreSQL will return the raw strings from the database.

For instance, you will find that PyGreSQL uses the normal int function to cast PostgreSQL int4 type values to Python:

>>> pg.get_typecast('int4')
int

In the classic PyGreSQL module, the typecasting for these basic types is always done internally by the C extension module for performance reasons. We can set a different typecast function for int4, but it will not become effective, the C module continues to use its internal typecasting.

However, we can add new typecast functions for the database types that are not supported by the C module. For example, we can create a typecast function that casts items of the composite PostgreSQL type used as example in the previous section to instances of the corresponding Python class.

To do this, at first we get the default typecast function that PyGreSQL has created for the current DB connection. This default function casts composite types to named tuples, as we have seen in the section before. We can grab it from the DB.dbtypes object as follows:

>>> cast_tuple = db.dbtypes.get_typecast('inventory_item')

Now we can create a new typecast function that converts the tuple to an instance of our custom class:

>>> cast_item = lambda value: InventoryItem(*cast_tuple(value))

Finally, we set this typecast function, either globally with set_typecast(), or locally for the current connection like this:

>>> db.dbtypes.set_typecast('inventory_item', cast_item)

Now we can get instances of our custom class directly from the database:

>>> item = db.query("SELECT * FROM on_hand").getresult()[0][0]
>>> str(item)
'fuzzy dice (from 42, at $1.99)'

Note that some of the typecast functions used by the C module are configurable with separate module level functions, such as set_decimal(), set_bool() or set_jsondecode(). You need to use these instead of set_typecast() if you want to change the behavior of the C module.

Also note that after changing global typecast functions with set_typecast(), you may need to run db.dbtypes.reset_typecast() to make these changes effective on connections that were already open.

As one last example, let us try to typecast the geometric data type circle of PostgreSQL into a SymPy Circle object. Let’s assume we have created and populated a table with two circles, like so:

CREATE TABLE circle (
    name varchar(8) primary key, circle circle);
INSERT INTO circle VALUES ('C1', '<(2, 3), 3>');
INSERT INTO circle VALUES ('C2', '<(1, -1), 4>');

With PostgreSQL we can easily calculate that these two circles overlap:

>>> q = db.query("""SELECT c1.circle && c2.circle
...     FROM circle c1, circle c2
...     WHERE c1.name = 'C1' AND c2.name = 'C2'""")
>>> q.getresult()[0][0]
True

However, calculating the intersection points between the two circles using the # operator does not work (at least not as of PostgreSQL version 11). So let’s resort to SymPy to find out. To ease importing circles from PostgreSQL to SymPy, we create and register the following typecast function:

>>> from sympy import Point, Circle
>>>
>>> def cast_circle(s):
...     p, r = s[1:-1].split(',')
...     p = p[1:-1].split(',')
...     return Circle(Point(float(p[0]), float(p[1])), float(r))
...
>>> pg.set_typecast('circle', cast_circle)

Now we can import the circles in the table into Python simply using:

>>> circle = db.get_as_dict('circle', scalar=True)

The result is a dictionary mapping circle names to SymPy Circle objects. We can verify that the circles have been imported correctly:

>>> circle['C1']
Circle(Point(2, 3), 3.0)
>>> circle['C2']
Circle(Point(1, -1), 4.0)

Finally we can find the exact intersection points with SymPy:

>>> circle['C1'].intersection(circle['C2'])
[Point(29/17 + 64564173230121*sqrt(17)/100000000000000,
    -80705216537651*sqrt(17)/500000000000000 + 31/17),
 Point(-64564173230121*sqrt(17)/100000000000000 + 29/17,
    80705216537651*sqrt(17)/500000000000000 + 31/17)]
PyGreSQL-5.1/docs/_build/html/contents/pg/large_objects.html0000644000175100077410000006263613470245535024032 0ustar darcypyg00000000000000 LargeObject – Large Objects — PyGreSQL 5.1

LargeObject – Large Objects¶

class pg.LargeObject¶

Objects that are instances of the class LargeObject are used to handle all the requests concerning a PostgreSQL large object. These objects embed and hide all the “recurrent” variables (object OID and connection), exactly in the same way Connection instances do, thus only keeping significant parameters in function calls. The LargeObject instance keeps a reference to the Connection object used for its creation, sending requests through it with its parameters. Any modification but dereferencing the Connection object will thus affect the LargeObject instance. Dereferencing the initial Connection object is not a problem since Python won’t deallocate it before the LargeObject instance dereferences it. All functions return a generic error message on call error, whatever the exact error was. The error attribute of the object allows you to get the exact error message.

See also the PostgreSQL programmer’s guide for more information about the large object interface.

open – open a large object¶

LargeObject.open(mode)¶

Open a large object

Parameters:

mode (int) – open mode definition

Return type:

None

Raises:
  • TypeError – invalid connection, bad parameter type, or too many parameters
  • IOError – already opened object, or open error

This method opens a large object for reading/writing, in the same way as the Unix open() function. The mode value can be obtained by OR-ing the constants defined in the pg module (INV_READ, INV_WRITE).

close – close a large object¶

LargeObject.close()¶

Close a large object

Return type:

None

Raises:
  • TypeError – invalid connection
  • TypeError – too many parameters
  • IOError – object is not opened, or close error

This method closes a previously opened large object, in the same way as the Unix close() function.

size – get the large object size¶

LargeObject.size()¶

Return the large object size

Returns:

the large object size

Return type:

int

Raises:
  • TypeError – invalid connection or invalid object
  • TypeError – too many parameters
  • IOError – object is not opened, or seek/tell error

This (composite) method allows you to get the size of a large object. It was implemented because this function is very useful for a web-interfaced database. Currently, the large object needs to be opened first.

export – save a large object to a file¶

LargeObject.export(name)¶

Export a large object to a file

Parameters:

name (str) – file to be created

Return type:

None

Raises:
  • TypeError – invalid connection or invalid object, bad parameter type, or too many parameters
  • IOError – object is not closed, or export error

This method allows you to dump the content of a large object in a very simple way. The exported file is created on the host of the program, not the server host.

Object attributes¶

LargeObject objects define a read-only set of attributes that allow to get some information about it. These attributes are:

LargeObject.oid¶

the OID associated with the large object (int)

LargeObject.pgcnx¶

the Connection object associated with the large object

LargeObject.error¶

the last warning/error message of the connection (str)

Warning

In multi-threaded environments, LargeObject.error may be modified by another thread using the same Connection. Remember these objects are shared, not duplicated. You should provide some locking if you want to check this. The LargeObject.oid attribute is very interesting, because it allows you to reuse the OID later, creating the LargeObject object with a Connection.getlo() method call.

PyGreSQL-5.1/docs/_build/html/contents/pg/index.html0000644000175100077410000005555313470245534022335 0ustar darcypyg00000000000000 pg — The Classic PyGreSQL Interface — PyGreSQL 5.1

pg — The Classic PyGreSQL Interface¶

Contents¶

PyGreSQL-5.1/docs/_build/html/contents/pg/db_wrapper.html0000644000175100077410000032406213470245534023345 0ustar darcypyg00000000000000 The DB wrapper class — PyGreSQL 5.1

The DB wrapper class¶

class pg.DB¶

The Connection methods are wrapped in the class DB which also adds convenient higher level methods for working with the database. It also serves as a context manager for the connection. The preferred way to use this module is as follows:

import pg

with pg.DB(...) as db:  # for parameters, see below
    for r in db.query(  # just for example
            "SELECT foo, bar FROM foo_bar_table WHERE foo !~ bar"
            ).dictresult():
        print('%(foo)s %(bar)s' % r)

This class can be subclassed as in this example:

import pg

class DB_ride(pg.DB):
    """Ride database wrapper

    This class encapsulates the database functions and the specific
    methods for the ride database."""

def __init__(self):
    """Open a database connection to the rides database"""
    pg.DB.__init__(self, dbname='ride')
    self.query("SET DATESTYLE TO 'ISO'")

[Add or override methods here]

The following describes the methods and variables of this class.

Initialization¶

The DB class is initialized with the same arguments as the connect() function described above. It also initializes a few internal variables. The statement db = DB() will open the local database with the name of the user just like connect() does.

You can also initialize the DB class with an existing pg or pgdb connection. Pass this connection as a single unnamed parameter, or as a single parameter named db. This allows you to use all of the methods of the DB class with a DB-API 2 compliant connection. Note that the Connection.close() and Connection.reopen() methods are inoperative in this case.

pkey – return the primary key of a table¶

DB.pkey(table)¶

Return the primary key of a table

Parameters:table (str) – name of table
Returns:Name of the field which is the primary key of the table
Return type:str
Raises:KeyError – the table does not have a primary key

This method returns the primary key of a table. Single primary keys are returned as strings unless you set the composite flag. Composite primary keys are always represented as tuples. Note that this raises a KeyError if the table does not have a primary key.

get_databases – get list of databases in the system¶

DB.get_databases()¶

Get the list of databases in the system

Returns:all databases in the system
Return type:list

Although you can do this with a simple select, it is added here for convenience.

get_relations – get list of relations in connected database¶

DB.get_relations([kinds][, system])¶

Get the list of relations in connected database

Parameters:
  • kinds (str) – a string or sequence of type letters
  • system (bool) – whether system relations should be returned
Returns:

all relations of the given kinds in the database

Return type:

list

This method returns the list of relations in the connected database. Although you can do this with a simple select, it is added here for convenience. You can select which kinds of relations you are interested in by passing type letters in the kinds parameter. The type letters are r = ordinary table, i = index, S = sequence, v = view, c = composite type, s = special, t = TOAST table. If kinds is None or an empty string, all relations are returned (this is also the default). If system is set to True, then system tables and views (temporary tables, toast tables, catalog views and tables) will be returned as well, otherwise they will be ignored.

get_tables – get list of tables in connected database¶

DB.get_tables([system])¶

Get the list of tables in connected database

Parameters:system (bool) – whether system tables should be returned
Returns:all tables in connected database
Return type:list

This is a shortcut for get_relations('r', system) that has been added for convenience.

get_attnames – get the attribute names of a table¶

DB.get_attnames(table)¶

Get the attribute names of a table

Parameters:table (str) – name of table
Returns:an ordered dictionary mapping attribute names to type names

Given the name of a table, digs out the set of attribute names.

Returns a read-only dictionary of attribute names (the names are the keys, the values are the names of the attributes’ types) with the column names in the proper order if you iterate over it.

By default, only a limited number of simple types will be returned. You can get the registered types instead, if enabled by calling the DB.use_regtypes() method.

has_table_privilege – check table privilege¶

DB.has_table_privilege(table, privilege)¶

Check whether current user has specified table privilege

Parameters:
  • table (str) – the name of the table
  • privilege (str) – privilege to be checked – default is ‘select’
Returns:

whether current user has specified table privilege

Return type:

bool

Returns True if the current user has the specified privilege for the table.

New in version 4.0.

get/set_parameter – get or set run-time parameters¶

DB.get_parameter(parameter)¶

Get the value of run-time parameters

Parameters:

parameter – the run-time parameter(s) to get

Returns:

the current value(s) of the run-time parameter(s)

Return type:

str, list or dict

Raises:
  • TypeError – Invalid parameter type(s)
  • pg.ProgrammingError – Invalid parameter name(s)

If the parameter is a string, the return value will also be a string that is the current setting of the run-time parameter with that name.

You can get several parameters at once by passing a list, set or dict. When passing a list of parameter names, the return value will be a corresponding list of parameter settings. When passing a set of parameter names, a new dict will be returned, mapping these parameter names to their settings. Finally, if you pass a dict as parameter, its values will be set to the current parameter settings corresponding to its keys.

By passing the special name 'all' as the parameter, you can get a dict of all existing configuration parameters.

Note that you can request most of the important parameters also using Connection.parameter() which does not involve a database query, unlike DB.get_parameter() and DB.set_parameter().

New in version 4.2.

DB.set_parameter(parameter[, value][, local])¶

Set the value of run-time parameters

Parameters:
  • parameter – the run-time parameter(s) to set
  • value – the value to set
Raises:
  • TypeError – Invalid parameter type(s)
  • ValueError – Invalid value argument(s)
  • pg.ProgrammingError – Invalid parameter name(s) or values

If the parameter and the value are strings, the run-time parameter will be set to that value. If no value or None is passed as a value, then the run-time parameter will be restored to its default value.

You can set several parameters at once by passing a list of parameter names, together with a single value that all parameters should be set to or with a corresponding list of values. You can also pass the parameters as a set if you only provide a single value. Finally, you can pass a dict with parameter names as keys. In this case, you should not pass a value, since the values for the parameters will be taken from the dict.

By passing the special name 'all' as the parameter, you can reset all existing settable run-time parameters to their default values.

If you set local to True, then the command takes effect for only the current transaction. After DB.commit() or DB.rollback(), the session-level setting takes effect again. Setting local to True will appear to have no effect if it is executed outside a transaction, since the transaction will end immediately.

New in version 4.2.

begin/commit/rollback/savepoint/release – transaction handling¶

DB.begin([mode])¶

Begin a transaction

Parameters:mode (str) – an optional transaction mode such as ‘READ ONLY’

This initiates a transaction block, that is, all following queries will be executed in a single transaction until DB.commit() or DB.rollback() is called.

New in version 4.1.

DB.start()¶

This is the same as the DB.begin() method.

DB.commit()¶

Commit a transaction

This commits the current transaction.

DB.end()¶

This is the same as the DB.commit() method.

New in version 4.1.

DB.rollback([name])¶

Roll back a transaction

Parameters:name (str) – optionally, roll back to the specified savepoint

This rolls back the current transaction, discarding all its changes.

DB.abort()¶

This is the same as the DB.rollback() method.

New in version 4.2.

DB.savepoint(name)¶

Define a new savepoint

Parameters:name (str) – the name to give to the new savepoint

This establishes a new savepoint within the current transaction.

New in version 4.1.

DB.release(name)¶

Destroy a savepoint

Parameters:name (str) – the name of the savepoint to destroy

This destroys a savepoint previously defined in the current transaction.

New in version 4.1.

get – get a row from a database table or view¶

DB.get(table, row[, keyname])¶

Get a row from a database table or view

Parameters:
  • table (str) – name of table or view
  • row – either a dictionary or the value to be looked up
  • keyname (str) – name of field to use as key (optional)
Returns:

A dictionary - the keys are the attribute names, the values are the row values.

Raises:
  • pg.ProgrammingError – table has no primary key or missing privilege
  • KeyError – missing key value for the row

This method is the basic mechanism to get a single row. It assumes that the keyname specifies a unique row. It must be the name of a single column or a tuple of column names. If keyname is not specified, then the primary key for the table is used.

If row is a dictionary, then the value for the key is taken from it. Otherwise, the row must be a single value or a tuple of values corresponding to the passed keyname or primary key. The fetched row from the table will be returned as a new dictionary or used to replace the existing values if the row was passed as a dictionary.

The OID is also put into the dictionary if the table has one, but in order to allow the caller to work with multiple tables, it is munged as oid(table) using the actual name of the table.

Note that since PyGreSQL 5.0 this will return the value of an array type column as a Python list by default.

insert – insert a row into a database table¶

DB.insert(table[, row][, col=val, ...])¶

Insert a row into a database table

Parameters:
  • table (str) – name of table
  • row (dict) – optional dictionary of values
  • col – optional keyword arguments for updating the dictionary
Returns:

the inserted values in the database

Return type:

dict

Raises:

pg.ProgrammingError – missing privilege or conflict

This method inserts a row into a table. If the optional dictionary is not supplied then the required values must be included as keyword/value pairs. If a dictionary is supplied then any keywords provided will be added to or replace the entry in the dictionary.

The dictionary is then reloaded with the values actually inserted in order to pick up values modified by rules, triggers, etc.

Note that since PyGreSQL 5.0 it is possible to insert a value for an array type column by passing it as a Python list.

update – update a row in a database table¶

DB.update(table[, row][, col=val, ...])¶

Update a row in a database table

Parameters:
  • table (str) – name of table
  • row (dict) – optional dictionary of values
  • col – optional keyword arguments for updating the dictionary
Returns:

the new row in the database

Return type:

dict

Raises:
  • pg.ProgrammingError – table has no primary key or missing privilege
  • KeyError – missing key value for the row

Similar to insert, but updates an existing row. The update is based on the primary key of the table or the OID value as munged by DB.get() or passed as keyword. The OID will take precedence if provided, so that it is possible to update the primary key itself.

The dictionary is then modified to reflect any changes caused by the update due to triggers, rules, default values, etc.

Like insert, the dictionary is optional and updates will be performed on the fields in the keywords. There must be an OID or primary key either specified using the 'oid' keyword or in the dictionary, in which case the OID must be munged.

upsert – insert a row with conflict resolution¶

DB.upsert(table[, row][, col=val, ...])¶

Insert a row into a database table with conflict resolution

Parameters:
  • table (str) – name of table
  • row (dict) – optional dictionary of values
  • col – optional keyword arguments for specifying the update
Returns:

the new row in the database

Return type:

dict

Raises:

pg.ProgrammingError – table has no primary key or missing privilege

This method inserts a row into a table, but instead of raising a ProgrammingError exception in case of violating a constraint or unique index, an update will be executed instead. This will be performed as a single atomic operation on the database, so race conditions can be avoided.

Like the insert method, the first parameter is the name of the table and the second parameter can be used to pass the values to be inserted as a dictionary.

Unlike the insert and update methods, keyword parameters are not used to modify the dictionary, but to specify which columns shall be updated in case of a conflict, and in which way:

A value of False or None means the column shall not be updated, a value of True means the column shall be updated with the value that has been proposed for insertion, i.e. has been passed as value in the dictionary. Columns that are not specified by keywords but appear as keys in the dictionary are also updated like in the case keywords had been passed with the value True.

So if in the case of a conflict you want to update every column that has been passed in the dictionary d , you would call upsert(table, d). If you don’t want to do anything in case of a conflict, i.e. leave the existing row as it is, call upsert(table, d, **dict.fromkeys(d)).

If you need more fine-grained control of what gets updated, you can also pass strings in the keyword parameters. These strings will be used as SQL expressions for the update columns. In these expressions you can refer to the value that already exists in the table by writing the table prefix included. before the column name, and you can refer to the value that has been proposed for insertion by writing excluded. as table prefix.

The dictionary is modified in any case to reflect the values in the database after the operation has completed.

Note

The method uses the PostgreSQL “upsert” feature which is only available since PostgreSQL 9.5. With older PostgreSQL versions, you will get a ProgrammingError if you use this method.

New in version 5.0.

query – execute a SQL command string¶

DB.query(command[, arg1[, arg2, ...]])¶

Execute a SQL command string

Parameters:
  • command (str) – SQL command
  • arg* – optional positional arguments
Returns:

result values

Return type:

Query, None

Raises:
  • TypeError – bad argument type, or too many arguments
  • TypeError – invalid connection
  • ValueError – empty SQL query or lost connection
  • pg.ProgrammingError – error in query
  • pg.InternalError – error during query processing

Similar to the Connection function with the same name, except that positional arguments can be passed either as a single list or tuple, or as individual positional arguments. These arguments will then be used as parameter values of parameterized queries.

Example:

name = input("Name? ")
phone = input("Phone? ")
rows = db.query("update employees set phone=$2 where name=$1",
    name, phone).getresult()[0][0]
# or
rows = db.query("update employees set phone=$2 where name=$1",
    (name, phone)).getresult()[0][0]

query_formatted – execute a formatted SQL command string¶

DB.query_formatted(command[, parameters][, types][, inline])¶

Execute a formatted SQL command string

Parameters:
  • command (str) – SQL command
  • parameters (tuple, list or dict) – the values of the parameters for the SQL command
  • types (tuple, list or dict) – optionally, the types of the parameters
  • inline (bool) – whether the parameters should be passed in the SQL
Return type:

Query, None

Raises:
  • TypeError – bad argument type, or too many arguments
  • TypeError – invalid connection
  • ValueError – empty SQL query or lost connection
  • pg.ProgrammingError – error in query
  • pg.InternalError – error during query processing

Similar to DB.query(), but using Python format placeholders of the form %s or %(names)s instead of PostgreSQL placeholders of the form $1. The parameters must be passed as a tuple, list or dict. You can also pass a corresponding tuple, list or dict of database types in order to format the parameters properly in case there is ambiguity.

If you set inline to True, the parameters will be sent to the database embedded in the SQL command, otherwise they will be sent separately.

If you set inline to True or don’t pass any parameters, the command string can also include multiple SQL commands (separated by semicolons). You will only get the return value for the last command in this case.

Note that the adaption and conversion of the parameters causes a certain performance overhead. Depending on the type of values, the overhead can be smaller for inline queries or if you pass the types of the parameters, so that they don’t need to be guessed from the values. For best performance, we recommend using a raw DB.query() or DB.query_prepared() if you are executing many of the same operations with different parameters.

Example:

name = input("Name? ")
phone = input("Phone? ")
rows = db.query_formatted(
    "update employees set phone=%s where name=%s",
    (phone, name)).getresult()[0][0]
# or
rows = db.query_formatted(
    "update employees set phone=%(phone)s where name=%(name)s",
    dict(name=name, phone=phone)).getresult()[0][0]

query_prepared – execute a prepared statement¶

DB.query_prepared(name[, arg1[, arg2, ...]])¶

Execute a prepared statement

Parameters:
  • name (str) – name of the prepared statement
  • arg* – optional positional arguments
Returns:

result values

Return type:

Query, None

Raises:
  • TypeError – bad argument type, or too many arguments
  • TypeError – invalid connection
  • ValueError – empty SQL query or lost connection
  • pg.ProgrammingError – error in query
  • pg.InternalError – error during query processing
  • pg.OperationalError – prepared statement does not exist

This method works like the DB.query() method, except that instead of passing the SQL command, you pass the name of a prepared statement created previously using the DB.prepare() method.

Passing an empty string or None as the name will execute the unnamed statement (see warning about the limited lifetime of the unnamed statement in DB.prepare()).

The functionality of this method is equivalent to that of the SQL EXECUTE command. Note that calling EXECUTE would require parameters to be sent inline, and be properly sanitized (escaped, quoted).

New in version 5.1.

prepare – create a prepared statement¶

DB.prepare(name, command)¶

Create a prepared statement

Parameters:
  • command (str) – SQL command
  • name (str) – name of the prepared statement
Return type:

None

Raises:
  • TypeError – bad argument types, or wrong number of arguments
  • TypeError – invalid connection
  • pg.ProgrammingError – error in query or duplicate query

This method creates a prepared statement with the specified name for later execution of the given command with the DB.query_prepared() method.

If the name is empty or None, the unnamed prepared statement is used, in which case any pre-existing unnamed statement is replaced.

Otherwise, if a prepared statement with the specified name is already defined in the current database session, a pg.ProgrammingError is raised.

The SQL command may optionally contain positional parameters of the form $1, $2, etc instead of literal data. The corresponding values must then be passed to the Connection.query_prepared() method as positional arguments.

The functionality of this method is equivalent to that of the SQL PREPARE command.

Example:

db.prepare('change phone',
    "update employees set phone=$2 where ein=$1")
while True:
    ein = input("Employee ID? ")
    if not ein:
        break
    phone = input("Phone? ")
    db.query_prepared('change phone', ein, phone)

Note

We recommend always using named queries, since unnamed queries have a limited lifetime and can be automatically replaced or destroyed by various operations on the database.

New in version 5.1.

describe_prepared – describe a prepared statement¶

DB.describe_prepared([name])¶

Describe a prepared statement

Parameters:

name (str) – name of the prepared statement

Return type:

Query

Raises:
  • TypeError – bad argument type, or too many arguments
  • TypeError – invalid connection
  • pg.OperationalError – prepared statement does not exist

This method returns a Query object describing the prepared statement with the given name. You can also pass an empty name in order to describe the unnamed statement. Information on the fields of the corresponding query can be obtained through the Query.listfields(), Query.fieldname() and Query.fieldnum() methods.

New in version 5.1.

delete_prepared – delete a prepared statement¶

DB.delete_prepared([name])¶

Delete a prepared statement

Parameters:

name (str) – name of the prepared statement

Return type:

None

Raises:
  • TypeError – bad argument type, or too many arguments
  • TypeError – invalid connection
  • pg.OperationalError – prepared statement does not exist

This method deallocates a previously prepared SQL statement with the given name, or deallocates all prepared statements if you do not specify a name. Note that prepared statements are always deallocated automatically when the current session ends.

New in version 5.1.

clear – clear row values in memory¶

DB.clear(table[, row])¶

Clear row values in memory

Parameters:
  • table (str) – name of table
  • row (dict) – optional dictionary of values
Returns:

an empty row

Return type:

dict

This method clears all the attributes to values determined by the types. Numeric types are set to 0, Booleans are set to False, and everything else is set to the empty string. If the row argument is present, it is used as the row dictionary and any entries matching attribute names are cleared with everything else left unchanged.

If the dictionary is not supplied a new one is created.

delete – delete a row from a database table¶

DB.delete(table[, row][, col=val, ...])¶

Delete a row from a database table

Parameters:
  • table (str) – name of table
  • row (dict) – optional dictionary of values
  • col – optional keyword arguments for updating the dictionary
Return type:

int

Raises:
  • pg.ProgrammingError – table has no primary key, row is still referenced or missing privilege
  • KeyError – missing key value for the row

This method deletes the row from a table. It deletes based on the primary key of the table or the OID value as munged by DB.get() or passed as keyword. The OID will take precedence if provided.

The return value is the number of deleted rows (i.e. 0 if the row did not exist and 1 if the row was deleted).

Note that if the row cannot be deleted because e.g. it is still referenced by another table, this method will raise a ProgrammingError.

truncate – quickly empty database tables¶

DB.truncate(table[, restart][, cascade][, only])¶

Empty a table or set of tables

Parameters:
  • table (str, list or set) – the name of the table(s)
  • restart (bool) – whether table sequences should be restarted
  • cascade (bool) – whether referenced tables should also be truncated
  • only (bool or list) – whether only parent tables should be truncated

This method quickly removes all rows from the given table or set of tables. It has the same effect as an unqualified DELETE on each table, but since it does not actually scan the tables it is faster. Furthermore, it reclaims disk space immediately, rather than requiring a subsequent VACUUM operation. This is most useful on large tables.

If restart is set to True, sequences owned by columns of the truncated table(s) are automatically restarted. If cascade is set to True, it also truncates all tables that have foreign-key references to any of the named tables. If the parameter only is not set to True, all the descendant tables (if any) will also be truncated. Optionally, a * can be specified after the table name to explicitly indicate that descendant tables are included. If the parameter table is a list, the parameter only can also be a list of corresponding boolean values.

New in version 4.2.

get_as_list/dict – read a table as a list or dictionary¶

DB.get_as_list(table[, what][, where][, order][, limit][, offset][, scalar])¶

Get a table as a list

Parameters:
  • table (str) – the name of the table (the FROM clause)
  • what (str, list, tuple or None) – column(s) to be returned (the SELECT clause)
  • where (str, list, tuple or None) – conditions(s) to be fulfilled (the WHERE clause)
  • order (str, list, tuple, False or None) – column(s) to sort by (the ORDER BY clause)
  • limit (int) – maximum number of rows returned (the LIMIT clause)
  • offset (int) – number of rows to be skipped (the OFFSET clause)
  • scalar (bool) – whether only the first column shall be returned
Returns:

the content of the table as a list

Return type:

list

Raises:

TypeError – the table name has not been specified

This gets a convenient representation of the table as a list of named tuples in Python. You only need to pass the name of the table (or any other SQL expression returning rows). Note that by default this will return the full content of the table which can be huge and overflow your memory. However, you can control the amount of data returned using the other optional parameters.

The parameter what can restrict the query to only return a subset of the table columns. The parameter where can restrict the query to only return a subset of the table rows. The specified SQL expressions all need to be fulfilled for a row to get into the result. The parameter order specifies the ordering of the rows. If no ordering is specified, the result will be ordered by the primary key(s) or all columns if no primary key exists. You can set order to False if you don’t care about the ordering. The parameters limit and offset specify the maximum number of rows returned and a number of rows skipped over.

If you set the scalar option to True, then instead of the named tuples you will get the first items of these tuples. This is useful if the result has only one column anyway.

New in version 5.0.

DB.get_as_dict(table[, keyname][, what][, where][, order][, limit][, offset][, scalar])¶

Get a table as a dictionary

Parameters:
  • table (str) – the name of the table (the FROM clause)
  • keyname (str, list, tuple or None) – column(s) to be used as key(s) of the dictionary
  • what (str, list, tuple or None) – column(s) to be returned (the SELECT clause)
  • where (str, list, tuple or None) – conditions(s) to be fulfilled (the WHERE clause)
  • order (str, list, tuple, False or None) – column(s) to sort by (the ORDER BY clause)
  • limit (int) – maximum number of rows returned (the LIMIT clause)
  • offset (int) – number of rows to be skipped (the OFFSET clause)
  • scalar (bool) – whether only the first column shall be returned
Returns:

the content of the table as a dictionary

Return type:

dict or OrderedDict

Raises:
  • TypeError – the table name has not been specified
  • KeyError – keyname(s) are invalid or not part of the result
  • pg.ProgrammingError – no keyname(s) and table has no primary key

This method is similar to DB.get_as_list(), but returns the table as a Python dict instead of a Python list, which can be even more convenient. The primary key column(s) of the table will be used as the keys of the dictionary, while the other column(s) will be the corresponding values. The keys will be named tuples if the table has a composite primary key. The rows will be also named tuples unless the scalar option has been set to True. With the optional parameter keyname you can specify a different set of columns to be used as the keys of the dictionary.

If the Python version supports it, the dictionary will be an OrderedDict using the order specified with the order parameter or the key column(s) if not specified. You can set order to False if you don’t care about the ordering. In this case the returned dictionary will be an ordinary one.

New in version 5.0.

escape_literal/identifier/string/bytea – escape for SQL¶

The following methods escape text or binary strings so that they can be inserted directly into an SQL command. Except for DB.escape_bytea(), you don't need to call these methods for the strings passed as parameters to DB.query(). You also don't need to call any of these methods when storing data using DB.insert() and similar.

DB.escape_literal(string)¶

Escape a string for use within SQL as a literal constant

Parameters:string (str) – the string that is to be escaped
Returns:the escaped string
Return type:str

This method escapes a string for use within an SQL command. This is useful when inserting data values as literal constants in SQL commands. Certain characters (such as quotes and backslashes) must be escaped to prevent them from being interpreted specially by the SQL parser.

New in version 4.1.

DB.escape_identifier(string)¶

Escape a string for use within SQL as an identifier

Parameters:string (str) – the string that is to be escaped
Returns:the escaped string
Return type:str

This method escapes a string for use as an SQL identifier, such as a table, column, or function name. This is useful when a user-supplied identifier might contain special characters that would otherwise be misinterpreted by the SQL parser, or when the identifier might contain upper case characters whose case should be preserved.

New in version 4.1.

DB.escape_string(string)¶

Escape a string for use within SQL

Parameters:string (str) – the string that is to be escaped
Returns:the escaped string
Return type:str

Similar to the module function pg.escape_string() with the same name, but the behavior of this method is adjusted depending on the connection properties (such as character encoding).

DB.escape_bytea(datastring)¶

Escape binary data for use within SQL as type bytea

Parameters:datastring (str) – string containing the binary data that is to be escaped
Returns:the escaped string
Return type:str

Similar to the module function pg.escape_bytea() with the same name, but the behavior of this method is adjusted depending on the connection properties (in particular, whether standard-conforming strings are enabled).

unescape_bytea – unescape data retrieved from the database¶

DB.unescape_bytea(string)¶

Unescape bytea data that has been retrieved as text

Parameters:string (str) – the bytea data string that has been retrieved as text
Returns:byte string containing the binary data
Return type:bytes

Converts an escaped string representation of binary data stored as bytea into the raw byte string representing the binary data – this is the reverse of DB.escape_bytea(). Since the Query results will already return unescaped byte strings, you normally don’t have to use this method.

encode/decode_json – encode and decode JSON data¶

The following methods can be used to encode and decode data in JSON format.

DB.encode_json(obj)¶

Encode a Python object for use within SQL as type json or jsonb

Parameters:obj (dict, list or None) – Python object that shall be encoded to JSON format
Returns:string representation of the Python object in JSON format
Return type:str

This method serializes a Python object into a JSON formatted string that can be used within SQL. You don’t need to use this method on the data stored with DB.insert() and similar, only if you store the data directly as part of an SQL command or parameter with DB.query(). This is the same as the json.dumps() function from the standard library.

New in version 5.0.

DB.decode_json(string)¶

Decode json or jsonb data that has been retrieved as text

Parameters:string (str) – JSON formatted string shall be decoded into a Python object
Returns:Python object representing the JSON formatted string
Return type:dict, list or None

This method deserializes a JSON formatted string retrieved as text from the database to a Python object. You normally don’t need to use this method as JSON data is automatically decoded by PyGreSQL. If you don’t want the data to be decoded, then you can cast json or jsonb columns to text in PostgreSQL or you can set the decoding function to None or a different function using pg.set_jsondecode(). By default this is the same as the json.loads() function from the standard library.

New in version 5.0.

use_regtypes – choose usage of registered type names¶

DB.use_regtypes([regtypes])¶

Determine whether registered type names shall be used

Parameters:regtypes (bool) – if passed, set whether registered type names shall be used
Returns:whether registered type names are used

The DB.get_attnames() method can return either simplified "classic" type names (the default) or more fine-grained "registered" type names. Which kind of type names is used can be changed by calling DB.use_regtypes(). If you pass a boolean, it sets whether registered type names shall be used. The method can also be used to check through its return value whether registered type names are currently used.

New in version 4.1.

notification_handler – create a notification handler¶

class DB.notification_handler(event, callback[, arg_dict][, timeout][, stop_event])¶

Create a notification handler instance

Parameters:
  • event (str) – the name of an event to listen for
  • callback – a callback function
  • arg_dict (dict) – an optional dictionary for passing arguments
  • timeout (int, float or None) – the time-out when waiting for notifications
  • stop_event (str) – an optional different name to be used as stop event

This method creates a pg.NotificationHandler object using the DB connection as explained under The Notification Handler.

New in version 4.1.1.

Attributes of the DB wrapper class¶

DB.db¶

The wrapped Connection object

You normally don’t need this, since all of the members can be accessed from the DB wrapper class as well.

DB.dbname¶

The name of the database that the connection is using

DB.dbtypes¶

A dictionary with the various type names for the PostgreSQL types

This can be used for getting more information on the PostgreSQL database types or changing the typecast functions used for the connection. See the description of the DbTypes class for details.

New in version 5.0.

DB.adapter¶

A class with some helper functions for adapting parameters

This can be used for building queries with parameters. You normally will not need this, as you can use the DB.query_formatted method.

New in version 5.0.

PyGreSQL-5.1/docs/_build/html/contents/pg/notification.html0000644000175100077410000003637613470245535023717 0ustar darcypyg00000000000000 The Notification Handler — PyGreSQL 5.1

The Notification Handler¶

PyGreSQL comes with a client-side asynchronous notification handler that was based on the pgnotify module written by Ng Pheng Siong.

New in version 4.1.1.

Instantiating the notification handler¶

class pg.NotificationHandler(db, event, callback[, arg_dict][, timeout][, stop_event])¶

Create an instance of the notification handler

Parameters:
  • db (Connection) – the database connection
  • event (str) – the name of an event to listen for
  • callback – a callback function
  • arg_dict (dict) – an optional dictionary for passing arguments
  • timeout (int, float or None) – the time-out when waiting for notifications
  • stop_event (str) – an optional different name to be used as stop event

You can also create an instance of the NotificationHandler using the DB.notification_handler method. In this case you don't need to pass a database connection because the DB connection itself will be used as the database connection for the notification handler.

You must always pass the name of an event (notification channel) to listen for and a callback function.

You can also specify a dictionary arg_dict that will be passed as the single argument to the callback function, and a timeout value in seconds (a floating point number denotes fractions of seconds). If it is absent or None, the handler will never time out. If the time-out is reached, the callback function will be called with a single argument that is None. If you set the timeout to 0, the handler will poll notifications synchronously and return.

You can specify the name of the event that will be used to signal the handler to stop listening as stop_event. By default, it will be the event name prefixed with 'stop_'.

All of the parameters will be also available as attributes of the created notification handler object.

Invoking the notification handler¶

To invoke the notification handler, just call the instance without passing any parameters.

The handler is a loop that listens for notifications on the event and stop event channels. When either of these notifications is received, its associated pid, event and extra (the payload passed with the notification) are inserted into its arg_dict dictionary and the callback is invoked with this dictionary as a single argument. When the handler receives a stop event, it stops listening to both events and returns.

In the special case that the timeout of the handler has been set to 0, the handler will poll all events synchronously and return. Otherwise, it will keep listening until it receives a stop event.

Warning

If you run this loop in another thread, don’t use the same database connection for database operations in the main thread.

Sending notifications¶

You can send notifications by either running NOTIFY commands on the database directly, or using the following method:

NotificationHandler.notify([db][, stop][, payload])¶

Generate a notification

Parameters:
  • db (Connection) – the database connection for sending the notification
  • stop (bool) – whether to produce a normal event or a stop event
  • payload (str) – an optional payload to be sent with the notification

This method sends a notification event together with an optional payload. If you set the stop flag, a stop notification will be sent instead of a normal notification. This will cause the handler to stop listening.

Warning

If the notification handler is running in another thread, you must pass a different database connection since PyGreSQL database connections are not thread-safe.

Auxiliary methods¶

NotificationHandler.listen()¶

Start listening for the event and the stop event

This method is called implicitly when the handler is invoked.

NotificationHandler.unlisten()¶

Stop listening for the event and the stop event

This method is called implicitly when the handler receives a stop event or when it is closed or deleted.

NotificationHandler.close()¶

Stop listening and close the database connection

You can call this method instead of NotificationHandler.unlisten() if you want to close not only the handler, but also the database connection it was created with.

PyGreSQL-5.1/docs/_build/html/contents/pg/query.html0000644000175100077410000012207713470245535022370 0ustar darcypyg00000000000000 Query methods — PyGreSQL 5.1

Query methods¶

class pg.Query¶

The Query object returned by Connection.query() and DB.query() can be used as an iterable returning rows as tuples. You can also directly access row tuples using their index, and get the number of rows with the len() function. The Query class also provides the following methods for accessing the results of the query:

getresult – get query values as list of tuples¶

Query.getresult()¶

Get query values as list of tuples

Returns:

result values as a list of tuples

Return type:

list

Raises:
  • TypeError – too many (any) parameters
  • MemoryError – internal memory error

This method returns query results as a list of tuples. More information about this result may be accessed using Query.listfields(), Query.fieldname() and Query.fieldnum() methods.

Note that since PyGreSQL 5.0 this method will return the values of array type columns as Python lists.

Since PyGreSQL 5.1 the Query can be also used directly as an iterable sequence, i.e. you can iterate over the Query object to get the same tuples as returned by Query.getresult(). This is slightly more efficient than getting the full list of results, but note that the full result is always fetched from the server anyway when the query is executed.

You can also call len() on a query to find the number of rows in the result, and access row tuples using their index directly on the Query object.

dictresult/dictiter – get query values as dictionaries¶

Query.dictresult()¶

Get query values as list of dictionaries

Returns:

result values as a list of dictionaries

Return type:

list

Raises:
  • TypeError – too many (any) parameters
  • MemoryError – internal memory error

This method returns query results as a list of dictionaries which have the field names as keys.

If the query has duplicate field names, you will get the value for the field with the highest index in the query.

Note that since PyGreSQL 5.0 this method will return the values of array type columns as Python lists.

Query.dictiter()¶

Get query values as iterable of dictionaries

Returns:

result values as an iterable of dictionaries

Return type:

iterable

Raises:
  • TypeError – too many (any) parameters
  • MemoryError – internal memory error

This method returns query results as an iterable of dictionaries which have the field names as keys. This is slightly more efficient than getting the full list of results as dictionaries, but note that the full result is always fetched from the server anyway when the query is executed.

If the query has duplicate field names, you will get the value for the field with the highest index in the query.

New in version 5.1.

namedresult/namediter – get query values as named tuples¶

Query.namedresult()¶

Get query values as list of named tuples

Returns:

result values as a list of named tuples

Return type:

list

Raises:
  • TypeError – too many (any) parameters
  • TypeError – named tuples not supported
  • MemoryError – internal memory error

This method returns query results as a list of named tuples with proper field names.

Column names in the database that are not valid as field names for named tuples (particularly, names starting with an underscore) are automatically renamed to valid positional names.

Note that since PyGreSQL 5.0 this method will return the values of array type columns as Python lists.

New in version 4.1.

Query.namediter()¶

Get query values as iterable of named tuples

Returns:

result values as an iterable of named tuples

Return type:

iterable

Raises:
  • TypeError – too many (any) parameters
  • TypeError – named tuples not supported
  • MemoryError – internal memory error

This method returns query results as an iterable of named tuples with proper field names. This is slightly more efficient than getting the full list of results as named tuples, but note that the full result is always fetched from the server anyway when the query is executed.

Column names in the database that are not valid as field names for named tuples (particularly, names starting with an underscore) are automatically renamed to valid positional names.

New in version 5.1.

scalarresult/scalariter – get query values as scalars¶

Query.scalarresult()¶

Get first fields from query result as list of scalar values

Returns:

first fields from result as a list of scalar values

Return type:

list

Raises:
  • TypeError – too many (any) parameters
  • MemoryError – internal memory error

This method returns the first fields from the query results as a list of scalar values in the order returned by the server.

New in version 5.1.

Query.scalariter()¶

Get first fields from query result as iterable of scalar values

Returns:

first fields from result as an iterable of scalar values

Return type:

iterable

Raises:
  • TypeError – too many (any) parameters
  • MemoryError – internal memory error

This method returns the first fields from the query results as an iterable of scalar values in the order returned by the server. This is slightly more efficient than getting the full list of results as rows or scalar values, but note that the full result is always fetched from the server anyway when the query is executed.

New in version 5.1.

one/onedict/onenamed/onescalar – get one result of a query¶

Query.one()¶

Get one row from the result of a query as a tuple

Returns:

next row from the query results as a tuple of fields

Return type:

tuple or None

Raises:
  • TypeError – too many (any) parameters
  • MemoryError – internal memory error

Returns only one row from the result as a tuple of fields.

This method can be called multiple times to return more rows. It returns None if the result does not contain one more row.

New in version 5.1.

Query.onedict()¶

Get one row from the result of a query as a dictionary

Returns:

next row from the query results as a dictionary

Return type:

dict or None

Raises:
  • TypeError – too many (any) parameters
  • MemoryError – internal memory error

Returns only one row from the result as a dictionary with the field names used as the keys.

This method can be called multiple times to return more rows. It returns None if the result does not contain one more row.

New in version 5.1.

Query.onenamed()¶

Get one row from the result of a query as named tuple

Returns:

next row from the query results as a named tuple

Return type:

named tuple or None

Raises:
  • TypeError – too many (any) parameters
  • MemoryError – internal memory error

Returns only one row from the result as a named tuple with proper field names.

Column names in the database that are not valid as field names for named tuples (particularly, names starting with an underscore) are automatically renamed to valid positional names.

This method can be called multiple times to return more rows. It returns None if the result does not contain one more row.

New in version 5.1.

Query.onescalar()¶

Get one row from the result of a query as scalar value

Returns:

next row from the query results as a scalar value

Return type:

type of first field or None

Raises:
  • TypeError – too many (any) parameters
  • MemoryError – internal memory error

Returns the first field of the next row from the result as a scalar value.

This method can be called multiple times to return more rows as scalars. It returns None if the result does not contain one more row.

New in version 5.1.

single/singledict/singlenamed/singlescalar – get single result of a query¶

Query.single()¶

Get single row from the result of a query as a tuple

Returns:

single row from the query results as a tuple of fields

Return type:

tuple

Raises:
  • pg.InvalidResultError – result does not have exactly one row
  • TypeError – too many (any) parameters
  • MemoryError – internal memory error

Returns a single row from the result as a tuple of fields.

This method returns the same single row when called multiple times. It raises a pg.InvalidResultError if the result does not have exactly one row. More specifically, this will be of type pg.NoResultError if it is empty and of type pg.MultipleResultsError if it has multiple rows.

New in version 5.1.

Query.singledict()¶

Get single row from the result of a query as a dictionary

Returns:

single row from the query results as a dictionary

Return type:

dict

Raises:
  • pg.InvalidResultError – result does not have exactly one row
  • TypeError – too many (any) parameters
  • MemoryError – internal memory error

Returns a single row from the result as a dictionary with the field names used as the keys.

This method returns the same single row when called multiple times. It raises a pg.InvalidResultError if the result does not have exactly one row. More specifically, this will be of type pg.NoResultError if it is empty and of type pg.MultipleResultsError if it has multiple rows.

New in version 5.1.

Query.singlenamed()¶

Get single row from the result of a query as named tuple

Returns:

single row from the query results as a named tuple

Return type:

named tuple

Raises:
  • pg.InvalidResultError – result does not have exactly one row
  • TypeError – too many (any) parameters
  • MemoryError – internal memory error

Returns single row from the result as a named tuple with proper field names.

Column names in the database that are not valid as field names for named tuples (particularly, names starting with an underscore) are automatically renamed to valid positional names.

This method returns the same single row when called multiple times. It raises a pg.InvalidResultError if the result does not have exactly one row. More specifically, this will be of type pg.NoResultError if it is empty and of type pg.MultipleResultsError if it has multiple rows.

New in version 5.1.

Query.singlescalar()¶

Get single row from the result of a query as scalar value

Returns:

single row from the query results as a scalar value

Return type:

type of first field

Raises:
  • pg.InvalidResultError – result does not have exactly one row
  • TypeError – too many (any) parameters
  • MemoryError – internal memory error

Returns the first field of a single row from the result as a scalar value.

This method returns the same single row as scalar when called multiple times. It raises a pg.InvalidResultError if the result does not have exactly one row. More specifically, this will be of type pg.NoResultError if it is empty and of type pg.MultipleResultsError if it has multiple rows.

New in version 5.1.

listfields – list fields names of previous query result¶

Query.listfields()¶

List fields names of previous query result

Returns:field names
Return type:list
Raises:TypeError – too many parameters

This method returns the list of field names defined for the query result. The fields are in the same order as the result values.

fieldname, fieldnum – field name/number conversion¶

Query.fieldname(num)¶

Get field name from its number

Parameters:

num (int) – field number

Returns:

field name

Return type:

str

Raises:
  • TypeError – invalid connection, bad parameter type, or too many parameters
  • ValueError – invalid field number

This method allows to find a field name from its rank number. It can be useful for displaying a result. The fields are in the same order as the result values.

Query.fieldnum(name)¶

Get field number from its name

Parameters:

name (str) – field name

Returns:

field number

Return type:

int

Raises:
  • TypeError – invalid connection, bad parameter type, or too many parameters
  • ValueError – unknown field name

This method returns a field number given its name. It can be used to build a function that converts result list strings to their correct type, using a hardcoded table definition. The number returned is the field rank in the query result.

ntuples – return number of tuples in query object¶

Query.ntuples()¶

Return number of tuples in query object

Returns:number of tuples in Query
Return type:int
Raises:TypeError – Too many arguments.

This method returns the number of tuples in the query result.

Deprecated since version 5.1: You can use the normal len() function instead.

PyGreSQL-5.1/docs/_build/html/contents/pg/introduction.html0000644000175100077410000001677513470245535023753 0ustar darcypyg00000000000000 Introduction — PyGreSQL 5.1

Introduction¶

You may either choose to use the “classic†PyGreSQL interface provided by the pg module or else the newer DB-API 2.0 compliant interface provided by the pgdb module.

The following part of the documentation covers only the older pg API.

The pg module handles three types of objects,

  • the Connection instances, which handle the connection and all the requests to the database,
  • the LargeObject instances, which handle all the accesses to PostgreSQL large objects,
  • the Query instances that handle query results

and it provides a convenient wrapper class DB for the basic Connection class.

See also

If you want to see a simple example of the use of some of these functions, see the Examples page.

PyGreSQL-5.1/docs/_build/html/contents/pg/db_types.html0000644000175100077410000003511713470245534023031 0ustar darcypyg00000000000000 DbTypes – The internal cache for database types — PyGreSQL 5.1

DbTypes – The internal cache for database types¶

class pg.DbTypes¶

New in version 5.0.

The DbTypes object is essentially a dictionary mapping PostgreSQL internal type names and type OIDs to PyGreSQL “type names†(which are also returned by DB.get_attnames() as dictionary values).

These type names are strings which are equal to either the simple PyGreSQL names or to the more fine-grained registered PostgreSQL type names if these have been enabled with DB.use_regtypes(). Besides being strings, they carry additional information about the associated PostgreSQL type in the following attributes:

  • oid – the PostgreSQL type OID
  • pgtype – the internal PostgreSQL data type name
  • regtype – the registered PostgreSQL data type name
  • simple – the more coarse-grained PyGreSQL type name
  • typtype – b = base type, c = composite type etc.
  • category – A = Array, b = Boolean, C = Composite etc.
  • delim – delimiter for array types
  • relid – corresponding table for composite types
  • attnames – attributes for composite types

For details, see the PostgreSQL documentation on pg_type.

In addition to the dictionary methods, the DbTypes class also provides the following methods:

DbTypes.get_attnames(typ)¶

Get the names and types of the fields of composite types

Parameters:typ (str or int) – PostgreSQL type name or OID of a composite type
Returns:an ordered dictionary mapping field names to type names
DbTypes.get_typecast(typ)¶

Get the cast function for the given database type

Parameters:typ (str) – PostgreSQL type name
Returns:the typecast function for the specified type
Return type:function or None
DbTypes.set_typecast(typ, cast)¶

Set a typecast function for the given database type(s)

Parameters:
  • typ (str or int) – PostgreSQL type name or list of type names
  • cast – the typecast function to be set for the specified type(s)

The typecast function must take one string object as argument and return a Python object into which the PostgreSQL type shall be casted. If the function takes another parameter named connection, then the current database connection will also be passed to the typecast function. This may sometimes be necessary to look up certain database settings.

DbTypes.reset_typecast([typ])¶

Reset the typecasts for the specified (or all) type(s) to their defaults

Parameters:typ (str, list or None) – PostgreSQL type name or list of type names, or None to reset all typecast functions
DbTypes.typecast(value, typ)¶

Cast the given value according to the given database type

Parameters:typ (str) – PostgreSQL type name or type code
Returns:the casted value

Note

Note that DbTypes object is always bound to a database connection. You can also get and set and reset typecast functions on a global level using the functions pg.get_typecast() and pg.set_typecast(). If you do this, the current database connections will continue to use their already cached typecast functions unless you reset the typecast functions by calling the DbTypes.reset_typecast() method on DB.dbtypes objects of the running connections.

Also note that the typecasting for all of the basic types happens already in the C low-level extension module. The typecast functions that can be set with the above methods are only called for the types that are not already supported by the C extension.

PyGreSQL-5.1/docs/_build/html/contents/pg/connection.html0000644000175100077410000020266613470245534023364 0ustar darcypyg00000000000000 Connection – The connection object — PyGreSQL 5.1

Connection – The connection object¶

class pg.Connection¶

This object handles a connection to a PostgreSQL database. It embeds and hides all the parameters that define this connection, thus just leaving really significant parameters in function calls.

Note

Some methods give direct access to the connection socket. Do not use them unless you really know what you are doing. If you prefer disabling them, set the -DNO_DIRECT option in the Python setup file. These methods are specified by the tag [DA].

Note

Some other methods give access to large objects (refer to PostgreSQL user manual for more information about these). If you want to forbid access to these from the module, set the -DNO_LARGE option in the Python setup file. These methods are specified by the tag [LO].

query – execute a SQL command string¶

Connection.query(command[, args])¶

Execute a SQL command string

Parameters:
  • command (str) – SQL command
  • args – optional parameter values
Returns:

result values

Return type:

Query, None

Raises:
  • TypeError – bad argument type, or too many arguments
  • TypeError – invalid connection
  • ValueError – empty SQL query or lost connection
  • pg.ProgrammingError – error in query
  • pg.InternalError – error during query processing

This method simply sends a SQL query to the database. If the query is an insert statement that inserted exactly one row into a table that has OIDs, the return value is the OID of the newly inserted row as an integer. If the query is an update or delete statement, or an insert statement that did not insert exactly one row, or on a table without OIDs, then the number of rows affected is returned as a string. If it is a statement that returns rows as a result (usually a select statement, but maybe also an "insert/update ... returning" statement), this method returns a Query. Otherwise, it returns None.

You can use the Query object as an iterator that yields all results as tuples, or call Query.getresult() to get the result as a list of tuples. Alternatively, you can call Query.dictresult() or Query.dictiter() if you want to get the rows as dictionaries, or Query.namedresult() or Query.namediter() if you want to get the rows as named tuples. You can also simply print the Query object to show the query results on the console.

The SQL command may optionally contain positional parameters of the form $1, $2, etc instead of literal data, in which case the values must be supplied separately as a tuple. The values are substituted by the database in such a way that they don’t need to be escaped, making this an effective way to pass arbitrary or unknown data without worrying about SQL injection or syntax errors.

If you don’t pass any parameters, the command string can also include multiple SQL commands (separated by semicolons). You will only get the return value for the last command in this case.

When the database could not process the query, a pg.ProgrammingError or a pg.InternalError is raised. You can check the SQLSTATE error code of this error by reading its sqlstate attribute.

Example:

name = input("Name? ")
phone = con.query("select phone from employees where name=$1",
    (name,)).getresult()

query_prepared – execute a prepared statement¶

Connection.query_prepared(name[, args])¶

Execute a prepared statement

Parameters:
  • name (str) – name of the prepared statement
  • args – optional parameter values
Returns:

result values

Return type:

Query, None

Raises:
  • TypeError – bad argument type, or too many arguments
  • TypeError – invalid connection
  • ValueError – empty SQL query or lost connection
  • pg.ProgrammingError – error in query
  • pg.InternalError – error during query processing
  • pg.OperationalError – prepared statement does not exist

This method works exactly like Connection.query() except that instead of passing the command itself, you pass the name of a prepared statement. An empty name corresponds to the unnamed statement. You must have previously created the corresponding named or unnamed statement with Connection.prepare(), or an pg.OperationalError will be raised.

New in version 5.1.

prepare – create a prepared statement¶

Connection.prepare(name, command)¶

Create a prepared statement

Parameters:
  • name (str) – name of the prepared statement
  • command (str) – SQL command
Return type:

None

Raises:
  • TypeError – bad argument types, or wrong number of arguments
  • TypeError – invalid connection
  • pg.ProgrammingError – error in query or duplicate query

This method creates a prepared statement with the specified name for the given command for later execution with the Connection.query_prepared() method. The name can be empty to create an unnamed statement, in which case any pre-existing unnamed statement is automatically replaced; otherwise a pg.ProgrammingError is raised if the statement name is already defined in the current database session.

The SQL command may optionally contain positional parameters of the form $1, $2, etc instead of literal data. The corresponding values must then later be passed to the Connection.query_prepared() method separately as a tuple.

New in version 5.1.

describe_prepared – describe a prepared statement¶

Connection.describe_prepared(name)¶

Describe a prepared statement

Parameters:

name (str) – name of the prepared statement

Return type:

Query

Raises:
  • TypeError – bad argument type, or too many arguments
  • TypeError – invalid connection
  • pg.OperationalError – prepared statement does not exist

This method returns a Query object describing the prepared statement with the given name. You can also pass an empty name in order to describe the unnamed statement. Information on the fields of the corresponding query can be obtained through the Query.listfields(), Query.fieldname() and Query.fieldnum() methods.

New in version 5.1.

reset – reset the connection¶

Connection.reset()¶

Reset the pg connection

Return type:

None

Raises:
  • TypeError – too many (any) arguments
  • TypeError – invalid connection

This method resets the current database connection.

cancel – abandon processing of current SQL command¶

Connection.cancel()¶
Return type:

None

Raises:
  • TypeError – too many (any) arguments
  • TypeError – invalid connection

This method requests that the server abandon processing of the current SQL command.

close – close the database connection¶

Connection.close()¶

Close the pg connection

Return type:None
Raises:TypeError – too many (any) arguments

This method closes the database connection. The connection will be closed in any case when the connection is deleted but this allows you to explicitly close it. It is mainly here to allow the DB-SIG API wrapper to implement a close function.

transaction – get the current transaction state¶

Connection.transaction()¶

Get the current in-transaction status of the server

Returns:

the current in-transaction status

Return type:

int

Raises:
  • TypeError – too many (any) arguments
  • TypeError – invalid connection

The status returned by this method can be TRANS_IDLE (currently idle), TRANS_ACTIVE (a command is in progress), TRANS_INTRANS (idle, in a valid transaction block), or TRANS_INERROR (idle, in a failed transaction block). TRANS_UNKNOWN is reported if the connection is bad. The status TRANS_ACTIVE is reported only when a query has been sent to the server and not yet completed.

parameter – get a current server parameter setting¶

Connection.parameter(name)¶

Look up a current parameter setting of the server

Parameters:

name (str) – the name of the parameter to look up

Returns:

the current setting of the specified parameter

Return type:

str or None

Raises:
  • TypeError – too many (any) arguments
  • TypeError – invalid connection

Certain parameter values are reported by the server automatically at connection startup or whenever their values change. This method can be used to interrogate these settings. It returns the current value of a parameter if known, or None if the parameter is not known.

You can use this method to check the settings of important parameters such as server_version, server_encoding, client_encoding, application_name, is_superuser, session_authorization, DateStyle, IntervalStyle, TimeZone, integer_datetimes, and standard_conforming_strings.

Values that are not reported by this method can be requested using DB.get_parameter().

New in version 4.0.

date_format – get the currently used date format¶

Connection.date_format()¶

Look up the date format currently being used by the database

Returns:

the current date format

Return type:

str

Raises:
  • TypeError – too many (any) arguments
  • TypeError – invalid connection

This method returns the current date format used by the server. Note that it is cheap to call this method, since there is no database query involved and the setting is also cached internally. You will need the date format when you want to manually typecast dates and timestamps coming from the database instead of using the built-in typecast functions. The date format returned by this method can be directly used with date formatting functions such as datetime.strptime(). It is derived from the current setting of the database parameter DateStyle.

New in version 5.0.

fileno – get the socket used to connect to the database¶

Connection.fileno()¶

Get the socket used to connect to the database

Returns:

the socket id of the database connection

Return type:

int

Raises:
  • TypeError – too many (any) arguments
  • TypeError – invalid connection

This method returns the underlying socket id used to connect to the database. This is useful for use in select calls, etc.

getnotify – get the last notify from the server¶

Connection.getnotify()¶

Get the last notify from the server

Returns:

last notify from server

Return type:

tuple, None

Raises:
  • TypeError – too many parameters
  • TypeError – invalid connection

This method tries to get a notify from the server (from the SQL statement NOTIFY). If the server returns no notify, the method returns None. Otherwise, it returns a tuple (triplet) (relname, pid, extra), where relname is the name of the notify, pid is the process id of the connection that triggered the notify, and extra is a payload string that has been sent with the notification. Remember to do a listen query first, otherwise Connection.getnotify() will always return None.

Changed in version 4.1: Support for payload strings was added in version 4.1.

inserttable – insert a list into a table¶

Connection.inserttable(table, values)¶

Insert a Python list into a database table

Parameters:
  • table (str) – the table name
  • values (list) – list of rows values
Return type:

None

Raises:
  • TypeError – invalid connection, bad argument type, or too many arguments
  • MemoryError – insert buffer could not be allocated
  • ValueError – unsupported values

This method allows you to quickly insert large blocks of data into a table: It inserts the whole values list into the given table. Internally, it uses the COPY command of the PostgreSQL database. The list is a list of tuples/lists that define the values for each inserted row. The row values may contain string, integer, long or double (real) values.

Warning

This method doesn’t type check the fields according to the table definition; it just looks whether or not it knows how to handle such types.

get/set_cast_hook – fallback typecast function¶

Connection.get_cast_hook()¶

Get the function that handles all external typecasting

Returns:the current external typecast function
Return type:callable, None
Raises:TypeError – too many (any) arguments

This returns the callback function used by PyGreSQL to provide plug-in Python typecast functions for the connection.

New in version 5.0.

Connection.set_cast_hook(func)¶

Set a function that will handle all external typecasting

Parameters:func – the function to be used as a callback
Return type:None
Raises:TypeError – the specified notice receiver is not callable

This method allows setting a custom fallback function for providing Python typecast functions for the connection to supplement the C extension module. If you set this function to None, then only the typecast functions implemented in the C extension module are enabled. You normally would not want to change this. Instead, you can use get_typecast() and set_typecast() to add or change the plug-in Python typecast functions.

New in version 5.0.

get/set_notice_receiver – custom notice receiver¶

Connection.get_notice_receiver()¶

Get the current notice receiver

Returns:the current notice receiver callable
Return type:callable, None
Raises:TypeError – too many (any) arguments

This method gets the custom notice receiver callback function that has been set with Connection.set_notice_receiver(), or None if no custom notice receiver has ever been set on the connection.

New in version 4.1.

Connection.set_notice_receiver(func)¶

Set a custom notice receiver

Parameters:func – the custom notice receiver callback function
Return type:None
Raises:TypeError – the specified notice receiver is not callable

This method allows setting a custom notice receiver callback function. When a notice or warning message is received from the server, or generated internally by libpq, and the message level is below the one set with client_min_messages, the specified notice receiver function will be called. This function must take one parameter, the Notice object, which provides the following read-only attributes:

Notice.pgcnx¶

the connection

Notice.message¶

the full message with a trailing newline

Notice.severity¶

the level of the message, e.g. ‘NOTICE’ or ‘WARNING’

Notice.primary¶

the primary human-readable error message

Notice.detail¶

an optional secondary error message

Notice.hint¶

an optional suggestion what to do about the problem

New in version 4.1.

putline – write a line to the server socket [DA]¶

Connection.putline(line)¶

Write a line to the server socket

Parameters:line (str) – line to be written
Return type:None
Raises:TypeError – invalid connection, bad parameter type, or too many parameters

This method allows you to write a string directly to the server socket.

getline – get a line from server socket [DA]¶

Connection.getline()¶

Get a line from server socket

Returns:

the line read

Return type:

str

Raises:
  • TypeError – invalid connection
  • TypeError – too many parameters
  • MemoryError – buffer overflow

This method allows you to read a string directly from the server socket.

endcopy – synchronize client and server [DA]¶

Connection.endcopy()¶

Synchronize client and server

Return type:

None

Raises:
  • TypeError – invalid connection
  • TypeError – too many parameters

The use of direct access methods may desynchronize client and server. This method ensures that client and server will be synchronized.

locreate – create a large object in the database [LO]¶

Connection.locreate(mode)¶

Create a large object in the database

Parameters:

mode (int) – large object create mode

Returns:

object handling the PostgreSQL large object

Return type:

LargeObject

Raises:
  • TypeError – invalid connection, bad parameter type, or too many parameters
  • pg.OperationalError – creation error

This method creates a large object in the database. The mode can be defined by OR-ing the constants defined in the pg module (INV_READ, INV_WRITE and INV_ARCHIVE). Please refer to PostgreSQL user manual for a description of the mode values.

getlo – build a large object from given oid [LO]¶

Connection.getlo(oid)¶

Create a large object in the database

Parameters:

oid (int) – OID of the existing large object

Returns:

object handling the PostgreSQL large object

Return type:

LargeObject

Raises:
  • TypeError – invalid connection, bad parameter type, or too many parameters
  • ValueError – bad OID value (0 is invalid_oid)

This method allows reusing a previously created large object through the LargeObject interface, provided the user has its OID.

loimport – import a file to a large object [LO]¶

Connection.loimport(name)¶

Import a file to a large object

Parameters:

name (str) – the name of the file to be imported

Returns:

object handling the PostgreSQL large object

Return type:

LargeObject

Raises:
  • TypeError – invalid connection, bad argument type, or too many arguments
  • pg.OperationalError – error during file import

This method allows you to create large objects in a very simple way. You just give the name of a file containing the data to be used.

Object attributes¶

Every Connection defines a set of read-only attributes that describe the connection and its status. These attributes are:

Connection.host¶

the host name of the server (str)

Connection.port¶

the port of the server (int)

Connection.db¶

the selected database (str)

Connection.options¶

the connection options (str)

Connection.user¶

user name on the database system (str)

Connection.protocol_version¶

the frontend/backend protocol being used (int)

New in version 4.0.

Connection.server_version¶

the backend version (int, e.g. 90305 for 9.3.5)

New in version 4.0.

Connection.status¶

the status of the connection (int: 1 = OK, 0 = bad)

Connection.error¶

the last warning/error message from the server (str)

Connection.socket¶

the file descriptor number of the connection socket to the server (int)

New in version 5.1.

Connection.backend_pid¶

the PID of the backend process handling this connection (int)

New in version 5.1.

Connection.ssl_in_use¶

this is True if the connection uses SSL, False if not

New in version 5.1: (needs PostgreSQL >= 9.5)

Connection.ssl_attributes¶

SSL-related information about the connection (dict)

New in version 5.1: (needs PostgreSQL >= 9.5)

Module functions and constants — PyGreSQL 5.1

Module functions and constants¶

The pg module defines a few functions that allow to connect to a database and to define “default variables” that override the environment variables used by PostgreSQL.

These “default variables” were designed to allow you to handle general connection parameters without heavy code in your programs. You can prompt the user for a value, put it in the default variable, and forget it, without having to modify your environment. The support for default variables can be disabled by setting the -DNO_DEF_VAR option in the Python setup file. Methods relative to this are specified by the tag [DV].

All variables are set to None at module initialization, specifying that standard environment variables should be used.

connect – Open a PostgreSQL connection¶

pg.connect([dbname][, host][, port][, opt][, user][, passwd])¶

Open a pg connection

Parameters:
  • dbname – name of connected database (None = defbase)
  • host (str or None) – name of the server host (None = defhost)
  • port (int) – port used by the database server (-1 = defport)
  • opt (str or None) – connection options (None = defopt)
  • user (str or None) – PostgreSQL user (None = defuser)
  • passwd (str or None) – password for user (None = defpasswd)
Returns:

If successful, the Connection handling the connection

Return type:

Connection

Raises:
  • TypeError – bad argument type, or too many arguments
  • SyntaxError – duplicate argument definition
  • pg.InternalError – some error occurred during pg connection definition
  • Exception – (all exceptions relative to object allocation)

This function opens a connection to a specified database on a given PostgreSQL server. You can use keywords here, as described in the Python tutorial. The names of the keywords are the name of the parameters given in the syntax line. The opt parameter can be used to pass command-line options to the server. For a precise description of the parameters, please refer to the PostgreSQL user manual.

If you want to add additional parameters not specified here, you must pass a connection string or a connection URI instead of the dbname (as in con3 and con4 in the following example).

Example:

import pg

con1 = pg.connect('testdb', 'myhost', 5432, None, 'bob', None)
con2 = pg.connect(dbname='testdb', host='myhost', user='bob')
con3 = pg.connect('host=myhost user=bob dbname=testdb connect_timeout=10')
con4 = pg.connect('postgresql://bob@myhost/testdb?connect_timeout=10')

get/set_defhost – default server host [DV]¶

pg.get_defhost(host)¶

Get the default host

Returns:the current default host specification
Return type:str or None
Raises:TypeError – too many arguments

This method returns the current default host specification, or None if the environment variables should be used. Environment variables won’t be looked up.

pg.set_defhost(host)¶

Set the default host

Parameters:host (str or None) – the new default host specification
Returns:the previous default host specification
Return type:str or None
Raises:TypeError – bad argument type, or too many arguments

This method sets the default host value for new connections. If None is supplied as parameter, environment variables will be used in future connections. It returns the previous setting for default host.

get/set_defport – default server port [DV]¶

pg.get_defport()¶

Get the default port

Returns:the current default port specification
Return type:int
Raises:TypeError – too many arguments

This method returns the current default port specification, or None if the environment variables should be used. Environment variables won’t be looked up.

pg.set_defport(port)¶

Set the default port

Parameters:port (int) – the new default port
Returns:previous default port specification
Return type:int or None

This method sets the default port value for new connections. If -1 is supplied as parameter, environment variables will be used in future connections. It returns the previous setting for default port.

get/set_defopt – default connection options [DV]¶

pg.get_defopt()¶

Get the default connection options

Returns:the current default options specification
Return type:str or None
Raises:TypeError – too many arguments

This method returns the current default connection options specification, or None if the environment variables should be used. Environment variables won’t be looked up.

pg.set_defopt(options)¶

Set the default connection options

Parameters:options (str or None) – the new default connection options
Returns:previous default options specification
Return type:str or None
Raises:TypeError – bad argument type, or too many arguments

This method sets the default connection options value for new connections. If None is supplied as parameter, environment variables will be used in future connections. It returns the previous setting for default options.

get/set_defbase – default database name [DV]¶

pg.get_defbase()¶

Get the default database name

Returns:the current default database name specification
Return type:str or None
Raises:TypeError – too many arguments

This method returns the current default database name specification, or None if the environment variables should be used. Environment variables won’t be looked up.

pg.set_defbase(base)¶

Set the default database name

Parameters:base (str or None) – the new default base name
Returns:the previous default database name specification
Return type:str or None
Raises:TypeError – bad argument type, or too many arguments

This method sets the default database name value for new connections. If None is supplied as parameter, environment variables will be used in future connections. It returns the previous setting for default host.

get/set_defuser – default database user [DV]¶

pg.get_defuser()¶

Get the default database user

Returns:the current default database user specification
Return type:str or None
Raises:TypeError – too many arguments

This method returns the current default database user specification, or None if the environment variables should be used. Environment variables won’t be looked up.

pg.set_defuser(user)¶

Set the default database user

Parameters:user – the new default database user
Returns:the previous default database user specification
Return type:str or None
Raises:TypeError – bad argument type, or too many arguments

This method sets the default database user name for new connections. If None is supplied as parameter, environment variables will be used in future connections. It returns the previous setting for default host.

get/set_defpasswd – default database password [DV]¶

pg.get_defpasswd()¶

Get the default database password

Returns:the current default database password specification
Return type:str or None
Raises:TypeError – too many arguments

This method returns the current default database password specification, or None if the environment variables should be used. Environment variables won’t be looked up.

pg.set_defpasswd(passwd)¶

Set the default database password

Parameters:passwd – the new default database password
Returns:the previous default database password specification
Return type:str or None
Raises:TypeError – bad argument type, or too many arguments

This method sets the default database password for new connections. If None is supplied as parameter, environment variables will be used in future connections. It returns the previous setting for default host.

escape_string – escape a string for use within SQL¶

pg.escape_string(string)¶

Escape a string for use within SQL

Parameters:string (str) – the string that is to be escaped
Returns:the escaped string
Return type:str
Raises:TypeError – bad argument type, or too many arguments

This function escapes a string for use within an SQL command. This is useful when inserting data values as literal constants in SQL commands. Certain characters (such as quotes and backslashes) must be escaped to prevent them from being interpreted specially by the SQL parser. escape_string() performs this operation. Note that there is also a Connection method with the same name which takes connection properties into account.

Note

It is especially important to do proper escaping when handling strings that were received from an untrustworthy source. Otherwise there is a security risk: you are vulnerable to “SQL injection” attacks wherein unwanted SQL commands are fed to your database.

Example:

name = input("Name? ")
phone = con.query("select phone from employees where name='%s'"
    % escape_string(name)).getresult()

escape_bytea – escape binary data for use within SQL¶

pg.escape_bytea(datastring)¶

escape binary data for use within SQL as type bytea

Parameters:datastring (str) – string containing the binary data that is to be escaped
Returns:the escaped string
Return type:str
Raises:TypeError – bad argument type, or too many arguments

Escapes binary data for use within an SQL command with the type bytea. As with escape_string(), this is only used when inserting data directly into an SQL command string.

Note that there is also a Connection method with the same name which takes connection properties into account.

Example:

picture = open('garfield.gif', 'rb').read()
con.query("update pictures set img='%s' where name='Garfield'"
    % escape_bytea(picture))

unescape_bytea – unescape data that has been retrieved as text¶

pg.unescape_bytea(string)¶

Unescape bytea data that has been retrieved as text

Parameters:datastring (str) – the bytea data string that has been retrieved as text
Returns:byte string containing the binary data
Return type:bytes
Raises:TypeError – bad argument type, or too many arguments

Converts an escaped string representation of binary data stored as bytea into the raw byte string representing the binary data – this is the reverse of escape_bytea(). Since the Query results will already return unescaped byte strings, you normally don’t have to use this method.

Note that there is also a DB method with the same name which does exactly the same.

get/set_decimal – decimal type to be used for numeric values¶

pg.get_decimal()¶

Get the decimal type to be used for numeric values

Returns:the Python class used for PostgreSQL numeric values
Return type:class

This function returns the Python class that is used by PyGreSQL to hold PostgreSQL numeric values. The default class is decimal.Decimal if available, otherwise the float type is used.

pg.set_decimal(cls)¶

Set a decimal type to be used for numeric values

Parameters:cls (class) – the Python class to be used for PostgreSQL numeric values

This function can be used to specify the Python class that shall be used by PyGreSQL to hold PostgreSQL numeric values. The default class is decimal.Decimal if available, otherwise the float type is used.

get/set_decimal_point – decimal mark used for monetary values¶

pg.get_decimal_point()¶

Get the decimal mark used for monetary values

Returns:string with one character representing the decimal mark
Return type:str

This function returns the decimal mark used by PyGreSQL to interpret PostgreSQL monetary values when converting them to decimal numbers. The default setting is '.' as a decimal point. This setting is not adapted automatically to the locale used by PostgreSQL, but you can use set_decimal_point() to set a different decimal mark manually. A return value of None means monetary values are not interpreted as decimal numbers, but returned as strings including the formatting and currency.

New in version 4.1.1.

pg.set_decimal_point(string)¶

Specify which decimal mark is used for interpreting monetary values

Parameters:string (str) – string with one character representing the decimal mark

This function can be used to specify the decimal mark used by PyGreSQL to interpret PostgreSQL monetary values. The default value is ‘.’ as a decimal point. This value is not adapted automatically to the locale used by PostgreSQL, so if you are dealing with a database set to a locale that uses a ',' instead of '.' as the decimal point, then you need to call set_decimal_point(',') to have PyGreSQL interpret monetary values correctly. If you don’t want money values to be converted to decimal numbers, then you can call set_decimal_point(None), which will cause PyGreSQL to return monetary values as strings including their formatting and currency.

New in version 4.1.1.

get/set_bool – whether boolean values are returned as bool objects¶

pg.get_bool()¶

Check whether boolean values are returned as bool objects

Returns:whether or not bool objects will be returned
Return type:bool

This function checks whether PyGreSQL returns PostgreSQL boolean values converted to Python bool objects, or as 'f' and 't' strings which are the values used internally by PostgreSQL. By default, conversion to bool objects is activated, but you can disable this with the set_bool() function.

New in version 4.2.

pg.set_bool(on)¶

Set whether boolean values are returned as bool objects

Parameters:on – whether or not bool objects shall be returned

This function can be used to specify whether PyGreSQL shall return PostgreSQL boolean values converted to Python bool objects, or as 'f' and 't' strings which are the values used internally by PostgreSQL. By default, conversion to bool objects is activated, but you can disable this by calling set_bool(False).

New in version 4.2.

Changed in version 5.0: Boolean values had been returned as string by default in earlier versions.

get/set_array – whether arrays are returned as list objects¶

pg.get_array()¶

Check whether arrays are returned as list objects

Returns:whether or not list objects will be returned
Return type:bool

This function checks whether PyGreSQL returns PostgreSQL arrays converted to Python list objects, or simply as text in the internal special output syntax of PostgreSQL. By default, conversion to list objects is activated, but you can disable this with the set_array() function.

New in version 5.0.

pg.set_array(on)¶

Set whether arrays are returned as list objects

Parameters:on – whether or not list objects shall be returned

This function can be used to specify whether PyGreSQL shall return PostgreSQL arrays converted to Python list objects, or simply as text in the internal special output syntax of PostgreSQL. By default, conversion to list objects is activated, but you can disable this by calling set_array(False).

New in version 5.0.

Changed in version 5.0: Arrays had been always returned as text strings only in earlier versions.

get/set_bytea_escaped – whether bytea data is returned escaped¶

pg.get_bytea_escaped()¶

Check whether bytea values are returned as escaped strings

Returns:whether or not bytea objects will be returned escaped
Return type:bool

This function checks whether PyGreSQL returns PostgreSQL bytea values in escaped form or in unescaped form as byte strings. By default, bytea values will be returned unescaped as byte strings, but you can change this with the set_bytea_escaped() function.

New in version 5.0.

pg.set_bytea_escaped(on)¶

Set whether bytea values are returned as escaped strings

Parameters:on – whether or not bytea objects shall be returned escaped

This function can be used to specify whether PyGreSQL shall return PostgreSQL bytea values in escaped form or in unescaped form as byte strings. By default, bytea values will be returned unescaped as byte strings, but you can change this by calling set_bytea_escaped(True).

New in version 5.0.

Changed in version 5.0: Bytea data had been returned in escaped form by default in earlier versions.

get/set_jsondecode – decoding JSON format¶

pg.get_jsondecode()¶

Get the function that deserializes JSON formatted strings

This returns the function used by PyGreSQL to construct Python objects from JSON formatted strings.

pg.set_jsondecode(func)¶

Set a function that will deserialize JSON formatted strings

Parameters:func – the function to be used for deserializing JSON strings

You can use this if you do not want to deserialize JSON strings coming in from the database, or if want to use a different function than the standard function json.loads() or if you want to use it with parameters different from the default ones. If you set this function to None, then the automatic deserialization of JSON strings will be deactivated.

New in version 5.0.

Changed in version 5.0: JSON data had been always returned as text strings in earlier versions.

get/set_datestyle – assume a fixed date style¶

pg.get_datestyle()¶

Get the assumed date style for typecasting

This returns the PostgreSQL date style that is silently assumed when typecasting dates or None if no fixed date style is assumed, in which case the date style is requested from the database when necessary (this is the default). Note that this method will not get the date style that is currently set in the session or in the database. You can get the current setting with the methods DB.get_parameter() and Connection.parameter(). You can also get the date format corresponding to the current date style by calling Connection.date_format().

New in version 5.0.

pg.set_datestyle(datestyle)¶

Set a fixed date style that shall be assumed when typecasting

Parameters:datestyle (str) – the date style that shall be assumed, or None if no fixed date style shall be assumed

PyGreSQL is able to automatically pick up the right date style for typecasting date values from the database, even if you change it for the current session with a SET DateStyle command. This happens very effectively without an additional database request being involved. If you still want to have PyGreSQL always assume a fixed date style instead, then you can set one with this function. Note that calling this function will not alter the date style of the database or the current session. You can do that by calling the method DB.set_parameter() instead.

New in version 5.0.

get/set_typecast – custom typecasting¶

PyGreSQL uses typecast functions to cast the raw data coming from the database to Python objects suitable for the particular database type. These functions take a single string argument that represents the data to be casted and must return the casted value.

PyGreSQL provides through its C extension module basic typecast functions for the common database types, but if you want to add more typecast functions, you can set these using the following functions.

pg.get_typecast(typ)¶

Get the global cast function for the given database type

Parameters:typ (str) – PostgreSQL type name
Returns:the typecast function for the specified type
Return type:function or None

New in version 5.0.

pg.set_typecast(typ, cast)¶

Set a global typecast function for the given database type(s)

Parameters:
  • typ (str or int) – PostgreSQL type name or list of type names
  • cast – the typecast function to be set for the specified type(s)

The typecast function must take one string object as argument and return a Python object into which the PostgreSQL type shall be casted. If the function takes another parameter named connection, then the current database connection will also be passed to the typecast function. This may sometimes be necessary to look up certain database settings.

New in version 5.0.

Note that database connections cache types and their cast functions using connection specific DbTypes objects. You can also get, set and reset typecast functions on the connection level using the methods DbTypes.get_typecast(), DbTypes.set_typecast() and DbTypes.reset_typecast() of the DB.dbtypes object. This will not affect other connections or future connections. In order to be sure a global change is picked up by a running connection, you must reopen it or call DbTypes.reset_typecast() on the DB.dbtypes object.

Also note that the typecasting for all of the basic types happens already in the C extension module. The typecast functions that can be set with the above methods are only called for the types that are not already supported by the C extension module.

cast_array/record – fast parsers for arrays and records¶

PostgreSQL returns arrays and records (composite types) using a special output syntax with several quirks that cannot easily and quickly be parsed in Python. Therefore the C extension module provides two fast parsers that allow quickly turning these text representations into Python objects: Arrays will be converted to Python lists, and records to Python tuples. These fast parsers are used automatically by PyGreSQL in order to return arrays and records from database queries as lists and tuples, so you normally don’t need to call them directly. You may only need them for typecasting arrays of data types that are not supported by default in PostgreSQL.

pg.cast_array(string[, cast][, delim])¶

Cast a string representing a PostgreSQL array to a Python list

Parameters:
  • string (str) – the string with the text representation of the array
  • cast (callable or None) – a typecast function for the elements of the array
  • delim – delimiter character between adjacent elements
Returns:

a list representing the PostgreSQL array in Python

Return type:

list

Raises:
  • TypeError – invalid argument types
  • ValueError – error in the syntax of the given array

This function takes a string containing the text representation of a PostgreSQL array (which may look like '{{1,2},{3,4}}' for a two-dimensional array), a typecast function cast that is called for every element, and an optional delimiter character delim (usually a comma), and returns a Python list representing the array (which may be nested like [[1, 2], [3, 4]] in this example). The cast function must take a single argument which will be the text representation of the element and must output the corresponding Python object that shall be put into the list. If you don’t pass a cast function or set it to None, then unprocessed text strings will be returned as elements of the array. If you don’t pass a delimiter character, then a comma will be used by default.

New in version 5.0.

pg.cast_record(string[, cast][, delim])¶

Cast a string representing a PostgreSQL record to a Python tuple

Parameters:
  • string (str) – the string with the text representation of the record
  • cast (callable, list or tuple of callables, or None) – typecast function(s) for the elements of the record
  • delim – delimiter character between adjacent elements
Returns:

a tuple representing the PostgreSQL record in Python

Return type:

tuple

Raises:
  • TypeError – invalid argument types
  • ValueError – error in the syntax of the given array

This function takes a string containing the text representation of a PostgreSQL record (which may look like '(1,a,2,b)' for a record composed of four fields), a typecast function cast that is called for every element, or a list or tuple of such functions corresponding to the individual fields of the record, and an optional delimiter character delim (usually a comma), and returns a Python tuple representing the record (which may be inhomogeneous like (1, 'a', 2, 'b') in this example). The cast function(s) must take a single argument which will be the text representation of the element and must output the corresponding Python object that shall be put into the tuple. If you don’t pass cast function(s) or pass None instead, then unprocessed text strings will be returned as elements of the tuple. If you don’t pass a delimiter character, then a comma will be used by default.

New in version 5.0.

Note that besides using parentheses instead of braces, there are other subtle differences in escaping special characters and NULL values between the syntax used for arrays and the one used for composite types, which these functions take into account.

Type helpers¶

The module provides the following type helper functions. You can wrap parameters with these functions when passing them to DB.query() or DB.query_formatted() in order to give PyGreSQL a hint about the type of the parameters, if it cannot be derived from the context.

pg.Bytea(bytes)¶

A wrapper for holding a bytea value

New in version 5.0.

pg.HStore(dict)¶

A wrapper for holding an hstore dictionary

New in version 5.0.

pg.Json(obj)¶

A wrapper for holding an object serializable to JSON

New in version 5.0.

The following additional type helper is only meaningful when used with DB.query_formatted(). It marks a parameter as text that shall be literally included into the SQL. This is useful for passing table names for instance.

pg.Literal(sql)¶

A wrapper for holding a literal SQL string

New in version 5.0.

Module constants¶

Some constants are defined in the module dictionary. They are intended to be used as parameters for methods calls. You should refer to the libpq description in the PostgreSQL user manual for more information about them. These constants are:

pg.version¶
pg.__version__¶

constants that give the current version

pg.INV_READ¶
pg.INV_WRITE¶

large objects access modes, used by Connection.locreate() and LargeObject.open()

pg.SEEK_SET¶
pg.SEEK_CUR¶
pg.SEEK_END¶

positional flags, used by LargeObject.seek()

pg.TRANS_IDLE¶
pg.TRANS_ACTIVE¶
pg.TRANS_INTRANS¶
pg.TRANS_INERROR¶
pg.TRANS_UNKNOWN¶

transaction states, used by Connection.transaction()

PyGreSQL-5.1/docs/_build/html/contents/tutorial.html0000644000175100077410000010477213470245536022463 0ustar darcypyg00000000000000 First Steps with PyGreSQL — PyGreSQL 5.1

First Steps with PyGreSQL¶

In this small tutorial we show you the basic operations you can perform with both flavors of the PyGreSQL interface. Please choose your flavor:

First Steps with the classic PyGreSQL Interface¶

Before doing anything else, it’s necessary to create a database connection.

To do this, simply import the DB wrapper class and create an instance of it, passing the necessary connection parameters, like this:

>>> from pg import DB
>>> db = DB(dbname='testdb', host='pgserver', port=5432,
...     user='scott', passwd='tiger')

You can omit one or even all parameters if you want to use their default values. PostgreSQL will use the name of the current operating system user as the login and the database name, and will try to connect to the local host on port 5432 if nothing else is specified.

The db object has all methods of the lower-level Connection class plus some more convenience methods provided by the DB wrapper.

You can now execute database queries using the DB.query() method:

>>> db.query("create table fruits(id serial primary key, name varchar)")

You can list all database tables with the DB.get_tables() method:

>>> db.get_tables()
['public.fruits']

To get the attributes of the fruits table, use DB.get_attnames():

>>> db.get_attnames('fruits')
{'id': 'int', 'name': 'text'}

Verify that you can insert into the newly created fruits table:

>>> db.has_table_privilege('fruits', 'insert')
True

You can insert a new row into the table using the DB.insert() method, for example:

>>> db.insert('fruits', name='apple')
{'name': 'apple', 'id': 1}

Note how this method returns the full row as a dictionary including its id column that has been generated automatically by a database sequence. You can also pass a dictionary to the DB.insert() method instead of or in addition to using keyword arguments.

Let’s add another row to the table:

>>> banana = db.insert('fruits', name='banana')

Or, you can add a whole bunch of fruits at the same time using the Connection.inserttable() method. Note that this method uses the COPY command of PostgreSQL to insert all data in one batch operation, which is much faster than sending many individual INSERT commands:

>>> more_fruits = 'cherimaya durian eggfruit fig grapefruit'.split()
>>> data = list(enumerate(more_fruits, start=3))
>>> db.inserttable('fruits', data)

We can now query the database for all rows that have been inserted into the fruits table:

>>> print(db.query('select * from fruits'))
id|   name
--+----------
 1|apple
 2|banana
 3|cherimaya
 4|durian
 5|eggfruit
 6|fig
 7|grapefruit
(7 rows)

Instead of simply printing the Query instance that has been returned by this query, we can also request the data as list of tuples:

>>> q = db.query('select * from fruits')
>>> q.getresult()
... [(1, 'apple'), ..., (7, 'grapefruit')]

Instead of a list of tuples, we can also request a list of dicts:

>>> q.dictresult()
[{'id': 1, 'name': 'apple'}, ..., {'id': 7, 'name': 'grapefruit'}]

You can also return the rows as named tuples:

>>> rows = q.namedresult()
>>> rows[3].name
'durian'

In PyGreSQL 5.1 and newer, you can also use the Query instance directly as an iterable that yields the rows as tuples, and there are also methods that return iterables for rows as dictionaries, named tuples or scalar values. Other methods like Query.one() or Query.onescalar() return only one row or only the first field of that row. You can get the number of rows with the len() function.

Using the method DB.get_as_dict(), you can easily import the whole table into a Python dictionary mapping the primary key id to the name:

>>> db.get_as_dict('fruits', scalar=True)
OrderedDict([(1, 'apple'),
             (2, 'banana'),
             (3, 'cherimaya'),
             (4, 'durian'),
             (5, 'eggfruit'),
             (6, 'fig'),
             (7, 'grapefruit')])

To change a single row in the database, you can use the DB.update() method. For instance, if you want to capitalize the name ‘banana’:

>>> db.update('fruits', banana, name=banana['name'].capitalize())
{'id': 2, 'name': 'Banana'}
>>> print(db.query('select * from fruits where id between 1 and 3'))
id|  name
--+---------
 1|apple
 2|Banana
 3|cherimaya
(3 rows)

Let’s also capitalize the other names in the database:

>>> db.query('update fruits set name=initcap(name)')
'7'

The returned string ‘7’ tells us the number of updated rows. It is returned as a string to discern it from an OID which will be returned as an integer, if a new row has been inserted into a table with an OID column.

To delete a single row from the database, use the DB.delete() method:

>>> db.delete('fruits', banana)
1

The returned integer value 1 tells us that one row has been deleted. If we try it again, the method returns the integer value 0. Naturally, this method can only return 0 or 1:

>>> db.delete('fruits', banana)
0

Of course, we can insert the row back again:

>>> db.insert('fruits', banana)
{'id': 2, 'name': 'Banana'}

If we want to change a different row, we can get its current state with:

>>> apple = db.get('fruits', 1)
>>> apple
{'name': 'Apple', 'id': 1}

We can duplicate the row like this:

   >>> db.insert('fruits', apple, id=8)
   {'id': 8, 'name': 'Apple'}

To remove the duplicated row, we can do::

   >>> db.delete('fruits', id=8)
   1

Finally, to remove the table from the database and close the connection:

>>> db.query("drop table fruits")
>>> db.close()

For more advanced features and details, see the reference: pg — The Classic PyGreSQL Interface

First Steps with the DB-API 2.0 Interface¶

As with the classic interface, the first thing you need to do is to create a database connection. To do this, use the function pgdb.connect() in the pgdb module, passing the connection parameters:

>>> from pgdb import connect
>>> con = connect(database='testdb', host='pgserver:5432',
...     user='scott', password='tiger')

As in the classic interface, you can omit parameters if they are the default values used by PostgreSQL.

To do anything with the connection, you need to request a cursor object from it, which is thought of as the Python representation of a database cursor. The connection has a method that lets you get a cursor:

>>> cursor = con.cursor()

The cursor has a method that lets you execute database queries:

>>> cursor.execute("create table fruits("
...     "id serial primary key, name varchar)")

You can also use this method to insert data into the table:

>>> cursor.execute("insert into fruits (name) values ('apple')")

You can pass parameters in a safe way:

>>> cursor.execute("insert into fruits (name) values (%s)", ('banana',))

To insert multiple rows at once, you can use the following method:

>>> more_fruits = 'cherimaya durian eggfruit fig grapefruit'.split()
>>> parameters = [(name,) for name in more_fruits]
>>> cursor.executemany("insert into fruits (name) values (%s)", parameters)

The cursor also has a Cursor.copy_from() method to quickly insert large amounts of data into the database, and a Cursor.copy_to() method to quickly dump large amounts of data from the database, using the PostgreSQL COPY command. Note however, that these methods are an extension provided by PyGreSQL, they are not part of the DB-API 2 standard.

Also note that the DB API 2.0 interface does not have an autocommit as you may be used to from PostgreSQL. So in order to make these inserts permanent, you need to commit them to the database:

>>> con.commit()

If you end the program without calling the commit method of the connection, or if you call the rollback method of the connection, then the changes will be discarded.

In a similar way, you can update or delete rows in the database, executing UPDATE or DELETE statements instead of INSERT statements.

To fetch rows from the database, execute a SELECT statement first. Then you can use one of several fetch methods to retrieve the results. For instance, to request a single row:

>>> cursor.execute('select * from fruits where id=1')
>>> cursor.fetchone()
Row(id=1, name='apple')

The result is a named tuple. This means you can access its elements either using an index number as for an ordinary tuple, or using the column name as for access to object attributes.

To fetch all rows of the query, use this method instead:

>>> cursor.execute('select * from fruits')
>>> cursor.fetchall()
[Row(id=1, name='apple'), ..., Row(id=7, name='grapefruit')]

The output is a list of named tuples.

If you want to fetch only a limited number of rows from the query:

>>> cursor.execute('select * from fruits')
>>> cursor.fetchmany(2)
[Row(id=1, name='apple'), Row(id=2, name='banana')]

Finally, to remove the table from the database and close the connection:

>>> cursor.execute("drop table fruits")
>>> cursor.close()
>>> con.close()

For more advanced features and details, see the reference: pgdb — The DB-API Compliant Interface

PyGreSQL-5.1/docs/_build/html/contents/examples.html0000644000175100077410000001372013470245533022423 0ustar darcypyg00000000000000 Examples — PyGreSQL 5.1

Examples¶

I am starting to collect examples of applications that use PyGreSQL. So far I only have a few but if you have an example for me, you can either send me the files or the URL for me to point to.

The A PostgreSQL Primer that is part of the PyGreSQL distribution shows some examples of using PostgreSQL with PyGreSQL.

Here is a list of motorcycle rides in Ontario that uses a PostgreSQL database to store the rides. There is a link at the bottom of the page to view the source code.

Oleg Broytmann has written a simple example RGB database demo.

PyGreSQL-5.1/docs/_build/html/contents/pgdb/0000755000175100077410000000000013470245541020627 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/_build/html/contents/pgdb/adaptation.html0000644000175100077410000013716013470245535023654 0ustar darcypyg00000000000000 Remarks on Adaptation and Typecasting — PyGreSQL 5.1

Remarks on Adaptation and Typecasting¶

Both PostgreSQL and Python have the concept of data types, but there are of course differences between the two type systems. Therefore PyGreSQL needs to adapt Python objects to the representation required by PostgreSQL when passing values as query parameters, and it needs to typecast the representation of PostgreSQL data types returned by database queries to Python objects. Here are some explanations about how this works in detail in case you want to better understand or change the default behavior of PyGreSQL.

Supported data types¶

The following automatic data type conversions are supported by PyGreSQL out of the box. If you need other automatic type conversions or want to change the default conversions, you can achieve this by using the methods explained in the next two sections.

PostgreSQL Python
char, bpchar, name, text, varchar str
bool bool
bytea bytes
int2, int4, int8, oid, serial int [1]
int2vector list of int
float4, float8 float
numeric, money Decimal
date datetime.date
time, timetz datetime.time
timestamp, timestamptz datetime.datetime
interval datetime.timedelta
hstore dict
json, jsonb list or dict
uuid uuid.UUID
array list [2]
record tuple

Note

Elements of arrays and records will also be converted accordingly.

[1]int8 is converted to long in Python 2
[2]The first element of the array will always be the first element of the Python list, no matter what the lower bound of the PostgreSQL array is. The information about the start index of the array (which is usually 1 in PostgreSQL, but can also be different from 1) is ignored and gets lost in the conversion to the Python list. If you need that information, you can request it separately with the array_lower() function provided by PostgreSQL.

Adaptation of parameters¶

PyGreSQL knows how to adapt the common Python types to get a suitable representation of their values for PostgreSQL when you pass parameters to a query. For example:

>>> con = pgdb.connect(...)
>>> cur = con.cursor()
>>> parameters = (144, 3.75, 'hello', None)
>>> tuple(cur.execute('SELECT %s, %s, %s, %s', parameters).fetchone())
(144, Decimal('3.75'), 'hello', None)

This is the result we can expect, so obviously PyGreSQL has adapted the parameters and sent the following query to PostgreSQL:

SELECT 144, 3.75, 'hello', NULL

Note the subtle, but important detail that even though the SQL string passed to cur.execute() contains conversion specifications normally used in Python with the % operator for formatting strings, we didn’t use the % operator to format the parameters, but passed them as the second argument to cur.execute(). I.e. we didn’t write the following:

>>> tuple(cur.execute('SELECT %s, %s, %s, %s' % parameters).fetchone())

If we had done this, PostgreSQL would have complained because the parameters were not adapted. Particularly, there would be no quotes around the value 'hello', so PostgreSQL would have interpreted this as a database column, which would have caused a ProgrammingError. Also, the Python value None would have been included in the SQL command literally, instead of being converted to the SQL keyword NULL, which would have been another reason for PostgreSQL to complain about our bad query:

SELECT 144, 3.75, hello, None

Even worse, building queries with the use of the % operator makes us vulnerable to so called “SQL injection” exploits, where an attacker inserts malicious SQL statements into our queries that we never intended to be executed. We could avoid this by carefully quoting and escaping the parameters, but this would be tedious and if we overlook something, our code will still be vulnerable. So please don’t do this. This cannot be emphasized enough, because it is such a subtle difference and using the % operator looks so natural:

Warning

Remember to never insert parameters directly into your queries using the % operator. Always pass the parameters separately.

The good thing is that by letting PyGreSQL do the work for you, you can treat all your parameters equally and don’t need to ponder where you need to put quotes or need to escape strings. You can and should also always use the general %s specification instead of e.g. using %d for integers. Actually, to avoid mistakes and make it easier to insert parameters at more than one location, you can and should use named specifications, like this:

>>> params = dict(greeting='Hello', name='HAL')
>>> sql = """SELECT %(greeting)s || ', ' || %(name)s
...    || '. Do you read me, ' || %(name)s || '?'"""
>>> cur.execute(sql, params).fetchone()[0]
'Hello, HAL. Do you read me, HAL?'

PyGreSQL does not only adapt the basic types like int, float, bool and str, but also tries to make sense of Python lists and tuples.

Lists are adapted as PostgreSQL arrays:

>>> params = dict(array=[[1, 2],[3, 4]])
>>> cur.execute("SELECT %(array)s", params).fetchone()[0]
[[1, 2], [3, 4]]

Note that the query gives the value back as Python lists again. This is achieved by the typecasting mechanism explained in the next section. The query that was actually executed was this:

SELECT ARRAY[[1,2],[3,4]]

Again, if we had inserted the list using the % operator without adaptation, the ARRAY keyword would have been missing in the query.

Tuples are adapted as PostgreSQL composite types:

>>> params = dict(record=('Bond', 'James'))
>>> cur.execute("SELECT %(record)s", params).fetchone()[0]
('Bond', 'James')

You can also use this feature with the IN syntax of SQL:

>>> params = dict(what='needle', where=('needle', 'haystack'))
>>> cur.execute("SELECT %(what)s IN %(where)s", params).fetchone()[0]
True

Sometimes a Python type can be ambiguous. For instance, you might want to insert a Python list not into an array column, but into a JSON column. Or you want to interpret a string as a date and insert it into a DATE column. In this case you can give PyGreSQL a hint by using Type constructors:

>>> cur.execute("CREATE TABLE json_data (data json, created date)")
>>> params = dict(
...     data=pgdb.Json([1, 2, 3]), created=pgdb.Date(2016, 1, 29))
>>> sql = ("INSERT INTO json_data VALUES (%(data)s, %(created)s)")
>>> cur.execute(sql, params)
>>> cur.execute("SELECT * FROM json_data").fetchone()
Row(data=[1, 2, 3], created='2016-01-29')

Let’s think of another example where we create a table with a composite type in PostgreSQL:

CREATE TABLE on_hand (
    item      inventory_item,
    count     integer)

We assume the composite type inventory_item has been created like this:

CREATE TYPE inventory_item AS (
    name            text,
    supplier_id     integer,
    price           numeric)

In Python we can use a named tuple as an equivalent to this PostgreSQL type:

>>> from collections import namedtuple
>>> inventory_item = namedtuple(
...     'inventory_item', ['name', 'supplier_id', 'price'])

Using the automatic adaptation of Python tuples, an item can now be inserted into the database and then read back as follows:

>>> cur.execute("INSERT INTO on_hand VALUES (%(item)s, %(count)s)",
...     dict(item=inventory_item('fuzzy dice', 42, 1.99), count=1000))
>>> cur.execute("SELECT * FROM on_hand").fetchone()
Row(item=inventory_item(name='fuzzy dice', supplier_id=42,
        price=Decimal('1.99')), count=1000)

However, we may not want to use named tuples, but custom Python classes to hold our values, like this one:

>>> class InventoryItem:
...
...     def __init__(self, name, supplier_id, price):
...         self.name = name
...         self.supplier_id = supplier_id
...         self.price = price
...
...     def __str__(self):
...         return '%s (from %s, at $%s)' % (
...             self.name, self.supplier_id, self.price)

But when we try to insert an instance of this class in the same way, we will get an error:

>>> cur.execute("INSERT INTO on_hand VALUES (%(item)s, %(count)s)",
...     dict(item=InventoryItem('fuzzy dice', 42, 1.99), count=1000))
InterfaceError: Do not know how to adapt type <class 'InventoryItem'>

While PyGreSQL knows how to adapt tuples, it does not know what to make out of our custom class. To simply convert the object to a string using the str function is not a solution, since this yields a human readable string that is not useful for PostgreSQL. However, it is possible to make such custom classes adapt themselves to PostgreSQL by adding a “magic” method with the name __pg_repr__, like this:

>>> class InventoryItem:
  ...
  ...     ...
  ...
  ...     def __str__(self):
  ...         return '%s (from %s, at $%s)' % (
  ...             self.name, self.supplier_id, self.price)
  ...
  ...     def __pg_repr__(self):
  ...         return (self.name, self.supplier_id, self.price)

Now you can insert class instances the same way as you insert named tuples.

Note that PyGreSQL adapts the result of __pg_repr__ again if it is a tuple or a list. Otherwise, it must be a properly escaped string.

Typecasting to Python¶

As you noticed, PyGreSQL automatically converted the PostgreSQL data to suitable Python objects when returning values via one of the “fetch” methods of a cursor. This is done by the use of built-in typecast functions.

If you want to use different typecast functions or add your own if no built-in typecast function is available, then this is possible using the set_typecast() function. With the get_typecast() function you can check which function is currently set, and reset_typecast() allows you to reset the typecast function to its default. If no typecast function is set, then PyGreSQL will return the raw strings from the database.

For instance, you will find that PyGreSQL uses the normal int function to cast PostgreSQL int4 type values to Python:

>>> pgdb.get_typecast('int4')
int

You can change this to return float values instead:

>>> pgdb.set_typecast('int4', float)
>>> con = pgdb.connect(...)
>>> cur = con.cursor()
>>> cur.execute('select 42::int4').fetchone()[0]
42.0

Note that the connections cache the typecast functions, so you may need to reopen the database connection, or reset the cache of the connection to make this effective, using the following command:

>>> con.type_cache.reset_typecast()

The TypeCache of the connection can also be used to change typecast functions locally for one database connection only.

As a more useful example, we can create a typecast function that casts items of the composite type used as example in the previous section to instances of the corresponding Python class:

>>> con.type_cache.reset_typecast()
>>> cast_tuple = con.type_cache.get_typecast('inventory_item')
>>> cast_item = lambda value: InventoryItem(*cast_tuple(value))
>>> con.type_cache.set_typecast('inventory_item', cast_item)
>>> str(cur.execute("SELECT * FROM on_hand").fetchone()[0])
'fuzzy dice (from 42, at $1.99)'

As you saw in the last section, PyGreSQL also has a typecast function for JSON, which is the default JSON decoder from the standard library. Let’s assume we want to use a slight variation of that decoder in which every integer in JSON is converted to a float in Python. This can be accomplished as follows:

>>> from json import loads
>>> cast_json = lambda v: loads(v, parse_int=float)
>>> pgdb.set_typecast('json', cast_json)
>>> cur.execute("SELECT data FROM json_data").fetchone()[0]
[1.0, 2.0, 3.0]

Note again that you may need to run con.type_cache.reset_typecast() to make this effective. Also note that the two types json and jsonb have their own typecast functions, so if you use jsonb instead of json, you need to use this type name when setting the typecast function:

>>> pgdb.set_typecast('jsonb', cast_json)

As one last example, let us try to typecast the geometric data type circle of PostgreSQL into a SymPy Circle object. Let’s assume we have created and populated a table with two circles, like so:

CREATE TABLE circle (
    name varchar(8) primary key, circle circle);
INSERT INTO circle VALUES ('C1', '<(2, 3), 3>');
INSERT INTO circle VALUES ('C2', '<(1, -1), 4>');

With PostgreSQL we can easily calculate that these two circles overlap:

>>> con.cursor().execute("""SELECT c1.circle && c2.circle
...     FROM circle c1, circle c2
...     WHERE c1.name = 'C1' AND c2.name = 'C2'""").fetchone()[0]
True

However, calculating the intersection points between the two circles using the # operator does not work (at least not as of PostgreSQL version 9.5). So let’s resort to SymPy to find out. To ease importing circles from PostgreSQL to SymPy, we create and register the following typecast function:

>>> from sympy import Point, Circle
>>>
>>> def cast_circle(s):
...     p, r = s[1:-1].rsplit(',', 1)
...     p = p[1:-1].split(',')
...     return Circle(Point(float(p[0]), float(p[1])), float(r))
...
>>> pgdb.set_typecast('circle', cast_circle)

Now we can import the circles in the table into Python quite easily:

>>> circle = {c.name: c.circle for c in con.cursor().execute(
...     "SELECT * FROM circle").fetchall()}

The result is a dictionary mapping circle names to SymPy Circle objects. We can verify that the circles have been imported correctly:

>>> circle
{'C1': Circle(Point(2, 3), 3.0),
 'C2': Circle(Point(1, -1), 4.0)}

Finally we can find the exact intersection points with SymPy:

>>> circle['C1'].intersection(circle['C2'])
[Point(29/17 + 64564173230121*sqrt(17)/100000000000000,
    -80705216537651*sqrt(17)/500000000000000 + 31/17),
 Point(-64564173230121*sqrt(17)/100000000000000 + 29/17,
    80705216537651*sqrt(17)/500000000000000 + 31/17)]
PyGreSQL-5.1/docs/_build/html/contents/pgdb/module.html0000644000175100077410000005740313470245536023017 0ustar darcypyg00000000000000 Module functions and constants — PyGreSQL 5.1

Module functions and constants¶

The pgdb module defines a connect() function that allows to connect to a database, some global constants describing the capabilities of the module as well as several exception classes.

connect – Open a PostgreSQL connection¶

pgdb.connect([dsn][, user][, password][, host][, database][, **kwargs])¶

Return a new connection to the database

Parameters:
  • dsn (str) – data source name as string
  • user (str) – the database user name
  • password (str) – the database password
  • host (str) – the hostname of the database
  • database – the name of the database
  • kwargs (dict) – other connection parameters
Returns:

a connection object

Return type:

Connection

Raises:

pgdb.OperationalError – error connecting to the database

This function takes parameters specifying how to connect to a PostgreSQL database and returns a Connection object using these parameters. If specified, the dsn parameter must be a string with the format 'host:base:user:passwd:opt'. All of the parts specified in the dsn are optional. You can also specify the parameters individually using keyword arguments, which always take precedence. The host can also contain a port if specified in the format 'host:port'. In the opt part of the dsn you can pass command-line options to the server. You can pass additional connection parameters using the optional kwargs keyword arguments.

Example:

con = connect(dsn='myhost:mydb', user='guido', password='234$')

Changed in version 5.0.1: Support for additional parameters passed as kwargs.

get/set/reset_typecast – Control the global typecast functions¶

PyGreSQL uses typecast functions to cast the raw data coming from the database to Python objects suitable for the particular database type. These functions take a single string argument that represents the data to be cast and must return the cast value.

PyGreSQL provides built-in typecast functions for the common database types, but if you want to change these or add more typecast functions, you can set these up using the following functions.

Note

The following functions are not part of the DB-API 2 standard.

pgdb.get_typecast(typ)¶

Get the global cast function for the given database type

Parameters:typ (str) – PostgreSQL type name or type code
Returns:the typecast function for the specified type
Return type:function or None

New in version 5.0.

pgdb.set_typecast(typ, cast)¶

Set a global typecast function for the given database type(s)

Parameters:
  • typ (str or int) – PostgreSQL type name or type code, or list of such
  • cast – the typecast function to be set for the specified type(s)

The typecast function must take one string object as argument and return a Python object into which the PostgreSQL type shall be casted. If the function takes another parameter named connection, then the current database connection will also be passed to the typecast function. This may sometimes be necessary to look up certain database settings.

New in version 5.0.

As of version 5.0.3 you can also use this method to change the typecasting of PostgreSQL array types. You must run set_typecast('anyarray', cast) in order to do this. The cast method must take a string value and a cast function for the base type and return the array converted to a Python object. For instance, run set_typecast('anyarray', lambda v, c: v) to switch off the casting of arrays completely, and always return them encoded as strings.

pgdb.reset_typecast([typ])¶

Reset the typecasts for the specified (or all) type(s) to their defaults

Parameters:typ (str, list or None) – PostgreSQL type name or type code, or list of such, or None to reset all typecast functions

New in version 5.0.

Note that database connections cache types and their cast functions using connection specific TypeCache objects. You can also get, set and reset typecast functions on the connection level using the methods TypeCache.get_typecast(), TypeCache.set_typecast() and TypeCache.reset_typecast() of the Connection.type_cache. This will not affect other connections or future connections. In order to be sure a global change is picked up by a running connection, you must reopen it or call TypeCache.reset_typecast() on the Connection.type_cache.

Module constants¶

pgdb.apilevel¶

The string constant '2.0', stating that the module is DB-API 2.0 level compliant.

pgdb.threadsafety¶

The integer constant 1, stating that the module itself is thread-safe, but the connections are not thread-safe, and therefore must be protected with a lock if you want to use them from different threads.

pgdb.paramstyle¶

The string constant pyformat, stating that parameters should be passed using Python extended format codes, e.g. " ... WHERE name=%(name)s".

Errors raised by this module¶

The errors that can be raised by the pgdb module are the following:

exception pgdb.Warning¶

Exception raised for important warnings like data truncations while inserting.

exception pgdb.Error¶

Exception that is the base class of all other error exceptions. You can use this to catch all errors with one single except statement. Warnings are not considered errors and thus do not use this class as base.

exception pgdb.InterfaceError¶

Exception raised for errors that are related to the database interface rather than the database itself.

exception pgdb.DatabaseError¶

Exception raised for errors that are related to the database.

In PyGreSQL, this also has a DatabaseError.sqlstate attribute that contains the SQLSTATE error code of this error.

exception pgdb.DataError¶

Exception raised for errors that are due to problems with the processed data like division by zero or numeric value out of range.

exception pgdb.OperationalError¶

Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer, e.g. an unexpected disconnect occurs, the data source name is not found, a transaction could not be processed, or a memory allocation error occurred during processing.

exception pgdb.IntegrityError¶

Exception raised when the relational integrity of the database is affected, e.g. a foreign key check fails.

exception pgdb.ProgrammingError¶

Exception raised for programming errors, e.g. table not found or already exists, syntax error in the SQL statement or wrong number of parameters specified.

exception pgdb.NotSupportedError¶

Exception raised in case a method or database API was used which is not supported by the database.

PyGreSQL-5.1/docs/_build/html/contents/pgdb/types.html0000644000175100077410000006067313470245536022701 0ustar darcypyg00000000000000 Type – Type objects and constructors — PyGreSQL 5.1

Type – Type objects and constructors¶

Type constructors¶

For binding to an operation's input parameters, PostgreSQL needs to have the input in a particular format. However, from the parameters to the Cursor.execute() and Cursor.executemany() methods it is not always obvious which PostgreSQL data types they shall be bound to. For instance, a Python string could be bound as a simple char value, or also as a date or a time. Or a list could be bound as an array or a json object. To make the intention clear in such cases, you can wrap the parameters in type helper objects. PyGreSQL provides the constructors defined below to create such objects that can hold special values. When passed to the cursor methods, PyGreSQL can then detect the proper type of the input parameter and bind it accordingly.

The pgdb module exports the following type constructors as part of the DB-API 2 standard:

pgdb.Date(year, month, day)¶

Construct an object holding a date value

pgdb.Time(hour[, minute][, second][, microsecond][, tzinfo])¶

Construct an object holding a time value

pgdb.Timestamp(year, month, day[, hour][, minute][, second][, microsecond][, tzinfo])¶

Construct an object holding a time stamp value

pgdb.DateFromTicks(ticks)¶

Construct an object holding a date value from the given ticks value

pgdb.TimeFromTicks(ticks)¶

Construct an object holding a time value from the given ticks value

pgdb.TimestampFromTicks(ticks)¶

Construct an object holding a time stamp from the given ticks value

pgdb.Binary(bytes)¶

Construct an object capable of holding a (long) binary string value

Additionally, PyGreSQL provides the following constructors for PostgreSQL specific data types:

pgdb.Interval(days, hours=0, minutes=0, seconds=0, microseconds=0)¶

Construct an object holding a time interval value

New in version 5.0.

pgdb.Uuid([hex][, bytes][, bytes_le][, fields][, int][, version])¶

Construct an object holding a UUID value

New in version 5.0.

pgdb.Hstore(dict)¶

Construct a wrapper for holding an hstore dictionary

New in version 5.0.

pgdb.Json(obj[, encode])¶

Construct a wrapper for holding an object serializable to JSON

You can pass an optional serialization function as a parameter. By default, PyGreSQL uses json.dumps() to serialize it.

pgdb.Literal(sql)¶

Construct a wrapper for holding a literal SQL string

New in version 5.0.

Example for using a type constructor:

>>> cursor.execute("create table jsondata (data jsonb)")
>>> data = {'id': 1, 'name': 'John Doe', 'kids': ['Johnnie', 'Janie']}
>>> cursor.execute("insert into jsondata values (%s)", [Json(data)])

Note

SQL NULL values are always represented by the Python None singleton on input and output.

Type objects¶

class pgdb.Type¶

The Cursor.description attribute returns information about each of the result columns of a query. The type_code must compare equal to one of the Type objects defined below. Type objects can be equal to more than one type code (e.g. DATETIME is equal to the type codes for date, time and timestamp columns).

The pgdb module exports the following Type objects as part of the DB-API 2 standard:

STRING

Used to describe columns that are string-based (e.g. char, varchar, text)

BINARY

Used to describe (long) binary columns (bytea)

NUMBER

Used to describe numeric columns (e.g. int, float, numeric, money)

DATETIME

Used to describe date/time columns (e.g. date, time, timestamp, interval)

ROWID

Used to describe the oid column of PostgreSQL database tables

Note

The following more specific type objects are not part of the DB-API 2 standard.

BOOL

Used to describe boolean columns

SMALLINT

Used to describe smallint columns

INTEGER

Used to describe integer columns

LONG

Used to describe bigint columns

FLOAT

Used to describe float columns

NUMERIC

Used to describe numeric columns

MONEY

Used to describe money columns

DATE

Used to describe date columns

TIME

Used to describe time columns

TIMESTAMP

Used to describe timestamp columns

INTERVAL

Used to describe date and time interval columns

UUID

Used to describe uuid columns

HSTORE

Used to describe hstore columns

New in version 5.0.

JSON

Used to describe json and jsonb columns

New in version 5.0.

ARRAY

Used to describe columns containing PostgreSQL arrays

New in version 5.0.

RECORD

Used to describe columns containing PostgreSQL records

New in version 5.0.

Example for using some type objects:

>>> cursor = con.cursor()
>>> cursor.execute("create table jsondata (created date, data jsonb)")
>>> cursor.execute("select * from jsondata")
>>> (created, data) = (d.type_code for d in cursor.description)
>>> created == DATE
True
>>> created == DATETIME
True
>>> created == TIME
False
>>> data == JSON
True
>>> data == STRING
False
PyGreSQL-5.1/docs/_build/html/contents/pgdb/typecache.html0000644000175100077410000003522413470245536023474 0ustar darcypyg00000000000000 TypeCache – The internal cache for database types — PyGreSQL 5.1

TypeCache – The internal cache for database types¶

class pgdb.TypeCache¶

New in version 5.0.

The internal TypeCache of PyGreSQL is not part of the DB-API 2 standard, but is documented here in case you need full control and understanding of the internal handling of database types.

The TypeCache is essentially a dictionary mapping PostgreSQL internal type names and type OIDs to DB-API 2 "type codes" (which are also returned as the type_code field of the Cursor.description attribute).

These type codes are strings which are equal to the PostgreSQL internal type name, but they are also carrying additional information about the associated PostgreSQL type in the following attributes:

  • oid – the OID of the type
  • len – the internal size
  • type – 'b' = base, 'c' = composite, …
  • category – 'A' = Array, 'B' = Boolean, …
  • delim – delimiter to be used when parsing arrays
  • relid – the table OID for composite types

For details, see the PostgreSQL documentation on pg_type.

In addition to the dictionary methods, the TypeCache provides the following methods:

TypeCache.get_fields(typ)¶

Get the names and types of the fields of composite types

Parameters:typ (str or int) – PostgreSQL type name or OID of a composite type
Returns:a list of pairs of field names and types
Return type:list
TypeCache.get_typecast(typ)¶

Get the cast function for the given database type

Parameters:typ (str) – PostgreSQL type name or type code
Returns:the typecast function for the specified type
Return type:function or None
TypeCache.set_typecast(typ, cast)¶

Set a typecast function for the given database type(s)

Parameters:
  • typ (str or int) – PostgreSQL type name or type code, or list of such
  • cast – the typecast function to be set for the specified type(s)

The typecast function must take one string object as argument and return a Python object into which the PostgreSQL type shall be casted. If the function takes another parameter named connection, then the current database connection will also be passed to the typecast function. This may sometimes be necessary to look up certain database settings.

TypeCache.reset_typecast([typ])¶

Reset the typecasts for the specified (or all) type(s) to their defaults

Parameters:typ (str, list or None) – PostgreSQL type name or type code, or list of such, or None to reset all typecast functions
TypeCache.typecast(value, typ)¶

Cast the given value according to the given database type

Parameters:typ (str) – PostgreSQL type name or type code
Returns:the casted value

Note

Note that the TypeCache is always bound to a database connection. You can also get, set and reset typecast functions on a global level using the functions pgdb.get_typecast(), pgdb.set_typecast() and pgdb.reset_typecast(). If you do this, the current database connections will continue to use their already cached typecast functions unless you call the TypeCache.reset_typecast() method on the Connection.type_cache objects of the running connections.

PyGreSQL-5.1/docs/_build/html/contents/pgdb/introduction.html0000644000175100077410000001602713470245536024250 0ustar darcypyg00000000000000 Introduction — PyGreSQL 5.1

Introduction¶

You may either choose to use the "classic" PyGreSQL interface provided by the pg module or else the newer DB-API 2.0 compliant interface provided by the pgdb module.

The following part of the documentation covers only the newer pgdb API.

DB-API 2.0 (Python Database API Specification v2.0) is a specification for connecting to databases (not only PostgreSQL) from Python that has been developed by the Python DB-SIG in 1999. The authoritative programming information for the DB-API is PEP 0249.

See also

A useful tutorial-like introduction to the DB-API has been written by Andrew M. Kuchling for the LINUX Journal in 1998.

PyGreSQL-5.1/docs/_build/html/contents/pgdb/index.html0000644000175100077410000002506413470245536022637 0ustar darcypyg00000000000000 pgdb — The DB-API Compliant Interface — PyGreSQL 5.1
PyGreSQL-5.1/docs/_build/html/contents/pgdb/cursor.html0000644000175100077410000012663613470245536023054 0ustar darcypyg00000000000000 Cursor – The cursor object — PyGreSQL 5.1

Cursor – The cursor object¶

class pgdb.Cursor¶

These objects represent a database cursor, which is used to manage the context of a fetch operation. Cursors created from the same connection are not isolated, i.e., any changes done to the database by a cursor are immediately visible by the other cursors. Cursors created from different connections can or can not be isolated, depending on the level of transaction isolation. The default PostgreSQL transaction isolation level is "read committed".

Cursor objects respond to the following methods and attributes.

Note that Cursor objects also implement both the iterator and the context manager protocol, i.e. you can iterate over them and you can use them in a with statement.

description – details regarding the result columns¶

Cursor.description¶

This read-only attribute is a sequence of 7-item named tuples.

Each of these named tuples contains information describing one result column:

  • name
  • type_code
  • display_size
  • internal_size
  • precision
  • scale
  • null_ok

The values for precision and scale are only set for numeric types. The values for display_size and null_ok are always None.

This attribute will be None for operations that do not return rows or if the cursor has not had an operation invoked via the Cursor.execute() or Cursor.executemany() method yet.

Changed in version 5.0: Before version 5.0, this attribute was an ordinary tuple.

rowcount – number of rows of the result¶

Cursor.rowcount¶

This read-only attribute specifies the number of rows that the last Cursor.execute() or Cursor.executemany() call produced (for DQL statements like SELECT) or affected (for DML statements like UPDATE or INSERT). It is also set by the Cursor.copy_from() and Cursor.copy_to() methods. The attribute is -1 in case no such method call has been performed on the cursor or the rowcount of the last operation cannot be determined by the interface.

close – close the cursor¶

Cursor.close()¶

Close the cursor now (rather than whenever it is deleted)

Return type:None

The cursor will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the cursor.

execute – execute a database operation¶

Cursor.execute(operation[, parameters])¶

Prepare and execute a database operation (query or command)

Parameters:
  • operation (str) – the database operation
  • parameters – a sequence or mapping of parameters
Returns:

the cursor, so you can chain commands

Parameters may be provided as sequence or mapping and will be bound to variables in the operation. Variables are specified using Python extended format codes, e.g. " ... WHERE name=%(name)s".

A reference to the operation will be retained by the cursor. If the same operation object is passed in again, then the cursor can optimize its behavior. This is most effective for algorithms where the same operation is used, but different parameters are bound to it (many times).

The parameters may also be specified as list of tuples to e.g. insert multiple rows in a single operation, but this kind of usage is deprecated: Cursor.executemany() should be used instead.

Note that in case this method raises a DatabaseError, you can get information about the error condition that has occurred by introspecting its DatabaseError.sqlstate attribute, which will be the SQLSTATE error code associated with the error. Applications that need to know which error condition has occurred should usually test the error code, rather than looking at the textual error message.

executemany – execute many similar database operations¶

Cursor.executemany(operation[, seq_of_parameters])¶

Prepare and execute many similar database operations (queries or commands)

Parameters:
  • operation (str) – the database operation
  • seq_of_parameters – a sequence or mapping of parameter tuples or mappings
Returns:

the cursor, so you can chain commands

Prepare a database operation (query or command) and then execute it against all parameter tuples or mappings found in the sequence seq_of_parameters.

Parameters are bound to the query using Python extended format codes, e.g. " ... WHERE name=%(name)s".

callproc – Call a stored procedure¶

Cursor.callproc(procname[, parameters])¶

Call a stored database procedure with the given name

Parameters:
  • procname (str) – the name of the database function
  • parameters – a sequence of parameters (can be empty or omitted)

This method calls a stored procedure (function) in the PostgreSQL database.

The sequence of parameters must contain one entry for each input argument that the function expects. The result of the call is the same as this input sequence; replacement of output and input/output parameters in the return value is currently not supported.

The function may also provide a result set as output. These can be requested through the standard fetch methods of the cursor.

New in version 5.0.

fetchone – fetch next row of the query result¶

Cursor.fetchone()¶

Fetch the next row of a query result set

Returns:the next row of the query result set
Return type:named tuple or None

Fetch the next row of a query result set, returning a single named tuple, or None when no more data is available. The field names of the named tuple are the same as the column names of the database query as long as they are valid Python identifiers.

An Error (or subclass) exception is raised if the previous call to Cursor.execute() or Cursor.executemany() did not produce any result set or no call was issued yet.

Changed in version 5.0: Before version 5.0, this method returned ordinary tuples.

fetchmany – fetch next set of rows of the query result¶

Cursor.fetchmany([size=None][, keep=False])¶

Fetch the next set of rows of a query result

Parameters:
  • size (int or None) – the number of rows to be fetched
  • keep – if set to true, will keep the passed arraysize
Type keep:

bool

Returns:

the next set of rows of the query result

Return type:

list of named tuples

Fetch the next set of rows of a query result, returning a list of named tuples. An empty sequence is returned when no more rows are available. The field names of the named tuple are the same as the column names of the database query as long as they are valid Python identifiers.

The number of rows to fetch per call is specified by the size parameter. If it is not given, the cursor's arraysize determines the number of rows to be fetched. If you set the keep parameter to True, this is kept as new arraysize.

The method tries to fetch as many rows as indicated by the size parameter. If this is not possible due to the specified number of rows not being available, fewer rows may be returned.

An Error (or subclass) exception is raised if the previous call to Cursor.execute() or Cursor.executemany() did not produce any result set or no call was issued yet.

Note there are performance considerations involved with the size parameter. For optimal performance, it is usually best to use the arraysize attribute. If the size parameter is used, then it is best for it to retain the same value from one Cursor.fetchmany() call to the next.

Changed in version 5.0: Before version 5.0, this method returned ordinary tuples.

fetchall – fetch all rows of the query result¶

Cursor.fetchall()¶

Fetch all (remaining) rows of a query result

Returns:the set of all rows of the query result
Return type:list of named tuples

Fetch all (remaining) rows of a query result, returning them as list of named tuples. The field names of the named tuple are the same as the column names of the database query as long as they are valid as field names for named tuples, otherwise they are given positional names.

Note that the cursor's arraysize attribute can affect the performance of this operation.

Changed in version 5.0: Before version 5.0, this method returned ordinary tuples.

arraysize - the number of rows to fetch at a time¶

Cursor.arraysize¶

The number of rows to fetch at a time

This read/write attribute specifies the number of rows to fetch at a time with Cursor.fetchmany(). It defaults to 1, meaning to fetch a single row at a time.

Methods and attributes that are not part of the standard¶

Note

The following methods and attributes are not part of the DB-API 2 standard.

Cursor.copy_from(stream, table[, format][, sep][, null][, size][, columns])¶

Copy data from an input stream to the specified table

Parameters:
  • stream – the input stream (must be a file-like object, a string or an iterable returning strings)
  • table (str) – the name of a database table
  • format (str) – the format of the data in the input stream, can be 'text' (the default), 'csv', or 'binary'
  • sep (str) – a single character separator (the default is '\t' for text and ',' for csv)
  • null (str) – the textual representation of the NULL value, can also be an empty string (the default is '\\N')
  • size (int) – the size of the buffer when reading file-like objects
  • columns (list) – an optional list of column names
Returns:

the cursor, so you can chain commands

Raises:
  • TypeError – parameters with wrong types
  • ValueError – invalid parameters
  • IOError – error when executing the copy operation

This method can be used to copy data from an input stream on the client side to a database table on the server side using the COPY FROM command. The input stream can be provided in form of a file-like object (which must have a read() method), a string, or an iterable returning one row or multiple rows of input data on each iteration.

The format must be text, csv or binary. The sep option sets the column separator (delimiter) used in the non binary formats. The null option sets the textual representation of NULL in the input.

The size option sets the size of the buffer used when reading data from file-like objects.

The copy operation can be restricted to a subset of columns. If no columns are specified, all of them will be copied.

New in version 5.0.

Cursor.copy_to(stream, table[, format][, sep][, null][, decode][, columns])¶

Copy data from the specified table to an output stream

Parameters:
  • stream – the output stream (must be a file-like object or None)
  • table (str) – the name of a database table or a SELECT query
  • format (str) – the format of the data in the input stream, can be 'text' (the default), 'csv', or 'binary'
  • sep (str) – a single character separator (the default is '\t' for text and ',' for csv)
  • null (str) – the textual representation of the NULL value, can also be an empty string (the default is '\\N')
  • decode (bool) – whether decoded strings shall be returned for non-binary formats (the default is True in Python 3)
  • columns (list) – an optional list of column names
Returns:

a generator if stream is set to None, otherwise the cursor

Raises:
  • TypeError – parameters with wrong types
  • ValueError – invalid parameters
  • IOError – error when executing the copy operation

This method can be used to copy data from a database table on the server side to an output stream on the client side using the COPY TO command.

The output stream can be provided in form of a file-like object (which must have a write() method). Alternatively, if None is passed as the output stream, the method will return a generator yielding one row of output data on each iteration.

Output will be returned as byte strings unless you set decode to true.

Note that you can also use a SELECT query instead of the table name.

The format must be text, csv or binary. The sep option sets the column separator (delimiter) used in the non binary formats. The null option sets the textual representation of NULL in the output.

The copy operation can be restricted to a subset of columns. If no columns are specified, all of them will be copied.

New in version 5.0.

Cursor.row_factory(row)¶

Process rows before they are returned

Parameters:row (list) – the currently processed row of the result set
Returns:the transformed row that the fetch methods shall return

This method is used for processing result rows before returning them through one of the fetch methods. By default, rows are returned as named tuples. You can overwrite this method with a custom row factory if you want to return the rows as different kinds of objects. This same row factory will then be used for all result sets. If you overwrite this method, the method Cursor.build_row_factory() for creating row factories dynamically will be ignored.

Note that named tuples are very efficient and can be easily converted to dicts (even OrderedDicts) by calling row._asdict(). If you still want to return rows as dicts, you can create a custom cursor class like this:

class DictCursor(pgdb.Cursor):

    def row_factory(self, row):
        return {key: value for key, value in zip(self.colnames, row)}

cur = DictCursor(con)  # get one DictCursor instance or
con.cursor_type = DictCursor  # always use DictCursor instances

New in version 4.0.

Cursor.build_row_factory()¶

Build a row factory based on the current description

Returns:callable with the signature of Cursor.row_factory()

This method returns row factories for creating named tuples. It is called whenever a new result set is created, and Cursor.row_factory is then assigned the return value of this method. You can overwrite this method with a custom row factory builder if you want to use different row factories for different result sets. Otherwise, you can also simply overwrite the Cursor.row_factory() method. This method will then be ignored.

The default implementation that delivers rows as named tuples essentially looks like this:

def build_row_factory(self):
    return namedtuple('Row', self.colnames, rename=True)._make

New in version 5.0.

Cursor.colnames¶

The list of columns names of the current result set

The values in this list are the same values as the name elements in the Cursor.description attribute. Always use the latter if you want to remain standard compliant.

New in version 5.0.

Cursor.coltypes¶

The list of columns types of the current result set

The values in this list are the same values as the type_code elements in the Cursor.description attribute. Always use the latter if you want to remain standard compliant.

New in version 5.0.

PyGreSQL-5.1/docs/_build/html/contents/pgdb/connection.html0000644000175100077410000004011513470245535023660 0ustar darcypyg00000000000000 Connection – The connection object — PyGreSQL 5.1

Connection – The connection object¶

class pgdb.Connection¶

These connection objects respond to the following methods.

Note that pgdb.Connection objects also implement the context manager protocol, i.e. you can use them in a with statement. When the with block ends, the current transaction will be automatically committed, or rolled back if there was an exception, and you won't need to do this manually.

close – close the connection¶

Connection.close()¶

Close the connection now (rather than whenever it is deleted)

Return type:None

The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection. The same applies to all cursor objects trying to use the connection. Note that closing a connection without committing the changes first will cause an implicit rollback to be performed.

commit – commit the connection¶

Connection.commit()¶

Commit any pending transaction to the database

Return type:None

Note that connections always use a transaction, unless you set the Connection.autocommit attribute described below.

rollback – roll back the connection¶

Connection.rollback()¶

Roll back any pending transaction to the database

Return type:None

This method causes the database to roll back to the start of any pending transaction. Closing a connection without committing the changes first will cause an implicit rollback to be performed.

cursor – return a new cursor object¶

Connection.cursor()¶

Return a new cursor object using the connection

Returns:a connection object
Return type:Cursor

This method returns a new Cursor object that can be used to operate on the database in the way described in the next section.

Attributes that are not part of the standard¶

Note

The following attributes are not part of the DB-API 2 standard.

Connection.closed¶

This is True if the connection has been closed or has become invalid

Connection.cursor_type¶

The default cursor type used by the connection

If you want to use your own custom subclass of the Cursor class with the connection, set this attribute to your custom cursor class. You will then get your custom cursor whenever you call Connection.cursor().

New in version 5.0.

Connection.type_cache¶

A dictionary with the various type codes for the PostgreSQL types

This can be used for getting more information on the PostgreSQL database types or changing the typecast functions used for the connection. See the description of the TypeCache class for details.

New in version 5.0.

Connection.autocommit¶

A read/write attribute to get/set the autocommit mode

Normally, all DB-API 2 SQL commands are run inside a transaction. Sometimes this behavior is not desired; there are also some SQL commands such as VACUUM which cannot be run inside a transaction.

By setting this attribute to True you can change this behavior so that no transactions will be started for that connection. In this case every executed SQL command has immediate effect on the database and you don't need to call Connection.commit() explicitly. In this mode, you can still use with con: blocks to run parts of the code using the connection con inside a transaction.

By default, this attribute is set to False which conforms to the behavior specified by the DB-API 2 standard (manual commit required).

New in version 5.1.

PyGreSQL-5.1/docs/_build/html/contents/install.html0000644000175100077410000005137613470245533022264 0ustar darcypyg00000000000000 Installation — PyGreSQL 5.1

Installation¶

General¶

You must first install Python and PostgreSQL on your system. If you want to access remote databases only, you don’t need to install the full PostgreSQL server, but only the libpq C-interface library. If you are on Windows, make sure that the directory that contains libpq.dll is part of your PATH environment variable.

The current version of PyGreSQL has been tested with Python versions 2.6, 2.7 and 3.3 to 3.7, and PostgreSQL versions 9.0 to 9.6 and 10 or 11.

PyGreSQL will be installed as three modules, a shared library called _pg.so (on Linux) or a DLL called _pg.pyd (on Windows), and two pure Python wrapper modules called pg.py and pgdb.py. All three files will be installed directly into the Python site-packages directory. To uninstall PyGreSQL, simply remove these three files.

Installing with Pip¶

This is the easiest way to install PyGreSQL if you have “pip” installed. Just run the following command in your terminal:

pip install PyGreSQL

This will automatically try to find and download a distribution on the Python Package Index that matches your operating system and Python version and install it.

Installing from a Binary Distribution¶

If you don’t want to use “pip”, or “pip” doesn’t find an appropriate distribution for your computer, you can also try to manually download and install a distribution.

When you download the source distribution, you will need to compile the C extension, for which you need a C compiler installed. If you don’t want to install a C compiler or avoid possible problems with the compilation, you can search for a pre-compiled binary distribution of PyGreSQL on the Python Package Index or the PyGreSQL homepage.

You can currently download PyGreSQL as Linux RPM, NetBSD package and Windows installer. Make sure the required Python version of the binary package matches the Python version you have installed.

Install the package as usual on your system.

Note that the documentation is currently only included in the source package.

Installing from Source¶

If you want to install PyGreSQL from Source, or there is no binary package available for your platform, follow these instructions.

Make sure the Python header files and PostgreSQL client and server header files are installed. These usually come with the “devel” packages on Unix systems and the installer executables on Windows systems.

If you are using a precompiled PostgreSQL, you will also need the pg_config tool. This is usually also part of the “devel” package on Unix, and will be installed as part of the database server feature on Windows systems.

Building and installing with Distutils¶

You can build and install PyGreSQL using Distutils.

Download and unpack the PyGreSQL source tarball if you haven’t already done so.

Type the following commands to build and install PyGreSQL:

python setup.py build
python setup.py install

Now you should be ready to use PyGreSQL.

Compiling Manually¶

The source file for compiling the C extension module is pgmodule.c. You have two options. You can compile PyGreSQL as a stand-alone module or you can build it into the Python interpreter.

Stand-Alone¶

  • In the directory containing pgmodule.c, run the following command:

    cc -fpic -shared -o _pg.so -I$PYINC -I$PGINC -I$PSINC -L$PGLIB -lpq pgmodule.c
    

    where you have to set:

    PYINC = path to the Python include files
            (usually something like /usr/include/python)
    PGINC = path to the PostgreSQL client include files
            (something like /usr/include/pgsql or /usr/include/postgresql)
    PSINC = path to the PostgreSQL server include files
            (like /usr/include/pgsql/server or /usr/include/postgresql/server)
    PGLIB = path to the PostgreSQL object code libraries (usually /usr/lib)
    

    If you are not sure about the above paths, try something like:

    PYINC=`find /usr -name Python.h`
    PGINC=`find /usr -name libpq-fe.h`
    PSINC=`find /usr -name postgres.h`
    PGLIB=`find /usr -name libpq.so`
    

    If you have the pg_config tool installed, you can set:

    PGINC=`pg_config --includedir`
    PSINC=`pg_config --includedir-server`
    PGLIB=`pg_config --libdir`
    

    Some options may be added to this line:

    -DNO_DEF_VAR   no default variables support
    -DNO_DIRECT    no direct access methods
    -DNO_LARGE     no large object support
    -DNO_PQSOCKET  if running an older PostgreSQL
    

    On some systems you may need to include -lcrypt in the list of libraries to make it compile.

  • Test the new module. Something like the following should work:

    $ python
    
    >>> import _pg
    >>> db = _pg.connect('thilo','localhost')
    >>> db.query("INSERT INTO test VALUES ('ping','pong')")
    18304
    >>> db.query("SELECT * FROM test")
    eins|zwei
    ----+----
    ping|pong
    (1 row)
    
  • Finally, move the _pg.so, pg.py, and pgdb.py to a directory in your PYTHONPATH. A good place would be /usr/lib/python/site-packages if your Python modules are in /usr/lib/python.

Built-in to Python interpreter¶

  • Find the directory where your Setup file lives (usually in the Modules subdirectory) in the Python source hierarchy and copy or symlink the pgmodule.c file there.

  • Add the following line to your ‘Setup’ file:

    _pg  pgmodule.c -I$PGINC -I$PSINC -L$PGLIB -lpq
    

    where:

    PGINC = path to the PostgreSQL client include files (see above)
    PSINC = path to the PostgreSQL server include files (see above)
    PGLIB = path to the PostgreSQL object code libraries (see above)
    

    Some options may be added to this line:

    -DNO_DEF_VAR   no default variables support
    -DNO_DIRECT    no direct access methods
    -DNO_LARGE     no large object support
    -DNO_PQSOCKET  if running an older PostgreSQL (see above)
    

    On some systems you may need to include -lcrypt in the list of libraries to make it compile.

  • If you want a shared module, make sure that the shared keyword is uncommented and add the above line below it. You used to need to install your shared modules with make sharedinstall but this no longer seems to be true.

  • Copy pg.py to the lib directory where the rest of your modules are. For example, that’s /usr/local/lib/Python on my system.

  • Rebuild Python from the root directory of the Python source hierarchy by running make -f Makefile.pre.in boot and make && make install.

  • For more details read the documentation at the top of Makefile.pre.in.

PyGreSQL-5.1/docs/_build/html/copyright.html0000644000175100077410000001457713470245536020776 0ustar darcypyg00000000000000 Copyright notice — PyGreSQL 5.1
PyGreSQL-5.1/docs/_build/html/announce.html0000644000175100077410000001444113470245533020557 0ustar darcypyg00000000000000 PyGreSQL Announcements — PyGreSQL 5.1

PyGreSQL Announcements¶

Release of PyGreSQL version 5.1¶

Release 5.1 of PyGreSQL.

It is available at: http://pygresql.org/files/PyGreSQL-5.1.tar.gz.

If you are running NetBSD, look in the packages directory under databases. There is also a package in the FreeBSD ports collection.

Please refer to changelog.txt for things that have changed in this version.

This version has been built and unit tested on:
  • NetBSD
  • FreeBSD
  • openSUSE
  • Ubuntu
  • Windows 7 and 10 with both MinGW and Visual Studio
  • PostgreSQL 9.0 to 9.6 and 10 or 11 (32 and 64bit)
  • Python 2.6, 2.7 and 3.3 to 3.7 (32 and 64bit)
D’Arcy J.M. Cain
PyGreSQL-5.1/docs/download/0000755000175100077410000000000013470245542015464 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/download/index.rst0000644000175100077410000000111413466770070017326 0ustar darcypyg00000000000000Download information ==================== .. include:: download.rst News, Changes and Future Development ------------------------------------ See the :doc:`../announce` for current news. For a list of all changes in the current version |version| and in past versions, have a look at the :doc:`../contents/changelog`. The section on :doc:`../community/index` lists ideas for future developments and ways to participate. Installation ------------ Please read the chapter on :doc:`../contents/install` in our documentation. .. include:: files.rst .. include:: ../community/homes.rstPyGreSQL-5.1/docs/download/files.rst0000644000175100077410000000140313466770070017322 0ustar darcypyg00000000000000Distribution files ------------------ ========== = pgmodule.c the C Python module (_pg) pgtypes.h PostgreSQL type definitions py3c.h Python 2/3 compatibility layer for the C extension pg.py the "classic" PyGreSQL module pgdb.py a DB-SIG DB-API 2.0 compliant API wrapper for PygreSQL setup.py the Python setup script To install PyGreSQL, you can run "python setup.py install". setup.cfg the Python setup configuration docs/ documentation directory The documentation has been created with Sphinx. All text files are in ReST format; a HTML version of the documentation can be created with the command "make html" or "gmake html". 
tests/ a suite of unit tests for PyGreSQL ========== = PyGreSQL-5.1/docs/download/download.rst0000644000175100077410000000264213466770070020035 0ustar darcypyg00000000000000Current PyGreSQL versions ------------------------- You can find PyGreSQL on the **Python Package Index** at * http://pypi.python.org/pypi/PyGreSQL/ The **released version of the source code** is available at * http://pygresql.org/files/PyGreSQL.tar.gz You can also check the latest **pre-release version** at * http://pygresql.org/files/PyGreSQL-beta.tar.gz A **Linux RPM** can be picked up from * http://pygresql.org/files/pygresql.i386.rpm A **NetBSD package** is available in their pkgsrc collection * ftp://ftp.netbsd.org/pub/NetBSD/packages/pkgsrc/databases/py-postgresql/README.html A **FreeBSD package** is available in their ports collection * http://www.freebsd.org/cgi/cvsweb.cgi/ports/databases/py-PyGreSQL/ An **openSUSE package** is available through their build service at * https://software.opensuse.org/package/PyGreSQL?search_term=pygresql A **Win32 installer** for various Python versions is available at * http://pygresql.org/files/PyGreSQL-5.1.win-amd64-py2.6.exe * http://pygresql.org/files/PyGreSQL-5.1.win-amd64-py2.7.exe * http://pygresql.org/files/PyGreSQL-5.1.win-amd64-py3.4.exe * http://pygresql.org/files/PyGreSQL-5.1.win-amd64-py3.5.exe * http://pygresql.org/files/PyGreSQL-5.1.win-amd64-py3.6.exe * http://pygresql.org/files/PyGreSQL-5.1.win-amd64-py3.7.exe Older PyGreSQL versions ----------------------- You can look for older PyGreSQL versions at * http://pygresql.org/files/ PyGreSQL-5.1/docs/toc.txt0000644000175100077410000000034513466770070015211 0ustar darcypyg00000000000000.. PyGreSQL index page with toc (for use without cloud theme) Welcome to PyGreSQL =================== .. 
toctree:: :maxdepth: 2 about copyright announce download/index contents/index community/indexPyGreSQL-5.1/docs/about.rst0000644000175100077410000000006513466770070015526 0ustar darcypyg00000000000000About PyGreSQL ============== .. include:: about.txtPyGreSQL-5.1/docs/contents/0000755000175100077410000000000013470245541015511 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/contents/changelog.rst0000644000175100077410000007434413470244250020202 0ustar darcypyg00000000000000ChangeLog ========= Version 5.1 (2019-05-17) ------------------------ - Changes to the classic PyGreSQL module (pg): - Support for prepared statements (following a suggestion and first implementation by Justin Pryzby on the mailing list). - DB wrapper objects based on existing connections can now be closed and reopened properly (but the underlying connection will not be affected). - The query object can now be used as an iterator similar to query.getresult() and will then yield the rows as tuples. Thanks to Justin Pryzby for the proposal and most of the implementation. - Deprecated query.ntuples() in the classic API, since len(query) can now be used and returns the same number. - The i-th row of the result can now be accessed as `query[i]`. - New method query.scalarresult() that gets only the first field of each row as a list of scalar values. - New methods query.one(), query.onenamed(), query.onedict() and query.onescalar() that fetch only one row from the result or None if there are no more rows, similar to the cursor.fetchone() method in DB-API 2. - New methods query.single(), query.singlenamed(), query.singledict() and query.singlescalar() that fetch only one row from the result, and raise an error if the result does not have exactly one row. - New methods query.dictiter(), query.namediter() and query.scalariter() returning the same values as query.dictresult(), query.namedresult() and query.scalarresult(), but as iterables instead of lists. 
This avoids creating a Python list of all results and can be slightly more efficient. - Removed pg.get/set_namedresult. You can configure the named tuples factory with the pg.set_row_factory_size() function and change the implementation with pg.set_query_helpers(), but this is not recommended and this function is not part of the official API. - Added new connection attributes `socket`, `backend_pid`, `ssl_in_use` and `ssl_attributes` (the latter need PostgreSQL >= 9.5 on the client). - Changes to the DB-API 2 module (pgdb): - Connections now have an `autocommit` attribute which is set to `False` by default but can be set to `True` to switch to autocommit mode where no transactions are started and calling commit() is not required. Note that this is not part of the DB-API 2 standard. Version 5.0.7 (2019-05-17) ------------------------- - This version officially supports the new PostgreSQL 11. - Fixed a bug in parsing array subscript ranges (reported by Justin Pryzby). - Fixed an issue when deleting a DB wrapper object with the underlying connection already closed (bug report by Jacob Champion). Version 5.0.6 (2018-07-29) ------------------------- - This version officially supports the new Python 3.7. - Correct trove classifier for the PostgreSQL License. Version 5.0.5 (2018-04-25) -------------------------- - This version officially supports the new PostgreSQL 10. - The memory for the string with the number of rows affected by a classic pg module query() was already freed (bug report and fix by Peifeng Qiu). Version 5.0.4 (2017-07-23) -------------------------- - This version officially supports the new Python 3.6 and PostgreSQL 9.6. - query_formatted() can now be used without parameters. - The automatic renaming of columns that are invalid as field names of named tuples now works more accurately in Python 2.6 and 3.0. - Fixed error checks for unlink() and export() methods of large objects (bug report by Justin Pryzby). 
- Fixed a compilation issue under OS X (bug report by Josh Johnston). Version 5.0.3 (2016-12-10) -------------------------- - It is now possible to use a custom array cast function by changing the type caster for the 'anyarray' type. For instance, by calling set_typecast('anyarray', lambda v, c: v) you can have arrays returned as strings instead of lists. Note that in the pg module, you can also call set_array(False) in order to return arrays as strings. - The namedtuple classes used for the rows of query results are now cached and reused internally, since creating namedtuples classes in Python is a somewhat expensive operation. By default the cache has a size of 1024 entries, but this can be changed with the set_row_factory_size() function. In certain cases this change can notably improve the performance. - The namedresult() method in the classic API now also tries to rename columns that would result in invalid field names. Version 5.0.2 (2016-09-13) -------------------------- - Fixed an infinite recursion problem in the DB wrapper class of the classic module that could occur when the underlying connection could not be properly opened (bug report by Justin Pryzby). Version 5.0.1 (2016-08-18) -------------------------- - The update() and delete() methods of the DB wrapper now use the OID instead of the primary key if both are provided. This restores backward compatibility with PyGreSQL 4.x and allows updating the primary key itself if an OID exists. - The connect() function of the DB API 2.0 module now accepts additional keyword parameters such as "application_name" which will be passed on to PostgreSQL. - PyGreSQL now adapts some queries to be able to access older PostgreSQL 8.x databases (as suggested on the mailing list by Andres Mejia). However, these old versions of PostgreSQL are not officially supported and tested any more. - Fixed an issue with Postgres types that have an OID >= 0x80000000 (reported on the mailing list by Justin Pryzby). 
- Allow extra values that are not used in the command in the parameter dict passed to the query_formatted() method (as suggested by Justin Pryzby). - Improved handling of empty arrays in the classic module. - Unused classic connections were not properly garbage collected which could cause memory leaks (reported by Justin Pryzby). - Made C extension compatible with MSVC 9 again (this was needed to compile for Python 2 on Windows). Version 5.0 (2016-03-20) ------------------------ - This version now runs on both Python 2 and Python 3. - The supported versions are Python 2.6 to 2.7, and 3.3 to 3.5. - PostgreSQL is supported in all versions from 9.0 to 9.5. - Changes in the classic PyGreSQL module (pg): - The classic interface got two new methods get_as_list() and get_as_dict() returning a database table as a Python list or dict. The amount of data returned can be controlled with various parameters. - A method upsert() has been added to the DB wrapper class that utilizes the "upsert" feature that is new in PostgreSQL 9.5. The new method nicely complements the existing get/insert/update/delete() methods. - When using insert/update/upsert(), you can now pass PostgreSQL arrays as lists and PostgreSQL records as tuples in the classic module. - Conversely, when the query method returns a PostgreSQL array, it is passed to Python as a list. PostgreSQL records are converted to named tuples as well, but only if you use one of the get/insert/update/delete() methods. PyGreSQL uses a new fast built-in parser to achieve this. The automatic conversion of arrays to lists can be disabled with set_array(False). - The pkey() method of the classic interface now returns tuples instead of frozenset. The order of the tuples is like in the primary key index. - Like the DB-API 2 module, the classic module now also returns bool values from the database as Python bool objects instead of strings. You can still restore the old behavior by calling set_bool(False). 
- Like the DB-API 2 module, the classic module now also returns bytea data fetched from the database as byte strings, so you don't need to call unescape_bytea() any more. This has been made configurable though, and you can restore the old behavior by calling set_bytea_escaped(True). - A method set_jsondecode() has been added for changing or removing the function that automatically decodes JSON data coming from the database. By default, decoding JSON is now enabled and uses the decoder function in the standard library with its default parameters. - The table name that is affixed to the name of the OID column returned by the get() method of the classic interface will not automatically be fully qualified any more. This reduces overhead from the interface, but it means you must always write the table name in the same way when you call the methods using it and you are using tables with OIDs. Also, OIDs are now only used when access via primary key is not possible. Note that OIDs are considered deprecated anyway, and they are not created by default any more in PostgreSQL 8.1 and later. - The internal caching and automatic quoting of class names in the classic interface has been simplified and improved, it should now perform better and use less memory. Also, overhead for quoting values in the DB wrapper methods has been reduced and security has been improved by passing the values to libpq separately as parameters instead of inline. - It is now possible to use the registered type names instead of the more coarse-grained type names that are used by default in PyGreSQL, without breaking any of the mechanisms for quoting and typecasting, which rely on the type information. This is achieved while maintaining simplicity and backward compatibility by augmenting the type name string objects with all the necessary information under the cover. To switch registered type names on or off (this is the default), call the DB wrapper method use_regtypes(). 
- A new method query_formatted() has been added to the DB wrapper class that allows using the format specifications from Python. A flag "inline" can be set to specify whether parameters should be sent to the database separately or formatted into the SQL. - A new type helper Bytea() has been added. - Changes in the DB-API 2 module (pgdb): - The DB-API 2 module now always returns result rows as named tuples instead of simply lists as before. The documentation explains how you can restore the old behavior or use custom row objects instead. - The names of the various classes used by the classic and DB-API 2 modules have been renamed to become simpler, more intuitive and in line with the names used in the DB-API 2 documentation. Since the API provides only objects of these types through constructor functions, this should not cause any incompatibilities. - The DB-API 2 module now supports the callproc() cursor method. Note that output parameters are currently not replaced in the return value. - The DB-API 2 module now supports copy operations between data streams on the client and database tables via the COPY command of PostgreSQL. The cursor method copy_from() can be used to copy data from the database to the client, and the cursor method copy_to() can be used to copy data from the client to the database. - The 7-tuples returned by the description attribute of a pgdb cursor are now named tuples, i.e. their elements can be also accessed by name. The column names and types can now also be requested through the colnames and coltypes attributes, which are not part of DB-API 2 though. The type_code provided by the description attribute is still equal to the PostgreSQL internal type name, but now carries some more information in additional attributes. The size, precision and scale information that is part of the description is now properly set for numeric types. 
- If you pass a Python list as one of the parameters to a DB-API 2 cursor, it is now automatically bound using an ARRAY constructor. If you pass a Python tuple, it is bound using a ROW constructor. This is useful for passing records as well as making use of the IN syntax. - Inversely, when a fetch method of a DB-API 2 cursor returns a PostgreSQL array, it is passed to Python as a list, and when it returns a PostgreSQL composite type, it is passed to Python as a named tuple. PyGreSQL uses a new fast built-in parser to achieve this. Anonymous composite types are also supported, but yield only an ordinary tuple containing text strings. - New type helpers Interval() and Uuid() have been added. - The connection has a new attribute "closed" that can be used to check whether the connection is closed or broken. - SQL commands are always handled as if they include parameters, i.e. literal percent signs must always be doubled. This consistent behavior is necessary for using pgdb with wrappers like SQLAlchemy. - PyGreSQL 5.0 will be supported as a database driver by SQLAlchemy 1.1. - Changes concerning both modules: - PyGreSQL now tries to raise more specific and appropriate subclasses of DatabaseError than just ProgrammingError. Particularly, when database constraints are violated, it raises an IntegrityError now. - The modules now provide get_typecast() and set_typecast() methods allowing to control the typecasting on the global level. The connection objects have got type caches with the same methods which give control over the typecasting on the level of the current connection. See the documentation on details about the type cache and the typecast mechanisms provided by PyGreSQL. - Dates, times, timestamps and time intervals are now returned as the corresponding Python objects from the datetime module of the standard library. In earlier versions of PyGreSQL they had been returned as strings. 
You can restore the old behavior by deactivating the respective typecast functions, e.g. set_typecast('date', str). - PyGreSQL now supports the "uuid" data type, converting such columns automatically to and from Python uuid.UUID objects. - PyGreSQL now supports the "hstore" data type, converting such columns automatically to and from Python dictionaries. If you want to insert Python dictionaries as hstore data using DB-API 2, you should wrap them in the new HStore() type constructor as a hint to PyGreSQL. - PyGreSQL now supports the "json" and "jsonb" data types, converting such columns automatically to and from Python objects. If you want to insert Python objects as JSON data using DB-API 2, you should wrap them in the new Json() type constructor as a hint to PyGreSQL. - A new type helper Literal() for inserting parameters literally as SQL has been added. This is useful for table names, for instance. - Fast parsers cast_array(), cast_record() and cast_hstore for the input and output syntax for PostgreSQL arrays, composite types and the hstore type have been added to the C extension module. The array parser also allows using multi-dimensional arrays with PyGreSQL. - The tty parameter and attribute of database connections has been removed since it is not supported any more since PostgreSQL 7.4. 
- Set a better default for the user option "escaping-funcs". - Force build to compile with no errors. - New methods get_parameters() and set_parameters() in the classic interface which can be used to get or set run-time parameters. - New method truncate() in the classic interface that can be used to quickly empty a table or a set of tables. - Fix decimal point handling. - Add option to return boolean values as bool objects. - Add option to return money values as string. - get_tables() does not list information schema tables any more. - Fix notification handler (Thanks Patrick TJ McPhee). - Fix a small issue with large objects. - Minor improvements of the NotificationHandler. - Converted documentation to Sphinx and added many missing parts. - The tutorial files have become a chapter in the documentation. - Greatly improved unit testing, tests run with Python 2.4 to 2.7 again. Version 4.1.1 (2013-01-08) -------------------------- - Add NotificationHandler class and method. Replaces need for pgnotify. - Sharpen test for inserting current_timestamp. - Add more quote tests. False and 0 should evaluate to NULL. - More tests - Any number other than 0 is True. - Do not use positional parameters internally. This restores backward compatibility with version 4.0. - Add methods for changing the decimal point. Version 4.1 (2013-01-01) ------------------------ - Dropped support for Python below 2.5 and PostgreSQL below 8.3. - Added support for Python up to 2.7 and PostgreSQL up to 9.2. - Particularly, support PQescapeLiteral() and PQescapeIdentifier(). - The query method of the classic API now supports positional parameters. This an effective way to pass arbitrary or unknown data without worrying about SQL injection or syntax errors (contribution by Patrick TJ McPhee). - The classic API now supports a method namedresult() in addition to getresult() and dictresult(), which returns the rows of the result as named tuples if these are supported (Python 2.6 or higher). 
- The classic API has got the new methods begin(), commit(), rollback(), savepoint() and release() for handling transactions. - Both classic and DBAPI 2 connections can now be used as context managers for encapsulating transactions. - The execute() and executemany() methods now return the cursor object, so you can now write statements like "for row in cursor.execute(...)" (as suggested by Adam Frederick). - Binary objects are now automatically escaped and unescaped. - Bug in money quoting fixed. Amounts of $0.00 handled correctly. - Proper handling of date and time objects as input. - Proper handling of floats with 'nan' or 'inf' values as input. - Fixed the set_decimal() function. - All DatabaseError instances now have a sqlstate attribute. - The getnotify() method can now also return payload strings (#15). - Better support for notice processing with the new methods set_notice_receiver() and get_notice_receiver() (as suggested by Michael Filonenko, see #37). - Open transactions are rolled back when pgdb connections are closed (as suggested by Peter Harris, see #46). - Connections and cursors can now be used with the "with" statement (as suggested by Peter Harris, see #46). - New method use_regtypes() that can be called to let getattnames() return registered type names instead of the simplified classic types (#44). Version 4.0 (2009-01-01) ------------------------ - Dropped support for Python below 2.3 and PostgreSQL below 7.4. - Improved performance of fetchall() for large result sets by speeding up the type casts (as suggested by Peter Schuller). - Exposed exceptions as attributes of the connection object. - Exposed connection as attribute of the cursor object. - Cursors now support the iteration protocol. - Added new method to get parameter settings. - Added customizable row_factory as suggested by Simon Pamies. - Separated between mandatory and additional type objects. - Added keyword args to insert, update and delete methods. 
- Added exception handling for direct copy. - Start transactions only when necessary, not after every commit(). - Release the GIL while making a connection (as suggested by Peter Schuller). - If available, use decimal.Decimal for numeric types. - Allow DB wrapper to be used with DB-API 2 connections (as suggested by Chris Hilton). - Made private attributes of DB wrapper accessible. - Dropped dependence on mx.DateTime module. - Support for PQescapeStringConn() and PQescapeByteaConn(); these are now also used by the internal _quote() functions. - Added 'int8' to INTEGER types. New SMALLINT type. - Added a way to find the number of rows affected by a query() with the classic pg module by returning it as a string. For single inserts, query() still returns the oid as an integer. The pgdb module already provides the "rowcount" cursor attribute for the same purpose. - Improved getnotify() by calling PQconsumeInput() instead of submitting an empty command. - Removed compatibility code for old OID munging style. - The insert() and update() methods now use the "returning" clause if possible to get all changed values, and they also check in advance whether a subsequent select is possible, so that ongoing transactions won't break if there is no select privilege. - Added "protocol_version" and "server_version" attributes. - Revived the "user" attribute. - The pg module now works correctly with composite primary keys; these are represented as frozensets. - Removed the undocumented and actually unnecessary "view" parameter from the get() method. - get() raises a nicer ProgrammingError instead of a KeyError if no primary key was found. - delete() now also works based on the primary key if no oid available and returns whether the row existed or not. Version 3.8.1 (2006-06-05) -------------------------- - Use string methods instead of deprecated string functions. - Only use SQL-standard way of escaping quotes. 
- Added the functions escape_string() and escape/unescape_bytea() (as suggested by Charlie Dyson and Kavous Bojnourdi a long time ago). - Reverted code in clear() method that set date to current. - Added code for backwards compatibility in OID munging code. - Reorder attnames tests so that "interval" is checked for before "int." - If caller supplies key dictionary, make sure that all has a namespace. Version 3.8 (2006-02-17) ------------------------ - Installed new favicon.ico from Matthew Sporleder - Replaced snprintf by PyOS_snprintf. - Removed NO_SNPRINTF switch which is not needed any longer - Clean up some variable names and namespace - Add get_relations() method to get any type of relation - Rewrite get_tables() to use get_relations() - Use new method in get_attnames method to get attributes of views as well - Add Binary type - Number of rows is now -1 after executing no-result statements - Fix some number handling - Non-simple types do not raise an error any more - Improvements to documentation framework - Take into account that nowadays not every table must have an oid column - Simplification and improvement of the inserttable() function - Fix up unit tests - The usual assortment of minor fixes and enhancements Version 3.7 (2005-09-07) ------------------------ Improvement of pgdb module: - Use Python standard `datetime` if `mxDateTime` is not available Major improvements and clean-up in classic pg module: - All members of the underlying connection directly available in `DB` - Fixes to quoting function - Add checks for valid database connection to methods - Improved namespace support, handle `search_path` correctly - Removed old dust and unnecessary imports, added docstrings - Internal sql statements as one-liners, smoothed out ugly code Version 3.6.2 (2005-02-23) -------------------------- - Further fixes to namespace handling Version 3.6.1 (2005-01-11) -------------------------- - Fixes to namespace handling Version 3.6 (2004-12-17) 
------------------------ - Better DB-API 2.0 compliance - Exception hierarchy moved into C module and made available to both APIs - Fix error in update method that caused false exceptions - Moved to standard exception hierarchy in classic API - Added new method to get transaction state - Use proper Python constants where appropriate - Use Python versions of strtol, etc. Allows Win32 build. - Bug fixes and cleanups Version 3.5 (2004-08-29) ------------------------ Fixes and enhancements: - Add interval to list of data types - fix up method wrapping especially close() - retry pkeys once if table missing in case it was just added - wrap query method separately to handle debug better - use isinstance instead of type - fix free/PQfreemem issue - finally - miscellaneous cleanups and formatting Version 3.4 (2004-06-02) ------------------------ Some cleanups and fixes. This is the first version where PyGreSQL is moved back out of the PostgreSQL tree. A lot of the changes mentioned below were actually made while in the PostgreSQL tree since their last release. - Allow for larger integer returns - Return proper strings for true and false - Cleanup convenience method creation - Enhance debugging method - Add reopen method - Allow programs to preload field names for speedup - Move OID handling so that it returns long instead of int - Miscellaneous cleanups and formatting Version 3.3 (2001-12-03) ------------------------ A few cleanups. Mostly there was some confusion about the latest version and so I am bumping the number to keep it straight. - Added NUMERICOID to list of returned types. This fixes a bug when returning aggregates in the latest version of PostgreSQL. Version 3.2 (2001-06-20) ------------------------ Note that there are very few changes to PyGreSQL between 3.1 and 3.2. The main reason for the release is the move into the PostgreSQL development tree. Even the WIN32 changes are pretty minor. 
- Add Win32 support (gerhard@bigfoot.de) - Fix some DB-API quoting problems (niall.smart@ebeon.com) - Moved development into PostgreSQL development tree. Version 3.1 (2000-11-06) ------------------------ - Fix some quoting functions. In particular handle NULLs better. - Use a method to add primary key information rather than direct manipulation of the class structures - Break decimal out in `_quote` (in pg.py) and treat it as float - Treat timestamp like date for quoting purposes - Remove a redundant SELECT from the `get` method speeding it, and `insert` (since it calls `get`) up a little. - Add test for BOOL type in typecast method to `pgdbTypeCache` class (tv@beamnet.de) - Fix pgdb.py to send port as integer to lower level function (dildog@l0pht.com) - Change pg.py to speed up some operations - Allow updates on tables with no primary keys Version 3.0 (2000-05-30) ------------------------ - Remove strlen() call from pglarge_write() and get size from object (Richard@Bouska.cz) - Add a little more error checking to the quote function in the wrapper - Add extra checking in `_quote` function - Wrap query in pg.py for debugging - Add DB-API 2.0 support to pgmodule.c (andre@via.ecp.fr) - Add DB-API 2.0 wrapper pgdb.py (andre@via.ecp.fr) - Correct keyword clash (temp) in tutorial - Clean up layout of tutorial - Return NULL values as None (rlawrence@lastfoot.com) (WARNING: This will cause backwards compatibility issues) - Change None to NULL in insert and update - Change hash-bang lines to use /usr/bin/env - Clearing date should be blank (NULL) not TODAY - Quote backslashes in strings in `_quote` (brian@CSUA.Berkeley.EDU) - Expanded and clarified build instructions (tbryan@starship.python.net) - Make code thread safe (Jerome.Alet@unice.fr) - Add README.distutils (mwa@gate.net & jeremy@cnri.reston.va.us) - Many fixes and increased DB-API compliance by chifungfan@yahoo.com, tony@printra.net, jeremy@alum.mit.edu and others to get the final version ready to release. 
Version 2.4 (1999-06-15) ------------------------ - Insert returns None if the user doesn't have select permissions on the table. It can (and does) happen that one has insert but not select permissions on a table. - Added ntuples() method to query object (brit@druid.net) - Corrected a bug related to getresult() and the money type - Corrected a bug related to negative money amounts - Allow update based on primary key if munged oid not available and table has a primary key - Add many __doc__ strings (andre@via.ecp.fr) - Get method works with views if key specified Version 2.3 (1999-04-17) ------------------------ - connect.host returns "localhost" when connected to Unix socket (torppa@tuhnu.cutery.fi) - Use `PyArg_ParseTupleAndKeywords` in connect() (torppa@tuhnu.cutery.fi) - fixes and cleanups (torppa@tuhnu.cutery.fi) - Fixed memory leak in dictresult() (terekhov@emc.com) - Deprecated pgext.py - functionality now in pg.py - More cleanups to the tutorial - Added fileno() method - terekhov@emc.com (Mikhail Terekhov) - added money type to quoting function - Compiles cleanly with more warnings turned on - Returns PostgreSQL error message on error - Init accepts keywords (Jarkko Torppa) - Convenience functions can be overridden (Jarkko Torppa) - added close() method Version 2.2 (1998-12-21) ------------------------ - Added user and password support thanks to Ng Pheng Siong (ngps@post1.com) - Insert queries return the inserted oid - Add new `pg` wrapper (C module renamed to _pg) - Wrapped database connection in a class - Cleaned up some of the tutorial. (More work needed.) - Added `version` and `__version__`. Thanks to thilo@eevolute.com for the suggestion. 
Version 2.1 (1998-03-07) ------------------------ - return fields as proper Python objects for field type - Cleaned up pgext.py - Added dictresult method Version 2.0 (1997-12-23) ------------------------ - Updated code for PostgreSQL 6.2.1 and Python 1.5 - Reformatted code and converted to use full ANSI style prototypes - Changed name to PyGreSQL (from PyGres95) - Changed order of arguments to connect function - Created new type `pgqueryobject` and moved certain methods to it - Added a print function for pgqueryobject - Various code changes - mostly stylistic Version 1.0b (1995-11-04) ------------------------- - Keyword support for connect function moved from library file to C code and taken away from library - Rewrote documentation - Bug fix in connect function - Enhancements in large objects interface methods Version 1.0a (1995-10-30) ------------------------- A limited release. - Module adapted to standard Python syntax - Keyword support for connect function in library file - Rewrote default parameters interface (internal use of strings) - Fixed minor bugs in module interface - Redefinition of error messages Version 0.9b (1995-10-10) ------------------------- The first public release. - Large objects implementation - Many bug fixes, enhancements, ... Version 0.1a (1995-10-07) ------------------------- - Basic libpq functions (SQL access) PyGreSQL-5.1/docs/contents/postgres/0000755000175100077410000000000013470245542017360 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/contents/postgres/index.rst0000644000175100077410000000063513466770070021231 0ustar darcypyg00000000000000------------------- A PostgreSQL Primer ------------------- The examples in this chapter of the documentation have been taken from the PostgreSQL manual. They demonstrate some PostgreSQL features using the classic PyGreSQL interface. They can serve as an introduction to PostgreSQL, but not so much as examples for the use of PyGreSQL. Contents ======== .. 
toctree:: basic advanced func syscat PyGreSQL-5.1/docs/contents/postgres/func.rst0000644000175100077410000001154513466770070021057 0ustar darcypyg00000000000000Examples for using SQL functions ================================ .. py:currentmodule:: pg We assume that you have already created a connection to the PostgreSQL database, as explained in the :doc:`basic`:: >>> from pg import DB >>> db = DB() >>> query = db.query Creating SQL Functions on Base Types ------------------------------------ A **CREATE FUNCTION** statement lets you create a new function that can be used in expressions (in SELECT, INSERT, etc.). We will start with functions that return values of base types. Let's create a simple SQL function that takes no arguments and returns 1:: >>> query("""CREATE FUNCTION one() RETURNS int4 ... AS 'SELECT 1 as ONE' LANGUAGE SQL""") Functions can be used in any expressions (eg. in the target list or qualifications):: >>> print(db.query("SELECT one() AS answer")) answer ------ 1 (1 row) Here's how you create a function that takes arguments. The following function returns the sum of its two arguments:: >>> query("""CREATE FUNCTION add_em(int4, int4) RETURNS int4 ... AS $$ SELECT $1 + $2 $$ LANGUAGE SQL""") >>> print(query("SELECT add_em(1, 2) AS answer")) answer ------ 3 (1 row) Creating SQL Functions on Composite Types ----------------------------------------- It is also possible to create functions that return values of composite types. Before we create more sophisticated functions, let's populate an EMP table:: >>> query("""CREATE TABLE EMP ( ... name text, ... salary int4, ... age int4, ... dept varchar(16))""") >>> emps = ["'Sam', 1200, 16, 'toy'", ... "'Claire', 5000, 32, 'shoe'", ... "'Andy', -1000, 2, 'candy'", ... "'Bill', 4200, 36, 'shoe'", ... "'Ginger', 4800, 30, 'candy'"] >>> for emp in emps: ... query("INSERT INTO EMP VALUES (%s)" % emp) Every INSERT statement will return a '1' indicating that it has inserted one row into the EMP table. 
The argument of a function can also be a tuple. For instance, *double_salary* takes a tuple of the EMP table:: >>> query("""CREATE FUNCTION double_salary(EMP) RETURNS int4 ... AS $$ SELECT $1.salary * 2 AS salary $$ LANGUAGE SQL""") >>> print(query("""SELECT name, double_salary(EMP) AS dream ... FROM EMP WHERE EMP.dept = 'toy'""")) name|dream ----+----- Sam | 2400 (1 row) The return value of a function can also be a tuple. However, make sure that the expressions in the target list are in the same order as the columns of EMP:: >>> query("""CREATE FUNCTION new_emp() RETURNS EMP AS $$ ... SELECT 'None'::text AS name, ... 1000 AS salary, ... 25 AS age, ... 'None'::varchar(16) AS dept ... $$ LANGUAGE SQL""") You can then extract a column out of the resulting tuple by using the "function notation" for projection columns (i.e. ``bar(foo)`` is equivalent to ``foo.bar``). Note that ``new_emp().name`` isn't supported:: >>> print(query("SELECT name(new_emp()) AS nobody")) nobody ------ None (1 row) Let's try one more function that returns tuples:: >>> query("""CREATE FUNCTION high_pay() RETURNS setof EMP ... AS 'SELECT * FROM EMP where salary > 1500' ... LANGUAGE SQL""") >>> query("SELECT name(high_pay()) AS overpaid") overpaid -------- Claire Bill Ginger (3 rows) Creating SQL Functions with multiple SQL statements --------------------------------------------------- You can also create functions that do more than just a SELECT. You may have noticed that Andy has a negative salary. We'll create a function that removes employees with negative salaries:: >>> query("SELECT * FROM EMP") name |salary|age|dept ------+------+---+----- Sam | 1200| 16|toy Claire| 5000| 32|shoe Andy | -1000| 2|candy Bill | 4200| 36|shoe Ginger| 4800| 30|candy (5 rows) >>> query("""CREATE FUNCTION clean_EMP () RETURNS int4 AS ... 'DELETE FROM EMP WHERE EMP.salary < 0; ... SELECT 1 AS ignore_this' ... 
LANGUAGE SQL""") >>> query("SELECT clean_EMP()") clean_emp --------- 1 (1 row) >>> query("SELECT * FROM EMP") name |salary|age|dept ------+------+---+----- Sam | 1200| 16|toy Claire| 5000| 32|shoe Bill | 4200| 36|shoe Ginger| 4800| 30|candy (4 rows) Remove functions that were created in this example -------------------------------------------------- We can remove the functions that we have created in this example and the table EMP, by using the DROP command:: query("DROP FUNCTION clean_EMP()") query("DROP FUNCTION high_pay()") query("DROP FUNCTION new_emp()") query("DROP FUNCTION add_em(int4, int4)") query("DROP FUNCTION one()") query("DROP TABLE EMP CASCADE") PyGreSQL-5.1/docs/contents/postgres/syscat.rst0000644000175100077410000001107613466770070021431 0ustar darcypyg00000000000000Examples for using the system catalogs ====================================== .. py:currentmodule:: pg The system catalogs are regular tables where PostgreSQL stores schema metadata, such as information about tables and columns, and internal bookkeeping information. You can drop and recreate the tables, add columns, insert and update values, and severely mess up your system that way. Normally, one should not change the system catalogs by hand: there are SQL commands to make all supported changes. For example, CREATE DATABASE inserts a row into the *pg_database* catalog — and actually creates the database on disk. In this section we want to show examples for how to parse some of the system catalogs, making queries with the classic PyGreSQL interface. 
We assume that you have already created a connection to the PostgreSQL database, as explained in the :doc:`basic`:: >>> from pg import DB >>> db = DB() >>> query = db.query Lists indices ------------- This query lists all simple indices in the database:: print(query("""SELECT bc.relname AS class_name, ic.relname AS index_name, a.attname FROM pg_class bc, pg_class ic, pg_index i, pg_attribute a WHERE i.indrelid = bc.oid AND i.indexrelid = ic.oid AND i.indkey[0] = a.attnum AND a.attrelid = bc.oid AND NOT a.attisdropped AND a.attnum>0 ORDER BY class_name, index_name, attname""")) List user defined attributes ---------------------------- This query lists all user-defined attributes and their types in user-defined tables:: print(query("""SELECT c.relname, a.attname, format_type(a.atttypid, a.atttypmod) FROM pg_class c, pg_attribute a WHERE c.relkind = 'r' AND c.relnamespace!=ALL(ARRAY[ 'pg_catalog','pg_toast', 'information_schema']::regnamespace[]) AND a.attnum > 0 AND a.attrelid = c.oid AND NOT a.attisdropped ORDER BY relname, attname""")) List user defined base types ---------------------------- This query lists all user defined base types:: print(query("""SELECT r.rolname, t.typname FROM pg_type t, pg_authid r WHERE r.oid = t.typowner AND t.typrelid = '0'::oid and t.typelem = '0'::oid AND r.rolname != 'postgres' ORDER BY rolname, typname""")) List operators -------------- This query lists all right-unary operators:: print(query("""SELECT o.oprname AS right_unary, lt.typname AS operand, result.typname AS return_type FROM pg_operator o, pg_type lt, pg_type result WHERE o.oprkind='r' and o.oprleft = lt.oid AND o.oprresult = result.oid ORDER BY operand""")) This query lists all left-unary operators:: print(query("""SELECT o.oprname AS left_unary, rt.typname AS operand, result.typname AS return_type FROM pg_operator o, pg_type rt, pg_type result WHERE o.oprkind='l' AND o.oprright = rt.oid AND o.oprresult = result.oid ORDER BY operand""")) And this one lists all of the 
binary operators:: print(query("""SELECT o.oprname AS binary_op, rt.typname AS right_opr, lt.typname AS left_opr, result.typname AS return_type FROM pg_operator o, pg_type rt, pg_type lt, pg_type result WHERE o.oprkind = 'b' AND o.oprright = rt.oid AND o.oprleft = lt.oid AND o.oprresult = result.oid""")) List functions of a language ---------------------------- Given a programming language, this query returns the name, args and return type from all functions of a language:: language = 'sql' print(query("""SELECT p.proname, p.pronargs, t.typname FROM pg_proc p, pg_language l, pg_type t WHERE p.prolang = l.oid AND p.prorettype = t.oid AND l.lanname = $1 ORDER BY proname""", (language,))) List aggregate functions ------------------------ This query lists all of the aggregate functions and the type to which they can be applied:: print(query("""SELECT p.proname, t.typname FROM pg_aggregate a, pg_proc p, pg_type t WHERE a.aggfnoid = p.oid and p.proargtypes[0] = t.oid ORDER BY proname, typname""")) List operator families ---------------------- The following query lists all defined operator families and all the operators included in each family:: print(query("""SELECT am.amname, opf.opfname, amop.amopopr::regoperator FROM pg_am am, pg_opfamily opf, pg_amop amop WHERE opf.opfmethod = am.oid AND amop.amopfamily = opf.oid ORDER BY amname, opfname, amopopr""")) PyGreSQL-5.1/docs/contents/postgres/basic.rst0000644000175100077410000003020013466770070021172 0ustar darcypyg00000000000000Basic examples ============== .. py:currentmodule:: pg In this section, we demonstrate how to use some of the very basic features of PostgreSQL using the classic PyGreSQL interface. 
Creating a connection to the database ------------------------------------- We start by creating a **connection** to the PostgreSQL database:: >>> from pg import DB >>> db = DB() If you pass no parameters when creating the :class:`DB` instance, then PyGreSQL will try to connect to the database on the local host that has the same name as the current user, and also use that name for login. You can also pass the database name, host, port and login information as parameters when creating the :class:`DB` instance:: >>> db = DB(dbname='testdb', host='pgserver', port=5432, ... user='scott', passwd='tiger') The :class:`DB` class of which ``db`` is an object is a wrapper around the lower level :class:`Connection` class of the :mod:`pg` module. The most important method of such connection objects is the ``query`` method that allows you to send SQL commands to the database. Creating tables --------------- The first thing you would want to do in an empty database is creating a table. To do this, you need to send a **CREATE TABLE** command to the database. PostgreSQL has its own set of built-in types that can be used for the table columns. Let us create two tables "weather" and "cities":: >>> db.query("""CREATE TABLE weather ( ... city varchar(80), ... temp_lo int, temp_hi int, ... prcp float8, ... date date)""") >>> db.query("""CREATE TABLE cities ( ... name varchar(80), ... location point)""") .. note:: Keywords are case-insensitive but identifiers are case-sensitive. You can get a list of all tables in the database with:: >>> db.get_tables() ['public.cities', 'public.weather'] Insert data ----------- Now we want to fill our tables with data. An **INSERT** statement is used to insert a new row into a table. There are several ways you can specify what columns the data should go to. Let us insert a row into each of these tables. 
The simplest case is when the list of values corresponds to the order of the columns specified in the CREATE TABLE command:: >>> db.query("""INSERT INTO weather ... VALUES ('San Francisco', 46, 50, 0.25, '11/27/1994')""") >>> db.query("""INSERT INTO cities ... VALUES ('San Francisco', '(-194.0, 53.0)')""") You can also specify the columns to which the values correspond. The columns can be specified in any order. You may also omit any number of columns, such as with unknown precipitation, below:: >>> db.query("""INSERT INTO weather (date, city, temp_hi, temp_lo) ... VALUES ('11/29/1994', 'Hayward', 54, 37)""") If you get errors regarding the format of the date values, your database is probably set to a different date style. In this case you must change the date style like this:: >>> db.query("set datestyle = MDY") Instead of explicitly writing the INSERT statement and sending it to the database with the :meth:`DB.query` method, you can also use the more convenient :meth:`DB.insert` method that does the same under the hood:: >>> db.insert('weather', ... date='11/29/1994', city='Hayward', temp_hi=54, temp_lo=37) And instead of using keyword parameters, you can also pass the values to the :meth:`DB.insert` method in a single Python dictionary. If you have a Python list with many rows that shall be used to fill a database table quickly, you can use the :meth:`DB.inserttable` method. Retrieving data --------------- After having entered some data into our tables, let's see how we can get the data out again. A **SELECT** statement is used for retrieving data. The basic syntax is: .. code-block:: psql SELECT columns FROM tables WHERE predicates A simple one would be the following query:: >>> q = db.query("SELECT * FROM weather") >>> print(q) city |temp_lo|temp_hi|prcp| date -------------+-------+-------+----+---------- San Francisco| 46| 50|0.25|1994-11-27 Hayward | 37| 54| |1994-11-29 (2 rows) You may also specify expressions in the target list. 
(The 'AS column' specifies the column name of the result. It is optional.) :: >>> print(db.query("""SELECT city, (temp_hi+temp_lo)/2 AS temp_avg, date ... FROM weather""")) city |temp_avg| date -------------+--------+---------- San Francisco| 48|1994-11-27 Hayward | 45|1994-11-29 (2 rows) If you want to retrieve rows that satisfy certain condition (i.e. a restriction), specify the condition in a WHERE clause. The following retrieves the weather of San Francisco on rainy days:: >>> print(db.query("""SELECT * FROM weather ... WHERE city = 'San Francisco' AND prcp > 0.0""")) city |temp_lo|temp_hi|prcp| date -------------+-------+-------+----+---------- San Francisco| 46| 50|0.25|1994-11-27 (1 row) Here is a more complicated one. Duplicates are removed when DISTINCT is specified. ORDER BY specifies the column to sort on. (Just to make sure the following won't confuse you, DISTINCT and ORDER BY can be used separately.) :: >>> print(db.query("SELECT DISTINCT city FROM weather ORDER BY city")) city ------------- Hayward San Francisco (2 rows) So far we have only printed the output of a SELECT query. The object that is returned by the query is an instance of the :class:`Query` class that can print itself in the nicely formatted way we saw above. But you can also retrieve the results as a list of tuples, by using the :meth:`Query.getresult` method:: >>> from pprint import pprint >>> q = db.query("SELECT * FROM weather") >>> pprint(q.getresult()) [('San Francisco', 46, 50, 0.25, '1994-11-27'), ('Hayward', 37, 54, None, '1994-11-29')] Here we used pprint to print out the returned list in a nicely formatted way. 
If you want to retrieve the results as a list of dictionaries instead of tuples, use the :meth:`Query.dictresult` method instead:: >>> pprint(q.dictresult()) [{'city': 'San Francisco', 'date': '1994-11-27', 'prcp': 0.25, 'temp_hi': 50, 'temp_lo': 46}, {'city': 'Hayward', 'date': '1994-11-29', 'prcp': None, 'temp_hi': 54, 'temp_lo': 37}] Finally, you can also retrieve the results as a list of named tuples, using the :meth:`Query.namedresult` method. This can be a good compromise between simple tuples and the more memory intensive dictionaries: >>> for row in q.namedresult(): ... print(row.city, row.date) ... San Francisco 1994-11-27 Hayward 1994-11-29 If you only want to retrieve a single row of data, you can use the more convenient :meth:`DB.get` method that does the same under the hood:: >>> d = dict(city='Hayward') >>> db.get('weather', d, 'city') >>> pprint(d) {'city': 'Hayward', 'date': '1994-11-29', 'prcp': None, 'temp_hi': 54, 'temp_lo': 37} As you see, the :meth:`DB.get` method returns a dictionary with the column names as keys. In the third parameter you can specify which column should be looked up in the WHERE statement of the SELECT statement that is executed by the :meth:`DB.get` method. You normally don't need it when the table was created with a primary key. Retrieving data into other tables --------------------------------- A SELECT ... INTO statement can be used to retrieve data into another table:: >>> db.query("""SELECT * INTO TEMPORARY TABLE temptab FROM weather ... WHERE city = 'San Francisco' and prcp > 0.0""") This fills a temporary table "temptab" with a subset of the data in the original "weather" table. 
It can be listed with:: >>> print(db.query("SELECT * from temptab")) city |temp_lo|temp_hi|prcp| date -------------+-------+-------+----+---------- San Francisco| 46| 50|0.25|1994-11-27 (1 row) Aggregates ---------- Let's try the following query:: >>> print(db.query("SELECT max(temp_lo) FROM weather")) max --- 46 (1 row) You can also use aggregates with the GROUP BY clause:: >>> print(db.query("SELECT city, max(temp_lo) FROM weather GROUP BY city")) city |max -------------+--- Hayward | 37 San Francisco| 46 (2 rows) Joining tables -------------- Queries can access multiple tables at once or access the same table in such a way that multiple instances of the table are being processed at the same time. Suppose we want to find all the records that are in the temperature range of other records. W1 and W2 are aliases for weather. We can use the following query to achieve that:: >>> print(db.query("""SELECT W1.city, W1.temp_lo, W1.temp_hi, ... W2.city, W2.temp_lo, W2.temp_hi FROM weather W1, weather W2 ... WHERE W1.temp_lo < W2.temp_lo and W1.temp_hi > W2.temp_hi""")) city |temp_lo|temp_hi| city |temp_lo|temp_hi -------+-------+-------+-------------+-------+------- Hayward| 37| 54|San Francisco| 46| 50 (1 row) Now let's join two different tables. The following joins the "weather" table and the "cities" table:: >>> print(db.query("""SELECT city, location, prcp, date ... FROM weather, cities ... WHERE name = city""")) city |location |prcp| date -------------+---------+----+---------- San Francisco|(-194,53)|0.25|1994-11-27 (1 row) Since the column names are all different, we don't have to specify the table name. If you want to be clear, you can do the following. They give identical results, of course:: >>> print(db.query("""SELECT w.city, c.location, w.prcp, w.date ... 
FROM weather w, cities c WHERE c.name = w.city""")) city |location |prcp| date -------------+---------+----+---------- San Francisco|(-194,53)|0.25|1994-11-27 (1 row) Updating data ------------- If you want to change the data that has already been inserted into a database table, you will need the **UPDATE** statement. Suppose you discover the temperature readings are all off by 2 degrees as of Nov 28, you may update the data as follows:: >>> db.query("""UPDATE weather ... SET temp_hi = temp_hi - 2, temp_lo = temp_lo - 2 ... WHERE date > '11/28/1994'""") '1' >>> print(db.query("SELECT * from weather")) city |temp_lo|temp_hi|prcp| date -------------+-------+-------+----+---------- San Francisco| 46| 50|0.25|1994-11-27 Hayward | 35| 52| |1994-11-29 (2 rows) Note that the UPDATE statement returned the string ``'1'``, indicating that exactly one row of data has been affected by the update. If you retrieved one row of data as a dictionary using the :meth:`DB.get` method, then you can also update that row with the :meth:`DB.update` method. Deleting data ------------- To delete rows from a table, a **DELETE** statement can be used. Suppose you are no longer interested in the weather of Hayward, you can do the following to delete those rows from the table:: >>> db.query("DELETE FROM weather WHERE city = 'Hayward'") '1' Again, you get the string ``'1'`` as return value, indicating that exactly one row of data has been deleted. You can also delete all the rows in a table by doing the following. This is different from DROP TABLE which removes the table itself in addition to removing the rows, as explained in the next section. :: >>> db.query("DELETE FROM weather") '1' >>> print(db.query("SELECT * from weather")) city|temp_lo|temp_hi|prcp|date ----+-------+-------+----+---- (0 rows) Since only one row was left in the table, the DELETE query again returns the string ``'1'``. The SELECT query now gives an empty result. 
If you retrieved a row of data as a dictionary using the :meth:`DB.get` method, then you can also delete that row with the :meth:`DB.delete` method. Removing the tables ------------------- The **DROP TABLE** command is used to remove tables. After you have done this, you can no longer use those tables:: >>> db.query("DROP TABLE weather, cities") >>> db.query("select * from weather") pg.ProgrammingError: Error: Relation "weather" does not exist PyGreSQL-5.1/docs/contents/postgres/advanced.rst0000644000175100077410000001121513466770070021663 0ustar darcypyg00000000000000Examples for advanced features ============================== .. py:currentmodule:: pg In this section, we show how to use some advanced features of PostgreSQL using the classic PyGreSQL interface. We assume that you have already created a connection to the PostgreSQL database, as explained in the :doc:`basic`:: >>> from pg import DB >>> db = DB() >>> query = db.query Inheritance ----------- A table can inherit from zero or more tables. A query can reference either all rows of a table or all rows of a table plus all of its descendants. For example, the capitals table inherits from cities table (it inherits all data fields from cities):: >>> data = [('cities', [ ... "'San Francisco', 7.24E+5, 63", ... "'Las Vegas', 2.583E+5, 2174", ... "'Mariposa', 1200, 1953"]), ... ('capitals', [ ... "'Sacramento',3.694E+5,30,'CA'", ... "'Madison', 1.913E+5, 845, 'WI'"])] Now, let's populate the tables:: >>> data = ['cities', [ ... "'San Francisco', 7.24E+5, 63" ... "'Las Vegas', 2.583E+5, 2174" ... "'Mariposa', 1200, 1953"], ... 'capitals', [ ... "'Sacramento',3.694E+5,30,'CA'", ... "'Madison', 1.913E+5, 845, 'WI'"]] >>> for table, rows in data: ... for row in rows: ... 
query("INSERT INTO %s VALUES (%s)" % (table, row)) >>> print(query("SELECT * FROM cities")) name |population|altitude -------------+----------+-------- San Francisco| 724000| 63 Las Vegas | 258300| 2174 Mariposa | 1200| 1953 Sacramento | 369400| 30 Madison | 191300| 845 (5 rows) >>> print(query("SELECT * FROM capitals")) name |population|altitude|state ----------+----------+--------+----- Sacramento| 369400| 30|CA Madison | 191300| 845|WI (2 rows) You can find all cities, including capitals, that are located at an altitude of 500 feet or higher by:: >>> print(query("""SELECT c.name, c.altitude ... FROM cities ... WHERE altitude > 500""")) name |altitude ---------+-------- Las Vegas| 2174 Mariposa | 1953 Madison | 845 (3 rows) On the other hand, the following query references rows of the base table only, i.e. it finds all cities that are not state capitals and are situated at an altitude of 500 feet or higher:: >>> print(query("""SELECT name, altitude ... FROM ONLY cities ... WHERE altitude > 500""")) name |altitude ---------+-------- Las Vegas| 2174 Mariposa | 1953 (2 rows) Arrays ------ Attributes can be arrays of base types or user-defined types:: >>> query("""CREATE TABLE sal_emp ( ... name text, ... pay_by_quarter int4[], ... pay_by_extra_quarter int8[], ... schedule text[][])""") Insert instances with array attributes. Note the use of braces:: >>> query("""INSERT INTO sal_emp VALUES ( ... 'Bill', '{10000,10000,10000,10000}', ... '{9223372036854775800,9223372036854775800,9223372036854775800}', ... '{{"meeting", "lunch"}, {"training", "presentation"}}')""") >>> query("""INSERT INTO sal_emp VALUES ( ... 'Carol', '{20000,25000,25000,25000}', ... '{9223372036854775807,9223372036854775807,9223372036854775807}', ... '{{"breakfast", "consulting"}, {"meeting", "lunch"}}')""") Queries on array attributes:: >>> query("""SELECT name FROM sal_emp WHERE ... 
sal_emp.pay_by_quarter[1] != sal_emp.pay_by_quarter[2]""") name ----- Carol (1 row) Retrieve third quarter pay of all employees:: >>> query("SELECT sal_emp.pay_by_quarter[3] FROM sal_emp") pay_by_quarter -------------- 10000 25000 (2 rows) Retrieve third quarter extra pay of all employees:: >>> query("SELECT sal_emp.pay_by_extra_quarter[3] FROM sal_emp") pay_by_extra_quarter -------------------- 9223372036854775800 9223372036854775807 (2 rows) Retrieve first two quarters of extra quarter pay of all employees:: >>> query("SELECT sal_emp.pay_by_extra_quarter[1:2] FROM sal_emp") pay_by_extra_quarter ----------------------------------------- {9223372036854775800,9223372036854775800} {9223372036854775807,9223372036854775807} (2 rows) Select subarrays:: >>> query("""SELECT sal_emp.schedule[1:2][1:1] FROM sal_emp ... WHERE sal_emp.name = 'Bill'""") schedule ---------------------- {{meeting},{training}} (1 row) PyGreSQL-5.1/docs/contents/index.rst0000644000175100077410000000105313466770070017356 0ustar darcypyg00000000000000The PyGreSQL documentation ========================== Contents -------- .. toctree:: :maxdepth: 1 Installing PyGreSQL What's New and History of Changes General PyGreSQL Programming Information First Steps with PyGreSQL The Classic PyGreSQL Interface The DB-API Compliant Interface A PostgreSQL Primer Examples for using PyGreSQL Indices and tables ------------------ * :ref:`genindex` * :ref:`modindex` * :ref:`search` PyGreSQL-5.1/docs/contents/general.rst0000644000175100077410000000367713466770070017702 0ustar darcypyg00000000000000General PyGreSQL programming information ---------------------------------------- PyGreSQL consists of two parts: the "classic" PyGreSQL interface provided by the :mod:`pg` module and the newer DB-API 2.0 compliant interface provided by the :mod:`pgdb` module. 
If you use only the standard features of the DB-API 2.0 interface, it will be easier to switch from PostgreSQL to another database for which a DB-API 2.0 compliant interface exists. The "classic" interface may be easier to use for beginners, and it provides some higher-level and PostgreSQL specific convenience methods. .. seealso:: **DB-API 2.0** (Python Database API Specification v2.0) is a specification for connecting to databases (not only PostgreSQL) from Python that has been developed by the Python DB-SIG in 1999. The authoritative programming information for the DB-API is :pep:`0249`. Both Python modules utilize the same low-level C extension, which serves as a wrapper for the "libpq" library, the C API to PostgreSQL. This means you must have the libpq library installed as a shared library on your client computer, in a version that is supported by PyGreSQL. Depending on the client platform, you may have to set environment variables like `PATH` or `LD_LIBRARY_PATH` so that PyGreSQL can find the library. .. warning:: Note that PyGreSQL is not thread-safe on the connection level. Therefore we recommend using `DBUtils `_ for multi-threaded environments, which supports both PyGreSQL interfaces. Another option is using PyGreSQL indirectly as a database driver for the high-level `SQLAlchemy `_ SQL toolkit and ORM, which supports PyGreSQL starting with SQLAlchemy 1.1 and which provides a way to use PyGreSQL in a multi-threaded environment using the concept of "thread local storage". Database URLs for PyGreSQL take this form:: postgresql+pygresql://username:password@host:port/database PyGreSQL-5.1/docs/contents/tutorial.rst0000644000175100077410000002255413466770070020123 0ustar darcypyg00000000000000First Steps with PyGreSQL ========================= In this small tutorial we show you the basic operations you can perform with both flavors of the PyGreSQL interface. Please choose your flavor: .. 
contents:: :local: First Steps with the classic PyGreSQL Interface ----------------------------------------------- .. py:currentmodule:: pg Before doing anything else, it's necessary to create a database connection. To do this, simply import the :class:`DB` wrapper class and create an instance of it, passing the necessary connection parameters, like this:: >>> from pg import DB >>> db = DB(dbname='testdb', host='pgserver', port=5432, ... user='scott', passwd='tiger') You can omit one or even all parameters if you want to use their default values. PostgreSQL will use the name of the current operating system user as the login and the database name, and will try to connect to the local host on port 5432 if nothing else is specified. The `db` object has all methods of the lower-level :class:`Connection` class plus some more convenience methods provided by the :class:`DB` wrapper. You can now execute database queries using the :meth:`DB.query` method:: >>> db.query("create table fruits(id serial primary key, name varchar)") You can list all database tables with the :meth:`DB.get_tables` method:: >>> db.get_tables() ['public.fruits'] To get the attributes of the *fruits* table, use :meth:`DB.get_attnames`:: >>> db.get_attnames('fruits') {'id': 'int', 'name': 'text'} Verify that you can insert into the newly created *fruits* table: >>> db.has_table_privilege('fruits', 'insert') True You can insert a new row into the table using the :meth:`DB.insert` method, for example:: >>> db.insert('fruits', name='apple') {'name': 'apple', 'id': 1} Note how this method returns the full row as a dictionary including its *id* column that has been generated automatically by a database sequence. You can also pass a dictionary to the :meth:`DB.insert` method instead of or in addition to using keyword arguments. 
Let's add another row to the table: >>> banana = db.insert('fruits', name='banana') Or, you can add a whole bunch of fruits at the same time using the :meth:`Connection.inserttable` method. Note that this method uses the COPY command of PostgreSQL to insert all data in one batch operation, which is much faster than sending many individual INSERT commands:: >>> more_fruits = 'cherimaya durian eggfruit fig grapefruit'.split() >>> data = list(enumerate(more_fruits, start=3)) >>> db.inserttable('fruits', data) We can now query the database for all rows that have been inserted into the *fruits* table:: >>> print(db.query('select * from fruits')) id| name --+---------- 1|apple 2|banana 3|cherimaya 4|durian 5|eggfruit 6|fig 7|grapefruit (7 rows) Instead of simply printing the :class:`Query` instance that has been returned by this query, we can also request the data as list of tuples:: >>> q = db.query('select * from fruits') >>> q.getresult() ... [(1, 'apple'), ..., (7, 'grapefruit')] Instead of a list of tuples, we can also request a list of dicts:: >>> q.dictresult() [{'id': 1, 'name': 'apple'}, ..., {'id': 7, 'name': 'grapefruit'}] You can also return the rows as named tuples:: >>> rows = q.namedresult() >>> rows[3].name 'durian' In PyGreSQL 5.1 and newer, you can also use the :class:`Query` instance directly as an iterable that yields the rows as tuples, and there are also methods that return iterables for rows as dictionaries, named tuples or scalar values. Other methods like :meth:`Query.one` or :meth:`Query.onescalar` return only one row or only the first field of that row. You can get the number of rows with the :func:`len` function. 
Using the method :meth:`DB.get_as_dict`, you can easily import the whole table into a Python dictionary mapping the primary key *id* to the *name*:: >>> db.get_as_dict('fruits', scalar=True) OrderedDict([(1, 'apple'), (2, 'banana'), (3, 'cherimaya'), (4, 'durian'), (5, 'eggfruit'), (6, 'fig'), (7, 'grapefruit')]) To change a single row in the database, you can use the :meth:`DB.update` method. For instance, if you want to capitalize the name 'banana':: >>> db.update('fruits', banana, name=banana['name'].capitalize()) {'id': 2, 'name': 'Banana'} >>> print(db.query('select * from fruits where id between 1 and 3')) id| name --+--------- 1|apple 2|Banana 3|cherimaya (3 rows) Let's also capitalize the other names in the database:: >>> db.query('update fruits set name=initcap(name)') '7' The returned string `'7'` tells us the number of updated rows. It is returned as a string to discern it from an OID which will be returned as an integer, if a new row has been inserted into a table with an OID column. To delete a single row from the database, use the :meth:`DB.delete` method:: >>> db.delete('fruits', banana) 1 The returned integer value `1` tells us that one row has been deleted. If we try it again, the method returns the integer value `0`. 
Naturally, this method can only return 0 or 1:: >>> db.delete('fruits', banana) 0 Of course, we can insert the row back again:: >>> db.insert('fruits', banana) {'id': 2, 'name': 'Banana'} If we want to change a different row, we can get its current state with:: >>> apple = db.get('fruits', 1) >>> apple {'name': 'Apple', 'id': 1} We can duplicate the row like this:: >>> db.insert('fruits', apple, id=8) {'id': 8, 'name': 'Apple'} To remove the duplicated row, we can do:: >>> db.delete('fruits', id=8) 1 Finally, to remove the table from the database and close the connection:: >>> db.query("drop table fruits") >>> db.close() For more advanced features and details, see the reference: :doc:`pg/index` First Steps with the DB-API 2.0 Interface ----------------------------------------- .. py:currentmodule:: pgdb As with the classic interface, the first thing you need to do is to create a database connection. To do this, use the function :func:`pgdb.connect` in the :mod:`pgdb` module, passing the connection parameters:: >>> from pgdb import connect >>> con = connect(database='testdb', host='pgserver:5432', ... user='scott', password='tiger') As in the classic interface, you can omit parameters if they are the default values used by PostgreSQL. To do anything with the connection, you need to request a cursor object from it, which is thought of as the Python representation of a database cursor. The connection has a method that lets you get a cursor:: >>> cursor = con.cursor() The cursor has a method that lets you execute database queries:: >>> cursor.execute("create table fruits(" ... 
"id serial primary key, name varchar)") You can also use this method to insert data into the table:: >>> cursor.execute("insert into fruits (name) values ('apple')") You can pass parameters in a safe way:: >>> cursor.execute("insert into fruits (name) values (%s)", ('banana',)) To insert multiple rows at once, you can use the following method:: >>> more_fruits = 'cherimaya durian eggfruit fig grapefruit'.split() >>> parameters = [(name,) for name in more_fruits] >>> cursor.executemany("insert into fruits (name) values (%s)", parameters) The cursor also has a :meth:`Cursor.copy_from` method to quickly insert large amounts of data into the database, and a :meth:`Cursor.copy_to` method to quickly dump large amounts of data from the database, using the PostgreSQL COPY command. Note however, that these methods are an extension provided by PyGreSQL, they are not part of the DB-API 2 standard. Also note that the DB API 2.0 interface does not have an autocommit as you may be used from PostgreSQL. So in order to make these inserts permanent, you need to commit them to the database:: >>> con.commit() If you end the program without calling the commit method of the connection, or if you call the rollback method of the connection, then the changes will be discarded. In a similar way, you can update or delete rows in the database, executing UPDATE or DELETE statements instead of INSERT statements. To fetch rows from the database, execute a SELECT statement first. Then you can use one of several fetch methods to retrieve the results. For instance, to request a single row:: >>> cursor.execute('select * from fruits where id=1') >>> cursor.fetchone() Row(id=1, name='apple') The result is a named tuple. This means you can access its elements either using an index number as for an ordinary tuple, or using the column name as for access to object attributes. 
To fetch all rows of the query, use this method instead:: >>> cursor.execute('select * from fruits') >>> cursor.fetchall() [Row(id=1, name='apple'), ..., Row(id=7, name='grapefruit')] The output is a list of named tuples. If you want to fetch only a limited number of rows from the query:: >>> cursor.execute('select * from fruits') >>> cursor.fetchmany(2) [Row(id=1, name='apple'), Row(id=2, name='banana')] Finally, to remove the table from the database and close the connection:: >>> cursor.execute("drop table fruits") >>> cursor.close() >>> con.close() For more advanced features and details, see the reference: :doc:`pgdb/index` PyGreSQL-5.1/docs/contents/pgdb/0000755000175100077410000000000013470245542016426 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/contents/pgdb/module.rst0000644000175100077410000001572313466770070020457 0ustar darcypyg00000000000000Module functions and constants ============================== .. py:currentmodule:: pgdb The :mod:`pgdb` module defines a :func:`connect` function that allows you to connect to a database, some global constants describing the capabilities of the module as well as several exception classes. connect -- Open a PostgreSQL connection --------------------------------------- .. function:: connect([dsn], [user], [password], [host], [database], [**kwargs]) Return a new connection to the database :param str dsn: data source name as string :param str user: the database user name :param str password: the database password :param str host: the hostname of the database :param database: the name of the database :param dict kwargs: other connection parameters :returns: a connection object :rtype: :class:`Connection` :raises pgdb.OperationalError: error connecting to the database This function takes parameters specifying how to connect to a PostgreSQL database and returns a :class:`Connection` object using these parameters. If specified, the *dsn* parameter must be a string with the format ``'host:base:user:passwd:opt'``. 
All of the parts specified in the *dsn* are optional. You can also specify the parameters individually using keyword arguments, which always take precedence. The *host* can also contain a port if specified in the format ``'host:port'``. In the *opt* part of the *dsn* you can pass command-line options to the server. You can pass additional connection parameters using the optional *kwargs* keyword arguments. Example:: con = connect(dsn='myhost:mydb', user='guido', password='234$') .. versionchanged:: 5.0.1 Support for additional parameters passed as *kwargs*. get/set/reset_typecast -- Control the global typecast functions --------------------------------------------------------------- PyGreSQL uses typecast functions to cast the raw data coming from the database to Python objects suitable for the particular database type. These functions take a single string argument that represents the data to be casted and must return the casted value. PyGreSQL provides built-in typecast functions for the common database types, but if you want to change these or add more typecast functions, you can set these up using the following functions. .. note:: The following functions are not part of the DB-API 2 standard. .. method:: get_typecast(typ) Get the global cast function for the given database type :param str typ: PostgreSQL type name or type code :returns: the typecast function for the specified type :rtype: function or None .. versionadded:: 5.0 .. method:: set_typecast(typ, cast) Set a global typecast function for the given database type(s) :param typ: PostgreSQL type name or type code, or list of such :type typ: str or list :param cast: the typecast function to be set for the specified type(s) :type typ: str or int The typecast function must take one string object as argument and return a Python object into which the PostgreSQL type shall be casted. 
If the function takes another parameter named *connection*, then the current database connection will also be passed to the typecast function. This may sometimes be necessary to look up certain database settings. .. versionadded:: 5.0 As of version 5.0.3 you can also use this method to change the typecasting of PostgreSQL array types. You must run ``set_typecast('anyarray', cast)`` in order to do this. The ``cast`` method must take a string value and a cast function for the base type and return the array converted to a Python object. For instance, run ``set_typecast('anyarray', lambda v, c: v)`` to switch off the casting of arrays completely, and always return them encoded as strings. .. method:: reset_typecast([typ]) Reset the typecasts for the specified (or all) type(s) to their defaults :param str typ: PostgreSQL type name or type code, or list of such, or None to reset all typecast functions :type typ: str, list or None .. versionadded:: 5.0 Note that database connections cache types and their cast functions using connection specific :class:`TypeCache` objects. You can also get, set and reset typecast functions on the connection level using the methods :meth:`TypeCache.get_typecast`, :meth:`TypeCache.set_typecast` and :meth:`TypeCache.reset_typecast` of the :attr:`Connection.type_cache`. This will not affect other connections or future connections. In order to be sure a global change is picked up by a running connection, you must reopen it or call :meth:`TypeCache.reset_typecast` on the :attr:`Connection.type_cache`. Module constants ---------------- .. data:: apilevel The string constant ``'2.0'``, stating that the module is DB-API 2.0 level compliant. .. data:: threadsafety The integer constant 1, stating that the module itself is thread-safe, but the connections are not thread-safe, and therefore must be protected with a lock if you want to use them from different threads. .. 
data:: paramstyle The string constant ``pyformat``, stating that parameters should be passed using Python extended format codes, e.g. ``" ... WHERE name=%(name)s"``. Errors raised by this module ---------------------------- The errors that can be raised by the :mod:`pgdb` module are the following: .. exception:: Warning Exception raised for important warnings like data truncations while inserting. .. exception:: Error Exception that is the base class of all other error exceptions. You can use this to catch all errors with one single except statement. Warnings are not considered errors and thus do not use this class as base. .. exception:: InterfaceError Exception raised for errors that are related to the database interface rather than the database itself. .. exception:: DatabaseError Exception raised for errors that are related to the database. In PyGreSQL, this also has a :attr:`DatabaseError.sqlstate` attribute that contains the ``SQLSTATE`` error code of this error. .. exception:: DataError Exception raised for errors that are due to problems with the processed data like division by zero or numeric value out of range. .. exception:: OperationalError Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer, e.g. an unexpected disconnect occurs, the data source name is not found, a transaction could not be processed, or a memory allocation error occurred during processing. .. exception:: IntegrityError Exception raised when the relational integrity of the database is affected, e.g. a foreign key check fails. .. exception:: ProgrammingError Exception raised for programming errors, e.g. table not found or already exists, syntax error in the SQL statement or wrong number of parameters specified. .. exception:: NotSupportedError Exception raised in case a method or database API was used which is not supported by the database. 
PyGreSQL-5.1/docs/contents/pgdb/types.rst0000644000175100077410000001311513466770070020331 0ustar darcypyg00000000000000Type -- Type objects and constructors ===================================== .. py:currentmodule:: pgdb .. _type_constructors: Type constructors ----------------- For binding to an operation's input parameters, PostgreSQL needs to have the input in a particular format. However, from the parameters to the :meth:`Cursor.execute` and :meth:`Cursor.executemany` methods it is not always obvious as which PostgreSQL data types they shall be bound. For instance, a Python string could be bound as a simple ``char`` value, or also as a ``date`` or a ``time``. Or a list could be bound as a ``array`` or a ``json`` object. To make the intention clear in such cases, you can wrap the parameters in type helper objects. PyGreSQL provides the constructors defined below to create such objects that can hold special values. When passed to the cursor methods, PyGreSQL can then detect the proper type of the input parameter and bind it accordingly. The :mod:`pgdb` module exports the following type constructors as part of the DB-API 2 standard: .. function:: Date(year, month, day) Construct an object holding a date value .. function:: Time(hour, [minute], [second], [microsecond], [tzinfo]) Construct an object holding a time value .. function:: Timestamp(year, month, day, [hour], [minute], [second], [microsecond], [tzinfo]) Construct an object holding a time stamp value .. function:: DateFromTicks(ticks) Construct an object holding a date value from the given *ticks* value .. function:: TimeFromTicks(ticks) Construct an object holding a time value from the given *ticks* value .. function:: TimestampFromTicks(ticks) Construct an object holding a time stamp from the given *ticks* value .. function:: Binary(bytes) Construct an object capable of holding a (long) binary string value Additionally, PyGreSQL provides the following constructors for PostgreSQL specific data types: .. 
function:: Interval(days, hours=0, minutes=0, seconds=0, microseconds=0) Construct an object holding a time interval value .. versionadded:: 5.0 .. function:: Uuid([hex], [bytes], [bytes_le], [fields], [int], [version]) Construct an object holding a UUID value .. versionadded:: 5.0 .. function:: Hstore(dict) Construct a wrapper for holding an hstore dictionary .. versionadded:: 5.0 .. function:: Json(obj, [encode]) Construct a wrapper for holding an object serializable to JSON You can pass an optional serialization function as a parameter. By default, PyGreSQL uses :func:`json.dumps` to serialize it. .. function:: Literal(sql) Construct a wrapper for holding a literal SQL string .. versionadded:: 5.0 Example for using a type constructor:: >>> cursor.execute("create table jsondata (data jsonb)") >>> data = {'id': 1, 'name': 'John Doe', 'kids': ['Johnnie', 'Janie']} >>> cursor.execute("insert into jsondata values (%s)", [Json(data)]) .. note:: SQL ``NULL`` values are always represented by the Python *None* singleton on input and output. .. _type_objects: Type objects ------------ .. class:: Type The :attr:`Cursor.description` attribute returns information about each of the result columns of a query. The *type_code* must compare equal to one of the :class:`Type` objects defined below. Type objects can be equal to more than one type code (e.g. :class:`DATETIME` is equal to the type codes for ``date``, ``time`` and ``timestamp`` columns). The pgdb module exports the following :class:`Type` objects as part of the DB-API 2 standard: .. object:: STRING Used to describe columns that are string-based (e.g. ``char``, ``varchar``, ``text``) .. object:: BINARY Used to describe (long) binary columns (``bytea``) .. object:: NUMBER Used to describe numeric columns (e.g. ``int``, ``float``, ``numeric``, ``money``) .. object:: DATETIME Used to describe date/time columns (e.g. ``date``, ``time``, ``timestamp``, ``interval``) .. 
object:: ROWID Used to describe the ``oid`` column of PostgreSQL database tables .. note:: The following more specific type objects are not part of the DB-API 2 standard. .. object:: BOOL Used to describe ``boolean`` columns .. object:: SMALLINT Used to describe ``smallint`` columns .. object:: INTEGER Used to describe ``integer`` columns .. object:: LONG Used to describe ``bigint`` columns .. object:: FLOAT Used to describe ``float`` columns .. object:: NUMERIC Used to describe ``numeric`` columns .. object:: MONEY Used to describe ``money`` columns .. object:: DATE Used to describe ``date`` columns .. object:: TIME Used to describe ``time`` columns .. object:: TIMESTAMP Used to describe ``timestamp`` columns .. object:: INTERVAL Used to describe date and time ``interval`` columns .. object:: UUID Used to describe ``uuid`` columns .. object:: HSTORE Used to describe ``hstore`` columns .. versionadded:: 5.0 .. object:: JSON Used to describe ``json`` and ``jsonb`` columns .. versionadded:: 5.0 .. object:: ARRAY Used to describe columns containing PostgreSQL arrays .. versionadded:: 5.0 .. object:: RECORD Used to describe columns containing PostgreSQL records .. versionadded:: 5.0 Example for using some type objects:: >>> cursor = con.cursor() >>> cursor.execute("create table jsondata (created date, data jsonb)") >>> cursor.execute("select * from jsondata") >>> (created, data) = (d.type_code for d in cursor.description) >>> created == DATE True >>> created == DATETIME True >>> created == TIME False >>> data == JSON True >>> data == STRING False PyGreSQL-5.1/docs/contents/pgdb/connection.rst0000644000175100077410000000741513466770070021332 0ustar darcypyg00000000000000Connection -- The connection object =================================== .. py:currentmodule:: pgdb .. class:: Connection These connection objects respond to the following methods. Note that ``pgdb.Connection`` objects also implement the context manager protocol, i.e. 
you can use them in a ``with`` statement. When the ``with`` block ends, the current transaction will be automatically committed or rolled back if there was an exception, and you won't need to do this manually. close -- close the connection ----------------------------- .. method:: Connection.close() Close the connection now (rather than whenever it is deleted) :rtype: None The connection will be unusable from this point forward; an :exc:`Error` (or subclass) exception will be raised if any operation is attempted with the connection. The same applies to all cursor objects trying to use the connection. Note that closing a connection without committing the changes first will cause an implicit rollback to be performed. commit -- commit the connection ------------------------------- .. method:: Connection.commit() Commit any pending transaction to the database :rtype: None Note that connections always use a transaction, unless you set the :attr:`Connection.autocommit` attribute described below. rollback -- roll back the connection ------------------------------------ .. method:: Connection.rollback() Roll back any pending transaction to the database :rtype: None This method causes the database to roll back to the start of any pending transaction. Closing a connection without committing the changes first will cause an implicit rollback to be performed. cursor -- return a new cursor object ------------------------------------ .. method:: Connection.cursor() Return a new cursor object using the connection :returns: a connection object :rtype: :class:`Cursor` This method returns a new :class:`Cursor` object that can be used to operate on the database in the way described in the next section. Attributes that are not part of the standard -------------------------------------------- .. note:: The following attributes are not part of the DB-API 2 standard. .. attribute:: Connection.closed This is *True* if the connection has been closed or has become invalid .. 
attribute:: Connection.cursor_type The default cursor type used by the connection If you want to use your own custom subclass of the :class:`Cursor` class with the connection, set this attribute to your custom cursor class. You will then get your custom cursor whenever you call :meth:`Connection.cursor`. .. versionadded:: 5.0 .. attribute:: Connection.type_cache A dictionary with the various type codes for the PostgreSQL types This can be used for getting more information on the PostgreSQL database types or changing the typecast functions used for the connection. See the description of the :class:`TypeCache` class for details. .. versionadded:: 5.0 .. attribute:: Connection.autocommit A read/write attribute to get/set the autocommit mode Normally, all DB-API 2 SQL commands are run inside a transaction. Sometimes this behavior is not desired; there are also some SQL commands such as VACUUM which cannot be run inside a transaction. By setting this attribute to ``True`` you can change this behavior so that no transactions will be started for that connection. In this case every executed SQL command has immediate effect on the database and you don't need to call :meth:`Connection.commit` explicitly. In this mode, you can still use ``with con:`` blocks to run parts of the code using the connection ``con`` inside a transaction. By default, this attribute is set to ``False`` which conforms to the behavior specified by the DB-API 2 standard (manual commit required). .. versionadded:: 5.1 PyGreSQL-5.1/docs/contents/pgdb/introduction.rst0000644000175100077410000000137113466770070021707 0ustar darcypyg00000000000000Introduction ============ You may either choose to use the "classic" PyGreSQL interface provided by the :mod:`pg` module or else the newer DB-API 2.0 compliant interface provided by the :mod:`pgdb` module. The following part of the documentation covers only the newer :mod:`pgdb` API. 
**DB-API 2.0** (Python Database API Specification v2.0) is a specification for connecting to databases (not only PostgreSQL) from Python that has been developed by the Python DB-SIG in 1999. The authoritative programming information for the DB-API is :pep:`0249`. .. seealso:: A useful tutorial-like `introduction to the DB-API `_ has been written by Andrew M. Kuchling for the LINUX Journal in 1998. PyGreSQL-5.1/docs/contents/pgdb/typecache.rst0000644000175100077410000000665313466770070021143 0ustar darcypyg00000000000000TypeCache -- The internal cache for database types ================================================== .. py:currentmodule:: pgdb .. class:: TypeCache .. versionadded:: 5.0 The internal :class:`TypeCache` of PyGreSQL is not part of the DB-API 2 standard, but is documented here in case you need full control and understanding of the internal handling of database types. The TypeCache is essentially a dictionary mapping PostgreSQL internal type names and type OIDs to DB-API 2 "type codes" (which are also returned as the *type_code* field of the :attr:`Cursor.description` attribute). These type codes are strings which are equal to the PostgreSQL internal type name, but they are also carrying additional information about the associated PostgreSQL type in the following attributes: - *oid* -- the OID of the type - *len* -- the internal size - *type* -- ``'b'`` = base, ``'c'`` = composite, ... - *category* -- ``'A'`` = Array, ``'B'`` = Boolean, ... - *delim* -- delimiter to be used when parsing arrays - *relid* -- the table OID for composite types For details, see the PostgreSQL documentation on `pg_type `_. In addition to the dictionary methods, the :class:`TypeCache` provides the following methods: .. method:: TypeCache.get_fields(typ) Get the names and types of the fields of composite types :param typ: PostgreSQL type name or OID of a composite type :type typ: str or int :returns: a list of pairs of field names and types :rtype: list .. 
method:: TypeCache.get_typecast(typ) Get the cast function for the given database type :param str typ: PostgreSQL type name or type code :returns: the typecast function for the specified type :rtype: function or None .. method:: TypeCache.set_typecast(typ, cast) Set a typecast function for the given database type(s) :param typ: PostgreSQL type name or type code, or list of such :type typ: str or list :param cast: the typecast function to be set for the specified type(s) :type typ: str or int The typecast function must take one string object as argument and return a Python object into which the PostgreSQL type shall be casted. If the function takes another parameter named *connection*, then the current database connection will also be passed to the typecast function. This may sometimes be necessary to look up certain database settings. .. method:: TypeCache.reset_typecast([typ]) Reset the typecasts for the specified (or all) type(s) to their defaults :param str typ: PostgreSQL type name or type code, or list of such, or None to reset all typecast functions :type typ: str, list or None .. method:: TypeCache.typecast(value, typ) Cast the given value according to the given database type :param str typ: PostgreSQL type name or type code :returns: the casted value .. note:: Note that the :class:`TypeCache` is always bound to a database connection. You can also get, set and reset typecast functions on a global level using the functions :func:`pgdb.get_typecast`, :func:`pgdb.set_typecast` and :func:`pgdb.reset_typecast`. If you do this, the current database connections will continue to use their already cached typecast functions unless you call the :meth:`TypeCache.reset_typecast` method on the :attr:`Connection.type_cache` objects of the running connections. PyGreSQL-5.1/docs/contents/pgdb/cursor.rst0000644000175100077410000003670113466770070020504 0ustar darcypyg00000000000000Cursor -- The cursor object =========================== .. py:currentmodule:: pgdb .. 
class:: Cursor These objects represent a database cursor, which is used to manage the context of a fetch operation. Cursors created from the same connection are not isolated, i.e., any changes done to the database by a cursor are immediately visible by the other cursors. Cursors created from different connections can or can not be isolated, depending on the level of transaction isolation. The default PostgreSQL transaction isolation level is "read committed". Cursor objects respond to the following methods and attributes. Note that ``Cursor`` objects also implement both the iterator and the context manager protocol, i.e. you can iterate over them and you can use them in a ``with`` statement. description -- details regarding the result columns --------------------------------------------------- .. attribute:: Cursor.description This read-only attribute is a sequence of 7-item named tuples. Each of these named tuples contains information describing one result column: - *name* - *type_code* - *display_size* - *internal_size* - *precision* - *scale* - *null_ok* The values for *precision* and *scale* are only set for numeric types. The values for *display_size* and *null_ok* are always ``None``. This attribute will be ``None`` for operations that do not return rows or if the cursor has not had an operation invoked via the :meth:`Cursor.execute` or :meth:`Cursor.executemany` method yet. .. versionchanged:: 5.0 Before version 5.0, this attribute was an ordinary tuple. rowcount -- number of rows of the result ---------------------------------------- .. attribute:: Cursor.rowcount This read-only attribute specifies the number of rows that the last :meth:`Cursor.execute` or :meth:`Cursor.executemany` call produced (for DQL statements like SELECT) or affected (for DML statements like UPDATE or INSERT). It is also set by the :meth:`Cursor.copy_from` and :meth:`Cursor.copy_to` methods. 
The attribute is -1 in case no such method call has been performed on the cursor or the rowcount of the last operation cannot be determined by the interface. close -- close the cursor ------------------------- .. method:: Cursor.close() Close the cursor now (rather than whenever it is deleted) :rtype: None The cursor will be unusable from this point forward; an :exc:`Error` (or subclass) exception will be raised if any operation is attempted with the cursor. execute -- execute a database operation --------------------------------------- .. method:: Cursor.execute(operation, [parameters]) Prepare and execute a database operation (query or command) :param str operation: the database operation :param parameters: a sequence or mapping of parameters :returns: the cursor, so you can chain commands Parameters may be provided as sequence or mapping and will be bound to variables in the operation. Variables are specified using Python extended format codes, e.g. ``" ... WHERE name=%(name)s"``. A reference to the operation will be retained by the cursor. If the same operation object is passed in again, then the cursor can optimize its behavior. This is most effective for algorithms where the same operation is used, but different parameters are bound to it (many times). The parameters may also be specified as list of tuples to e.g. insert multiple rows in a single operation, but this kind of usage is deprecated: :meth:`Cursor.executemany` should be used instead. Note that in case this method raises a :exc:`DatabaseError`, you can get information about the error condition that has occurred by introspecting its :attr:`DatabaseError.sqlstate` attribute, which will be the ``SQLSTATE`` error code associated with the error. Applications that need to know which error condition has occurred should usually test the error code, rather than looking at the textual error message. 
executemany -- execute many similar database operations ------------------------------------------------------- .. method:: Cursor.executemany(operation, [seq_of_parameters]) Prepare and execute many similar database operations (queries or commands) :param str operation: the database operation :param seq_of_parameters: a sequence or mapping of parameter tuples or mappings :returns: the cursor, so you can chain commands Prepare a database operation (query or command) and then execute it against all parameter tuples or mappings found in the sequence *seq_of_parameters*. Parameters are bound to the query using Python extended format codes, e.g. ``" ... WHERE name=%(name)s"``. callproc -- Call a stored procedure ----------------------------------- .. method:: Cursor.callproc(procname, [parameters]) Call a stored database procedure with the given name :param str procname: the name of the database function :param parameters: a sequence of parameters (can be empty or omitted) This method calls a stored procedure (function) in the PostgreSQL database. The sequence of parameters must contain one entry for each input argument that the function expects. The result of the call is the same as this input sequence; replacement of output and input/output parameters in the return value is currently not supported. The function may also provide a result set as output. These can be requested through the standard fetch methods of the cursor. .. versionadded:: 5.0 fetchone -- fetch next row of the query result ---------------------------------------------- .. method:: Cursor.fetchone() Fetch the next row of a query result set :returns: the next row of the query result set :rtype: named tuple or None Fetch the next row of a query result set, returning a single named tuple, or ``None`` when no more data is available. The field names of the named tuple are the same as the column names of the database query as long as they are valid Python identifiers. 
An :exc:`Error` (or subclass) exception is raised if the previous call to :meth:`Cursor.execute` or :meth:`Cursor.executemany` did not produce any result set or no call was issued yet. .. versionchanged:: 5.0 Before version 5.0, this method returned ordinary tuples. fetchmany -- fetch next set of rows of the query result ------------------------------------------------------- .. method:: Cursor.fetchmany([size=None], [keep=False]) Fetch the next set of rows of a query result :param size: the number of rows to be fetched :type size: int or None :param keep: if set to true, will keep the passed arraysize :type keep: bool :returns: the next set of rows of the query result :rtype: list of named tuples Fetch the next set of rows of a query result, returning a list of named tuples. An empty sequence is returned when no more rows are available. The field names of the named tuple are the same as the column names of the database query as long as they are valid Python identifiers. The number of rows to fetch per call is specified by the *size* parameter. If it is not given, the cursor's :attr:`arraysize` determines the number of rows to be fetched. If you set the *keep* parameter to True, this is kept as new :attr:`arraysize`. The method tries to fetch as many rows as indicated by the *size* parameter. If this is not possible due to the specified number of rows not being available, fewer rows may be returned. An :exc:`Error` (or subclass) exception is raised if the previous call to :meth:`Cursor.execute` or :meth:`Cursor.executemany` did not produce any result set or no call was issued yet. Note there are performance considerations involved with the *size* parameter. For optimal performance, it is usually best to use the :attr:`arraysize` attribute. If the *size* parameter is used, then it is best for it to retain the same value from one :meth:`Cursor.fetchmany` call to the next. .. versionchanged:: 5.0 Before version 5.0, this method returned ordinary tuples. 
fetchall -- fetch all rows of the query result ---------------------------------------------- .. method:: Cursor.fetchall() Fetch all (remaining) rows of a query result :returns: the set of all rows of the query result :rtype: list of named tuples Fetch all (remaining) rows of a query result, returning them as list of named tuples. The field names of the named tuple are the same as the column names of the database query as long as they are valid as field names for named tuples, otherwise they are given positional names. Note that the cursor's :attr:`arraysize` attribute can affect the performance of this operation. .. versionchanged:: 5.0 Before version 5.0, this method returned ordinary tuples. arraysize - the number of rows to fetch at a time ------------------------------------------------- .. attribute:: Cursor.arraysize The number of rows to fetch at a time This read/write attribute specifies the number of rows to fetch at a time with :meth:`Cursor.fetchmany`. It defaults to 1, meaning to fetch a single row at a time. Methods and attributes that are not part of the standard -------------------------------------------------------- .. note:: The following methods and attributes are not part of the DB-API 2 standard. .. 
method:: Cursor.copy_from(stream, table, [format], [sep], [null], [size], [columns]) Copy data from an input stream to the specified table :param stream: the input stream (must be a file-like object, a string or an iterable returning strings) :param str table: the name of a database table :param str format: the format of the data in the input stream, can be ``'text'`` (the default), ``'csv'``, or ``'binary'`` :param str sep: a single character separator (the default is ``'\t'`` for text and ``','`` for csv) :param str null: the textual representation of the ``NULL`` value, can also be an empty string (the default is ``'\\N'``) :param int size: the size of the buffer when reading file-like objects :param list columns: an optional list of column names :returns: the cursor, so you can chain commands :raises TypeError: parameters with wrong types :raises ValueError: invalid parameters :raises IOError: error when executing the copy operation This method can be used to copy data from an input stream on the client side to a database table on the server side using the ``COPY FROM`` command. The input stream can be provided in form of a file-like object (which must have a ``read()`` method), a string, or an iterable returning one row or multiple rows of input data on each iteration. The format must be text, csv or binary. The sep option sets the column separator (delimiter) used in the non binary formats. The null option sets the textual representation of ``NULL`` in the input. The size option sets the size of the buffer used when reading data from file-like objects. The copy operation can be restricted to a subset of columns. If no columns are specified, all of them will be copied. .. versionadded:: 5.0 .. 
method:: Cursor.copy_to(stream, table, [format], [sep], [null], [decode], [columns]) Copy data from the specified table to an output stream :param stream: the output stream (must be a file-like object or ``None``) :param str table: the name of a database table or a ``SELECT`` query :param str format: the format of the data in the output stream, can be ``'text'`` (the default), ``'csv'``, or ``'binary'`` :param str sep: a single character separator (the default is ``'\t'`` for text and ``','`` for csv) :param str null: the textual representation of the ``NULL`` value, can also be an empty string (the default is ``'\\N'``) :param bool decode: whether decoded strings shall be returned for non-binary formats (the default is True in Python 3) :param list columns: an optional list of column names :returns: a generator if stream is set to ``None``, otherwise the cursor :raises TypeError: parameters with wrong types :raises ValueError: invalid parameters :raises IOError: error when executing the copy operation This method can be used to copy data from a database table on the server side to an output stream on the client side using the ``COPY TO`` command. The output stream can be provided in form of a file-like object (which must have a ``write()`` method). Alternatively, if ``None`` is passed as the output stream, the method will return a generator yielding one row of output data on each iteration. Output will be returned as byte strings unless you set decode to true. Note that you can also use a ``SELECT`` query instead of the table name. The format must be text, csv or binary. The sep option sets the column separator (delimiter) used in the non binary formats. The null option sets the textual representation of ``NULL`` in the output. The copy operation can be restricted to a subset of columns. If no columns are specified, all of them will be copied. .. versionadded:: 5.0 .. 
method:: Cursor.row_factory(row) Process rows before they are returned :param list row: the currently processed row of the result set :returns: the transformed row that the fetch methods shall return This method is used for processing result rows before returning them through one of the fetch methods. By default, rows are returned as named tuples. You can overwrite this method with a custom row factory if you want to return the rows as different kinds of objects. This same row factory will then be used for all result sets. If you overwrite this method, the method :meth:`Cursor.build_row_factory` for creating row factories dynamically will be ignored. Note that named tuples are very efficient and can be easily converted to dicts (even OrderedDicts) by calling ``row._asdict()``. If you still want to return rows as dicts, you can create a custom cursor class like this:: class DictCursor(pgdb.Cursor): def row_factory(self, row): return {key: value for key, value in zip(self.colnames, row)} cur = DictCursor(con) # get one DictCursor instance or con.cursor_type = DictCursor # always use DictCursor instances .. versionadded:: 4.0 .. method:: Cursor.build_row_factory() Build a row factory based on the current description :returns: callable with the signature of :meth:`Cursor.row_factory` This method returns row factories for creating named tuples. It is called whenever a new result set is created, and :attr:`Cursor.row_factory` is then assigned the return value of this method. You can overwrite this method with a custom row factory builder if you want to use different row factories for different result sets. Otherwise, you can also simply overwrite the :meth:`Cursor.row_factory` method. This method will then be ignored. The default implementation that delivers rows as named tuples essentially looks like this:: def build_row_factory(self): return namedtuple('Row', self.colnames, rename=True)._make .. versionadded:: 5.0 .. 
attribute:: Cursor.colnames The list of columns names of the current result set The values in this list are the same values as the *name* elements in the :attr:`Cursor.description` attribute. Always use the latter if you want to remain standard compliant. .. versionadded:: 5.0 .. attribute:: Cursor.coltypes The list of columns types of the current result set The values in this list are the same values as the *type_code* elements in the :attr:`Cursor.description` attribute. Always use the latter if you want to remain standard compliant. .. versionadded:: 5.0 PyGreSQL-5.1/docs/contents/pgdb/index.rst0000644000175100077410000000043513466770070020275 0ustar darcypyg00000000000000---------------------------------------------- :mod:`pgdb` --- The DB-API Compliant Interface ---------------------------------------------- .. module:: pgdb Contents ======== .. toctree:: introduction module connection cursor types typecache adaptation PyGreSQL-5.1/docs/contents/pgdb/adaptation.rst0000644000175100077410000003531613466770070021320 0ustar darcypyg00000000000000Remarks on Adaptation and Typecasting ===================================== .. py:currentmodule:: pgdb Both PostgreSQL and Python have the concept of data types, but there are of course differences between the two type systems. Therefore PyGreSQL needs to adapt Python objects to the representation required by PostgreSQL when passing values as query parameters, and it needs to typecast the representation of PostgreSQL data types returned by database queries to Python objects. Here are some explanations about how this works in detail in case you want to better understand or change the default behavior of PyGreSQL. Supported data types -------------------- The following automatic data type conversions are supported by PyGreSQL out of the box. If you need other automatic type conversions or want to change the default conversions, you can achieve this by using the methods explained in the next two sections. 
================================== ================== PostgreSQL Python ================================== ================== char, bpchar, name, text, varchar str bool bool bytea bytes int2, int4, int8, oid, serial int [#int8]_ int2vector list of int float4, float8 float numeric, money Decimal date datetime.date time, timetz datetime.time timestamp, timestamptz datetime.datetime interval datetime.timedelta hstore dict json, jsonb list or dict uuid uuid.UUID array list [#array]_ record tuple ================================== ================== .. note:: Elements of arrays and records will also be converted accordingly. .. [#int8] int8 is converted to long in Python 2 .. [#array] The first element of the array will always be the first element of the Python list, no matter what the lower bound of the PostgreSQL array is. The information about the start index of the array (which is usually 1 in PostgreSQL, but can also be different from 1) is ignored and gets lost in the conversion to the Python list. If you need that information, you can request it separately with the `array_lower()` function provided by PostgreSQL. Adaptation of parameters ------------------------ PyGreSQL knows how to adapt the common Python types to get a suitable representation of their values for PostgreSQL when you pass parameters to a query. For example:: >>> con = pgdb.connect(...) >>> cur = con.cursor() >>> parameters = (144, 3.75, 'hello', None) >>> tuple(cur.execute('SELECT %s, %s, %s, %s', parameters).fetchone()) (144, Decimal('3.75'), 'hello', None) This is the result we can expect, so obviously PyGreSQL has adapted the parameters and sent the following query to PostgreSQL: .. 
code-block:: sql SELECT 144, 3.75, 'hello', NULL Note the subtle, but important detail that even though the SQL string passed to :meth:`cur.execute` contains conversion specifications normally used in Python with the ``%`` operator for formatting strings, we didn't use the ``%`` operator to format the parameters, but passed them as the second argument to :meth:`cur.execute`. I.e. we **didn't** write the following:: >>> tuple(cur.execute('SELECT %s, %s, %s, %s' % parameters).fetchone()) If we had done this, PostgreSQL would have complained because the parameters were not adapted. Particularly, there would be no quotes around the value ``'hello'``, so PostgreSQL would have interpreted this as a database column, which would have caused a :exc:`ProgrammingError`. Also, the Python value ``None`` would have been included in the SQL command literally, instead of being converted to the SQL keyword ``NULL``, which would have been another reason for PostgreSQL to complain about our bad query: .. code-block:: sql SELECT 144, 3.75, hello, None Even worse, building queries with the use of the ``%`` operator makes us vulnerable to so called "SQL injection" exploits, where an attacker inserts malicious SQL statements into our queries that we never intended to be executed. We could avoid this by carefully quoting and escaping the parameters, but this would be tedious and if we overlook something, our code will still be vulnerable. So please don't do this. This cannot be emphasized enough, because it is such a subtle difference and using the ``%`` operator looks so natural: .. warning:: Remember to **never** insert parameters directly into your queries using the ``%`` operator. Always pass the parameters separately. The good thing is that by letting PyGreSQL do the work for you, you can treat all your parameters equally and don't need to ponder where you need to put quotes or need to escape strings. You can and should also always use the general ``%s`` specification instead of e.g. 
using ``%d`` for integers. Actually, to avoid mistakes and make it easier to insert parameters at more than one location, you can and should use named specifications, like this:: >>> params = dict(greeting='Hello', name='HAL') >>> sql = """SELECT %(greeting)s || ', ' || %(name)s ... || '. Do you read me, ' || %(name)s || '?'""" >>> cur.execute(sql, params).fetchone()[0] 'Hello, HAL. Do you read me, HAL?' PyGreSQL does not only adapt the basic types like ``int``, ``float``, ``bool`` and ``str``, but also tries to make sense of Python lists and tuples. Lists are adapted as PostgreSQL arrays:: >>> params = dict(array=[[1, 2],[3, 4]]) >>> cur.execute("SELECT %(array)s", params).fetchone()[0] [[1, 2], [3, 4]] Note that the query gives the value back as Python lists again. This is achieved by the typecasting mechanism explained in the next section. The query that was actually executed was this: .. code-block:: sql SELECT ARRAY[[1,2],[3,4]] Again, if we had inserted the list using the ``%`` operator without adaptation, the ``ARRAY`` keyword would have been missing in the query. Tuples are adapted as PostgreSQL composite types:: >>> params = dict(record=('Bond', 'James')) >>> cur.execute("SELECT %(record)s", params).fetchone()[0] ('Bond', 'James') You can also use this feature with the ``IN`` syntax of SQL:: >>> params = dict(what='needle', where=('needle', 'haystack')) >>> cur.execute("SELECT %(what)s IN %(where)s", params).fetchone()[0] True Sometimes a Python type can be ambiguous. For instance, you might want to insert a Python list not into an array column, but into a JSON column. Or you want to interpret a string as a date and insert it into a DATE column. In this case you can give PyGreSQL a hint by using :ref:`type_constructors`:: >>> cur.execute("CREATE TABLE json_data (data json, created date)") >>> params = dict( ... 
data=pgdb.Json([1, 2, 3]), created=pgdb.Date(2016, 1, 29)) >>> sql = ("INSERT INTO json_data VALUES (%(data)s, %(created)s)") >>> cur.execute(sql, params) >>> cur.execute("SELECT * FROM json_data").fetchone() Row(data=[1, 2, 3], created='2016-01-29') Let's think of another example where we create a table with a composite type in PostgreSQL: .. code-block:: sql CREATE TABLE on_hand ( item inventory_item, count integer) We assume the composite type ``inventory_item`` has been created like this: .. code-block:: sql CREATE TYPE inventory_item AS ( name text, supplier_id integer, price numeric) In Python we can use a named tuple as an equivalent to this PostgreSQL type:: >>> from collections import namedtuple >>> inventory_item = namedtuple( ... 'inventory_item', ['name', 'supplier_id', 'price']) Using the automatic adaptation of Python tuples, an item can now be inserted into the database and then read back as follows:: >>> cur.execute("INSERT INTO on_hand VALUES (%(item)s, %(count)s)", ... dict(item=inventory_item('fuzzy dice', 42, 1.99), count=1000)) >>> cur.execute("SELECT * FROM on_hand").fetchone() Row(item=inventory_item(name='fuzzy dice', supplier_id=42, price=Decimal('1.99')), count=1000) However, we may not want to use named tuples, but custom Python classes to hold our values, like this one:: >>> class InventoryItem: ... ... def __init__(self, name, supplier_id, price): ... self.name = name ... self.supplier_id = supplier_id ... self.price = price ... ... def __str__(self): ... return '%s (from %s, at $%s)' % ( ... self.name, self.supplier_id, self.price) But when we try to insert an instance of this class in the same way, we will get an error:: >>> cur.execute("INSERT INTO on_hand VALUES (%(item)s, %(count)s)", ... dict(item=InventoryItem('fuzzy dice', 42, 1.99), count=1000)) InterfaceError: Do not know how to adapt type While PyGreSQL knows how to adapt tuples, it does not know what to make out of our custom class. 
To simply convert the object to a string using the ``str`` function is not a solution, since this yields a human readable string that is not useful for PostgreSQL. However, it is possible to make such custom classes adapt themselves to PostgreSQL by adding a "magic" method with the name ``__pg_repr__``, like this:: >>> class InventoryItem: ... ... ... ... ... def __str__(self): ... return '%s (from %s, at $%s)' % ( ... self.name, self.supplier_id, self.price) ... ... def __pg_repr__(self): ... return (self.name, self.supplier_id, self.price) Now you can insert class instances the same way as you insert named tuples. Note that PyGreSQL adapts the result of ``__pg_repr__`` again if it is a tuple or a list. Otherwise, it must be a properly escaped string. Typecasting to Python --------------------- As you noticed, PyGreSQL automatically converted the PostgreSQL data to suitable Python objects when returning values via one of the "fetch" methods of a cursor. This is done by the use of built-in typecast functions. If you want to use different typecast functions or add your own if no built-in typecast function is available, then this is possible using the :func:`set_typecast` function. With the :func:`get_typecast` function you can check which function is currently set, and :func:`reset_typecast` allows you to reset the typecast function to its default. If no typecast function is set, then PyGreSQL will return the raw strings from the database. For instance, you will find that PyGreSQL uses the normal ``int`` function to cast PostgreSQL ``int4`` type values to Python:: >>> pgdb.get_typecast('int4') int You can change this to return float values instead:: >>> pgdb.set_typecast('int4', float) >>> con = pgdb.connect(...) 
>>> cur = con.cursor() >>> cur.execute('select 42::int4').fetchone()[0] 42.0 Note that the connections cache the typecast functions, so you may need to reopen the database connection, or reset the cache of the connection to make this effective, using the following command:: >>> con.type_cache.reset_typecast() The :class:`TypeCache` of the connection can also be used to change typecast functions locally for one database connection only. As a more useful example, we can create a typecast function that casts items of the composite type used as example in the previous section to instances of the corresponding Python class:: >>> con.type_cache.reset_typecast() >>> cast_tuple = con.type_cache.get_typecast('inventory_item') >>> cast_item = lambda value: InventoryItem(*cast_tuple(value)) >>> con.type_cache.set_typecast('inventory_item', cast_item) >>> str(cur.execute("SELECT * FROM on_hand").fetchone()[0]) 'fuzzy dice (from 42, at $1.99)' As you saw in the last section you, PyGreSQL also has a typecast function for JSON, which is the default JSON decoder from the standard library. Let's assume we want to use a slight variation of that decoder in which every integer in JSON is converted to a float in Python. This can be accomplished as follows:: >>> from json import loads >>> cast_json = lambda v: loads(v, parse_int=float) >>> pgdb.set_typecast('json', cast_json) >>> cur.execute("SELECT data FROM json_data").fetchone()[0] [1.0, 2.0, 3.0] Note again that you may need to run ``con.type_cache.reset_typecast()`` to make this effective. Also note that the two types ``json`` and ``jsonb`` have their own typecast functions, so if you use ``jsonb`` instead of ``json``, you need to use this type name when setting the typecast function:: >>> pgdb.set_typecast('jsonb', cast_json) As one last example, let us try to typecast the geometric data type ``circle`` of PostgreSQL into a `SymPy `_ ``Circle`` object. 
Let's assume we have created and populated a table with two circles, like so: .. code-block:: sql CREATE TABLE circle ( name varchar(8) primary key, circle circle); INSERT INTO circle VALUES ('C1', '<(2, 3), 3>'); INSERT INTO circle VALUES ('C2', '<(1, -1), 4>'); With PostgreSQL we can easily calculate that these two circles overlap:: >>> con.cursor().execute("""SELECT c1.circle && c2.circle ... FROM circle c1, circle c2 ... WHERE c1.name = 'C1' AND c2.name = 'C2'""").fetchone()[0] True However, calculating the intersection points between the two circles using the ``#`` operator does not work (at least not as of PostgreSQL version 9.5). So let' resort to SymPy to find out. To ease importing circles from PostgreSQL to SymPy, we create and register the following typecast function:: >>> from sympy import Point, Circle >>> >>> def cast_circle(s): ... p, r = s[1:-1].rsplit(',', 1) ... p = p[1:-1].split(',') ... return Circle(Point(float(p[0]), float(p[1])), float(r)) ... >>> pgdb.set_typecast('circle', cast_circle) Now we can import the circles in the table into Python quite easily:: >>> circle = {c.name: c.circle for c in con.cursor().execute( ... "SELECT * FROM circle").fetchall()} The result is a dictionary mapping circle names to SymPy ``Circle`` objects. We can verify that the circles have been imported correctly: >>> circle {'C1': Circle(Point(2, 3), 3.0), 'C2': Circle(Point(1, -1), 4.0)} Finally we can find the exact intersection points with SymPy: >>> circle['C1'].intersection(circle['C2']) [Point(29/17 + 64564173230121*sqrt(17)/100000000000000, -80705216537651*sqrt(17)/500000000000000 + 31/17), Point(-64564173230121*sqrt(17)/100000000000000 + 29/17, 80705216537651*sqrt(17)/500000000000000 + 31/17)] PyGreSQL-5.1/docs/contents/examples.rst0000644000175100077410000000121213466770070020062 0ustar darcypyg00000000000000Examples ======== I am starting to collect examples of applications that use PyGreSQL. 
So far I only have a few but if you have an example for me, you can either send me the files or the URL for me to point to. The :doc:`postgres/index` that is part of the PyGreSQL distribution shows some examples of using PostgreSQL with PyGreSQL. Here is a `list of motorcycle rides in Ontario `_ that uses a PostgreSQL database to store the rides. There is a link at the bottom of the page to view the source code. Oleg Broytmann has written a simple example `RGB database demo `_ PyGreSQL-5.1/docs/contents/install.rst0000644000175100077410000001520113466770070017715 0ustar darcypyg00000000000000Installation ============ General ------- You must first install Python and PostgreSQL on your system. If you want to access remote databases only, you don't need to install the full PostgreSQL server, but only the libpq C-interface library. If you are on Windows, make sure that the directory that contains libpq.dll is part of your ``PATH`` environment variable. The current version of PyGreSQL has been tested with Python versions 2.6, 2.7 and 3.3 to 3.7, and PostgreSQL versions 9.0 to 9.6 and 10 or 11. PyGreSQL will be installed as three modules, a shared library called _pg.so (on Linux) or a DLL called _pg.pyd (on Windows), and two pure Python wrapper modules called pg.py and pgdb.py. All three files will be installed directly into the Python site-packages directory. To uninstall PyGreSQL, simply remove these three files. Installing with Pip ------------------- This is the most easy way to install PyGreSQL if you have "pip" installed. Just run the following command in your terminal:: pip install PyGreSQL This will automatically try to find and download a distribution on the `Python Package Index `_ that matches your operating system and Python version and install it. 
Installing from a Binary Distribution ------------------------------------- If you don't want to use "pip", or "pip" doesn't find an appropriate distribution for your computer, you can also try to manually download and install a distribution. When you download the source distribution, you will need to compile the C extension, for which you need a C compiler installed. If you don't want to install a C compiler or avoid possible problems with the compilation, you can search for a pre-compiled binary distribution of PyGreSQL on the Python Package Index or the PyGreSQL homepage. You can currently download PyGreSQL as Linux RPM, NetBSD package and Windows installer. Make sure the required Python version of the binary package matches the Python version you have installed. Install the package as usual on your system. Note that the documentation is currently only included in the source package. Installing from Source ---------------------- If you want to install PyGreSQL from Source, or there is no binary package available for your platform, follow these instructions. Make sure the Python header files and PostgreSQL client and server header files are installed. These come usually with the "devel" packages on Unix systems and the installer executables on Windows systems. If you are using a precompiled PostgreSQL, you will also need the pg_config tool. This is usually also part of the "devel" package on Unix, and will be installed as part of the database server feature on Windows systems. Building and installing with Distutils ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can build and install PyGreSQL using `Distutils `_. Download and unpack the PyGreSQL source tarball if you haven't already done so. Type the following commands to build and install PyGreSQL:: python setup.py build python setup.py install Now you should be ready to use PyGreSQL. Compiling Manually ~~~~~~~~~~~~~~~~~~ The source file for compiling the C extension module is pgmodule.c. You have two options. 
You can compile PyGreSQL as a stand-alone module or you can build it into the Python interpreter. Stand-Alone ^^^^^^^^^^^ * In the directory containing ``pgmodule.c``, run the following command:: cc -fpic -shared -o _pg.so -I$PYINC -I$PGINC -I$PSINC -L$PGLIB -lpq pgmodule.c where you have to set:: PYINC = path to the Python include files (usually something like /usr/include/python) PGINC = path to the PostgreSQL client include files (something like /usr/include/pgsql or /usr/include/postgresql) PSINC = path to the PostgreSQL server include files (like /usr/include/pgsql/server or /usr/include/postgresql/server) PGLIB = path to the PostgreSQL object code libraries (usually /usr/lib) If you are not sure about the above paths, try something like:: PYINC=`find /usr -name Python.h` PGINC=`find /usr -name libpq-fe.h` PSINC=`find /usr -name postgres.h` PGLIB=`find /usr -name libpq.so` If you have the ``pg_config`` tool installed, you can set:: PGINC=`pg_config --includedir` PSINC=`pg_config --includedir-server` PGLIB=`pg_config --libdir` Some options may be added to this line:: -DNO_DEF_VAR no default variables support -DNO_DIRECT no direct access methods -DNO_LARGE no large object support -DNO_PQSOCKET if running an older PostgreSQL On some systems you may need to include ``-lcrypt`` in the list of libraries to make it compile. * Test the new module. Something like the following should work:: $ python >>> import _pg >>> db = _pg.connect('thilo','localhost') >>> db.query("INSERT INTO test VALUES ('ping','pong')") 18304 >>> db.query("SELECT * FROM test") eins|zwei ----+---- ping|pong (1 row) * Finally, move the ``_pg.so``, ``pg.py``, and ``pgdb.py`` to a directory in your ``PYTHONPATH``. A good place would be ``/usr/lib/python/site-packages`` if your Python modules are in ``/usr/lib/python``. 
Built-in to Python interpreter ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * Find the directory where your ``Setup`` file lives (usually in the ``Modules`` subdirectory) in the Python source hierarchy and copy or symlink the ``pgmodule.c`` file there. * Add the following line to your 'Setup' file:: _pg pgmodule.c -I$PGINC -I$PSINC -L$PGLIB -lpq where:: PGINC = path to the PostgreSQL client include files (see above) PSINC = path to the PostgreSQL server include files (see above) PGLIB = path to the PostgreSQL object code libraries (see above) Some options may be added to this line:: -DNO_DEF_VAR no default variables support -DNO_DIRECT no direct access methods -DNO_LARGE no large object support -DNO_PQSOCKET if running an older PostgreSQL (see above) On some systems you may need to include ``-lcrypt`` in the list of libraries to make it compile. * If you want a shared module, make sure that the ``shared`` keyword is uncommented and add the above line below it. You used to need to install your shared modules with ``make sharedinstall`` but this no longer seems to be true. * Copy ``pg.py`` to the lib directory where the rest of your modules are. For example, that's ``/usr/local/lib/Python`` on my system. * Rebuild Python from the root directory of the Python source hierarchy by running ``make -f Makefile.pre.in boot`` and ``make && make install``. * For more details read the documentation at the top of ``Makefile.pre.in``. PyGreSQL-5.1/docs/contents/pg/0000755000175100077410000000000013470245541016117 5ustar darcypyg00000000000000PyGreSQL-5.1/docs/contents/pg/adaptation.rst0000644000175100077410000004150013466770070021002 0ustar darcypyg00000000000000Remarks on Adaptation and Typecasting ===================================== .. py:currentmodule:: pg Both PostgreSQL and Python have the concept of data types, but there are of course differences between the two type systems. 
Therefore PyGreSQL needs to adapt Python objects to the representation required by PostgreSQL when passing values as query parameters, and it needs to typecast the representation of PostgreSQL data types returned by database queries to Python objects. Here are some explanations about how this works in detail in case you want to better understand or change the default behavior of PyGreSQL. Supported data types -------------------- The following automatic data type conversions are supported by PyGreSQL out of the box. If you need other automatic type conversions or want to change the default conversions, you can achieve this by using the methods explained in the next two sections. ================================== ================== PostgreSQL Python ================================== ================== char, bpchar, name, text, varchar str bool bool bytea bytes int2, int4, int8, oid, serial int [#int8]_ int2vector list of int float4, float8 float numeric, money Decimal date datetime.date time, timetz datetime.time timestamp, timestamptz datetime.datetime interval datetime.timedelta hstore dict json, jsonb list or dict uuid uuid.UUID array list [#array]_ record tuple ================================== ================== .. note:: Elements of arrays and records will also be converted accordingly. .. [#int8] int8 is converted to long in Python 2 .. [#array] The first element of the array will always be the first element of the Python list, no matter what the lower bound of the PostgreSQL array is. The information about the start index of the array (which is usually 1 in PostgreSQL, but can also be different from 1) is ignored and gets lost in the conversion to the Python list. If you need that information, you can request it separately with the `array_lower()` function provided by PostgreSQL. 
Adaptation of parameters ------------------------ When you use the higher level methods of the classic :mod:`pg` module like :meth:`DB.insert()` or :meth:`DB.update()`, you don't need to care about adaptation of parameters, since all of this is happening automatically behind the scenes. You only need to consider this issue when creating SQL commands manually and sending them to the database using the :meth:`DB.query` method. Imagine you have created a user login form that stores the login name as *login* and the password as *passwd* and you now want to get the user data for that user. You may be tempted to execute a query like this:: >>> db = pg.DB(...) >>> sql = "SELECT * FROM user_table WHERE login = '%s' AND passwd = '%s'" >>> db.query(sql % (login, passwd)).getresult()[0] This seems to work at a first glance, but you will notice an error as soon as you try to use a login name containing a single quote. Even worse, this error can be exploited through so-called "SQL injection", where an attacker inserts malicious SQL statements into the query that you never intended to be executed. For instance, with a login name something like ``' OR ''='`` the attacker could easily log in and see the user data of another user in the database. One solution for this problem would be to cleanse your input of "dangerous" characters like the single quote, but this is tedious and it is likely that you overlook something or break the application e.g. for users with names like "D'Arcy". A better solution is to use the escaping functions provided by PostgreSQL which are available as methods on the :class:`DB` object:: >>> login = "D'Arcy" >>> db.escape_string(login) "D''Arcy" As you see, :meth:`DB.escape_string` has doubled the single quote which is the right thing to do in SQL. However, there are better ways of passing parameters to the query, without having to manually escape them. 
If you pass the parameters as positional arguments to :meth:`DB.query`, then PyGreSQL will send them to the database separately, without the need for quoting them inside the SQL command, and without the problems inherent with that process. In this case you must put placeholders of the form ``$1``, ``$2`` etc. in the SQL command in place of the parameters that should go there. For instance:: >>> sql = "SELECT * FROM user_table WHERE login = $1 AND passwd = $2" >>> db.query(sql, login, passwd).getresult()[0] That's much better. So please always keep the following warning in mind: .. warning:: Remember to **never** insert parameters directly into your queries using the ``%`` operator. Always pass the parameters separately. If you like the ``%`` format specifications of Python better than the placeholders used by PostgreSQL, there is still a way to use them, via the :meth:`DB.query_formatted` method:: >>> sql = "SELECT * FROM user_table WHERE login = %s AND passwd = %s" >>> db.query_formatted(sql, (login, passwd)).getresult()[0] Note that we need to pass the parameters not as positional arguments here, but as a single tuple. Also note again that we did not use the ``%`` operator of Python to format the SQL string, we just used the ``%s`` format specifications of Python and let PyGreSQL care about the formatting. Even better, you can also pass the parameters as a dictionary if you use the :meth:`DB.query_formatted` method:: >>> sql = """SELECT * FROM user_table ... 
WHERE login = %(login)s AND passwd = %(passwd)s""" >>> parameters = dict(login=login, passwd=passwd) >>> db.query_formatted(sql, parameters).getresult()[0] Here is another example:: >>> sql = "SELECT 'Hello, ' || %s || '!'" >>> db.query_formatted(sql, (login,)).getresult()[0] You would think that the following even simpler example should work, too: >>> sql = "SELECT %s" >>> db.query_formatted(sql, (login,)).getresult()[0] ProgrammingError: Could not determine data type of parameter $1 The issue here is that :meth:`DB.query_formatted` by default still uses PostgreSQL parameters, transforming the Python style ``%s`` placeholder into a ``$1`` placeholder, and sending the login name separately from the query. In the query we looked at before, the concatenation with other strings made it clear that it should be interpreted as a string. This simple query however does not give PostgreSQL a clue what data type the ``$1`` placeholder stands for. This is different when you are embedding the login name directly into the query instead of passing it as parameter to PostgreSQL. You can achieve this by setting the *inline* parameter of :meth:`DB.query_formatted`, like so:: >>> sql = "SELECT %s" >>> db.query_formatted(sql, (login,), inline=True).getresult()[0] Another way of making this query work while still sending the parameters separately is to simply cast the parameter values:: >>> sql = "SELECT %s::text" >>> db.query_formatted(sql, (login,), inline=False).getresult()[0] In real world examples you will rarely have to cast your parameters like that, since in an INSERT statement or a WHERE clause comparing the parameter to a table column the data type will be clear from the context. When binding the parameters to a query, PyGreSQL not only adapts the basic types like ``int``, ``float``, ``bool`` and ``str``, but also tries to make sense of Python lists and tuples. 
Lists are adapted as PostgreSQL arrays:: >>> params = dict(array=[[1, 2],[3, 4]]) >>> db.query_formatted("SELECT %(array)s::int[]", params).getresult()[0][0] [[1, 2], [3, 4]] Note that again we only need to cast the array parameter or use inline parameters because this simple query does not provide enough context. Also note that the query gives the value back as Python lists again. This is achieved by the typecasting mechanism explained in the next section. Tuples are adapted as PostgreSQL composite types. If you use inline paramters, they can also be used with the ``IN`` syntax. Let's think of a more real world example again where we create a table with a composite type in PostgreSQL: .. code-block:: sql CREATE TABLE on_hand ( item inventory_item, count integer) We assume the composite type ``inventory_item`` has been created like this: .. code-block:: sql CREATE TYPE inventory_item AS ( name text, supplier_id integer, price numeric) In Python we can use a named tuple as an equivalent to this PostgreSQL type:: >>> from collections import namedtuple >>> inventory_item = namedtuple( ... 'inventory_item', ['name', 'supplier_id', 'price']) Using the automatic adaptation of Python tuples, an item can now be inserted into the database and then read back as follows:: >>> db.query_formatted("INSERT INTO on_hand VALUES (%(item)s, %(count)s)", ... dict(item=inventory_item('fuzzy dice', 42, 1.99), count=1000)) >>> db.query("SELECT * FROM on_hand").getresult()[0][0] Row(item=inventory_item(name='fuzzy dice', supplier_id=42, price=Decimal('1.99')), count=1000) The :meth:`DB.insert` method provides a simpler way to achieve the same:: >>> row = dict(item=inventory_item('fuzzy dice', 42, 1.99), count=1000) >>> db.insert('on_hand', row) {'count': 1000, 'item': inventory_item(name='fuzzy dice', supplier_id=42, price=Decimal('1.99'))} Perhaps we want to use custom Python classes instead of named tuples to hold our values:: >>> class InventoryItem: ... ... 
def __init__(self, name, supplier_id, price): ... self.name = name ... self.supplier_id = supplier_id ... self.price = price ... ... def __str__(self): ... return '%s (from %s, at $%s)' % ( ... self.name, self.supplier_id, self.price) But when we try to insert an instance of this class in the same way, we will get an error. This is because PyGreSQL tries to pass the string representation of the object as a parameter to PostgreSQL, but this is just a human readable string and not useful for PostgreSQL to build a composite type. However, it is possible to make such custom classes adapt themselves to PostgreSQL by adding a "magic" method with the name ``__pg_str__``, like so:: >>> class InventoryItem: ... ... ... ... ... def __str__(self): ... return '%s (from %s, at $%s)' % ( ... self.name, self.supplier_id, self.price) ... ... def __pg_str__(self, typ): ... return (self.name, self.supplier_id, self.price) Now you can insert class instances the same way as you insert named tuples. You can even make these objects adapt to different types in different ways:: >>> class InventoryItem: ... ... ... ... ... def __pg_str__(self, typ): ... if typ == 'text': ... return str(self) ... return (self.name, self.supplier_id, self.price) ... >>> db.query("ALTER TABLE on_hand ADD COLUMN remark varchar") >>> item=InventoryItem('fuzzy dice', 42, 1.99) >>> row = dict(item=item, remark=item, count=1000) >>> db.insert('on_hand', row) {'count': 1000, 'item': inventory_item(name='fuzzy dice', supplier_id=42, price=Decimal('1.99')), 'remark': 'fuzzy dice (from 42, at $1.99)'} There is also another "magic" method ``__pg_repr__`` which does not take the *typ* parameter. That method is used instead of ``__pg_str__`` when passing parameters inline. You must be more careful when using ``__pg_repr__``, because it must return a properly escaped string that can be put literally inside the SQL. 
The only exception is when you return a tuple or list, because these will be adapted and properly escaped by PyGreSQL again. Typecasting to Python --------------------- As you noticed, PyGreSQL automatically converted the PostgreSQL data to suitable Python objects when returning values via the :meth:`DB.get()`, :meth:`Query.getresult()` and similar methods. This is done by the use of built-in typecast functions. If you want to use different typecast functions or add your own if no built-in typecast function is available, then this is possible using the :func:`set_typecast` function. With the :func:`get_typecast` function you can check which function is currently set. If no typecast function is set, then PyGreSQL will return the raw strings from the database. For instance, you will find that PyGreSQL uses the normal ``int`` function to cast PostgreSQL ``int4`` type values to Python:: >>> pg.get_typecast('int4') int In the classic PyGreSQL module, the typecasting for these basic types is always done internally by the C extension module for performance reasons. We can set a different typecast function for ``int4``, but it will not become effective, the C module continues to use its internal typecasting. However, we can add new typecast functions for the database types that are not supported by the C module. For example, we can create a typecast function that casts items of the composite PostgreSQL type used as example in the previous section to instances of the corresponding Python class. To do this, at first we get the default typecast function that PyGreSQL has created for the current :class:`DB` connection. This default function casts composite types to named tuples, as we have seen in the section before. 
We can grab it from the :attr:`DB.dbtypes` object as follows:: >>> cast_tuple = db.dbtypes.get_typecast('inventory_item') Now we can create a new typecast function that converts the tuple to an instance of our custom class:: >>> cast_item = lambda value: InventoryItem(*cast_tuple(value)) Finally, we set this typecast function, either globally with :func:`set_typecast`, or locally for the current connection like this:: >>> db.dbtypes.set_typecast('inventory_item', cast_item) Now we can get instances of our custom class directly from the database:: >>> item = db.query("SELECT * FROM on_hand").getresult()[0][0] >>> str(item) 'fuzzy dice (from 42, at $1.99)' Note that some of the typecast functions used by the C module are configurable with separate module level functions, such as :meth:`set_decimal`, :meth:`set_bool` or :meth:`set_jsondecode`. You need to use these instead of :meth:`set_typecast` if you want to change the behavior of the C module. Also note that after changing global typecast functions with :meth:`set_typecast`, you may need to run ``db.dbtypes.reset_typecast()`` to make these changes effective on connections that were already open. As one last example, let us try to typecast the geometric data type ``circle`` of PostgreSQL into a `SymPy <https://www.sympy.org>`_ ``Circle`` object. Let's assume we have created and populated a table with two circles, like so: .. code-block:: sql CREATE TABLE circle ( name varchar(8) primary key, circle circle); INSERT INTO circle VALUES ('C1', '<(2, 3), 3>'); INSERT INTO circle VALUES ('C2', '<(1, -1), 4>'); With PostgreSQL we can easily calculate that these two circles overlap:: >>> q = db.query("""SELECT c1.circle && c2.circle ... FROM circle c1, circle c2 ... WHERE c1.name = 'C1' AND c2.name = 'C2'""") >>> q.getresult()[0][0] True However, calculating the intersection points between the two circles using the ``#`` operator does not work (at least not as of PostgreSQL version 11). So let's resort to SymPy to find out. 
To ease importing circles from PostgreSQL to SymPy, we create and register the following typecast function:: >>> from sympy import Point, Circle >>> >>> def cast_circle(s): ... p, r = s[1:-1].split(',') ... p = p[1:-1].split(',') ... return Circle(Point(float(p[0]), float(p[1])), float(r)) ... >>> pg.set_typecast('circle', cast_circle) Now we can import the circles in the table into Python simply using:: >>> circle = db.get_as_dict('circle', scalar=True) The result is a dictionary mapping circle names to SymPy ``Circle`` objects. We can verify that the circles have been imported correctly: >>> circle['C1'] Circle(Point(2, 3), 3.0) >>> circle['C2'] Circle(Point(1, -1), 4.0) Finally we can find the exact intersection points with SymPy: >>> circle['C1'].intersection(circle['C2']) [Point(29/17 + 64564173230121*sqrt(17)/100000000000000, -80705216537651*sqrt(17)/500000000000000 + 31/17), Point(-64564173230121*sqrt(17)/100000000000000 + 29/17, 80705216537651*sqrt(17)/500000000000000 + 31/17)] PyGreSQL-5.1/docs/contents/pg/db_wrapper.rst0000644000175100077410000011344413466770070021012 0ustar darcypyg00000000000000The DB wrapper class ==================== .. py:currentmodule:: pg .. class:: DB The :class:`Connection` methods are wrapped in the class :class:`DB` which also adds convenient higher level methods for working with the database. It also serves as a context manager for the connection. The preferred way to use this module is as follows:: import pg with pg.DB(...) 
as db: # for parameters, see below for r in db.query( # just for example "SELECT foo, bar FROM foo_bar_table WHERE foo !~ bar" ).dictresult(): print('%(foo)s %(bar)s' % r) This class can be subclassed as in this example:: import pg class DB_ride(pg.DB): """Ride database wrapper This class encapsulates the database functions and the specific methods for the ride database.""" def __init__(self): """Open a database connection to the rides database""" pg.DB.__init__(self, dbname='ride') self.query("SET DATESTYLE TO 'ISO'") [Add or override methods here] The following describes the methods and variables of this class. Initialization -------------- The :class:`DB` class is initialized with the same arguments as the :func:`connect` function described above. It also initializes a few internal variables. The statement ``db = DB()`` will open the local database with the name of the user just like ``connect()`` does. You can also initialize the DB class with an existing :mod:`pg` or :mod:`pgdb` connection. Pass this connection as a single unnamed parameter, or as a single parameter named ``db``. This allows you to use all of the methods of the DB class with a DB-API 2 compliant connection. Note that the :meth:`Connection.close` and :meth:`Connection.reopen` methods are inoperative in this case. pkey -- return the primary key of a table ----------------------------------------- .. method:: DB.pkey(table) Return the primary key of a table :param str table: name of table :returns: Name of the field which is the primary key of the table :rtype: str :raises KeyError: the table does not have a primary key This method returns the primary key of a table. Single primary keys are returned as strings unless you set the composite flag. Composite primary keys are always represented as tuples. Note that this raises a KeyError if the table does not have a primary key. get_databases -- get list of databases in the system ---------------------------------------------------- .. 
method:: DB.get_databases() Get the list of databases in the system :returns: all databases in the system :rtype: list Although you can do this with a simple select, it is added here for convenience. get_relations -- get list of relations in connected database ------------------------------------------------------------ .. method:: DB.get_relations([kinds], [system]) Get the list of relations in connected database :param str kinds: a string or sequence of type letters :param bool system: whether system relations should be returned :returns: all relations of the given kinds in the database :rtype: list This method returns the list of relations in the connected database. Although you can do this with a simple select, it is added here for convenience. You can select which kinds of relations you are interested in by passing type letters in the `kinds` parameter. The type letters are ``r`` = ordinary table, ``i`` = index, ``S`` = sequence, ``v`` = view, ``c`` = composite type, ``s`` = special, ``t`` = TOAST table. If `kinds` is None or an empty string, all relations are returned (this is also the default). If `system` is set to `True`, then system tables and views (temporary tables, toast tables, catalog views and tables) will be returned as well, otherwise they will be ignored. get_tables -- get list of tables in connected database ------------------------------------------------------ .. method:: DB.get_tables([system]) Get the list of tables in connected database :param bool system: whether system tables should be returned :returns: all tables in connected database :rtype: list This is a shortcut for ``get_relations('r', system)`` that has been added for convenience. get_attnames -- get the attribute names of a table -------------------------------------------------- .. 
method:: DB.get_attnames(table) Get the attribute names of a table :param str table: name of table :returns: an ordered dictionary mapping attribute names to type names Given the name of a table, digs out the set of attribute names. Returns a read-only dictionary of attribute names (the names are the keys, the values are the names of the attributes' types) with the column names in the proper order if you iterate over it. By default, only a limited number of simple types will be returned. You can get the registered types instead, if enabled by calling the :meth:`DB.use_regtypes` method. has_table_privilege -- check table privilege -------------------------------------------- .. method:: DB.has_table_privilege(table, privilege) Check whether current user has specified table privilege :param str table: the name of the table :param str privilege: privilege to be checked -- default is 'select' :returns: whether current user has specified table privilege :rtype: bool Returns True if the current user has the specified privilege for the table. .. versionadded:: 4.0 get/set_parameter -- get or set run-time parameters ---------------------------------------------------- .. method:: DB.get_parameter(parameter) Get the value of run-time parameters :param parameter: the run-time parameter(s) to get :type param: str, tuple, list or dict :returns: the current value(s) of the run-time parameter(s) :rtype: str, list or dict :raises TypeError: Invalid parameter type(s) :raises pg.ProgrammingError: Invalid parameter name(s) If the parameter is a string, the return value will also be a string that is the current setting of the run-time parameter with that name. You can get several parameters at once by passing a list, set or dict. When passing a list of parameter names, the return value will be a corresponding list of parameter settings. When passing a set of parameter names, a new dict will be returned, mapping these parameter names to their settings. 
Finally, if you pass a dict as parameter, its values will be set to the current parameter settings corresponding to its keys. By passing the special name ``'all'`` as the parameter, you can get a dict of all existing configuration parameters. Note that you can request most of the important parameters also using :meth:`Connection.parameter()` which does not involve a database query, unlike :meth:`DB.get_parameter` and :meth:`DB.set_parameter`. .. versionadded:: 4.2 .. method:: DB.set_parameter(parameter, [value], [local]) Set the value of run-time parameters :param parameter: the run-time parameter(s) to set :type param: string, tuple, list or dict :param value: the value to set :type param: str or None :raises TypeError: Invalid parameter type(s) :raises ValueError: Invalid value argument(s) :raises pg.ProgrammingError: Invalid parameter name(s) or values If the parameter and the value are strings, the run-time parameter will be set to that value. If no value or *None* is passed as a value, then the run-time parameter will be restored to its default value. You can set several parameters at once by passing a list of parameter names, together with a single value that all parameters should be set to or with a corresponding list of values. You can also pass the parameters as a set if you only provide a single value. Finally, you can pass a dict with parameter names as keys. In this case, you should not pass a value, since the values for the parameters will be taken from the dict. By passing the special name ``'all'`` as the parameter, you can reset all existing settable run-time parameters to their default values. If you set *local* to `True`, then the command takes effect for only the current transaction. After :meth:`DB.commit` or :meth:`DB.rollback`, the session-level setting takes effect again. Setting *local* to `True` will appear to have no effect if it is executed outside a transaction, since the transaction will end immediately. .. 
versionadded:: 4.2 begin/commit/rollback/savepoint/release -- transaction handling --------------------------------------------------------------- .. method:: DB.begin([mode]) Begin a transaction :param str mode: an optional transaction mode such as 'READ ONLY' This initiates a transaction block, that is, all following queries will be executed in a single transaction until :meth:`DB.commit` or :meth:`DB.rollback` is called. .. versionadded:: 4.1 .. method:: DB.start() This is the same as the :meth:`DB.begin` method. .. method:: DB.commit() Commit a transaction This commits the current transaction. .. method:: DB.end() This is the same as the :meth:`DB.commit` method. .. versionadded:: 4.1 .. method:: DB.rollback([name]) Roll back a transaction :param str name: optionally, roll back to the specified savepoint This rolls back the current transaction, discarding all its changes. .. method:: DB.abort() This is the same as the :meth:`DB.rollback` method. .. versionadded:: 4.2 .. method:: DB.savepoint(name) Define a new savepoint :param str name: the name to give to the new savepoint This establishes a new savepoint within the current transaction. .. versionadded:: 4.1 .. method:: DB.release(name) Destroy a savepoint :param str name: the name of the savepoint to destroy This destroys a savepoint previously defined in the current transaction. .. versionadded:: 4.1 get -- get a row from a database table or view ---------------------------------------------- .. method:: DB.get(table, row, [keyname]) Get a row from a database table or view :param str table: name of table or view :param row: either a dictionary or the value to be looked up :param str keyname: name of field to use as key (optional) :returns: A dictionary - the keys are the attribute names, the values are the row values. :raises pg.ProgrammingError: table has no primary key or missing privilege :raises KeyError: missing key value for the row This method is the basic mechanism to get a single row. 
It assumes that the *keyname* specifies a unique row. It must be the name of a single column or a tuple of column names. If *keyname* is not specified, then the primary key for the table is used. If *row* is a dictionary, then the value for the key is taken from it. Otherwise, the row must be a single value or a tuple of values corresponding to the passed *keyname* or primary key. The fetched row from the table will be returned as a new dictionary or used to replace the existing values if the row was passed as a dictionary. The OID is also put into the dictionary if the table has one, but in order to allow the caller to work with multiple tables, it is munged as ``oid(table)`` using the actual name of the table. Note that since PyGreSQL 5.0 this will return the value of an array type column as a Python list by default. insert -- insert a row into a database table -------------------------------------------- .. method:: DB.insert(table, [row], [col=val, ...]) Insert a row into a database table :param str table: name of table :param dict row: optional dictionary of values :param col: optional keyword arguments for updating the dictionary :returns: the inserted values in the database :rtype: dict :raises pg.ProgrammingError: missing privilege or conflict This method inserts a row into a table. If the optional dictionary is not supplied then the required values must be included as keyword/value pairs. If a dictionary is supplied then any keywords provided will be added to or replace the entry in the dictionary. The dictionary is then reloaded with the values actually inserted in order to pick up values modified by rules, triggers, etc. Note that since PyGreSQL 5.0 it is possible to insert a value for an array type column by passing it as a Python list. update -- update a row in a database table ------------------------------------------ .. 
method:: DB.update(table, [row], [col=val, ...]) Update a row in a database table :param str table: name of table :param dict row: optional dictionary of values :param col: optional keyword arguments for updating the dictionary :returns: the new row in the database :rtype: dict :raises pg.ProgrammingError: table has no primary key or missing privilege :raises KeyError: missing key value for the row Similar to insert, but updates an existing row. The update is based on the primary key of the table or the OID value as munged by :meth:`DB.get` or passed as keyword. The OID will take precedence if provided, so that it is possible to update the primary key itself. The dictionary is then modified to reflect any changes caused by the update due to triggers, rules, default values, etc. Like insert, the dictionary is optional and updates will be performed on the fields in the keywords. There must be an OID or primary key either specified using the ``'oid'`` keyword or in the dictionary, in which case the OID must be munged. upsert -- insert a row with conflict resolution ----------------------------------------------- .. method:: DB.upsert(table, [row], [col=val, ...]) Insert a row into a database table with conflict resolution :param str table: name of table :param dict row: optional dictionary of values :param col: optional keyword arguments for specifying the update :returns: the new row in the database :rtype: dict :raises pg.ProgrammingError: table has no primary key or missing privilege This method inserts a row into a table, but instead of raising a ProgrammingError exception in case of violating a constraint or unique index, an update will be executed instead. This will be performed as a single atomic operation on the database, so race conditions can be avoided. Like the insert method, the first parameter is the name of the table and the second parameter can be used to pass the values to be inserted as a dictionary. 
Unlike the insert and update statements, keyword parameters are not used to modify the dictionary, but to specify which columns shall be updated in case of a conflict, and in which way: A value of `False` or `None` means the column shall not be updated, a value of `True` means the column shall be updated with the value that has been proposed for insertion, i.e. has been passed as value in the dictionary. Columns that are not specified by keywords but appear as keys in the dictionary are also updated like in the case keywords had been passed with the value `True`. So if in the case of a conflict you want to update every column that has been passed in the dictionary `d`, you would call ``upsert(table, d)``. If you don't want to do anything in case of a conflict, i.e. leave the existing row as it is, call ``upsert(table, d, **dict.fromkeys(d))``. If you need more fine-grained control of what gets updated, you can also pass strings in the keyword parameters. These strings will be used as SQL expressions for the update columns. In these expressions you can refer to the value that already exists in the table by writing the table prefix ``included.`` before the column name, and you can refer to the value that has been proposed for insertion by writing ``excluded.`` as table prefix. The dictionary is modified in any case to reflect the values in the database after the operation has completed. .. note:: The method uses the PostgreSQL "upsert" feature which is only available since PostgreSQL 9.5. With older PostgreSQL versions, you will get a ProgrammingError if you use this method. .. versionadded:: 5.0 query -- execute a SQL command string ------------------------------------- .. 
method:: DB.query(command, [arg1, [arg2, ...]]) Execute a SQL command string :param str command: SQL command :param arg*: optional positional arguments :returns: result values :rtype: :class:`Query`, None :raises TypeError: bad argument type, or too many arguments :raises TypeError: invalid connection :raises ValueError: empty SQL query or lost connection :raises pg.ProgrammingError: error in query :raises pg.InternalError: error during query processing Similar to the :class:`Connection` function with the same name, except that positional arguments can be passed either as a single list or tuple, or as individual positional arguments. These arguments will then be used as parameter values of parameterized queries. Example:: name = input("Name? ") phone = input("Phone? ") rows = db.query("update employees set phone=$2 where name=$1", name, phone).getresult()[0][0] # or rows = db.query("update employees set phone=$2 where name=$1", (name, phone)).getresult()[0][0] query_formatted -- execute a formatted SQL command string --------------------------------------------------------- .. method:: DB.query_formatted(command, [parameters], [types], [inline]) Execute a formatted SQL command string :param str command: SQL command :param parameters: the values of the parameters for the SQL command :type parameters: tuple, list or dict :param types: optionally, the types of the parameters :type types: tuple, list or dict :param bool inline: whether the parameters should be passed in the SQL :rtype: :class:`Query`, None :raises TypeError: bad argument type, or too many arguments :raises TypeError: invalid connection :raises ValueError: empty SQL query or lost connection :raises pg.ProgrammingError: error in query :raises pg.InternalError: error during query processing Similar to :meth:`DB.query`, but using Python format placeholders of the form ``%s`` or ``%(names)s`` instead of PostgreSQL placeholders of the form ``$1``. The parameters must be passed as a tuple, list or dict. 
You can also pass a corresponding tuple, list or dict of database types in order to format the parameters properly in case there is ambiguity. If you set *inline* to True, the parameters will be sent to the database embedded in the SQL command, otherwise they will be sent separately. If you set *inline* to True or don't pass any parameters, the command string can also include multiple SQL commands (separated by semicolons). You will only get the return value for the last command in this case. Note that the adaptation and conversion of the parameters causes a certain performance overhead. Depending on the type of values, the overhead can be smaller for *inline* queries or if you pass the types of the parameters, so that they don't need to be guessed from the values. For best performance, we recommend using a raw :meth:`DB.query` or :meth:`DB.query_prepared` if you are executing many of the same operations with different parameters. Example:: name = input("Name? ") phone = input("Phone? ") rows = db.query_formatted( "update employees set phone=%s where name=%s", (phone, name)).getresult()[0][0] # or rows = db.query_formatted( "update employees set phone=%(phone)s where name=%(name)s", dict(name=name, phone=phone)).getresult()[0][0] query_prepared -- execute a prepared statement ---------------------------------------------- .. 
method:: DB.query_prepared(name, [arg1, [arg2, ...]]) Execute a prepared statement :param str name: name of the prepared statement :param arg*: optional positional arguments :returns: result values :rtype: :class:`Query`, None :raises TypeError: bad argument type, or too many arguments :raises TypeError: invalid connection :raises ValueError: empty SQL query or lost connection :raises pg.ProgrammingError: error in query :raises pg.InternalError: error during query processing :raises pg.OperationalError: prepared statement does not exist This method works like the :meth:`DB.query` method, except that instead of passing the SQL command, you pass the name of a prepared statement created previously using the :meth:`DB.prepare` method. Passing an empty string or *None* as the name will execute the unnamed statement (see warning about the limited lifetime of the unnamed statement in :meth:`DB.prepare`). The functionality of this method is equivalent to that of the SQL ``EXECUTE`` command. Note that calling EXECUTE would require parameters to be sent inline, and be properly sanitized (escaped, quoted). .. versionadded:: 5.1 prepare -- create a prepared statement -------------------------------------- .. method:: DB.prepare(name, command) Create a prepared statement :param str command: SQL command :param str name: name of the prepared statement :rtype: None :raises TypeError: bad argument types, or wrong number of arguments :raises TypeError: invalid connection :raises pg.ProgrammingError: error in query or duplicate query This method creates a prepared statement with the specified name for later execution of the given command with the :meth:`DB.query_prepared` method. If the name is empty or *None*, the unnamed prepared statement is used, in which case any pre-existing unnamed statement is replaced. Otherwise, if a prepared statement with the specified name is already defined in the current database session, a :exc:`pg.ProgrammingError` is raised. 
The SQL command may optionally contain positional parameters of the form ``$1``, ``$2``, etc instead of literal data. The corresponding values must then be passed to the :meth:`Connection.query_prepared` method as positional arguments. The functionality of this method is equivalent to that of the SQL ``PREPARE`` command. Example:: db.prepare('change phone', "update employees set phone=$2 where ein=$1") while True: ein = input("Employee ID? ") if not ein: break phone = input("Phone? ") db.query_prepared('change phone', ein, phone) .. note:: We recommend always using named queries, since unnamed queries have a limited lifetime and can be automatically replaced or destroyed by various operations on the database. .. versionadded:: 5.1 describe_prepared -- describe a prepared statement -------------------------------------------------- .. method:: DB.describe_prepared([name]) Describe a prepared statement :param str name: name of the prepared statement :rtype: :class:`Query` :raises TypeError: bad argument type, or too many arguments :raises TypeError: invalid connection :raises pg.OperationalError: prepared statement does not exist This method returns a :class:`Query` object describing the prepared statement with the given name. You can also pass an empty name in order to describe the unnamed statement. Information on the fields of the corresponding query can be obtained through the :meth:`Query.listfields`, :meth:`Query.fieldname` and :meth:`Query.fieldnum` methods. .. versionadded:: 5.1 delete_prepared -- delete a prepared statement ---------------------------------------------- .. 
method:: DB.delete_prepared([name]) Delete a prepared statement :param str name: name of the prepared statement :rtype: None :raises TypeError: bad argument type, or too many arguments :raises TypeError: invalid connection :raises pg.OperationalError: prepared statement does not exist This method deallocates a previously prepared SQL statement with the given name, or deallocates all prepared statements if you do not specify a name. Note that prepared statements are always deallocated automatically when the current session ends. .. versionadded:: 5.1 clear -- clear row values in memory ----------------------------------- .. method:: DB.clear(table, [row]) Clear row values in memory :param str table: name of table :param dict row: optional dictionary of values :returns: an empty row :rtype: dict This method clears all the attributes to values determined by the types. Numeric types are set to 0, Booleans are set to *False*, and everything else is set to the empty string. If the row argument is present, it is used as the row dictionary and any entries matching attribute names are cleared with everything else left unchanged. If the dictionary is not supplied a new one is created. delete -- delete a row from a database table -------------------------------------------- .. method:: DB.delete(table, [row], [col=val, ...]) Delete a row from a database table :param str table: name of table :param dict d: optional dictionary of values :param col: optional keyword arguments for updating the dictionary :rtype: int :raises pg.ProgrammingError: table has no primary key, row is still referenced or missing privilege :raises KeyError: missing key value for the row This method deletes the row from a table. It deletes based on the primary key of the table or the OID value as munged by :meth:`DB.get` or passed as keyword. The OID will take precedence if provided. The return value is the number of deleted rows (i.e. 0 if the row did not exist and 1 if the row was deleted). 
Note that if the row cannot be deleted because e.g. it is still referenced by another table, this method will raise a ProgrammingError. truncate -- quickly empty database tables ----------------------------------------- .. method:: DB.truncate(table, [restart], [cascade], [only]) Empty a table or set of tables :param table: the name of the table(s) :type table: str, list or set :param bool restart: whether table sequences should be restarted :param bool cascade: whether referenced tables should also be truncated :param only: whether only parent tables should be truncated :type only: bool or list This method quickly removes all rows from the given table or set of tables. It has the same effect as an unqualified DELETE on each table, but since it does not actually scan the tables it is faster. Furthermore, it reclaims disk space immediately, rather than requiring a subsequent VACUUM operation. This is most useful on large tables. If *restart* is set to `True`, sequences owned by columns of the truncated table(s) are automatically restarted. If *cascade* is set to `True`, it also truncates all tables that have foreign-key references to any of the named tables. If the parameter *only* is not set to `True`, all the descendant tables (if any) will also be truncated. Optionally, a ``*`` can be specified after the table name to explicitly indicate that descendant tables are included. If the parameter *table* is a list, the parameter *only* can also be a list of corresponding boolean values. .. versionadded:: 4.2 get_as_list/dict -- read a table as a list or dictionary -------------------------------------------------------- .. 
method:: DB.get_as_list(table, [what], [where], [order], [limit], [offset], [scalar]) Get a table as a list :param str table: the name of the table (the FROM clause) :param what: column(s) to be returned (the SELECT clause) :type what: str, list, tuple or None :param where: conditions(s) to be fulfilled (the WHERE clause) :type where: str, list, tuple or None :param order: column(s) to sort by (the ORDER BY clause) :type order: str, list, tuple, False or None :param int limit: maximum number of rows returned (the LIMIT clause) :param int offset: number of rows to be skipped (the OFFSET clause) :param bool scalar: whether only the first column shall be returned :returns: the content of the table as a list :rtype: list :raises TypeError: the table name has not been specified This gets a convenient representation of the table as a list of named tuples in Python. You only need to pass the name of the table (or any other SQL expression returning rows). Note that by default this will return the full content of the table which can be huge and overflow your memory. However, you can control the amount of data returned using the other optional parameters. The parameter *what* can restrict the query to only return a subset of the table columns. The parameter *where* can restrict the query to only return a subset of the table rows. The specified SQL expressions all need to be fulfilled for a row to get into the result. The parameter *order* specifies the ordering of the rows. If no ordering is specified, the result will be ordered by the primary key(s) or all columns if no primary key exists. You can set *order* to *False* if you don't care about the ordering. The parameters *limit* and *offset* specify the maximum number of rows returned and a number of rows skipped over. If you set the *scalar* option to *True*, then instead of the named tuples you will get the first items of these tuples. This is useful if the result has only one column anyway. .. versionadded:: 5.0 .. 
method:: DB.get_as_dict(table, [keyname], [what], [where], [order], [limit], [offset], [scalar]) Get a table as a dictionary :param str table: the name of the table (the FROM clause) :param keyname: column(s) to be used as key(s) of the dictionary :type keyname: str, list, tuple or None :param what: column(s) to be returned (the SELECT clause) :type what: str, list, tuple or None :param where: condition(s) to be fulfilled (the WHERE clause) :type where: str, list, tuple or None :param order: column(s) to sort by (the ORDER BY clause) :type order: str, list, tuple, False or None :param int limit: maximum number of rows returned (the LIMIT clause) :param int offset: number of rows to be skipped (the OFFSET clause) :param bool scalar: whether only the first column shall be returned :returns: the content of the table as a dictionary :rtype: dict or OrderedDict :raises TypeError: the table name has not been specified :raises KeyError: keyname(s) are invalid or not part of the result :raises pg.ProgrammingError: no keyname(s) and table has no primary key This method is similar to :meth:`DB.get_as_list`, but returns the table as a Python dict instead of a Python list, which can be even more convenient. The primary key column(s) of the table will be used as the keys of the dictionary, while the other column(s) will be the corresponding values. The keys will be named tuples if the table has a composite primary key. The rows will be also named tuples unless the *scalar* option has been set to *True*. With the optional parameter *keyname* you can specify a different set of columns to be used as the keys of the dictionary. If the Python version supports it, the dictionary will be an *OrderedDict* using the order specified with the *order* parameter or the key column(s) if not specified. You can set *order* to *False* if you don't care about the ordering. In this case the returned dictionary will be an ordinary one. .. 
versionadded:: 5.0 escape_literal/identifier/string/bytea -- escape for SQL -------------------------------------------------------- The following methods escape text or binary strings so that they can be inserted directly into an SQL command. Except for :meth:`DB.escape_bytea`, you don't need to call these methods for the strings passed as parameters to :meth:`DB.query`. You also don't need to call any of these methods when storing data using :meth:`DB.insert` and similar. .. method:: DB.escape_literal(string) Escape a string for use within SQL as a literal constant :param str string: the string that is to be escaped :returns: the escaped string :rtype: str This method escapes a string for use within an SQL command. This is useful when inserting data values as literal constants in SQL commands. Certain characters (such as quotes and backslashes) must be escaped to prevent them from being interpreted specially by the SQL parser. .. versionadded:: 4.1 .. method:: DB.escape_identifier(string) Escape a string for use within SQL as an identifier :param str string: the string that is to be escaped :returns: the escaped string :rtype: str This method escapes a string for use as an SQL identifier, such as a table, column, or function name. This is useful when a user-supplied identifier might contain special characters that would otherwise be misinterpreted by the SQL parser, or when the identifier might contain upper case characters whose case should be preserved. .. versionadded:: 4.1 .. method:: DB.escape_string(string) Escape a string for use within SQL :param str string: the string that is to be escaped :returns: the escaped string :rtype: str Similar to the module function :func:`pg.escape_string` with the same name, but the behavior of this method is adjusted depending on the connection properties (such as character encoding). .. 
method:: DB.escape_bytea(datastring) Escape binary data for use within SQL as type ``bytea`` :param str datastring: string containing the binary data that is to be escaped :returns: the escaped string :rtype: str Similar to the module function :func:`pg.escape_bytea` with the same name, but the behavior of this method is adjusted depending on the connection properties (in particular, whether standard-conforming strings are enabled). unescape_bytea -- unescape data retrieved from the database ----------------------------------------------------------- .. method:: DB.unescape_bytea(string) Unescape ``bytea`` data that has been retrieved as text :param string: the ``bytea`` data string that has been retrieved as text :returns: byte string containing the binary data :rtype: bytes Converts an escaped string representation of binary data stored as ``bytea`` into the raw byte string representing the binary data -- this is the reverse of :meth:`DB.escape_bytea`. Since the :class:`Query` results will already return unescaped byte strings, you normally don't have to use this method. encode/decode_json -- encode and decode JSON data ------------------------------------------------- The following methods can be used to encode and decode data in `JSON <http://www.json.org/>`_ format. .. method:: DB.encode_json(obj) Encode a Python object for use within SQL as type ``json`` or ``jsonb`` :param obj: Python object that shall be encoded to JSON format :type obj: dict, list or None :returns: string representation of the Python object in JSON format :rtype: str This method serializes a Python object into a JSON formatted string that can be used within SQL. You don't need to use this method on the data stored with :meth:`DB.insert` and similar, only if you store the data directly as part of an SQL command or parameter with :meth:`DB.query`. This is the same as the :func:`json.dumps` function from the standard library. .. versionadded:: 5.0 .. 
method:: DB.decode_json(string) Decode ``json`` or ``jsonb`` data that has been retrieved as text :param string: JSON formatted string shall be decoded into a Python object :type string: str :returns: Python object representing the JSON formatted string :rtype: dict, list or None This method deserializes a JSON formatted string retrieved as text from the database to a Python object. You normally don't need to use this method as JSON data is automatically decoded by PyGreSQL. If you don't want the data to be decoded, then you can cast ``json`` or ``jsonb`` columns to ``text`` in PostgreSQL or you can set the decoding function to *None* or a different function using :func:`pg.set_jsondecode`. By default this is the same as the :func:`json.loads` function from the standard library. .. versionadded:: 5.0 use_regtypes -- choose usage of registered type names ----------------------------------------------------- .. method:: DB.use_regtypes([regtypes]) Determine whether registered type names shall be used :param bool regtypes: if passed, set whether registered type names shall be used :returns: whether registered type names are used The :meth:`DB.get_attnames` method can return either simplified "classic" type names (the default) or more fine-grained "registered" type names. Which kind of type names is used can be changed by calling :meth:`DB.use_regtypes`. If you pass a boolean, it sets whether registered type names shall be used. The method can also be used to check through its return value whether registered type names are currently used. .. versionadded:: 4.1 notification_handler -- create a notification handler ----------------------------------------------------- .. 
class:: DB.notification_handler(event, callback, [arg_dict], [timeout], [stop_event]) Create a notification handler instance :param str event: the name of an event to listen for :param callback: a callback function :param dict arg_dict: an optional dictionary for passing arguments :param timeout: the time-out when waiting for notifications :type timeout: int, float or None :param str stop_event: an optional different name to be used as stop event This method creates a :class:`pg.NotificationHandler` object using the :class:`DB` connection as explained under :doc:`notification`. .. versionadded:: 4.1.1 Attributes of the DB wrapper class ---------------------------------- .. attribute:: DB.db The wrapped :class:`Connection` object You normally don't need this, since all of the members can be accessed from the :class:`DB` wrapper class as well. .. attribute:: DB.dbname The name of the database that the connection is using .. attribute:: DB.dbtypes A dictionary with the various type names for the PostgreSQL types This can be used for getting more information on the PostgreSQL database types or changing the typecast functions used for the connection. See the description of the :class:`DbTypes` class for details. .. versionadded:: 5.0 .. attribute:: DB.adapter A class with some helper functions for adapting parameters This can be used for building queries with parameters. You normally will not need this, as you can use the :class:`DB.query_formatted` method. .. versionadded:: 5.0 PyGreSQL-5.1/docs/contents/pg/introduction.rst0000644000175100077410000000147513466770070021406 0ustar darcypyg00000000000000Introduction ============ You may either choose to use the "classic" PyGreSQL interface provided by the :mod:`pg` module or else the newer DB-API 2.0 compliant interface provided by the :mod:`pgdb` module. The following part of the documentation covers only the older :mod:`pg` API. 
The :mod:`pg` module handles three types of objects, - the :class:`Connection` instances, which handle the connection and all the requests to the database, - the :class:`LargeObject` instances, which handle all the accesses to PostgreSQL large objects, - the :class:`Query` instances that handle query results and it provides a convenient wrapper class :class:`DB` for the basic :class:`Connection` class. .. seealso:: If you want to see a simple example of the use of some of these functions, see the :doc:`../examples` page. PyGreSQL-5.1/docs/contents/pg/db_types.rst0000644000175100077410000000741613466770070020477 0ustar darcypyg00000000000000DbTypes -- The internal cache for database types ================================================ .. py:currentmodule:: pg .. class:: DbTypes .. versionadded:: 5.0 The :class:`DbTypes` object is essentially a dictionary mapping PostgreSQL internal type names and type OIDs to PyGreSQL "type names" (which are also returned by :meth:`DB.get_attnames` as dictionary values). These type names are strings which are equal to either the simple PyGreSQL names or to the more fine-grained registered PostgreSQL type names if these have been enabled with :meth:`DB.use_regtypes`. Besides being strings, they carry additional information about the associated PostgreSQL type in the following attributes: - *oid* -- the PostgreSQL type OID - *pgtype* -- the internal PostgreSQL data type name - *regtype* -- the registered PostgreSQL data type name - *simple* -- the more coarse-grained PyGreSQL type name - *typtype* -- `b` = base type, `c` = composite type etc. - *category* -- `A` = Array, `b` =Boolean, `C` = Composite etc. - *delim* -- delimiter for array types - *relid* -- corresponding table for composite types - *attnames* -- attributes for composite types For details, see the PostgreSQL documentation on `pg_type `_. In addition to the dictionary methods, the :class:`DbTypes` class also provides the following methods: .. 
method:: DbTypes.get_attnames(typ) Get the names and types of the fields of composite types :param typ: PostgreSQL type name or OID of a composite type :type typ: str or int :returns: an ordered dictionary mapping field names to type names .. method:: DbTypes.get_typecast(typ) Get the cast function for the given database type :param str typ: PostgreSQL type name :returns: the typecast function for the specified type :rtype: function or None .. method:: DbTypes.set_typecast(typ, cast) Set a typecast function for the given database type(s) :param typ: PostgreSQL type name or list of type names :type typ: str or list :param cast: the typecast function to be set for the specified type(s) :type typ: str or int The typecast function must take one string object as argument and return a Python object into which the PostgreSQL type shall be casted. If the function takes another parameter named *connection*, then the current database connection will also be passed to the typecast function. This may sometimes be necessary to look up certain database settings. .. method:: DbTypes.reset_typecast([typ]) Reset the typecasts for the specified (or all) type(s) to their defaults :param str typ: PostgreSQL type name or list of type names, or None to reset all typecast functions :type typ: str, list or None .. method:: DbTypes.typecast(value, typ) Cast the given value according to the given database type :param str typ: PostgreSQL type name or type code :returns: the casted value .. note:: Note that :class:`DbTypes` object is always bound to a database connection. You can also get and set and reset typecast functions on a global level using the functions :func:`pg.get_typecast` and :func:`pg.set_typecast`. If you do this, the current database connections will continue to use their already cached typecast functions unless you reset the typecast functions by calling the :meth:`DbTypes.reset_typecast` method on :attr:`DB.dbtypes` objects of the running connections. 
Also note that the typecasting for all of the basic types happens already in the C low-level extension module. The typecast functions that can be set with the above methods are only called for the types that are not already supported by the C extension. PyGreSQL-5.1/docs/contents/pg/module.rst0000644000175100077410000006706313466770070020157 0ustar darcypyg00000000000000Module functions and constants ============================== .. py:currentmodule:: pg The :mod:`pg` module defines a few functions that allow to connect to a database and to define "default variables" that override the environment variables used by PostgreSQL. These "default variables" were designed to allow you to handle general connection parameters without heavy code in your programs. You can prompt the user for a value, put it in the default variable, and forget it, without having to modify your environment. The support for default variables can be disabled by setting the ``-DNO_DEF_VAR`` option in the Python setup file. Methods relative to this are specified by the tag [DV]. All variables are set to ``None`` at module initialization, specifying that standard environment variables should be used. connect -- Open a PostgreSQL connection --------------------------------------- .. 
function:: connect([dbname], [host], [port], [opt], [user], [passwd]) Open a :mod:`pg` connection :param dbname: name of connected database (*None* = :data:`defbase`) :type dbname: str or None :param host: name of the server host (*None* = :data:`defhost`) :type host: str or None :param port: port used by the database server (-1 = :data:`defport`) :type port: int :param opt: connection options (*None* = :data:`defopt`) :type opt: str or None :param user: PostgreSQL user (*None* = :data:`defuser`) :type user: str or None :param passwd: password for user (*None* = :data:`defpasswd`) :type passwd: str or None :returns: If successful, the :class:`Connection` handling the connection :rtype: :class:`Connection` :raises TypeError: bad argument type, or too many arguments :raises SyntaxError: duplicate argument definition :raises pg.InternalError: some error occurred during pg connection definition :raises Exception: (all exceptions relative to object allocation) This function opens a connection to a specified database on a given PostgreSQL server. You can use keywords here, as described in the Python tutorial. The names of the keywords are the name of the parameters given in the syntax line. The ``opt`` parameter can be used to pass command-line options to the server. For a precise description of the parameters, please refer to the PostgreSQL user manual. If you want to add additional parameters not specified here, you must pass a connection string or a connection URI instead of the ``dbname`` (as in ``con3`` and ``con4`` in the following example). Example:: import pg con1 = pg.connect('testdb', 'myhost', 5432, None, 'bob', None) con2 = pg.connect(dbname='testdb', host='myhost', user='bob') con3 = pg.connect('host=myhost user=bob dbname=testdb connect_timeout=10') con4 = pg.connect('postgresql://bob@myhost/testdb?connect_timeout=10') get/set_defhost -- default server host [DV] ------------------------------------------- .. 
function:: get_defhost() Get the default host :returns: the current default host specification :rtype: str or None :raises TypeError: too many arguments This method returns the current default host specification, or ``None`` if the environment variables should be used. Environment variables won't be looked up. .. function:: set_defhost(host) Set the default host :param host: the new default host specification :type host: str or None :returns: the previous default host specification :rtype: str or None :raises TypeError: bad argument type, or too many arguments This method sets the default host value for new connections. If ``None`` is supplied as parameter, environment variables will be used in future connections. It returns the previous setting for default host. get/set_defport -- default server port [DV] ------------------------------------------- .. function:: get_defport() Get the default port :returns: the current default port specification :rtype: int :raises TypeError: too many arguments This method returns the current default port specification, or ``None`` if the environment variables should be used. Environment variables won't be looked up. .. function:: set_defport(port) Set the default port :param port: the new default port :type port: int :returns: previous default port specification :rtype: int or None This method sets the default port value for new connections. If -1 is supplied as parameter, environment variables will be used in future connections. It returns the previous setting for default port. get/set_defopt -- default connection options [DV] -------------------------------------------------- .. function:: get_defopt() Get the default connection options :returns: the current default options specification :rtype: str or None :raises TypeError: too many arguments This method returns the current default connection options specification, or ``None`` if the environment variables should be used. Environment variables won't be looked up. .. 
function:: set_defopt(options) Set the default connection options :param options: the new default connection options :type options: str or None :returns: previous default options specification :rtype: str or None :raises TypeError: bad argument type, or too many arguments This method sets the default connection options value for new connections. If ``None`` is supplied as parameter, environment variables will be used in future connections. It returns the previous setting for default options. get/set_defbase -- default database name [DV] --------------------------------------------- .. function:: get_defbase() Get the default database name :returns: the current default database name specification :rtype: str or None :raises TypeError: too many arguments This method returns the current default database name specification, or ``None`` if the environment variables should be used. Environment variables won't be looked up. .. function:: set_defbase(base) Set the default database name :param base: the new default base name :type base: str or None :returns: the previous default database name specification :rtype: str or None :raises TypeError: bad argument type, or too many arguments This method sets the default database name value for new connections. If ``None`` is supplied as parameter, environment variables will be used in future connections. It returns the previous setting for default database name. get/set_defuser -- default database user [DV] --------------------------------------------- .. function:: get_defuser() Get the default database user :returns: the current default database user specification :rtype: str or None :raises TypeError: too many arguments This method returns the current default database user specification, or ``None`` if the environment variables should be used. Environment variables won't be looked up. .. 
function:: set_defuser(user) Set the default database user :param user: the new default database user :type user: str or None :returns: the previous default database user specification :rtype: str or None :raises TypeError: bad argument type, or too many arguments This method sets the default database user name for new connections. If ``None`` is supplied as parameter, environment variables will be used in future connections. It returns the previous setting for default user. get/set_defpasswd -- default database password [DV] --------------------------------------------------- .. function:: get_defpasswd() Get the default database password :returns: the current default database password specification :rtype: str or None :raises TypeError: too many arguments This method returns the current default database password specification, or ``None`` if the environment variables should be used. Environment variables won't be looked up. .. function:: set_defpasswd(passwd) Set the default database password :param passwd: the new default database password :type passwd: str or None :returns: the previous default database password specification :rtype: str or None :raises TypeError: bad argument type, or too many arguments This method sets the default database password for new connections. If ``None`` is supplied as parameter, environment variables will be used in future connections. It returns the previous setting for default password. escape_string -- escape a string for use within SQL --------------------------------------------------- .. function:: escape_string(string) Escape a string for use within SQL :param str string: the string that is to be escaped :returns: the escaped string :rtype: str :raises TypeError: bad argument type, or too many arguments This function escapes a string for use within an SQL command. This is useful when inserting data values as literal constants in SQL commands. 
Certain characters (such as quotes and backslashes) must be escaped to prevent them from being interpreted specially by the SQL parser. :func:`escape_string` performs this operation. Note that there is also a :class:`Connection` method with the same name which takes connection properties into account. .. note:: It is especially important to do proper escaping when handling strings that were received from an untrustworthy source. Otherwise there is a security risk: you are vulnerable to "SQL injection" attacks wherein unwanted SQL commands are fed to your database. Example:: name = input("Name? ") phone = con.query("select phone from employees where name='%s'" % escape_string(name)).getresult() escape_bytea -- escape binary data for use within SQL ----------------------------------------------------- .. function:: escape_bytea(datastring) escape binary data for use within SQL as type ``bytea`` :param str datastring: string containing the binary data that is to be escaped :returns: the escaped string :rtype: str :raises TypeError: bad argument type, or too many arguments Escapes binary data for use within an SQL command with the type ``bytea``. As with :func:`escape_string`, this is only used when inserting data directly into an SQL command string. Note that there is also a :class:`Connection` method with the same name which takes connection properties into account. Example:: picture = open('garfield.gif', 'rb').read() con.query("update pictures set img='%s' where name='Garfield'" % escape_bytea(picture)) unescape_bytea -- unescape data that has been retrieved as text --------------------------------------------------------------- .. 
function:: unescape_bytea(string) Unescape ``bytea`` data that has been retrieved as text :param str datastring: the ``bytea`` data string that has been retrieved as text :returns: byte string containing the binary data :rtype: bytes :raises TypeError: bad argument type, or too many arguments Converts an escaped string representation of binary data stored as ``bytea`` into the raw byte string representing the binary data -- this is the reverse of :func:`escape_bytea`. Since the :class:`Query` results will already return unescaped byte strings, you normally don't have to use this method. Note that there is also a :class:`DB` method with the same name which does exactly the same. get/set_decimal -- decimal type to be used for numeric values ------------------------------------------------------------- .. function:: get_decimal() Get the decimal type to be used for numeric values :returns: the Python class used for PostgreSQL numeric values :rtype: class This function returns the Python class that is used by PyGreSQL to hold PostgreSQL numeric values. The default class is :class:`decimal.Decimal` if available, otherwise the :class:`float` type is used. .. function:: set_decimal(cls) Set a decimal type to be used for numeric values :param class cls: the Python class to be used for PostgreSQL numeric values This function can be used to specify the Python class that shall be used by PyGreSQL to hold PostgreSQL numeric values. The default class is :class:`decimal.Decimal` if available, otherwise the :class:`float` type is used. get/set_decimal_point -- decimal mark used for monetary values -------------------------------------------------------------- .. function:: get_decimal_point() Get the decimal mark used for monetary values :returns: string with one character representing the decimal mark :rtype: str This function returns the decimal mark used by PyGreSQL to interpret PostgreSQL monetary values when converting them to decimal numbers. 
The default setting is ``'.'`` as a decimal point. This setting is not adapted automatically to the locale used by PostgreSQL, but you can use :func:`set_decimal_point()` to set a different decimal mark manually. A return value of ``None`` means monetary values are not interpreted as decimal numbers, but returned as strings including the formatting and currency. .. versionadded:: 4.1.1 .. function:: set_decimal_point(string) Specify which decimal mark is used for interpreting monetary values :param str string: string with one character representing the decimal mark This function can be used to specify the decimal mark used by PyGreSQL to interpret PostgreSQL monetary values. The default value is '.' as a decimal point. This value is not adapted automatically to the locale used by PostgreSQL, so if you are dealing with a database set to a locale that uses a ``','`` instead of ``'.'`` as the decimal point, then you need to call ``set_decimal_point(',')`` to have PyGreSQL interpret monetary values correctly. If you don't want money values to be converted to decimal numbers, then you can call ``set_decimal_point(None)``, which will cause PyGreSQL to return monetary values as strings including their formatting and currency. .. versionadded:: 4.1.1 get/set_bool -- whether boolean values are returned as bool objects ------------------------------------------------------------------- .. function:: get_bool() Check whether boolean values are returned as bool objects :returns: whether or not bool objects will be returned :rtype: bool This function checks whether PyGreSQL returns PostgreSQL boolean values converted to Python bool objects, or as ``'f'`` and ``'t'`` strings which are the values used internally by PostgreSQL. By default, conversion to bool objects is activated, but you can disable this with the :func:`set_bool` function. .. versionadded:: 4.2 .. 
function:: set_bool(on) Set whether boolean values are returned as bool objects :param on: whether or not bool objects shall be returned This function can be used to specify whether PyGreSQL shall return PostgreSQL boolean values converted to Python bool objects, or as ``'f'`` and ``'t'`` strings which are the values used internally by PostgreSQL. By default, conversion to bool objects is activated, but you can disable this by calling ``set_bool(False)``. .. versionadded:: 4.2 .. versionchanged:: 5.0 Boolean values had been returned as string by default in earlier versions. get/set_array -- whether arrays are returned as list objects ------------------------------------------------------------ .. function:: get_array() Check whether arrays are returned as list objects :returns: whether or not list objects will be returned :rtype: bool This function checks whether PyGreSQL returns PostgreSQL arrays converted to Python list objects, or simply as text in the internal special output syntax of PostgreSQL. By default, conversion to list objects is activated, but you can disable this with the :func:`set_array` function. .. versionadded:: 5.0 .. function:: set_array(on) Set whether arrays are returned as list objects :param on: whether or not list objects shall be returned This function can be used to specify whether PyGreSQL shall return PostgreSQL arrays converted to Python list objects, or simply as text in the internal special output syntax of PostgreSQL. By default, conversion to list objects is activated, but you can disable this by calling ``set_array(False)``. .. versionadded:: 5.0 .. versionchanged:: 5.0 Arrays had been always returned as text strings only in earlier versions. get/set_bytea_escaped -- whether bytea data is returned escaped --------------------------------------------------------------- .. 
function:: get_bytea_escaped() Check whether bytea values are returned as escaped strings :returns: whether or not bytea objects will be returned escaped :rtype: bool This function checks whether PyGreSQL returns PostgreSQL ``bytea`` values in escaped form or in unescaped form as byte strings. By default, bytea values will be returned unescaped as byte strings, but you can change this with the :func:`set_bytea_escaped` function. .. versionadded:: 5.0 .. function:: set_bytea_escaped(on) Set whether bytea values are returned as escaped strings :param on: whether or not bytea objects shall be returned escaped This function can be used to specify whether PyGreSQL shall return PostgreSQL ``bytea`` values in escaped form or in unescaped form as byte strings. By default, bytea values will be returned unescaped as byte strings, but you can change this by calling ``set_bytea_escaped(True)``. .. versionadded:: 5.0 .. versionchanged:: 5.0 Bytea data had been returned in escaped form by default in earlier versions. get/set_jsondecode -- decoding JSON format ------------------------------------------ .. function:: get_jsondecode() Get the function that deserializes JSON formatted strings This returns the function used by PyGreSQL to construct Python objects from JSON formatted strings. .. function:: set_jsondecode(func) Set a function that will deserialize JSON formatted strings :param func: the function to be used for deserializing JSON strings You can use this if you do not want to deserialize JSON strings coming in from the database, or if you want to use a different function than the standard function :func:`json.loads` or if you want to use it with parameters different from the default ones. If you set this function to *None*, then the automatic deserialization of JSON strings will be deactivated. .. versionadded:: 5.0 .. versionchanged:: 5.0 JSON data had been always returned as text strings in earlier versions. 
get/set_datestyle -- assume a fixed date style ---------------------------------------------- .. function:: get_datestyle() Get the assumed date style for typecasting This returns the PostgreSQL date style that is silently assumed when typecasting dates or *None* if no fixed date style is assumed, in which case the date style is requested from the database when necessary (this is the default). Note that this method will *not* get the date style that is currently set in the session or in the database. You can get the current setting with the methods :meth:`DB.get_parameter` and :meth:`Connection.parameter`. You can also get the date format corresponding to the current date style by calling :meth:`Connection.date_format`. .. versionadded:: 5.0 .. function:: set_datestyle(datestyle) Set a fixed date style that shall be assumed when typecasting :param str datestyle: the date style that shall be assumed, or *None* if no fixed date style shall be assumed PyGreSQL is able to automatically pick up the right date style for typecasting date values from the database, even if you change it for the current session with a ``SET DateStyle`` command. This happens very effectively without an additional database request being involved. If you still want to have PyGreSQL always assume a fixed date style instead, then you can set one with this function. Note that calling this function will *not* alter the date style of the database or the current session. You can do that by calling the method :meth:`DB.set_parameter` instead. .. versionadded:: 5.0 get/set_typecast -- custom typecasting -------------------------------------- PyGreSQL uses typecast functions to cast the raw data coming from the database to Python objects suitable for the particular database type. These functions take a single string argument that represents the data to be casted and must return the casted value.
PyGreSQL provides through its C extension module basic typecast functions for the common database types, but if you want to add more typecast functions, you can set these using the following functions. .. method:: get_typecast(typ) Get the global cast function for the given database type :param str typ: PostgreSQL type name :returns: the typecast function for the specified type :rtype: function or None .. versionadded:: 5.0 .. method:: set_typecast(typ, cast) Set a global typecast function for the given database type(s) :param typ: PostgreSQL type name or list of type names :type typ: str or list :param cast: the typecast function to be set for the specified type(s) :type cast: callable The typecast function must take one string object as argument and return a Python object into which the PostgreSQL type shall be casted. If the function takes another parameter named *connection*, then the current database connection will also be passed to the typecast function. This may sometimes be necessary to look up certain database settings. .. versionadded:: 5.0 Note that database connections cache types and their cast functions using connection specific :class:`DbTypes` objects. You can also get, set and reset typecast functions on the connection level using the methods :meth:`DbTypes.get_typecast`, :meth:`DbTypes.set_typecast` and :meth:`DbTypes.reset_typecast` of the :attr:`DB.dbtypes` object. This will not affect other connections or future connections. In order to be sure a global change is picked up by a running connection, you must reopen it or call :meth:`DbTypes.reset_typecast` on the :attr:`DB.dbtypes` object. Also note that the typecasting for all of the basic types happens already in the C extension module. The typecast functions that can be set with the above methods are only called for the types that are not already supported by the C extension module.
cast_array/record -- fast parsers for arrays and records -------------------------------------------------------- PostgreSQL returns arrays and records (composite types) using a special output syntax with several quirks that cannot easily and quickly be parsed in Python. Therefore the C extension module provides two fast parsers that allow quickly turning these text representations into Python objects: Arrays will be converted to Python lists, and records to Python tuples. These fast parsers are used automatically by PyGreSQL in order to return arrays and records from database queries as lists and tuples, so you normally don't need to call them directly. You may only need them for typecasting arrays of data types that are not supported by default in PostgreSQL. .. function:: cast_array(string, [cast], [delim]) Cast a string representing a PostgreSQL array to a Python list :param str string: the string with the text representation of the array :param cast: a typecast function for the elements of the array :type cast: callable or None :param delim: delimiter character between adjacent elements :type delim: byte string with a single character :returns: a list representing the PostgreSQL array in Python :rtype: list :raises TypeError: invalid argument types :raises ValueError: error in the syntax of the given array This function takes a *string* containing the text representation of a PostgreSQL array (which may look like ``'{{1,2},{3,4}}'`` for a two-dimensional array), a typecast function *cast* that is called for every element, and an optional delimiter character *delim* (usually a comma), and returns a Python list representing the array (which may be nested like ``[[1, 2], [3, 4]]`` in this example). The cast function must take a single argument which will be the text representation of the element and must output the corresponding Python object that shall be put into the list.
If you don't pass a cast function or set it to *None*, then unprocessed text strings will be returned as elements of the array. If you don't pass a delimiter character, then a comma will be used by default. .. versionadded:: 5.0 .. function:: cast_record(string, [cast], [delim]) Cast a string representing a PostgreSQL record to a Python tuple :param str string: the string with the text representation of the record :param cast: typecast function(s) for the elements of the record :type cast: callable, list or tuple of callables, or None :param delim: delimiter character between adjacent elements :type delim: byte string with a single character :returns: a tuple representing the PostgreSQL record in Python :rtype: tuple :raises TypeError: invalid argument types :raises ValueError: error in the syntax of the given record This function takes a *string* containing the text representation of a PostgreSQL record (which may look like ``'(1,a,2,b)'`` for a record composed of four fields), a typecast function *cast* that is called for every element, or a list or tuple of such functions corresponding to the individual fields of the record, and an optional delimiter character *delim* (usually a comma), and returns a Python tuple representing the record (which may be inhomogeneous like ``(1, 'a', 2, 'b')`` in this example). The cast function(s) must take a single argument which will be the text representation of the element and must output the corresponding Python object that shall be put into the tuple. If you don't pass cast function(s) or pass *None* instead, then unprocessed text strings will be returned as elements of the tuple. If you don't pass a delimiter character, then a comma will be used by default. .. versionadded:: 5.0 Note that besides using parentheses instead of braces, there are other subtle differences in escaping special characters and NULL values between the syntax used for arrays and the one used for composite types, which these functions take into account.
Type helpers ------------ The module provides the following type helper functions. You can wrap parameters with these functions when passing them to :meth:`DB.query` or :meth:`DB.query_formatted` in order to give PyGreSQL a hint about the type of the parameters, if it cannot be derived from the context. .. function:: Bytea(bytes) A wrapper for holding a bytea value .. versionadded:: 5.0 .. function:: HStore(dict) A wrapper for holding an hstore dictionary .. versionadded:: 5.0 .. function:: Json(obj) A wrapper for holding an object serializable to JSON .. versionadded:: 5.0 The following additional type helper is only meaningful when used with :meth:`DB.query_formatted`. It marks a parameter as text that shall be literally included into the SQL. This is useful for passing table names for instance. .. function:: Literal(sql) A wrapper for holding a literal SQL string .. versionadded:: 5.0 Module constants ---------------- Some constants are defined in the module dictionary. They are intended to be used as parameters for methods calls. You should refer to the libpq description in the PostgreSQL user manual for more information about them. These constants are: .. data:: version .. data:: __version__ constants that give the current version .. data:: INV_READ .. data:: INV_WRITE large objects access modes, used by :meth:`Connection.locreate` and :meth:`LargeObject.open` .. data:: SEEK_SET .. data:: SEEK_CUR .. data:: SEEK_END positional flags, used by :meth:`LargeObject.seek` .. data:: TRANS_IDLE .. data:: TRANS_ACTIVE .. data:: TRANS_INTRANS .. data:: TRANS_INERROR .. data:: TRANS_UNKNOWN transaction states, used by :meth:`Connection.transaction` PyGreSQL-5.1/docs/contents/pg/query.rst0000644000175100077410000003175013466770070020031 0ustar darcypyg00000000000000Query methods ============= .. py:currentmodule:: pg .. class:: Query The :class:`Query` object returned by :meth:`Connection.query` and :meth:`DB.query` can be used as an iterable returning rows as tuples. 
You can also directly access row tuples using their index, and get the number of rows with the :func:`len` function. The :class:`Query` class also provides the following methods for accessing the results of the query: getresult -- get query values as list of tuples ----------------------------------------------- .. method:: Query.getresult() Get query values as list of tuples :returns: result values as a list of tuples :rtype: list :raises TypeError: too many (any) parameters :raises MemoryError: internal memory error This method returns query results as a list of tuples. More information about this result may be accessed using :meth:`Query.listfields`, :meth:`Query.fieldname` and :meth:`Query.fieldnum` methods. Note that since PyGreSQL 5.0 this method will return the values of array type columns as Python lists. Since PyGreSQL 5.1 the :class:`Query` can be also used directly as an iterable sequence, i.e. you can iterate over the :class:`Query` object to get the same tuples as returned by :meth:`Query.getresult`. This is slightly more efficient than getting the full list of results, but note that the full result is always fetched from the server anyway when the query is executed. You can also call :func:`len` on a query to find the number of rows in the result, and access row tuples using their index directly on the :class:`Query` object. dictresult/dictiter -- get query values as dictionaries ------------------------------------------------------- .. method:: Query.dictresult() Get query values as list of dictionaries :returns: result values as a list of dictionaries :rtype: list :raises TypeError: too many (any) parameters :raises MemoryError: internal memory error This method returns query results as a list of dictionaries which have the field names as keys. If the query has duplicate field names, you will get the value for the field with the highest index in the query. 
Note that since PyGreSQL 5.0 this method will return the values of array type columns as Python lists. .. method:: Query.dictiter() Get query values as iterable of dictionaries :returns: result values as an iterable of dictionaries :rtype: iterable :raises TypeError: too many (any) parameters :raises MemoryError: internal memory error This method returns query results as an iterable of dictionaries which have the field names as keys. This is slightly more efficient than getting the full list of results as dictionaries, but note that the full result is always fetched from the server anyway when the query is executed. If the query has duplicate field names, you will get the value for the field with the highest index in the query. .. versionadded:: 5.1 namedresult/namediter -- get query values as named tuples --------------------------------------------------------- .. method:: Query.namedresult() Get query values as list of named tuples :returns: result values as a list of named tuples :rtype: list :raises TypeError: too many (any) parameters :raises TypeError: named tuples not supported :raises MemoryError: internal memory error This method returns query results as a list of named tuples with proper field names. Column names in the database that are not valid as field names for named tuples (particularly, names starting with an underscore) are automatically renamed to valid positional names. Note that since PyGreSQL 5.0 this method will return the values of array type columns as Python lists. .. versionadded:: 4.1 .. method:: Query.namediter() Get query values as iterable of named tuples :returns: result values as an iterable of named tuples :rtype: iterable :raises TypeError: too many (any) parameters :raises TypeError: named tuples not supported :raises MemoryError: internal memory error This method returns query results as an iterable of named tuples with proper field names.
This is slightly more efficient than getting the full list of results as named tuples, but note that the full result is always fetched from the server anyway when the query is executed. Column names in the database that are not valid as field names for named tuples (particularly, names starting with an underscore) are automatically renamed to valid positional names. .. versionadded:: 5.1 scalarresult/scalariter -- get query values as scalars ------------------------------------------------------ .. method:: Query.scalarresult() Get first fields from query result as list of scalar values :returns: first fields from result as a list of scalar values :rtype: list :raises TypeError: too many (any) parameters :raises MemoryError: internal memory error This method returns the first fields from the query results as a list of scalar values in the order returned by the server. .. versionadded:: 5.1 .. method:: Query.scalariter() Get first fields from query result as iterable of scalar values :returns: first fields from result as an iterable of scalar values :rtype: iterable :raises TypeError: too many (any) parameters :raises MemoryError: internal memory error This method returns the first fields from the query results as an iterable of scalar values in the order returned by the server. This is slightly more efficient than getting the full list of results as rows or scalar values, but note that the full result is always fetched from the server anyway when the query is executed. .. versionadded:: 5.1 one/onedict/onenamed/onescalar -- get one result of a query ----------------------------------------------------------- .. method:: Query.one() Get one row from the result of a query as a tuple :returns: next row from the query results as a tuple of fields :rtype: tuple or None :raises TypeError: too many (any) parameters :raises MemoryError: internal memory error Returns only one row from the result as a tuple of fields. This method can be called multiple times to return more rows.
It returns None if the result does not contain one more row. .. versionadded:: 5.1 .. method:: Query.onedict() Get one row from the result of a query as a dictionary :returns: next row from the query results as a dictionary :rtype: dict or None :raises TypeError: too many (any) parameters :raises MemoryError: internal memory error Returns only one row from the result as a dictionary with the field names used as the keys. This method can be called multiple times to return more rows. It returns None if the result does not contain one more row. .. versionadded:: 5.1 .. method:: Query.onenamed() Get one row from the result of a query as named tuple :returns: next row from the query results as a named tuple :rtype: named tuple or None :raises TypeError: too many (any) parameters :raises MemoryError: internal memory error Returns only one row from the result as a named tuple with proper field names. Column names in the database that are not valid as field names for named tuples (particularly, names starting with an underscore) are automatically renamed to valid positional names. This method can be called multiple times to return more rows. It returns None if the result does not contain one more row. .. versionadded:: 5.1 .. method:: Query.onescalar() Get one row from the result of a query as scalar value :returns: next row from the query results as a scalar value :rtype: type of first field or None :raises TypeError: too many (any) parameters :raises MemoryError: internal memory error Returns the first field of the next row from the result as a scalar value. This method can be called multiple times to return more rows as scalars. It returns None if the result does not contain one more row. .. versionadded:: 5.1 single/singledict/singlenamed/singlescalar -- get single result of a query -------------------------------------------------------------------------- .. 
method:: Query.single() Get single row from the result of a query as a tuple :returns: single row from the query results as a tuple of fields :rtype: tuple :raises InvalidResultError: result does not have exactly one row :raises TypeError: too many (any) parameters :raises MemoryError: internal memory error Returns a single row from the result as a tuple of fields. This method returns the same single row when called multiple times. It raises an :exc:`pg.InvalidResultError` if the result does not have exactly one row. More specifically, this will be of type :exc:`pg.NoResultError` if it is empty and of type :exc:`pg.MultipleResultsError` if it has multiple rows. .. versionadded:: 5.1 .. method:: Query.singledict() Get single row from the result of a query as a dictionary :returns: single row from the query results as a dictionary :rtype: dict :raises InvalidResultError: result does not have exactly one row :raises TypeError: too many (any) parameters :raises MemoryError: internal memory error Returns a single row from the result as a dictionary with the field names used as the keys. This method returns the same single row when called multiple times. It raises an :exc:`pg.InvalidResultError` if the result does not have exactly one row. More specifically, this will be of type :exc:`pg.NoResultError` if it is empty and of type :exc:`pg.MultipleResultsError` if it has multiple rows. .. versionadded:: 5.1 .. method:: Query.singlenamed() Get single row from the result of a query as named tuple :returns: single row from the query results as a named tuple :rtype: named tuple :raises InvalidResultError: result does not have exactly one row :raises TypeError: too many (any) parameters :raises MemoryError: internal memory error Returns single row from the result as a named tuple with proper field names. 
Column names in the database that are not valid as field names for named tuples (particularly, names starting with an underscore) are automatically renamed to valid positional names. This method returns the same single row when called multiple times. It raises an :exc:`pg.InvalidResultError` if the result does not have exactly one row. More specifically, this will be of type :exc:`pg.NoResultError` if it is empty and of type :exc:`pg.MultipleResultsError` if it has multiple rows. .. versionadded:: 5.1 .. method:: Query.singlescalar() Get single row from the result of a query as scalar value :returns: single row from the query results as a scalar value :rtype: type of first field :raises InvalidResultError: result does not have exactly one row :raises TypeError: too many (any) parameters :raises MemoryError: internal memory error Returns the first field of a single row from the result as a scalar value. This method returns the same single row as scalar when called multiple times. It raises an :exc:`pg.InvalidResultError` if the result does not have exactly one row. More specifically, this will be of type :exc:`pg.NoResultError` if it is empty and of type :exc:`pg.MultipleResultsError` if it has multiple rows. .. versionadded:: 5.1 listfields -- list fields names of previous query result -------------------------------------------------------- .. method:: Query.listfields() List fields names of previous query result :returns: field names :rtype: list :raises TypeError: too many parameters This method returns the list of field names defined for the query result. The fields are in the same order as the result values. fieldname, fieldnum -- field name/number conversion --------------------------------------------------- .. 
method:: Query.fieldname(num) Get field name from its number :param int num: field number :returns: field name :rtype: str :raises TypeError: invalid connection, bad parameter type, or too many parameters :raises ValueError: invalid field number This method allows to find a field name from its rank number. It can be useful for displaying a result. The fields are in the same order as the result values. .. method:: Query.fieldnum(name) Get field number from its name :param str name: field name :returns: field number :rtype: int :raises TypeError: invalid connection, bad parameter type, or too many parameters :raises ValueError: unknown field name This method returns a field number given its name. It can be used to build a function that converts result list strings to their correct type, using a hardcoded table definition. The number returned is the field rank in the query result. ntuples -- return number of tuples in query object -------------------------------------------------- .. method:: Query.ntuples() Return number of tuples in query object :returns: number of tuples in :class:`Query` :rtype: int :raises TypeError: Too many arguments. This method returns the number of tuples in the query result. .. deprecated:: 5.1 You can use the normal :func:`len` function instead. PyGreSQL-5.1/docs/contents/pg/large_objects.rst0000644000175100077410000001435413466770070021470 0ustar darcypyg00000000000000LargeObject -- Large Objects ============================ .. py:currentmodule:: pg .. class:: LargeObject Objects that are instances of the class :class:`LargeObject` are used to handle all the requests concerning a PostgreSQL large object. These objects embed and hide all the "recurrent" variables (object OID and connection), exactly in the same way :class:`Connection` instances do, thus only keeping significant parameters in function calls. 
The :class:`LargeObject` instance keeps a reference to the :class:`Connection` object used for its creation, sending requests through it with its parameters. Any modification but dereferencing the :class:`Connection` object will thus affect the :class:`LargeObject` instance. Dereferencing the initial :class:`Connection` object is not a problem since Python won't deallocate it before the :class:`LargeObject` instance dereferences it. All functions return a generic error message on call error, whatever the exact error was. The :attr:`error` attribute of the object allows to get the exact error message. See also the PostgreSQL programmer's guide for more information about the large object interface. open -- open a large object --------------------------- .. method:: LargeObject.open(mode) Open a large object :param int mode: open mode definition :rtype: None :raises TypeError: invalid connection, bad parameter type, or too many parameters :raises IOError: already opened object, or open error This method opens a large object for reading/writing, in the same way as the Unix open() function. The mode value can be obtained by OR-ing the constants defined in the :mod:`pg` module (:const:`INV_READ`, :const:`INV_WRITE`). close -- close a large object ----------------------------- .. method:: LargeObject.close() Close a large object :rtype: None :raises TypeError: invalid connection :raises TypeError: too many parameters :raises IOError: object is not opened, or close error This method closes a previously opened large object, in the same way as the Unix close() function. read, write, tell, seek, unlink -- file-like large object handling ------------------------------------------------------------------ ..
method:: LargeObject.read(size) Read data from large object :param int size: maximal size of the buffer to be read :returns: the read buffer :rtype: bytes :raises TypeError: invalid connection, invalid object, bad parameter type, or too many parameters :raises ValueError: if `size` is negative :raises IOError: object is not opened, or read error This function allows to read data from a large object, starting at current position. .. method:: LargeObject.write(string) Write data to large object :param bytes string: string buffer to be written :rtype: None :raises TypeError: invalid connection, bad parameter type, or too many parameters :raises IOError: object is not opened, or write error This function allows to write data to a large object, starting at current position. .. method:: LargeObject.seek(offset, whence) Change current position in large object :param int offset: position offset :param int whence: positional parameter :returns: new position in object :rtype: int :raises TypeError: invalid connection or invalid object, bad parameter type, or too many parameters :raises IOError: object is not opened, or seek error This method allows to move the position cursor in the large object. The valid values for the whence parameter are defined as constants in the :mod:`pg` module (:const:`SEEK_SET`, :const:`SEEK_CUR`, :const:`SEEK_END`). .. method:: LargeObject.tell() Return current position in large object :returns: current position in large object :rtype: int :raises TypeError: invalid connection or invalid object :raises TypeError: too many parameters :raises IOError: object is not opened, or seek error This method allows to get the current position in the large object. .. method:: LargeObject.unlink() Delete large object :rtype: None :raises TypeError: invalid connection or invalid object :raises TypeError: too many parameters :raises IOError: object is not closed, or unlink error This method unlinks (deletes) the PostgreSQL large object.
size -- get the large object size --------------------------------- .. method:: LargeObject.size() Return the large object size :returns: the large object size :rtype: int :raises TypeError: invalid connection or invalid object :raises TypeError: too many parameters :raises IOError: object is not opened, or seek/tell error This (composite) method allows to get the size of a large object. It was implemented because this function is very useful for a web interfaced database. Currently, the large object needs to be opened first. export -- save a large object to a file --------------------------------------- .. method:: LargeObject.export(name) Export a large object to a file :param str name: file to be created :rtype: None :raises TypeError: invalid connection or invalid object, bad parameter type, or too many parameters :raises IOError: object is not closed, or export error This method allows to dump the content of a large object in a very simple way. The exported file is created on the host of the program, not the server host. Object attributes ----------------- :class:`LargeObject` objects define a read-only set of attributes that allow to get some information about it. These attributes are: .. attribute:: LargeObject.oid the OID associated with the large object (int) .. attribute:: LargeObject.pgcnx the :class:`Connection` object associated with the large object .. attribute:: LargeObject.error the last warning/error message of the connection (str) .. warning:: In multi-threaded environments, :attr:`LargeObject.error` may be modified by another thread using the same :class:`Connection`. Remember these objects are shared, not duplicated. You should provide some locking if you want to check this. The :attr:`LargeObject.oid` attribute is very interesting, because it allows you to reuse the OID later, creating the :class:`LargeObject` object with a :meth:`Connection.getlo` method call.
PyGreSQL-5.1/docs/contents/pg/index.rst0000644000175100077410000000047313466770070017771 0ustar darcypyg00000000000000-------------------------------------------- :mod:`pg` --- The Classic PyGreSQL Interface -------------------------------------------- .. module:: pg Contents ======== .. toctree:: introduction module connection db_wrapper query large_objects notification db_types adaptation PyGreSQL-5.1/docs/contents/pg/notification.rst0000644000175100077410000001070013466770070021342 0ustar darcypyg00000000000000The Notification Handler ======================== .. py:currentmodule:: pg PyGreSQL comes with a client-side asynchronous notification handler that was based on the ``pgnotify`` module written by Ng Pheng Siong. .. versionadded:: 4.1.1 Instantiating the notification handler -------------------------------------- .. class:: NotificationHandler(db, event, callback, [arg_dict], [timeout], [stop_event]) Create an instance of the notification handler :param int db: the database connection :type db: :class:`Connection` :param str event: the name of an event to listen for :param callback: a callback function :param dict arg_dict: an optional dictionary for passing arguments :param timeout: the time-out when waiting for notifications :type timeout: int, float or None :param str stop_event: an optional different name to be used as stop event You can also create an instance of the NotificationHandler using the :class:`DB.connection_handler` method. In this case you don't need to pass a database connection because the :class:`DB` connection itself will be used as the database connection for the notification handler. You must always pass the name of an *event* (notification channel) to listen for and a *callback* function. You can also specify a dictionary *arg_dict* that will be passed as the single argument to the callback function, and a *timeout* value in seconds (a floating point number denotes fractions of seconds).
If it is absent or *None*, the callers will never time out. If the time-out is reached, the callback function will be called with a single argument that is *None*. If you set the *timeout* to ``0``, the handler will poll notifications synchronously and return. You can specify the name of the event that will be used to signal the handler to stop listening as *stop_event*. By default, it will be the event name prefixed with ``'stop_'``. All of the parameters will be also available as attributes of the created notification handler object. Invoking the notification handler --------------------------------- To invoke the notification handler, just call the instance without passing any parameters. The handler is a loop that listens for notifications on the event and stop event channels. When either of these notifications are received, its associated *pid*, *event* and *extra* (the payload passed with the notification) are inserted into its *arg_dict* dictionary and the callback is invoked with this dictionary as a single argument. When the handler receives a stop event, it stops listening to both events and returns. In the special case that the timeout of the handler has been set to ``0``, the handler will poll all events synchronously and return. It will keep listening until it receives a stop event. .. warning:: If you run this loop in another thread, don't use the same database connection for database operations in the main thread. Sending notifications --------------------- You can send notifications by either running ``NOTIFY`` commands on the database directly, or using the following method: ..
method:: NotificationHandler.notify([db], [stop], [payload]) Generate a notification :param int db: the database connection for sending the notification :type db: :class:`Connection` :param bool stop: whether to produce a normal event or a stop event :param str payload: an optional payload to be sent with the notification This method sends a notification event together with an optional *payload*. If you set the *stop* flag, a stop notification will be sent instead of a normal notification. This will cause the handler to stop listening. .. warning:: If the notification handler is running in another thread, you must pass a different database connection since PyGreSQL database connections are not thread-safe. Auxiliary methods ----------------- .. method:: NotificationHandler.listen() Start listening for the event and the stop event This method is called implicitly when the handler is invoked. .. method:: NotificationHandler.unlisten() Stop listening for the event and the stop event This method is called implicitly when the handler receives a stop event or when it is closed or deleted. .. method:: NotificationHandler.close() Stop listening and close the database connection You can call this method instead of :meth:`NotificationHandler.unlisten` if you want to close not only the handler, but also the database connection it was created with.PyGreSQL-5.1/docs/contents/pg/connection.rst0000644000175100077410000004754313466770070021032 0ustar darcypyg00000000000000Connection -- The connection object =================================== .. py:currentmodule:: pg .. class:: Connection This object handles a connection to a PostgreSQL database. It embeds and hides all the parameters that define this connection, thus just leaving really significant parameters in function calls. .. note:: Some methods give direct access to the connection socket. 
*Do not use them unless you really know what you are doing.* If you prefer disabling them, set the ``-DNO_DIRECT`` option in the Python setup file. These methods are specified by the tag [DA]. .. note:: Some other methods give access to large objects (refer to PostgreSQL user manual for more information about these). If you want to forbid access to these from the module, set the ``-DNO_LARGE`` option in the Python setup file. These methods are specified by the tag [LO]. query -- execute a SQL command string ------------------------------------- .. method:: Connection.query(command, [args]) Execute a SQL command string :param str command: SQL command :param args: optional parameter values :returns: result values :rtype: :class:`Query`, None :raises TypeError: bad argument type, or too many arguments :raises TypeError: invalid connection :raises ValueError: empty SQL query or lost connection :raises pg.ProgrammingError: error in query :raises pg.InternalError: error during query processing This method simply sends a SQL query to the database. If the query is an insert statement that inserted exactly one row into a table that has OIDs, the return value is the OID of the newly inserted row as an integer. If the query is an update or delete statement, or an insert statement that did not insert exactly one row, or on a table without OIDs, then the number of rows affected is returned as a string. If it is a statement that returns rows as a result (usually a select statement, but maybe also an ``"insert/update ... returning"`` statement), this method returns a :class:`Query`. Otherwise, it returns ``None``. You can use the :class:`Query` object as an iterator that yields all results as tuples, or call :meth:`Query.getresult` to get the result as a list of tuples. 
Alternatively, you can call :meth:`Query.dictresult` or :meth:`Query.dictiter` if you want to get the rows as dictionaries, or :meth:`Query.namedresult` or :meth:`Query.namediter` if you want to get the rows as named tuples. You can also simply print the :class:`Query` object to show the query results on the console. The SQL command may optionally contain positional parameters of the form ``$1``, ``$2``, etc instead of literal data, in which case the values must be supplied separately as a tuple. The values are substituted by the database in such a way that they don't need to be escaped, making this an effective way to pass arbitrary or unknown data without worrying about SQL injection or syntax errors. If you don't pass any parameters, the command string can also include multiple SQL commands (separated by semicolons). You will only get the return value for the last command in this case. When the database could not process the query, a :exc:`pg.ProgrammingError` or a :exc:`pg.InternalError` is raised. You can check the ``SQLSTATE`` error code of this error by reading its :attr:`sqlstate` attribute. Example:: name = input("Name? ") phone = con.query("select phone from employees where name=$1", (name,)).getresult() query_prepared -- execute a prepared statement ---------------------------------------------- .. 
method:: Connection.query_prepared(name, [args]) Execute a prepared statement :param str name: name of the prepared statement :param args: optional parameter values :returns: result values :rtype: :class:`Query`, None :raises TypeError: bad argument type, or too many arguments :raises TypeError: invalid connection :raises ValueError: empty SQL query or lost connection :raises pg.ProgrammingError: error in query :raises pg.InternalError: error during query processing :raises pg.OperationalError: prepared statement does not exist This method works exactly like :meth:`Connection.query` except that instead of passing the command itself, you pass the name of a prepared statement. An empty name corresponds to the unnamed statement. You must have previously created the corresponding named or unnamed statement with :meth:`Connection.prepare`, or an :exc:`pg.OperationalError` will be raised. .. versionadded:: 5.1 prepare -- create a prepared statement -------------------------------------- .. method:: Connection.prepare(name, command) Create a prepared statement :param str name: name of the prepared statement :param str command: SQL command :rtype: None :raises TypeError: bad argument types, or wrong number of arguments :raises TypeError: invalid connection :raises pg.ProgrammingError: error in query or duplicate query This method creates a prepared statement with the specified name for the given command for later execution with the :meth:`Connection.query_prepared` method. The name can be empty to create an unnamed statement, in which case any pre-existing unnamed statement is automatically replaced; otherwise a :exc:`pg.ProgrammingError` is raised if the statement name is already defined in the current database session. The SQL command may optionally contain positional parameters of the form ``$1``, ``$2``, etc instead of literal data. The corresponding values must then later be passed to the :meth:`Connection.query_prepared` method separately as a tuple. .. 
versionadded:: 5.1 describe_prepared -- describe a prepared statement -------------------------------------------------- .. method:: Connection.describe_prepared(name) Describe a prepared statement :param str name: name of the prepared statement :rtype: :class:`Query` :raises TypeError: bad argument type, or too many arguments :raises TypeError: invalid connection :raises pg.OperationalError: prepared statement does not exist This method returns a :class:`Query` object describing the prepared statement with the given name. You can also pass an empty name in order to describe the unnamed statement. Information on the fields of the corresponding query can be obtained through the :meth:`Query.listfields`, :meth:`Query.fieldname` and :meth:`Query.fieldnum` methods. .. versionadded:: 5.1 reset -- reset the connection ----------------------------- .. method:: Connection.reset() Reset the :mod:`pg` connection :rtype: None :raises TypeError: too many (any) arguments :raises TypeError: invalid connection This method resets the current database connection. cancel -- abandon processing of current SQL command --------------------------------------------------- .. method:: Connection.cancel() :rtype: None :raises TypeError: too many (any) arguments :raises TypeError: invalid connection This method requests that the server abandon processing of the current SQL command. close -- close the database connection -------------------------------------- .. method:: Connection.close() Close the :mod:`pg` connection :rtype: None :raises TypeError: too many (any) arguments This method closes the database connection. The connection will be closed in any case when the connection is deleted but this allows you to explicitly close it. It is mainly here to allow the DB-SIG API wrapper to implement a close function. transaction -- get the current transaction state ------------------------------------------------ .. 
method:: Connection.transaction() Get the current in-transaction status of the server :returns: the current in-transaction status :rtype: int :raises TypeError: too many (any) arguments :raises TypeError: invalid connection The status returned by this method can be :const:`TRANS_IDLE` (currently idle), :const:`TRANS_ACTIVE` (a command is in progress), :const:`TRANS_INTRANS` (idle, in a valid transaction block), or :const:`TRANS_INERROR` (idle, in a failed transaction block). :const:`TRANS_UNKNOWN` is reported if the connection is bad. The status :const:`TRANS_ACTIVE` is reported only when a query has been sent to the server and not yet completed. parameter -- get a current server parameter setting --------------------------------------------------- .. method:: Connection.parameter(name) Look up a current parameter setting of the server :param str name: the name of the parameter to look up :returns: the current setting of the specified parameter :rtype: str or None :raises TypeError: too many (any) arguments :raises TypeError: invalid connection Certain parameter values are reported by the server automatically at connection startup or whenever their values change. This method can be used to interrogate these settings. It returns the current value of a parameter if known, or *None* if the parameter is not known. You can use this method to check the settings of important parameters such as `server_version`, `server_encoding`, `client_encoding`, `application_name`, `is_superuser`, `session_authorization`, `DateStyle`, `IntervalStyle`, `TimeZone`, `integer_datetimes`, and `standard_conforming_strings`. Values that are not reported by this method can be requested using :meth:`DB.get_parameter`. .. versionadded:: 4.0 date_format -- get the currently used date format ------------------------------------------------- .. 
method:: Connection.date_format() Look up the date format currently being used by the database :returns: the current date format :rtype: str :raises TypeError: too many (any) arguments :raises TypeError: invalid connection This method returns the current date format used by the server. Note that it is cheap to call this method, since there is no database query involved and the setting is also cached internally. You will need the date format when you want to manually typecast dates and timestamps coming from the database instead of using the built-in typecast functions. The date format returned by this method can be directly used with date formatting functions such as :meth:`datetime.strptime`. It is derived from the current setting of the database parameter ``DateStyle``. .. versionadded:: 5.0 fileno -- get the socket used to connect to the database -------------------------------------------------------- .. method:: Connection.fileno() Get the socket used to connect to the database :returns: the socket id of the database connection :rtype: int :raises TypeError: too many (any) arguments :raises TypeError: invalid connection This method returns the underlying socket id used to connect to the database. This is useful for use in select calls, etc. getnotify -- get the last notify from the server ------------------------------------------------ .. method:: Connection.getnotify() Get the last notify from the server :returns: last notify from server :rtype: tuple, None :raises TypeError: too many parameters :raises TypeError: invalid connection This method tries to get a notify from the server (from the SQL statement NOTIFY). If the server returns no notify, the method returns None. Otherwise, it returns a tuple (triplet) *(relname, pid, extra)*, where *relname* is the name of the notify, *pid* is the process id of the connection that triggered the notify, and *extra* is a payload string that has been sent with the notification. 
Remember to do a listen query first, otherwise :meth:`Connection.getnotify` will always return ``None``. .. versionchanged:: 4.1 Support for payload strings was added in version 4.1. inserttable -- insert a list into a table ----------------------------------------- .. method:: Connection.inserttable(table, values) Insert a Python list into a database table :param str table: the table name :param list values: list of row values :rtype: None :raises TypeError: invalid connection, bad argument type, or too many arguments :raises MemoryError: insert buffer could not be allocated :raises ValueError: unsupported values This method allows you to *quickly* insert large blocks of data in a table: It inserts the whole values list into the given table. Internally, it uses the COPY command of the PostgreSQL database. The list is a list of tuples/lists that define the values for each inserted row. The row values may contain string, integer, long or double (real) values. .. warning:: This method doesn't type check the fields according to the table definition; it just looks whether or not it knows how to handle such types. get/set_cast_hook -- fallback typecast function ----------------------------------------------- .. method:: Connection.get_cast_hook() Get the function that handles all external typecasting :returns: the current external typecast function :rtype: callable, None :raises TypeError: too many (any) arguments This returns the callback function used by PyGreSQL to provide plug-in Python typecast functions for the connection. .. versionadded:: 5.0 .. method:: Connection.set_cast_hook(func) Set a function that will handle all external typecasting :param func: the function to be used as a callback :rtype: None :raises TypeError: the specified notice receiver is not callable This method allows setting a custom fallback function for providing Python typecast functions for the connection to supplement the C extension module. 
If you set this function to *None*, then only the typecast functions implemented in the C extension module are enabled. You normally would not want to change this. Instead, you can use :func:`get_typecast` and :func:`set_typecast` to add or change the plug-in Python typecast functions. .. versionadded:: 5.0 get/set_notice_receiver -- custom notice receiver ------------------------------------------------- .. method:: Connection.get_notice_receiver() Get the current notice receiver :returns: the current notice receiver callable :rtype: callable, None :raises TypeError: too many (any) arguments This method gets the custom notice receiver callback function that has been set with :meth:`Connection.set_notice_receiver`, or ``None`` if no custom notice receiver has ever been set on the connection. .. versionadded:: 4.1 .. method:: Connection.set_notice_receiver(func) Set a custom notice receiver :param func: the custom notice receiver callback function :rtype: None :raises TypeError: the specified notice receiver is not callable This method allows setting a custom notice receiver callback function. When a notice or warning message is received from the server, or generated internally by libpq, and the message level is below the one set with ``client_min_messages``, the specified notice receiver function will be called. This function must take one parameter, the :class:`Notice` object, which provides the following read-only attributes: .. attribute:: Notice.pgcnx the connection .. attribute:: Notice.message the full message with a trailing newline .. attribute:: Notice.severity the level of the message, e.g. 'NOTICE' or 'WARNING' .. attribute:: Notice.primary the primary human-readable error message .. attribute:: Notice.detail an optional secondary error message .. attribute:: Notice.hint an optional suggestion what to do about the problem .. versionadded:: 4.1 putline -- write a line to the server socket [DA] ------------------------------------------------- .. 
method:: Connection.putline(line) Write a line to the server socket :param str line: line to be written :rtype: None :raises TypeError: invalid connection, bad parameter type, or too many parameters This method allows you to directly write a string to the server socket. getline -- get a line from server socket [DA] --------------------------------------------- .. method:: Connection.getline() Get a line from server socket :returns: the line read :rtype: str :raises TypeError: invalid connection :raises TypeError: too many parameters :raises MemoryError: buffer overflow This method allows you to directly read a string from the server socket. endcopy -- synchronize client and server [DA] --------------------------------------------- .. method:: Connection.endcopy() Synchronize client and server :rtype: None :raises TypeError: invalid connection :raises TypeError: too many parameters The use of direct access methods may desynchronize client and server. This method ensures that client and server will be synchronized. locreate -- create a large object in the database [LO] ------------------------------------------------------ .. method:: Connection.locreate(mode) Create a large object in the database :param int mode: large object create mode :returns: object handling the PostgreSQL large object :rtype: :class:`LargeObject` :raises TypeError: invalid connection, bad parameter type, or too many parameters :raises pg.OperationalError: creation error This method creates a large object in the database. The mode can be defined by OR-ing the constants defined in the :mod:`pg` module (:const:`INV_READ`, :const:`INV_WRITE` and :const:`INV_ARCHIVE`). Please refer to the PostgreSQL user manual for a description of the mode values. getlo -- build a large object from given oid [LO] ------------------------------------------------- .. 
method:: Connection.getlo(oid) Build a large object from the given OID :param int oid: OID of the existing large object :returns: object handling the PostgreSQL large object :rtype: :class:`LargeObject` :raises TypeError: invalid connection, bad parameter type, or too many parameters :raises ValueError: bad OID value (0 is invalid_oid) This method allows reusing a previously created large object through the :class:`LargeObject` interface, provided the user has its OID. loimport -- import a file to a large object [LO] ------------------------------------------------ .. method:: Connection.loimport(name) Import a file to a large object :param str name: the name of the file to be imported :returns: object handling the PostgreSQL large object :rtype: :class:`LargeObject` :raises TypeError: invalid connection, bad argument type, or too many arguments :raises pg.OperationalError: error during file import This method allows you to create large objects in a very simple way. You just give the name of a file containing the data to be used. Object attributes ----------------- Every :class:`Connection` defines a set of read-only attributes that describe the connection and its status. These attributes are: .. attribute:: Connection.host the host name of the server (str) .. attribute:: Connection.port the port of the server (int) .. attribute:: Connection.db the selected database (str) .. attribute:: Connection.options the connection options (str) .. attribute:: Connection.user user name on the database system (str) .. attribute:: Connection.protocol_version the frontend/backend protocol being used (int) .. versionadded:: 4.0 .. attribute:: Connection.server_version the backend version (int, e.g. 90305 for 9.3.5) .. versionadded:: 4.0 .. attribute:: Connection.status the status of the connection (int: 1 = OK, 0 = bad) .. attribute:: Connection.error the last warning/error message from the server (str) .. 
attribute:: Connection.socket the file descriptor number of the connection socket to the server (int) .. versionadded:: 5.1 .. attribute:: Connection.backend_pid the PID of the backend process handling this connection (int) .. versionadded:: 5.1 .. attribute:: Connection.ssl_in_use this is True if the connection uses SSL, False if not .. versionadded:: 5.1 (needs PostgreSQL >= 9.5) .. attribute:: Connection.ssl_attributes SSL-related information about the connection (dict) .. versionadded:: 5.1 (needs PostgreSQL >= 9.5) PyGreSQL-5.1/docs/about.txt0000644000175100077410000000446613466770070015546 0ustar darcypyg00000000000000**PyGreSQL** is an *open-source* `Python `_ module that interfaces to a `PostgreSQL `_ database. It embeds the PostgreSQL query library to allow easy use of the powerful PostgreSQL features from a Python script. | This software is copyright © 1995, Pascal Andre. | Further modifications are copyright © 1997-2008 by D'Arcy J.M. Cain. | Further modifications are copyright © 2009-2019 by the PyGreSQL team. | For licensing details, see the full :doc:`copyright`. **PostgreSQL** is a highly scalable, SQL compliant, open source object-relational database management system. With more than 20 years of development history, it is quickly becoming the de facto database for enterprise level open source solutions. Best of all, PostgreSQL's source code is available under the most liberal open source license: the BSD license. **Python** Python is an interpreted, interactive, object-oriented programming language. It is often compared to Tcl, Perl, Scheme or Java. Python combines remarkable power with very clear syntax. It has modules, classes, exceptions, very high level dynamic data types, and dynamic typing. There are interfaces to many system calls and libraries, as well as to various windowing systems (X11, Motif, Tk, Mac, MFC). New built-in modules are easily written in C or C++. 
Python is also usable as an extension language for applications that need a programmable interface. The Python implementation is copyrighted but freely usable and distributable, even for commercial use. **PyGreSQL** is a Python module that interfaces to a PostgreSQL database. It embeds the PostgreSQL query library to allow easy use of the powerful PostgreSQL features from a Python script or application. PyGreSQL is developed and tested on a NetBSD system, but it also runs on most other platforms where PostgreSQL and Python is running. It is based on the PyGres95 code written by Pascal Andre (andre@chimay.via.ecp.fr). D'Arcy (darcy@druid.net) renamed it to PyGreSQL starting with version 2.0 and serves as the "BDFL" of PyGreSQL. The current version PyGreSQL 5.1 needs PostgreSQL 9.0 to 9.6 or 10 or 11, and Python 2.6, 2.7 or 3.3 to 3.7. If you need to support older PostgreSQL versions or older Python 2.x versions, you can resort to the PyGreSQL 4.x versions that still support them. PyGreSQL-5.1/docs/conf.py0000644000175100077410000002415113467000173015152 0ustar darcypyg00000000000000# -*- coding: utf-8 -*- # # PyGreSQL documentation build configuration file. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex import shutil # Import Cloud theme (this will also automatically add the theme directory). # Note: We add a navigation bar to the cloud them using a custom layout. if os.environ.get('READTHEDOCS', None) == 'True': # We cannot use our custom layout here, since RTD overrides layout.html. 
use_cloud_theme = False else: try: import cloud_sptheme use_cloud_theme = True except ImportError: use_cloud_theme = False shutil.copyfile('start.txt' if use_cloud_theme else 'toc.txt', 'index.rst') # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] if use_cloud_theme else [] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'PyGreSQL' author = 'The PyGreSQL team' copyright = '2019, ' + author # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '5.1' # The full version, including alpha/beta/rc tags. release = '5.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. 
language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # List of pages which are included in other pages and therefore should # not appear in the toctree. exclude_patterns += [ 'download/download.rst', 'download/files.rst', 'community/mailinglist.rst', 'community/source.rst', 'community/bugtracker.rst', 'community/support.rst', 'community/homes.rst'] if use_cloud_theme: exclude_patterns += ['about.rst'] # The reST default role (used for this markup: `text`) for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'cloud' if use_cloud_theme else 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
if use_cloud_theme: html_theme_options = { 'roottarget': 'contents/index', 'defaultcollapsed': True, 'shaded_decor': True} else: html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['_themes'] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". html_title = 'PyGreSQL %s' % version if use_cloud_theme: html_title += ' documentation' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = '_static/pygresql.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = '_static/favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. 
#html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'PyGreSQLdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). 
latex_documents = [ (master_doc, 'PyGreSQL.tex', 'PyGreSQL Documentation', author, 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'pygresql', 'PyGreSQL Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'PyGreSQL', u'PyGreSQL Documentation', author, 'PyGreSQL', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. 
#texinfo_no_detailmenu = False PyGreSQL-5.1/docs/requirements.txt0000644000175100077410000000002413466770070017141 0ustar darcypyg00000000000000cloud_sptheme>=1.7.1PyGreSQL-5.1/docs/make.bat0000644000175100077410000001551613466770070015276 0ustar darcypyg00000000000000@ECHO OFF REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set BUILDDIR=_build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . set I18NSPHINXOPTS=%SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. singlehtml to make a single large HTML file echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. epub to make an epub echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo. man to make manual pages echo. texinfo to make Texinfo files echo. gettext to make PO message catalogs echo. changes to make an overview over all changed/added/deprecated items echo. xml to make Docutils-native XML files echo. pseudoxml to make pseudoxml-XML files for display purposes echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled echo. 
coverage to run coverage check of the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) REM Check if sphinx-build is available and fallback to Python version if any %SPHINXBUILD% 1>NUL 2>NUL if errorlevel 9009 goto sphinx_python goto sphinx_ok :sphinx_python set SPHINXBUILD=python -m sphinx.__init__ %SPHINXBUILD% 2> nul if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) :sphinx_ok if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) if "%1" == "singlehtml" ( %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. 
goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: echo.^> qcollectiongenerator %BUILDDIR%\qthelp\PyGreSQL.qhcp echo.To view the help file: echo.^> assistant -collectionFile %BUILDDIR%\qthelp\PyGreSQL.ghc goto end ) if "%1" == "devhelp" ( %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp if errorlevel 1 exit /b 1 echo. echo.Build finished. goto end ) if "%1" == "epub" ( %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub file is in %BUILDDIR%/epub. goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdf" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf cd %~dp0 echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdfja" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf-ja cd %~dp0 echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "text" ( %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text if errorlevel 1 exit /b 1 echo. echo.Build finished. The text files are in %BUILDDIR%/text. goto end ) if "%1" == "man" ( %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man if errorlevel 1 exit /b 1 echo. echo.Build finished. The manual pages are in %BUILDDIR%/man. goto end ) if "%1" == "texinfo" ( %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo if errorlevel 1 exit /b 1 echo. echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. goto end ) if "%1" == "gettext" ( %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale if errorlevel 1 exit /b 1 echo. echo.Build finished. 
The message catalogs are in %BUILDDIR%/locale. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck if errorlevel 1 exit /b 1 echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) if "%1" == "coverage" ( %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage if errorlevel 1 exit /b 1 echo. echo.Testing of coverage in the sources finished, look at the ^ results in %BUILDDIR%/coverage/python.txt. goto end ) if "%1" == "xml" ( %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml if errorlevel 1 exit /b 1 echo. echo.Build finished. The XML files are in %BUILDDIR%/xml. goto end ) if "%1" == "pseudoxml" ( %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml if errorlevel 1 exit /b 1 echo. echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. goto end ) :end PyGreSQL-5.1/docs/copyright.rst0000644000175100077410000000245513466770070016431 0ustar darcypyg00000000000000Copyright notice ================ Written by D'Arcy J.M. Cain (darcy@druid.net) Based heavily on code written by Pascal Andre (andre@chimay.via.ecp.fr) Copyright (c) 1995, Pascal Andre Further modifications copyright (c) 1997-2008 by D'Arcy J.M. Cain (darcy@PyGreSQL.org) Further modifications copyright (c) 2009-2019 by the PyGreSQL team. 
Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies. In this license the term "AUTHORS" refers to anyone who has contributed code to PyGreSQL. IN NO EVENT SHALL THE AUTHORS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF AUTHORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE AUTHORS SPECIFICALLY DISCLAIM ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE AUTHORS HAVE NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. PyGreSQL-5.1/pgnotice.c0000644000175100077410000000767513466770070014724 0ustar darcypyg00000000000000/* * $Id: pgnotice.c 985 2019-04-22 22:07:43Z cito $ * * PyGreSQL - a Python interface for the PostgreSQL database. * * The notice object - this file is part a of the C extension module. * * Copyright (c) 2019 by the PyGreSQL Development Team * * Please see the LICENSE.TXT file for specific restrictions. * */ /* Get notice object attributes. 
*/ static PyObject * notice_getattr(noticeObject *self, PyObject *nameobj) { PGresult const *res = self->res; const char *name = PyStr_AsString(nameobj); int fieldcode; if (!res) { PyErr_SetString(PyExc_TypeError, "Cannot get current notice"); return NULL; } /* pg connection object */ if (!strcmp(name, "pgcnx")) { if (self->pgcnx && _check_cnx_obj(self->pgcnx)) { Py_INCREF(self->pgcnx); return (PyObject *) self->pgcnx; } else { Py_INCREF(Py_None); return Py_None; } } /* full message */ if (!strcmp(name, "message")) { return PyStr_FromString(PQresultErrorMessage(res)); } /* other possible fields */ fieldcode = 0; if (!strcmp(name, "severity")) fieldcode = PG_DIAG_SEVERITY; else if (!strcmp(name, "primary")) fieldcode = PG_DIAG_MESSAGE_PRIMARY; else if (!strcmp(name, "detail")) fieldcode = PG_DIAG_MESSAGE_DETAIL; else if (!strcmp(name, "hint")) fieldcode = PG_DIAG_MESSAGE_HINT; if (fieldcode) { char *s = PQresultErrorField(res, fieldcode); if (s) { return PyStr_FromString(s); } else { Py_INCREF(Py_None); return Py_None; } } return PyObject_GenericGetAttr((PyObject *) self, nameobj); } /* Get the list of notice attributes. */ static PyObject * notice_dir(noticeObject *self, PyObject *noargs) { PyObject *attrs; attrs = PyObject_Dir(PyObject_Type((PyObject *) self)); PyObject_CallMethod( attrs, "extend", "[ssssss]", "pgcnx", "severity", "message", "primary", "detail", "hint"); return attrs; } /* Return notice as string in human readable form. 
*/ static PyObject * notice_str(noticeObject *self) { return notice_getattr(self, PyBytes_FromString("message")); } /* Notice object methods */ static struct PyMethodDef notice_methods[] = { {"__dir__", (PyCFunction) notice_dir, METH_NOARGS, NULL}, {NULL, NULL} }; static char notice__doc__[] = "PostgreSQL notice object"; /* Notice type definition */ static PyTypeObject noticeType = { PyVarObject_HEAD_INIT(NULL, 0) "pg.Notice", /* tp_name */ sizeof(noticeObject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ 0, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ (reprfunc) notice_str, /* tp_str */ (getattrofunc) notice_getattr, /* tp_getattro */ PyObject_GenericSetAttr, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ notice__doc__, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ notice_methods, /* tp_methods */ }; PyGreSQL-5.1/pgmodule.c0000644000175100077410000011521013466770070014711 0ustar darcypyg00000000000000/* * $Id: pgmodule.c 1002 2019-04-25 22:33:57Z cito $ * * PyGreSQL - a Python interface for the PostgreSQL database. * * This is the main file for the C extension module. * * Copyright (c) 2019 by the PyGreSQL Development Team * * Please see the LICENSE.TXT file for specific restrictions. 
* */ /* Note: This should be linked against the same C runtime lib as Python */ #include #include #include /* The type definitions from */ #include "pgtypes.h" /* Macros for single-source Python 2/3 compatibility */ #include "py3c.h" static PyObject *Error, *Warning, *InterfaceError, *DatabaseError, *InternalError, *OperationalError, *ProgrammingError, *IntegrityError, *DataError, *NotSupportedError, *InvalidResultError, *NoResultError, *MultipleResultsError; #define _TOSTRING(x) #x #define TOSTRING(x) _TOSTRING(x) static const char *PyPgVersion = TOSTRING(PYGRESQL_VERSION); #if SIZEOF_SIZE_T != SIZEOF_INT #define Py_InitModule4 Py_InitModule4_64 #endif /* Default values */ #define PG_ARRAYSIZE 1 /* Flags for object validity checks */ #define CHECK_OPEN 1 #define CHECK_CLOSE 2 #define CHECK_CNX 4 #define CHECK_RESULT 8 #define CHECK_DQL 16 /* Query result types */ #define RESULT_EMPTY 1 #define RESULT_DML 2 #define RESULT_DDL 3 #define RESULT_DQL 4 /* Flags for move methods */ #define QUERY_MOVEFIRST 1 #define QUERY_MOVELAST 2 #define QUERY_MOVENEXT 3 #define QUERY_MOVEPREV 4 #define MAX_BUFFER_SIZE 8192 /* maximum transaction size */ #define MAX_ARRAY_DEPTH 16 /* maximum allowed depth of an array */ /* MODULE GLOBAL VARIABLES */ #ifdef DEFAULT_VARS static PyObject *pg_default_host; /* default database host */ static PyObject *pg_default_base; /* default database name */ static PyObject *pg_default_opt; /* default connection options */ static PyObject *pg_default_port; /* default connection port */ static PyObject *pg_default_user; /* default username */ static PyObject *pg_default_passwd; /* default password */ #endif /* DEFAULT_VARS */ static PyObject *decimal = NULL, /* decimal type */ *dictiter = NULL, /* function for getting named results */ *namediter = NULL, /* function for getting named results */ *namednext = NULL, /* function for getting one named result */ *scalariter = NULL, /* function for getting scalar results */ *jsondecode = NULL; /* function for 
decoding json strings */ static const char *date_format = NULL; /* date format that is always assumed */ static char decimal_point = '.'; /* decimal point used in money values */ static int bool_as_text = 0; /* whether bool shall be returned as text */ static int array_as_text = 0; /* whether arrays shall be returned as text */ static int bytea_escaped = 0; /* whether bytea shall be returned escaped */ static int pg_encoding_utf8 = 0; static int pg_encoding_latin1 = 0; static int pg_encoding_ascii = 0; /* OBJECTS ======= Each object has a number of elements. The naming scheme will be based on the object type. Here are the elements using example object type "foo". - fooType: Type definition for object. - fooObject: A structure to hold local object information. - foo_methods: Methods declaration. - foo_method_name: Object methods. The objects that we need to create: - pg: The module itself. - conn: Connection object returned from pg.connect(). - notice: Notice object returned from pg.notice(). - large: Large object returned by pg.conn.locreate() and pg.conn.loimport(). - query: Query object returned by pg.conn.query(). - source: Source object returned by pg.conn.source(). 
*/ /* Forward declarations for types */ static PyTypeObject connType, sourceType, queryType, noticeType, largeType; /* Forward static declarations */ static void notice_receiver(void *, const PGresult *); /* Object declarations */ typedef struct { PyObject_HEAD int valid; /* validity flag */ PGconn *cnx; /* Postgres connection handle */ const char *date_format; /* date format derived from datestyle */ PyObject *cast_hook; /* external typecast method */ PyObject *notice_receiver; /* current notice receiver */ } connObject; #define is_connObject(v) (PyType(v) == &connType) typedef struct { PyObject_HEAD int valid; /* validity flag */ connObject *pgcnx; /* parent connection object */ PGresult *result; /* result content */ int encoding; /* client encoding */ int result_type; /* result type (DDL/DML/DQL) */ long arraysize; /* array size for fetch method */ int current_row; /* currently selected row */ int max_row; /* number of rows in the result */ int num_fields; /* number of fields in each row */ } sourceObject; #define is_sourceObject(v) (PyType(v) == &sourceType) typedef struct { PyObject_HEAD connObject *pgcnx; /* parent connection object */ PGresult const *res; /* an error or warning */ } noticeObject; #define is_noticeObject(v) (PyType(v) == ¬iceType) typedef struct { PyObject_HEAD connObject *pgcnx; /* parent connection object */ PGresult *result; /* result content */ int encoding; /* client encoding */ int current_row; /* currently selected row */ int max_row; /* number of rows in the result */ int num_fields; /* number of fields in each row */ int *col_types; /* PyGreSQL column types */ } queryObject; #define is_queryObject(v) (PyType(v) == &queryType) #ifdef LARGE_OBJECTS typedef struct { PyObject_HEAD connObject *pgcnx; /* parent connection object */ Oid lo_oid; /* large object oid */ int lo_fd; /* large object fd */ } largeObject; #define is_largeObject(v) (PyType(v) == &largeType) #endif /* LARGE_OBJECTS */ /* Internal functions */ #include "pginternal.c" 
/* Connection object */ #include "pgconn.c" /* Query object */ #include "pgquery.c" /* Source object */ #include "pgsource.c" /* Notice object */ #include "pgnotice.c" /* Large objects */ #ifdef LARGE_OBJECTS #include "pglarge.c" #endif /* MODULE FUNCTIONS */ /* Connect to a database. */ static char pg_connect__doc__[] = "connect(dbname, host, port, opt) -- connect to a PostgreSQL database\n\n" "The connection uses the specified parameters (optional, keywords aware).\n"; static PyObject * pg_connect(PyObject *self, PyObject *args, PyObject *dict) { static const char *kwlist[] = { "dbname", "host", "port", "opt", "user", "passwd", NULL }; char *pghost, *pgopt, *pgdbname, *pguser, *pgpasswd; int pgport; char port_buffer[20]; connObject *conn_obj; pghost = pgopt = pgdbname = pguser = pgpasswd = NULL; pgport = -1; /* * parses standard arguments With the right compiler warnings, this * will issue a diagnostic. There is really no way around it. If I * don't declare kwlist as const char *kwlist[] then it complains when * I try to assign all those constant strings to it. 
*/ if (!PyArg_ParseTupleAndKeywords( args, dict, "|zzizzz", (char**)kwlist, &pgdbname, &pghost, &pgport, &pgopt, &pguser, &pgpasswd)) { return NULL; } #ifdef DEFAULT_VARS /* handles defaults variables (for uninitialised vars) */ if ((!pghost) && (pg_default_host != Py_None)) pghost = PyBytes_AsString(pg_default_host); if ((pgport == -1) && (pg_default_port != Py_None)) pgport = (int) PyInt_AsLong(pg_default_port); if ((!pgopt) && (pg_default_opt != Py_None)) pgopt = PyBytes_AsString(pg_default_opt); if ((!pgdbname) && (pg_default_base != Py_None)) pgdbname = PyBytes_AsString(pg_default_base); if ((!pguser) && (pg_default_user != Py_None)) pguser = PyBytes_AsString(pg_default_user); if ((!pgpasswd) && (pg_default_passwd != Py_None)) pgpasswd = PyBytes_AsString(pg_default_passwd); #endif /* DEFAULT_VARS */ if (!(conn_obj = PyObject_NEW(connObject, &connType))) { set_error_msg(InternalError, "Can't create new connection object"); return NULL; } conn_obj->valid = 1; conn_obj->cnx = NULL; conn_obj->date_format = date_format; conn_obj->cast_hook = NULL; conn_obj->notice_receiver = NULL; if (pgport != -1) { memset(port_buffer, 0, sizeof(port_buffer)); sprintf(port_buffer, "%d", pgport); } Py_BEGIN_ALLOW_THREADS conn_obj->cnx = PQsetdbLogin(pghost, pgport == -1 ? 
NULL : port_buffer, pgopt, NULL, pgdbname, pguser, pgpasswd); Py_END_ALLOW_THREADS if (PQstatus(conn_obj->cnx) == CONNECTION_BAD) { set_error(InternalError, "Cannot connect", conn_obj->cnx, NULL); Py_XDECREF(conn_obj); return NULL; } return (PyObject *) conn_obj; } /* Escape string */ static char pg_escape_string__doc__[] = "escape_string(string) -- escape a string for use within SQL"; static PyObject * pg_escape_string(PyObject *self, PyObject *string) { PyObject *tmp_obj = NULL, /* auxiliary string object */ *to_obj; /* string object to return */ char *from, /* our string argument as encoded string */ *to; /* the result as encoded string */ Py_ssize_t from_length; /* length of string */ size_t to_length; /* length of result */ int encoding = -1; /* client encoding */ if (PyBytes_Check(string)) { PyBytes_AsStringAndSize(string, &from, &from_length); } else if (PyUnicode_Check(string)) { encoding = pg_encoding_ascii; tmp_obj = get_encoded_string(string, encoding); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &from, &from_length); } else { PyErr_SetString(PyExc_TypeError, "Method escape_string() expects a string as argument"); return NULL; } to_length = 2*from_length + 1; if ((Py_ssize_t ) to_length < from_length) { /* overflow */ to_length = from_length; from_length = (from_length - 1)/2; } to = (char *) PyMem_Malloc(to_length); to_length = (int) PQescapeString(to, from, (size_t) from_length); Py_XDECREF(tmp_obj); if (encoding == -1) to_obj = PyBytes_FromStringAndSize(to, to_length); else to_obj = get_decoded_string(to, to_length, encoding); PyMem_Free(to); return to_obj; } /* Escape bytea */ static char pg_escape_bytea__doc__[] = "escape_bytea(data) -- escape binary data for use within SQL as type bytea"; static PyObject * pg_escape_bytea(PyObject *self, PyObject *data) { PyObject *tmp_obj = NULL, /* auxiliary string object */ *to_obj; /* string object to return */ char *from, /* our string argument as encoded 
string */ *to; /* the result as encoded string */ Py_ssize_t from_length; /* length of string */ size_t to_length; /* length of result */ int encoding = -1; /* client encoding */ if (PyBytes_Check(data)) { PyBytes_AsStringAndSize(data, &from, &from_length); } else if (PyUnicode_Check(data)) { encoding = pg_encoding_ascii; tmp_obj = get_encoded_string(data, encoding); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &from, &from_length); } else { PyErr_SetString(PyExc_TypeError, "Method escape_bytea() expects a string as argument"); return NULL; } to = (char *) PQescapeBytea( (unsigned char*) from, (size_t) from_length, &to_length); Py_XDECREF(tmp_obj); if (encoding == -1) to_obj = PyBytes_FromStringAndSize(to, to_length - 1); else to_obj = get_decoded_string(to, to_length - 1, encoding); if (to) PQfreemem(to); return to_obj; } /* Unescape bytea */ static char pg_unescape_bytea__doc__[] = "unescape_bytea(string) -- unescape bytea data retrieved as text"; static PyObject * pg_unescape_bytea(PyObject *self, PyObject *data) { PyObject *tmp_obj = NULL, /* auxiliary string object */ *to_obj; /* string object to return */ char *from, /* our string argument as encoded string */ *to; /* the result as encoded string */ Py_ssize_t from_length; /* length of string */ size_t to_length; /* length of result */ if (PyBytes_Check(data)) { PyBytes_AsStringAndSize(data, &from, &from_length); } else if (PyUnicode_Check(data)) { tmp_obj = get_encoded_string(data, pg_encoding_ascii); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &from, &from_length); } else { PyErr_SetString( PyExc_TypeError, "Method unescape_bytea() expects a string as argument"); return NULL; } to = (char *) PQunescapeBytea((unsigned char*) from, &to_length); Py_XDECREF(tmp_obj); if (!to) return PyErr_NoMemory(); to_obj = PyBytes_FromStringAndSize(to, to_length); PQfreemem(to); return to_obj; } /* Set fixed datestyle. 
*/ static char pg_set_datestyle__doc__[] = "set_datestyle(style) -- set which style is assumed"; static PyObject * pg_set_datestyle(PyObject *self, PyObject *args) { const char *datestyle = NULL; /* gets arguments */ if (!PyArg_ParseTuple(args, "z", &datestyle)) { PyErr_SetString( PyExc_TypeError, "Function set_datestyle() expects a string or None as argument"); return NULL; } date_format = datestyle ? date_style_to_format(datestyle) : NULL; Py_INCREF(Py_None); return Py_None; } /* Get fixed datestyle. */ static char pg_get_datestyle__doc__[] = "get_datestyle() -- get which date style is assumed"; static PyObject * pg_get_datestyle(PyObject *self, PyObject *noargs) { if (date_format) { return PyStr_FromString(date_format_to_style(date_format)); } else { Py_INCREF(Py_None); return Py_None; } } /* Get decimal point. */ static char pg_get_decimal_point__doc__[] = "get_decimal_point() -- get decimal point to be used for money values"; static PyObject * pg_get_decimal_point(PyObject *self, PyObject *noargs) { PyObject *ret; char s[2]; if (decimal_point) { s[0] = decimal_point; s[1] = '\0'; ret = PyStr_FromString(s); } else { Py_INCREF(Py_None); ret = Py_None; } return ret; } /* Set decimal point. */ static char pg_set_decimal_point__doc__[] = "set_decimal_point(char) -- set decimal point to be used for money values"; static PyObject * pg_set_decimal_point(PyObject *self, PyObject *args) { PyObject *ret = NULL; char *s = NULL; /* gets arguments */ if (PyArg_ParseTuple(args, "z", &s)) { if (!s) s = "\0"; else if (*s && (*(s+1) || !strchr(".,;: '*/_`|", *s))) s = NULL; } if (s) { decimal_point = *s; Py_INCREF(Py_None); ret = Py_None; } else { PyErr_SetString(PyExc_TypeError, "Function set_decimal_mark() expects" " a decimal mark character as argument"); } return ret; } /* Get decimal type. 
*/ static char pg_get_decimal__doc__[] = "get_decimal() -- get the decimal type to be used for numeric values"; static PyObject * pg_get_decimal(PyObject *self, PyObject *noargs) { PyObject *ret; ret = decimal ? decimal : Py_None; Py_INCREF(ret); return ret; } /* Set decimal type. */ static char pg_set_decimal__doc__[] = "set_decimal(cls) -- set a decimal type to be used for numeric values"; static PyObject * pg_set_decimal(PyObject *self, PyObject *cls) { PyObject *ret = NULL; if (cls == Py_None) { Py_XDECREF(decimal); decimal = NULL; Py_INCREF(Py_None); ret = Py_None; } else if (PyCallable_Check(cls)) { Py_XINCREF(cls); Py_XDECREF(decimal); decimal = cls; Py_INCREF(Py_None); ret = Py_None; } else { PyErr_SetString(PyExc_TypeError, "Function set_decimal() expects" " a callable or None as argument"); } return ret; } /* Get usage of bool values. */ static char pg_get_bool__doc__[] = "get_bool() -- check whether boolean values are converted to bool"; static PyObject * pg_get_bool(PyObject *self, PyObject *noargs) { PyObject *ret; ret = bool_as_text ? Py_False : Py_True; Py_INCREF(ret); return ret; } /* Set usage of bool values. */ static char pg_set_bool__doc__[] = "set_bool(on) -- set whether boolean values should be converted to bool"; static PyObject * pg_set_bool(PyObject *self, PyObject *args) { PyObject *ret = NULL; int i; /* gets arguments */ if (PyArg_ParseTuple(args, "i", &i)) { bool_as_text = i ? 0 : 1; Py_INCREF(Py_None); ret = Py_None; } else { PyErr_SetString( PyExc_TypeError, "Function set_bool() expects a boolean value as argument"); } return ret; } /* Get conversion of arrays to lists. */ static char pg_get_array__doc__[] = "get_array() -- check whether arrays are converted as lists"; static PyObject * pg_get_array(PyObject *self, PyObject *noargs) { PyObject *ret; ret = array_as_text ? Py_False : Py_True; Py_INCREF(ret); return ret; } /* Set conversion of arrays to lists. 
*/ static char pg_set_array__doc__[] = "set_array(on) -- set whether arrays should be converted to lists"; static PyObject * pg_set_array(PyObject* self, PyObject* args) { PyObject* ret = NULL; int i; /* gets arguments */ if (PyArg_ParseTuple(args, "i", &i)) { array_as_text = i ? 0 : 1; Py_INCREF(Py_None); ret = Py_None; } else { PyErr_SetString( PyExc_TypeError, "Function set_array() expects a boolean value as argument"); } return ret; } /* Check whether bytea values are unescaped. */ static char pg_get_bytea_escaped__doc__[] = "get_bytea_escaped() -- check whether bytea will be returned escaped"; static PyObject * pg_get_bytea_escaped(PyObject *self, PyObject *noargs) { PyObject *ret; ret = bytea_escaped ? Py_True : Py_False; Py_INCREF(ret); return ret; } /* Set usage of bool values. */ static char pg_set_bytea_escaped__doc__[] = "set_bytea_escaped(on) -- set whether bytea will be returned escaped"; static PyObject * pg_set_bytea_escaped(PyObject *self, PyObject *args) { PyObject *ret = NULL; int i; /* gets arguments */ if (PyArg_ParseTuple(args, "i", &i)) { bytea_escaped = i ? 1 : 0; Py_INCREF(Py_None); ret = Py_None; } else { PyErr_SetString(PyExc_TypeError, "Function set_bytea_escaped() expects" " a boolean value as argument"); } return ret; } /* set query helper functions (not part of public API) */ static char pg_set_query_helpers__doc__[] = "set_query_helpers(*helpers) -- set internal query helper functions"; static PyObject * pg_set_query_helpers(PyObject *self, PyObject *args) { /* gets arguments */ if (!PyArg_ParseTuple(args, "O!O!O!O!", &PyFunction_Type, &dictiter, &PyFunction_Type, &namediter, &PyFunction_Type, &namednext, &PyFunction_Type, &scalariter)) { return NULL; } Py_INCREF(Py_None); return Py_None; } /* Get json decode function. 
*/ static char pg_get_jsondecode__doc__[] = "get_jsondecode() -- get the function used for decoding json results"; static PyObject * pg_get_jsondecode(PyObject *self, PyObject *noargs) { PyObject *ret; ret = jsondecode; if (!ret) ret = Py_None; Py_INCREF(ret); return ret; } /* Set json decode function. */ static char pg_set_jsondecode__doc__[] = "set_jsondecode(func) -- set a function to be used for decoding json results"; static PyObject * pg_set_jsondecode(PyObject *self, PyObject *func) { PyObject *ret = NULL; if (func == Py_None) { Py_XDECREF(jsondecode); jsondecode = NULL; Py_INCREF(Py_None); ret = Py_None; } else if (PyCallable_Check(func)) { Py_XINCREF(func); Py_XDECREF(jsondecode); jsondecode = func; Py_INCREF(Py_None); ret = Py_None; } else { PyErr_SetString(PyExc_TypeError, "Function jsondecode() expects" " a callable or None as argument"); } return ret; } #ifdef DEFAULT_VARS /* Get default host. */ static char pg_get_defhost__doc__[] = "get_defhost() -- return default database host"; static PyObject * pg_get_defhost(PyObject *self, PyObject *noargs) { Py_XINCREF(pg_default_host); return pg_default_host; } /* Set default host. */ static char pg_set_defhost__doc__[] = "set_defhost(string) -- set default database host and return previous value"; static PyObject * pg_set_defhost(PyObject *self, PyObject *args) { char *tmp = NULL; PyObject *old; /* gets arguments */ if (!PyArg_ParseTuple(args, "z", &tmp)) { PyErr_SetString( PyExc_TypeError, "Function set_defhost() expects a string or None as argument"); return NULL; } /* adjusts value */ old = pg_default_host; if (tmp) { pg_default_host = PyStr_FromString(tmp); } else { Py_INCREF(Py_None); pg_default_host = Py_None; } return old; } /* Get default database. */ static char pg_get_defbase__doc__[] = "get_defbase() -- return default database name"; static PyObject * pg_get_defbase(PyObject *self, PyObject *noargs) { Py_XINCREF(pg_default_base); return pg_default_base; } /* Set default database. 
*/ static char pg_set_defbase__doc__[] = "set_defbase(string) -- set default database name and return previous value"; static PyObject * pg_set_defbase(PyObject *self, PyObject *args) { char *tmp = NULL; PyObject *old; /* gets arguments */ if (!PyArg_ParseTuple(args, "z", &tmp)) { PyErr_SetString( PyExc_TypeError, "Function set_defbase() Argument a string or None as argument"); return NULL; } /* adjusts value */ old = pg_default_base; if (tmp) { pg_default_base = PyStr_FromString(tmp); } else { Py_INCREF(Py_None); pg_default_base = Py_None; } return old; } /* Get default options. */ static char pg_get_defopt__doc__[] = "get_defopt() -- return default database options"; static PyObject * pg_get_defopt(PyObject *self, PyObject *noargs) { Py_XINCREF(pg_default_opt); return pg_default_opt; } /* Set default options. */ static char pg_set_defopt__doc__[] = "set_defopt(string) -- set default options and return previous value"; static PyObject * pg_setdefopt(PyObject *self, PyObject *args) { char *tmp = NULL; PyObject *old; /* gets arguments */ if (!PyArg_ParseTuple(args, "z", &tmp)) { PyErr_SetString( PyExc_TypeError, "Function set_defopt() expects a string or None as argument"); return NULL; } /* adjusts value */ old = pg_default_opt; if (tmp) { pg_default_opt = PyStr_FromString(tmp); } else { Py_INCREF(Py_None); pg_default_opt = Py_None; } return old; } /* Get default username. */ static char pg_get_defuser__doc__[] = "get_defuser() -- return default database username"; static PyObject * pg_get_defuser(PyObject *self, PyObject *noargs) { Py_XINCREF(pg_default_user); return pg_default_user; } /* Set default username. 
*/ static char pg_set_defuser__doc__[] = "set_defuser(name) -- set default username and return previous value"; static PyObject * pg_set_defuser(PyObject *self, PyObject *args) { char *tmp = NULL; PyObject *old; /* gets arguments */ if (!PyArg_ParseTuple(args, "z", &tmp)) { PyErr_SetString( PyExc_TypeError, "Function set_defuser() expects a string or None as argument"); return NULL; } /* adjusts value */ old = pg_default_user; if (tmp) { pg_default_user = PyStr_FromString(tmp); } else { Py_INCREF(Py_None); pg_default_user = Py_None; } return old; } /* Set default password. */ static char pg_set_defpasswd__doc__[] = "set_defpasswd(password) -- set default database password"; static PyObject * pg_set_defpasswd(PyObject *self, PyObject *args) { char *tmp = NULL; /* gets arguments */ if (!PyArg_ParseTuple(args, "z", &tmp)) { PyErr_SetString( PyExc_TypeError, "Function set_defpasswd() expects a string or None as argument"); return NULL; } if (tmp) { pg_default_passwd = PyStr_FromString(tmp); } else { Py_INCREF(Py_None); pg_default_passwd = Py_None; } Py_INCREF(Py_None); return Py_None; } /* Get default port. */ static char pg_get_defport__doc__[] = "get_defport() -- return default database port"; static PyObject * pg_get_defport(PyObject *self, PyObject *noargs) { Py_XINCREF(pg_default_port); return pg_default_port; } /* Set default port. 
*/ static char pg_set_defport__doc__[] = "set_defport(port) -- set default port and return previous value"; static PyObject * pg_set_defport(PyObject *self, PyObject *args) { long int port = -2; PyObject *old; /* gets arguments */ if ((!PyArg_ParseTuple(args, "l", &port)) || (port < -1)) { PyErr_SetString(PyExc_TypeError, "Function set_deport expects" " a positive integer or -1 as argument"); return NULL; } /* adjusts value */ old = pg_default_port; if (port != -1) { pg_default_port = PyInt_FromLong(port); } else { Py_INCREF(Py_None); pg_default_port = Py_None; } return old; } #endif /* DEFAULT_VARS */ /* Cast a string with a text representation of an array to a list. */ static char pg_cast_array__doc__[] = "cast_array(string, cast=None, delim=',') -- cast a string as an array"; PyObject * pg_cast_array(PyObject *self, PyObject *args, PyObject *dict) { static const char *kwlist[] = {"string", "cast", "delim", NULL}; PyObject *string_obj, *cast_obj = NULL, *ret; char *string, delim = ','; Py_ssize_t size; int encoding; if (!PyArg_ParseTupleAndKeywords( args, dict, "O|Oc", (char**) kwlist, &string_obj, &cast_obj, &delim)) { return NULL; } if (PyBytes_Check(string_obj)) { PyBytes_AsStringAndSize(string_obj, &string, &size); string_obj = NULL; encoding = pg_encoding_ascii; } else if (PyUnicode_Check(string_obj)) { string_obj = PyUnicode_AsUTF8String(string_obj); if (!string_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(string_obj, &string, &size); encoding = pg_encoding_utf8; } else { PyErr_SetString( PyExc_TypeError, "Function cast_array() expects a string as first argument"); return NULL; } if (!cast_obj || cast_obj == Py_None) { if (cast_obj) { Py_DECREF(cast_obj); cast_obj = NULL; } } else if (!PyCallable_Check(cast_obj)) { PyErr_SetString( PyExc_TypeError, "Function cast_array() expects a callable as second argument"); return NULL; } ret = cast_array(string, size, encoding, 0, cast_obj, delim); Py_XDECREF(string_obj); return ret; } /* 
Cast a string with a text representation of a record to a tuple. */ static char pg_cast_record__doc__[] = "cast_record(string, cast=None, delim=',') -- cast a string as a record"; PyObject * pg_cast_record(PyObject *self, PyObject *args, PyObject *dict) { static const char *kwlist[] = {"string", "cast", "delim", NULL}; PyObject *string_obj, *cast_obj = NULL, *ret; char *string, delim = ','; Py_ssize_t size, len; int encoding; if (!PyArg_ParseTupleAndKeywords( args, dict, "O|Oc", (char**) kwlist, &string_obj, &cast_obj, &delim)) { return NULL; } if (PyBytes_Check(string_obj)) { PyBytes_AsStringAndSize(string_obj, &string, &size); string_obj = NULL; encoding = pg_encoding_ascii; } else if (PyUnicode_Check(string_obj)) { string_obj = PyUnicode_AsUTF8String(string_obj); if (!string_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(string_obj, &string, &size); encoding = pg_encoding_utf8; } else { PyErr_SetString( PyExc_TypeError, "Function cast_record() expects a string as first argument"); return NULL; } if (!cast_obj || PyCallable_Check(cast_obj)) { len = 0; } else if (cast_obj == Py_None) { Py_DECREF(cast_obj); cast_obj = NULL; len = 0; } else if (PyTuple_Check(cast_obj) || PyList_Check(cast_obj)) { len = PySequence_Size(cast_obj); if (!len) { Py_DECREF(cast_obj); cast_obj = NULL; } } else { PyErr_SetString(PyExc_TypeError, "Function cast_record() expects a callable" " or tuple or list of callables as second argument"); return NULL; } ret = cast_record(string, size, encoding, 0, cast_obj, len, delim); Py_XDECREF(string_obj); return ret; } /* Cast a string with a text representation of an hstore to a dict. 
*/

/* Module-level cast_hstore(string): parse an hstore text literal via the
   internal cast_hstore() helper.  Accepts bytes (treated as ASCII) or
   unicode (encoded to UTF-8) input. */
static char pg_cast_hstore__doc__[] =
"cast_hstore(string) -- cast a string as an hstore";

PyObject *
pg_cast_hstore(PyObject *self, PyObject *string)
{
    PyObject *tmp_obj = NULL, *ret;
    char *s;
    Py_ssize_t size;
    int encoding;

    if (PyBytes_Check(string)) {
        PyBytes_AsStringAndSize(string, &s, &size);
        encoding = pg_encoding_ascii;
    }
    else if (PyUnicode_Check(string)) {
        /* tmp_obj keeps the encoded bytes alive while `s` points into it */
        tmp_obj = PyUnicode_AsUTF8String(string);
        if (!tmp_obj) return NULL;  /* pass the UnicodeEncodeError */
        PyBytes_AsStringAndSize(tmp_obj, &s, &size);
        encoding = pg_encoding_utf8;
    }
    else {
        PyErr_SetString(
            PyExc_TypeError,
            "Function cast_hstore() expects a string as first argument");
        return NULL;
    }

    ret = cast_hstore(s, size, encoding);

    Py_XDECREF(tmp_obj);

    return ret;
}

/* The list of functions defined in the module */

static struct PyMethodDef pg_methods[] = {
    {"connect", (PyCFunction) pg_connect,
        METH_VARARGS|METH_KEYWORDS, pg_connect__doc__},
    {"escape_string", (PyCFunction) pg_escape_string,
        METH_O, pg_escape_string__doc__},
    {"escape_bytea", (PyCFunction) pg_escape_bytea,
        METH_O, pg_escape_bytea__doc__},
    {"unescape_bytea", (PyCFunction) pg_unescape_bytea,
        METH_O, pg_unescape_bytea__doc__},
    {"get_datestyle", (PyCFunction) pg_get_datestyle,
        METH_NOARGS, pg_get_datestyle__doc__},
    {"set_datestyle", (PyCFunction) pg_set_datestyle,
        METH_VARARGS, pg_set_datestyle__doc__},
    {"get_decimal_point", (PyCFunction) pg_get_decimal_point,
        METH_NOARGS, pg_get_decimal_point__doc__},
    {"set_decimal_point", (PyCFunction) pg_set_decimal_point,
        METH_VARARGS, pg_set_decimal_point__doc__},
    {"get_decimal", (PyCFunction) pg_get_decimal,
        METH_NOARGS, pg_get_decimal__doc__},
    {"set_decimal", (PyCFunction) pg_set_decimal,
        METH_O, pg_set_decimal__doc__},
    {"get_bool", (PyCFunction) pg_get_bool,
        METH_NOARGS, pg_get_bool__doc__},
    {"set_bool", (PyCFunction) pg_set_bool,
        METH_VARARGS, pg_set_bool__doc__},
    {"get_array", (PyCFunction) pg_get_array,
        METH_NOARGS, pg_get_array__doc__},
    {"set_array", (PyCFunction) pg_set_array,
        METH_VARARGS,
pg_set_array__doc__},
    {"set_query_helpers", (PyCFunction) pg_set_query_helpers,
        METH_VARARGS, pg_set_query_helpers__doc__},
    {"get_bytea_escaped", (PyCFunction) pg_get_bytea_escaped,
        METH_NOARGS, pg_get_bytea_escaped__doc__},
    {"set_bytea_escaped", (PyCFunction) pg_set_bytea_escaped,
        METH_VARARGS, pg_set_bytea_escaped__doc__},
    {"get_jsondecode", (PyCFunction) pg_get_jsondecode,
        METH_NOARGS, pg_get_jsondecode__doc__},
    {"set_jsondecode", (PyCFunction) pg_set_jsondecode,
        METH_O, pg_set_jsondecode__doc__},
    {"cast_array", (PyCFunction) pg_cast_array,
        METH_VARARGS|METH_KEYWORDS, pg_cast_array__doc__},
    {"cast_record", (PyCFunction) pg_cast_record,
        METH_VARARGS|METH_KEYWORDS, pg_cast_record__doc__},
    {"cast_hstore", (PyCFunction) pg_cast_hstore,
        METH_O, pg_cast_hstore__doc__},

#ifdef DEFAULT_VARS
    /* default-value accessors; note "set_defopt" is backed by the
       C function pg_setdefopt (no underscore in the C name) */
    {"get_defhost", pg_get_defhost, METH_NOARGS, pg_get_defhost__doc__},
    {"set_defhost", pg_set_defhost, METH_VARARGS, pg_set_defhost__doc__},
    {"get_defbase", pg_get_defbase, METH_NOARGS, pg_get_defbase__doc__},
    {"set_defbase", pg_set_defbase, METH_VARARGS, pg_set_defbase__doc__},
    {"get_defopt", pg_get_defopt, METH_NOARGS, pg_get_defopt__doc__},
    {"set_defopt", pg_setdefopt, METH_VARARGS, pg_set_defopt__doc__},
    {"get_defport", pg_get_defport, METH_NOARGS, pg_get_defport__doc__},
    {"set_defport", pg_set_defport, METH_VARARGS, pg_set_defport__doc__},
    {"get_defuser", pg_get_defuser, METH_NOARGS, pg_get_defuser__doc__},
    {"set_defuser", pg_set_defuser, METH_VARARGS, pg_set_defuser__doc__},
    {"set_defpasswd", pg_set_defpasswd, METH_VARARGS, pg_set_defpasswd__doc__},
#endif /* DEFAULT_VARS */
    {NULL, NULL} /* sentinel */
};

static char pg__doc__[] = "Python interface to PostgreSQL DB";

static struct PyModuleDef moduleDef = {
    PyModuleDef_HEAD_INIT,
    "_pg",     /* m_name */
    pg__doc__, /* m_doc */
    -1,        /* m_size */
    pg_methods /* m_methods */
};

/* Initialization function for the module */
MODULE_INIT_FUNC(_pg)
{
    PyObject *mod, *dict, *s;

    /* Create the module and add the functions */
    mod =
PyModule_Create(&moduleDef);

    /* Initialize here because some Windows platforms get confused otherwise */
#if IS_PY3
    connType.tp_base = noticeType.tp_base =
        queryType.tp_base = sourceType.tp_base = &PyBaseObject_Type;
#ifdef LARGE_OBJECTS
    largeType.tp_base = &PyBaseObject_Type;
#endif
#else
    connType.ob_type = noticeType.ob_type =
        queryType.ob_type = sourceType.ob_type = &PyType_Type;
#ifdef LARGE_OBJECTS
    largeType.ob_type = &PyType_Type;
#endif
#endif

    /* finalize all extension types; bail out of module init on failure */
    if (PyType_Ready(&connType)
        || PyType_Ready(&noticeType)
        || PyType_Ready(&queryType)
        || PyType_Ready(&sourceType)
#ifdef LARGE_OBJECTS
        || PyType_Ready(&largeType)
#endif
        )
    {
        return NULL;
    }

    dict = PyModule_GetDict(mod);

    /* Exceptions as defined by DB-API 2.0
       NOTE(review): PyErr_NewException results are not NULL-checked here;
       the PyErr_Occurred() check at the end of this function is relied on
       to catch any failure during initialization. */
    Error = PyErr_NewException("pg.Error", PyExc_Exception, NULL);
    PyDict_SetItemString(dict, "Error", Error);

    Warning = PyErr_NewException("pg.Warning", PyExc_Exception, NULL);
    PyDict_SetItemString(dict, "Warning", Warning);

    InterfaceError = PyErr_NewException(
        "pg.InterfaceError", Error, NULL);
    PyDict_SetItemString(dict, "InterfaceError", InterfaceError);

    DatabaseError = PyErr_NewException(
        "pg.DatabaseError", Error, NULL);
    PyDict_SetItemString(dict, "DatabaseError", DatabaseError);

    InternalError = PyErr_NewException(
        "pg.InternalError", DatabaseError, NULL);
    PyDict_SetItemString(dict, "InternalError", InternalError);

    OperationalError = PyErr_NewException(
        "pg.OperationalError", DatabaseError, NULL);
    PyDict_SetItemString(dict, "OperationalError", OperationalError);

    ProgrammingError = PyErr_NewException(
        "pg.ProgrammingError", DatabaseError, NULL);
    PyDict_SetItemString(dict, "ProgrammingError", ProgrammingError);

    IntegrityError = PyErr_NewException(
        "pg.IntegrityError", DatabaseError, NULL);
    PyDict_SetItemString(dict, "IntegrityError", IntegrityError);

    DataError = PyErr_NewException(
        "pg.DataError", DatabaseError, NULL);
    PyDict_SetItemString(dict, "DataError", DataError);

    NotSupportedError = PyErr_NewException(
        "pg.NotSupportedError", DatabaseError, NULL);
    PyDict_SetItemString(dict, "NotSupportedError", NotSupportedError);

    /* PyGreSQL-specific errors below the DB-API hierarchy */
    InvalidResultError = PyErr_NewException(
        "pg.InvalidResultError", DataError, NULL);
    PyDict_SetItemString(dict, "InvalidResultError", InvalidResultError);

    NoResultError = PyErr_NewException(
        "pg.NoResultError", InvalidResultError, NULL);
    PyDict_SetItemString(dict, "NoResultError", NoResultError);

    MultipleResultsError = PyErr_NewException(
        "pg.MultipleResultsError", InvalidResultError, NULL);
    PyDict_SetItemString(dict, "MultipleResultsError", MultipleResultsError);

    /* Make the version available */
    s = PyStr_FromString(PyPgVersion);
    PyDict_SetItemString(dict, "version", s);
    PyDict_SetItemString(dict, "__version__", s);
    Py_DECREF(s);

    /* Result types for queries */
    PyDict_SetItemString(dict, "RESULT_EMPTY", PyInt_FromLong(RESULT_EMPTY));
    PyDict_SetItemString(dict, "RESULT_DML", PyInt_FromLong(RESULT_DML));
    PyDict_SetItemString(dict, "RESULT_DDL", PyInt_FromLong(RESULT_DDL));
    PyDict_SetItemString(dict, "RESULT_DQL", PyInt_FromLong(RESULT_DQL));

    /* Transaction states */
    PyDict_SetItemString(dict,"TRANS_IDLE",PyInt_FromLong(PQTRANS_IDLE));
    PyDict_SetItemString(dict,"TRANS_ACTIVE",PyInt_FromLong(PQTRANS_ACTIVE));
    PyDict_SetItemString(dict,"TRANS_INTRANS",PyInt_FromLong(PQTRANS_INTRANS));
    PyDict_SetItemString(dict,"TRANS_INERROR",PyInt_FromLong(PQTRANS_INERROR));
    PyDict_SetItemString(dict,"TRANS_UNKNOWN",PyInt_FromLong(PQTRANS_UNKNOWN));

#ifdef LARGE_OBJECTS
    /* Create mode for large objects */
    PyDict_SetItemString(dict, "INV_READ", PyInt_FromLong(INV_READ));
    PyDict_SetItemString(dict, "INV_WRITE", PyInt_FromLong(INV_WRITE));

    /* Position flags for lo_lseek */
    PyDict_SetItemString(dict, "SEEK_SET", PyInt_FromLong(SEEK_SET));
    PyDict_SetItemString(dict, "SEEK_CUR", PyInt_FromLong(SEEK_CUR));
    PyDict_SetItemString(dict, "SEEK_END", PyInt_FromLong(SEEK_END));
#endif /* LARGE_OBJECTS */

#ifdef DEFAULT_VARS
    /* Prepare default values: all connection defaults start out as None */
    Py_INCREF(Py_None);
    pg_default_host = Py_None;
    Py_INCREF(Py_None);
    pg_default_base = Py_None;
    Py_INCREF(Py_None);
    pg_default_opt = Py_None;
    Py_INCREF(Py_None);
    pg_default_port = Py_None;
    Py_INCREF(Py_None);
    pg_default_user = Py_None;
    Py_INCREF(Py_None);
    pg_default_passwd = Py_None;
#endif /* DEFAULT_VARS */

    /* Store common pg encoding ids */
    pg_encoding_utf8 = pg_char_to_encoding("UTF8");
    pg_encoding_latin1 = pg_char_to_encoding("LATIN1");
    pg_encoding_ascii = pg_char_to_encoding("SQL_ASCII");

    /* Check for errors */
    if (PyErr_Occurred()) {
        return NULL;
    }

    return mod;
}
PyGreSQL-5.1/MANIFEST.in0000644000175100077410000000130713470244250014457 0ustar darcypyg00000000000000 include pgconn.c include pginternal.c include pglarge.c include pgmodule.c include pgnotice.c include pgquery.c include pgsource.c include pgtypes.h include py3c.h include pg.py include pgdb.py include setup.py include setup.cfg include README.rst include LICENSE.txt recursive-include tests *.py include docs/Makefile include docs/make.bat include docs/*.py include docs/*.rst include docs/*.txt exclude docs/index.rst recursive-include docs/community *.rst recursive-include docs/contents *.rst recursive-include docs/download *.rst recursive-include docs/_static *.css_t *.ico *.png recursive-include docs/_templates *.html recursive-include docs/_build/html *.css *.gif *.html *.ico *.js *.png *.txt