Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
Browse files

Merge branch 'copy-refcount-bug' into devel

  • Loading branch information...
commit e9a485e30bf74c598536417819f196476e0dc406 2 parents dd7ee70 + b6e710b
@dvarrazzo authored
Showing with 99 additions and 19 deletions.
  1. +2 −0  NEWS
  2. +22 −19 psycopg/cursor_type.c
  3. +75 −0 scripts/ticket58.py
View
2  NEWS
@@ -13,6 +13,8 @@ What's new in psycopg 2.4.2
support was built (ticket #53).
- Fixed escape for negative numbers prefixed by minus operator
(ticket #57).
+ - Fixed refcount issue during copy. Reported and fixed by Dave
+ Malcolm (ticket #58, Red Hat Bug 711095).
- Trying to execute concurrent operations on the same connection
through concurrent green thread results in an error instead of a
deadlock.
View
41 psycopg/cursor_type.c
@@ -1220,8 +1220,12 @@ _psyco_curs_has_read_check(PyObject* o, void* var)
{
if (PyObject_HasAttrString(o, "readline")
&& PyObject_HasAttrString(o, "read")) {
- /* It's OK to store a borrowed reference, because it is only held for
- * the duration of psyco_curs_copy_from. */
+ /* This routine stores a borrowed reference. Although it is only held
+ * for the duration of psyco_curs_copy_from, nested invocations of
+ * Py_BEGIN_ALLOW_THREADS could surrender control to another thread,
+ * which could invoke the garbage collector. We thus need an
+ * INCREF/DECREF pair if we store this pointer in a GC object, such as
+ * a cursorObject */
*((PyObject**)var) = o;
return 1;
}
@@ -1311,6 +1315,7 @@ psyco_curs_copy_from(cursorObject *self, PyObject *args, PyObject *kwargs)
Dprintf("psyco_curs_copy_from: query = %s", query);
self->copysize = bufsize;
+ Py_INCREF(file);
self->copyfile = file;
if (pq_execute(self, query, 0) == 1) {
@@ -1319,6 +1324,7 @@ psyco_curs_copy_from(cursorObject *self, PyObject *args, PyObject *kwargs)
}
self->copyfile = NULL;
+ Py_DECREF(file);
exit:
PyMem_Free(quoted_delimiter);
@@ -1337,8 +1343,6 @@ static int
_psyco_curs_has_write_check(PyObject* o, void* var)
{
if (PyObject_HasAttrString(o, "write")) {
- /* It's OK to store a borrowed reference, because it is only held for
- * the duration of psyco_curs_copy_to. */
*((PyObject**)var) = o;
return 1;
}
@@ -1424,12 +1428,15 @@ psyco_curs_copy_to(cursorObject *self, PyObject *args, PyObject *kwargs)
Dprintf("psyco_curs_copy_to: query = %s", query);
self->copysize = 0;
+ Py_INCREF(file);
self->copyfile = file;
if (pq_execute(self, query, 0) == 1) {
res = Py_None;
Py_INCREF(Py_None);
}
+
+ Py_DECREF(file);
self->copyfile = NULL;
exit:
@@ -1471,18 +1478,18 @@ psyco_curs_copy_expert(cursorObject *self, PyObject *args, PyObject *kwargs)
EXC_IF_TPC_PREPARED(self->conn, copy_expert);
sql = _psyco_curs_validate_sql_basic(self, sql);
-
- /* Any failure from here forward should 'goto fail' rather than
+
+ /* Any failure from here forward should 'goto exit' rather than
'return NULL' directly. */
-
- if (sql == NULL) { goto fail; }
+
+ if (sql == NULL) { goto exit; }
/* This validation of file is rather weak, in that it doesn't enforce the
association between "COPY FROM" -> "read" and "COPY TO" -> "write".
However, the error handling in _pq_copy_[in|out] must be able to handle
the case where the attempt to call file.read|write fails, so no harm
done. */
-
+
if ( !PyObject_HasAttrString(file, "read")
&& !PyObject_HasAttrString(file, "write")
)
@@ -1490,26 +1497,22 @@ psyco_curs_copy_expert(cursorObject *self, PyObject *args, PyObject *kwargs)
PyErr_SetString(PyExc_TypeError, "file must be a readable file-like"
" object for COPY FROM; a writeable file-like object for COPY TO."
);
- goto fail;
+ goto exit;
}
self->copysize = bufsize;
+ Py_INCREF(file);
self->copyfile = file;
/* At this point, the SQL statement must be str, not unicode */
- if (pq_execute(self, Bytes_AS_STRING(sql), 0) != 1) { goto fail; }
+ if (pq_execute(self, Bytes_AS_STRING(sql), 0) != 1) { goto exit; }
res = Py_None;
Py_INCREF(res);
- goto cleanup;
- fail:
- if (res != NULL) {
- Py_DECREF(res);
- res = NULL;
- }
- /* Fall through to cleanup */
- cleanup:
+
+exit:
self->copyfile = NULL;
+ Py_XDECREF(file);
Py_XDECREF(sql);
return res;
View
75 scripts/ticket58.py
@@ -0,0 +1,75 @@
+"""
+A script to reproduce the race condition described in ticket #58
+
+from https://bugzilla.redhat.com/show_bug.cgi?id=711095
+
+Results in the error:
+
+ python: Modules/gcmodule.c:277: visit_decref: Assertion `gc->gc.gc_refs != 0'
+ failed.
+
+on unpatched library.
+"""
+
+import threading
+import gc
+import time
+
+import psycopg2
+from StringIO import StringIO
+
+done = 0
+
+class GCThread(threading.Thread):
+ # A thread that sits in an infinite loop, forcing the garbage collector
+ # to run
+ def run(self):
+ global done
+ while not done:
+ gc.collect()
+ time.sleep(0.1) # give the other thread a chance to run
+
+gc_thread = GCThread()
+
+
+# This assumes a pre-existing db named "test", with:
+# "CREATE TABLE test (id serial PRIMARY KEY, num integer, data varchar);"
+
+conn = psycopg2.connect("dbname=test user=postgres")
+cur = conn.cursor()
+
+# Start the other thread, running the GC regularly
+gc_thread.start()
+
+# Now do lots of "cursor.copy_from" calls:
+print "copy_from"
+for i in range(1000):
+ f = StringIO("42\tfoo\n74\tbar\n")
+ cur.copy_from(f, 'test', columns=('num', 'data'))
+ # Assuming the other thread gets a chance to run during this call, expect a
+ # build of python (with assertions enabled) to bail out here with:
+ # python: Modules/gcmodule.c:277: visit_decref: Assertion `gc->gc.gc_refs != 0' failed.
+
+# Also exercise the copy_to code path
+print "copy_to"
+cur.execute("truncate test")
+f = StringIO("42\tfoo\n74\tbar\n")
+cur.copy_from(f, 'test', columns=('num', 'data'))
+for i in range(1000):
+ f = StringIO()
+ cur.copy_to(f, 'test', columns=('num', 'data'))
+
+# And copy_expert too
+print "copy_expert"
+cur.execute("truncate test")
+for i in range(1000):
+ f = StringIO("42\tfoo\n74\tbar\n")
+ cur.copy_expert("copy test to stdout", f)
+
+# Terminate the GC thread's loop:
+done = 1
+
+cur.close()
+conn.close()
+
+
Please sign in to comment.
Something went wrong with that request. Please try again.