Index: tests/harness/testsuite.h
===================================================================
--- tests/harness/testsuite.h	(revision 8672)
+++ tests/harness/testsuite.h	(working copy)
@@ -74,6 +74,11 @@
 //  needless generation of diagnostic output in cases when it's expensive.
 extern bool verbose;
 
+/// A global flag which a test should set if it knows that it's going to fail
+/// in a particular situation.  Known failures will not cause the test suite to
+/// fail, but will be listed at the end of the test run.
+extern bool global_known_failure;
+
 /// The exception type we were expecting in TEST_EXCEPTION.
 //  Used to detect if such an exception was mishandled by the
 //  compiler/runtime.
@@ -97,8 +102,14 @@
 	    /// The number of tests which failed.
 	    unsigned int failed;
 
-	    /// The number of tests which were skipped
+	    /// The number of tests which were skipped.
 	    unsigned int skipped;
+
+	    /// The number of known failures which occurred.
+	    unsigned int known_failures;
+
+	    /// The number of tests marked as known failures which failed to fail.
+	    unsigned int known_failures_passed;
 	};
 
 	/** Add a test-specific command line option.
@@ -154,7 +165,7 @@
 	test_driver(const test_driver &);
 	test_driver & operator = (const test_driver &);
 
-	typedef enum { PASS = 1, FAIL = 0, SKIP = -1 } test_result;
+	typedef enum { KNOWN_FAIL_PASSED = 3, KNOWN_FAIL = 2, PASS = 1, FAIL = 0, SKIP = -1 } test_result;
 
 	static std::map<int, std::string *> short_opts;
 
@@ -250,4 +261,12 @@
 	"Expected `"STRINGIZE(a)"' and `"STRINGIZE(b)"' not to be equal:" \
 	" were " << (a) << " and " << (b))
 
+/// Mark a test as known-to-fail.  This is intended to be used temporarily to
+/// mark tests for known bugs before the bugs are fixed.  The test failure will
+/// be displayed as a known failure, and not cause the test suite to return a
+/// failure code.  It should be called from inside the testcase.  If the case
+/// is only known to fail in certain situations (for example, only for some
+/// backends), the macro can be called only in those situations.
+#define KNOWN_FAILURE do { global_known_failure = true; } while (0)
+
 #endif // OM_HGUARD_TESTSUITE_H
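
For illustration, a testcase using the new macro might look like the sketch
below (the test name and the hypothetical_answer() helper are made up for this
example; TEST_EQUAL is the harness's existing equality macro):

    // Hypothetical testcase for a known bug: the failing check below is
    // reported as "KNOWN FAILURE" instead of failing the whole suite.
    static bool test_somebug1()
    {
        KNOWN_FAILURE;
        // hypothetical_answer() currently returns the wrong value.  Once
        // the bug is fixed, the KNOWN_FAILURE call must be removed, or the
        // now-passing test is reported as "KNOWN FAILURE PASSED" and makes
        // the suite fail.
        TEST_EQUAL(hypothetical_answer(), 42);
        return true;
    }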
Index: tests/harness/testsuite.cc
===================================================================
--- tests/harness/testsuite.cc	(revision 8672)
+++ tests/harness/testsuite.cc	(working copy)
@@ -64,6 +64,11 @@
 /// The global verbose flag.
 bool verbose;
 
+/// A global flag which a test should set if it knows that it's going to fail
+/// in a particular situation.  Known failures will not cause the test suite to
+/// fail, but will be listed at the end of the test run.
+bool global_known_failure;
+
 #ifdef HAVE_VALGRIND
 static int vg_log_fd = -1;
 #endif
@@ -78,7 +83,7 @@
 om_ostringstream tout;
 
 int test_driver::runs = 0;
-test_driver::result test_driver::total = {0, 0, 0};
+test_driver::result test_driver::total = {0, 0, 0, 0, 0};
 string test_driver::argv0;
 string test_driver::opt_help;
 map<int, string *> test_driver::short_opts;
@@ -198,7 +203,8 @@
 //  If this test driver is used for anything other than
 //  Xapian tests, then this ought to be provided by
 //  the client, really.
-//  return: test_driver::PASS, test_driver::FAIL, or test_driver::SKIP
+//  return: test_driver::PASS, test_driver::FAIL, test_driver::SKIP,
+//  test_driver::KNOWN_FAIL or test_driver::KNOWN_FAIL_PASSED
 test_driver::test_result
 test_driver::runtest(const test_desc *test)
 {
@@ -339,8 +345,13 @@
 		    if (s[s.size() - 1] != '\n') out << endl;
 		    tout.str("");
 		}
-		out << " " << col_red << "FAILED" << col_reset;
-		return FAIL;
+		if (global_known_failure) {
+		    out << " " << col_yellow << "KNOWN FAILURE" << col_reset;
+		    return KNOWN_FAIL;
+		} else {
+		    out << " " << col_red << "FAILED" << col_reset;
+		    return FAIL;
+		}
 	    } catch (const TestSkip &) {
 		string s = tout.str();
 		if (!s.empty()) {
@@ -362,7 +373,11 @@
 		    if (s[s.size() - 1] != '\n') out << endl;
 		    tout.str("");
 		}
-		out << " " << col_red << errclass << col_reset;
+		if (global_known_failure) {
+		    out << " " << col_yellow << "KNOWN FAILURE: " << errclass << col_reset;
+		} else {
+		    out << " " << col_red << errclass << col_reset;
+		}
 		if (verbose) {
 		    out << err.get_type() << " exception: " << err.get_msg();
 		    if (!err.get_context().empty())
@@ -371,6 +386,9 @@
 			out << " (" << err.get_error_string() << ")";
 		    out << endl;
 		}
+		if (global_known_failure) {
+		    return KNOWN_FAIL;
+		}
 		return FAIL;
 	    } catch (const string & msg) {
 		string s = tout.str();
@@ -379,7 +397,11 @@
 		    if (s[s.size() - 1] != '\n') out << endl;
 		    tout.str("");
 		}
-		out << " " << col_red << "EXCEPTION: ";
+		if (global_known_failure) {
+		    out << " " << col_yellow << "KNOWN FAILURE: EXCEPTION: " << col_reset;
+		} else {
+		    out << " " << col_red << "EXCEPTION: ";
+		}
 		size_t cutoff = min(size_t(40), msg.size());
 		cutoff = find(msg.begin(), msg.begin() + cutoff, '\n') - msg.begin();
 		if (cutoff == msg.size()) out << msg; else out << msg.substr(0, cutoff) << "...";
@@ -387,6 +409,9 @@
 		if (verbose && cutoff != msg.size()) {
 		    out << msg << endl;
 		}
+		if (global_known_failure) {
+		    return KNOWN_FAIL;
+		}
 		return FAIL;
 	    } catch (...) {
 		string s = tout.str();
@@ -395,8 +420,13 @@
 		    if (s[s.size() - 1] != '\n') out << endl;
 		    tout.str("");
 		}
-		out << " " << col_red << "UNKNOWN EXCEPTION" << col_reset;
-		return FAIL;
+		if (global_known_failure) {
+		    out << " " << col_yellow << "KNOWN FAILURE: UNKNOWN EXCEPTION" << col_reset;
+		    return KNOWN_FAIL;
+		} else {
+		    out << " " << col_red << "UNKNOWN EXCEPTION" << col_reset;
+		    return FAIL;
+		}
 	    }
 	} else {
 	    // caught signal
@@ -418,9 +448,18 @@
 		case SIGSTKFLT: signame = "SIGSTKFLT"; break;
 #endif
 	    }
-	    out << " " << col_red << signame << col_reset;
-	    return FAIL;
+	    if (global_known_failure) {
+		out << " " << col_yellow << "KNOWN FAILURE: " << signame << col_reset;
+		return KNOWN_FAIL;
+	    } else {
+		out << " " << col_red << signame << col_reset;
+		return FAIL;
+	    }
 	}
+	if (global_known_failure) {
+	    out << " " << col_red << "KNOWN FAILURE PASSED" << col_reset;
+	    return KNOWN_FAIL_PASSED;
+	}
 	return PASS;
     }
 }
@@ -446,7 +485,7 @@
     set<string> m(b, e);
     bool check_name = !m.empty();
 
-    test_driver::result res = {0, 0, 0};
+    test_driver::result res = {0, 0, 0, 0, 0};
 
     for (const test_desc *test = tests; test->name; test++) {
 	bool do_this_test = !check_name;
@@ -466,6 +505,7 @@
 	if (do_this_test) {
 	    out << "Running test: " << test->name << "...";
 	    out.flush();
+	    global_known_failure = false;
 	    switch (runtest(test)) {
 		case PASS:
 		    ++res.succeeded;
@@ -475,6 +515,10 @@
 			out << "\r                                                                               \r";
 		    }
 		    break;
+		case KNOWN_FAIL:
+		    ++res.known_failures;
+		    out << endl;
+		    break;
 		case FAIL:
 		    ++res.failed;
 		    out << endl;
@@ -483,6 +527,14 @@
 			return res;
 		    }
 		    break;
+		case KNOWN_FAIL_PASSED:
+		    ++res.known_failures_passed;
+		    out << endl;
+		    if (abort_on_error) {
+			out << "Test passed, but marked as a known failure - aborting further tests." << endl;
+			return res;
+		    }
+		    break;
 		case SKIP:
 		    ++res.skipped;
 		    out << endl;
@@ -509,7 +561,7 @@
     if (r.succeeded != 0 || r.failed != 0) {
 	cout << argv0 << " " << desc << ": ";
 
-	if (r.failed == 0)
+	if (r.failed == 0 && r.known_failures_passed == 0)
 	    cout << "All ";
 
 	cout << col_green << r.succeeded << col_reset << " tests passed";
@@ -517,6 +569,12 @@
 	if (r.failed != 0)
 	    cout << ", " << col_red << r.failed << col_reset << " failed";
 
+	if (r.known_failures_passed != 0)
+	    cout << ", " << col_red << r.known_failures_passed << col_reset << " known failures passed";
+
+	if (r.known_failures != 0)
+	    cout << ", " << col_yellow << r.known_failures << col_reset << " known failures";
+
 	if (r.skipped) {
 	    cout << ", " << col_yellow << r.skipped << col_reset
 		 << " skipped." << endl;
@@ -655,8 +713,10 @@
     total.succeeded += myresult.succeeded;
     total.failed += myresult.failed;
     total.skipped += myresult.skipped;
+    total.known_failures += myresult.known_failures;
+    total.known_failures_passed += myresult.known_failures_passed;
 
-    return bool(myresult.failed); // if 0, then everything passed
+    return bool(myresult.failed || myresult.known_failures_passed); // if 0, then everything passed
 }
 
 bool
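
As the header comment notes, a test which only fails in certain situations can
call the macro just in those situations.  A sketch of backend-conditional use
(get_dbtype() is assumed to be the usual apitest helper naming the current
backend; the check itself is hypothetical):

    // Hypothetical: mark the test as a known failure only for the backend
    // where the bug manifests; other backends must still genuinely pass.
    static bool test_somebug2()
    {
        if (get_dbtype() == "inmemory") {
            // The bug is only known to affect the inmemory backend.
            KNOWN_FAILURE;
        }
        TEST(run_hypothetical_check());
        return true;
    }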
Index: tests/queryparsertest.cc
===================================================================
--- tests/queryparsertest.cc	(revision 8674)
+++ tests/queryparsertest.cc	(working copy)
@@ -1062,10 +1062,15 @@
 // Test NumberValueRangeProcessors with actual data.
 static bool test_qp_value_range3()
 {
+    // This test currently fails because the numeric range stuff doesn't yet
+    // marshall the numbers into a representation which sorts in string order
+    // correctly.
+    KNOWN_FAILURE;
+
     Xapian::WritableDatabase db(Xapian::InMemory::open());
     int low = 0;  // FIXME - should it work with negative numbers?
                   // If so, test it with some by setting low to -10
-    int high = 9; // Currently the test passes if high is 9, but not if it is 10.
+    int high = 10;
 
     for (int i = low; i <= high; ++i) {
 	Xapian::Document doc;
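
With the harness changes above, a run containing this testcase would report
along these lines (illustrative output with made-up counts; the summary line
is additionally prefixed with the binary name and run description):

    Running test: qp_value_range3... KNOWN FAILURE
    ...
    All 40 tests passed, 1 known failures, 1 skipped.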
Index: HACKING
===================================================================
--- HACKING	(revision 8672)
+++ HACKING	(working copy)
@@ -856,6 +856,17 @@
    changes stop it working as intended.
  * If you've fixed a bug, make sure there's a regression test which
    fails on the existing code and succeeds after your changes.
+ * If you're adding a new testcase to exhibit an existing bug, and not checking
+   a fix in at the same time, mark the testcase as a known failure (by calling
+   the macro "KNOWN_FAILURE" somewhere in your testcase), so that the build
+   continues to succeed.  This allows the automated build systems to continue
+   to work, whilst displaying the error to developers.  Fixing the bug is then
+   a priority - we can't generally make a release while there are known
+   failures.  Note that failures due to valgrind finding memory errors are not
+   suppressed by this macro: if they were, such a test would pass for users
+   without valgrind, and a known failure which passes makes the testsuite
+   fail.  Also, test failures which only show up under valgrind won't cause
+   problems for the automated builds, which don't currently use valgrind.
  * Make sure all existing tests continue to pass.
 
 If you don't know how to write tests using the Xapian test rig, then
