Convert test scripts toward Python 3 compatibility (print function, range)
diff --git a/test/generate_expected.py b/test/generate_expected.py
index 5b215c4..f668da2 100644
--- a/test/generate_expected.py
+++ b/test/generate_expected.py
@@ -1,11 +1,12 @@
+from __future__ import print_function
 import glob
 import os.path
 for path in glob.glob( '*.json' ):
-    text = file(path,'rt').read()
+    text = open(path,'rt').read()
     target = os.path.splitext(path)[0] + '.expected'
     if os.path.exists( target ):
-        print 'skipping:', target
+        print('skipping:', target)
     else:
-        print 'creating:', target
+        print('creating:', target)
-        file(target,'wt').write(text)
+        open(target,'wt').write(text)
 
diff --git a/test/pyjsontestrunner.py b/test/pyjsontestrunner.py
index 504f3db..3f08a8a 100644
--- a/test/pyjsontestrunner.py
+++ b/test/pyjsontestrunner.py
@@ -1,12 +1,12 @@
 # Simple implementation of a json test runner to run the test against json-py.
-
+from __future__ import print_function
 import sys
 import os.path
 import json
 import types
 
 if len(sys.argv) != 2:
-    print "Usage: %s input-json-file", sys.argv[0]
+    print("Usage: %s input-json-file" % sys.argv[0])
     sys.exit(3)
     
 input_path = sys.argv[1]
diff --git a/test/runjsontests.py b/test/runjsontests.py
index 5fca75a..a1f6082 100644
--- a/test/runjsontests.py
+++ b/test/runjsontests.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 import sys
 import os
 import os.path
@@ -11,7 +12,7 @@
     actual = actual.strip().replace('\r','').split('\n')
     diff_line = 0
     max_line_to_compare = min( len(expected), len(actual) )
-    for index in xrange(0,max_line_to_compare):
+    for index in range(0,max_line_to_compare):
         if expected[index].strip() != actual[index].strip():
             diff_line = index + 1
             break
@@ -51,7 +52,7 @@
     for input_path in tests + test_jsonchecker:
         expect_failure = os.path.basename( input_path ).startswith( 'fail' )
         is_json_checker_test = (input_path in test_jsonchecker) or expect_failure
-        print 'TESTING:', input_path,
+        print('TESTING:', input_path, end=' ')
         options = is_json_checker_test and '--json-checker' or ''
         pipe = os.popen( "%s%s %s %s" % (
             valgrind_path, jsontest_executable_path, options,
@@ -61,24 +62,24 @@
         if is_json_checker_test:
             if expect_failure:
                 if status is None:
-                    print 'FAILED'
+                    print('FAILED')
                     failed_tests.append( (input_path, 'Parsing should have failed:\n%s' %
                                           safeReadFile(input_path)) )
                 else:
-                    print 'OK'
+                    print('OK')
             else:
                 if status is not None:
-                    print 'FAILED'
+                    print('FAILED')
                     failed_tests.append( (input_path, 'Parsing failed:\n' + process_output) )
                 else:
-                    print 'OK'
+                    print('OK')
         else:
             base_path = os.path.splitext(input_path)[0]
             actual_output = safeReadFile( base_path + '.actual' )
             actual_rewrite_output = safeReadFile( base_path + '.actual-rewrite' )
-            file(base_path + '.process-output','wt').write( process_output )
+            open(base_path + '.process-output','wt').write( process_output )
             if status:
-                print 'parsing failed'
+                print('parsing failed')
                 failed_tests.append( (input_path, 'Parsing failed:\n' + process_output) )
             else:
                 expected_output_path = os.path.splitext(input_path)[0] + '.expected'
@@ -86,23 +87,23 @@
                 detail = ( compareOutputs( expected_output, actual_output, 'input' )
                             or compareOutputs( expected_output, actual_rewrite_output, 'rewrite' ) )
                 if detail:
-                    print 'FAILED'
+                    print('FAILED')
                     failed_tests.append( (input_path, detail) )
                 else:
-                    print 'OK'
+                    print('OK')
 
     if failed_tests:
-        print
-        print 'Failure details:'
+        print()
+        print('Failure details:')
         for failed_test in failed_tests:
-            print '* Test', failed_test[0]
-            print failed_test[1]
-            print
-        print 'Test results: %d passed, %d failed.' % (len(tests)-len(failed_tests),
-                                                       len(failed_tests) )
+            print('* Test', failed_test[0])
+            print(failed_test[1])
+            print()
+        print('Test results: %d passed, %d failed.' % (len(tests)-len(failed_tests),
+                                                       len(failed_tests) ))
         return 1
     else:
-        print 'All %d tests passed.' % len(tests)
+        print('All %d tests passed.' % len(tests))
         return 0
 
 def main():
diff --git a/test/rununittests.py b/test/rununittests.py
index 366184c..6279f80 100644
--- a/test/rununittests.py
+++ b/test/rununittests.py
@@ -1,8 +1,9 @@
+from __future__ import print_function
+from glob import glob
 import sys
 import os
 import os.path
 import subprocess
-from glob import glob
 import optparse
 
 VALGRIND_CMD = 'valgrind --tool=memcheck --leak-check=yes --undef-value-errors=yes'
@@ -28,29 +29,29 @@
     test_proxy = TestProxy( exe_path, use_valgrind=use_valgrind )
     status, test_names = test_proxy.run( ['--list-tests'] )
     if not status:
-        print >> sys.stderr, "Failed to obtain unit tests list:\n" + test_names
+        print("Failed to obtain unit tests list:\n" + test_names, file=sys.stderr)
         return 1
     test_names = [name.strip() for name in test_names.strip().split('\n')]
     failures = []
     for name in test_names:
-        print 'TESTING %s:' % name,
+        print('TESTING %s:' % name, end=' ')
         succeed, result = test_proxy.run( ['--test', name] )
         if succeed:
-            print 'OK'
+            print('OK')
         else:
             failures.append( (name, result) )
-            print 'FAILED'
+            print('FAILED')
     failed_count = len(failures)
     pass_count = len(test_names) - failed_count
     if failed_count:
-        print
+        print()
         for name, result in failures:
-            print result
-        print '%d/%d tests passed (%d failure(s))' % (
-            pass_count, len(test_names), failed_count)
+            print(result)
+        print('%d/%d tests passed (%d failure(s))' % (
+            pass_count, len(test_names), failed_count))
         return 1
     else:
-        print 'All %d tests passed' % len(test_names)
+        print('All %d tests passed' % len(test_names))
         return 0
 
 def main():