diff --git a/run-tests.py b/run-tests.py index 7eaee085..befc5fb7 100755 --- a/run-tests.py +++ b/run-tests.py @@ -46,6 +46,7 @@ import textwrap import calendar import types +import subprocess try: from hashlib import md5 except ImportError: @@ -53,9 +54,9 @@ from difflib import Differ # Make sure that a supported version of Python is being used: -if not (0x02040000 <= sys.hexversion < 0x03000000): +if not (0x02070000 <= sys.hexversion < 0x03000000): sys.stderr.write( - 'error: Python 2, version 2.4 or higher required.\n' + 'error: Python 2, version 2.7 or higher required.\n' ) sys.exit(1) @@ -4310,6 +4311,10 @@ def vendor_1_1_not_root(): svntest.main.svnadmin_binary = svnadmin_binary svntest.main.svnversion_binary = svnversion_binary + # check version of svn_binary + svn_version = subprocess.check_output([svn_binary, '--version', '-q']) + svntest.main.SVN_VER_MINOR = int(svn_version.split('.')[1]) + svntest.main.run_tests(test_list) # NOTREACHED diff --git a/svntest/__init__.py b/svntest/__init__.py index 98a67b67..fd15fffb 100644 --- a/svntest/__init__.py +++ b/svntest/__init__.py @@ -23,11 +23,11 @@ __all__ = [ ] import sys -if sys.hexversion < 0x2050000: - sys.stderr.write('[SKIPPED] at least Python 2.5 is required\n') +if sys.hexversion < 0x2070000: + sys.stderr.write('[SKIPPED] at least Python 2.7 is required\n') # note: exiting is a bit harsh for a library module, but we really do - # require Python 2.5. this package isn't going to work otherwise. + # require Python 2.7. this package isn't going to work otherwise. 
# we're skipping this test, not failing, so exit with 0 sys.exit(0) @@ -35,11 +35,8 @@ try: import sqlite3 except ImportError: - try: - from pysqlite2 import dbapi2 as sqlite3 - except ImportError: - sys.stderr.write('[SKIPPED] Python sqlite3 module required\n') - sys.exit(0) + sys.stderr.write('[SKIPPED] Python sqlite3 module required\n') + sys.exit(0) # don't export this name del sys @@ -53,11 +50,11 @@ class Skip(Exception): pass # import in a specific order: things with the fewest circular imports first. -import testcase -import wc -import verify -import tree -import sandbox -import main -import actions -import factory +from . import testcase +from . import wc +from . import verify +from . import tree +from . import sandbox +from . import main +from . import actions +from . import factory diff --git a/svntest/actions.py b/svntest/actions.py index b69fed19..d21fe30e 100644 --- a/svntest/actions.py +++ b/svntest/actions.py @@ -35,11 +35,14 @@ from cStringIO import StringIO import svntest -from svntest import main, verify, tree, wc +from svntest import main, verify, tree, wc, sandbox from svntest import Failure logger = logging.getLogger() +# (abbreviation) +Item = svntest.wc.StateItem + def _log_tree_state(msg, actual, subtree=""): if subtree: subtree += os.sep @@ -61,7 +64,61 @@ def no_relocate_validation(): def do_relocate_validation(): os.environ['SVN_I_LOVE_CORRUPTED_WORKING_COPIES_SO_DISABLE_RELOCATE_VALIDATION'] = 'no' -def setup_pristine_greek_repository(): +def _setup_pristine_repo(tree_state, + repos_dir, dump_dir, repos_url, + use_precooked=True): + # If there's no pristine repos, create one. + if not os.path.exists(repos_dir): + if use_precooked and main.options.fsfs_version is not None: + main.unpack_greek_repos(repos_dir) + else: + main.create_repos(repos_dir) + + # if this is dav, gives us access rights to import the greek tree. 
+ if main.is_ra_type_dav(): + authz_file = os.path.join(main.work_dir, "authz") + main.file_write(authz_file, "[/]\n* = rw\n") + + # dump the greek tree to disk. + tree_state.write_to_disk(dump_dir) + + # import the greek tree, using l:foo/p:bar + ### todo: svn should not be prompting for auth info when using + ### repositories with no auth/auth requirements + _, output, _ = main.run_svn(None, 'import', '-m', + 'Log message for revision 1.', + dump_dir, repos_url) + + # verify the printed output of 'svn import'. + lastline = output.pop().strip() + match = re.search("(Committed|Imported) revision [0-9]+.", lastline) + if not match: + logger.error("import did not succeed, while creating greek repos.") + logger.error("The final line from 'svn import' was:") + logger.error(lastline) + sys.exit(1) + output_tree = wc.State.from_commit(output) + + expected_output_tree = tree_state.copy(dump_dir) + expected_output_tree.tweak(verb='Adding', + contents=None) + + try: + expected_output_tree.compare_and_display('output', output_tree) + except tree.SVNTreeUnequal: + verify.display_trees("ERROR: output of import command is unexpected.", + "OUTPUT TREE", + expected_output_tree.old_tree(), + output_tree.old_tree()) + sys.exit(1) + + # Finally, disallow any changes to the "pristine" repos. + error_msg = "Don't modify the pristine repository" + create_failing_hook(repos_dir, 'start-commit', error_msg) + create_failing_hook(repos_dir, 'pre-lock', error_msg) + create_failing_hook(repos_dir, 'pre-revprop-change', error_msg) + +def setup_pristine_repositories(): """Create the pristine repository and 'svn import' the greek tree""" # these directories don't exist out of the box, so we may have to create them @@ -71,65 +128,27 @@ def setup_pristine_greek_repository(): if not os.path.exists(main.general_repo_dir): os.makedirs(main.general_repo_dir) # this also creates all the intermediate dirs - # If there's no pristine repos, create one. 
- if not os.path.exists(main.pristine_greek_repos_dir): - main.create_repos(main.pristine_greek_repos_dir) - - # if this is dav, gives us access rights to import the greek tree. - if main.is_ra_type_dav(): - authz_file = os.path.join(main.work_dir, "authz") - main.file_write(authz_file, "[/]\n* = rw\n") - - # dump the greek tree to disk. - main.greek_state.write_to_disk(main.greek_dump_dir) - - # import the greek tree, using l:foo/p:bar - ### todo: svn should not be prompting for auth info when using - ### repositories with no auth/auth requirements - exit_code, output, errput = main.run_svn(None, 'import', '-m', - 'Log message for revision 1.', - main.greek_dump_dir, - main.pristine_greek_repos_url) - - # check for any errors from the import - if len(errput): - display_lines("Errors during initial 'svn import':", - 'STDERR', None, errput) - sys.exit(1) - - # verify the printed output of 'svn import'. - lastline = output.pop().strip() - match = re.search("(Committed|Imported) revision [0-9]+.", lastline) - if not match: - logger.error("import did not succeed, while creating greek repos.") - logger.error("The final line from 'svn import' was:") - logger.error(lastline) - sys.exit(1) - output_tree = wc.State.from_commit(output) + if not os.path.exists(main.other_dav_root_dir): + os.makedirs(main.other_dav_root_dir) + if not os.path.exists(main.non_dav_root_dir): + os.makedirs(main.non_dav_root_dir) - expected_output_tree = main.greek_state.copy(main.greek_dump_dir) - expected_output_tree.tweak(verb='Adding', - contents=None) - - try: - expected_output_tree.compare_and_display('output', output_tree) - except tree.SVNTreeUnequal: - verify.display_trees("ERROR: output of import command is unexpected.", - "OUTPUT TREE", - expected_output_tree.old_tree(), - output_tree.old_tree()) - sys.exit(1) + _setup_pristine_repo(main.greek_state, + main.pristine_greek_repos_dir, + main.greek_dump_dir, + main.pristine_greek_repos_url) - # Finally, disallow any changes to the 
"pristine" repos. - error_msg = "Don't modify the pristine repository" - create_failing_hook(main.pristine_greek_repos_dir, 'start-commit', error_msg) - create_failing_hook(main.pristine_greek_repos_dir, 'pre-lock', error_msg) - create_failing_hook(main.pristine_greek_repos_dir, 'pre-revprop-change', error_msg) + # NOTE: We don't use precooked trojan repositories. + _setup_pristine_repo(main.trojan_state, + main.pristine_trojan_repos_dir, + main.trojan_dump_dir, + main.pristine_trojan_repos_url, + use_precooked=False) ###################################################################### -def guarantee_empty_repository(path): +def guarantee_empty_repository(path, minor_version): """Guarantee that a local svn repository exists at PATH, containing nothing.""" @@ -139,33 +158,47 @@ def guarantee_empty_repository(path): # create an empty repository at PATH. main.safe_rmtree(path) - main.create_repos(path) + main.create_repos(path, minor_version) # Used by every test, so that they can run independently of one # another. Every time this routine is called, it recursively copies # the `pristine repos' to a new location. -# Note: make sure setup_pristine_greek_repository was called once before -# using this function. -def guarantee_greek_repository(path, minor_version): - """Guarantee that a local svn repository exists at PATH, containing - nothing but the greek-tree at revision 1.""" - - if path == main.pristine_greek_repos_dir: +# Note: make sure setup_pristine_repositories was called once before +# using these functions. +def _guarantee_repos(path, repos_dir, minor_version, use_precooked=True): + if path == repos_dir: logger.error("attempt to overwrite the pristine repos! Aborting.") sys.exit(1) # copy the pristine repository to PATH. 
main.safe_rmtree(path) - if main.copy_repos(main.pristine_greek_repos_dir, path, 1, 1, minor_version): + if (use_precooked and main.options.fsfs_version is not None): + failed = main.unpack_greek_repos(path) + else: + failed = main.copy_repos(repos_dir, path, 1, 1, minor_version) + if failed: logger.error("copying repository failed.") sys.exit(1) # make the repos world-writeable, for mod_dav_svn's sake. - main.chmod_tree(path, 0666, 0666) + main.chmod_tree(path, main.S_ALL_RW, main.S_ALL_RW) + # give the repository a unique UUID + run_and_verify_svnadmin([], [], 'setuuid', path) -def run_and_verify_atomic_ra_revprop_change(message, - expected_stdout, +def guarantee_greek_repository(path, minor_version): + """Guarantee that a local svn repository exists at PATH, containing + nothing but the greek-tree at revision 1.""" + + _guarantee_repos(path, main.pristine_greek_repos_dir, minor_version) + +def guarantee_trojan_repository(path, minor_version): + """Guarantee that a local svn repository exists at PATH, containing + nothing but the trojan-tree at revision 1.""" + + _guarantee_repos(path, main.pristine_trojan_repos_dir, minor_version, False) + +def run_and_verify_atomic_ra_revprop_change(expected_stdout, expected_stderr, expected_exit, url, revision, propname, @@ -195,11 +228,11 @@ def make_proplist_skel_part(nick, val): want_error) verify.verify_outputs("Unexpected output", out, err, expected_stdout, expected_stderr) - verify.verify_exit_code(message, exit_code, expected_exit) + verify.verify_exit_code("Unexpected return code", exit_code, expected_exit) return exit_code, out, err -def run_and_verify_svnlook(message, expected_stdout, +def run_and_verify_svnlook(expected_stdout, expected_stderr, *varargs): """Like run_and_verify_svnlook2, but the expected exit code is assumed to be 0 if no output is expected on stderr, and 1 otherwise.""" @@ -207,21 +240,21 @@ def run_and_verify_svnlook(message, expected_stdout, expected_exit = 0 if expected_stderr is not None and 
expected_stderr != []: expected_exit = 1 - return run_and_verify_svnlook2(message, expected_stdout, expected_stderr, + return run_and_verify_svnlook2(expected_stdout, expected_stderr, expected_exit, *varargs) -def run_and_verify_svnlook2(message, expected_stdout, expected_stderr, +def run_and_verify_svnlook2(expected_stdout, expected_stderr, expected_exit, *varargs): """Run svnlook command and check its output and exit code.""" exit_code, out, err = main.run_svnlook(*varargs) verify.verify_outputs("Unexpected output", out, err, expected_stdout, expected_stderr) - verify.verify_exit_code(message, exit_code, expected_exit) + verify.verify_exit_code("Unexpected return code", exit_code, expected_exit) return exit_code, out, err -def run_and_verify_svnadmin(message, expected_stdout, +def run_and_verify_svnadmin(expected_stdout, expected_stderr, *varargs): """Like run_and_verify_svnadmin2, but the expected exit code is assumed to be 0 if no output is expected on stderr, and 1 otherwise.""" @@ -229,21 +262,43 @@ def run_and_verify_svnadmin(message, expected_stdout, expected_exit = 0 if expected_stderr is not None and expected_stderr != []: expected_exit = 1 - return run_and_verify_svnadmin2(message, expected_stdout, expected_stderr, + return run_and_verify_svnadmin2(expected_stdout, expected_stderr, expected_exit, *varargs) -def run_and_verify_svnadmin2(message, expected_stdout, expected_stderr, +def run_and_verify_svnadmin2(expected_stdout, expected_stderr, expected_exit, *varargs): """Run svnadmin command and check its output and exit code.""" exit_code, out, err = main.run_svnadmin(*varargs) verify.verify_outputs("Unexpected output", out, err, expected_stdout, expected_stderr) - verify.verify_exit_code(message, exit_code, expected_exit) + verify.verify_exit_code("Unexpected return code", exit_code, expected_exit) + return exit_code, out, err + + +def run_and_verify_svnfsfs(expected_stdout, + expected_stderr, *varargs): + """Like run_and_verify_svnfsfs2, but the 
expected exit code is + assumed to be 0 if no output is expected on stderr, and 1 otherwise.""" + + expected_exit = 0 + if expected_stderr is not None and expected_stderr != []: + expected_exit = 1 + return run_and_verify_svnfsfs2(expected_stdout, expected_stderr, + expected_exit, *varargs) + +def run_and_verify_svnfsfs2(expected_stdout, expected_stderr, + expected_exit, *varargs): + """Run svnfsfs command and check its output and exit code.""" + + exit_code, out, err = main.run_svnfsfs(*varargs) + verify.verify_outputs("Unexpected output", out, err, + expected_stdout, expected_stderr) + verify.verify_exit_code("Unexpected return code", exit_code, expected_exit) return exit_code, out, err -def run_and_verify_svnversion(message, wc_dir, trail_url, +def run_and_verify_svnversion(wc_dir, trail_url, expected_stdout, expected_stderr, *varargs): """like run_and_verify_svnversion2, but the expected exit code is assumed to be 0 if no output is expected on stderr, and 1 otherwise.""" @@ -251,11 +306,11 @@ def run_and_verify_svnversion(message, wc_dir, trail_url, expected_exit = 0 if expected_stderr is not None and expected_stderr != []: expected_exit = 1 - return run_and_verify_svnversion2(message, wc_dir, trail_url, + return run_and_verify_svnversion2(wc_dir, trail_url, expected_stdout, expected_stderr, expected_exit, *varargs) -def run_and_verify_svnversion2(message, wc_dir, trail_url, +def run_and_verify_svnversion2(wc_dir, trail_url, expected_stdout, expected_stderr, expected_exit, *varargs): """Run svnversion command and check its output and exit code.""" @@ -267,10 +322,10 @@ def run_and_verify_svnversion2(message, wc_dir, trail_url, verify.verify_outputs("Unexpected output", out, err, expected_stdout, expected_stderr) - verify.verify_exit_code(message, exit_code, expected_exit) + verify.verify_exit_code("Unexpected return code", exit_code, expected_exit) return exit_code, out, err -def run_and_verify_svn(message, expected_stdout, expected_stderr, *varargs): +def 
run_and_verify_svn(expected_stdout, expected_stderr, *varargs): """like run_and_verify_svn2, but the expected exit code is assumed to be 0 if no output is expected on stderr, and 1 otherwise.""" @@ -281,10 +336,10 @@ def run_and_verify_svn(message, expected_stdout, expected_stderr, *varargs): expected_exit = 1 elif expected_stderr != []: expected_exit = 1 - return run_and_verify_svn2(message, expected_stdout, expected_stderr, + return run_and_verify_svn2(expected_stdout, expected_stderr, expected_exit, *varargs) -def run_and_verify_svn2(message, expected_stdout, expected_stderr, +def run_and_verify_svn2(expected_stdout, expected_stderr, expected_exit, *varargs): """Invoke main.run_svn() with *VARARGS. Return exit code as int; stdout, stderr as lists of lines (including line terminators). For both @@ -299,7 +354,7 @@ def run_and_verify_svn2(message, expected_stdout, expected_stderr, - If it is already an instance of ExpectedOutput (e.g. UnorderedOutput), leave it alone. - ...and invoke compare_and_display_lines() on MESSAGE, a label based + ...and invoke compare_and_display_lines() on a label based on the name of the stream being compared (e.g. STDOUT), the ExpectedOutput instance, and the actual output. @@ -321,38 +376,36 @@ def run_and_verify_svn2(message, expected_stdout, expected_stderr, want_err = True exit_code, out, err = main.run_svn(want_err, *varargs) - verify.verify_outputs(message, out, err, expected_stdout, expected_stderr) - verify.verify_exit_code(message, exit_code, expected_exit) + verify.verify_outputs("Unexpected output", out, err, + expected_stdout, expected_stderr) + verify.verify_exit_code("Unexpected return code", exit_code, expected_exit) return exit_code, out, err def run_and_verify_load(repo_dir, dump_file_content, - bypass_prop_validation = False): + bypass_prop_validation = False, + normalize_props = False): "Runs 'svnadmin load' and reports any errors." 
if not isinstance(dump_file_content, list): raise TypeError("dump_file_content argument should have list type") expected_stderr = [] + args = () if bypass_prop_validation: - exit_code, output, errput = main.run_command_stdin( - main.svnadmin_binary, expected_stderr, 0, 1, dump_file_content, - 'load', '--force-uuid', '--quiet', '--bypass-prop-validation', repo_dir) - else: - exit_code, output, errput = main.run_command_stdin( - main.svnadmin_binary, expected_stderr, 0, 1, dump_file_content, - 'load', '--force-uuid', '--quiet', repo_dir) - - verify.verify_outputs("Unexpected stderr output", None, errput, - None, expected_stderr) + args += ('--bypass-prop-validation',) + if normalize_props: + args += ('--normalize-props',) + main.run_command_stdin( + main.svnadmin_binary, expected_stderr, 0, True, dump_file_content, + 'load', '--force-uuid', '--quiet', repo_dir, *args) def run_and_verify_dump(repo_dir, deltas=False): "Runs 'svnadmin dump' and reports any errors, returning the dump content." + args = () if deltas: - exit_code, output, errput = main.run_svnadmin('dump', '--deltas', - repo_dir) - else: - exit_code, output, errput = main.run_svnadmin('dump', repo_dir) - verify.verify_outputs("Missing expected output(s)", output, errput, - verify.AnyOutput, verify.AnyOutput) + args += ('--deltas',) + exit_code, output, errput = run_and_verify_svnadmin( + verify.AnyOutput, [], + 'dump', '--quiet', repo_dir, *args) return output @@ -365,8 +418,10 @@ def run_and_verify_svnrdump(dumpfile_content, expected_stdout, # Since main.run_svnrdump() uses binary mode, normalize the stderr # line endings on Windows ourselves. if sys.platform == 'win32': - err = map(lambda x : x.replace('\r\n', '\n'), err) + err = [x.replace('\r\n', '\n') for x in err] + # Ignore "consider upgrade" warnings to allow regression tests to pass + # when run against a 1.6 mod_dav_svn. 
for index, line in enumerate(err[:]): if re.search("warning: W200007", line): del err[index] @@ -377,32 +432,83 @@ def run_and_verify_svnrdump(dumpfile_content, expected_stdout, return output -def run_and_verify_svnmucc(message, expected_stdout, expected_stderr, +def run_and_verify_svnmover(expected_stdout, expected_stderr, + *varargs): + """Run svnmover command and check its output""" + + expected_exit = 0 + if expected_stderr is not None and expected_stderr != []: + expected_exit = 1 + return run_and_verify_svnmover2(expected_stdout, expected_stderr, + expected_exit, *varargs) + +def run_and_verify_svnmover2(expected_stdout, expected_stderr, + expected_exit, *varargs): + """Run svnmover command and check its output and exit code.""" + + exit_code, out, err = main.run_svnmover(*varargs) + verify.verify_outputs("Unexpected output", out, err, + expected_stdout, expected_stderr) + verify.verify_exit_code("Unexpected return code", exit_code, expected_exit) + return exit_code, out, err + + +def run_and_verify_svnmucc(expected_stdout, expected_stderr, *varargs): """Run svnmucc command and check its output""" expected_exit = 0 if expected_stderr is not None and expected_stderr != []: expected_exit = 1 - return run_and_verify_svnmucc2(message, expected_stdout, expected_stderr, + return run_and_verify_svnmucc2(expected_stdout, expected_stderr, expected_exit, *varargs) -def run_and_verify_svnmucc2(message, expected_stdout, expected_stderr, +def run_and_verify_svnmucc2(expected_stdout, expected_stderr, expected_exit, *varargs): """Run svnmucc command and check its output and exit code.""" exit_code, out, err = main.run_svnmucc(*varargs) verify.verify_outputs("Unexpected output", out, err, expected_stdout, expected_stderr) - verify.verify_exit_code(message, exit_code, expected_exit) + verify.verify_exit_code("Unexpected return code", exit_code, expected_exit) + return exit_code, out, err + + +def run_and_verify_svnsync(expected_stdout, expected_stderr, + *varargs): + """Run 
svnsync command and check its output""" + + expected_exit = 0 + if expected_stderr is not None and expected_stderr != []: + expected_exit = 1 + return run_and_verify_svnsync2(expected_stdout, expected_stderr, + expected_exit, *varargs) + +def run_and_verify_svnsync2(expected_stdout, expected_stderr, + expected_exit, *varargs): + """Run svnsync command and check its output and exit code.""" + + exit_code, out, err = main.run_svnsync(*varargs) + + # Ignore "consider upgrade" warnings to allow regression tests to pass + # when run against a 1.6 mod_dav_svn. + for index, line in enumerate(err[:]): + if re.search("warning: W200007", line): + del err[index] + + verify.verify_outputs("Unexpected output", out, err, + expected_stdout, expected_stderr) + verify.verify_exit_code("Unexpected return code", exit_code, expected_exit) return exit_code, out, err def load_repo(sbox, dumpfile_path = None, dump_str = None, - bypass_prop_validation = False): + bypass_prop_validation = False, create_wc=True, + normalize_props = False): "Loads the dumpfile into sbox" if not dump_str: - dump_str = open(dumpfile_path, "rb").read() + with open(dumpfile_path, "rb") as fp: + dump_str = fp.read() # Create a virgin repos and working copy main.safe_rmtree(sbox.repo_dir, 1) @@ -411,8 +517,9 @@ def load_repo(sbox, dumpfile_path = None, dump_str = None, # Load the mergetracking dumpfile into the repos, and check it out the repo run_and_verify_load(sbox.repo_dir, dump_str.splitlines(True), - bypass_prop_validation) - run_and_verify_svn(None, None, [], "co", sbox.repo_url, sbox.wc_dir) + bypass_prop_validation, normalize_props) + if create_wc: + run_and_verify_svn(None, [], "co", sbox.repo_url, sbox.wc_dir) return dump_str @@ -424,6 +531,22 @@ def expected_noop_update_output(rev): % (rev), "no-op update") +def run_and_verify_svnauthz(expected_stdout, expected_stderr, + expected_exit, compat_mode, *varargs): + """Run svnauthz command and check its output and exit code. 
+ If COMPAT_MODE is True then run the command in pre-1.8 + compatibility mode""" + + if compat_mode: + exit_code, out, err = main.run_svnauthz_validate(*varargs) + else: + exit_code, out, err = main.run_svnauthz(*varargs) + + verify.verify_outputs("Unexpected output", out, err, + expected_stdout, expected_stderr) + verify.verify_exit_code("Unexpected return code", exit_code, expected_exit) + return exit_code, out, err + ###################################################################### # Subversion Actions # @@ -433,13 +556,9 @@ def expected_noop_update_output(rev): # -def run_and_verify_checkout2(do_remove, - URL, wc_dir_name, output_tree, disk_tree, - singleton_handler_a = None, - a_baton = None, - singleton_handler_b = None, - b_baton = None, - *args): +def run_and_verify_checkout(URL, wc_dir_name, output_tree, disk_tree, + expected_stderr=[], + *args, **kw): """Checkout the URL into a new directory WC_DIR_NAME. *ARGS are any extra optional args to the checkout subcommand. @@ -455,20 +574,13 @@ def run_and_verify_checkout2(do_remove, if isinstance(output_tree, wc.State): output_tree = output_tree.old_tree() - if isinstance(disk_tree, wc.State): - disk_tree = disk_tree.old_tree() - - # Remove dir if it's already there, unless this is a forced checkout. - # In that case assume we want to test a forced checkout's toleration - # of obstructing paths. - if do_remove: - main.safe_rmtree(wc_dir_name) # Checkout and make a tree of the output, using l:foo/p:bar ### todo: svn should not be prompting for auth info when using ### repositories with no auth/auth requirements - exit_code, output, errput = main.run_svn(None, 'co', - URL, wc_dir_name, *args) + exit_code, output, errput = run_and_verify_svn(None, expected_stderr, + 'co', URL, wc_dir_name, + *args) actual = tree.build_tree_from_checkout(output) # Verify actual output against expected output. 
@@ -478,47 +590,27 @@ def run_and_verify_checkout2(do_remove, _log_tree_state("ACTUAL OUTPUT TREE:", actual, wc_dir_name) raise - # Create a tree by scanning the working copy - actual = tree.build_tree_from_wc(wc_dir_name) - - # Verify expected disk against actual disk. - try: - tree.compare_trees("disk", actual, disk_tree, - singleton_handler_a, a_baton, - singleton_handler_b, b_baton) - except tree.SVNTreeUnequal: - _log_tree_state("ACTUAL DISK TREE:", actual, wc_dir_name) - raise - -def run_and_verify_checkout(URL, wc_dir_name, output_tree, disk_tree, - singleton_handler_a = None, - a_baton = None, - singleton_handler_b = None, - b_baton = None, - *args): - """Same as run_and_verify_checkout2(), but without the DO_REMOVE arg. - WC_DIR_NAME is deleted if present unless the '--force' option is passed - in *ARGS.""" - - - # Remove dir if it's already there, unless this is a forced checkout. - # In that case assume we want to test a forced checkout's toleration - # of obstructing paths. - return run_and_verify_checkout2(('--force' not in args), - URL, wc_dir_name, output_tree, disk_tree, - singleton_handler_a, - a_baton, - singleton_handler_b, - b_baton, - *args) - + if disk_tree: + verify_disk(wc_dir_name, disk_tree, False, **kw) def run_and_verify_export(URL, export_dir_name, output_tree, disk_tree, *args): + """Same as run_and_verify_export2 but with KEEP_EOL_STYLE set to False.""" + + run_and_verify_export2(URL, export_dir_name, output_tree, disk_tree, + False, *args) + +def run_and_verify_export2(URL, export_dir_name, output_tree, disk_tree, + keep_eol_style=False, *args): """Export the URL into a new directory WC_DIR_NAME. The subcommand output will be verified against OUTPUT_TREE, and the exported copy itself will be verified against DISK_TREE. + + If KEEP_EOL_STYLE is set, don't let Python normalize the EOL when + reading working copy contents as text files. It has no effect on + binary files. + Return if successful, raise on failure. 
""" assert isinstance(output_tree, wc.State) @@ -544,7 +636,8 @@ def run_and_verify_export(URL, export_dir_name, output_tree, disk_tree, # Create a tree by scanning the working copy. Don't ignore # the .svn directories so that we generate an error if they # happen to show up. - actual = tree.build_tree_from_wc(export_dir_name, ignore_svn=False) + actual = tree.build_tree_from_wc(export_dir_name, ignore_svn=False, + keep_eol_style=keep_eol_style) # Verify expected disk against actual disk. try: @@ -557,8 +650,13 @@ def run_and_verify_export(URL, export_dir_name, output_tree, disk_tree, # run_and_verify_log_xml class LogEntry: - def __init__(self, revision, changed_paths=None, revprops=None): + def __init__(self, revision, attributes=None, + changed_paths=None, revprops=None): self.revision = revision + if attributes == None: + self.attributes = {} + else: + self.attributes = attributes if changed_paths == None: self.changed_paths = {} else: @@ -568,6 +666,15 @@ def __init__(self, revision, changed_paths=None, revprops=None): else: self.revprops = revprops + def assert_log_attrs(self, attributes): + """Assert that attributes is the same as this entry's attributes + Raises svntest.Failure if not. + """ + if self.attributes != attributes: + raise Failure('\n' + '\n'.join(difflib.ndiff( + pprint.pformat(attributes).splitlines(), + pprint.pformat(self.attributes).splitlines()))) + def assert_changed_paths(self, changed_paths): """Assert that changed_paths is the same as this entry's changed_paths Raises svntest.Failure if not. 
@@ -597,7 +704,7 @@ def parse(self, data): for i in data: self.parser.Parse(i) self.parser.Parse('', True) - except xml.parsers.expat.ExpatError, e: + except xml.parsers.expat.ExpatError as e: raise verify.SVNUnexpectedStdout('%s\n%s\n' % (e, ''.join(data),)) return self.entries @@ -645,7 +752,7 @@ def svn_prop(self, name): # element handlers def logentry_start(self, attrs): - self.entries.append(LogEntry(int(attrs['revision']))) + self.entries.append(LogEntry(int(attrs['revision']), attrs)) def author_end(self): self.svn_prop('author') def msg_end(self): @@ -665,16 +772,19 @@ def path_end(self): self.entries[-1].changed_paths[self.use_cdata()] = [{'kind': self.kind, 'action': self.action}] -def run_and_verify_log_xml(message=None, expected_paths=None, - expected_revprops=None, expected_stdout=None, - expected_stderr=None, args=[]): +def run_and_verify_log_xml(expected_log_attrs=None, + expected_paths=None, expected_revprops=None, + expected_stdout=None, expected_stderr=None, + args=[]): """Call run_and_verify_svn with log --xml and args (optional) as command - arguments, and pass along message, expected_stdout, and expected_stderr. - - If message is None, pass the svn log command as message. + arguments, and pass along expected_stdout, and expected_stderr. expected_paths checking is not yet implemented. + expected_log_attrs is an optional list of dicts, compared to each revisions's + logentry attributes. The list must be in the same order the log entries + come in. + expected_revprops is an optional list of dicts, compared to each revision's revprops. The list must be in the same order the log entries come in. Any svn:date revprops in the dicts must be '' in order to @@ -683,8 +793,7 @@ def run_and_verify_log_xml(message=None, expected_paths=None, expected_paths and expected_revprops are ignored if expected_stdout or expected_stderr is specified. 
""" - if message == None: - message = ' '.join(args) + message = ' '.join(args) # We'll parse the output unless the caller specifies expected_stderr or # expected_stdout for run_and_verify_svn. @@ -701,7 +810,7 @@ def run_and_verify_log_xml(message=None, expected_paths=None, log_args.append('-v') (exit_code, stdout, stderr) = run_and_verify_svn( - message, expected_stdout, expected_stderr, + expected_stdout, expected_stderr, 'log', '--xml', *log_args) if not parse: return @@ -713,6 +822,8 @@ def run_and_verify_log_xml(message=None, expected_paths=None, entry.assert_revprops(expected_revprops[index]) if expected_paths != None: entry.assert_changed_paths(expected_paths[index]) + if expected_log_attrs != None: + entry.assert_log_attrs(expected_log_attrs[index]) def verify_update(actual_output, @@ -724,11 +835,9 @@ def verify_update(actual_output, elision_output_tree, disk_tree, status_tree, - singleton_handler_a=None, - a_baton=None, - singleton_handler_b=None, - b_baton=None, - check_props=False): + check_props=False, + keep_eol_style=False, + extra_files=None): """Verify update of WC_DIR_NAME. The subcommand output (found in ACTUAL_OUTPUT, ACTUAL_MERGEINFO_OUTPUT, @@ -740,6 +849,10 @@ def verify_update(actual_output, against STATUS_TREE (if provided). (This is a good way to check that revision numbers were bumped.) + If KEEP_EOL_STYLE is set, don't let Python normalize the EOL when + reading working copy contents as text files. It has no effect on + binary files. + Return if successful, raise on failure. 
For the comparison with DISK_TREE, pass SINGLETON_HANDLER_A and @@ -759,10 +872,6 @@ def verify_update(actual_output, mergeinfo_output_tree = mergeinfo_output_tree.old_tree() if isinstance(elision_output_tree, wc.State): elision_output_tree = elision_output_tree.old_tree() - if isinstance(disk_tree, wc.State): - disk_tree = disk_tree.old_tree() - if isinstance(status_tree, wc.State): - status_tree = status_tree.old_tree() # Verify actual output against expected output. if output_tree: @@ -794,39 +903,74 @@ def verify_update(actual_output, # Create a tree by scanning the working copy, and verify it if disk_tree: - actual_disk = tree.build_tree_from_wc(wc_dir_name, check_props) - try: - tree.compare_trees("disk", actual_disk, disk_tree, - singleton_handler_a, a_baton, - singleton_handler_b, b_baton) - except tree.SVNTreeUnequal: - _log_tree_state("EXPECTED DISK TREE:", disk_tree) - _log_tree_state("ACTUAL DISK TREE:", actual_disk) - raise + verify_disk(wc_dir_name, disk_tree, check_props, + extra_files, keep_eol_style) # Verify via 'status' command too, if possible. if status_tree: run_and_verify_status(wc_dir_name, status_tree) -def verify_disk(wc_dir_name, disk_tree, check_props=False): +def verify_disk(wc_dir_name, disk_tree, check_props=False, + extra_files=None, keep_eol_style=False): """Verify WC_DIR_NAME against DISK_TREE. If CHECK_PROPS is set, - the comparison will examin props. Returns if successful, raises on - failure.""" - verify_update(None, None, None, wc_dir_name, None, None, None, disk_tree, - None, check_props=check_props) + the comparison will examin props. + + If KEEP_EOL_STYLE is set, don't let Python normalize the EOL when + reading working copy contents as text files. It has no effect on + binary files. 
+
+  Returns if successful, raises on failure."""
+
+  singleton_handler_a = None
+  a_baton = None
+  singleton_handler_b = None
+  b_baton = None
+  done_a = None
+
+  if extra_files:
+    singleton_handler_a = svntest.tree.detect_conflict_files
+    done_a = svntest.tree.detect_conflict_files_done
+    a_baton = extra_files
+
+  if isinstance(disk_tree, wc.State):
+    disk_tree = disk_tree.old_tree()
+
+  actual_disk = tree.build_tree_from_wc(wc_dir_name, check_props,
+                                        keep_eol_style=keep_eol_style)
+  try:
+    tree.compare_trees("disk", actual_disk, disk_tree,
+                       singleton_handler_a, a_baton,
+                       singleton_handler_b, b_baton)
+  except tree.SVNTreeUnequal:
+    _log_tree_state("EXPECTED DISK TREE:", disk_tree)
+    _log_tree_state("ACTUAL DISK TREE:", actual_disk)
+    raise
+  if done_a:
+    done_a(a_baton)
 
 
 def run_and_verify_update(wc_dir_name,
                           output_tree, disk_tree, status_tree,
-                          error_re_string = None,
-                          singleton_handler_a = None,
-                          a_baton = None,
-                          singleton_handler_b = None,
-                          b_baton = None,
+                          expected_stderr=[],
                           check_props = False,
-                          *args):
+                          *args, **kw):
+  """Same as run_and_verify_update2 but with keep_eol_style set to False."""
+  run_and_verify_update2(wc_dir_name,
+                         output_tree, disk_tree, status_tree,
+                         expected_stderr,
+                         check_props,
+                         False,
+                         *args, **kw)
+
+
+def run_and_verify_update2(wc_dir_name,
+                           output_tree, disk_tree, status_tree,
+                           expected_stderr=[],
+                           check_props = False,
+                           keep_eol_style = False,
+                           *args, **kw):
   """Update WC_DIR_NAME.  *ARGS are any extra optional args to the
   update subcommand.  NOTE: If *ARGS is specified at all, explicit
@@ -838,44 +982,30 @@
   If ERROR_RE_STRING, the update must exit with error, and the error
   message must match regular expression ERROR_RE_STRING.
 
-  Else if ERROR_RE_STRING is None, then:
-
   If OUTPUT_TREE is not None, the subcommand output will be verified
   against OUTPUT_TREE.  If DISK_TREE is not None, the working copy
   itself will be verified against DISK_TREE.
If STATUS_TREE is not None, the 'svn status' output will be verified against STATUS_TREE. (This is a good way to check that revision numbers were bumped.) - For the DISK_TREE verification, SINGLETON_HANDLER_A and - SINGLETON_HANDLER_B will be passed to tree.compare_trees -- see that - function's doc string for more details. - If CHECK_PROPS is set, then disk comparison will examine props. + If KEEP_EOL_STYLE is set, don't let Python normalize the EOL when + reading working copy contents as text files. It has no effect on + binary files. + Return if successful, raise on failure.""" # Update and make a tree of the output. - if len(args): - exit_code, output, errput = main.run_svn(error_re_string, 'up', *args) - else: - exit_code, output, errput = main.run_svn(error_re_string, - 'up', wc_dir_name, - *args) + if len(args) == 0: + args = (wc_dir_name,) - if error_re_string: - rm = re.compile(error_re_string) - for line in errput: - match = rm.search(line) - if match: - return - raise main.SVNUnmatchedError + exit_code, output, errput = run_and_verify_svn(None, expected_stderr, 'up', *args) actual = wc.State.from_checkout(output) verify_update(actual, None, None, wc_dir_name, output_tree, None, None, disk_tree, status_tree, - singleton_handler_a, a_baton, - singleton_handler_b, b_baton, - check_props) + check_props, keep_eol_style=keep_eol_style, **kw) def run_and_parse_info(*args): @@ -959,19 +1089,20 @@ def run_and_verify_info(expected_infos, *args): for actual, expected in zip(actual_infos, expected_infos): # compare dicts + path = actual['Path'] for key, value in expected.items(): assert ':' not in key # caller passed impossible expectations? 
if value is None and key in actual: - raise main.SVNLineUnequal("Found unexpected key '%s' with value '%s'" - % (key, actual[key])) + raise main.SVNLineUnequal("On '%s': Found unexpected key '%s'\n Value '%s'" + % (path, key, actual[key])) if value is not None and key not in actual: - raise main.SVNLineUnequal("Expected key '%s' (with value '%s') " - "not found" % (key, value)) + raise main.SVNLineUnequal("On '%s': Expected key '%s' not found\n Expected value '%s'" + % (path, key, value)) if value is not None and not re.match(value, actual[key]): - raise verify.SVNUnexpectedStdout("Values of key '%s' don't match:\n" + raise verify.SVNUnexpectedStdout("On '%s': Values of key '%s' don't match:\n" " Expected: '%s' (regex)\n" " Found: '%s' (string)\n" - % (key, value, actual[key])) + % (path, key, value, actual[key])) except: sys.stderr.write("Bad 'svn info' output:\n" @@ -985,25 +1116,39 @@ def run_and_verify_merge(dir, rev1, rev2, url1, url2, mergeinfo_output_tree, elision_output_tree, disk_tree, status_tree, skip_tree, - error_re_string = None, - singleton_handler_a = None, - a_baton = None, - singleton_handler_b = None, - b_baton = None, + expected_stderr = [], check_props = False, dry_run = True, - *args): + *args, **kw): + """Same as run_and_verify_merge2 but with keep_eol_style set to False. """ + + run_and_verify_merge2(dir, rev1, rev2, url1, url2, + output_tree, + mergeinfo_output_tree, + elision_output_tree, + disk_tree, status_tree, skip_tree, + expected_stderr, + check_props, + dry_run, + False, + *args, **kw) + +def run_and_verify_merge2(dir, rev1, rev2, url1, url2, + output_tree, + mergeinfo_output_tree, + elision_output_tree, + disk_tree, status_tree, skip_tree, + expected_stderr = [], + check_props = False, + dry_run = True, + keep_eol_style = False, + *args, **kw): """Run 'svn merge URL1@REV1 URL2@REV2 DIR' if URL2 is not None (for a three-way merge between URLs and WC). If URL2 is None, run 'svn merge -rREV1:REV2 URL1 DIR'. 
If both REV1 and REV2 are None, leave off the '-r' argument. - If ERROR_RE_STRING, the merge must exit with error, and the error - message must match regular expression ERROR_RE_STRING. - - Else if ERROR_RE_STRING is None, then: - The subcommand output will be verified against OUTPUT_TREE. Output related to mergeinfo notifications will be verified against MERGEINFO_OUTPUT_TREE if that is not None. Output related to mergeinfo @@ -1021,6 +1166,10 @@ def run_and_verify_merge(dir, rev1, rev2, url1, url2, If DRY_RUN is set then a --dry-run merge will be carried out first and the output compared with that of the full merge. + If KEEP_EOL_STYLE is set, don't let Python normalize the EOL when + reading working copy contents as text files. It has no effect on + binary files. + Return if successful, raise on failure. *ARGS are any extra optional args to the merge subcommand. @@ -1040,12 +1189,12 @@ def run_and_verify_merge(dir, rev1, rev2, url1, url2, merge_command = tuple(merge_command) if dry_run: - pre_disk = tree.build_tree_from_wc(dir) + pre_disk = tree.build_tree_from_wc(dir, keep_eol_style=keep_eol_style) dry_run_command = merge_command + ('--dry-run',) dry_run_command = dry_run_command + args - exit_code, out_dry, err_dry = main.run_svn(error_re_string, - *dry_run_command) - post_disk = tree.build_tree_from_wc(dir) + exit_code, out_dry, err_dry = run_and_verify_svn(None, expected_stderr, + *dry_run_command) + post_disk = tree.build_tree_from_wc(dir, keep_eol_style=keep_eol_style) try: tree.compare_trees("disk", post_disk, pre_disk) except tree.SVNTreeError: @@ -1057,16 +1206,7 @@ def run_and_verify_merge(dir, rev1, rev2, url1, url2, # Update and make a tree of the output. 
merge_command = merge_command + args - exit_code, out, err = main.run_svn(error_re_string, *merge_command) - - if error_re_string: - if not error_re_string.startswith(".*"): - error_re_string = ".*(" + error_re_string + ")" - expected_err = verify.RegexOutput(error_re_string, match_all=False) - verify.verify_outputs(None, None, err, None, expected_err) - return - elif err: - raise verify.SVNUnexpectedStderr(err) + exit_code, out, err = run_and_verify_svn(None, expected_stderr, *merge_command) # Split the output into that related to application of the actual diff # and that related to the recording of mergeinfo describing the merge. @@ -1121,7 +1261,7 @@ def run_and_verify_merge(dir, rev1, rev2, url1, url2, for x in out_dry: logger.warn(x) logger.warn("The full merge output:") - for x in out: + for x in merge_diff_out: logger.warn(x) logger.warn("=============================================================") raise main.SVNUnmatchedError @@ -1155,9 +1295,7 @@ def extra_skip(a, b): verify_update(actual_diff, actual_mergeinfo, actual_elision, dir, output_tree, mergeinfo_output_tree, elision_output_tree, disk_tree, status_tree, - singleton_handler_a, a_baton, - singleton_handler_b, b_baton, - check_props) + check_props, keep_eol_style=keep_eol_style, **kw) def run_and_verify_patch(dir, patch_path, @@ -1165,14 +1303,29 @@ def run_and_verify_patch(dir, patch_path, error_re_string=None, check_props=False, dry_run=True, - *args): + *args, **kw): + """Same as run_and_verify_patch2 but with KEEP_EOL_STYLE set to False.""" + + run_and_verify_patch2(dir, patch_path, + output_tree, disk_tree, status_tree, skip_tree, + error_re_string, + check_props, + dry_run, + False, + *args, **kw) + +def run_and_verify_patch2(dir, patch_path, + output_tree, disk_tree, status_tree, skip_tree, + error_re_string=None, + check_props=False, + dry_run=True, + keep_eol_style=False, + *args, **kw): """Run 'svn patch patch_path DIR'. 
If ERROR_RE_STRING, 'svn patch' must exit with error, and the error message must match regular expression ERROR_RE_STRING. - Else if ERROR_RE_STRING is None, then: - The subcommand output will be verified against OUTPUT_TREE, and the working copy itself will be verified against DISK_TREE. If optional STATUS_TREE is given, then 'svn status' output will be compared. @@ -1183,20 +1336,23 @@ def run_and_verify_patch(dir, patch_path, If DRY_RUN is set then a --dry-run patch will be carried out first and the output compared with that of the full patch application. - Returns if successful, raises on failure.""" + If KEEP_EOL_STYLE is set, don't let Python normalize the EOL when + reading working copy contents as text files. It has no effect on + binary files. + Returns if successful, raises on failure.""" patch_command = [ "patch" ] patch_command.append(patch_path) patch_command.append(dir) patch_command = tuple(patch_command) if dry_run: - pre_disk = tree.build_tree_from_wc(dir) + pre_disk = tree.build_tree_from_wc(dir, keep_eol_style=keep_eol_style) dry_run_command = patch_command + ('--dry-run',) dry_run_command = dry_run_command + args exit_code, out_dry, err_dry = main.run_svn(error_re_string, *dry_run_command) - post_disk = tree.build_tree_from_wc(dir) + post_disk = tree.build_tree_from_wc(dir, keep_eol_style=keep_eol_style) try: tree.compare_trees("disk", post_disk, pre_disk) except tree.SVNTreeError: @@ -1259,7 +1415,8 @@ def extra_skip(a, b): verify_update(mytree, None, None, dir, output_tree, None, None, disk_tree, status_tree, - check_props=check_props) + check_props=check_props, keep_eol_style=keep_eol_style, + **kw) def run_and_verify_mergeinfo(error_re_string = None, @@ -1282,7 +1439,6 @@ def run_and_verify_mergeinfo(error_re_string = None, return out = [_f for _f in [x.rstrip()[1:] for x in out] if _f] - expected_output.sort() extra_out = [] if out != expected_output: exp_hash = dict.fromkeys(expected_output) @@ -1303,57 +1459,33 @@ def 
run_and_verify_switch(wc_dir_name, wc_target, switch_url, output_tree, disk_tree, status_tree, - error_re_string = None, - singleton_handler_a = None, - a_baton = None, - singleton_handler_b = None, - b_baton = None, + expected_stderr = [], check_props = False, - *args): + *args, **kw): """Switch WC_TARGET (in working copy dir WC_DIR_NAME) to SWITCH_URL. - If ERROR_RE_STRING, the switch must exit with error, and the error - message must match regular expression ERROR_RE_STRING. - - Else if ERROR_RE_STRING is None, then: - The subcommand output will be verified against OUTPUT_TREE, and the working copy itself will be verified against DISK_TREE. If optional STATUS_TREE is given, then 'svn status' output will be compared. (This is a good way to check that revision numbers were bumped.) - For the DISK_TREE verification, SINGLETON_HANDLER_A and - SINGLETON_HANDLER_B will be passed to tree.compare_trees -- see that - function's doc string for more details. - If CHECK_PROPS is set, then disk comparison will examine props. Return if successful, raise on failure.""" # Update and make a tree of the output. 
- exit_code, output, errput = main.run_svn(error_re_string, 'switch', - switch_url, wc_target, *args) - - if error_re_string: - if not error_re_string.startswith(".*"): - error_re_string = ".*(" + error_re_string + ")" - expected_err = verify.RegexOutput(error_re_string, match_all=False) - verify.verify_outputs(None, None, errput, None, expected_err) - return - elif errput: - raise verify.SVNUnexpectedStderr(err) - + exit_code, output, errput = run_and_verify_svn(None, expected_stderr, + 'switch', switch_url, + wc_target, *args) actual = wc.State.from_checkout(output) verify_update(actual, None, None, wc_dir_name, output_tree, None, None, disk_tree, status_tree, - singleton_handler_a, a_baton, - singleton_handler_b, b_baton, - check_props) + check_props, **kw) -def process_output_for_commit(output): +def process_output_for_commit(output, error_re_string): """Helper for run_and_verify_commit(), also used in the factory.""" # Remove the final output line, and verify that the commit succeeded. lastline = "" @@ -1372,7 +1504,7 @@ def external_removal(line): cm = re.compile("(Committed|Imported) revision [0-9]+.") match = cm.search(lastline) - if not match: + if not match and not error_re_string: logger.warn("ERROR: commit did not succeed.") logger.warn("The final line from 'svn ci' was:") logger.warn(lastline) @@ -1399,7 +1531,7 @@ def external_removal(line): def run_and_verify_commit(wc_dir_name, output_tree, status_tree, - error_re_string = None, + expected_stderr=[], *args): """Commit and verify results within working copy WC_DIR_NAME, sending ARGS to the commit subcommand. @@ -1409,44 +1541,34 @@ def run_and_verify_commit(wc_dir_name, output_tree, status_tree, be compared. (This is a good way to check that revision numbers were bumped.) - If ERROR_RE_STRING is None, the commit must not exit with error. If - ERROR_RE_STRING is a string, the commit must exit with error, and - the error message must match regular expression ERROR_RE_STRING. 
+ EXPECTED_STDERR is handled as in run_and_verify_svn() Return if successful, raise on failure.""" if isinstance(output_tree, wc.State): output_tree = output_tree.old_tree() - if isinstance(status_tree, wc.State): - status_tree = status_tree.old_tree() # Commit. + if len(args) == 0: + args = (wc_dir_name,) if '-m' not in args and '-F' not in args: args = list(args) + ['-m', 'log msg'] - exit_code, output, errput = main.run_svn(error_re_string, 'ci', - *args) - - if error_re_string: - if not error_re_string.startswith(".*"): - error_re_string = ".*(" + error_re_string + ")" - expected_err = verify.RegexOutput(error_re_string, match_all=False) - verify.verify_outputs(None, None, errput, None, expected_err) - return - - # Else not expecting error: + exit_code, output, errput = run_and_verify_svn(None, expected_stderr, + 'ci', *args) # Convert the output into a tree. - output = process_output_for_commit(output) + output = process_output_for_commit(output, expected_stderr) actual = tree.build_tree_from_commit(output) # Verify actual output against expected output. - try: - tree.compare_trees("output", actual, output_tree) - except tree.SVNTreeError: - verify.display_trees("Output of commit is unexpected", - "OUTPUT TREE", output_tree, actual) - _log_tree_state("ACTUAL OUTPUT TREE:", actual, wc_dir_name) - raise + if output_tree: + try: + tree.compare_trees("output", actual, output_tree) + except tree.SVNTreeError: + verify.display_trees("Output of commit is unexpected", + "OUTPUT TREE", output_tree, actual) + _log_tree_state("ACTUAL OUTPUT TREE:", actual, wc_dir_name) + raise # Verify via 'status' command too, if possible. if status_tree: @@ -1455,50 +1577,43 @@ def run_and_verify_commit(wc_dir_name, output_tree, status_tree, # This function always passes '-q' to the status command, which # suppresses the printing of any unversioned or nonexistent items. 
-def run_and_verify_status(wc_dir_name, output_tree, - singleton_handler_a = None, - a_baton = None, - singleton_handler_b = None, - b_baton = None): +def run_and_verify_status(wc_dir_name, status_tree, no_entries=False): """Run 'status' on WC_DIR_NAME and compare it with the - expected OUTPUT_TREE. SINGLETON_HANDLER_A and SINGLETON_HANDLER_B will - be passed to tree.compare_trees - see that function's doc string for - more details. + expected STATUS_TREE. Returns on success, raises on failure.""" - if isinstance(output_tree, wc.State): - output_state = output_tree - output_tree = output_tree.old_tree() - else: - output_state = None + if not isinstance(status_tree, wc.State): + raise TypeError('wc.State tree expected') exit_code, output, errput = main.run_svn(None, 'status', '-v', '-u', '-q', wc_dir_name) - actual = tree.build_tree_from_status(output) + actual_status = svntest.wc.State.from_status(output, wc_dir=wc_dir_name) # Verify actual output against expected output. try: - tree.compare_trees("status", actual, output_tree, - singleton_handler_a, a_baton, - singleton_handler_b, b_baton) + status_tree.compare_and_display('status', actual_status) except tree.SVNTreeError: - verify.display_trees(None, 'STATUS OUTPUT TREE', output_tree, actual) - _log_tree_state("ACTUAL STATUS TREE:", actual, wc_dir_name) + _log_tree_state("ACTUAL STATUS TREE:", actual_status.old_tree(), + wc_dir_name) raise + if no_entries: + return + # if we have an output State, and we can/are-allowed to create an # entries-based State, then compare the two. 
- if output_state: - entries_state = wc.State.from_entries(wc_dir_name) - if entries_state: - tweaked = output_state.copy() - tweaked.tweak_for_entries_compare() - try: - tweaked.compare_and_display('entries', entries_state) - except tree.SVNTreeUnequal: - ### do something more - raise + actual_entries = wc.State.from_entries(wc_dir_name) + if actual_entries: + tweaked = status_tree.copy() + tweaked.tweak_for_entries_compare() + try: + tweaked.compare_and_display('entries', actual_entries) + except tree.SVNTreeUnequal: + ### do something more + _log_tree_state("ACTUAL ENTRY TREE:", actual_entries.old_tree(), + wc_dir_name) + raise # A variant of previous func, but doesn't pass '-q'. This allows us @@ -1508,19 +1623,20 @@ def run_and_verify_unquiet_status(wc_dir_name, status_tree): expected STATUS_TREE. Returns on success, raises on failure.""" - if isinstance(status_tree, wc.State): - status_tree = status_tree.old_tree() + if not isinstance(status_tree, wc.State): + raise TypeError('wc.State tree expected') exit_code, output, errput = main.run_svn(None, 'status', '-v', '-u', wc_dir_name) - actual = tree.build_tree_from_status(output) + actual_status = svntest.wc.State.from_status(output, wc_dir=wc_dir_name) # Verify actual output against expected output. try: - tree.compare_trees("UNQUIET STATUS", actual, status_tree) + status_tree.compare_and_display('unquiet status', actual_status) except tree.SVNTreeError: - _log_tree_state("ACTUAL UNQUIET STATUS TREE:", actual, wc_dir_name) + _log_tree_state("ACTUAL STATUS TREE:", + actual_status.normalize().old_tree(), wc_dir_name) raise def run_and_verify_status_xml(expected_entries = [], @@ -1530,7 +1646,7 @@ def run_and_verify_status_xml(expected_entries = [], EXPECTED_ENTRIES. 
""" - exit_code, output, errput = run_and_verify_svn(None, None, [], + exit_code, output, errput = run_and_verify_svn(None, [], 'status', '--xml', *args) if len(errput) > 0: @@ -1573,6 +1689,90 @@ def getText(nodelist): pprint.pformat(expected_entries).splitlines(), pprint.pformat(actual_entries).splitlines()))) +def run_and_verify_inherited_prop_xml(path_or_url, + expected_inherited_props, + expected_explicit_props, + propname=None, + peg_rev=None, + *args): + """If PROPNAME is None, then call run_and_verify_svn with proplist -v --xml + --show-inherited-props on PATH_OR_URL, otherwise call run_and_verify_svn + with propget PROPNAME --xml --show-inherited-props. + + PATH_OR_URL is pegged at PEG_REV if the latter is not None. If PEG_REV + is none, then PATH_OR_URL is pegged at HEAD if a url. + + EXPECTED_INHERITED_PROPS is a (possibly empty) dict mapping working copy + paths or URLs to dicts of inherited properties. EXPECTED_EXPLICIT_PROPS is + a (possibly empty) dict of the explicit properties expected on PATH_OR_URL. + + Returns on success, raises on failure if EXPECTED_INHERITED_PROPS or + EXPECTED_EXPLICIT_PROPS don't match the results of proplist/propget. + """ + + if peg_rev is None: + if sandbox.is_url(path_or_url): + path_or_url = path_or_url + '@HEAD' + else: + path_or_url = path_or_url + '@' + str(peg_rev) + + if (propname): + exit_code, output, errput = svntest.actions.run_and_verify_svn( + None, [], 'propget', propname, '--xml', + '--show-inherited-props', path_or_url, *args) + else: + exit_code, output, errput = svntest.actions.run_and_verify_svn( + None, [], 'proplist', '-v', '--xml', '--show-inherited-props', + path_or_url, *args) + + if len(errput) > 0: + raise Failure + + # Props inherited from within the WC are keyed on absolute paths. 
+ expected_iprops = {} + for x in expected_inherited_props: + if sandbox.is_url(x): + expected_iprops[x] = expected_inherited_props[x] + else: + expected_iprops[os.path.abspath(x)] = expected_inherited_props[x] + + actual_iprops = {} + actual_explicit_props = {} + + doc = parseString(''.join(output)) + targets = doc.getElementsByTagName('target') + for t in targets: + + # Create actual inherited props. + iprops = t.getElementsByTagName('inherited_property') + + if len(iprops) > 0: + actual_iprops[t.getAttribute('path')]={} + + for i in iprops: + actual_iprops[t.getAttribute('path')][i.getAttribute('name')] = \ + i.firstChild.nodeValue + + # Create actual explicit props. + xprops = t.getElementsByTagName('property') + + for x in xprops: + actual_explicit_props[x.getAttribute('name')] = x.firstChild.nodeValue + + if expected_explicit_props != actual_explicit_props: + raise svntest.Failure( + 'Actual and expected explicit props do not match\n' + + '\n'.join(difflib.ndiff( + pprint.pformat(expected_explicit_props).splitlines(), + pprint.pformat(actual_explicit_props).splitlines()))) + + if expected_iprops != actual_iprops: + raise svntest.Failure( + 'Actual and expected inherited props do not match\n' + + '\n'.join(difflib.ndiff( + pprint.pformat(expected_iprops).splitlines(), + pprint.pformat(actual_iprops).splitlines()))) + def run_and_verify_diff_summarize_xml(error_re_string = [], expected_prefix = None, expected_paths = [], @@ -1595,7 +1795,7 @@ def run_and_verify_diff_summarize_xml(error_re_string = [], EXPECTED_PROPS and EXPECTED_KINDS. 
Returns on success, raises on failure.""" - exit_code, output, errput = run_and_verify_svn(None, None, error_re_string, + exit_code, output, errput = run_and_verify_svn(None, error_re_string, 'diff', '--summarize', '--xml', *args) @@ -1612,7 +1812,7 @@ def run_and_verify_diff_summarize_xml(error_re_string = [], for path in paths: modified_path = path.childNodes[0].data - if (expected_prefix is not None + if (expected_prefix and modified_path.find(expected_prefix) == 0): modified_path = modified_path.replace(expected_prefix, '')[1:].strip() @@ -1679,16 +1879,16 @@ def run_and_validate_lock(path, username): comment = "Locking path:%s." % path # lock the path - run_and_verify_svn(None, ".*locked by user", [], 'lock', + run_and_verify_svn(".*locked by user", [], 'lock', '--username', username, '-m', comment, path) # Run info and check that we get the lock fields. - exit_code, output, err = run_and_verify_svn(None, None, [], + exit_code, output, err = run_and_verify_svn(None, [], 'info','-R', path) - ### TODO: Leverage RegexOuput([...], match_all=True) here. + ### TODO: Leverage RegexOutput([...], match_all=True) here. # prepare the regexs to compare against token_re = re.compile(".*?Lock Token: opaquelocktoken:.*?", re.DOTALL) author_re = re.compile(".*?Lock Owner: %s\n.*?" % username, re.DOTALL) @@ -1711,10 +1911,27 @@ def _run_and_verify_resolve(cmd, expected_paths, *args): # TODO: verify that the status of PATHS changes accordingly. 
if len(args) == 0: args = expected_paths - expected_output = verify.UnorderedOutput([ - "Resolved conflicted state of '" + path + "'\n" for path in - expected_paths]) - run_and_verify_svn(None, expected_output, [], + expected_output = verify.AlternateOutput([ + verify.UnorderedOutput([ + "Resolved conflicted state of '" + path + "'\n" for path in + expected_paths]), + verify.UnorderedOutput([ + "Breaking move with source path '" + path + "'\n" for path in + expected_paths] + [ + "Resolved conflicted state of '" + path + "'\n" for path in + expected_paths]), + verify.UnorderedOutput([ + "Merge conflicts in '" + path + "' marked as resolved.\n" for path in + expected_paths]), + verify.UnorderedRegexListOutput([ + "Conflict in property.*at '" + re.escape(path) + "' marked as resolved.\n" \ + for path in expected_paths]), + verify.UnorderedOutput([ + "Tree conflict at '" + path + "' marked as resolved.\n" for path in + expected_paths]), + ], + match_all=False) + run_and_verify_svn(expected_output, [], cmd, *args) def run_and_verify_resolve(expected_paths, *args): @@ -1738,7 +1955,7 @@ def run_and_verify_revert(expected_paths, *args): expected_output = verify.UnorderedOutput([ "Reverted '" + path + "'\n" for path in expected_paths]) - run_and_verify_svn(None, expected_output, [], + run_and_verify_svn(expected_output, [], "revert", *args) @@ -1747,32 +1964,52 @@ def run_and_verify_revert(expected_paths, *args): # This allows a test to *quickly* bootstrap itself. -def make_repo_and_wc(sbox, create_wc = True, read_only = False, - minor_version = None): - """Create a fresh 'Greek Tree' repository and check out a WC from it. +def make_repo_and_wc(sbox, create_wc=True, read_only=False, empty=False, + minor_version=None, tree=None): + """Create a fresh repository and check out a WC from it. If EMPTY is + True, the repository and WC will be empty and at revision 0, + otherwise they will contain the 'Greek Tree' at revision 1. 
If READ_ONLY is False, a dedicated repository will be created, at the path - SBOX.repo_dir. If READ_ONLY is True, the pristine repository will be used. + SBOX.repo_dir. If READ_ONLY is True, a shared pristine repository may be + used or a dedicated repository may be created. (Currently we use a shared + pristine 'Greek tree' repo but we create a dedicated empty repo.) In either case, SBOX.repo_url is assumed to point to the repository that will be used. - If create_wc is True, a dedicated working copy will be checked out from + If CREATE_WC is True, a dedicated working copy will be checked out from the repository, at the path SBOX.wc_dir. Returns on success, raises on failure.""" - # Create (or copy afresh) a new repos with a greek tree in it. - if not read_only: - guarantee_greek_repository(sbox.repo_dir, minor_version) + # Create or copy or reference the appropriate kind of repository: + # if we want a non-empty, Greek repo, refer to the shared one; else + # if we want an empty repo or a writable Greek repo, create one. + # (We could have a shared empty repo for read-only use, but we don't.) + if empty: + guarantee_empty_repository(sbox.repo_dir, minor_version) + expected_state = svntest.wc.State('', {}) + else: + if tree == 'greek': + if not read_only: + guarantee_greek_repository(sbox.repo_dir, minor_version) + expected_state = main.greek_state + elif tree == 'trojan': + if not read_only: + guarantee_trojan_repository(sbox.repo_dir, minor_version) + expected_state = main.trojan_state + else: + raise ValueError("'tree' must be 'greek' or 'trojan'" + " but was '%s'" % str(tree)) if create_wc: # Generate the expected output tree. - expected_output = main.greek_state.copy() + expected_output = expected_state.copy() expected_output.wc_dir = sbox.wc_dir expected_output.tweak(status='A ', contents=None) # Generate an expected wc tree. - expected_wc = main.greek_state + expected_wc = expected_state # Do a checkout, and verify the resulting output and disk contents. 
run_and_verify_checkout(sbox.repo_url, @@ -1783,7 +2020,7 @@ def make_repo_and_wc(sbox, create_wc = True, read_only = False, # just make sure the parent folder of our working copy is created try: os.mkdir(main.general_wc_dir) - except OSError, err: + except OSError as err: if err.errno != errno.EEXIST: raise @@ -1797,14 +2034,21 @@ def duplicate_dir(wc_name, wc_copy_name): -def get_virginal_state(wc_dir, rev): +def get_virginal_state(wc_dir, rev, tree='greek'): "Return a virginal greek tree state for a WC and repos at revision REV." rev = str(rev) ### maybe switch rev to an integer? # copy the greek tree, shift it to the new wc_dir, insert a root elem, # then tweak all values - state = main.greek_state.copy() + if tree == 'greek': + state = main.greek_state.copy() + elif tree == 'trojan': + state = main.trojan_state.copy() + else: + raise ValueError("'tree' must be 'greek' or 'trojan'" + " but was '%s'" % str(tree)) + state.wc_dir = wc_dir state.desc[''] = wc.StateItem() state.tweak(contents=None, status=' ', wc_rev=rev) @@ -1812,11 +2056,11 @@ def get_virginal_state(wc_dir, rev): return state # Cheap administrative directory locking -def lock_admin_dir(wc_dir, recursive=False): +def lock_admin_dir(wc_dir, recursive=False, work_queue=False): "Lock a SVN administrative directory" db, root_path, relpath = wc.open_wc_db(wc_dir) - svntest.main.run_wc_lock_tester(recursive, wc_dir) + svntest.main.run_wc_lock_tester(recursive, wc_dir, work_queue) def set_incomplete(wc_dir, revision): "Make wc_dir incomplete at revision" @@ -1831,10 +2075,20 @@ def get_wc_base_rev(wc_dir): "Return the BASE revision of the working copy at WC_DIR." 
return run_and_parse_info(wc_dir)[0]['Revision'] +def load_dumpfile(filename): + "Return the contents of the FILENAME assuming that it is a dump file" + with open(filename, "rb") as fp: + dump_str = fp.readlines() + return dump_str + def hook_failure_message(hook_name): """Return the error message that the client prints for failure of the specified hook HOOK_NAME. The wording changed with Subversion 1.5.""" - if svntest.main.options.server_minor_version < 5: + + # Output depends on the server version, not the repository version. + # This gets the wrong result for modern servers with old format + # repositories. + if svntest.main.options.server_minor_version < 5 and not svntest.main.is_ra_type_file(): return "'%s' hook failed with error output:\n" % hook_name else: if hook_name in ["start-commit", "pre-commit"]: @@ -1898,25 +2152,50 @@ def create_failing_post_commit_hook(repo_dir): '@echo Post-commit hook failed 1>&2\n' '@exit 1\n') +def _make_temp_file(contents): + """ Create a unique temporary file with the specified CONTENTS + and return its path. """ + from tempfile import mkstemp + (fd, path) = mkstemp() + os.close(fd) + file = open(path, 'wb') + file.write(contents) + file.flush() + file.close() + return path + # set_prop can be used for properties with NULL characters which are not # handled correctly when passed to subprocess.Popen() and values like "*" # which are not handled correctly on Windows. 
-def set_prop(name, value, path, expected_re_string=None): +def set_prop(name, value, path, expected_re_string=None, force=None): """Set a property with specified value""" - if value and (value[0] == '-' or '\x00' in value or sys.platform == 'win32'): - from tempfile import mkstemp - (fd, value_file_path) = mkstemp() - os.close(fd) - value_file = open(value_file_path, 'wb') - value_file.write(value) - value_file.flush() - value_file.close() - exit_code, out, err = main.run_svn(expected_re_string, 'propset', - '-F', value_file_path, name, path) - os.remove(value_file_path) + if not force: + propset = ('propset',) + else: + propset = ('propset', '--force') + + if isinstance(value, bytes): + file = _make_temp_file(value) + elif isinstance(value, str): + if value and (value[0] == '-' or '\x00' in value or + sys.platform == 'win32'): + file = _make_temp_file(value.encode()) + else: + file = None else: - exit_code, out, err = main.run_svn(expected_re_string, 'propset', - name, value, path) + raise TypeError(value) + + if file is None: + propset += (name, value, path) + else: + propset += ('-F', file, name, path) + + try: + exit_code, out, err = main.run_svn(expected_re_string, *propset) + finally: + if file is not None: + os.remove(file) + if expected_re_string: if not expected_re_string.startswith(".*"): expected_re_string = ".*(" + expected_re_string + ")" @@ -1932,7 +2211,7 @@ def check_prop(name, path, exp_out, revprop=None): else: revprop_options = [] # Not using run_svn because binary_mode must be set - exit_code, out, err = main.run_command(main.svn_binary, None, 1, 'pg', + exit_code, out, err = main.run_command(main.svn_binary, None, True, 'pg', '--strict', name, path, '--config-dir', main.default_config_dir, @@ -1989,7 +2268,7 @@ def inject_conflict_into_wc(sbox, state_path, file_path, if expected_status: expected_status.tweak(state_path, wc_rev=merged_rev) run_and_verify_commit(wc_dir, expected_output, expected_status, - None, file_path) + [], file_path) # 
Backdate the file. exit_code, output, errput = main.run_svn(None, "up", "-r", str(prev_rev), @@ -2010,7 +2289,7 @@ def inject_conflict_into_wc(sbox, state_path, file_path, inject_conflict_into_expected_state(state_path, expected_disk, expected_status, conflicting_contents, contents, - merged_rev) + prev_rev, merged_rev) exit_code, output, errput = main.run_svn(None, "up", "-r", str(merged_rev), file_path) if expected_status: @@ -2018,26 +2297,30 @@ def inject_conflict_into_wc(sbox, state_path, file_path, def inject_conflict_into_expected_state(state_path, expected_disk, expected_status, - wc_text, merged_text, merged_rev): + wc_text, merged_text, prev_rev, + merged_rev): """Update the EXPECTED_DISK and EXPECTED_STATUS trees for the conflict at STATE_PATH (ignored if None). WC_TEXT, MERGED_TEXT, and MERGED_REV are used to determine the contents of the conflict (the text parameters should be newline-terminated).""" if expected_disk: conflict_marker = make_conflict_marker_text(wc_text, merged_text, - merged_rev) + prev_rev, merged_rev) existing_text = expected_disk.desc[state_path].contents or "" expected_disk.tweak(state_path, contents=existing_text + conflict_marker) if expected_status: expected_status.tweak(state_path, status='C ') -def make_conflict_marker_text(wc_text, merged_text, merged_rev): +def make_conflict_marker_text(wc_text, merged_text, prev_rev, merged_rev, + old_text=''): """Return the conflict marker text described by WC_TEXT (the current text in the working copy, MERGED_TEXT (the conflicting text merged in), and MERGED_REV (the revision from whence the conflicting text came).""" - return "<<<<<<< .working\n" + wc_text + "=======\n" + \ + return "<<<<<<< .working\n" + wc_text + \ + "||||||| .merge-left.r" + str(prev_rev) + '\n' + \ + old_text + "=======\n" + \ merged_text + ">>>>>>> .merge-right.r" + str(merged_rev) + "\n" @@ -2089,7 +2372,7 @@ def build_greek_tree_conflicts(sbox): expected_status = get_virginal_state(wc_dir, 1) 
expected_status.tweak('A/D/G/pi', wc_rev='2') expected_status.remove('A/D/G/rho', 'A/D/G/tau') - run_and_verify_commit(wc_dir, expected_output, expected_status, None, + run_and_verify_commit(wc_dir, expected_output, expected_status, [], '-m', 'Incoming changes.', wc_dir ) # Update back to the pristine state ("time-warp"). @@ -2101,7 +2384,7 @@ def build_greek_tree_conflicts(sbox): expected_disk = main.greek_state expected_status = get_virginal_state(wc_dir, 1) run_and_verify_update(wc_dir, expected_output, expected_disk, - expected_status, None, None, None, None, None, False, + expected_status, [], False, '-r', '1', wc_dir) # Make local changes @@ -2112,983 +2395,6 @@ def build_greek_tree_conflicts(sbox): # Update, receiving the incoming changes on top of the local changes, # causing tree conflicts. Don't check for any particular result: that is # the job of other tests. - run_and_verify_svn(None, verify.AnyOutput, [], 'update', wc_dir) - - -def make_deep_trees(base): - """Helper function for deep trees conflicts. Create a set of trees, - each in its own "container" dir. Any conflicts can be tested separately - in each container. - """ - j = os.path.join - # Create the container dirs. - F = j(base, 'F') - D = j(base, 'D') - DF = j(base, 'DF') - DD = j(base, 'DD') - DDF = j(base, 'DDF') - DDD = j(base, 'DDD') - os.makedirs(F) - os.makedirs(j(D, 'D1')) - os.makedirs(j(DF, 'D1')) - os.makedirs(j(DD, 'D1', 'D2')) - os.makedirs(j(DDF, 'D1', 'D2')) - os.makedirs(j(DDD, 'D1', 'D2', 'D3')) - - # Create their files. - alpha = j(F, 'alpha') - beta = j(DF, 'D1', 'beta') - gamma = j(DDF, 'D1', 'D2', 'gamma') - main.file_append(alpha, "This is the file 'alpha'.\n") - main.file_append(beta, "This is the file 'beta'.\n") - main.file_append(gamma, "This is the file 'gamma'.\n") - - -def add_deep_trees(sbox, base_dir_name): - """Prepare a "deep_trees" within a given directory. - - The directory / is created and a deep_tree - is created within. 
The items are only added, a commit has to be - called separately, if needed. - - will thus be a container for the set of containers - mentioned in make_deep_trees(). - """ - j = os.path.join - base = j(sbox.wc_dir, base_dir_name) - make_deep_trees(base) - main.run_svn(None, 'add', base) - - -Item = wc.StateItem - -# initial deep trees state -deep_trees_virginal_state = wc.State('', { - 'F' : Item(), - 'F/alpha' : Item("This is the file 'alpha'.\n"), - 'D' : Item(), - 'D/D1' : Item(), - 'DF' : Item(), - 'DF/D1' : Item(), - 'DF/D1/beta' : Item("This is the file 'beta'.\n"), - 'DD' : Item(), - 'DD/D1' : Item(), - 'DD/D1/D2' : Item(), - 'DDF' : Item(), - 'DDF/D1' : Item(), - 'DDF/D1/D2' : Item(), - 'DDF/D1/D2/gamma' : Item("This is the file 'gamma'.\n"), - 'DDD' : Item(), - 'DDD/D1' : Item(), - 'DDD/D1/D2' : Item(), - 'DDD/D1/D2/D3' : Item(), - }) - - -# Many actions on deep trees and their resulting states... - -def deep_trees_leaf_edit(base): - """Helper function for deep trees test cases. 
Append text to files, - create new files in empty directories, and change leaf node properties.""" - j = os.path.join - F = j(base, 'F', 'alpha') - DF = j(base, 'DF', 'D1', 'beta') - DDF = j(base, 'DDF', 'D1', 'D2', 'gamma') - main.file_append(F, "More text for file alpha.\n") - main.file_append(DF, "More text for file beta.\n") - main.file_append(DDF, "More text for file gamma.\n") - run_and_verify_svn(None, verify.AnyOutput, [], - 'propset', 'prop1', '1', F, DF, DDF) - - D = j(base, 'D', 'D1') - DD = j(base, 'DD', 'D1', 'D2') - DDD = j(base, 'DDD', 'D1', 'D2', 'D3') - run_and_verify_svn(None, verify.AnyOutput, [], - 'propset', 'prop1', '1', D, DD, DDD) - D = j(base, 'D', 'D1', 'delta') - DD = j(base, 'DD', 'D1', 'D2', 'epsilon') - DDD = j(base, 'DDD', 'D1', 'D2', 'D3', 'zeta') - main.file_append(D, "This is the file 'delta'.\n") - main.file_append(DD, "This is the file 'epsilon'.\n") - main.file_append(DDD, "This is the file 'zeta'.\n") - run_and_verify_svn(None, verify.AnyOutput, [], - 'add', D, DD, DDD) - -# deep trees state after a call to deep_trees_leaf_edit -deep_trees_after_leaf_edit = wc.State('', { - 'F' : Item(), - 'F/alpha' : Item("This is the file 'alpha'.\nMore text for file alpha.\n"), - 'D' : Item(), - 'D/D1' : Item(), - 'D/D1/delta' : Item("This is the file 'delta'.\n"), - 'DF' : Item(), - 'DF/D1' : Item(), - 'DF/D1/beta' : Item("This is the file 'beta'.\nMore text for file beta.\n"), - 'DD' : Item(), - 'DD/D1' : Item(), - 'DD/D1/D2' : Item(), - 'DD/D1/D2/epsilon' : Item("This is the file 'epsilon'.\n"), - 'DDF' : Item(), - 'DDF/D1' : Item(), - 'DDF/D1/D2' : Item(), - 'DDF/D1/D2/gamma' : Item("This is the file 'gamma'.\nMore text for file gamma.\n"), - 'DDD' : Item(), - 'DDD/D1' : Item(), - 'DDD/D1/D2' : Item(), - 'DDD/D1/D2/D3' : Item(), - 'DDD/D1/D2/D3/zeta' : Item("This is the file 'zeta'.\n"), - }) - - -def deep_trees_leaf_del(base): - """Helper function for deep trees test cases. 
Delete files and empty - dirs.""" - j = os.path.join - F = j(base, 'F', 'alpha') - D = j(base, 'D', 'D1') - DF = j(base, 'DF', 'D1', 'beta') - DD = j(base, 'DD', 'D1', 'D2') - DDF = j(base, 'DDF', 'D1', 'D2', 'gamma') - DDD = j(base, 'DDD', 'D1', 'D2', 'D3') - main.run_svn(None, 'rm', F, D, DF, DD, DDF, DDD) - -# deep trees state after a call to deep_trees_leaf_del -deep_trees_after_leaf_del = wc.State('', { - 'F' : Item(), - 'D' : Item(), - 'DF' : Item(), - 'DF/D1' : Item(), - 'DD' : Item(), - 'DD/D1' : Item(), - 'DDF' : Item(), - 'DDF/D1' : Item(), - 'DDF/D1/D2' : Item(), - 'DDD' : Item(), - 'DDD/D1' : Item(), - 'DDD/D1/D2' : Item(), - }) - -# deep trees state after a call to deep_trees_leaf_del with no commit -def deep_trees_after_leaf_del_no_ci(wc_dir): - if svntest.main.wc_is_singledb(wc_dir): - return deep_trees_after_leaf_del - else: - return deep_trees_empty_dirs - - -def deep_trees_tree_del(base): - """Helper function for deep trees test cases. Delete top-level dirs.""" - j = os.path.join - F = j(base, 'F', 'alpha') - D = j(base, 'D', 'D1') - DF = j(base, 'DF', 'D1') - DD = j(base, 'DD', 'D1') - DDF = j(base, 'DDF', 'D1') - DDD = j(base, 'DDD', 'D1') - main.run_svn(None, 'rm', F, D, DF, DD, DDF, DDD) - -def deep_trees_rmtree(base): - """Helper function for deep trees test cases. 
Delete top-level dirs - with rmtree instead of svn del.""" - j = os.path.join - F = j(base, 'F', 'alpha') - D = j(base, 'D', 'D1') - DF = j(base, 'DF', 'D1') - DD = j(base, 'DD', 'D1') - DDF = j(base, 'DDF', 'D1') - DDD = j(base, 'DDD', 'D1') - os.unlink(F) - main.safe_rmtree(D) - main.safe_rmtree(DF) - main.safe_rmtree(DD) - main.safe_rmtree(DDF) - main.safe_rmtree(DDD) - -# deep trees state after a call to deep_trees_tree_del -deep_trees_after_tree_del = wc.State('', { - 'F' : Item(), - 'D' : Item(), - 'DF' : Item(), - 'DD' : Item(), - 'DDF' : Item(), - 'DDD' : Item(), - }) - -# deep trees state without any files -deep_trees_empty_dirs = wc.State('', { - 'F' : Item(), - 'D' : Item(), - 'D/D1' : Item(), - 'DF' : Item(), - 'DF/D1' : Item(), - 'DD' : Item(), - 'DD/D1' : Item(), - 'DD/D1/D2' : Item(), - 'DDF' : Item(), - 'DDF/D1' : Item(), - 'DDF/D1/D2' : Item(), - 'DDD' : Item(), - 'DDD/D1' : Item(), - 'DDD/D1/D2' : Item(), - 'DDD/D1/D2/D3' : Item(), - }) - -# deep trees state after a call to deep_trees_tree_del with no commit -def deep_trees_after_tree_del_no_ci(wc_dir): - if svntest.main.wc_is_singledb(wc_dir): - return deep_trees_after_tree_del - else: - return deep_trees_empty_dirs - -def deep_trees_tree_del_repos(base): - """Helper function for deep trees test cases. Delete top-level dirs, - directly in the repository.""" - j = '/'.join - F = j([base, 'F', 'alpha']) - D = j([base, 'D', 'D1']) - DF = j([base, 'DF', 'D1']) - DD = j([base, 'DD', 'D1']) - DDF = j([base, 'DDF', 'D1']) - DDD = j([base, 'DDD', 'D1']) - main.run_svn(None, 'mkdir', '-m', '', F, D, DF, DD, DDF, DDD) - -# Expected merge/update/switch output. 
- -deep_trees_conflict_output = wc.State('', { - 'F/alpha' : Item(status=' ', treeconflict='C'), - 'D/D1' : Item(status=' ', treeconflict='C'), - 'DF/D1' : Item(status=' ', treeconflict='C'), - 'DD/D1' : Item(status=' ', treeconflict='C'), - 'DDF/D1' : Item(status=' ', treeconflict='C'), - 'DDD/D1' : Item(status=' ', treeconflict='C'), - }) - -deep_trees_conflict_output_skipped = wc.State('', { - 'D/D1' : Item(verb='Skipped'), - 'F/alpha' : Item(verb='Skipped'), - 'DD/D1' : Item(verb='Skipped'), - 'DF/D1' : Item(verb='Skipped'), - 'DDD/D1' : Item(verb='Skipped'), - 'DDF/D1' : Item(verb='Skipped'), - }) - -# Expected status output after merge/update/switch. - -deep_trees_status_local_tree_del = wc.State('', { - '' : Item(status=' ', wc_rev=3), - 'D' : Item(status=' ', wc_rev=3), - 'D/D1' : Item(status='D ', wc_rev=2, treeconflict='C'), - 'DD' : Item(status=' ', wc_rev=3), - 'DD/D1' : Item(status='D ', wc_rev=2, treeconflict='C'), - 'DD/D1/D2' : Item(status='D ', wc_rev=2), - 'DDD' : Item(status=' ', wc_rev=3), - 'DDD/D1' : Item(status='D ', wc_rev=2, treeconflict='C'), - 'DDD/D1/D2' : Item(status='D ', wc_rev=2), - 'DDD/D1/D2/D3' : Item(status='D ', wc_rev=2), - 'DDF' : Item(status=' ', wc_rev=3), - 'DDF/D1' : Item(status='D ', wc_rev=2, treeconflict='C'), - 'DDF/D1/D2' : Item(status='D ', wc_rev=2), - 'DDF/D1/D2/gamma' : Item(status='D ', wc_rev=2), - 'DF' : Item(status=' ', wc_rev=3), - 'DF/D1' : Item(status='D ', wc_rev=2, treeconflict='C'), - 'DF/D1/beta' : Item(status='D ', wc_rev=2), - 'F' : Item(status=' ', wc_rev=3), - 'F/alpha' : Item(status='D ', wc_rev=2, treeconflict='C'), - }) - -deep_trees_status_local_leaf_edit = wc.State('', { - '' : Item(status=' ', wc_rev=3), - 'D' : Item(status=' ', wc_rev=3), - 'D/D1' : Item(status=' M', wc_rev=2, treeconflict='C'), - 'D/D1/delta' : Item(status='A ', wc_rev=0), - 'DD' : Item(status=' ', wc_rev=3), - 'DD/D1' : Item(status=' ', wc_rev=2, treeconflict='C'), - 'DD/D1/D2' : Item(status=' M', wc_rev=2), - 
'DD/D1/D2/epsilon' : Item(status='A ', wc_rev=0), - 'DDD' : Item(status=' ', wc_rev=3), - 'DDD/D1' : Item(status=' ', wc_rev=2, treeconflict='C'), - 'DDD/D1/D2' : Item(status=' ', wc_rev=2), - 'DDD/D1/D2/D3' : Item(status=' M', wc_rev=2), - 'DDD/D1/D2/D3/zeta' : Item(status='A ', wc_rev=0), - 'DDF' : Item(status=' ', wc_rev=3), - 'DDF/D1' : Item(status=' ', wc_rev=2, treeconflict='C'), - 'DDF/D1/D2' : Item(status=' ', wc_rev=2), - 'DDF/D1/D2/gamma' : Item(status='MM', wc_rev=2), - 'DF' : Item(status=' ', wc_rev=3), - 'DF/D1' : Item(status=' ', wc_rev=2, treeconflict='C'), - 'DF/D1/beta' : Item(status='MM', wc_rev=2), - 'F' : Item(status=' ', wc_rev=3), - 'F/alpha' : Item(status='MM', wc_rev=2, treeconflict='C'), - }) - - -class DeepTreesTestCase: - """Describes one tree-conflicts test case. - See deep_trees_run_tests_scheme_for_update(), ..._switch(), ..._merge(). - - The name field is the subdirectory name in which the test should be run. - - The local_action and incoming_action are the functions to run - to construct the local changes and incoming changes, respectively. - See deep_trees_leaf_edit, deep_trees_tree_del, etc. - - The expected_* and error_re_string arguments are described in functions - run_and_verify_[update|switch|merge] - except expected_info, which is a dict that has path keys with values - that are dicts as passed to run_and_verify_info(): - expected_info = { - 'F/alpha' : { - 'Revision' : '3', - 'Tree conflict' : - '^local delete, incoming edit upon update' - + ' Source left: .file.*/F/alpha@2' - + ' Source right: .file.*/F/alpha@3$', - }, - 'DF/D1' : { - 'Tree conflict' : - '^local delete, incoming edit upon update' - + ' Source left: .dir.*/DF/D1@2' - + ' Source right: .dir.*/DF/D1@3$', - }, - ... - } - - Note: expected_skip is only used in merge, i.e. using - deep_trees_run_tests_scheme_for_merge. 
- """ - - def __init__(self, name, local_action, incoming_action, - expected_output = None, expected_disk = None, - expected_status = None, expected_skip = None, - error_re_string = None, - commit_block_string = ".*remains in conflict.*", - expected_info = None): - self.name = name - self.local_action = local_action - self.incoming_action = incoming_action - self.expected_output = expected_output - self.expected_disk = expected_disk - self.expected_status = expected_status - self.expected_skip = expected_skip - self.error_re_string = error_re_string - self.commit_block_string = commit_block_string - self.expected_info = expected_info - - - -def deep_trees_run_tests_scheme_for_update(sbox, greater_scheme): - """ - Runs a given list of tests for conflicts occuring at an update operation. - - This function wants to save time and perform a number of different - test cases using just a single repository and performing just one commit - for all test cases instead of one for each test case. - - 1) Each test case is initialized in a separate subdir. Each subdir - again contains one set of "deep_trees", being separate container - dirs for different depths of trees (F, D, DF, DD, DDF, DDD). - - 2) A commit is performed across all test cases and depths. - (our initial state, -r2) - - 3) In each test case subdir (e.g. "local_tree_del_incoming_leaf_edit"), - its *incoming* action is performed (e.g. "deep_trees_leaf_edit"), in - each of the different depth trees (F, D, DF, ... DDD). - - 4) A commit is performed across all test cases and depths: - our "incoming" state is "stored away in the repository for now", - -r3. - - 5) All test case dirs and contained deep_trees are time-warped - (updated) back to -r2, the initial state containing deep_trees. - - 6) In each test case subdir (e.g. "local_tree_del_incoming_leaf_edit"), - its *local* action is performed (e.g. "deep_trees_leaf_del"), in - each of the different depth trees (F, D, DF, ... DDD). 
- - 7) An update to -r3 is performed across all test cases and depths. - This causes tree-conflicts between the "local" state in the working - copy and the "incoming" state from the repository, -r3. - - 8) A commit is performed in each separate container, to verify - that each tree-conflict indeed blocks a commit. - - The sbox parameter is just the sbox passed to a test function. No need - to call sbox.build(), since it is called (once) within this function. - - The "table" greater_scheme models all of the different test cases - that should be run using a single repository. - - greater_scheme is a list of DeepTreesTestCase items, which define complete - test setups, so that they can be performed as described above. - """ - - j = os.path.join - - if not sbox.is_built(): - sbox.build() - wc_dir = sbox.wc_dir - - - # 1) create directories - - for test_case in greater_scheme: - try: - add_deep_trees(sbox, test_case.name) - except: - logger.warn("ERROR IN: Tests scheme for update: " - + "while setting up deep trees in '%s'", test_case.name) - raise - - - # 2) commit initial state - - main.run_svn(None, 'commit', '-m', 'initial state', wc_dir) - - - # 3) apply incoming changes - - for test_case in greater_scheme: - try: - test_case.incoming_action(j(sbox.wc_dir, test_case.name)) - except: - logger.warn("ERROR IN: Tests scheme for update: " - + "while performing incoming action in '%s'", test_case.name) - raise - - - # 4) commit incoming changes - - main.run_svn(None, 'commit', '-m', 'incoming changes', wc_dir) - - - # 5) time-warp back to -r2 - - main.run_svn(None, 'update', '-r2', wc_dir) - - - # 6) apply local changes - - for test_case in greater_scheme: - try: - test_case.local_action(j(wc_dir, test_case.name)) - except: - logger.warn("ERROR IN: Tests scheme for update: " - + "while performing local action in '%s'", test_case.name) - raise - - - # 7) update to -r3, conflicting with incoming changes. - # A lot of different things are expected. 
- # Do separate update operations for each test case. - - for test_case in greater_scheme: - try: - base = j(wc_dir, test_case.name) - - x_out = test_case.expected_output - if x_out != None: - x_out = x_out.copy() - x_out.wc_dir = base - - x_disk = test_case.expected_disk - - x_status = test_case.expected_status - if x_status != None: - x_status.copy() - x_status.wc_dir = base - - run_and_verify_update(base, x_out, x_disk, None, - error_re_string = test_case.error_re_string) - if x_status: - run_and_verify_unquiet_status(base, x_status) - - x_info = test_case.expected_info or {} - for path in x_info: - run_and_verify_info([x_info[path]], j(base, path)) - - except: - logger.warn("ERROR IN: Tests scheme for update: " - + "while verifying in '%s'", test_case.name) - raise - - - # 8) Verify that commit fails. - - for test_case in greater_scheme: - try: - base = j(wc_dir, test_case.name) - - x_status = test_case.expected_status - if x_status != None: - x_status.copy() - x_status.wc_dir = base - - run_and_verify_commit(base, None, x_status, - test_case.commit_block_string, - base) - except: - logger.warn("ERROR IN: Tests scheme for update: " - + "while checking commit-blocking in '%s'", test_case.name) - raise - - - -def deep_trees_skipping_on_update(sbox, test_case, skip_paths, - chdir_skip_paths): - """ - Create tree conflicts, then update again, expecting the existing tree - conflicts to be skipped. - SKIP_PATHS is a list of paths, relative to the "base dir", for which - "update" on the "base dir" should report as skipped. - CHDIR_SKIP_PATHS is a list of (target-path, skipped-path) pairs for which - an update of "target-path" (relative to the "base dir") should result in - "skipped-path" (relative to "target-path") being reported as skipped. - """ - - """FURTHER_ACTION is a function that will make a further modification to - each target, this being the modification that we expect to be skipped. 
The - function takes the "base dir" (the WC path to the test case directory) as - its only argument.""" - further_action = deep_trees_tree_del_repos - - j = os.path.join - wc_dir = sbox.wc_dir - base = j(wc_dir, test_case.name) - - # Initialize: generate conflicts. (We do not check anything here.) - setup_case = DeepTreesTestCase(test_case.name, - test_case.local_action, - test_case.incoming_action, - None, - None, - None) - deep_trees_run_tests_scheme_for_update(sbox, [setup_case]) - - # Make a further change to each target in the repository so there is a new - # revision to update to. (This is r4.) - further_action(sbox.repo_url + '/' + test_case.name) - - # Update whole working copy, expecting the nodes still in conflict to be - # skipped. - - x_out = test_case.expected_output - if x_out != None: - x_out = x_out.copy() - x_out.wc_dir = base - - x_disk = test_case.expected_disk - - x_status = test_case.expected_status - if x_status != None: - x_status = x_status.copy() - x_status.wc_dir = base - # Account for nodes that were updated by further_action - x_status.tweak('', 'D', 'F', 'DD', 'DF', 'DDD', 'DDF', wc_rev=4) - - run_and_verify_update(base, x_out, x_disk, None, - error_re_string = test_case.error_re_string) - - run_and_verify_unquiet_status(base, x_status) - - # Try to update each in-conflict subtree. Expect a 'Skipped' output for - # each, and the WC status to be unchanged. - for path in skip_paths: - run_and_verify_update(j(base, path), - wc.State(base, {path : Item(verb='Skipped')}), - None, None) - - run_and_verify_unquiet_status(base, x_status) - - # Try to update each in-conflict subtree. Expect a 'Skipped' output for - # each, and the WC status to be unchanged. - # This time, cd to the subdir before updating it. 
- was_cwd = os.getcwd() - for path, skipped in chdir_skip_paths: - if isinstance(skipped, list): - expected_skip = {} - for p in skipped: - expected_skip[p] = Item(verb='Skipped') - else: - expected_skip = {skipped : Item(verb='Skipped')} - p = j(base, path) - run_and_verify_update(p, - wc.State(p, expected_skip), - None, None) - os.chdir(was_cwd) - - run_and_verify_unquiet_status(base, x_status) - - # Verify that commit still fails. - for path, skipped in chdir_skip_paths: - - run_and_verify_commit(j(base, path), None, None, - test_case.commit_block_string, - base) - - run_and_verify_unquiet_status(base, x_status) - - -def deep_trees_run_tests_scheme_for_switch(sbox, greater_scheme): - """ - Runs a given list of tests for conflicts occuring at a switch operation. - - This function wants to save time and perform a number of different - test cases using just a single repository and performing just one commit - for all test cases instead of one for each test case. - - 1) Each test case is initialized in a separate subdir. Each subdir - again contains two subdirs: one "local" and one "incoming" for - the switch operation. These contain a set of deep_trees each. - - 2) A commit is performed across all test cases and depths. - (our initial state, -r2) - - 3) In each test case subdir's incoming subdir, the - incoming actions are performed. - - 4) A commit is performed across all test cases and depths. (-r3) - - 5) In each test case subdir's local subdir, the local actions are - performed. They remain uncommitted in the working copy. - - 6) In each test case subdir's local dir, a switch is performed to its - corresponding incoming dir. - This causes conflicts between the "local" state in the working - copy and the "incoming" state from the incoming subdir (still -r3). - - 7) A commit is performed in each separate container, to verify - that each tree-conflict indeed blocks a commit. - - The sbox parameter is just the sbox passed to a test function. 
No need - to call sbox.build(), since it is called (once) within this function. - - The "table" greater_scheme models all of the different test cases - that should be run using a single repository. - - greater_scheme is a list of DeepTreesTestCase items, which define complete - test setups, so that they can be performed as described above. - """ - - j = os.path.join - - if not sbox.is_built(): - sbox.build() - wc_dir = sbox.wc_dir - - - # 1) Create directories. - - for test_case in greater_scheme: - try: - base = j(sbox.wc_dir, test_case.name) - os.makedirs(base) - make_deep_trees(j(base, "local")) - make_deep_trees(j(base, "incoming")) - main.run_svn(None, 'add', base) - except: - logger.warn("ERROR IN: Tests scheme for switch: " - + "while setting up deep trees in '%s'", test_case.name) - raise - - - # 2) Commit initial state (-r2). - - main.run_svn(None, 'commit', '-m', 'initial state', wc_dir) - - - # 3) Apply incoming changes - - for test_case in greater_scheme: - try: - test_case.incoming_action(j(sbox.wc_dir, test_case.name, "incoming")) - except: - logger.warn("ERROR IN: Tests scheme for switch: " - + "while performing incoming action in '%s'", test_case.name) - raise - - - # 4) Commit all changes (-r3). - - main.run_svn(None, 'commit', '-m', 'incoming changes', wc_dir) - - - # 5) Apply local changes in their according subdirs. - - for test_case in greater_scheme: - try: - test_case.local_action(j(sbox.wc_dir, test_case.name, "local")) - except: - logger.warn("ERROR IN: Tests scheme for switch: " - + "while performing local action in '%s'", test_case.name) - raise - - - # 6) switch the local dir to the incoming url, conflicting with incoming - # changes. A lot of different things are expected. - # Do separate switch operations for each test case. 
- - for test_case in greater_scheme: - try: - local = j(wc_dir, test_case.name, "local") - incoming = sbox.repo_url + "/" + test_case.name + "/incoming" - - x_out = test_case.expected_output - if x_out != None: - x_out = x_out.copy() - x_out.wc_dir = local - - x_disk = test_case.expected_disk - - x_status = test_case.expected_status - if x_status != None: - x_status.copy() - x_status.wc_dir = local - - run_and_verify_switch(local, local, incoming, x_out, x_disk, None, - test_case.error_re_string, None, None, None, - None, False, '--ignore-ancestry') - run_and_verify_unquiet_status(local, x_status) - - x_info = test_case.expected_info or {} - for path in x_info: - run_and_verify_info([x_info[path]], j(local, path)) - except: - logger.warn("ERROR IN: Tests scheme for switch: " - + "while verifying in '%s'", test_case.name) - raise - - - # 7) Verify that commit fails. - - for test_case in greater_scheme: - try: - local = j(wc_dir, test_case.name, 'local') - - x_status = test_case.expected_status - if x_status != None: - x_status.copy() - x_status.wc_dir = local - - run_and_verify_commit(local, None, x_status, - test_case.commit_block_string, - local) - except: - logger.warn("ERROR IN: Tests scheme for switch: " - + "while checking commit-blocking in '%s'", test_case.name) - raise - - -def deep_trees_run_tests_scheme_for_merge(sbox, greater_scheme, - do_commit_local_changes, - do_commit_conflicts=True, - ignore_ancestry=False): - """ - Runs a given list of tests for conflicts occuring at a merge operation. - - This function wants to save time and perform a number of different - test cases using just a single repository and performing just one commit - for all test cases instead of one for each test case. - - 1) Each test case is initialized in a separate subdir. Each subdir - initially contains another subdir, called "incoming", which - contains a set of deep_trees. - - 2) A commit is performed across all test cases and depths. 
- (a pre-initial state) - - 3) In each test case subdir, the "incoming" subdir is copied to "local", - via the `svn copy' command. Each test case's subdir now has two sub- - dirs: "local" and "incoming", initial states for the merge operation. - - 4) An update is performed across all test cases and depths, so that the - copies made in 3) are pulled into the wc. - - 5) In each test case's "incoming" subdir, the incoming action is - performed. - - 6) A commit is performed across all test cases and depths, to commit - the incoming changes. - If do_commit_local_changes is True, this becomes step 7 (swap steps). - - 7) In each test case's "local" subdir, the local_action is performed. - If do_commit_local_changes is True, this becomes step 6 (swap steps). - Then, in effect, the local changes are committed as well. - - 8) In each test case subdir, the "incoming" subdir is merged into the - "local" subdir. If ignore_ancestry is True, then the merge is done - with the --ignore-ancestry option, so mergeinfo is neither considered - nor recorded. This causes conflicts between the "local" state in the - working copy and the "incoming" state from the incoming subdir. - - 9) If do_commit_conflicts is True, then a commit is performed in each - separate container, to verify that each tree-conflict indeed blocks - a commit. - - The sbox parameter is just the sbox passed to a test function. No need - to call sbox.build(), since it is called (once) within this function. - - The "table" greater_scheme models all of the different test cases - that should be run using a single repository. - - greater_scheme is a list of DeepTreesTestCase items, which define complete - test setups, so that they can be performed as described above. - """ - - j = os.path.join - - if not sbox.is_built(): - sbox.build() - wc_dir = sbox.wc_dir - - # 1) Create directories. 
- for test_case in greater_scheme: - try: - base = j(sbox.wc_dir, test_case.name) - os.makedirs(base) - make_deep_trees(j(base, "incoming")) - main.run_svn(None, 'add', base) - except: - logger.warn("ERROR IN: Tests scheme for merge: " - + "while setting up deep trees in '%s'", test_case.name) - raise - - - # 2) Commit pre-initial state (-r2). - - main.run_svn(None, 'commit', '-m', 'pre-initial state', wc_dir) - - - # 3) Copy "incoming" to "local". - - for test_case in greater_scheme: - try: - base_url = sbox.repo_url + "/" + test_case.name - incoming_url = base_url + "/incoming" - local_url = base_url + "/local" - main.run_svn(None, 'cp', incoming_url, local_url, '-m', - 'copy incoming to local') - except: - logger.warn("ERROR IN: Tests scheme for merge: " - + "while copying deep trees in '%s'", test_case.name) - raise - - # 4) Update to load all of the "/local" subdirs into the working copies. - - try: - main.run_svn(None, 'up', sbox.wc_dir) - except: - logger.warn("ERROR IN: Tests scheme for merge: " - + "while updating local subdirs") - raise - - - # 5) Perform incoming actions - - for test_case in greater_scheme: - try: - test_case.incoming_action(j(sbox.wc_dir, test_case.name, "incoming")) - except: - logger.warn("ERROR IN: Tests scheme for merge: " - + "while performing incoming action in '%s'", test_case.name) - raise - - - # 6) or 7) Commit all incoming actions - - if not do_commit_local_changes: - try: - main.run_svn(None, 'ci', '-m', 'Committing incoming actions', - sbox.wc_dir) - except: - logger.warn("ERROR IN: Tests scheme for merge: " - + "while committing incoming actions") - raise - - - # 7) or 6) Perform all local actions. 
- - for test_case in greater_scheme: - try: - test_case.local_action(j(sbox.wc_dir, test_case.name, "local")) - except: - logger.warn("ERROR IN: Tests scheme for merge: " - + "while performing local action in '%s'", test_case.name) - raise - - - # 6) or 7) Commit all incoming actions - - if do_commit_local_changes: - try: - main.run_svn(None, 'ci', '-m', 'Committing incoming and local actions', - sbox.wc_dir) - except: - logger.warn("ERROR IN: Tests scheme for merge: " - + "while committing incoming and local actions") - raise - - - # 8) Merge all "incoming" subdirs to their respective "local" subdirs. - # This creates conflicts between the local changes in the "local" wc - # subdirs and the incoming states committed in the "incoming" subdirs. - - for test_case in greater_scheme: - try: - local = j(sbox.wc_dir, test_case.name, "local") - incoming = sbox.repo_url + "/" + test_case.name + "/incoming" - - x_out = test_case.expected_output - if x_out != None: - x_out = x_out.copy() - x_out.wc_dir = local - - x_disk = test_case.expected_disk - - x_status = test_case.expected_status - if x_status != None: - x_status.copy() - x_status.wc_dir = local - - x_skip = test_case.expected_skip - if x_skip != None: - x_skip.copy() - x_skip.wc_dir = local - - varargs = (local,) - if ignore_ancestry: - varargs = varargs + ('--ignore-ancestry',) - - run_and_verify_merge(local, None, None, incoming, None, - x_out, None, None, x_disk, None, x_skip, - test_case.error_re_string, - None, None, None, None, - False, False, *varargs) - run_and_verify_unquiet_status(local, x_status) - except: - logger.warn("ERROR IN: Tests scheme for merge: " - + "while verifying in '%s'", test_case.name) - raise - - - # 9) Verify that commit fails. 
- - if do_commit_conflicts: - for test_case in greater_scheme: - try: - local = j(wc_dir, test_case.name, 'local') - - x_status = test_case.expected_status - if x_status != None: - x_status.copy() - x_status.wc_dir = local - - run_and_verify_commit(local, None, x_status, - test_case.commit_block_string, - local) - except: - logger.warn("ERROR IN: Tests scheme for merge: " - + "while checking commit-blocking in '%s'", test_case.name) - raise + run_and_verify_svn(verify.AnyOutput, [], 'update', wc_dir) diff --git a/svntest/factory.py b/svntest/factory.py index cb387e83..32093eae 100644 --- a/svntest/factory.py +++ b/svntest/factory.py @@ -97,7 +97,7 @@ # # YOU ARE CORDIALLY INVITED to add/tweak/change to your needs. # If you want to know what's going on, look at the switch() -# funtion of TestFactory below. +# function of TestFactory below. # # # DETAILS @@ -308,8 +308,9 @@ def __init__(self, sbox, prev_status=None, prev_disk=None): # An optimized list kept up-to-date by variable additions self.sorted_vars_by_pathlen = [] - # Wether we ever used the variables 'wc_dir' and 'url' (tiny tweak) + # Whether we ever used the variables 'wc_dir' and 'url' (tiny tweak) self.used_wc_dir = False + self.used_abs_wc_dir = False self.used_url = False # The alternate working copy directories created that need to be @@ -350,6 +351,8 @@ def make(self, commands): # main wc_dir and url if self.used_wc_dir: init += 'wc_dir = sbox.wc_dir\n' + if self.used_abs_wc_dir: + init += 'abs_wc_dir = os.path.abspath(sbox.wc_dir)\n' if self.used_url: init += 'url = sbox.repo_url\n' @@ -447,9 +450,14 @@ def switch(self, line): return self.cmd_svn_checkout(args[2:]) if second in ['propset','pset','ps']: - return self.cmd_svn(args[1:], False, + multiline_args = [arg.replace(r'\n', '\n') for arg in args[1:]] + return self.cmd_svn(multiline_args, False, self.keep_args_of, 3) + if second in ['propget','pget','pg']: + return self.cmd_svn(args[1:], False, + self.keep_args_of, 2) + if second in 
['delete','del','remove', 'rm']: return self.cmd_svn(args[1:], False, self.keep_args_of + ['--with-revprop']) @@ -492,7 +500,7 @@ def cmd_svn_standard_run(self, pyargs, runargs, do_chdir, wc): else: py = "expected_stdout = verify.UnorderedOutput(" + pylist + ")\n\n" py += pychdir - py += "actions.run_and_verify_svn2('OUTPUT', expected_stdout, [], 0" + py += "actions.run_and_verify_svn2(expected_stdout, [], 0" else: # write a test that expects failure pylist = self.strlist2py(err) @@ -501,8 +509,7 @@ def cmd_svn_standard_run(self, pyargs, runargs, do_chdir, wc): else: py = "expected_stderr = verify.UnorderedOutput(" + pylist + ")\n\n" py += pychdir - py += ("actions.run_and_verify_svn2('OUTPUT', " + - "[], expected_stderr, " + str(code)) + py += ("actions.run_and_verify_svn2([], expected_stderr, " + str(code)) if len(pyargs) > 0: py += ", " + ", ".join(pyargs) @@ -627,13 +634,13 @@ def cmd_svn_update(self, update_args): py += pychdir py += ("actions.run_and_verify_update(" + wc.py + ", " + "expected_output, expected_disk, expected_status, " + - "None, None, None, None, None, False") + "[], False") else: # write a test that expects error py = "expected_error = " + self.strlist2py(err) + "\n\n" py += pychdir py += ("actions.run_and_verify_update(" + wc.py + ", None, None, " + - "None, expected_error, None, None, None, None, False") + "None, expected_error") if len(pyargs) > 0: py += ', ' + ', '.join(pyargs) @@ -693,14 +700,14 @@ def cmd_svn_switch(self, switch_args): py += ("actions.run_and_verify_switch(" + wc.py + ", " + wc_arg.pyarg + ", " + url_arg.pyarg + ", " + "expected_output, expected_disk, expected_status, " + - "None, None, None, None, None, False") + "[], False") else: # write a test that expects error py = "expected_error = " + self.strlist2py(err) + "\n\n" py += pychdir py += ("actions.run_and_verify_switch(" + wc.py + ", " + wc_arg.pyarg + ", " + url_arg.pyarg + ", " + - "None, None, None, expected_error, None, None, None, None, False") + "None, None, 
None, expected_error, False") if len(pyargs) > 0: py += ', ' + ', '.join(pyargs) @@ -758,7 +765,7 @@ def cmd_svn_checkout(self, checkout_args): py += ("actions.run_and_verify_checkout(" + url_arg.pyarg + ", " + wc_arg.pyarg + - ", expected_output, expected_disk, None, None, None, None") + ", expected_output, expected_disk") else: # write a test that expects failure pylist = self.strlist2py(err) @@ -767,8 +774,7 @@ def cmd_svn_checkout(self, checkout_args): else: py += "expected_stderr = verify.UnorderedOutput(" + pylist + ")\n\n" py += pychdir - py += ("actions.run_and_verify_svn2('OUTPUT', " + - "[], expected_stderr, " + str(code) + + py += ("actions.run_and_verify_svn2([], expected_stderr, " + str(code) + ", " + url_arg.pyarg + ", " + wc_arg.pyarg) # Append the remaining args @@ -1029,7 +1035,7 @@ def get_current_status(self, wc, quiet=True): make_py, prev_status = self.get_prev_status(wc) - actual_status = svntest.wc.State.from_status(output) + actual_status = svntest.wc.State.from_status(output, wc_dir=wc.realpath) # The tests currently compare SVNTreeNode trees, so let's do that too. 
prev_status_tree = prev_status.old_tree() @@ -1115,7 +1121,7 @@ def get_sorted_vars_by_pathlen(self): if not runpath: continue strlen = len(runpath) - item = [strlen, name, runpath] + item = (strlen, name, runpath) bisect.insort(lst, item) return lst @@ -1128,14 +1134,14 @@ def get_sorted_var_names(self): urls = [] for name in self.vars: if name.startswith('url_'): - bisect.insort(urls, [name.lower(), name]) + bisect.insort(urls, (name.lower(), name)) else: - bisect.insort(paths, [name.lower(), name]) + bisect.insort(paths, (name.lower(), name)) list = [] for path in paths: - list += [path[1]] + list.append(path[1]) for url in urls: - list += [url[1]] + list.append(url[1]) return list @@ -1168,8 +1174,20 @@ def replace(str, path, name, quote): path = var[2] str = replace(str, path, name, quote) - str = replace(str, self.sbox.wc_dir, 'wc_dir', quote) - str = replace(str, self.sbox.repo_url, 'url', quote) + str2 = replace(str, os.path.abspath(self.sbox.wc_dir), 'abs_wc_dir', quote) + if str != str2: + self.used_abs_wc_dir = True + str = str2 + + str2 = replace(str, self.sbox.wc_dir, 'wc_dir', quote) + if str != str2: + self.used_wc_dir = True + str = str2 + + str2 = replace(str, self.sbox.repo_url, 'url', quote) + if str != str2: + self.used_url = True + str = str2 # now remove trailing null-str adds: # '' + url_A_C + '' @@ -1615,6 +1633,7 @@ def ensure_path_var(self, wc, pathelements): # special case: if a path is '.', simply use wc_dir. 
if pathelements == ['.']: + self.used_wc_dir = True return wc.py, wc.realpath name = "_".join(pathelements) diff --git a/svntest/main.py b/svntest/main.py index db93e865..c8ceb25f 100644 --- a/svntest/main.py +++ b/svntest/main.py @@ -36,24 +36,28 @@ import urllib import logging import hashlib -from urlparse import urlparse +import zipfile +import codecs try: # Python >=3.0 import queue from urllib.parse import quote as urllib_parse_quote from urllib.parse import unquote as urllib_parse_unquote + from urllib.parse import urlparse except ImportError: # Python <3.0 import Queue as queue from urllib import quote as urllib_parse_quote from urllib import unquote as urllib_parse_unquote + from urlparse import urlparse import svntest from svntest import Failure from svntest import Skip +from svntest.wc import StateItem as Item -SVN_VER_MINOR = 8 +SVN_VER_MINOR = 15 ###################################################################### # @@ -111,7 +115,7 @@ class SVNRepositoryCreateFailure(Failure): # Windows specifics if sys.platform == 'win32': windows = True - file_scheme_prefix = 'file:' + file_scheme_prefix = 'file:///' _exe = '.exe' _bat = '.bat' os.environ['SVN_DBG_STACKTRACES_TO_STDERR'] = 'y' @@ -125,41 +129,81 @@ class SVNRepositoryCreateFailure(Failure): if windows: svneditor_script = os.path.join(sys.path[0], 'svneditor.bat') else: - svneditor_script = os.path.join(sys.path[0], 'svneditor.py') + # This script is in the build tree, not in the source tree. 
+ svneditor_script = os.path.join(os.path.dirname( + os.path.dirname(os.path.abspath('.'))), + 'tests/cmdline/svneditor.sh') # Username and password used by the working copies wc_author = 'jrandom' wc_passwd = 'rayjandom' +# Username and password used by svnrdump in dump/load cross-checks +crosscheck_username = '__dumpster__' +crosscheck_password = '__loadster__' + # Username and password used by the working copies for "second user" # scenarios wc_author2 = 'jconstant' # use the same password as wc_author +stack_trace_regexp = r'(?:.*subversion[\\//].*\.c:[0-9]*,$|.*apr_err=.*)' + # Set C locale for command line programs os.environ['LC_ALL'] = 'C' ###################################################################### -# The locations of the svn, svnadmin and svnlook binaries, relative to -# the only scripts that import this file right now (they live in ../). +# Permission constants used with e.g. chmod() and open(). +# Define them here at a central location, so people aren't tempted to +# use octal literals which are not portable between Python 2 and 3. + +S_ALL_READ = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH +S_ALL_WRITE = stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH +S_ALL_EXEC = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH + +S_ALL_RW = S_ALL_READ | S_ALL_WRITE +S_ALL_RX = S_ALL_READ | S_ALL_EXEC +S_ALL_RWX = S_ALL_READ | S_ALL_WRITE | S_ALL_EXEC + +###################################################################### +# The locations of the svn binaries. # Use --bin to override these defaults. 
-svn_binary = os.path.abspath('../../svn/svn' + _exe) -svnadmin_binary = os.path.abspath('../../svnadmin/svnadmin' + _exe) -svnlook_binary = os.path.abspath('../../svnlook/svnlook' + _exe) -svnrdump_binary = os.path.abspath('../../svnrdump/svnrdump' + _exe) -svnsync_binary = os.path.abspath('../../svnsync/svnsync' + _exe) -svnversion_binary = os.path.abspath('../../svnversion/svnversion' + _exe) -svndumpfilter_binary = os.path.abspath('../../svndumpfilter/svndumpfilter' + \ - _exe) -svnmucc_binary=os.path.abspath('../../svnmucc/svnmucc' + _exe) -entriesdump_binary = os.path.abspath('entries-dump' + _exe) -atomic_ra_revprop_change_binary = os.path.abspath('atomic-ra-revprop-change' + \ - _exe) -wc_lock_tester_binary = os.path.abspath('../libsvn_wc/wc-lock-tester' + _exe) -wc_incomplete_tester_binary = os.path.abspath('../libsvn_wc/wc-incomplete-tester' + _exe) +def P(relpath, + head=os.path.dirname(os.path.dirname(os.path.abspath('.'))) + ): + if sys.platform=='win32': + return os.path.join(head, relpath + '.exe') + else: + return os.path.join(head, relpath) +svn_binary = P('svn/svn') +svnadmin_binary = P('svnadmin/svnadmin') +svnlook_binary = P('svnlook/svnlook') +svnrdump_binary = P('svnrdump/svnrdump') +svnsync_binary = P('svnsync/svnsync') +svnversion_binary = P('svnversion/svnversion') +svndumpfilter_binary = P('svndumpfilter/svndumpfilter') +svnmucc_binary = P('svnmucc/svnmucc') +svnfsfs_binary = P('svnfsfs/svnfsfs') +entriesdump_binary = P('tests/cmdline/entries-dump') +lock_helper_binary = P('tests/cmdline/lock-helper') +atomic_ra_revprop_change_binary = P('tests/cmdline/atomic-ra-revprop-change') +wc_lock_tester_binary = P('tests/libsvn_wc/wc-lock-tester') +wc_incomplete_tester_binary = P('tests/libsvn_wc/wc-incomplete-tester') +del P + +###################################################################### +# The location of svnauthz binary, relative to the only scripts that +# import this file right now (they live in ../). 
+# Use --tools to override these defaults. +svnauthz_binary = os.path.abspath('../../../tools/server-side/svnauthz' + _exe) +svnauthz_validate_binary = os.path.abspath( + '../../../tools/server-side/svnauthz-validate' + _exe +) +svnmover_binary = os.path.abspath('../../../tools/dev/svnmover/svnmover' + _exe) # Location to the pristine repository, will be calculated from test_area_url # when we know what the user specified for --url. pristine_greek_repos_url = None +pristine_trojan_repos_url = None # Global variable to track all of our options options = None @@ -174,11 +218,21 @@ class SVNRepositoryCreateFailure(Failure): # Constant for the merge info property. SVN_PROP_MERGEINFO = "svn:mergeinfo" +# Constant for the inheritable auto-props property. +SVN_PROP_INHERITABLE_AUTOPROPS = "svn:auto-props" + +# Constant for the inheritable ignores property. +SVN_PROP_INHERITABLE_IGNORES = "svn:global-ignores" + # Where we want all the repositories and working copies to live. # Each test will have its own! general_repo_dir = os.path.join(work_dir, "repositories") general_wc_dir = os.path.join(work_dir, "working_copies") +# Directories used for DAV tests +other_dav_root_dir = os.path.join(work_dir, "fsdavroot") +non_dav_root_dir = os.path.join(work_dir, "nodavroot") + # temp directory in which we will create our 'pristine' local # repository and other scratch data. This should be removed when we # quit and when we startup. @@ -186,7 +240,9 @@ class SVNRepositoryCreateFailure(Failure): # (derivatives of the tmp dir.) pristine_greek_repos_dir = os.path.join(temp_dir, "repos") +pristine_trojan_repos_dir = os.path.join(temp_dir, "trojan") greek_dump_dir = os.path.join(temp_dir, "greekfiles") +trojan_dump_dir = os.path.join(temp_dir, "trojanfiles") default_config_dir = os.path.abspath(os.path.join(temp_dir, "config")) # @@ -196,28 +252,57 @@ class SVNRepositoryCreateFailure(Failure): # call main.greek_state.copy().
That method will return a copy of this # State object which can then be edited. # -_item = svntest.wc.StateItem greek_state = svntest.wc.State('', { - 'iota' : _item("This is the file 'iota'.\n"), - 'A' : _item(), - 'A/mu' : _item("This is the file 'mu'.\n"), - 'A/B' : _item(), - 'A/B/lambda' : _item("This is the file 'lambda'.\n"), - 'A/B/E' : _item(), - 'A/B/E/alpha' : _item("This is the file 'alpha'.\n"), - 'A/B/E/beta' : _item("This is the file 'beta'.\n"), - 'A/B/F' : _item(), - 'A/C' : _item(), - 'A/D' : _item(), - 'A/D/gamma' : _item("This is the file 'gamma'.\n"), - 'A/D/G' : _item(), - 'A/D/G/pi' : _item("This is the file 'pi'.\n"), - 'A/D/G/rho' : _item("This is the file 'rho'.\n"), - 'A/D/G/tau' : _item("This is the file 'tau'.\n"), - 'A/D/H' : _item(), - 'A/D/H/chi' : _item("This is the file 'chi'.\n"), - 'A/D/H/psi' : _item("This is the file 'psi'.\n"), - 'A/D/H/omega' : _item("This is the file 'omega'.\n"), + 'iota' : Item("This is the file 'iota'.\n"), + 'A' : Item(), + 'A/mu' : Item("This is the file 'mu'.\n"), + 'A/B' : Item(), + 'A/B/lambda' : Item("This is the file 'lambda'.\n"), + 'A/B/E' : Item(), + 'A/B/E/alpha' : Item("This is the file 'alpha'.\n"), + 'A/B/E/beta' : Item("This is the file 'beta'.\n"), + 'A/B/F' : Item(), + 'A/C' : Item(), + 'A/D' : Item(), + 'A/D/gamma' : Item("This is the file 'gamma'.\n"), + 'A/D/G' : Item(), + 'A/D/G/pi' : Item("This is the file 'pi'.\n"), + 'A/D/G/rho' : Item("This is the file 'rho'.\n"), + 'A/D/G/tau' : Item("This is the file 'tau'.\n"), + 'A/D/H' : Item(), + 'A/D/H/chi' : Item("This is the file 'chi'.\n"), + 'A/D/H/psi' : Item("This is the file 'psi'.\n"), + 'A/D/H/omega' : Item("This is the file 'omega'.\n"), + }) + +# Likewise our pristine trojan-tree state (for peg revision parsing tests) +# NOTE: We don't use precooked trojan repositories. 
+trojan_state = svntest.wc.State('', { + 'iota' : Item("This is the file 'iota'.\n"), + '@zeta' : Item("This is the file 'zeta'.\n"), + '_@theta' : Item("This is the file 'theta'.\n"), + '.@kappa' : Item("This is the file 'kappa'.\n"), + 'lambda@' : Item("This is the file 'lambda'.\n"), + '@omicron@' : Item("This is the file 'omicron'.\n"), + '@' : Item(), + '@@' : Item(), + '_@' : Item(), + '.@' : Item(), + 'A' : Item(), + 'A/@@' : Item("This is the file 'A/@@'.\n"), + 'A/alpha' : Item("This is the file 'alpha'.\n"), + 'A/@omega@' : Item("This is the file 'omega'.\n"), + 'B' : Item(), + 'B/@' : Item("This is the file 'B/@'.\n"), + 'B/@beta' : Item("This is the file 'beta'.\n"), + 'B/pi@' : Item("This is the file 'pi'.\n"), + 'G' : Item(), + 'G/_@' : Item("This is the file 'G/_@'.\n"), + 'G/_@gamma' : Item("This is the file 'gamma'.\n"), + 'D' : Item(), + 'D/.@' : Item("This is the file 'D/.@'.\n"), + 'D/.@delta' : Item("This is the file 'delta'.\n"), + 'E' : Item(), }) @@ -228,7 +313,7 @@ def wrap_ex(func, output): def w(*args, **kwds): try: return func(*args, **kwds) - except Failure, ex: + except Failure as ex: if ex.__class__ != Failure or ex.args: ex_args = str(ex) if ex_args: @@ -330,13 +415,29 @@ def get_fsfs_format_file_path(repo_dir): return os.path.join(repo_dir, "db", "format") -def filter_dbg(lines): - for line in lines: - if not line.startswith('DBG:'): - yield line +def ensure_list(item): + "If ITEM is not already a list, convert it to a list." 
+ if isinstance(item, list): + return item + elif isinstance(item, bytes) or isinstance(item, str): + return [ item ] + else: + return list(item) + +def filter_dbg(lines, binary = False): + if binary: + excluded = filter(lambda line: line.startswith(b'DBG:'), lines) + excluded = map(bytes.decode, excluded) + included = filter(lambda line: not line.startswith(b'DBG:'), lines) + else: + excluded = filter(lambda line: line.startswith('DBG:'), lines) + included = filter(lambda line: not line.startswith('DBG:'), lines) + + sys.stdout.write(''.join(excluded)) + return ensure_list(included) # Run any binary, logging the command line and return code -def run_command(command, error_expected, binary_mode=0, *varargs): +def run_command(command, error_expected, binary_mode=False, *varargs): """Run COMMAND with VARARGS. Return exit code as int; stdout, stderr as lists of lines (including line terminators). See run_command_stdin() for details. If ERROR_EXPECTED is None, any stderr output will be @@ -346,6 +447,17 @@ def run_command(command, error_expected, binary_mode=0, *varargs): return run_command_stdin(command, error_expected, 0, binary_mode, None, *varargs) +# Frequently used constants: +# If any of these relative path strings show up in a server response, +# then we can assume that the on-disk repository path was leaked to the +# client. Having these here as constants means we don't need to construct +# them over and over again. 
+_repos_diskpath1 = os.path.join('cmdline', 'svn-test-work', 'repositories') +_repos_diskpath2 = os.path.join('cmdline', 'svn-test-work', 'local_tmp', + 'repos') +_repos_diskpath1_bytes = _repos_diskpath1.encode() +_repos_diskpath2_bytes = _repos_diskpath2.encode() + # A regular expression that matches arguments that are trivially safe # to pass on a command line without quoting on any supported operating # system: @@ -380,9 +492,9 @@ def open_pipe(command, bufsize=-1, stdin=None, stdout=None, stderr=None): should be passed to wait_on_pipe.""" command = [str(x) for x in command] - # On Windows subprocess.Popen() won't accept a Python script as - # a valid program to execute, rather it wants the Python executable. - if (sys.platform == 'win32') and (command[0].endswith('.py')): + # Always run python scripts under the same Python executable as used + # for the test suite. + if command[0].endswith('.py'): command.insert(0, sys.executable) command_string = command[0] + ' ' + ' '.join(map(_quote_arg, command[1:])) @@ -416,10 +528,17 @@ def wait_on_pipe(waiter, binary_mode, stdin=None): stdout, stderr = kid.communicate(stdin) exit_code = kid.returncode - # Normalize Windows line endings if in text mode. - if windows and not binary_mode: - stdout = stdout.replace('\r\n', '\n') - stderr = stderr.replace('\r\n', '\n') + # We always expect STDERR to be strings, not byte-arrays. + if not isinstance(stderr, str): + stderr = stderr.decode("utf-8", 'surrogateescape') + if not binary_mode: + if not isinstance(stdout, str): + stdout = stdout.decode("utf-8", 'surrogateescape') + + # Normalize Windows line endings if in text mode. + if windows: + stdout = stdout.replace('\r\n', '\n') + stderr = stderr.replace('\r\n', '\n') # Convert output strings to lists. 
stdout_lines = stdout.splitlines(True) @@ -444,17 +563,20 @@ def wait_on_pipe(waiter, binary_mode, stdin=None): logger.info("CMD: %s exited with %d" % (command_string, exit_code)) return stdout_lines, stderr_lines, exit_code -def spawn_process(command, bufsize=-1, binary_mode=0, stdin_lines=None, +def spawn_process(command, bufsize=-1, binary_mode=False, stdin_lines=None, *varargs): """Run any binary, supplying input text, logging the command line. + BUFSIZE dictates the pipe buffer size used in communication with the - subprocess: 0 means unbuffered, 1 means line buffered, any other - positive value means use a buffer of (approximately) that size. - A negative bufsize means to use the system default, which usually - means fully buffered. The default value for bufsize is 0 (unbuffered). + subprocess: quoting from subprocess.Popen(), "0 means unbuffered, + 1 means line buffered, any other positive value means use a buffer of + (approximately) that size. A negative bufsize means to use the system + default, which usually means fully buffered." + Normalize Windows line endings of stdout and stderr if not BINARY_MODE. Return exit code as int; stdout, stderr as lists of lines (including - line terminators).""" + line terminators). 
+ """ if stdin_lines and not isinstance(stdin_lines, list): raise TypeError("stdin_lines should have list type") @@ -477,7 +599,7 @@ def spawn_process(command, bufsize=-1, binary_mode=0, stdin_lines=None, return exit_code, stdout_lines, stderr_lines -def run_command_stdin(command, error_expected, bufsize=-1, binary_mode=0, +def run_command_stdin(command, error_expected, bufsize=-1, binary_mode=False, stdin_lines=None, *varargs): """Run COMMAND with VARARGS; input STDIN_LINES (a list of strings which should include newline characters) to program via stdin - this @@ -501,13 +623,14 @@ def run_command_stdin(command, error_expected, bufsize=-1, binary_mode=0, *varargs) def _line_contains_repos_diskpath(line): - # ### Note: this assumes that either svn-test-work isn't a symlink, + # ### Note: this assumes that either svn-test-work isn't a symlink, # ### or the diskpath isn't realpath()'d somewhere on the way from # ### the server's configuration and the client's stderr. We could # ### check for both the symlinked path and the realpath. - return \ - os.path.join('cmdline', 'svn-test-work', 'repositories') in line \ - or os.path.join('cmdline', 'svn-test-work', 'local_tmp', 'repos') in line + if isinstance(line, str): + return _repos_diskpath1 in line or _repos_diskpath2 in line + else: + return _repos_diskpath1_bytes in line or _repos_diskpath2_bytes in line for lines, name in [[stdout_lines, "stdout"], [stderr_lines, "stderr"]]: if is_ra_type_file() or 'svnadmin' in command or 'svnlook' in command: @@ -518,6 +641,15 @@ def _line_contains_repos_diskpath(line): and not any(map(lambda arg: 'prop_tests-12' in arg, varargs)): raise Failure("Repository diskpath in %s: %r" % (name, lines)) + valgrind_diagnostic = False + # A valgrind diagnostic will raise a failure if the command is + # expected to run without error. 
When an error is expected any + # subsequent error pattern matching is usually lenient and will not + # detect the diagnostic so make sure a failure is raised here. + if error_expected and stderr_lines: + if any(map(lambda arg: re.match('==[0-9]+==', arg), stderr_lines)): + valgrind_diagnostic = True + stop = time.time() logger.info('