From 25cb3e591e7d023d7a1b50e87580e839355d30ca Mon Sep 17 00:00:00 2001 From: mhagger Date: Wed, 3 Oct 2012 05:40:54 +0000 Subject: [PATCH] Update to r1393290 of svntest. Update the svntest subdirectory from http://svn.apache.org/repos/asf/subversion/trunk/subversion/tests/cmdline/svntest using svntest/update.sh. Adjust run-tests.py for changes in svntest. git-svn-id: http://cvs2svn.tigris.org/svn/cvs2svn/trunk@5394 be7e6eca-30d4-0310-a8e5-ac0d63af7087 --- run-tests.py | 35 +-- svntest/__init__.py | 6 +- svntest/actions.py | 761 ++++++++++++++++++++++++++++++++---------------- svntest/factory.py | 130 +++++++-- svntest/main.py | 811 +++++++++++++++++++++++++++++++++++++++------------- svntest/objects.py | 9 - svntest/sandbox.py | 202 +++++++++++-- svntest/testcase.py | 105 +++++-- svntest/tree.py | 114 +++++--- svntest/verify.py | 274 +++++++++++++++++- svntest/wc.py | 175 ++++++++---- 11 files changed, 1972 insertions(+), 650 deletions(-) diff --git a/run-tests.py b/run-tests.py index de88fd20..85e99d0f 100755 --- a/run-tests.py +++ b/run-tests.py @@ -69,7 +69,7 @@ import svntest from svntest import Failure from svntest.main import safe_rmtree from svntest.testcase import TestCase -from svntest.testcase import XFail +from svntest.testcase import XFail_deco # Test if Mercurial >= 1.1 is available. try: @@ -120,8 +120,8 @@ def run_program(program, error_re, *varargs): """Run PROGRAM with VARARGS, return stdout as a list of lines. If there is any stderr and ERROR_RE is None, raise - RunProgramException, and print the stderr lines if - svntest.main.options.verbose is true. + RunProgramException, and log the stderr lines via + svntest.main.logger.info(). If ERROR_RE is not None, it is a string regular expression that must match some line of stderr. If it fails to match, raise @@ -142,11 +142,10 @@ def run_program(program, error_re, *varargs): else: # No stderr allowed. if err: - if svntest.main.options.verbose: - print '\n%s said:\n' % program - for line in err: - print ' ' + line, - print + log = svntest.main.logger.info + log('%s said:' % program) + for line in err: + log(' ' + line.rstrip()) raise RunProgramException() return out @@ -157,8 +156,8 @@ def run_script(script, error_re, *varargs): of lines. If there is any stderr and ERROR_RE is None, raise - RunProgramException, and print the stderr lines if - svntest.main.options.verbose is true. + RunProgramException, and log the stderr lines via + svntest.main.logger.info(). If ERROR_RE is not None, it is a string regular expression that must match some line of stderr. If it fails to match, raise @@ -176,8 +175,8 @@ def run_script(script, error_re, *varargs): def run_svn(*varargs): """Run svn with VARARGS; return stdout as a list of lines. - If there is any stderr, raise RunProgramException, and print the - stderr lines if svntest.main.options.verbose is true.""" + If there is any stderr, raise RunProgramException, and log the + stderr lines via svntest.main.logger.info().""" return run_program(svn_binary, None, *varargs) @@ -941,6 +940,7 @@ def cvs2git_manpage(): out = run_script(cvs2git, None, '--man') +@XFail_deco() @Cvs2HgTestFunction def cvs2hg_manpage(): "generate a manpage for cvs2hg" @@ -2516,6 +2516,7 @@ def double_fill(): # conversion doesn't fail. 
+@XFail_deco() @Cvs2SvnTestFunction def double_fill2(): "reveal a second bug that created a branch twice" @@ -3137,6 +3138,7 @@ def nasty_graphs(): conv = ensure_conversion('nasty-graphs') +@XFail_deco() @Cvs2SvnTestFunction def tagging_after_delete(): "optimal tag after deleting files" @@ -3616,6 +3618,7 @@ def mirror_keyerror3_test(): conv = ensure_conversion('mirror-keyerror3') +@XFail_deco() @Cvs2SvnTestFunction def add_cvsignore_to_branch_test(): "check adding .cvsignore to an existing branch" @@ -3993,7 +3996,7 @@ test_list = [ show_usage, cvs2svn_manpage, cvs2git_manpage, - XFail(cvs2hg_manpage), + cvs2hg_manpage, attr_exec, space_fname, two_quick, @@ -4093,7 +4096,7 @@ test_list = [ resync_pass2_pull_forward, native_eol, double_fill, - XFail(double_fill2), + double_fill2, resync_pass2_push_backward, double_add, bogus_branch_copy, @@ -4139,7 +4142,7 @@ test_list = [ delete_cvsignore, repeated_deltatext, nasty_graphs, - XFail(tagging_after_delete), + tagging_after_delete, crossed_branches, # 130: file_directory_conflict, @@ -4177,7 +4180,7 @@ test_list = [ # 160: mirror_keyerror2_test, mirror_keyerror3_test, - XFail(add_cvsignore_to_branch_test), + add_cvsignore_to_branch_test, missing_deltatext, transform_unlabeled_branch_name, ignore_unlabeled_branch, diff --git a/svntest/__init__.py b/svntest/__init__.py index 55813a21..98a67b67 100644 --- a/svntest/__init__.py +++ b/svntest/__init__.py @@ -23,11 +23,11 @@ __all__ = [ ] import sys -if sys.hexversion < 0x2040000: - sys.stderr.write('[SKIPPED] at least Python 2.4 is required\n') +if sys.hexversion < 0x2050000: + sys.stderr.write('[SKIPPED] at least Python 2.5 is required\n') # note: exiting is a bit harsh for a library module, but we really do - # require Python 2.4. this package isn't going to work otherwise. + # require Python 2.5. this package isn't going to work otherwise. 
# we're skipping this test, not failing, so exit with 0 sys.exit(0) diff --git a/svntest/actions.py b/svntest/actions.py index 866aac5d..b69fed19 100644 --- a/svntest/actions.py +++ b/svntest/actions.py @@ -24,21 +24,44 @@ ###################################################################### import os, shutil, re, sys, errno -import difflib, pprint +import difflib, pprint, logging import xml.parsers.expat from xml.dom.minidom import parseString +if sys.version_info[0] >= 3: + # Python >=3.0 + from io import StringIO +else: + # Python <3.0 + from cStringIO import StringIO import svntest from svntest import main, verify, tree, wc from svntest import Failure +logger = logging.getLogger() + +def _log_tree_state(msg, actual, subtree=""): + if subtree: + subtree += os.sep + o = StringIO() + o.write(msg + '\n') + tree.dump_tree_script(actual, subtree, stream=o) + logger.warn(o.getvalue()) + o.close() + def no_sleep_for_timestamps(): os.environ['SVN_I_LOVE_CORRUPTED_WORKING_COPIES_SO_DISABLE_SLEEP_FOR_TIMESTAMPS'] = 'yes' def do_sleep_for_timestamps(): os.environ['SVN_I_LOVE_CORRUPTED_WORKING_COPIES_SO_DISABLE_SLEEP_FOR_TIMESTAMPS'] = 'no' -def setup_pristine_repository(): +def no_relocate_validation(): + os.environ['SVN_I_LOVE_CORRUPTED_WORKING_COPIES_SO_DISABLE_RELOCATE_VALIDATION'] = 'yes' + +def do_relocate_validation(): + os.environ['SVN_I_LOVE_CORRUPTED_WORKING_COPIES_SO_DISABLE_RELOCATE_VALIDATION'] = 'no' + +def setup_pristine_greek_repository(): """Create the pristine repository and 'svn import' the greek tree""" # these directories don't exist out of the box, so we may have to create them @@ -49,8 +72,8 @@ def setup_pristine_repository(): os.makedirs(main.general_repo_dir) # this also creates all the intermediate dirs # If there's no pristine repos, create one. - if not os.path.exists(main.pristine_dir): - main.create_repos(main.pristine_dir) + if not os.path.exists(main.pristine_greek_repos_dir): + main.create_repos(main.pristine_greek_repos_dir) # if this is dav, gives us access rights to import the greek tree. if main.is_ra_type_dav(): @@ -66,7 +89,7 @@ def setup_pristine_repository(): exit_code, output, errput = main.run_svn(None, 'import', '-m', 'Log message for revision 1.', main.greek_dump_dir, - main.pristine_url) + main.pristine_greek_repos_url) # check for any errors from the import if len(errput): @@ -78,9 +101,9 @@ def setup_pristine_repository(): lastline = output.pop().strip() match = re.search("(Committed|Imported) revision [0-9]+.", lastline) if not match: - print("ERROR: import did not succeed, while creating greek repos.") - print("The final line from 'svn import' was:") - print(lastline) + logger.error("import did not succeed, while creating greek repos.") + logger.error("The final line from 'svn import' was:") + logger.error(lastline) sys.exit(1) output_tree = wc.State.from_commit(output) @@ -99,9 +122,9 @@ def setup_pristine_repository(): # Finally, disallow any changes to the "pristine" repos. 
error_msg = "Don't modify the pristine repository" - create_failing_hook(main.pristine_dir, 'start-commit', error_msg) - create_failing_hook(main.pristine_dir, 'pre-lock', error_msg) - create_failing_hook(main.pristine_dir, 'pre-revprop-change', error_msg) + create_failing_hook(main.pristine_greek_repos_dir, 'start-commit', error_msg) + create_failing_hook(main.pristine_greek_repos_dir, 'pre-lock', error_msg) + create_failing_hook(main.pristine_greek_repos_dir, 'pre-revprop-change', error_msg) ###################################################################### @@ -110,8 +133,8 @@ def guarantee_empty_repository(path): """Guarantee that a local svn repository exists at PATH, containing nothing.""" - if path == main.pristine_dir: - print("ERROR: attempt to overwrite the pristine repos! Aborting.") + if path == main.pristine_greek_repos_dir: + logger.error("attempt to overwrite the pristine repos! Aborting.") sys.exit(1) # create an empty repository at PATH. @@ -121,26 +144,61 @@ def guarantee_empty_repository(path): # Used by every test, so that they can run independently of one # another. Every time this routine is called, it recursively copies # the `pristine repos' to a new location. -# Note: make sure setup_pristine_repository was called once before +# Note: make sure setup_pristine_greek_repository was called once before # using this function. -def guarantee_greek_repository(path): +def guarantee_greek_repository(path, minor_version): """Guarantee that a local svn repository exists at PATH, containing nothing but the greek-tree at revision 1.""" - if path == main.pristine_dir: - print("ERROR: attempt to overwrite the pristine repos! Aborting.") + if path == main.pristine_greek_repos_dir: + logger.error("attempt to overwrite the pristine repos! Aborting.") sys.exit(1) # copy the pristine repository to PATH. main.safe_rmtree(path) - if main.copy_repos(main.pristine_dir, path, 1): - print("ERROR: copying repository failed.") + if main.copy_repos(main.pristine_greek_repos_dir, path, 1, 1, minor_version): + logger.error("copying repository failed.") sys.exit(1) # make the repos world-writeable, for mod_dav_svn's sake. main.chmod_tree(path, 0666, 0666) +def run_and_verify_atomic_ra_revprop_change(message, + expected_stdout, + expected_stderr, + expected_exit, + url, revision, propname, + old_propval, propval, + want_error): + """Run atomic-ra-revprop-change helper and check its output and exit code. + Transforms OLD_PROPVAL and PROPVAL into a skel. 
+ For HTTP, the default HTTP library is used.""" + + KEY_OLD_PROPVAL = "old_value_p" + KEY_NEW_PROPVAL = "value" + + def skel_make_atom(word): + return "%d %s" % (len(word), word) + + def make_proplist_skel_part(nick, val): + if val is None: + return "" + else: + return "%s %s" % (skel_make_atom(nick), skel_make_atom(val)) + + skel = "( %s %s )" % (make_proplist_skel_part(KEY_OLD_PROPVAL, old_propval), + make_proplist_skel_part(KEY_NEW_PROPVAL, propval)) + + exit_code, out, err = main.run_atomic_ra_revprop_change(url, revision, + propname, skel, + want_error) + verify.verify_outputs("Unexpected output", out, err, + expected_stdout, expected_stderr) + verify.verify_exit_code(message, exit_code, expected_exit) + return exit_code, out, err + + def run_and_verify_svnlook(message, expected_stdout, expected_stderr, *varargs): """Like run_and_verify_svnlook2, but the expected exit code is @@ -185,24 +243,28 @@ def run_and_verify_svnadmin2(message, expected_stdout, expected_stderr, return exit_code, out, err -def run_and_verify_svnversion(message, wc_dir, repo_url, - expected_stdout, expected_stderr): +def run_and_verify_svnversion(message, wc_dir, trail_url, + expected_stdout, expected_stderr, *varargs): """like run_and_verify_svnversion2, but the expected exit code is assumed to be 0 if no output is expected on stderr, and 1 otherwise.""" expected_exit = 0 if expected_stderr is not None and expected_stderr != []: expected_exit = 1 - return run_and_verify_svnversion2(message, wc_dir, repo_url, + return run_and_verify_svnversion2(message, wc_dir, trail_url, expected_stdout, expected_stderr, - expected_exit) + expected_exit, *varargs) -def run_and_verify_svnversion2(message, wc_dir, repo_url, +def run_and_verify_svnversion2(message, wc_dir, trail_url, expected_stdout, expected_stderr, - expected_exit): + expected_exit, *varargs): """Run svnversion command and check its output and exit code.""" - exit_code, out, err = main.run_svnversion(wc_dir, repo_url) + if trail_url is None: + exit_code, out, err = main.run_svnversion(wc_dir, *varargs) + else: + exit_code, out, err = main.run_svnversion(wc_dir, trail_url, *varargs) + verify.verify_outputs("Unexpected output", out, err, expected_stdout, expected_stderr) verify.verify_exit_code(message, exit_code, expected_exit) @@ -263,28 +325,81 @@ def run_and_verify_svn2(message, expected_stdout, expected_stderr, verify.verify_exit_code(message, exit_code, expected_exit) return exit_code, out, err -def run_and_verify_load(repo_dir, dump_file_content): +def run_and_verify_load(repo_dir, dump_file_content, + bypass_prop_validation = False): "Runs 'svnadmin load' and reports any errors." 
if not isinstance(dump_file_content, list): raise TypeError("dump_file_content argument should have list type") expected_stderr = [] - exit_code, output, errput = main.run_command_stdin( - main.svnadmin_binary, expected_stderr, 0, 1, dump_file_content, - 'load', '--force-uuid', '--quiet', repo_dir) + if bypass_prop_validation: + exit_code, output, errput = main.run_command_stdin( + main.svnadmin_binary, expected_stderr, 0, 1, dump_file_content, + 'load', '--force-uuid', '--quiet', '--bypass-prop-validation', repo_dir) + else: + exit_code, output, errput = main.run_command_stdin( + main.svnadmin_binary, expected_stderr, 0, 1, dump_file_content, + 'load', '--force-uuid', '--quiet', repo_dir) verify.verify_outputs("Unexpected stderr output", None, errput, None, expected_stderr) -def run_and_verify_dump(repo_dir): +def run_and_verify_dump(repo_dir, deltas=False): "Runs 'svnadmin dump' and reports any errors, returning the dump content." - exit_code, output, errput = main.run_svnadmin('dump', repo_dir) + if deltas: + exit_code, output, errput = main.run_svnadmin('dump', '--deltas', + repo_dir) + else: + exit_code, output, errput = main.run_svnadmin('dump', repo_dir) verify.verify_outputs("Missing expected output(s)", output, errput, verify.AnyOutput, verify.AnyOutput) return output -def load_repo(sbox, dumpfile_path = None, dump_str = None): +def run_and_verify_svnrdump(dumpfile_content, expected_stdout, + expected_stderr, expected_exit, *varargs): + """Runs 'svnrdump dump|load' depending on dumpfile_content and + reports any errors.""" + exit_code, output, err = main.run_svnrdump(dumpfile_content, *varargs) + + # Since main.run_svnrdump() uses binary mode, normalize the stderr + # line endings on Windows ourselves. + if sys.platform == 'win32': + err = map(lambda x : x.replace('\r\n', '\n'), err) + + for index, line in enumerate(err[:]): + if re.search("warning: W200007", line): + del err[index] + + verify.verify_outputs("Unexpected output", output, err, + expected_stdout, expected_stderr) + verify.verify_exit_code("Unexpected return code", exit_code, expected_exit) + return output + + +def run_and_verify_svnmucc(message, expected_stdout, expected_stderr, + *varargs): + """Run svnmucc command and check its output""" + + expected_exit = 0 + if expected_stderr is not None and expected_stderr != []: + expected_exit = 1 + return run_and_verify_svnmucc2(message, expected_stdout, expected_stderr, + expected_exit, *varargs) + +def run_and_verify_svnmucc2(message, expected_stdout, expected_stderr, + expected_exit, *varargs): + """Run svnmucc command and check its output and exit code.""" + + exit_code, out, err = main.run_svnmucc(*varargs) + verify.verify_outputs("Unexpected output", out, err, + expected_stdout, expected_stderr) + verify.verify_exit_code(message, exit_code, expected_exit) + return exit_code, out, err + + +def load_repo(sbox, dumpfile_path = None, dump_str = None, + bypass_prop_validation = False): "Loads the dumpfile into sbox" if not dump_str: dump_str = open(dumpfile_path, "rb").read() @@ -295,11 +410,19 @@ def load_repo(sbox, dumpfile_path = None, dump_str = None): main.create_repos(sbox.repo_dir) # Load the mergetracking dumpfile into the repos, and check it out the repo - run_and_verify_load(sbox.repo_dir, dump_str.splitlines(True)) + run_and_verify_load(sbox.repo_dir, dump_str.splitlines(True), + bypass_prop_validation) run_and_verify_svn(None, None, [], "co", sbox.repo_url, sbox.wc_dir) return dump_str +def expected_noop_update_output(rev): + """Return an ExpectedOutput object 
describing what we'd expect to + see from an update to revision REV that was effectively a no-op (no + server changes transmitted).""" + return verify.createExpectedOutput("Updating '.*':|At revision %d." + % (rev), + "no-op update") ###################################################################### # Subversion Actions @@ -310,12 +433,13 @@ def load_repo(sbox, dumpfile_path = None, dump_str = None): # -def run_and_verify_checkout(URL, wc_dir_name, output_tree, disk_tree, - singleton_handler_a = None, - a_baton = None, - singleton_handler_b = None, - b_baton = None, - *args): +def run_and_verify_checkout2(do_remove, + URL, wc_dir_name, output_tree, disk_tree, + singleton_handler_a = None, + a_baton = None, + singleton_handler_b = None, + b_baton = None, + *args): """Checkout the URL into a new directory WC_DIR_NAME. *ARGS are any extra optional args to the checkout subcommand. @@ -326,8 +450,8 @@ def run_and_verify_checkout(URL, wc_dir_name, output_tree, disk_tree, function's doc string for more details. Return if successful, raise on failure. - WC_DIR_NAME is deleted if present unless the '--force' option is passed - in *ARGS.""" + WC_DIR_NAME is deleted if DO_REMOVE is True. + """ if isinstance(output_tree, wc.State): output_tree = output_tree.old_tree() @@ -337,7 +461,7 @@ def run_and_verify_checkout(URL, wc_dir_name, output_tree, disk_tree, # Remove dir if it's already there, unless this is a forced checkout. # In that case assume we want to test a forced checkout's toleration # of obstructing paths. - if '--force' not in args: + if do_remove: main.safe_rmtree(wc_dir_name) # Checkout and make a tree of the output, using l:foo/p:bar @@ -351,8 +475,7 @@ def run_and_verify_checkout(URL, wc_dir_name, output_tree, disk_tree, try: tree.compare_trees("output", actual, output_tree) except tree.SVNTreeUnequal: - print("ACTUAL OUTPUT TREE:") - tree.dump_tree_script(actual, wc_dir_name + os.sep) + _log_tree_state("ACTUAL OUTPUT TREE:", actual, wc_dir_name) raise # Create a tree by scanning the working copy @@ -364,10 +487,31 @@ def run_and_verify_checkout(URL, wc_dir_name, output_tree, disk_tree, singleton_handler_a, a_baton, singleton_handler_b, b_baton) except tree.SVNTreeUnequal: - print("ACTUAL DISK TREE:") - tree.dump_tree_script(actual, wc_dir_name + os.sep) + _log_tree_state("ACTUAL DISK TREE:", actual, wc_dir_name) raise +def run_and_verify_checkout(URL, wc_dir_name, output_tree, disk_tree, + singleton_handler_a = None, + a_baton = None, + singleton_handler_b = None, + b_baton = None, + *args): + """Same as run_and_verify_checkout2(), but without the DO_REMOVE arg. + WC_DIR_NAME is deleted if present unless the '--force' option is passed + in *ARGS.""" + + + # Remove dir if it's already there, unless this is a forced checkout. + # In that case assume we want to test a forced checkout's toleration + # of obstructing paths. + return run_and_verify_checkout2(('--force' not in args), + URL, wc_dir_name, output_tree, disk_tree, + singleton_handler_a, + a_baton, + singleton_handler_b, + b_baton, + *args) + def run_and_verify_export(URL, export_dir_name, output_tree, disk_tree, *args): @@ -394,8 +538,7 @@ def run_and_verify_export(URL, export_dir_name, output_tree, disk_tree, try: tree.compare_trees("output", actual, output_tree) except tree.SVNTreeUnequal: - print("ACTUAL OUTPUT TREE:") - tree.dump_tree_script(actual, export_dir_name + os.sep) + _log_tree_state("ACTUAL OUTPUT TREE:", actual, export_dir_name) raise # Create a tree by scanning the working copy. 
Don't ignore @@ -407,8 +550,7 @@ def run_and_verify_export(URL, export_dir_name, output_tree, disk_tree, try: tree.compare_trees("disk", actual, disk_tree) except tree.SVNTreeUnequal: - print("ACTUAL DISK TREE:") - tree.dump_tree_script(actual, export_dir_name + os.sep) + _log_tree_state("ACTUAL DISK TREE:", actual, export_dir_name) raise @@ -427,9 +569,13 @@ class LogEntry: self.revprops = revprops def assert_changed_paths(self, changed_paths): - """Not implemented, so just raises svntest.Failure. + """Assert that changed_paths is the same as this entry's changed_paths + Raises svntest.Failure if not. """ - raise Failure('NOT IMPLEMENTED') + if self.changed_paths != changed_paths: + raise Failure('\n' + '\n'.join(difflib.ndiff( + pprint.pformat(changed_paths).splitlines(), + pprint.pformat(self.changed_paths).splitlines()))) def assert_revprops(self, revprops): """Assert that the dict revprops is the same as this entry's revprops. @@ -462,11 +608,13 @@ class LogParser: self.parser.EndElementHandler = self.handle_end_element self.parser.CharacterDataHandler = self.handle_character_data # Ignore some things. - self.ignore_elements('log', 'paths', 'path', 'revprops') + self.ignore_elements('log', 'paths', 'revprops') self.ignore_tags('logentry_end', 'author_start', 'date_start', 'msg_start') # internal state self.cdata = [] self.property = None + self.kind = None + self.action = None # the result self.entries = [] @@ -510,6 +658,12 @@ class LogParser: self.property = attrs['name'] def property_end(self): self.entries[-1].revprops[self.property] = self.use_cdata() + def path_start(self, attrs): + self.kind = attrs['kind'] + self.action = attrs['action'] + def path_end(self): + self.entries[-1].changed_paths[self.use_cdata()] = [{'kind': self.kind, + 'action': self.action}] def run_and_verify_log_xml(message=None, expected_paths=None, expected_revprops=None, expected_stdout=None, @@ -615,8 +769,7 @@ def verify_update(actual_output, try: tree.compare_trees("output", actual_output, output_tree) except tree.SVNTreeUnequal: - print("ACTUAL OUTPUT TREE:") - tree.dump_tree_script(actual_output, wc_dir_name + os.sep) + _log_tree_state("ACTUAL OUTPUT TREE:", actual_output, wc_dir_name) raise # Verify actual mergeinfo recording output against expected output. @@ -625,9 +778,8 @@ def verify_update(actual_output, tree.compare_trees("mergeinfo_output", actual_mergeinfo_output, mergeinfo_output_tree) except tree.SVNTreeUnequal: - print("ACTUAL MERGEINFO OUTPUT TREE:") - tree.dump_tree_script(actual_mergeinfo_output, - wc_dir_name + os.sep) + _log_tree_state("ACTUAL MERGEINFO OUTPUT TREE:", actual_mergeinfo_output, + wc_dir_name) raise # Verify actual mergeinfo elision output against expected output. @@ -636,9 +788,8 @@ def verify_update(actual_output, tree.compare_trees("elision_output", actual_elision_output, elision_output_tree) except tree.SVNTreeUnequal: - print("ACTUAL ELISION OUTPUT TREE:") - tree.dump_tree_script(actual_elision_output, - wc_dir_name + os.sep) + _log_tree_state("ACTUAL ELISION OUTPUT TREE:", actual_elision_output, + wc_dir_name) raise # Create a tree by scanning the working copy, and verify it @@ -649,8 +800,8 @@ def verify_update(actual_output, singleton_handler_a, a_baton, singleton_handler_b, b_baton) except tree.SVNTreeUnequal: - print("ACTUAL DISK TREE:") - tree.dump_tree_script(actual_disk) + _log_tree_state("EXPECTED DISK TREE:", disk_tree) + _log_tree_state("ACTUAL DISK TREE:", actual_disk) raise # Verify via 'status' command too, if possible. 
@@ -728,8 +879,8 @@ def run_and_verify_update(wc_dir_name, def run_and_parse_info(*args): - """Run 'svn info' and parse its output into a list of dicts, - one dict per target.""" + """Run 'svn info ARGS' and parse its output into a list of dicts, + one dict per reported node.""" # the returned array all_infos = [] @@ -784,8 +935,10 @@ def run_and_parse_info(*args): def run_and_verify_info(expected_infos, *args): """Run 'svn info' with the arguments in *ARGS and verify the results - against expected_infos. The latter should be a list of dicts (in the - same order as the targets). + against expected_infos. The latter should be a list of dicts, one dict + per reported node, in the order in which the 'Path' fields of the output + will appear after sorting them as Python strings. (The dicts in + EXPECTED_INFOS, however, need not have a 'Path' key.) In the dicts, each key is the before-the-colon part of the 'svn info' output, and each value is either None (meaning that the key should *not* appear in @@ -795,6 +948,7 @@ def run_and_verify_info(expected_infos, *args): Return if successful, raise on failure.""" actual_infos = run_and_parse_info(*args) + actual_infos.sort(key=lambda info: info['Path']) try: # zip() won't complain, so check this manually @@ -813,7 +967,7 @@ def run_and_verify_info(expected_infos, *args): if value is not None and key not in actual: raise main.SVNLineUnequal("Expected key '%s' (with value '%s') " "not found" % (key, value)) - if value is not None and not re.search(value, actual[key]): + if value is not None and not re.match(value, actual[key]): raise verify.SVNUnexpectedStdout("Values of key '%s' don't match:\n" " Expected: '%s' (regex)\n" " Found: '%s' (string)\n" @@ -867,7 +1021,12 @@ def run_and_verify_merge(dir, rev1, rev2, url1, url2, If DRY_RUN is set then a --dry-run merge will be carried out first and the output compared with that of the full merge. - Return if successful, raise on failure.""" + Return if successful, raise on failure. + + *ARGS are any extra optional args to the merge subcommand. + NOTE: If *ARGS is specified at all, an explicit target path must be passed + in *ARGS as well. This allows the caller to merge into single items inside + the working copy, but still verify the entire working copy dir. """ merge_command = [ "merge" ] if url2: @@ -876,7 +1035,8 @@ def run_and_verify_merge(dir, rev1, rev2, url1, url2, if not (rev1 is None and rev2 is None): merge_command.append("-r" + str(rev1) + ":" + str(rev2)) merge_command.append(url1) - merge_command.append(dir) + if len(args) == 0: + merge_command.append(dir) merge_command = tuple(merge_command) if dry_run: @@ -889,9 +1049,9 @@ def run_and_verify_merge(dir, rev1, rev2, url1, url2, try: tree.compare_trees("disk", post_disk, pre_disk) except tree.SVNTreeError: - print("=============================================================") - print("Dry-run merge altered working copy") - print("=============================================================") + logger.warn("=============================================================") + logger.warn("Dry-run merge altered working copy") + logger.warn("=============================================================") raise @@ -939,34 +1099,42 @@ def run_and_verify_merge(dir, rev1, rev2, url1, url2, if dry_run and merge_diff_out != out_dry: # Due to the way ra_serf works, it's possible that the dry-run and # real merge operations did the same thing, but the output came in - # a different order. Let's see if maybe that's the case. + # a different order. 
Let's see if maybe that's the case by comparing + # the outputs as unordered sets rather than as lists. # - # NOTE: Would be nice to limit this dance to serf tests only, but... - out_copy = merge_diff_out[:] - out_dry_copy = out_dry[:] - out_copy.sort() - out_dry_copy.sort() + # This now happens for other RA layers with modern APR because the + # hash order now varies. + # + # The different orders of the real and dry-run merges may cause + # the "Merging rX through rY into" lines to be duplicated a + # different number of times in the two outputs. The list-set + # conversion removes duplicates so these differences are ignored. + # It also removes "U some/path" duplicate lines. Perhaps we + # should avoid that? + out_copy = set(merge_diff_out[:]) + out_dry_copy = set(out_dry[:]) + if out_copy != out_dry_copy: - print("=============================================================") - print("Merge outputs differ") - print("The dry-run merge output:") + logger.warn("=============================================================") + logger.warn("Merge outputs differ") + logger.warn("The dry-run merge output:") for x in out_dry: - sys.stdout.write(x) - print("The full merge output:") + logger.warn(x) + logger.warn("The full merge output:") for x in out: - sys.stdout.write(x) - print("=============================================================") + logger.warn(x) + logger.warn("=============================================================") raise main.SVNUnmatchedError def missing_skip(a, b): - print("=============================================================") - print("Merge failed to skip: " + a.path) - print("=============================================================") + logger.warn("=============================================================") + logger.warn("Merge failed to skip: %s", a.path) + logger.warn("=============================================================") raise Failure def extra_skip(a, b): - print("=============================================================") - print("Merge unexpectedly skipped: " + a.path) - print("=============================================================") + logger.warn("=============================================================") + logger.warn("Merge unexpectedly skipped: %s", a.path) + logger.warn("=============================================================") raise Failure myskiptree = tree.build_tree_from_skipped(out) @@ -976,8 +1144,7 @@ def run_and_verify_merge(dir, rev1, rev2, url1, url2, tree.compare_trees("skip", myskiptree, skip_tree, extra_skip, None, missing_skip, None) except tree.SVNTreeUnequal: - print("ACTUAL SKIP TREE:") - tree.dump_tree_script(myskiptree, dir + os.sep) + _log_tree_state("ACTUAL SKIP TREE:", myskiptree, dir) raise actual_diff = svntest.wc.State.from_checkout(merge_diff_out, False) @@ -1033,9 +1200,9 @@ def run_and_verify_patch(dir, patch_path, try: tree.compare_trees("disk", post_disk, pre_disk) except tree.SVNTreeError: - print("=============================================================") - print("'svn patch --dry-run' altered working copy") - print("=============================================================") + logger.warn("=============================================================") + logger.warn("'svn patch --dry-run' altered working copy") + logger.warn("=============================================================") raise # Update and make a tree of the output. 
@@ -1052,32 +1219,27 @@ def run_and_verify_patch(dir, patch_path, if not match: raise main.SVNUnmatchedError elif err: - print("UNEXPECTED STDERR:") + logger.warn("UNEXPECTED STDERR:") for x in err: - sys.stdout.write(x) + logger.warn(x) raise verify.SVNUnexpectedStderr if dry_run and out != out_dry: - print("=============================================================") - print("Outputs differ") - print("'svn patch --dry-run' output:") - for x in out_dry: - sys.stdout.write(x) - print("'svn patch' output:") - for x in out: - sys.stdout.write(x) - print("=============================================================") - raise main.SVNUnmatchedError + # APR hash order means the output order can vary, assume everything is OK + # if only the order changes. + out_dry_expected = svntest.verify.UnorderedOutput(out) + verify.compare_and_display_lines('dry-run patch output not as expected', + '', out_dry_expected, out_dry) def missing_skip(a, b): - print("=============================================================") - print("'svn patch' failed to skip: " + a.path) - print("=============================================================") + logger.warn("=============================================================") + logger.warn("'svn patch' failed to skip: %s", a.path) + logger.warn("=============================================================") raise Failure def extra_skip(a, b): - print("=============================================================") - print("'svn patch' unexpectedly skipped: " + a.path) - print("=============================================================") + logger.warn("=============================================================") + logger.warn("'svn patch' unexpectedly skipped: %s", a.path) + logger.warn("=============================================================") raise Failure myskiptree = tree.build_tree_from_skipped(out) @@ -1090,7 +1252,8 @@ def run_and_verify_patch(dir, patch_path, # when the expected output is a list, we want a line-by-line # comparison to happen instead of a tree comparison - if isinstance(output_tree, list): + if (isinstance(output_tree, list) + or isinstance(output_tree, verify.UnorderedOutput)): verify.verify_outputs(None, out, err, output_tree, error_re_string) output_tree = None @@ -1118,7 +1281,7 @@ def run_and_verify_mergeinfo(error_re_string = None, verify.verify_outputs(None, None, err, None, expected_err) return - out = sorted([_f for _f in [x.rstrip()[1:] for x in out] if _f]) + out = [_f for _f in [x.rstrip()[1:] for x in out] if _f] expected_output.sort() extra_out = [] if out != expected_output: @@ -1194,15 +1357,25 @@ def process_output_for_commit(output): """Helper for run_and_verify_commit(), also used in the factory.""" # Remove the final output line, and verify that the commit succeeded. 
lastline = "" + rest = [] + + def external_removal(line): + return line.startswith('Removing external') \ + or line.startswith('Removed external') + if len(output): lastline = output.pop().strip() + while len(output) and external_removal(lastline): + rest.append(lastline) + lastline = output.pop().strip() + cm = re.compile("(Committed|Imported) revision [0-9]+.") match = cm.search(lastline) if not match: - print("ERROR: commit did not succeed.") - print("The final line from 'svn ci' was:") - print(lastline) + logger.warn("ERROR: commit did not succeed.") + logger.warn("The final line from 'svn ci' was:") + logger.warn(lastline) raise main.SVNCommitFailure # The new 'final' line in the output is either a regular line that @@ -1219,6 +1392,9 @@ def process_output_for_commit(output): # whoops, it was important output, put it back. output.append(lastline) + if len(rest): + output.extend(rest) + return output @@ -1245,8 +1421,9 @@ def run_and_verify_commit(wc_dir_name, output_tree, status_tree, status_tree = status_tree.old_tree() # Commit. + if '-m' not in args and '-F' not in args: + args = list(args) + ['-m', 'log msg'] exit_code, output, errput = main.run_svn(error_re_string, 'ci', - '-m', 'log msg', *args) if error_re_string: @@ -1268,8 +1445,7 @@ def run_and_verify_commit(wc_dir_name, output_tree, status_tree, except tree.SVNTreeError: verify.display_trees("Output of commit is unexpected", "OUTPUT TREE", output_tree, actual) - print("ACTUAL OUTPUT TREE:") - tree.dump_tree_script(actual, wc_dir_name + os.sep) + _log_tree_state("ACTUAL OUTPUT TREE:", actual, wc_dir_name) raise # Verify via 'status' command too, if possible. @@ -1308,8 +1484,7 @@ def run_and_verify_status(wc_dir_name, output_tree, singleton_handler_b, b_baton) except tree.SVNTreeError: verify.display_trees(None, 'STATUS OUTPUT TREE', output_tree, actual) - print("ACTUAL STATUS TREE:") - tree.dump_tree_script(actual, wc_dir_name + os.sep) + _log_tree_state("ACTUAL STATUS TREE:", actual, wc_dir_name) raise # if we have an output State, and we can/are-allowed to create an @@ -1345,10 +1520,59 @@ def run_and_verify_unquiet_status(wc_dir_name, status_tree): try: tree.compare_trees("UNQUIET STATUS", actual, status_tree) except tree.SVNTreeError: - print("ACTUAL UNQUIET STATUS TREE:") - tree.dump_tree_script(actual, wc_dir_name + os.sep) + _log_tree_state("ACTUAL UNQUIET STATUS TREE:", actual, wc_dir_name) raise +def run_and_verify_status_xml(expected_entries = [], + *args): + """ Run 'status --xml' with arguments *ARGS. If successful the output + is parsed into an XML document and will be verified by comparing against + EXPECTED_ENTRIES. 
+ """ + + exit_code, output, errput = run_and_verify_svn(None, None, [], + 'status', '--xml', *args) + + if len(errput) > 0: + raise Failure + + doc = parseString(''.join(output)) + entries = doc.getElementsByTagName('entry') + + def getText(nodelist): + rc = [] + for node in nodelist: + if node.nodeType == node.TEXT_NODE: + rc.append(node.data) + return ''.join(rc) + + actual_entries = {} + for entry in entries: + wcstatus = entry.getElementsByTagName('wc-status')[0] + commit = entry.getElementsByTagName('commit') + author = entry.getElementsByTagName('author') + rstatus = entry.getElementsByTagName('repos-status') + + actual_entry = {'wcprops' : wcstatus.getAttribute('props'), + 'wcitem' : wcstatus.getAttribute('item'), + } + if wcstatus.hasAttribute('revision'): + actual_entry['wcrev'] = wcstatus.getAttribute('revision') + if (commit): + actual_entry['crev'] = commit[0].getAttribute('revision') + if (author): + actual_entry['author'] = getText(author[0].childNodes) + if (rstatus): + actual_entry['rprops'] = rstatus[0].getAttribute('props') + actual_entry['ritem'] = rstatus[0].getAttribute('item') + + actual_entries[entry.getAttribute('path')] = actual_entry + + if expected_entries != actual_entries: + raise Failure('\n' + '\n'.join(difflib.ndiff( + pprint.pformat(expected_entries).splitlines(), + pprint.pformat(actual_entries).splitlines()))) + def run_and_verify_diff_summarize_xml(error_re_string = [], expected_prefix = None, expected_paths = [], @@ -1401,7 +1625,7 @@ def run_and_verify_diff_summarize_xml(error_re_string = [], modified_path = modified_path.replace(os.sep, "/") if modified_path not in expected_paths: - print("ERROR: %s not expected in the changed paths." % modified_path) + logger.warn("ERROR: %s not expected in the changed paths.", modified_path) raise Failure index = expected_paths.index(modified_path) @@ -1413,15 +1637,15 @@ def run_and_verify_diff_summarize_xml(error_re_string = [], actual_prop = path.getAttribute('props') if expected_item != actual_item: - print("ERROR: expected: %s actual: %s" % (expected_item, actual_item)) + logger.warn("ERROR: expected: %s actual: %s", expected_item, actual_item) raise Failure if expected_kind != actual_kind: - print("ERROR: expected: %s actual: %s" % (expected_kind, actual_kind)) + logger.warn("ERROR: expected: %s actual: %s", expected_kind, actual_kind) raise Failure if expected_prop != actual_prop: - print("ERROR: expected: %s actual: %s" % (expected_prop, actual_prop)) + logger.warn("ERROR: expected: %s actual: %s", expected_prop, actual_prop) raise Failure def run_and_verify_diff_summarize(output_tree, *args): @@ -1444,8 +1668,7 @@ def run_and_verify_diff_summarize(output_tree, *args): tree.compare_trees("output", actual, output_tree) except tree.SVNTreeError: verify.display_trees(None, 'DIFF OUTPUT TREE', output_tree, actual) - print("ACTUAL DIFF OUTPUT TREE:") - tree.dump_tree_script(actual) + _log_tree_state("ACTUAL DIFF OUTPUT TREE:", actual) raise def run_and_validate_lock(path, username): @@ -1506,29 +1729,41 @@ def run_and_verify_resolved(expected_paths, *args): elements of EXPECTED_PATHS as the arguments.""" _run_and_verify_resolve('resolved', expected_paths, *args) +def run_and_verify_revert(expected_paths, *args): + """Run "svn revert" with arguments ARGS, and verify that it reverts + the paths in EXPECTED_PATHS and no others. 
If no ARGS are + specified, use the elements of EXPECTED_PATHS as the arguments.""" + if len(args) == 0: + args = expected_paths + expected_output = verify.UnorderedOutput([ + "Reverted '" + path + "'\n" for path in + expected_paths]) + run_and_verify_svn(None, expected_output, [], + "revert", *args) + ###################################################################### # Other general utilities # This allows a test to *quickly* bootstrap itself. -def make_repo_and_wc(sbox, create_wc = True, read_only = False): +def make_repo_and_wc(sbox, create_wc = True, read_only = False, + minor_version = None): """Create a fresh 'Greek Tree' repository and check out a WC from it. - If read_only is False, a dedicated repository will be created, named - TEST_NAME. The repository will live in the global dir 'general_repo_dir'. - If read_only is True the pristine repository will be used. + If READ_ONLY is False, a dedicated repository will be created, at the path + SBOX.repo_dir. If READ_ONLY is True, the pristine repository will be used. + In either case, SBOX.repo_url is assumed to point to the repository that + will be used. If create_wc is True, a dedicated working copy will be checked out from - the repository, named TEST_NAME. The wc directory will live in the global - dir 'general_wc_dir'. + the repository, at the path SBOX.wc_dir. - Both variables 'general_repo_dir' and 'general_wc_dir' are defined at the - top of this test suite.) Returns on success, raises on failure.""" + Returns on success, raises on failure.""" # Create (or copy afresh) a new repos with a greek tree in it. if not read_only: - guarantee_greek_repository(sbox.repo_dir) + guarantee_greek_repository(sbox.repo_dir, minor_version) if create_wc: # Generate the expected output tree. @@ -1576,27 +1811,17 @@ def get_virginal_state(wc_dir, rev): return state -def remove_admin_tmp_dir(wc_dir): - "Remove the tmp directory within the administrative directory." - - tmp_path = os.path.join(wc_dir, main.get_admin_name(), 'tmp') - ### Any reason not to use main.safe_rmtree()? - os.rmdir(os.path.join(tmp_path, 'prop-base')) - os.rmdir(os.path.join(tmp_path, 'props')) - os.rmdir(os.path.join(tmp_path, 'text-base')) - os.rmdir(tmp_path) - # Cheap administrative directory locking -def lock_admin_dir(wc_dir): +def lock_admin_dir(wc_dir, recursive=False): "Lock a SVN administrative directory" + db, root_path, relpath = wc.open_wc_db(wc_dir) + + svntest.main.run_wc_lock_tester(recursive, wc_dir) + +def set_incomplete(wc_dir, revision): + "Make wc_dir incomplete at revision" - db = svntest.sqlite3.connect(os.path.join(wc_dir, main.get_admin_name(), - 'wc.db')) - db.execute('insert into wc_lock (wc_id, local_dir_relpath, locked_levels) ' - + 'values (?, ?, ?)', - (1, '', 0)) - db.commit() - db.close() + svntest.main.run_wc_incomplete_tester(wc_dir, revision) def get_wc_uuid(wc_dir): "Return the UUID of the working copy at WC_DIR." 
@@ -1643,7 +1868,8 @@ def enable_revprop_changes(repo_dir): pre-revprop-change hook script and (if appropriate) making it executable.""" hook_path = main.get_pre_revprop_change_hook_path(repo_dir) - main.create_python_hook_script(hook_path, 'import sys; sys.exit(0)') + main.create_python_hook_script(hook_path, 'import sys; sys.exit(0)', + cmd_alternative='@exit 0') def disable_revprop_changes(repo_dir): """Disable revprop changes in the repository at REPO_DIR by creating a @@ -1653,8 +1879,12 @@ def disable_revprop_changes(repo_dir): hook_path = main.get_pre_revprop_change_hook_path(repo_dir) main.create_python_hook_script(hook_path, 'import sys\n' - 'sys.stderr.write("pre-revprop-change %s" % " ".join(sys.argv[1:6]))\n' - 'sys.exit(1)\n') + 'sys.stderr.write("pre-revprop-change %s" %' + ' " ".join(sys.argv[1:]))\n' + 'sys.exit(1)\n', + cmd_alternative= + '@echo pre-revprop-change %* 1>&2\n' + '@exit 1\n') def create_failing_post_commit_hook(repo_dir): """Create a post-commit hook script in the repository at REPO_DIR that always @@ -1663,39 +1893,56 @@ def create_failing_post_commit_hook(repo_dir): hook_path = main.get_post_commit_hook_path(repo_dir) main.create_python_hook_script(hook_path, 'import sys\n' 'sys.stderr.write("Post-commit hook failed")\n' - 'sys.exit(1)') + 'sys.exit(1)\n', + cmd_alternative= + '@echo Post-commit hook failed 1>&2\n' + '@exit 1\n') # set_prop can be used for properties with NULL characters which are not # handled correctly when passed to subprocess.Popen() and values like "*" # which are not handled correctly on Windows. -def set_prop(name, value, path, expected_err=None): +def set_prop(name, value, path, expected_re_string=None): """Set a property with specified value""" if value and (value[0] == '-' or '\x00' in value or sys.platform == 'win32'): from tempfile import mkstemp (fd, value_file_path) = mkstemp() + os.close(fd) value_file = open(value_file_path, 'wb') value_file.write(value) value_file.flush() value_file.close() - main.run_svn(expected_err, 'propset', '-F', value_file_path, name, path) - os.close(fd) + exit_code, out, err = main.run_svn(expected_re_string, 'propset', + '-F', value_file_path, name, path) os.remove(value_file_path) else: - main.run_svn(expected_err, 'propset', name, value, path) + exit_code, out, err = main.run_svn(expected_re_string, 'propset', + name, value, path) + if expected_re_string: + if not expected_re_string.startswith(".*"): + expected_re_string = ".*(" + expected_re_string + ")" + expected_err = verify.RegexOutput(expected_re_string, match_all=False) + verify.verify_outputs(None, None, err, None, expected_err) -def check_prop(name, path, exp_out): - """Verify that property NAME on PATH has a value of EXP_OUT""" +def check_prop(name, path, exp_out, revprop=None): + """Verify that property NAME on PATH has a value of EXP_OUT. + If REVPROP is not None, then it is a revision number and + a revision property is sought.""" + if revprop is not None: + revprop_options = ['--revprop', '-r', revprop] + else: + revprop_options = [] # Not using run_svn because binary_mode must be set exit_code, out, err = main.run_command(main.svn_binary, None, 1, 'pg', '--strict', name, path, '--config-dir', main.default_config_dir, '--username', main.wc_author, - '--password', main.wc_passwd) + '--password', main.wc_passwd, + *revprop_options) if out != exp_out: - print("svn pg --strict %s output does not match expected." 
% name) - print("Expected standard output: %s\n" % exp_out) - print("Actual standard output: %s\n" % out) + logger.warn("svn pg --strict %s output does not match expected.", name) + logger.warn("Expected standard output: %s\n", exp_out) + logger.warn("Actual standard output: %s\n", out) raise Failure def fill_file_with_lines(wc_path, line_nbr, line_descrip=None, @@ -1765,7 +2012,6 @@ def inject_conflict_into_wc(sbox, state_path, file_path, conflicting_contents, contents, merged_rev) exit_code, output, errput = main.run_svn(None, "up", "-r", str(merged_rev), - sbox.repo_url + "/" + state_path, file_path) if expected_status: expected_status.tweak(state_path, wc_rev=merged_rev) @@ -2022,6 +2268,13 @@ deep_trees_after_leaf_del = wc.State('', { 'DDD/D1/D2' : Item(), }) +# deep trees state after a call to deep_trees_leaf_del with no commit +def deep_trees_after_leaf_del_no_ci(wc_dir): + if svntest.main.wc_is_singledb(wc_dir): + return deep_trees_after_leaf_del + else: + return deep_trees_empty_dirs + def deep_trees_tree_del(base): """Helper function for deep trees test cases. Delete top-level dirs.""" @@ -2080,6 +2333,13 @@ deep_trees_empty_dirs = wc.State('', { 'DDD/D1/D2/D3' : Item(), }) +# deep trees state after a call to deep_trees_tree_del with no commit +def deep_trees_after_tree_del_no_ci(wc_dir): + if svntest.main.wc_is_singledb(wc_dir): + return deep_trees_after_tree_del + else: + return deep_trees_empty_dirs + def deep_trees_tree_del_repos(base): """Helper function for deep trees test cases. Delete top-level dirs, directly in the repository.""" @@ -2265,7 +2525,8 @@ def deep_trees_run_tests_scheme_for_update(sbox, greater_scheme): j = os.path.join - sbox.build() + if not sbox.is_built(): + sbox.build() wc_dir = sbox.wc_dir @@ -2275,8 +2536,8 @@ def deep_trees_run_tests_scheme_for_update(sbox, greater_scheme): try: add_deep_trees(sbox, test_case.name) except: - print("ERROR IN: Tests scheme for update: " - + "while setting up deep trees in '%s'" % test_case.name) + logger.warn("ERROR IN: Tests scheme for update: " + + "while setting up deep trees in '%s'", test_case.name) raise @@ -2291,8 +2552,8 @@ def deep_trees_run_tests_scheme_for_update(sbox, greater_scheme): try: test_case.incoming_action(j(sbox.wc_dir, test_case.name)) except: - print("ERROR IN: Tests scheme for update: " - + "while performing incoming action in '%s'" % test_case.name) + logger.warn("ERROR IN: Tests scheme for update: " + + "while performing incoming action in '%s'", test_case.name) raise @@ -2312,8 +2573,8 @@ def deep_trees_run_tests_scheme_for_update(sbox, greater_scheme): try: test_case.local_action(j(wc_dir, test_case.name)) except: - print("ERROR IN: Tests scheme for update: " - + "while performing local action in '%s'" % test_case.name) + logger.warn("ERROR IN: Tests scheme for update: " + + "while performing local action in '%s'", test_case.name) raise @@ -2347,8 +2608,8 @@ def deep_trees_run_tests_scheme_for_update(sbox, greater_scheme): run_and_verify_info([x_info[path]], j(base, path)) except: - print("ERROR IN: Tests scheme for update: " - + "while verifying in '%s'" % test_case.name) + logger.warn("ERROR IN: Tests scheme for update: " + + "while verifying in '%s'", test_case.name) raise @@ -2367,8 +2628,8 @@ def deep_trees_run_tests_scheme_for_update(sbox, greater_scheme): test_case.commit_block_string, base) except: - print("ERROR IN: Tests scheme for update: " - + "while checking commit-blocking in '%s'" % test_case.name) + logger.warn("ERROR IN: Tests scheme for update: " + + "while checking 
commit-blocking in '%s'", test_case.name) raise @@ -2444,10 +2705,15 @@ def deep_trees_skipping_on_update(sbox, test_case, skip_paths, # This time, cd to the subdir before updating it. was_cwd = os.getcwd() for path, skipped in chdir_skip_paths: - #print("CHDIR TO: %s" % j(base, path)) - os.chdir(j(base, path)) - run_and_verify_update('', - wc.State('', {skipped : Item(verb='Skipped')}), + if isinstance(skipped, list): + expected_skip = {} + for p in skipped: + expected_skip[p] = Item(verb='Skipped') + else: + expected_skip = {skipped : Item(verb='Skipped')} + p = j(base, path) + run_and_verify_update(p, + wc.State(p, expected_skip), None, None) os.chdir(was_cwd) @@ -2506,7 +2772,8 @@ def deep_trees_run_tests_scheme_for_switch(sbox, greater_scheme): j = os.path.join - sbox.build() + if not sbox.is_built(): + sbox.build() wc_dir = sbox.wc_dir @@ -2520,8 +2787,8 @@ def deep_trees_run_tests_scheme_for_switch(sbox, greater_scheme): make_deep_trees(j(base, "incoming")) main.run_svn(None, 'add', base) except: - print("ERROR IN: Tests scheme for switch: " - + "while setting up deep trees in '%s'" % test_case.name) + logger.warn("ERROR IN: Tests scheme for switch: " + + "while setting up deep trees in '%s'", test_case.name) raise @@ -2536,8 +2803,8 @@ def deep_trees_run_tests_scheme_for_switch(sbox, greater_scheme): try: test_case.incoming_action(j(sbox.wc_dir, test_case.name, "incoming")) except: - print("ERROR IN: Tests scheme for switch: " - + "while performing incoming action in '%s'" % test_case.name) + logger.warn("ERROR IN: Tests scheme for switch: " + + "while performing incoming action in '%s'", test_case.name) raise @@ -2552,8 +2819,8 @@ def deep_trees_run_tests_scheme_for_switch(sbox, greater_scheme): try: test_case.local_action(j(sbox.wc_dir, test_case.name, "local")) except: - print("ERROR IN: Tests scheme for switch: " - + "while performing local action in '%s'" % test_case.name) + logger.warn("ERROR IN: Tests scheme for switch: " + + "while performing local action in '%s'", test_case.name) raise @@ -2579,15 +2846,16 @@ def deep_trees_run_tests_scheme_for_switch(sbox, greater_scheme): x_status.wc_dir = local run_and_verify_switch(local, local, incoming, x_out, x_disk, None, - error_re_string = test_case.error_re_string) + test_case.error_re_string, None, None, None, + None, False, '--ignore-ancestry') run_and_verify_unquiet_status(local, x_status) x_info = test_case.expected_info or {} for path in x_info: run_and_verify_info([x_info[path]], j(local, path)) except: - print("ERROR IN: Tests scheme for switch: " - + "while verifying in '%s'" % test_case.name) + logger.warn("ERROR IN: Tests scheme for switch: " + + "while verifying in '%s'", test_case.name) raise @@ -2606,13 +2874,15 @@ def deep_trees_run_tests_scheme_for_switch(sbox, greater_scheme): test_case.commit_block_string, local) except: - print("ERROR IN: Tests scheme for switch: " - + "while checking commit-blocking in '%s'" % test_case.name) + logger.warn("ERROR IN: Tests scheme for switch: " + + "while checking commit-blocking in '%s'", test_case.name) raise def deep_trees_run_tests_scheme_for_merge(sbox, greater_scheme, - do_commit_local_changes): + do_commit_local_changes, + do_commit_conflicts=True, + ignore_ancestry=False): """ Runs a given list of tests for conflicts occuring at a merge operation. @@ -2646,12 +2916,14 @@ def deep_trees_run_tests_scheme_for_merge(sbox, greater_scheme, Then, in effect, the local changes are committed as well. 
8) In each test case subdir, the "incoming" subdir is merged into the - "local" subdir. - This causes conflicts between the "local" state in the working - copy and the "incoming" state from the incoming subdir. + "local" subdir. If ignore_ancestry is True, then the merge is done + with the --ignore-ancestry option, so mergeinfo is neither considered + nor recorded. This causes conflicts between the "local" state in the + working copy and the "incoming" state from the incoming subdir. - 9) A commit is performed in each separate container, to verify - that each tree-conflict indeed blocks a commit. + 9) If do_commit_conflicts is True, then a commit is performed in each + separate container, to verify that each tree-conflict indeed blocks + a commit. The sbox parameter is just the sbox passed to a test function. No need to call sbox.build(), since it is called (once) within this function. @@ -2665,7 +2937,8 @@ def deep_trees_run_tests_scheme_for_merge(sbox, greater_scheme, j = os.path.join - sbox.build() + if not sbox.is_built(): + sbox.build() wc_dir = sbox.wc_dir # 1) Create directories. @@ -2676,8 +2949,8 @@ def deep_trees_run_tests_scheme_for_merge(sbox, greater_scheme, make_deep_trees(j(base, "incoming")) main.run_svn(None, 'add', base) except: - print("ERROR IN: Tests scheme for merge: " - + "while setting up deep trees in '%s'" % test_case.name) + logger.warn("ERROR IN: Tests scheme for merge: " + + "while setting up deep trees in '%s'", test_case.name) raise @@ -2696,8 +2969,8 @@ def deep_trees_run_tests_scheme_for_merge(sbox, greater_scheme, main.run_svn(None, 'cp', incoming_url, local_url, '-m', 'copy incoming to local') except: - print("ERROR IN: Tests scheme for merge: " - + "while copying deep trees in '%s'" % test_case.name) + logger.warn("ERROR IN: Tests scheme for merge: " + + "while copying deep trees in '%s'", test_case.name) raise # 4) Update to load all of the "/local" subdirs into the working copies. 
@@ -2705,7 +2978,7 @@ def deep_trees_run_tests_scheme_for_merge(sbox, greater_scheme, try: main.run_svn(None, 'up', sbox.wc_dir) except: - print("ERROR IN: Tests scheme for merge: " + logger.warn("ERROR IN: Tests scheme for merge: " + "while updating local subdirs") raise @@ -2716,8 +2989,8 @@ def deep_trees_run_tests_scheme_for_merge(sbox, greater_scheme, try: test_case.incoming_action(j(sbox.wc_dir, test_case.name, "incoming")) except: - print("ERROR IN: Tests scheme for merge: " - + "while performing incoming action in '%s'" % test_case.name) + logger.warn("ERROR IN: Tests scheme for merge: " + + "while performing incoming action in '%s'", test_case.name) raise @@ -2728,7 +3001,7 @@ def deep_trees_run_tests_scheme_for_merge(sbox, greater_scheme, main.run_svn(None, 'ci', '-m', 'Committing incoming actions', sbox.wc_dir) except: - print("ERROR IN: Tests scheme for merge: " + logger.warn("ERROR IN: Tests scheme for merge: " + "while committing incoming actions") raise @@ -2739,8 +3012,8 @@ def deep_trees_run_tests_scheme_for_merge(sbox, greater_scheme, try: test_case.local_action(j(sbox.wc_dir, test_case.name, "local")) except: - print("ERROR IN: Tests scheme for merge: " - + "while performing local action in '%s'" % test_case.name) + logger.warn("ERROR IN: Tests scheme for merge: " + + "while performing local action in '%s'", test_case.name) raise @@ -2751,7 +3024,7 @@ def deep_trees_run_tests_scheme_for_merge(sbox, greater_scheme, main.run_svn(None, 'ci', '-m', 'Committing incoming and local actions', sbox.wc_dir) except: - print("ERROR IN: Tests scheme for merge: " + logger.warn("ERROR IN: Tests scheme for merge: " + "while committing incoming and local actions") raise @@ -2782,34 +3055,40 @@ def deep_trees_run_tests_scheme_for_merge(sbox, greater_scheme, x_skip.copy() x_skip.wc_dir = local + varargs = (local,) + if ignore_ancestry: + varargs = varargs + ('--ignore-ancestry',) + run_and_verify_merge(local, None, None, incoming, None, x_out, None, None, x_disk, None, x_skip, - error_re_string = test_case.error_re_string, - dry_run = False) + test_case.error_re_string, + None, None, None, None, + False, False, *varargs) run_and_verify_unquiet_status(local, x_status) except: - print("ERROR IN: Tests scheme for merge: " - + "while verifying in '%s'" % test_case.name) + logger.warn("ERROR IN: Tests scheme for merge: " + + "while verifying in '%s'", test_case.name) raise # 9) Verify that commit fails. 
- for test_case in greater_scheme: - try: - local = j(wc_dir, test_case.name, 'local') - - x_status = test_case.expected_status - if x_status != None: - x_status.copy() - x_status.wc_dir = local - - run_and_verify_commit(local, None, x_status, - test_case.commit_block_string, - local) - except: - print("ERROR IN: Tests scheme for merge: " - + "while checking commit-blocking in '%s'" % test_case.name) - raise + if do_commit_conflicts: + for test_case in greater_scheme: + try: + local = j(wc_dir, test_case.name, 'local') + + x_status = test_case.expected_status + if x_status != None: + x_status.copy() + x_status.wc_dir = local + + run_and_verify_commit(local, None, x_status, + test_case.commit_block_string, + local) + except: + logger.warn("ERROR IN: Tests scheme for merge: " + + "while checking commit-blocking in '%s'", test_case.name) + raise diff --git a/svntest/factory.py b/svntest/factory.py index 7c8af7f8..cb387e83 100644 --- a/svntest/factory.py +++ b/svntest/factory.py @@ -255,7 +255,7 @@ if sys.version_info[0] >= 3: from io import StringIO else: # Python <3.0 - from StringIO import StringIO + from cStringIO import StringIO def make(wc_dir, commands, prev_status=None, prev_disk=None, verbose=True): """The Factory Invocation Function. This is typically the only one @@ -288,7 +288,9 @@ class TestFactory: # Any expected_disk still there from a previous verification self.prev_disk = None if prev_disk: - self.prev_disk = [None, prev_disk] # svntest.wc.State + reparented_prev_disk = svntest.wc.State(prev_disk.wc_dir, {}); + reparented_prev_disk.add_state(sbox.wc_dir, prev_disk); + self.prev_disk = [None, reparented_prev_disk] # Those command line options that expect an argument following # which is not a path. (don't expand args following these) @@ -434,6 +436,9 @@ class TestFactory: if second in ['update','up']: return self.cmd_svn_update(args[2:]) + if second in ['switch','sw']: + return self.cmd_svn_switch(args[2:]) + if second in ['copy', 'cp', 'move', 'mv', 'rename', 'ren']: return self.cmd_svn_copy_move(args[1:]) @@ -596,7 +601,7 @@ class TestFactory: def cmd_svn_update(self, update_args): - "Runs svnn update, looks what happened and writes the script for it." + "Runs svn update, looks what happened and writes the script for it." pyargs, runargs, do_chdir, targets = self.args2svntest( update_args, True, self.keep_args_of, 0) @@ -637,6 +642,74 @@ class TestFactory: return py + def cmd_svn_switch(self, switch_args): + "Runs svn switch, looks what happened and writes the script for it." + + pyargs, runargs, do_chdir, targets = self.args2svntest( + switch_args, True, self.keep_args_of, 0) + + # Sort out the targets. We need one URL and one wc node, in that order. + if len(targets) < 2: + raise Failure("Sorry, I'm currently enforcing two targets for svn " + + "switch. 
If you want to supply less, remove this " + + "check and implement whatever seems appropriate.") + + wc_arg = targets[1] + del pyargs[wc_arg.argnr] + del runargs[wc_arg.argnr] + url_arg = targets[0] + del pyargs[url_arg.argnr] + del runargs[url_arg.argnr] + + wc = wc_arg.wc + if not wc: + raise Failure("Unexpected argument ordering to factory's 'svn switch'?") + + pychdir = self.chdir(do_chdir, wc) + + #if '--force' in runargs: + # self.really_safe_rmtree(wc_arg.runarg) + + code, output, err = main.run_svn('Maybe', 'sw', + url_arg.runarg, wc_arg.runarg, + *runargs) + + py = "" + + if code == 0 and len(err) < 1: + # write a test that expects success + + actual_out = tree.build_tree_from_checkout(output) + py = ("expected_output = " + + self.tree2py(actual_out, wc) + "\n\n") + + pydisk = self.get_current_disk(wc) + py += pydisk + + pystatus = self.get_current_status(wc) + py += pystatus + + py += pychdir + py += ("actions.run_and_verify_switch(" + wc.py + ", " + + wc_arg.pyarg + ", " + url_arg.pyarg + ", " + + "expected_output, expected_disk, expected_status, " + + "None, None, None, None, None, False") + else: + # write a test that expects error + py = "expected_error = " + self.strlist2py(err) + "\n\n" + py += pychdir + py += ("actions.run_and_verify_switch(" + wc.py + ", " + + wc_arg.pyarg + ", " + url_arg.pyarg + ", " + + "None, None, None, expected_error, None, None, None, None, False") + + if len(pyargs) > 0: + py += ', ' + ', '.join(pyargs) + py += ")" + py += self.chdir_back(do_chdir) + + return py + + def cmd_svn_checkout(self, checkout_args): "Runs svn checkout, looks what happened and writes the script for it." @@ -661,8 +734,8 @@ class TestFactory: pychdir = self.chdir(do_chdir, wc) - if '--force' in runargs: - self.really_safe_rmtree(wc_arg.runarg) + #if '--force' in runargs: + # self.really_safe_rmtree(wc_arg.runarg) code, output, err = main.run_svn('Maybe', 'co', url_arg.runarg, wc_arg.runarg, @@ -761,7 +834,7 @@ class TestFactory: if i != len(echo_args)-1: raise Failure("don't understand: echo " + " ".join(echo_args)) - contents = " ".join(echo_args[:i]) + contents = " ".join(echo_args[:i]) + '\n' if target_arg is None: raise Failure("echo needs a '>' pipe to a file name: echo " + @@ -801,8 +874,12 @@ class TestFactory: for arg in rm_args: if not arg.startswith('-'): target = self.path2svntest(arg) - self.really_safe_rmtree(target.runarg) - out += "main.safe_rmtree(" + target.pyarg + ")\n" + if os.path.isfile(target.runarg): + os.remove(target.runarg) + out += "os.remove(" + target.pyarg + ")\n" + else: + self.really_safe_rmtree(target.runarg) + out += "main.safe_rmtree(" + target.pyarg + ")\n" return out @@ -1030,16 +1107,18 @@ class TestFactory: def get_sorted_vars_by_pathlen(self): """Compose a listing of variable names to be expanded in script output. This is intended to be stored in self.sorted_vars_by_pathlen.""" - list = [] + lst = [] for dict in [self.vars, self.other_wc_dirs]: for name in dict: runpath = dict[name][1] + if not runpath: + continue strlen = len(runpath) item = [strlen, name, runpath] - bisect.insort(list, item) + bisect.insort(lst, item) - return list + return lst def get_sorted_var_names(self): @@ -1275,6 +1354,10 @@ class TestFactory: # Check if the actual tree had this anyway all the way through. 
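The cmd_rm change above now removes plain files with os.remove() and only falls back to a recursive tree delete for directories. A standalone sketch of that decision (shutil.rmtree stands in for the factory's really_safe_rmtree, which is not shown in this hunk):

    import os
    import shutil

    def remove_path(target):
        """Remove TARGET: os.remove() for a regular file, rmtree otherwise."""
        if os.path.isfile(target):
            os.remove(target)
        else:
            shutil.rmtree(target)
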
name = mod[0] val = mod[1] + + if name == 'contents' and val is None: + continue; + def check_node(node): if ( (name == 'contents' and node.contents == val) @@ -1397,7 +1480,7 @@ class TestFactory: return py - def path2svntest(self, path, argnr=None): + def path2svntest(self, path, argnr=None, do_remove_on_new_wc_path=True): """Given an input argument, do one hell of a path expansion on it. ARGNR is simply inserted into the resulting Target. Returns a self.Target instance. @@ -1494,12 +1577,18 @@ class TestFactory: if varname in self.other_wc_dirs: return self.other_wc_dirs[varname][1] - # else, we must still create one. - path = self.sbox.add_wc_path(suffix, do_remove) - py = "sbox.add_wc_path(" + str2py(suffix) - if not do_remove: - py += ", remove=False" - py += ')' + # see if there is a wc already in the sbox + path = self.sbox.wc_dir + '.' + suffix + if path in self.sbox.test_paths: + py = "sbox.wc_dir + '." + suffix + "'" + else: + # else, we must still create one. + path = self.sbox.add_wc_path(suffix, do_remove) + py = "sbox.add_wc_path(" + str2py(suffix) + if not do_remove: + py += ", remove=False" + py += ')' + value = [py, path] self.other_wc_dirs[varname] = [py, path] self.sorted_vars_by_pathlen = self.get_sorted_vars_by_pathlen() @@ -1523,6 +1612,11 @@ class TestFactory: def ensure_path_var(self, wc, pathelements): "Given a path in a working copy, make sure we have a variable for it." + + # special case: if a path is '.', simply use wc_dir. + if pathelements == ['.']: + return wc.py, wc.realpath + name = "_".join(pathelements) if wc.suffix is not None: diff --git a/svntest/main.py b/svntest/main.py index 1bb12d0b..db93e865 100644 --- a/svntest/main.py +++ b/svntest/main.py @@ -23,17 +23,20 @@ # under the License. ###################################################################### -import sys # for argv[] +import sys import os -import shutil # for rmtree() +import shutil import re -import stat # for ST_MODE +import stat import subprocess -import copy # for deepcopy() -import time # for time() -import traceback # for print_exc() +import time import threading -import optparse # for argument parsing +import optparse +import xml +import urllib +import logging +import hashlib +from urlparse import urlparse try: # Python >=3.0 @@ -50,6 +53,7 @@ import svntest from svntest import Failure from svntest import Skip +SVN_VER_MINOR = 8 ###################################################################### # @@ -74,6 +78,12 @@ from svntest import Skip ##################################################################### # Global stuff +default_num_threads = 5 + +# Don't try to use this before calling execute_tests() +logger = None + + class SVNProcessTerminatedBySignal(Failure): "Exception raised if a spawned process segfaulted, aborted, etc." pass @@ -101,7 +111,7 @@ class SVNRepositoryCreateFailure(Failure): # Windows specifics if sys.platform == 'win32': windows = True - file_scheme_prefix = 'file:///' + file_scheme_prefix = 'file:' _exe = '.exe' _bat = '.bat' os.environ['SVN_DBG_STACKTRACES_TO_STDERR'] = 'y' @@ -128,23 +138,6 @@ wc_author2 = 'jconstant' # use the same password as wc_author # Set C locale for command line programs os.environ['LC_ALL'] = 'C' -# This function mimics the Python 2.3 urllib function of the same name. -def pathname2url(path): - """Convert the pathname PATH from the local syntax for a path to the form - used in the path component of a URL. This does not produce a complete URL. 
- The return value will already be quoted using the quote() function.""" - - # Don't leave ':' in file://C%3A/ escaped as our canonicalization - # rules will replace this with a ':' on input. - return urllib_parse_quote(path.replace('\\', '/')).replace('%3A', ':') - -# This function mimics the Python 2.3 urllib function of the same name. -def url2pathname(path): - """Convert the path component PATH from an encoded URL to the local syntax - for a path. This does not accept a complete URL. This function uses - unquote() to decode PATH.""" - return os.path.normpath(urllib_parse_unquote(path)) - ###################################################################### # The locations of the svn, svnadmin and svnlook binaries, relative to # the only scripts that import this file right now (they live in ../). @@ -152,15 +145,21 @@ def url2pathname(path): svn_binary = os.path.abspath('../../svn/svn' + _exe) svnadmin_binary = os.path.abspath('../../svnadmin/svnadmin' + _exe) svnlook_binary = os.path.abspath('../../svnlook/svnlook' + _exe) +svnrdump_binary = os.path.abspath('../../svnrdump/svnrdump' + _exe) svnsync_binary = os.path.abspath('../../svnsync/svnsync' + _exe) svnversion_binary = os.path.abspath('../../svnversion/svnversion' + _exe) svndumpfilter_binary = os.path.abspath('../../svndumpfilter/svndumpfilter' + \ _exe) +svnmucc_binary=os.path.abspath('../../svnmucc/svnmucc' + _exe) entriesdump_binary = os.path.abspath('entries-dump' + _exe) +atomic_ra_revprop_change_binary = os.path.abspath('atomic-ra-revprop-change' + \ + _exe) +wc_lock_tester_binary = os.path.abspath('../libsvn_wc/wc-lock-tester' + _exe) +wc_incomplete_tester_binary = os.path.abspath('../libsvn_wc/wc-incomplete-tester' + _exe) # Location to the pristine repository, will be calculated from test_area_url # when we know what the user specified for --url. -pristine_url = None +pristine_greek_repos_url = None # Global variable to track all of our options options = None @@ -186,7 +185,7 @@ general_wc_dir = os.path.join(work_dir, "working_copies") temp_dir = os.path.join(work_dir, 'local_tmp') # (derivatives of the tmp dir.) -pristine_dir = os.path.join(temp_dir, "repos") +pristine_greek_repos_dir = os.path.join(temp_dir, "repos") greek_dump_dir = os.path.join(temp_dir, "greekfiles") default_config_dir = os.path.abspath(os.path.join(temp_dir, "config")) @@ -224,7 +223,7 @@ greek_state = svntest.wc.State('', { ###################################################################### # Utilities shared by the tests -def wrap_ex(func): +def wrap_ex(func, output): "Wrap a function, catch, print and ignore exceptions" def w(*args, **kwds): try: @@ -233,9 +232,9 @@ def wrap_ex(func): if ex.__class__ != Failure or ex.args: ex_args = str(ex) if ex_args: - print('EXCEPTION: %s: %s' % (ex.__class__.__name__, ex_args)) + logger.warn('EXCEPTION: %s: %s', ex.__class__.__name__, ex_args) else: - print('EXCEPTION: %s' % ex.__class__.__name__) + logger.warn('EXCEPTION: %s', ex.__class__.__name__) return w def setup_development_mode(): @@ -268,18 +267,34 @@ def get_admin_name(): else: return '.svn' +def wc_is_singledb(wcpath): + """Temporary function that checks whether a working copy directory looks + like it is part of a single-db working copy.""" + + pristine = os.path.join(wcpath, get_admin_name(), 'pristine') + if not os.path.exists(pristine): + return True + + # Now we must be looking at a multi-db WC dir or the root dir of a + # single-DB WC. Sharded 'pristine' dir => single-db, else => multi-db. 
+ for name in os.listdir(pristine): + if len(name) == 2: + return True + elif len(name) == 40: + return False + + return False + def get_start_commit_hook_path(repo_dir): "Return the path of the start-commit-hook conf file in REPO_DIR." return os.path.join(repo_dir, "hooks", "start-commit") - def get_pre_commit_hook_path(repo_dir): "Return the path of the pre-commit-hook conf file in REPO_DIR." return os.path.join(repo_dir, "hooks", "pre-commit") - def get_post_commit_hook_path(repo_dir): "Return the path of the post-commit-hook conf file in REPO_DIR." @@ -290,6 +305,16 @@ def get_pre_revprop_change_hook_path(repo_dir): return os.path.join(repo_dir, "hooks", "pre-revprop-change") +def get_pre_lock_hook_path(repo_dir): + "Return the path of the pre-lock hook script in REPO_DIR." + + return os.path.join(repo_dir, "hooks", "pre-lock") + +def get_pre_unlock_hook_path(repo_dir): + "Return the path of the pre-unlock hook script in REPO_DIR." + + return os.path.join(repo_dir, "hooks", "pre-unlock") + def get_svnserve_conf_file_path(repo_dir): "Return the path of the svnserve.conf file in REPO_DIR." @@ -305,11 +330,18 @@ def get_fsfs_format_file_path(repo_dir): return os.path.join(repo_dir, "db", "format") +def filter_dbg(lines): + for line in lines: + if not line.startswith('DBG:'): + yield line + # Run any binary, logging the command line and return code def run_command(command, error_expected, binary_mode=0, *varargs): """Run COMMAND with VARARGS. Return exit code as int; stdout, stderr as lists of lines (including line terminators). See run_command_stdin() - for details. If ERROR_EXPECTED is None, any stderr also will be printed.""" + for details. If ERROR_EXPECTED is None, any stderr output will be + printed and any stderr output or a non-zero exit code will raise an + exception.""" return run_command_stdin(command, error_expected, 0, binary_mode, None, *varargs) @@ -322,25 +354,24 @@ _safe_arg_re = re.compile(r'^[A-Za-z\d\.\_\/\-\:\@]+$') def _quote_arg(arg): """Quote ARG for a command line. - Simply surround every argument in double-quotes unless it contains + Return a quoted version of the string ARG, or just ARG if it contains only universally harmless characters. WARNING: This function cannot handle arbitrary command-line - arguments. It can easily be confused by shell metacharacters. A - perfect job would be difficult and OS-dependent (see, for example, - http://msdn.microsoft.com/library/en-us/vccelng/htm/progs_12.asp). - In other words, this function is just good enough for what we need - here.""" + arguments: it is just good enough for what we need here.""" arg = str(arg) if _safe_arg_re.match(arg): return arg + + if windows: + # Note: subprocess.list2cmdline is Windows-specific. + return subprocess.list2cmdline([arg]) else: - if os.name != 'nt': - arg = arg.replace('$', '\$') - return '"%s"' % (arg,) + # Quoting suitable for most Unix shells. + return "'" + arg.replace("'", "'\\''") + "'" -def open_pipe(command, bufsize=0, stdin=None, stdout=None, stderr=None): +def open_pipe(command, bufsize=-1, stdin=None, stdout=None, stderr=None): """Opens a subprocess.Popen pipe to COMMAND using STDIN, STDOUT, and STDERR. BUFSIZE is passed to subprocess.Popen's argument of the same name. @@ -354,15 +385,7 @@ def open_pipe(command, bufsize=0, stdin=None, stdout=None, stderr=None): if (sys.platform == 'win32') and (command[0].endswith('.py')): command.insert(0, sys.executable) - # Quote only the arguments on Windows. 
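The rewritten _quote_arg in this hunk delegates to subprocess.list2cmdline() on Windows and otherwise applies standard POSIX single-quote escaping. A standalone sketch of the Unix branch, showing the close-quote, escaped-quote, reopen-quote idiom:

    def shell_quote(arg):
        """Quote ARG for a POSIX shell by escaping embedded single quotes."""
        return "'" + str(arg).replace("'", "'\\''") + "'"

    # shell_quote("it's here") produces the characters:  'it'\''s here'
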
Later versions of subprocess, - # 2.5.2+ confirmed, don't require this quoting, but versions < 2.4.3 do. - if sys.platform == 'win32': - args = command[1:] - args = ' '.join([_quote_arg(x) for x in args]) - command = command[0] + ' ' + args - command_string = command - else: - command_string = ' '.join(command) + command_string = command[0] + ' ' + ' '.join(map(_quote_arg, command[1:])) if not stdin: stdin = subprocess.PIPE @@ -409,25 +432,19 @@ def wait_on_pipe(waiter, binary_mode, stdin=None): exit_signal = exit_code if stdout_lines is not None: - sys.stdout.write("".join(stdout_lines)) - sys.stdout.flush() + logger.info("".join(stdout_lines)) if stderr_lines is not None: - sys.stderr.write("".join(stderr_lines)) - sys.stderr.flush() - if options.verbose: - # show the whole path to make it easier to start a debugger - sys.stderr.write("CMD: %s terminated by signal %d\n" - % (command_string, exit_signal)) - sys.stderr.flush() + logger.warning("".join(stderr_lines)) + # show the whole path to make it easier to start a debugger + logger.warning("CMD: %s terminated by signal %d" + % (command_string, exit_signal)) raise SVNProcessTerminatedBySignal else: - if exit_code and options.verbose: - sys.stderr.write("CMD: %s exited with %d\n" - % (command_string, exit_code)) - sys.stderr.flush() + if exit_code: + logger.info("CMD: %s exited with %d" % (command_string, exit_code)) return stdout_lines, stderr_lines, exit_code -def spawn_process(command, bufsize=0, binary_mode=0, stdin_lines=None, +def spawn_process(command, bufsize=-1, binary_mode=0, stdin_lines=None, *varargs): """Run any binary, supplying input text, logging the command line. BUFSIZE dictates the pipe buffer size used in communication with the @@ -442,10 +459,9 @@ def spawn_process(command, bufsize=0, binary_mode=0, stdin_lines=None, raise TypeError("stdin_lines should have list type") # Log the command line - if options.verbose and not command.endswith('.py'): - sys.stdout.write('CMD: %s %s\n' % (os.path.basename(command), - ' '.join([_quote_arg(x) for x in varargs]))) - sys.stdout.flush() + if not command.endswith('.py'): + logger.info('CMD: %s %s' % (os.path.basename(command), + ' '.join([_quote_arg(x) for x in varargs]))) infile, outfile, errfile, kid = open_pipe([command] + list(varargs), bufsize) @@ -461,7 +477,7 @@ def spawn_process(command, bufsize=0, binary_mode=0, stdin_lines=None, return exit_code, stdout_lines, stderr_lines -def run_command_stdin(command, error_expected, bufsize=0, binary_mode=0, +def run_command_stdin(command, error_expected, bufsize=-1, binary_mode=0, stdin_lines=None, *varargs): """Run COMMAND with VARARGS; input STDIN_LINES (a list of strings which should include newline characters) to program via stdin - this @@ -473,10 +489,10 @@ def run_command_stdin(command, error_expected, bufsize=0, binary_mode=0, Normalize Windows line endings of stdout and stderr if not BINARY_MODE. Return exit code as int; stdout, stderr as lists of lines (including line terminators). - If ERROR_EXPECTED is None, any stderr also will be printed.""" + If ERROR_EXPECTED is None, any stderr output will be printed and any + stderr output or a non-zero exit code will raise an exception.""" - if options.verbose: - start = time.time() + start = time.time() exit_code, stdout_lines, stderr_lines = spawn_process(command, bufsize, @@ -484,25 +500,42 @@ def run_command_stdin(command, error_expected, bufsize=0, binary_mode=0, stdin_lines, *varargs) - if options.verbose: - stop = time.time() - print('