diff --git a/.github/workflows/btest.yml b/.github/workflows/btest.yml index e86b9faa..2cc6e8c5 100644 --- a/.github/workflows/btest.yml +++ b/.github/workflows/btest.yml @@ -9,23 +9,67 @@ jobs: Run-BTest: strategy: matrix: - # Even though python-3.5 is EOL it is still the minimum Python version - # required by Zeek so we should validate that our code works with it. - # - # Since that Python version is not available on ubuntu-22.04 (currently - # ubuntu-latest), we run the Linux job on ubuntu-20.04 which still has - # it. - python-version: [3, 3.5] - os: [macos-latest, ubuntu-20.04] + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] + os: [macos-latest, ubuntu-latest, windows-latest] runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v2 + - name: Set up git config + # Set autocrlf mode to false so that actions/checkout doesn't + # modify the line endings in all of the files (mostly in the + # test Baselines) to be \r\n on Windows. + run: | + git config --global core.autocrlf false + git config --global core.eol lf + - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - - name: Install dependencies + - name: Install dependencies (Windows) + if: matrix.os == 'windows-latest' + run: | + python -m pip install sphinx multiprocess + - name: Install dependencies (Linux/macOS) + if: matrix.os != 'windows-latest' run: | python -m pip install sphinx + - name: Rename wsl bash + # Something about github's runners sometimes puts WSL bash in the + # path before git bash. WSL bash has problems with permissions + # when it comes to writing files to the runner's disk, which + # causes tests to fail. This step renames WSL bash to something + # else so that git bash will execute instead. 
+ if: matrix.os == 'windows-latest' + run: | + takeown /F C:\Windows\System32\bash.exe + icacls C:\Windows\System32\bash.exe /grant administrators:F + ren C:\Windows\System32\bash.exe wsl-bash.exe - run: make test + + Test-SetupPY: + strategy: + matrix: + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] + os: [ubuntu-latest] + + runs-on: ${{ matrix.os }} + steps: + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - uses: actions/checkout@v3 + - name: "Install pip and the btest package" + run: | + python3 -m pip install --upgrade pip + python3 -m pip install setuptools wheel + python3 -m pip install . + - name: "Run btests with installed version" + run: | + cd testing + which btest + make + - name: "Test building package" + run: | + python3 setup.py sdist bdist_wheel diff --git a/.gitignore b/.gitignore index acdb9280..01b84ca0 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,5 @@ build dist MANIFEST +.idea +sphinx diff --git a/README b/README index cbe09f36..dabe1f92 100644 --- a/README +++ b/README @@ -27,20 +27,35 @@ Prerequisites BTest has the following prerequisites: -- Python version >= 3.5 (older versions may work, but are not +- Python version >= 3.7 (older versions may work, but are not well-tested). -- Bash (note that on FreeBSD and Alpine Linux, bash is not installed - by default). +- Bash. Note that on FreeBSD and Alpine Linux, bash is not installed by + default. This is also required on Windows, in the form of Git's msys2, Cygwin, + etc. BTest has the following optional prerequisites to enable additional functionality: -- Sphinx. +- Sphinx. Sphinx functionality is currently disabled on Windows. - perf (Linux only). Note that on Debian/Ubuntu, you also need to install the "linux-tools" package. +Windows Caveats +--------------- + +When running BTest on Windows, you must have a bash shell installed of some +sort. 
This can be from WSL, Cygwin, msys2, Git, or any number of other methods, +but ``bash.exe`` must be available. BTest will check for its existence at +startup and exit if it is not available. + +A minor change must be made to any configuration value that is a path list. For +example, you might set the ``PATH`` environment variable from your +btest.cfg. In these cases, you should use ``%(pathsep)s`` in the configuration +instead of bare ``:`` or ``;`` values to separate the paths. This ensures that +both POSIX and Windows systems handle the path lists correctly. + Download and Installation ========================= @@ -330,6 +345,13 @@ and 1 otherwise. Exit code 1 can also result in case of other errors. Markdown. In the output each test includes the documentation string that's defined for it through ``@TEST-DOC``. +-s , --set= + Takes a ``key=value`` argument and uses it to override a value + used during parsing of the configuration file read by btest at + startup. This can be used to override various default values + prior to parsing. Can be passed multiple times to override + different keys. See `defaults`_ for an example. + -t, --tmp-keep Does not delete any temporary files created for running the tests (including their outputs). By default, the temporary @@ -375,8 +397,8 @@ and 1 otherwise. Exit code 1 can also result in case of other errors. If the file exists already, it is overwritten. -z RETRIES, --retries=RETRIES - Retry any failed tests up to this many times to determine if - they are unstable. + Retry any failed tests up to this many times to determine if + they are unstable. .. _configuration file: configuration_ .. _configuration: @@ -410,6 +432,30 @@ include the output of external commands (e.g., xyz=`\echo test\`). Note that the backtick expansion is performed after any ``%(..)`` have already been replaced (including within the backticks). +.. _default: `defaults`_ +.. 
_defaults: + +Defaults +~~~~~~~~ + +There is a special section that can be added to the configuration file that will +set default values to be used during the parsing of other configuration +directives. For example:: + + [DEFAULT] + val=abcd + + [environment] + ENV_VALUE=%(val)s + +The configuration parser reads the keys and values from the DEFAULT section +prior to reading the other sections. It uses those keys to replace the ``%()s`` +macros as described earlier. The values stored in these keys can be overridden +at runtime by using the ``-s``/``--set`` command-line argument. For example to +override the ``val`` default above, the ``-s val=other`` argument can be +passed. In that case, ``ENV_VALUE`` would be set to ``other`` instead of +``abcd``. + .. _option: `options`_ .. _options: diff --git a/btest b/btest index 10d9c52e..7caa3720 100755 --- a/btest +++ b/btest @@ -5,21 +5,20 @@ # pylint: disable=line-too-long,too-many-lines,invalid-name,missing-function-docstring,missing-class-docstring import atexit +import binascii import configparser import copy import fnmatch import glob import io import json -import locale -import multiprocessing -import multiprocessing.managers -import multiprocessing.sharedctypes import optparse import os import os.path +import pathlib import platform as pform import re +import shlex import shutil import signal import socket @@ -33,9 +32,31 @@ import xml.dom.minidom from datetime import datetime -VERSION = "0.72-20" # Automatically filled in. +# We require the external multiprocess library on Windows due to pickling issues +# with the standard one. +if sys.platform == 'win32': + try: + import multiprocess as mp + import multiprocess.managers as mp_managers + import multiprocess.sharedctypes as mp_sharedctypes + except ImportError as error: + print( + "error: btest failed to import the 'multiprocess' library\n" + "\n" + "This library is required for btest to function on Windows. 
It can be installed from pip like:\n" + "\n" + " pip install multiprocess\n" + "\n" + "Also check the following exception output for possible alternate explanations:\n\n" + "{}: {}".format(type(error).__name__, error), + file=sys.stderr) + sys.exit(1) +else: + import multiprocessing as mp + import multiprocessing.managers as mp_managers + import multiprocessing.sharedctypes as mp_sharedctypes -using_py3 = (sys.version_info[0] == 3) +VERSION = "0.72-20" # Automatically filled in. Name = "btest" Config = None @@ -46,9 +66,35 @@ except KeyError: ConfigDefault = "btest.cfg" +def normalize_path(path): + '''Ensures that paths on Windows convert from C:\path to C:/path, to make + path handling easier in lots of other places. On non-Windows platforms + this is a no-op beyond converting things to absolute paths.''' + os_path = os.path.abspath(path) + windows_path = pathlib.PureWindowsPath(os_path) + return windows_path.as_posix() + + +def normalize_path_join(*args): + return normalize_path(os.path.join(*args)) + + +def reopen_std_file(stdfile): + '''Reopens one of the stderr or stdout files, but resets the newline + used in the output to "\n" in order to force that line ending on Windows. 
+ Without this, Windows will use "\r\n" which breaks a lot of tests.''' + return open(stdfile.fileno(), + mode=stdfile.mode, + buffering=1, + encoding=stdfile.encoding, + errors=stdfile.errors, + newline='\n', + closefd=False) + + def output(msg, nl=True, file=None): if not file: - file = sys.stderr + file = reopen_std_file(sys.__stderr__) if nl: print(msg, file=file) @@ -103,11 +149,11 @@ def platform(): def validate_version_requirement(required: str, present: str): - '''Helper function to validate that a `present` version is semantically newer or equal than a `required` version.''' + """Helper function to validate that a `present` version is semantically newer or equal than a `required` version.""" def extract_version(v: str): - '''Helper function to extract version components from a string.''' + """Helper function to extract version components from a string.""" try: - xyz = [int(x) for x in re.split(r'\.|-', v)] + xyz = [int(x) for x in re.split(r'[.-]', v)] except ValueError: error("invalid version %s: versions must contain only numeric identifiers" % v) @@ -135,7 +181,7 @@ def getOption(key, default, section="btest"): return ExpandBackticks(value) -reBackticks = re.compile(r"`(([^`]|\`)*)`") +reBackticks = re.compile(r"`(([^`]|`)*)`") def readStateFile(): @@ -158,6 +204,29 @@ def readStateFile(): return (True, tests) +def _build_win_subprocess_cmd_script(cmd, tmpdir=None): + ''' + Builds a bash file for running subprocess commands under Windows. + + :param cmd The command line to be run under bash. + :param tmpdir An optional directory path where the script file will be written. + If None, it will be written to the system's temp directory. + :return A tuple containing a file object pointing at the script file and a bash + command for running the script. 
+ ''' + tf = tempfile.NamedTemporaryFile(mode='w', + encoding='utf-8', + suffix='.sh', + dir=tmpdir, + delete=True) + fcontents = '#!/usr/bin/env bash\n%s\n' % cmd + tf.write(fcontents) + tf.flush() + + bash_cmd = ['bash.exe', '-c', normalize_path(tf.name)] + return tf, bash_cmd + + # Expand backticks in a config option value and return the result. def ExpandBackticks(origvalue): def _exec(m): @@ -165,14 +234,25 @@ def ExpandBackticks(origvalue): if not cmd: return "" - try: - pp = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE) - except OSError as e: - error("cannot execute '%s': %s" % (cmd, e)) + tf = None + if sys.platform == 'win32': + try: + tf, bash_cmd = _build_win_subprocess_cmd_script(cmd, None) + pp = subprocess.Popen(bash_cmd, stdout=subprocess.PIPE) + except OSError as e: + error("cannot execute '%s': %s" % (cmd, e)) + else: + try: + pp = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE) + except OSError as e: + error("cannot execute '%s': %s" % (cmd, e)) out = pp.communicate()[0] out = out.decode() + if tf: + tf.close() + return out.strip() value = reBackticks.sub(_exec, origvalue) @@ -192,6 +272,14 @@ def cpItemsNoDefaults(self, section): except KeyError: raise configparser.NoSectionError(section) + # Override any of the defaults with ones that we read from the command-line + # options before expanding macros below. + cfg_defaults = self.defaults() + if Options.defaults: + for d in Options.defaults: + k, v = d.split('=', 1) + cfg_defaults[k] = v.strip("\'\"") + result = {} for (key, rawvalue) in items: @@ -206,6 +294,22 @@ def cpItemsNoDefaults(self, section): return result.items() +def getcfgparser(defaults): + configparser.ConfigParser.itemsNoDefaults = cpItemsNoDefaults + + cfg = configparser.ConfigParser() + + # We make all key lookups case-sensitive to avoid aliasing of + # case-sensitive environment variables. 
+ cfg.optionxform = lambda optionstr: optionstr + + default_section = cfg.defaults() + for key, value in defaults.items(): + default_section[key] = value + + return cfg + + # Replace environment variables in string. def replaceEnvs(s): def replace_with_env(m): @@ -238,7 +342,18 @@ def runTestCommandLine(cmdline, measure_time, **kwargs): def runSubprocess(*args, **kwargs): def child(q): try: - subprocess.check_call(*args, **kwargs) + if sys.platform == 'win32': + tmpdir = normalize_path(kwargs.get('cwd', '')) + if len(args) > 1: + cmd = shlex.join(args) + else: + cmd = args[0] + + tf, bash_cmd = _build_win_subprocess_cmd_script(cmd, tmpdir) + with tf: + subprocess.check_call(bash_cmd, **kwargs) + else: + subprocess.check_call(*args, **kwargs) success = True rc = 0 @@ -253,8 +368,8 @@ def runSubprocess(*args, **kwargs): q.put([success, rc]) try: - q = multiprocessing.Queue() - p = multiprocessing.Process(target=child, args=(q, )) + q = mp.Queue() + p = mp.Process(target=child, args=(q, )) p.start() result = q.get() p.join() @@ -262,27 +377,11 @@ def runSubprocess(*args, **kwargs): except KeyboardInterrupt: # Bail out here directly as otherwise we'd get a bunch of errors. # from all the childs. - os._exit(1) + sys.exit(1) return result -def getcfgparser(defaults): - configparser.ConfigParser.itemsNoDefaults = cpItemsNoDefaults - - cfg = configparser.ConfigParser() - - # We make all key lookups case-sensitive to avoid aliasing of - # case-sensitive environment variables. - cfg.optionxform = lambda optionstr: optionstr - - default_section = cfg.defaults() - for key, value in defaults.items(): - default_section[key] = value - - return cfg - - # Description of an alternative configuration. class Alternative: DEFAULT = "default" @@ -307,7 +406,7 @@ class Abort(Exception): # Main class distributing the work across threads. 
-class TestManager(multiprocessing.managers.SyncManager): +class TestManager(mp_managers.SyncManager): def __init__(self, *args, **kwargs): super(TestManager, self).__init__(*args, **kwargs) @@ -327,14 +426,27 @@ class TestManager(multiprocessing.managers.SyncManager): def run(self, tests, output_handler): self.start() + mgr_data = self.dict() + mgr_data['Alternatives'] = Alternatives + mgr_data['BaselineDirs'] = BaselineDirs + mgr_data['Initializer'] = Initializer + mgr_data['Finalizer'] = Finalizer + mgr_data['Teardown'] = Teardown + mgr_data['Options'] = Options + mgr_data['TestBase'] = TestBase + mgr_data['TmpDir'] = TmpDir + mgr_data['RE_INPUT'] = RE_INPUT + mgr_data['RE_DIR'] = RE_DIR + mgr_data['RE_ENV'] = RE_ENV + output_handler.prepare(self) self._output_handler = output_handler self._lock = self.RLock() - self._succeeded = multiprocessing.sharedctypes.RawValue('i', 0) - self._failed = multiprocessing.sharedctypes.RawValue('i', 0) - self._failed_expected = multiprocessing.sharedctypes.RawValue('i', 0) - self._unstable = multiprocessing.sharedctypes.RawValue('i', 0) - self._skipped = multiprocessing.sharedctypes.RawValue('i', 0) + self._succeeded = mp_sharedctypes.RawValue('i', 0) + self._failed = mp_sharedctypes.RawValue('i', 0) + self._failed_expected = mp_sharedctypes.RawValue('i', 0) + self._unstable = mp_sharedctypes.RawValue('i', 0) + self._skipped = mp_sharedctypes.RawValue('i', 0) self._tests = self.list(tests) self._failed_tests = self.list([]) self._num_tests = len(self._tests) @@ -368,12 +480,13 @@ class TestManager(multiprocessing.managers.SyncManager): # processes post-btest-exit when using CTRL-C during the input # stage. if Options.mode == "UPDATE_INTERACTIVE": - self.threadRun(0) + self.threadRun(0, mgr_data) else: try: + # Create a set of processes for running each of the tests. This isn't the actual + # zeek processes, but runner processes executing individual test commands. 
for i in range(Options.threads): - t = multiprocessing.Process(name="#%d" % (i + 1), - target=lambda: self.threadRun(i)) + t = mp.Process(name="#%d" % (i + 1), target=lambda: self.threadRun(i, mgr_data)) t.start() threads += [t] @@ -412,20 +525,35 @@ class TestManager(multiprocessing.managers.SyncManager): count = self._succeeded.value + self._failed.value + self._skipped.value return 100.0 * count / self._num_tests - def threadRun(self, thread_num): + # Worker method for each of the "threads" specified by the "-j" argument passed + # at run time. This basically segments the list of tests into chunks and runs + # until we're out of chunks. + def threadRun(self, thread_num, mgr_data): + # This should prevent the child processes from receiving SIGINT signals and + # let the KeyboardInterrupt handler in the manager's run() method handle + # those. signal.signal(signal.SIGINT, signal.SIG_IGN) all_tests = [] + # Globals get lost moving from the parent to the child on Windows, so we need to use + # the data proxied from the manager to rebuild the dict of globals before continuing. + if sys.platform == 'win32': + for (global_key, global_value) in mgr_data.items(): + globals()[global_key] = global_value + while True: - tests = self.nextTests(thread_num) - if tests is None: + # Pull the next test from the list that was built at startup. This may + # be more than one test if there were alternatives requested in the + # arguments passed to btest. + thread_tests = self.nextTests(thread_num) + if thread_tests is None: # No more work for us. return - all_tests += tests + all_tests += thread_tests - for t in tests: + for t in thread_tests: t.run(self) self.testReplayOutput(t) @@ -449,7 +577,7 @@ class TestManager(multiprocessing.managers.SyncManager): if not t: continue - if t.serialize and hash(t.serialize) % Options.threads != thread_num: + if t.serialize and t.serialize_hash() % Options.threads != thread_num: # Not ours. 
continue @@ -710,8 +838,10 @@ class CmdSeq: class Test(object): def __init__(self, file=None, directory=None): # Allow dir to be directly defined - if file is not None: self.dir = os.path.abspath(os.path.dirname(file)) - else: self.dir = directory + if file is not None: + self.dir = normalize_path(os.path.dirname(file)) + else: + self.dir = normalize_path(directory) self.alternative = None self.baselines = [] @@ -755,6 +885,12 @@ class Test(object): def __lt__(self, value): return self.name and value.name and self.name < value.name + def serialize_hash(self): + if not self.serialize: + return 0 + + return int(binascii.crc32(self.serialize.encode('utf-8'))) + def displayName(self): name = self.name @@ -821,7 +957,7 @@ class Test(object): name = os.path.relpath(basename, TestBase) (name, ext) = os.path.splitext(name) - name = name.replace("/", ".") + name = name.replace(os.sep, ".") while name.startswith("."): name = name[1:] @@ -885,7 +1021,9 @@ class Test(object): # Copies all control information over to a new Test but replacing the test's # content with a new one. def clone(self, content=None, increment=True): - clone = Test("") + # Cloning the class like this ensures that the globals continue to exist in + # cloned object just as they are in the original object. 
+ clone = self.__class__("") clone.number = self.number clone.basename = self.basename clone.name = self.basename @@ -969,10 +1107,12 @@ class Test(object): self.mgr = mgr mgr.testStart(self) - self.tmpdir = os.path.abspath(os.path.join(TmpDir, self.name)) - self.diag = os.path.join(self.tmpdir, ".diag") - self.verbose = os.path.join(self.tmpdir, ".verbose") - self.baselines = [os.path.abspath(os.path.join(d, self.name)) for d in BaselineDirs] + self.tmpdir = normalize_path_join(TmpDir, self.name) + self.diag = normalize_path_join(self.tmpdir, ".diag") + self.verbose = normalize_path_join(self.tmpdir, ".verbose") + self.baselines = [] + for d in BaselineDirs: + self.baselines.append(normalize_path_join(d, self.name)) self.diagmsgs = [] self.utime = -1 self.utime_base = self.mgr.testTimingBaseline(self) @@ -993,7 +1133,7 @@ class Test(object): if subdir != "": mkdir(subdir) try: - ffile = open(fname, "w") + ffile = open(fname, "w", newline="\n") except IOError as e: error("cannot write test's additional file '%s'" % fname) @@ -1010,8 +1150,8 @@ class Test(object): error("cannot copy %s: %s" % (src, e)) for (file, content) in self.contents: - localfile = os.path.join(self.tmpdir, os.path.basename(file)) - out = io.open(localfile, "w", encoding="utf-8") + localfile = normalize_path_join(self.tmpdir, os.path.basename(file)) + out = io.open(localfile, "w", encoding="utf-8", newline="\n") try: for line in content: @@ -1022,8 +1162,8 @@ class Test(object): out.close() self.log = open(os.path.join(self.tmpdir, ".log"), "w", encoding="utf-8") - self.stdout = open(os.path.join(self.tmpdir, ".stdout"), "w", encoding="utf-8") - self.stderr = open(os.path.join(self.tmpdir, ".stderr"), "w", encoding="utf-8") + self.stdout = open(os.path.join(self.tmpdir, ".stdout"), "a", encoding="utf-8") + self.stderr = open(os.path.join(self.tmpdir, ".stderr"), "a", encoding="utf-8") for cmd in self.requires: (success, rc) = self.execute(cmd, apply_alternative=self.alternative) @@ -1031,7 
+1171,7 @@ class Test(object): if not success: self.mgr.testSkipped(self) if not Options.tmps: - self.rmTmp() + self.rmTmp(with_close=True) self.finish() return @@ -1085,7 +1225,13 @@ class Test(object): # first. This processes teardowns for those sequences as # needed, and skips them when nothing was actually run in a # CmdSeq. - if isinstance(cmd, CmdSeq): + # + # The use of isinstance() to determine "is a CmdSeq" is + # dangerous since e.g. the dill serializer creates a + # different type upon un-serializing, failing + # isinstance(). So we take the class name as a sufficient + # signal. + if type(cmd).__name__ == 'CmdSeq': need_teardown |= run_cmdseq(cmd) continue @@ -1168,7 +1314,7 @@ class Test(object): self.mgr.testSucceeded(self) if not Options.tmps and self.reruns == 0: - self.rmTmp() + self.rmTmp(with_close=True) self.finish() @@ -1214,12 +1360,12 @@ class Test(object): env = alt.envs - localfile = os.path.join(self.tmpdir, os.path.basename(cmd.file)) + localfile = normalize_path_join(self.tmpdir, os.path.basename(cmd.file)) if filter_cmd and cmd.expect_success: # Do not apply filter if we expect failure. # This is not quite correct as it does not necessarily need to be # the %INPUT file which we are filtering ... 
- filtered = os.path.join(self.tmpdir, "filtered-%s" % os.path.basename(localfile)) + filtered = normalize_path_join(self.tmpdir, "filtered-%s" % os.path.basename(localfile)) filter = CmdLine("%s %s %s" % (filter_cmd, localfile, filtered), True, 1, "") @@ -1278,22 +1424,29 @@ class Test(object): self.diagmsgs += ["'%s' failed unexpectedly (exit code %s)" % (cmdline, rc)] return (False, rc) - def rmTmp(self): + def rmTmp(self, *, with_close=False): + if with_close: + self.log.close() + self.stdout.close() + self.stderr.close() + try: if os.path.isfile(self.tmpdir): os.remove(self.tmpdir) if os.path.isdir(self.tmpdir): - subprocess.call("rm -rf %s 2>/dev/null" % self.tmpdir, shell=True) + subprocess.call(['rm', '-rf', self.tmpdir], stderr=subprocess.DEVNULL) except OSError as e: error("cannot remove tmp directory %s: %s" % (self.tmpdir, e)) # Prepares the environment for the child processes. - def prepareEnv(self, cmd, addl={}): + def prepareEnv(self, cmd, addl=None): + if addl is None: + addl = {} env = copy.deepcopy(os.environ) - env["TEST_BASELINE"] = ":".join(self.baselines) + env["TEST_BASELINE"] = os.pathsep.join(self.baselines) env["TEST_DIAGNOSTICS"] = self.diag env["TEST_MODE"] = Options.mode.upper() env["TEST_NAME"] = self.name @@ -1372,13 +1525,13 @@ class OutputHandler: a form suitable to prefix output with. With a single thread, returns the empty string.""" if self.options().threads > 1: - return "[%s]" % multiprocessing.current_process().name + return "[%s]" % mp.current_process().name else: return "" def _output(self, msg, nl=True, file=None): if not file: - file = sys.stderr + file = reopen_std_file(sys.__stderr__) if nl: print(msg, file=file) @@ -1567,6 +1720,7 @@ class Console(OutputHandler): def __init__(self, options): OutputHandler.__init__(self, options) self.show_all = True + self.stdout = reopen_std_file(sys.__stdout__) def testStart(self, test): msg = "[%3d%%] %s ..." 
% (test.mgr.percentage(), test.displayName()) @@ -1602,18 +1756,18 @@ class Console(OutputHandler): self._consoleOutput(test, msg, self.show_all) def finished(self): - sys.stdout.flush() + self.stdout.flush() def _consoleOutput(self, test, msg, sticky): self._consoleWrite(test, msg, sticky) def _consoleWrite(self, test, msg, sticky): - sys.stdout.write(msg.strip() + " ") + self.stdout.write(msg.strip() + " ") if sticky: - sys.stdout.write("\n") + self.stdout.write("\n") - sys.stdout.flush() + self.stdout.flush() class CompactConsole(Console): @@ -1632,16 +1786,17 @@ class CompactConsole(Console): def __init__(self, options): Console.__init__(self, options) self.show_all = False + self.stdout = reopen_std_file(sys.__stdout__) def cleanup(): - sys.stdout.write(self.CursorOn) + self.stdout.write(self.CursorOn) atexit.register(cleanup) def testStart(self, test): test.console_last_line = None self._consoleOutput(test, "", False) - sys.stdout.write(self.CursorOff) + self.stdout.write(self.CursorOff) def testProgress(self, test, msg): """Called when a test signals having made progress.""" @@ -1652,10 +1807,10 @@ class CompactConsole(Console): test.console_last_line = None def finished(self): - sys.stdout.write(self.EraseToEndOfLine) - sys.stdout.write("\r") - sys.stdout.write(self.CursorOn) - sys.stdout.flush() + self.stdout.write(self.EraseToEndOfLine) + self.stdout.write("\r") + self.stdout.write(self.CursorOn) + self.stdout.flush() def _consoleOutput(self, test, msg, sticky): line = "[%3d%%] %s ..." 
% (test.mgr.percentage(), test.displayName()) @@ -1667,20 +1822,20 @@ class CompactConsole(Console): self._consoleWrite(test, line, sticky) def _consoleAugment(self, test, msg): - sys.stdout.write(self.EraseToEndOfLine) - sys.stdout.write(" %s" % msg.strip()) - sys.stdout.write("\r%s" % test.console_last_line) - sys.stdout.flush() + self.stdout.write(self.EraseToEndOfLine) + self.stdout.write(" %s" % msg.strip()) + self.stdout.write("\r%s" % test.console_last_line) + self.stdout.flush() def _consoleWrite(self, test, msg, sticky): - sys.stdout.write(chr(27) + '[2K') - sys.stdout.write("\r%s" % msg.strip()) + self.stdout.write(chr(27) + '[2K') + self.stdout.write("\r%s" % msg.strip()) if sticky: - sys.stdout.write("\n") + self.stdout.write("\n") test.console_last_line = None - sys.stdout.flush() + self.stdout.flush() class Brief(OutputHandler): @@ -1830,6 +1985,7 @@ class SphinxOutput(OutputHandler): OutputHandler.__init__(self, options) self._output = None + self._part = None try: self._rst_output = os.environ["BTEST_RST_OUTPUT"] @@ -1855,7 +2011,7 @@ class SphinxOutput(OutputHandler): if not self._output: return - out = open(self._output, "a") + out = open(self._output, "a", newline="\n") print("\n.. 
code-block:: none ", file=out) print("\n ERROR executing test '%s' (part %s)\n" % (test.displayName(), self._part), @@ -1872,7 +2028,7 @@ class SphinxOutput(OutputHandler): if os.path.isfile(f): print(" % cat " + os.path.basename(f), file=out) - for line in open(f): + for line in open(f, newline="\n"): print(" %s" % line.strip(), file=out) print(file=out) @@ -1901,6 +2057,7 @@ class XMLReport(OutputHandler): self._file = xmlfile self._start = time.time() self._timestamp = datetime.now().isoformat() + self._results = None def prepare(self, mgr): self._results = mgr.list([]) @@ -1911,7 +2068,8 @@ class XMLReport(OutputHandler): def testCommand(self, test, cmdline): pass - def makeTestCaseElement(self, doc, testsuite, name, duration): + @staticmethod + def makeTestCaseElement(doc, testsuite, name, duration): parts = name.split('.') if len(parts) > 1: classname = ".".join(parts[:-1]) @@ -1928,7 +2086,8 @@ class XMLReport(OutputHandler): return e - def getContext(self, test, context_file): + @staticmethod + def getContext(test, context_file): context = "" for line in test.diagmsgs: context += " % " + line + "\n" @@ -1939,7 +2098,7 @@ class XMLReport(OutputHandler): if os.path.isfile(f): context += " % cat " + os.path.basename(f) + "\n" - for line in open(f): + for line in open(f, newline="\n"): context += " " + line.strip() + "\n" return context @@ -2021,6 +2180,7 @@ class ChromeTracing(OutputHandler): def __init__(self, options, tracefile): OutputHandler.__init__(self, options) self._file = tracefile + self._results = None def prepare(self, mgr): self._results = mgr.list([]) @@ -2029,7 +2189,7 @@ class ChromeTracing(OutputHandler): self._results.append({ "name": test.name, "ts": test.start * 1e6, - "tid": multiprocessing.current_process().pid, + "tid": mp.current_process().pid, "pid": 1, "ph": "X", "cat": "test", @@ -2041,6 +2201,65 @@ class ChromeTracing(OutputHandler): self._file.close() +def create_output_handler(options): + + output_handlers = [] + + if 
options.verbose: + output_handlers += [Verbose(options, )] + + elif options.brief: + output_handlers += [Brief(options, )] + + else: + if sys.stdout.isatty(): + if options.show_all: + output_handlers += [Console(options, )] + else: + output_handlers += [CompactConsole(options, )] + else: + output_handlers += [Standard(options, )] + + if options.diagall: + output_handlers += [Diag(options, True, None)] + + elif options.diag: + output_handlers += [Diag(options, False, None)] + + if options.diagfile: + try: + diagfile = open(options.diagfile, "w", 1, newline="\n") + output_handlers += [Diag(options, options.diagall, diagfile)] + + except IOError as e: + print("cannot open %s: %s" % (options.diagfile, e), file=sys.stderr) + + if options.sphinx: + if sys.platform == 'win32': + print("Sphinx support is disabled on Windows", file=sys.stderr) + sys.exit(1) + + output_handlers += [SphinxOutput(options)] + + if options.xmlfile: + try: + xmlfile = open(options.xmlfile, "w", 1, newline="\n") + output_handlers += [XMLReport(options, xmlfile)] + + except IOError as e: + print("cannot open %s: %s" % (options.xmlfile, e), file=sys.stderr) + + if options.tracefile: + try: + tracefile = open(options.tracefile, "w", 1, newline="\n") + output_handlers += [ChromeTracing(options, tracefile)] + + except IOError as e: + print("cannot open %s: %s" % (options.tracefile, e), file=sys.stderr) + + return Forwarder(options, output_handlers) + + ### Timing measurements. @@ -2132,7 +2351,7 @@ def findTests(paths, expand_globs=False): if os.path.isdir(path) and os.path.basename(path) in ignore_dirs: continue - ignores = [os.path.join(path, dir) for dir in ignore_dirs] + ignores = [normalize_path_join(path, dir) for dir in ignore_dirs] m = RE_PART.match(rpath) if m: @@ -2163,9 +2382,10 @@ def findTests(paths, expand_globs=False): tests += readTestFile(os.path.join(dirpath, file)) # Don't recurse into these. 
- for (dir, path) in [(dir, os.path.join(dirpath, dir)) for dir in dirnames]: + for (dir, dir_path) in [(dir, normalize_path_join(dirpath, dir)) + for dir in dirnames]: for skip in ignores: - if path == skip: + if dir_path == skip: dirnames.remove(dir) else: @@ -2305,7 +2525,7 @@ def readTestFile(filename): def jOption(option, _, __, parser): - val = multiprocessing.cpu_count() + val = mp.cpu_count() if parser.rargs and not parser.rargs[0].startswith('-'): try: @@ -2369,6 +2589,196 @@ def outputDocumentation(tests, fmt): print() +def parse_options(): + optparser = optparse.OptionParser(usage="%prog [options] ", version=VERSION) + optparser.add_option("-U", + "--update-baseline", + action="store_const", + dest="mode", + const="UPDATE", + help="create a new baseline from the tests' output") + optparser.add_option("-u", + "--update-interactive", + action="store_const", + dest="mode", + const="UPDATE_INTERACTIVE", + help="interactively asks whether to update baseline for a failed test") + optparser.add_option("-d", + "--diagnostics", + action="store_true", + dest="diag", + default=False, + help="show diagnostic output for failed tests") + optparser.add_option("-D", + "--diagnostics-all", + action="store_true", + dest="diagall", + default=False, + help="show diagnostic output for ALL tests") + optparser.add_option( + "-f", + "--file-diagnostics", + action="store", + type="string", + dest="diagfile", + default="", + help="write diagnostic output for failed tests into file; if file exists, it is overwritten" + ) + optparser.add_option("-v", + "--verbose", + action="store_true", + dest="verbose", + default=False, + help="show commands as they are executed") + optparser.add_option("-w", + "--wait", + action="store_true", + dest="wait", + default=False, + help="wait for after each failed (with -d) or all (with -D) tests") + optparser.add_option("-b", + "--brief", + action="store_true", + dest="brief", + default=False, + help="outputs only failed tests") + 
optparser.add_option("-c", + "--config", + action="store", + type="string", + dest="config", + default=ConfigDefault, + help="configuration file") + optparser.add_option("-t", + "--tmp-keep", + action="store_true", + dest="tmps", + default=False, + help="do not delete tmp files created for running tests") + optparser.add_option( + "-j", + "--jobs", + action="callback", + callback=jOption, + dest="threads", + default=1, + help="number of threads running tests in parallel; with no argument will use all CPUs") + optparser.add_option("-g", + "--groups", + action="store", + type="string", + dest="groups", + default="", + help="execute only tests of given comma-separated list of groups") + optparser.add_option("-r", + "--rerun", + action="store_true", + dest="rerun", + default=False, + help="execute commands for tests that failed last time") + optparser.add_option("-q", + "--quiet", + action="store_true", + dest="quiet", + default=False, + help="suppress information output other than about failed tests") + optparser.add_option( + "-x", + "--xml", + action="store", + type="string", + dest="xmlfile", + default="", + help= + "write a report of test results in JUnit XML format to file; if file exists, it is overwritten" + ) + optparser.add_option("-a", + "--alternative", + action="store", + type="string", + dest="alternatives", + default=None, + help="activate given alternative") + optparser.add_option( + "-S", + "--sphinx", + action="store_true", + dest="sphinx", + default=False, + help="indicates that we're running from inside Sphinx; for internal purposes") + optparser.add_option("-T", + "--update-times", + action="store_true", + dest="update_times", + default=False, + help="create a new timing baseline for tests being measured") + optparser.add_option("-R", + "--documentation", + action="store", + type="choice", + dest="doc", + choices=("rst", "md"), + metavar="format", + default=None, + help="Output documentation for tests, supported formats: rst, md") + 
optparser.add_option( + "-A", + "--show-all", + action="store_true", + default=False, + help= + "For console output, show one-liners for passing/skipped tests in addition to any failing ones" + ) + optparser.add_option( + "-z", + "--retries", + action="store", + dest="retries", + type="int", + default=0, + help="Retry failed tests this many times to determine if they are unstable") + optparser.add_option( + "--trace-file", + action="store", + dest="tracefile", + default="", + help="write Chrome tracing file to file; if file exists, it is overwritten") + optparser.add_option("-F", + "--abort-on-failure", + action="store_true", + dest="abort_on_failure", + help="terminate after first test failure") + optparser.add_option("-l", + "--list", + action="store_true", + dest="list", + default=False, + help="list available tests instead of executing them") + optparser.add_option( + '-s', + '--set', + action="append", + dest="defaults", + default=[], + help= + "Override default key used in btest.cfg with another value. Can be specified multiple times." 
+ ) + + optparser.set_defaults(mode="TEST") + + (options, parsed_args) = optparser.parse_args() + + # Update-interactive mode implies single-threaded operation + if options.mode == "UPDATE_INTERACTIVE" and options.threads > 1: + warning("ignoring requested parallelism in interactive-update mode") + options.threads = 1 + + if not os.path.exists(options.config): + error("configuration file '%s' not found" % options.config) + + return options, parsed_args + + ### Main if __name__ == '__main__': @@ -2377,531 +2787,340 @@ if __name__ == '__main__': pyver_maj = sys.version_info[0] pyver_min = sys.version_info[1] - if (pyver_maj == 3 and pyver_min >= 8) or pyver_maj > 3: - multiprocessing.set_start_method('fork') - -optparser = optparse.OptionParser(usage="%prog [options] ", version=VERSION) -optparser.add_option("-U", - "--update-baseline", - action="store_const", - dest="mode", - const="UPDATE", - help="create a new baseline from the tests' output") -optparser.add_option("-u", - "--update-interactive", - action="store_const", - dest="mode", - const="UPDATE_INTERACTIVE", - help="interactively asks whether to update baseline for a failed test") -optparser.add_option("-d", - "--diagnostics", - action="store_true", - dest="diag", - default=False, - help="show diagnostic output for failed tests") -optparser.add_option("-D", - "--diagnostics-all", - action="store_true", - dest="diagall", - default=False, - help="show diagnostic output for ALL tests") -optparser.add_option( - "-f", - "--file-diagnostics", - action="store", - type="string", - dest="diagfile", - default="", - help="write diagnostic output for failed tests into file; if file exists, it is overwritten") -optparser.add_option("-v", - "--verbose", - action="store_true", - dest="verbose", - default=False, - help="show commands as they are executed") -optparser.add_option("-w", - "--wait", - action="store_true", - dest="wait", - default=False, - help="wait for after each failed (with -d) or all (with -D) tests") 
-optparser.add_option("-b", - "--brief", - action="store_true", - dest="brief", - default=False, - help="outputs only failed tests") -optparser.add_option("-c", - "--config", - action="store", - type="string", - dest="config", - default=ConfigDefault, - help="configuration file") -optparser.add_option("-t", - "--tmp-keep", - action="store_true", - dest="tmps", - default=False, - help="do not delete tmp files created for running tests") -optparser.add_option( - "-j", - "--jobs", - action="callback", - callback=jOption, - dest="threads", - default=1, - help="number of threads running tests in parallel; with no argument will use all CPUs") -optparser.add_option("-g", - "--groups", - action="store", - type="string", - dest="groups", - default="", - help="execute only tests of given comma-separated list of groups") -optparser.add_option("-r", - "--rerun", - action="store_true", - dest="rerun", - default=False, - help="execute commands for tests that failed last time") -optparser.add_option("-q", - "--quiet", - action="store_true", - dest="quiet", - default=False, - help="suppress information output other than about failed tests") -optparser.add_option( - "-x", - "--xml", - action="store", - type="string", - dest="xmlfile", - default="", - help= - "write a report of test results in JUnit XML format to file; if file exists, it is overwritten") -optparser.add_option("-a", - "--alternative", - action="store", - type="string", - dest="alternatives", - default=None, - help="activate given alternative") -optparser.add_option("-S", - "--sphinx", - action="store_true", - dest="sphinx", - default=False, - help="indicates that we're running from inside Sphinx; for internal purposes") -optparser.add_option("-T", - "--update-times", - action="store_true", - dest="update_times", - default=False, - help="create a new timing baseline for tests being measured") -optparser.add_option("-R", - "--documentation", - action="store", - type="choice", - dest="doc", - choices=("rst", "md"), - 
metavar="format", - default=None, - help="Output documentation for tests, supported formats: rst, md") -optparser.add_option( - "-A", - "--show-all", - action="store_true", - default=False, - help= - "For console output, show one-liners for passing/skipped tests in addition to any failing ones") -optparser.add_option("-z", - "--retries", - action="store", - dest="retries", - type="int", - default=0, - help="Retry failed tests this many times to determine if they are unstable") -optparser.add_option("--trace-file", - action="store", - dest="tracefile", - default="", - help="write Chrome tracing file to file; if file exists, it is overwritten") -optparser.add_option("-F", - "--abort-on-failure", - action="store_true", - dest="abort_on_failure", - help="terminate after first test failure") -optparser.add_option("-l", - "--list", - action="store_true", - dest="list", - default=False, - help="list available tests instead of executing them") - -optparser.set_defaults(mode="TEST") -(Options, args) = optparser.parse_args() - -# Update-interactive mode implies single-threaded operation -if Options.mode == "UPDATE_INTERACTIVE" and Options.threads > 1: - warning("ignoring requested parallelism in interactive-update mode") - Options.threads = 1 - -if not os.path.exists(Options.config): - error("configuration file '%s' not found" % Options.config) - -# The defaults come from environment variables, plus a few additional items. -defaults = {} -# Changes to defaults should not change os.environ -defaults.update(os.environ) -defaults["default_path"] = os.environ["PATH"] - -dirname = os.path.dirname(Options.config) -if not dirname: - dirname = os.getcwd() - -# If the BTEST_TEST_BASE envirnoment var is set, we'll use that as the testbase. -# If not, we'll use the current directory. 
-TestBase = os.path.abspath(os.environ.get("BTEST_TEST_BASE", dirname)) -defaults["testbase"] = TestBase -defaults["baselinedir"] = os.path.abspath( - os.environ.get("BTEST_BASELINE_DIR", os.path.join(TestBase, "Baseline"))) - -# Parse our config -Config = getcfgparser(defaults) -Config.read(Options.config, encoding="utf-8") - -defaults["baselinedir"] = getOption("BaselineDir", defaults["baselinedir"]) - -min_version = getOption("MinVersion", None) -if min_version: - validate_version_requirement(min_version, VERSION) - -if Options.alternatives: - # Preprocess to split into list. "-" refers to the default setup, as a - # shorthand for "default", to allow combination with select alternatives. - Options.alternatives = [alt.strip() for alt in Options.alternatives.split(",")] - Options.alternatives = [ - Alternative.DEFAULT if alt == "-" else alt for alt in Options.alternatives - ] - - # Helper function that, if an option wasn't explicitly specified as an - # environment variable, checks if an alternative sets its through - # its own environment section. If so, we make that value our new default. - # If multiple alternatives set it, we pick the value from the first. - def get_env_from_alternative(env, opt, default, transform=None): - for tag in Options.alternatives: - value = getOption(env, None, section="environment-%s" % tag) - if value is not None: - if transform: - value = transform(value) + if sys.platform == 'win32': + # The "fork" method doesn't exist at all on Windows, so force over to + # "spawn" instead. + mp.set_start_method('spawn') - defaults[opt] = value + # Double-check that `bash.exe` exists and is executable, since it's + # required for pretty much anything here to work on Windows. Note we're + # doing this prior to parsing the config file because it's required for + # backtick-expansion there as well. 
+ try: + subprocess.call(['bash.exe', '--version'], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL) + except FileNotFoundError: + print("error: bash.exe is required to be in your PATH to run BTest.", file=sys.stderr) + sys.exit(1) - # At this point, our defaults have changed, so we - # reread the configuration. - new_config = getcfgparser(defaults) - new_config.read(Options.config) - return new_config, value + elif (pyver_maj == 3 and pyver_min >= 8) or pyver_maj > 3: + mp.set_start_method('fork') - return Config, default + (Options, args) = parse_options() - (Config, TestBase) = get_env_from_alternative("BTEST_TEST_BASE", "testbase", TestBase, - lambda x: os.path.abspath(x)) - # Need to update BaselineDir - it may be interpolated from testbase. - defaults["baselinedir"] = getOption("BaselineDir", defaults["baselinedir"]) - (Config, _) = get_env_from_alternative("BTEST_BASELINE_DIR", "baselinedir", None) + # The defaults come from environment variables, plus a few additional items. + defaults = {} + # Changes to defaults should not change os.environ + defaults.update(os.environ) + defaults["default_path"] = os.environ["PATH"] -os.chdir(TestBase) + dirname = os.path.dirname(Options.config) + if not dirname: + dirname = os.getcwd() -if Options.sphinx: - Options.quiet = True + # If the BTEST_TEST_BASE envirnoment var is set, we'll use that as the testbase. + # If not, we'll use the current directory. + TestBase = normalize_path(os.environ.get("BTEST_TEST_BASE", dirname)) + defaults["testbase"] = TestBase + defaults["baselinedir"] = normalize_path( + os.environ.get("BTEST_BASELINE_DIR", os.path.join(TestBase, "Baseline"))) + defaults['pathsep'] = os.pathsep -if Options.quiet: - Options.brief = True + # Parse our config + Config = getcfgparser(defaults) + Config.read(Options.config, encoding="utf-8") -# Determine output handlers to use. 
+ defaults["baselinedir"] = getOption("BaselineDir", defaults["baselinedir"]) -output_handlers = [] + min_version = getOption("MinVersion", None) + if min_version: + validate_version_requirement(min_version, VERSION) -if Options.verbose: - output_handlers += [Verbose(Options, )] + if Options.alternatives: + # Preprocess to split into list. "-" refers to the default setup, as a + # shorthand for "default", to allow combination with select alternatives. + Options.alternatives = [alt.strip() for alt in Options.alternatives.split(",")] + Options.alternatives = [ + Alternative.DEFAULT if alt == "-" else alt for alt in Options.alternatives + ] -elif Options.brief: - output_handlers += [Brief(Options, )] + # Helper function that, if an option wasn't explicitly specified as an + # environment variable, checks if an alternative sets its through + # its own environment section. If so, we make that value our new default. + # If multiple alternatives set it, we pick the value from the first. + def get_env_from_alternative(env, opt, default, transform=None): + for tag in Options.alternatives: + value = getOption(env, None, section="environment-%s" % tag) + if value is not None: + if transform: + value = transform(value) -else: - if sys.stdout.isatty(): - if Options.show_all: - output_handlers += [Console(Options, )] - else: - output_handlers += [CompactConsole(Options, )] - else: - output_handlers += [Standard(Options, )] + defaults[opt] = value -if Options.diagall: - output_handlers += [Diag(Options, True, None)] + # At this point, our defaults have changed, so we + # reread the configuration. 
+ new_config = getcfgparser(defaults) + new_config.read(Options.config) + return new_config, value -elif Options.diag: - output_handlers += [Diag(Options, False, None)] + return Config, default -if Options.diagfile: - try: - diagfile = open(Options.diagfile, "w", 1) - output_handlers += [Diag(Options, Options.diagall, diagfile)] + (Config, TestBase) = get_env_from_alternative("BTEST_TEST_BASE", "testbase", TestBase, + lambda x: normalize_path(os.path.abspath(x))) + # Need to update BaselineDir - it may be interpolated from testbase. + defaults["baselinedir"] = normalize_path(getOption("BaselineDir", defaults["baselinedir"])) + (Config, _) = get_env_from_alternative("BTEST_BASELINE_DIR", + "baselinedir", + None, + transform=lambda x: normalize_path(x)) - except IOError as e: - print("cannot open %s: %s" % (Options.diagfile, e), file=sys.stderr) + os.chdir(TestBase) -if Options.sphinx: - output_handlers += [SphinxOutput(Options)] + if Options.sphinx: + Options.quiet = True -if Options.xmlfile: - try: - xmlfile = open(Options.xmlfile, "w", 1) - output_handlers += [XMLReport(Options, xmlfile)] + if Options.quiet: + Options.brief = True - except IOError as e: - print("cannot open %s: %s" % (Options.xmlfile, e), file=sys.stderr) + # Determine output handlers to use. -if Options.tracefile: - try: - tracefile = open(Options.tracefile, "w", 1) - output_handlers += [ChromeTracing(Options, tracefile)] - - except IOError as e: - print("cannot open %s: %s" % (Options.tracefile, e), file=sys.stderr) + output_handler = create_output_handler(Options) -output_handler = Forwarder(Options, output_handlers) + # Determine Timer to use. -# Determine Timer to use. 
+ Timer = None -Timer = None + if platform() == "Linux": + t = LinuxTimer() + if t.available(): + Timer = t -if platform() == "Linux": - t = LinuxTimer() - if t.available(): - Timer = t + if Options.update_times and not Timer: + warning("unable to create timing baseline because timer is not available") -if Options.update_times and not Timer: - warning("unable to create timing baseline because timer is not available") + # Evaluate other command line options. -# Evaluate other command line options. + if Config.has_section("environment"): + for (name, value) in Config.itemsNoDefaults("environment"): + # Here we don't want to include items from defaults + os.environ[name] = value -if Config.has_section("environment"): - for (name, value) in Config.itemsNoDefaults("environment"): - # Here we don't want to include items from defaults - os.environ[name] = value + Alternatives = {} -Alternatives = {} + if Options.alternatives: + for tag in Options.alternatives: + a = Alternative(tag) -if Options.alternatives: - for tag in Options.alternatives: - a = Alternative(tag) + try: + for (name, value) in Config.itemsNoDefaults("filter-%s" % tag): + a.filters[name] = value - try: - for (name, value) in Config.itemsNoDefaults("filter-%s" % tag): - a.filters[name] = value + except configparser.NoSectionError: + pass - except configparser.NoSectionError: - pass + try: + for (name, value) in Config.itemsNoDefaults("substitution-%s" % tag): + a.substitutions[name] = value - try: - for (name, value) in Config.itemsNoDefaults("substitution-%s" % tag): - a.substitutions[name] = value + except configparser.NoSectionError: + pass - except configparser.NoSectionError: - pass + try: + for (name, value) in Config.itemsNoDefaults("environment-%s" % tag): + a.envs[name] = value - try: - for (name, value) in Config.itemsNoDefaults("environment-%s" % tag): - a.envs[name] = value + except configparser.NoSectionError: + pass - except configparser.NoSectionError: - pass + if a.is_empty() and not 
a.is_default(): + error("alternative \"%s\" is undefined" % tag) + + Alternatives[tag] = a + + CommandPrefix = getOption("CommandPrefix", "@TEST-") + + RE_INPUT = re.compile(r"%INPUT") + RE_DIR = re.compile(r"%DIR") + RE_ENV = re.compile(r"\$\{(\w+)}") + RE_PART = re.compile(r"^(.*)#([0-9]+)$") + RE_IGNORE = re.compile(CommandPrefix + "IGNORE") + RE_START_NEXT_TEST = re.compile(CommandPrefix + "START-NEXT") + RE_START_FILE = re.compile(CommandPrefix + "START-FILE +([^\r\n ]*)") + RE_END_FILE = re.compile(CommandPrefix + "END-FILE") + + # Commands as tuple (tag, regexp, more-than-one-is-ok, optional, group-main, group-add) + # pylint: disable=bad-whitespace + # yapf: disable + RE_EXEC = ("exec", re.compile(CommandPrefix + "EXEC(-FAIL)?: *(.*)"), True, False, 2, 1) + RE_REQUIRES = ("requires", re.compile(CommandPrefix + "REQUIRES: *(.*)"), True, True, 1, -1) + RE_GROUP = ("group", re.compile(CommandPrefix + "GROUP: *(.*)"), True, True, 1, -1) + RE_SERIALIZE = ("serialize", re.compile(CommandPrefix + "SERIALIZE: *(.*)"), False, True, 1, -1) + RE_PORT = ("port", re.compile(CommandPrefix + "PORT: *(.*)"), True, True, 1, -1) + RE_INCLUDE_ALTERNATIVE = ("alternative", re.compile(CommandPrefix + "ALTERNATIVE: *(.*)"), True, True, 1, -1) + RE_IGNORE_ALTERNATIVE = ("not-alternative", + re.compile(CommandPrefix + "NOT-ALTERNATIVE: *(.*)"), True, True, 1, -1) + RE_COPY_FILE = ("copy-file", re.compile(CommandPrefix + "COPY-FILE: *(.*)"), True, True, 1, -1) + RE_KNOWN_FAILURE = ("known-failure", re.compile(CommandPrefix + "KNOWN-FAILURE"), False, True, -1, -1) + RE_MEASURE_TIME = ("measure-time", re.compile(CommandPrefix + "MEASURE-TIME"), False, True, -1, -1) + RE_DOC = ("doc", re.compile(CommandPrefix + "DOC: *(.*)"), True, True, 1, -1) + # yapf: enable + # pylint: enable=bad-whitespace + + Commands = (RE_EXEC, RE_REQUIRES, RE_GROUP, RE_SERIALIZE, RE_PORT, RE_INCLUDE_ALTERNATIVE, + RE_IGNORE_ALTERNATIVE, RE_COPY_FILE, RE_KNOWN_FAILURE, RE_MEASURE_TIME, RE_DOC) + + StateFile = 
normalize_path( + getOption("StateFile", os.path.join(defaults["testbase"], ".btest.failed.dat"))) + TmpDir = normalize_path(getOption("TmpDir", os.path.join(defaults["testbase"], ".tmp"))) + BaselineDirs = [normalize_path(dir) for dir in defaults["baselinedir"].split(os.pathsep)] + BaselineTimingDir = normalize_path( + getOption("TimingBaselineDir", os.path.join(BaselineDirs[0], "_Timing"))) + + Initializer = getOption("Initializer", "") + Finalizer = getOption("Finalizer", "") + Teardown = getOption("Teardown", "") + + PartInitializer = getOption("PartInitializer", "") + PartFinalizer = getOption("PartFinalizer", "") + PartTeardown = getOption("PartTeardown", "") + + Config.configured_tests = [] + + testdirs = getOption("TestDirs", "").split() + if testdirs: + Config.configured_tests = findTests(testdirs, True) + + if args: + tests = findTests(args) - if a.is_empty() and not a.is_default(): - error("alternative \"%s\" is undefined" % tag) - - Alternatives[tag] = a - -CommandPrefix = getOption("CommandPrefix", "@TEST-") - -RE_INPUT = re.compile(r"%INPUT") -RE_DIR = re.compile(r"%DIR") -RE_ENV = re.compile(r"\$\{(\w+)\}") -RE_PART = re.compile(r"^(.*)#([0-9]+)$") -RE_IGNORE = re.compile(CommandPrefix + "IGNORE") -RE_START_NEXT_TEST = re.compile(CommandPrefix + "START-NEXT") -RE_START_FILE = re.compile(CommandPrefix + "START-FILE +([^\r\n ]*)") -RE_END_FILE = re.compile(CommandPrefix + "END-FILE") - -# Commands as tuple (tag, regexp, more-than-one-is-ok, optional, group-main, group-add) -# pylint: disable=bad-whitespace -# yapf: disable -RE_EXEC = ("exec", re.compile(CommandPrefix + "EXEC(-FAIL)?: *(.*)"), True, False, 2, 1) -RE_REQUIRES = ("requires", re.compile(CommandPrefix + "REQUIRES: *(.*)"), True, True, 1, -1) -RE_GROUP = ("group", re.compile(CommandPrefix + "GROUP: *(.*)"), True, True, 1, -1) -RE_SERIALIZE = ("serialize", re.compile(CommandPrefix + "SERIALIZE: *(.*)"), False, True, 1, -1) -RE_PORT = ("port", re.compile(CommandPrefix + "PORT: *(.*)"), True, 
True, 1, -1) -RE_INCLUDE_ALTERNATIVE = ("alternative", re.compile(CommandPrefix + "ALTERNATIVE: *(.*)"), True, True, 1, -1) -RE_IGNORE_ALTERNATIVE = ("not-alternative", re.compile(CommandPrefix + "NOT-ALTERNATIVE: *(.*)"), True, True, 1, -1) -RE_COPY_FILE = ("copy-file", re.compile(CommandPrefix + "COPY-FILE: *(.*)"), True, True, 1, -1) -RE_KNOWN_FAILURE = ("known-failure", re.compile(CommandPrefix + "KNOWN-FAILURE"), False, True, -1, -1) -RE_MEASURE_TIME = ("measure-time", re.compile(CommandPrefix + "MEASURE-TIME"), False, True, -1, -1) -RE_DOC = ("doc", re.compile(CommandPrefix + "DOC: *(.*)"), True, True, 1, -1) -# yapf: enable -# pylint: enable=bad-whitespace - -Commands = (RE_EXEC, RE_REQUIRES, RE_GROUP, RE_SERIALIZE, RE_PORT, RE_INCLUDE_ALTERNATIVE, - RE_IGNORE_ALTERNATIVE, RE_COPY_FILE, RE_KNOWN_FAILURE, RE_MEASURE_TIME, RE_DOC) - -StateFile = os.path.abspath( - getOption("StateFile", os.path.join(defaults["testbase"], ".btest.failed.dat"))) -TmpDir = os.path.abspath(getOption("TmpDir", os.path.join(defaults["testbase"], ".tmp"))) -BaselineDirs = [os.path.abspath(dir) for dir in defaults["baselinedir"].split(":")] -BaselineTimingDir = os.path.abspath( - getOption("TimingBaselineDir", os.path.join(BaselineDirs[0], "_Timing"))) - -Initializer = getOption("Initializer", "") -Finalizer = getOption("Finalizer", "") -Teardown = getOption("Teardown", "") - -PartInitializer = getOption("PartInitializer", "") -PartFinalizer = getOption("PartFinalizer", "") -PartTeardown = getOption("PartTeardown", "") - -Config.configured_tests = [] - -testdirs = getOption("TestDirs", "").split() -if testdirs: - Config.configured_tests = findTests(testdirs, True) - -if args: - tests = findTests(args) + else: + if Options.rerun: + (success, tests) = readStateFile() -else: - if Options.rerun: - (success, tests) = readStateFile() + if success: + if not tests: + output("no tests failed last time") + sys.exit(0) - if success: - if not tests: - output("no tests failed last time") - 
sys.exit(0) + else: + warning("cannot read state file, executing all tests") + tests = Config.configured_tests else: - warning("cannot read state file, executing all tests") tests = Config.configured_tests - else: - tests = Config.configured_tests + if Options.groups: + groups = Options.groups.split(",") + Options.groups = set([g for g in groups if not g.startswith("-")]) + Options.no_groups = set([g[1:] for g in groups if g.startswith("-")]) + + def rightGroup(t): + if not t: + return True -if Options.groups: - groups = Options.groups.split(",") - Options.groups = set([g for g in groups if not g.startswith("-")]) - Options.no_groups = set([g[1:] for g in groups if g.startswith("-")]) + if t.groups & Options.groups: + return True - def rightGroup(t): - if not t: - return True + if "" in Options.no_groups: + if not t.groups: + return True - if t.groups & Options.groups: - return True + elif Options.no_groups: + if t.groups & Options.no_groups: + return False - if "" in Options.no_groups: - if not t.groups: return True - elif Options.no_groups: - if t.groups & Options.no_groups: - return False + return False - return True + tests = [t for t in tests if rightGroup(t)] - return False + if not tests: + output("no tests to execute") + sys.exit(0) - tests = [t for t in tests if rightGroup(t)] + tests = mergeTestParts(tests) -if not tests: - output("no tests to execute") - sys.exit(0) + if Options.doc: + outputDocumentation(tests, Options.doc) + sys.exit(0) -tests = mergeTestParts(tests) + for d in BaselineDirs: + mkdir(d) -if Options.doc: - outputDocumentation(tests, Options.doc) - sys.exit(0) + mkdir(TmpDir) -for d in BaselineDirs: - mkdir(d) + if sys.platform == 'win32': + # On win32 we have to use a named pipe so that python's multiprocessing + # chooses AF_PIPE as the family type. + addr = "\\\\.\\pipe\\btest-pipe-%s" % (os.getpid()) + else: + # Building our own path to avoid "error: AF_UNIX path too long" on + # some platforms. See BIT-862. 
+ sname = "btest-socket-%s" % (os.getpid()) + addr = os.path.join(tempfile.gettempdir(), sname) -mkdir(TmpDir) + # Check if the pathname is too long to fit in struct sockaddr_un (the + # maximum length is system-dependent, so here we just use 100, which seems + # a safe default choice). + if len(addr) > 100: + # Try relative path to TmpDir (which would usually be ".tmp"). + addr = os.path.join(os.path.relpath(TmpDir), sname) -# Building our own path to avoid "error: AF_UNIX path too long" on -# some platforms. See BIT-862. -sname = "btest-socket-%d" % os.getpid() -addr = os.path.join(tempfile.gettempdir(), sname) + # If the path is still too long, then use the global tmp directory. + if len(addr) > 100: + addr = os.path.join("/tmp", sname) -# Check if the pathname is too long to fit in struct sockaddr_un (the -# maximum length is system-dependent, so here we just use 100, which seems -# a safe default choice). -if len(addr) > 100: - # Try relative path to TmpDir (which would usually be ".tmp"). - addr = os.path.join(os.path.relpath(TmpDir), sname) + mgr = TestManager(address=addr) - # If the path is still too long, then use the global tmp directory. - if len(addr) > 100: - addr = os.path.join("/tmp", sname) + try: + if Options.list: + for test in sorted(tests): + if test.name: + print(test.name) + sys.exit(0) + else: + (succeeded, failed, skipped, unstable, + failed_expected) = mgr.run(copy.deepcopy(tests), output_handler) + total = succeeded + failed + skipped + + output_handler.finished() + + # Ctrl-C can lead to broken pipe (e.g. FreeBSD), so include IOError here: + except (Abort, KeyboardInterrupt, IOError) as exc: + output_handler.finished() + print(str(exc) or "Aborted with %s." 
% type(exc).__name__, file=sys.stderr) + sys.stderr.flush() + # Explicitly shut down sync manager to avoid leaking manager + # processes, particularly with --abort-on-failure: + mgr.shutdown() + sys.exit(1) -mgr = TestManager(address=addr) + skip = (", %d skipped" % skipped) if skipped > 0 else "" + unstablestr = (", %d unstable" % unstable) if unstable > 0 else "" + failed_expectedstr = (" (with %d expected to fail)" % + failed_expected) if failed_expected > 0 else "" -try: - if Options.list: - for test in sorted(tests): - if test.name: - print(test.name) - sys.exit(0) - else: - (succeeded, failed, skipped, unstable, - failed_expected) = mgr.run(copy.deepcopy(tests), output_handler) - total = succeeded + failed + skipped - - output_handler.finished() - -# Ctrl-C can lead to broken pipe (e.g. FreeBSD), so include IOError here: -except (Abort, KeyboardInterrupt, IOError) as exc: - output_handler.finished() - print(str(exc) or "Aborted with %s." % type(exc).__name__, file=sys.stderr) - sys.stderr.flush() - # Explicitly shut down sync manager to avoid leaking manager - # processes, particularly with --abort-on-failure: - mgr.shutdown() - os._exit(1) - -skip = (", %d skipped" % skipped) if skipped > 0 else "" -unstablestr = (", %d unstable" % unstable) if unstable > 0 else "" -failed_expectedstr = (" (with %d expected to fail)" % - failed_expected) if failed_expected > 0 else "" - -if failed > 0: - if not Options.quiet: - output("%d of %d test%s failed%s%s%s" % - (failed, total, "s" if total > 1 else "", failed_expectedstr, skip, unstablestr)) - - if failed == failed_expected: - sys.exit(0) - else: - sys.exit(1) + if failed > 0: + if not Options.quiet: + output("%d of %d test%s failed%s%s%s" % + (failed, total, "s" if total > 1 else "", failed_expectedstr, skip, unstablestr)) -elif skipped > 0 or unstable > 0: - if not Options.quiet: - output("%d test%s successful%s%s" % - (succeeded, "s" if succeeded != 1 else "", skip, unstablestr)) + if failed == failed_expected: + 
sys.exit(0) + else: + sys.exit(1) - sys.exit(0) + elif skipped > 0 or unstable > 0: + if not Options.quiet: + output("%d test%s successful%s%s" % + (succeeded, "s" if succeeded != 1 else "", skip, unstablestr)) -else: - if not Options.quiet: - output("all %d tests successful" % total) + sys.exit(0) + + else: + if not Options.quiet: + output("all %d tests successful" % total) - sys.exit(0) + sys.exit(0) diff --git a/btest-diff b/btest-diff index 47010f4b..4e6af50c 100755 --- a/btest-diff +++ b/btest-diff @@ -131,7 +131,11 @@ if [ "$#" -lt 1 ]; then fi # Split string with baseline directories into array. -IFS=':' read -ra baseline_dirs <<<"$TEST_BASELINE" +if [ "$(uname -s | cut -c 1-5)" == "MINGW" ]; then + IFS=';' read -ra baseline_dirs <<<"$TEST_BASELINE" +else + IFS=':' read -ra baseline_dirs <<<"$TEST_BASELINE" +fi input="$1" # shellcheck disable=SC2001 @@ -221,7 +225,7 @@ if [ -n "$baseline" ]; then if is_binary_mode; then diff -s "$@" "$canon_baseline" "$canon_output" >>$TEST_DIAGNOSTICS else - diff -au "$@" "$canon_baseline" "$canon_output" >>$TEST_DIAGNOSTICS + diff -au --strip-trailing-cr "$@" "$canon_baseline" "$canon_output" >>$TEST_DIAGNOSTICS fi result=$? fi diff --git a/btest-setsid b/btest-setsid index e46a6855..43827b4a 100755 --- a/btest-setsid +++ b/btest-setsid @@ -9,7 +9,6 @@ except: pass prog = sys.argv[1] - args = sys.argv[1:] os.execvp(prog, args) diff --git a/setup.py b/setup.py index 2ebda14a..4ff1fda5 100644 --- a/setup.py +++ b/setup.py @@ -1,6 +1,7 @@ #! /usr/bin/env python from setuptools import setup +import sys # When making changes to the following list, remember to keep # CMakeLists.txt in sync. @@ -21,13 +22,20 @@ py_modules = ["btest-sphinx"] +# We require the external multiprocess library on Windows due to pickling issues +# with the standard one. +if sys.platform == 'win32': + install_requires = ['multiprocess'] +else: + install_requires = [] + setup( name='btest', version="0.72-20", # Filled in automatically. 
description='A powerful system testing framework', long_description='See https://github.com/zeek/btest', - author='Robin Sommer', - author_email='robin@icir.org', + author='The Zeek Team', + author_email='info@zeek.org', url='https://github.com/zeek/btest', scripts=scripts, package_dir={"": "sphinx"}, @@ -43,4 +51,6 @@ 'Programming Language :: Python :: 3', 'Topic :: Utilities', ], + python_requires='>=3.7', + install_requires=install_requires, ) diff --git a/testing/Baseline/tests.environment-windows/output b/testing/Baseline/tests.environment-windows/output new file mode 100644 index 00000000..e58fc449 --- /dev/null +++ b/testing/Baseline/tests.environment-windows/output @@ -0,0 +1,25 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +Foo +testbase is correct +42 +macro expansion within backticks is correct +default_path is correct +<...>/.tmp/tests.environment-windows/.tmp/environment-windows/.diag +TEST +<...>/.tmp/tests.environment-windows/Baseline/environment-windows +environment-windows +<...>/.tmp/tests.environment-windows/.tmp/environment-windows/.verbose +<...>/.tmp/tests.environment-windows +1 +Foo +testbase is correct +42 +macro expansion within backticks is correct +default_path is correct +<...>/.tmp/tests.environment-windows/.tmp/environment-windows/.diag +UPDATE +<...>/.tmp/tests.environment-windows/Baseline/environment-windows +environment-windows +<...>/.tmp/tests.environment-windows/.tmp/environment-windows/.verbose +<...>/.tmp/tests.environment-windows +1 diff --git a/testing/Baseline/tests.set-key/output b/testing/Baseline/tests.set-key/output new file mode 100644 index 00000000..5222ee08 --- /dev/null +++ b/testing/Baseline/tests.set-key/output @@ -0,0 +1,4 @@ +normal +test +test2 +equals=ok diff --git a/testing/Scripts/convert-path-list.sh b/testing/Scripts/convert-path-list.sh new file mode 100644 index 00000000..15dce137 --- /dev/null +++ 
b/testing/Scripts/convert-path-list.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +# This script is used by tests.environment-windows to convert a semi-colon +# separated list of Windows-style paths into a colon-separated list of +# POSIX-style paths. + +new_list="" + +IFS=';' read -ra PARTS <<<"$1" +for i in "${PARTS[@]}"; do + p=$(cygpath "${i}" | sed 's/\/$//') + new_list+=$p + new_list+=":" +done + +echo ${new_list%?} diff --git a/testing/Scripts/diff-remove-abspath b/testing/Scripts/diff-remove-abspath index 361ad3fa..93ca4eee 100755 --- a/testing/Scripts/diff-remove-abspath +++ b/testing/Scripts/diff-remove-abspath @@ -2,4 +2,4 @@ # # Replace absolute paths with the basename. -sed 's#/\([^/]\{1,\}/\)\{1,\}\([^/]\{1,\}\)#<...>/\2#g' +sed 's#[a-zA-Z:]*/\([^/]\{1,\}/\)\{1,\}\([^/]\{1,\}\)#<...>/\2#g' diff --git a/testing/Scripts/is-windows b/testing/Scripts/is-windows new file mode 100644 index 00000000..d1b9a855 --- /dev/null +++ b/testing/Scripts/is-windows @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +case "$OSTYPE" in + msys*) + exit 0 + ;; + cygwin*) + exit 0 + ;; + *) + exit 1 + ;; +esac diff --git a/testing/Scripts/strip-test-base b/testing/Scripts/strip-test-base index 867e39d1..7c023cce 100755 --- a/testing/Scripts/strip-test-base +++ b/testing/Scripts/strip-test-base @@ -3,4 +3,13 @@ dir=$(dirname "$0") testbase=$(cd "$dir/.." 
&& pwd) +SCRIPTS="$(dirname -- "${BASH_SOURCE[0]}")" + +# Run the helper script and branch on its exit status. The previous form, +# `if [ ${SCRIPTS}/is-windows ]; then`, only tested that the path string was +# non-empty (always true — ShellCheck SC2157), so the drive-letter rewrite +# ran on every platform instead of only under msys/cygwin. +if "${SCRIPTS}/is-windows"; then + # shellcheck disable=SC2001 + testbase=$(echo "${testbase}" | sed 's#/\([a-zA-Z]\)/\(.*\)#\u\1:/\2#') +fi + sed "s#${testbase}#<...>#g" diff --git a/testing/btest.cfg b/testing/btest.cfg index b8607a63..f8f252d0 100644 --- a/testing/btest.cfg +++ b/testing/btest.cfg @@ -11,7 +11,7 @@ CommandPrefix = %%TEST- Initializer = test -f btest.cfg || cp %(testbase)s/btest.tests.cfg btest.cfg; echo >/dev/null [environment] -PATH=%(testbase)s/..:%(testbase)s/../sphinx:%(testbase)s/Scripts:%(default_path)s +PATH=%(testbase)s/..%(pathsep)s%(testbase)s/../sphinx%(pathsep)s%(testbase)s/Scripts%(pathsep)s%(default_path)s SCRIPTS=%(testbase)s/Scripts TMPDIR=%(testbase)s/.tmp # BTEST_CFG=%(testbase)s/btest.tests.cfg diff --git a/testing/btest.tests.cfg b/testing/btest.tests.cfg index ea4be91f..f4335e52 100644 --- a/testing/btest.tests.cfg +++ b/testing/btest.tests.cfg @@ -4,6 +4,9 @@ # This is set so that all files will be created inside the current # sandbox. +[DEFAULT] +override=normal + [btest] TmpDir = `echo .tmp` BaselineDir = %(testbase)s/Baseline @@ -14,6 +17,7 @@ ENV1=Foo ENV2=%(testbase)s ENV3=`expr 42` ENV4=`echo \(%(testbase)s=%(testbase)s\)` +ENV5=%(override)s [environment-foo] FOO=BAR diff --git a/testing/tests/env-var-casing.test b/testing/tests/env-var-casing.test index 47e8a036..ded6c740 100644 --- a/testing/tests/env-var-casing.test +++ b/testing/tests/env-var-casing.test @@ -1,5 +1,6 @@ -# %TEST-DOC: Validates that env vars are case-sensitive; this is a regression test for #75. +# %TEST-DOC: Validates that env vars are case-sensitive; this is a regression test for #75. Environment variables on Windows are always uppercase, due to legacy DOS requirements. This test will be skipped on that platform. # +# %TEST-REQUIRES: ! 
${SCRIPTS}/is-windows # %TEST-EXEC: http_proxy=aaa HTTP_PROXY=bbb btest -dv test # %TEST-START-FILE btest.cfg diff --git a/testing/tests/environment-windows.test b/testing/tests/environment-windows.test new file mode 100644 index 00000000..aaf88717 --- /dev/null +++ b/testing/tests/environment-windows.test @@ -0,0 +1,23 @@ +# %TEST-REQUIRES: ${SCRIPTS}/is-windows +# %TEST-EXEC: btest -d %INPUT +# %TEST-EXEC: btest -U %INPUT +# %TEST-EXEC: btest-diff output + +@TEST-REQUIRES: test -n "${ENV2}" +@TEST-EXEC-FAIL: test -z "${ENV2}" + +@TEST-EXEC: echo ${ENV1} >>../../output +@TEST-EXEC: echo ${ENV2} >1 +@TEST-EXEC: set >>1 +@TEST-EXEC: test "${ENV2}" = `cd ../.. && pwd | cygpath -m -f -` && echo "testbase is correct" >>../../output +@TEST-EXEC: echo ${ENV3} >>../../output +@TEST-EXEC: test "${ENV4}" = "(${TEST_BASE}=${TEST_BASE})" && echo "macro expansion within backticks is correct" >>../../output +@TEST-EXEC: test "`${SCRIPTS}/convert-path-list.sh \"${ORIGPATH}\"`" = "${PATH}" && echo "default_path is correct" >>../../output + +@TEST-EXEC: echo ${TEST_DIAGNOSTICS} | strip-test-base >>../../output +@TEST-EXEC: echo ${TEST_MODE} >>../../output +@TEST-EXEC: echo ${TEST_BASELINE} | strip-test-base >>../../output +@TEST-EXEC: echo ${TEST_NAME} >>../../output +@TEST-EXEC: echo ${TEST_VERBOSE} | strip-test-base >>../../output +@TEST-EXEC: echo ${TEST_BASE} | strip-test-base >>../../output +@TEST-EXEC: echo ${TEST_PART} >>../../output diff --git a/testing/tests/environment.test b/testing/tests/environment.test index 225463f0..215eb8d3 100644 --- a/testing/tests/environment.test +++ b/testing/tests/environment.test @@ -1,3 +1,4 @@ +# %TEST-REQUIRES: ! 
${SCRIPTS}/is-windows # %TEST-EXEC: btest -d %INPUT # %TEST-EXEC: btest -U %INPUT # %TEST-EXEC: btest-diff output diff --git a/testing/tests/multiple-baseline-dirs.test b/testing/tests/multiple-baseline-dirs.test index d91661ed..619d4918 100644 --- a/testing/tests/multiple-baseline-dirs.test +++ b/testing/tests/multiple-baseline-dirs.test @@ -41,5 +41,5 @@ %TEST-START-FILE btest.cfg [btest] -BaselineDir = baseline1:baseline2:baseline3 +BaselineDir = baseline1%(pathsep)sbaseline2%(pathsep)sbaseline3 %TEST-END-FILE diff --git a/testing/tests/set-key.test b/testing/tests/set-key.test new file mode 100644 index 00000000..69a9d59e --- /dev/null +++ b/testing/tests/set-key.test @@ -0,0 +1,7 @@ +# %TEST-EXEC: btest %INPUT +# %TEST-EXEC: btest -s override=test %INPUT +# %TEST-EXEC: btest --set=override=test2 %INPUT +# %TEST-EXEC: btest -s override=equals=ok %INPUT +# %TEST-EXEC: btest-diff output + +@TEST-EXEC: echo ${ENV5} >>../../output diff --git a/testing/tests/sphinx/rst-cmd.sh b/testing/tests/sphinx/rst-cmd.sh index fd5a462d..d56c6a76 100755 --- a/testing/tests/sphinx/rst-cmd.sh +++ b/testing/tests/sphinx/rst-cmd.sh @@ -1,3 +1,4 @@ +# %TEST-REQUIRES: ! $SCRIPTS/is-windows # %TEST-EXEC: bash %INPUT %TEST-START-FILE file.txt diff --git a/testing/tests/sphinx/run-sphinx b/testing/tests/sphinx/run-sphinx index 765594fd..f00eeb5e 100644 --- a/testing/tests/sphinx/run-sphinx +++ b/testing/tests/sphinx/run-sphinx @@ -1,4 +1,5 @@ # %TEST-REQUIRES: which sphinx-build +# %TEST-REQUIRES: ! ${SCRIPTS}/is-windows # # %TEST-EXEC: cp -r %DIR/../../../examples/sphinx/* . # %TEST-EXEC: make clean && make text