diff --git a/test/message/testcfg.py b/test/message/testcfg.py
index 819dfa12c5b631..7e8d73bb39acd7 100644
--- a/test/message/testcfg.py
+++ b/test/message/testcfg.py
@@ -108,10 +108,6 @@ def GetSource(self):
 
 
 class MessageTestConfiguration(test.TestConfiguration):
-
-  def __init__(self, context, root):
-    super(MessageTestConfiguration, self).__init__(context, root)
-
   def Ls(self, path):
     if isdir(path):
       return [f for f in os.listdir(path)
@@ -135,11 +131,6 @@ def ListTests(self, current_path, path, arch, mode):
   def GetBuildRequirements(self):
     return ['sample', 'sample=shell']
 
-  def GetTestStatus(self, sections, defs):
-    status_file = join(self.root, 'message.status')
-    if exists(status_file):
-      test.ReadConfigurationInto(status_file, sections, defs)
-
 
 def GetConfiguration(context, root):
-  return MessageTestConfiguration(context, root)
+  return MessageTestConfiguration(context, root, 'message')
diff --git a/test/pseudo-tty/testcfg.py b/test/pseudo-tty/testcfg.py
index 920789844583de..a5b7917bc05a46 100644
--- a/test/pseudo-tty/testcfg.py
+++ b/test/pseudo-tty/testcfg.py
@@ -122,10 +122,6 @@ def RunCommand(self, command, env):
 
 
 class TTYTestConfiguration(test.TestConfiguration):
-
-  def __init__(self, context, root):
-    super(TTYTestConfiguration, self).__init__(context, root)
-
   def Ls(self, path):
     if isdir(path):
       return [f[:-3] for f in os.listdir(path) if f.endswith('.js')]
@@ -155,11 +151,6 @@ def ListTests(self, current_path, path, arch, mode):
   def GetBuildRequirements(self):
     return ['sample', 'sample=shell']
 
-  def GetTestStatus(self, sections, defs):
-    status_file = join(self.root, 'pseudo-tty.status')
-    if exists(status_file):
-      test.ReadConfigurationInto(status_file, sections, defs)
-
 
 def GetConfiguration(context, root):
-  return TTYTestConfiguration(context, root)
+  return TTYTestConfiguration(context, root, 'pseudo-tty')
diff --git a/test/root.status b/test/root.status
new file mode 100644
index 00000000000000..9ed9004c2169d0
--- /dev/null
+++ b/test/root.status
@@ -0,0 +1,190 @@
+[$mode==debug]
+async-hooks/test-callback-error: SLOW
+async-hooks/test-callback-error: SLOW
+async-hooks/test-emit-init: SLOW
+async-hooks/test-emit-init: SLOW
+async-hooks/test-querywrap: SLOW
+async-hooks/test-querywrap: SLOW
+async-hooks/test-tlswrap: SLOW
+async-hooks/test-tlswrap: SLOW
+message/eval_messages: SLOW
+message/stdin_messages: SLOW
+parallel/test-benchmark-assert: SLOW
+parallel/test-benchmark-cluster: SLOW
+parallel/test-benchmark-crypto: SLOW
+parallel/test-benchmark-dns: SLOW
+parallel/test-benchmark-domain: SLOW
+parallel/test-benchmark-es: SLOW
+parallel/test-benchmark-events: SLOW
+parallel/test-benchmark-fs: SLOW
+parallel/test-benchmark-misc: SLOW
+parallel/test-benchmark-module: SLOW
+parallel/test-benchmark-os: SLOW
+parallel/test-benchmark-process: SLOW
+parallel/test-benchmark-querystring: SLOW
+parallel/test-benchmark-streams: SLOW
+parallel/test-benchmark-string_decoder: SLOW
+parallel/test-benchmark-timers: SLOW
+parallel/test-benchmark-url: SLOW
+parallel/test-benchmark-util: SLOW
+parallel/test-benchmark-v8: SLOW
+parallel/test-benchmark-vm: SLOW
+parallel/test-benchmark-zlib: SLOW
+parallel/test-buffer-constructor-node-modules-paths: SLOW
+parallel/test-buffer-indexof: SLOW
+parallel/test-child-process-spawnsync-input: SLOW
+parallel/test-child-process-windows-hide: SLOW
+parallel/test-cli-eval: SLOW
+parallel/test-cli-eval-event: SLOW
+parallel/test-cli-node-options: SLOW
+parallel/test-cli-node-options-disallowed: SLOW
+parallel/test-cli-node-print-help: SLOW
+parallel/test-cli-syntax: SLOW
+parallel/test-cluster-basic: SLOW
+parallel/test-cluster-bind-privileged-port: SLOW
+parallel/test-cluster-bind-twice: SLOW
+parallel/test-cluster-disconnect: SLOW
+parallel/test-cluster-disconnect-idle-worker: SLOW
+parallel/test-crypto-fips: SLOW
+parallel/test-domain-abort-on-uncaught: SLOW
+parallel/test-domain-uncaught-exception: SLOW
+parallel/test-domain-with-abort-on-uncaught-exception: SLOW
+parallel/test-env-var-no-warnings: SLOW
+parallel/test-error-reporting: SLOW
+parallel/test-eslint-alphabetize-errors: SLOW
+parallel/test-eslint-buffer-constructor: SLOW
+parallel/test-eslint-crypto-check: SLOW
+parallel/test-eslint-documented-errors: SLOW
+parallel/test-eslint-duplicate-requires: SLOW
+parallel/test-eslint-eslint-check: SLOW
+parallel/test-eslint-inspector-check: SLOW
+parallel/test-eslint-lowercase-name-for-primitive: SLOW
+parallel/test-eslint-no-let-in-for-declaration: SLOW
+parallel/test-eslint-no-unescaped-regexp-dot: SLOW
+parallel/test-eslint-number-isnan: SLOW
+parallel/test-eslint-prefer-assert-iferror: SLOW
+parallel/test-eslint-prefer-assert-methods: SLOW
+parallel/test-eslint-prefer-common-expectserror: SLOW
+parallel/test-eslint-prefer-common-mustnotcall: SLOW
+parallel/test-eslint-prefer-util-format-errors: SLOW
+parallel/test-eslint-require-buffer: SLOW
+parallel/test-eslint-required-modules: SLOW
+parallel/test-fs-read-stream-concurrent-reads: SLOW
+parallel/test-gc-tls-external-memory: SLOW
+parallel/test-heapdump-dns: SLOW
+parallel/test-heapdump-fs-promise: SLOW
+parallel/test-heapdump-http2: SLOW
+parallel/test-heapdump-inspector: SLOW
+parallel/test-heapdump-tls: SLOW
+parallel/test-heapdump-worker: SLOW
+parallel/test-heapdump-zlib: SLOW
+parallel/test-http-client-timeout-option-with-agent: SLOW
+parallel/test-http-pipeline-flood: SLOW
+parallel/test-http-pipeline-requests-connection-leak: SLOW
+parallel/test-http2-forget-closed-streams: SLOW
+parallel/test-http2-multiplex: SLOW
+parallel/test-inspector-tracing-domain: SLOW
+parallel/test-listen-fd-cluster: SLOW
+parallel/test-module-loading-globalpaths: SLOW
+parallel/test-module-main-fail: SLOW
+parallel/test-module-main-preserve-symlinks-fail: SLOW
+parallel/test-net-pingpong: SLOW
+parallel/test-next-tick-fixed-queue-regression: SLOW
+parallel/test-npm-install: SLOW
+parallel/test-preload: SLOW
+parallel/test-repl: SLOW
+parallel/test-repl-tab-complete: SLOW
+parallel/test-repl-top-level-await: SLOW
+parallel/test-stdio-pipe-access: SLOW
+parallel/test-stream-pipeline: SLOW
+parallel/test-stream2-read-sync-stack: SLOW
+parallel/test-stringbytes-external: SLOW
+parallel/test-sync-io-option: SLOW
+parallel/test-tick-processor-arguments: SLOW
+parallel/test-tls-env-bad-extra-ca: SLOW
+parallel/test-tls-env-extra-ca: SLOW
+parallel/test-tls-handshake-exception: SLOW
+parallel/test-tls-securepair-leak: SLOW
+parallel/test-tls-server-verify: SLOW
+parallel/test-tls-session-cache: SLOW
+parallel/test-tls-ticket-cluster: SLOW
+parallel/test-tls-timeout-server: SLOW
+parallel/test-tls-timeout-server-2: SLOW
+parallel/test-tls-tlswrap-segfault: SLOW
+parallel/test-trace-events-all: SLOW
+parallel/test-trace-events-api: SLOW
+parallel/test-trace-events-async-hooks: SLOW
+parallel/test-trace-events-binding: SLOW
+parallel/test-trace-events-bootstrap: SLOW
+parallel/test-trace-events-category-used: SLOW
+parallel/test-trace-events-file-pattern: SLOW
+parallel/test-trace-events-fs-sync: SLOW
+parallel/test-trace-events-metadata: SLOW
+parallel/test-trace-events-none: SLOW
+parallel/test-trace-events-perf: SLOW
+parallel/test-trace-events-process-exit: SLOW
+parallel/test-trace-events-promises: SLOW
+parallel/test-trace-events-v8: SLOW
+parallel/test-trace-events-vm: SLOW
+parallel/test-trace-events-worker-metadata: SLOW
+parallel/test-tracing-no-crash: SLOW
+parallel/test-url-relative: SLOW
+parallel/test-util-callbackify: SLOW
+parallel/test-util-inspect: SLOW
+parallel/test-util-inspect-long-running: SLOW
+parallel/test-util-types: SLOW
+parallel/test-v8-coverage: SLOW
+parallel/test-vm-api-handles-getter-errors: SLOW
+parallel/test-vm-basic: SLOW
+parallel/test-vm-cached-data: SLOW
+parallel/test-vm-sigint: SLOW
+parallel/test-vm-sigint-existing-handler: SLOW
+parallel/test-vm-symbols: SLOW
+parallel/test-vm-syntax-error-message: SLOW
+parallel/test-vm-syntax-error-stderr: SLOW
+parallel/test-worker: SLOW
+parallel/test-worker-cleanup-handles: SLOW
+parallel/test-worker-debug: SLOW
+parallel/test-worker-esmodule: SLOW
+parallel/test-worker-exit-code: SLOW
+parallel/test-worker-memory: SLOW
+parallel/test-worker-message-channel: SLOW
+parallel/test-worker-message-channel-sharedarraybuffer: SLOW
+parallel/test-worker-nexttick-terminate: SLOW
+parallel/test-worker-onmessage: SLOW
+parallel/test-worker-onmessage-not-a-function: SLOW
+parallel/test-worker-parent-port-ref: SLOW
+parallel/test-worker-relative-path: SLOW
+parallel/test-worker-relative-path-double-dot: SLOW
+parallel/test-worker-stdio: SLOW
+parallel/test-worker-syntax-error: SLOW
+parallel/test-worker-syntax-error-file: SLOW
+parallel/test-worker-uncaught-exception: SLOW
+parallel/test-worker-uncaught-exception-async: SLOW
+parallel/test-worker-unsupported-things: SLOW
+parallel/test-worker-workerdata-sharedarraybuffer: SLOW
+parallel/test-zlib-bytes-read: SLOW
+parallel/test-zlib-convenience-methods: SLOW
+sequential/test-benchmark-buffer: SLOW
+sequential/test-benchmark-child-process: SLOW
+sequential/test-benchmark-dgram: SLOW
+sequential/test-benchmark-http: SLOW
+sequential/test-benchmark-net: SLOW
+sequential/test-benchmark-path: SLOW
+sequential/test-benchmark-tls: SLOW
+sequential/test-child-process-execsync: SLOW
+sequential/test-child-process-exit: SLOW
+sequential/test-child-process-pass-fd: SLOW
+sequential/test-fs-readfile-tostring-fail: SLOW
+sequential/test-fs-watch-system-limit: SLOW
+sequential/test-gc-http-client: SLOW
+sequential/test-gc-http-client-connaborted: SLOW
+sequential/test-gc-http-client-onerror: SLOW
+sequential/test-gc-http-client-timeout: SLOW
+sequential/test-gc-net-timeout: SLOW
+sequential/test-http2-ping-flood: SLOW
+sequential/test-http2-settings-flood: SLOW
+sequential/test-inspector-port-cluster: SLOW
+sequential/test-net-bytes-per-incoming-chunk-overhead: SLOW
+sequential/test-pipe: SLOW
+sequential/test-util-debug: SLOW
diff --git a/test/testpy/__init__.py b/test/testpy/__init__.py
index 8b5b2f6b48f09f..27d7124bf2ed16 100644
--- a/test/testpy/__init__.py
+++ b/test/testpy/__init__.py
@@ -95,11 +95,10 @@ def GetCommand(self):
   def GetSource(self):
     return open(self.file).read()
 
 
-class SimpleTestConfiguration(test.TestConfiguration):
+class SimpleTestConfiguration(test.TestConfiguration):
   def __init__(self, context, root, section, additional=None):
-    super(SimpleTestConfiguration, self).__init__(context, root)
-    self.section = section
+    super(SimpleTestConfiguration, self).__init__(context, root, section)
     if additional is not None:
       self.additional_flags = additional
     else:
@@ -122,11 +121,6 @@ def ListTests(self, current_path, path, arch, mode):
   def GetBuildRequirements(self):
     return ['sample', 'sample=shell']
 
-  def GetTestStatus(self, sections, defs):
-    status_file = join(self.root, '%s.status' % (self.section))
-    if exists(status_file):
-      test.ReadConfigurationInto(status_file, sections, defs)
-
 class ParallelTestConfiguration(SimpleTestConfiguration):
   def __init__(self, context, root, section, additional=None):
     super(ParallelTestConfiguration, self).__init__(context, root, section,
diff --git a/tools/test.py b/tools/test.py
index c5c9fb53c07626..0571f3394bcc38 100755
--- a/tools/test.py
+++ b/tools/test.py
@@ -131,7 +131,7 @@ def RunSingle(self, parallel, thread_id):
           test = self.sequential_queue.get_nowait()
         except Empty:
           return
-      case = test.case
+      case = test
       case.thread_id = thread_id
       self.lock.acquire()
       self.AboutToRun(case)
@@ -780,10 +780,10 @@ def CarCdr(path):
 
 class TestConfiguration(object):
-
-  def __init__(self, context, root):
+  def __init__(self, context, root, section):
     self.context = context
     self.root = root
+    self.section = section
 
   def Contains(self, path, file):
     if len(path) > len(file):
       return True
@@ -794,7 +794,9 @@ def Contains(self, path, file):
     return True
 
   def GetTestStatus(self, sections, defs):
-    pass
+    status_file = join(self.root, '%s.status' % self.section)
+    if exists(status_file):
+      ReadConfigurationInto(status_file, sections, defs)
 
 
 class TestSuite(object):
@@ -848,15 +850,15 @@ def GetTestStatus(self, context, sections, defs):
 
 
 class LiteralTestSuite(TestSuite):
-
-  def __init__(self, tests):
+  def __init__(self, tests_repos, test_root):
     super(LiteralTestSuite, self).__init__('root')
-    self.tests = tests
+    self.tests_repos = tests_repos
+    self.test_root = test_root
 
   def GetBuildRequirements(self, path, context):
     (name, rest) = CarCdr(path)
     result = [ ]
-    for test in self.tests:
+    for test in self.tests_repos:
       if not name or name.match(test.GetName()):
         result += test.GetBuildRequirements(rest, context)
     return result
@@ -864,7 +866,7 @@ def GetBuildRequirements(self, path, context):
   def ListTests(self, current_path, path, context, arch, mode):
     (name, rest) = CarCdr(path)
     result = [ ]
-    for test in self.tests:
+    for test in self.tests_repos:
       test_name = test.GetName()
       if not name or name.match(test_name):
         full_path = current_path + [test_name]
@@ -873,8 +875,11 @@ def ListTests(self, current_path, path, context, arch, mode):
     return result
 
   def GetTestStatus(self, context, sections, defs):
-    for test in self.tests:
-      test.GetTestStatus(context, sections, defs)
+    # Just read the test configuration from root_path/root.status.
+    root = TestConfiguration(context, self.test_root, 'root')
+    root.GetTestStatus(sections, defs)
+    for tests_repos in self.tests_repos:
+      tests_repos.GetTestStatus(context, sections, defs)
 
 
 TIMEOUT_SCALEFACTOR = {
@@ -934,6 +939,7 @@ def RunTestCases(cases_to_run, progress, tasks, flaky_tests_mode):
 # -------------------------------------------
 
 
+RUN = 'run'
 SKIP = 'skip'
 FAIL = 'fail'
 PASS = 'pass'
@@ -963,8 +969,8 @@ def __init__(self, name):
     self.name = name
 
   def GetOutcomes(self, env, defs):
-    if self.name in env: return ListSet([env[self.name]])
-    else: return Nothing()
+    if self.name in env: return set([env[self.name]])
+    else: return set()
 
 
 class Outcome(Expression):
@@ -976,45 +982,7 @@ def GetOutcomes(self, env, defs):
     if self.name in defs:
       return defs[self.name].GetOutcomes(env, defs)
     else:
-      return ListSet([self.name])
-
-
-class Set(object):
-  pass
-
-
-class ListSet(Set):
-
-  def __init__(self, elms):
-    self.elms = elms
-
-  def __str__(self):
-    return "ListSet%s" % str(self.elms)
-
-  def Intersect(self, that):
-    if not isinstance(that, ListSet):
-      return that.Intersect(self)
-    return ListSet([ x for x in self.elms if x in that.elms ])
-
-  def Union(self, that):
-    if not isinstance(that, ListSet):
-      return that.Union(self)
-    return ListSet(self.elms + [ x for x in that.elms if x not in self.elms ])
-
-  def IsEmpty(self):
-    return len(self.elms) == 0
-
-
-class Nothing(Set):
-
-  def Intersect(self, that):
-    return self
-
-  def Union(self, that):
-    return that
-
-  def IsEmpty(self):
-    return True
+      return set([self.name])
 
 
 class Operation(Expression):
@@ -1030,21 +998,23 @@ def Evaluate(self, env, defs):
     elif self.op == 'if':
       return False
     elif self.op == '==':
-      inter = self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
-      return not inter.IsEmpty()
+      inter = self.left.GetOutcomes(env, defs) & self.right.GetOutcomes(env, defs)
+      return bool(inter)
     else:
       assert self.op == '&&'
       return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)
 
   def GetOutcomes(self, env, defs):
     if self.op == '||' or self.op == ',':
-      return self.left.GetOutcomes(env, defs).Union(self.right.GetOutcomes(env, defs))
+      return self.left.GetOutcomes(env, defs) | self.right.GetOutcomes(env, defs)
     elif self.op == 'if':
-      if self.right.Evaluate(env, defs): return self.left.GetOutcomes(env, defs)
-      else: return Nothing()
+      if self.right.Evaluate(env, defs):
+        return self.left.GetOutcomes(env, defs)
+      else:
+        return set()
     else:
       assert self.op == '&&'
-      return self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
+      return self.left.GetOutcomes(env, defs) & self.right.GetOutcomes(env, defs)
 
 
 def IsAlpha(str):
@@ -1223,15 +1193,6 @@ def ParseCondition(expr):
   return ast
 
 
-class ClassifiedTest(object):
-
-  def __init__(self, case, outcomes):
-    self.case = case
-    self.outcomes = outcomes
-    self.parallel = self.case.parallel
-    self.disable_core_files = self.case.disable_core_files
-
-
 class Configuration(object):
   """The parsed contents of a configuration file"""
 
@@ -1240,23 +1201,18 @@ def __init__(self, sections, defs):
     self.defs = defs
 
   def ClassifyTests(self, cases, env):
-    sections = [s for s in self.sections if s.condition.Evaluate(env, self.defs)]
+    sections = [ s for s in self.sections if s.condition.Evaluate(env, self.defs) ]
     all_rules = reduce(list.__add__, [s.rules for s in sections], [])
     unused_rules = set(all_rules)
-    result = [ ]
-    all_outcomes = set([])
+    result = []
     for case in cases:
       matches = [ r for r in all_rules if r.Contains(case.path) ]
-      outcomes = set([])
-      for rule in matches:
-        outcomes = outcomes.union(rule.GetOutcomes(env, self.defs))
-        unused_rules.discard(rule)
-      if not outcomes:
-        outcomes = [PASS]
-      case.outcomes = outcomes
-      all_outcomes = all_outcomes.union(outcomes)
-      result.append(ClassifiedTest(case, outcomes))
-    return (result, list(unused_rules), all_outcomes)
+      outcomes_list = [ r.GetOutcomes(env, self.defs) for r in matches ]
+      outcomes = reduce(set.union, outcomes_list, set())
+      unused_rules.difference_update(matches)
+      case.outcomes = set(outcomes) or set([PASS])
+      result.append(case)
+    return result, unused_rules
 
 
 class Section(object):
@@ -1281,9 +1237,7 @@ def __init__(self, raw_path, path, value):
     self.value = value
 
   def GetOutcomes(self, env, defs):
-    set = self.value.GetOutcomes(env, defs)
-    assert isinstance(set, ListSet)
-    return set.elms
+    return self.value.GetOutcomes(env, defs)
 
   def Contains(self, path):
     if len(self.path) > len(path):
@@ -1428,6 +1382,7 @@ def ProcessOptions(options):
   options.mode = options.mode.split(',')
   options.run = options.run.split(',')
   options.skip_tests = options.skip_tests.split(',')
+  options.skip_tests.remove("")
   if options.run == [""]:
     options.run = None
   elif len(options.run) != 2:
@@ -1450,7 +1405,7 @@ def ProcessOptions(options):
     # tends to exaggerate the number of available cpus/cores.
     cores = os.environ.get('JOBS')
    options.j = int(cores) if cores is not None else multiprocessing.cpu_count()
-  if options.flaky_tests not in ["run", "skip", "dontcare"]:
+  if options.flaky_tests not in [RUN, SKIP, DONTCARE]:
     print "Unknown flaky-tests mode %s" % options.flaky_tests
     return False
   return True
@@ -1464,18 +1419,6 @@ def ProcessOptions(options):
 * %(fail)4d tests are expected to fail that we should fix\
 """
 
-def PrintReport(cases):
-  def IsFailOk(o):
-    return (len(o) == 2) and (FAIL in o) and (OKAY in o)
-  unskipped = [c for c in cases if not SKIP in c.outcomes]
-  print REPORT_TEMPLATE % {
-    'total': len(cases),
-    'skipped': len(cases) - len(unskipped),
-    'pass': len([t for t in unskipped if list(t.outcomes) == [PASS]]),
-    'fail_ok': len([t for t in unskipped if IsFailOk(t.outcomes)]),
-    'fail': len([t for t in unskipped if list(t.outcomes) == [FAIL]])
-  }
-
 
 class Pattern(object):
@@ -1534,6 +1477,14 @@ def FormatTime(d):
   return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis)
 
 
+def FormatTimedelta(td):
+  if hasattr(td.total, 'total_seconds'):
+    d = td.total_seconds()
+  else: # python2.6 compat
+    d = td.seconds + (td.microseconds / 10.0**6)
+  return FormatTime(d)
+
+
 def PrintCrashed(code):
   if utils.IsWindows():
     return "CRASHED"
@@ -1599,7 +1550,7 @@ def Main():
   repositories = [TestRepository(join(workspace, 'test', name)) for name in suites]
   repositories += [TestRepository(a) for a in options.suite]
 
-  root = LiteralTestSuite(repositories)
+  root = LiteralTestSuite(repositories, test_root)
   paths = ArgsToTestPaths(test_root, args, suites)
 
   # Check for --valgrind option. If enabled, we overwrite the special
@@ -1670,8 +1621,7 @@ def Main():
     }
     test_list = root.ListTests([], path, context, arch, mode)
     unclassified_tests += test_list
-    (cases, unused_rules, _) = (
-      config.ClassifyTests(test_list, env))
+    cases, unused_rules = config.ClassifyTests(test_list, env)
     if globally_unused_rules is None:
       globally_unused_rules = set(unused_rules)
     else:
@@ -1713,25 +1663,32 @@ def Main():
       print "Could not create the temporary directory", options.temp_dir
       sys.exit(1)
 
-  if options.report:
-    PrintReport(all_cases)
-
-  result = None
-  def DoSkip(case):
-    # A list of tests that should be skipped can be provided. This is
-    # useful for tests that fail in some environments, e.g., under coverage.
-    if options.skip_tests != [""]:
-      if [ st for st in options.skip_tests if st in case.case.file ]:
-        return True
-    if SKIP in case.outcomes or SLOW in case.outcomes:
+  def should_keep(case):
+    if any((s in case.file) for s in options.skip_tests):
+      return False
+    elif SKIP in case.outcomes:
+      return False
+    elif (options.flaky_tests == SKIP) and (set([SLOW, FLAKY]) & case.outcomes):
+      return False
+    else:
       return True
-    return FLAKY in case.outcomes and options.flaky_tests == SKIP
-  cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
+
+  cases_to_run = filter(should_keep, all_cases)
+
+  if options.report:
+    print(REPORT_TEMPLATE % {
+      'total': len(all_cases),
+      'skipped': len(all_cases) - len(cases_to_run),
+      'pass': len([t for t in cases_to_run if PASS in t.outcomes]),
+      'fail_ok': len([t for t in cases_to_run if t.outcomes == set([FAIL, OKAY])]),
+      'fail': len([t for t in cases_to_run if t.outcomes == set([FAIL])])
+    })
+
   if options.run is not None:
     # Must ensure the list of tests is sorted before selecting, to avoid
     # silent errors if this file is changed to list the tests in a way that
     # can be different in different machines
-    cases_to_run.sort(key=lambda c: (c.case.arch, c.case.mode, c.case.file))
+    cases_to_run.sort(key=lambda c: (c.arch, c.mode, c.file))
     cases_to_run = [ cases_to_run[i] for i in
                      xrange(options.run[0],
                             len(cases_to_run),
@@ -1756,13 +1713,11 @@ def DoSkip(case):
       # test output.
       print
      sys.stderr.write("--- Total time: %s ---\n" % FormatTime(duration))
-      timed_tests = [ t.case for t in cases_to_run if not t.case.duration is None ]
+      timed_tests = [ t for t in cases_to_run if not t.duration is None ]
      timed_tests.sort(lambda a, b: a.CompareTime(b))
-      index = 1
-      for entry in timed_tests[:20]:
-        t = FormatTime(entry.duration.total_seconds())
-        sys.stderr.write("%4i (%s) %s\n" % (index, t, entry.GetLabel()))
-        index += 1
+      for i, entry in enumerate(timed_tests[:20], start=1):
+        t = FormatTimedelta(entry.duration)
+        sys.stderr.write("%4i (%s) %s\n" % (i, t, entry.GetLabel()))
 
   return result
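Editor's note, not part of the patch: the sketch below is a minimal, self-contained illustration of the set-based outcome handling this change introduces in tools/test.py. Rule outcomes become plain Python sets, a case with no matching rule defaults to set([PASS]), and the skip decision is made directly on those sets. FakeCase, classify, should_keep's signature, and the sample paths/rules are hypothetical stand-ins for TestCase, Configuration.ClassifyTests, the real should_keep closure, and real status-file rules; the syntax is kept Python 2/3 compatible rather than the harness's Python 2.

# Illustrative sketch only -- assumptions noted in the lead-in above.
from functools import reduce  # built-in on Python 2, import needed on Python 3

PASS, FAIL, SKIP, SLOW, FLAKY = 'pass', 'fail', 'skip', 'slow', 'flaky'

class FakeCase(object):
  """Hypothetical stand-in for a TestCase; carries only path and outcomes."""
  def __init__(self, path):
    self.path = path
    self.outcomes = set()

def classify(cases, rules):
  """Stand-in for Configuration.ClassifyTests: union all matching rule sets."""
  for case in cases:
    matched = [outcomes for path, outcomes in rules if path == case.path]
    # Empty union is falsy, so unmatched cases default to {PASS}.
    case.outcomes = reduce(set.union, matched, set()) or set([PASS])
  return cases

def should_keep(case, flaky_tests_mode):
  """Mirrors the new should_keep() filter in Main()."""
  if SKIP in case.outcomes:
    return False
  if flaky_tests_mode == SKIP and (set([SLOW, FLAKY]) & case.outcomes):
    return False
  return True

cases = classify([FakeCase('parallel/test-a'), FakeCase('message/eval_messages')],
                 [('message/eval_messages', set([SLOW]))])
print([(c.path, sorted(c.outcomes), should_keep(c, SKIP)) for c in cases])
# [('parallel/test-a', ['pass'], True), ('message/eval_messages', ['slow'], False)]

Using built-in sets here is the design point of the patch: intersection, union, and emptiness checks come for free (&, |, truthiness), which is what let the ListSet/Nothing classes and the ClassifiedTest wrapper be deleted.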