mirror of
https://fuchsia.googlesource.com/third_party/pigweed.googlesource.com/pigweed/pigweed
synced 2024-09-20 22:00:58 +00:00
Update test runner to run tests from metadata
This change adds functionality to the test runner script to search a directory tree for generated test metadata files, build up a graph of test groups, and run a user-specified list of groups with all of their dependencies. Tests are run through an external runner executable provided on the command line. The executable is called with the path to a unit test binary file. The old test runner functionality to run a single test directly from its binary path is maintained, but now uses a runner executable. This is configured in GN using the build variable "pw_automatic_test_runner", which replaces the functionality of "pw_unit_test_create_run_targets". Change-Id: Ic1ed959d87a71266408e1f26e0ae1bf906eebfb0
This commit is contained in:
parent
38426d7416
commit
8403f0a2db
|
@ -12,39 +12,328 @@
|
|||
# License for the specific language governing permissions and limitations under
|
||||
# the License.
|
||||
|
||||
"""Script which runs Pigweed unit tests built using GN.
|
||||
|
||||
Currently, only a single test can be run at a time. The build path and GN target
|
||||
name of the test are given to the script.
|
||||
"""
|
||||
"""Runs Pigweed unit tests built using GN."""
|
||||
|
||||
import argparse
|
||||
import pathlib
|
||||
import enum
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import shlex
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
from typing import Dict, Iterable, List, Sequence, Set, Tuple
|
||||
|
||||
import coloredlogs
|
||||
|
||||
# Global logger for the script.
|
||||
_LOG: logging.Logger = logging.getLogger('pw_test_runner')
|
||||
|
||||
|
||||
def parse_args() -> argparse.Namespace:
    """Parses command-line arguments.

    Returns:
        Namespace with root, runner, verbose, runner_args, group, and test
        attributes.
    """

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--root', type=str, default='out',
                        help='Path to the root build directory')
    parser.add_argument('-r', '--runner', type=str, required=True,
                        help='Executable which runs a test on the target')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Output additional logs as the script runs')
    parser.add_argument('runner_args', nargs=argparse.REMAINDER,
                        help='Arguments to forward to the test runner')

    # The runner script can either run binaries directly or run groups of
    # tests discovered from build metadata; the two modes are mutually
    # exclusive.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-g', '--group', action='append',
                       help='Test groups to run')
    group.add_argument('-t', '--test', action='append',
                       help='Test binaries to run')

    return parser.parse_args()
|
||||
|
||||
|
||||
class TestResult(enum.Enum):
    """Possible outcomes of a single unit test run."""

    # The test has not been run yet.
    UNKNOWN = 0
    # The test ran and exited successfully.
    SUCCESS = 1
    # The test ran and reported a failure.
    FAILURE = 2
|
||||
|
||||
|
||||
class Test:
    """A unit test executable produced by the build.

    Identity (equality and hashing) is based solely on the binary's file
    path, so the same binary referenced by multiple groups compares equal
    and deduplicates in sets and dicts.
    """

    def __init__(self, name: str, file_path: str):
        self.name: str = name
        self.file_path: str = file_path
        # Tests start out not-yet-run; a TestRunner updates this as it goes.
        self.status: TestResult = TestResult.UNKNOWN

    def __repr__(self) -> str:
        return f'Test({self.name})'

    def __eq__(self, other: object) -> bool:
        if isinstance(other, Test):
            return self.file_path == other.file_path
        return NotImplemented

    def __hash__(self) -> int:
        return hash(self.file_path)
|
||||
|
||||
|
||||
class TestGroup:
    """Graph node representing a group of unit tests.

    Groups may depend on other groups; all_test_dependencies() walks the
    dependency graph to collect every test transitively reachable from this
    group, visiting each group at most once.
    """

    def __init__(self, name: str, tests: Iterable['Test']):
        self._name: str = name
        self._deps: Iterable['TestGroup'] = []
        self._tests: Iterable['Test'] = tests

    def set_deps(self, deps: Iterable['TestGroup']) -> None:
        """Updates the dependency list of this group."""
        self._deps = deps

    def all_test_dependencies(self) -> List['Test']:
        """Returns a list of all tests in this group and its dependencies."""
        return list(self._all_test_dependencies(set()))

    def _all_test_dependencies(self, processed_groups: Set[str]) -> Set['Test']:
        """Recursively collects tests from this group and its dependencies."""
        if self._name in processed_groups:
            return set()

        # Mark this group as visited *before* recursing into dependencies so
        # that a dependency cycle between groups cannot recurse forever.
        processed_groups.add(self._name)

        tests: Set['Test'] = set()
        for dep in self._deps:
            tests.update(dep._all_test_dependencies(processed_groups))

        tests.update(self._tests)
        return tests

    def __repr__(self) -> str:
        return f'TestGroup({self._name})'
|
||||
|
||||
|
||||
class TestRunner:
    """Runs unit tests by calling out to an external runner executable."""

    def __init__(self,
                 executable: str,
                 args: Sequence[str],
                 tests: Iterable['Test']):
        self._executable: str = executable
        self._args: Sequence[str] = args
        self._tests: List['Test'] = list(tests)

    def run_tests(self) -> None:
        """Runs all registered unit tests through the runner script."""

        for test in self._tests:
            # The runner is invoked with the test binary path followed by any
            # forwarded user-provided arguments.
            command = [self._executable, test.file_path, *self._args]
            try:
                status = subprocess.call(command)
            except OSError as err:
                # subprocess.call() never raises CalledProcessError; the
                # failure mode here is launching the runner itself (missing
                # or non-executable file), which raises OSError. Abort the
                # run, as subsequent tests would fail the same way.
                _LOG.error(err)
                return

            if status == 0:
                test.status = TestResult.SUCCESS
            else:
                test.status = TestResult.FAILURE

    def all_passed(self) -> bool:
        """Returns true if all unit tests passed."""
        return all(test.status is TestResult.SUCCESS for test in self._tests)
|
||||
|
||||
|
||||
# Filename extension for unit test metadata files.
METADATA_EXTENSION = '.testinfo.json'


def find_test_metadata(root: str) -> List[str]:
    """Locates all test metadata files located within a directory tree.

    Args:
        root: Directory at which to start the recursive search.

    Returns:
        List of paths to every file ending in METADATA_EXTENSION under root.
    """

    metadata: List[str] = []
    for path, _, files in os.walk(root):
        for filename in files:
            # Skip everything except generated test metadata files.
            if not filename.endswith(METADATA_EXTENSION):
                continue

            full_path = os.path.join(path, filename)
            _LOG.debug('Found group metadata at %s', full_path)
            metadata.append(full_path)

    return metadata
|
||||
|
||||
|
||||
# TODO(frolv): This is copied from the Python runner script.
# It should be extracted into a library and imported instead.
def find_binary(target: str) -> str:
    """Tries to find a binary for a gn build target.

    Args:
        target: Relative filesystem path to the target's output directory and
            target name, separated by a colon.

    Returns:
        Full path to the target's binary.

    Raises:
        FileNotFoundError: No binary found for target.
    """

    target_path, target_name = target.split(':')

    # Try the bare target name first, then common executable suffixes.
    for suffix in ('', '.elf', '.exe'):
        candidate = f'{target_path}/{target_name}{suffix}'
        if os.path.isfile(candidate):
            return candidate

    raise FileNotFoundError(
        f'Could not find output binary for build target {target}')
|
||||
|
||||
|
||||
def parse_metadata(metadata: List[str], root: str) -> Dict[str, TestGroup]:
    """Builds a graph of test group objects from metadata.

    Args:
        metadata: List of paths to JSON test metadata files.
        root: Root output directory of the build.

    Returns:
        Map of group name to TestGroup object. All TestGroup objects are fully
        populated with the paths to their unit tests and references to their
        dependencies.
    """

    def canonicalize(path: str) -> str:
        """Removes a trailing slash from a GN target's directory.

        '//module:target' -> '//module:target'
        '//module/:target' -> '//module:target'
        """
        index = path.find(':')
        if index == -1 or path[index - 1] != '/':
            return path
        return path[:index - 1] + path[index:]

    # Dependencies are recorded by name here and resolved to TestGroup
    # objects in a second pass, once every group has been created.
    group_deps: List[Tuple[str, List[str]]] = []
    all_tests: Dict[str, Test] = {}
    test_groups: Dict[str, TestGroup] = {}
    num_tests = 0

    for path in metadata:
        with open(path, 'r') as metadata_file:
            metadata_list = json.load(metadata_file)

        deps: List[str] = []
        tests: List[Test] = []

        # Each metadata file holds a list of entries keyed by 'type': a
        # 'self' entry naming the group, plus 'dep' and 'test' entries.
        for entry in metadata_list:
            if entry['type'] == 'self':
                group_name = canonicalize(entry['name'])
            elif entry['type'] == 'dep':
                deps.append(canonicalize(entry['group']))
            elif entry['type'] == 'test':
                test_directory = os.path.join(root, entry['test_directory'])
                test_binary = find_binary(
                    f'{test_directory}:{entry["test_name"]}')

                # Share a single Test object for a binary that appears in
                # multiple groups, so it also shares a single result.
                if test_binary not in all_tests:
                    all_tests[test_binary] = Test(
                        entry['test_name'], test_binary)

                tests.append(all_tests[test_binary])

        if deps:
            group_deps.append((group_name, deps))

        num_tests += len(tests)
        test_groups[group_name] = TestGroup(group_name, tests)

    # Second pass: link each group to the TestGroup objects of its deps.
    for name, deps in group_deps:
        test_groups[name].set_deps([test_groups[dep] for dep in deps])

    _LOG.info('Found %d test groups (%d tests).', len(metadata), num_tests)
    return test_groups
|
||||
|
||||
|
||||
def tests_from_groups(args: argparse.Namespace) -> List[Test]:
    """Returns unit tests belonging to test groups and their dependencies.

    If args.group is nonempty, only searches groups specified there.
    Otherwise, finds tests from all known test groups.
    """

    _LOG.info('Scanning for tests...')
    metadata = find_test_metadata(args.root)
    test_groups = parse_metadata(metadata, args.root)

    # When no groups were requested, default to running every known group.
    groups_to_run = args.group if args.group else test_groups.keys()
    # Collected as a set so tests shared between groups run only once.
    tests_to_run: Set[Test] = set()

    for name in groups_to_run:
        try:
            tests_to_run.update(test_groups[name].all_test_dependencies())
        except KeyError:
            _LOG.error('Unknown test group: %s', name)
            sys.exit(1)

    _LOG.info('Running test groups %s', ', '.join(groups_to_run))
    return list(tests_to_run)
|
||||
|
||||
|
||||
def tests_from_paths(paths: List[str]) -> List[Test]:
    """Returns a list of tests from test executable paths."""

    # A test's name is its binary filename without the extension.
    return [
        Test(os.path.splitext(os.path.basename(path))[0], path)
        for path in paths
    ]
|
||||
|
||||
|
||||
def main() -> int:
    """Runs some unit tests.

    Returns:
        0 if every test passed, 1 on any failure or usage error.
    """

    args = parse_args()

    # Configure colored log output before anything else logs.
    log_level = 'DEBUG' if args.verbose else 'INFO'
    coloredlogs.install(level=log_level,
                        level_styles={'debug': {'color': 244},
                                      'error': {'color': 'red'}},
                        fmt='%(asctime)s | %(message)s')

    runner_args = args.runner_args

    # argparse.REMAINDER collects everything after the known arguments. A
    # leading '--' separator is required so that mistyped flags are not
    # silently forwarded to the runner; suggest the corrected command line.
    if runner_args:
        if runner_args[0] != '--':
            _LOG.error('%s: Unrecognized argument: %s',
                       sys.argv[0], runner_args[0])
            _LOG.info('')
            _LOG.info('Did you mean to pass this argument to the runner?')
            _LOG.info('Insert a -- in front of it to forward it through:')
            _LOG.info('')

            index = sys.argv.index(runner_args[0])
            fixed_cmd = [*sys.argv[:index], '--', *sys.argv[index:]]

            _LOG.info('  %s', ' '.join(shlex.quote(arg) for arg in fixed_cmd))
            _LOG.info('')

            return 1

        runner_args = runner_args[1:]

    # Tests are either listed explicitly on the command line or discovered
    # from build metadata through test groups.
    if args.test:
        tests = tests_from_paths(args.test)
    else:
        tests = tests_from_groups(args)

    runner = TestRunner(args.runner, runner_args, tests)
    runner.run_tests()

    return 0 if runner.all_passed() else 1
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
|
|
@ -17,9 +17,9 @@ import("$dir_pw_build/python_script.gni")
|
|||
|
||||
# Creates an executable target for a unit test.
|
||||
#
|
||||
# If the pw_unit_test_create_run_targets variable is set to true, this template
|
||||
# also creates a "${test_name}_run" target which runs the unit test executable
|
||||
# after building it.
|
||||
# If the pw_automatic_test_runner variable is set, this template also creates a
|
||||
# "${test_name}_run" target which runs the unit test executable after building
|
||||
# it.
|
||||
#
|
||||
# This template accepts all of the regular "executable" target args.
|
||||
template("pw_test") {
|
||||
|
@ -49,9 +49,9 @@ template("pw_test") {
|
|||
deps += [ pw_unit_test_main ]
|
||||
}
|
||||
|
||||
if (pw_unit_test_create_run_targets) {
|
||||
# When the run targets arg is set, create an action which runs the unit test
|
||||
# executable using the test runner script.
|
||||
if (pw_automatic_test_runner != "") {
|
||||
# When the automatic runner is set, create an action which runs the unit
|
||||
# test executable using the test runner script.
|
||||
_run_action_name = _test_target_name + "_run"
|
||||
|
||||
pw_python_script(_run_action_name) {
|
||||
|
@ -59,7 +59,12 @@ template("pw_test") {
|
|||
":$_test_target_name",
|
||||
]
|
||||
script = "$dir_pw_unit_test/py/test_runner.py"
|
||||
args = [ get_path_info("$target_out_dir:$_test_target_name", "abspath") ]
|
||||
args = [
|
||||
"--runner",
|
||||
pw_automatic_test_runner,
|
||||
"--test",
|
||||
get_path_info("$target_out_dir:$_test_target_name", "abspath"),
|
||||
]
|
||||
stamp = true
|
||||
}
|
||||
}
|
||||
|
@ -73,13 +78,16 @@ template("pw_test") {
|
|||
# tests: List of pw_test targets for each of the tests in the group.
|
||||
# group_deps: Optional pw_test_group targets on which this group depends.
|
||||
template("pw_test_group") {
|
||||
_group_target = target_name
|
||||
_group_deps_metadata = []
|
||||
_deps = invoker.tests
|
||||
|
||||
if (defined(invoker.group_deps)) {
|
||||
# If the group specified any other group dependencies, create a metadata
|
||||
# entry for each of them indicating that they are another group and a group
|
||||
# target to collect that metadata.
|
||||
_group_deps = []
|
||||
foreach(dep, invoker.group_deps) {
|
||||
_group_deps += [
|
||||
_group_deps_metadata += [
|
||||
{
|
||||
type = "dep"
|
||||
group = get_path_info(dep, "abspath")
|
||||
|
@ -87,31 +95,38 @@ template("pw_test_group") {
|
|||
]
|
||||
}
|
||||
|
||||
_metadata_group_target = "${target_name}_pw_test_group_metadata"
|
||||
group(_metadata_group_target) {
|
||||
metadata = {
|
||||
group_deps = _group_deps
|
||||
|
||||
# Metadata from the group's own unit test targets is forwarded through
|
||||
# the group dependencies group. This entry is listed as a "walk_key" in
|
||||
# the generated file so that only test targets' metadata (not group
|
||||
# targets) appear in the output.
|
||||
propagate_metadata_from = invoker.tests
|
||||
}
|
||||
deps = invoker.tests + invoker.group_deps
|
||||
}
|
||||
|
||||
_test_group_deps = [ ":$_metadata_group_target" ]
|
||||
} else {
|
||||
_test_group_deps = invoker.tests
|
||||
_deps += invoker.group_deps
|
||||
}
|
||||
|
||||
generated_file(target_name) {
|
||||
_metadata_group_target = "${target_name}_pw_test_group_metadata"
|
||||
group(_metadata_group_target) {
|
||||
metadata = {
|
||||
group_deps = _group_deps_metadata
|
||||
self = [
|
||||
{
|
||||
type = "self"
|
||||
name = get_path_info(":$_group_target", "abspath")
|
||||
},
|
||||
]
|
||||
|
||||
# Metadata from the group's own unit test targets is forwarded through
|
||||
# the group dependencies group. This entry is listed as a "walk_key" in
|
||||
# the generated file so that only test targets' metadata (not group
|
||||
# targets) appear in the output.
|
||||
propagate_metadata_from = invoker.tests
|
||||
}
|
||||
deps = _deps
|
||||
}
|
||||
|
||||
_test_group_deps = [ ":$_metadata_group_target" ]
|
||||
|
||||
generated_file(_group_target) {
|
||||
outputs = [
|
||||
"$target_out_dir/$target_name.utmeta.json",
|
||||
"$target_out_dir/$target_name.testinfo.json",
|
||||
]
|
||||
data_keys = [
|
||||
"group_deps",
|
||||
"self",
|
||||
"tests",
|
||||
]
|
||||
walk_keys = [ "propagate_metadata_from" ]
|
||||
|
|
|
@ -46,19 +46,19 @@ pw_target_toolchain = ""
|
|||
# Implementation of a main function for "pw_test" unit test binaries.
|
||||
pw_unit_test_main = "$dir_pw_unit_test:main"
|
||||
|
||||
# Whether GN unit test runner targets should be created.
|
||||
# Path to a test runner to automatically run unit tests after they are built.
|
||||
#
|
||||
# If set to true, the pw_test() template creates an action that invokes the test
|
||||
# runner script on each test executable. If false, the pw_test() template only
|
||||
# creates a test executable target.
|
||||
# If set, the pw_test() template creates an action that invokes the test runner
|
||||
# on each test executable. If unset, the pw_test() template only creates a test
|
||||
# executable target.
|
||||
#
|
||||
# This should be enabled for targets which support parallelized running
|
||||
# of unit tests, such as desktops with multiple cores.
|
||||
pw_unit_test_create_run_targets = false
|
||||
# This should only be enabled for targets which support parallelized running of
|
||||
# unit tests, such as desktops with multiple cores.
|
||||
pw_automatic_test_runner = ""
|
||||
|
||||
################################# BACKENDS #####################################
|
||||
|
||||
# This section of the file defines empty variables for each of the pigweed
|
||||
# This section of the file defines empty variables for each of the Pigweed
|
||||
# facades that expect a backend. This allows minimal breakages when adding new
|
||||
# facades. Instead of GN always halting due to encountering an undefined
|
||||
# variable, GN will only emit an error if something in the build depends on the
|
||||
|
@ -67,5 +67,5 @@ pw_unit_test_create_run_targets = false
|
|||
# All of these should default to empty strings. For target-specific defaults,
|
||||
# modify these variables in a target configuration file.
|
||||
|
||||
# Declare dir_pw_dumb_io_backend
|
||||
# Backend for the dir_pw_dumb_io module.
|
||||
dir_pw_dumb_io_backend = ""
|
||||
|
|
|
@ -16,6 +16,3 @@ import("$dir_pigweed/pw_vars_default.gni")
|
|||
|
||||
# Configure backend for pw_dumb_io facade.
|
||||
dir_pw_dumb_io_backend = "$dir_pw_dumb_io_stdio"
|
||||
|
||||
# Tests can always be run in parallel when building for the host.
|
||||
pw_unit_test_create_run_targets = true
|
||||
|
|
|
@ -21,3 +21,5 @@ declare_args() {
|
|||
|
||||
pw_executable_config.bloaty_config_file =
|
||||
get_path_info("linux.bloaty", "abspath")
|
||||
|
||||
pw_automatic_test_runner = get_path_info("run_test", "abspath")
|
||||
|
|
|
@ -21,3 +21,5 @@ declare_args() {
|
|||
|
||||
pw_executable_config.bloaty_config_file =
|
||||
get_path_info("macos.bloaty", "abspath")
|
||||
|
||||
pw_automatic_test_runner = get_path_info("run_test", "abspath")
|
||||
|
|
18
targets/host/run_test
Executable file
18
targets/host/run_test
Executable file
|
@ -0,0 +1,18 @@
|
|||
#!/bin/sh
# Copyright 2019 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.

# Test runner for host Linux/macOS systems.
# Called with the path to a test binary and directly executes it.
#
# "$@" (rather than unquoted $*) keeps each argument intact even when a test
# binary path contains spaces, and exec makes the test's exit status the
# runner's exit status.
exec "$@"
|
Loading…
Reference in New Issue
Block a user