Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 8 additions & 13 deletions src/rez/cli/test.py
Original file line number Diff line number Diff line change
Expand Up @@ -96,19 +96,14 @@ def command(opts, parser, extra_arg_groups=None):
print('\n'.join(test_names))
sys.exit(0)

if opts.TEST:
run_test_names = opts.TEST
else:
# if no tests are explicitly specified, then run only those with a
# 'default' run_on tag
run_test_names = runner.get_test_names(run_on=["default"])

if not run_test_names:
print(
"No tests with 'default' run_on tag found in %s" % uri,
file=sys.stderr
)
sys.exit(0)
run_test_names = runner.find_requested_test_names(opts.TEST)

if not run_test_names:
print(
"No tests with 'default' run_on tag found in %s" % uri,
file=sys.stderr
)
sys.exit(0)

exitcode = 0

Expand Down
11 changes: 11 additions & 0 deletions src/rez/package_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
from rez.utils.logging_ import print_info, print_warning, print_error
from rez.version import Requirement, RequirementList
from shlex import quote
import fnmatch
import time
import sys
import os
Expand Down Expand Up @@ -206,6 +207,16 @@ def get_test_names(self, run_on=None):

return self.get_package_test_names(package, run_on=run_on)

def find_requested_test_names(self, requested_tests):
    """Find package test names matching the requested patterns.

    Args:
        requested_tests (list[str]): Test names or fnmatch-style wildcard
            patterns (e.g. ``"command_as_*"``). If empty, the tests tagged
            with the 'default' run_on tag are selected instead.

    Returns:
        set[str]: Names of the package tests that matched.
    """
    # if no tests are explicitly specified, then run only those with a
    # 'default' run_on tag
    run_on = ["default"] if not requested_tests else None
    pkg_test_names = self.get_test_names(run_on=run_on)

    # Bug fix: with no requested patterns, the pattern loop below never
    # executes, so the 'default'-tagged tests were silently dropped and an
    # empty set was always returned. Return the fetched names directly.
    if not requested_tests:
        return set(pkg_test_names)

    requested_test_names = set()
    for requested_test in requested_tests:
        # fnmatch.filter keeps every known test name matching the pattern
        requested_test_names.update(fnmatch.filter(pkg_test_names, requested_test))
    return requested_test_names

@property
def num_tests(self):
"""Get the number of tests, regardless of stats.
Expand Down
181 changes: 181 additions & 0 deletions src/rez/tests/test_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -83,3 +83,184 @@ def _get_test_result(self, runner, test_name):
(result for result in runner.test_results.test_results if result.get("test_name") == test_name),
None
)

def test_wildcard_01(self):
    """package.py unit tests are correctly found with a wildcard then run in a testing environment"""
    self.inject_python_repo()
    ctx = ResolvedContext(["testing_obj", "python"])
    # This will get us more code coverage :)
    self.inject_python_repo()
    runner = PackageTestRunner(
        package_request="testing_obj",
        package_paths=ctx.package_paths,
    )

    # Two tests match the "command_as_*" pattern.
    matched = runner.find_requested_test_names(["command_as_*"])
    self.assertEqual(2, len(matched))

    for name in matched:
        runner.run_test(name)

    self.assertEqual(runner.test_results.num_tests, 2)

    # Each matched test must have finished with its expected status.
    expectations = (
        ("command_as_string_success", "success", "command_as_string_success did not succeed"),
        ("command_as_string_fail", "failed", "command_as_string_fail did not fail"),
    )
    for test_name, expected_status, message in expectations:
        self.assertEqual(
            self._get_test_result(runner, test_name)["status"],
            expected_status,
            message,
        )

def test_wildcard_02(self):
    """
    package.py unit tests are correctly found with a wildcard + a package name then run
    in a testing environment
    """
    self.inject_python_repo()
    ctx = ResolvedContext(["testing_obj", "python"])
    # This will get us more code coverage :)
    self.inject_python_repo()
    runner = PackageTestRunner(
        package_request="testing_obj",
        package_paths=ctx.package_paths,
    )

    # A wildcard pattern plus one exact test name: three tests in total.
    matched = runner.find_requested_test_names(["command_as_*", "check_car_ideas"])
    self.assertEqual(3, len(matched))

    for name in matched:
        runner.run_test(name)

    self.assertEqual(runner.test_results.num_tests, 3)

    # Each matched test must have finished with its expected status.
    expectations = (
        ("check_car_ideas", "success", "check_car_ideas did not succeed"),
        ("command_as_string_success", "success", "command_as_string_success did not succeed"),
        ("command_as_string_fail", "failed", "command_as_string_fail did not fail"),
    )
    for test_name, expected_status, message in expectations:
        self.assertEqual(
            self._get_test_result(runner, test_name)["status"],
            expected_status,
            message,
        )

def test_wildcard_03(self):
    """
    package.py unit tests are correctly found with a wildcard equivalent to 'default' then run
    in a testing environment
    """
    self.inject_python_repo()
    ctx = ResolvedContext(["testing_obj", "python"])
    # This will get us more code coverage :)
    self.inject_python_repo()
    runner = PackageTestRunner(
        package_request="testing_obj",
        package_paths=ctx.package_paths,
    )

    # "*" matches every test defined by the package: four in total.
    matched = runner.find_requested_test_names(["*"])
    self.assertEqual(4, len(matched))

    for name in matched:
        runner.run_test(name)

    self.assertEqual(runner.test_results.num_tests, 4)

    # Each matched test must have finished with its expected status.
    expectations = (
        ("check_car_ideas", "success", "check_car_ideas did not succeed"),
        ("move_meeting_to_noon", "failed", "move_meeting_to_noon did not fail"),
        ("command_as_string_success", "success", "command_as_string_success did not succeed"),
        ("command_as_string_fail", "failed", "command_as_string_fail did not fail"),
    )
    for test_name, expected_status, message in expectations:
        self.assertEqual(
            self._get_test_result(runner, test_name)["status"],
            expected_status,
            message,
        )

def test_wildcard_04(self):
    """
    package.py unit tests are correctly found with a wildcard which get all test starting by 'c' and
    the second letter is 'h' or 'o' then run in a testing environment
    """
    self.inject_python_repo()
    ctx = ResolvedContext(["testing_obj", "python"])
    # This will get us more code coverage :)
    self.inject_python_repo()
    runner = PackageTestRunner(
        package_request="testing_obj",
        package_paths=ctx.package_paths,
    )

    # "c[ho]*" matches "ch..." and "co..." test names: three in total.
    matched = runner.find_requested_test_names(["c[ho]*"])
    self.assertEqual(3, len(matched))

    for name in matched:
        runner.run_test(name)

    self.assertEqual(runner.test_results.num_tests, 3)

    # Each matched test must have finished with its expected status.
    expectations = (
        ("check_car_ideas", "success", "check_car_ideas did not succeed"),
        ("command_as_string_success", "success", "command_as_string_success did not succeed"),
        ("command_as_string_fail", "failed", "command_as_string_fail did not fail"),
    )
    for test_name, expected_status, message in expectations:
        self.assertEqual(
            self._get_test_result(runner, test_name)["status"],
            expected_status,
            message,
        )

def test_wildcard_05(self):
    """
    package.py unit tests are correctly found with a wildcard which get all test which is not starting by 'c'
    then run in a testing environment
    """
    self.inject_python_repo()
    ctx = ResolvedContext(["testing_obj", "python"])
    # This will get us more code coverage :)
    self.inject_python_repo()
    runner = PackageTestRunner(
        package_request="testing_obj",
        package_paths=ctx.package_paths,
    )

    # "[!c]*" excludes every "c..." test name, leaving exactly one.
    matched = runner.find_requested_test_names(["[!c]*"])
    self.assertEqual(1, len(matched))

    for name in matched:
        runner.run_test(name)

    self.assertEqual(runner.test_results.num_tests, 1)

    self.assertEqual(
        self._get_test_result(runner, "move_meeting_to_noon")["status"],
        "failed",
        "move_meeting_to_noon did not fail",
    )
Loading