diff --git a/.buildkite/common.py b/.buildkite/common.py
index a2487aa1f44..196435bb2e0 100644
--- a/.buildkite/common.py
+++ b/.buildkite/common.py
@@ -180,16 +180,7 @@ def random_str(k: int):
 
 def ab_revision_build(revision):
     """Generate steps for building an A/B-test revision"""
-    # Copied from framework/ab_test. Double dollar signs needed for Buildkite (otherwise it will try to interpolate itself)
-    return [
-        f"commitish={revision}",
-        f"if ! git cat-file -t $$commitish; then commitish=origin/{revision}; fi",
-        "branch_name=tmp-$$commitish",
-        "git branch $$branch_name $$commitish",
-        f"git clone -b $$branch_name . build/{revision}",
-        f"cd build/{revision} && ./tools/devtool -y build --release && cd -",
-        "git branch -D $$branch_name",
-    ]
+    return [f"./tools/devtool -y build --rev {revision} --release"]
 
 
 def shared_build():
diff --git a/.buildkite/pipeline_perf.py b/.buildkite/pipeline_perf.py
index 6f82c3c86d3..e8169bfb2cd 100755
--- a/.buildkite/pipeline_perf.py
+++ b/.buildkite/pipeline_perf.py
@@ -98,7 +98,9 @@
 pytest_opts = ""
 if REVISION_A:
     devtool_opts += " --ab"
-    pytest_opts = f"{ab_opts} run {REVISION_A} {REVISION_B} --test {test_path}"
+    pytest_opts = (
+        f"{ab_opts} run build/{REVISION_A}/ build/{REVISION_B} --test {test_path}"
+    )
 else:
     # Passing `-m ''` below instructs pytest to collect tests regardless of
     # their markers (e.g. it will collect both tests marked as nonci, and
diff --git a/tests/README.md b/tests/README.md
index 95a86215d9e..4fee1baee30 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -169,16 +169,21 @@ function to [`.buildkite/pipeline_perf.py`](../.buildkite/pipeline_perf.py).
 To manually run an A/B-Test, use
 
 ```sh
-tools/devtool -y test --ab [optional arguments to ab_test.py] run <revision A> <revision B> --test <test>
+tools/devtool -y test --ab [optional arguments to ab_test.py] run <dir A> <dir B> --test <test>
 ```
 
-Here, _revision A_ and _revision B_ can be arbitrary git objects, such as commit
-SHAs, branches or tags. For example, to compare boottime of microVMs between
-Firecracker binaries compiled from the `main` branch and the `HEAD` of your
-current branch, run
+Here, _dir A_ and _dir B_ are directories containing firecracker and jailer
+binaries whose performance characteristics you wish to compare. You can use
+`./tools/devtool build --rev <revision> --release` to compile binaries from an
+arbitrary git object (commit SHAs, branches, tags etc.). This will create
+sub-directories in `build` containing the binaries. For example, to compare
+boottime of microVMs between Firecracker binaries compiled from the `main`
+branch and the `HEAD` of your current branch, run
 
 ```sh
-tools/devtool -y test --ab run main HEAD --test integration_tests/performance/test_boottime.py::test_boottime
+tools/devtool -y build --rev main --release
+tools/devtool -y build --rev HEAD --release
+tools/devtool -y test --ab -- run build/main build/HEAD --test integration_tests/performance/test_boottime.py::test_boottime
 ```
 
 #### How to Write an A/B-Compatible Test and Common Pitfalls
diff --git a/tests/conftest.py b/tests/conftest.py
index 8e4c2e2848e..c77fee09039 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -99,19 +99,7 @@ def record_props(request, record_property):
 
     # Extract attributes from the docstrings
     function_docstring = inspect.getdoc(request.function)
-    description = []
-    attributes = {}
-    for line in function_docstring.split("\n"):
-        # extract tags like @type, @issue, etc
-        match = re.match(r"\s*@(?P<attr>\w+):\s*(?P<value>\w+)", line)
-        if match:
-            attr, value = match["attr"], match["value"]
-            attributes[attr] = value
-        else:
-            description.append(line)
-    for attr_name, attr_value in attributes.items():
-        record_property(attr_name, attr_value)
-    record_property("description", "".join(description))
+    record_property("description", function_docstring)
 
 
 def pytest_runtest_logreport(report):
diff --git a/tests/framework/ab_test.py b/tests/framework/ab_test.py
index b90cdfd1503..cf909d44fa6 100644
--- a/tests/framework/ab_test.py
+++ b/tests/framework/ab_test.py
@@ -21,19 +21,20 @@ of both invocations is the same, the test passes (with us being alerted to this
 situtation via a special pipeline that does not block PRs). If not, it fails,
 preventing PRs from introducing new vulnerable dependencies.
 """
-import contextlib
 import os
 import statistics
 from pathlib import Path
+from tempfile import TemporaryDirectory
 from typing import Callable, List, Optional, TypeVar
 
 import scipy
 
 from framework import utils
+from framework.defs import FC_WORKSPACE_DIR
 from framework.microvm import Microvm
 from framework.utils import CommandReturn
 from framework.with_filelock import with_filelock
-from host_tools.cargo_build import get_binary, get_firecracker_binaries
+from host_tools.cargo_build import DEFAULT_TARGET_DIR, get_firecracker_binaries
 
 # Locally, this will always compare against main, even if we try to merge into, say, a feature branch.
 # We might want to do a more sophisticated way to determine a "parent" branch here.
@@ -82,19 +83,41 @@ def git_ab_test(
 
     (alternatively, your comparator can perform any required assertions and not return anything).
     """
-    dir_a = git_clone(Path("../build") / a_revision, a_revision)
-    result_a = test_runner(dir_a, True)
+    with TemporaryDirectory() as tmp_dir:
+        dir_a = git_clone(Path(tmp_dir) / a_revision, a_revision)
+        result_a = test_runner(dir_a, True)
 
-    if b_revision:
-        dir_b = git_clone(Path("../build") / b_revision, b_revision)
-    else:
-        # By default, pytest execution happens inside the `tests` subdirectory. Pass the repository root, as
-        # documented.
-        dir_b = Path.cwd().parent
-    result_b = test_runner(dir_b, False)
+        if b_revision:
+            dir_b = git_clone(Path(tmp_dir) / b_revision, b_revision)
+        else:
+            # By default, pytest execution happens inside the `tests` subdirectory. Pass the repository root, as
+            # documented.
+            dir_b = Path.cwd().parent
+        result_b = test_runner(dir_b, False)
 
-    comparison = comparator(result_a, result_b)
-    return result_a, result_b, comparison
+        comparison = comparator(result_a, result_b)
+        return result_a, result_b, comparison
+
+
+DEFAULT_A_DIRECTORY = FC_WORKSPACE_DIR / "build" / "main"
+DEFAULT_B_DIRECTORY = FC_WORKSPACE_DIR / "build" / "cargo_target" / DEFAULT_TARGET_DIR
+
+
+def binary_ab_test(
+    test_runner: Callable[[Path, bool], T],
+    comparator: Callable[[T, T], U] = default_comparator,
+    *,
+    a_directory: Path = DEFAULT_A_DIRECTORY,
+    b_directory: Path = DEFAULT_B_DIRECTORY,
+):
+    """
+    Similar to `git_ab_test`, but instead of locally checking out different revisions, it operates on
+    directories containing firecracker/jailer binaries
+    """
+    result_a = test_runner(a_directory, True)
+    result_b = test_runner(b_directory, False)
+
+    return result_a, result_b, comparator(result_a, result_b)
 
 
 def is_pr() -> bool:
@@ -153,7 +176,7 @@
     )
 
 
-def git_ab_test_guest_command(
+def precompiled_ab_test_guest_command(
     microvm_factory: Callable[[Path, Path], Microvm],
     command: str,
     *,
@@ -163,22 +186,21 @@
 ):
     """The same as git_ab_test_command, but via SSH. The closure argument should setup a microvm using
     the passed paths to firecracker and jailer binaries."""
+    b_directory = (
+        DEFAULT_B_DIRECTORY
+        if b_revision is None
+        else FC_WORKSPACE_DIR / "build" / b_revision
+    )
 
-    @with_filelock
-    def build_firecracker(workspace_dir):
-        utils.check_output("./tools/release.sh --profile release", cwd=workspace_dir)
-
-    def test_runner(workspace_dir, _is_a: bool):
-        firecracker = get_binary("firecracker", workspace_dir=workspace_dir)
-        if not firecracker.exists():
-            build_firecracker(workspace_dir)
-        bin_dir = firecracker.parent.resolve()
-        firecracker, jailer = bin_dir / "firecracker", bin_dir / "jailer"
-        microvm = microvm_factory(firecracker, jailer)
+    def test_runner(bin_dir, _is_a: bool):
+        microvm = microvm_factory(bin_dir / "firecracker", bin_dir / "jailer")
         return microvm.ssh.run(command)
 
-    (_, old_out, old_err), (_, new_out, new_err), the_same = git_ab_test(
-        test_runner, comparator, a_revision=a_revision, b_revision=b_revision
+    (_, old_out, old_err), (_, new_out, new_err), the_same = binary_ab_test(
+        test_runner,
+        comparator,
+        a_directory=FC_WORKSPACE_DIR / "build" / a_revision,
+        b_directory=b_directory,
     )
 
     assert (
@@ -186,7 +208,7 @@ def test_runner(workspace_dir, _is_a: bool):
     ), f"The output of running command `{command}` changed:\nOld:\nstdout:\n{old_out}\nstderr\n{old_err}\n\nNew:\nstdout:\n{new_out}\nstderr:\n{new_err}"
 
 
-def git_ab_test_guest_command_if_pr(
+def precompiled_ab_test_guest_command_if_pr(
     microvm_factory: Callable[[Path, Path], Microvm],
     command: str,
     *,
@@ -195,7 +217,9 @@
 ):
     """The same as git_ab_test_command_if_pr, but via SSH"""
     if is_pr():
-        git_ab_test_guest_command(microvm_factory, command, comparator=comparator)
+        precompiled_ab_test_guest_command(
+            microvm_factory, command, comparator=comparator
+        )
         return None
 
     microvm = microvm_factory(*get_firecracker_binaries())
@@ -249,17 +273,3 @@ def git_clone(clone_path, commitish):
     )
     utils.check_output(f"git branch -D {branch_name}")
     return clone_path
-
-
-# Once we upgrade to python 3.11, this will be in contextlib:
-# https://docs.python.org/3/library/contextlib.html#contextlib.chdir
-@contextlib.contextmanager
-def chdir(to):
-    """Context manager that temporarily `chdir`s to the specified path"""
-    cur = os.getcwd()
-
-    try:
-        os.chdir(to)
-        yield
-    finally:
-        os.chdir(cur)
diff --git a/tests/framework/properties.py b/tests/framework/properties.py
index 07a52692a38..b40df56249e 100644
--- a/tests/framework/properties.py
+++ b/tests/framework/properties.py
@@ -48,11 +48,11 @@ def get_host_os(kv: str = None):
         kv = platform.release()
     parts = kv.split("-")
     if len(parts) < 2:
-        return None
+        return kv
     misc = parts[1].split(".")
     if len(misc) > 2 and misc[2] in {"amzn2", "amzn2023"}:
         return misc[2]
-    return None
+    return kv
 
 
 class GlobalProps:
diff --git a/tests/framework/utils.py b/tests/framework/utils.py
index dbae40016ea..a8715f00e94 100644
--- a/tests/framework/utils.py
+++ b/tests/framework/utils.py
@@ -234,17 +234,14 @@ def _cpus(cls):
         # - cgroupsv1: /cpuset.cpus
         # - cgroupsv2: /cpuset.cpus.effective
        # For more details, see https://docs.kernel.org/admin-guide/cgroup-v2.html#cpuset-interface-files
-        cpulist = None
         for path in [
             Path("/sys/fs/cgroup/cpuset/cpuset.cpus"),
             Path("/sys/fs/cgroup/cpuset.cpus.effective"),
         ]:
             if path.exists():
-                cpulist = path.read_text("ascii").strip()
-                break
-        else:
-            raise RuntimeError("Could not find cgroups cpuset")
-        return ListFormatParser(cpulist).parse()
+                return ListFormatParser(path.read_text("ascii").strip()).parse()
+
+        raise RuntimeError("Could not find cgroups cpuset")
 
 
 class ListFormatParser:
diff --git a/tests/integration_tests/performance/test_benchmarks.py b/tests/integration_tests/performance/test_benchmarks.py
index 3a33a1a80bb..6e6541a688d 100644
--- a/tests/integration_tests/performance/test_benchmarks.py
+++ b/tests/integration_tests/performance/test_benchmarks.py
@@ -1,6 +1,7 @@
 # Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 # SPDX-License-Identifier: Apache-2.0
 """Optional benchmarks-do-not-regress test"""
+import contextlib
 import json
 import logging
 import platform
@@ -10,7 +11,7 @@
 import pytest
 
 from framework import utils
-from framework.ab_test import chdir, git_ab_test
+from framework.ab_test import git_ab_test
 from host_tools.cargo_build import cargo
 
 LOGGER = logging.getLogger(__name__)
@@ -32,7 +33,7 @@ def run_criterion(firecracker_checkout: Path, is_a: bool) -> Path:
     """
     baseline_name = "a_baseline" if is_a else "b_baseline"
 
-    with chdir(firecracker_checkout):
+    with contextlib.chdir(firecracker_checkout):
         # Passing --message-format json to cargo tells it to print its log in a json format. At the end, instead of the
         # usual "placed executable <...> at <...>" we'll get a json object with an 'executable' key, from which we
         # extract the path to the compiled benchmark binary.
diff --git a/tests/integration_tests/security/test_jail.py b/tests/integration_tests/security/test_jail.py
index 68bb220e0c9..ad6a16e48f5 100644
--- a/tests/integration_tests/security/test_jail.py
+++ b/tests/integration_tests/security/test_jail.py
@@ -64,19 +64,14 @@ def test_empty_jailer_id(uvm_plain):
         exec_file=test_microvm.fc_binary_path,
     )
 
-    # pylint: disable=W0703
-    try:
+    # If the exception is not thrown, it means that Firecracker was
+    # started successfully, hence there's a bug in the code due to which
+    # we can set an empty ID.
+    with pytest.raises(
+        ChildProcessError,
+        match=r"Jailer error: Invalid instance ID: Invalid len \(0\); the length must be between 1 and 64",
+    ):
         test_microvm.spawn()
-        # If the exception is not thrown, it means that Firecracker was
-        # started successfully, hence there's a bug in the code due to which
-        # we can set an empty ID.
-        assert False
-    except Exception as err:
-        expected_err = (
-            "Jailer error: Invalid instance ID: Invalid len (0);"
-            " the length must be between 1 and 64"
-        )
-        assert expected_err in str(err)
 
 
 def test_exec_file_not_exist(uvm_plain, tmp_path):
diff --git a/tests/integration_tests/security/test_vulnerabilities.py b/tests/integration_tests/security/test_vulnerabilities.py
index a7faf762acd..d00aeb9d3c8 100644
--- a/tests/integration_tests/security/test_vulnerabilities.py
+++ b/tests/integration_tests/security/test_vulnerabilities.py
@@ -11,11 +11,11 @@
 import pytest
 import requests
 
+from framework import utils
 from framework.ab_test import (
-    git_ab_test_guest_command,
-    git_ab_test_guest_command_if_pr,
-    git_ab_test_host_command_if_pr,
     is_pr,
+    precompiled_ab_test_guest_command,
+    precompiled_ab_test_guest_command_if_pr,
     set_did_not_grow_comparator,
 )
 from framework.properties import global_props
@@ -212,20 +212,17 @@ def check_vulnerabilities_on_guest(status):
     assert report_guest_vulnerabilities == known_guest_vulnerabilities
 
 
+# Nothing can be sensibly tested in a PR context here
+@pytest.mark.skipif(
+    is_pr(), reason="Test depends solely on factors external to GitHub repository"
+)
 def test_spectre_meltdown_checker_on_host(spectre_meltdown_checker):
     """
     Test with the spectre / meltdown checker on host.
     """
-    output = git_ab_test_host_command_if_pr(
-        f"sh {spectre_meltdown_checker} --batch json",
-        comparator=set_did_not_grow_comparator(
-            spectre_meltdown_reported_vulnerablities
-        ),
-        check_in_nonpr=False,
-    )
+    rc, output, _ = utils.run_cmd(f"sh {spectre_meltdown_checker} --batch json")
 
-    # Outside the PR context, checks the return code with some exceptions.
-    if output and output.returncode != 0:
+    if output and rc != 0:
         report = spectre_meltdown_reported_vulnerablities(output)
         expected = {}
         assert report == expected, f"Unexpected vulnerabilities: {report} vs {expected}"
@@ -236,7 +233,7 @@ def test_spectre_meltdown_checker_on_guest(spectre_meltdown_checker, build_micro
     Test with the spectre / meltdown checker on guest.
     """
 
-    status = git_ab_test_guest_command_if_pr(
+    status = precompiled_ab_test_guest_command_if_pr(
         with_checker(build_microvm, spectre_meltdown_checker),
         REMOTE_CHECKER_COMMAND,
         comparator=set_did_not_grow_comparator(
@@ -254,7 +251,7 @@ def test_spectre_meltdown_checker_on_restored_guest(
     """
     Test with the spectre / meltdown checker on a restored guest.
     """
-    status = git_ab_test_guest_command_if_pr(
+    status = precompiled_ab_test_guest_command_if_pr(
         with_checker(
             with_restore(build_microvm, microvm_factory), spectre_meltdown_checker
         ),
@@ -275,7 +272,7 @@ def test_spectre_meltdown_checker_on_guest_with_template(
     Test with the spectre / meltdown checker on guest with CPU template.
     """
 
-    git_ab_test_guest_command_if_pr(
+    precompiled_ab_test_guest_command_if_pr(
         with_checker(build_microvm_with_template, spectre_meltdown_checker),
         REMOTE_CHECKER_COMMAND,
         comparator=set_did_not_grow_comparator(
@@ -290,7 +287,7 @@ def test_spectre_meltdown_checker_on_guest_with_custom_template(
     """
     Test with the spectre / meltdown checker on guest with a custom CPU template.
     """
-    git_ab_test_guest_command_if_pr(
+    precompiled_ab_test_guest_command_if_pr(
         with_checker(build_microvm_with_custom_template, spectre_meltdown_checker),
         REMOTE_CHECKER_COMMAND,
         comparator=set_did_not_grow_comparator(
@@ -305,7 +302,7 @@ def test_spectre_meltdown_checker_on_restored_guest_with_template(
     """
     Test with the spectre / meltdown checker on a restored guest with a CPU template.
""" - git_ab_test_guest_command_if_pr( + precompiled_ab_test_guest_command_if_pr( with_checker( with_restore(build_microvm_with_template, microvm_factory), spectre_meltdown_checker, @@ -325,7 +322,7 @@ def test_spectre_meltdown_checker_on_restored_guest_with_custom_template( """ Test with the spectre / meltdown checker on a restored guest with a custom CPU template. """ - git_ab_test_guest_command_if_pr( + precompiled_ab_test_guest_command_if_pr( with_checker( with_restore(build_microvm_with_custom_template, microvm_factory), spectre_meltdown_checker, @@ -383,17 +380,15 @@ def get_vuln_files_exception_dict(template): return exception_dict +# Nothing can be sensibly tested here in a PR context +@pytest.mark.skipif( + is_pr(), reason="Test depends solely on factors external to GitHub repository" +) def test_vulnerabilities_on_host(): """ Test vulnerabilities files on host. """ - - git_ab_test_host_command_if_pr( - f"! grep -r Vulnerable {VULN_DIR}", - comparator=set_did_not_grow_comparator( - lambda output: set(output.stdout.splitlines()) - ), - ) + utils.check_output(f"! grep -r Vulnerable {VULN_DIR}") def check_vulnerabilities_files_on_guest(microvm): @@ -429,7 +424,7 @@ def check_vulnerabilities_files_ab(builder): running in a PR pipeline, and otherwise calls `check_vulnerabilities_files_on_guest` """ if is_pr(): - git_ab_test_guest_command( + precompiled_ab_test_guest_command( builder, f"! grep -r Vulnerable {VULN_DIR}", comparator=set_did_not_grow_comparator( diff --git a/tools/ab_test.py b/tools/ab_test.py index fa76f087cf8..2d03d9591a1 100755 --- a/tools/ab_test.py +++ b/tools/ab_test.py @@ -32,10 +32,8 @@ sys.path.append(str(Path(__file__).parent.parent / "tests")) # pylint:disable=wrong-import-position -from framework import utils -from framework.ab_test import check_regression, git_ab_test +from framework.ab_test import binary_ab_test, check_regression from framework.properties import global_props -from host_tools.cargo_build import get_binary from host_tools.metrics import ( emit_raw_emf, format_with_reduced_unit, @@ -107,7 +105,7 @@ def find_unit(emf: dict, metric: str): return metrics.get(metric, "None") -def load_data_series(report_path: Path, revision: str = None, *, reemit: bool = False): +def load_data_series(report_path: Path, tag=None, *, reemit: bool = False): """Loads the data series relevant for A/B-testing from test_results/test-report.json into a dictionary mapping each message's cloudwatch dimensions to a dictionary of its list-valued properties/metrics. 
@@ -126,10 +124,9 @@ def load_data_series(report_path: Path, revision: str = None, *, reemit: bool =
             emf = json.loads(line)
 
             if reemit:
-                assert revision is not None
+                assert tag is not None
 
-                # These will show up in Cloudwatch, so canonicalize to long commit SHAs
-                emf["git_commit_id"] = canonicalize_revision(revision)
+                emf["git_commit_id"] = str(tag)
                 emit_raw_emf(emf)
 
             dimensions, result = process_log_entry(emf)
@@ -158,8 +155,7 @@ def load_data_series(report_path: Path, revision: str = None, *, reemit: bool =
 
 def collect_data(binary_dir: Path, tests: list[str]):
     """Executes the specified test using the provided firecracker binaries"""
-    # Example binary_dir: ../build/main/build/cargo_target/x86_64-unknown-linux-musl/release
-    revision = binary_dir.parents[3].name
+    binary_dir = binary_dir.resolve()
 
     print(f"Collecting samples with {binary_dir}")
     subprocess.run(
@@ -172,7 +168,7 @@
         check=True,
     )
     return load_data_series(
-        Path("test_results/test-report.json"), revision, reemit=True
+        Path("test_results/test-report.json"), binary_dir, reemit=True
     )
 
 
@@ -311,23 +307,17 @@ def analyze_data(
 
 
 def ab_performance_test(
-    a_revision, b_revision, tests, p_thresh, strength_abs_thresh, noise_threshold
+    a_revision: Path,
+    b_revision: Path,
+    tests,
+    p_thresh,
+    strength_abs_thresh,
+    noise_threshold,
 ):
-    """Does an A/B-test of the specified test across the given revisions"""
-    _, commit_list, _ = utils.check_output(
-        f"git --no-pager log --oneline {a_revision}..{b_revision}"
-    )
-    print(
-        f"Performance A/B-test across {a_revision}..{b_revision}. This includes the following commits:"
-    )
-    print(commit_list.strip())
+    """Does an A/B-test of the specified test with the given firecracker/jailer binaries"""
 
-    def test_runner(workspace, _is_ab: bool):
-        bin_dir = get_binary("firecracker", workspace_dir=workspace).parent
-        return collect_data(bin_dir, tests)
-
-    return git_ab_test(
-        test_runner,
+    return binary_ab_test(
+        lambda bin_dir, _: collect_data(bin_dir, tests),
         lambda ah, be: analyze_data(
             ah,
             be,
@@ -336,16 +326,11 @@ def test_runner(workspace, _is_ab: bool):
             noise_threshold,
             n_resamples=int(100 / p_thresh),
         ),
-        a_revision=a_revision,
-        b_revision=b_revision,
+        a_directory=a_revision,
+        b_directory=b_revision,
     )
 
 
-def canonicalize_revision(revision):
-    """Canonicalizes the given revision to a 40 digit hex SHA"""
-    return utils.check_output(f"git rev-parse {revision}").stdout.strip()
-
-
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
         description="Executes Firecracker's A/B testsuite across the specified commits"
@@ -357,11 +342,13 @@ def canonicalize_revision(revision):
     )
     run_parser.add_argument(
         "a_revision",
-        help="The baseline revision compared to which we want to avoid regressing",
+        help="Directory containing firecracker and jailer binaries to be considered the performance baseline",
+        type=Path,
     )
     run_parser.add_argument(
        "b_revision",
-        help="The revision whose performance we want to compare against the results from a_revision",
+        help="Directory containing firecracker and jailer binaries whose performance we want to compare against the results from a_revision",
+        type=Path,
     )
     run_parser.add_argument("--test", help="The test to run", nargs="+", required=True)
     analyze_parser = subparsers.add_parser(
diff --git a/tools/devtool b/tools/devtool
index cfd746ee40c..e9eb3d48db0 100755
--- a/tools/devtool
+++ b/tools/devtool
@@ -446,12 +446,14 @@ cmd_build() {
     profile="debug"
     libc="musl"
 
+    # Parse any command line args.
     while [ $# -gt 0 ]; do
         case "$1" in
             "-h"|"--help")      { cmd_help; exit 1;     } ;;
             "--debug")          { profile="debug";      } ;;
             "--release")        { profile="release";    } ;;
+            "--rev")            { shift; revision=$1;   } ;;
             "--ssh-keys")
                 shift
                 [[ -z "$1" ]] && \
@@ -489,16 +491,40 @@ cmd_build() {
     extra_args="--volume $host_pub_key_path:$PUB_KEY_PATH:z \
         --volume $host_priv_key_path:$PRIV_KEY_PATH:z"
 
+    workdir="$CTR_FC_ROOT_DIR"
+    if [ ! -z "$revision" ]; then
+        commitish="$revision"
+        if ! git cat-file -t "$commitish"; then commitish=origin/"$revision"; fi
+        branch_name=tmp-$commitish
+
+        tmp_dir=$(mktemp -d)
+
+        git branch $branch_name $commitish
+        git clone -b $branch_name . $tmp_dir
+        pushd $tmp_dir
+        workdir=$tmp_dir
+        extra_args="$extra_args --volume $tmp_dir:$tmp_dir:z"
+    fi
+
     # Run the cargo build process inside the container.
     # We don't need any special privileges for the build phase, so we run the
     # container as the current user/group.
     run_devctr \
         --user "$(id -u):$(id -g)" \
-        --workdir "$CTR_FC_ROOT_DIR" \
+        --workdir "$workdir" \
         ${extra_args} \
         -- \
         ./tools/release.sh --libc $libc --profile $profile
     ret=$?
+
+    if [ ! -z "$revision" ]; then
+        popd
+        git branch -D $branch_name
+        mkdir -p build/"$revision"
+        cp $tmp_dir/build/cargo_target/$(uname -m)-unknown-linux-$libc/$profile/* build/"$revision"
+        rm -rf $tmp_dir
+    fi
+
     return $ret
 }
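
Note for reviewers: the sketch below is not part of the patch; it simply mirrors the workflow documented in the `tests/README.md` hunk above, so you can exercise the new `--rev` flag and directory-based A/B run locally. The `main`/`HEAD` revisions and the boottime test path are illustrative choices only.

```sh
# Build two sets of binaries; devtool places each under build/<revision>/
tools/devtool -y build --rev main --release
tools/devtool -y build --rev HEAD --release

# A/B-compare the two directories on a single performance test
tools/devtool -y test --ab -- run build/main build/HEAD \
    --test integration_tests/performance/test_boottime.py::test_boottime
```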