From d38e04465f2d59de94476043b58e3463d80bb7cc Mon Sep 17 00:00:00 2001
From: lucylq
Date: Fri, 25 Apr 2025 16:40:07 -0700
Subject: [PATCH 1/2] Use generated files in module test

Use generated files in the module test instead of checking in the PTE/PTD
files. This helps us keep up to date with AOT changes, especially with the
incoming flat_tensor refactor.

The main change is the model: instead of using AddModule from
examples/portable, use ModuleAdd from test/models/export_program, like our
other tests. The input signature differs (two 1-element tensors before,
two 2x2 tensors plus an alpha scalar now), so all of the test inputs change
as well.

Differential Revision: [D73470865](https://our.internmc.facebook.com/intern/diff/D73470865/)

[ghstack-poisoned]
---
 extension/module/test/CMakeLists.txt  |  28 ++++-
 extension/module/test/TARGETS         |   2 +-
 extension/module/test/module_test.cpp | 144 +++++++++++++-------------
 extension/module/test/targets.bzl     |  61 +++++------
 4 files changed, 132 insertions(+), 103 deletions(-)

diff --git a/extension/module/test/CMakeLists.txt b/extension/module/test/CMakeLists.txt
index 0192b63e632..ab92257621f 100644
--- a/extension/module/test/CMakeLists.txt
+++ b/extension/module/test/CMakeLists.txt
@@ -19,6 +19,31 @@ include(${EXECUTORCH_ROOT}/tools/cmake/Test.cmake)
 
 set(_test_srcs module_test.cpp)
 
+add_custom_command(
+  OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/ModuleAdd.pte"
+         "${CMAKE_CURRENT_BINARY_DIR}/ModuleLinearProgram.pte"
+         "${CMAKE_CURRENT_BINARY_DIR}/ModuleLinearProgram.ptd"
+  COMMAND
+    python3 -m test.models.export_program --modules "ModuleAdd"
+    --outdir "${CMAKE_CURRENT_BINARY_DIR}" 2> /dev/null
+  COMMAND
+    python3 -m test.models.export_program --modules "ModuleLinear"
+    --external-constants --outdir "${CMAKE_CURRENT_BINARY_DIR}" 2> /dev/null
+)
+
+add_custom_target(
+  generated_module_test_files
+  DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/ModuleAdd.pte"
+          "${CMAKE_CURRENT_BINARY_DIR}/ModuleLinearProgram.pte"
+          "${CMAKE_CURRENT_BINARY_DIR}/ModuleLinearProgram.ptd"
+)
+
+set(test_env
+    "ET_MODULE_ADD_PATH=${CMAKE_CURRENT_BINARY_DIR}/ModuleAdd.pte"
+    "ET_MODULE_LINEAR_PROGRAM_PATH=${CMAKE_CURRENT_BINARY_DIR}/ModuleLinearProgram.pte"
+    "ET_MODULE_LINEAR_DATA_PATH=${CMAKE_CURRENT_BINARY_DIR}/ModuleLinearProgram.ptd"
+)
+
 et_cxx_test(
   extension_module_test
   SOURCES
@@ -31,7 +56,8 @@ et_cxx_test(
   portable_ops_lib
 )
 
-set(test_env "RESOURCES_PATH=${EXECUTORCH_ROOT}/extension/module/test/resources")
+add_dependencies(extension_module_test generated_module_test_files)
+set_property(TEST extension_module_test PROPERTY ENVIRONMENT ${test_env})
 
 set_property(
   TEST extension_module_test
diff --git a/extension/module/test/TARGETS b/extension/module/test/TARGETS
index 2341af9282f..a6c52d105f6 100644
--- a/extension/module/test/TARGETS
+++ b/extension/module/test/TARGETS
@@ -5,4 +5,4 @@ load(":targets.bzl", "define_common_targets")
 
 oncall("executorch")
 
-define_common_targets()
+define_common_targets(is_fbcode = True)
diff --git a/extension/module/test/module_test.cpp b/extension/module/test/module_test.cpp
index a82e257a703..92e62077bed 100644
--- a/extension/module/test/module_test.cpp
+++ b/extension/module/test/module_test.cpp
@@ -15,6 +15,7 @@
 #include
 
 #include
+#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
 
 using namespace ::executorch::extension;
 using namespace ::executorch::runtime;
@@ -26,9 +27,9 @@ class ModuleTest : public ::testing::Test {
     if (const char* env = std::getenv("RESOURCES_PATH")) {
      resources_path = env;
    }
-    model_path_ = resources_path + "/add.pte";
-    linear_path_ = resources_path + "/linear.pte";
-    linear_data_path_ = resources_path + "/linear.ptd";
+    model_path_ = std::getenv("ET_MODULE_ADD_PATH");
+    linear_path_ = std::getenv("ET_MODULE_LINEAR_PROGRAM_PATH");
+    linear_data_path_ = std::getenv("ET_MODULE_LINEAR_DATA_PATH");
   }
 
   static inline std::string model_path_;
@@ -109,7 +110,8 @@ TEST_F(ModuleTest, TestMethodMeta) {
   const auto meta = module.method_meta("forward");
   EXPECT_EQ(meta.error(), Error::Ok);
   EXPECT_STREQ(meta->name(), "forward");
-  EXPECT_EQ(meta->num_inputs(), 2);
+  // tensor, tensor, alpha
+  EXPECT_EQ(meta->num_inputs(), 3);
   EXPECT_EQ(*(meta->input_tag(0)), Tag::Tensor);
   EXPECT_EQ(meta->num_outputs(), 1);
   EXPECT_EQ(*(meta->output_tag(0)), Tag::Tensor);
@@ -117,14 +119,20 @@
   const auto input_meta = meta->input_tensor_meta(0);
   EXPECT_EQ(input_meta.error(), Error::Ok);
   EXPECT_EQ(input_meta->scalar_type(), executorch::aten::ScalarType::Float);
-  EXPECT_EQ(input_meta->sizes().size(), 1);
-  EXPECT_EQ(input_meta->sizes()[0], 1);
+  EXPECT_EQ(input_meta->sizes().size(), 2);
+  EXPECT_EQ(input_meta->sizes()[0], 2);
+
+  const auto input_meta1 = meta->input_tensor_meta(1);
+  EXPECT_EQ(input_meta1.error(), Error::Ok);
+  EXPECT_EQ(input_meta1->scalar_type(), executorch::aten::ScalarType::Float);
+  EXPECT_EQ(input_meta1->sizes().size(), 2);
+  EXPECT_EQ(input_meta1->sizes()[0], 2);
 
   const auto output_meta = meta->output_tensor_meta(0);
   EXPECT_EQ(output_meta.error(), Error::Ok);
   EXPECT_EQ(output_meta->scalar_type(), executorch::aten::ScalarType::Float);
-  EXPECT_EQ(output_meta->sizes().size(), 1);
-  EXPECT_EQ(output_meta->sizes()[0], 1);
+  EXPECT_EQ(output_meta->sizes().size(), 2);
+  EXPECT_EQ(output_meta->sizes()[0], 2);
 }
 
 TEST_F(ModuleTest, TestNonExistentMethodMeta) {
@@ -136,17 +144,16 @@ TEST_F(ModuleTest, TestNonExistentMethodMeta) {
 
 TEST_F(ModuleTest, TestExecute) {
   Module module(model_path_);
-  auto tensor = make_tensor_ptr({1.f});
+  auto tensor = make_tensor_ptr({2, 2}, {1.f, 2.f, 3.f, 4.f});
 
-  const auto result = module.execute("forward", {tensor, tensor});
+  const auto result = module.execute("forward", {tensor, tensor, 1.0});
   EXPECT_EQ(result.error(), Error::Ok);
 
   EXPECT_TRUE(module.is_loaded());
   EXPECT_TRUE(module.is_method_loaded("forward"));
 
-  const auto data = result->at(0).toTensor().const_data_ptr<float>();
-
-  EXPECT_NEAR(data[0], 2, 1e-5);
+  const auto expected = make_tensor_ptr({2, 2}, {2.f, 4.f, 6.f, 8.f});
+  EXPECT_TENSOR_CLOSE(result->at(0).toTensor(), *expected.get());
 }
 
 TEST_F(ModuleTest, TestExecutePreload) {
@@ -155,14 +162,13 @@ TEST_F(ModuleTest, TestExecutePreload) {
   const auto error = module.load();
   EXPECT_EQ(error, Error::Ok);
 
-  auto tensor = make_tensor_ptr({1.f});
+  auto tensor = make_tensor_ptr({2, 2}, {1.f, 2.f, 3.f, 4.f});
 
-  const auto result = module.execute("forward", {tensor, tensor});
+  const auto result = module.execute("forward", {tensor, tensor, 1.0});
   EXPECT_EQ(result.error(), Error::Ok);
 
-  const auto data = result->at(0).toTensor().const_data_ptr<float>();
-
-  EXPECT_NEAR(data[0], 2, 1e-5);
+  const auto expected = make_tensor_ptr({2, 2}, {2.f, 4.f, 6.f, 8.f});
+  EXPECT_TENSOR_CLOSE(result->at(0).toTensor(), *expected.get());
 }
 
 TEST_F(ModuleTest, TestExecutePreload_method) {
@@ -171,14 +177,13 @@ TEST_F(ModuleTest, TestExecutePreload_method) {
   const auto error = module.load_method("forward");
   EXPECT_EQ(error, Error::Ok);
 
-  auto tensor = make_tensor_ptr({1.f});
+  auto tensor = make_tensor_ptr({2, 2}, {1.f, 2.f, 3.f, 4.f});
 
-  const auto result = module.execute("forward", {tensor, tensor});
+  const auto result = module.execute("forward", {tensor, tensor, 1.0});
   EXPECT_EQ(result.error(), Error::Ok);
 
-  const auto data = result->at(0).toTensor().const_data_ptr<float>();
-
-  EXPECT_NEAR(data[0], 2, 1e-5);
+  const auto expected = make_tensor_ptr({2, 2}, {2.f, 4.f, 6.f, 8.f});
+  EXPECT_TENSOR_CLOSE(result->at(0).toTensor(), *expected.get());
 }
 
 TEST_F(ModuleTest, TestExecutePreloadProgramAndMethod) {
@@ -190,14 +195,13 @@ TEST_F(ModuleTest, TestExecutePreloadProgramAndMethod) {
   const auto load_method_error = module.load_method("forward");
   EXPECT_EQ(load_method_error, Error::Ok);
 
-  auto tensor = make_tensor_ptr({1.f});
+  auto tensor = make_tensor_ptr({2, 2}, {1.f, 2.f, 3.f, 4.f});
 
-  const auto result = module.execute("forward", {tensor, tensor});
+  const auto result = module.execute("forward", {tensor, tensor, 1.0});
   EXPECT_EQ(result.error(), Error::Ok);
 
-  const auto data = result->at(0).toTensor().const_data_ptr<float>();
-
-  EXPECT_NEAR(data[0], 2, 1e-5);
+  const auto expected = make_tensor_ptr({2, 2}, {2.f, 4.f, 6.f, 8.f});
+  EXPECT_TENSOR_CLOSE(result->at(0).toTensor(), *expected.get());
 }
 
 TEST_F(ModuleTest, TestExecuteOnNonExistent) {
@@ -219,32 +223,30 @@ TEST_F(ModuleTest, TestExecuteOnCurrupted) {
 
 TEST_F(ModuleTest, TestGet) {
   Module module(model_path_);
-  auto tensor = make_tensor_ptr({1.f});
+  auto tensor = make_tensor_ptr({2, 2}, {1.f, 2.f, 3.f, 4.f});
 
-  const auto result = module.get("forward", {tensor, tensor});
+  const auto result = module.get("forward", {tensor, tensor, 1.0});
   EXPECT_EQ(result.error(), Error::Ok);
 
-  const auto data = result->toTensor().const_data_ptr<float>();
-  EXPECT_NEAR(data[0], 2, 1e-5);
+  const auto expected = make_tensor_ptr({2, 2}, {2.f, 4.f, 6.f, 8.f});
+  EXPECT_TENSOR_CLOSE(result->toTensor(), *expected.get());
 }
 
 TEST_F(ModuleTest, TestForward) {
   auto module = std::make_unique<Module>(model_path_);
-  auto tensor = make_tensor_ptr({21.f});
+  auto tensor = make_tensor_ptr({2, 2}, {21.f, 22.f, 23.f, 24.f});
 
-  const auto result = module->forward({tensor, tensor});
+  const auto result = module->forward({tensor, tensor, 1.0});
   EXPECT_EQ(result.error(), Error::Ok);
 
-  const auto data = result->at(0).toTensor().const_data_ptr<float>();
+  const auto expected = make_tensor_ptr({2, 2}, {42.f, 44.f, 46.f, 48.f});
+  EXPECT_TENSOR_CLOSE(result->at(0).toTensor(), *expected.get());
 
-  EXPECT_NEAR(data[0], 42, 1e-5);
-
-  auto tensor2 = make_tensor_ptr({2.f});
+  auto tensor2 = make_tensor_ptr({2, 2}, {2.f, 3.f, 4.f, 5.f});
   const auto result2 = module->forward({tensor2, tensor2});
   EXPECT_EQ(result2.error(), Error::Ok);
-  const auto data2 = result->at(0).toTensor().const_data_ptr<float>();
-
-  EXPECT_NEAR(data2[0], 4, 1e-5);
+  const auto expected2 = make_tensor_ptr({2, 2}, {4.f, 6.f, 8.f, 10.f});
+  EXPECT_TENSOR_CLOSE(result2->at(0).toTensor(), *expected2.get());
 }
 
 TEST_F(ModuleTest, TestForwardWithInvalidInputs) {
@@ -294,20 +296,20 @@ TEST_F(ModuleTest, TestProgramSharingAndDataLoaderManagement) {
   EXPECT_EQ(load_error, Error::Ok);
   EXPECT_TRUE(module1->is_loaded());
 
-  auto tensor = make_tensor_ptr({1.f});
+  auto tensor = make_tensor_ptr({2, 2}, {1.f, 2.f, 3.f, 4.f});
 
-  const auto result1 = module1->execute("forward", {tensor, tensor});
+  const auto result1 = module1->execute("forward", {tensor, tensor, 1.0});
   EXPECT_EQ(result1.error(), Error::Ok);
 
   auto module2 = std::make_unique<Module>(module1->program());
 
-  const auto result2 = module2->execute("forward", {tensor, tensor});
+  const auto result2 = module2->execute("forward", {tensor, tensor, 1.0});
   EXPECT_EQ(result2.error(), Error::Ok);
 
   module1 = std::make_unique<Module>("/path/to/nonexistent/file.pte");
   EXPECT_FALSE(module1->is_loaded());
 
-  const auto result3 = module2->execute("forward", {tensor, tensor});
+  const auto result3 = module2->execute("forward", {tensor, tensor, 1.0});
   EXPECT_EQ(result3.error(), Error::Ok);
 }
 
@@ -339,14 +341,13 @@ TEST_F(ModuleTest, TestProgramPersistenceAndReuseAfterModuleDestruction) {
 
   EXPECT_EQ(module.program(), shared_program);
 
-  auto tensor = make_tensor_ptr({1.f});
+  auto tensor = make_tensor_ptr({2, 2}, {1.f, 2.f, 3.f, 4.f});
 
-  const auto result = module.execute("forward", {tensor, tensor});
+  const auto result = module.execute("forward", {tensor, tensor, 1.0});
   EXPECT_EQ(result.error(), Error::Ok);
 
-  auto data = result->at(0).toTensor().const_data_ptr<float>();
-
-  EXPECT_NEAR(data[0], 2, 1e-5);
+  const auto expected = make_tensor_ptr({2, 2}, {2.f, 4.f, 6.f, 8.f});
+  EXPECT_TENSOR_CLOSE(result->at(0).toTensor(), *expected.get());
 }
 
 TEST_F(ModuleTest, TestConcurrentExecutionWithSharedProgram) {
@@ -364,22 +365,22 @@ TEST_F(ModuleTest, TestConcurrentExecutionWithSharedProgram) {
   EXPECT_TRUE(program != nullptr);
 
   auto thread = [](std::shared_ptr<Program> program,
-                   const std::array<float, 1>& input) {
+                   const std::array<float, 4>& input) {
     Module module(program);
-    auto tensor = from_blob((void*)input.data(), {1});
+    auto tensor = from_blob((void*)input.data(), {2, 2});
 
-    const auto result = module.forward({tensor, tensor});
+    const auto result = module.forward({tensor, tensor, 1.0});
     EXPECT_EQ(result.error(), Error::Ok);
 
     const auto data = result->at(0).toTensor().const_data_ptr<float>();
     EXPECT_NEAR(data[0], (input[0] * 2), 1e-5);
   };
 
-  std::thread t1(thread, program, std::array<float, 1>{1});
-  std::thread t2(thread, program, std::array<float, 1>{2});
-  std::thread t3(thread, program, std::array<float, 1>{3});
-  std::thread t4(thread, program, std::array<float, 1>{4});
-  std::thread t5(thread, program, std::array<float, 1>{5});
+  std::thread t1(thread, program, std::array<float, 4>{1, 2, 3, 4});
+  std::thread t2(thread, program, std::array<float, 4>{2, 3, 4, 5});
+  std::thread t3(thread, program, std::array<float, 4>{3, 4, 5, 6});
+  std::thread t4(thread, program, std::array<float, 4>{4, 5, 6, 7});
+  std::thread t5(thread, program, std::array<float, 4>{5, 6, 7, 8});
 
   t1.join();
   t2.join();
@@ -391,37 +392,38 @@ TEST_F(ModuleTest, TestSetInputsBeforeExecute) {
 
   Module module(model_path_);
 
-  auto tensor1 = make_tensor_ptr({4.f});
-  auto tensor2 = make_tensor_ptr({5.f});
+  auto tensor1 = make_tensor_ptr({2, 2}, {1.f, 2.f, 3.f, 4.f});
+  auto tensor2 = make_tensor_ptr({2, 2}, {2.f, 3.f, 4.f, 5.f});
 
-  EXPECT_EQ(module.set_inputs({tensor1, tensor2}), Error::Ok);
+  EXPECT_EQ(module.set_inputs({tensor1, tensor2, 1.0}), Error::Ok);
 
   const auto result = module.forward();
   EXPECT_EQ(result.error(), Error::Ok);
 
-  const auto data = result->at(0).toTensor().const_data_ptr<float>();
-  EXPECT_NEAR(data[0], 9, 1e-5);
+  const auto expected = make_tensor_ptr({2, 2}, {3.f, 5.f, 7.f, 9.f});
+  EXPECT_TENSOR_CLOSE(result->at(0).toTensor(), *expected.get());
 }
 
 TEST_F(ModuleTest, TestSetInputCombinedWithExecute) {
   Module module(model_path_);
 
-  auto tensor1 = make_tensor_ptr({2.f});
-  auto tensor2 = make_tensor_ptr({3.f});
+  auto tensor1 = make_tensor_ptr({2, 2}, {1.f, 2.f, 3.f, 4.f});
+  auto tensor2 = make_tensor_ptr({2, 2}, {2.f, 3.f, 4.f, 5.f});
 
   EXPECT_EQ(module.set_input(tensor2, 1), Error::Ok);
+  EXPECT_EQ(module.set_input(1.0, 2), Error::Ok); // alpha
 
   const auto result = module.forward(tensor1);
   EXPECT_EQ(result.error(), Error::Ok);
 
-  const auto data = result->at(0).toTensor().const_data_ptr<float>();
-  EXPECT_NEAR(data[0], 5, 1e-5);
+  const auto expected = make_tensor_ptr({2, 2}, {3.f, 5.f, 7.f, 9.f});
+  EXPECT_TENSOR_CLOSE(result->at(0).toTensor(), *expected.get());
 }
 
 TEST_F(ModuleTest, TestPartiallySetInputs) {
   Module module(model_path_);
 
-  auto tensor = make_tensor_ptr({1.f});
+  auto tensor = make_tensor_ptr({2, 2}, {1.f, 2.f, 3.f, 4.f});
 
   EXPECT_EQ(module.set_input(tensor, 0), Error::Ok);
 
@@ -455,8 +457,6 @@ TEST_F(ModuleTest, TestPTD) {
 
   ASSERT_EQ(module.load_method("forward"), Error::Ok);
 
-  auto tensor1 =
-      make_tensor_ptr({3, 3}, {2.f, 3.f, 4.f, 2.f, 3.f, 4.f, 2.f, 3.f, 4.f});
-
-  ASSERT_EQ(module.forward(tensor1).error(), Error::Ok);
+  auto tensor = make_tensor_ptr({2, 2}, {2.f, 3.f, 4.f, 2.f});
+  ASSERT_EQ(module.forward(tensor).error(), Error::Ok);
 }
diff --git a/extension/module/test/targets.bzl b/extension/module/test/targets.bzl
index 19ba09cf4e6..b684e8d07b6 100644
--- a/extension/module/test/targets.bzl
+++ b/extension/module/test/targets.bzl
@@ -5,39 +5,42 @@ load(
 )
 load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "get_aten_mode_options", "runtime")
 
-def define_common_targets():
+def define_common_targets(is_fbcode=False):
     """Defines targets that should be shared between fbcode and xplat.
 
     The directory containing this targets.bzl file should also contain both
    TARGETS and BUCK files that call this function.
    """
+    if not runtime.is_oss and is_fbcode:
+        modules_env = {
+            # The tests use this var to find the program file to load. This uses
+            # an fbcode target path because the authoring/export tools
+            # intentionally don't work in xplat (since they're host-only tools).
+            "ET_MODULE_ADD_PATH": "$(location fbcode//executorch/test/models:exported_programs[ModuleAdd.pte])",
+            "ET_MODULE_LINEAR_PROGRAM_PATH": "$(location fbcode//executorch/test/models:exported_program_and_data[ModuleLinear.pte])",
+            "ET_MODULE_LINEAR_DATA_PATH": "$(location fbcode//executorch/test/models:exported_program_and_data[ModuleLinear.ptd])",
+        }
 
-    for aten_mode in get_aten_mode_options():
-        aten_suffix = ("_aten" if aten_mode else "")
+        for aten_mode in get_aten_mode_options():
+            aten_suffix = ("_aten" if aten_mode else "")
 
-        runtime.cxx_test(
-            name = "test" + aten_suffix,
-            srcs = [
-                "module_test.cpp",
-            ],
-            deps = [
-                "//executorch/kernels/portable:generated_lib" + aten_suffix,
-                "//executorch/extension/data_loader:file_data_loader",
-                "//executorch/extension/module:module" + aten_suffix,
-                "//executorch/extension/tensor:tensor" + aten_suffix,
-            ],
-            env = {
-                "RESOURCES_PATH": "$(location :resources)/resources",
-            },
-            platforms = [CXX, ANDROID],  # Cannot bundle resources on Apple platform.
-            compiler_flags = [
-                "-Wno-error=deprecated-declarations",
-            ],
-        )
-
-    runtime.filegroup(
-        name = "resources",
-        srcs = native.glob([
-            "resources/**",
-        ]),
-    )
+            runtime.cxx_test(
+                name = "test" + aten_suffix,
+                srcs = [
+                    "module_test.cpp",
+                ],
+                deps = [
+                    "//executorch/kernels/portable:generated_lib" + aten_suffix,
+                    "//executorch/extension/data_loader:file_data_loader",
+                    "//executorch/extension/module:module" + aten_suffix,
+                    "//executorch/extension/tensor:tensor" + aten_suffix,
+                    "//executorch/runtime/core/exec_aten/testing_util:tensor_util" + aten_suffix,
+                ],
+                env = modules_env,
+                platforms = [CXX, ANDROID],  # Cannot bundle resources on Apple platform.
+                compiler_flags = [
+                    "-Wno-error=deprecated-declarations",
+                ],
+            )
+

From 903998ade6448c9afba82fb13257fdc5ac976413 Mon Sep 17 00:00:00 2001
From: lucylq
Date: Fri, 25 Apr 2025 17:44:14 -0700
Subject: [PATCH 2/2] Update on "Use generated files in module test"

Use generated files in the module test instead of checking in the PTE/PTD
files.
This helps us keep up to date with aot changes, especially with incoming flat_tensor refactor. Main changes here are with the model. Instead of using AddModule from examples/portable, use ModuleAdd from test/models/export_program, like our other tests. There is a shape difference (1 tensor vs 2 tensors+alpha), so we have to change all the inputs. Differential Revision: [D73470865](https://our.internmc.facebook.com/intern/diff/D73470865/) [ghstack-poisoned] --- extension/module/test/resources/README.md | 23 --------------------- extension/module/test/resources/add.pte | Bin 728 -> 0 bytes extension/module/test/resources/linear.ptd | Bin 336 -> 0 bytes extension/module/test/resources/linear.pte | Bin 1208 -> 0 bytes 4 files changed, 23 deletions(-) delete mode 100644 extension/module/test/resources/README.md delete mode 100644 extension/module/test/resources/add.pte delete mode 100644 extension/module/test/resources/linear.ptd delete mode 100644 extension/module/test/resources/linear.pte diff --git a/extension/module/test/resources/README.md b/extension/module/test/resources/README.md deleted file mode 100644 index ecbdd41c107..00000000000 --- a/extension/module/test/resources/README.md +++ /dev/null @@ -1,23 +0,0 @@ -## Resources - -### add.pte, linear.pte, linear.ptd -- Internally generated after D62209852, 2024-09-06 with: - ``` - buck2 run fbcode//executorch/examples/portable/scripts:export -- --model_name="add" - ``` - - and - - ``` - buck2 run fbcode//executorch/examples/portable/scripts:export -- --model_name="linear" -examples - ``` -- In OSS, the same file can be generated after [#5145](https://github.com/pytorch/executorch/pull/5145), 2024-09-06 with: - ``` - python -m examples.portable.scripts.export --model_name="add" - ``` - - and - - ``` - python -m examples.portable.scripts.export --model_name="linear" -e - ``` diff --git a/extension/module/test/resources/add.pte b/extension/module/test/resources/add.pte deleted file mode 100644 index 43252ca7d3d05e8fe847e122c9c7de976e0e0096..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 728 zcmZ`$O-_Sg5Pi1LVna-08q!40d z9v`$+<8G4NtCNznyo*d;{901sofq4 z&95FC(_^3>z=$tz@ie@O=wFQ6?sRR{e3+V%PkTlvcFUN0_=L1XT6i=0w*D?~&6+W_ zm?cw28ad;8ZTa+8zxfcf= X8imU+Ugk*@$4qIuZ+H9Wa$n&GV)QgK diff --git a/extension/module/test/resources/linear.ptd b/extension/module/test/resources/linear.ptd deleted file mode 100644 index edab857bb3f24db7f53e812ae6949588d6e62fb8..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 336 zcmZ=^U|?_yF)(!VFfh~rvJHS31Rj7%1_l8}2*Ux&hfzEX0zf7U0|x^S5CYW!u?kp( zfdQliBnFa$s9|7m0J1=U35XMbSOka#7-WEI*nz4*x<$YwkiiBrM*@gJx?%2tIsY5S`O}oEk+$B!xvFDJ~Es5epHE6bk-;AXe&WP7N3%F%kqz3rn%Gu&}VS zu(Y)F7l=Q=A7P=M@9pfyD>C8T?#<4;nVp%-yojtfYKvH8TJm_N#FrfQvw+Adumn_q zc_0Nm{>Cv^4{*Xgbq_yroYc`rWKMzvZyrnqDM1TfIDL(Ztbt$|r~uc{2S5QJ=Lq2B z#%=7%Gv-FSt?xKy&fJ4iI_Eu^z#8_Vla-aQ*=)Dva)`~$z~(gSw$FRf&VJ|gLhnaD zk#W>Q>oShFE8r4vxz+7{OYiPc|A{!bim}OJ<(|1RFCQ2Ks3&f$Uk-cUoHCaBl8cf7 z=u0>uXOf$_RS@I_2fI83KCrw3tmy_20OWi_53!6eCfRe)G6vl!C3{ZW?O5+(4)Cb! zS(;~?ID@~(KD*k#TYJ+t5W0Ia_qwr{tc&a0xa6W8S4vtJ^L)0sXiZYa=303M`LjRC z{OS0lHfc_^$P6Z$7MyNYZ5kR%#w*}C3o!OQVzCBQU|9aXOu<{b1d{|eImu0N+>$Ba z&2fKO?p^4Hao^c|S<5_r|Jmvkl1cB9@%T-$m&}!U->R>fJm&-h3wy2bY|vBM z>in?+|98-MbBTB`G@}0qtWh2BjOo4yoz*P93?nA)B(^qgKWZHw_3%FIo~#lloc(k& l!`w;$Yt>jR){51jQ7hJhQn|8L4a#MxP1g=O-G10@${&jA(