diff --git a/.cspell-wordlist.txt b/.cspell-wordlist.txt
index c72c8334d..fcef68ce2 100644
--- a/.cspell-wordlist.txt
+++ b/.cspell-wordlist.txt
@@ -90,4 +90,6 @@ phonemizers
 phonemis
 Español
 Français
-Português
\ No newline at end of file
+Português
+codegen
+cstdint
diff --git a/apps/text-embeddings/utils/math.ts b/apps/text-embeddings/utils/math.ts
index 794e653c2..50c70d1f9 100644
--- a/apps/text-embeddings/utils/math.ts
+++ b/apps/text-embeddings/utils/math.ts
@@ -1,6 +1,14 @@
+import {
+  RnExecutorchError,
+  RnExecutorchErrorCode,
+} from 'react-native-executorch';
+
 export const dotProduct = (a: Float32Array, b: Float32Array) => {
   if (a.length !== b.length) {
-    throw new Error('Vectors must be of the same length');
+    throw new RnExecutorchError(
+      RnExecutorchErrorCode.WrongDimensions,
+      `dotProduct needs both vectors to have the same length: got a: ${a.length}, b: ${b.length}`
+    );
   }

   let sum = 0;
diff --git a/docs/docs/03-typescript-api/04-error-handling.md b/docs/docs/03-typescript-api/04-error-handling.md
new file mode 100644
index 000000000..5a89f4d2f
--- /dev/null
+++ b/docs/docs/03-typescript-api/04-error-handling.md
@@ -0,0 +1,179 @@
+---
+title: Error handling
+---
+
+## Overview
+
+To handle different types of errors, use `instanceof` with the exported `RnExecutorchError` class and inspect its `code` property. This lets you check exactly what went wrong and act accordingly.
+
+The example below creates an `LLMModule` and then tries to change its generation config. Since the `topp` parameter has to be a value between 0 and 1 (inclusive), the `.configure()` call will throw an error with the code `InvalidConfig`.
+
+```typescript
+import {
+  LLMModule,
+  LLAMA3_2_1B_QLORA,
+  RnExecutorchError,
+  RnExecutorchErrorCode,
+} from 'react-native-executorch';
+
+const llm = new LLMModule({
+  tokenCallback: (token) => console.log(token),
+  messageHistoryCallback: (messages) => console.log(messages),
+});
+
+await llm.load(LLAMA3_2_1B_QLORA, (progress) => console.log(progress));
+
+// Try to set an invalid configuration
+try {
+  await llm.configure({ topp: 1.5 }); // This will throw an InvalidConfig error
+} catch (err) {
+  if (
+    err instanceof RnExecutorchError &&
+    err.code === RnExecutorchErrorCode.InvalidConfig
+  ) {
+    console.error('Invalid configuration:', err.message);
+    // Handle the invalid config - set default values
+    await llm.configure({ topp: 0.9 });
+  } else {
+    throw err;
+  }
+}
+
+// Running the model
+try {
+  await llm.sendMessage('Hello, World!');
+} catch (err) {
+  if (err instanceof RnExecutorchError) {
+    if (err.code === RnExecutorchErrorCode.ModuleNotLoaded) {
+      console.error('Model not loaded:', err.message);
+      // Load the model first
+    } else if (err.code === RnExecutorchErrorCode.ModelGenerating) {
+      console.error('Model is already generating:', err.message);
+      // Wait for the current generation to complete
+    } else {
+      console.error('Generation error:', err.message);
+      throw err;
+    }
+  } else {
+    throw err;
+  }
+}
+
+// Interrupting the model (to actually interrupt generation, this has to be called while sendMessage or generate is running)
+llm.interrupt();
+
+// Deleting the model from memory
+llm.delete();
+```
+
+## Reference
+
+All errors in React Native ExecuTorch inherit from `RnExecutorchError` and include a `code` property from the `RnExecutorchErrorCode` enum. Below is a comprehensive list of all possible errors, organized by category.
+
+### Module State Errors
+
+These errors occur when trying to perform operations on a model in an invalid state.
+
+| Error Code        | Description                           | When It Occurs                                                                         | How to Handle                                                                   |
+| ----------------- | ------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- |
+| `ModuleNotLoaded` | Model is not loaded into memory       | Calling `forward()`, `generate()`, or other inference methods before calling `load()` | Load the model first with `load()`                                              |
+| `ModelGenerating` | Model is already processing a request | Calling `generate()` or `forward()` while another inference is running                | Wait for the current generation to complete, or use `interrupt()` to cancel it |
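+
+For example, you can guard inference calls against both module-state errors with a small wrapper. This is a minimal sketch (it assumes the same `LLMModule` setup as in the overview example above):
+
+```typescript
+import {
+  LLMModule,
+  LLAMA3_2_1B_QLORA,
+  RnExecutorchError,
+  RnExecutorchErrorCode,
+} from 'react-native-executorch';
+
+// Send a message, recovering from the two module-state errors.
+async function sendGuarded(llm: LLMModule, message: string) {
+  try {
+    await llm.sendMessage(message);
+  } catch (err) {
+    if (err instanceof RnExecutorchError) {
+      if (err.code === RnExecutorchErrorCode.ModuleNotLoaded) {
+        // Load the model first, then retry the message once.
+        await llm.load(LLAMA3_2_1B_QLORA, (progress) => console.log(progress));
+        await llm.sendMessage(message);
+        return;
+      }
+      if (err.code === RnExecutorchErrorCode.ModelGenerating) {
+        // Only one prediction can run at a time - drop this call.
+        console.warn('Generation already in progress, ignoring call');
+        return;
+      }
+    }
+    throw err;
+  }
+}
+```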
+
+### Configuration Errors
+
+These errors occur when invalid configuration or input is provided.
+
+| Error Code             | Description                          | When It Occurs                                                                            | How to Handle                                        |
+| ---------------------- | ------------------------------------ | ------------------------------------------------------------------------------------------ | ----------------------------------------------------- |
+| `InvalidConfig`        | Configuration parameters are invalid | Setting parameters outside valid ranges (e.g., `topp` outside [0, 1])                     | Check parameter constraints and provide valid values |
+| `InvalidUserInput`     | Input provided to an API is invalid  | Passing empty arrays, null values, or malformed data to methods                           | Validate input before calling methods                |
+| `InvalidModelSource`   | Model source type is invalid         | Providing the wrong type for the model source (e.g., an object when a string is expected) | Ensure the model source matches the expected type    |
+| `LanguageNotSupported` | Language not supported by model      | Passing an unsupported language to multilingual OCR or Speech-to-Text models              | Use a supported language or a different model        |
+| `WrongDimensions`      | Input tensor dimensions don't match  | Providing input with an incorrect shape for the model                                     | Check the model's expected input dimensions          |
+| `UnexpectedNumInputs`  | Wrong number of inputs provided      | Passing more or fewer inputs than the model expects                                       | Match the number of inputs to the model metadata     |
+
+### File Operations Errors
+
+These errors occur during file read/write operations.
+
+| Error Code        | Description                 | When It Occurs                                               | How to Handle                                 |
+| ----------------- | --------------------------- | ------------------------------------------------------------ | ---------------------------------------------- |
+| `FileReadFailed`  | File read operation failed  | Invalid image URL, unsupported format, or file doesn't exist | Verify the file path and format are correct   |
+| `FileWriteFailed` | File write operation failed | Saving a result image or output file fails                   | Check storage permissions and available space |
+
+### Download & Resource Fetcher Errors
+
+These errors occur during model download and resource management.
+
+| Error Code                          | Description                      | When It Occurs                                            | How to Handle                                                             |
+| ----------------------------------- | -------------------------------- | ---------------------------------------------------------- | --------------------------------------------------------------------------- |
+| `DownloadInterrupted`               | Download was interrupted         | Not all files were downloaded successfully                | Retry the download                                                        |
+| `ResourceFetcherDownloadFailed`     | Resource download failed         | Network error, invalid URL, or server error               | Check network connection and URL validity, retry with exponential backoff |
+| `ResourceFetcherDownloadInProgress` | Download already in progress     | Calling `fetch()` for the same resource while downloading | Wait for the current download to complete                                 |
+| `ResourceFetcherAlreadyPaused`      | Download already paused          | Calling `pauseFetching()` on an already paused download   | Check the download state before pausing                                   |
+| `ResourceFetcherAlreadyOngoing`     | Download already ongoing         | Calling `resumeFetching()` on an active download          | No action needed, the download is already running                         |
+| `ResourceFetcherNotActive`          | No active download found         | Calling pause/resume/cancel on a non-existent download    | Verify the download was started before trying to control it               |
+| `ResourceFetcherMissingUri`         | Required URI information missing | Internal state error during download operations           | Restart the download from the beginning                                   |
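+
+For transient network failures, the retry with exponential backoff suggested above can be wrapped around any promise-returning download call (such as a module's `load()`). A minimal sketch:
+
+```typescript
+import {
+  RnExecutorchError,
+  RnExecutorchErrorCode,
+} from 'react-native-executorch';
+
+// Retry a download up to `maxRetries` times with exponential backoff.
+async function withRetry<T>(
+  download: () => Promise<T>,
+  maxRetries = 3
+): Promise<T> {
+  for (let attempt = 0; ; attempt++) {
+    try {
+      return await download();
+    } catch (err) {
+      const retriable =
+        err instanceof RnExecutorchError &&
+        (err.code === RnExecutorchErrorCode.ResourceFetcherDownloadFailed ||
+          err.code === RnExecutorchErrorCode.DownloadInterrupted);
+      if (!retriable || attempt >= maxRetries) {
+        throw err;
+      }
+      // Wait 1s, 2s, 4s, ... between attempts.
+      await new Promise((resolve) => setTimeout(resolve, 1000 * 2 ** attempt));
+    }
+  }
+}
+```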
+
+### Speech-to-Text Streaming Errors
+
+These errors are specific to streaming transcription operations.
+
+| Error Code                  | Description                         | When It Occurs                                                                                     | How to Handle                                                          |
+| --------------------------- | ----------------------------------- | --------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------ |
+| `MultilingualConfiguration` | Multilingual configuration mismatch | Setting a language on a non-multilingual model, or not setting a language on a multilingual model | Check if the model is multilingual and provide a language accordingly  |
+| `MissingDataChunk`          | Audio data chunk missing            | Streaming transcription without providing audio data                                              | Ensure audio data is provided to streaming methods                     |
+| `StreamingNotStarted`       | Stream not started                  | Calling `stop()` or `insertData()` without calling `start()` first                                | Call `start()` before other streaming operations                       |
+| `StreamingInProgress`       | Stream already in progress          | Calling `start()` while another stream is active                                                  | Stop the current stream before starting a new one                      |
+
+### Model Execution Errors
+
+These errors come from the ExecuTorch runtime during model execution.
+
+| Error Code           | Description                  | When It Occurs                                  | How to Handle                                   |
+| -------------------- | ---------------------------- | ------------------------------------------------ | ------------------------------------------------- |
+| `InvalidModelOutput` | Model output size unexpected | Model produces output of the wrong size         | Verify the model is compatible with the library |
+| `ThreadPoolError`    | Threadpool operation failed  | Internal threading issue                        | Restart the model or the app                    |
+| `UnknownError`       | Unexpected error occurred    | 3rd-party library error or unhandled exception  | Check logs for details, report if reproducible  |
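+
+Because every error carries a stable numeric `code`, handling can also be centralized in a single helper instead of branching at each call site. A minimal sketch (the user-facing messages are illustrative):
+
+```typescript
+import {
+  RnExecutorchError,
+  RnExecutorchErrorCode,
+} from 'react-native-executorch';
+
+// Map an unknown error to a user-facing message.
+function describeError(err: unknown): string {
+  if (!(err instanceof RnExecutorchError)) {
+    return 'Unexpected error';
+  }
+  switch (err.code) {
+    case RnExecutorchErrorCode.ModuleNotLoaded:
+      return 'The model is not loaded yet.';
+    case RnExecutorchErrorCode.ModelGenerating:
+      return 'The model is busy with another request.';
+    case RnExecutorchErrorCode.InvalidUserInput:
+      return 'The provided input is invalid.';
+    case RnExecutorchErrorCode.ResourceFetcherDownloadFailed:
+      return 'Model download failed. Check your network connection.';
+    default:
+      // Fall back to the message attached by the native side.
+      return err.message;
+  }
+}
+```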
+
+### ExecuTorch Runtime Errors
+
+These errors are mapped directly from the ExecuTorch runtime. They typically indicate lower-level execution issues.
+
+#### System Errors
+
+| Error Code     | Description                          |
+| -------------- | ------------------------------------ |
+| `Ok`           | Operation successful (not an error)  |
+| `Internal`     | Internal ExecuTorch error            |
+| `InvalidState` | Operation called in an invalid state |
+| `EndOfMethod`  | End of method reached                |
+
+#### Logical Errors
+
+| Error Code        | Description                          |
+| ----------------- | ------------------------------------ |
+| `NotSupported`    | Operation not supported by the model |
+| `NotImplemented`  | Feature not implemented              |
+| `InvalidArgument` | Invalid argument passed to operation |
+| `InvalidType`     | Type mismatch in operation           |
+| `OperatorMissing` | Required operator missing from model |
+
+#### Resource Errors
+
+| Error Code               | Description                |
+| ------------------------ | -------------------------- |
+| `NotFound`               | Resource not found         |
+| `MemoryAllocationFailed` | Memory allocation failed   |
+| `AccessFailed`           | Access to resource failed  |
+| `InvalidProgram`         | Model program is invalid   |
+| `InvalidExternalData`    | External data is invalid   |
+| `OutOfResources`         | System resources exhausted |
+
+#### Delegate Errors
+
+| Error Code                       | Description                        |
+| -------------------------------- | ---------------------------------- |
+| `DelegateInvalidCompatibility`   | Delegate not compatible with model |
+| `DelegateMemoryAllocationFailed` | Delegate memory allocation failed  |
+| `DelegateInvalidHandle`          | Invalid delegate handle            |
diff --git a/package.json b/package.json
index 2438500c5..e4ee020b2 100644
--- a/package.json
+++ b/package.json
@@ -10,7 +10,8 @@
   },
   "scripts": {
     "lint": "yarn workspaces foreach --all --parallel run lint",
-    "typecheck": "yarn workspaces foreach --all --parallel run typecheck"
+    "typecheck": "yarn workspaces foreach --all --parallel run typecheck",
+    "codegen:errors": "npx ts-node scripts/generate-errors.ts"
   },
   "private": true,
   "devDependencies": {
diff --git a/packages/react-native-executorch/common/rnexecutorch/Error.h b/packages/react-native-executorch/common/rnexecutorch/Error.h
new file mode 100644
index 000000000..661fd24b6
--- /dev/null
+++ b/packages/react-native-executorch/common/rnexecutorch/Error.h
@@ -0,0 +1,37 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+#include
+
+namespace rnexecutorch {
+
+using ErrorVariant =
+    std::variant<RnExecutorchErrorCode, executorch::runtime::Error>;
+
+class RnExecutorchError : public std::runtime_error {
+public:
+  ErrorVariant errorCode;
+
+  RnExecutorchError(ErrorVariant code, const std::string &message)
+      : std::runtime_error(message), errorCode(code) {}
+
+  int32_t getNumericCode() const noexcept {
+    return std::visit(
+        [](auto &&arg) -> int32_t { return static_cast<int32_t>(arg); },
+        errorCode);
+  }
+
+  bool isRnExecutorchError() const noexcept {
+    return std::holds_alternative<RnExecutorchErrorCode>(errorCode);
+  }
+
+  bool isExecuTorchRuntimeError() const noexcept {
+    return std::holds_alternative<executorch::runtime::Error>(errorCode);
+  }
+};
+
+} // namespace rnexecutorch
diff --git a/packages/react-native-executorch/common/rnexecutorch/ErrorCodes.h b/packages/react-native-executorch/common/rnexecutorch/ErrorCodes.h
new file mode 100644
index 000000000..2a95ce847
--- /dev/null
+++ b/packages/react-native-executorch/common/rnexecutorch/ErrorCodes.h
@@ -0,0 +1,126 @@
+#pragma once
+
+// Auto-generated from scripts/errors.config.ts
+// DO NOT EDIT MANUALLY - Run 'yarn codegen:errors' to regenerate
+
+#include <cstdint>
+
+namespace rnexecutorch {
+
+enum class RnExecutorchErrorCode : int32_t {
+  /**
+   * An umbrella error that is usually thrown when something unexpected
+   * happens, for example a 3rd-party library error.
+ */ + UnknownError = 101, + /** + * Thrown when a user tries to run a model that is not yet downloaded or + * loaded into memory. + */ + ModuleNotLoaded = 102, + /** + * An error ocurred when saving a file. This could be, for instance a result + * image from an image model. + */ + FileWriteFailed = 103, + /** + * Thrown when a user tries to run a model that is currently processing. It is + * only allowed to run a single model prediction at a time. + */ + ModelGenerating = 104, + /** + * Thrown when a language is passed to a multi-language model that is not + * supported. For example OCR or Speech To Text. + */ + LanguageNotSupported = 105, + /** + * Thrown when config parameters passed to a model are invalid. For example, + * when LLM's topp is outside of range [0, 1]. + */ + InvalidConfig = 112, + /** + * Thrown when the type of model source passed by the user is invalid. + */ + InvalidModelSource = 255, + /** + * Thrown when the number of passed inputs to the model is different than the + * model metadata specifies. + */ + UnexpectedNumInputs = 97, + /** + * Thrown when React Native ExecuTorch threadpool problem occurs. + */ + ThreadPoolError = 113, + /** + * Thrown when a file read operation failed. This could be invalid image url + * passed to image models, or unsupported format. + */ + FileReadFailed = 114, + /** + * Thrown when the size of model output is unexpected. + */ + InvalidModelOutput = 115, + /** + * Thrown when the dimensions of input tensors don't match the model's + * expected dimensions. + */ + WrongDimensions = 116, + /** + * Thrown when the input passed to our APIs is invalid, for example when + * passing an empty message aray to LLM's generate(). + */ + InvalidUserInput = 117, + /** + * Thrown when the number of downloaded files is unexpected, due to download + * interruptions. + */ + DownloadInterrupted = 118, + /** + * Thrown when there's a configuration mismatch between multilingual and + * language settings in Speech-to-Text models. + */ + MultilingualConfiguration = 160, + /** + * Thrown when streaming transcription is attempted but audio data chunk is + * missing. + */ + MissingDataChunk = 161, + /** + * Thrown when trying to stop or insert data into a stream that hasn't been + * started. + */ + StreamingNotStarted = 162, + /** + * Thrown when trying to start a new streaming session while another is + * already in progress. + */ + StreamingInProgress = 163, + /** + * Thrown when a resource fails to download. This could be due to invalid URL, + * or for example a network problem. + */ + ResourceFetcherDownloadFailed = 180, + /** + * Thrown when a user tries to trigger a download that's already in progress. + */ + ResourceFetcherDownloadInProgress = 181, + /** + * Thrown when trying to pause a download that is already paused. + */ + ResourceFetcherAlreadyPaused = 182, + /** + * Thrown when trying to resume a download that is already ongoing. + */ + ResourceFetcherAlreadyOngoing = 183, + /** + * Thrown when trying to pause, resume, or cancel a download that is not + * active. + */ + ResourceFetcherNotActive = 184, + /** + * Thrown when required URI information is missing for a download operation. 
+ */ + ResourceFetcherMissingUri = 185, +}; + +} // namespace rnexecutorch diff --git a/packages/react-native-executorch/common/rnexecutorch/RnExecutorchInstaller.h b/packages/react-native-executorch/common/rnexecutorch/RnExecutorchInstaller.h index fe562285f..d5c98763d 100644 --- a/packages/react-native-executorch/common/rnexecutorch/RnExecutorchInstaller.h +++ b/packages/react-native-executorch/common/rnexecutorch/RnExecutorchInstaller.h @@ -1,11 +1,10 @@ -#pragma once - #include #include #include #include +#include #include #include #include @@ -65,6 +64,14 @@ class RnExecutorchInstaller { jsiObject.setExternalMemoryPressure( runtime, modelImplementationPtr->getMemoryLowerBound()); return jsiObject; + } catch (const rnexecutorch::RnExecutorchError &e) { + jsi::Object errorData(runtime); + errorData.setProperty(runtime, "code", e.getNumericCode()); + errorData.setProperty( + runtime, "message", + jsi::String::createFromUtf8(runtime, e.what())); + throw jsi::JSError(runtime, + jsi::Value(runtime, std::move(errorData))); } catch (const std::runtime_error &e) { // This catch should be merged with the next one // (std::runtime_error inherits from std::exception) HOWEVER react diff --git a/packages/react-native-executorch/common/rnexecutorch/TokenizerModule.cpp b/packages/react-native-executorch/common/rnexecutorch/TokenizerModule.cpp index 8384f5107..9c08e7b91 100644 --- a/packages/react-native-executorch/common/rnexecutorch/TokenizerModule.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/TokenizerModule.cpp @@ -1,4 +1,6 @@ #include "TokenizerModule.h" +#include "Error.h" +#include "ErrorCodes.h" #include #include #include @@ -15,7 +17,8 @@ TokenizerModule::TokenizerModule( void TokenizerModule::ensureTokenizerLoaded( const std::string &methodName) const { if (!tokenizer) { - throw std::runtime_error( + throw RnExecutorchError( + RnExecutorchErrorCode::ModuleNotLoaded, methodName + " function was called on an uninitialized tokenizer!"); } } diff --git a/packages/react-native-executorch/common/rnexecutorch/data_processing/FileUtils.h b/packages/react-native-executorch/common/rnexecutorch/data_processing/FileUtils.h index d97be6e69..7f441ae48 100644 --- a/packages/react-native-executorch/common/rnexecutorch/data_processing/FileUtils.h +++ b/packages/react-native-executorch/common/rnexecutorch/data_processing/FileUtils.h @@ -3,6 +3,8 @@ #include #include #include +#include +#include #include namespace rnexecutorch::file_utils { @@ -16,7 +18,8 @@ inline std::string getTimeID() { inline std::string loadBytesFromFile(const std::string &path) { std::ifstream fs(path, std::ios::in | std::ios::binary); if (fs.fail()) { - throw std::runtime_error("Failed to open tokenizer file"); + throw RnExecutorchError(RnExecutorchErrorCode::FileReadFailed, + "Failed to open tokenizer file!"); } std::string data; fs.seekg(0, std::ios::end); diff --git a/packages/react-native-executorch/common/rnexecutorch/data_processing/ImageProcessing.cpp b/packages/react-native-executorch/common/rnexecutorch/data_processing/ImageProcessing.cpp index 0402a5d08..ab28d1263 100644 --- a/packages/react-native-executorch/common/rnexecutorch/data_processing/ImageProcessing.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/data_processing/ImageProcessing.cpp @@ -68,7 +68,8 @@ std::string saveToTempFile(const cv::Mat &image) { std::filesystem::path filePath = tempDir / filename; if (!cv::imwrite(filePath.string(), image)) { - throw std::runtime_error("Failed to save the image: " + filePath.string()); + throw 
RnExecutorchError(RnExecutorchErrorCode::FileWriteFailed, + "Failed to save the image: " + filePath.string()); } return "file://" + filePath.string(); @@ -86,7 +87,8 @@ cv::Mat readImage(const std::string &imageURI) { ++segmentIndex; } if (segmentIndex != 1) { - throw std::runtime_error("Read image error: invalid base64 URI"); + throw RnExecutorchError(RnExecutorchErrorCode::FileReadFailed, + "Read image error: invalid base64 URI"); } auto data = base64_decode(stringData); cv::Mat encodedData(1, data.size(), CV_8UC1, (void *)data.data()); @@ -102,11 +104,13 @@ cv::Mat readImage(const std::string &imageURI) { cv::Mat(1, imageData.size(), CV_8UC1, (void *)imageData.data()), cv::IMREAD_COLOR); } else { - throw std::runtime_error("Read image error: unknown protocol"); + throw RnExecutorchError(RnExecutorchErrorCode::FileReadFailed, + "Read image error: unknown protocol"); } if (image.empty()) { - throw std::runtime_error("Read image error: invalid argument"); + throw RnExecutorchError(RnExecutorchErrorCode::FileReadFailed, + "Read image error: invalid argument"); } return image; @@ -221,7 +225,8 @@ readImageToTensor(const std::string &path, "Unexpected tensor size, expected at least 2 dimentions " "but got: %zu.", tensorDims.size()); - throw std::runtime_error(errorMessage); + throw RnExecutorchError(RnExecutorchErrorCode::UnexpectedNumInputs, + errorMessage); } cv::Size tensorSize = cv::Size(tensorDims[tensorDims.size() - 1], tensorDims[tensorDims.size() - 2]); diff --git a/packages/react-native-executorch/common/rnexecutorch/data_processing/Numerical.cpp b/packages/react-native-executorch/common/rnexecutorch/data_processing/Numerical.cpp index cceae9c52..4b19cc99b 100644 --- a/packages/react-native-executorch/common/rnexecutorch/data_processing/Numerical.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/data_processing/Numerical.cpp @@ -7,6 +7,9 @@ #include #include +#include +#include + namespace rnexecutorch::numerical { void softmax(std::span input) { @@ -34,8 +37,9 @@ void softmaxWithTemperature(std::span input, float temperature) { } if (temperature <= 0.0F) { - throw std::invalid_argument( - "Temperature must be greater than 0 for softmax with temperature."); + throw RnExecutorchError( + RnExecutorchErrorCode::InvalidConfig, + "Temperature must be greater than 0 for softmax with temperature!"); } const auto maxElement = *std::ranges::max_element(input); @@ -74,7 +78,7 @@ std::vector meanPooling(std::span modelOutput, << "by the size of attention mask but got size: " << modelOutput.size() << " for model output and size: " << attnMask.size() << " for attention mask"; - throw std::invalid_argument(ss.str()); + throw RnExecutorchError(RnExecutorchErrorCode::InvalidConfig, ss.str()); } auto attnMaskLength = attnMask.size(); diff --git a/packages/react-native-executorch/common/rnexecutorch/data_processing/gzip.cpp b/packages/react-native-executorch/common/rnexecutorch/data_processing/gzip.cpp index 877b85995..aeda796d0 100644 --- a/packages/react-native-executorch/common/rnexecutorch/data_processing/gzip.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/data_processing/gzip.cpp @@ -2,6 +2,8 @@ #include #include "gzip.h" +#include +#include namespace rnexecutorch::gzip { @@ -16,7 +18,8 @@ size_t deflateSize(const std::string &input) { if (::deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, MAX_WBITS + kGzipWrapper, kMemLevel, Z_DEFAULT_STRATEGY) != Z_OK) { - throw std::runtime_error("deflateInit2 failed"); + throw 
RnExecutorchError(RnExecutorchErrorCode::UnknownError, + "deflateInit2 failed"); } size_t outSize = 0; @@ -34,7 +37,8 @@ size_t deflateSize(const std::string &input) { ret = ::deflate(&strm, strm.avail_in ? Z_NO_FLUSH : Z_FINISH); if (ret == Z_STREAM_ERROR) { ::deflateEnd(&strm); - throw std::runtime_error("deflate stream error"); + throw RnExecutorchError(RnExecutorchErrorCode::UnknownError, + "deflate stream error"); } outSize += buf.size() - strm.avail_out; diff --git a/packages/react-native-executorch/common/rnexecutorch/host_objects/ModelHostObject.h b/packages/react-native-executorch/common/rnexecutorch/host_objects/ModelHostObject.h index 32d8be7d4..c8232fe8c 100644 --- a/packages/react-native-executorch/common/rnexecutorch/host_objects/ModelHostObject.h +++ b/packages/react-native-executorch/common/rnexecutorch/host_objects/ModelHostObject.h @@ -1,13 +1,14 @@ #pragma once #include +#include #include #include -#include #include #include #include +#include #include #include #include @@ -181,6 +182,12 @@ template class ModelHostObject : public JsiHostObject { std::apply(std::bind_front(FnPtr, model), std::move(argsConverted)); return jsi_conversion::getJsiValue(std::move(result), runtime); } + } catch (const RnExecutorchError &e) { + jsi::Object errorData(runtime); + errorData.setProperty(runtime, "code", e.getNumericCode()); + errorData.setProperty(runtime, "message", + jsi::String::createFromUtf8(runtime, e.what())); + throw jsi::JSError(runtime, jsi::Value(runtime, std::move(errorData))); } catch (const std::runtime_error &e) { // This catch should be merged with the next one // (std::runtime_error inherits from std::exception) HOWEVER react @@ -221,62 +228,77 @@ template class ModelHostObject : public JsiHostObject { // We need to dispatch a thread if we want the function to be // asynchronous. In this thread all accesses to jsi::Runtime need to // be done via the callInvoker. - threads::GlobalThreadPool::detach( - [this, promise, argsConverted = std::move(argsConverted)]() { - try { - if constexpr (std::is_void_v) { - // For void functions, just call the function and resolve - // with undefined - std::apply(std::bind_front(FnPtr, model), - std::move(argsConverted)); - callInvoker->invokeAsync( - [promise](jsi::Runtime &runtime) { - promise->resolve(jsi::Value::undefined()); - }); - } else { - // For non-void functions, capture the result and convert - // it - auto result = std::apply(std::bind_front(FnPtr, model), - std::move(argsConverted)); - // The result is copied. It should either be quickly - // copiable, or passed with a shared_ptr. - callInvoker->invokeAsync( - [promise, result](jsi::Runtime &runtime) { - promise->resolve(jsi_conversion::getJsiValue( - std::move(result), runtime)); - }); - } - } catch (const std::runtime_error &e) { - // This catch should be merged with the next two - // (std::runtime_error and jsi::JSError inherits from - // std::exception) HOWEVER react native has broken RTTI - // which breaks proper exception type checking. 
Remove when - // the following change is present in our version: - // https://github.com/facebook/react-native/commit/3132cc88dd46f95898a756456bebeeb6c248f20e - callInvoker->invokeAsync([e = std::move(e), promise]() { - promise->reject(e.what()); - }); - return; - } catch (const jsi::JSError &e) { - callInvoker->invokeAsync([e = std::move(e), promise]() { - promise->reject(e.what()); - }); - return; - } catch (const std::exception &e) { - callInvoker->invokeAsync([e = std::move(e), promise]() { - promise->reject(e.what()); - }); - return; - } catch (...) { - callInvoker->invokeAsync( - [promise]() { promise->reject("Unknown error"); }); - return; - } + threads::GlobalThreadPool::detach([this, promise, + argsConverted = + std::move(argsConverted)]() { + try { + if constexpr (std::is_void_v) { + // For void functions, just call the function and resolve + // with undefined + std::apply(std::bind_front(FnPtr, model), + std::move(argsConverted)); + callInvoker->invokeAsync([promise](jsi::Runtime &runtime) { + promise->resolve(jsi::Value::undefined()); + }); + } else { + // For non-void functions, capture the result and convert + // it + auto result = std::apply(std::bind_front(FnPtr, model), + std::move(argsConverted)); + // The result is copied. It should either be quickly + // copiable, or passed with a shared_ptr. + callInvoker->invokeAsync( + [promise, result](jsi::Runtime &runtime) { + promise->resolve(jsi_conversion::getJsiValue( + std::move(result), runtime)); + }); + } + } catch (const RnExecutorchError &e) { + auto code = e.getNumericCode(); + auto msg = std::string(e.what()); + callInvoker->invokeAsync([code, msg, + promise](jsi::Runtime &runtime) { + jsi::Object errorData(runtime); + errorData.setProperty(runtime, "code", code); + errorData.setProperty( + runtime, "message", + jsi::String::createFromUtf8(runtime, msg)); + promise->reject(jsi::Value(runtime, std::move(errorData))); }); + return; + } catch (const std::runtime_error &e) { + // This catch should be merged with the next two + // (std::runtime_error and jsi::JSError inherits from + // std::exception) HOWEVER react native has broken RTTI + // which breaks proper exception type checking. Remove when + // the following change is present in our version: + // https://github.com/facebook/react-native/commit/3132cc88dd46f95898a756456bebeeb6c248f20e + callInvoker->invokeAsync([e = std::move(e), promise]() { + promise->reject(std::string(e.what())); + }); + return; + } catch (const jsi::JSError &e) { + callInvoker->invokeAsync([e = std::move(e), promise]() { + promise->reject(std::string(e.what())); + }); + return; + } catch (const std::exception &e) { + callInvoker->invokeAsync([e = std::move(e), promise]() { + promise->reject(std::string(e.what())); + }); + return; + } catch (...) { + callInvoker->invokeAsync([promise]() { + promise->reject(std::string("Unknown error")); + }); + return; + } + }); } catch (...) 
{ - promise->reject("Couldn't parse JS arguments in a native function"); + promise->reject(std::string( + "Couldn't parse JS arguments in a native function")); } }); @@ -286,6 +308,12 @@ template class ModelHostObject : public JsiHostObject { JSI_HOST_FUNCTION(unload) { try { model->unload(); + } catch (const RnExecutorchError &e) { + jsi::Object errorData(runtime); + errorData.setProperty(runtime, "code", e.getNumericCode()); + errorData.setProperty(runtime, "message", + jsi::String::createFromUtf8(runtime, e.what())); + throw jsi::JSError(runtime, jsi::Value(runtime, std::move(errorData))); } catch (const std::runtime_error &e) { // This catch should be merged with the next one // (std::runtime_error inherits from std::exception) HOWEVER react diff --git a/packages/react-native-executorch/common/rnexecutorch/jsi/Promise.cpp b/packages/react-native-executorch/common/rnexecutorch/jsi/Promise.cpp index f08f26654..510890e34 100644 --- a/packages/react-native-executorch/common/rnexecutorch/jsi/Promise.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/jsi/Promise.cpp @@ -12,9 +12,12 @@ void Promise::resolve(jsi::Value &&result) { _resolver.asObject(runtime).asFunction(runtime).call(runtime, result); } +void Promise::reject(jsi::Value &&error) { + _rejecter.asObject(runtime).asFunction(runtime).call(runtime, error); +} + void Promise::reject(std::string message) { jsi::JSError error(runtime, message); _rejecter.asObject(runtime).asFunction(runtime).call(runtime, error.value()); } - -} // namespace rnexecutorch \ No newline at end of file +} // namespace rnexecutorch diff --git a/packages/react-native-executorch/common/rnexecutorch/jsi/Promise.h b/packages/react-native-executorch/common/rnexecutorch/jsi/Promise.h index 4dba08891..b278daa91 100644 --- a/packages/react-native-executorch/common/rnexecutorch/jsi/Promise.h +++ b/packages/react-native-executorch/common/rnexecutorch/jsi/Promise.h @@ -27,6 +27,7 @@ class Promise { Promise &operator=(const Promise &) = delete; void resolve(jsi::Value &&result); + void reject(jsi::Value &&result); void reject(std::string error); /** @@ -66,4 +67,4 @@ class Promise { jsi::Value _rejecter; }; -} // namespace rnexecutorch \ No newline at end of file +} // namespace rnexecutorch diff --git a/packages/react-native-executorch/common/rnexecutorch/models/BaseModel.cpp b/packages/react-native-executorch/common/rnexecutorch/models/BaseModel.cpp index ee53c7d5a..2ecc3d84c 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/BaseModel.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/BaseModel.cpp @@ -2,7 +2,7 @@ #include #include -#include +#include namespace rnexecutorch::models { @@ -18,8 +18,7 @@ BaseModel::BaseModel(const std::string &modelSource, module_(std::make_unique(modelSource, loadMode)) { Error loadError = module_->load(); if (loadError != Error::Ok) { - throw std::runtime_error("Failed to load model: Error " + - std::to_string(static_cast(loadError))); + throw RnExecutorchError(loadError, "Failed to load model"); } // We use the size of the model .pte file as the lower bound for the memory // occupied by the ET module. 
This is not the whole size however, the module @@ -32,22 +31,23 @@ BaseModel::BaseModel(const std::string &modelSource, std::vector BaseModel::getInputShape(std::string method_name, int32_t index) const { if (!module_) { - throw std::runtime_error("Model not loaded: Cannot get input shape"); + throw RnExecutorchError(RnExecutorchErrorCode::ModuleNotLoaded, + "Model not loaded: Cannot get input shape"); } auto method_meta = module_->method_meta(method_name); if (!method_meta.ok()) { - throw std::runtime_error( - "Failed to get metadata for method '" + method_name + "': Error " + - std::to_string(static_cast(method_meta.error()))); + throw RnExecutorchError(method_meta.error(), + "Failed to get metadata for method '" + + method_name + "'"); } auto input_meta = method_meta->input_tensor_meta(index); if (!input_meta.ok()) { - throw std::runtime_error( + throw RnExecutorchError( + input_meta.error(), "Failed to get metadata for input tensor at index " + - std::to_string(index) + " in method '" + method_name + "': Error " + - std::to_string(static_cast(input_meta.error()))); + std::to_string(index) + " in method '" + method_name + "'"); } auto sizes = input_meta->sizes(); @@ -58,14 +58,15 @@ std::vector BaseModel::getInputShape(std::string method_name, std::vector> BaseModel::getAllInputShapes(std::string methodName) const { if (!module_) { - throw std::runtime_error("Model not loaded: Cannot get all input shapes"); + throw RnExecutorchError(RnExecutorchErrorCode::ModuleNotLoaded, + "Model not loaded: Cannot get all input shapes"); } auto method_meta = module_->method_meta(methodName); if (!method_meta.ok()) { - throw std::runtime_error( - "Failed to get metadata for method '" + methodName + "': Error " + - std::to_string(static_cast(method_meta.error()))); + throw RnExecutorchError(method_meta.error(), + "Failed to get metadata for method '" + methodName + + "'"); } std::vector> output; std::size_t numInputs = method_meta->num_inputs(); @@ -73,10 +74,10 @@ BaseModel::getAllInputShapes(std::string methodName) const { for (std::size_t input = 0; input < numInputs; ++input) { auto input_meta = method_meta->input_tensor_meta(input); if (!input_meta.ok()) { - throw std::runtime_error( + throw RnExecutorchError( + input_meta.error(), "Failed to get metadata for input tensor at index " + - std::to_string(input) + " in method '" + methodName + "': Error " + - std::to_string(static_cast(input_meta.error()))); + std::to_string(input) + " in method '" + methodName + "'"); } auto shape = input_meta->sizes(); output.emplace_back(std::vector(shape.begin(), shape.end())); @@ -90,7 +91,8 @@ BaseModel::getAllInputShapes(std::string methodName) const { std::vector BaseModel::forwardJS(std::vector tensorViewVec) const { if (!module_) { - throw std::runtime_error("Model not loaded: Cannot perform forward pass"); + throw RnExecutorchError(RnExecutorchErrorCode::ModuleNotLoaded, + "Model not loaded: Cannot perform forward pass"); } std::vector evalues; evalues.reserve(tensorViewVec.size()); @@ -113,8 +115,9 @@ BaseModel::forwardJS(std::vector tensorViewVec) const { auto result = module_->forward(evalues); if (!result.ok()) { - throw std::runtime_error("Forward pass failed: Error " + - std::to_string(static_cast(result.error()))); + throw RnExecutorchError(result.error(), + "The model's forward function did not succeed. 
" + "Ensure the model input is correct."); } auto &outputs = result.get(); @@ -138,7 +141,8 @@ BaseModel::forwardJS(std::vector tensorViewVec) const { Result BaseModel::getMethodMeta(const std::string &methodName) const { if (!module_) { - throw std::runtime_error("Model not loaded: Cannot get method meta!"); + throw RnExecutorchError(RnExecutorchErrorCode::ModuleNotLoaded, + "Model not loaded: Cannot get method meta"); } return module_->method_meta(methodName); } @@ -146,7 +150,8 @@ BaseModel::getMethodMeta(const std::string &methodName) const { Result> BaseModel::forward(const EValue &input_evalue) const { if (!module_) { - throw std::runtime_error("Model not loaded: Cannot perform forward pass"); + throw RnExecutorchError(RnExecutorchErrorCode::ModuleNotLoaded, + "Model not loaded: Cannot perform forward pass"); } return module_->forward(input_evalue); } @@ -154,7 +159,8 @@ BaseModel::forward(const EValue &input_evalue) const { Result> BaseModel::forward(const std::vector &input_evalues) const { if (!module_) { - throw std::runtime_error("Model not loaded: Cannot perform forward pass"); + throw RnExecutorchError(RnExecutorchErrorCode::ModuleNotLoaded, + "Model not loaded: Cannot perform forward pass"); } return module_->forward(input_evalues); } @@ -163,7 +169,8 @@ Result> BaseModel::execute(const std::string &methodName, const std::vector &input_value) const { if (!module_) { - throw std::runtime_error("Model not loaded, cannot run execute."); + throw RnExecutorchError(RnExecutorchErrorCode::ModuleNotLoaded, + "Model not loaded, cannot run execute"); } return module_->execute(methodName, input_value); } diff --git a/packages/react-native-executorch/common/rnexecutorch/models/classification/Classification.cpp b/packages/react-native-executorch/common/rnexecutorch/models/classification/Classification.cpp index 3d62dcf0d..0fba07108 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/classification/Classification.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/classification/Classification.cpp @@ -2,6 +2,8 @@ #include +#include +#include #include #include #include @@ -13,7 +15,8 @@ Classification::Classification(const std::string &modelSource, : BaseModel(modelSource, callInvoker) { auto inputShapes = getAllInputShapes(); if (inputShapes.size() == 0) { - throw std::runtime_error("Model seems to not take any input tensors."); + throw RnExecutorchError(RnExecutorchErrorCode::UnexpectedNumInputs, + "Model seems to not take any input tensors."); } std::vector modelInputShape = inputShapes[0]; if (modelInputShape.size() < 2) { @@ -22,7 +25,8 @@ Classification::Classification(const std::string &modelSource, "Unexpected model input size, expected at least 2 dimentions " "but got: %zu.", modelInputShape.size()); - throw std::runtime_error(errorMessage); + throw RnExecutorchError(RnExecutorchErrorCode::WrongDimensions, + errorMessage); } modelImageSize = cv::Size(modelInputShape[modelInputShape.size() - 1], modelInputShape[modelInputShape.size() - 2]); @@ -35,11 +39,10 @@ Classification::generate(std::string imageSource) { .first; auto forwardResult = BaseModel::forward(inputTensor); if (!forwardResult.ok()) { - throw std::runtime_error( - "Failed to forward, error: " + - std::to_string(static_cast(forwardResult.error()))); + throw RnExecutorchError(forwardResult.error(), + "The model's forward function did not succeed. 
" + "Ensure the model input is correct."); } - return postprocess(forwardResult->at(0).toTensor()); } @@ -56,7 +59,8 @@ Classification::postprocess(const Tensor &tensor) { "Unexpected classification output size, was expecting: %zu classes " "but got: %zu classes", constants::kImagenet1kV1Labels.size(), resultVec.size()); - throw std::runtime_error(errorMessage); + throw RnExecutorchError(RnExecutorchErrorCode::InvalidModelOutput, + errorMessage); } numerical::softmax(resultVec); @@ -69,4 +73,4 @@ Classification::postprocess(const Tensor &tensor) { return probs; } -} // namespace rnexecutorch::models::classification \ No newline at end of file +} // namespace rnexecutorch::models::classification diff --git a/packages/react-native-executorch/common/rnexecutorch/models/embeddings/image/ImageEmbeddings.cpp b/packages/react-native-executorch/common/rnexecutorch/models/embeddings/image/ImageEmbeddings.cpp index 9ced00d45..ec3129e76 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/embeddings/image/ImageEmbeddings.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/embeddings/image/ImageEmbeddings.cpp @@ -1,6 +1,8 @@ #include "ImageEmbeddings.h" #include +#include +#include #include #include @@ -12,7 +14,8 @@ ImageEmbeddings::ImageEmbeddings( : BaseEmbeddings(modelSource, callInvoker) { auto inputTensors = getAllInputShapes(); if (inputTensors.size() == 0) { - throw std::runtime_error("Model seems to not take any input tensors."); + throw RnExecutorchError(RnExecutorchErrorCode::UnexpectedNumInputs, + "Model seems to not take any input tensors."); } std::vector modelInputShape = inputTensors[0]; if (modelInputShape.size() < 2) { @@ -21,7 +24,8 @@ ImageEmbeddings::ImageEmbeddings( "Unexpected model input size, expected at least 2 dimentions " "but got: %zu.", modelInputShape.size()); - throw std::runtime_error(errorMessage); + throw RnExecutorchError(RnExecutorchErrorCode::WrongDimensions, + errorMessage); } modelImageSize = cv::Size(modelInputShape[modelInputShape.size() - 1], modelInputShape[modelInputShape.size() - 2]); @@ -33,10 +37,12 @@ ImageEmbeddings::generate(std::string imageSource) { image_processing::readImageToTensor(imageSource, getAllInputShapes()[0]); auto forwardResult = BaseModel::forward(inputTensor); + if (!forwardResult.ok()) { - throw std::runtime_error( - "Function forward in ImageEmbeddings failed with error code: " + - std::to_string(static_cast(forwardResult.error()))); + throw RnExecutorchError( + forwardResult.error(), + "The model's forward function did not succeed. 
Ensure the model input " + "is correct."); } return BaseEmbeddings::postprocess(forwardResult); diff --git a/packages/react-native-executorch/common/rnexecutorch/models/embeddings/text/TextEmbeddings.cpp b/packages/react-native-executorch/common/rnexecutorch/models/embeddings/text/TextEmbeddings.cpp index c452aa331..d645d6afa 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/embeddings/text/TextEmbeddings.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/embeddings/text/TextEmbeddings.cpp @@ -1,5 +1,7 @@ #include "TextEmbeddings.h" #include +#include +#include #include namespace rnexecutorch::models::embeddings { @@ -49,9 +51,10 @@ TextEmbeddings::generate(const std::string input) { auto forwardResult = BaseModel::forward({tokenIds, attnMask}); if (!forwardResult.ok()) { - throw std::runtime_error( - "Function forward in TextEmbeddings failed with error code: " + - std::to_string(static_cast(forwardResult.error()))); + throw RnExecutorchError( + forwardResult.error(), + "The model's forward function did not succeed. Ensure the model input " + "is correct."); } return BaseEmbeddings::postprocess(forwardResult); diff --git a/packages/react-native-executorch/common/rnexecutorch/models/image_segmentation/ImageSegmentation.cpp b/packages/react-native-executorch/common/rnexecutorch/models/image_segmentation/ImageSegmentation.cpp index df42de8dd..a2c1ae865 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/image_segmentation/ImageSegmentation.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/image_segmentation/ImageSegmentation.cpp @@ -3,7 +3,7 @@ #include #include - +#include #include #include #include @@ -17,7 +17,8 @@ ImageSegmentation::ImageSegmentation( : BaseModel(modelSource, callInvoker) { auto inputShapes = getAllInputShapes(); if (inputShapes.size() == 0) { - throw std::runtime_error("Model seems to not take any input tensors."); + throw RnExecutorchError(RnExecutorchErrorCode::UnexpectedNumInputs, + "Model seems to not take any input tensors."); } std::vector modelInputShape = inputShapes[0]; if (modelInputShape.size() < 2) { @@ -26,7 +27,8 @@ ImageSegmentation::ImageSegmentation( "Unexpected model input size, expected at least 2 dimentions " "but got: %zu.", modelInputShape.size()); - throw std::runtime_error(errorMessage); + throw RnExecutorchError(RnExecutorchErrorCode::WrongDimensions, + errorMessage); } modelImageSize = cv::Size(modelInputShape[modelInputShape.size() - 1], modelInputShape[modelInputShape.size() - 2]); @@ -41,9 +43,9 @@ std::shared_ptr ImageSegmentation::generate( auto forwardResult = BaseModel::forward(inputTensor); if (!forwardResult.ok()) { - throw std::runtime_error( - "Failed to forward, error: " + - std::to_string(static_cast(forwardResult.error()))); + throw RnExecutorchError(forwardResult.error(), + "The model's forward function did not succeed. 
" + "Ensure the model input is correct."); } return postprocess(forwardResult->at(0).toTensor(), originalSize, @@ -165,4 +167,4 @@ std::shared_ptr ImageSegmentation::populateDictionary( return dictPtr; } -} // namespace rnexecutorch::models::image_segmentation \ No newline at end of file +} // namespace rnexecutorch::models::image_segmentation diff --git a/packages/react-native-executorch/common/rnexecutorch/models/image_segmentation/ImageSegmentation.h b/packages/react-native-executorch/common/rnexecutorch/models/image_segmentation/ImageSegmentation.h index 4b2a41a66..301833ce8 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/image_segmentation/ImageSegmentation.h +++ b/packages/react-native-executorch/common/rnexecutorch/models/image_segmentation/ImageSegmentation.h @@ -1,11 +1,9 @@ #pragma once -#include -#include - #include #include #include +#include #include "rnexecutorch/metaprogramming/ConstructorHelpers.h" #include @@ -47,4 +45,4 @@ class ImageSegmentation : public BaseModel { REGISTER_CONSTRUCTOR(models::image_segmentation::ImageSegmentation, std::string, std::shared_ptr); -} // namespace rnexecutorch \ No newline at end of file +} // namespace rnexecutorch diff --git a/packages/react-native-executorch/common/rnexecutorch/models/llm/LLM.cpp b/packages/react-native-executorch/common/rnexecutorch/models/llm/LLM.cpp index 8063eaa7b..53d035afd 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/llm/LLM.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/llm/LLM.cpp @@ -2,6 +2,7 @@ #include #include +#include #include namespace rnexecutorch::models::llm { @@ -19,8 +20,7 @@ LLM::LLM(const std::string &modelSource, const std::string &tokenizerSource, std::make_unique(module_.get(), tokenizerSource)) { auto loadResult = runner->load(); if (loadResult != Error::Ok) { - throw std::runtime_error("Failed to load LLM runner, error code: " + - std::to_string(static_cast(loadResult))); + throw RnExecutorchError(loadResult, "Failed to load LLM runner"); } memorySizeLowerBound = fs::file_size(fs::path(modelSource)) + @@ -30,7 +30,8 @@ LLM::LLM(const std::string &modelSource, const std::string &tokenizerSource, // TODO: add a way to manipulate the generation config with params void LLM::generate(std::string input, std::shared_ptr callback) { if (!runner || !runner->is_loaded()) { - throw std::runtime_error("Runner is not loaded"); + throw RnExecutorchError(RnExecutorchErrorCode::ModuleNotLoaded, + "Runner is not loaded"); } // Create a native callback that will invoke the JS callback on the JS thread @@ -43,14 +44,14 @@ void LLM::generate(std::string input, std::shared_ptr callback) { auto config = llm::GenerationConfig{.echo = false, .warming = false}; auto error = runner->generate(input, config, nativeCallback, {}); if (error != executorch::runtime::Error::Ok) { - throw std::runtime_error("Failed to generate text, error code: " + - std::to_string(static_cast(error))); + throw RnExecutorchError(error, "Failed to generate text"); } } void LLM::interrupt() { if (!runner || !runner->is_loaded()) { - throw std::runtime_error("Can't interrupt a model that's not loaded!"); + throw RnExecutorchError(RnExecutorchErrorCode::ModuleNotLoaded, + "Can't interrupt a model that's not loaded"); } runner->stop(); } @@ -68,28 +69,32 @@ size_t LLM::getMemoryLowerBound() const noexcept { void LLM::setCountInterval(size_t countInterval) { if (!runner || !runner->is_loaded()) { - throw std::runtime_error("Can't configure a model that's not loaded!"); + 
throw RnExecutorchError(RnExecutorchErrorCode::ModuleNotLoaded,
+                            "Can't configure a model that's not loaded");
   }
   runner->set_count_interval(countInterval);
 }

 void LLM::setTimeInterval(size_t timeInterval) {
   if (!runner || !runner->is_loaded()) {
-    throw std::runtime_error("Can't configure a model that's not loaded!");
+    throw RnExecutorchError(RnExecutorchErrorCode::ModuleNotLoaded,
+                            "Can't configure a model that's not loaded");
   }
   runner->set_time_interval(timeInterval);
 }

 void LLM::setTemperature(float temperature) {
   if (!runner || !runner->is_loaded()) {
-    throw std::runtime_error("Can't configure a model that's not loaded!");
+    throw RnExecutorchError(RnExecutorchErrorCode::ModuleNotLoaded,
+                            "Can't configure a model that's not loaded");
   }
   runner->set_temperature(temperature);
 };

 void LLM::setTopp(float topp) {
   if (!runner || !runner->is_loaded()) {
-    throw std::runtime_error("Can't configure a model that's not loaded!");
+    throw RnExecutorchError(RnExecutorchErrorCode::ModuleNotLoaded,
+                            "Can't configure a model that's not loaded");
   }
   runner->set_topp(topp);
 }
diff --git a/packages/react-native-executorch/common/rnexecutorch/models/object_detection/ObjectDetection.cpp b/packages/react-native-executorch/common/rnexecutorch/models/object_detection/ObjectDetection.cpp
index c5e84e1bd..3bb1f9dea 100644
--- a/packages/react-native-executorch/common/rnexecutorch/models/object_detection/ObjectDetection.cpp
+++ b/packages/react-native-executorch/common/rnexecutorch/models/object_detection/ObjectDetection.cpp
@@ -1,5 +1,7 @@
 #include "ObjectDetection.h"

+#include
+#include
 #include

 namespace rnexecutorch::models::object_detection {
@@ -10,7 +12,8 @@ ObjectDetection::ObjectDetection(
     : BaseModel(modelSource, callInvoker) {
   auto inputTensors = getAllInputShapes();
   if (inputTensors.size() == 0) {
-    throw std::runtime_error("Model seems to not take any input tensors.");
+    throw RnExecutorchError(RnExecutorchErrorCode::UnexpectedNumInputs,
+                            "Model seems to not take any input tensors.");
   }
   std::vector modelInputShape = inputTensors[0];
   if (modelInputShape.size() < 2) {
@@ -19,7 +22,8 @@
              "Unexpected model input size, expected at least 2 dimentions "
              "but got: %zu.",
              modelInputShape.size());
-    throw std::runtime_error(errorMessage);
+    throw RnExecutorchError(RnExecutorchErrorCode::WrongDimensions,
+                            errorMessage);
   }
   modelImageSize = cv::Size(modelInputShape[modelInputShape.size() - 1],
                             modelInputShape[modelInputShape.size() - 2]);
@@ -72,11 +76,11 @@ ObjectDetection::generate(std::string imageSource, double detectionThreshold) {

   auto forwardResult = BaseModel::forward(inputTensor);
   if (!forwardResult.ok()) {
-    throw std::runtime_error(
-        "Failed to forward, error: " +
-        std::to_string(static_cast(forwardResult.error())));
+    throw RnExecutorchError(forwardResult.error(),
+                            "The model's forward function did not succeed. "
" + "Ensure the model input is correct."); } return postprocess(forwardResult.get(), originalSize, detectionThreshold); } -} // namespace rnexecutorch::models::object_detection \ No newline at end of file +} // namespace rnexecutorch::models::object_detection diff --git a/packages/react-native-executorch/common/rnexecutorch/models/ocr/Detector.cpp b/packages/react-native-executorch/common/rnexecutorch/models/ocr/Detector.cpp index cb8baee5f..e838a7a0f 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/ocr/Detector.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/ocr/Detector.cpp @@ -1,11 +1,12 @@ #include "Detector.h" #include "Constants.h" #include +#include #include #include #include -#include #include + namespace rnexecutorch::models::ocr { Detector::Detector(const std::string &modelSource, std::shared_ptr callInvoker) @@ -15,11 +16,12 @@ Detector::Detector(const std::string &modelSource, std::string methodName = "forward_" + std::to_string(input_size); auto inputShapes = getAllInputShapes(methodName); if (inputShapes[0].size() < 2) { - throw std::runtime_error( - "Unexpected detector model input size for method:" + methodName + - ", expected " - "at least 2 dimensions but got: " + - std::to_string(inputShapes[0].size()) + "."); + std::string errorMessage = + "Unexpected detector model input size for method: " + methodName + + "expected at least 2 dimensions but got: ." + + std::to_string(inputShapes[0].size()); + throw RnExecutorchError(RnExecutorchErrorCode::UnexpectedNumInputs, + errorMessage); } } } @@ -50,9 +52,9 @@ std::vector Detector::generate(const cv::Mat &inputImage, auto forwardResult = BaseModel::execute(methodName, {inputTensor}); if (!forwardResult.ok()) { - throw std::runtime_error( - "Failed to " + methodName + " error: " + - std::to_string(static_cast(forwardResult.error()))); + throw RnExecutorchError(forwardResult.error(), + "The model's forward function did not succeed. 
" + "Ensure the model input is correct."); } return postprocess(forwardResult->at(0).toTensor(), modelInputSize); diff --git a/packages/react-native-executorch/common/rnexecutorch/models/ocr/OCR.cpp b/packages/react-native-executorch/common/rnexecutorch/models/ocr/OCR.cpp index 166e4de8b..a521b4e8b 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/ocr/OCR.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/ocr/OCR.cpp @@ -1,5 +1,7 @@ #include "OCR.h" #include "Constants.h" +#include +#include #include #include @@ -13,7 +15,8 @@ OCR::OCR(const std::string &detectorSource, const std::string &recognizerSource, std::vector OCR::generate(std::string input) { cv::Mat image = image_processing::readImage(input); if (image.empty()) { - throw std::runtime_error("Failed to load image from path: " + input); + throw RnExecutorchError(RnExecutorchErrorCode::FileReadFailed, + "Failed to load image from path: " + input); } /* diff --git a/packages/react-native-executorch/common/rnexecutorch/models/ocr/Recognizer.cpp b/packages/react-native-executorch/common/rnexecutorch/models/ocr/Recognizer.cpp index 6040408db..4ca4c0549 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/ocr/Recognizer.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/ocr/Recognizer.cpp @@ -1,6 +1,7 @@ #include "Recognizer.h" #include "Constants.h" #include +#include #include #include #include @@ -31,17 +32,17 @@ Recognizer::generate(const cv::Mat &grayImage, int32_t inputWidth) { std::string method_name = "forward_" + std::to_string(inputWidth); auto shapes = getAllInputShapes(method_name); if (shapes.empty()) { - throw std::runtime_error("Recognizer model: Input shapes for " + - method_name + " not found"); + throw RnExecutorchError(RnExecutorchErrorCode::UnexpectedNumInputs, + "OCR method takes no inputs: " + method_name); } std::vector tensorDims = shapes[0]; TensorPtr inputTensor = image_processing::getTensorFromMatrixGray(tensorDims, grayImage); auto forwardResult = BaseModel::execute(method_name, {inputTensor}); if (!forwardResult.ok()) { - throw std::runtime_error( - "Failed to forward in Recognizer, error: " + - std::to_string(static_cast(forwardResult.error()))); + throw RnExecutorchError(forwardResult.error(), + "The model's forward function did not succeed. 
" + "Ensure the model input is correct."); } return postprocess(forwardResult->at(0).toTensor()); diff --git a/packages/react-native-executorch/common/rnexecutorch/models/ocr/utils/RecognizerUtils.cpp b/packages/react-native-executorch/common/rnexecutorch/models/ocr/utils/RecognizerUtils.cpp index 38c2657fe..25c2cea61 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/ocr/utils/RecognizerUtils.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/ocr/utils/RecognizerUtils.cpp @@ -1,4 +1,6 @@ #include "RecognizerUtils.h" +#include +#include namespace rnexecutorch::models::ocr::utils { cv::Mat softmax(const cv::Mat &inputs) { @@ -150,7 +152,8 @@ cropImageWithBoundingBox(const cv::Mat &img, const types::PaddingInfo &paddings, const types::PaddingInfo &originalPaddings) { if (originalBbox.empty()) { - throw std::runtime_error("Original bounding box cannot be empty."); + throw RnExecutorchError(RnExecutorchErrorCode::UnknownError, + "Original bounding box cannot be empty."); } const types::Point topLeft = originalBbox[0]; diff --git a/packages/react-native-executorch/common/rnexecutorch/models/speech_to_text/SpeechToText.cpp b/packages/react-native-executorch/common/rnexecutorch/models/speech_to_text/SpeechToText.cpp index 13a1b4b54..3c81eb8e9 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/speech_to_text/SpeechToText.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/speech_to_text/SpeechToText.cpp @@ -1,6 +1,8 @@ #include #include "SpeechToText.h" +#include +#include namespace rnexecutorch::models::speech_to_text { @@ -72,7 +74,8 @@ size_t SpeechToText::getMemoryLowerBound() const noexcept { void SpeechToText::stream(std::shared_ptr callback, std::string languageOption) { if (this->isStreaming) { - throw std::runtime_error("Streaming is already in progress"); + throw RnExecutorchError(RnExecutorchErrorCode::StreamingInProgress, + "Streaming is already in progress!"); } auto nativeCallback = diff --git a/packages/react-native-executorch/common/rnexecutorch/models/speech_to_text/asr/ASR.cpp b/packages/react-native-executorch/common/rnexecutorch/models/speech_to_text/asr/ASR.cpp index bf8f9fb86..fc15bd044 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/speech_to_text/asr/ASR.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/speech_to_text/asr/ASR.cpp @@ -5,6 +5,7 @@ #include "executorch/extension/tensor/tensor_ptr.h" #include "rnexecutorch/data_processing/Numerical.h" #include "rnexecutorch/data_processing/gzip.h" +#include namespace rnexecutorch::models::speech_to_text::asr { @@ -253,9 +254,9 @@ std::vector ASR::encode(std::span waveform) const { const auto encoderResult = this->encoder->forward(modelInputTensor); if (!encoderResult.ok()) { - throw std::runtime_error( - "Forward pass failed during encoding, error code: " + - std::to_string(static_cast(encoderResult.error()))); + throw RnExecutorchError(encoderResult.error(), + "The model's forward function did not succeed. " + "Ensure the model input is correct."); } const auto decoderOutputTensor = encoderResult.get().at(0).toTensor(); @@ -283,9 +284,9 @@ std::vector ASR::decode(std::span tokens, this->decoder->forward({tokenTensor, encoderTensor}); if (!decoderResult.ok()) { - throw std::runtime_error( - "Forward pass failed during decoding, error code: " + - std::to_string(static_cast(decoderResult.error()))); + throw RnExecutorchError(decoderResult.error(), + "The model's forward function did not succeed. 
" + "Ensure the model input is correct."); } const auto logitsTensor = decoderResult.get().at(0).toTensor(); diff --git a/packages/react-native-executorch/common/rnexecutorch/models/style_transfer/StyleTransfer.cpp b/packages/react-native-executorch/common/rnexecutorch/models/style_transfer/StyleTransfer.cpp index aab87667e..9030807a3 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/style_transfer/StyleTransfer.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/style_transfer/StyleTransfer.cpp @@ -4,6 +4,8 @@ #include #include +#include +#include namespace rnexecutorch::models::style_transfer { using namespace facebook; @@ -15,7 +17,8 @@ StyleTransfer::StyleTransfer(const std::string &modelSource, : BaseModel(modelSource, callInvoker) { auto inputShapes = getAllInputShapes(); if (inputShapes.size() == 0) { - throw std::runtime_error("Model seems to not take any input tensors."); + throw RnExecutorchError(RnExecutorchErrorCode::UnexpectedNumInputs, + "Model seems to not take any input tensors"); } std::vector modelInputShape = inputShapes[0]; if (modelInputShape.size() < 2) { @@ -24,7 +27,8 @@ StyleTransfer::StyleTransfer(const std::string &modelSource, "Unexpected model input size, expected at least 2 dimentions " "but got: %zu.", modelInputShape.size()); - throw std::runtime_error(errorMessage); + throw RnExecutorchError(RnExecutorchErrorCode::UnexpectedNumInputs, + errorMessage); } modelImageSize = cv::Size(modelInputShape[modelInputShape.size() - 1], modelInputShape[modelInputShape.size() - 2]); @@ -44,12 +48,12 @@ std::string StyleTransfer::generate(std::string imageSource) { auto forwardResult = BaseModel::forward(inputTensor); if (!forwardResult.ok()) { - throw std::runtime_error( - "Failed to forward, error: " + - std::to_string(static_cast(forwardResult.error()))); + throw RnExecutorchError(forwardResult.error(), + "The model's forward function did not succeed. 
" + "Ensure the model input is correct."); } return postprocess(forwardResult->at(0).toTensor(), originalSize); } -} // namespace rnexecutorch::models::style_transfer \ No newline at end of file +} // namespace rnexecutorch::models::style_transfer diff --git a/packages/react-native-executorch/common/rnexecutorch/models/text_to_image/Decoder.cpp b/packages/react-native-executorch/common/rnexecutorch/models/text_to_image/Decoder.cpp index ceef51175..e3e37521e 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/text_to_image/Decoder.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/text_to_image/Decoder.cpp @@ -3,6 +3,8 @@ #include #include +#include +#include namespace rnexecutorch::models::text_to_image { @@ -20,9 +22,9 @@ std::vector Decoder::generate(std::vector &input) const { auto forwardResult = BaseModel::forward(inputTensor); if (!forwardResult.ok()) { - throw std::runtime_error( - "Function forward in decoder failed with error code: " + - std::to_string(static_cast(forwardResult.error()))); + throw RnExecutorchError( + forwardResult.error(), + "Function forward in decoder failed with error code: "); } auto forwardResultTensor = forwardResult->at(0).toTensor(); diff --git a/packages/react-native-executorch/common/rnexecutorch/models/text_to_image/Scheduler.cpp b/packages/react-native-executorch/common/rnexecutorch/models/text_to_image/Scheduler.cpp index e20bf60bb..61640f7f6 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/text_to_image/Scheduler.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/text_to_image/Scheduler.cpp @@ -5,6 +5,8 @@ #include #include +#include +#include namespace rnexecutorch::models::text_to_image { using namespace facebook; @@ -70,7 +72,8 @@ std::vector Scheduler::step(const std::vector &sample, const std::vector &noise, int32_t timestep) { if (numInferenceSteps == 0) { - throw std::runtime_error( + throw RnExecutorchError( + RnExecutorchErrorCode::InvalidConfig, "Number of inference steps is not set. 
Call `set_timesteps` first."); } diff --git a/packages/react-native-executorch/common/rnexecutorch/models/text_to_image/TextToImage.cpp b/packages/react-native-executorch/common/rnexecutorch/models/text_to_image/TextToImage.cpp index 8ce1e8b07..e8de58b70 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/text_to_image/TextToImage.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/text_to_image/TextToImage.cpp @@ -9,6 +9,9 @@ #include #include +#include +#include + namespace rnexecutorch::models::text_to_image { using namespace executorch::extension; @@ -32,7 +35,8 @@ TextToImage::TextToImage(const std::string &tokenizerSource, void TextToImage::setImageSize(int32_t imageSize) { if (imageSize % 32 != 0) { - throw std::runtime_error("Image size must be a multiple of 32."); + throw RnExecutorchError(RnExecutorchErrorCode::InvalidConfig, + "Image size must be a multiple of 32."); } this->imageSize = imageSize; constexpr int32_t latentDownsample = 8; diff --git a/packages/react-native-executorch/common/rnexecutorch/models/text_to_image/UNet.cpp b/packages/react-native-executorch/common/rnexecutorch/models/text_to_image/UNet.cpp index d5e7e9745..5505339b0 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/text_to_image/UNet.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/text_to_image/UNet.cpp @@ -1,4 +1,6 @@ #include "UNet.h" +#include +#include namespace rnexecutorch::models::text_to_image { @@ -26,9 +28,9 @@ std::vector UNet::generate(std::vector &latents, int32_t timestep, auto forwardResult = BaseModel::forward({latentsTensor, timestepTensor, embeddingsTensor}); if (!forwardResult.ok()) { - throw std::runtime_error( - "Function forward in UNet failed with error code: " + - std::to_string(static_cast(forwardResult.error()))); + throw RnExecutorchError(forwardResult.error(), + "The model's forward function did not succeed. " + "Ensure the model input is correct."); } auto forwardResultTensor = forwardResult->at(0).toTensor(); diff --git a/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/Decoder.cpp b/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/Decoder.cpp index 13f5eadb4..ac46c08f7 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/Decoder.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/Decoder.cpp @@ -1,4 +1,5 @@ #include "Decoder.h" +#include #include #include @@ -44,9 +45,9 @@ Decoder::generate(const std::string &method, const Configuration &inputConfig, execute(method, {asrTensor, f0Tensor, nTensor, voiceRefTensor}); if (!results.ok()) { - throw std::runtime_error( - "[Kokoro::Decoder] Failed to execute method " + method + - ", error: " + std::to_string(static_cast(results.error()))); + throw RnExecutorchError(results.error(), + "The model's forward function did not succeed. 
" + "Ensure the model input is correct."); } // Returns a single [audio] vector, which contains the diff --git a/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/DurationPredictor.cpp b/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/DurationPredictor.cpp index 1cb550c27..98bd0a986 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/DurationPredictor.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/DurationPredictor.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -51,9 +52,9 @@ std::tuple, int32_t> DurationPredictor::generate( method, {tokensTensor, textMaskTensor, voiceRefTensor, speedTensor}); if (!results.ok()) { - throw std::runtime_error( - "[Kokoro::DurationPredictor] Failed to execute method " + method + - ", error: " + std::to_string(static_cast(results.error()))); + throw RnExecutorchError(results.error(), + "The model's forward function did not succeed. " + "Ensure the model input is correct."); } // Unpack the result @@ -103,13 +104,15 @@ void DurationPredictor::scaleDurations(Tensor &durations, // We expect durations tensor to be a Long tensor of a shape [1, n_tokens] if (durations.dtype() != ScalarType::Long && durations.dtype() != ScalarType::Int) { - throw std::runtime_error( + throw RnExecutorchError( + RnExecutorchErrorCode::InvalidModelOutput, "[Kokoro::DurationPredictor] Attempted to scale a non-integer tensor"); } auto shape = durations.sizes(); if (shape.size() != 1) { - throw std::runtime_error( + throw RnExecutorchError( + RnExecutorchErrorCode::InvalidModelOutput, "[Kokoro::DurationPredictor] Attempted to scale an ill-shaped tensor"); } diff --git a/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/Encoder.cpp b/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/Encoder.cpp index 5b0a9bf9b..5f7718828 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/Encoder.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/Encoder.cpp @@ -1,4 +1,5 @@ #include "Encoder.h" +#include #include #include @@ -46,9 +47,9 @@ Result> Encoder::generate(const std::string &method, execute(method, {tokensTensor, textMaskTensor, predAlnTrgTensor}); if (!results.ok()) { - throw std::runtime_error( - "[Kokoro::Encoder] Failed to execute method " + method + - ", error: " + std::to_string(static_cast(results.error()))); + throw RnExecutorchError(results.error(), + "The model's forward function did not succeed. " + "Ensure the model input is correct."); } // Returns a single tensor with ASR (Acoustic State Representation) features. 
diff --git a/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/F0NPredictor.cpp b/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/F0NPredictor.cpp index a271f865b..e846c31b2 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/F0NPredictor.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/F0NPredictor.cpp @@ -1,4 +1,5 @@ #include "F0NPredictor.h" +#include #include #include @@ -38,9 +39,9 @@ Result> F0NPredictor::generate( auto results = execute(method, {indicesTensor, durTensor, voiceRefTensor}); if (!results.ok()) { - throw std::runtime_error( - "[Kokoro::DurationPredictor] Failed to execute method " + method + - ", error: " + std::to_string(static_cast(results.error()))); + throw RnExecutorchError(results.error(), + "The model's forward function did not succeed. " + "Ensure the model input is correct."); } // Returns F0 prediction, N prediction, and related features (en, diff --git a/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/Kokoro.cpp b/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/Kokoro.cpp index 0fa0a191f..9e789fef2 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/Kokoro.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/Kokoro.cpp @@ -3,8 +3,8 @@ #include #include +#include #include -#include namespace rnexecutorch::models::text_to_speech::kokoro { @@ -37,8 +37,9 @@ void Kokoro::loadVoice(const std::string &voiceSource) { std::ifstream in(voiceSource, std::ios::binary); if (!in) { - throw std::runtime_error("[Kokoro::loadSingleVoice]: cannot open file: " + - voiceSource); + throw RnExecutorchError(RnExecutorchErrorCode::FileReadFailed, + "[Kokoro::loadSingleVoice]: cannot open file: " + + voiceSource); } // Check the file size @@ -46,24 +47,27 @@ void Kokoro::loadVoice(const std::string &voiceSource) { const std::streamsize fileSize = in.tellg(); in.seekg(0, std::ios::beg); if (fileSize < expectedBytes) { - throw std::runtime_error( + throw RnExecutorchError( + RnExecutorchErrorCode::FileReadFailed, "[Kokoro::loadSingleVoice]: file too small: expected at least " + - std::to_string(expectedBytes) + " bytes, got " + - std::to_string(fileSize)); + std::to_string(expectedBytes) + " bytes, got " + + std::to_string(fileSize)); } // Read [rows, 1, cols] as contiguous floats directly into voice_ // ([rows][cols]) if (!in.read(reinterpret_cast(voice_.data()->data()), expectedBytes)) { - throw std::runtime_error( + throw RnExecutorchError( + RnExecutorchErrorCode::FileReadFailed, "[Kokoro::loadSingleVoice]: failed to read voice weights"); } } std::vector Kokoro::generate(std::string text, float speed) { if (text.size() > constants::kMaxTextSize) { - throw std::invalid_argument("Kokoro: maximum input text size exceeded"); + throw RnExecutorchError(RnExecutorchErrorCode::InvalidUserInput, + "Kokoro: maximum input text size exceeded"); } // G2P (Grapheme to Phoneme) conversion @@ -105,7 +109,8 @@ std::vector Kokoro::generate(std::string text, float speed) { void Kokoro::stream(std::string text, float speed, std::shared_ptr callback) { if (text.size() > constants::kMaxTextSize) { - throw std::invalid_argument("Kokoro: maximum input text size exceeded"); + throw RnExecutorchError(RnExecutorchErrorCode::InvalidUserInput, + "Kokoro: maximum input text size exceeded"); } // Build a full 
callback function diff --git a/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/Partitioner.cpp b/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/Partitioner.cpp index 0aa744b96..da1e981f8 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/Partitioner.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/Partitioner.cpp @@ -3,7 +3,7 @@ #include #include #include -#include +#include namespace rnexecutorch::models::text_to_speech::kokoro { @@ -51,7 +51,9 @@ std::vector Partitioner::divide( void Partitioner::setFixedModel(const std::string &modelLabel) { if (!constants::kInputs.contains(modelLabel)) - throw std::invalid_argument("Partitioner: invalid fixed model label"); + throw rnexecutorch::RnExecutorchError( + rnexecutorch::RnExecutorchErrorCode::InvalidConfig, + "Partitioner: invalid fixed model label"); fixedModel_ = {modelLabel}; } diff --git a/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/Utils.cpp b/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/Utils.cpp index 88e0cd68c..dff6f7cb5 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/Utils.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/text_to_speech/kokoro/Utils.cpp @@ -2,6 +2,7 @@ #include "Constants.h" #include #include +#include namespace rnexecutorch::models::text_to_speech::kokoro::utils { @@ -61,7 +62,8 @@ std::span stripAudio(std::span audio, size_t margin) { std::vector tokenize(const std::u32string &phonemes, std::optional expectedSize) { if (expectedSize.has_value() && expectedSize.value() < 2) { - throw std::invalid_argument( + throw rnexecutorch::RnExecutorchError( + rnexecutorch::RnExecutorchErrorCode::InvalidUserInput, "expected number of tokens cannot be lower than 2"); } diff --git a/packages/react-native-executorch/common/rnexecutorch/models/vertical_ocr/VerticalDetector.cpp b/packages/react-native-executorch/common/rnexecutorch/models/vertical_ocr/VerticalDetector.cpp index a0faf43ee..d8ae817a6 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/vertical_ocr/VerticalDetector.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/vertical_ocr/VerticalDetector.cpp @@ -1,5 +1,6 @@ #include "VerticalDetector.h" +#include #include #include #include @@ -11,7 +12,7 @@ namespace rnexecutorch::models::ocr { VerticalDetector::VerticalDetector( const std::string &modelSource, std::shared_ptr callInvoker) - : Detector(modelSource, callInvoker) {} + : Detector(modelSource, callInvoker) {}; std::vector VerticalDetector::generate(const cv::Mat &inputImage, int32_t inputWidth) { @@ -35,9 +36,9 @@ VerticalDetector::generate(const cv::Mat &inputImage, int32_t inputWidth) { auto forwardResult = BaseModel::execute(methodName, {inputTensor}); if (!forwardResult.ok()) { - throw std::runtime_error( - "Failed to " + methodName + " error: " + - std::to_string(static_cast(forwardResult.error()))); + throw RnExecutorchError(forwardResult.error(), + "The model's forward function did not succeed. 
" + "Ensure the model input is correct."); } return postprocess(forwardResult->at(0).toTensor(), calculateModelImageSize(inputWidth), diff --git a/packages/react-native-executorch/common/rnexecutorch/models/vertical_ocr/VerticalOCR.cpp b/packages/react-native-executorch/common/rnexecutorch/models/vertical_ocr/VerticalOCR.cpp index 40c0ce26a..0f75d2015 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/vertical_ocr/VerticalOCR.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/vertical_ocr/VerticalOCR.cpp @@ -1,5 +1,6 @@ #include "VerticalOCR.h" #include +#include #include #include #include @@ -18,7 +19,8 @@ VerticalOCR::VerticalOCR(const std::string &detectorSource, std::vector VerticalOCR::generate(std::string input) { cv::Mat image = image_processing::readImage(input); if (image.empty()) { - throw std::runtime_error("Failed to load image from path: " + input); + throw RnExecutorchError(RnExecutorchErrorCode::FileReadFailed, + "Failed to load image from path: " + input); } // 1. Large Detector std::vector largeBoxes = diff --git a/packages/react-native-executorch/common/rnexecutorch/models/voice_activity_detection/VoiceActivityDetection.cpp b/packages/react-native-executorch/common/rnexecutorch/models/voice_activity_detection/VoiceActivityDetection.cpp index dbc974706..a1252edfe 100644 --- a/packages/react-native-executorch/common/rnexecutorch/models/voice_activity_detection/VoiceActivityDetection.cpp +++ b/packages/react-native-executorch/common/rnexecutorch/models/voice_activity_detection/VoiceActivityDetection.cpp @@ -1,6 +1,8 @@ #include "VoiceActivityDetection.h" #include "rnexecutorch/data_processing/dsp.h" #include "rnexecutorch/models/voice_activity_detection/Utils.h" +#include +#include #include #include @@ -77,9 +79,9 @@ VoiceActivityDetection::generate(std::span waveform) const { executorch::aten::ScalarType::Float); auto forwardResult = BaseModel::forward(inputTensor); if (!forwardResult.ok()) { - throw std::runtime_error( - "Failed to forward, error: " + - std::to_string(static_cast(forwardResult.error()))); + throw RnExecutorchError(forwardResult.error(), + "The model's forward function did not succeed. " + "Ensure the model input is correct."); } auto tensor = forwardResult->at(0).toTensor(); startIdx = utils::getNonSpeechClassProbabilites( @@ -93,9 +95,9 @@ VoiceActivityDetection::generate(std::span waveform) const { executorch::aten::ScalarType::Float); auto forwardResult = BaseModel::forward(inputTensor); if (!forwardResult.ok()) { - throw std::runtime_error( - "Failed to forward, error: " + - std::to_string(static_cast(forwardResult.error()))); + throw RnExecutorchError(forwardResult.error(), + "The model's forward function did not succeed. 
" + "Ensure the model input is correct."); } auto tensor = forwardResult->at(0).toTensor(); startIdx = utils::getNonSpeechClassProbabilites(tensor, tensor.size(2), diff --git a/packages/react-native-executorch/common/rnexecutorch/threads/HighPerformanceThreadPool.h b/packages/react-native-executorch/common/rnexecutorch/threads/HighPerformanceThreadPool.h index 773980cc7..67610ec95 100644 --- a/packages/react-native-executorch/common/rnexecutorch/threads/HighPerformanceThreadPool.h +++ b/packages/react-native-executorch/common/rnexecutorch/threads/HighPerformanceThreadPool.h @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include @@ -20,6 +19,8 @@ #include #include +#include +#include #include #ifdef __APPLE__ @@ -87,7 +88,8 @@ class HighPerformanceThreadPool { std::scoped_lock lock(queueMutex); if (!running) { - throw std::runtime_error("Thread pool is shutting down"); + throw RnExecutorchError(RnExecutorchErrorCode::ThreadPoolError, + "Thread pool is shutting down"); } WorkItem item(std::move(task), priority, diff --git a/packages/react-native-executorch/src/Error.ts b/packages/react-native-executorch/src/Error.ts deleted file mode 100644 index d89cbc0f5..000000000 --- a/packages/react-native-executorch/src/Error.ts +++ /dev/null @@ -1,59 +0,0 @@ -export enum ETError { - // React-native-ExecuTorch errors - UndefinedError = 0x65, - ModuleNotLoaded = 0x66, - FileWriteFailed = 0x67, - ModelGenerating = 0x68, - LanguageNotSupported = 0x69, - InvalidConfig = 0x70, - InvalidModelSource = 0xff, - - // SpeechToText errors - MultilingualConfiguration = 0xa0, - MissingDataChunk = 0xa1, - StreamingNotStarted = 0xa2, - - // ExecuTorch mapped errors - // Based on: https://github.com/pytorch/executorch/blob/main/runtime/core/error.h - // System errors - Ok = 0x00, - Internal = 0x01, - InvalidState = 0x02, - EndOfMethod = 0x03, - - // Logical errors - NotSupported = 0x10, - NotImplemented = 0x11, - InvalidArgument = 0x12, - InvalidType = 0x13, - OperatorMissing = 0x14, - - // Resource errors - NotFound = 0x20, - MemoryAllocationFailed = 0x21, - AccessFailed = 0x22, - InvalidProgram = 0x23, - InvalidExternalData = 0x24, - OutOfResources = 0x25, - - // Delegate errors - DelegateInvalidCompatibility = 0x30, - DelegateMemoryAllocationFailed = 0x31, - DelegateInvalidHandle = 0x32, -} - -export const getError = (e: unknown | ETError | Error): string => { - if (typeof e === 'number') { - return ETError[e] ?? ETError[ETError.UndefinedError]; - } - - // try to extract number from message (can contain false positives) - const error = e as Error; - const errorCode = parseInt(error.message, 10); - - if (Number.isNaN(errorCode)) { - return error.message; - } - - return ETError[errorCode] ?? 
ETError[ETError.UndefinedError]; -}; diff --git a/packages/react-native-executorch/src/controllers/LLMController.ts b/packages/react-native-executorch/src/controllers/LLMController.ts index 80a129c13..7dcc51528 100644 --- a/packages/react-native-executorch/src/controllers/LLMController.ts +++ b/packages/react-native-executorch/src/controllers/LLMController.ts @@ -1,6 +1,5 @@ import { ResourceSource } from '../types/common'; import { ResourceFetcher } from '../utils/ResourceFetcher'; -import { ETError, getError } from '../Error'; import { Template } from '@huggingface/jinja'; import { DEFAULT_CHAT_CONFIG } from '../constants/llmDefaults'; import { @@ -14,6 +13,8 @@ import { import { parseToolCall } from '../utils/llm'; import { Logger } from '../common/Logger'; import { readAsStringAsync } from 'expo-file-system/legacy'; +import { RnExecutorchError, parseUnknownError } from '../errors/errorUtils'; +import { RnExecutorchErrorCode } from '../errors/ErrorCodes'; export class LLMController { private nativeModule: any; @@ -124,7 +125,10 @@ export class LLMController { const modelPath = modelResult?.[0]; if (!tokenizerPath || !tokenizerConfigPath || !modelPath) { - throw new Error('Download interrupted!'); + throw new RnExecutorchError( + RnExecutorchErrorCode.DownloadInterrupted, + 'The download has been interrupted. As a result, not every file was downloaded. Please retry the download.' + ); } this.tokenizerConfig = JSON.parse( @@ -158,7 +162,7 @@ export class LLMController { }; } catch (e) { this.isReadyCallback(false); - throw new Error(getError(e)); + throw parseUnknownError(e); } } @@ -189,8 +193,9 @@ export class LLMController { } if (generationConfig?.topp) { if (generationConfig.topp < 0 || generationConfig.topp > 1) { - throw new Error( - getError(ETError.InvalidConfig) + 'TopP has to be in range [0, 1].' + throw new RnExecutorchError( + RnExecutorchErrorCode.InvalidConfig, + 'Top P has to be in range [0, 1]' ); } this.nativeModule.setTopp(generationConfig.topp); @@ -204,9 +209,9 @@ export class LLMController { public delete() { if (this._isGenerating) { - throw new Error( - getError(ETError.ModelGenerating) + - 'You cannot delete the model now. You need to interrupt first.' + throw new RnExecutorchError( + RnExecutorchErrorCode.ModelGenerating, + 'You cannot delete the model now. You need to interrupt it first.' ); } this.onToken = () => {}; @@ -217,17 +222,23 @@ export class LLMController { public async forward(input: string) { if (!this._isReady) { - throw new Error(getError(ETError.ModuleNotLoaded)); + throw new RnExecutorchError( + RnExecutorchErrorCode.ModuleNotLoaded, + 'The model is currently not loaded. Please load the model before calling forward().' + ); } if (this._isGenerating) { - throw new Error(getError(ETError.ModelGenerating)); + throw new RnExecutorchError( + RnExecutorchErrorCode.ModelGenerating, + 'The model is currently generating. Please wait until previous model run is complete.' + ); } try { this.responseCallback(''); this.isGeneratingCallback(true); await this.nativeModule.generate(input, this.onToken); } catch (e) { - throw new Error(getError(e)); + throw parseUnknownError(e); } finally { this.isGeneratingCallback(false); } @@ -243,10 +254,16 @@ export class LLMController { public async generate(messages: Message[], tools?: LLMTool[]) { if (!this._isReady) { - throw new Error(getError(ETError.ModuleNotLoaded)); + throw new RnExecutorchError( + RnExecutorchErrorCode.ModuleNotLoaded, + 'The model is currently not loaded. Please load the model before calling generate().' + ); } if (messages.length === 0) { - throw new Error(`Empty 'messages' array!`); + throw new RnExecutorchError( + RnExecutorchErrorCode.InvalidUserInput, + 'Messages array is empty!' + ); } if (messages[0] && messages[0].role !== 'system') { Logger.warn( @@ -319,7 +336,10 @@ export class LLMController { templateFlags?: Object ): string { if (!tokenizerConfig.chat_template) { - throw Error("Tokenizer config doesn't include chat_template"); + throw new RnExecutorchError( + RnExecutorchErrorCode.InvalidConfig, + "Tokenizer config doesn't include chat_template" + ); } const template = new Template(tokenizerConfig.chat_template);
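`LLMController.load` now fails with a dedicated `DownloadInterrupted` code instead of a bare `Error`, so callers can express a retry policy without string matching. A small sketch assuming only the names exported by this PR; `withDownloadRetry` and `attempts` are illustrative:

```typescript
import {
  RnExecutorchError,
  RnExecutorchErrorCode,
} from 'react-native-executorch';

// Retries a load() a few times, but only when the download was interrupted;
// every other error is rethrown immediately.
async function withDownloadRetry<T>(
  load: () => Promise<T>,
  attempts: number = 3
): Promise<T> {
  for (let attempt = 1; ; attempt++) {
    try {
      return await load();
    } catch (err) {
      const interrupted =
        err instanceof RnExecutorchError &&
        err.code === RnExecutorchErrorCode.DownloadInterrupted;
      if (!interrupted || attempt >= attempts) throw err;
      console.warn(`Download interrupted, retrying (${attempt}/${attempts})`);
    }
  }
}
```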
diff --git a/packages/react-native-executorch/src/controllers/OCRController.ts b/packages/react-native-executorch/src/controllers/OCRController.ts index f2e81188d..4cac5d891 100644 --- a/packages/react-native-executorch/src/controllers/OCRController.ts +++ b/packages/react-native-executorch/src/controllers/OCRController.ts @@ -1,5 +1,6 @@ import { symbols } from '../constants/ocr/symbols'; -import { ETError, getError } from '../Error'; +import { RnExecutorchErrorCode } from '../errors/ErrorCodes'; +import { RnExecutorchError, parseUnknownError } from '../errors/errorUtils'; import { ResourceSource } from '../types/common'; import { OCRLanguage } from '../types/ocr'; import { ResourceFetcher } from '../utils/ResourceFetcher'; @@ -8,15 +9,15 @@ export class OCRController { private nativeModule: any; public isReady: boolean = false; public isGenerating: boolean = false; - public error: string | null = null; + public error: RnExecutorchError | null = null; private isReadyCallback: (isReady: boolean) => void; private isGeneratingCallback: (isGenerating: boolean) => void; - private errorCallback: (error: string) => void; + private errorCallback: (error: RnExecutorchError) => void; constructor({ isReadyCallback = (_isReady: boolean) => {}, isGeneratingCallback = (_isGenerating: boolean) => {}, - errorCallback = (_error: string) => {}, + errorCallback = (_error: RnExecutorchError) => {}, } = {}) { this.isReadyCallback = isReadyCallback; this.isGeneratingCallback = isGeneratingCallback; @@ -33,7 +34,10 @@ if (!detectorSource || !recognizerSource) return; if (!symbols[language]) { - throw new Error(getError(ETError.LanguageNotSupported)); + throw new RnExecutorchError( + RnExecutorchErrorCode.LanguageNotSupported, + 'The provided language for OCR is not supported. Please try using another language.' + ); } this.isReady = false; @@ -45,7 +49,10 @@ recognizerSource ); if (paths === null || paths.length < 2) { - throw new Error('Download interrupted!'); + throw new RnExecutorchError( + RnExecutorchErrorCode.DownloadInterrupted, + 'The download has been interrupted. As a result, not every file was downloaded. Please retry the download.' + ); } this.nativeModule = global.loadOCR( paths[0]!, @@ -56,19 +63,25 @@ this.isReadyCallback(this.isReady); } catch (e) { if (this.errorCallback) { - this.errorCallback(getError(e)); + this.errorCallback(parseUnknownError(e)); } else { - throw new Error(getError(e)); + throw parseUnknownError(e); } } }; public forward = async (imageSource: string) => { if (!this.isReady) { - throw new Error(getError(ETError.ModuleNotLoaded)); + throw new RnExecutorchError( + RnExecutorchErrorCode.ModuleNotLoaded, + 'The model is currently not loaded. Please load the model before calling forward().' + ); } if (this.isGenerating) { - throw new Error(getError(ETError.ModelGenerating)); + throw new RnExecutorchError( + RnExecutorchErrorCode.ModelGenerating, + 'The model is currently generating. Please wait until previous model run is complete.' + ); } try { @@ -76,7 +89,7 @@ this.isGeneratingCallback(this.isGenerating); return await this.nativeModule.generate(imageSource); } catch (e) { - throw new Error(getError(e)); + throw parseUnknownError(e); } finally { this.isGenerating = false; this.isGeneratingCallback(this.isGenerating); @@ -85,9 +98,9 @@ public delete() { if (this.isGenerating) { - throw new Error( - getError(ETError.ModelGenerating) + - ', You cannot delete the model. You must wait until the generating is finished.' + throw new RnExecutorchError( + RnExecutorchErrorCode.ModelGenerating, + 'The model is currently generating. Please wait until previous model run is complete.' ); } this.nativeModule.unload();
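Note the shape change in the callback surface: `errorCallback` (and the public `error` field) now carries a full `RnExecutorchError` instead of a pre-formatted string, which is also what the hooks further down propagate. A sketch of a consumer that branches on the code; the handler itself is illustrative:

```typescript
import {
  RnExecutorchError,
  RnExecutorchErrorCode,
} from 'react-native-executorch';

const handleOcrError = (error: RnExecutorchError) => {
  switch (error.code) {
    case RnExecutorchErrorCode.LanguageNotSupported:
      // fall back to a language the loaded recognizer actually supports
      console.warn('Unsupported OCR language, falling back to English.');
      break;
    case RnExecutorchErrorCode.DownloadInterrupted:
      console.warn('Model download was interrupted; retry load().');
      break;
    default:
      console.error(`OCR error (${error.code}): ${error.message}`);
  }
};
```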
diff --git a/packages/react-native-executorch/src/controllers/VerticalOCRController.ts b/packages/react-native-executorch/src/controllers/VerticalOCRController.ts index 3e0223b5c..73ea54429 100644 --- a/packages/react-native-executorch/src/controllers/VerticalOCRController.ts +++ b/packages/react-native-executorch/src/controllers/VerticalOCRController.ts @@ -1,5 +1,6 @@ import { symbols } from '../constants/ocr/symbols'; -import { ETError, getError } from '../Error'; +import { RnExecutorchErrorCode } from '../errors/ErrorCodes'; +import { RnExecutorchError, parseUnknownError } from '../errors/errorUtils'; import { ResourceSource } from '../types/common'; import { OCRLanguage } from '../types/ocr'; import { ResourceFetcher } from '../utils/ResourceFetcher'; @@ -11,12 +12,12 @@ export class VerticalOCRController { public error: string | null = null; private isReadyCallback: (isReady: boolean) => void; private isGeneratingCallback: (isGenerating: boolean) => void; - private errorCallback: (error: string) => void; + private errorCallback: (error: RnExecutorchError) => void; constructor({ isReadyCallback = (_isReady: boolean) => {}, isGeneratingCallback = (_isGenerating: boolean) => {}, - errorCallback = (_error: string) => {}, + errorCallback = (_error: RnExecutorchError) => {}, } = {}) { this.isReadyCallback = isReadyCallback; this.isGeneratingCallback = isGeneratingCallback; @@ -34,7 +35,10 @@ if (!detectorSource || !recognizerSource) return; if (!symbols[language]) { - throw new Error(getError(ETError.LanguageNotSupported)); + throw new RnExecutorchError( + RnExecutorchErrorCode.LanguageNotSupported, + 'The provided language for OCR is not supported. Please try using another language.' + ); } this.isReady = false; @@ -45,8 +49,11 @@ detectorSource, recognizerSource ); - if (paths === null || paths.length < 2) { - throw new Error('Download interrupted'); + if (paths === null || paths.length < 3) { + throw new RnExecutorchError( + RnExecutorchErrorCode.DownloadInterrupted, + 'The download has been interrupted. As a result, not every file was downloaded. Please retry the download.' + ); } this.ocrNativeModule = global.loadVerticalOCR( paths[0]!, @@ -59,19 +66,25 @@ this.isReadyCallback(this.isReady); } catch (e) { if (this.errorCallback) { - this.errorCallback(getError(e)); + this.errorCallback(parseUnknownError(e)); } else { - throw new Error(getError(e)); + throw parseUnknownError(e); } } }; public forward = async (imageSource: string) => { if (!this.isReady) { - throw new Error(getError(ETError.ModuleNotLoaded)); + throw new RnExecutorchError( + RnExecutorchErrorCode.ModuleNotLoaded, + 'The model is currently not loaded. Please load the model before calling forward().' + ); } if (this.isGenerating) { - throw new Error(getError(ETError.ModelGenerating)); + throw new RnExecutorchError( + RnExecutorchErrorCode.ModelGenerating, + 'The model is currently generating. Please wait until previous model run is complete.' + ); } try { @@ -79,7 +92,7 @@ this.isGeneratingCallback(this.isGenerating); return await this.ocrNativeModule.generate(imageSource); } catch (e) { - throw new Error(getError(e)); + throw parseUnknownError(e); } finally { this.isGenerating = false; this.isGeneratingCallback(this.isGenerating); @@ -88,9 +101,9 @@ public delete() { if (this.isGenerating) { - throw new Error( - getError(ETError.ModelGenerating) + - 'You cannot delete the model. You must wait until the generating is finished.' + throw new RnExecutorchError( + RnExecutorchErrorCode.ModelGenerating, + 'The model is currently generating. Please wait until previous model run is complete.' ); } this.ocrNativeModule.unload();
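Since `RnExecutorchErrorCode` below is a plain numeric enum, its reverse mapping can be used to turn a raw code into its symbolic name when logging; a small sketch (the `errorCodeName` helper is illustrative, not part of the package):

```typescript
import { RnExecutorchErrorCode } from 'react-native-executorch';

// Numeric TypeScript enums provide a reverse mapping from value to name,
// which is handy for readable logs and error reports.
function errorCodeName(code: number): string {
  return RnExecutorchErrorCode[code] ?? `UnknownCode(${code})`;
}

errorCodeName(104); // "ModelGenerating"
errorCodeName(18); // "InvalidArgument" (an ExecuTorch runtime value)
```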
diff --git a/packages/react-native-executorch/src/errors/ErrorCodes.ts b/packages/react-native-executorch/src/errors/ErrorCodes.ts new file mode 100644 index 000000000..a3e6e28ee --- /dev/null +++ b/packages/react-native-executorch/src/errors/ErrorCodes.ts @@ -0,0 +1,119 @@ +// Auto-generated from scripts/errors.config.ts +// DO NOT EDIT MANUALLY - Run 'yarn codegen:errors' to regenerate + +export enum RnExecutorchErrorCode { + /** + * An umbrella error that is usually thrown when something unexpected happens, for example a 3rd-party library error. + */ + UnknownError = 101, + /** + * Thrown when a user tries to run a model that is not yet downloaded or loaded into memory. + */ + ModuleNotLoaded = 102, + /** + * An error occurred when saving a file. This could be, for instance, a result image from an image model. + */ + FileWriteFailed = 103, + /** + * Thrown when a user tries to run a model that is currently processing. It is only allowed to run a single model prediction at a time. + */ + ModelGenerating = 104, + /** + * Thrown when a language is passed to a multi-language model that is not supported. For example OCR or Speech To Text. + */ + LanguageNotSupported = 105, + /** + * Thrown when config parameters passed to a model are invalid. For example, when LLM's topp is outside of range [0, 1]. + */ + InvalidConfig = 112, + /** + * Thrown when the type of model source passed by the user is invalid. + */ + InvalidModelSource = 255, + /** + * Thrown when the number of passed inputs to the model is different than the model metadata specifies. + */ + UnexpectedNumInputs = 97, + /** + * Thrown when a React Native ExecuTorch thread pool problem occurs. + */ + ThreadPoolError = 113, + /** + * Thrown when a file read operation failed. This could be an invalid image URL passed to image models, or an unsupported format.
+ */ + FileReadFailed = 114, + /** + * Thrown when the size of the model output is unexpected. + */ + InvalidModelOutput = 115, + /** + * Thrown when the dimensions of input tensors don't match the model's expected dimensions. + */ + WrongDimensions = 116, + /** + * Thrown when the input passed to our APIs is invalid, for example when passing an empty message array to LLM's generate(). + */ + InvalidUserInput = 117, + /** + * Thrown when the number of downloaded files is unexpected, due to download interruptions. + */ + DownloadInterrupted = 118, + /** + * Thrown when there's a configuration mismatch between multilingual and language settings in Speech-to-Text models. + */ + MultilingualConfiguration = 160, + /** + * Thrown when streaming transcription is attempted but an audio data chunk is missing. + */ + MissingDataChunk = 161, + /** + * Thrown when trying to stop or insert data into a stream that hasn't been started. + */ + StreamingNotStarted = 162, + /** + * Thrown when trying to start a new streaming session while another is already in progress. + */ + StreamingInProgress = 163, + /** + * Thrown when a resource fails to download. This could be due to an invalid URL or, for example, a network problem. + */ + ResourceFetcherDownloadFailed = 180, + /** + * Thrown when a user tries to trigger a download that's already in progress. + */ + ResourceFetcherDownloadInProgress = 181, + /** + * Thrown when trying to pause a download that is already paused. + */ + ResourceFetcherAlreadyPaused = 182, + /** + * Thrown when trying to resume a download that is already ongoing. + */ + ResourceFetcherAlreadyOngoing = 183, + /** + * Thrown when trying to pause, resume, or cancel a download that is not active. + */ + ResourceFetcherNotActive = 184, + /** + * Thrown when required URI information is missing for a download operation.
+ */ + ResourceFetcherMissingUri = 185, + Ok = 0, + Internal = 1, + InvalidState = 2, + EndOfMethod = 3, + NotSupported = 16, + NotImplemented = 17, + InvalidArgument = 18, + InvalidType = 19, + OperatorMissing = 20, + NotFound = 32, + MemoryAllocationFailed = 33, + AccessFailed = 34, + InvalidProgram = 35, + InvalidExternalData = 36, + OutOfResources = 37, + DelegateInvalidCompatibility = 48, + DelegateMemoryAllocationFailed = 49, + DelegateInvalidHandle = 50, +} diff --git a/packages/react-native-executorch/src/errors/errorUtils.ts b/packages/react-native-executorch/src/errors/errorUtils.ts new file mode 100644 index 000000000..cc32f314b --- /dev/null +++ b/packages/react-native-executorch/src/errors/errorUtils.ts @@ -0,0 +1,29 @@ +import { RnExecutorchErrorCode } from './ErrorCodes'; + +export class RnExecutorchError extends Error { + public code: RnExecutorchErrorCode; + public cause?: unknown; + + constructor(code: number, message: string, cause?: unknown) { + super(message); + this.code = code; + this.message = message; + this.cause = cause; + } +} + +export function parseUnknownError(e: unknown): RnExecutorchError { + if (e instanceof RnExecutorchError) { + return e; + } + + if (e instanceof Error) { + return new RnExecutorchError(RnExecutorchErrorCode.Internal, e.message, e); + } + + if (typeof e === 'string') { + return new RnExecutorchError(RnExecutorchErrorCode.Internal, e); + } + + return new RnExecutorchError(RnExecutorchErrorCode.Internal, String(e)); +} diff --git a/packages/react-native-executorch/src/hooks/computer_vision/useOCR.ts b/packages/react-native-executorch/src/hooks/computer_vision/useOCR.ts index b7aaec32e..1790023c3 100644 --- a/packages/react-native-executorch/src/hooks/computer_vision/useOCR.ts +++ b/packages/react-native-executorch/src/hooks/computer_vision/useOCR.ts @@ -2,9 +2,10 @@ import { useEffect, useState } from 'react'; import { ResourceSource } from '../../types/common'; import { OCRDetection, OCRLanguage } from '../../types/ocr'; import { OCRController } from '../../controllers/OCRController'; +import { RnExecutorchError } from '../../errors/errorUtils'; interface OCRModule { - error: string | null; + error: RnExecutorchError | null; isReady: boolean; isGenerating: boolean; forward: (imageSource: string) => Promise; @@ -22,7 +23,7 @@ export const useOCR = ({ }; preventLoad?: boolean; }): OCRModule => { - const [error, setError] = useState(null); + const [error, setError] = useState(null); const [isReady, setIsReady] = useState(false); const [isGenerating, setIsGenerating] = useState(false); const [downloadProgress, setDownloadProgress] = useState(0); diff --git a/packages/react-native-executorch/src/hooks/computer_vision/useTextToImage.ts b/packages/react-native-executorch/src/hooks/computer_vision/useTextToImage.ts index b6a514d22..b33fe59f0 100644 --- a/packages/react-native-executorch/src/hooks/computer_vision/useTextToImage.ts +++ b/packages/react-native-executorch/src/hooks/computer_vision/useTextToImage.ts @@ -1,5 +1,6 @@ import { useCallback, useEffect, useState } from 'react'; -import { ETError, getError } from '../../Error'; +import { RnExecutorchError, parseUnknownError } from '../../errors/errorUtils'; +import { RnExecutorchErrorCode } from '../../errors/ErrorCodes'; import { ResourceSource } from '../../types/common'; import { TextToImageModule } from '../../modules/computer_vision/TextToImageModule'; @@ -7,7 +8,7 @@ interface TextToImageType { isReady: boolean; isGenerating: boolean; downloadProgress: number; - error: string | null; + 
error: RnExecutorchError | null; generate: ( input: string, imageSize?: number, @@ -35,7 +36,7 @@ export const useTextToImage = ({ const [isReady, setIsReady] = useState(false); const [isGenerating, setIsGenerating] = useState(false); const [downloadProgress, setDownloadProgress] = useState(0); - const [error, setError] = useState(null); + const [error, setError] = useState(null); const [module] = useState(() => new TextToImageModule(inferenceCallback)); @@ -50,7 +51,7 @@ export const useTextToImage = ({ await module.load(model, setDownloadProgress); setIsReady(true); } catch (err) { - setError((err as Error).message); + setError(parseUnknownError(err)); } })(); @@ -65,8 +66,16 @@ export const useTextToImage = ({ numSteps?: number, seed?: number ): Promise => { - if (!isReady) throw new Error(getError(ETError.ModuleNotLoaded)); - if (isGenerating) throw new Error(getError(ETError.ModelGenerating)); + if (!isReady) + throw new RnExecutorchError( + RnExecutorchErrorCode.ModuleNotLoaded, + 'The model is currently not loaded. Please load the model before calling forward().' + ); + if (isGenerating) + throw new RnExecutorchError( + RnExecutorchErrorCode.ModelGenerating, + 'The model is currently generating. Please wait until previous model run is complete.' + ); try { setIsGenerating(true); return await module.forward(input, imageSize, numSteps, seed); diff --git a/packages/react-native-executorch/src/hooks/computer_vision/useVerticalOCR.ts b/packages/react-native-executorch/src/hooks/computer_vision/useVerticalOCR.ts index c033d3721..bf7da6e03 100644 --- a/packages/react-native-executorch/src/hooks/computer_vision/useVerticalOCR.ts +++ b/packages/react-native-executorch/src/hooks/computer_vision/useVerticalOCR.ts @@ -2,9 +2,10 @@ import { useEffect, useState } from 'react'; import { ResourceSource } from '../../types/common'; import { OCRDetection, OCRLanguage } from '../../types/ocr'; import { VerticalOCRController } from '../../controllers/VerticalOCRController'; +import { RnExecutorchError } from '../../errors/errorUtils'; interface OCRModule { - error: string | null; + error: RnExecutorchError | null; isReady: boolean; isGenerating: boolean; forward: (imageSource: string) => Promise; @@ -24,7 +25,7 @@ export const useVerticalOCR = ({ independentCharacters?: boolean; preventLoad?: boolean; }): OCRModule => { - const [error, setError] = useState(null); + const [error, setError] = useState(null); const [isReady, setIsReady] = useState(false); const [isGenerating, setIsGenerating] = useState(false); const [downloadProgress, setDownloadProgress] = useState(0); diff --git a/packages/react-native-executorch/src/hooks/natural_language_processing/useLLM.ts b/packages/react-native-executorch/src/hooks/natural_language_processing/useLLM.ts index 25cad7969..23882e65c 100644 --- a/packages/react-native-executorch/src/hooks/natural_language_processing/useLLM.ts +++ b/packages/react-native-executorch/src/hooks/natural_language_processing/useLLM.ts @@ -9,6 +9,7 @@ import { ToolsConfig, } from '../../types/llm'; import { LLMController } from '../../controllers/LLMController'; +import { RnExecutorchError, parseUnknownError } from '../../errors/errorUtils'; /* Hook version of LLMModule @@ -30,7 +31,7 @@ export const useLLM = ({ const [isReady, setIsReady] = useState(false); const [isGenerating, setIsGenerating] = useState(false); const [downloadProgress, setDownloadProgress] = useState(0); - const [error, setError] = useState(null); + const [error, setError] = useState(null); const tokenCallback = 
useCallback((newToken: string) => { setToken(newToken); @@ -62,7 +63,7 @@ export const useLLM = ({ onDownloadProgressCallback: setDownloadProgress, }); } catch (e) { - setError(e); + setError(parseUnknownError(e)); } })(); diff --git a/packages/react-native-executorch/src/hooks/natural_language_processing/useSpeechToText.ts b/packages/react-native-executorch/src/hooks/natural_language_processing/useSpeechToText.ts index 1c974ec52..3e1324f54 100644 --- a/packages/react-native-executorch/src/hooks/natural_language_processing/useSpeechToText.ts +++ b/packages/react-native-executorch/src/hooks/natural_language_processing/useSpeechToText.ts @@ -1,7 +1,8 @@ import { useEffect, useCallback, useState } from 'react'; -import { ETError, getError } from '../../Error'; import { SpeechToTextModule } from '../../modules/natural_language_processing/SpeechToTextModule'; import { DecodingOptions, SpeechToTextModelConfig } from '../../types/stt'; +import { RnExecutorchErrorCode } from '../../errors/ErrorCodes'; +import { RnExecutorchError, parseUnknownError } from '../../errors/errorUtils'; export const useSpeechToText = ({ model, @@ -10,7 +11,7 @@ export const useSpeechToText = ({ model: SpeechToTextModelConfig; preventLoad?: boolean; }) => { - const [error, setError] = useState(null); + const [error, setError] = useState(null); const [isReady, setIsReady] = useState(false); const [isGenerating, setIsGenerating] = useState(false); const [downloadProgress, setDownloadProgress] = useState(0); @@ -38,7 +39,7 @@ export const useSpeechToText = ({ ); setIsReady(true); } catch (err) { - setError((err as Error).message); + setError(parseUnknownError(err)); } })(); }, [ @@ -53,8 +54,16 @@ export const useSpeechToText = ({ const stateWrapper = useCallback( Promise>(fn: T) => async (...args: Parameters): Promise>> => { - if (!isReady) throw new Error(getError(ETError.ModuleNotLoaded)); - if (isGenerating) throw new Error(getError(ETError.ModelGenerating)); + if (!isReady) + throw new RnExecutorchError( + RnExecutorchErrorCode.ModuleNotLoaded, + 'The model is currently not loaded. Please load the model before calling this function.' + ); + if (isGenerating) + throw new RnExecutorchError( + RnExecutorchErrorCode.ModelGenerating, + 'The model is currently generating. Please wait until previous model run is complete.' + ); setIsGenerating(true); try { return await fn.apply(modelInstance, args); @@ -67,8 +76,16 @@ export const useSpeechToText = ({ const stream = useCallback( async (options?: DecodingOptions) => { - if (!isReady) throw new Error(getError(ETError.ModuleNotLoaded)); - if (isGenerating) throw new Error(getError(ETError.ModelGenerating)); + if (!isReady) + throw new RnExecutorchError( + RnExecutorchErrorCode.ModuleNotLoaded, + 'The model is currently not loaded. Please load the model before calling this function.' + ); + if (isGenerating) + throw new RnExecutorchError( + RnExecutorchErrorCode.ModelGenerating, + 'The model is currently generating. Please wait until previous model run is complete.' + ); setIsGenerating(true); setCommittedTranscription(''); setNonCommittedTranscription(''); @@ -92,7 +109,11 @@ export const useSpeechToText = ({ const wrapper = useCallback( any>(fn: T) => { return (...args: Parameters): ReturnType => { - if (!isReady) throw new Error(getError(ETError.ModuleNotLoaded)); + if (!isReady) + throw new RnExecutorchError( + RnExecutorchErrorCode.ModuleNotLoaded, + 'The model is currently not loaded. Please load the model before calling this function.' 
+ ); return fn.apply(modelInstance, args); }; }, diff --git a/packages/react-native-executorch/src/hooks/natural_language_processing/useTextToSpeech.ts b/packages/react-native-executorch/src/hooks/natural_language_processing/useTextToSpeech.ts index aa6216290..4f789ded9 100644 --- a/packages/react-native-executorch/src/hooks/natural_language_processing/useTextToSpeech.ts +++ b/packages/react-native-executorch/src/hooks/natural_language_processing/useTextToSpeech.ts @@ -5,7 +5,8 @@ import { TextToSpeechInput, TextToSpeechStreamingInput, } from '../../types/tts'; -import { ETError, getError } from '../../Error'; +import { RnExecutorchErrorCode } from '../../errors/ErrorCodes'; +import { RnExecutorchError, parseUnknownError } from '../../errors/errorUtils'; interface Props extends TextToSpeechConfig { preventLoad?: boolean; @@ -17,7 +18,7 @@ export const useTextToSpeech = ({ options, preventLoad = false, }: Props) => { - const [error, setError] = useState(null); + const [error, setError] = useState(null); const [isReady, setIsReady] = useState(false); const [isGenerating, setIsGenerating] = useState(false); const [downloadProgress, setDownloadProgress] = useState(0); @@ -45,7 +46,7 @@ export const useTextToSpeech = ({ ); setIsReady(true); } catch (err) { - setError((err as Error).message); + setError(parseUnknownError(err)); } })(); @@ -66,8 +67,16 @@ export const useTextToSpeech = ({ ]); const forward = async (input: TextToSpeechInput) => { - if (!isReady) throw new Error(getError(ETError.ModuleNotLoaded)); - if (isGenerating) throw new Error(getError(ETError.ModelGenerating)); + if (!isReady) + throw new RnExecutorchError( + RnExecutorchErrorCode.ModuleNotLoaded, + 'The model is currently not loaded. Please load the model before calling forward().' + ); + if (isGenerating) + throw new RnExecutorchError( + RnExecutorchErrorCode.ModelGenerating, + 'The model is currently generating. Please wait until previous model run is complete.' + ); try { setIsGenerating(true); return await moduleInstance.forward(input.text, input.speed ?? 1.0); @@ -78,8 +87,16 @@ export const useTextToSpeech = ({ const stream = useCallback( async (input: TextToSpeechStreamingInput) => { - if (!isReady) throw new Error(getError(ETError.ModuleNotLoaded)); - if (isGenerating) throw new Error(getError(ETError.ModelGenerating)); + if (!isReady) + throw new RnExecutorchError( + RnExecutorchErrorCode.ModuleNotLoaded, + 'The model is currently not loaded. Please load the model before calling stream().' + ); + if (isGenerating) + throw new RnExecutorchError( + RnExecutorchErrorCode.ModelGenerating, + 'The model is currently generating. Please wait until previous model run is complete.' 
+ ); setIsGenerating(true); try { await input.onBegin?.(); diff --git a/packages/react-native-executorch/src/hooks/natural_language_processing/useTokenizer.ts b/packages/react-native-executorch/src/hooks/natural_language_processing/useTokenizer.ts index 66f313b9d..34e33067e 100644 --- a/packages/react-native-executorch/src/hooks/natural_language_processing/useTokenizer.ts +++ b/packages/react-native-executorch/src/hooks/natural_language_processing/useTokenizer.ts @@ -1,7 +1,8 @@ import { useEffect, useState } from 'react'; import { TokenizerModule } from '../../modules/natural_language_processing/TokenizerModule'; import { ResourceSource } from '../../types/common'; -import { ETError, getError } from '../../Error'; +import { RnExecutorchErrorCode } from '../../errors/ErrorCodes'; +import { RnExecutorchError, parseUnknownError } from '../../errors/errorUtils'; export const useTokenizer = ({ tokenizer, @@ -10,7 +11,7 @@ export const useTokenizer = ({ tokenizer: { tokenizerSource: ResourceSource }; preventLoad?: boolean; }) => { - const [error, setError] = useState(null); + const [error, setError] = useState(null); const [isReady, setIsReady] = useState(false); const [isGenerating, setIsGenerating] = useState(false); const [downloadProgress, setDownloadProgress] = useState(0); @@ -29,15 +30,23 @@ export const useTokenizer = ({ ); setIsReady(true); } catch (err) { - setError((err as Error).message); + setError(parseUnknownError(err)); } })(); }, [tokenizerInstance, tokenizer.tokenizerSource, preventLoad]); const stateWrapper = Promise>(fn: T) => { return (...args: Parameters): Promise> => { - if (!isReady) throw new Error(getError(ETError.ModuleNotLoaded)); - if (isGenerating) throw new Error(getError(ETError.ModelGenerating)); + if (!isReady) + throw new RnExecutorchError( + RnExecutorchErrorCode.ModuleNotLoaded, + 'The model is currently not loaded. Please load the model before calling this function.' + ); + if (isGenerating) + throw new RnExecutorchError( + RnExecutorchErrorCode.ModelGenerating, + 'The model is currently generating. Please wait until previous model run is complete.' 
+ ); try { setIsGenerating(true); return fn.apply(tokenizerInstance, args); diff --git a/packages/react-native-executorch/src/hooks/useModule.ts b/packages/react-native-executorch/src/hooks/useModule.ts index 93d18c349..4c97abb67 100644 --- a/packages/react-native-executorch/src/hooks/useModule.ts +++ b/packages/react-native-executorch/src/hooks/useModule.ts @@ -1,5 +1,6 @@ import { useEffect, useState } from 'react'; -import { ETError, getError } from '../Error'; +import { RnExecutorchErrorCode } from '../errors/ErrorCodes'; +import { RnExecutorchError, parseUnknownError } from '../errors/errorUtils'; interface Module { load: (...args: any[]) => Promise; @@ -25,7 +26,7 @@ export const useModule = < model: LoadArgs[0]; preventLoad?: boolean; }) => { - const [error, setError] = useState(null); + const [error, setError] = useState(null); const [isReady, setIsReady] = useState(false); const [isGenerating, setIsGenerating] = useState(false); const [downloadProgress, setDownloadProgress] = useState(0); @@ -42,7 +43,7 @@ export const useModule = < await moduleInstance.load(model, setDownloadProgress); setIsReady(true); } catch (err) { - setError((err as Error).message); + setError(parseUnknownError(err)); } })(); @@ -54,8 +55,16 @@ export const useModule = < }, [moduleInstance, ...Object.values(model), preventLoad]); const forward = async (...input: ForwardArgs): Promise => { - if (!isReady) throw new Error(getError(ETError.ModuleNotLoaded)); - if (isGenerating) throw new Error(getError(ETError.ModelGenerating)); + if (!isReady) + throw new RnExecutorchError( + RnExecutorchErrorCode.ModuleNotLoaded, + 'The model is currently not loaded. Please load the model before calling forward().' + ); + if (isGenerating) + throw new RnExecutorchError( + RnExecutorchErrorCode.ModelGenerating, + 'The model is currently generating. Please wait until previous model run is complete.' 
+ ); try { setIsGenerating(true); return await moduleInstance.forward(...input); diff --git a/packages/react-native-executorch/src/index.ts b/packages/react-native-executorch/src/index.ts index 75891aafd..45770ced3 100644 --- a/packages/react-native-executorch/src/index.ts +++ b/packages/react-native-executorch/src/index.ts @@ -135,3 +135,6 @@ export * from './constants/ocr/models'; export * from './constants/tts/models'; export * from './constants/tts/voices'; export * from './constants/llmDefaults'; + +export { RnExecutorchError } from './errors/errorUtils'; +export { RnExecutorchErrorCode } from './errors/ErrorCodes'; diff --git a/packages/react-native-executorch/src/modules/computer_vision/ClassificationModule.ts b/packages/react-native-executorch/src/modules/computer_vision/ClassificationModule.ts index 7190bdb39..deaabbdcd 100644 --- a/packages/react-native-executorch/src/modules/computer_vision/ClassificationModule.ts +++ b/packages/react-native-executorch/src/modules/computer_vision/ClassificationModule.ts @@ -1,7 +1,8 @@ import { ResourceFetcher } from '../../utils/ResourceFetcher'; import { ResourceSource } from '../../types/common'; -import { ETError, getError } from '../../Error'; import { BaseModule } from '../BaseModule'; +import { RnExecutorchErrorCode } from '../../errors/ErrorCodes'; +import { RnExecutorchError } from '../../errors/errorUtils'; export class ClassificationModule extends BaseModule { async load( @@ -13,14 +14,20 @@ export class ClassificationModule extends BaseModule { model.modelSource ); if (paths === null || paths.length < 1) { - throw new Error('Download interrupted.'); + throw new RnExecutorchError( + RnExecutorchErrorCode.DownloadInterrupted, + 'The download has been interrupted. As a result, not every file was downloaded. Please retry the download.' + ); } this.nativeModule = global.loadClassification(paths[0] || ''); } async forward(imageSource: string) { if (this.nativeModule == null) - throw new Error(getError(ETError.ModuleNotLoaded)); + throw new RnExecutorchError( + RnExecutorchErrorCode.ModuleNotLoaded, + 'The model is currently not loaded. Please load the model before calling forward().' + ); return await this.nativeModule.generate(imageSource); } } diff --git a/packages/react-native-executorch/src/modules/computer_vision/ImageEmbeddingsModule.ts b/packages/react-native-executorch/src/modules/computer_vision/ImageEmbeddingsModule.ts index 153e9c543..6ca6ea3b9 100644 --- a/packages/react-native-executorch/src/modules/computer_vision/ImageEmbeddingsModule.ts +++ b/packages/react-native-executorch/src/modules/computer_vision/ImageEmbeddingsModule.ts @@ -1,6 +1,7 @@ import { ResourceFetcher } from '../../utils/ResourceFetcher'; import { ResourceSource } from '../../types/common'; -import { ETError, getError } from '../../Error'; +import { RnExecutorchErrorCode } from '../../errors/ErrorCodes'; +import { RnExecutorchError } from '../../errors/errorUtils'; import { BaseModule } from '../BaseModule'; export class ImageEmbeddingsModule extends BaseModule { @@ -13,14 +14,20 @@ export class ImageEmbeddingsModule extends BaseModule { model.modelSource ); if (paths === null || paths.length < 1) { - throw new Error('Download interrupted.'); + throw new RnExecutorchError( + RnExecutorchErrorCode.DownloadInterrupted, + 'The download has been interrupted. As a result, not every file was downloaded. Please retry the download.' 
+ );
}
this.nativeModule = global.loadImageEmbeddings(paths[0] || '');
}

async forward(imageSource: string): Promise<Float32Array> {
if (this.nativeModule == null)
- throw new Error(getError(ETError.ModuleNotLoaded));
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ModuleNotLoaded,
+ 'The model is currently not loaded. Please load the model before calling forward().'
+ );
return new Float32Array(await this.nativeModule.generate(imageSource));
}
}
diff --git a/packages/react-native-executorch/src/modules/computer_vision/ImageSegmentationModule.ts b/packages/react-native-executorch/src/modules/computer_vision/ImageSegmentationModule.ts
index a028629c6..d964f530d 100644
--- a/packages/react-native-executorch/src/modules/computer_vision/ImageSegmentationModule.ts
+++ b/packages/react-native-executorch/src/modules/computer_vision/ImageSegmentationModule.ts
@@ -1,7 +1,8 @@
import { ResourceFetcher } from '../../utils/ResourceFetcher';
import { ResourceSource } from '../../types/common';
import { DeeplabLabel } from '../../types/imageSegmentation';
-import { ETError, getError } from '../../Error';
+import { RnExecutorchErrorCode } from '../../errors/ErrorCodes';
+import { RnExecutorchError } from '../../errors/errorUtils';
import { BaseModule } from '../BaseModule';

export class ImageSegmentationModule extends BaseModule {
@@ -14,7 +15,10 @@ export class ImageSegmentationModule extends BaseModule {
model.modelSource
);
if (paths === null || paths.length < 1) {
- throw new Error('Download interrupted.');
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.DownloadInterrupted,
+ 'The download has been interrupted. As a result, not every file was downloaded. Please retry the download.'
+ );
}
this.nativeModule = global.loadImageSegmentation(paths[0] || '');
}
@@ -25,7 +29,10 @@
resize?: boolean
): Promise<{ [key in DeeplabLabel]?: number[] }> {
if (this.nativeModule == null) {
- throw new Error(getError(ETError.ModuleNotLoaded));
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ModuleNotLoaded,
+ 'The model is currently not loaded. Please load the model before calling forward().'
+ );
}

const stringDict = await this.nativeModule.generate(
diff --git a/packages/react-native-executorch/src/modules/computer_vision/ObjectDetectionModule.ts b/packages/react-native-executorch/src/modules/computer_vision/ObjectDetectionModule.ts
index 84b8dafd3..dde5d2233 100644
--- a/packages/react-native-executorch/src/modules/computer_vision/ObjectDetectionModule.ts
+++ b/packages/react-native-executorch/src/modules/computer_vision/ObjectDetectionModule.ts
@@ -1,7 +1,8 @@
import { ResourceFetcher } from '../../utils/ResourceFetcher';
import { ResourceSource } from '../../types/common';
import { Detection } from '../../types/objectDetection';
-import { ETError, getError } from '../../Error';
+import { RnExecutorchErrorCode } from '../../errors/ErrorCodes';
+import { RnExecutorchError } from '../../errors/errorUtils';
import { BaseModule } from '../BaseModule';

export class ObjectDetectionModule extends BaseModule {
@@ -14,7 +15,10 @@ export class ObjectDetectionModule extends BaseModule {
model.modelSource
);
if (paths === null || paths.length < 1) {
- throw new Error('Download interrupted.');
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.DownloadInterrupted,
+ 'The download has been interrupted. As a result, not every file was downloaded. Please retry the download.'
+ );
}
this.nativeModule = global.loadObjectDetection(paths[0] || '');
}
@@ -24,7 +28,10 @@
detectionThreshold: number = 0.7
): Promise<Detection[]> {
if (this.nativeModule == null)
- throw new Error(getError(ETError.ModuleNotLoaded));
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ModuleNotLoaded,
+ 'The model is currently not loaded. Please load the model before calling forward().'
+ );
return await this.nativeModule.generate(imageSource, detectionThreshold);
}
}
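Every computer-vision `load()` path above now throws the same `DownloadInterrupted` code instead of a bare `Error('Download interrupted.')`, which makes a generic retry wrapper possible. A sketch of one such wrapper (a hypothetical helper, not part of this diff):

import {
  RnExecutorchError,
  RnExecutorchErrorCode,
} from 'react-native-executorch';

// Retry `load` only when the failure was an interrupted download;
// rethrow everything else unchanged.
async function loadWithRetry(
  load: () => Promise<void>,
  attempts: number = 3
): Promise<void> {
  for (let attempt = 1; attempt <= attempts; attempt++) {
    try {
      return await load();
    } catch (err) {
      const interrupted =
        err instanceof RnExecutorchError &&
        err.code === RnExecutorchErrorCode.DownloadInterrupted;
      if (!interrupted || attempt === attempts) throw err;
    }
  }
}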
diff --git a/packages/react-native-executorch/src/modules/computer_vision/StyleTransferModule.ts b/packages/react-native-executorch/src/modules/computer_vision/StyleTransferModule.ts
index e312fdc84..772beeded 100644
--- a/packages/react-native-executorch/src/modules/computer_vision/StyleTransferModule.ts
+++ b/packages/react-native-executorch/src/modules/computer_vision/StyleTransferModule.ts
@@ -1,6 +1,7 @@
import { ResourceFetcher } from '../../utils/ResourceFetcher';
import { ResourceSource } from '../../types/common';
-import { ETError, getError } from '../../Error';
+import { RnExecutorchErrorCode } from '../../errors/ErrorCodes';
+import { RnExecutorchError } from '../../errors/errorUtils';
import { BaseModule } from '../BaseModule';

export class StyleTransferModule extends BaseModule {
@@ -13,14 +14,20 @@ export class StyleTransferModule extends BaseModule {
model.modelSource
);
if (paths === null || paths.length < 1) {
- throw new Error('Download interrupted.');
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.DownloadInterrupted,
+ 'The download has been interrupted. As a result, not every file was downloaded. Please retry the download.'
+ );
}
this.nativeModule = global.loadStyleTransfer(paths[0] || '');
}

async forward(imageSource: string): Promise<string> {
if (this.nativeModule == null)
- throw new Error(getError(ETError.ModuleNotLoaded));
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ModuleNotLoaded,
+ 'The model is currently not loaded. Please load the model before calling forward().'
+ );
return await this.nativeModule.generate(imageSource);
}
}
diff --git a/packages/react-native-executorch/src/modules/computer_vision/TextToImageModule.ts b/packages/react-native-executorch/src/modules/computer_vision/TextToImageModule.ts
index cab509667..5526fcc8e 100644
--- a/packages/react-native-executorch/src/modules/computer_vision/TextToImageModule.ts
+++ b/packages/react-native-executorch/src/modules/computer_vision/TextToImageModule.ts
@@ -3,6 +3,8 @@ import { ResourceSource } from '../../types/common';
import { BaseModule } from '../BaseModule';
import { Buffer } from 'buffer';
import { PNG } from 'pngjs/browser';
+import { RnExecutorchErrorCode } from '../../errors/ErrorCodes';
+import { RnExecutorchError } from '../../errors/errorUtils';

export class TextToImageModule extends BaseModule {
private inferenceCallback: (stepIdx: number) => void;
@@ -33,7 +35,10 @@
model.decoderSource
);
if (!results) {
- throw new Error('Failed to fetch one or more resources.');
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.DownloadInterrupted,
+ 'The download has been interrupted. As a result, not every file was downloaded. Please retry the download.'
+ ); } const [tokenizerPath, schedulerPath, encoderPath, unetPath, decoderPath] = results; @@ -45,7 +50,10 @@ export class TextToImageModule extends BaseModule { !unetPath || !decoderPath ) { - throw new Error('Download interrupted.'); + throw new RnExecutorchError( + RnExecutorchErrorCode.DownloadInterrupted, + 'The download has been interrupted. As a result, not every file was downloaded. Please retry the download.' + ); } const response = await fetch('file://' + schedulerPath); diff --git a/packages/react-native-executorch/src/modules/general/ExecutorchModule.ts b/packages/react-native-executorch/src/modules/general/ExecutorchModule.ts index a7ffc50ed..6cf3bbc03 100644 --- a/packages/react-native-executorch/src/modules/general/ExecutorchModule.ts +++ b/packages/react-native-executorch/src/modules/general/ExecutorchModule.ts @@ -2,6 +2,8 @@ import { TensorPtr } from '../../types/common'; import { BaseModule } from '../BaseModule'; import { ResourceSource } from '../../types/common'; import { ResourceFetcher } from '../../utils/ResourceFetcher'; +import { RnExecutorchErrorCode } from '../../errors/ErrorCodes'; +import { RnExecutorchError } from '../../errors/errorUtils'; export class ExecutorchModule extends BaseModule { async load( @@ -13,7 +15,10 @@ export class ExecutorchModule extends BaseModule { modelSource ); if (paths === null || paths.length < 1) { - throw new Error('Download interrupted.'); + throw new RnExecutorchError( + RnExecutorchErrorCode.DownloadInterrupted, + 'The download has been interrupted. As a result, not every file was downloaded. Please retry the download.' + ); } this.nativeModule = global.loadExecutorchModule(paths[0] || ''); } diff --git a/packages/react-native-executorch/src/modules/natural_language_processing/SpeechToTextModule.ts b/packages/react-native-executorch/src/modules/natural_language_processing/SpeechToTextModule.ts index 1a371a4e4..9619547c8 100644 --- a/packages/react-native-executorch/src/modules/natural_language_processing/SpeechToTextModule.ts +++ b/packages/react-native-executorch/src/modules/natural_language_processing/SpeechToTextModule.ts @@ -1,6 +1,8 @@ import { Logger } from '../../common/Logger'; import { DecodingOptions, SpeechToTextModelConfig } from '../../types/stt'; import { ResourceFetcher } from '../../utils/ResourceFetcher'; +import { RnExecutorchErrorCode } from '../../errors/ErrorCodes'; +import { RnExecutorchError, parseUnknownError } from '../../errors/errorUtils'; export class SpeechToTextModule { private nativeModule: any; @@ -34,7 +36,10 @@ export class SpeechToTextModule { const encoderSource = encoderDecoderResults?.[0]; const decoderSource = encoderDecoderResults?.[1]; if (!encoderSource || !decoderSource || !tokenizerSources) { - throw new Error('Download interrupted.'); + throw new RnExecutorchError( + RnExecutorchErrorCode.DownloadInterrupted, + 'The download has been interrupted. As a result, not every file was downloaded. Please retry the download.' 
+ ); } this.nativeModule = await global.loadSpeechToText( encoderSource, @@ -148,7 +153,7 @@ export class SpeechToTextModule { } continue; } - if (error) throw error; + if (error) throw parseUnknownError(error); if (finished) return; await new Promise((r) => (waiter = r)); } @@ -170,10 +175,16 @@ export class SpeechToTextModule { private validateOptions(options: DecodingOptions) { if (!this.modelConfig.isMultilingual && options.language) { - throw new Error('Model is not multilingual, cannot set language'); + throw new RnExecutorchError( + RnExecutorchErrorCode.InvalidConfig, + 'Model is not multilingual, cannot set language' + ); } if (this.modelConfig.isMultilingual && !options.language) { - throw new Error('Model is multilingual, provide a language'); + throw new RnExecutorchError( + RnExecutorchErrorCode.InvalidConfig, + 'Model is multilingual, provide a language' + ); } } } diff --git a/packages/react-native-executorch/src/modules/natural_language_processing/TextEmbeddingsModule.ts b/packages/react-native-executorch/src/modules/natural_language_processing/TextEmbeddingsModule.ts index d30ead1d5..173909b33 100644 --- a/packages/react-native-executorch/src/modules/natural_language_processing/TextEmbeddingsModule.ts +++ b/packages/react-native-executorch/src/modules/natural_language_processing/TextEmbeddingsModule.ts @@ -1,6 +1,8 @@ import { ResourceSource } from '../../types/common'; import { ResourceFetcher } from '../../utils/ResourceFetcher'; import { BaseModule } from '../BaseModule'; +import { RnExecutorchErrorCode } from '../../errors/ErrorCodes'; +import { RnExecutorchError } from '../../errors/errorUtils'; export class TextEmbeddingsModule extends BaseModule { async load( @@ -22,7 +24,10 @@ export class TextEmbeddingsModule extends BaseModule { const modelPath = modelResult?.[0]; const tokenizerPath = tokenizerResult?.[0]; if (!modelPath || !tokenizerPath) { - throw new Error('Download interrupted.'); + throw new RnExecutorchError( + RnExecutorchErrorCode.DownloadInterrupted, + 'The download has been interrupted. As a result, not every file was downloaded. Please retry the download.' + ); } this.nativeModule = global.loadTextEmbeddings(modelPath, tokenizerPath); } diff --git a/packages/react-native-executorch/src/modules/natural_language_processing/TextToSpeechModule.ts b/packages/react-native-executorch/src/modules/natural_language_processing/TextToSpeechModule.ts index 9727b1467..782a1d693 100644 --- a/packages/react-native-executorch/src/modules/natural_language_processing/TextToSpeechModule.ts +++ b/packages/react-native-executorch/src/modules/natural_language_processing/TextToSpeechModule.ts @@ -1,5 +1,6 @@ +import { RnExecutorchErrorCode } from '../../errors/ErrorCodes'; +import { RnExecutorchError } from '../../errors/errorUtils'; import { ResourceFetcher } from '../../utils/ResourceFetcher'; -import { ETError, getError } from '../../Error'; import { KokoroConfig, KokoroOptions, @@ -19,7 +20,10 @@ export class TextToSpeechModule { key.includes('Source') ); if (anySourceKey === undefined) { - throw new Error('No model source provided.'); + throw new RnExecutorchError( + RnExecutorchErrorCode.InvalidModelSource, + 'No model source provided.' 
+ );
}

// Select the text-to-speech model based on its fixed identifier
@@ -46,7 +50,8 @@
!voice.extra.taggerSource ||
!voice.extra.lexiconSource
) {
- throw new Error(
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.InvalidConfig,
'Kokoro: voice config is missing required extra fields: taggerSource and/or lexiconSource.'
);
}
@@ -63,7 +68,10 @@
);

if (paths === null || paths.length !== 7 || paths.some((p) => p == null)) {
- throw new Error('Download interrupted or missing resource.');
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.DownloadInterrupted,
+ 'Download interrupted or missing resource.'
+ );
}

const modelPaths = paths.slice(0, 4) as [string, string, string, string];
@@ -89,7 +97,10 @@
public async forward(text: string, speed: number = 1.0) {
if (this.nativeModule == null)
- throw new Error(getError(ETError.ModuleNotLoaded));
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ModuleNotLoaded,
+ 'The model is currently not loaded. Please load the model before calling forward().'
+ );

return await this.nativeModule.generate(text, speed);
}
diff --git a/packages/react-native-executorch/src/modules/natural_language_processing/TokenizerModule.ts b/packages/react-native-executorch/src/modules/natural_language_processing/TokenizerModule.ts
index 47f3dd2f9..2dd35e048 100644
--- a/packages/react-native-executorch/src/modules/natural_language_processing/TokenizerModule.ts
+++ b/packages/react-native-executorch/src/modules/natural_language_processing/TokenizerModule.ts
@@ -1,5 +1,7 @@
import { ResourceSource } from '../../types/common';
import { ResourceFetcher } from '../../utils/ResourceFetcher';
+import { RnExecutorchError } from '../../errors/errorUtils';
+import { RnExecutorchErrorCode } from '../../errors/ErrorCodes';

export class TokenizerModule {
nativeModule: any;
@@ -14,7 +16,10 @@
);
const path = paths?.[0];
if (!path) {
- throw new Error('Download interrupted.');
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.DownloadInterrupted,
+ 'The download has been interrupted. As a result, not every file was downloaded. Please retry the download.'
+ );
}
this.nativeModule = global.loadTokenizerModule(path);
}
diff --git a/packages/react-native-executorch/src/modules/natural_language_processing/VADModule.ts b/packages/react-native-executorch/src/modules/natural_language_processing/VADModule.ts
index 9e784a0e4..43a3c9eee 100644
--- a/packages/react-native-executorch/src/modules/natural_language_processing/VADModule.ts
+++ b/packages/react-native-executorch/src/modules/natural_language_processing/VADModule.ts
@@ -1,8 +1,9 @@
import { ResourceFetcher } from '../../utils/ResourceFetcher';
import { ResourceSource } from '../../types/common';
import { Segment } from '../../types/vad';
-import { ETError, getError } from '../../Error';
import { BaseModule } from '../BaseModule';
+import { RnExecutorchErrorCode } from '../../errors/ErrorCodes';
+import { RnExecutorchError } from '../../errors/errorUtils';

export class VADModule extends BaseModule {
async load(
@@ -14,14 +15,20 @@ export class VADModule extends BaseModule {
model.modelSource
);
if (paths === null || paths.length < 1) {
- throw new Error('Download interrupted.');
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.DownloadInterrupted,
+ 'The download has been interrupted. As a result, not every file was downloaded. Please retry the download.'
+ );
}
this.nativeModule = global.loadVAD(paths[0] || '');
}

async forward(waveform: Float32Array): Promise<Segment[]> {
if (this.nativeModule == null)
- throw new Error(getError(ETError.ModuleNotLoaded));
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ModuleNotLoaded,
+ 'The model is currently not loaded. Please load the model before calling forward().'
+ );
return await this.nativeModule.generate(waveform);
}
}
diff --git a/packages/react-native-executorch/src/types/llm.ts b/packages/react-native-executorch/src/types/llm.ts
index 247b76c57..53b13cbd0 100644
--- a/packages/react-native-executorch/src/types/llm.ts
+++ b/packages/react-native-executorch/src/types/llm.ts
@@ -1,3 +1,5 @@
+import { RnExecutorchError } from '../errors/errorUtils';
+
export interface LLMType {
messageHistory: Message[];
response: string;
@@ -5,7 +7,7 @@
isReady: boolean;
isGenerating: boolean;
downloadProgress: number;
- error: string | null;
+ error: RnExecutorchError | null;
configure: ({
chatConfig,
toolsConfig,
diff --git a/packages/react-native-executorch/src/utils/ResourceFetcher.ts b/packages/react-native-executorch/src/utils/ResourceFetcher.ts
index 8f1488a7b..8e733e1ce 100644
--- a/packages/react-native-executorch/src/utils/ResourceFetcher.ts
+++ b/packages/react-native-executorch/src/utils/ResourceFetcher.ts
@@ -51,6 +51,8 @@
ResourceSourceExtended,
DownloadResource,
} from './ResourceFetcherUtils';
+import { RnExecutorchErrorCode } from '../errors/ErrorCodes';
+import { RnExecutorchError } from '../errors/errorUtils';

export class ResourceFetcher {
static downloads = new Map(); // map of currently downloading (or paused) files, if the download was started by .fetch() method.
@@ -60,7 +62,10 @@
...sources: ResourceSource[]
) {
if (sources.length === 0) {
- throw new Error('Empty list given as an argument!');
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.InvalidUserInput,
+ 'Empty list given as an argument'
+ );
}
const { results: info, totalLength } =
await ResourceFetcherUtils.getFilesSizes(sources);
@@ -161,7 +166,8 @@
const resource = this.downloads.get(source)!;
switch (resource.status) {
case DownloadStatus.PAUSED:
- throw new Error(
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ResourceFetcherAlreadyPaused,
"The file download is currently paused. Can't pause the download of the same file twice."
);
default: {
@@ -178,11 +184,15 @@
!resource.extendedInfo.cacheFileUri ||
!resource.extendedInfo.uri
) {
- throw new Error('Something went wrong. File uri info is not specified!');
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ResourceFetcherMissingUri,
+ 'Something went wrong. File URI info is not specified'
+ );
}
switch (resource.status) {
case DownloadStatus.ONGOING:
- throw new Error(
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ResourceFetcherAlreadyOngoing,
"The file download is currently ongoing. Can't resume the ongoing download."
);
default: {
@@ -200,8 +210,9 @@
(result.status !== HTTP_CODE.OK &&
result.status !== HTTP_CODE.PARTIAL_CONTENT)
) {
- throw new Error(
- `Failed to fetch resource from '${resource.extendedInfo.uri}'`
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ResourceFetcherDownloadFailed,
+ `Failed to fetch resource from '${resource.extendedInfo.uri}', context: ${result}`
);
}
await moveAsync({
@@ -248,7 +259,8 @@
return source;
}
}
- throw new Error(
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ResourceFetcherNotActive,
'None of the given sources are currently being downloaded.'
);
}
@@ -281,7 +293,10 @@
private static async handleObject(source: ResourceSource) {
if (typeof source !== 'object') {
- throw new Error('Source is expected to be object!');
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.InvalidModelSource,
+ 'Source is expected to be an object'
+ );
}
const jsonString = JSON.stringify(source);
const digest = ResourceFetcherUtils.hashObject(jsonString);
@@ -302,7 +317,10 @@
private static handleLocalFile(source: ResourceSource) {
if (typeof source !== 'string') {
- throw new Error('Source is expected to be string.');
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.InvalidModelSource,
+ 'Source is expected to be a string'
+ );
}
return ResourceFetcherUtils.removeFilePrefix(source);
}
@@ -312,7 +330,10 @@
) {
const source = sourceExtended.source;
if (typeof source !== 'number') {
- throw new Error('Source is expected to be string.');
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.InvalidModelSource,
+ 'Source is expected to be a number'
+ );
}
const asset = Asset.fromModule(source);
const uri = asset.uri;
@@ -337,7 +358,10 @@
) {
const source = sourceExtended.source;
if (typeof source !== 'number') {
- throw new Error('Source is expected to be a number.');
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.InvalidModelSource,
+ 'Source is expected to be a number'
+ );
}
sourceExtended.uri = Asset.fromModule(source).uri;
return await this.handleRemoteFile(sourceExtended);
}
@@ -348,7 +372,10 @@
) {
const source = sourceExtended.source;
if (typeof source === 'object') {
- throw new Error('Source is expected to be a string or a number.');
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.InvalidModelSource,
+ 'Source is expected to be a string or a number'
+ );
}
if (this.downloads.has(source)) {
const resource = this.downloads.get(source)!;
@@ -357,10 +384,16 @@
this.resume(source);
}
// if the download is ongoing, throw error.
- throw new Error('Already downloading this file.');
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ResourceFetcherDownloadInProgress,
+ 'Already downloading this file'
+ );
}
if (typeof source === 'number' && !sourceExtended.uri) {
- throw new Error('Source Uri is expected to be available here.');
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ResourceFetcherMissingUri,
+ 'Source URI is expected to be available here'
+ );
}
if (typeof source === 'string') {
sourceExtended.uri = source;
@@ -405,7 +438,10 @@
return null;
}
if (!result || result.status !== HTTP_CODE.OK) {
- throw new Error(`Failed to fetch resource from '${source}'`);
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ResourceFetcherDownloadFailed,
+ `Failed to fetch resource from '${source}', context: ${result}`
+ );
}
await moveAsync({
from: sourceExtended.cacheFileUri,
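The `ResourceFetcher` changes above split the old generic `Error`s into distinct codes (`ResourceFetcherAlreadyPaused`, `ResourceFetcherAlreadyOngoing`, `ResourceFetcherDownloadInProgress`, and so on), so callers can treat state conflicts as benign. A sketch under the assumption that some pause API wraps `ResourceFetcher`'s pause logic; the exact public method name is not shown in this diff, so it is passed in as a parameter here:

import {
  RnExecutorchError,
  RnExecutorchErrorCode,
} from 'react-native-executorch';

// Treat 'already paused' as a no-op instead of a failure; rethrow the rest.
async function pauseQuietly(pauseDownload: () => Promise<void>): Promise<void> {
  try {
    await pauseDownload();
  } catch (err) {
    if (
      err instanceof RnExecutorchError &&
      err.code === RnExecutorchErrorCode.ResourceFetcherAlreadyPaused
    ) {
      return;
    }
    throw err;
  }
}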
diff --git a/scripts/errors.config.ts b/scripts/errors.config.ts
new file mode 100644
index 000000000..f272211ea
--- /dev/null
+++ b/scripts/errors.config.ts
@@ -0,0 +1,136 @@
+// Single source of truth for error codes
+// Run `yarn codegen:errors` to generate C++ and TypeScript enums
+
+export const errorDefinitions = {
+  // React Native ExecuTorch errors
+  /**
+   * An umbrella error that is usually thrown when something unexpected happens, for example a 3rd-party library error.
+   */
+  UnknownError: 0x65,
+  /**
+   * Thrown when a user tries to run a model that is not yet downloaded or loaded into memory.
+   */
+  ModuleNotLoaded: 0x66,
+  /**
+   * An error occurred when saving a file. This could be, for instance, a result image from an image model.
+   */
+  FileWriteFailed: 0x67,
+  /**
+   * Thrown when a user tries to run a model that is currently processing. It is only allowed to run a single model prediction at a time.
+   */
+  ModelGenerating: 0x68,
+  /**
+   * Thrown when a language that is not supported is passed to a multi-language model, for example OCR or Speech-to-Text.
+   */
+  LanguageNotSupported: 0x69,
+  /**
+   * Thrown when config parameters passed to a model are invalid. For example, when an LLM's topp is outside of the range [0, 1].
+   */
+  InvalidConfig: 0x70,
+  /**
+   * Thrown when the type of model source passed by the user is invalid.
+   */
+  InvalidModelSource: 0xff,
+  /**
+   * Thrown when the number of inputs passed to the model differs from what the model metadata specifies.
+   */
+  UnexpectedNumInputs: 0x61,
+  /**
+   * Thrown when a React Native ExecuTorch threadpool problem occurs.
+   */
+  ThreadPoolError: 0x71,
+  /**
+   * Thrown when a file read operation failed. This could be an invalid image URL passed to image models, or an unsupported format.
+   */
+  FileReadFailed: 0x72,
+  /**
+   * Thrown when the size of the model output is unexpected.
+   */
+  InvalidModelOutput: 0x73,
+  /**
+   * Thrown when the dimensions of input tensors don't match the model's expected dimensions.
+   */
+  WrongDimensions: 0x74,
+  /**
+   * Thrown when the input passed to our APIs is invalid, for example when passing an empty message array to LLM's generate().
+   */
+  InvalidUserInput: 0x75,
+  /**
+   * Thrown when the number of downloaded files is unexpected due to download interruptions.
+   */
+  DownloadInterrupted: 0x76,
+
+  // INFO: SpeechToText errors
+  /**
+   * Thrown when there's a configuration mismatch between multilingual and language settings in Speech-to-Text models.
+   */
+  MultilingualConfiguration: 0xa0,
+  /**
+   * Thrown when streaming transcription is attempted but an audio data chunk is missing.
+   */
+  MissingDataChunk: 0xa1,
+  /**
+   * Thrown when trying to stop or insert data into a stream that hasn't been started.
+   */
+  StreamingNotStarted: 0xa2,
+  /**
+   * Thrown when trying to start a new streaming session while another is already in progress.
+   */
+  StreamingInProgress: 0xa3,
+
+  // INFO: Resource Fetcher errors
+  /**
+   * Thrown when a resource fails to download. This could be due to an invalid URL or, for example, a network problem.
+   */
+  ResourceFetcherDownloadFailed: 0xb4,
+  /**
+   * Thrown when a user tries to trigger a download that's already in progress.
+   */
+  ResourceFetcherDownloadInProgress: 0xb5,
+  /**
+   * Thrown when trying to pause a download that is already paused.
+   */
+  ResourceFetcherAlreadyPaused: 0xb6,
+  /**
+   * Thrown when trying to resume a download that is already ongoing.
+   */
+  ResourceFetcherAlreadyOngoing: 0xb7,
+  /**
+   * Thrown when trying to pause, resume, or cancel a download that is not active.
+   */
+  ResourceFetcherNotActive: 0xb8,
+  /**
+   * Thrown when required URI information is missing for a download operation.
+   */
+  ResourceFetcherMissingUri: 0xb9,
+
+  // ExecuTorch mapped errors
+  // Based on: https://github.com/pytorch/executorch/blob/main/runtime/core/error.h
+  // System errors
+  Ok: 0x00,
+  Internal: 0x01,
+  InvalidState: 0x02,
+  EndOfMethod: 0x03,
+
+  // Logical errors
+  NotSupported: 0x10,
+  NotImplemented: 0x11,
+  InvalidArgument: 0x12,
+  InvalidType: 0x13,
+  OperatorMissing: 0x14,
+
+  // Resource errors
+  NotFound: 0x20,
+  MemoryAllocationFailed: 0x21,
+  AccessFailed: 0x22,
+  InvalidProgram: 0x23,
+  InvalidExternalData: 0x24,
+  OutOfResources: 0x25,
+
+  // Delegate errors
+  DelegateInvalidCompatibility: 0x30,
+  DelegateMemoryAllocationFailed: 0x31,
+  DelegateInvalidHandle: 0x32,
+} as const;
+
+export type ErrorName = keyof typeof errorDefinitions;
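`parseUnknownError` is imported and called throughout this diff, but its implementation is not part of it. Given the `UnknownError` umbrella code (0x65) defined above, a plausible minimal shape could look like this (an assumption, not the actual implementation; the helper name is illustrative):

import {
  RnExecutorchError,
  RnExecutorchErrorCode,
} from 'react-native-executorch';

// Pass typed errors through untouched; wrap anything else under UnknownError.
function toRnExecutorchError(err: unknown): RnExecutorchError {
  if (err instanceof RnExecutorchError) return err;
  const message = err instanceof Error ? err.message : String(err);
  return new RnExecutorchError(RnExecutorchErrorCode.UnknownError, message);
}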
diff --git a/scripts/generate-errors.ts b/scripts/generate-errors.ts
new file mode 100755
index 000000000..37c9abbb8
--- /dev/null
+++ b/scripts/generate-errors.ts
@@ -0,0 +1,126 @@
+#!/usr/bin/env ts-node
+
+import { errorDefinitions } from './errors.config';
+import * as fs from 'fs';
+import * as path from 'path';
+
+const REPO_ROOT = path.join(__dirname, '..');
+
+function extractComments(): Map<string, string> {
+  const configPath = path.join(__dirname, 'errors.config.ts');
+  const content = fs.readFileSync(configPath, 'utf-8');
+  const comments = new Map<string, string>();
+
+  // Match JSDoc comments followed by error name
+  const commentPattern = /\/\*\*?\s*([\s\S]*?)\s*\*\/\s*(\w+):/g;
+  let match;
+
+  while ((match = commentPattern.exec(content)) !== null) {
+    const commentText = match[1]
+      .split('\n')
+      .map((line) => line.replace(/^\s*\*\s?/, '').trim())
+      .filter((line) => line.length > 0)
+      .join('\n * ');
+    const errorName = match[2];
+    comments.set(errorName, commentText);
+  }
+
+  return comments;
+}
+
+function generateCppEnum() {
+  const comments = extractComments();
+
+  // Filter out ExecuTorch mapped errors (0x00-0x32) for C++
+  const execuTorchErrorCodes = new Set([
+    'Ok',
+    'Internal',
+    'InvalidState',
+    'EndOfMethod',
+    'NotSupported',
+    'NotImplemented',
+    'InvalidArgument',
+    'InvalidType',
+    'OperatorMissing',
+    'NotFound',
+    'MemoryAllocationFailed',
+    'AccessFailed',
+    'InvalidProgram',
+    'InvalidExternalData',
+    'OutOfResources',
+    'DelegateInvalidCompatibility',
+    'DelegateMemoryAllocationFailed',
+    'DelegateInvalidHandle',
+  ]);
+
+  const entries = Object.entries(errorDefinitions)
+    .filter(([name]) => !execuTorchErrorCodes.has(name))
+    .map(([name, code]) => {
+      const comment = comments.get(name);
+      if (comment) {
+        return `  /**\n   * ${comment}\n   */\n  ${name} = ${code},`;
+      }
+      return `  ${name} = ${code},`;
+    })
+    .join('\n');
+
+  const cpp = `#pragma once
+
+// Auto-generated from scripts/errors.config.ts
+// DO NOT EDIT MANUALLY - Run 'yarn codegen:errors' to regenerate
+
+#include <cstdint>
+
+namespace rnexecutorch {
+
+enum class RnExecutorchErrorCode : int32_t {
+${entries}
+};
+
+} // namespace rnexecutorch
+`;
+
+  const outputPath = path.join(
+    REPO_ROOT,
+    'packages/react-native-executorch/common/rnexecutorch/ErrorCodes.h'
+  );
+  fs.writeFileSync(outputPath, cpp);
+  console.log(`Generated C++ enum: ${outputPath}`);
+}
+
+function generateTypeScriptEnum() {
+  const comments = extractComments();
+  const entries = Object.entries(errorDefinitions)
+    .map(([name, code]) => {
+      const comment = comments.get(name);
+      if (comment) {
+        return `  /**\n   * ${comment}\n   */\n  ${name} = ${code},`;
+      }
+      return `  ${name} = ${code},`;
+    })
+    .join('\n');
+
+  const ts = `// Auto-generated from scripts/errors.config.ts
+// DO NOT EDIT MANUALLY - Run 'yarn codegen:errors' to regenerate
+
+export enum RnExecutorchErrorCode {
+${entries}
+}
+`;
+
+  const outputPath = path.join(
+    REPO_ROOT,
+    'packages/react-native-executorch/src/errors/ErrorCodes.ts'
+  );
+  fs.writeFileSync(outputPath, ts);
+  console.log(`Generated TypeScript enum: ${outputPath}`);
+}
+
+function main() {
+  console.log('Generating error code enums...\n');
+  generateCppEnum();
+  generateTypeScriptEnum();
+  console.log('\n✨ Done!');
+}
+
+main();
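Since `ErrorCodes.h` and `ErrorCodes.ts` are generated artifacts, a stale checkout can silently disagree with `scripts/errors.config.ts`. A small regression test pinning a few values would catch that in CI; a sketch follows (hypothetical test file, assuming Jest is available in the repo; the pinned values come from errors.config.ts above):

import { RnExecutorchErrorCode } from 'react-native-executorch';

// If codegen output drifts from errors.config.ts, these assertions fail.
describe('generated error codes', () => {
  it('stays in sync with errors.config.ts', () => {
    expect(RnExecutorchErrorCode.UnknownError).toBe(0x65);
    expect(RnExecutorchErrorCode.DownloadInterrupted).toBe(0x76);
    expect(RnExecutorchErrorCode.ResourceFetcherDownloadFailed).toBe(0xb4);
  });
});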