Skip to content

Commit d79ae01

Browse files
authored
@jakmro/standardize naming (#62)
## Description

Standardize naming and fix URLs.

### Type of change

- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
- [ ] Documentation update (improves or adds clarity to existing documentation)

### Tested on

- [x] iOS
- [x] Android

### Checklist

- [x] I have performed a self-review of my code
- [x] I have commented my code, particularly in hard-to-understand areas
- [ ] I have updated the documentation accordingly
- [x] My changes generate no new warnings
1 parent 1499364 commit d79ae01

File tree

16 files changed

+59
-53
lines changed

16 files changed

+59
-53
lines changed

android/src/main/java/com/swmansion/rnexecutorch/Classification.kt

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -32,7 +32,7 @@ class Classification(reactContext: ReactApplicationContext) :
3232
classificationModel.loadModel(modelSource)
3333
promise.resolve(0)
3434
} catch (e: Exception) {
35-
promise.reject(e.message!!, ETError.InvalidModelPath.toString())
35+
promise.reject(e.message!!, ETError.InvalidModelSource.toString())
3636
}
3737
}
3838

android/src/main/java/com/swmansion/rnexecutorch/ETModule.kt

Lines changed: 3 additions & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -17,13 +17,13 @@ class ETModule(reactContext: ReactApplicationContext) : NativeETModuleSpec(react
1717
return NAME
1818
}
1919

20-
override fun loadModule(modelPath: String, promise: Promise) {
20+
override fun loadModule(modelSource: String, promise: Promise) {
2121
Fetcher.downloadModel(
2222
reactApplicationContext,
23-
modelPath,
23+
modelSource,
2424
) { path, error ->
2525
if (error != null) {
26-
promise.reject(error.message!!, ETError.InvalidModelPath.toString())
26+
promise.reject(error.message!!, ETError.InvalidModelSource.toString())
2727
return@downloadModel
2828
}
2929

android/src/main/java/com/swmansion/rnexecutorch/StyleTransfer.kt

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -31,7 +31,7 @@ class StyleTransfer(reactContext: ReactApplicationContext) :
3131
styleTransferModel.loadModel(modelSource)
3232
promise.resolve(0)
3333
} catch (e: Exception) {
34-
promise.reject(e.message!!, ETError.InvalidModelPath.toString())
34+
promise.reject(e.message!!, ETError.InvalidModelSource.toString())
3535
}
3636
}
3737

android/src/main/java/com/swmansion/rnexecutorch/utils/ETError.kt

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -4,7 +4,7 @@ enum class ETError(val code: Int) {
44
UndefinedError(0x65),
55
ModuleNotLoaded(0x66),
66
FileWriteFailed(0x67),
7-
InvalidModelPath(0xff),
7+
InvalidModelSource(0xff),
88

99
// System errors
1010
Ok(0x00),

docs/docs/guides/running-llms.md

Lines changed: 3 additions & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -14,10 +14,10 @@ React Native ExecuTorch supports Llama 3.2 models, including quantized versions.
1414
In order to load a model into the app, you need to run the following code:
1515

1616
```typescript
17-
import { useLLM, LLAMA3_2_1B_URL } from 'react-native-executorch';
17+
import { useLLM, LLAMA3_2_1B } from 'react-native-executorch';
1818

1919
const llama = useLLM({
20-
modelSource: LLAMA3_2_1B_URL,
20+
modelSource: LLAMA3_2_1B,
2121
tokenizer: require('../assets/tokenizer.bin'),
2222
contextWindowLength: 3,
2323
});
@@ -91,7 +91,7 @@ In order to send a message to the model, one can use the following code:
9191

9292
```typescript
9393
const llama = useLLM(
94-
modelSource: LLAMA3_2_1B_URL,
94+
modelSource: LLAMA3_2_1B,
9595
tokenizer: require('../assets/tokenizer.bin'),
9696
);
9797

examples/computer-vision/screens/ClassificationScreen.tsx

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -17,7 +17,7 @@ export const ClassificationScreen = ({
1717
);
1818

1919
const model = useClassification({
20-
modulePath: EFFICIENTNET_V2_S,
20+
modelSource: EFFICIENTNET_V2_S,
2121
});
2222

2323
const handleCameraPress = async (isCamera: boolean) => {

examples/computer-vision/screens/ObjectDetectionScreen.tsx

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -5,7 +5,7 @@ import { getImage } from '../utils';
55
import {
66
Detection,
77
useObjectDetection,
8-
SSDLITE_320_MOBILENET_V3_LARGE_URL,
8+
SSDLITE_320_MOBILENET_V3_LARGE,
99
} from 'react-native-executorch';
1010
import { View, StyleSheet, Image } from 'react-native';
1111
import ImageWithBboxes from '../components/ImageWithBboxes';
@@ -24,7 +24,7 @@ export const ObjectDetectionScreen = ({
2424
}>();
2525

2626
const ssdLite = useObjectDetection({
27-
modelSource: SSDLITE_320_MOBILENET_V3_LARGE_URL,
27+
modelSource: SSDLITE_320_MOBILENET_V3_LARGE,
2828
});
2929

3030
const handleCameraPress = async (isCamera: boolean) => {

examples/computer-vision/screens/StyleTransferScreen.tsx

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -15,7 +15,7 @@ export const StyleTransferScreen = ({
1515
setImageUri: (imageUri: string) => void;
1616
}) => {
1717
const model = useStyleTransfer({
18-
modulePath: STYLE_TRANSFER_CANDY,
18+
modelSource: STYLE_TRANSFER_CANDY,
1919
});
2020

2121
const handleCameraPress = async (isCamera: boolean) => {

examples/llama/screens/ChatScreen.tsx

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -14,7 +14,7 @@ import { SafeAreaView } from 'react-native-safe-area-context';
1414
import SWMIcon from '../assets/icons/swm_icon.svg';
1515
import SendIcon from '../assets/icons/send_icon.svg';
1616
import Spinner from 'react-native-loading-spinner-overlay';
17-
import { LLAMA3_2_1B_QLORA_URL, useLLM } from 'react-native-executorch';
17+
import { LLAMA3_2_1B_QLORA, useLLM } from 'react-native-executorch';
1818
import PauseIcon from '../assets/icons/pause_icon.svg';
1919
import ColorPalette from '../colors';
2020
import Messages from '../components/Messages';
@@ -25,7 +25,7 @@ export default function ChatScreen() {
2525
const [isTextInputFocused, setIsTextInputFocused] = useState(false);
2626
const [userInput, setUserInput] = useState('');
2727
const llama = useLLM({
28-
modelSource: LLAMA3_2_1B_QLORA_URL,
28+
modelSource: LLAMA3_2_1B_QLORA,
2929
tokenizerSource: require('../assets/tokenizer.bin'),
3030
contextWindowLength: 6,
3131
});

ios/RnExecutorch/models/BaseModel.mm

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -13,7 +13,7 @@ - (void)loadModel:(NSURL *)modelURL completion:(void (^)(BOOL success, NSNumber*
1313
module = [[ETModel alloc] init];
1414
[Fetcher fetchResource:modelURL resourceType:ResourceType::MODEL completionHandler:^(NSString *filePath, NSError *error) {
1515
if (error) {
16-
completion(NO, @(InvalidModelPath));
16+
completion(NO, @(InvalidModelSource));
1717
return;
1818
}
1919
NSNumber *result = [self->module loadModel: filePath];

0 commit comments

Comments (0)