This repository was archived by the owner on Apr 4, 2023. It is now read-only.

Commit bfab06c
#699 Add ML Kit support (doc & demo ++)
1 parent 22c5041 commit bfab06c

6 files changed: +201 −57 lines changed
demo-ng/app/tabs/mlkit/barcodescanning/barcodescanning.component.ts

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@ export class BarcodeScanningComponent {
     format: string;
   }>;
 
-  onBarcodeScanResult(event): void {
+  onBarcodeScanResult(event: any): void {
     const result: MLKitScanBarcodesOnDeviceResult = event.value;
     this.barcodes = result.barcodes;
   }
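For context, the handler above sits in a component along these lines; the `@Component` metadata and the `value` field are assumptions, the rest is as shown in the diff:

```typescript
// Sketch of the surrounding component; decorator metadata is assumed.
import { Component } from "@angular/core";
import { MLKitScanBarcodesOnDeviceResult } from "nativescript-plugin-firebase/mlkit/barcodescanning";

@Component({
  selector: "BarcodeScanning",
  moduleId: module.id,
  templateUrl: "./barcodescanning.component.html"
})
export class BarcodeScanningComponent {
  barcodes: Array<{
    value: string;   // assumed; the diff context starts at 'format'
    format: string;
  }>;

  // Typing the event as 'any' (this commit's change) satisfies stricter
  // TypeScript compiler settings.
  onBarcodeScanResult(event: any): void {
    const result: MLKitScanBarcodesOnDeviceResult = event.value;
    this.barcodes = result.barcodes;
  }
}
```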

demo-ng/app/tabs/mlkit/imagelabeling/imagelabeling.component.html

Lines changed: 2 additions & 1 deletion
@@ -8,7 +8,8 @@
     row="0"
     width="260"
     height="340"
-    (scanResult)="onImageLabeledResult($event)">
+    confidenceThreshold="0.6"
+    (scanResult)="onImageLabelingResult($event)">
 </MLKitImageLabeling>
 
 <ListView row="1" [items]="labels" class="m-t-20">

demo-ng/app/tabs/mlkit/imagelabeling/imagelabeling.component.ts

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@ export class ImageLabelingComponent {
     confidence: number;
   }>;
 
-  onImageLabeledResult(scanResult: any): void {
+  onImageLabelingResult(scanResult: any): void {
     const value: MLKitImageLabelingOnDeviceResult = scanResult.value;
     this.labels = value.labels;
   }

demo-ng/app/tabs/mlkit/mlkit.component.ts

Lines changed: 4 additions & 2 deletions
@@ -201,7 +201,8 @@ export class MLKitComponent {
 
   private recognizeLandmarkCloud(imageSource: ImageSource): void {
     firebase.mlkit.landmarkrecognition.recognizeLandmarksCloud({
-      image: imageSource
+      image: imageSource,
+      maxResults: 8
     }).then(
         (result: MLKitLandmarkRecognitionCloudResult) => {
           alert({
@@ -261,7 +262,8 @@ export class MLKitComponent {
   private labelImageCloud(imageSource: ImageSource): void {
     firebase.mlkit.imagelabeling.labelImageCloud({
       image: imageSource,
-      confidenceThreshold: 0.3
+      modelType: "stable",
+      maxResults: 5
     }).then(
         (result: MLKitImageLabelingCloudResult) => {
           alert({
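Both hunks truncate inside the `then` callback; for reference, here is a self-contained sketch of the updated cloud image-labeling call with the options this commit adds, logging the result instead of showing the demo's alert:

```typescript
import { ImageSource } from "tns-core-modules/image-source";
import { MLKitImageLabelingCloudResult } from "nativescript-plugin-firebase/mlkit/imagelabeling";
const firebase = require("nativescript-plugin-firebase");

// Sketch: same options as the diff above, simplified result handling.
function labelImageCloud(imageSource: ImageSource): void {
  firebase.mlkit.imagelabeling.labelImageCloud({
    image: imageSource,
    modelType: "stable", // either "latest" or "stable" (default "stable")
    maxResults: 5        // default 10
  }).then(
      (result: MLKitImageLabelingCloudResult) => console.log(JSON.stringify(result.labels)),
      errorMessage => console.log("ML Kit error: " + errorMessage)
  );
}
```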

demo/app/main-page.ts

Lines changed: 0 additions & 42 deletions
@@ -1,9 +1,6 @@
 import * as observable from 'tns-core-modules/data/observable';
 import * as pages from 'tns-core-modules/ui/page';
 import { HelloWorldModel } from './main-view-model';
-import { MLKitRecognizeTextResult } from "nativescript-plugin-firebase/mlkit/textrecognition";
-import { MLKitScanBarcodesResult } from "nativescript-plugin-firebase/mlkit/barcodescanning";
-import { MLKitDetectFacesResult } from "nativescript-plugin-firebase/mlkit/facedetection";
 
 const model = new HelloWorldModel();
 
@@ -13,42 +10,3 @@ export function pageLoaded(args: observable.EventData) {
   let page = <pages.Page>args.object;
   page.bindingContext = model;
 }
-
-export function onBarcodesScanResult(scanResult: any) {
-  const value: MLKitScanBarcodesResult = scanResult.value;
-  // if multiple barcodes are returned, this will show the last one ;)
-  value.barcodes.forEach(barcode => {
-    model.set("barcodeValue", barcode.value);
-    model.set("barcodeFormat", barcode.format);
-  });
-}
-
-export function onTextRecognitionResult(scanResult: any) {
-  const value: MLKitRecognizeTextResult = scanResult.value;
-  model.set("textValue", value.features.map(feature => feature.text).join("\n\n"));
-}
-
-export function onFaceDetectionResult(scanResult: any) {
-  const value: MLKitDetectFacesResult = scanResult.value;
-  if (value.faces.length > 0) {
-    let allSmilingAndEyesOpen = true;
-    value.faces.forEach(face => {
-      allSmilingAndEyesOpen = allSmilingAndEyesOpen && face.smilingProbability && face.leftEyeOpenProbability && face.rightEyeOpenProbability &&
-          face.smilingProbability > 0.7 && face.leftEyeOpenProbability > 0.7 && face.rightEyeOpenProbability > 0.7;
-    });
-    model.set("allOK", `All smiling and eyes open? ${allSmilingAndEyesOpen ? 'Yes, screen grabbed:' : 'Nope. Sad.'}`);
-    // model.set("textValue", value.faces.map(face => JSON.stringify(face)).join("\n"));
-    model.set("textValue", value.faces.map(face => `Smiling? ${round(face.smilingProbability)}%\nLeft eye open? ${round(face.leftEyeOpenProbability)}%\nRight eye open? ${round(face.rightEyeOpenProbability)}%`).join("\n\n"));
-
-    if (allSmilingAndEyesOpen && value.imageSource) {
-      model.set("lastMatch", value.imageSource);
-    }
-  }
-}
-
-function round (input) {
-  if (isNaN(input)) {
-    return 0;
-  }
-  return Math.round(input * 100);
-}

docs/ML_KIT.md

Lines changed: 193 additions & 10 deletions
@@ -1,9 +1,3 @@
-Just dumping a few things I should not forget to add to the doc:
-
-- see step 3 @ https://firebase.google.com/docs/ml-kit/android/recognize-text
-- For Cloud processing, enable the Vision API and upgrade your Firebase project to "Blaze" (pay as you go)
-
-
 <img src="https://raw.githubusercontent.com/EddyVerbruggen/nativescript-plugin-firebase/master/docs/images/features/mlkit.png" height="84px" alt="ML Kit"/>
 
 Make sure to check out [this demo app](https://github.com/EddyVerbruggen/nativescript-plugin-firebase/tree/master/demo-ng) because it has almost all ML Kit features this plugin currently supports! Steps:
@@ -22,11 +16,26 @@ In case you're upgrading and you have the `firebase.nativescript.json` file in y
 then clean your platforms folder (`rm -rf platforms`) and build your app again. You will be prompted which Firebase features you'll want to use.
 
 ## ML Kit Features
-There are two ways of using ML Kit: On-device or in the cloud. Depending on the
+There are two ways of using ML Kit:
 
 - *On-device*. These features have been enhanced to not only interpret still images, but you can also run ML against a live camera feed. Why? Because it's fr***ing cool!
 - *Cloud*. The cloud has much larger and always up-to-date models, so results will be more accurate. Since this is a remote service, recognition speed depends heavily on the size of the images you send to the cloud.
 
+### On-device configuration
+Optionally (but recommended) for Android, you can have the relevant ML model(s) downloaded to the device automatically
+after your app is installed from the Play Store. Add this to your `<resources>/Android/AndroidManifest.xml`:
+
+```xml
+<meta-data
+    android:name="com.google.firebase.ml.vision.DEPENDENCIES"
+    android:value="text,face,.." />
+```
+
+Replace `text,face,..` with whichever features you need. So if you only need Text recognition, use `"text"`, but if you want
+to perform Text recognition, Face detection, Barcode scanning, and Image labeling on-device, use `"text,face,barcode,label"`.
+
+Note that (because of how iOS works) we bundle the models you've picked during plugin configuration with your app.
+So if you have a change of heart, re-run the configuration as explained at the top of this document.
 
 ### Cloud configuration
 To be able to use Cloud features you need to do two things:
@@ -79,26 +88,200 @@ import { MLKitRecognizeTextCloudResult } from "nativescript-plugin-firebase/mlki
 const firebase = require("nativescript-plugin-firebase");
 
 firebase.mlkit.textrecognition.recognizeTextCloud({
-  image: imageSource, // a NativeScript Image or ImageSource, see the demo for examples
+  image: imageSource,  // a NativeScript Image or ImageSource, see the demo for examples
   modelType: "latest", // either "latest" or "stable" (default "stable")
-  maxResults: 15 // default 10
+  maxResults: 15       // default 10
 }).then((result: MLKitRecognizeTextCloudResult) => {
   console.log(result.text);
 }).catch(errorMessage => console.log("ML Kit error: " + errorMessage));
 ```
+
 #### Live camera feed
+The exact details of using the live camera view depend on whether you're using Angular / Vue or not.
+
+##### Angular / Vue
+Register a custom element like so in your component/module:
+
+```typescript
+import { registerElement } from "nativescript-angular/element-registry";
+registerElement("MLKitTextRecognition", () => require("nativescript-plugin-firebase/mlkit/textrecognition").MLKitTextRecognition);
+```
+
+Now you're able to use the registered element in the view:
+
+```html
+<MLKitTextRecognition
+    class="my-class"
+    width="260"
+    height="380"
+    processEveryNthFrame="10"
+    (scanResult)="onTextRecognitionResult($event)">
+</MLKitTextRecognition>
+```
+
+You can use any view-related property you like as we're extending `ContentView`.
+So things like `class`, `row`, `width`, `horizontalAlignment`, `style` are all valid properties.
+
+The optional property `processEveryNthFrame` and the optional event `scanResult` are plugin-specific.
+You can set `processEveryNthFrame` to a higher value than the default (5) to put less strain on the device.
+Especially 'Face detection' seems a bit more CPU intensive, but for 'Text recognition' the default is fine.
+
+> Look at [the demo app](https://github.com/EddyVerbruggen/nativescript-plugin-firebase/tree/master/demo-ng) to see how to wire up that `onTextRecognitionResult` function.
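A minimal sketch of such a handler, assuming (as the other demo handlers in this commit do) that the recognition result arrives under `event.value`:

```typescript
// Minimal sketch; the payload shape (result under event.value) mirrors the
// barcode and image-labeling handlers elsewhere in this commit.
export function onTextRecognitionResult(event: any): void {
  console.log("Text recognition result: " + JSON.stringify(event.value));
}
```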
+
+##### XML
+Declare a namespace at the top of the embedding page, and use it somewhere on the page:
+
+```xml
+<Page xmlns:MLKitTextRecognition="nativescript-plugin-firebase/mlkit/textrecognition">
+  <OtherTags/>
+  <MLKitTextRecognition:MLKitTextRecognition
+      class="my-class"
+      width="260"
+      height="380"
+      processEveryNthFrame="3"
+      scanResult="onTextRecognitionResult" />
+  <MoreOtherTags/>
+</Page>
+```
+
+Note that with NativeScript 4 the `Page` tag may actually be a `TabView`, but adding the namespace
+declaration to the TabView works just as well.
+
+Also note that you can use any view-related property you like as we're extending `ContentView`.
+So things like `class`, `row`, `colspan`, `horizontalAlignment`, `style` are all valid properties.
 
 ### [Face detection](https://firebase.google.com/docs/ml-kit/detect-faces)
 <img src="https://raw.githubusercontent.com/EddyVerbruggen/nativescript-plugin-firebase/master/docs/images/features/mlkit_face_detection.png" height="153px" alt="ML Kit - Face detection"/>
 
+#### Still image (on-device)
+
+```typescript
+import { MLKitDetectFacesOnDeviceResult } from "nativescript-plugin-firebase/mlkit/facedetection";
+const firebase = require("nativescript-plugin-firebase");
+
+firebase.mlkit.facedetection.detectFacesOnDevice({
+  image: imageSource // a NativeScript Image or ImageSource, see the demo for examples
+}).then((result: MLKitDetectFacesOnDeviceResult) => { // just look at this type to see what else is returned
+  console.log(JSON.stringify(result.faces));
+}).catch(errorMessage => console.log("ML Kit error: " + errorMessage));
+```
+
+#### Live camera feed
+The basics are explained above for 'Text recognition', so we're only showing the differences here.
+
+```typescript
+import { registerElement } from "nativescript-angular/element-registry";
+registerElement("MLKitFaceDetection", () => require("nativescript-plugin-firebase/mlkit/facedetection").MLKitFaceDetection);
+```
+
+```html
+<MLKitFaceDetection
+    width="260"
+    height="380"
+    (scanResult)="onFaceDetectionResult($event)">
+</MLKitFaceDetection>
+```
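A minimal sketch of the matching handler, reusing the face properties (`smilingProbability`, `leftEyeOpenProbability`, `rightEyeOpenProbability`) and the 0.7 threshold from the code this commit removes from `demo/app/main-page.ts`; the payload shape under `event.value` is assumed to match the other live-feed handlers:

```typescript
import { MLKitDetectFacesOnDeviceResult } from "nativescript-plugin-firebase/mlkit/facedetection";

// Sketch only: probabilities are in the 0..1 range.
export function onFaceDetectionResult(event: any): void {
  const result: MLKitDetectFacesOnDeviceResult = event.value;
  result.faces.forEach(face => {
    const happy = face.smilingProbability > 0.7 &&
        face.leftEyeOpenProbability > 0.7 &&
        face.rightEyeOpenProbability > 0.7;
    console.log(`Smiling with eyes open? ${happy}`);
  });
}
```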
+
 ### [Barcode scanning](https://firebase.google.com/docs/ml-kit/read-barcodes)
 <img src="https://raw.githubusercontent.com/EddyVerbruggen/nativescript-plugin-firebase/master/docs/images/features/mlkit_text_barcode_scanning.png" height="153px" alt="ML Kit - Barcode scanning"/>
 
+#### Still image (on-device)
+
+```typescript
+import { BarcodeFormat, MLKitScanBarcodesOnDeviceResult } from "nativescript-plugin-firebase/mlkit/barcodescanning";
+const firebase = require("nativescript-plugin-firebase");
+
+firebase.mlkit.barcodescanning.scanBarcodesOnDevice({
+  image: imageSource,
+  formats: [BarcodeFormat.QR_CODE, BarcodeFormat.CODABAR] // limit recognition to certain formats (faster), or leave out entirely for all formats (default)
+}).then((result: MLKitScanBarcodesOnDeviceResult) => { // just look at this type to see what else is returned
+  console.log(JSON.stringify(result.barcodes));
+}).catch(errorMessage => console.log("ML Kit error: " + errorMessage));
+```
+
+#### Live camera feed
+The basics are explained above for 'Text recognition', so we're only showing the differences here.
+
+```typescript
+import { registerElement } from "nativescript-angular/element-registry";
+registerElement("MLKitBarcodeScanner", () => require("nativescript-plugin-firebase/mlkit/barcodescanning").MLKitBarcodeScanner);
+```
+
+```html
+<MLKitBarcodeScanner
+    width="260"
+    height="380"
+    formats="QR_CODE, EAN_8, EAN_13"
+    (scanResult)="onBarcodeScanningResult($event)">
+</MLKitBarcodeScanner>
+```
+
 ### [Image labeling](https://firebase.google.com/docs/ml-kit/label-images)
 <img src="https://raw.githubusercontent.com/EddyVerbruggen/nativescript-plugin-firebase/master/docs/images/features/mlkit_text_image_labeling.png" height="153px" alt="ML Kit - Image labeling"/>
 
+#### Still image (on-device)
+
+```typescript
+import { MLKitImageLabelingOnDeviceResult } from "nativescript-plugin-firebase/mlkit/imagelabeling";
+const firebase = require("nativescript-plugin-firebase");
+
+firebase.mlkit.imagelabeling.labelImageOnDevice({
+  image: imageSource,
+  confidenceThreshold: 0.6 // this will only return labels with at least 0.6 (60%) confidence. Default 0.5.
+}).then((result: MLKitImageLabelingOnDeviceResult) => { // just look at this type to see what else is returned
+  console.log(JSON.stringify(result.labels));
+}).catch(errorMessage => console.log("ML Kit error: " + errorMessage));
+```
+
+#### Still image (cloud)
+
+```typescript
+import { MLKitImageLabelingCloudResult } from "nativescript-plugin-firebase/mlkit/imagelabeling";
+const firebase = require("nativescript-plugin-firebase");
+
+firebase.mlkit.imagelabeling.labelImageCloud({
+  image: imageSource,
+  modelType: "stable", // either "latest" or "stable" (default "stable")
+  maxResults: 5 // default 10
+}).then((result: MLKitImageLabelingCloudResult) => { // just look at this type to see what else is returned
+  console.log(JSON.stringify(result.labels));
+}).catch(errorMessage => console.log("ML Kit error: " + errorMessage));
+```
+
+#### Live camera feed
+The basics are explained above for 'Text recognition', so we're only showing the differences here.
+
+```typescript
+import { registerElement } from "nativescript-angular/element-registry";
+registerElement("MLKitImageLabeling", () => require("nativescript-plugin-firebase/mlkit/imagelabeling").MLKitImageLabeling);
+```
+
+```html
+<MLKitImageLabeling
+    width="260"
+    height="380"
+    confidenceThreshold="0.6"
+    (scanResult)="onImageLabelingResult($event)">
+</MLKitImageLabeling>
+```
+
 ### [Landmark recognition](https://firebase.google.com/docs/ml-kit/recognize-landmarks)
 <img src="https://raw.githubusercontent.com/EddyVerbruggen/nativescript-plugin-firebase/master/docs/images/features/mlkit_text_landmark_recognition.png" height="153px" alt="ML Kit - Landmark recognition"/>
 
+#### Still image (cloud)
+
+```typescript
+import { MLKitLandmarkRecognitionCloudResult } from "nativescript-plugin-firebase/mlkit/landmarkrecognition";
+const firebase = require("nativescript-plugin-firebase");
+
+firebase.mlkit.landmarkrecognition.recognizeLandmarksCloud({
+  image: imageSource,
+  modelType: "latest", // either "latest" or "stable" (default "stable")
+  maxResults: 8 // default 10
+}).then((result: MLKitLandmarkRecognitionCloudResult) => { // just look at this type to see what else is returned
+  console.log(JSON.stringify(result.landmarks));
+}).catch(errorMessage => console.log("ML Kit error: " + errorMessage));
+```
+
 ### [Custom model inference](https://firebase.google.com/docs/ml-kit/use-custom-models)
-Coming soon
+Coming soon (probably with plugin version 6.1.0).
