This repository was archived by the owner on Apr 4, 2023. It is now read-only.

Commit c001bfe

Update Firebase Android SDK versions for ML Kit and Firestore (Aug 13 update) #853
1 parent e89f6c2

10 files changed: +102 additions, −89 deletions

demo-ng/app/App_Resources/Android/AndroidManifest.xml

Lines changed: 1 addition & 1 deletion

```diff
@@ -36,7 +36,7 @@
 
 		<meta-data
 			android:name="com.google.firebase.ml.vision.DEPENDENCIES"
-			android:value="text,barcode,face,label" />
+			android:value="ocr,barcode,face,label" />
 
 		<activity
 			android:name="com.tns.NativeScriptActivity"
```

demo-ng/app/tabs/mlkit/mlkit.component.ts

Lines changed: 10 additions & 11 deletions

```diff
@@ -10,10 +10,7 @@ import * as Camera from "nativescript-camera";
 import { BarcodeFormat, MLKitScanBarcodesOnDeviceResult } from "nativescript-plugin-firebase/mlkit/barcodescanning";
 import { MLKitLandmarkRecognitionCloudResult } from "nativescript-plugin-firebase/mlkit/landmarkrecognition";
 import { MLKitDetectFacesOnDeviceResult } from "nativescript-plugin-firebase/mlkit/facedetection";
-import {
-  MLKitRecognizeTextCloudResult,
-  MLKitRecognizeTextOnDeviceResult
-} from "nativescript-plugin-firebase/mlkit/textrecognition";
+import { MLKitRecognizeTextResult } from "nativescript-plugin-firebase/mlkit/textrecognition";
 import {
   MLKitImageLabelingCloudResult,
   MLKitImageLabelingOnDeviceResult
@@ -102,7 +99,7 @@ export class MLKitComponent {
       width: 800,
       height: 800,
       keepAspectRatio: true,
-      saveToGallery: false,
+      saveToGallery: true,
       cameraFacing: "rear"
     }).then(imageAsset => {
       new ImageSource().fromAsset(imageAsset).then(imageSource => {
@@ -182,19 +179,20 @@ export class MLKitComponent {
         this.labelImageCloud(imageSource);
       } else if (pickedItem === "Landmark recognition (cloud)") {
         this.recognizeLandmarkCloud(imageSource);
-      // } else if (pickedItem === "Custom model (on device)") {
-      //   this.customModelOnDevice(imageSource);
+        // } else if (pickedItem === "Custom model (on device)") {
+        //   this.customModelOnDevice(imageSource);
       }
     });
   }
 
   private recognizeTextOnDevice(imageSource: ImageSource): void {
     firebase.mlkit.textrecognition.recognizeTextOnDevice({
       image: imageSource
-    }).then((result: MLKitRecognizeTextOnDeviceResult) => {
+    }).then((result: MLKitRecognizeTextResult) => {
+      console.log("recognizeTextOnDevice result: " + JSON.stringify(result));
       alert({
         title: `Result`,
-        message: result.blocks.map(block => block.text).join(""),
+        message: result.text ? result.text : "",
         okButtonText: "OK"
       });
     }).catch(errorMessage => console.log("ML Kit error: " + errorMessage));
@@ -206,10 +204,11 @@ export class MLKitComponent {
       modelType: "latest",
       maxResults: 15
     }).then(
-        (result: MLKitRecognizeTextCloudResult) => {
+        (result: MLKitRecognizeTextResult) => {
+          console.log("recognizeTextCloud result: " + JSON.stringify(result));
           alert({
             title: `Result`,
-            message: result.text,
+            message: result.text ? result.text : "",
             okButtonText: "OK"
           });
         })
```
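Since the on-device and cloud recognizers now resolve with the same `MLKitRecognizeTextResult`, the demo's two handlers could share a single result callback. A minimal sketch under that assumption (the helper names are illustrative, not part of the plugin):

```typescript
import { ImageSource } from "tns-core-modules/image-source";
import { MLKitRecognizeTextResult } from "nativescript-plugin-firebase/mlkit/textrecognition";
const firebase = require("nativescript-plugin-firebase");

// Illustrative: one callback now covers both recognizers, since both
// resolve with the unified MLKitRecognizeTextResult after this commit.
function logTextResult(result: MLKitRecognizeTextResult): void {
  // 'text' and 'blocks' are optional on the unified type, so guard both.
  const fullText = result.text ? result.text : "";
  const blockCount = result.blocks ? result.blocks.length : 0;
  console.log(`recognized ${blockCount} block(s): ${fullText}`);
}

export function recognizeText(imageSource: ImageSource, cloud: boolean): void {
  const api = firebase.mlkit.textrecognition;
  (cloud ? api.recognizeTextCloud({ image: imageSource })
         : api.recognizeTextOnDevice({ image: imageSource }))
      .then(logTextResult)
      .catch(errorMessage => console.log("ML Kit error: " + errorMessage));
}
```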

demo-ng/app/tabs/mlkit/textrecognition/textrecognition.component.ts

Lines changed: 2 additions & 2 deletions

```diff
@@ -1,6 +1,6 @@
 import { Component } from "@angular/core";
 import {
-  MLKitRecognizeTextOnDeviceResult,
+  MLKitRecognizeTextResult,
   MLKitRecognizeTextResultBlock
 } from "nativescript-plugin-firebase/mlkit/textrecognition";
 
@@ -13,7 +13,7 @@ export class TextRecognitionComponent {
   blocks: Array<MLKitRecognizeTextResultBlock>;
 
   onTextRecognitionResult(scanResult: any): void {
-    const value: MLKitRecognizeTextOnDeviceResult = scanResult.value;
+    const value: MLKitRecognizeTextResult = scanResult.value;
     this.blocks = value.blocks;
   }
 }
```

docs/ML_KIT.md

Lines changed: 8 additions & 8 deletions

````diff
@@ -46,11 +46,11 @@ after your app is installed from the Play Store. Add this to your `<resources>/A
 ```xml
 <meta-data
     android:name="com.google.firebase.ml.vision.DEPENDENCIES"
-    android:value="text,face,.." />
+    android:value="ocr,face,.." />
 ```
 
-Replace `text,label,..` by whichever features you need. So if you only need Text recognitions, use `"text"`, but if you want
-to perform Text recognition, Face detection, Barcode scanning, and Image labeling on-device, use `"text,face,barcode,label"`.
+Replace `ocr,label,..` by whichever features you need. So if you only need Text recognitions, use `"ocr"`, but if you want
+to perform Text recognition, Face detection, Barcode scanning, and Image labeling on-device, use `"ocr,face,barcode,label"`.
 
 Note that (because of how iOS works) we bundle the models you've picked during plugin configuration with your app.
 So if you have a change of heart, re-run the configuration as explained at the top of this document.
@@ -91,26 +91,26 @@ To be able to use Cloud features you need to do two things:
 #### Still image (on-device)
 
 ```typescript
-import { MLKitRecognizeTextOnDeviceResult } from "nativescript-plugin-firebase/mlkit/textrecognition";
+import { MLKitRecognizeTextResult } from "nativescript-plugin-firebase/mlkit/textrecognition";
 const firebase = require("nativescript-plugin-firebase");
 
 firebase.mlkit.textrecognition.recognizeTextOnDevice({
   image: imageSource // a NativeScript Image or ImageSource, see the demo for examples
-}).then((result: MLKitRecognizeTextOnDeviceResult) => { // just look at this type to see what else is returned
-  console.log(result.blocks.map(block => block.text).join(""));
+}).then((result: MLKitRecognizeTextResult) => { // just look at this type to see what else is returned
+  console.log(result.text ? result.text : "");
 }).catch(errorMessage => console.log("ML Kit error: " + errorMessage));
 ```
 
 #### Still image (cloud)
 
 ```typescript
-import { MLKitRecognizeTextCloudResult } from "nativescript-plugin-firebase/mlkit/textrecognition";
+import { MLKitRecognizeTextResult } from "nativescript-plugin-firebase/mlkit/textrecognition";
 const firebase = require("nativescript-plugin-firebase");
 
 firebase.mlkit.textrecognition.recognizeTextCloud({
   image: imageSource, // a NativeScript Image or ImageSource, see the demo for examples
 })
-  .then((result: MLKitRecognizeTextCloudResult) => console.log(result.text))
+  .then((result: MLKitRecognizeTextResult) => console.log(result.text ? result.text : ""))
   .catch(errorMessage => console.log("ML Kit error: " + errorMessage));
 ```
````
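Because `text` is optional on the unified result, a caller that previously assembled output from `blocks` can keep doing so as a fallback. A small sketch using only the types shown in this diff:

```typescript
import { MLKitRecognizeTextResult } from "nativescript-plugin-firebase/mlkit/textrecognition";

// Prefer the aggregated 'text', fall back to joining the per-block texts;
// the empty-object result (returned for a null recognition) yields "".
function fullText(result: MLKitRecognizeTextResult): string {
  if (result.text) {
    return result.text;
  }
  return result.blocks ? result.blocks.map(block => block.text).join("\n") : "";
}
```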

publish/scripts/installer.js

Lines changed: 1 addition & 1 deletion

```diff
@@ -239,7 +239,7 @@ function echoAndroidManifestChanges(result) {
     if (isSelected(result.ml_kit)) {
         var selectedFeatures = [];
         if (isSelected(result.ml_kit_text_recognition)) {
-            selectedFeatures.push("text");
+            selectedFeatures.push("ocr");
         }
         if (isSelected(result.ml_kit_barcode_scanning)) {
             selectedFeatures.push("barcode");
```

src/mlkit/mlkit-cameraview.android.ts

Lines changed: 11 additions & 4 deletions

```diff
@@ -185,10 +185,17 @@ export abstract class MLKitCameraView extends MLKitCameraViewBase {
 
         this.lastVisionImage = com.google.firebase.ml.vision.common.FirebaseVisionImage.fromByteBuffer(data, metadata);
 
-        this.detector
-            .detectInImage(this.lastVisionImage)
-            .addOnSuccessListener(onSuccessListener)
-            .addOnFailureListener(onFailureListener);
+        if (this.detector.processImage) {
+          this.detector
+              .processImage(this.lastVisionImage)
+              .addOnSuccessListener(onSuccessListener)
+              .addOnFailureListener(onFailureListener);
+        } else {
+          this.detector
+              .detectInImage(this.lastVisionImage)
+              .addOnSuccessListener(onSuccessListener)
+              .addOnFailureListener(onFailureListener);
+        }
       }
     }));
 
```
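The branch above duck-types the detector: in the updated SDK the text recognizer exposes `processImage`, while the other vision detectors still use `detectInImage`, so the shared camera view probes for the newer method before falling back. The same dispatch as a standalone sketch (the `any`-typed parameters mirror the plugin's untyped native interop):

```typescript
// Prefer the newer processImage() when the native detector provides it,
// otherwise fall back to the pre-update detectInImage().
function runDetector(detector: any, visionImage: any,
                     onSuccess: any, onFailure: any): void {
  const task = detector.processImage
      ? detector.processImage(visionImage)
      : detector.detectInImage(visionImage);
  task.addOnSuccessListener(onSuccess).addOnFailureListener(onFailure);
}
```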

src/mlkit/textrecognition/index.android.ts

Lines changed: 48 additions & 41 deletions

```diff
@@ -1,32 +1,30 @@
 import { ImageSource } from "tns-core-modules/image-source";
 import { MLKitOptions, } from "../";
-import { MLKitRecognizeTextOnDeviceOptions, MLKitRecognizeTextOnDeviceResult } from "./";
+import { MLKitRecognizeTextOnDeviceOptions, MLKitRecognizeTextResult } from "./";
 import { MLKitTextRecognition as MLKitTextRecognitionBase } from "./textrecognition-common";
 import {
   MLKitRecognizeTextCloudOptions,
-  MLKitRecognizeTextCloudResult,
-  MLKitRecognizeTextResultBlock,
-  MLKitRecognizeTextResultLine,
+  MLKitRecognizeTextResultBounds,
   MLKitRecognizeTextResultElement,
-  MLKitRecognizeTextResultBounds
+  MLKitRecognizeTextResultLine
 } from "./index";
 
 declare const com: any;
 
 export class MLKitTextRecognition extends MLKitTextRecognitionBase {
 
-  protected createDetector(): any {
-    return com.google.firebase.ml.vision.FirebaseVision.getInstance().getVisionTextDetector();
+  protected createDetector(): any /* FirebaseVisionTextRecognizer */ {
+    return com.google.firebase.ml.vision.FirebaseVision.getInstance().getOnDeviceTextRecognizer();
   }
 
   protected createSuccessListener(): any {
     return new com.google.android.gms.tasks.OnSuccessListener({
-      onSuccess: textBlocks => {
-        if (textBlocks.getBlocks().size() > 0) {
+      onSuccess: firebaseVisionText => {
+        if (firebaseVisionText.getTextBlocks().size() > 0) {
           this.notify({
             eventName: MLKitTextRecognition.scanResultEvent,
             object: this,
-            value: getOnDeviceResult(textBlocks.getBlocks())
+            value: getResult(firebaseVisionText)
           });
         }
       }
@@ -48,17 +46,29 @@ function boundingBoxToBounds(rect: any): MLKitRecognizeTextResultBounds {
 }
 
 // see https://github.com/firebase/quickstart-android/blob/0f4c86877fc5f771cac95797dffa8bd026dd9dc7/mlkit/app/src/main/java/com/google/firebase/samples/apps/mlkit/textrecognition/TextRecognitionProcessor.java#L62
-function getOnDeviceResult(blocks: any): MLKitRecognizeTextOnDeviceResult {
-  const blks: MLKitRecognizeTextResultBlock[] = [];
+function getResult(firebaseVisionText: any): MLKitRecognizeTextResult {
+  if (firebaseVisionText === null) {
+    return {};
+  }
+
+  const result = <MLKitRecognizeTextResult>{ // TODO rename the return type
+    text: firebaseVisionText.getText(),
+    blocks: [],
+    android: firebaseVisionText
+  };
 
-  for (let i = 0; i < blocks.size(); i++) {
-    const block = blocks.get(i);
-    const lines = block.getLines();
+  for (let i = 0; i < firebaseVisionText.getTextBlocks().size(); i++) {
+    const textBlock = firebaseVisionText.getTextBlocks().get(i);
+    // const blockText: string = textBlock.getText();
+    // const blockConfidence: number = textBlock.getConfidence();
+    const lines = textBlock.getLines();
 
-    const lns: MLKitRecognizeTextResultLine[] = [];
+    const lns: MLKitRecognizeTextResultLine[] = [];
 
     for (let j = 0; j < lines.size(); j++) {
       const line = lines.get(j);
+      // const lineText = line.getText();
+      // const lineConfidence = line.getConfidence();
       const elements = line.getElements();
 
       const elms: MLKitRecognizeTextResultElement[] = [];
@@ -78,36 +88,34 @@ function getOnDeviceResult(blocks: any): MLKitRecognizeTextOnDeviceResult {
       });
     }
 
-    blks.push({
-      text: block.getText(),
-      bounds: boundingBoxToBounds(block.getBoundingBox()),
+    result.blocks.push({
+      text: textBlock.getText(),
+      bounds: boundingBoxToBounds(textBlock.getBoundingBox()),
       lines: lns
     });
   }
 
-  return {
-    blocks: blks
-  };
+  return result;
 }
 
-export function recognizeTextOnDevice(options: MLKitRecognizeTextOnDeviceOptions): Promise<MLKitRecognizeTextOnDeviceResult> {
+export function recognizeTextOnDevice(options: MLKitRecognizeTextOnDeviceOptions): Promise<MLKitRecognizeTextResult> {
   return new Promise((resolve, reject) => {
     try {
-      const firebaseVisionTextDetector = com.google.firebase.ml.vision.FirebaseVision.getInstance().getVisionTextDetector();
+      const firebaseVisionTextRecognizer = com.google.firebase.ml.vision.FirebaseVision.getInstance().getOnDeviceTextRecognizer();
 
       const onSuccessListener = new com.google.android.gms.tasks.OnSuccessListener({
-        onSuccess: textBlocks => {
-          resolve(getOnDeviceResult(textBlocks.getBlocks()));
-          firebaseVisionTextDetector.close();
+        onSuccess: firebaseVisionText => {
+          resolve(getResult(firebaseVisionText));
+          firebaseVisionTextRecognizer.close();
         }
       });
 
      const onFailureListener = new com.google.android.gms.tasks.OnFailureListener({
        onFailure: exception => reject(exception.getMessage())
      });
 
-      firebaseVisionTextDetector
-          .detectInImage(getImage(options))
+      firebaseVisionTextRecognizer
+          .processImage(getImage(options))
          .addOnSuccessListener(onSuccessListener)
          .addOnFailureListener(onFailureListener);
 
@@ -118,32 +126,31 @@ export function recognizeTextOnDevice(options: MLKitRecognizeTextOnDeviceOptions
   });
 }
 
-export function recognizeTextCloud(options: MLKitRecognizeTextCloudOptions): Promise<MLKitRecognizeTextCloudResult> {
+export function recognizeTextCloud(options: MLKitRecognizeTextCloudOptions): Promise<MLKitRecognizeTextResult> {
   return new Promise((resolve, reject) => {
     try {
-      const cloudDetectorOptions =
+      const firebaseVisionCloudTextRecognizerOptions =
          new com.google.firebase.ml.vision.cloud.FirebaseVisionCloudDetectorOptions.Builder()
-              .setModelType(options.modelType === "latest" ? com.google.firebase.ml.vision.cloud.FirebaseVisionCloudDetectorOptions.LATEST_MODEL : com.google.firebase.ml.vision.cloud.FirebaseVisionCloudDetectorOptions.STABLE_MODEL)
-              .setMaxResults(options.maxResults || 10)
+              // TODO see 'setLanguageHints' at https://firebase.google.com/docs/ml-kit/android/recognize-text
+              // .setModelType(options.modelType === "latest" ? com.google.firebase.ml.vision.cloud.FirebaseVisionCloudDetectorOptions.LATEST_MODEL : com.google.firebase.ml.vision.cloud.FirebaseVisionCloudDetectorOptions.STABLE_MODEL)
+              // .setMaxResults(options.maxResults || 10)
              .build();
 
-      const firebaseVisionCloudTextDetector = com.google.firebase.ml.vision.FirebaseVision.getInstance().getVisionCloudTextDetector(cloudDetectorOptions);
+      const firebaseVisionCloudTextRecognizer = com.google.firebase.ml.vision.FirebaseVision.getInstance().getCloudTextRecognizer(firebaseVisionCloudTextRecognizerOptions);
 
      const onSuccessListener = new com.google.android.gms.tasks.OnSuccessListener({
-        onSuccess: firebaseVisionCloudText => {
-          resolve({
-            text: firebaseVisionCloudText ? firebaseVisionCloudText.getText() : null
-          });
-          firebaseVisionCloudTextDetector.close();
+        onSuccess: firebaseVisionText => {
+          resolve(getResult(firebaseVisionText));
+          firebaseVisionCloudTextRecognizer.close();
        }
      });
 
      const onFailureListener = new com.google.android.gms.tasks.OnFailureListener({
        onFailure: exception => reject(exception.getMessage())
      });
 
-      firebaseVisionCloudTextDetector
-          .detectInImage(getImage(options))
+      firebaseVisionCloudTextRecognizer
+          .processImage(getImage(options))
          .addOnSuccessListener(onSuccessListener)
          .addOnFailureListener(onFailureListener);
 
```
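Two details are easy to miss in this file: both success listeners now funnel through the shared `getResult`, and the cloud recognizer's `setModelType`/`setMaxResults` calls are commented out, so the `modelType` and `maxResults` fields of `MLKitRecognizeTextCloudOptions` are effectively ignored on Android as of this commit (the `setLanguageHints` TODO points at the intended follow-up).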

src/mlkit/textrecognition/index.d.ts

Lines changed: 5 additions & 8 deletions

```diff
@@ -28,16 +28,13 @@ export interface MLKitRecognizeTextResultBlock {
   lines: Array<MLKitRecognizeTextResultLine>;
 }
 
-export interface MLKitRecognizeTextOnDeviceResult extends MLKitResult {
-  blocks: Array<MLKitRecognizeTextResultBlock>;
+export interface MLKitRecognizeTextResult extends MLKitResult {
+  text?: string;
+  blocks?: Array<MLKitRecognizeTextResultBlock>;
   ios?: any;
   android?: any;
 }
 
-export interface MLKitRecognizeTextCloudResult extends MLKitResult {
-  text: string;
-}
-
 // TODO see 'setLanguageHints' at https://firebase.google.com/docs/ml-kit/android/recognize-text
 
 export interface MLKitRecognizeTextOnDeviceOptions extends MLKitOptions {
@@ -46,9 +43,9 @@ export interface MLKitRecognizeTextOnDeviceOptions extends MLKitOptions {
 export interface MLKitRecognizeTextCloudOptions extends MLKitCloudOptions {
 }
 
-export declare function recognizeTextOnDevice(options: MLKitRecognizeTextOnDeviceOptions): Promise<MLKitRecognizeTextOnDeviceResult>;
+export declare function recognizeTextOnDevice(options: MLKitRecognizeTextOnDeviceOptions): Promise<MLKitRecognizeTextResult>;
 
-export declare function recognizeTextCloud(options: MLKitRecognizeTextCloudOptions): Promise<MLKitRecognizeTextCloudResult>;
+export declare function recognizeTextCloud(options: MLKitRecognizeTextCloudOptions): Promise<MLKitRecognizeTextResult>;
 
 export declare class MLKitTextRecognition extends MLKitCameraView {
 }
```
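For plugin consumers, the practical upshot of the merged interface is that imports change and both payload fields become optional; an illustrative migration:

```typescript
// Before: import { MLKitRecognizeTextOnDeviceResult, MLKitRecognizeTextCloudResult } ...
import { MLKitRecognizeTextResult } from "nativescript-plugin-firebase/mlkit/textrecognition";

declare const result: MLKitRecognizeTextResult;

// 'blocks' (previously required on the on-device result) and 'text'
// (previously required on the cloud result) are now both optional:
const text: string = result.text || "";
const blocks = result.blocks || [];
```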
