I'm implementing the online (cloud) version of text recognition with NativeScript Core and Firebase ML Kit, using the nativescript-camera plugin (I don't know if there's a better plugin for this).
At the moment I have a button with this event:
exports.onCapture = function () {
    if (camera.isAvailable()) {
        var options = { width: 300, height: 300, keepAspectRatio: false, saveToGallery: false };
        camera.takePicture(options)
            .then(function (imageAsset) {
                getTextFromPhotoCloud("HOW TO CONVERT imageAsset TO IMAGESOURCE");
            }).catch(function (err) {
                console.log("Error -> " + err.message);
            });
    }
};
and this code for ML Kit:
function getTextFromPhotoCloud(imageSource) {
    var firebase = require("nativescript-plugin-firebase");
    firebase.mlkit.textrecognition.recognizeTextCloud({
        image: imageSource
    }).then(function (result) {
        console.log(result.text ? result.text : "");
    }).catch(function (errorMessage) {
        console.log("ML Kit error: " + errorMessage);
    });
}
How can I convert the camera response to ImageSource format (for ML Kit) without saving it to the gallery?
Is there a better plugin or approach for the camera? Currently I have to launch the camera app, take the photo, and accept the preview before ML Kit runs. Could there be something more integrated in the app (something that doesn't need three actions for each photo) that can connect to the online mode of ML Kit? Something like this code, but working with the cloud method and not in real time:
<MLKitTextRecognition
class="my-class"
width="260"
height="380"
processEveryNthFrame="10"
preferFrontCamera="false"
[pause]="pause"
[torchOn]="torchOn"
(scanResult)="onTextRecognitionResult($event)">
</MLKitTextRecognition>
Use the fromAsset method from the image-source module:
import { fromAsset } from "tns-core-modules/image-source";

fromAsset(imageAsset)
    .then((imageSource) => {
        // ...
    });
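
For reference, here is a minimal sketch of how this can plug into the onCapture handler from the question (it assumes the same options object and the getTextFromPhotoCloud function shown above):

    // Sketch: convert the camera's ImageAsset to an ImageSource, then pass it
    // to the cloud text recognition function defined in the question.
    var camera = require("nativescript-camera");
    var imageSourceModule = require("tns-core-modules/image-source");

    exports.onCapture = function () {
        if (camera.isAvailable()) {
            var options = { width: 300, height: 300, keepAspectRatio: false, saveToGallery: false };
            camera.takePicture(options)
                .then(function (imageAsset) {
                    // fromAsset returns a Promise<ImageSource>; nothing is written to the gallery.
                    return imageSourceModule.fromAsset(imageAsset);
                })
                .then(function (imageSource) {
                    getTextFromPhotoCloud(imageSource);
                })
                .catch(function (err) {
                    console.log("Error -> " + err.message);
                });
        }
    };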