I have integrated the Firebase ML Kit SDK
as described in the documentation. Text is detected correctly in photos taken with the back camera,
but when I use a photo captured with the front camera, the text is recognized incorrectly and the results are poor.
@IBAction func findTextDidTouch(_ sender: UIButton) {
    runCloudTextRecognition(with: imageView.image!)
}
func runCloudTextRecognition(with image: UIImage) {
    let visionImage = VisionImage(image: image)
    cloudTextDetector.detect(in: visionImage) { features, error in
        if let error = error {
            print("Received error: \(error)")
            return
        }
        self.processCloudResult(from: features, error: error)
    }
}
func processCloudResult(from features: [VisionText]?, error: Error?) {
    guard let features = features else { return }
    for text in features {
        if let block = text as? VisionTextBlock {
            print(block.text)
        }
    }
}
I am not able to figure this out. Do I need to change something in the camera handling, or in Firebase ML Kit?
You need to provide the image orientation as well. Photos from the front camera are usually mirrored, so their UIImage.imageOrientation is one of the mirrored cases; if you don't pass that orientation to ML Kit, it treats the pixels as upright and recognition suffers. The following code snippet may help:
let visionImage = VisionImage(image: image)
let metadata = VisionImageMetadata()
metadata.orientation = self.detectorOrientation(in: image)
visionImage.metadata = metadata
textDetector.process(visionImage) { features, error in
    if let features = features {
        self.processResult(from: [features], error: error)
    }
}
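Applied to your own code from the question, the change is just the two metadata lines before the detect call. This assumes the cloudTextDetector and processCloudResult you already have, plus the detectorOrientation helper defined below:

func runCloudTextRecognition(with image: UIImage) {
    let visionImage = VisionImage(image: image)
    // Tell ML Kit how the pixels are oriented; without this it assumes
    // .topLeft, which is wrong for mirrored front-camera captures.
    let metadata = VisionImageMetadata()
    metadata.orientation = detectorOrientation(in: image)
    visionImage.metadata = metadata
    cloudTextDetector.detect(in: visionImage) { features, error in
        if let error = error {
            print("Received error: \(error)")
            return
        }
        self.processCloudResult(from: features, error: error)
    }
}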
// Detects the orientation of the selected or captured image
func detectorOrientation(in image: UIImage) -> VisionDetectorImageOrientation {
    switch image.imageOrientation {
    case .up:
        return .topLeft
    case .down:
        return .bottomRight
    case .left:
        return .leftBottom
    case .right:
        return .rightTop
    case .upMirrored:
        return .topRight
    case .downMirrored:
        return .bottomLeft
    case .leftMirrored:
        return .leftTop
    case .rightMirrored:
        return .rightBottom
    @unknown default:
        return .topLeft
    }
}
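Alternatively, if you would rather not map orientations at all, you can redraw the captured image so its pixel data is upright and its orientation becomes .up before passing it to ML Kit. This is only a sketch; normalizedImage is an illustrative helper name, not something from the Firebase SDK:

// Redraws the image into a bitmap context so the pixels match what is
// displayed on screen; the returned image always has orientation .up,
// so no VisionImageMetadata is needed afterwards.
func normalizedImage(_ image: UIImage) -> UIImage {
    guard image.imageOrientation != .up else { return image }
    UIGraphicsBeginImageContextWithOptions(image.size, false, image.scale)
    defer { UIGraphicsEndImageContext() }
    image.draw(in: CGRect(origin: .zero, size: image.size))
    return UIGraphicsGetImageFromCurrentImageContext() ?? image
}

Note that this costs an extra draw pass per image, so passing the orientation metadata as shown above is cheaper if you process many frames.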